input stringlengths 2.65k 237k | output stringclasses 1 value |
|---|---|
from typing import Callable, Tuple
import numpy as np
def student_id():
    """
    Return the student's identification details.

    Return:
        (sid, email) : tuple of two strings -- the student ID and TAU email.
    """
    sid = '123456789'
    email = r'<EMAIL>'
    return sid, email
class QuadraticFunction:
    """
    Quadratic form f(x) = 1/2 * x^T Q x + b^T x with its analytic
    gradient and Hessian.

    Q is not required to be symmetric; the gradient and Hessian use the
    symmetrized matrix 1/2 * (Q + Q^T).
    """

    def __init__(
        self,
        Q: np.ndarray,
        b: np.ndarray
    ) -> None:
        """
        Store the parameters of the quadratic form.

        Arguments:
            Q : (n,n) matrix
            b : (n,) vector
        """
        self.Q = Q
        self.b = b

    def __call__(
        self,
        x: np.ndarray
    ) -> np.ndarray:
        """
        Evaluate f(x) = 1/2 * x^T Q x + b^T x.

        Arguments:
            x : (n,) vector
        Return:
            scalar value of the objective at x
        """
        return 0.5 * (x @ (self.Q @ x)) + self.b @ x

    def grad(
        self,
        x: np.ndarray
    ) -> np.ndarray:
        """
        Evaluate the gradient g(x) = 1/2 * (Q + Q^T) x + b.

        Arguments:
            x : (n,) vector
        Return:
            gx : (n,) vector
        """
        symmetric_part = self.Q.T + self.Q
        return 0.5 * (symmetric_part @ x) + self.b

    def hessian(
        self,
        x: np.ndarray
    ) -> np.ndarray:
        """
        Evaluate the (constant) Hessian h(x) = 1/2 * (Q + Q^T).

        Arguments:
            x : (n,) vector (unused; the Hessian of a quadratic is constant)
        Return:
            hx : (n,n) matrix
        """
        return 0.5 * (self.Q.T + self.Q)
class NewtonOptimizer:
    """
    Newton's method optimizer with a constant step size alpha.

    Each iteration solves H(x_k) d = g(x_k) and updates
    x_{k+1} = x_k - alpha * d.
    """

    def __init__(
        self,
        objective: Callable,
        x_0: np.ndarray,
        alpha: float,
        threshold: float = 1e-10,
        max_iters: int = 100
    ) -> None:
        """
        Arguments:
            objective : callable objective exposing __call__, grad and hessian
            x_0 : initial point, (n,) numpy array
            alpha : a scalar step size
            threshold : (float) stopping criterion |x_k - x_(k-1)| < threshold
            max_iters : (int) maximal number of iterations (stopping criterion)
        """
        self.objective = objective
        self.curr_x = x_0
        self.alpha = alpha
        self.threshold = threshold
        self.max_iters = max_iters

    def step(self):
        """
        Execute a single step of Newton's method.

        Return:
            next_x : (n,) numpy vector, next iterate
            gx : (n,) vector, gradient of f evaluated at the current x
                 (the one used for the step calculation)
            hx : (n,n) matrix, Hessian of f evaluated at the current x
                 (the one used for the step calculation)
        """
        gx = self.objective.grad(self.curr_x)
        hx = self.objective.hessian(self.curr_x)
        # Solve hx * d = gx instead of forming the explicit inverse:
        # cheaper and numerically more stable than np.linalg.inv(hx) @ gx.
        next_x = self.curr_x - self.alpha * np.linalg.solve(hx, gx)
        return next_x, gx, hx

    def optimize(self):
        """
        Run the optimization loop until convergence or max_iters.

        Return:
            fmin : objective function evaluated at x_opt
            minimizer : x_opt
            num_iters : number of iterations executed
        """
        # Track the count explicitly: the original read the loop variable
        # after the loop, which raised NameError when max_iters == 0.
        num_iters = 0
        for _ in range(self.max_iters):
            prev_x = self.curr_x
            self.curr_x, _, _ = self.step()  # update self.curr_x internally
            num_iters += 1
            if np.linalg.norm(self.curr_x - prev_x) < self.threshold:
                break
        return self.objective(self.curr_x), self.curr_x, num_iters
class BFGSOptimizer:
    """
    Implements the Quasi-Newton algorithm, using BFGS algorithm
    for approximating the inverse Hessian.

    Per iteration: direction d = -B*g, step size by Armijo backtracking,
    move, then rank-2 BFGS update of the inverse-Hessian approximation B.
    """
    def __init__(
        self,
        objective: Callable,
        x_0: np.ndarray,
        B_0: np.ndarray,
        alpha_0: float,
        beta: float = 0.2,
        sigma: float = 0.3,
        threshold: float = 1e-10,
        max_iters: int = 100
    ) -> None:
        """
        Arguments:
            objective : callable to an objective function
            x_0 : initial point, (n,) numpy array
            B_0 : initial guess of the inverse Hessian
            alpha_0 : initial step size of Armijo line search (scalar)
            beta : (float) beta parameter of Armijo line search, a float in range (0,1)
            sigma : (float) sigma parameter of Armijo line search, a float in range (0,1)
            threshold : (float) stopping criteria |x_k - x_(k-1)| < threshold
            max_iters : (int) maximal number of iterations (stopping criteria)
        """
        self.objective = objective
        self.init_step_size = alpha_0
        self.beta = beta
        self.sigma = sigma
        self.threshold = threshold
        self.max_iters = max_iters
        # Mutable optimizer state, advanced by step()
        self.curr_x = x_0
        self.curr_inv_hessian = B_0
        self.curr_step = 0
        self.curr_dir = np.zeros_like(x_0)

    def step_dir(self):  # placeholder removed -- see update_dir below
        pass

    def update_dir(self) -> np.ndarray:
        """
        Compute step direction d = -B * g at the current point.

        Return:
            next_d : updated direction, (n,) numpy array
        """
        grad = self.objective.grad(self.curr_x)
        return -np.dot(self.curr_inv_hessian, grad)

    def update_step_size(self) -> np.ndarray:
        """
        Compute the new step size using Backtracking Line Search algorithm (Armijo rule).

        h(alpha) = f(x + alpha*d) - f(x); the Armijo condition accepts alpha
        when h(alpha) <= sigma * g * alpha, where g is the directional
        derivative of f along d at alpha = 0.

        NOTE(review): if g == 0 (e.g. the gradient vanishes exactly), the
        expansion branch below never terminates, since h and sigma*g*alpha
        both stay 0 -- confirm callers stop before reaching a stationary point.

        Return:
            step_size : scalar
        """
        # Initialize
        curr_step = self.init_step_size
        objective_1d = lambda alpha: self.objective(self.curr_x + alpha*self.curr_dir) # restrict f to a line
        h = lambda alpha: objective_1d(alpha) - objective_1d(0)
        g = np.dot(self.objective.grad(self.curr_x), self.curr_dir) # Note: d_phi(alpha)/dalpha = g(x+alpha*d)^T*d, alpha=0
        if h(curr_step) > self.sigma * g * curr_step:
            # Initial step violates Armijo: shrink until it holds.
            while h(curr_step) > self.sigma * g * curr_step:
                prev_step = curr_step
                curr_step = self.beta * curr_step
            step_size = curr_step
        else:
            # Initial step satisfies Armijo: expand while it still holds,
            # then return the last step that passed.
            while h(curr_step) <= self.sigma * g * curr_step:
                prev_step = curr_step
                curr_step = curr_step / self.beta
            step_size = prev_step
        return step_size

    def update_x(self) -> np.ndarray:
        """
        Take a step of size curr_step in direction dk.

        Return:
            next_x : updated point, (n,) numpy array
        """
        return self.curr_x + self.curr_step * self.curr_dir

    def update_inv_hessian(
        self,
        prev_x: np.ndarray
    ) -> np.ndarray:
        """
        Take a rank 2 BFGS update step to update the inverse Hessian.

        Uses p (position change) and q (gradient change). Divisions by
        mu = p.q and tau = q.B.q assume both are nonzero; the curvature
        condition p.q > 0 is not checked here.

        Arguments:
            prev_x : previous point, (n,) numpy array
        Return:
            next_inv_hessian : updated inverse Hessian, (n,n) numpy array
        """
        p = self.curr_x - prev_x
        q = self.objective.grad(self.curr_x) - self.objective.grad(prev_x)
        s = np.dot(self.curr_inv_hessian, q)
        tau = np.dot(s, q)
        mu = np.dot(p, q)
        v = p / mu - s / tau
        next_inv_hessian = self.curr_inv_hessian + np.outer(p, p) / mu - np.outer(s, s) / tau + tau * np.outer(v, v)
        return next_inv_hessian

    def step(self):
        """
        Executes a single step of Quasi-Newton algorithm.

        Order matters: direction uses the current B, step size uses the new
        direction, and the inverse-Hessian update uses the new x.

        Return:
            next_x : updated point, (n,) numpy array
            next_d : vector, updated direction
            next_step_size : scalar
            next_inv_hessian : (n,n) numpy array, approximator of the inverse Hessian matrix
        """
        self.curr_dir = self.update_dir()
        self.curr_step = self.update_step_size()
        prev_x = self.curr_x
        self.curr_x = self.update_x()
        self.curr_inv_hessian = self.update_inv_hessian(prev_x)
        return self.curr_x, self.curr_dir, self.curr_step, self.curr_inv_hessian

    def optimize(self) -> Tuple:
        """
        Execution of optimization flow.

        Return:
            fmin : objective function evaluated at x_opt
            minimizer : x_opt
            num_iters : number of iterations until convergence
        """
        for iter_idx in range(self.max_iters):
            prev_x = self.curr_x
            _ = self.step()  # update self.curr_x internally
            if np.linalg.norm(self.curr_x - prev_x) < self.threshold:
                break
        return self.objective(self.curr_x), self.curr_x, iter_idx+1
class TotalVariationObjective:
"""
Implementation of a Total Variation objective function (MSE + TV) for image denoising.
"""
def __init__(
self,
src_img: np.ndarray,
mu: float = 1e-3,
eps: float = 1e-8
) -> None:
"""
Initializes a FastRoute problem
Arguments:
src_img : (n,m) matrix, input noisy image
mu : regularization parameter, determines the weight of total variation term
eps : small number for numerical stability
"""
self.src_img = src_img
self.mu = mu
self.eps = eps
def __call__(
self,
img: np.ndarray
) -> np.ndarray:
"""
Compute TV objective.
Arguments:
img : (nxm,) vector, denoised image
Return:
total_variation : scalar, objective's value
"""
# Convert vector to matrix
img = self._vec_to_mat(img)
mse = np.square(self.src_img - img).mean()
row_diff, col_diff = self._diff(img)
row_diff_pad = np.pad(row_diff, ((0,1), (0,0)), 'constant', constant_values=0)
col_diff_pad = np.pad(col_diff, ((0,0), (0,1)), 'constant', constant_values=0)
total_variation = np.sqrt(row_diff_pad**2 + col_diff_pad**2 + self.eps).sum()
return mse + self.mu * total_variation
    def grad(
        self,
        img: np.ndarray
    ) -> np.ndarray:
        """
        Evaluates the gradient of the objective (MSE term + TV term).

        Arguments:
            img : (nxm,) row-major vector, denoised image
        Return:
            grad : (nxm,) row-major vector, the objective's gradient
        """
        # Convert vector to matrix
        img = self._vec_to_mat(img)
        # MSE term: d/d_img of mean((src - img)^2)
        mse_grad = -2*(self.src_img - img) / img.size
        # TV terms: each pixel contributes to up to three sqrt terms of the
        # padded difference field; term_1..term_3 gather those contributions.
        # NOTE(review): the slice offsets assume the padded layout produced by
        # _diff(pad=True), which is not fully visible in this excerpt --
        # verify the offsets against _diff's padding convention.
        eps = self.eps
        row_diff, col_diff = self._diff(img, pad=True)
        term_1 = row_diff[:-1,1:-1] / np.sqrt(row_diff[:-1,1:-1]**2 + col_diff[:-2,1:]**2 + eps)
        term_2 = (row_diff[1:,1:-1] + col_diff[1:-1,1:]) / np.sqrt(row_diff[1:,1:-1]**2 + col_diff[1:-1,1:]**2 + eps)
        term_3 = col_diff[1:-1,:-1] / np.sqrt(row_diff[1:,:-2]**2 + col_diff[1:-1,:-1]**2 + eps)
        grad = mse_grad + self.mu * (term_1 - term_2 + term_3)
        # Return in the same flat row-major layout as the input
        return self._mat_to_vec(grad)
    def _mat_to_vec(
        self,
        img: np.ndarray
    ) -> np.ndarray:
        """
        Converts an (n,m) image matrix to a row-major (n*m,) vector.
        """
        return img.flatten()
    def _vec_to_mat(
        self,
        img: np.ndarray
    ) -> np.ndarray:
        """
        Converts a row-major (n*m,) vector back to an (n,m) matrix with the
        same shape as the source image.
        """
        return img.reshape(self.src_img.shape)
def _diff(
self,
x: np.ndarray,
pad: bool = False
) -> Tuple:
"""
Calculate the row and collumn difference arrays
Arguments:
x : (n,m) matrix
pad : False, zero padding on all boundaries
Return:
row_diff : row_diff[i,:] = x[i+1,:] - x[i,:],
output dimensions:
pad is False: (n-1,m)
pad is True: | |
300
for (sstm, count) in zip(self.sst, range(len(self.sst))):
fullfile = os.path.join(self.file_store, '{:}{:}{:}'.format('particle', str(count + 1), '.dat'))
with open(fullfile, 'w') as out:
frag_count = 1
out.write('{:>12d}\n'.format(frag_count))
out.write('{:>21f}{:>21f}\n'.format(self.temperature, 0))
tmplte = '{:<10}{:<24f}{:<24f}{:<24f}\n'
for prt in sstm.particles:
out.write(tmplte.format(prt.type.name, prt.x, prt.y, prt.z))
self.frag_file.append(fullfile)
    def make_system(self, text_output):
        """
        Rebuild a pysimm system from the coordinate lines of a CASSANDRA
        text output by matching consecutive chunks of lines against the
        known species templates in self.sst.

        Arguments:
            text_output : list of coordinate-record strings
        Return:
            sstm : assembled pysimm System
        """
        sstm = system.System(ff_class='1')
        count = 0  # counter of the lines in the input file
        sys_idx = 0  # counter of the gas molecules to lookup
        while count < len(text_output) - 1:
            tmp = self.sst[sys_idx].copy()
            # Take exactly as many lines as the candidate species has particles
            dictn = text_output[count:(len(tmp.particles) + count)]
            if self.__fit_atoms__(tmp, dictn):
                for p in tmp.particles:
                    vals = dictn[p.tag - 1].split()
                    # Read the coordinates from the text output of the CASSANDRA simulation
                    p.x, p.y, p.z = map(float, vals[1:4])
                    # Force velocities of the particles to be 0
                    p.vx, p.vy, p.vz = 0.0, 0.0, 0.0
                    p.molecule.syst_tag = 0
                if self.is_rigid[sys_idx]:
                    for p in tmp.particles:
                        p.is_rigid = True
                sstm.add(tmp)
                self.made_ins[sys_idx] += 1
                count += len(tmp.particles)
                sys_idx = 0  # matched: restart the species search for the next chunk
            else:
                # No match: try the next known species for the same chunk
                sys_idx += 1
                if sys_idx >= len(self.sst):
                    self.logger.error('Wasn\'t able to read CASSANDRA .chk file. '
                                      'Please check either MC-simulation provided to PySIMM or the CASSANDRA '
                                      'checkpoint file ')
                    exit(1)
        sstm.update_tags()
        sstm.objectify()
        return sstm
def __fit_atoms__(self, molec, text_lines):
flag = True
# Cannot map anything if number of molecules is different from number of data lines
if len(molec.particles) != len(text_lines):
return False
# Check the sequence of element names they
for p in molec.particles:
vals = text_lines[p.tag - 1].split()
if vals[0] != p.type.elem:
return False
return flag
class Cassandra(object):
    """
    pysimm.cassandra.Cassandra

    Organizational object for CASSANDRA simulations that is able to run
    e.g. Gibbs Canonical Monte-Carlo (GCMC) simulations (see the GCMC class)
    """

    def __init__(self, init_sst, **kwargs):
        """
        Arguments:
            init_sst : initial pysimm system the queued simulations start from
        """
        self.logger = logging.getLogger('CSNDRA')
        self.init_sst = init_sst
        self.final_sst = None   # result of the last successfully run task
        self.run_queue = []     # GCMC tasks queued by add_gcmc()

    def run(self, is_replace=False):
        """
        Sequentially execute every queued simulation task: write its input
        files, invoke the CASSANDRA executable and collect the result system.
        """
        global CASSANDRA_EXEC
        for task in self.run_queue:
            # Write .inp file
            task.write()
            # Write .xyz of the fixed system if provided
            if task.fxd_sst.particles.count > 0:
                if task.fixed_syst_mcf_file is not None:
                    McfWriter(task.fxd_sst, task.fixed_syst_mcf_file).write('atoms')
                task.fxd_sst.write_xyz(task.fxd_sst_xyz)
            try:
                self.logger.info('Starting the GCMC simulations with CASSANDRA')
                print('{:.^60}'.format(''))
                subprocess.call([CASSANDRA_EXEC, task.props_file])
                task.upd_simulation()
                self.final_sst = task.tot_sst
            except OSError:
                self.logger.error('There was a problem calling CASSANDRA executable')
                exit(1)
            except IOError:
                # NOTE(review): on Python 3 IOError is an alias of OSError, so
                # this handler is shadowed by the clause above; kept for the
                # original (Python 2 era) intent.
                if check_cs_exec():
                    self.logger.error('There was a problem running CASSANDRA. The process started but did not finish')
                    exit(1)
                else:
                    self.logger.error('There was a problem running CASSANDRA: seems it is not configured properly.\n'
                                      'Please, be sure the CSNDRA_EXEC environment variable is set to the correct '
                                      'CASSANDRA executable path. The current path is set to:'
                                      '\n\n{}\n\n'.format(CASSANDRA_EXEC))
                    exit(1)

    def add_gcmc(self, obj1=None, **kwargs):
        """
        Queue a GCMC task: either a ready GCMC object or keyword parameters
        ('species', 'chem_pot', 'max_ins', 'is_rigid') to build one.
        Passing is_new=True clears the queue first.
        """
        new_job = None
        if isinstance(obj1, GCMC):
            new_job = obj1
        else:
            specs = kwargs.get('species')
            if specs:
                mc_sst = McSystem(specs, kwargs.get('chem_pot'),
                                  max_ins=kwargs.get('max_ins'),
                                  is_rigid=kwargs.get('is_rigid'))
                new_job = GCMC(mc_sst, self.init_sst, **kwargs)
            else:
                self.logger.error('Unknown GCMC initialization. Please provide either '
                                  'correct GCMC parameters or GCMC simulation object')
                exit(1)
        if kwargs.get('is_new'):
            self.run_queue[:] = []
        if new_job:
            new_job.__check_params__()
            self.run_queue.append(new_job)

    def read_input(self, inp_file):
        """
        Parse a CASSANDRA .inp file into a dict keyed by section name.

        Return:
            tmp_dict : {section_keyword: parsed_value}; empty if file missing
        """
        tmp_dict = {}
        if os.path.isfile(inp_file):
            self.logger.info('Reading simulation parameters from {:} file'.format(inp_file))
            # Reading the cassandra .inp file as one long string; 'with'
            # guarantees the handle is closed even on a parse error.
            with open(inp_file, 'r') as inp_stream:
                lines = inp_stream.read()
            raw_props = lines.split('#')
            for prop in raw_props:
                line = re.sub(r'\n!.*', '', prop)  # Get rid of the CASSANDRA comments
                line = re.sub(r'\n(e|E)(n|N)(d|D)', '', line)  # Get rid of the 'END' in the end of the file
                tmp = line.split()
                if len(tmp) > 1:
                    tmp_dict[tmp[0]] = self.__parse_value__(tmp)
            self.logger.info('Reading finished sucsessfully')
        else:
            self.logger.error('Cannot find specified file: ""{:}""'.format(inp_file))
        return tmp_dict

    def __parse_value__(self, cells):
        """
        Convert one tokenized .inp section into a typed value keyed by the
        (case-insensitive) section title in cells[0].
        """
        title = cells[0].lower()
        if title == 'run_type':
            return OrderedDict([('type', cells[1]), ('steps', int(cells[2]))])
        elif title == 'charge_style':
            return OrderedDict([('type', cells[1]),
                                ('sum_type', cells[2]),
                                ('cut_val', float(cells[3])),
                                ('accuracy', float(cells[4]))])
        elif title == 'vdw_style':
            return OrderedDict([('type', cells[1]),
                                ('cut_type', cells[2]),
                                ('cut_val', float(cells[3]))])
        elif title == 'simulation_length_info':
            tmp = OrderedDict([('units', cells[2]),
                               ('prop_freq', int(cells[4])),
                               ('coord_freq', int(cells[6])),
                               ('run', int(cells[8]))])
            if len(cells) > 10:
                tmp['steps_per_sweep'] = int(cells[10])
            if len(cells) > 12:
                tmp['block_averages'] = int(cells[12])
            return tmp
        elif title == 'cbmc_info':
            return OrderedDict([('kappa_ins', int(cells[2])),
                                ('kappa_dih', int(cells[4])),
                                ('rcut_cbmc', float(cells[6]))])
        elif title == 'box_info':
            size = float(cells[3])
            if len(cells) > 6:
                size = [float(cells[3]), float(cells[4]), float(cells[5])]
            return OrderedDict([('box_count', int(cells[1])), ('box_type', cells[2]), ('box_size', size)])
        elif title == 'prob_translation':
            vals = [float(c) for c in cells[2:]]
            return OrderedDict([('tot_prob', float(cells[1])),
                                ('limit_vals', vals)])
        elif title == 'prob_insertion':
            return OrderedDict([('tot_prob', float(cells[1])),
                                ('types', list(cells[2:]))])
        elif title == 'prob_rotation':
            vals = [float(c) for c in cells[2:]]
            return OrderedDict([('tot_prob', float(cells[1])),
                                ('limit_vals', vals)])
        elif (title == 'molecule_files') or (title == 'fragment_files'):
            tmp = OrderedDict()
            # Bug fix: use floor division; 'len(cells) / 2' is a float in
            # Python 3 and made range() raise TypeError.
            for i, c in zip(range(1, len(cells) - 1, 2), range(1, 1 + len(cells) // 2)):
                tmp['file' + str(c)] = [cells[i], int(cells[i + 1])]
            return tmp
        elif title == 'start_type':
            if cells[1] == 'read_config':
                specs = [int(c) for c in cells[2:-1]]
                return OrderedDict([('start_type', 'read_config'),
                                    ('species', specs),
                                    ('file_name', cells[-1])])
            if cells[1] == 'make_config':
                specs = [int(c) for c in cells[2:]]
                return OrderedDict([('start_type', 'make_config'),
                                    ('species', specs),
                                    ('file_name', '')])
            if cells[1] == 'add to config':
                self.logger.error('Sorry, \'add to config\' regime of ''Start_Type option is not supported yet')
                exit(1)
            if cells[1] == 'checkpoint':
                self.logger.error('Sorry, \'checkpoint\' regime of ''Start_Type option is not supported yet ')
                exit(1)
        elif title == 'property_info':
            if int(cells[1]) == 1:
                tmp = OrderedDict()
                for i in range(2, len(cells)):
                    tmp['prop' + str(i - 1)] = str.lower(cells[i])
                return tmp
        elif title == 'seed_info':
            return [int(cells[1]), int(cells[2])]
        elif (title == 'prob_deletion') or (title == 'rcutoff_low') or \
                (title == 'bond_prob_cutoff') or (title == 'chemical_potential_info'):
            return float(cells[1])
        # Bug fix: title is lowercased above, so the original comparison
        # against 'average_Info' could never match.
        elif (title == 'average_info') or (title == 'nbr_species') or (title == 'temperature_info'):
            return int(cells[1])
        else:
            return cells[1]
class McfWriter(object):
    """
    Serializes a pysimm system into a CASSANDRA .mcf (molecular connectivity
    file). Sections are emitted in the fixed order of mcf_tags; sections
    without a dedicated writer (or not selected by 'typing') are written as
    empty placeholders. The exact column templates below define the file
    format -- do not reformat them.
    """
    # Static section names in MCF file
    mcf_tags = ['# Atom_Info', '# Bond_Info', '# Angle_Info', '# Dihedral_Info',
                '# Improper_Info', '# Intra_Scaling', '# Fragment_Info', '# Fragment_Connectivity']

    def __init__(self, psm_syst, file_ref, **kwargs):
        """
        Arguments:
            psm_syst : pysimm system to serialize
            file_ref : path of the .mcf file to produce
        """
        self.out_stream = None
        self.empty_line = '0'  # body emitted for sections with no data
        self.syst = psm_syst
        self.file_ref = file_ref

    def write(self, typing='all'):
        """
        Write the .mcf file, selecting sections via __to_tags__(typing).
        (__to_tags__ is defined outside this excerpt -- presumably it maps
        'typing' to a list of booleans parallel to mcf_tags; verify.)
        """
        # Initializing output stream
        with open(self.file_ref, 'w') as out_stream:
            for (name, is_write) in zip(self.mcf_tags, self.__to_tags__(typing)):
                if is_write:
                    try:
                        # Dispatch by section name, e.g. '# Atom_Info' ->
                        # __write_atom_info__
                        method = getattr(self, '__write_' + str.lower(name[2:]) + '__')
                        method(out_stream)
                    except AttributeError:
                        # No dedicated writer for this section
                        self.__write_empty__(out_stream, name)
                else:
                    self.__write_empty__(out_stream, name)
            out_stream.write('\nEND')
            out_stream.close()  # redundant inside 'with'; kept as in original

    def __write_empty__(self, out, name):
        # Placeholder section: header, '0' body, blank separator line
        out.write('{0:}\n{1:}\n\n'.format(name, self.empty_line))

    def __write_atom_info__(self, out):
        """Write the '# Atom_Info' section (one line per particle)."""
        global KCALMOL_2_K
        text_tag = '# Atom_Info'
        if self.syst.particles.count > 0:
            # writing section header
            out.write('{:}\n'.format(text_tag))
            # Verify and fix net system charge
            self.syst.zero_charge()
            # writing total number of particles
            out.write('{0:<6}\n'.format(self.syst.particles.count))
            count = 0
            line_template = '{l[0]:<6}{l[1]:<7}{l[2]:<5}{l[3]:<8.3f}{l[4]:<10.6f}' \
                            '{l[5]:<6}{l[6]:<11.3f}{l[7]:<9.3f}\n'
            for item in self.syst.particles:
                # [index, name, elem, mass, charge, pot type, epsilon, sigma]
                line = [count + 1, '', '', '', 0, 'LJ', 0, 0]
                if hasattr(item, 'charge'):
                    line[4] = item.charge
                else:
                    line[4] = 0
                if hasattr(item, 'type'):
                    if hasattr(item.type, 'name'):
                        line[1] = item.type.name
                    if hasattr(item.type, 'elem'):
                        line[2] = item.type.elem
                    if hasattr(item.type, 'mass'):
                        line[3] = item.type.mass
                    if hasattr(item.type, 'epsilon'):
                        # CASSANDRA expects epsilon in Kelvin units
                        line[6] = KCALMOL_2_K * item.type.epsilon
                        line[7] = item.type.sigma
                else:
                    # Particle without a type cannot be serialized; note that
                    # the count written above still includes it.
                    continue
                out.write(line_template.format(l=line))
                count += 1
        else:
            self.__write_empty__(out, text_tag)
        out.write('\n')

    def __write_bond_info__(self, out):
        """Write the '# Bond_Info' section (one line per bond)."""
        text_tag = '# Bond_Info'
        if self.syst.bonds.count > 0:
            # writing section header
            out.write('{:}\n'.format(text_tag))
            # writing total number of bonds
            out.write('{0:<6}\n'.format(self.syst.bonds.count))
            line_template = '{l[0]:<6d}{l[1]:<6d}{l[2]:<6d}{l[3]:<9}{l[4]:<6.3f}\n'
            count = 1
            for bond in self.syst.bonds:
                tmp = 'fixed'  # Fixed bond is the only option for CASSANDRA V-1.2
                line = [count, bond.a.tag, bond.b.tag, tmp, bond.type.r0]
                count += 1
                out.write(line_template.format(l=line))
            out.write('\n')
        else:
            self.__write_empty__(out, text_tag)

    def __write_angle_info__(self, out):
        """Write the '# Angle_Info' section (one line per angle)."""
        text_tag = '# Angle_Info'
        if self.syst.angles.count > 0:
            # writing section header
            out.write('{:}\n'.format(text_tag))
            # writing total number of angles
            out.write('{0:<6}\n'.format(self.syst.angles.count))
            count = 1
            for angle in self.syst.angles:
                line_template = '{l[0]:<6d}{l[1]:<6d}{l[2]:<6d}{l[3]:<6d}{l[4]:<10}{l[5]:<13.3f}'
                line = [count, angle.a.tag, angle.b.tag, angle.c.tag]
                if hasattr(angle.type, 'is_fixed') and angle.type.is_fixed:
                    # Fixed angle: type keyword + equilibrium angle only
                    addon = ['fixed', angle.type.theta0]
                else:
                    # Harmonic angle carries an extra column (force constant)
                    addon = ['harmonic', angle.type.k, angle.type.theta0]
                    line_template += '{l[6]:<13.3f}'
                count += 1
                out.write(line_template.format(l=line + addon) + '\n')
            out.write('\n')
        else:
            self.__write_empty__(out, text_tag)

    def __write_intra_scaling__(self, out):
        """Write the '# Intra_Scaling' section (all scalings forced to 0)."""
        format_line = '{:<6.2f}{:<6.2f}{:<6.2f}{:<6.2f}'
        # writing section header
        out.write('{:}\n'.format('# Intra_Scaling'))
        # writing vdW scaling: 1-2 1-3 1-4 1-N
        out.write(format_line.format(0, 0, 0, 0) + '\n')
        # writing charge scaling: 1-2 1-3 1-4 1-N
        out.write(format_line.format(0, 0, 0, 0) + '\n\n')

    def __write_dihedral_info__(self, out):
        """Dihedrals are not serialized -- always an empty section."""
        text_tag = '# Dihedral_Info'
        self.__write_empty__(out, text_tag)

    def __write_improper_info__(self, out):
        """Impropers are not serialized -- always an empty section."""
        text_tag = '# Improper_Info'
        self.__write_empty__(out, text_tag)
| |
cli_type = get_cfg_cli_type(dut, **kwargs)
if cli_type in ["click", "vtysh"]:
# command = "sonic-clear ip bgp"
command = "clear ip bgp *"
st.config(dut, command, type=cli_type, conf=False)
elif cli_type == 'klish':
command = 'clear bgp ipv4 unicast *'
st.config(dut, command, type=cli_type)
else:
st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
return False
return True
def clear_bgp_vtysh(dut, **kwargs):
    """
    Clear BGP sessions for the requested address family.
    :param dut:
    :param address_family: ipv4|ipv6|all (default 'all')
    :return:
    """
    cli_type = get_cfg_cli_type(dut, **kwargs)
    address_family = kwargs.get('address_family', 'all')
    base = {'ipv4': ['ipv4'], 'ipv6': ['ipv6']}.get(address_family, ['ipv4', 'ipv6'])
    if cli_type == 'vtysh':
        af_list = base
    elif cli_type == 'klish':
        af_list = ['{} unicast'.format(af) for af in base]
    else:
        # Unknown CLI type keeps the historical default family list
        af_list = ['ipv4', 'ipv6']
    for each_af in af_list:
        if cli_type == 'vtysh':
            command = "clear ip bgp {} *".format(each_af)
        elif cli_type == 'klish':
            command = "clear bgp {} *".format(each_af)
        st.config(dut, command, type=cli_type, conf=False)
def clear_ip_bgp_vtysh(dut, value="*", **kwargs):
    """
    Clear IPv4 BGP sessions matching *value* (default: all peers).
    :param dut:
    :param value: peer selector passed to the clear command
    :return: False on unsupported CLI type, otherwise None
    """
    cli_type = get_cfg_cli_type(dut, **kwargs)
    templates = {
        'vtysh': "clear ip bgp ipv4 {}",
        'klish': "clear bgp ipv4 unicast {}",
    }
    if cli_type not in templates:
        st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
        return False
    st.config(dut, templates[cli_type].format(value), type=cli_type, conf=False)
def clear_ipv6_bgp_vtysh(dut, value="*", **kwargs):
    """
    Clear IPv6 BGP sessions matching *value* (default: all peers).
    :param dut:
    :param value: peer selector passed to the clear command
    :return: False on unsupported CLI type, otherwise None
    """
    cli_type = get_cfg_cli_type(dut, **kwargs)
    if cli_type == 'vtysh':
        command = "clear ip bgp ipv6 {}".format(value)
    elif cli_type == 'klish':
        command = "clear bgp ipv6 unicast {}".format(value)
    else:
        # Bug fix: an unsupported CLI type previously fell through with
        # 'command' unbound and raised NameError; fail explicitly like the
        # sibling clear_* helpers.
        st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
        return False
    st.config(dut, command, type=cli_type, conf=False)
def clear_ip_bgp_vrf_vtysh(dut, vrf, family='ipv4', value="*", **kwargs):
    """
    Clear BGP sessions inside a VRF for the given address family.
    :param dut:
    :param vrf: VRF name
    :param family: ipv4|ipv6
    :param value: peer selector (default all)
    :return: False on unsupported CLI type, otherwise None
    """
    cli_type = get_cfg_cli_type(dut, **kwargs)
    if cli_type == 'vtysh':
        cmd = "clear bgp vrf {} {} {}".format(vrf, family, value)
        st.config(dut, cmd, type='vtysh', conf=False)
    elif cli_type == 'klish':
        af = {'ipv4': 'ipv4 unicast', 'ipv6': 'ipv6 unicast'}.get(family, family)
        cmd = "clear bgp {} vrf {} {}".format(af, vrf, value)
        st.config(dut, cmd, type='klish', conf=False)
    else:
        st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
        return False
def create_bgp_aggregate_address(dut, **kwargs):
    """
    API to create the BGP aggregate address
    Author: <NAME> (<EMAIL>)
    :param dut:
    :param local_asn: (mandatory)
    :param address_range: (mandatory)
    :param family: (mandatory)
    :param config: add|delete (mandatory)
    :param as_set:
    :param summary:
    :return: False on bad arguments or unsupported CLI type
    """
    cli_type = get_cfg_cli_type(dut, **kwargs)
    # Bug fix: the original check used 'and', so it only fired when every
    # mandatory key was missing, then fell through to a KeyError below.
    if not all(key in kwargs for key in ("local_asn", "address_range", "config", "family")):
        st.error("Mandatory parameters not provided")
        return False
    skip_error_check = kwargs.get("skip_error_check", True)
    config_router_bgp_mode(dut, kwargs["local_asn"], cli_type=cli_type)
    if cli_type == "vtysh":
        command = "address-family {}\n".format(kwargs["family"])
        if kwargs["config"] == "add":
            command += "aggregate-address {}".format(kwargs["address_range"])
        elif kwargs["config"] == "delete":
            command += "no aggregate-address {}".format(kwargs["address_range"])
        if "summary" in kwargs:
            command += " summary-only"
        if "as_set" in kwargs:
            command += " as-set"
        st.config(dut, command, type=cli_type)
    elif cli_type == "klish":
        commands = list()
        commands.append("address-family {} unicast".format(kwargs["family"]))
        if kwargs.get("config") == "add":
            command = "aggregate-address {}".format(kwargs["address_range"])
            if "summary" in kwargs:
                command += " summary-only"
            if "as_set" in kwargs:
                command += " as-set"
        else:
            command = "no aggregate-address {}".format(kwargs["address_range"])
        commands.append(command)
        commands.append("exit")
        commands.append("exit")
        st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check)
    else:
        st.error("Unsupported CLI TYPE -- {}".format(cli_type))
        return False
def create_bgp_update_delay(dut, local_asn, time=0, cli_type="", skip_error_check=True):
    """
    Configure 'update-delay' under router-bgp for the given local AS.
    :param dut:
    :param local_asn:
    :param time: delay value (default 0)
    :return:
    """
    cli_type = get_cfg_cli_type(dut, cli_type=cli_type)
    config_router_bgp_mode(dut, local_asn, cli_type=cli_type)
    st.config(dut, "update-delay {}".format(time), type=cli_type,
              skip_error_check=skip_error_check)
def create_bgp_always_compare_med(dut, local_asn):
    """
    Enable 'bgp always-compare-med' under router-bgp (vtysh only).
    :param dut:
    :param local_asn:
    :return:
    """
    # No usage in scripts
    config_router_bgp_mode(dut, local_asn)
    st.config(dut, "bgp always-compare-med", type='vtysh')
def create_bgp_best_path(dut, local_asn, user_command, cli_type=""):
    """
    Configure a 'bestpath' option under router-bgp.
    :param dut:
    :param local_asn:
    :param user_command: bestpath sub-option text
    :return: False on unsupported CLI type, otherwise None
    """
    cli_type = get_cfg_cli_type(dut, cli_type=cli_type)
    config_router_bgp_mode(dut, local_asn, cli_type=cli_type)
    if cli_type == 'vtysh':
        command = "bgp bestpath {}".format(user_command)
    elif cli_type == 'klish':
        command = ["bestpath {}".format(user_command), "exit"]
    else:
        st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
        return False
    st.config(dut, command, type=cli_type)
def create_bgp_client_to_client_reflection(dut, local_asn, config='yes', cli_type="", skip_error_check=True):
    """
    Enable or disable client-to-client route reflection.
    :param dut:
    :param local_asn:
    :param config: 'yes' to enable, anything else to disable
    :return: True on success, False on unsupported CLI type
    """
    cli_type = get_cfg_cli_type(dut, cli_type=cli_type)
    prefix = '' if config == 'yes' else 'no'
    if cli_type == "vtysh":
        command = "router bgp {}".format(local_asn)
        command += "\n {} bgp client-to-client reflection".format(prefix)
        st.config(dut, command, type=cli_type)
        return True
    if cli_type == "klish":
        commands = [
            "router bgp {}".format(local_asn),
            "{} client-to-client reflection".format(prefix),
            "exit",
        ]
        st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check)
        return True
    st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
    return False
def create_bgp_route_reflector_client(dut, local_asn, addr_family, nbr_ip, config='yes', cli_type="", skip_error_check=True):
    """
    Mark a neighbor / peer group as a route-reflector client.
    :param dut:
    :param local_asn:
    :param addr_family: e.g. ipv4|ipv6|l2vpn
    :param nbr_ip: neighbor IP, interface name or peer-group name
    :param config: 'yes' to set, anything else to remove
    :return: True on success, False on unsupported CLI type
    """
    cli_type = get_cfg_cli_type(dut, cli_type=cli_type)
    cfgmode = '' if config == 'yes' else 'no'
    if cli_type == "vtysh":
        command = ("router bgp {}".format(local_asn)
                   + "\n address-family {} {}".format(addr_family, "unicast")
                   + "\n {} neighbor {} route-reflector-client".format(cfgmode, nbr_ip))
        st.config(dut, command, type=cli_type)
        return True
    if cli_type == "klish":
        commands = ["router bgp {}".format(local_asn)]
        # Pick the neighbor form: interface, plain neighbor, or peer group
        if re.findall(r'Ethernet|Vlan|PortChannel|Eth', nbr_ip):
            intf = get_interface_number_from_name(nbr_ip)
            commands.append("neighbor interface {} {}".format(intf["type"], intf["number"]))
        elif addr_family == 'l2vpn':
            commands.append("neighbor {}".format(nbr_ip))
        elif is_valid_ip_address(nbr_ip, addr_family):
            commands.append("neighbor {}".format(nbr_ip))
        else:
            commands.append("peer-group {}".format(nbr_ip))
        addr_family_type = "evpn" if addr_family == 'l2vpn' else "unicast"
        commands.append("address-family {} {}".format(addr_family, addr_family_type))
        commands.append("{} route-reflector-client".format(cfgmode))
        commands.extend(["exit", "exit", "exit"])
        st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check)
        return True
    st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
    return False
def create_bgp_next_hop_self(dut, local_asn, addr_family, nbr_ip, force='no', config='yes', cli_type="", skip_error_check=True):
    """
    Configure next-hop-self for a neighbor or peer group.
    :param dut:
    :param local_asn:
    :param addr_family:
    :param nbr_ip: neighbor IP or peer-group name
    :param force: 'yes' appends the 'force' keyword
    :param config: 'yes' to set, anything else to remove
    :return: True on success, False on unsupported CLI type
    """
    cli_type = get_cfg_cli_type(dut, cli_type=cli_type)
    cfgmode = '' if config == 'yes' else 'no'
    if cli_type == "vtysh":
        command = "router bgp {}".format(local_asn)
        command += "\n address-family {} {}".format(addr_family, "unicast")
        command += "\n {} neighbor {} next-hop-self".format(cfgmode, nbr_ip)
        if force == 'yes':
            command += " force"
        st.config(dut, command, type=cli_type)
        return True
    if cli_type == "klish":
        commands = ["router bgp {}".format(local_asn)]
        if is_valid_ip_address(nbr_ip, addr_family):
            commands.append("{} neighbor {}".format(cfgmode, nbr_ip))
        else:
            commands.append("{} peer-group {}".format(cfgmode, nbr_ip))
        if config == "yes":
            # Only the enable path enters the address family sub-mode
            commands.append("address-family {} {}".format(addr_family, "unicast"))
            commands.append("next-hop-self {}".format("force" if force == 'yes' else ""))
            commands.append("exit")
        commands.extend(["exit", "exit"])
        st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check)
        return True
    st.log("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
    return False
def create_bgp_cluster_id(dut, local_asn, cluster_id, cluster_ip):
    """
    Configure the BGP cluster id, first by id and then by IP (vtysh only).
    :param dut:
    :param local_asn:
    :param cluster_id:
    :param cluster_ip:
    :return:
    """
    # No usage in test scripts
    config_router_bgp_mode(dut, local_asn)
    for val in (cluster_id, cluster_ip):
        st.config(dut, "bgp cluster-id {}".format(val), type='vtysh')
def create_bgp_confideration(dut, local_asn, confd_id_as, confd_peers_as):
    """
    Configure BGP confederation identifier and peers (vtysh only).
    Note: the 'confideration' spelling in the emitted commands is preserved
    exactly as in the original helper.
    :param dut:
    :param local_asn:
    :param confd_id_as:
    :param confd_peers_as:
    :return:
    """
    # No usage in test scripts
    config_router_bgp_mode(dut, local_asn)
    st.config(dut, "bgp confideration identifier {}".format(confd_id_as), type='vtysh')
    st.config(dut, "bgp confideration peers {}".format(confd_peers_as), type='vtysh')
def create_bgp_dampening(dut, local_asn, half_life_time, timer_start, timer_start_supress, max_duration):
    """
    Configure BGP route-flap dampening (vtysh only).
    :param dut:
    :param local_asn:
    :param half_life_time:
    :param timer_start:
    :param timer_start_supress:
    :param max_duration:
    :return:
    """
    # No usage in test scripts
    config_router_bgp_mode(dut, local_asn)
    cmd = "bgp dampening {} {} {} {}".format(half_life_time, timer_start,
                                             timer_start_supress, max_duration)
    st.config(dut, cmd, type='vtysh')
def config_bgp_default(dut, local_asn, user_command, config='yes', cli_type="", skip_error_check=True):
    """
    Configure / unconfigure a "bgp default ..." knob under "router bgp".

    :param dut: device under test
    :param local_asn: local BGP AS number
    :param user_command: text appended after "bgp default" / "default"
    :param config: 'yes' to apply; any other value negates with "no"
    :param cli_type: CLI backend; resolved via get_cfg_cli_type when empty
    :param skip_error_check: passed through to st.config
    :return: True on success, False for an unsupported CLI type
    """
    cli_type = get_cfg_cli_type(dut, cli_type=cli_type)
    cfgmode = 'no' if config != 'yes' else ''
    if cli_type == "vtysh":
        # vtysh accepts a newline-joined multi-line command string.
        command = "router bgp {}".format(local_asn)
        command += "\n {} bgp default {}".format(cfgmode, user_command)
        st.config(dut, command, type=cli_type, skip_error_check=skip_error_check)
        return True
    elif cli_type == "klish":
        # klish takes a list of commands and needs an explicit "exit".
        commands = list()
        commands.append("router bgp {}".format(local_asn))
        commands.append("{} default {}".format(cfgmode, user_command))
        commands.append("exit")
        st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check)
        return True
    else:
        st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type))
        return False
def config_bgp_always_compare_med(dut, local_asn, config='yes', cli_type=""):
"""
:param dut:
:param local_asn:
:return:
"""
cli_type = get_cfg_cli_type(dut, cli_type=cli_type)
config_router_bgp_mode(dut, local_asn, cli_type=cli_type)
if cli_type == "vtysh":
if config == | |
"Values": [
{"FrontEndValue": "Allow",
"BackEndValue": "allow"
},
{"FrontEndValue": "Deny",
"BackEndValue": "deny"
}
]
}
]
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
enable_cdp=False,
mac_register_mode="Only Native VLAN",
action_on_uplink_fail="Link Down",
mac_forging="Allow",
lldp_enable_transmit=False,
lldp_enable_receive=False
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client
)
self.enable_cdp = enable_cdp
self.mac_register_mode = mac_register_mode
self.action_on_uplink_fail = action_on_uplink_fail
self.mac_forging = mac_forging
self.lldp_enable_transmit = lldp_enable_transmit
self.lldp_enable_receive = lldp_enable_receive
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description,
"CdpEnabled": self.enable_cdp,
"LldpSettings": {
"TransmitEnabled": self.lldp_enable_transmit,
"ReceiveEnabled": self.lldp_enable_receive
}
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"('{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"{self.enable_cdp}, "
f"'{self.mac_register_mode}', "
f"'{self.action_on_uplink_fail}', "
f"'{self.mac_forging}', "
f"{self.lldp_enable_transmit}, "
f"{self.lldp_enable_receive})"
)
def ethernet_network_control_policy_maker(intersight_api_key_id,
                                          intersight_api_key,
                                          policy_name,
                                          enable_cdp=False,
                                          mac_register_mode="Only Native VLAN",
                                          action_on_uplink_fail="Link Down",
                                          mac_forging="Allow",
                                          lldp_enable_transmit=False,
                                          lldp_enable_receive=False,
                                          policy_description="",
                                          organization="default",
                                          intersight_base_url="https://www.intersight.com/api/v1",
                                          tags=None,
                                          preconfigured_api_client=None
                                          ):
    """Make an Ethernet Network Control Policy on Cisco Intersight.

    Args:
        intersight_api_key_id (str):
            The ID of the Intersight API key.
        intersight_api_key (str):
            The system file path of the Intersight API key.
        policy_name (str):
            The name of the policy to be created.
        enable_cdp (bool):
            Optional; The administrative state of Cisco Discovery Protocol
            (CDP) on an interface. The default value is False.
        mac_register_mode (str):
            Optional; The MAC address registration mode, either
            "Only Native VLAN" (default) or "All Host VLANs".
        action_on_uplink_fail (str):
            Optional; "Link Down" (default) takes the virtual interface
            down when no uplink is pinned; "Warning" leaves it up.
        mac_forging (str):
            Optional; "Allow" (default) or "Deny" MAC forging on an
            interface.
        lldp_enable_transmit (bool):
            Optional; Whether LLDP frames are transmitted on an interface.
            The default value is False.
        lldp_enable_receive (bool):
            Optional; Whether LLDP frames are received on an interface.
            The default value is False.
        policy_description (str):
            Optional; The description of the policy. Defaults to "".
        organization (str):
            Optional; The Intersight account organization of the policy.
            The default value is "default".
        intersight_base_url (str):
            Optional; The base URL for Intersight API paths. Defaults to
            "https://www.intersight.com/api/v1"; typically only changed
            when using the Intersight Virtual Appliance.
        tags (dict):
            Optional; Intersight account tags to assign to the policy.
            The default value is None.
        preconfigured_api_client ("ApiClient"):
            Optional; A preconfigured ApiClient instance handling
            Intersight client-server communication. When provided, the
            intersight_api_key_id, intersight_api_key, and
            intersight_base_url arguments may be empty strings or None.
    """
    def builder(target_object):
        """Build *target_object* on Intersight, reporting any failure."""
        try:
            target_object.object_maker()
        except Exception:
            print("\nA configuration error has occurred!\n")
            print(f"The builder function failed to configure the "
                  f"{target_object.object_type} settings.")
            print(f"Please check the provided arguments for the "
                  f"{target_object.object_type} settings.\n")
            print("Exception Message: ")
            traceback.print_exc()

    # Define and create Ethernet Network Control Policy object in Intersight
    policy = EthernetNetworkControlPolicy(
        intersight_api_key_id=intersight_api_key_id,
        intersight_api_key=intersight_api_key,
        policy_name=policy_name,
        policy_description=policy_description,
        organization=organization,
        intersight_base_url=intersight_base_url,
        tags=tags,
        preconfigured_api_client=preconfigured_api_client,
        enable_cdp=enable_cdp,
        mac_register_mode=mac_register_mode,
        action_on_uplink_fail=action_on_uplink_fail,
        mac_forging=mac_forging,
        lldp_enable_transmit=lldp_enable_transmit,
        lldp_enable_receive=lldp_enable_receive
    )
    builder(policy)
# Establish classes and functions to make Flow Control Policy
class FlowControlPolicy(UcsPolicy):
    """This class is used to configure a Flow Control Policy in Intersight.
    """
    object_type = "Flow Control Policy"
    intersight_api_path = "fabric/FlowControlPolicies"
    # Front-end to back-end value translation for the priority attribute.
    object_variable_value_maps = [
        {"VariableName": "priority",
         "Description": "Flow Control Priority Mode",
         "AttributeName": "PriorityFlowControlMode",
         "Values": [
             {"FrontEndValue": "Auto",
              "BackEndValue": "auto"
              },
             {"FrontEndValue": "On",
              "BackEndValue": "on"
              }
         ]
         }
    ]

    def __init__(self,
                 intersight_api_key_id,
                 intersight_api_key,
                 policy_name,
                 policy_description="",
                 organization="default",
                 intersight_base_url="https://www.intersight.com/api/v1",
                 tags=None,
                 preconfigured_api_client=None,
                 priority="Auto",
                 receive="Disabled",
                 send="Disabled"
                 ):
        """Initialize the Flow Control Policy settings."""
        super().__init__(intersight_api_key_id,
                         intersight_api_key,
                         policy_name,
                         policy_description,
                         organization,
                         intersight_base_url,
                         tags,
                         preconfigured_api_client
                         )
        self.priority = priority
        self.receive = receive
        self.send = send
        # Base request body. The priority value is deliberately not placed
        # here; its mapping is declared in object_variable_value_maps
        # (presumably consumed by the base class — not visible in this file
        # chunk, so confirm against UcsPolicy).
        self.intersight_api_body = {
            "Name": self.policy_name,
            "Description": self.policy_description,
            "ReceiveDirection": self.receive,
            "SendDirection": self.send
        }

    def __repr__(self):
        """Return an eval-style representation of this policy object."""
        return (
            f"{self.__class__.__name__}"
            f"('{self.intersight_api_key_id}', "
            f"'{self.intersight_api_key}', "
            f"'{self.policy_name}', "
            f"'{self.policy_description}', "
            f"'{self.organization}', "
            f"'{self.intersight_base_url}', "
            f"{self.tags}, "
            f"{self.api_client}, "
            f"'{self.priority}', "
            f"'{self.receive}', "
            f"'{self.send}')"
        )
def flow_control_policy_maker(intersight_api_key_id,
                              intersight_api_key,
                              policy_name,
                              priority="Auto",
                              receive="Disabled",
                              send="Disabled",
                              policy_description="",
                              organization="default",
                              intersight_base_url="https://www.intersight.com/api/v1",
                              tags=None,
                              preconfigured_api_client=None
                              ):
    """Make a Flow Control Policy on Cisco Intersight.

    Args:
        intersight_api_key_id (str):
            The ID of the Intersight API key.
        intersight_api_key (str):
            The system file path of the Intersight API key.
        policy_name (str):
            The name of the policy to be created.
        priority (str):
            Optional; The flow control priority mode, "Auto" (default) or
            "On".
        receive (str):
            Optional; Link-level flow control in the receive direction,
            "Enabled" or "Disabled" (default).
        send (str):
            Optional; Link-level flow control in the send direction,
            "Enabled" or "Disabled" (default).
        policy_description (str):
            Optional; The description of the policy. Defaults to "".
        organization (str):
            Optional; The Intersight account organization of the policy.
            The default value is "default".
        intersight_base_url (str):
            Optional; The base URL for Intersight API paths. Defaults to
            "https://www.intersight.com/api/v1"; typically only changed
            when using the Intersight Virtual Appliance.
        tags (dict):
            Optional; Intersight account tags to assign to the policy.
            The default value is None.
        preconfigured_api_client ("ApiClient"):
            Optional; A preconfigured ApiClient instance handling
            Intersight client-server communication. When provided, the
            intersight_api_key_id, intersight_api_key, and
            intersight_base_url arguments may be empty strings or None.
    """
    def builder(target_object):
        """Build *target_object* on Intersight, reporting any failure."""
        try:
            target_object.object_maker()
        except Exception:
            print("\nA configuration error has occurred!\n")
            print(f"The builder function failed to configure the "
                  f"{target_object.object_type} settings.")
            print(f"Please check the provided arguments for the "
                  f"{target_object.object_type} settings.\n")
            print("Exception Message: ")
            traceback.print_exc()

    # Define and create Flow Control Policy object in Intersight
    policy = FlowControlPolicy(
        intersight_api_key_id=intersight_api_key_id,
        intersight_api_key=intersight_api_key,
        policy_name=policy_name,
        policy_description=policy_description,
        organization=organization,
        intersight_base_url=intersight_base_url,
        tags=tags,
        preconfigured_api_client=preconfigured_api_client,
        priority=priority,
        receive=receive,
        send=send
    )
    builder(policy)
# Establish classes and functions to make Link Control Policy
class LinkControlPolicy(UcsPolicy):
"""This class is used to configure a Link Control Policy in Intersight.
"""
object_type = "Link Control Policy"
intersight_api_path = "fabric/LinkControlPolicies"
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
admin_state="Enabled",
mode="Normal"
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client
)
self.admin_state = admin_state
self.mode = mode
    def __repr__(self):
        """Return an eval-style representation of this policy object,
        listing the constructor arguments in declaration order."""
        return (
            f"{self.__class__.__name__}"
            f"('{self.intersight_api_key_id}', "
            f"'{self.intersight_api_key}', "
            f"'{self.policy_name}', "
            f"'{self.policy_description}', "
            f"'{self.organization}', "
            f"'{self.intersight_base_url}', "
            f"{self.tags}, "
            f"{self.api_client}, "
            f"'{self.admin_state}', "
            f"'{self.mode}')"
        )
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Reformat the provided Link Control Mode value to lowercase format for back-end Intersight API compatibility
try:
lowercase_mode = self.mode.lower()
except Exception:
print("\nA configuration error has occurred!\n")
print(f"During the configuration of the {self.object_type} named "
f"{self.policy_name}, there was an issue with the value "
"provided for the Link Control UDLD Mode settings.")
print(f"The value provided was {self.mode}.")
print("To proceed, the value provided for the Link Control UDLD "
"Mode settings should be updated to an accepted string "
"format.")
| |
import collections
from itertools import chain
from typing import Iterable, List, Mapping, MutableMapping, Optional, Tuple, Union, cast
from adam.language_specific.english.english_integrated_experiment_lexicon import (
INTEGRATED_EXPERIMENT_ENGLISH_LEXICON,
)
from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest
from attr import Factory, attrib, attrs
from attr.validators import instance_of
from immutablecollections import ImmutableSet, immutableset, immutablesetmultidict
from more_itertools import first, only
from networkx import DiGraph
from adam.axes import FacingAddresseeAxis, GRAVITATIONAL_DOWN_TO_UP_AXIS
from adam.language.dependency import (
DependencyRole,
DependencyTree,
DependencyTreeLinearizer,
DependencyTreeToken,
LinearizedDependencyTree,
)
from adam.language.dependency.universal_dependencies import (
ADJECTIVAL_MODIFIER,
ADPOSITION,
ADVERB,
ADVERBIAL_MODIFIER,
CASE_POSSESSIVE,
CASE_SPATIAL,
DETERMINER,
DETERMINER_ROLE,
INDIRECT_OBJECT,
NOMINAL_MODIFIER,
NOMINAL_MODIFIER_POSSESSIVE,
NOMINAL_SUBJECT,
NUMERAL,
NUMERIC_MODIFIER,
OBJECT,
OBLIQUE_NOMINAL,
PROPER_NOUN,
VERB,
IS_ATTRIBUTE,
OTHER,
MARKER,
ADJECTIVE,
)
from adam.language.language_generator import LanguageGenerator
from adam.language.lexicon import LexiconEntry
from adam.language.ontology_dictionary import OntologyLexicon
from adam.language_specific.english.english_phase_1_lexicon import (
GAILA_PHASE_1_ENGLISH_LEXICON,
I,
ME,
YOU,
GRAB,
SHOVE,
TOSS,
RUN,
)
from adam.language_specific.english.english_phase_2_lexicon import (
GAILA_PHASE_2_ENGLISH_LEXICON,
)
from adam.language_specific import (
FIRST_PERSON,
SECOND_PERSON,
ALLOWS_DITRANSITIVE,
MASS_NOUN,
)
from adam.language_specific.english.english_syntax import (
SIMPLE_ENGLISH_DEPENDENCY_TREE_LINEARIZER,
)
from adam.ontology import IN_REGION, IS_ADDRESSEE, IS_SPEAKER, OntologyNode
from adam.ontology.phase1_ontology import (
SIDE,
AGENT,
COLOR,
FALL,
GOAL,
GROUND,
HAS,
LEARNER,
PATIENT,
SIT,
THEME,
JUMP,
FAST,
SLOW,
BIGGER_THAN,
SMALLER_THAN,
WALK,
PASS,
PUSH,
TAKE,
HARD_FORCE,
DRINK,
)
from adam.ontology.phase1_spatial_relations import (
EXTERIOR_BUT_IN_CONTACT,
GRAVITATIONAL_DOWN,
INTERIOR,
PROXIMAL,
Region,
TOWARD,
GRAVITATIONAL_UP,
SpatialPath,
AWAY_FROM,
TO,
DISTAL,
VIA,
)
from adam.random_utils import SequenceChooser
from adam.relation import Relation
from adam.situation import Action, SituationObject, SituationRegion
from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation
@attrs(frozen=True, slots=True)
class SimpleRuleBasedEnglishLanguageGenerator(
LanguageGenerator[HighLevelSemanticsSituation, LinearizedDependencyTree]
):
r"""
A simple rule-based approach for translating `HighLevelSemanticsSituation`\ s
to English dependency trees.
We currently only generate a single possible `LinearizedDependencyTree`
for a given situation.
"""
_ontology_lexicon: OntologyLexicon = attrib(
validator=instance_of(OntologyLexicon), kw_only=True
)
"""
A mapping from nodes in our concept ontology to English words.
"""
_dependency_tree_linearizer: DependencyTreeLinearizer = attrib(
init=False, default=SIMPLE_ENGLISH_DEPENDENCY_TREE_LINEARIZER, kw_only=True
)
"""
How to assign a word order to our dependency trees.
This is hard-coded for now but may become flexible in the future.
"""
    def generate_language(
        self,
        situation: HighLevelSemanticsSituation,
        chooser: SequenceChooser, # pylint:disable=unused-argument
    ) -> ImmutableSet[LinearizedDependencyTree]:
        """
        Translate *situation* into English.

        Delegates to a fresh ``_Generation`` helper so that each call gets
        its own mutable dependency-graph state. *chooser* is accepted for
        the `LanguageGenerator` interface but unused (see the pylint
        pragma): only a single tree is produced per situation.
        """
        return SimpleRuleBasedEnglishLanguageGenerator._Generation(
            self, situation
        ).generate()
@attrs(frozen=True, slots=True)
class _Generation:
"""
This object encapsulates all the mutable state for an execution
of `SimpleRuleBasedEnglishLanguageGenerator` on a single input.
"""
# we need to keep this reference explicitly
# because Python doesn't have real inner classes.
generator: "SimpleRuleBasedEnglishLanguageGenerator" = attrib()
# the situation being translated to language
situation: HighLevelSemanticsSituation = attrib()
# the dependency tree we are building
dependency_graph: DiGraph = attrib(init=False, default=Factory(DiGraph))
objects_to_dependency_nodes: MutableMapping[
SituationObject, DependencyTreeToken
] = attrib(init=False, factory=dict)
"""
Don't access this directly;
instead use `_noun_for_object`
"""
object_counts: Mapping[OntologyNode, int] = attrib(init=False)
"""
These are used to determine what quantifier to use when there are multiple objects.
"""
def generate(self) -> ImmutableSet[LinearizedDependencyTree]:
try:
return self._real_generate()
except Exception as e:
raise RuntimeError(
f"Error while generating English for situation " f"{self.situation}"
) from e
        def _real_generate(self) -> ImmutableSet[LinearizedDependencyTree]:
            """
            Build the dependency graph for the situation and linearize it.

            Translates at most one action (dynamic situations) or the
            salient objects (static situations), then the always_relations,
            and returns a singleton set holding the linearized tree.

            Raises RuntimeError if the situation has more than one action.
            """
            # The learner appears in a situation so they items may have spatial relations
            # with respect to it, but our language currently never refers to the learner itself.
            # We need to translate objects which appear in relations;
            # right now we only translate persistent relations to language
            # because it is unclear how to handle the others.
            if len(self.situation.actions) > 1:
                raise RuntimeError(
                    "Currently only situations with 0 or 1 actions are supported"
                )
            # handle the special case of a static situation with only
            # multiple objects of the same type
            object_types_in_situation = set(
                object_.ontology_node for object_ in self.situation.salient_objects
            )
            action: Optional[Action[OntologyNode, SituationObject]]
            if self.situation.is_dynamic:
                # the situation contains an action, which we now translate.
                action = only(self.situation.actions)
                # mypy isn't smart enough to realized action can't be None here
                self._translate_action_to_verb(action) # type: ignore
                # translate any leftover objects we didn't find while translating the action
                untranslated_objects = self.situation.salient_objects.difference(
                    self.objects_to_dependency_nodes.keys()
                )
                for untranslated_object in untranslated_objects:
                    self._noun_for_object(untranslated_object)
            else:
                # the situation is static (only objects and relations, no actions)
                action = None
                if len(object_types_in_situation) == 1:
                    # only one type of object is present
                    # e.g. three boxes
                    # doesn't matter which object we choose; they are all the same
                    first_object = first(self.situation.salient_objects)
                    self._noun_for_object(first_object)
                else:
                    # multiple objects of different types
                    for object_ in self.situation.salient_objects:
                        if not self._only_translate_if_referenced(object_):
                            self._noun_for_object(object_)
            # Always translate those relations the user specifically calls out,
            # not the many "background" relations which are also true.
            for persisting_relation in self.situation.always_relations:
                self._translate_relation(action, persisting_relation)
            return immutableset(
                [
                    self.generator._dependency_tree_linearizer.linearize( # pylint:disable=protected-access
                        DependencyTree(self.dependency_graph)
                    )
                ]
            )
        def _noun_for_object(
            self,
            _object: SituationObject,
            *,
            syntactic_role_if_known: Optional[DependencyRole] = None,
        ) -> DependencyTreeToken:
            """
            Return the noun token for *_object*, creating it on first use.

            Results are cached in objects_to_dependency_nodes so each
            situation object maps to exactly one token. Creation also adds
            the determiner, a plural "s" marker when applicable, and the
            object's attributes to the dependency graph as side effects.
            Speaker/addressee objects become pronouns (I/ME/YOU).
            """
            if _object in self.objects_to_dependency_nodes:
                return self.objects_to_dependency_nodes[_object]
            count = self.object_counts[_object.ontology_node]
            # Eventually we will need to ensure that pluralized objects are
            # separated by their respective relations and actions
            # (e.g. in a situation where one box is on a table and one is below it,
            # don't output "two boxes")
            # https://github.com/isi-vista/adam/issues/129
            # Check if the situation object is the speaker
            if IS_SPEAKER in _object.properties:
                if syntactic_role_if_known == NOMINAL_SUBJECT:
                    noun_lexicon_entry = I
                # For when HAS (which is a RELATION) is the verb and the speaker is the subject.
                # (This Special case is needed because HAS is a RELATION and not an ACTION in the
                # ontology, so HAS is never recognized as the NOMINAL_SUBJECT as this
                # determination normally occurs in translate_action_to_verb(), which only
                # processes ACTIONs)
                elif any(
                    relation
                    for relation in self.situation.always_relations
                    if (
                        relation.relation_type == HAS
                        and any(
                            property_ in relation.first_slot.properties
                            for property_ in [IS_SPEAKER]
                        )
                        and not self.situation.actions
                    )
                ):
                    noun_lexicon_entry = I
                else:
                    noun_lexicon_entry = ME
            elif IS_ADDRESSEE in _object.properties:
                # you works for both nominative and accusative
                noun_lexicon_entry = YOU
            else:
                noun_lexicon_entry = self._unique_lexicon_entry(
                    _object.ontology_node # pylint:disable=protected-access
                )
            word_form = noun_lexicon_entry.base_form
            dependency_node = DependencyTreeToken(
                word_form,
                noun_lexicon_entry.part_of_speech,
                morphosyntactic_properties=noun_lexicon_entry.intrinsic_morphosyntactic_properties,
            )
            self.dependency_graph.add_node(dependency_node)
            self.add_determiner(
                _object, count, dependency_node, noun_lexicon_entry=noun_lexicon_entry
            )
            # IF plural, add a separate +s marker token
            # (only when all salient objects share one type — see len check).
            if (
                count > 1
                and noun_lexicon_entry.plural_form
                and len(self.object_counts) == 1
            ):
                # If plural
                plural_marker_node = DependencyTreeToken("s", OTHER) # PoS tag
                self.dependency_graph.add_node(plural_marker_node)
                self.dependency_graph.add_edge(
                    plural_marker_node, dependency_node, role=MARKER
                )
            # Attributes are rendered either as a copula clause ("x is y")
            # or as adjectival modifiers, depending on the syntax hint.
            if ATTRIBUTES_AS_X_IS_Y in self.situation.syntax_hints:
                self._translate_attribute_as_verb(_object, dependency_node)
            else:
                self._add_attributes(_object, dependency_node)
            self.objects_to_dependency_nodes[_object] = dependency_node
            return dependency_node
def _only_translate_if_referenced(self, object_: SituationObject) -> bool:
"""
Some special objects in the situation,
like the ground, the speaker, and the addressee,
should only be translated if referenced by an action or relation.
"""
return (
object_.ontology_node == GROUND
or object_.ontology_node == LEARNER
or IS_SPEAKER in object_.properties
or IS_ADDRESSEE in object_.properties
)
        def add_determiner(
            self,
            _object: SituationObject,
            count: int,
            noun_dependency_node: DependencyTreeToken,
            *,
            noun_lexicon_entry: LexiconEntry,
        ) -> None:
            """
            Attach a determiner/quantifier edge to *noun_dependency_node*.

            Chooses, in priority order: nothing (proper nouns, mass nouns,
            pronouns); "the" for always-definite objects; "my"/"your" for
            speaker/addressee possession; a possessor noun plus "'s" for
            non-agent possessors in dynamic situations; "two"/"many" for
            same-type multiples; otherwise "a".

            Raises RuntimeError if more than one possession relation
            targets the object.
            """
            # not "the Mom"
            if (
                noun_dependency_node.part_of_speech == PROPER_NOUN
                # not "a sand"
                or MASS_NOUN in noun_lexicon_entry.properties
                # not "a you"
                or noun_lexicon_entry in (I, YOU, ME)
            ):
                return
            # HAS relations where this object is the thing possessed.
            possession_relations = [
                relation
                for relation in self.situation.always_relations
                if relation.relation_type == HAS and relation.second_slot == _object
            ]
            if len(possession_relations) > 1:
                raise RuntimeError("Can not handle multiple possession relations")
            else:
                # e.g. it's always "the ground"
                if _object.ontology_node in ALWAYS_USE_THE_OBJECTS:
                    determiner_node = DependencyTreeToken("the", DETERMINER)
                    determiner_role = DETERMINER_ROLE
                # If speaker possesses the noun
                elif (
                    len(possession_relations) == 1
                    and IS_SPEAKER in possession_relations[0].first_slot.properties
                ):
                    determiner_node = DependencyTreeToken("my", DETERMINER)
                    determiner_role = NOMINAL_MODIFIER_POSSESSIVE
                # If addressee possess the noun
                elif (
                    len(possession_relations) == 1
                    and IS_ADDRESSEE in possession_relations[0].first_slot.properties
                ):
                    determiner_node = DependencyTreeToken("your", DETERMINER)
                    determiner_role = DETERMINER_ROLE
                # add 's if a non-agent possess the noun
                elif (
                    len(possession_relations) == 1
                    and self.situation.is_dynamic
                    and possession_relations[0].first_slot
                    not in only(self.situation.actions).argument_roles_to_fillers[AGENT]
                ):
                    # The possessor's own noun acts as the determiner,
                    # with a clitic "'s" attached to it.
                    determiner_node = self._noun_for_object(
                        possession_relations[0].first_slot
                    )
                    determiner_role = DETERMINER_ROLE
                    case_node = DependencyTreeToken("'s", DETERMINER)
                    case_role = CASE_POSSESSIVE
                    self.dependency_graph.add_edge(
                        case_node, determiner_node, role=case_role
                    )
                # only add one of the following quantifiers in situations with
                # multiples of the same object
                elif len(self.object_counts) == 1 and count > 1:
                    if count == 2:
                        determiner_node = DependencyTreeToken("two", NUMERAL)
                        determiner_role = NUMERIC_MODIFIER
                    # Currently, any number of objects greater than two is considered "many"
                    else:
                        determiner_node = DependencyTreeToken("many", DETERMINER)
                        determiner_role = DETERMINER_ROLE
                # otherwise do the normal determiner behavior
                else:
                    determiner_node = DependencyTreeToken("a", DETERMINER)
                    determiner_role = DETERMINER_ROLE
                self.dependency_graph.add_edge(
                    determiner_node, noun_dependency_node, role=determiner_role
                )
def _add_attributes(
self, _object: SituationObject, noun_dependency_node: DependencyTreeToken
) -> None:
if IGNORE_COLORS not in self.situation.syntax_hints:
# Begin work on translating modifiers of Nouns with Color
for property_ in _object.properties:
if self.situation.ontology.is_subtype_of(property_, COLOR):
color_lexicon_entry = self._unique_lexicon_entry(property_)
color_node = DependencyTreeToken(
color_lexicon_entry.base_form,
color_lexicon_entry.part_of_speech,
color_lexicon_entry.intrinsic_morphosyntactic_properties,
)
self.dependency_graph.add_edge(
color_node, noun_dependency_node, role=ADJECTIVAL_MODIFIER
| |
181,
387,
1075,
3921,
731,
2187,
3335, # 4806
7544,
3265,
310,
313,
3435,
2299,
770,
4134,
54,
3034,
189,
4397,
3082,
3769,
3922,
7545, # 4822
1230,
1617,
1849,
355,
3542,
4135,
4398,
3336,
111,
4136,
3650,
1350,
3135,
3436,
3035,
4137, # 4838
2149,
3266,
3543,
7546,
2784,
3923,
3924,
2991,
722,
2008,
7547,
1071,
247,
1207,
2338,
2471, # 4854
1378,
4399,
2009,
864,
1437,
1214,
4400,
373,
3770,
1142,
2216,
667,
4401,
442,
2753,
2555, # 4870
3771,
3925,
1968,
4138,
3267,
1839,
837,
170,
1107,
934,
1336,
1882,
7548,
7549,
2118,
4139, # 4886
2828,
743,
1569,
7550,
4402,
4140,
582,
2384,
1418,
3437,
7551,
1802,
7552,
357,
1395,
1729, # 4902
3651,
3268,
2418,
1564,
2237,
7553,
3083,
3772,
1633,
4403,
1114,
2085,
4141,
1532,
7554,
482, # 4918
2446,
4404,
7555,
7556,
1492,
833,
1466,
7557,
2717,
3544,
1641,
2829,
7558,
1526,
1272,
3652, # 4934
4142,
1686,
1794,
416,
2556,
1902,
1953,
1803,
7559,
3773,
2785,
3774,
1159,
2316,
7560,
2867, # 4950
4405,
1610,
1584,
3036,
2419,
2754,
443,
3269,
1163,
3136,
7561,
7562,
3926,
7563,
4143,
2499, # 4966
3037,
4406,
3927,
3137,
2103,
1647,
3545,
2010,
1872,
4144,
7564,
4145,
431,
3438,
7565,
250, # 4982
97,
81,
4146,
7566,
1648,
1850,
1558,
160,
848,
7567,
866,
740,
1694,
7568,
2201,
2830, # 4998
3195,
4147,
4407,
3653,
1687,
950,
2472,
426,
469,
3196,
3654,
3655,
3928,
7569,
7570,
1188, # 5014
424,
1995,
861,
3546,
4148,
3775,
2202,
2685,
168,
1235,
3547,
4149,
7571,
2086,
1674,
4408, # 5030
3337,
3270,
220,
2557,
1009,
7572,
3776,
670,
2992,
332,
1208,
717,
7573,
7574,
3548,
2447, # 5046
3929,
3338,
7575,
513,
7576,
1209,
2868,
3339,
3138,
4409,
1080,
7577,
7578,
7579,
7580,
2527, # 5062
3656,
3549,
815,
1587,
3930,
3931,
7581,
3550,
3439,
3777,
1254,
4410,
1328,
3038,
1390,
3932, # 5078
1741,
3933,
3778,
3934,
7582,
236,
3779,
2448,
3271,
7583,
7584,
3657,
3780,
1273,
3781,
4411, # 5094
7585,
308,
7586,
4412,
245,
4413,
1851,
2473,
1307,
2575,
430,
715,
2136,
2449,
7587,
270, # 5110
199,
2869,
3935,
7588,
3551,
2718,
1753,
761,
1754,
725,
1661,
1840,
4414,
3440,
3658,
7589, # 5126
7590,
587,
14,
3272,
227,
2598,
326,
480,
2265,
943,
2755,
3552,
291,
650,
1883,
7591, # 5142
1702,
1226,
102,
1547,
62,
3441,
904,
4415,
3442,
1164,
4150,
7592,
7593,
1224,
1548,
2756, # 5158
391,
498,
1493,
7594,
1386,
1419,
7595,
2055,
1177,
4416,
813,
880,
1081,
2363,
566,
1145, # 5174
4417,
2286,
1001,
1035,
2558,
2599,
2238,
394,
1286,
7596,
7597,
2068,
7598,
86,
1494,
1730, # 5190
3936,
491,
1588,
745,
897,
2948,
843,
3340,
3937,
2757,
2870,
3273,
1768,
998,
2217,
2069, # 5206
397,
1826,
1195,
1969,
3659,
2993,
3341,
284,
7599,
3782,
2500,
2137,
2119,
1903,
7600,
3938, # 5222
2150,
3939,
4151,
1036,
3443,
1904,
114,
2559,
4152,
209,
1527,
7601,
7602,
2949,
2831,
2625, # 5238
2385,
2719,
3139,
812,
2560,
7603,
3274,
7604,
1559,
737,
1884,
3660,
1210,
885,
28,
2686, # 5254
3553,
3783,
7605,
4153,
1004,
1779,
4418,
7606,
346,
1981,
2218,
2687,
4419,
3784,
1742,
797, # 5270
1642,
3940,
1933,
1072,
1384,
2151,
896,
3941,
3275,
3661,
3197,
2871,
3554,
7607,
2561,
1958, # 5286
4420,
2450,
1785,
7608,
7609,
7610,
3942,
4154,
1005,
1308,
3662,
4155,
2720,
4421,
4422,
1528, # 5302
2600,
161,
1178,
4156,
1982,
987,
4423,
1101,
4157,
631,
3943,
1157,
3198,
2420,
1343,
1241, # 5318
1016,
2239,
2562,
372,
877,
2339,
2501,
1160,
555,
1934,
911,
3944,
7611,
466,
1170,
169, # 5334
1051,
2907,
2688,
3663,
2474,
2994,
1182,
2011,
2563,
1251,
2626,
7612,
992,
2340,
3444,
1540, # 5350
2721,
1201,
2070,
2401,
1996,
2475,
7613,
4424,
528,
1922,
2188,
1503,
1873,
1570,
2364,
3342, # 5366
3276,
7614,
557,
1073,
7615,
1827,
3445,
2087,
2266,
3140,
3039,
3084,
767,
3085,
2786,
4425, # 5382
1006,
4158,
4426,
2341,
1267,
2176,
3664,
3199,
778,
3945,
3200,
2722,
1597,
2657,
7616,
4427, # 5398
7617,
3446,
7618,
7619,
7620,
3277,
2689,
1433,
3278,
131,
95,
1504,
3946,
723,
4159,
3141, # 5414
1841,
3555,
2758,
2189,
3947,
2027,
2104,
3665,
7621,
2995,
3948,
1218,
7622,
3343,
3201,
3949, # 5430
4160,
2576,
248,
1634,
3785,
912,
7623,
2832,
3666,
3040,
3786,
654,
53,
7624,
2996,
7625, # 5446
1688,
4428,
777,
3447,
1032,
3950,
1425,
7626,
191,
820,
2120,
2833,
971,
4429,
931,
3202, # 5462
135,
664,
783,
3787,
1997,
772,
2908,
1935,
3951,
3788,
4430,
2909,
3203,
282,
2723,
640, # 5478
1372,
3448,
1127,
922,
325,
3344,
7627,
7628,
711,
2044,
7629,
7630,
3952,
2219,
2787,
1936, # 5494
3953,
3345,
2220,
2251,
3789,
2300,
7631,
4431,
3790,
1258,
3279,
3954,
3204,
2138,
2950,
3955, # 5510
3956,
7632,
2221,
258,
3205,
4432,
101,
1227,
7633,
3280,
1755,
7634,
1391,
3281,
7635,
2910, # 5526
2056,
893,
7636,
7637,
7638,
1402,
4161,
2342,
7639,
7640,
3206,
3556,
7641,
7642,
878,
1325, # 5542
1780,
2788,
4433,
259,
1385,
2577,
744,
1183,
2267,
4434,
7643,
3957,
2502,
7644,
684,
1024, # 5558
4162,
7645,
472,
3557,
3449,
1165,
3282,
3958,
3959,
322,
2152,
881,
455,
1695,
1152,
1340, # 5574
660,
554,
2153,
4435,
1058,
4436,
4163,
830,
1065,
3346,
3960,
4437,
1923,
7646,
1703,
1918, # 5590
7647,
932,
2268,
122,
7648,
4438,
947,
677,
7649,
3791,
2627,
297,
1905,
1924,
2269,
4439, # 5606
2317,
3283,
7650,
7651,
4164,
7652,
4165,
84,
4166,
112,
989,
7653,
547,
1059,
3961,
701, # 5622
3558,
1019,
7654,
4167,
7655,
3450,
942,
639,
457,
2301,
2451,
993,
2951,
407,
851,
494, # 5638
4440,
3347,
927,
7656,
1237,
7657,
2421,
3348,
573,
4168,
680,
921,
2911,
1279,
1874,
285, # 5654
790,
1448,
1983,
719,
2167,
7658,
7659,
4441,
3962,
3963,
1649,
7660,
1541,
563,
7661,
1077, # 5670
7662,
3349,
3041,
3451,
511,
2997,
3964,
3965,
3667,
3966,
1268,
2564,
3350,
3207,
4442,
4443, # 5686
7663,
535,
1048,
1276,
1189,
2912,
2028,
3142,
1438,
1373,
2834,
2952,
1134,
2012,
7664,
4169, # 5702
1238,
2578,
3086,
1259,
7665,
700,
7666,
2953,
3143,
3668,
4170,
7667,
4171,
1146,
1875,
1906, # 5718
4444,
2601,
3967,
781,
2422,
132,
1589,
203,
147,
273,
2789,
2402,
898,
1786,
2154,
3968, # 5734
3969,
7668,
3792,
2790,
7669,
7670,
4445,
4446,
7671,
3208,
7672,
1635,
3793,
965,
7673,
1804, # 5750
2690,
1516,
3559,
1121,
1082,
1329,
3284,
3970,
1449,
3794,
65,
1128,
2835,
2913,
2759,
1590, # 5766
3795,
7674,
7675,
12,
2658,
45,
976,
2579,
3144,
4447,
517,
2528,
1013,
1037,
3209,
7676, # 5782
3796,
2836,
7677,
3797,
7678,
3452,
7679,
2602,
614,
1998,
2318,
3798,
3087,
2724,
2628,
7680, # 5798
2580,
4172,
599,
1269,
7681,
1810,
3669,
7682,
2691,
3088,
759,
1060,
489,
1805,
3351,
3285, # 5814
1358,
7683,
7684,
2386,
1387,
1215,
2629,
2252,
490,
7685,
7686,
4173,
1759,
2387,
2343,
7687, # 5830
4448,
3799,
1907,
3971,
2630,
1806,
3210,
4449,
3453,
3286,
2760,
2344,
874,
7688,
7689,
3454, # 5846
3670,
1858,
91,
2914,
3671,
3042,
3800,
4450,
7690,
3145,
3972,
2659,
7691,
3455,
1202,
1403, # 5862
3801,
2954,
2529,
1517,
2503,
4451,
3456,
2504,
7692,
4452,
7693,
2692,
1885,
1495,
1731,
3973, # 5878
2365,
4453,
7694,
2029,
7695,
7696,
3974,
2693,
1216,
237,
2581,
4174,
2319,
3975,
3802,
4454, # 5894
4455,
2694,
3560,
3457,
445,
4456,
7697,
7698,
7699,
7700,
2761,
61,
3976,
3672,
1822,
3977, # 5910
7701,
687,
2045,
935,
925,
405,
2660,
703,
1096,
1859,
2725,
4457,
3978,
1876,
1367,
2695, # 5926
3352,
918,
2105,
1781,
2476,
334,
3287,
1611,
1093,
4458,
564,
| |
# gh_stars: 10-100
#!/usr/local/bin/python
'''
Training a HyperTree network on CoSTAR Block Stacking Dataset
and cornell grasping dataset for detecting grasping positions.
Author: <NAME>
Apache License 2.0 https://www.apache.org/licenses/LICENSE-2.0
Small Portions of Cornell Dataset Code Based on:
https://github.com/tnikolla/robot-grasp-detection
'''
import os
import re
import errno
import sys
import json
import csv
import argparse
import os.path
import glob
import datetime
import tensorflow as tf
import numpy as np
import six
import random
from shapely.geometry import Polygon
import cornell_grasp_dataset_reader
from block_stacking_reader import CostarBlockStackingSequence
from block_stacking_reader import block_stacking_generator
import time
from tensorflow.python.platform import flags
# TODO(ahundt) consider removing this dependency
import grasp_visualization
# progress bars https://github.com/tqdm/tqdm
# import tqdm without enforcing it as a dependency
try:
    from tqdm import tqdm
except ImportError:
    # Fallback when tqdm is not installed: act as an identity wrapper so
    # code written as `for x in tqdm(iterable)` or `tqdm(iterable=...)`
    # still iterates normally, just without a progress bar.
    def tqdm(*args, **kwargs):
        if args:
            return args[0]
        return kwargs.get('iterable', None)
from keras import backend as K
import keras
import keras_contrib
import keras_tqdm
from keras.layers import Input, Dense, Concatenate
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.models import Model
from keras.models import model_from_json
from hypertree_model import concat_images_with_tiled_vector_layer
from hypertree_model import top_block
from hypertree_model import create_tree_roots
from hypertree_model import choose_hypertree_model
from cornell_grasp_dataset_reader import parse_and_preprocess
from callbacks import EvaluateInputGenerator
from callbacks import PrintLogsCallback
from callbacks import FineTuningCallback
from callbacks import SlowModelStopping
from callbacks import InaccurateModelStopping
from keras.utils import OrderedEnqueuer
import grasp_loss
import hypertree_pose_metrics
import hypertree_utilities
flags.DEFINE_float(
'learning_rate',
0.02,
'Initial learning rate.'
)
flags.DEFINE_float(
'random_augmentation',
0.25,
'Frequency from 0.0 to 1.0 with which random augmentation is performed. Currently for block stacking dataset only.'
)
flags.DEFINE_float(
'fine_tuning_learning_rate',
0.001,
'Initial learning rate, this is the learning rate used if load_weights is passed.'
)
flags.DEFINE_integer(
'fine_tuning_epochs',
100,
'Number of epochs to run trainer with all weights marked as trainable.'
)
flags.DEFINE_integer(
'epochs',
200,
'Number of epochs to run trainer.'
)
flags.DEFINE_integer(
'initial_epoch',
0,
'the epoch from which you should start counting, use when loading existing weights.'
)
flags.DEFINE_integer(
'batch_size',
16,
'Batch size.'
)
flags.DEFINE_string(
'log_dir',
'./logs_cornell/',
'Directory for tensorboard, model layout, model weight, csv, and hyperparam files'
)
flags.DEFINE_string(
'run_name',
'',
'A string that will become part of the logged directories and filenames.'
)
flags.DEFINE_integer(
'num_folds',
5,
'Total number of folds, how many times should the data be split between training and validation'
)
flags.DEFINE_integer(
'num_train',
8,
'Number of files used for training in one fold, '
'must be less than the number of tfrecord files, aka splits.'
)
flags.DEFINE_integer(
'num_validation',
2,
'Number of tfrecord files for validation.'
'must be less than the number of tfrecord files, aka splits.'
'This number also automatically determines the number of folds '
'when running when the pipeline_stage flag includes k_fold.'
)
flags.DEFINE_integer(
'num_test',
0,
'num of fold used for the test dataset'
'must be less than the number of tfrecord files, aka splits.'
'This must be 0 when the pipeline_stage flag includes k_fold'
)
flags.DEFINE_string('load_weights', None,
"""Path to hdf5 file containing model weights to load and continue training.""")
flags.DEFINE_string('load_hyperparams', None,
"""Load hyperparams from a json file.""")
flags.DEFINE_string('pipeline_stage', 'train_test',
"""Choose to "train", "test", "train_test", or "train_test_kfold" with the grasp_dataset
data for training and grasp_dataset_test for testing.""")
flags.DEFINE_integer(
'override_train_steps',
None,
'TODO(ahundt) REMOVE THIS HACK TO SKIP TRAINING BUT KEEP USING CALLBACKS.'
)
flags.DEFINE_string(
'split_dataset', 'objectwise',
"""Options are imagewise and objectwise, this is the type of split chosen when the tfrecords were generated.""")
flags.DEFINE_string('tfrecord_filename_base', 'cornell-grasping-dataset', 'base of the filename used for the cornell dataset tfrecords and csv files')
flags.DEFINE_string('costar_filename_base', 'costar_block_stacking_v0.4_success_only',
'base of the filename used for the costar block stacking dataset txt file containing the list of files to load for train val test, '
'specifying None or empty string will generate a new file list from the files in FLAGS.data_dir.'
'Options: costar_block_stacking_v0.4_success_only, costar_combined_block_plush_stacking_v0.4_success_only')
flags.DEFINE_string(
'feature_combo', 'image_preprocessed_norm_sin2_cos2_width_3',
"""
feature_combo: The name for the combination of input features being utilized.
Options include 'image_preprocessed', image_preprocessed_width_1,
'image_preprocessed_norm_sin2_cos2_width_3'
See choose_features_and_metrics() for details.
"""
)
flags.DEFINE_string(
'problem_type', 'grasp_classification',
"""Choose between different formulations of the grasping task.
Problem type options are 'segmentation', 'classification',
'image_center_grasp_regression',
'grasp_regression' which tries to predict successful grasp bounding boxes,
'grasp_classification'
'grasp_segmentation' which tries to classify input grasp parameters at each pixel.
'pixelwise_grasp_regression' which tries to predict successful grasp bounding boxes at each pixel.
"""
)
flags.DEFINE_boolean(
'fine_tuning', False,
""" If true the model will be fine tuned the entire training run.
This means that any imagenet weights will be made trainable,
and the learning rate will be set to fine_tuning_learning_rate.
""")
flags.DEFINE_string(
'kfold_params', None,
""" Load the json file containing parameters from a kfold cross validation run.
"""
)
flags.DEFINE_string(
'dataset_name',
'cornell_grasping',
'Configure training run for a specific dataset.'
' Options are: cornell_grasping and costar_block_stacking.'
)
flags.DEFINE_string(
'learning_rate_schedule',
'reduce_lr_on_plateau',
"""Options are: reduce_lr_on_plateau, triangular, triangular2, exp_range, none.
For details see the keras callback ReduceLROnPlateau and the
keras_contrib callback CyclicLR. With triangular, triangular2,
and exp_range the maximum learning rate
will be double the input learning rate you specify
so that the average initial learning rate is as specified..
"""
)
FLAGS = flags.FLAGS
def save_user_flags(save_filename, line_limit=80, verbose=1):
    """ print and save the tf FLAGS
    based on https://github.com/melodyguan/enas

    # Arguments

        save_filename: path of the json file to write, or None to skip saving.
        line_limit: column width used to dot-pad printed 'name....value' lines.
        verbose: if > 0, print a separator and one line per flag.
    """
    if verbose > 0:
        print("-" * 80)
    flags_dict = FLAGS.flag_values_dict()
    for flag_name, flag_value in six.iteritems(flags_dict):
        value = "{}".format(getattr(FLAGS, flag_name))
        # store the stringified value so the dict is json serializable
        flags_dict[flag_name] = value
        log_string = flag_name
        # max(0, ...) guards against a negative repeat count when
        # name + value together exceed line_limit
        log_string += "." * max(0, line_limit - len(flag_name) - len(value))
        log_string += value
        # BUG FIX: print inside the loop so every flag is reported;
        # previously only the last flag's log_string survived the loop.
        if verbose > 0:
            print(log_string)
    if save_filename is not None:
        with open(save_filename, 'w') as fp:
            # save out all flags params so they can be reloaded in the future
            json.dump(flags_dict, fp)
class GraspJaccardEvaluateCallback(keras.callbacks.Callback):
    """ Validate a model which needs custom numpy metrics during training.

    Note that this may have bugs due to issues when multiple tf sessions are created.
    Therefore, this may be deleted in the future.
    #TODO(ahundt) replace when https://github.com/keras-team/keras/pull/9105 is available

    # Arguments

        model: Keras model on which to call model.evaluate().
        steps: Integer or `None`.
            Total number of steps (batches of samples)
            before declaring the evaluation round finished.
            Ignored with the default value of `None`.
    """

    def __init__(self, filenames=None, example_generator=None, steps=None, metrics_prefix='val', verbose=1):
        # parameter of callbacks passed during initialization
        # pass evalation mode directly
        super(GraspJaccardEvaluateCallback, self).__init__()
        print('filenames: ' + str(filenames))
        print('generator: ' + str(example_generator))
        # number of evaluation steps, or None (currently unused here;
        # evaluation is delegated to the module-level evaluate() helper)
        self.num_steps = steps
        self.verbose = verbose
        # prefix prepended to each logged metric name, e.g. 'val'
        self.metrics_prefix = metrics_prefix
        self.filenames = filenames
        self.example_generator = example_generator

    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): `logs={}` is a shared mutable default; Keras always
        # passes `logs` explicitly, so this is kept unchanged for
        # byte-compatibility, but it is risky in general.
        # results = self.model.evaluate_generator(self.generator, steps=int(self.num_steps))
        metrics_str = '\n'
        metric_name = self.metrics_prefix + '_grasp_jaccard'
        # all our results come together in this call
        # TODO(ahundt) VAL_ON_TRAIN_TEMP_REMOVEME
        # `evaluate` is a module-level helper defined elsewhere in this file;
        # the loop below shows it yields (name, result) pairs.
        results = evaluate(self.model, example_generator=self.example_generator, val_filenames=self.filenames, visualize=True)
        for name, result in results:
            metric_name = self.metrics_prefix + '_' + name
            # merge each metric into the keras `logs` dict so it is recorded
            logs[metric_name] = result
            if self.verbose > 0:
                metrics_str = metrics_str + metric_name + ': ' + str(result) + ' '
        if self.verbose > 0:
            print(metrics_str)
def run_training(
learning_rate=None,
batch_size=None,
num_gpus=1,
top='classification',
epochs=None,
preprocessing_mode=None,
train_data=None,
validation_data=None,
train_filenames=None,
train_size=None,
val_filenames=None,
val_size=None,
test_filenames=None,
test_size=None,
feature_combo_name=None,
problem_name=None,
image_model_name='vgg',
optimizer_name='sgd',
log_dir=None,
hyperparams=None,
load_weights=None,
pipeline=None,
run_name=None,
fine_tuning_learning_rate=None,
fine_tuning=None,
fine_tuning_epochs=None,
loss=None,
checkpoint=True,
dataset_name=None,
should_initialize=False,
hyperparameters_filename=None,
initial_epoch=None,
**kwargs):
"""
top: options are 'segmentation' and 'classification'.
problem_name: options are 'grasp_regression', 'grasp_classification',
'pixelwise_grasp_regression', 'pixelwise_grasp_classification',
'image_center_grasp_regression'. Image center grasp regression is
a pretraining step for pixel
Make sure this is properly coordinated with 'top' param.
feature_combo_name: The name for the combination of input features being utilized.
Options include 'image_preprocessed', image_preprocessed_width_1,
image_preprocessed_sin2_cos2_width_3
'grasp_regression', image_center_grasp_regression.
See choose_features_and_metrics() for details.
hyperparams: a dictionary of hyperparameter selections made for this training run.
If provided these values will simply be dumped to a file and
not utilized in any other way.
checkpoint: if True, checkpoints will be save, if false they will not.
should_initialize: Workaround for some combined tf/keras bug (Maybe fixed in tf 1.8?)
see https://github.com/keras-team/keras/issues/4875#issuecomment-313166165,
TODO(ahundt) remove should_initialize and the corresponding code below if it has been False for a while without issue.
hyperparameters_filename: Write a file '*source_hyperparameters_filename.txt' to a txt file with a path to baseline hyperparams
on which this training run is based. The file will not be loaded, only the filename will be copied for
purposes of tracing where models were generated from, such as if they are the product of hyperparmeter optimization.
Specify the actual hyperparams using the argument "hyperparams".
"""
if epochs is None:
epochs = FLAGS.epochs
if batch_size is None:
batch_size = FLAGS.batch_size
if learning_rate is None:
learning_rate = FLAGS.learning_rate
if log_dir is None:
log_dir = FLAGS.log_dir
if load_weights is None:
load_weights = FLAGS.load_weights
if pipeline is None:
pipeline = FLAGS.pipeline_stage
if problem_name is None:
problem_name = FLAGS.problem_type
if run_name is None:
run_name = FLAGS.run_name
if fine_tuning_learning_rate is None:
fine_tuning_learning_rate = FLAGS.fine_tuning_learning_rate
if fine_tuning is None:
fine_tuning = FLAGS.fine_tuning
if fine_tuning_epochs is None:
fine_tuning_epochs = FLAGS.fine_tuning_epochs
if feature_combo_name is None:
| |
to get here is if the search got ENOENT every time.
raise msg
#This is a hack for running duplication on a machine without trusted
# status. We allow for all types of PNFS filesystems to be used.
if intf and getattr(intf, "make_failed_copies", None):
use_path_type = enstore_constants.BOTH
else:
use_path_type = enstore_constants.FS
src = None
try:
src = __find_chimeraid_path(pnfs_id, bfid1,
file_record = file_record,
path_type = use_path_type)
except (KeyboardInterrupt, SystemExit):
raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
except (OSError, IOError):
exc_type, exc_value, exc_tb = sys.exc_info()
if exc_value.args[0] in [errno.EEXIST, errno.ENOENT]:
try:
if bfid2:
# If the migration is interrupted part way through the swap,
# we need to check if the other bfid is current in layer 1.
src = __find_chimeraid_path(pnfs_id, bfid2,
file_record = alt_file_record,
path_type = use_path_type)
except (KeyboardInterrupt, SystemExit):
raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
except (OSError, IOError):
pass
except:
pass
if not src:
# Don't fill the log file when the situation is known.
#Trace.handle_error(exc_type, exc_value, exc_tb, severity=99)
pass
del exc_tb #avoid resource leaks
if not src:
raise exc_type, exc_value, sys.exc_info()[2]
return src
def File(path):
    """Return chimera.File(path) metadata with safe euid/egid handling.

    When the module switch `do_seteuid` is set, the effective uid/gid are
    matched to the target file; otherwise a lock is taken and the ids are
    forced to root (0, 0) for the duration of the metadata read.
    """
    # get all pnfs metadata
    if do_seteuid:
        file_utils.match_euid_egid(path)
    else:
        file_utils.acquire_lock_euid_egid()
        #If another thread doesn't use "reset_ids_back = True" then
        # be sure that the euid and egid are for roots, which it what the
        # rest of this function assumes the euid and egid are set to.
        file_utils.set_euid_egid(0, 0)
    try:
        p_File = chimera.File(path)
    finally:
        # always undo the id change / release the lock, even on error
        if do_seteuid:
            file_utils.end_euid_egid(reset_ids_back = True)
        else:
            file_utils.release_lock_euid_egid()
    return p_File
def update_layers(pnfs_File):
    """Write pnfs_File's layers 1 and 4 back to the name space.

    Uses the same euid/egid handling pattern as File(): match ids to the
    file when `do_seteuid` is set, otherwise lock and force root ids.
    """
    # update file's layers 1 and 4
    if do_seteuid:
        file_utils.match_euid_egid(pnfs_File.path)
    else:
        file_utils.acquire_lock_euid_egid()
        #If another thread doesn't use "reset_ids_back = True" then
        # be sure that the euid and egid are for roots, which it what the
        # rest of this function assumes the euid and egid are set to.
        file_utils.set_euid_egid(0, 0)
    try:
        pnfs_File.update() # update layers 1 and 4
    finally:
        # always undo the id change / release the lock, even on error
        if do_seteuid:
            file_utils.end_euid_egid(reset_ids_back = True)
        else:
            file_utils.release_lock_euid_egid()
def change_pkg_name(file_old,file_new,volume):
    """Change the file name recorded in layer 4 of the name space.

    Wraps _change_pkg_name() with the same euid/egid handling used by
    File() and update_layers().
    """
    # change file name in layer 4 of name space
    if do_seteuid:
        file_utils.match_euid_egid(file_new)
        try:
            _change_pkg_name(file_old,file_new,volume)
        finally:
            file_utils.end_euid_egid(reset_ids_back = True)
    else:
        file_utils.acquire_lock_euid_egid()
        #If another thread doesn't use "reset_ids_back = True" then
        # be sure that the euid and egid are for roots, which it what the
        # rest of this function assumes the euid and egid are set to.
        file_utils.set_euid_egid(0, 0)
        try:
            _change_pkg_name(file_old,file_new,volume)
        finally:
            file_utils.release_lock_euid_egid()
def _change_pkg_name(file_old,file_new,volume):
    """Rewrite the layer-4 cross reference to record file_new.

    Index 4 of the xreference tuple receives the new path; index 0 receives
    the volume label when one is supplied.
    """
    # change file name in layer 4 of name space
    try:
        sfs = namespace.StorageFS(file_new)
        xrefs = sfs.get_xreference(file_new)
        if volume:
            xrefs[0] = volume
        xrefs[4] = file_new
        new_xrefs = sfs.set_xreference(*xrefs)
    except:
        # placeholder for debugging
        raise
###############################################################################
#function: is a string name of the function to call using apply()
#arg_list: is the argument list to pass to the function using apply()
#my_task: overrides the default step name for logging errors
#on_exception: 2-tuple consisting of function and arugument list to execute
# if function throws an exception.
def run_in_process(function, arg_list, my_task = "RUN_IN_PROCESS",
                   on_exception = None):
    """Fork a child process that runs function(*arg_list).

    Parent: records the child's pid in the global pid_list and returns.
    Child: runs the function, logging any unhandled exception, optionally
    invoking on_exception (a (callable, args) 2-tuple), signalling the
    parent to stop, then exiting via os._exit() with the error count.

    NOTE: Python 2 only (print statements, apply(), comma except clauses).
    """
    global pid_list
    global errors
    global log_f
    try:
        pid = os.fork()
    except OSError, msg:
        error_log("fork() failed: %s\n" % (str(msg),))
        return
    if pid > 0: #parent
        #Add this to the list.
        pid_list.append(pid)
    else: #child
        #Clear the list of the parent's other childern.  They are
        # not the childern of this current process.
        pid_list = []
        #Also, clear the error count for the child.
        errors = 0
        try:
            if debug:
                print "Starting %s." % (str(function),)
            apply(function, arg_list)
            # the global `errors` counter is the child's exit status
            res = errors
            if debug:
                print "Started %s." % (str(function),)
        except (KeyboardInterrupt, SystemExit):
            res = 1
        except:
            res = 1
            exc, msg, tb = sys.exc_info()
            Trace.handle_error(exc, msg, tb)
            del tb #Avoid cyclic references.
            error_log(my_task, str(exc), str(msg))
            #Execute this function only if an exception occurs.
            if type(on_exception) == types.TupleType \
               and len(on_exception) == 2:
                try:
                    apply(on_exception[0], on_exception[1])
                except:
                    message = "exception handler: %s: %s"\
                              % (sys.exc_info()[0],
                                 sys.exc_info()[1])
                    Trace.log(e_errors.ERROR, message)
            try:
                #Make an attempt to tell the parent
                # process to stop.
                parent_pid = os.getppid()
                if parent_pid > 1:
                    os.kill(parent_pid, signal.SIGTERM)
            except:
                pass
        #Try and force pending output to go where it needs to go.
        Trace.flush_and_sync(sys.stdout)
        Trace.flush_and_sync(sys.stderr)
        Trace.flush_and_sync(log_f)
        os._exit(res) #child exit
def wait_for_process(kill = False):
    """Wait for one forked child to finish and return its exit status.

    kill: when True, SIGTERM is first sent to the first pid in pid_list.
    """
    global pid_list
    #If we want them to die right now, send the signal.
    if kill:
        os.kill(pid_list[0], signal.SIGTERM)
    #We need to wait for a process to finsish.
    done_pid, done_exit_status = os.wait()
    #Remove the pid from the list of active pids.
    try:
        pid_list.remove(done_pid)
    except (IndexError), msg:
        # NOTE(review): list.remove() raises ValueError (not IndexError) when
        # the pid is absent, so this handler is likely dead code -- confirm
        # before relying on it.
        try:
            sys.stderr.write("%s\n" % (msg,))
            sys.stderr.flush()
        except IOError:
            pass
    return done_exit_status
def wait_for_processes(kill=False):
    """Wait until every child in the global pid_list has finished.

    kill: forwarded to wait_for_process() on each wait.

    Returns the sum of the collected exit statuses.
    """
    global pid_list
    total_status = 0
    while pid_list:
        total_status += wait_for_process(kill)
    return total_status
def __run_in_thread(function, on_exception, arg_list):
    """Thread target wrapper: run function(*arg_list), escalate failures.

    On an unhandled exception: log it, invoke the optional on_exception
    (callable, args) 2-tuple, flush pending output, SIGTERM the whole
    process, and re-raise the original exception (Python 2 3-arg raise).
    """
    try:
        apply(function, arg_list)
    except:
        Trace.handle_error()
        error_log(threading.currentThread().getName(),
                  "UNHANDLED EXCEPTION", str(sys.exc_info()[1]))
        #Execute this function only if an exception occurs.
        if type(on_exception) == types.TupleType \
           and len(on_exception) == 2:
            try:
                apply(on_exception[0], on_exception[1])
            except:
                message = "exception handler: %s: %s"\
                          % (sys.exc_info()[0],
                             sys.exc_info()[1])
                Trace.log(e_errors.ERROR, message)
        #Try and force pending output to go where it needs to go.
        Trace.flush_and_sync(sys.stdout)
        Trace.flush_and_sync(sys.stderr)
        Trace.flush_and_sync(log_f)
        try:
            #Make an attempt to tell the entire process to stop.
            pid = os.getpid()
            if pid > 1:
                os.kill(pid, signal.SIGTERM)
        except:
            pass
        # re-raise the original exception with its traceback (Python 2 form)
        raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
#function: is a string name of the function to call using apply()
#arg_list: is the argument list to pass to the function using apply()
#my_task: overrides the default step name for logging errors
#on_exception: 2-tuple consisting of function and arugument list to execute
# if function throws an exception.
def run_in_thread(function, arg_list, my_task = "RUN_IN_THREAD",
                  on_exception = None):
    """Start function(*arg_list) in a new thread, tracked in tid_list.

    The thread runs __run_in_thread so unhandled exceptions are logged and
    escalated; on_exception is a (callable, args) 2-tuple run on failure.
    """
    global tid_list
    # start a thread
    if debug:
        print "Starting %s." % (str(function),)
    try:
        #We use ThreadWithResult wrapper around thread.Thread() for its ability
        # to tell you if it is ready to be joined; and not for the exit
        # status of the thread, since migration threads handle errors on
        # their own.
        tid = ThreadWithResult(target=__run_in_thread, name=my_task,
                               args=(function, on_exception, arg_list))
        tid_list.append(tid) #append to the list of thread ids.
        tid.start()
    except (KeyboardInterrupt, SystemExit):
        pass
    except:
        exc, msg = sys.exc_info()[:2]
        error_log(my_task, "start_new_thread() failed: %s: %s\n" \
                  % (str(exc), str(msg)))
    if debug:
        print "Started %s [%s]." % (str(function), str(tid))
def wait_for_thread():
    """Join at most one finished thread from tid_list.

    Returns the number of errors encountered while joining (0 on success).
    Polls tid_list for a joinable thread, sleeping between checks, and
    returns as soon as one thread has been joined and removed.
    """
    global tid_list
    rtn = 0
    while len(tid_list) > 0:
        for i in range(len(tid_list)):
            #If we blindly go into the join(), we will be stuck waiting
            # for the thread to finish.  The problem with this, is that
            # the python thread join() function blocks signals like SIGINT
            # (Ctrl-C) allowing the other threads in the program to continue
            # to run.
            if tid_list[i].is_joinable:
                #We should only be trying to join threads that are done.
                try:
                    tid_list[i].join(1)
                except RuntimeError:
                    rtn = rtn + 1
                if debug:
                    print "Completed %s." % (str(tid_list[i]),)
                try:
                    del tid_list[i]
                except IndexError, msg:
                    rtn = rtn + 1
                    try:
                        sys.stderr.write("%s\n" % (msg,))
                        sys.stderr.flush()
                    except IOError:
                        pass
                return rtn #Only do one to avoid "i" being off.
            else:
                # not joinable yet; wait a little before checking the next one
                time.sleep(5)
    return rtn
def wait_for_threads():
    """Join every thread in the global tid_list.

    Returns the number of threads whose get_result() reported an error
    (a truthy value).
    """
    global tid_list
    error_count = 0
    # Threads are consumed one at a time from the front of the list; the
    # original looped back to the top of its while after each removal to
    # avoid a stale index, which this rewrite expresses directly.
    while tid_list:
        worker = tid_list[0]
        worker.join()
        failed = worker.get_result()
        del tid_list[0]
        if failed:
            error_count += 1
    return error_count
#Run in either threads or processes depending on the value of USE_THREADS.
def run_in_parallel(function, arg_list, my_task="RUN_IN_PARALLEL",
                    on_exception=None):
    """Dispatch function(*arg_list) to a worker thread or process.

    Chooses run_in_thread() when the module-level USE_THREADS switch is
    set, run_in_process() otherwise; arguments are forwarded unchanged.
    """
    worker = run_in_thread if USE_THREADS else run_in_process
    worker(function, arg_list, my_task=my_task, on_exception=on_exception)
def wait_for_parallel(kill=False):
    """Block until every worker launched by run_in_parallel() is done.

    kill: forwarded to wait_for_processes(); ignored in threaded mode.

    Returns the accumulated error/status count from the workers.
    """
    global tid_list, pid_list
    task = "WAIT_FOR_PARALLEL"
    if USE_THREADS:
        log(task, "thread_count:", str(len(tid_list)))
        return wait_for_threads()
    log(task, "process_count:", str(len(pid_list)))
    return wait_for_processes(kill=kill)
###############################################################################
def get_migration_db_path():
    """Return the first existing Migration DB path, or None.

    Checks MIGRATION_DB under each enstore admin mount point reported by
    chimera and returns the first candidate that os.stat() confirms
    exists.  Returns None when no mount point has one.

    NOTE: this assumes every path under the Migration DB directory
    references the same top directory, and that such a directory exists
    at all -- see the original discussion about using a temp file instead.
    """
    for mount_point in chimera.get_enstore_admin_mount_point():
        candidate = os.path.join(mount_point, MIGRATION_DB)
        try:
            os.stat(candidate)
        except (OSError, IOError):
            # not present under this mount point; try the next one
            continue
        # found one; hope it is the correct one
        return candidate
    return None
###############################################################################
#If the source path is just a made up string, return true, otherwise false.
def is_deleted_path(filepath):
    """Return True if filepath is a made-up placeholder path.

    Placeholder paths for deleted files begin with the literal prefix
    "deleted-"; real name-space paths do not.
    """
    return filepath.startswith("deleted-")
def is_migration_path(filepath):
#Make sure this is a string.
if type(filepath) != types.StringType:
raise TypeError("Expected string filename.",e_errors.WRONGPARAMETER)
dname, fname = os.path.split(filepath)
#Is this good enough? Or does something more stringent need to
# be used. Only check the directories (scans of the PNFS migration
# DB were failing because they | |
# ionoscloud/models/volume_properties.py
# coding: utf-8
"""
CLOUD API
IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501
The version of the OpenAPI document: 6.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class VolumeProperties(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'type': 'str',
'size': 'float',
'availability_zone': 'str',
'image': 'str',
'image_password': 'str',
'image_alias': 'str',
'ssh_keys': 'list[str]',
'bus': 'str',
'licence_type': 'str',
'cpu_hot_plug': 'bool',
'ram_hot_plug': 'bool',
'nic_hot_plug': 'bool',
'nic_hot_unplug': 'bool',
'disc_virtio_hot_plug': 'bool',
'disc_virtio_hot_unplug': 'bool',
'device_number': 'int',
'pci_slot': 'int',
'backupunit_id': 'str',
'user_data': 'str',
'boot_server': 'str',
}
attribute_map = {
'name': 'name',
'type': 'type',
'size': 'size',
'availability_zone': 'availabilityZone',
'image': 'image',
'image_password': '<PASSWORD>',
'image_alias': 'imageAlias',
'ssh_keys': 'sshKeys',
'bus': 'bus',
'licence_type': 'licenceType',
'cpu_hot_plug': 'cpuHotPlug',
'ram_hot_plug': 'ramHotPlug',
'nic_hot_plug': 'nicHotPlug',
'nic_hot_unplug': 'nicHotUnplug',
'disc_virtio_hot_plug': 'discVirtioHotPlug',
'disc_virtio_hot_unplug': 'discVirtioHotUnplug',
'device_number': 'deviceNumber',
'pci_slot': 'pciSlot',
'backupunit_id': 'backupunitId',
'user_data': 'userData',
'boot_server': 'bootServer',
}
    def __init__(self, name=None, type=None, size=None, availability_zone=None, image=None, image_password=None, image_alias=None, ssh_keys=None, bus=None, licence_type=None, cpu_hot_plug=None, ram_hot_plug=None, nic_hot_plug=None, nic_hot_unplug=None, disc_virtio_hot_plug=None, disc_virtio_hot_unplug=None, device_number=None, pci_slot=None, backupunit_id=None, user_data=None, boot_server=None, local_vars_configuration=None):  # noqa: E501
        """VolumeProperties - a model defined in OpenAPI"""  # noqa: E501
        # local_vars_configuration controls client-side validation in the
        # property setters; default to a fresh Configuration when absent.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the generated properties below.
        self._name = None
        self._type = None
        self._size = None
        self._availability_zone = None
        self._image = None
        self._image_password = None
        self._image_alias = None
        self._ssh_keys = None
        self._bus = None
        self._licence_type = None
        self._cpu_hot_plug = None
        self._ram_hot_plug = None
        self._nic_hot_plug = None
        self._nic_hot_unplug = None
        self._disc_virtio_hot_plug = None
        self._disc_virtio_hot_unplug = None
        self._device_number = None
        self._pci_slot = None
        self._backupunit_id = None
        self._user_data = None
        self._boot_server = None
        self.discriminator = None

        # Optional fields are assigned only when provided so unset fields
        # stay None; `size` is required and always assigned (its setter
        # rejects None when client-side validation is enabled).
        if name is not None:
            self.name = name
        if type is not None:
            self.type = type
        self.size = size
        if availability_zone is not None:
            self.availability_zone = availability_zone
        if image is not None:
            self.image = image
        if image_password is not None:
            self.image_password = image_password
        if image_alias is not None:
            self.image_alias = image_alias
        if ssh_keys is not None:
            self.ssh_keys = ssh_keys
        if bus is not None:
            self.bus = bus
        if licence_type is not None:
            self.licence_type = licence_type
        if cpu_hot_plug is not None:
            self.cpu_hot_plug = cpu_hot_plug
        if ram_hot_plug is not None:
            self.ram_hot_plug = ram_hot_plug
        if nic_hot_plug is not None:
            self.nic_hot_plug = nic_hot_plug
        if nic_hot_unplug is not None:
            self.nic_hot_unplug = nic_hot_unplug
        if disc_virtio_hot_plug is not None:
            self.disc_virtio_hot_plug = disc_virtio_hot_plug
        if disc_virtio_hot_unplug is not None:
            self.disc_virtio_hot_unplug = disc_virtio_hot_unplug
        if device_number is not None:
            self.device_number = device_number
        if pci_slot is not None:
            self.pci_slot = pci_slot
        if backupunit_id is not None:
            self.backupunit_id = backupunit_id
        if user_data is not None:
            self.user_data = user_data
        if boot_server is not None:
            self.boot_server = boot_server
    @property
    def name(self) -> str:
        """Gets the name of this VolumeProperties.  # noqa: E501

        The name of the resource.  # noqa: E501

        :return: The name of this VolumeProperties.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name: str) -> None:
        """Sets the name of this VolumeProperties.

        The name of the resource.  # noqa: E501

        :param name: The name of this VolumeProperties.  # noqa: E501
        :type name: str
        """
        self._name = name
    @property
    def type(self) -> str:
        """Gets the type of this VolumeProperties.  # noqa: E501

        Hardware type of the volume. DAS (Direct Attached Storage) could be used only in a composite call with a Cube server.  # noqa: E501

        :return: The type of this VolumeProperties.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type: str) -> None:
        """Sets the type of this VolumeProperties.

        Hardware type of the volume. DAS (Direct Attached Storage) could be used only in a composite call with a Cube server.  # noqa: E501

        :param type: The type of this VolumeProperties.  # noqa: E501
        :type type: str
        :raises ValueError: if client-side validation is enabled and `type`
            is not one of the allowed values.
        """
        allowed_values = ["HDD", "SSD", "SSD Standard", "SSD Premium", "DAS", "ISO"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )

        self._type = type
    @property
    def size(self) -> float:
        """Gets the size of this VolumeProperties.  # noqa: E501

        The size of the volume in GB.  # noqa: E501

        :return: The size of this VolumeProperties.  # noqa: E501
        :rtype: float
        """
        return self._size
    @size.setter
    def size(self, size: float) -> None:
        """Sets the size of this VolumeProperties.

        The size of the volume in GB.  # noqa: E501

        :param size: The size of this VolumeProperties.  # noqa: E501
        :type size: float
        :raises ValueError: if client-side validation is enabled and `size`
            is None (size is a required field).
        """
        if self.local_vars_configuration.client_side_validation and size is None:  # noqa: E501
            raise ValueError("Invalid value for `size`, must not be `None`")  # noqa: E501

        self._size = size
    @property
    def availability_zone(self) -> str:
        """Gets the availability_zone of this VolumeProperties.  # noqa: E501

        The availability zone in which the volume should be provisioned. The storage volume will be provisioned on as few physical storage devices as possible, but this cannot be guaranteed upfront. This is uavailable for DAS (Direct Attached Storage), and subject to availability for SSD.  # noqa: E501

        :return: The availability_zone of this VolumeProperties.  # noqa: E501
        :rtype: str
        """
        return self._availability_zone
    @availability_zone.setter
    def availability_zone(self, availability_zone: str) -> None:
        """Sets the availability_zone of this VolumeProperties.

        The availability zone in which the volume should be provisioned. The storage volume will be provisioned on as few physical storage devices as possible, but this cannot be guaranteed upfront. This is uavailable for DAS (Direct Attached Storage), and subject to availability for SSD.  # noqa: E501

        :param availability_zone: The availability_zone of this VolumeProperties.  # noqa: E501
        :type availability_zone: str
        :raises ValueError: if client-side validation is enabled and the
            value is not one of the allowed zones.
        """
        allowed_values = ["AUTO", "ZONE_1", "ZONE_2", "ZONE_3"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and availability_zone not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `availability_zone` ({0}), must be one of {1}"  # noqa: E501
                .format(availability_zone, allowed_values)
            )

        self._availability_zone = availability_zone
@property
def image(self):
    """Gets the image of this VolumeProperties.

    Image or snapshot ID to be used as template for this volume.

    :return: The image of this VolumeProperties.
    :rtype: str
    """
    return self._image
@image.setter
def image(self, image):
    """Sets the image of this VolumeProperties.

    Image or snapshot ID to be used as template for this volume.
    No client-side validation is applied to this field.

    :param image: The image of this VolumeProperties.
    :type image: str
    """
    self._image = image
@property
def image_password(self):
    """Gets the image_password of this VolumeProperties.

    Initial password to be set for installed OS. Works with public images
    only. Not modifiable, forbidden in update requests. Password rules
    allows all characters from a-z, A-Z, 0-9.

    :return: The image_password of this VolumeProperties.
    :rtype: str
    """
    return self._image_password
@image_password.setter
def image_password(self, image_password):
    """Sets the image_password of this VolumeProperties.

    Initial password to be set for installed OS. Works with public images
    only. Not modifiable, forbidden in update requests. Password rules
    allows all characters from a-z, A-Z, 0-9.
    No client-side validation is applied to this field.

    :param image_password: The image_password of this VolumeProperties.
    :type image_password: str
    """
    self._image_password = image_password
@property
def image_alias(self):
    """Gets the image_alias of this VolumeProperties.

    :return: The image_alias of this VolumeProperties.
    :rtype: str
    """
    return self._image_alias
@image_alias.setter
def image_alias(self, image_alias):
    """Sets the image_alias of this VolumeProperties.

    No client-side validation is applied to this field.

    :param image_alias: The image_alias of this VolumeProperties.
    :type image_alias: str
    """
    self._image_alias = image_alias
@property
def ssh_keys(self):
"""Gets the ssh_keys of this VolumeProperties. # noqa: E501
Public SSH keys are set on the image as authorized keys for appropriate SSH login to the instance using the corresponding private | |
# <gh_stars>0
import sklearn
import torch
import numpy as np
from torch.utils.data import Dataset, SubsetRandomSampler, Subset
from util.utils import cpuStats
from os import path
from tqdm import tqdm, trange
from util.utils import report_gpumem, cpuStats
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
def minmax_scale(x, new_min=-1., new_max=1.):
    """Rescale each row of ``x`` into [new_min, new_max].

    Min/max are taken along dim 1, so every row is scaled independently.
    """
    row_max = x.max(1, keepdim=True)[0]
    row_min = x.min(1, keepdim=True)[0]
    span = row_max - row_min
    return (x - row_min) / span * (new_max - new_min) + new_min
def parse_filenames(pfiles):
    '''
    Given path to a txt file containing data file paths, one per line,
    return the file names as a list (trailing whitespace stripped).
    '''
    with open(pfiles, 'r') as fh:
        return [line.rstrip() for line in fh]
def write_stats(particle, outpath):
    '''
    Compute per-channel min/max of particle data (N, C) and save to an npy
    file. The saved array has shape (S, C) with S={min, max}: row 0 is the
    minima, row 1 the maxima.
    '''
    stats = np.concatenate(
        [particle.min(0, keepdims=True), particle.max(0, keepdims=True)], 0)
    np.save(outpath, stats)
    print(f'stats shaped {stats.shape} saved {outpath}')
def standardization(x: torch.Tensor, dim=None):
    """Standardize ``x`` to zero mean and unit variance.

    Arguments:
        x   : input tensor
        dim : dimension to reduce over (None -> global mean/std)

    Return:
        (x - mean) / std, same shape as ``x``

    Bug fixes vs. the previous version:
    * the ``dim`` branch used ``torch.min`` where the standard deviation was
      intended, and indexed the tensors returned by ``torch.mean``/``torch.std``
      with ``[0]``, silently dropping the kept dimension;
    * the return expression was ``x - mean / std`` (precedence bug) instead of
      ``(x - mean) / std``.
    """
    if dim is None:
        xmean = x.mean()
        xstd = x.std()
    else:
        xmean = torch.mean(x, dim=dim, keepdim=True)
        xstd = torch.std(x, dim=dim, keepdim=True)
    return (x - xmean) / xstd
def normalization(x: np.ndarray, new_min=-1, new_max=1, dim=None):
    """Linearly map ``x`` into [new_min, new_max].

    Min/max are taken over ``dim`` (kept as a broadcastable axis) or over the
    whole array when ``dim`` is None.
    """
    if dim is None:
        lo, hi = x.min(), x.max()
    else:
        lo = np.min(x, dim, keepdims=True)
        hi = np.max(x, dim, keepdims=True)
    return (x - lo) / (hi - lo) * (new_max - new_min) + new_min
# def normalization(x:torch.Tensor, new_min=-1, new_max=1, dim=None):
# if dim is None:
# curr_max = x.max()
# curr_min = x.min()
# else:
# curr_max = torch.max(x, dim=dim, keepdim=True)[0]
# curr_min = torch.min(x, dim=dim, keepdim=True)[0]
# return (x - curr_min) / (curr_max - curr_min) * (new_max - new_min) + new_min
def stand_norm(x):
    """Standardize ``x`` and then min-max scale the result into [-1, 1]."""
    return normalization(standardization(x))
def train_val_indices(dataset_size, valid_split):
    """Create shuffled train/validation index arrays.

    Uses a fixed seed (42) so the split is reproducible: the first
    floor(valid_split * dataset_size) shuffled indices become validation,
    the remainder training.

    Return:
        (train_indices, val_indices) as numpy arrays
    """
    seed = 42
    indices = np.arange(dataset_size)
    np.random.seed(seed)
    np.random.shuffle(indices)
    n_val = int(np.floor(valid_split * dataset_size))
    return indices[n_val:], indices[:n_val]
def train_val(dataset: Dataset, valid_split):
    """Split ``dataset`` into a training sampler and a validation subset.

    Indices come from train_val_indices (fixed seed 42), so the split is
    reproducible.

    Arguments:
        dataset     : any indexable dataset
        valid_split : fraction of items reserved for validation

    Return:
        (SubsetRandomSampler over the training indices,
         Subset view of the validation items)

    Note: the previous version carried unused locals and unreachable
    DataLoader-construction code after the return (it referenced undefined
    names); both have been removed.
    """
    train_indices, val_indices = train_val_indices(len(dataset), valid_split)
    train_sampler = SubsetRandomSampler(train_indices)
    val_dataset = Subset(dataset, val_indices)
    return train_sampler, val_dataset
class NormScaler():
    """Min-max scaler that captures the data range at construction time.

    Maps values from the observed [min, max] of ``x`` into ``new_range``.
    """

    def __init__(self, x, new_range=[-1, 1], dim=None):
        if dim is None:
            lo, hi = x.min(), x.max()
        else:
            hi = torch.max(x, dim=dim, keepdim=True)[0]
            lo = torch.min(x, dim=dim, keepdim=True)[0]
        self.range = [lo, hi]
        self.new_range = new_range

    def __call__(self, x):
        old_lo, old_hi = self.range
        new_lo, new_hi = self.new_range
        return (x - old_lo) / (old_hi - old_lo) * (new_hi - new_lo) + new_lo

    def swap_minmax(self):
        """Swap the stored source/target ranges (used to invert the mapping)."""
        self.range, self.new_range = self.new_range, self.range

    def recover(self, transed_x):
        """Map transformed data back to the original range."""
        self.swap_minmax()
        original = self(transed_x)
        self.swap_minmax()
        return original
class StandScaler():
    """Standardizing scaler: z = (x - mean) / std, stats captured from ``x``.

    Bug fixes vs. the previous version:
    * ``__call__`` computed ``x - mean / std`` (precedence bug); it now
      computes ``(x - mean) / std``, matching ``recover`` which already
      inverted the intended transform (z * std + mean);
    * the ``dim`` branch indexed the tensors returned by
      ``torch.mean``/``torch.std`` with ``[0]``, dropping the kept
      dimension; the results are now used directly.
    """

    def __init__(self, x, dim=None):
        if dim is None:
            mean = x.mean()
            std = x.std()
        else:
            mean = torch.mean(x, dim=dim, keepdim=True)
            std = torch.std(x, dim=dim, keepdim=True)
        self.mean = mean
        self.std = std

    def __call__(self, x):
        return (x - self.mean) / self.std

    def recover(self, transed_x):
        """Invert the transform: x = z * std + mean."""
        return transed_x * self.std + self.mean
# class TransPipeline():
# def __init__(self):
# self.norm = NormScaler()
# self.stand = StandScaler()
def fitTransPipeline(x: np.ndarray):
    """Fit a standardize-then-minmax([-1, 1]) pipeline on ``x`` and apply it.

    Return:
        (transformed array, with x's original shape; the fitted Pipeline1D)

    Note: the previous version stored ``x.shape`` in an unused local; removed.
    """
    pp = Pipeline1D([
        ('stand', StandardScaler()),
        ('norm', MinMaxScaler((-1, 1))),
    ])
    return pp.fit_transform(x), pp
# global pipeline (norm and stand coordinates in SphericalDataset's case)
# global pipeline (norm and stand coordinates in SphericalDataset's case)
class Pipeline1D(Pipeline):
    """sklearn Pipeline that flattens its input to one feature column.

    Arrays of any shape are reshaped to (-1, 1) before being handed to the
    underlying Pipeline and reshaped back afterwards, so per-feature scalers
    (StandardScaler / MinMaxScaler) see a single global feature.

    NOTE(review): ``fit``/``transform``/``inverse_transform`` call the
    private ``Pipeline._fit`` / ``_transform`` / ``_inverse_transform``
    methods, which are not stable sklearn API — verify against the pinned
    sklearn version.
    """

    def fit_transform(self, X, y=None, **fit_params):
        x_shape = X.shape
        X = X.reshape(-1, 1)
        X = super().fit_transform(X, y, **fit_params)
        return X.reshape(x_shape)

    def fit(self, X, y=None, **fit_params):
        # NOTE(review): Pipeline._fit returns the transformed data, not
        # ``self``, so this ``fit`` does not follow the usual sklearn
        # "return the estimator" contract — confirm callers rely on this.
        # (An unused ``x_shape`` local was removed here.)
        return super()._fit(X.reshape(-1, 1), y, **fit_params)

    def transform(self, X):
        x_shape = X.shape
        X = super()._transform(X.reshape(-1, 1))
        return X.reshape(x_shape)

    def inverse_transform(self, X):
        x_shape = X.shape
        X = super()._inverse_transform(X.reshape(-1, 1))
        return X.reshape(x_shape)
'''
data_dir: "../data/",
curv_idx: [0,1,2],
cart_idx: [3,4,5],
'''
class SphericalDataset(Dataset):
    """Dataset mapping Cartesian coords -> curvilinear coords from an npy file.

    Arguments:
        data_path : npy file of shape (*dims, C) holding both coordinate sets
        curv_idx  : channel indices of the curvilinear coords (targets)
        cart_idx  : channel indices of the Cartesian coords (inputs)
        intrans / outtrans : callables ``f(x) -> (transformed, pipeline)``
            applied to the flattened inputs / targets, or None to skip.

    Bug fix: the previous version ignored ``intrans``/``outtrans`` (it always
    called fitTransPipeline) and crashed with an undefined attribute when
    either was None. Defaults are unchanged, so default behavior is identical.
    """

    def __init__(self, data_path, curv_idx, cart_idx,
                 intrans=fitTransPipeline, outtrans=fitTransPipeline):
        self.data_path = data_path
        self.coords = np.load(data_path)
        self.dims = self.coords.shape[:-1]
        self.curv_idx = curv_idx
        self.cart_idx = cart_idx
        self.curv = self.coords[..., curv_idx]
        self.cart = self.coords[..., cart_idx]
        assert len(self.curv) == len(self.cart)
        # Flatten to (N, C) and fit/apply the requested transforms.
        if intrans is not None:
            self.cart_prep, self.inpp = intrans(self.cart.reshape(-1, len(cart_idx)))
        else:
            self.cart_prep, self.inpp = self.cart.reshape(-1, len(cart_idx)), None
        if outtrans is not None:
            self.curv_prep, self.outpp = outtrans(self.curv.reshape(-1, len(curv_idx)))
        else:
            self.curv_prep, self.outpp = self.curv.reshape(-1, len(curv_idx)), None
        self.cart_prep = torch.tensor(self.cart_prep)
        self.curv_prep = torch.tensor(self.curv_prep)

    def __len__(self):
        return len(self.curv_prep)

    def __getitem__(self, idx):
        # (input, target) pair of flattened coordinate rows.
        return self.cart_prep[idx], self.curv_prep[idx]
class SphericalBlockDataset(Dataset):
    """Block-partitioned coordinate dataset.

    The coordinate volume loaded from ``data_path`` (shape (*dims, C)) is cut
    into ``downscale`` chunks along each spatial axis; every block is
    flattened to (N, C), optionally transformed with fitTransPipeline, and
    split into train/valid row subsets. ``__getitem__`` returns the four
    arrays for one block.
    """
    def __init__(self, data_path, curv_idx, cart_idx, intrans=fitTransPipeline, outtrans=fitTransPipeline, downscale=8, axis=[0,1,2], valid_split=0.2):
        # NOTE(review): intrans/outtrans are only used as None-flags below —
        # the transform applied is always fitTransPipeline. Confirm intent.
        self.data_path = data_path
        self.coords = np.load(data_path)  # (*dims, C) coordinate volume
        self.dims = self.coords.shape[:-1]
        self.curv_idx = curv_idx  # channels of the curvilinear coords (targets)
        self.cart_idx = cart_idx  # channels of the Cartesian coords (inputs)
        self.curv = self.coords[..., curv_idx]
        self.cart = self.coords[..., cart_idx]
        assert self.curv.shape == self.cart.shape
        self.downscale = downscale
        self.axis = axis  # NOTE(review): stored but unused below — confirm intent
        self.curv = torch.tensor(self.curv)
        self.cart = torch.tensor(self.cart)
        # Partition both volumes into lists of flattened (N, C) blocks.
        self.carts = self.get_block(self.cart, self.downscale, self.axis)
        self.curvs = self.get_block(self.curv, self.downscale, self.axis)
        # self.cart = self.coords[..., cart_idx].reshape(-1, len(cart_idx))
        self.hasIntrans = False
        self.hasOutrans = False
        if intrans is not None:
            self.hasIntrans = True
            print("transforming inputs")
            # After this loop each entry of self.carts is an (array, pipeline) tuple.
            for i, coords in enumerate(self.carts):
                new_coord, pp = fitTransPipeline(coords.numpy())
                self.carts[i] = (new_coord, pp)
            # print(self.cart.mean())
        if outtrans is not None:
            self.hasOutrans = True
            print("transforming targets")
            for i, coords in enumerate(self.curvs):
                new_coord, pp = fitTransPipeline(coords.numpy())
                self.curvs[i] = (new_coord, pp)
            # print(self.curv.mean())
        # train valid split for cart
        print("creating train test splits for inputs")
        self.cart_train, self.cart_valid = self.train_valid(self.carts, valid_split)
        print("creating train test splits for targets")
        self.curv_train, self.curv_valid = self.train_valid(self.curvs, valid_split)

    def train_valid(self, x, valid_split):
        """Split each block's rows into train/valid via train_val_indices (seed 42)."""
        x_train = [0] * len(x)
        x_valid = [0] * len(x)
        for i, coords in enumerate(x):
            if isinstance(coords, tuple):
                # Transformed blocks are (array, pipeline); drop the pipeline here.
                coords = coords[0]
            dataset_size = len(coords)
            train_indices, val_indices = train_val_indices(dataset_size, valid_split)
            x_train[i] = coords[train_indices]
            x_valid[i] = coords[val_indices]
        return x_train, x_valid

    def get_block(self, coords, downscale, axis):
        """Cut a 3-D grid (+ channel dim) into downscale^3 flattened blocks.

        Return:
            list of (N, C) blocks, in i-major, then j, then k order.
        """
        # Chunk boundaries along each spatial dimension.
        dim_chunks = [np.arange(0, length+1, length//downscale) for length in coords.shape[:-1]]
        coords_block = []
        dimi, dimj, dimk = dim_chunks
        for i in range(len(dimi)-1):
            block_i = [dimi[i], dimi[i+1]]
            for j in range(len(dimj)-1):
                block_j = [dimj[j], dimj[j+1]]
                for k in range(len(dimk)-1):
                    block_k = [dimk[k], dimk[k+1]]
                    # print(block_i, block_j, block_k)
                    block = coords[ block_i[0]:block_i[1], block_j[0]:block_j[1], block_k[0]:block_k[1] ]
                    # reshape coord block to (N, 3)
                    coords_block.append(block.reshape(-1, block.shape[-1]))
        return coords_block

    def __len__(self):
        # Number of blocks (one sample per block).
        return len(self.cart_train)

    def __getitem__(self, idx):
        return (
            self.cart_train[idx], self.curv_train[idx],
            self.cart_valid[idx], self.curv_valid[idx],
        )
# trajectory data: (N, Time, C), C=6: harm, cart
# input: (pos_t, t)
# target: (pos_t+1 - pos_t)
class TrajectoryDeltaDataset(Dataset):
    """Pathline dataset yielding (start_position + time, position_at_time) pairs.

    Despite the name, the active code path learns the flow map
    (start, t) -> position(t); the delta-prediction variant is kept below as
    commented-out reference.
    """
    def __init__(self, data_dir, file_name, len_file_name, in_idx, input_trans=None):
        # Paths to the trajectory array and the per-trajectory length array.
        self.fp = path.join(data_dir, file_name)
        self.len_fp = path.join(data_dir, len_file_name)
        self.input_trans = input_trans
        self.in_idx = in_idx  # channels used as coordinates
        print("loading")
        data = np.load(self.fp)  # assumed (N, Time, C) — see module comment
        self.timelen = np.load(self.len_fp)  # valid timesteps per trajectory
        self.input = torch.tensor(data[:, :, in_idx], dtype=torch.float32)
        # self.input_coord = torch.tensor(data[:, 0, in_idx], dtype=torch.float32)
        if self.input_trans is not None:
            self.input = self.input_trans(self.input, dim=None)
        input_list = []
        target_list = []
        # PREDICT DELTA
        # for i in trange(self.timelen.shape[0]):
        #     curr_pos = self.input[i, :(self.timelen[i]-1)]
        #     curr_time = torch.arange(0, self.timelen[i]-1, 1).unsqueeze(-1)
        #     next_pos = self.input[i, 1:self.timelen[i]]
        #     delta = next_pos - curr_pos
        #     input_list.append(torch.concat([curr_pos, curr_time], dim=-1))
        #     target_list.append(delta)
        # print(len(input_list))
        # self.input = torch.concat(input_list, dim=0)
        # self.target = torch.concat(target_list, dim=0)
        # PREDICT FLOW MAP
        # Seed position of every pathline, shape (N, len(in_idx)).
        self.starts = self.input[:, 0, :].view(-1, len(in_idx))
        # for each pathline i
        for i in trange(len(self.timelen)):
            # Repeat the start position once per later timestep of pathline i,
            # pair it with the timestep index, and target the true position.
            curr_start = self.starts[i].unsqueeze(0).repeat(self.timelen[i]-1, 1)
            curr_time = torch.arange(1, self.timelen[i], 1).unsqueeze(-1)
            curr_target = self.input[i, 1:self.timelen[i], :]
            input_list.append(torch.concat([curr_start, curr_time], dim=-1))
            target_list.append(curr_target)
        self.input = torch.concat(input_list, dim=0)
        self.target = torch.concat(target_list, dim=0)
        print("Data processed.")
        print("input shape:", self.input.shape)
        print("target shape:", self.target.shape)

    def __len__(self):
        # One sample per (trajectory, timestep > 0) pair.
        return sum(self.timelen) - len(self.timelen)

    def __getitem__(self, idx):
        # random sample
        return self.input[idx], self.target[idx]
class VBHGridDataset(Dataset):
    """Grid dataset yielding (coordinate, value) rows.

    Loads the first file listed in ``files_path`` (an npy volume of shape
    (*grid_dim, C)), flattens it to rows, and optionally applies input /
    target transforms.
    """

    def __init__(self, data_dir, files_path, in_idx, target_idx,
                 input_trans=standardization, target_trans=None):
        self.dir = data_dir
        self.in_idx = in_idx          # channels used as coordinates
        self.target_idx = target_idx  # channels used as values
        self.files = parse_filenames(files_path)
        self.input_trans = input_trans
        self.target_trans = target_trans
        # Only the first listed file is loaded.
        self.pdata = path.join(self.dir, self.files[0])
        raw = np.load(self.pdata)
        self.grid_dim = raw.shape[:-1]
        flat = torch.tensor(raw, dtype=torch.float32).reshape(-1, raw.shape[-1])
        self.coord = flat[:, in_idx]
        self.val = flat[:, target_idx]
        if self.input_trans is not None:
            self.coord = self.input_trans(self.coord, dim=0)
        if self.target_trans is not None:
            self.val = self.target_trans(self.val)

    def __len__(self):
        return len(self.coord)

    def __getitem__(self, idx):
        return self.coord[idx], self.val[idx]
# tracer features:
# ['Xcart/0-2', 'mass/3', 'rho/4', 'T/5', 'Press/6', 'uu/7', 'Ye/8', 'Ye_em/9']
# gt_path: full res tracer
# sampple_path: downsampeld tracer
class TracerDataset(Dataset):
def __init__(self, gt_dir, sample_dir, files_path, attr_idx=[0,1,2,8], downscale=2, transform=None, target_transform=minmax_scale,):
# assert len(gt_path) == len(sample_path)
self.gt_dir = gt_dir
self.smp_dir = sample_dir
self.files = parse_filenames(files_path)
self.attr_idx = attr_idx
self.transform = transform
| |
'Kuwaiti'
elif doc.one_fm_have_a_valid_visa_in_kuwait:
source_of_hire = 'Local'
if doc.one_fm_have_a_valid_visa_in_kuwait and doc.one_fm_visa_type:
filters['visa_type'] = doc.one_fm_visa_type
filters['source_of_hire'] = source_of_hire
from one_fm.one_fm.doctype.recruitment_document_checklist.recruitment_document_checklist import get_recruitment_document_checklist
document_checklist_obj = get_recruitment_document_checklist(filters)
document_checklist = False
if document_checklist_obj and document_checklist_obj.recruitment_documents:
document_checklist = document_checklist_obj.recruitment_documents
if document_checklist:
for checklist in document_checklist:
doc_required = doc.append('one_fm_documents_required')
fields = ['document_required', 'required_when', 'or_required_when', 'type_of_copy', 'or_type_of_copy', 'not_mandatory']
for field in fields:
doc_required.set(field, checklist.get(field))
def set_job_applicant_fields(doc):
    """Copy the One FM email field onto the standard Job Applicant email_id field."""
    doc.email_id = doc.one_fm_email_id
def validate_mandatory_fields(doc):
    """Throw a formatted error listing any mandatory Job Applicant fields left empty.

    Static fields are checked first, then the conditionally-mandatory ones
    collected from the dependent sections.
    """
    field_list = [
        {'First Name': 'one_fm_first_name'}, {'Last Name': 'one_fm_last_name'},
        {'Passport Number': 'one_fm_passport_number'},
        {'Place of Birth': 'one_fm_place_of_birth'}, {'Email ID': 'one_fm_email_id'},
        {'Marital Status': 'one_fm_marital_status'},
        {'Passport Holder of': 'one_fm_passport_holder_of'},
        {'Passport Issued on': 'one_fm_passport_issued'},
        {'Passport Expires on ': 'one_fm_passport_expire'},
        {'Gender': 'one_fm_gender'}, {'Religion': 'one_fm_religion'},
        {'Date of Birth': 'one_fm_date_of_birth'},
        {'Educational Qualification': 'one_fm_educational_qualification'},
        {'University / School': 'one_fm_university'},
    ]
    field_list.extend(get_mandatory_for_dependent_fields(doc))
    missing = [
        label
        for entry in field_list
        for label, fieldname in entry.items()
        if not doc.get(fieldname)
    ]
    if missing:
        message = 'Mandatory fields required in Job Applicant<br><br><ul>'
        for label in missing:
            message += '<li>' + label + '</li>'
        message += '</ul>'
        frappe.throw(message)
def get_mandatory_for_dependent_fields(doc):
    """Collect conditionally-mandatory field mappings from every dependent section."""
    sections = (
        get_mandatory_fields_current_employment,
        get_mandatory_fields_visa_details,
        get_mandatory_fields_contact_details,
        get_mandatory_fields_work_details,
    )
    fields = []
    for section in sections:
        fields.extend(section(doc))
    return fields
def get_mandatory_fields_work_details(doc):
    """Mandatory work-detail fields, driven by flags on the linked ERF (if any)."""
    field_list = []
    if doc.one_fm_erf:
        erf = frappe.get_doc('ERF', doc.one_fm_erf)
        if erf.shift_working:
            field_list.append({'Rotation Shift': 'one_fm_rotation_shift'})
        if erf.night_shift:
            field_list.append({'Night Shift': 'one_fm_night_shift'})
        if erf.travel_required:
            if erf.type_of_travel:
                field_list.append({'Type of Travel': 'one_fm_type_of_travel'})
            # NOTE(review): driving license is treated as mandatory whenever
            # travel is required (indentation was ambiguous in the reviewed
            # copy) — confirm it should not depend on type_of_travel.
            field_list.append({'Type of Driving License': 'one_fm_type_of_driving_license'})
    return field_list
def get_mandatory_fields_contact_details(doc):
    """Contact fields are currently never mandatory.

    The disabled block below made them mandatory for direct (non-agency)
    applicants; kept for reference.
    """
    # if not doc.one_fm_is_agency_applying:
    #     return [{'Country Code for Primary Contact Number': 'one_fm_country_code'},
    #             {'Primary Contact Number': 'one_fm_contact_number'}]
    return []
def get_mandatory_fields_visa_details(doc):
    """Visa fields are mandatory only when the applicant holds a valid Kuwait visa."""
    if not doc.one_fm_have_a_valid_visa_in_kuwait:
        return []
    return [
        {'Visa Type': 'one_fm_visa_type'},
        {'Civil ID Number': 'one_fm_cid_number'},
        {'Civil ID Valid Till': 'one_fm_cid_expire'},
    ]
def get_mandatory_fields_current_employment(doc):
    """Employment-history fields are mandatory only for currently-working applicants."""
    if not doc.one_fm_i_am_currently_working:
        return []
    return [
        {'Current Employeer': 'one_fm_current_employer'},
        {'Employment Start Date': 'one_fm_employment_start_date'},
        {'Employment End Date': 'one_fm_employment_end_date'},
        {'Current Job Title': 'one_fm_current_job_title'},
        {'Current Salary in KWD': 'one_fm_current_salary'},
        {'Country of Employment': 'one_fm_country_of_employment'},
        {'Notice Period in Days': 'one_fm_notice_period_in_days'},
    ]
def set_job_applicant_status(doc, method):
    """doc_event hook: derive one_fm_document_verification from required documents.

    Skipped once the applicant is 'Selected'. Status rules:
    * any document missing without an exception  -> 'Not Verified'
    * otherwise, any missing but excepted doc    -> 'Verified - With Exception'
    * everything received                        -> 'Verified'

    Bug fix: the previous loop let the *last* unreceived row decide the
    status, so a missing-with-exception row after a truly missing one masked
    'Not Verified'. It also carried unused locals. 'Not Verified' now always
    wins.
    """
    if doc.one_fm_applicant_status == 'Selected':
        return
    if not doc.one_fm_documents_required:
        return
    status = 'Verified'
    for document_required in doc.one_fm_documents_required:
        if document_required.received:
            continue
        if document_required.exception:
            if status == 'Verified':
                status = 'Verified - With Exception'
        else:
            # A truly missing document outweighs any exceptions.
            status = 'Not Verified'
            break
    doc.one_fm_document_verification = status
def create_job_offer_from_job_applicant(job_applicant):
    """Create a draft Job Offer for the given Job Applicant name, if none exists.

    Validates the day-off settings first (throws on bad values), copies basic
    applicant fields, and — when the applicant is linked to an ERF — copies
    the ERF-driven details as well.
    """
    # Only create if there is no non-cancelled Job Offer for this applicant.
    if not frappe.db.exists('Job Offer', {'job_applicant': job_applicant, 'docstatus': ['<', 2]}):
        job_app = frappe.get_doc('Job Applicant', job_applicant)
        if not job_app.number_of_days_off:
            frappe.throw(_("Please set the number of days off."))
        if job_app.day_off_category == "Weekly" and frappe.utils.cint(job_app.number_of_days_off) > 7:
            frappe.throw(_("Number of days off cannot be more than a Week!"))
        elif job_app.day_off_category == "Monthly" and frappe.utils.cint(job_app.number_of_days_off) > 30:
            frappe.throw(_("Number of days off cannot be more than a Month!"))
        job_offer = frappe.new_doc('Job Offer')
        job_offer.job_applicant = job_app.name
        job_offer.applicant_name = job_app.applicant_name
        job_offer.day_off_category = job_app.day_off_category
        job_offer.number_of_days_off = job_app.number_of_days_off
        job_offer.designation = job_app.designation
        job_offer.offer_date = today()
        if job_app.one_fm_erf:
            erf = frappe.get_doc('ERF', job_app.one_fm_erf)
            set_erf_details(job_offer, erf)
        job_offer.save(ignore_permissions = True)
def set_erf_details(job_offer, erf):
    """Copy ERF-driven fields onto the Job Offer.

    Sets the ERF link, falls back to the ERF designation when the offer has
    none, copies accommodation/transport flags, then fills salary details
    and the standard benefit terms.
    """
    job_offer.erf = erf.name
    if not job_offer.designation:
        job_offer.designation = erf.designation
    job_offer.one_fm_provide_accommodation_by_company = erf.provide_accommodation_by_company
    job_offer.one_fm_provide_transportation_by_company = erf.provide_transportation_by_company
    set_salary_details(job_offer, erf)
    set_other_benefits_to_terms(job_offer, erf)
def set_salary_details(job_offer, erf):
    """Copy the salary-advance flag, base, and per-component salary rows from the ERF.

    Also stores the summed component amounts as the offer's total salary.
    """
    job_offer.one_fm_provide_salary_advance = erf.provide_salary_advance
    job_offer.base = erf.base
    total = 0
    for component in erf.salary_details:
        total += component.amount
        row = job_offer.append('one_fm_salary_details')
        row.salary_component = component.salary_component
        row.amount = component.amount
    job_offer.one_fm_job_offer_total_salary = total
def set_other_benefits_to_terms(job_offer, erf):
    """Append standard offer terms (benefits, fees, hours, leave, probation) from the ERF.

    Bug fix: the benefit loop previously passed the whole {fieldname: label}
    dict to ``erf.get`` and then indexed the *list* ``options`` with that
    dict (a TypeError). It now checks each fieldname and uses its label,
    matching the pattern used elsewhere in this module.
    """
    # if erf.other_benefits:
    #     for benefit in erf.other_benefits:
    #         terms = job_offer.append('offer_terms')
    #         terms.offer_term = benefit.benefit
    #         terms.value = 'Company Provided'
    options = [{'provide_mobile_with_line': 'Mobile with Line'},
               {'provide_health_insurance': 'Health Insurance'},
               {'provide_company_insurance': 'Company Insurance'},
               {'provide_laptop_by_company': 'Personal Laptop'},
               {'provide_vehicle_by_company': 'Personal Vehicle'}]
    for option in options:
        for fieldname in option:
            if erf.get(fieldname):
                terms = job_offer.append('offer_terms')
                terms.offer_term = option[fieldname]
                terms.value = 'Company Provided'
    terms_list = ['Kuwait Visa processing Fees', 'Kuwait Residency Fees', 'Kuwait insurance Fees']
    for term in terms_list:
        terms = job_offer.append('offer_terms')
        terms.offer_term = term
        terms.value = 'Borne By The Company'
    # Fall back to company defaults when the ERF leaves these blank/zero.
    hours = erf.shift_hours if erf.shift_hours else 9
    vacation_days = erf.vacation_days if erf.vacation_days else 30
    off_days = erf.off_days if erf.off_days else 4
    terms = job_offer.append('offer_terms')
    terms.offer_term = 'Working Hours'
    terms.value = str(hours)+' hours a day, (Subject to Operational Requirements), '+str(off_days)+' days off per month'
    terms = job_offer.append('offer_terms')
    terms.offer_term = 'Annual Leave'
    terms.value = '('+str(vacation_days)+') days paid leave, as per Kuwait Labor Law (Private Sector)'
    terms = job_offer.append('offer_terms')
    terms.offer_term = 'Probation Period'
    terms.value = '(100) working days'
@frappe.whitelist(allow_guest=True)
def get_job_opening(job_opening_id):
    """Guest-accessible API: return the Job Opening document by name."""
    return frappe.get_doc('Job Opening', job_opening_id)
@frappe.whitelist(allow_guest=True)
def get_erf(erf_id):
    """Guest-accessible API: return the ERF document by name."""
    return frappe.get_doc('ERF', erf_id)
def get_applicant():
    """Return the Job Applicant name whose One FM email matches the session user, if any."""
    return frappe.get_value("Job Applicant",{"one_fm_email_id": frappe.session.user}, "name")
def applicant_has_website_permission(doc, ptype, user, verbose=False):
    """Website permission hook: applicants may only access their own record."""
    return doc.name == get_applicant()
@frappe.whitelist(allow_guest=True)
def check_if_user_exist_as_desk_user(user):
    """Guest-accessible API: return the user's user_type if a User record exists, else False."""
    existing = frappe.db.exists('User', user)
    if not existing:
        return False
    return frappe.db.get_value('User', existing, 'user_type')
def get_job_applicant_transferable_overseas(applicant):
    """Return the applicant's overseas/transferable flags.

    Falsy field values are normalized to False, truthy values are passed
    through unchanged.
    """
    job_applicant = frappe.get_doc('Job Applicant', applicant)
    return {
        'overseas': job_applicant.one_fm_applicant_is_overseas_or_local or False,
        'transferable': job_applicant.one_fm_is_transferable or False,
    }
def validate_applicant_overseas_transferable(applicant):
    """Ensure the overseas/transferable flags are set and block non-transferable locals.

    Throws when the overseas flag is missing, when a local applicant's
    transferable flag is missing, or when that flag is "No".
    """
    transferable_details = get_job_applicant_transferable_overseas(applicant)
    if not transferable_details['overseas']:
        frappe.throw(_('Mark the Applicant is Overseas or Local'))
    elif transferable_details['overseas'] == 'Local':
        if not transferable_details['transferable']:
            frappe.throw(_('Mark the Applicant is Transferable or Not'))
        # NOTE(review): this check is treated as part of the 'Local' branch;
        # indentation was ambiguous in the reviewed copy — confirm it should
        # not also apply to overseas applicants.
        if transferable_details['transferable'] == "No":
            frappe.throw(_("Applicant is Not Transferable"))
@frappe.whitelist()
def set_warehouse_contact_from_project(doc, method):
    """Link the Operations Site's Address to this document when project and site are set.

    The site-POC contact linking is retained below, disabled.
    """
    if doc.one_fm_project and doc.one_fm_site:
        site = frappe.get_doc("Operations Site", doc.one_fm_site)
        # if site.site_poc:
        #     for poc in site.site_poc:
        #         if poc.poc:
        #             contact = frappe.get_doc('Contact', poc.poc)
        #             links = contact.append('links')
        #             links.link_doctype = doc.doctype
        #             links.link_name = doc.name
        #             contact.save(ignore_permissions=True)
        if site.address:
            address = frappe.get_doc('Address', site.address)
            # Add a dynamic link from the site's Address back to this document.
            links = address.append('links')
            links.link_doctype = doc.doctype
            links.link_name = doc.name
            address.save(ignore_permissions=True)
def validate_iban_is_filled(doc, method):
    """Block activating a bank account whose IBAN has not been set."""
    missing_iban = not doc.iban and doc.workflow_state == 'Active Account'
    if missing_iban:
        frappe.throw(_("Please Set IBAN before you Mark Open the Bank Account"))
def bank_account_on_update(doc, method):
    """doc_event hook: keep the linked Onboard Employee in sync on every update."""
    update_onboarding_doc_for_bank_account(doc)
def bank_account_on_trash(doc, method):
    """On delete, clear the linked Onboard Employee's bank-account tracking fields."""
    if not doc.onboard_employee:
        return
    oe = frappe.get_doc('Onboard Employee', doc.onboard_employee)
    oe.bank_account = ''
    oe.bank_account_progress = 0
    oe.bank_account_docstatus = ''
    oe.bank_account_status = ''
    # Account name and bank are kept for reference even after the trash.
    oe.account_name = doc.account_name
    oe.bank = doc.bank
    oe.save(ignore_permissions=True)
def update_onboarding_doc_for_bank_account(doc):
    """Mirror this bank account's workflow progress onto its Onboard Employee.

    Progress percentages are derived from the workflow state; a rejection is
    recorded as docstatus 2, anything else as 1. Nothing happens when the
    state is unknown or no Onboard Employee is linked.
    """
    if not doc.onboard_employee:
        return
    progress_by_state = {'Draft': 0, 'Open Request': 30,
                         'Processing Bank Account Opening': 70,
                         'Rejected by Accounts': 100, 'Active Account': 100}
    if doc.workflow_state not in progress_by_state:
        return
    docstatus = 2 if doc.workflow_state == 'Rejected by Accounts' else 1
    oe = frappe.get_doc('Onboard Employee', doc.onboard_employee)
    oe.bank_account = doc.name
    oe.bank_account_progress = progress_by_state[doc.workflow_state]
    oe.bank_account_docstatus = docstatus
    oe.bank_account_status = doc.workflow_state
    oe.account_name = doc.account_name
    oe.bank = doc.bank
    # Advance the onboarding workflow once duty commencement is reached.
    if oe.workflow_state == 'Duty Commencement':
        oe.workflow_state = 'Bank Account'
    oe.save(ignore_permissions=True)
def issue_roster_actions():
    """Enqueue the roster employee/post action generators as long background jobs."""
    # Queue roster actions functions to backgrounds jobs
    frappe.enqueue(create_roster_employee_actions, is_async=True, queue='long')
    frappe.enqueue(create_roster_post_actions, is_async=True, queue='long')
def create_roster_employee_actions():
    """
    This function creates a Roster Employee Actions document and issues notifications to relevant supervisors
    directing them to schedule employees that are unscheduled and assigned to them.
    It computes employees not scheduled for the span of two weeks, starting from tomorrow.
    """
    # start date to be from tomorrow
    start_date = add_to_date(cstr(getdate()), days=1)
    # end date to be 14 days after start date
    end_date = add_to_date(start_date, days=14)
    #-------------------- Roster Employee actions ------------------#
    # fetch employees that are active and don't have a schedule in the specified date range
    employees_not_rostered = frappe.db.sql("""
        select
            employee from `tabEmployee`
        where
            employee not in
            (select employee
            from `tabEmployee Schedule`
            where date >= %(start)s and date <=%(end)s) """,
        {'start': start_date, 'end': end_date})
    employees = ()
    # fetch employees that are not rostered from the result returned by the query and append to tuple
    for emp in employees_not_rostered:
        employees = employees + emp
    # NOTE(review): the next query interpolates the Python tuple repr directly
    # into SQL via str.format. That breaks when ``employees`` is empty
    # (``in ()`` is invalid SQL) or has exactly one element (trailing comma),
    # and it bypasses parameter binding — consider %(param)s binding instead.
    # fetch supervisors and list of employees(not rostered) under them
    result = frappe.db.sql("""select sv.employee, group_concat(e.employee)
        from `tabEmployee` e
        join `tabOperations Shift` sh on sh.name = e.shift
        join `tabEmployee` sv on sh.supervisor=sv.employee
        where e.employee in {employees}
        group by sv.employee """.format(employees=employees))
    # for each supervisor, create a roster action
    for res in result:
        supervisor = res[0]
        # group_concat result -> list of employee ids for this supervisor
        employees = res[1].split(",")
        roster_employee_actions_doc = frappe.new_doc("Roster Employee Actions")
        roster_employee_actions_doc.start_date = start_date
        roster_employee_actions_doc.end_date = end_date
        roster_employee_actions_doc.status = "Pending"
        roster_employee_actions_doc.action_type = "Roster Employee"
        roster_employee_actions_doc.supervisor = supervisor
        for emp in employees:
            roster_employee_actions_doc.append('employees_not_rostered', {
                'employee': emp
            })
        roster_employee_actions_doc.save()
        frappe.db.commit()
    #-------------------- END Roster Employee actions ------------------#
def create_roster_post_actions():
"""
This function creates a Roster Post Actions document that issues actions to supervisors to fill post types that are not filled for a given date range.
"""
# start date to be from tomorrow
start_date = add_to_date(cstr(getdate()), days=1)
# end date to be 14 days after start date
end_date = add_to_date(start_date, days=14)
post_types_not_filled_set = set()
# Fetch post schedules in the date range that are active
post_schedules = frappe.db.get_list("Post Schedule", {'date': ['between', (start_date, end_date)], 'post_status': 'Planned'}, ["date", "shift", "post_type", "post"], order_by="date asc")
# Fetch employee schedules for employees who are working
employee_schedules = frappe.db.get_list("Employee Schedule", {'date': ['between', (start_date, end_date)], 'employee_availability': 'Working'}, ["date", "shift", "post_type"], order_by="date asc")
for ps in post_schedules:
# if there is not any employee schedule that matches the post schedule for the specified date, add to post types not filled
if not any(cstr(es.date).split(" | |
"""A Common Universal Data Structure.
The CUDS object is an ontology individual that can be used like a container. It
has attributes and is connected to other cuds objects via relationships.
"""
import logging
from copy import deepcopy
from uuid import uuid4, UUID
from typing import Any, Dict, Hashable, Iterable, Iterator, List, Optional, \
Tuple, Union
import rdflib
from rdflib import URIRef, BNode, RDF, Graph, Literal
import osp.core.warnings as warning_settings
from osp.core.namespaces import cuba, from_iri
from osp.core.ontology.relationship import OntologyRelationship
from osp.core.ontology.attribute import OntologyAttribute
from osp.core.ontology.oclass import OntologyClass
from osp.core.ontology.datatypes import CUDS_IRI_PREFIX
from osp.core.session.core_session import core_session
from osp.core.session.session import Session
from osp.core.neighbor_dict import NeighborDictRel
from osp.core.utils.wrapper_development import check_arguments, \
clone_cuds_object, create_from_cuds_object, get_neighbor_diff
logger = logging.getLogger("osp.core")
CUDS_NAMESPACE_IRI = URIRef(CUDS_IRI_PREFIX)
class Cuds:
"""A Common Universal Data Structure.
The CUDS object is an ontology individual that can be used like a
container. It has attributes and is connected to other cuds objects via
relationships.
"""
_session = core_session
def __init__(self,
             # Create from oclass and attributes dict.
             attributes: Dict[OntologyAttribute, Any],
             oclass: Optional[OntologyClass] = None,
             session: Session = None,
             iri: URIRef = None,
             uid: Union[UUID, URIRef] = None,
             # Specify extra triples for the CUDS object.
             extra_triples: Iterable[
                 Tuple[Union[URIRef, BNode],
                       Union[URIRef, BNode],
                       Union[URIRef, BNode]]] = tuple()):
    """Initialize a CUDS object.

    Args:
        attributes: Mapping from ontology attribute to value; each entry
            becomes a literal triple on this object's IRI.
        oclass: Ontology class of the individual. May be omitted only if
            an rdf:type triple is supplied through ``extra_triples``.
        session: Session the object is stored in; defaults to the
            class-level core session.
        iri: IRI identifying the object (mutually exclusive with ``uid``).
        uid: UUID or URIRef identifying the object; a random UUID is
            generated when neither ``uid`` nor ``iri`` is given.
        extra_triples: Additional triples; every subject must be this
            object's IRI.

    Raises:
        ValueError: If both ``iri`` and ``uid`` are given, either has a
            wrong type, or an extra triple has a foreign subject.
        TypeError: If no ontology class ends up assigned.
    """
    # Set uid. This is a "user-facing" method, so strict types
    # checks are performed.
    if len(set(filter(lambda x: x is not None, (uid, iri)))) > 1:
        raise ValueError("Tried to initialize a CUDS object specifying, "
                         "both its IRI and UID. A CUDS object is "
                         "constrained to have just one UID.")
    elif uid is not None and type(uid) not in (UUID, URIRef):
        raise ValueError('Provide either a UUID or a URIRef object'
                         'as UID.')
    elif iri is not None and type(iri) is not URIRef:
        raise ValueError('Provide a URIRef object as IRI.')
    else:
        self._uid = uid or iri or uuid4()
    # Create CUDS triples in internal temporary graph.
    self._graph = Graph()
    if attributes:
        for k, v in attributes.items():
            self._graph.add((
                self.iri, k.iri, Literal(k.convert_to_datatype(v),
                                         datatype=k.datatype)
            ))
    if oclass:
        self._graph.add((
            self.iri, RDF.type, oclass.iri
        ))
    extra_oclass = False
    for s, p, o in extra_triples:
        if s != self.iri:
            raise ValueError("Trying to add extra triples to a CUDS "
                             "object with a subject that does not match "
                             "the CUDS object's IRI.")
        elif p == RDF.type:
            # An rdf:type among the extra triples also satisfies the
            # oclass requirement checked below.
            extra_oclass = True
        self._graph.add((s, p, o))
    oclass_assigned = bool(oclass) or extra_oclass
    if not oclass_assigned:
        raise TypeError(f"No oclass associated with {self}! "
                        f"Did you install the required ontology?")
    self._session = session or Cuds._session
    # Copy temporary graph to the session graph and discard it.
    self.session._store(self)
@property
def iri(self) -> URIRef:
    """IRI of the CUDS object.

    URIRef uids are returned unchanged; UUID uids are mapped into the
    CUDS namespace.
    """
    uid = self.uid
    if type(uid) is URIRef:
        return uid
    return URIRef(CUDS_NAMESPACE_IRI + str(uid))
@property
def uid(self) -> Union[URIRef, UUID]:
    """Get the uid of the CUDS object.

    This is the public getter of the property.
    """
    return self._uid

@property
def _uid(self) -> Union[URIRef, UUID]:
    """Get the uid of the CUDS object.

    This is the private getter of the property.
    """
    # Reads the name-mangled backing attribute set by the private setter.
    return self.__uid

@_uid.setter
def _uid(self, value: Union[URIRef, UUID, int]):
    """Set the uid of a CUDS object.

    This is the private setter of the property.

    Raises:
        ValueError: The value is the nil UUID (int == 0) or a string/IRI
            without a "scheme:rest" shape.
    """
    # Plain ints are promoted to UUIDs for convenience.
    if type(value) is int:
        value = UUID(int=value)
    if type(value) is UUID:
        # The nil UUID is reserved and therefore rejected.
        invalid = value.int == 0
    else:
        # Otherwise value is assumed to be string-like (URIRef); it must
        # contain a scheme separator and no empty segments around it.
        split = value.split(':')
        invalid = not len(split) > 1 or any(x == "" for x in split)
    if invalid:
        raise ValueError(f"Invalid uid: {value}.")
    self.__uid = value

@property
def session(self) -> Session:
    """Get the session of the cuds object."""
    return self._session
@property
def oclasses(self):
    """Ontology classes of this CUDS object.

    Resolves every RDF.type triple of the object; IRIs that cannot be
    resolved to an ontology entity are silently dropped.
    """
    entities = (from_iri(o, raise_error=False)
                for _, _, o in self._graph.triples((self.iri, RDF.type, None)))
    return [entity for entity in entities if entity is not None]
@property
def oclass(self):
    """Ontology class of the cuds object.

    Returns the first of the object's ontology classes, or None when it
    has none.
    """
    classes = self.oclasses
    if not classes:
        return None
    return classes[0]
@property
def _neighbors(self):
    # Dict-like view mapping relationships to neighboring CUDS objects.
    return NeighborDictRel(self)

@property
def _stored(self):
    # True when this object's triples live in the session graph rather
    # than in the temporary graph created during initialization.
    return self.session is not None and self._graph is self.session.graph
def get_triples(self, include_neighbor_types=False):
    """Yield the triples that describe this cuds object.

    When ``include_neighbor_types`` is set, the RDF.type triples of all
    objects referenced by this object's triples are yielded as well.
    """
    seen_objects = set()
    for triple in self._graph.triples((self.iri, None, None)):
        yield triple
        seen_objects.add(triple[2])
    if include_neighbor_types:
        for obj in seen_objects:
            yield from self._graph.triples((obj, RDF.type, None))
def get_attributes(self):
    """Return the object's ontology attributes as a dictionary.

    Notifies the session of the read first, then resolves every
    predicate of the object's triples and keeps those that are
    ontology attributes, mapping them to their Python values.
    """
    if self.session:
        self.session._notify_read(self)
    attributes = {}
    for _, predicate, obj in self._graph.triples((self.iri, None, None)):
        entity = from_iri(predicate, raise_error=False)
        if isinstance(entity, OntologyAttribute):
            attributes[entity] = \
                self._rdflib_5_inplace_modification_prevention_filter(
                    obj.toPython(), entity)
    return attributes
def is_a(self, oclass):
    """Check if the CUDS object is an instance of the given oclass.

    Args:
        oclass (OntologyClass): Check if the CUDS object is an instance of
            this oclass.

    Returns:
        bool: Whether the CUDS object is an instance of the given oclass.
    """
    for candidate in self.oclasses:
        if candidate in oclass.subclasses:
            return True
    return False
def add(self,
        *args: "Cuds",
        rel: OntologyRelationship = None) -> Union["Cuds", List["Cuds"]]:
    """Add CUDS objects to their respective relationship.

    If the added objects are associated with the same session,
    only a link is created. Otherwise, a deepcopy is made and added
    to the session of this Cuds object.
    Before adding, check for invalid keys to avoid inconsistencies later.

    Args:
        args (Cuds): The objects to be added
        rel (OntologyRelationship): The relationship between the objects.

    Raises:
        TypeError: No relationship given and no default specified.
        ValueError: Added a CUDS object that is already in the container.

    Returns:
        Union[Cuds, List[Cuds]]: The CUDS objects that have been added,
            associated with the session of the current CUDS object.
            Result type is a list, if more than one CUDS object is
            returned.
    """
    check_arguments(Cuds, *args)
    # Fall back to the namespace's default relationship when none given.
    rel = rel or self.oclass.namespace.get_default_rel()
    if rel is None:
        raise TypeError("Missing argument 'rel'! No default "
                        "relationship specified for namespace %s."
                        % self.oclass.namespace)
    result = list()
    # update cuds objects if they are already in the session
    # NOTE: load() is consumed lazily below; its order matches the order
    # of the foreign-session args filtered here, so the next(old_objects)
    # call inside the loop stays aligned with the current arg.
    old_objects = self._session.load(
        *[arg.uid for arg in args if arg.session != self.session])
    for arg in args:
        # Recursively add the children to the registry
        if rel in self._neighbors \
                and arg.uid in self._neighbors[rel]:
            message = '{!r} is already in the container'
            raise ValueError(message.format(arg))
        if self.session != arg.session:
            # Foreign-session object: store a (recursive) copy locally.
            arg = self._recursive_store(arg, next(old_objects))

        self._add_direct(arg, rel)
        arg._add_inverse(self, rel)
        result.append(arg)
    # Single argument -> single object; otherwise a list.
    return result[0] if len(args) == 1 else result
def get(self,
        *uids: Union[UUID, URIRef],
        rel: OntologyRelationship = cuba.activeRelationship,
        oclass: OntologyClass = None,
        return_rel: bool = False) -> Union["Cuds", List["Cuds"]]:
    """Return the contained elements.

    Filter elements by given type, uid or relationship. Expected calls
    are get(), get(*uids), get(rel), get(oclass), get(*uids, rel),
    get(rel, oclass). Delegates to :meth:`iter` and materializes the
    result.

    When uids are given, result positions follow the order of the given
    uids and may contain None for uids that are not children of this
    cuds_object; a single uid yields the single element instead of a
    list. Without uids, the result is an arbitrarily ordered collection.

    Args:
        uids (Union[UUID, URIRef]): uids of the elements.
        rel (OntologyRelationship, optional): Only return cuds_objects
            connected by a subclass of this relationship. Defaults to
            cuba.activeRelationship.
        oclass (OntologyClass, optional): Only return elements which are
            a subclass of the given ontology class. Defaults to None.
        return_rel (bool, optional): Whether to return the connecting
            relationship. Defaults to False.

    Returns:
        Union[Cuds, List[Cuds]]: The queried objects.
    """
    matches = list(self.iter(*uids, rel=rel, oclass=oclass,
                             return_rel=return_rel))
    return matches[0] if len(uids) == 1 else matches
def update(self, *args: "Cuds") -> List["Cuds"]:
"""Update the Cuds object.
Updates the object by providing updated versions of CUDS objects
that are directly in the container of this CUDS object.
The updated versions must be associated with a different session.
Args:
args (Cuds): The updated versions to use to update the current
object.
Raises:
ValueError: Provided a CUDS objects is not in the container of the
current CUDS
ValueError: Provided CUDS object is associated with the same
session as the current CUDS object. Therefore it is not an
updated version.
Returns:
Union[Cuds, List[Cuds]]: The CUDS objects that have been updated,
associated with the session of the | |
1
else:
self.item_pool[item.name] = ItemPoolRecord()
def collect_starters(self, state):
    """Collect every configured starting item into the given state."""
    for name, record in self.starting_items.items():
        # Half-full milk bottles are represented by the plain Bottle item.
        actual_name = "Bottle" if name == "Bottle with Milk (Half)" else name
        for _ in range(record.count):
            state.collect(ItemFactory(actual_name))
def pool_replace_item(self, item_pools, item_group, player_id, new_item, worlds):
    """Remove one ``item_group`` item from the pools and pick its replacement.

    :param item_pools: list of item pools to remove from
    :param item_group: name/pattern of the item (group) to remove
    :param player_id: 0-based id of the world the item belongs to
    :param new_item: name/pattern of the replacement ("#Junk" draws a
        junk item instead of matching the pattern)
    :param worlds: list of world objects
    :return: the chosen replacement item
    """
    removed_item = self.pool_remove_item(item_pools, item_group, 1, world_id=player_id)[0]
    # Build the name predicate once; the original recompiled the pattern
    # via pattern_matcher(new_item) for every candidate item.
    name_matches = pattern_matcher(new_item)
    item_matcher = lambda item: name_matches(item.name)
    # Keep the per-name item_pool bookkeeping in sync with the removal.
    if self.item_pool[removed_item.name].count > 1:
        self.item_pool[removed_item.name].count -= 1
    else:
        del self.item_pool[removed_item.name]
    if new_item == "#Junk":
        if self.distribution.settings.enable_distribution_file:
            return ItemFactory(get_junk_item(1, self.base_pool, self.item_pool))[0]
        else:
            # Generator settings that add junk to the pool should not be
            # strict about the item_pool definitions.
            return ItemFactory(get_junk_item(1))[0]
    return random.choice(list(ItemIterator(item_matcher, worlds[player_id])))
def set_shuffled_entrances(self, worlds, entrance_pools, target_entrance_pools, locations_to_ensure_reachable, itempool):
    """Connect plando-specified entrances to their recorded target regions.

    For every entrance record with a region, locate the matching shuffled
    entrance in one of the entrance pools, pick a compatible target that
    leads to the recorded region (and origin, when given), connect the
    two and re-validate the world.

    Raises:
        RuntimeError: unknown entrance, entrance/target already shuffled,
            no suitable target found, failed world validation, or the
            entrance is not part of any shuffled pool.
    """
    for (name, record) in self.entrances.items():
        if record.region is None:
            continue
        if not worlds[self.id].get_entrance(name):
            raise RuntimeError('Unknown entrance in world %d: %s' % (self.id + 1, name))

        entrance_found = False
        for pool_type, entrance_pool in entrance_pools.items():
            try:
                matched_entrance = next(filter(lambda entrance: entrance.name == name, entrance_pool))
            except StopIteration:
                continue

            entrance_found = True
            if matched_entrance.connected_region != None:
                if matched_entrance.type == 'Overworld':
                    # Overworld entrances can legitimately already be
                    # connected (two-way coupling); skip instead of failing.
                    continue
                else:
                    raise RuntimeError('Entrance already shuffled in world %d: %s' % (self.id + 1, name))

            target_region = record.region

            # Candidate targets whose connected region matches the record.
            matched_targets_to_region = list(filter(lambda target: target.connected_region and target.connected_region.name == target_region,
                                                    target_entrance_pools[pool_type]))
            if not matched_targets_to_region:
                raise RuntimeError('No entrance found to replace with %s that leads to %s in world %d' %
                                        (matched_entrance, target_region, self.id + 1))

            if record.origin:
                # An origin narrows the candidates to targets replacing an
                # entrance that starts in the recorded parent region.
                target_parent = record.origin
                try:
                    matched_target = next(filter(lambda target: target.replaces.parent_region.name == target_parent, matched_targets_to_region))
                except StopIteration:
                    raise RuntimeError('No entrance found to replace with %s that leads to %s from %s in world %d' %
                                            (matched_entrance, target_region, target_parent, self.id + 1))
            else:
                matched_target = matched_targets_to_region[0]
                target_parent = matched_target.parent_region.name

            if matched_target.connected_region == None:
                raise RuntimeError('Entrance leading to %s from %s is already shuffled in world %d' %
                                        (target_region, target_parent, self.id + 1))

            try:
                check_entrances_compatibility(matched_entrance, matched_target)
                change_connections(matched_entrance, matched_target)
                validate_world(matched_entrance.world, worlds, None, locations_to_ensure_reachable, itempool)
            except EntranceShuffleError as error:
                raise RuntimeError('Cannot connect %s To %s in world %d (Reason: %s)' %
                                        (matched_entrance, matched_entrance.connected_region or matched_target.connected_region, self.id + 1, error))

            confirm_replacement(matched_entrance, matched_target)

        if not entrance_found:
            raise RuntimeError('Entrance does not belong to a pool of shuffled entrances in world %d: %s' % (self.id + 1, name))
def fill_bosses(self, world, prize_locs, prizepool):
    """Place dungeon rewards on boss locations per the distribution file.

    :param world: world object the bosses belong to
    :param prize_locs: pool of unfilled boss (prize) locations
    :param prizepool: pool of unplaced dungeon rewards
    :return: number of rewards placed
    """
    count = 0
    used_items = []
    for (name, record) in pattern_dict_items(self.locations):
        boss = pull_item_or_location([prize_locs], world, name)
        if boss is None:
            # Name not in the prize pool: only an error if it actually is
            # a boss location (i.e. it was already filled); other location
            # records are simply not handled here.
            try:
                location = LocationFactory(name)
            except KeyError:
                raise RuntimeError('Unknown location in world %d: %s' % (world.id + 1, name))
            if location.type == 'Boss':
                # NOTE(review): message reads oddly ("Boss or already
                # placed"); raised when a boss location is absent from
                # prize_locs, presumably because it was already filled —
                # confirm intended wording.
                raise RuntimeError('Boss or already placed in world %d: %s' % (world.id + 1, name))
            else:
                continue
        if record.player is not None and (record.player - 1) != self.id:
            raise RuntimeError('A boss can only give rewards in its own world')
        valid_items = get_valid_items_from_record(prizepool, used_items, record)
        if valid_items:  # Choices still available in the item pool, choose one, mark it as a used item
            record.item = random_choices(valid_items)[0]
            if used_items is not None:
                used_items.append(record.item)

        reward = pull_item_or_location([prizepool], world, record.item)
        if reward is None:
            # Distinguish why the reward could not be pulled.
            if record.item not in item_groups['DungeonReward']:
                raise RuntimeError('Cannot place non-dungeon reward %s in world %d on location %s.' % (record.item, self.id + 1, name))
            if IsItem(record.item):
                raise RuntimeError('Reward already placed in world %d: %s' % (world.id + 1, record.item))
            else:
                raise RuntimeError('Reward unknown in world %d: %s' % (world.id + 1, record.item))
        count += 1
        world.push_item(boss, reward, True)
    return count
def fill(self, window, worlds, location_pools, item_pools):
    """Fills the world with restrictions defined in a plandomizer JSON distribution file.

    :param window: progress-reporting window (fillcount/locationcount).
    :param worlds: A list of the world objects that define the rules of each game world.
    :param location_pools: A list containing all of the location pools.
        0: Shop Locations
        1: Song Locations
        2: Fill locations
    :param item_pools: A list containing all of the item pools.
        0: Shop Items
        1: Dungeon Items
        2: Songs
        3: Progression Items
        4: Priority Items
        5: The rest of the Item pool
    """
    world = worlds[self.id]
    locations = {}
    if self.locations:
        # Shuffle the location records so ties are resolved randomly.
        locations = {loc: self.locations[loc] for loc in random.sample(sorted(self.locations), len(self.locations))}
    used_items = []
    for (location_name, record) in pattern_dict_items(locations):
        if record.item is None:
            continue

        valid_items = get_valid_items_from_record(world.itempool, used_items, record)

        if not valid_items:
            # Item pool values exceeded. Remove limited items from the list and choose a random value from it
            limited_items = ['Weird Egg', '#AdultTrade', '#Bottle']
            if isinstance(record.item, list):
                allowed_choices = []
                for item in record.item:
                    if item in limited_items or item in item_groups['AdultTrade'] or item in item_groups['Bottle']:
                        continue
                    allowed_choices.append(item)
                record.item = random_choices(allowed_choices)[0]
        else:  # Choices still available in item pool, choose one, mark it as a used item
            record.item = random_choices(valid_items)[0]
            if used_items is not None:
                used_items.append(record.item)

        # record.player is 1-based in the distribution file.
        player_id = self.id if record.player is None else record.player - 1

        location_matcher = lambda loc: loc.world.id == world.id and loc.name.lower() == location_name.lower()
        location = pull_first_element(location_pools, location_matcher)
        if location is None:
            try:
                location = LocationFactory(location_name)
            except KeyError:
                raise RuntimeError('Unknown location in world %d: %s' % (world.id + 1, location_name))
            if location.type == 'Boss':
                # Boss locations are handled by fill_bosses, not here.
                continue
            elif location.name in world.disabled_locations:
                continue
            else:
                raise RuntimeError('Location already filled in world %d: %s' % (self.id + 1, location_name))

        if record.item in item_groups['DungeonReward']:
            raise RuntimeError('Cannot place dungeon reward %s in world %d in location %s.' % (record.item, self.id + 1, location_name))

        if record.item == '#Junk' and location.type == 'Song' and world.shuffle_song_items == 'song':
            record.item = '#JunkSong'

        # Restrict which pools an item may be drawn from, depending on
        # song-shuffle mode and shop locations.
        ignore_pools = None
        is_invert = pattern_matcher(record.item)('!')
        if is_invert and location.type != 'Song' and world.shuffle_song_items == 'song':
            ignore_pools = [2]
        if is_invert and location.type == 'Song' and world.shuffle_song_items == 'song':
            ignore_pools = [i for i in range(len(item_pools)) if i != 2]
        if location.type == 'Shop':
            ignore_pools = [i for i in range(len(item_pools)) if i != 0]

        item = self.get_item(ignore_pools, item_pools, location, player_id, record, worlds)

        if record.price is not None and item.type != 'Shop':
            location.price = record.price
            world.shop_prices[location.name] = record.price

        if location.type == 'Song' and item.type != 'Song':
            self.song_as_items = True
        location.world.push_item(location, item, True)

        # Progression items must not make the seed unbeatable.
        if item.advancement:
            search = Search.max_explore([world.state for world in worlds], itertools.chain.from_iterable(item_pools))
            if not search.can_beat_game(False):
                raise FillError('%s in world %d is not reachable without %s in world %d!' % (location.name, self.id + 1, item.name, player_id + 1))
        window.fillcount += 1
        window.update_progress(5 + ((window.fillcount / window.locationcount) * 30))
def get_item(self, ignore_pools, item_pools, location, player_id, record, worlds):
"""Get or create the item specified by the record and replace something in the item pool with it
:param ignore_pools: Pools to not replace items in
:param item_pools: A list containing all of the item pools.
:param location: Location record currently being assigned an item
:param player_id: Integer representing the current player's ID number
:param record: Item record from the distribution file to assign to a location
:param worlds: A list of the world objects that define the rules of each game world.
:return: item
"""
world = worlds[player_id]
if ignore_pools:
pool = [pool for i, pool in enumerate(item_pools) if i not in ignore_pools]
else:
pool = item_pools
try:
if record.item == "#Bottle":
try:
item = self.pool_replace_item(pool, "#Bottle", player_id, record.item, worlds)
# Update item_pool
if item.name not in self.item_pool:
self.item_pool[item.name] = ItemPoolRecord()
else:
self.item_pool[item.name].count += 1
except KeyError:
raise RuntimeError(
'Too many bottles were added to world %d, and not enough bottles are available in the item pool to be removed.' % (
self.id + 1))
elif record.item == "#AdultTrade":
try:
item = self.pool_replace_item(pool, "#AdultTrade", player_id, record.item, worlds)
# Update item_pool
if item.name not in self.item_pool:
self.item_pool[item.name] = ItemPoolRecord()
else:
self.item_pool[item.name].count += 1
except KeyError:
raise RuntimeError(
'Too many adult trade items were added to world %d, and not enough adult trade items are available in the item pool to be removed.' % (
self.id + 1))
else:
item = self.pool_remove_item(pool, record.item, 1, world_id=player_id)[0]
except KeyError:
if location.type == 'Shop' and "Buy" in record.item:
try:
self.pool_remove_item(pool, "Buy *", 1, world_id=player_id)
item = ItemFactory([record.item], world=world)[0]
except KeyError:
raise RuntimeError(
'Too many shop buy items were added to world %d, and not enough shop buy items are available in the item pool to be removed.' % (
self.id + 1))
elif record.item in item_groups['Bottle']:
try:
item = self.pool_replace_item(pool, "#Bottle", player_id, record.item, worlds)
| |
check(v, 'userList', list)
for i in v['userList']:
check(i, 'firstname', str)
check(i, 'id', str)
check(i, 'lastname', str)
check(i, 'status', str)
check(i, 'username', str)
@pytest.mark.vcr()
def test_analysis_vulns_trend_tool(sc):
    """The trend tool returns plain dictionaries."""
    vulns = sc.analysis.vulns(tool='trend', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)


@pytest.mark.vcr()
def test_analysis_vulns_vulndetails_tool(sc):
    """The vulndetails tool exposes the full per-instance vuln schema."""
    vulns = sc.analysis.vulns(tool='vulndetails', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'acceptRisk', str)
        check(v, 'baseScore', str)
        check(v, 'bid', str)
        check(v, 'checkType', str)
        check(v, 'cpe', str)
        check(v, 'cve', str)
        check(v, 'cvssV3BaseScore', str)
        check(v, 'cvssV3TemporalScore', str)
        check(v, 'cvssV3Vector', str)
        check(v, 'cvssVector', str)
        check(v, 'description', str)
        check(v, 'dnsName', str)
        check(v, 'exploitAvailable', str)
        check(v, 'exploitEase', str)
        check(v, 'exploitFrameworks', str)
        check(v, 'family', dict)
        check(v['family'], 'type', str)
        check(v['family'], 'id', str)
        check(v['family'], 'name', str)
        check(v, 'firstSeen', str)
        check(v, 'hasBeenMitigated', str)
        check(v, 'ip', str)
        check(v, 'lastSeen', str)
        check(v, 'macAddress', str)
        check(v, 'netbiosName', str)
        check(v, 'patchPubDate', str)
        check(v, 'pluginID', str)
        check(v, 'pluginInfo', str)
        check(v, 'pluginModDate', str)
        check(v, 'pluginName', str)
        check(v, 'pluginPubDate', str)
        check(v, 'pluginText', str)
        check(v, 'port', str)
        check(v, 'protocol', str)
        check(v, 'recastRisk', str)
        check(v, 'repository', dict)
        check(v['repository'], 'dataFormat', str)
        check(v['repository'], 'description', str)
        check(v['repository'], 'id', str)
        check(v['repository'], 'name', str)
        check(v, 'riskFactor', str)
        check(v, 'seeAlso', str)
        check(v, 'severity', dict)
        check(v['severity'], 'description', str)
        check(v['severity'], 'id', str)
        check(v['severity'], 'name', str)
        check(v, 'solution', str)
        check(v, 'stigSeverity', str)
        check(v, 'synopsis', str)
        check(v, 'temporalScore', str)
        check(v, 'uniqueness', str)
        check(v, 'uuid', str)
        check(v, 'version', str)
        check(v, 'vulnPubDate', str)
        check(v, 'xref', str)
@pytest.mark.vcr()
def test_analysis_vulns_vulnipdetail_tool(sc):
    """The vulnipdetail tool groups affected hosts (iplist of dicts) per plugin."""
    vulns = sc.analysis.vulns(tool='vulnipdetail', pages=2, limit=5)
    for v in vulns:
        check(v, 'family', dict)
        check(v['family'], 'type', str)
        check(v['family'], 'id', str)
        check(v['family'], 'name', str)
        check(v, 'hosts', list)
        i = v['hosts'][0]
        check(i, 'iplist', list)
        check(i, 'repository', dict)
        check(i['repository'], 'dataFormat', str)
        check(i['repository'], 'description', str)
        check(i['repository'], 'id', str)
        check(i['repository'], 'name', str)
        for j in i['iplist']:
            check(j, 'ip', str)
            check(j, 'netbiosName', str)
            check(j, 'dnsName', str)
            check(j, 'uuid', str)
            check(j, 'macAddress', str)
        check(v, 'name', str)
        check(v, 'pluginDescription', str)
        check(v, 'pluginID', str)
        check(v, 'severity', dict)
        check(v['severity'], 'description', str)
        check(v['severity'], 'id', str)
        check(v['severity'], 'name', str)
        check(v, 'total', str)


@pytest.mark.vcr()
def test_analysis_vulns_vulnipsummary_tool(sc):
    """The vulnipsummary tool returns the host iplist as a string summary."""
    vulns = sc.analysis.vulns(tool='vulnipsummary', pages=2, limit=5)
    for v in vulns:
        check(v, 'family', dict)
        check(v['family'], 'type', str)
        check(v['family'], 'id', str)
        check(v['family'], 'name', str)
        check(v, 'hosts', list)
        i = v['hosts'][0]
        check(i, 'iplist', str)
        check(i, 'repository', dict)
        check(i['repository'], 'dataFormat', str)
        check(i['repository'], 'description', str)
        check(i['repository'], 'id', str)
        check(i['repository'], 'name', str)
        check(v, 'name', str)
        check(v, 'pluginDescription', str)
        check(v, 'pluginID', str)
        check(v, 'severity', dict)
        check(v['severity'], 'description', str)
        check(v['severity'], 'id', str)
        check(v['severity'], 'name', str)
        check(v, 'total', str)
@pytest.mark.vcr()
def test_analysis_console_logs(sc):
    """Console log entries carry initiator, severity, organization and message."""
    logs = sc.analysis.console(pages=2, limit=5)
    for i in logs:
        assert isinstance(i, dict)
        check(i, 'initiator', dict)
        check(i['initiator'], 'username', str)
        check(i['initiator'], 'firstname', str)
        check(i['initiator'], 'lastname', str)
        # The initiator id is an int or a str depending on the backend version.
        try:
            check(i['initiator'], 'id', int)
        except AssertionError:
            check(i['initiator'], 'id', str)
        check(i, 'severity', dict)
        check(i['severity'], 'description', str)
        check(i['severity'], 'id', str)
        check(i['severity'], 'name', str)
        check(i, 'rawLog', str)
        check(i, 'module', str)
        check(i, 'date', 'datetime')
        check(i, 'organization', dict)
        check(i['organization'], 'description', str)
        check(i['organization'], 'id', str)
        check(i['organization'], 'name', str)
        check(i, 'message', str)
@pytest.mark.vcr()
def test_analysis_mobile_listvuln(sc):
    """Mobile listvuln rows identify the device, plugin and severity."""
    vulns = sc.analysis.mobile(tool='listvuln', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'identifier', str)
        check(v, 'pluginID', str)
        check(v, 'pluginName', str)
        check(v, 'repository', dict)
        check(v['repository'], 'description', str)
        check(v['repository'], 'id', str)
        check(v['repository'], 'name', str)
        check(v, 'severity', dict)
        check(v['severity'], 'description', str)
        check(v['severity'], 'id', str)
        check(v['severity'], 'name', str)


@pytest.mark.vcr()
def test_analysis_mobile_sumdeviceid(sc):
    """Mobile sumdeviceid rows summarize severity counts per device id."""
    vulns = sc.analysis.mobile(tool='sumdeviceid', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'identifier', str)
        check(v, 'model', str)
        check(v, 'repository', dict)
        check(v['repository'], 'description', str)
        check(v['repository'], 'id', str)
        check(v['repository'], 'name', str)
        check(v, 'score', str)
        check(v, 'serial', str)
        check(v, 'severityCritical', str)
        check(v, 'severityHigh', str)
        check(v, 'severityInfo', str)
        check(v, 'severityLow', str)
        check(v, 'severityMedium', str)
        check(v, 'total', str)


@pytest.mark.vcr()
def test_analysis_mobile_summdmuser(sc):
    """Mobile summdmuser rows summarize severity counts per MDM user."""
    vulns = sc.analysis.mobile(tool='summdmuser', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'score', str)
        check(v, 'severityCritical', str)
        check(v, 'severityHigh', str)
        check(v, 'severityInfo', str)
        check(v, 'severityLow', str)
        check(v, 'severityMedium', str)
        check(v, 'total', str)
        check(v, 'user', str)


@pytest.mark.vcr()
def test_analysis_mobile_summodel(sc):
    """Mobile summodel rows summarize severity counts per device model."""
    vulns = sc.analysis.mobile(tool='summodel', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'deviceCount', str)
        check(v, 'model', str)
        check(v, 'score', str)
        check(v, 'severityCritical', str)
        check(v, 'severityHigh', str)
        check(v, 'severityInfo', str)
        check(v, 'severityLow', str)
        check(v, 'severityMedium', str)
        check(v, 'total', str)


@pytest.mark.vcr()
def test_analysis_mobile_sumoscpe(sc):
    """Mobile sumoscpe rows summarize severity counts per OS CPE."""
    vulns = sc.analysis.mobile(tool='sumoscpe', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'deviceCount', str)
        check(v, 'osCPE', str)
        check(v, 'score', str)
        check(v, 'severityCritical', str)
        check(v, 'severityHigh', str)
        check(v, 'severityInfo', str)
        check(v, 'severityLow', str)
        check(v, 'severityMedium', str)
        check(v, 'total', str)


@pytest.mark.vcr()
def test_analysis_mobile_sumpluginid(sc):
    """Mobile sumpluginid rows summarize findings per plugin id."""
    vulns = sc.analysis.mobile(tool='sumpluginid', pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'name', str)
        check(v, 'pluginID', str)
        check(v, 'severity', dict)
        check(v['severity'], 'description', str)
        check(v['severity'], 'id', str)
        check(v['severity'], 'name', str)
        check(v, 'total', str)
@pytest.mark.vcr()
def test_analysis_mobile_vulndetails(sc):
    """The default mobile tool (vulndetails) exposes the full mobile schema."""
    vulns = sc.analysis.mobile(pages=2, limit=5)
    for v in vulns:
        assert isinstance(v, dict)
        check(v, 'baseScore', str)
        check(v, 'bid', str)
        check(v, 'checkType', str)
        check(v, 'cpe', str)
        check(v, 'cve', str)
        check(v, 'cvssVector', str)
        check(v, 'description', str)
        check(v, 'deviceVersion', str)
        check(v, 'exploitAvailable', str)
        check(v, 'exploitEase', str)
        check(v, 'exploitFrameworks', str)
        check(v, 'identifier', str)
        check(v, 'lastSeen', str)
        check(v, 'mdmType', str)
        check(v, 'model', str)
        check(v, 'osCPE', str)
        check(v, 'patchPubDate', str)
        check(v, 'pluginID', str)
        check(v, 'pluginInfo', str)
        check(v, 'pluginModDate', str)
        check(v, 'pluginName', str)
        check(v, 'pluginOutput', str)
        check(v, 'pluginPubDate', str)
        check(v, 'port', str)
        check(v, 'protocol', str)
        check(v, 'repository', dict)
        check(v['repository'], 'description', str)
        check(v['repository'], 'id', str)
        check(v['repository'], 'name', str)
        check(v, 'riskFactor', str)
        check(v, 'seeAlso', str)
        check(v, 'serial', str)
        check(v, 'severity', dict)
        check(v['severity'], 'description', str)
        check(v['severity'], 'id', str)
        check(v['severity'], 'name', str)
        check(v, 'solution', str)
        check(v, 'stigSeverity', str)
        check(v, 'synopsis', str)
        check(v, 'temporalScore', str)
        check(v, 'user', str)
        check(v, 'version', str)
        check(v, 'vulnPubDate', str)
        check(v, 'xref', str)
@pytest.mark.vcr()
def test_analysis_events_listdata(sc):
    """Event listdata rows describe individual events end-to-end."""
    events = sc.analysis.events(tool='listdata', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'destination ip', str)
        check(e, 'destination port', str)
        check(e, 'event', str)
        check(e, 'number of vulns', str)
        check(e, 'protocol', str)
        check(e, 'sensor', str)
        check(e, 'source ip', str)
        check(e, 'time', str)
        check(e, 'type', str)
        check(e, 'va/ids', str)


@pytest.mark.vcr()
def test_analysis_events_sumasset(sc):
    """Event sumasset rows aggregate event counts per asset."""
    events = sc.analysis.events(tool='sumasset', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'asset', dict)
        check(e['asset'], 'description', str)
        check(e['asset'], 'id', str)
        check(e['asset'], 'name', str)
        check(e['asset'], 'status', str)
        check(e['asset'], 'type', str)
        # The count is a str or an int depending on the backend version.
        try:
            check(e, 'count', str)
        except AssertionError:
            check(e, 'count', int)


@pytest.mark.vcr()
def test_analysis_events_sumclassa(sc):
    """Event sumclassa rows aggregate counts per class-A network."""
    events = sc.analysis.events(tool='sumclassa', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'class-a', str)
        check(e, 'count', str)


@pytest.mark.vcr()
def test_analysis_events_sumclassb(sc):
    """Event sumclassb rows aggregate counts per class-B network."""
    events = sc.analysis.events(tool='sumclassb', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'class-b', str)
        check(e, 'count', str)


@pytest.mark.vcr()
def test_analysis_events_sumclassc(sc):
    """Event sumclassc rows aggregate counts per class-C network."""
    events = sc.analysis.events(tool='sumclassc', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'class-c', str)
        check(e, 'count', str)


@pytest.mark.vcr()
def test_analysis_events_sumconns(sc):
    """Event sumconns rows aggregate counts per src/dst connection pair."""
    events = sc.analysis.events(tool='sumconns', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'count', str)
        check(e, 'destination ip', str)
        check(e, 'source ip', str)


@pytest.mark.vcr()
def test_analysis_events_sumdate(sc):
    """Event sumdate rows aggregate counts per time block."""
    events = sc.analysis.events(tool='sumdate', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, '24-hour plot', str)
        check(e, 'count', str)
        check(e, 'date', str)
        check(e, 'time block start', str)
        check(e, 'time block stop', str)
@pytest.mark.vcr()
def test_analysis_events_sumdstip(sc):
    """Event sumdstip rows aggregate counts per destination address."""
    events = sc.analysis.events(tool='sumdstip', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'address', str)
        check(e, 'count', str)
        check(e, 'lce', dict)
        check(e['lce'], 'description', str)
        check(e['lce'], 'id', str)
        check(e['lce'], 'name', str)
        check(e['lce'], 'status', str)


@pytest.mark.vcr()
def test_analysis_events_sumevent(sc):
    """Event sumevent rows aggregate counts per event name."""
    events = sc.analysis.events(tool='sumevent', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, '24-hour plot', str)
        check(e, 'count', str)
        check(e, 'description', str)
        check(e, 'event', str)
        check(e, 'file', str)


@pytest.mark.vcr()
def test_analysis_events_sumevent2(sc):
    """Event sumevent2 rows share the sumevent schema."""
    events = sc.analysis.events(tool='sumevent2', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, '24-hour plot', str)
        check(e, 'count', str)
        check(e, 'description', str)
        check(e, 'event', str)
        check(e, 'file', str)


@pytest.mark.vcr()
def test_analysis_events_sumip(sc):
    """Event sumip rows aggregate counts per address."""
    events = sc.analysis.events(tool='sumip', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'address', str)
        check(e, 'count', str)
        check(e, 'lce', dict)
        check(e['lce'], 'description', str)
        check(e['lce'], 'id', str)
        check(e['lce'], 'name', str)
        check(e['lce'], 'status', str)


@pytest.mark.vcr()
def test_analysis_events_sumport(sc):
    """Event sumport rows aggregate counts per port."""
    events = sc.analysis.events(tool='sumport', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'count', str)
        check(e, 'port', str)


@pytest.mark.vcr()
def test_analysis_events_sumprotocol(sc):
    """Event sumprotocol rows aggregate counts per protocol."""
    events = sc.analysis.events(tool='sumprotocol', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'count', str)
        check(e, 'protocol', str)


@pytest.mark.vcr()
def test_analysis_events_sumsrcip(sc):
    """Event sumsrcip rows aggregate counts per source address."""
    events = sc.analysis.events(tool='sumsrcip', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'address', str)
        check(e, 'count', str)
        check(e, 'lce', dict)
        check(e['lce'], 'description', str)
        check(e['lce'], 'id', str)
        check(e['lce'], 'name', str)
        check(e['lce'], 'status', str)


@pytest.mark.vcr()
def test_analysis_events_sumtime(sc):
    """Event sumtime rows aggregate counts per time block."""
    events = sc.analysis.events(tool='sumtime', pages=2, limit=5)
    for e in events:
        assert isinstance(e, dict)
        check(e, 'count', str)
        check(e, 'time block start', str)
        check(e, 'time block stop', str)
@pytest.mark.vcr()
def test_analysis_events_sumtype(sc):
events = sc.analysis.events(tool='sumtype', pages=2, limit=5)
for e in events:
assert isinstance(e, dict)
check(e, '24-hour plot', str)
| |
# src/python/turicreate/toolkits/regression/decision_tree_regression.py
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
This package contains the decision tree model class and the create function.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate as _turicreate
from turicreate.toolkits._supervised_learning import SupervisedLearningModel as _SupervisedLearningModel
import turicreate.toolkits._supervised_learning as _sl
import turicreate.toolkits._main as _toolkits_main
from turicreate.toolkits._internal_utils import _toolkit_repr_print
from turicreate.toolkits._internal_utils import _raise_error_evaluation_metric_is_valid
from turicreate.toolkits._internal_utils import _raise_error_if_column_exists
from turicreate.toolkits._tree_model_mixin import TreeModelMixin as _TreeModelMixin
from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
from turicreate.toolkits._internal_utils import _map_unity_proxy_to_object
# Hyper-parameter keys reported for a trained decision tree model.
_DECISION_TREE_MODEL_PARAMS_KEYS = ['max_depth', 'min_child_weight',
                                    'min_loss_reduction']
# Keys describing how the model was trained and evaluated.
_DECISION_TREE_TRAINING_PARAMS_KEYS = ['objective', 'training_time',
                                       'training_error', 'validation_error', 'evaluation_metric']
# Keys describing the data the model was trained on.
_DECISION_TREE_TRAINING_DATA_PARAMS_KEYS = ['target', 'features',
                                            'num_features', 'num_examples', 'num_validation_examples']
class DecisionTreeRegression(_SupervisedLearningModel, _TreeModelMixin):
    """
    The prediction is based on a collection of base learners, `regression trees
    <http://en.wikipedia.org/wiki/Decision_tree_learning>`_. This algorithm is
    a special case for boosted trees regression with number of trees set to 1.

    Different from linear models, e.g. linear regression, the gradient boost
    trees model is able to model non-linear interactions between the features
    and the target using decision trees as the subroutine. It is good for
    handling numerical features and categorical features with tens of
    categories but is less suitable for highly sparse features such as text
    data.

    This model cannot be constructed directly. Instead, use
    :func:`turicreate.decision_tree_regression.create` to create an instance of
    this model. A detailed list of parameter options and code samples are
    available in the documentation for the create function.

    See Also
    --------
    create
    """

    def __init__(self, proxy):
        """__init__(self)"""
        # Wrap the native (unity) model proxy; __name__ is the identifier the
        # backend uses to locate this model class.
        self.__proxy__ = proxy
        self.__name__ = self.__class__._native_name()

    @classmethod
    def _native_name(cls):
        # Name under which this model is registered in the native backend.
        return "decision_tree_regression"

    def __str__(self):
        """
        Return a string description of the model to the ``print`` method.

        Returns
        -------
        out : string
            A description of the model.
        """
        return self.__repr__()

    def _get_summary_struct(self):
        """
        Returns a structured description of the model, including (where relevant)
        the schema of the training data, description of the training data,
        training statistics, and model hyperparameters.

        Returns
        -------
        sections : list (of list of tuples)
            A list of summary sections.
              Each section is a list.
                Each item in a section list is a tuple of the form:
                  ('<label>','<field>')
        section_titles: list
            A list of section titles.
              The order matches that of the 'sections' object.
        """
        # Each tuple maps a human-readable label to the model field queried
        # by the repr machinery.
        data_fields = [
            ('Number of examples', 'num_examples'),
            ('Number of feature columns', 'num_features'),
            ('Number of unpacked features', 'num_unpacked_features')]

        training_fields = [
            ("Max tree depth", 'max_depth'),
            ("Train RMSE", 'training_rmse'),
            ("Validation RMSE", 'validation_rmse'),
            ("Training time (sec)", 'training_time')]

        return ([data_fields, training_fields], ['Schema', 'Settings'])

    def __repr__(self):
        """
        Print a string description of the model, when the model name is entered
        in the terminal.
        """
        (sections, section_titles) = self._get_summary_struct()
        return _toolkit_repr_print(self, sections, section_titles, width=30)

    def _get(self, field):
        """
        Get the value of a given field. The list of all queryable fields is
        detailed below, and can be obtained programmatically using the
        :func:`~turicreate.decision_tree_regression._list_fields` method.

        +-------------------------+--------------------------------------------------------------------------------+
        |      Field              | Description                                                                    |
        +=========================+================================================================================+
        | column_subsample        | Percentage of the columns for training each individual tree                   |
        +-------------------------+--------------------------------------------------------------------------------+
        | features                | Names of the feature columns                                                   |
        +-------------------------+--------------------------------------------------------------------------------+
        | max_depth               | The maximum depth of individual trees                                          |
        +-------------------------+--------------------------------------------------------------------------------+
        | min_child_weight        | Minimum weight required on the leave nodes                                     |
        +-------------------------+--------------------------------------------------------------------------------+
        | min_loss_reduction      | Minimum loss reduction required for splitting a node                           |
        +-------------------------+--------------------------------------------------------------------------------+
        | num_features            | Number of features in the model                                                |
        +-------------------------+--------------------------------------------------------------------------------+
        | num_unpacked_features   | Number of features in the model (including unpacked dict/list type columns)   |
        +-------------------------+--------------------------------------------------------------------------------+
        | num_examples            | Number of training examples                                                    |
        +-------------------------+--------------------------------------------------------------------------------+
        | num_validation_examples | Number of validation examples                                                  |
        +-------------------------+--------------------------------------------------------------------------------+
        | target                  | Name of the target column                                                      |
        +-------------------------+--------------------------------------------------------------------------------+
        | training_error          | Error on training data                                                         |
        +-------------------------+--------------------------------------------------------------------------------+
        | training_time           | Time spent on training the model in seconds                                    |
        +-------------------------+--------------------------------------------------------------------------------+
        | trees_json              | Tree encoded using JSON                                                        |
        +-------------------------+--------------------------------------------------------------------------------+
        | validation_error        | Error on validation data                                                       |
        +-------------------------+--------------------------------------------------------------------------------+
        | unpacked_features       | Feature names (including expanded list/dict features)                          |
        +-------------------------+--------------------------------------------------------------------------------+
        | random_seed             | Seed for row and column subselection                                           |
        +-------------------------+--------------------------------------------------------------------------------+
        | metric                  | Performance metric(s) that are tracked during training                         |
        +-------------------------+--------------------------------------------------------------------------------+

        Parameters
        ----------
        field : string
            Name of the field to be retrieved.

        Returns
        -------
        out : [various]
            The current value of the requested field.

        Examples
        --------
        >>> m.get('training_error')
        """
        # Field lookup is delegated to the supervised-learning base class,
        # which queries the native model proxy.
        return super(DecisionTreeRegression, self)._get(field)

    def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
        """
        Evaluate the model on the given dataset.

        Parameters
        ----------
        dataset : SFrame
            Dataset in the same format used for training. The columns names and
            types of the dataset must be the same as that used in training.

        metric : str, optional
            Name of the evaluation metric. Can be one of:

            - 'auto': Compute all metrics.
            - 'rmse': Rooted mean squared error.
            - 'max_error': Maximum error.

        missing_value_action : str, optional
            Action to perform when missing values are encountered. Can be
            one of:

            - 'auto': By default the model will treat missing value as is.
            - 'impute': Proceed with evaluation by filling in the missing
              values with the mean of the training data. Missing
              values are also imputed if an entire column of data is
              missing during evaluation.
            - 'error': Do not proceed with evaluation and terminate with
              an error message.

        Returns
        -------
        out : dict
            A dictionary containing the evaluation result.

        See Also
        ----------
        create, predict

        Examples
        --------
        .. sourcecode:: python

          >>> results = model.evaluate(test_data, 'rmse')
        """
        # Validate the metric name up front so the native backend never sees
        # an unsupported metric.
        _raise_error_evaluation_metric_is_valid(
            metric, ['auto', 'rmse', 'max_error'])
        return super(DecisionTreeRegression, self).evaluate(dataset,
                                                            missing_value_action=missing_value_action,
                                                            metric=metric)

    def export_coreml(self, filename):
        """
        Export the model in Core ML format.

        Parameters
        ----------
        filename: str
          A valid filename where the model can be saved.

        Examples
        --------
        >>> model.export_coreml("MyModel.mlmodel")
        """
        from turicreate.toolkits import _coreml_utils
        display_name = "decision tree regression"
        short_description = _coreml_utils._mlmodel_short_description(display_name)
        # Context consumed by the shared tree-model Core ML exporter.
        context = {"mode" : "regression",
                   "model_type" : "decision_tree",
                   "version": _turicreate.__version__,
                   "class": self.__class__.__name__,
                   "short_description": short_description,
                   'user_defined':{
                       'turicreate_version': _turicreate.__version__
                   }
                   }
        self._export_coreml_impl(filename, context)

    def predict(self, dataset, missing_value_action='auto'):
        """
        Predict the target column of the given dataset.

        The target column is provided during
        :func:`~turicreate.decision_tree_regression.create`. If the target column is in the
        `dataset` it will be ignored.

        Parameters
        ----------
        dataset : SFrame
            A dataset that has the same columns that were used during training.
            If the target column exists in ``dataset`` it will be ignored
            while making predictions.

        missing_value_action : str, optional
            Action to perform when missing values are encountered. Can be
            one of:

            - 'auto': By default the model will treat missing value as is.
            - 'impute': Proceed with evaluation by filling in the missing
              values with the mean of the training data. Missing
              values are also imputed if an entire column of data is
              missing during evaluation.
            - 'error': Do not proceed with evaluation and terminate with
              an error message.

        Returns
        -------
        out : SArray
            Predicted target value for each example (i.e. row) in the dataset.

        See Also
        ----------
        create, predict

        Examples
        --------
        >>> m.predict(testdata)
        """
        # For regression trees the raw 'margin' output of the tree ensemble
        # is the predicted value itself.
        return super(DecisionTreeRegression, self).predict(dataset, output_type='margin',
                                                           missing_value_action=missing_value_action)
def create(dataset, target,
features=None,
validation_set='auto',
max_depth=6,
min_loss_reduction=0.0, min_child_weight=0.1,
verbose=True,
random_seed = None,
metric = 'auto',
**kwargs):
"""
Create a :class:`~turicreate.decision_tree_regression.DecisionTreeRegression` to predict
a scalar target variable using one or more features. In addition to standard
numeric and categorical types, features can also be extracted automatically
from list- or dictionary-type SFrame columns.
Parameters
----------
dataset : SFrame
A training dataset containing feature columns and a target column.
Only numerical typed (int, float) target column is allowed.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type.
features : list[str], optional
A list of columns names of features used for training the model.
Defaults to None, using all columns.
validation_set : SFrame, optional
The validation set that is used to watch the validation result as
boosting progress.
max_depth : float, optional
Maximum depth of a tree. Must be at least 1.
min_loss_reduction : float, optional (non-negative)
Minimum loss reduction required to make a further partition/split a
node during the tree learning phase. Larger (more positive) values
can help prevent overfitting by avoiding splits that do not
sufficiently reduce the | |
to return all possible values
#and overload browser with too much data. This is a version of 'pagination.'
return jsonify(dict(results=record_list[arrayLowerBound:arrayUpperBound], num_entries=(len(record_list)/int(records_per_page))))
else:
return Response('Bad or missing session id.', status=401)
except:
return Response('Bad request for records', 400)
@api.route('/api/metadata/doi/<string:page_number>/<string:records_per_page>/<string:sort_on>/<string:doi_ark_value>', methods=['POST'])
@cross_origin(origin='*', methods=['POST'],
              headers=['X-Requested-With', 'Content-Type', 'Origin'])
def set_doi_ark(page_number, records_per_page, sort_on, doi_ark_value):
    """
    Retrieve all metadata records for admin view. Retrieval is done
    via POST because we must pass a session id so that the user is
    authenticated.

    Access control is done here. A user can modify only their own records
    because their session_id sent with the request.

    NOTE(review): despite the name and route, ``doi_ark_value`` is never
    used -- this handler only pages through records whose ``published``
    state is 'pending'. Confirm against the frontend whether setting the
    DOI/ARK value was ever implemented.
    """
    username = _authenticate_admin_from_session(request)
    # page_number is a 0-based index; it drives the slice bounds below.
    try:
        if username:
            if request.method == 'POST':
                # Whitelist the sort field so callers cannot inject arbitrary
                # order_by expressions into the database query.
                sort_by = validate_admin_sort_by(sort_on)
                record_list = Metadata.objects(__raw__={'published':'pending'}).order_by(sort_by)
                arrayLowerBound = int(page_number) * int(records_per_page)
                arrayUpperBound = int(page_number) * int(records_per_page) + int(records_per_page)
                # Only return array elements between indicies. Don't want to return all possible values
                # and overload browser with too much data. This is a version of 'pagination.'
                return jsonify(dict(results=record_list[arrayLowerBound:arrayUpperBound], num_entries=(len(record_list)/int(records_per_page))))
        else:
            return Response('Bad or missing session id.', status=401)
    except:
        # NOTE(review): bare except reports every failure (including coding
        # errors) as a 400 -- consider narrowing the exception types.
        return Response('Bad request for records', 400)
@api.route('/api/metadata/admin/search/<string:search_term>/<string:page_number>/<string:records_per_page>/<string:sort_by>', methods=['POST'])
@cross_origin(origin='*', methods=['POST'],
              headers=['X-Requested-With', 'Content-Type', 'Origin'])
def search_metadata(search_term, page_number, records_per_page, sort_by):
    """
    Search metadata records for the admin view, paginated.

    POST is used so the session id can be sent in the body for
    authentication. The request body must carry ``record_state``
    ('false', 'pending' or 'true') selecting which publish state to search.

    BUG FIX: the route declares ``<string:sort_by>`` but the view function
    previously took ``sort_on``; Flask passes URL variables by keyword, so
    every request raised TypeError. The parameter now matches the route.
    """
    username = _authenticate_admin_from_session(request)
    # page_number is a 0-based index; it drives the slice bounds below.
    try:
        if username:
            record_state = request.json['record_state']
            # Input sanitization: only these publish states are queryable.
            record_publish_states = ['false', 'pending', 'true']
            if request.method == 'POST':
                if record_state not in record_publish_states:
                    return Response("Error: record_state value not one of the allowed states.", 400)
                # Whitelist the sort field before using it in the query.
                sort_field = validate_admin_sort_by(sort_by)
                # SECURITY: escape the user-supplied term so regex
                # metacharacters are matched literally rather than being
                # interpreted by MongoDB's $regex engine.
                term = re.escape(search_term)
                # Records in the requested state where the title, summary, or
                # one of the author names contains the term (case-insensitive).
                record_list = Metadata.objects(__raw__={'$and': [
                    {'published': record_state},
                    {'$or': [
                        {'title': {'$regex': ".*" + term + ".*", '$options': '-i'}},
                        {'summary': {'$regex': ".*" + term + ".*", '$options': '-i'}},
                        {'citation': {'$elemMatch': {'name': {'$regex': ".*" + term + ".*", '$options': '-i'}}}}
                    ]}]}).order_by(sort_field)
                arrayLowerBound = int(page_number) * int(records_per_page)
                arrayUpperBound = int(page_number) * int(records_per_page) + int(records_per_page)
                # Only return the requested page so we don't overload the
                # browser with too much data ('pagination').
                return jsonify(dict(results=record_list[arrayLowerBound:arrayUpperBound], num_entries=(len(record_list)/int(records_per_page))))
        else:
            return Response('Bad or missing session id.', status=401)
    except:
        return Response('Bad request for records', 400)
@api.route('/api/metadata/<string:_oid>/delete', methods=['POST'])
@cross_origin(origin='*', methods=['POST'],
              headers=['X-Requested-With', 'Content-Type', 'Origin'])
def delete_metadata_record(_oid):
    """
    Delete an unpublished ('false') or pending metadata record.

    Already-published records cannot be deleted. For pending records the
    uploaded files in the pre-production directory are removed as well.
    """
    username = _authenticate_user_from_session(request)
    # BUG FIX: this previously called _authenticate_user_from_session twice,
    # so admin_username was a redundant copy of username; the variable name
    # (and the admin-only routes above) show the admin check was intended.
    admin_username = _authenticate_admin_from_session(request)
    if username or admin_username:
        md = Metadata.objects.get_or_404(pk=_oid)
        if md.published == "false" or md.published == "pending":
            # Delete from MongoDB (the in-memory object keeps its fields,
            # so md.published can still be inspected below).
            md.delete()
            # Files exist on disk only once a record was submitted for
            # publication ('pending'); otherwise there is nothing to remove.
            if md.published == "pending":
                # Delete uploaded files from the file system.
                preprod_dir = app.config['PREPROD_DIRECTORY']
                preprod_path = os.path.join(preprod_dir, _oid)
                try:
                    shutil.rmtree(preprod_path)
                except OSError:
                    # BUG FIX: shutil.rmtree raises OSError (not ValueError),
                    # so the old `except ValueError` never matched and a
                    # missing directory crashed the request.
                    pass
        else:
            return jsonify({"message":"File has already been published. Cannot delete!"})
        return jsonify({"message":"File deleted!"})
    else:
        return Response('Bad or missing session id.', status=401)
@api.route('/api/metadata/<string:_oid>/admin-publish', methods=['POST'])
@cross_origin(origin='*', methods=['POST'],
              headers=['X-Requested-With', 'Content-Type', 'Origin'])
def admin_publish_metadata_record(_oid):
    """
    Publish a pending record (admin only): move its files from the
    pre-production to the production directory, make them read-only, index
    the record in Elasticsearch, and register an MD5 checksum of the
    record's files in the checksum database.
    """
    username = _authenticate_admin_from_session(request)
    if username:
        # Buffer size we will break file into for hashing files. Need this for large files!
        BUF_SIZE = 65536  # lets read stuff in 64kb chunks!
        # Config file is 'checksum.conf' located next to this module.
        config_file = os.path.dirname(__file__) + '/checksum.conf'
        elasticsearch_record = request.json['elasticsearch_record']
        str_id = elasticsearch_record["uid"]
        schema_type = request.json['schema_type']
        # Move file from pre-prod directory to production directory
        preprod_dir = app.config['PREPROD_DIRECTORY']
        prod_dir = app.config['PROD_DIRECTORY']
        if os.path.exists(os.path.dirname(preprod_dir)):
            preprod_path = os.path.join(preprod_dir, str_id)
            prod_path = os.path.join(prod_dir, str_id)
            # Move the record's directory from preprod to prod.
            try:
                os.rename(preprod_path, prod_path)
            except OSError:
                return "Moving file on backend filesystem error"
            # Set permissions in prod: record's directory read+execute,
            # all files inside read only.
            try:
                for root, dirs, files in os.walk(prod_path):
                    for f in files:
                        os.chmod(os.path.join(prod_path, f), stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
                os.chmod(prod_path, stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
            except OSError:
                return "chmod error on record's directory."
        else:
            return "Error: path of record's file does not exist."
        try:
            res = es.index(index='test_metadata', doc_type='metadata', body=elasticsearch_record)
        except:
            # Move file back to preprod directory since complete publishing failed
            os.rename(prod_path, preprod_path)
            # Need to reset permissions on file too... TODO!!
            return "Elasticsearch posting error"
        # Create one MD5 checksum over every file in the record's directory.
        md5 = hashlib.md5()
        for root, dirs, files in os.walk(prod_path):
            for file in files:
                with open(os.path.join(prod_path, file), 'rb') as f:
                    for data in iter(lambda: f.read(BUF_SIZE), b''):
                        md5.update(data)
        checksum = md5.hexdigest()
        # Connect to checksum database and insert checksum
        config = get_config(config_file)
        conn_param = dict(config.items('checksum'))
        time = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
        # Take /datastore-pre or /datastore-prod out of path to allow for different mount points in path
        path_without_mount_dir = re.sub(r'^\/datastore-[a-zA-Z]*\/{1}', "", prod_path)
        # SECURITY FIX: values are bound as query parameters instead of being
        # concatenated into the SQL string (schema_type comes straight from
        # the request body). Only the database/table names -- read from the
        # local config file, not the request -- are interpolated.
        query = ("INSERT INTO " + conn_param['database'] + "." + conn_param['table'] +
                 " ([path], [md5], [isMetadata], [isCanonicalMetadata], [metadataStandard], [created], [published])"
                 " VALUES (%s, %s, 'true', 'true', %s, %s, %s);")
        query_params = (path_without_mount_dir, checksum, schema_type, time, time)
        try:
            with pymssql.connect(host=conn_param['host'], user=conn_param['user'], password=conn_param['password'], database=conn_param['database']) as conn:
                try:
                    with conn.cursor() as cursor:
                        cursor.execute(query, query_params)
                        conn.commit()
                except:
                    # Should move file back to preprod directory in case of failure too
                    return Response('Error: insertion in to checksum database error', status=500)
        except:
            # Should move file back to preprod directory in case of failure too
            return Response('Error: connection to checksum database error', status=500)
        return jsonify(res)
    else:
        return Response('Bad or missing session id.', status=401)
@api.route('/api/metadata/<string:_oid>/publish', methods=['POST'])
@cross_origin(origin='*', methods=['POST'],
              headers=['X-Requested-With', 'Content-Type', 'Origin'])
def publish_metadata_record(_oid):
    """
    Update (or create) a metadata record and submit it for publication.

    The record is saved to MongoDB, serialized to XML (ISO 19115 for
    'Dataset (ISO)' records, Dublin Core otherwise), written into the
    pre-production directory and -- in production mode -- uploaded to the
    simple-upload service.
    """
    username = _authenticate_user_from_session(request)
    session_id = request.json['session_id']
    if username:
        try:
            # Existing record: copy every updatable field from the request.
            record = Metadata.objects.get_or_404(pk=_oid)
            updater = Metadata.from_json(json.dumps(request.json['record']))
            for f in record._fields:
                if f != 'id':
                    record[f] = updater[f]
        except ValidationError:
            # No record with this id: create a fresh one (id assigned on save).
            record = Metadata.from_json(json.dumps(request.json['record']))
            record.id = None
        record.placeholder = False
        record.md_pub_date = datetime.utcnow()
        record.save()
        if record.schema_type == 'Dataset (ISO)':
            # Generate the ISO 19115 XML serialization.
            str_id = str(record.id)
            iso = get_single_iso_metadata(str_id).data
            save_dir = app.config['PREPROD_DIRECTORY']
            save_path = os.path.join(save_dir,
                                     str_id,
                                     'metadata.xml')
            if not os.path.exists(os.path.dirname(save_path)):
                os.mkdir(os.path.dirname(save_path))
            with open(save_path, 'w+') as f:
                f.write(iso)
            if app.config['PRODUCTION']:
                nkn_upload_url = app.config['SIMPLE_UPLOAD_URL']
                # BUG FIX: the upload file handle was previously opened inline
                # and never closed; a context manager guarantees it is closed
                # even if the POST raises.
                with open(save_path, 'rb') as upload:
                    rep = requests.post(nkn_upload_url,
                                        {'uuid': str_id, 'session_id': session_id},
                                        files={'uploadedfile': upload})
            # Send email about new dataset
            email_publishing_group(record.title, record.username, str(record.id))
            return jsonify(record=record)
        else:
            # All other schema types are serialized as Dublin Core.
            str_id = str(record.id)
            dc = get_single_dc_metadata(str_id).data
            save_dir = app.config['PREPROD_DIRECTORY']
            save_path = os.path.join(save_dir, str_id, 'metadata.xml')
            if not os.path.exists(os.path.dirname(save_path)):
                os.mkdir(os.path.dirname(save_path))
            with open(save_path, 'w+') as f:
                # (redundant explicit f.close() removed; the with-block closes it)
                f.write(dc)
            if app.config['PRODUCTION']:
                nkn_upload_url = app.config['SIMPLE_UPLOAD_URL']
                # Same leak fix as in the ISO branch above.
                with open(save_path, 'rb') as upload:
                    rep = requests.post(nkn_upload_url,
                                        {'uuid': str_id,
                                         'session_id': session_id},
                                        files={'uploadedfile': upload})
            return jsonify(record=record)
    else:
        return Response('Bad or missing session id.', status=401)
@api.route('/api/metadata/<string:_oid>/iso')
@cross_origin(origin="*", methods=['GET'])
def get_single_iso_metadata(_oid):
    """
    Return the ISO 19115 representation of the record: the generic XML
    from /xml is run through the ISO XSLT stylesheet.
    """
    generic_xml = ET.fromstring(get_single_xml_metadata(_oid).data)
    stylesheet = ET.parse(os.path.join(os.path.dirname(__file__), '..', '..',
                                       'xslt', 'XSLT_for_mdedit.xsl'))
    transform = ET.XSLT(stylesheet)
    return Response(str(transform(generic_xml)), 200, mimetype='application/xml')
@api.route('/api/metadata/<string:_oid>/dc')
@cross_origin(origin="*", methods=['GET'])
def get_single_dc_metadata(_oid):
    """
    Return the Dublin Core representation of the record: the generic XML
    from /xml is run through the Dublin Core XSLT stylesheet.
    """
    generic_xml = ET.fromstring(get_single_xml_metadata(_oid).data)
    stylesheet = ET.parse(os.path.join(os.path.dirname(__file__), '..', '..',
                                       'xslt', 'XSLT_for_mdedit_dublineCore.xsl'))
    transform = ET.XSLT(stylesheet)
    return Response(str(transform(generic_xml)), 200, mimetype='application/xml')
@api.route('/api/metadata/<string:_oid>/esri')
@cross_origin(origin="*", methods=['GET'])
def get_single_esri_metadata(_oid):
    """
    Return the ESRI-combined-with-ISO representation of the record: the
    generic XML from /xml is run through the ESRI XSLT stylesheet.
    """
    generic_xml = ET.fromstring(get_single_xml_metadata(_oid).data)
    stylesheet = ET.parse(os.path.join(os.path.dirname(__file__), '..', '..',
                                       'xslt', 'XSLT_for_mdedit_ESRI.xsl'))
    transform = ET.XSLT(stylesheet)
    return Response(str(transform(generic_xml)), 200, mimetype='application/xml')
@api.route('/api/geocode/<string:place>', methods=['GET'])
@cross_origin(origin='*', methods=['GET'],
              headers=['X-Requested-With', 'Content-Type', 'Origin'])
def get_bbox(place):
    """Geocode a place name and return its bounding box as JSON."""
    result = geocoder.google(place)
    return jsonify({'north': result.north, 'south': result.south,
                    'east': result.east, 'west': result.west})
@api.route('/api/metadata/<string:_oid>/xml')
@cross_origin(origin='*', methods=['GET'])
def get_single_xml_metadata(_oid):
"""
Get the common XML representation of the metadata record with
given id.
"""
record = Metadata.objects.get_or_404(pk=_oid)
json_rec = json.loads(record.to_json())
d_fmt = '%Y-%m-%d'
d_fmt1 = '%Y-%m-%dT%H:%M:%SZ'
try:
#start/end date might not exist yet
| |
<filename>QRegisterConst.py
# built-in package
import os
import sys
import re
# pyside2 package
from PySide2.QtWidgets import QFileDialog, QMessageBox, QProgressDialog
from PySide2.QtCore import Qt, QDir, QCoreApplication
from PySide2.QtSql import QSqlQuery, QSqlQueryModel
from PySide2.QtGui import QColor
# python-docx package
from docx import Document, oxml, shared
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from docx.enum.table import WD_TABLE_ALIGNMENT
from docx.shared import Inches
class QRegisterConst:
    """Shared constants and stateless helpers for the QRegister tool."""

    # tool version
    Version = "0.1.0-(beta)"

    # Base directory: the PyInstaller bundle dir (_MEIPASS) when frozen,
    # otherwise the directory containing this file.
    BaseDir = getattr(sys, '_MEIPASS', os.path.abspath(os.path.dirname(__file__)))

    # style file
    StyleFile = os.path.join(BaseDir, "style/style.qss")

    # design file ext
    DesignFileExt = ".reg"
    # reg access log file ext
    RegLogFileExt = ".reglog"

    # tab type const
    WelcomeTab = 0
    ModuleTab = 1
    RegLogTab = 2

    # module view const
    DesignView = 0
    DebugView = 1

    # regmap type
    RegMap = 0
    RegMod = 1

    # bf access options
    AccessTypes = ['read-write', 'read-only', 'write-only', 'read-writeOnce', 'writeOnce']
    # bf reset types
    ResetTypes = ['async', 'sync']
    # visibility options
    VisibilityOptions = ['public', 'private']
    # bool options
    BoolOptions = ['true', 'false']

    # Special item-data roles for tree view items, offset from Qt.UserRole.
    TableNameRole = Qt.UserRole + 1
    infoIdRole = Qt.UserRole + 2  # NOTE(review): lowercase 'i' is inconsistent with sibling role names
    MemMapIdRole = Qt.UserRole + 3
    RegMapIdRole = Qt.UserRole + 4
    RegIdRole = Qt.UserRole + 5
    BfIdRole = Qt.UserRole + 6
    BfEnumIdRole = Qt.UserRole + 7
    RegMapTypeRole = Qt.UserRole + 8
    BfReadOnlyRole = Qt.UserRole + 9

    # hardware access driver class, lazily resolved by findRegAccessDriverClass()
    RegisterAccessDriverClass= None

    # value column index in debug view
    ValueColumnOfDebugView = 3
@staticmethod
def strToInt(text):
if text is None:
return 0
if text.startswith("0x"): # 0x1234
return int(text, 16)
if text.startswith("'h"): # 'h1234
text = text.replace("'h", "")
return int(text, 16)
if text.startswith("'d"):
text = text.replace("'d", "") # 'd1234
return int(text)
if text.startswith("'b"):
text = text.replace("'b", "") # 'b1011
return int(text, 2)
if "'h" in text: # 16'h1234
t = text.split("'h")
return int(t[1], 16)
if "'d" in text: # 16'd1234
t = text.split("'d")
return int(t[1])
if "'b" in text: # 16'b1011
t = text.split("'b")
return int(t[1], 2)
return int(text)
@staticmethod
def recordExist(record):
exist = record.value("Exist")
if exist is None:
return True
else:
if exist == 'false' or exist == '0' or exist == "n" or exist == 'no':
return False
else:
return True
@staticmethod
def isReadOnly(value):
if value is None:
return False
valueString = str(value)
if valueString is '':
return False
if 'w' in valueString:
return False
else:
return True
@staticmethod
def findRegAccessDriverClass():
if QRegisterConst.RegisterAccessDriverClass is None:
if os.path.isfile(QDir.homePath() + "/QRegisterAccessDriver/QRegisterAccess.py"):
DriverPath = QDir.homePath() + "/QRegisterAccessDriver"
sys.path.append(DriverPath)
DriverMod = __import__("QRegisterAccess")
QRegisterConst.RegisterAccessDriverClass = getattr(DriverMod, "QRegisterAccess")
    @staticmethod
    def genColoredRegBitsUsage(conn, bfId, regId, regW, fontSize):
        # Build a description of how a register's bits are used, for the
        # colored bit-layout display. Returns a list of tuples, walking from
        # the MSB down:
        #   (None, "bit numbers")             - unused bits
        #   (QColor, "bit numbers", bfId)     - bits belonging to a bitfield
        #   (QColor, "bit numbers", bfId, 1)  - bits of the bitfield `bfId`
        #                                       (marked so the UI can highlight it)
        # Returns None when the register width is unknown.
        # NOTE(review): fontSize is currently unused -- confirm whether it was
        # meant to affect the generated entries.
        if regW == None:
            return
        bfColorsIndex = 0
        # NOTE(review): bfColors (the color-name list) is unused here; only
        # the parallel bfQColors list is consumed.
        bfColors = ["DarkSeaGreen", "LightSalmon", "PowderBlue", "LightPink", "Aquamarine", "Bisque", "LightSteelBlue", "DarkKhaki"]
        bfQColors = [QColor(0x8FBC8F), QColor(0xFFA07A), QColor(0xB0E0E6), QColor(0xFFB6C1), QColor(0x66CDAA), QColor(0xFFE4C4), QColor(0xB0C4DE), QColor(0xBDB76B)]
        value = []
        regW = int(regW)
        # regB is the highest bit not yet assigned to an entry.
        regB = regW - 1
        # Bitfields ordered from highest register offset down, matching the
        # MSB-to-LSB walk.
        bfQuery = QSqlQuery("SELECT * FROM Bitfield WHERE RegisterId=%s ORDER BY CAST(RegisterOffset as int) DESC"%(regId), conn)
        while bfQuery.next():
            _bfId = bfQuery.value("id")
            _regOff = QRegisterConst.strToInt(bfQuery.value("RegisterOffset"))
            _bfW = int(bfQuery.value("Width"))
            # unused bits before bitfield
            if _bfW > 0 and regB > (_regOff + _bfW - 1):
                text = ""
                start = _regOff + _bfW
                end = regB + 1
                for i in range(start, end):
                    # comma-separate all but the last bit number
                    text += "%s,"%(regB) if i < (end - 1) and regB > 0 else "%s"%(regB)
                    regB -= 1
                    if regB < 0:
                        break
                value.append((None, text))
            # bitfield bits
            if _bfW > 0 and regB >= 0:
                text = ""
                start = _regOff
                end = _regOff + _bfW
                for j in range(_regOff, _regOff + _bfW):
                    text += "%s,"%(regB) if j < (end - 1) and regB > 0 else "%s"%(regB)
                    regB -= 1
                    if regB < 0:
                        break
                if bfId == _bfId:
                    # extra trailing 1 marks the caller's bitfield for highlighting
                    value.append((bfQColors[bfColorsIndex], text, _bfId, 1))
                else:
                    value.append((bfQColors[bfColorsIndex], text, _bfId))
                # cycle through the color palette
                bfColorsIndex = 0 if (bfColorsIndex + 1) >= len(bfQColors) else bfColorsIndex + 1
        # left unsed bits
        if regB >= 0:
            text = ""
            for k in range(0, regB + 1):
                text += "%s,"%(regB) if regB > 0 else "%s"%(regB)
                regB -= 1
            value.append((None, text))
        return value
@staticmethod
def genRegValueFromBitfields(conn, regId):
regValue = 0
bfQuery = conn.exec_("SELECT * FROM Bitfield WHERE RegisterId=%s"%(regId))
while bfQuery.next():
regOff = QRegisterConst.strToInt(bfQuery.value("RegisterOffset"))
bfDefault = QRegisterConst.strToInt(bfQuery.value("DefaultValue"))
regValue += bfDefault << regOff
return regValue
@staticmethod
def exporDocx(parent, conn):
fileName, filterUsed = QFileDialog.getSaveFileName(parent, "Export Word file", QDir.homePath(), "Word File (*.docx)")
if fileName == '':
return
f_name, f_ext = os.path.splitext(os.path.basename(fileName))
if f_ext != ".docx":
fileName += ".docx"
docx = Document()
docx.styles['Heading 1'].font.size = shared.Pt(11)
docx.styles['Heading 2'].font.size = shared.Pt(10)
docx.styles['Heading 3'].font.size = shared.Pt(9)
docx.styles['Heading 4'].font.size = shared.Pt(8)
docx.styles['Normal'].font.size = shared.Pt(8)
# memory map
memoryMapQueryModel = QSqlQueryModel()
memoryMapQueryModel.setQuery("SELECT * FROM MemoryMap", conn)
title = docx.add_heading('MemoryMap Table\n', level = 1)
title.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
fields = ['Name', 'Description']
table = docx.add_table(rows=memoryMapQueryModel.rowCount() + 1, cols=len(fields), style='Table Grid')
for i, row in enumerate(table.rows):
for j, (cell, field) in enumerate(zip(row.cells, fields)):
if i == 0: # table header
cell.text = fields[j]
cell._tc.get_or_add_tcPr().append(oxml.parse_xml(r'<w:shd {} w:fill="c0c0c0"/>'.format(oxml.ns.nsdecls('w'))))
else:
memMapRecord = memoryMapQueryModel.record(i - 1)
if field == 'Name':
cell.text = memMapRecord.value("Name")
docx.add_page_break()
# setup progress dialog
dlgProgress = QProgressDialog(parent)
dlgProgress.setWindowTitle("Exporting ...")
dlgProgress.setWindowModality(Qt.WindowModal)
dlgProgress.setMinimum(0)
dlgProgress.show()
for i in range(memoryMapQueryModel.rowCount()):
memMapRecord = memoryMapQueryModel.record(i)
docx.add_heading('MemoryMap: %s'%(memMapRecord.value("Name")), level = 2)
# register map
regMapQueryModel = QSqlQueryModel()
regMapQueryModel.setQuery("SELECT * FROM RegisterMap WHERE memoryMapId=%s ORDER BY DisplayOrder ASC"%memMapRecord.value("id"), conn)
# progress dialog
dlgProgress.setMaximum(memoryMapQueryModel.rowCount())
dlgProgress.setValue(0)
QCoreApplication.processEvents()
for j in range(regMapQueryModel.rowCount()):
regMapRecord = regMapQueryModel.record(j)
docx.add_heading('RegisterMap: %s'%(regMapRecord.value("Name")), level = 3)
docx.add_paragraph("Description : %s\n" \
"BaseAddress : %s"%(regMapRecord.value("Description"), regMapRecord.value("OffsetAddress")))
# update progress dialog
dlgProgress.setLabelText("Exporting register map '%s' to %s "%(regMapRecord.value("Name"), fileName))
dlgProgress.setValue(j)
QCoreApplication.processEvents()
# register
regQueryModel = QSqlQueryModel()
regQueryModel.setQuery("SELECT * FROM Register WHERE RegisterMapId=%s ORDER BY DisplayOrder ASC"%regMapRecord.value("id"), conn)
fields = ['Name', 'Address', 'Description']
table = docx.add_table(1, cols=len(fields), style='Table Grid')
for c, (cell, field) in enumerate(zip(table.rows[0].cells, fields)):
cell.text = fields[c]
cell._tc.get_or_add_tcPr().append(oxml.parse_xml(r'<w:shd {} w:fill="c0c0c0"/>'.format(oxml.ns.nsdecls('w'))))
regRe = re.compile('\d+:\d+')
for r in range(regQueryModel.rowCount()):
regRecord = regQueryModel.record(r)
regWidth = int(regRecord.value("Width"))
regDesc = regRecord.value("Description")
regMatch = regRe.match(regRecord.value("Array"))
if regMatch is None:
regName = regRecord.value("Name")
regAddr = "%s"%regRecord.value("OffsetAddress")
row = table.add_row()
row.cells[0].text = regName
row.cells[1].text = regAddr
row.cells[2].text = regDesc
else:
regArray = regMatch.string.split(':')
regArray0 = int(regArray[0])
regArray1 = int(regArray[1])
start = min(regArray0, regArray1)
end = max(regArray0, regArray1)
for regI in range(start, end + 1):
regName = "%s%s"%(regRecord.value("Name"), regI)
regAddr = hex(QRegisterConst.strToInt(regRecord.value("OffsetAddress")) + int(regWidth * (regI - start) / 8))
row = table.add_row()
row.cells[0].text = regName
row.cells[1].text = regAddr
row.cells[2].text = regDesc
for k in range(regQueryModel.rowCount()):
regRecord = regQueryModel.record(k)
regMatch = regRe.match(regRecord.value("Array"))
if regMatch is None:
docx.add_heading('Register: %s'%(regRecord.value("Name")), level = 4)
docx.add_paragraph('Description : %s\n' \
'Address : %s'%(regRecord.value("Description"), regRecord.value("OffsetAddress")))
else:
regArray = regMatch.string.split(':')
regArray0 = int(regArray[0])
regArray1 = int(regArray[1])
start = min(regArray0, regArray1)
end = max(regArray0, regArray1)
regAddrStart = hex(QRegisterConst.strToInt(regRecord.value("OffsetAddress")))
regAddrend = hex(QRegisterConst.strToInt(regRecord.value("OffsetAddress")) + int(regWidth * (end - start) / 8))
docx.add_heading('Register: %s%s ~ %s%s'%(regRecord.value("Name"), start, regRecord.value("Name"), end), level = 4)
docx.add_paragraph('Description : %s\n' \
'Address : %s ~ %s'%(regRecord.value("Description"), regAddrStart, regAddrend))
# bitfield
bfQueryModel = QSqlQueryModel()
bfQueryModel.setQuery("SELECT * FROM Bitfield WHERE RegisterId=%s ORDER BY DisplayOrder ASC"%regRecord.value("id"), conn)
fields = ['Name', 'Bits', 'ResetValue', 'Description']
table = docx.add_table(rows=bfQueryModel.rowCount() + 1, cols=len(fields), style='Table Grid')
table.allow_autofit = True
for r, row in enumerate(table.rows):
for c, (cell, field) in enumerate(zip(row.cells, fields)):
if r == 0:
cell.text = fields[c]
cell._tc.get_or_add_tcPr().append(oxml.parse_xml(r'<w:shd {} w:fill="c0c0c0"/>'.format(oxml.ns.nsdecls('w'))))
else:
bfRecord = bfQueryModel.record(r - 1)
if field == 'Name':
cell.text = bfRecord.value("Name")
if field == 'Bits':
cell.text = "[%s:%s]"%(int(bfRecord.value("Width")) + QRegisterConst.strToInt(bfRecord.value("RegisterOffset")) - 1, QRegisterConst.strToInt(bfRecord.value("RegisterOffset")))
if field == 'ResetValue':
cell.text = "%s"%(bfRecord.value("DefaultValue"))
if field == 'Description':
cell.text = bfRecord.value("Description")
docx.add_page_break()
docx.add_page_break()
docx.save(fileName)
dlgProgress.close()
QMessageBox.information(parent, "Exporting docx", "Done!", QMessageBox.Yes)
return
@staticmethod
def exportVerilog(parent, conn):
folder = QFileDialog.getExistingDirectory(parent, "Export Verilog file", QDir.homePath())
if os.path.exists(folder) is False:
return
if parent.newModule is True:
f_name, f_ext = os.path.splitext(os.path.basename(parent.newFileName))
else:
f_name, f_ext = os.path.splitext(os.path.basename(parent.fileName))
folder = folder + "/" + f_name
if os.path.exists(folder) is False:
os.mkdir(folder)
# info
infoQueryModel = QSqlQueryModel()
infoQueryModel.setQuery("SELECT * FROM info", conn)
infoRecord = infoQueryModel.record(0)
moduleName = infoRecord.value("Name")
# memory map
memoryMapQueryModel = QSqlQueryModel()
memoryMapQueryModel.setQuery("SELECT * FROM MemoryMap", conn)
# output uvm top
svUVMTopFileName = folder + "/" + moduleName.lower() + "_top.sv"
| |
#!/usr/bin/env python3
# To run this script, I recommend using pipenv to create an isolated Python environment with the
# correct packages so that you do not interfere with other Python projects in your system.
# After installing pipenv, run the following commands from the current folder:
#
# pipenv --python /usr/local/opt/python@3.9/bin/python3
# pipenv install
# pipenv shell
# ./build.py
"""Converts markdown into HTML and extracts JavaScript code blocks.
For each markdown file, this script invokes mistletoe twice: once
to generate HTML, and once to extract JavaScript code blocks.
Literate code fragments are marked by "// TODO: <name>". These get
replaced according to extra properties on the code fences.
For example, the following snippet would replace "// TODO: create
wombat".
```js {fragment="create wombat"}
var wombat = new Wombat();
```
This script also generates reference documentation by extracting
doxygen style comments of the form:
/// [name] ::tags:: brief description
/// detailed description
Where "tags" consists of one or more of the following words:
class, core, method, argument, retval, function
These docstrings are used to build a JSON hierarchy where the roots
are classes, free functions, and enums. The JSON is then traversed to
generate a markdown string, which then produces HTML.
"""
import glob
import os
# Directory layout: the script lives in docs-source; generated pages go
# under docs/webgl in the repository root.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + '/'
ROOT_DIR = SCRIPT_DIR + '../../'
OUTPUT_DIR = ROOT_DIR + 'docs/webgl/'

# When True, each tutorial page embeds a live demo in an iframe (see weave()).
ENABLE_EMBEDDED_DEMO = True

# BUILD_DIR holds the WebGL build artifacts; TOOLS_DIR holds natively built
# host tools such as matc.
BUILD_DIR = ROOT_DIR + 'out/cmake-webgl-release/'
TOOLS_DIR = ROOT_DIR + 'out/cmake-release/tools/'

# Markdown prepended to the "triangle" tutorial only (see weave()).
TUTORIAL_PREAMBLE = """
## Literate programming
The markdown source for this tutorial is not only used to generate this
web page, it's also used to generate the JavaScript for the above demo.
We use a small Python script for weaving (generating HTML) and tangling
(generating JS). In the code samples, you'll often see
`// TODO: <some task>`. These are special markers that get replaced by
subsequent code blocks.
"""

# Lead-in paragraph for the generated reference page (collapsed to one line).
REFERENCE_PREAMBLE = """
All type names in this reference belong to the Filament namespace.
For example, **[init](#init)** actually refers to **Filament.init**.
""".strip().replace("\n", " ")
import argparse
import jsbeautifier
import mistletoe
import pygments
import re
import shutil
from itertools import chain
from mistletoe import HTMLRenderer, BaseRenderer
from mistletoe import span_token
from mistletoe.block_token import CodeFence as CF
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_lexer_by_name as get_lexer
from pygments.styles import get_style_by_name as get_style
class PygmentsRenderer(HTMLRenderer):
    "Extends HTMLRenderer by adding syntax highlighting"
    # Shared formatter; noclasses=True inlines styles so the generated HTML
    # needs no accompanying stylesheet.
    formatter = HtmlFormatter()
    formatter.noclasses = True

    def __init__(self, *extras, style='default'):
        super().__init__(*extras)
        # NOTE(review): this mutates the class-level formatter, so the style
        # of the most recently constructed renderer wins for all instances.
        self.formatter.style = get_style(style)

    def render_block_code(self, token):
        # Highlight fenced code blocks using the lexer named by the fence
        # language (e.g. ```js).
        code = token.children[0].content
        lexer = get_lexer(token.language)
        return highlight(code, lexer, self.formatter)
class CodeFence(CF):
    "Extends the standard CodeFence with optional properties"
    # Optional trailing properties on the fence line, e.g.
    #   ```js {fragment="create wombat"}
    # Group 3 of ppattern captures the quoted property value.
    ppattern = r' *(\{ *(.+) *\= *\"(.+)\" *\})?'
    pattern = re.compile(r'( {0,3})((?:`|~){3,}) *(\S*)' + ppattern)
    _open_info = None

    def __init__(self, match):
        # `match` is (lines, open_info); open_info is the tuple stored by
        # start() below: (indent, leader, language, properties).
        lines, open_info = match
        self.language = span_token.EscapeSequence.strip(open_info[2])
        self.children = (span_token.RawText(''.join(lines)),)
        # Raw properties string (the {...} part of the fence), or None.
        self.properties = open_info[3]

    @classmethod
    def start(cls, line):
        match_obj = cls.pattern.match(line)
        if not match_obj:
            return False
        prepend, leader, lang, props = match_obj.groups()[:4]
        # Reject fences whose info string contains the fence character
        # (mirrors the CommonMark rule for backtick fences).
        if leader[0] in lang or leader[0] in line[match_obj.end():]:
            return False
        cls._open_info = len(prepend), leader, lang, props
        return True
class JsRenderer(BaseRenderer):
    """Renders a markdown document to JavaScript (the "tangle" step).

    Only ``js`` code fences carrying a ``{fragment="..."}`` or
    ``{root="..."}`` property contribute output; every other markdown
    construct renders to the empty string.
    """
    ppattern = re.compile(CodeFence.ppattern)

    def __init__(self, *extras):
        self.root = ''       # contents of the fence marked root="..."
        self.fragments = {}  # fragment name -> accumulated JS source
        super().__init__(*chain((CodeFence,), extras))

    def __exit__(self, *args): super().__exit__(*args)

    # Everything other than code fences is irrelevant when extracting JS.
    def render_strong(self, token): return ''
    def render_emphasis(self, token): return ''
    def render_inline_code(self, token): return ''
    def render_strikethrough(self, token): return ''
    def render_image(self, token): return ''
    def render_link(self, token): return ''
    def render_auto_link(self, token): return ''
    def render_escape_sequence(self, token): return ''
    def render_raw_text(self, token): return ''
    def render_heading(self, token): return ''
    def render_quote(self, token): return ''
    def render_paragraph(self, token): return ''
    def render_list(self, token): return ''
    def render_list_item(self, token): return ''

    def render_document(self, token):
        # First pass: visiting children populates self.root and
        # self.fragments via render_code_fence().
        for child in token.children:
            self.render(child)
        # Second pass: splice fragments into the root wherever a line
        # "// TODO: <name>" names a known fragment.
        label_pattern = re.compile(r'\s*// TODO:\ (.+)')
        result = ''
        for line in self.root.split('\n'):
            m = label_pattern.match(line)
            label = m.group(1) if m else None
            if label in self.fragments:
                result += self.fragments[label]
            else:
                result += line + '\n'
        # Normalise the generated source with jsbeautifier.
        opts = jsbeautifier.default_options()
        opts.indent_size = 2
        opts.end_with_newline = True
        opts.preserve_newlines = False
        opts.break_chained_methods = True
        opts.wrap_line_length = 100
        return jsbeautifier.beautify(result, opts)

    def render_code_fence(self, token):
        if token.language != 'js' or not token.properties:
            return ''
        match = JsRenderer.ppattern.match(token.properties)
        key = match.groups()[2]
        if key == 'root':
            self.root = token.children[0].content
            # NOTE(review): bare return yields None where the siblings
            # return '' — harmless, render_document discards the value.
            return
        # Repeated fragments with the same name are concatenated in order.
        fragments = self.fragments
        val = token.children[0].content
        fragments[key] = fragments.get(key, '') + val
        return ''
def weave(name):
    """Generate ``tutorial_<name>.html`` from ``tutorial_<name>.md``.

    Optionally prepends the literate-programming preamble (triangle
    tutorial only) and an iframe that embeds the live demo.
    """
    with open(SCRIPT_DIR + f'tutorial_{name}.md', 'r') as fin:
        markdown = fin.read()
    if ENABLE_EMBEDDED_DEMO:
        if name == 'triangle':
            markdown = TUTORIAL_PREAMBLE + markdown
        markdown = '<div class="demo_frame">' + \
            f'<iframe src="demo_{name}.html"></iframe>' + \
            f'<a href="demo_{name}.html">🔗</a>' + \
            '</div>\n' + markdown
    rendered = mistletoe.markdown(markdown, PygmentsRenderer)
    # Read the template through a context manager so the handle is closed
    # deterministically (the original leaked it via open(...).read()).
    with open(SCRIPT_DIR + 'tutorial_template.html') as ftemplate:
        template = ftemplate.read()
    rendered = template.replace('$BODY', rendered)
    outfile = os.path.join(OUTPUT_DIR, f'tutorial_{name}.html')
    with open(outfile, 'w') as fout:
        fout.write(rendered)
def generate_demo_html(name):
    """Generate ``demo_<name>.html`` that loads ``tutorial_<name>.js``."""
    # Context manager instead of open(...).read() so the template handle is
    # closed deterministically (the original leaked it).
    with open(SCRIPT_DIR + 'demo_template.html') as ftemplate:
        template = ftemplate.read()
    rendered = template.replace('$SCRIPT', f'tutorial_{name}.js')
    outfile = os.path.join(OUTPUT_DIR, f'demo_{name}.html')
    with open(outfile, 'w') as fout:
        fout.write(rendered)
def tangle(name):
    """Extract the JavaScript of tutorial *name* into ``tutorial_<name>.js``."""
    source_path = SCRIPT_DIR + f'tutorial_{name}.md'
    target_path = os.path.join(OUTPUT_DIR, f'tutorial_{name}.js')
    with open(source_path, 'r') as fin:
        script = mistletoe.markdown(fin, JsRenderer)
    with open(target_path, 'w') as fout:
        fout.write(script)
def build_filamat(name):
    """Compile ``<name>.mat`` with matc into ``OUTPUT_DIR/<name>.filamat``.

    Aborts the whole build (non-zero exit) if matc fails.
    """
    matsrc = SCRIPT_DIR + name + '.mat'
    matdst = os.path.join(OUTPUT_DIR, name + '.filamat')
    flags = '-a opengl -p mobile'
    matc_exec = os.path.join(TOOLS_DIR, 'matc/matc')
    retval = os.system(f"{matc_exec} {flags} -o {matdst} {matsrc}")
    if retval != 0:
        # raise SystemExit rather than the site-provided exit() helper,
        # which is not guaranteed to exist (e.g. under python -S or frozen).
        raise SystemExit(retval)
def copy_built_file(pattern, destfolder=None):
    """Copy build artifacts matching *pattern* (relative to BUILD_DIR)
    into the output folder, optionally under subfolder *destfolder*
    (created on demand)."""
    target = OUTPUT_DIR
    if destfolder:
        target = os.path.join(target, destfolder)
        if not os.path.exists(target):
            os.mkdir(target)
    for source in glob.glob(os.path.join(BUILD_DIR, pattern)):
        shutil.copyfile(source, os.path.join(target, os.path.basename(source)))
def copy_src_file(src):
    """Copy one repository file (path relative to ROOT_DIR) into OUTPUT_DIR."""
    source = os.path.join(ROOT_DIR, src)
    target = os.path.join(OUTPUT_DIR, os.path.basename(source))
    shutil.copyfile(source, target)
def spawn_local_server():
    """Serve OUTPUT_DIR at http://localhost:8000 until interrupted.

    Blocks forever in serve_forever(); intended for local previewing only.
    """
    import http.server
    import socketserver
    Handler = http.server.SimpleHTTPRequestHandler
    # Older Pythons do not know the wasm MIME type; without it the browser
    # refuses to instantiate the module via streaming compilation.
    Handler.extensions_map.update({ '.wasm': 'application/wasm' })
    Handler.directory = OUTPUT_DIR
    os.chdir(OUTPUT_DIR)
    # Allow quick restarts without "address already in use" errors.
    socketserver.TCPServer.allow_reuse_address = True
    port = 8000
    print(f"serving docs at http://localhost:{port}")
    with socketserver.TCPServer(("", port), Handler) as httpd:
        # NOTE(review): setting this on the instance after bind() is a
        # no-op; the class-level flag above is what takes effect.
        httpd.allow_reuse_address = True
        httpd.serve_forever()
def expand_refs(comment_line):
    """Add self-referencing hrefs to bare markdown links.

    Expands ``[Foo]`` into ``[Foo](#Foo)`` while leaving links that already
    carry a target, such as ``[Foo](https://foo)``, untouched.
    """
    # First pass handles links followed by any non-'(' character, second
    # pass handles a bare link at the end of the line.
    expanded = re.sub(r"\[(\S+)\]([^(])", r"[\1](#\1)\2", comment_line)
    return re.sub(r"\[(\S+)\]$", r"[\1](#\1)", expanded)
def gather_docstrings(paths):
    """Given a list of paths to JS and CPP files, builds a JSON tree of
    type descriptions.

    Recognised inputs:
      * ``/// [name] ::tags:: brief`` doxygen-style comment lines,
      * ``enum_...("Name")`` / ``.value("Name")`` embind enum declarations.
    Returns a list of root entities (classes, functions, enums); methods
    and argument/retval entries are nested under their parents.
    """
    result = []
    stack = [{"tags": ["root"]}]
    previous = stack[0]
    docline = re.compile(r' */// (.+)')
    enumline = re.compile(r' *enum_.*\"(.*)\"')
    enumvalue = re.compile(r' *\.value\("(.*)\"')
    tagged = re.compile(r'(\S+)? *::(.+):: *(.*)')
    lines = []
    enumerating = False
    current_enumeration = None
    for path in paths:
        # Context manager so the handles are closed deterministically
        # (the original leaked one handle per input file).
        with open(path) as srcfile:
            lines += srcfile.readlines()
    for line in lines:
        match_obj = docline.match(line)
        if not match_obj:
            # Not a doc comment; check for embind enum declarations.
            match_obj = enumline.match(line)
            if match_obj:
                result.append({
                    "name": match_obj.groups()[0],
                    "tags": "enum",
                    "brief": "",
                    "detail": None,
                    "children": [],
                })
                current_enumeration = result[-1]["children"]
                enumerating = True
                continue
            match_obj = enumvalue.match(line)
            if match_obj:
                val = match_obj.groups()[0]
                current_enumeration.append(val)
                continue
            # BUGFIX: ordinary source lines match none of the patterns and
            # previously fell through to match_obj.groups() with
            # match_obj == None, raising AttributeError.  Skip them.
            continue
        ln = match_obj.groups()[0]
        match_obj = tagged.match(ln)
        if match_obj:
            name = match_obj.groups()[0]
            tags = match_obj.groups()[1].split()
            brief = match_obj.groups()[2]
            entity = {
                "name": name,
                "tags": tags,
                "brief": brief,
                "detail": None,
                "children": []
            }
            # Check if this is continuation of a previous type.
            if brief == '':
                for existing_type in result:
                    if existing_type['name'] == name:
                        entity = existing_type
                        result.remove(existing_type)
                        break
            top = stack[-1]["tags"]
            if 'root' in top:
                result.append(entity)
                stack.append(entity)
            elif 'class' in tags or 'function' in tags:
                result.append(entity)
                stack[-1] = entity
            elif 'method' in tags and 'class' in top:
                stack[-1]["children"].append(entity)
                stack.append(entity)
            elif 'method' in tags:
                # Sibling method: replace the previous method, nest under
                # the enclosing class.
                stack[-2]["children"].append(entity)
                stack[-1] = entity
            elif 'retval' in tags or 'argument' in tags:
                stack[-1]["children"].append(entity)
            previous = entity
        else:
            # Untagged /// line: continuation of the previous entity's
            # brief (when it ends with a backslash) or its detail text.
            brief = previous["brief"]
            detail = previous["detail"]
            if brief.endswith("\\"):
                previous["brief"] = brief[:-1] + ln
            elif not detail:
                previous["detail"] = ln
            else:
                previous["detail"] += "\n" + ln
    return result
def generate_class_reference(entity):
name = entity["name"]
brief, detail = entity["brief"], entity["detail"]
brief = expand_refs(brief)
result = f"\n## class <a id='{name}' href='#{name}'>{name}</a>\n\n"
result += brief + "\n\n"
entity["children"].sort(key = lambda t: t["name"])
for method in entity["children"]:
result += "- **"
if "static" in method["tags"]:
# Write the class name before the method name.
result += name + "."
else:
# Instances are lowercase by convention.
result += name[0].lower() + name[1:] + "."
mname = method.get("name")
assert mname, f"Missing method name on {name}"
args = []
for child in method["children"]:
if "argument" in child["tags"]:
cname = child.get("name")
assert cname, f"Missing arg name on {mname}"
args.append(cname)
result += f"{mname}(" + ", ".join(args) + ")**\n"
if method["brief"] != "":
result += " - " + method["brief"] + "\n"
for child in method["children"]:
argname = child["name"]
argbrief = expand_refs(child["brief"])
if "argument" in child["tags"]:
result += f" - *{argname}* {argbrief}\n"
elif "retval" in child["tags"]:
result += f" - *returns* {argbrief}\n"
result += "\n"
if detail:
result += expand_refs(detail) + "\n"
| |
<filename>src/compas/datastructures/volmesh/volmesh.py<gh_stars>0
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from ast import literal_eval as _eval
from math import sqrt
from copy import deepcopy
from compas.files.obj import OBJ
from compas.geometry import centroid_points
from compas.datastructures import Datastructure
from compas.datastructures import Mesh
from compas.datastructures._mixins import VertexAttributesManagement
from compas.datastructures._mixins import VertexHelpers
from compas.datastructures._mixins import VertexCoordinatesDescriptors
from compas.datastructures._mixins import VertexFilter
from compas.datastructures._mixins import EdgeAttributesManagement
from compas.datastructures._mixins import EdgeHelpers
from compas.datastructures._mixins import EdgeGeometry
from compas.datastructures._mixins import FaceAttributesManagement
from compas.datastructures._mixins import FaceHelpers
from compas.datastructures._mixins import FromToData
from compas.datastructures._mixins import FromToJson
def center_of_mass(edges, sqrt=sqrt):
    """Compute the length-weighted centroid of a collection of edges.

    Parameters
    ----------
    edges : iterable
        Pairs of start/end points, each an xyz sequence.

    Returns
    -------
    tuple
        The (x, y, z) coordinates of the center of mass.
    """
    total_length = 0.0
    weighted = [0.0, 0.0, 0.0]
    for start, end in edges:
        length = sqrt(sum((start[axis] - end[axis]) ** 2 for axis in range(3)))
        # Each edge contributes its midpoint, weighted by its length.
        for axis in range(3):
            weighted[axis] += length * 0.5 * (start[axis] + end[axis])
        total_length += length
    return (weighted[0] / total_length,
            weighted[1] / total_length,
            weighted[2] / total_length)
class VolMesh(FromToData,
FromToJson,
FaceHelpers,
EdgeHelpers,
VertexHelpers,
EdgeGeometry,
VertexCoordinatesDescriptors,
FaceAttributesManagement,
EdgeAttributesManagement,
VertexAttributesManagement,
Datastructure,
VertexFilter):
"""Class for working with volumetric meshes.
Attributes
----------
vertex : dict
The vertices of the volmesh. Each vertex is represented by a key-value pair
in the vertex dictionary. The key is the unique identifier of the vertex,
and the value is itself a dictionary of named vertex attributes.
``self.vertex[key] -> attribute dict``
cell : dict
The cells of the volmesh. Each cell is represted by a key-value pair in
the cell dictionary. The key is the unique identifier of the cell, and
the value id itself a dictionary. The keys of this dictionary correspond
to the vertices that make up the cell. The values are again dictionaries.
Each key in the latter dictionary is a neighbor of the previous vertex.
Together they form a halfedge of the cell, pointing at one of the cell's
halffaces.
``self.cell[ckey][u][v] -> fkey``
halfface : dict
The halffaces of the volmesh. Each halfface is represented by
``self.halfface[fkey] -> vertex cycle``
plane : dict
The planes of the volmesh. Every plane is uniquely defined by three
neighboring vertices of the volmesh in a specific order. At the first level,
each vertex in the plane dict points at a new dictionary. The keys of this
dictionary are the (undirected) neighbors of the previous vertex. The values
are again dictionaries. In combination with the first two keys, the keys
of the latter identify oriented faces (planes) of the volmesh, finally
pointing at the cells of the volmesh.
``self.plane[u][v][w] -> ckey``.
Notes
-----
Volumetric meshes are 3-manifold, cellular structures.
The implementation of *VolMesh* is based on the notion of *x-maps*
and the concepts behind the *OpenVolumeMesh* library [vci2016]_.
In short, we add an additional entity compared to polygonal meshes,
the *cell*, and relate cells not through *half-edges*, but through a combination
of *half-faces* and *planes*. Each cell consists of a series of vertex pairs,
forming half-edges. Every half-edge points at a half-face of the cell. The half-
faces are stored as vertex cycles. Every three adjacent vertices in the cycle,
through the planes, point at the cell of which they form the boundary.
References
----------
.. [vci2016] Visual Computing Institute *OpenVolumeMesh*.
Available at: http://www.openvolumemesh.org
"""
    def __init__(self):
        # Highest integer key handed out so far for vertices / halffaces /
        # cells; -1 means none yet (see _get_vertex_key and friends).
        self._max_int_key = -1
        self._max_int_fkey = -1
        self._max_int_ckey = -1
        # When True, keys returned by _get_vertex_key are stringified.
        self._key_to_str = False
        # Core topology containers; see the class docstring for the layout
        # of vertex / plane / halfface / cell.
        self.vertex = {}
        self.plane = {}
        self.halfface = {}
        self.cell = {}
        self.edge = {}
        # General (mostly rendering-oriented) attributes of the datastructure.
        self.attributes = {
            'name'                : 'VolMesh',
            'color.vertex'        : (255, 255, 255),
            'color.edge'          : (0, 0, 0),
            'color.face'          : (200, 200, 200),
            'color.normal:vertex' : (0, 255, 0),
            'color.normal:face'   : (0, 255, 0),
        }
        self.default_vertex_attributes = {
            'x': 0.0,
            'y': 0.0,
            'z': 0.0
        }
        self.default_edge_attributes = {}
# --------------------------------------------------------------------------
# customisation
# --------------------------------------------------------------------------
    def __str__(self):
        """Summary string of the volmesh; not implemented yet."""
        raise NotImplementedError
# --------------------------------------------------------------------------
# special properties
# --------------------------------------------------------------------------
    @property
    def name(self):
        """str: The name of the mesh (stored in ``self.attributes``)."""
        return self.attributes.get('name', None)

    @name.setter
    def name(self, value):
        # Stored alongside the color settings in the attributes dict.
        self.attributes['name'] = value
    @property
    def color(self):
        """dict: Color settings, keyed without their 'color.' prefix."""
        return dict(
            (key[6:], self.attributes[key])
            for key in self.attributes if key.startswith('color.')
        )

    @color.setter
    def color(self, value):
        # Expects a (spec, rgb) pair, e.g. ('vertex', (255, 0, 0)).
        # The probes below silently reject malformed input.
        try:
            value[0]
            value[1]
            value[1][2]  # rgb must have at least three components
        except Exception:
            return
        self.attributes['color.{0}'.format(value[0])] = value[1]
@property
def data(self):
"""The data representing the mesh."""
data = {
'attributes' : self.attributes,
'default_vertex_attributes': self.default_vertex_attributes,
'default_edge_attributes' : self.default_edge_attributes,
'vertex' : {},
'cell' : {},
'halfface' : {},
'plane' : {},
'edge' : {},
'max_int_key' : self._max_int_key,
'max_int_fkey' : self._max_int_fkey,
'max_int_ckey' : self._max_int_ckey, }
key_rkey = {}
for key in self.vertex:
rkey = repr(key)
key_rkey[key] = rkey
data['vertex'][rkey] = self.vertex[key]
data['plane'][rkey] = {}
data['edge'][rkey] = {}
for u in self.edge:
ru = key_rkey[u]
for v in self.edge[u]:
rv = key_rkey[v]
data['edge'][ru][rv] = self.edge[u][v]
for f in self.halfface:
_f = repr(f)
data['halfface'][_f] = {}
for u, v in self.halfface[f].iteritems():
_u = repr(u) # use the map?
_v = repr(v) # use the map?
data['halfface'][_f][_u] = _v
for u in self.plane:
_u = repr(u)
for v in self.plane[u]:
_v = repr(v)
if _v not in data['plane'][_u]:
data['plane'][_u][_v] = {}
for w, c in self.plane[u][v].iteritems():
_w = repr(w)
_c = repr(c)
data['plane'][_u][_v][_w] = _c
for c in self.cell:
_c = repr(c)
data['cell'][_c] = {}
for u in self.cell[c]:
_u = repr(u)
if _u not in data['cell'][_c]:
data['cell'][_c][_u] = {}
for v, f in self.cell[c][u].iteritems():
_v = repr(v)
_f = repr(f)
data['cell'][_c][_u][_v] = _f
return data
@data.setter
def data(self, data):
""""""
attributes = data.get('attributes') or {}
default_vertex_attributes = data.get('default_vertex_attributes') or {}
default_edge_attributes = data.get('default_edge_attributes') or {}
vertex = data.get('vertex') or {}
cell = data.get('cell') or {}
halfface = data.get('halfface') or {}
plane = data.get('plane') or {}
edge = data.get('edge') or {}
max_int_key = data.get('max_int_key', - 1)
max_int_fkey = data.get('max_int_fkey', - 1)
max_int_ckey = data.get('max_int_ckey', - 1)
if not vertex or not edge or not plane or not halfface or not cell:
return
self.clear()
self.attributes.update(attributes)
self.default_vertex_attributes.update(default_vertex_attributes)
self.default_edge_attributes.update(default_edge_attributes)
for _k, attr in vertex.iteritems():
k = _eval(_k)
self.vertex[k] = self.default_vertex_attributes.copy()
if attr:
self.vertex[k].update(attr)
self.plane[k] = {}
self.edge[k] = {}
for _u, nbrs in edge.iteritems():
nbrs = nbrs or {}
u = _eval(_u)
for _v, attr in nbrs.iteritems():
v = _eval(_v)
self.edge[u][v] = self.default_edge_attributes.copy()
if attr:
self.edge[u][v].update(attr)
for _f in halfface:
f = _eval(_f)
self.halfface[f] = {}
for _u, _v in halfface[_f].iteritems():
u = _eval(_u)
v = _eval(_v)
self.halfface[f][u] = v
for _u in plane:
u = _eval(_u)
for _v in plane[_u]:
v = _eval(_v)
if v not in self.plane[u]:
self.plane[u][v] = {}
for _w, _c in plane[_u][_v].iteritems():
w = _eval(_w)
c = _eval(_c)
self.plane[u][v][w] = c
for _c in cell:
c = _eval(_c)
self.cell[c] = {}
for _u in cell[_c]:
u = _eval(_u)
if u not in self.cell[c]:
self.cell[c][u] = {}
for _v, _f in cell[_c][_u].iteritems():
v = _eval(_v)
f = _eval(_f)
self.cell[c][u][v] = f
self._max_int_key = max_int_key
self._max_int_fkey = max_int_fkey
self._max_int_ckey = max_int_ckey
# --------------------------------------------------------------------------
# constructors
# --------------------------------------------------------------------------
    @classmethod
    def from_obj(cls, filepath):
        """Construct a volmesh from an OBJ file.

        Each OBJ group is interpreted as one cell; the group's faces become
        the cell's halffaces.

        Parameters
        ----------
        filepath : str
            Path to the OBJ file.

        Returns
        -------
        VolMesh
        """
        obj = OBJ(filepath)
        vertices = obj.parser.vertices
        faces = obj.parser.faces
        groups = obj.parser.groups
        cells = []
        for name in groups:
            group = groups[name]
            cell = []
            for item in group:
                # Group items are (type, index) pairs; keep faces only.
                if item[0] != 'f':
                    continue
                face = faces[item[1]]
                cell.append(face)
            cells.append(cell)
        return cls.from_vertices_and_cells(vertices, cells)
@classmethod
def from_vertices_and_cells(cls, vertices, cells):
mesh = cls()
for x, y, z in vertices:
mesh.add_vertex(x=x, y=y, z=z)
for halffaces in cells:
mesh.add_cell(halffaces)
return mesh
    @classmethod
    def from_vertices_and_edges(cls, vertices, edges):
        # Constructing a volmesh from a wireframe is not supported yet.
        raise NotImplementedError
# --------------------------------------------------------------------------
# converters
# --------------------------------------------------------------------------
    def to_obj(self, filepath):
        """Write the volmesh to an OBJ file; not implemented yet."""
        raise NotImplementedError
# --------------------------------------------------------------------------
# helpers
# --------------------------------------------------------------------------
def _get_vertex_key(self, key):
if key is None:
key = self._max_int_key = self._max_int_key + 1
else:
try:
i = int(key)
except (ValueError, TypeError):
pass
else:
if i > self._max_int_key:
self._max_int_key = i
if self._key_to_str:
return str(key)
return key
def _get_face_key(self, fkey):
if fkey is None:
fkey = self._max_int_fkey = self._max_int_fkey + 1
else:
try:
i = int(fkey)
except (ValueError, TypeError):
pass
else:
if i > self._max_int_fkey:
self._max_int_fkey = i
return fkey
def _get_cellkey(self, ckey):
if ckey is None:
ckey = self._max_int_ckey = self._max_int_ckey + 1
else:
try:
i = int(ckey)
except (ValueError, TypeError):
pass
else:
if i > self._max_int_ckey:
self._max_int_ckey = | |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.firewall.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Excludelist(VapiInterface):
    """
    Stub for the NSX firewall exclude-list service.  All methods delegate
    to the auto-generated ``_ExcludelistStub`` via ``_invoke``.
    """
    # Accepted values for the ``objectType`` parameter of checkifexists().
    CHECKIFEXISTS_OBJECT_TYPE_NSGROUP = "NSGroup"
    """
    Possible value for ``objectType`` of method :func:`Excludelist.checkifexists`.
    """
    CHECKIFEXISTS_OBJECT_TYPE_LOGICALPORT = "LogicalPort"
    """
    Possible value for ``objectType`` of method :func:`Excludelist.checkifexists`.
    """
    CHECKIFEXISTS_OBJECT_TYPE_LOGICALSWITCH = "LogicalSwitch"
    """
    Possible value for ``objectType`` of method :func:`Excludelist.checkifexists`.
    """
    # Accepted values for the ``objectType`` parameter of removemember().
    REMOVEMEMBER_OBJECT_TYPE_NSGROUP = "NSGroup"
    """
    Possible value for ``objectType`` of method :func:`Excludelist.removemember`.
    """
    REMOVEMEMBER_OBJECT_TYPE_LOGICALPORT = "LogicalPort"
    """
    Possible value for ``objectType`` of method :func:`Excludelist.removemember`.
    """
    REMOVEMEMBER_OBJECT_TYPE_LOGICALSWITCH = "LogicalSwitch"
    """
    Possible value for ``objectType`` of method :func:`Excludelist.removemember`.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.firewall.excludelist'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ExcludelistStub)
        # No task-based (polled) operations on this service.
        self._VAPI_OPERATION_IDS = {}

    def addmember(self,
                  resource_reference,
                  ):
        """
        Add a new object in the exclude list

        :type resource_reference: :class:`com.vmware.nsx.model_client.ResourceReference`
        :param resource_reference: (required)
        :rtype: :class:`com.vmware.nsx.model_client.ResourceReference`
        :return: com.vmware.nsx.model.ResourceReference
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('addmember',
                            {
                            'resource_reference': resource_reference,
                            })

    def checkifexists(self,
                      object_id,
                      deep_check=None,
                      object_type=None,
                      ):
        """
        Check if the object a member of the exclude list

        :type object_id: :class:`str`
        :param object_id: identifier of the object (required)
        :type deep_check: :class:`bool` or ``None``
        :param deep_check: Check all parents (optional, default to false)
        :type object_type: :class:`str` or ``None``
        :param object_type: Object type of an entity (optional)
        :rtype: :class:`com.vmware.nsx.model_client.ResourceReference`
        :return: com.vmware.nsx.model.ResourceReference
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Optional parameters are passed through as None when omitted; the
        # server applies the documented defaults.
        return self._invoke('checkifexists',
                            {
                            'object_id': object_id,
                            'deep_check': deep_check,
                            'object_type': object_type,
                            })

    def get(self):
        """
        Get list of entities in exclude list

        :rtype: :class:`com.vmware.nsx.model_client.ExcludeList`
        :return: com.vmware.nsx.model.ExcludeList
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('get', None)

    def removemember(self,
                     object_id,
                     deep_check=None,
                     object_type=None,
                     ):
        """
        Remove an existing object from the exclude list

        :type object_id: :class:`str`
        :param object_id: identifier of the object (required)
        :type deep_check: :class:`bool` or ``None``
        :param deep_check: Check all parents (optional, default to false)
        :type object_type: :class:`str` or ``None``
        :param object_type: Object type of an entity (optional)
        :rtype: :class:`com.vmware.nsx.model_client.ResourceReference`
        :return: com.vmware.nsx.model.ResourceReference
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('removemember',
                            {
                            'object_id': object_id,
                            'deep_check': deep_check,
                            'object_type': object_type,
                            })

    def update(self,
               exclude_list,
               ):
        """
        Modify exclude list

        :type exclude_list: :class:`com.vmware.nsx.model_client.ExcludeList`
        :param exclude_list: (required)
        :rtype: :class:`com.vmware.nsx.model_client.ExcludeList`
        :return: com.vmware.nsx.model.ExcludeList
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('update',
                            {
                            'exclude_list': exclude_list,
                            })
class Profiles(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.firewall.profiles'
"""
Identifier of the service in canonical form.
"""
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ProfilesStub)
        # No task-based (polled) operations on this service.
        self._VAPI_OPERATION_IDS = {}
    def create(self,
               base_firewall_profile,
               ):
        """
        Create a firewall profile with values provided. It creates profile
        based resource_type in the payload.

        :type base_firewall_profile: :class:`vmware.vapi.struct.VapiStruct`
        :param base_firewall_profile: (required)
            The parameter must contain all the attributes defined in
            :class:`com.vmware.nsx.model_client.BaseFirewallProfile`.
        :rtype: :class:`vmware.vapi.struct.VapiStruct`
        :return: com.vmware.nsx.model.BaseFirewallProfile
            The return value will contain all the attributes defined in
            :class:`com.vmware.nsx.model_client.BaseFirewallProfile`.
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Delegates to the generated stub; the profile's resource_type field
        # selects the concrete profile class on the server side.
        return self._invoke('create',
                            {
                            'base_firewall_profile': base_firewall_profile,
                            })
    def delete(self,
               profile_id,
               ):
        """
        Deletes a firewall profile.

        :type profile_id: :class:`str`
        :param profile_id: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Returns nothing on success; failures surface as the errors above.
        return self._invoke('delete',
                            {
                            'profile_id': profile_id,
                            })
    def get(self,
            profile_id,
            ):
        """
        Return firewall session timer profile.

        :type profile_id: :class:`str`
        :param profile_id: (required)
        :rtype: :class:`vmware.vapi.struct.VapiStruct`
        :return: com.vmware.nsx.model.BaseFirewallProfile
            The return value will contain all the attributes defined in
            :class:`com.vmware.nsx.model_client.BaseFirewallProfile`.
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('get',
                            {
                            'profile_id': profile_id,
                            })
def list(self, resource_type, cursor=None, included_fields=None,
         page_size=None, sort_ascending=None, sort_by=None):
    """
    List all the firewall profiles available by requested resource_type.

    :type  resource_type: :class:`str`
    :param resource_type: Profile resource type (required)
    :type  cursor: :class:`str` or ``None``
    :param cursor: Opaque cursor to be used for getting next page of records
        (supplied by current result page) (optional)
    :type  included_fields: :class:`str` or ``None``
    :param included_fields: Comma separated list of fields that should be
        included in query result (optional)
    :type  page_size: :class:`long` or ``None``
    :param page_size: Maximum number of results to return in this page
        (server may return fewer) (optional, default to 1000)
    :type  sort_ascending: :class:`bool` or ``None``
    :param sort_ascending: (optional)
    :type  sort_by: :class:`str` or ``None``
    :param sort_by: Field by which records are sorted (optional)
    :rtype: :class:`com.vmware.nsx.model_client.FirewallProfileListResult`
    :return: com.vmware.nsx.model.FirewallProfileListResult
    :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
        Service Unavailable
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
        Bad Request, Precondition Failed
    :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
        Internal Server Error
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        Forbidden
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        Not Found
    """
    # Optional paging/sorting arguments are forwarded as-is; ``None``
    # values are handled by the underlying stub.
    request = {
        'resource_type': resource_type,
        'cursor': cursor,
        'included_fields': included_fields,
        'page_size': page_size,
        'sort_ascending': sort_ascending,
        'sort_by': sort_by,
    }
    return self._invoke('list', request)
def update(self, profile_id, base_firewall_profile):
    """
    Update user configurable properties of firewall profile.

    :type  profile_id: :class:`str`
    :param profile_id: (required)
    :type  base_firewall_profile: :class:`vmware.vapi.struct.VapiStruct`
    :param base_firewall_profile: (required)
        The parameter must contain all the attributes defined in
        :class:`com.vmware.nsx.model_client.BaseFirewallProfile`.
    :rtype: :class:`vmware.vapi.struct.VapiStruct`
    :return: com.vmware.nsx.model.BaseFirewallProfile
        The return value will contain all the attributes defined in
        :class:`com.vmware.nsx.model_client.BaseFirewallProfile`.
    :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
        Service Unavailable
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
        Bad Request, Precondition Failed
    :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
        Internal Server Error
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        Forbidden
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        Not Found
    """
    # Both the identifier and the replacement payload travel in one request.
    request = {
        'profile_id': profile_id,
        'base_firewall_profile': base_firewall_profile,
    }
    return self._invoke('update', request)
class Rules(VapiInterface):
    """Client-side stub for the ``com.vmware.nsx.firewall.rules`` service."""

    _VAPI_SERVICE_ID = 'com.vmware.nsx.firewall.rules'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _RulesStub)
        # No polymorphic operations on this service.
        self._VAPI_OPERATION_IDS = {}

    def get(self, rule_id):
        """
        Return existing firewall rule information.

        :type  rule_id: :class:`str`
        :param rule_id: (required)
        :rtype: :class:`com.vmware.nsx.model_client.FirewallRule`
        :return: com.vmware.nsx.model.FirewallRule
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request = {'rule_id': rule_id}
        return self._invoke('get', request)
class Sections(VapiInterface):
"""
"""
CREATE_OPERATION_TOP = "insert_top"
"""
Possible value for ``operation`` of method :func:`Sections.create`.
"""
CREATE_OPERATION_BOTTOM = "insert_bottom"
"""
Possible value for ``operation`` of method :func:`Sections.create`.
"""
CREATE_OPERATION_AFTER = "insert_after"
"""
Possible value for ``operation`` of method :func:`Sections.create`.
"""
CREATE_OPERATION_BEFORE = "insert_before"
"""
Possible value for ``operation`` of method :func:`Sections.create`.
"""
CREATEWITHRULES_OPERATION_TOP = "insert_top"
"""
Possible value for ``operation`` of method :func:`Sections.createwithrules`.
"""
CREATEWITHRULES_OPERATION_BOTTOM = "insert_bottom"
"""
Possible value for ``operation`` of method :func:`Sections.createwithrules`.
"""
CREATEWITHRULES_OPERATION_AFTER = "insert_after"
"""
Possible value for ``operation`` of method :func:`Sections.createwithrules`.
"""
CREATEWITHRULES_OPERATION_BEFORE = "insert_before"
"""
Possible value for ``operation`` of method :func:`Sections.createwithrules`.
"""
LIST_ENFORCED_ON_VIF = "VIF"
"""
Possible value for ``enforcedOn`` of method :func:`Sections.list`.
"""
LIST_ENFORCED_ON_LOGICALROUTER = "LOGICALROUTER"
"""
Possible value for ``enforcedOn`` of method :func:`Sections.list`.
"""
LIST_ENFORCED_ON_BRIDGEENDPOINT = "BRIDGEENDPOINT"
"""
Possible value for ``enforcedOn`` of method :func:`Sections.list`.
"""
LIST_ENFORCED_ON_DHCP_SERVICE = "DHCP_SERVICE"
"""
Possible value for ``enforcedOn`` of method :func:`Sections.list`.
"""
LIST_ENFORCED_ON_METADATA_PROXY = "METADATA_PROXY"
"""
Possible value for ``enforcedOn`` of method :func:`Sections.list`.
"""
LIST_ENFORCED_ON_L2VPN_SESSION = "L2VPN_SESSION"
"""
Possible value for ``enforcedOn`` of method | |
from mock import patch, Mock, call, ANY
import tempfile
from django.apps import apps
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.test import TestCase
from django_webtest import WebTest
from django_dynamic_fixture import G
from .utils import ClientError, parse_dimensions_string, \
parse_region, parse_size, make_canonical_path
# Resolve models lazily so tests work with swapped user models and the
# icekit image plugin's app registry.
User = get_user_model()
Image = apps.get_model('icekit_plugins_image.Image')
class TestImageApiUtils(TestCase):
    """Unit tests for the pure IIIF parsing/formatting helpers in ``.utils``.

    Each test feeds IIIF Image API path components (region, size, rotation,
    quality, format) to the parser helpers and checks the exact tuples or
    canonical paths produced.
    """

    def test_parse_dimensions_string(self):
        # Plain integer "x,y,w,h" string.
        self.assertEqual(
            (0, 0, 1, 1), parse_dimensions_string('0,0,1,1'))
        self.assertEqual(
            (0.1, 0.1, 0.1, 0.1),
            parse_dimensions_string('0.1,0.1,0.1,0.1', permit_floats=True))
        # Must use `permit_floats=True` for float values
        self.assertRaises(
            ClientError,
            parse_dimensions_string,
            '0.1,0,1,1')
        # Wrong number of elements
        self.assertRaises(
            ClientError,
            parse_dimensions_string,
            '0,1,1')
        self.assertRaises(
            ClientError,
            parse_dimensions_string,
            '0,0,0,1,1')
        # Negative numbers not permitted
        self.assertRaises(
            ClientError,
            parse_dimensions_string,
            '-1,0,1,1')
        # Elements must be numerical
        self.assertRaises(
            ClientError,
            parse_dimensions_string,
            'a,0,1,1')

    def test_parse_region(self):
        # parse_region(region_str, image_width, image_height)
        # returns an (x, y, width, height) crop in pixels.
        # Region: full
        self.assertEqual(
            (0, 0, 300, 200),
            parse_region('full', 300, 200))
        # Region: square
        self.assertEqual(
            (50, 0, 200, 200),
            parse_region('square', 300, 200))
        self.assertEqual(
            (0, 25, 200, 200),
            parse_region('square', 200, 250))
        # Region: x,y,width,height pixels
        self.assertEqual(
            (0, 25, 100, 200),
            parse_region('0,25,100,200', 200, 250))
        self.assertEqual(
            (0, 25, 200, 175),  # Don't extend crop beyond image height
            parse_region('0,25,200,200', 200, 200))
        self.assertEqual(
            (100, 0, 100, 200),  # Don't extend crop beyond image width
            parse_region('100,0,200,200', 200, 250))
        # Region: x,y,width,height percentages
        self.assertEqual(
            (50, 25, 375, 100),
            parse_region('pct:10,12.5,75,50', 500, 200))
        self.assertEqual(
            (375, 0, 125, 100),  # Don't extend crop beyond image width
            parse_region('pct:75,0,50,50', 500, 200))
        self.assertEqual(
            (0, 0, 500, 200),  # Don't extend crop beyond image height
            parse_region('pct:0,0,100,200', 500, 200))

    def test_parse_size(self):
        # parse_size(size_str, region_width, region_height)
        # returns the (width, height) to scale the region to.
        # Size: full/max
        self.assertEqual(
            (100, 200), parse_size('full', 100, 200))
        self.assertEqual(
            (200, 100), parse_size('max', 200, 100))
        # Size: pct
        self.assertEqual(
            (100, 200), parse_size('pct:100', 100, 200))
        self.assertEqual(
            (50, 100), parse_size('pct:50', 100, 200))
        # Size: w,h
        self.assertEqual(
            (25, 75), parse_size('25,75', 100, 200))
        # Size: w, (maintain aspect ratio)
        self.assertEqual(
            (50, 100), parse_size('50,', 100, 200))
        self.assertEqual(
            # Can scale beyond original image size
            (125, 250), parse_size('125,', 100, 200))
        # Size: ,h (maintain aspect ratio)
        self.assertEqual(
            (50, 100), parse_size(',100', 100, 200))
        self.assertEqual(
            # Can scale beyond original image size
            (125, 250), parse_size(',250', 100, 200))
        # Size: !w,h (best-fit)
        self.assertEqual(
            # Width is best fit
            (80, 160), parse_size('!80,200', 100, 200))
        self.assertEqual(
            # Width is best fit
            (80, 160), parse_size('!80,160', 100, 200))
        self.assertEqual(
            # Height is best fit
            (75, 150), parse_size('!80,150', 100, 200))
        self.assertEqual(
            # Height is best fit, can scale beyond original image size
            (250, 250), parse_size('!400,250', 200, 200))

    # NOTE(review): the name says "url" but this exercises
    # make_canonical_path; consider renaming for consistency.
    def test_make_canonical_url(self):
        # No image transformation
        self.assertEqual(
            '/iiif/1/full/full/0/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # No-op region change for square image
        self.assertEqual(
            '/iiif/1/full/full/0/default.jpg',
            make_canonical_path(
                1, 800, 800,  # Image identifier and dimensions
                (0, 0, 800, 800),  # Region
                (800, 800),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Region changed top-left
        self.assertEqual(
            '/iiif/1/0,1,800,599/800,/0/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 1, 800, 599),  # Region
                (800, 600),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Region changed bottom-right
        self.assertEqual(
            '/iiif/1/0,0,799,600/800,/0/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 799, 600),  # Region
                (800, 600),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Size changed: same aspect ratio so w, only
        self.assertEqual(
            '/iiif/1/full/400,/0/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (400, 300),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Size changed: width, aspect ratio changed
        self.assertEqual(
            '/iiif/1/full/750,600/0/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (750, 600),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Size changed: height, aspect ratio changed
        self.assertEqual(
            '/iiif/1/full/800,900/0/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 900),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Rotation changed: mirrored
        self.assertEqual(
            '/iiif/1/full/full/!0/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (True, 0),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Rotation changed: rotated
        self.assertEqual(
            '/iiif/1/full/full/120/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (False, 120),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Rotation changed: negative rotation
        self.assertEqual(
            '/iiif/1/full/full/-90/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (False, -90),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Rotation changed: rotated and mirrored
        self.assertEqual(
            '/iiif/1/full/full/!180/default.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (True, 180),  # Rotation
                'default',  # Quality
                'jpg',  # Format
            ))
        # Quality change: color
        self.assertEqual(
            '/iiif/1/full/full/0/color.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (False, 0),  # Rotation
                'color',  # Quality
                'jpg',  # Format
            ))
        # Quality changed: gray
        self.assertEqual(
            '/iiif/1/full/full/0/gray.jpg',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (False, 0),  # Rotation
                'gray',  # Quality
                'jpg',  # Format
            ))
        # Format change: tif
        self.assertEqual(
            '/iiif/1/full/full/0/default.tif',
            make_canonical_path(
                1, 800, 600,  # Image identifier and dimensions
                (0, 0, 800, 600),  # Region
                (800, 600),  # Size
                (False, 0),  # Rotation
                'default',  # Quality
                'tif',  # Format
            ))
class TestImageAPIViews(WebTest):
def setUp(self):
    """Build a superuser, an Image fixture, and a mocked FileResponse."""
    # Disable file storage engine for tests
    from icekit.plugins.iiif import views
    views.iiif_storage = None
    self.superuser = G(
        User,
        is_active=True,
        is_superuser=True,
    )
    # 200x300 image with credit/source/license used by the info.json test.
    self.ik_image = G(
        Image,
        width=200,
        height=300,
        credit="IC Arts Collection",
        source="Interaction Consortium",
        license="CC",
    )
    # Set up mocks used by default
    # Patch FileResponse for the whole test so no real file I/O happens;
    # the patch is reversed automatically via addCleanup.
    patcher = patch(
        'icekit.plugins.iiif.views.FileResponse')
    self.FileResponse = patcher.start()
    self.addCleanup(patcher.stop)
    self.FileResponse.return_value = HttpResponse('mocked')
def mock_image(self, width=200, height=300, name='test.jpg', mode='RGB',
               return_from=None):
    """
    Return a mock standing in for a PIL Image.

    The mock carries ``width``/``height``/``name``/``mode`` attributes and,
    when ``return_from`` (a mock of ``_get_image_or_404``) is given, is wired
    up as that mock's return value alongside ``self.ik_image``.
    """
    fake = Mock()
    # Plain attribute assignment; `name` is only special in Mock's
    # constructor, so setting it afterwards works as a normal attribute.
    fake.width = width
    fake.height = height
    fake.name = name
    fake.mode = mode
    if return_from is not None:
        return_from.return_value = (self.ik_image, fake)
    return fake
def test_iiif_image_api_info(self):
    """Exercise the info.json endpoint: auth redirect, payload, JSON-LD."""
    self.maxDiff = None  # Show whole diff on mismatch
    path = reverse('iiif_image_api_info', args=[self.ik_image.pk])
    # Not a privileged user
    user = G(User)
    response = self.app.get(path, user=user, expect_errors=True)
    self.assertEqual(302, response.status_code)
    self.assertTrue(
        response.headers.get('Location', '').endswith(
            '/login/?next=/iiif/%d/info.json' % self.ik_image.pk))
    # Valid response including basic data, attribution, & license
    response = self.app.get(path, user=self.superuser)
    expected = {
        "@context": "http://iiif.io/api/image/2/context.json",
        "@id": path,
        "@type": "iiif:Image",
        "protocol": "http://iiif.io/api/image",
        "width": self.ik_image.width,
        "height": self.ik_image.height,
        "license": ["CC"],
        "attribution": [{
            # Built from the Image fixture's credit and source fields.
            "@value": "Credit: IC Arts Collection."
            " Provided by: Interaction Consortium.",
            "@language": "en",
        }],
    }
    self.assertEqual(expected, response.json)
    # JSON-LD response not yet supported
    response = self.app.get(
        path,
        user=self.superuser,
        headers={'accept': 'application/ld+json'},
        expect_errors=True,
    )
    self.assertEqual(501, response.status_code)
    # NOTE(review): comparing str to response.content implies Python 2
    # (it is bytes under Python 3) -- confirm before porting.
    self.assertEqual(
        "JSON-LD response is not yet supported", response.content)
def test_iiif_image_api_basics(self):
    """Access control, 404 handling, and the no-op transform path."""
    # Not a privileged user
    user = G(User)
    response = self.app.get(
        reverse(
            'iiif_image_api',
            args=[self.ik_image.pk, 'full', 'max', '0', 'default', 'jpg']),
        user=user,
        expect_errors=True,
    )
    self.assertEqual(302, response.status_code)
    self.assertTrue(
        response.headers.get('Location', '').endswith(
            '/login/?next=/iiif/%d/full/max/0/default.jpg'
            % self.ik_image.pk))
    # Now is a privileged user
    user.user_permissions.add(
        Permission.objects.get(codename='can_use_iiif_image_api'))
    response = self.app.get(
        reverse(
            'iiif_image_api',
            args=[self.ik_image.pk, 'full', 'max', '0', 'default', 'jpg']),
        user=user,
    )
    # Invalid image identifier
    response = self.app.get(
        reverse(
            'iiif_image_api',
            args=[0, 'full', 'max', '0', 'default', 'jpg']),
        user=self.superuser,
        expect_errors=True,
    )
    self.assertEqual(404, response.status_code)
    # Correct use
    image = self.mock_image()
    with patch('icekit.plugins.iiif.views._get_image_or_404') as _getter:
        _getter.return_value = (self.ik_image, image)
        response = self.app.get(
            reverse(
                'iiif_image_api',
                args=[self.ik_image.pk,
                      'full', 'full', '0', 'default', 'jpg']),
            user=self.superuser,
        )
        # No image transform operations necessary or called, just save
        self.assertEqual(image.mock_calls, [
            call.save(ANY, format='jpeg')
        ])
        self.FileResponse.assert_called_with(
            ANY, content_type='image/jpeg')
@patch('icekit.plugins.iiif.views._get_image_or_404')
def test_iiif_image_api_region(self, _getter):
# Region: full
image = self.mock_image(return_from=_getter)
self.app.get(
reverse(
'iiif_image_api',
args=[self.ik_image.pk, 'full', 'max', '0', 'default', 'jpg']),
user=self.superuser,
).follow()
self.assertEqual(image.mock_calls, [call.save(ANY, format='jpeg')])
# Region: square, 200 x 300 image
image = self.mock_image(return_from=_getter)
self.app.get(
reverse(
'iiif_image_api', args=[
self.ik_image.pk, 'square', 'max', '0', 'default', 'jpg']),
user=self.superuser,
).follow()
self.assertEqual(image.mock_calls, [
# | |
def pretty_print(self):
    # Human-readable summary; `colored` (module-level helper, not in view)
    # colours the text according to this node's depth in the kernel tree.
    return colored('P0(ell=%1.1f, sf=%1.1f, dim=%s)' % (self.lengthscale, self.output_variance,
                   ','.join([str(x) for x in self.eff_dimensions])), self.depth())
def latex_print(self):
    """Return the LaTeX label for this kernel; parameter values are omitted."""
    return 'P0'
def __cmp__(self, other):
    # Python 2 three-way comparison: order by concrete class first, then by
    # the tolerance-shrunk differences of (lengthscale, output_variance).
    # eff_dimensions is deliberately excluded from the comparison.
    assert isinstance(other, Kernel)
    if cmp(self.__class__, other.__class__):
        return cmp(self.__class__, other.__class__)
    differences = [self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]
    # shrink_below_tolerance (module-level helper) maps near-zero
    # differences to exactly 0 so tiny numeric noise compares equal.
    differences = map(shrink_below_tolerance, differences)
    return cmp(differences, [0] * len(differences))
    # max_diff = max(np.abs([self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]))
    # return max_diff > CMP_TOLERANCE
    # return cmp((self.lengthscale, self.output_variance), (other.lengthscale, other.output_variance))
def depth(self):
    """Base kernels sit at depth 0 of the kernel expression tree."""
    return 0
def out_of_bounds(self, constraints):
    """Return True when the lengthscale violates the configured lower bound."""
    min_ell = constraints['min_lengthscale']
    return self.lengthscale < min_ell
class PP1KernelFamily(BaseKernelFamily):
    # Family (factory/metadata) object for the isotropic piecewise
    # polynomial kernel of degree 1 (GPML `covPPiso` with degree 1).
    # NOTE: this module is Python 2 code (`cmp` builtin, list `range`).

    def from_param_vector(self, params):
        # params unpacks as (lengthscale, output_variance, eff_dimensions).
        lengthscale, output_variance, eff_dimensions = params
        return PP1Kernel(lengthscale, output_variance, eff_dimensions)

    def num_params(self):
        # Length of the vector produced by PP1Kernel.param_vector().
        return 3

    def pretty_print(self):
        return colored('P1', self.depth())

    def default(self, eff_dimensions):
        # `eff_dimensions` here is a dimension COUNT; the kernel stores the
        # list of active dimension indices 0..n-1. Zero hyperparameters
        # mean "not yet initialised" (see default_params_replaced).
        return PP1Kernel(0., 0., range(eff_dimensions))

    def __cmp__(self, other):
        # Python 2 ordering: families compare equal iff same class.
        assert isinstance(other, KernelFamily)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        return 0

    def depth(self):
        return 0

    def id_name(self):
        return 'PP1'

    @staticmethod
    def description():
        return "Piecewise Polynomial 1"

    @staticmethod
    def params_description():
        return "lengthscale"
class PP1Kernel(BaseKernel):
    # Isotropic piecewise polynomial covariance, degree 1 (GPML `covPPiso`).
    # Attributes: lengthscale, output_variance (scalars; presumably stored
    # in log space as GPML hyperparameters -- TODO confirm), and
    # eff_dimensions, a list of active input dimension indices.

    def __init__(self, lengthscale, output_variance, eff_dimensions):
        self.output_variance = output_variance
        self.lengthscale = lengthscale
        self.eff_dimensions = eff_dimensions

    def family(self):
        return PP1KernelFamily()

    def gpml_kernel_expression(self):
        # MATLAB cell-array expression consumed by the GPML backend.
        return '{@covPPiso, 1}'

    def english_name(self):
        return 'P1'

    def id_name(self):
        return 'PP1'

    def param_vector(self):
        # order of args matches GPML
        # NOTE: eff_dimensions is a list, so this is a length-3
        # object-dtype array, not a flat numeric vector.
        return np.array([self.lengthscale, self.output_variance, self.eff_dimensions])

    def default_params_replaced(self, sd=1, data_shape=None):
        # Replace "unset" (zero) hyperparameters with random draws; with
        # probability 1/2 the draw is centred on the data-derived scale.
        result = self.param_vector()
        if result[0] == 0:
            # Set lengthscale with input scale
            if np.random.rand() < 0.5:
                result[0] = np.random.normal(loc=data_shape['input_scale'], scale=sd)
            else:
                result[0] = np.random.normal(loc=0, scale=sd)
        if result[1] == 0:
            # Set scale factor with output scale
            if np.random.rand() < 0.5:
                result[1] = np.random.normal(loc=data_shape['output_scale'], scale=sd)
            else:
                result[1] = np.random.normal(loc=0, scale=sd)
        return result

    def copy(self):
        # Shallow copy; eff_dimensions list is shared with the original.
        return PP1Kernel(self.lengthscale, self.output_variance, self.eff_dimensions)

    def __repr__(self):
        return 'PP1Kernel(lengthscale=%f, output_variance=%f, eff_dimensions=%s)' % (self.lengthscale,
            self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')

    def pretty_print(self):
        return colored('P1(ell=%1.1f, sf=%1.1f, dim=%s)' % (self.lengthscale, self.output_variance,
                       ','.join([str(x) for x in self.eff_dimensions])), self.depth())

    def latex_print(self):
        #return 'SE(\\ell=%1.1f, \\sigma=%1.1f)' % (self.lengthscale, self.output_variance)
        #return 'SE(\\ell=%1.1f)' % self.lengthscale
        return 'P1'

    def __cmp__(self, other):
        # Python 2 three-way comparison; class first, then tolerance-shrunk
        # hyperparameter differences (eff_dimensions excluded).
        assert isinstance(other, Kernel)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        differences = [self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]
        differences = map(shrink_below_tolerance, differences)
        return cmp(differences, [0] * len(differences))
        # max_diff = max(np.abs([self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]))
        # return max_diff > CMP_TOLERANCE
        # return cmp((self.lengthscale, self.output_variance), (other.lengthscale, other.output_variance))

    def depth(self):
        return 0

    def out_of_bounds(self, constraints):
        return self.lengthscale < constraints['min_lengthscale']
class PP2KernelFamily(BaseKernelFamily):
    # Family object for the isotropic piecewise polynomial kernel of
    # degree 2 (GPML `covPPiso` with degree 2). Python 2 code.

    def from_param_vector(self, params):
        # params unpacks as (lengthscale, output_variance, eff_dimensions).
        lengthscale, output_variance, eff_dimensions = params
        return PP2Kernel(lengthscale, output_variance, eff_dimensions)

    def num_params(self):
        return 3

    def pretty_print(self):
        return colored('P2', self.depth())

    def default(self, eff_dimensions):
        # Count -> list of dimension indices; zeros mean "unset".
        return PP2Kernel(0., 0., range(eff_dimensions))

    def __cmp__(self, other):
        # Families compare equal iff same class.
        assert isinstance(other, KernelFamily)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        return 0

    def depth(self):
        return 0

    def id_name(self):
        return 'PP2'

    @staticmethod
    def description():
        return "Piecewise Polynomial 2"

    @staticmethod
    def params_description():
        return "lengthscale"
class PP2Kernel(BaseKernel):
    # Isotropic piecewise polynomial covariance, degree 2 (GPML `covPPiso`).
    # Structure mirrors PP1Kernel; only the degree and labels differ.

    def __init__(self, lengthscale, output_variance, eff_dimensions):
        self.output_variance = output_variance
        self.lengthscale = lengthscale
        self.eff_dimensions = eff_dimensions

    def family(self):
        return PP2KernelFamily()

    def gpml_kernel_expression(self):
        return '{@covPPiso, 2}'

    def english_name(self):
        return 'P2'

    def id_name(self):
        return 'PP2'

    def param_vector(self):
        # order of args matches GPML
        # Object-dtype array of length 3 (eff_dimensions is a list).
        return np.array([self.lengthscale, self.output_variance, self.eff_dimensions])

    def default_params_replaced(self, sd=1, data_shape=None):
        # Replace zero ("unset") hyperparameters with random draws.
        result = self.param_vector()
        if result[0] == 0:
            # Set lengthscale with input scale
            if np.random.rand() < 0.5:
                result[0] = np.random.normal(loc=data_shape['input_scale'], scale=sd)
            else:
                result[0] = np.random.normal(loc=0, scale=sd)
        if result[1] == 0:
            # Set scale factor with output scale
            if np.random.rand() < 0.5:
                result[1] = np.random.normal(loc=data_shape['output_scale'], scale=sd)
            else:
                result[1] = np.random.normal(loc=0, scale=sd)
        return result

    def copy(self):
        # Shallow copy; eff_dimensions list is shared.
        return PP2Kernel(self.lengthscale, self.output_variance, self.eff_dimensions)

    def __repr__(self):
        return 'PP2Kernel(lengthscale=%f, output_variance=%f, eff_dimensions=%s)' % (self.lengthscale,
            self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')

    def pretty_print(self):
        return colored('P2(ell=%1.1f, sf=%1.1f, dim=%s)' % (self.lengthscale, self.output_variance,
                       ','.join([str(x) for x in self.eff_dimensions])), self.depth())

    def latex_print(self):
        #return 'SE(\\ell=%1.1f, \\sigma=%1.1f)' % (self.lengthscale, self.output_variance)
        #return 'SE(\\ell=%1.1f)' % self.lengthscale
        return 'P2'

    def __cmp__(self, other):
        # Python 2 three-way comparison; class first, then tolerance-shrunk
        # hyperparameter differences (eff_dimensions excluded).
        assert isinstance(other, Kernel)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        differences = [self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]
        differences = map(shrink_below_tolerance, differences)
        return cmp(differences, [0] * len(differences))
        # max_diff = max(np.abs([self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]))
        # return max_diff > CMP_TOLERANCE
        # return cmp((self.lengthscale, self.output_variance), (other.lengthscale, other.output_variance))

    def depth(self):
        return 0

    def out_of_bounds(self, constraints):
        return self.lengthscale < constraints['min_lengthscale']
class PP3KernelFamily(BaseKernelFamily):
    # Family object for the isotropic piecewise polynomial kernel of
    # degree 3 (GPML `covPPiso` with degree 3). Python 2 code.

    def from_param_vector(self, params):
        # params unpacks as (lengthscale, output_variance, eff_dimensions).
        lengthscale, output_variance, eff_dimensions = params
        return PP3Kernel(lengthscale, output_variance, eff_dimensions)

    def num_params(self):
        return 3

    def pretty_print(self):
        return colored('P3', self.depth())

    def default(self, eff_dimensions):
        # Count -> list of dimension indices; zeros mean "unset".
        return PP3Kernel(0., 0., range(eff_dimensions))

    def __cmp__(self, other):
        # Families compare equal iff same class.
        assert isinstance(other, KernelFamily)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        return 0

    def depth(self):
        return 0

    def id_name(self):
        return 'PP3'

    @staticmethod
    def description():
        return "Piecewise Polynomial 3"

    @staticmethod
    def params_description():
        return "lengthscale"
class PP3Kernel(BaseKernel):
    # Isotropic piecewise polynomial covariance, degree 3 (GPML `covPPiso`).
    # Structure mirrors PP1Kernel/PP2Kernel; only degree and labels differ.

    def __init__(self, lengthscale, output_variance, eff_dimensions):
        self.output_variance = output_variance
        self.lengthscale = lengthscale
        self.eff_dimensions = eff_dimensions

    def family(self):
        return PP3KernelFamily()

    def gpml_kernel_expression(self):
        return '{@covPPiso, 3}'

    def english_name(self):
        return 'P3'

    def id_name(self):
        return 'PP3'

    def param_vector(self):
        # order of args matches GPML
        # Object-dtype array of length 3 (eff_dimensions is a list).
        return np.array([self.lengthscale, self.output_variance, self.eff_dimensions])

    def default_params_replaced(self, sd=1, data_shape=None):
        # Replace zero ("unset") hyperparameters with random draws.
        result = self.param_vector()
        if result[0] == 0:
            # Set lengthscale with input scale
            if np.random.rand() < 0.5:
                result[0] = np.random.normal(loc=data_shape['input_scale'], scale=sd)
            else:
                result[0] = np.random.normal(loc=0, scale=sd)
        if result[1] == 0:
            # Set scale factor with output scale
            if np.random.rand() < 0.5:
                result[1] = np.random.normal(loc=data_shape['output_scale'], scale=sd)
            else:
                result[1] = np.random.normal(loc=0, scale=sd)
        return result

    def copy(self):
        # Shallow copy; eff_dimensions list is shared.
        return PP3Kernel(self.lengthscale, self.output_variance, self.eff_dimensions)

    def __repr__(self):
        return 'PP3Kernel(lengthscale=%f, output_variance=%f, eff_dimensions=%s)' % (self.lengthscale,
            self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')

    def pretty_print(self):
        return colored('P3(ell=%1.1f, sf=%1.1f, dim=%s)' % (self.lengthscale, self.output_variance,
                       ','.join([str(x) for x in self.eff_dimensions])), self.depth())

    def latex_print(self):
        #return 'SE(\\ell=%1.1f, \\sigma=%1.1f)' % (self.lengthscale, self.output_variance)
        #return 'SE(\\ell=%1.1f)' % self.lengthscale
        return 'P3'

    def __cmp__(self, other):
        # Python 2 three-way comparison; class first, then tolerance-shrunk
        # hyperparameter differences (eff_dimensions excluded).
        assert isinstance(other, Kernel)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        differences = [self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]
        differences = map(shrink_below_tolerance, differences)
        return cmp(differences, [0] * len(differences))
        # max_diff = max(np.abs([self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]))
        # return max_diff > CMP_TOLERANCE
        # return cmp((self.lengthscale, self.output_variance), (other.lengthscale, other.output_variance))

    def depth(self):
        return 0

    def out_of_bounds(self, constraints):
        return self.lengthscale < constraints['min_lengthscale']
class MaternKernelFamily(BaseKernelFamily):
    # Family object for the isotropic Matern kernel (GPML `covMaterniso`
    # with d=1, i.e. nu = 1/2). Python 2 code.

    def from_param_vector(self, params):
        # params unpacks as (lengthscale, output_variance, eff_dimensions).
        lengthscale, output_variance, eff_dimensions = params
        return MaternKernel(lengthscale, output_variance, eff_dimensions)

    def num_params(self):
        return 3

    def pretty_print(self):
        return colored('MT', self.depth())

    def default(self, eff_dimensions):
        # Count -> list of dimension indices; zeros mean "unset".
        return MaternKernel(0., 0., range(eff_dimensions))

    def __cmp__(self, other):
        # Families compare equal iff same class.
        assert isinstance(other, KernelFamily)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        return 0

    def depth(self):
        return 0

    def id_name(self):
        return 'MT'

    @staticmethod
    def description():
        # LaTeX-escaped "Matérn".
        return "Mat\\'{e}rn"

    @staticmethod
    def params_description():
        return "lengthscale"
class MaternKernel(BaseKernel):
    # Isotropic Matern covariance with nu = 1/2 (GPML `covMaterniso`, d=1).
    # Structure mirrors the PP*Kernel classes above.

    def __init__(self, lengthscale, output_variance, eff_dimensions):
        # NOTE: assignment order differs from the PP*Kernel classes
        # (lengthscale first); behaviour is identical.
        self.lengthscale = lengthscale
        self.output_variance = output_variance
        self.eff_dimensions = eff_dimensions

    def family(self):
        return MaternKernelFamily()

    def gpml_kernel_expression(self):
        return '{@covMaterniso, 1}' # nu = 0.5

    def english_name(self):
        return 'MT'

    def id_name(self):
        return 'MT'

    def param_vector(self):
        # order of args matches GPML
        # Object-dtype array of length 3 (eff_dimensions is a list).
        return np.array([self.lengthscale, self.output_variance, self.eff_dimensions])

    def default_params_replaced(self, sd=1, data_shape=None):
        # Replace zero ("unset") hyperparameters with random draws.
        result = self.param_vector()
        if result[0] == 0:
            # Set lengthscale with input scale
            if np.random.rand() < 0.5:
                result[0] = np.random.normal(loc=data_shape['input_scale'], scale=sd)
            else:
                result[0] = np.random.normal(loc=0, scale=sd)
        if result[1] == 0:
            # Set scale factor with output scale
            if np.random.rand() < 0.5:
                result[1] = np.random.normal(loc=data_shape['output_scale'], scale=sd)
            else:
                result[1] = np.random.normal(loc=0, scale=sd)
        return result

    def copy(self):
        # Shallow copy; eff_dimensions list is shared.
        return MaternKernel(self.lengthscale, self.output_variance, self.eff_dimensions)

    def __repr__(self):
        return 'MaternKernel(lengthscale=%f, output_variance=%f, eff_dimensions=%s)' % (self.lengthscale,
            self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')

    def pretty_print(self):
        return colored('MT(ell=%1.1f, sf=%1.1f, dim=%s)' % (self.lengthscale, self.output_variance,
                       ','.join([str(x) for x in self.eff_dimensions])), self.depth())

    def latex_print(self):
        #return 'SE(\\ell=%1.1f, \\sigma=%1.1f)' % (self.lengthscale, self.output_variance)
        #return 'SE(\\ell=%1.1f)' % self.lengthscale
        return 'MT'

    def __cmp__(self, other):
        # Python 2 three-way comparison; class first, then tolerance-shrunk
        # hyperparameter differences (eff_dimensions excluded).
        assert isinstance(other, Kernel)
        if cmp(self.__class__, other.__class__):
            return cmp(self.__class__, other.__class__)
        differences = [self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]
        differences = map(shrink_below_tolerance, differences)
        return cmp(differences, [0] * len(differences))
        # max_diff = max(np.abs([self.lengthscale - other.lengthscale, self.output_variance - other.output_variance]))
        # return max_diff > CMP_TOLERANCE
        # return cmp((self.lengthscale, self.output_variance), (other.lengthscale, other.output_variance))

    def depth(self):
        return 0

    def out_of_bounds(self, constraints):
        return self.lengthscale < constraints['min_lengthscale']
class ChangeKernelFamily(BaseKernelFamily):
def from_param_vector(self, params):
steepness, location, eff_dimensions = params
return ChangeKernel(steepness, | |
df[~df.fold.isin(excluded_folds)]
# if not OOD_flag:
val_df = df[df.fold == val_fold]
# else:
# val_df = OOD_df[OOD_df.fold == val_fold]
train_df.reset_index(drop=True, inplace=True)
val_df.reset_index(drop=True, inplace=True)
print(f'The length of the training is {len(train_df)}')
print(f'The length of the validation is {len(val_df)}')
print(f'The length of the inference is {len(inf_df)}')
model.cuda()
print(f'\nStarted {training_mode}-ing!')
for epoch in range(loaded_epoch, EPOCHS):
print(f'Training Epoch: {epoch}')
running_loss = 0.0
model.train()
train_acc = 0
total_dice = 0
new_seed = np.random.randint(10000)
# Shuffle training and validation:
new_train_df = reshuffle_csv(og_csv=train_df, batch_size=batch_size)
new_val_df = reshuffle_csv(og_csv=val_df, batch_size=batch_size)
# Val test
if val_test:
new_train_df = new_train_df[:20]
# And generate new loaders
patches_training_set = BespokeDataset(new_train_df, training_transform, patch_size, batch_seed=new_seed,
queue_length=batch_size)
train_loader = DataLoader(patches_training_set, batch_size=batch_size, shuffle=False)
patches_validation_set = BespokeDataset(new_val_df, validation_transform, patch_size, batch_seed=new_seed,
queue_length=val_batch_size)
val_loader = DataLoader(patches_validation_set, batch_size=val_batch_size, shuffle=False)
# Early stopping
best_val_dice = 0.0
best_counter = 0
# Patch test
if patch_test and epoch == 0 and fold == 0:
visualise_batch_patches(loader=train_loader, bs=batch_size, ps=patch_size, comparisons=4)
if training_mode != 'inference':
for i, sample in enumerate(train_loader):
if i != 0:
print(f'The time between iterations was {time.time() - start}')
start = time.time()
images = sample['mri']['data'].cuda()
labels = sample['seg']['data'].cuda()
physics = sample['physics'].cuda().float().squeeze()
names = sample['mri']['path']
names = [os.path.basename(name) for name in names]
# print(f'The physics are {physics}')
# print(f'The physics shapes are {physics.shape}')
# print(f'The image shapes are {images.shape}')
# print(f'The names are {names}')
# Need to replace names to include physics (3 decimal points should suffice)
new_names = []
affine_array = np.array([[-1, 0, 0, 89],
[0, 1, 0, -125],
[0, 0, 1, -71],
[0, 0, 0, 1]])
for k in range(4):
if physics_experiment_type == 'MPRAGE':
new_names.append(names[k].rsplit('.nii.gz')[0] + f'_TI_{physics[k]:.5f}' + '.nii.gz')
elif physics_experiment_type == 'SPGR':
new_names.append(names[k].rsplit('.nii.gz')[0] + f'_TR_{physics[k, 0]:.5f}'
+ f'_TE_{physics[k, 1]:.5f}'
+ f'_FA_{physics[k, 2]:.2f}'
+ '.nii.gz')
# save_img(images[k, ...].squeeze().detach().cpu().numpy(), affine_array,
# os.path.join(FIG_PATH, os.path.basename(new_names[k])))
# print(f'The min and max of the images is {images[k, ...].squeeze().detach().cpu().numpy().min()},'
# f'{images[k, ...].squeeze().detach().cpu().numpy().max()}')
names = new_names
# print(f'The new names are {names}')
# Zero grad optimizer
optimizer.zero_grad()
# print(images.shape, labels.shape, physics.shape)
# Pass images to the model
if not uncertainty_flag:
if physics_flag:
# Calculate physics extensions
processed_physics = physics_preprocessing(physics, physics_experiment_type)
# print(f'The physics are {names}, {physics}, {processed_physics}')
# print(f'Processed physics shape is {processed_physics.shape}')
# print(processed_physics.shape, images.shape)
out, features_out = model(images, processed_physics)
else:
out, features_out = model(images)
# Loss
eps = 1e-10
loss_start = time.time()
data_loss = F.binary_cross_entropy_with_logits(out + eps, labels, reduction='mean')
loss_end = time.time()
else:
if physics_flag:
# Calculate physics extensions
processed_physics = physics_preprocessing(physics, physics_experiment_type)
# print(f'Processed physics shape is {processed_physics.shape}')
out, unc_out, features_out = model(images, processed_physics)
# print(f'Images shape is {images.shape}')
else:
out, unc_out, features_out = model(images)
loss_start = time.time()
data_loss, data_vol_std = corrected_paper_stochastic_loss(out, unc_out, labels,
num_passes=num_loss_passes)
loss_end = time.time()
if training_mode == 'standard':
loss = data_loss
total_feature_loss = 0.1 * dynamic_calc_feature_loss(
features_out, tm='stratification') # NOTE: This needs to be the feature tensor!
writer.add_scalar('Loss/Feature_loss', total_feature_loss, running_iter)
elif training_mode == 'stratification' or training_mode == 'kld':
total_feature_loss = 0.1 * dynamic_calc_feature_loss(
features_out, tm=training_mode) # NOTE: This needs to be the feature tensor!
# regulatory_ratio = data_loss / total_feature_loss
loss = data_loss + stratification_epsilon * total_feature_loss / (
1 + dynamic_stratification_checker(labels) * float(1e9)) ** 2
writer.add_scalar('Loss/Feature_loss', total_feature_loss, running_iter)
# Softmax to convert to probabilities
out = torch.softmax(out, dim=1)
# pGM = PairwiseMeasures(labels[:, 0, ...].detach().cpu().numpy(), out[:, 0, ...].detach().cpu().numpy())
# print(pGM.dice_score())
pGM_dice = soft_dice_score(labels.cpu().detach().numpy(), out.cpu().detach().numpy())
# print(pGM_dice)
# for param in model.parameters():
# param.grad = None
# optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.detach().cpu().item()
# Name check: Shuffling sanity check
if i == 0:
print(f'The test names are: {names[0]}, {names[-2]}')
# Terminal logging
print(f"iter: {running_iter}, Loss: {loss.detach().item():.4f}, Dice: {pGM_dice:.3f}, "
f"strat: {stratification_checker(labels):.3f}"
f" ({loss_end - loss_start:.3f} s) ({(time.time() - start):.3f} s)")
# Writing to tensorboard
if running_iter % 50 == 0:
# Normalise images
images = normalise_image(images.cpu().detach().numpy())
out = normalise_image(out.cpu().detach().numpy())
labels = normalise_image(labels.cpu().detach().numpy())
writer.add_scalar('Loss/train', loss.detach().item(), running_iter)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=images[0, ...],
tag=f'Visuals/Images_Fold_{fold}', max_out=patch_size // 2,
scale_factor=255, global_step=running_iter)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=labels[0, 0, ...][None, ...],
tag=f'Visuals/Labels_Fold_{fold}', max_out=patch_size//2,
scale_factor=255, global_step=running_iter)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=out[0, 0, ...][None, ...],
tag=f'Visuals/Output_Fold_{fold}', max_out=patch_size//2,
scale_factor=255, global_step=running_iter)
if uncertainty_flag:
unc_out = unc_out.cpu().detach().numpy()
unc_out = normalise_image(unc_out)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=unc_out[0, 0, ...][None, ...],
tag=f'Validation/Unc_Output_Fold_{fold}', max_out=patch_size // 4,
scale_factor=255, global_step=running_iter)
running_iter += 1
del sample, images, labels, physics, names, out, features_out
if uncertainty_flag:
del unc_out
import gc
gc.collect()
print("Epoch: {}, Loss: {},\n Train Dice: Not implemented".format(epoch, running_loss))
print('Validation step')
model.eval()
val_metric = DiceLoss(include_background=True, to_onehot_y=False, sigmoid=False, softmax=True)
val_running_loss = 0
# correct = 0
val_counter = 0
names_collector = []
metric_collector = []
metric_collector2 = []
gm_volumes_collector = []
gm_volumes_collector2 = []
CoV_collector = []
CoV_collector2 = []
val_start = time.time()
if epoch % validation_interval == 0:
with torch.no_grad():
for val_sample in val_loader:
val_images = val_sample['mri']['data'].squeeze().cuda()
val_names = val_sample['mri']['path']
# Readjust dimensions to match expected shape for network
# if len(val_images.shape) == 3:
# val_images = torch.unsqueeze(torch.unsqueeze(val_images, 0), 0)
# elif len(val_images.shape) == 4:
# val_images = torch.unsqueeze(val_images, 0)
val_labels = val_sample['seg']['data'].squeeze().cuda()
# print(f'val_images shape is {val_images.shape}')
# print(f'val_labels shape is {val_labels.shape}')
# Readjust dimensions to match expected shape
if len(val_labels.shape) == 4:
val_labels = torch.unsqueeze(val_labels, 1)
if len(val_images.shape) == 4:
val_images = torch.unsqueeze(val_images, 1)
val_physics = val_sample['physics'].squeeze().cuda().float()
val_names = [os.path.basename(val_name) for val_name in val_names]
new_names = []
affine_array = np.array([[-1, 0, 0, 89],
[0, 1, 0, -125],
[0, 0, 1, -71],
[0, 0, 0, 1]])
for k in range(4):
if physics_experiment_type == 'MPRAGE':
new_names.append('val_' + val_names[k].rsplit('.nii.gz')[0]
+ f'_TI_{val_physics[k]:.5f}'
+ '.nii.gz')
elif physics_experiment_type == 'SPGR':
new_names.append('val_' + val_names[k].rsplit('.nii.gz')[0]
+ f'_TR_{val_physics[k, 0]:.5f}'
+ f'_TE_{val_physics[k, 1]:.5f}'
+ f'_FA_{val_physics[k, 2]:.2f}'
+ '.nii.gz')
# save_img(val_images[k, ...].squeeze().detach().cpu().numpy(), affine_array,
# os.path.join(FIG_PATH, os.path.basename(new_names[k])))
val_names = new_names
# Small name check
# print(f'Val names are {val_names}')
# Pass images to the model
if not uncertainty_flag:
if physics_flag:
# Calculate physics extensions
val_processed_physics = physics_preprocessing(val_physics, physics_experiment_type)
# print(f'The val physics are {val_names}, {val_physics}, {val_processed_physics}')
out, features_out = model(val_images, val_processed_physics)
else:
out, features_out = model(val_images)
val_data_loss = F.binary_cross_entropy_with_logits(out, val_labels, reduction="mean")
else:
if physics_flag:
# Calculate physics extensions
val_processed_physics = physics_preprocessing(val_physics, physics_experiment_type)
# print(f'Processed physics shape is {processed_physics.shape}')
out, unc_out, features_out = model(val_images, val_processed_physics)
else:
out, unc_out, features_out = model(val_images)
val_data_loss, val_data_vol_std = corrected_paper_stochastic_loss(out, unc_out, val_labels,
num_passes=num_loss_passes)
# Loss depends on training mode
if training_mode == 'standard':
val_loss = val_data_loss
elif training_mode == 'stratification' or training_mode == 'kld':
val_total_feature_loss = 0.1 * dynamic_calc_feature_loss(
features_out, tm=training_mode) # NOTE: This needs to be the feature tensor!
# regulatory_ratio = val_data_loss / val_total_feature_loss
val_loss = val_data_loss + stratification_epsilon * val_total_feature_loss / (
1 + dynamic_stratification_checker(val_labels) * float(1e9)) ** 2
# print(f"out val shape is {out.shape}") # Checking for batch dimension inclusion or not
out = torch.softmax(out, dim=1)
gm_out = out[:, 0, ...]
val_running_loss += val_loss.detach().item()
# Metric calculation
# print(pGM_dice)
# dice_performance = val_metric.forward(out, val_labels)
gm_volume = gm_out.view(4, -1).sum(1)
names_collector += val_names
gm_volumes_collector += gm_volume
# Calculate CoVs
gm_volume_np = gm_volume.cpu().detach().numpy()
val_CoV = np.std(gm_volume_np) / np.mean(gm_volume_np)
for i in range(val_batch_size):
pGM_dice = soft_dice_score(val_labels[i, ...].cpu().detach().numpy(), out[i, ...].cpu().detach().numpy())
metric_collector += [pGM_dice.tolist()]
CoV_collector.append(val_CoV)
# writer.add_scalar('Loss/Val_Feature_loss', val_total_feature_loss, running_iter)
# Convert to numpy arrays
val_images = val_images.cpu().detach().numpy()
val_labels = val_labels.cpu().detach().numpy()
val_images = normalise_image(val_images)
out = out.cpu().detach().numpy()
out = normalise_image(out)
val_counter += val_batch_size #Should probably be one to properly match training
# Cleaning up
# del val_sample, val_images, val_labels, val_physics, val_names
print(f'This validation step took {time.time() - val_start} s')
# Write to tensorboard
writer.add_scalar('Loss/val', val_running_loss / val_counter, running_iter)
writer.add_scalar('Loss/dice_val', np.mean(metric_collector), running_iter)
writer.add_scalar('Loss/CoV', np.mean(CoV_collector), running_iter)
writer.add_scalar('Loss/CoV2', np.mean(CoV_collector2), running_iter)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=val_images[0, ...],
tag=f'Validation/Images_Fold_{fold}', max_out=patch_size // 4,
scale_factor=255, global_step=running_iter)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=val_labels[0, 0, ...][None, ...],
tag=f'Validation/Labels_Fold_{fold}', max_out=patch_size // 4,
scale_factor=255, global_step=running_iter)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=out[0, 0, ...][None, ...],
tag=f'Validation/Output_Fold_{fold}', max_out=patch_size // 4,
scale_factor=255, global_step=running_iter)
if uncertainty_flag:
unc_out = unc_out.cpu().detach().numpy()
unc_out = normalise_image(unc_out)
img2tensorboard.add_animated_gif(writer=writer, image_tensor=unc_out[0, 0, ...][None, ...],
tag=f'Validation/Unc_Output_Fold_{fold}', max_out=patch_size // 4,
scale_factor=255, global_step=running_iter)
# Check if current val dice is better than previous best
# true_dice = np.mean(metric_collector)
# true_val = val_running_loss / val_counter # alternative
# if true_dice > best_val_dice:
# best_val_dice = true_dice
# append_string = 'not_best'
# best_counter = 0
# else:
# append_string = 'nb'
# best_counter += 1
# Aggregation
running_val_metric.append(np.mean(metric_collector))
running_val_names.append(names_collector)
running_gm_volumes.append(gm_volumes_collector)
running_gm_volumes2.append(gm_volumes_collector2)
# # Save model
# if SAVE and append_string == 'best':
# | |
NameError(
"Operator name %r is already taken "
"(node=%r)." % (
node.name, node))
self.onnx_operator_names.add(node.name)
def rename_onnx_name(self, old_name, new_name):
    """Move the variable registered under *old_name* to key *new_name*.

    Raises RuntimeError when *new_name* is already taken or *old_name*
    is not registered.
    """
    if new_name in self.variables:
        raise RuntimeError(
            "Name %r already in variables (%r)." % (
                new_name, self.variables[new_name]))
    if old_name not in self.variables:
        raise RuntimeError(
            "Unable to find name %r in variables." % old_name)
    logger.debug(
        '[Scope] update onnx_name, from %r to %r',
        old_name, new_name)
    # pop() performs the reassignment and removal in one step.
    self.variables[new_name] = self.variables.pop(old_name)
def declare_local_input(self, raw_name, type=None, prepend=False,
                        rename=True):
    """
    Declares a local variable through `declare_local_variable`
    and additionally records it as a graph input.
    """
    variable = self.declare_local_variable(
        raw_name, type=type, prepend=prepend, rename=rename)
    self.input_variables.append(variable)
    return variable
def declare_local_output(self, raw_name, type=None, prepend=False,
                         missing_type=False):
    """
    Declares a local variable through `declare_local_variable`
    and additionally records it as a graph output.
    """
    variable = self.declare_local_variable(
        raw_name, type=type, prepend=prepend,
        missing_type=missing_type)
    self.output_variables.append(variable)
    return variable
def declare_local_operator(self, type, raw_model=None):
    """
    Creates, registers and returns a new local operator.
    """
    op_name = self.get_unique_operator_name(str(type))
    op = Operator(op_name, self.name, type, raw_model,
                  self.target_opset, scope_inst=self)
    self.operators[op_name] = op
    return op
def _get_allowed_options(self, model, fail=True):
    """Return the set of converter options allowed for *model*.

    :param model: model instance being converted
    :param fail: when True, raise ``NotImplementedError`` for an
        unregistered model (or when no models are registered at all);
        when False, return ``{}`` instead
    :return: dictionary of allowed options
    """
    if self.registered_models is not None:
        if type(model) not in self.registered_models['aliases']:
            if fail:
                raise NotImplementedError(
                    "No registered models, no known allowed options "
                    "for model '{}'.".format(model.__class__.__name__))
            return {}
        alias = self.registered_models['aliases'][type(model)]
        conv = self.registered_models['conv'][alias]
        allowed = conv.get_allowed_options()
        return allowed
    # Bug fix: the original raised unconditionally here, ignoring
    # ``fail=False``; honour the flag for consistency with the
    # unregistered-model branch above.
    if fail:
        raise NotImplementedError(
            "No registered models, no known allowed options "
            "for model '{}'.".format(model.__class__.__name__))
    return {}
def add_options(self, model_id, options):
    """
    Registers converter options for one model.  For example,
    ``add_options(id(clr), {'raw_scores': True})`` tells the converter
    associated to ``clr`` to use raw scores instead of probabilities.

    :param model_id: class or ``id(instance)``
    :param options: dictionary with the new values (``None`` is a no-op)
    """
    if options is None:
        return
    if self.options is None:
        self.options = {}
    current = self.options.get(model_id)
    if current is None:
        current = {}
        self.options[model_id] = current
    current.update(options)
def get_options(self, model, default_values=None, fail=True):
    """
    Returns additional options for a model, looked up first by class
    then by ``id(model)``.

    :param model: model being converted
    :param default_values: default options (modified by the function)
    :param fail: fails if an option is not found
    :return: dictionary
    """
    allowed = self._get_allowed_options(model, fail=fail)
    return _build_options(
        model, self.options, default_values, allowed, fail=fail)
def replace_raw_operator(self, op1, op2, alias):
    """
    Replaces every raw operator op1 by op2 (matched via ``id()``)
    and retags the operator with *alias*.
    """
    for v in self.operators.values():
        if id(v.raw_operator) == id(op1):
            # Fixed: the original logged id(v.raw_operator) and id(op1),
            # which are always equal inside this branch; log the old and
            # the new id instead.
            logger.debug(
                '[Scope] replace %d by %d in %r.',
                id(op1), id(op2), v)
            v.raw_operator = op2
            v.type = alias
class Topology:
"""
Holds instances on :class:`Scope <skl2onnx.common._topology.Scope>` and
:class:`SklearnModelContainer
<skl2onnx.common._container.SklearnModelContainer>`.
These are filled by the converters while a pipeline is being converted.
"""
def __init__(self, model, default_batch_size=1, initial_types=None,
             target_opset=None, custom_conversion_functions=None,
             custom_shape_calculators=None, registered_models=None):
    """
    Builds a *Topology*, the intermediate representation of a
    computational graph.

    :param model: RawModelContainer object (or subclass) wrapping the
        original model
    :param default_batch_size: batch size prepended to scalar and array
        types coming from CoreML, usually 1 or None
    :param initial_types: list of (variable name, type) pairs, with the
        types defined in *data_types.py*
    :param custom_conversion_functions: dictionary of user conversion
        functions, keyed by model type
    :param custom_shape_calculators: dictionary of user shape
        calculators, keyed by model type
    :param registered_models: registered models (mandatory)
    """
    self.scopes = []
    self.raw_model = model
    self.scope_names = set()
    # Keep the caller's list object when one is given (no copy).
    self.initial_types = initial_types if initial_types else list()
    self.default_batch_size = default_batch_size
    self.target_opset = target_opset
    self.custom_conversion_functions = custom_conversion_functions or {}
    self.custom_shape_calculators = custom_shape_calculators or {}
    # Both user dictionaries must be keyed by type objects, not names.
    for mapping, label in (
            (self.custom_conversion_functions, "custom_conversion_functions"),
            (self.custom_shape_calculators, "custom_shape_calculators")):
        for key in mapping:
            if not callable(key):
                raise TypeError("Keys in %s must be "
                                "types not strings." % label)
    # A map of local overwritten model aliases.
    self.model_aliases = {
        mtype: "{}_{}".format(mtype.__name__, id(self))
        for mtype in (set(self.custom_conversion_functions)
                      | set(self.custom_shape_calculators))
    }
    # Registered models are mandatory.
    if registered_models is None:
        raise AssertionError()
    self.registered_models = registered_models
@property
def scope(self):
    """The unique scope of this topology; raises unless exactly one exists."""
    count = len(self.scopes)
    if count != 1:
        raise RuntimeError(
            "Only one scope is allowed not %d." % count)
    return self.scopes[0]
@staticmethod
def _generate_unique_name(seed, existing_names):
    """
    Produce a unique string based on the seed.

    :param seed: a string
    :param existing_names: a set containing strings which cannot be
        produced; the returned name is added to it
    :return: a string similar to the seed
    """
    if seed == '':
        raise ValueError('Name seed must be a non-empty string.')
    # C-style identifier: only word characters, never a leading digit.
    seed = re.sub('[^\\w+]', '_', seed)
    if re.match('^[0-9]', seed):
        seed = '_' + seed
    # Return the seed itself when unseen, otherwise append the first
    # free numeric suffix.
    name = seed
    suffix = 1
    while name in existing_names:
        name = seed + str(suffix)
        suffix += 1
    existing_names.add(name)
    return name
def get_unique_scope_name(self, seed):
    """Return a scope name unique within this topology."""
    taken = self.scope_names
    return Topology._generate_unique_name(seed, taken)
def declare_scope(self, seed, parent_scopes=None, options=None,
                  naming=None):
    """
    Creates the single :class:`Scope <skl2onnx.common._topology.Scope>`
    of this topology and declares the model inputs on it.
    """
    if self.scopes:
        raise RuntimeError(
            "Only one scope can be created.")
    scope = Scope(
        self.get_unique_scope_name(seed), target_opset=self.target_opset,
        custom_shape_calculators=self.custom_shape_calculators,
        options=options, registered_models=self.registered_models,
        naming=naming)
    # Declare the inputs of the scikit-learn model being converted.
    for var_name, initial_type in self.initial_types:
        scope.declare_local_input(var_name, initial_type, rename=False)
    self.scopes.append(scope)
    return scope
def unordered_operator_iterator(self):
    """Yields every operator of every scope, in no particular order."""
    for scope in self.scopes:
        yield from scope.operators.values()
def unordered_variable_iterator(self):
    """Yields every variable of every scope, in no particular order."""
    for scope in self.scopes:
        yield from scope.variables.values()
def call_converter(self, operator, container, verbose=0):
    """Calls the converter for *operator*.

    Resolution order: user converter keyed by the raw model class,
    user converter keyed by the operator alias, the model's own
    ``onnx_converter`` hook, then the global registry.
    """
    mtype = type(operator.raw_operator)
    if mtype in self.custom_conversion_functions:
        # User-supplied converter keyed by the raw model class.
        conv = self.custom_conversion_functions[mtype]
    elif operator.type in self.custom_conversion_functions:
        # User-supplied converter keyed by the operator alias.
        conv = self.custom_conversion_functions[operator.type]
    elif hasattr(operator.raw_operator, "onnx_converter"):
        # The model object provides its own converter.
        conv = operator.raw_operator.onnx_converter()
    else:
        # Convert the selected operator into some ONNX objects and
        # save them into the container
        try:
            conv = _registration.get_converter(operator.type)
        except ValueError:
            raise MissingConverter(
                "Unable to find converter for alias '{}' type "
                "'{}'. You may raise an issue at "
                "https://github.com/onnx/sklearn-onnx/issues."
                "".format(operator.type,
                          type(getattr(operator, 'raw_model', None))))
    container.validate_options(operator)
    if verbose > 0:
        print("[call_converter] call converter for %r." % operator.type)
    logger.debug(
        "[Conv] call %r fed %r - %r", operator,
        "".join(str(i.is_fed) for i in operator.inputs),
        "".join(str(i.is_fed) for i in operator.outputs))
    conv(self.scopes[0], operator, container)
    logger.debug("[Conv] end - %r", operator)
def call_shape_calculator(self, operator):
    """Calls the shape calculator for *operator*, falling back to
    generic type inference when none is registered."""
    mtype = type(operator.raw_operator)
    source = ""
    shape_calc = None
    if mtype in self.custom_shape_calculators:
        # overwritten operator.
        source = 'custom'
        shape_calc = self.custom_shape_calculators[mtype]
    elif operator.type in self.custom_shape_calculators:
        source = 'custom'
        shape_calc = self.custom_shape_calculators[operator.type]
    elif hasattr(operator.raw_operator, "onnx_shape_calculator"):
        source = 'onnx_shape_calculator'
        shape_calc = operator.raw_operator.onnx_shape_calculator()
    if shape_calc is None:
        # Nothing registered anywhere: infer output types generically.
        logger.debug('[Shape2] call infer_types for %r', operator)
        operator.infer_types()
        return
    logger.debug(
        "[Shape1] %r fed %r - %r (source=%r)", operator,
        ",".join(str(i.is_fed) for i in operator.inputs),
        ",".join(str(i.is_fed) for i in operator.outputs),
        source)
    shape_calc(operator)
def _initialize_graph_status_for_traversing(self):
    """
    Resets the *is_fed* / *is_evaluated* status of every variable and
    operator before traversing the graph. Only used by convert_operators.
    """
    if len(self.scopes) != 1:
        raise RuntimeError(
            "Only one scope is allowed not %d." % len(self.scopes))
    input_names = {v.onnx_name for v in self.scopes[0].input_variables}
    if not input_names:
        raise RuntimeError("No detected inputs.")
    # Only graph inputs start out fed; everything else must be computed.
    for variable in self.unordered_variable_iterator():
        variable.init_status(is_fed=variable.onnx_name in input_names)
    for operator in self.unordered_operator_iterator():
        operator.init_status(is_evaluated=False)
def _propagate_status(self, operator, container, fed_variables,
verbose=0):
"""
Propagates status *is_fed* based on output variable
and node added in the container.
"""
if verbose > 1:
print("[_propagate_status] after op=%r" % operator)
vars = {}
for node in container.nodes:
for i in node.input:
if i not in vars:
vars[i] = []
vars[i].append(node)
if verbose > 1:
print("[_propagate_status] newly fed=%r" % list(
v.onnx_name for v in operator.outputs if v.is_fed))
stack = list(fed_variables)
scope = self.scopes[0]
while len(stack) > 0:
nodes = {}
for name in stack:
if name not in vars:
continue
for n in vars[name]:
nodes[id(n)] = n
stack = []
for node in nodes.values():
if all(fed_variables.get(n, False) for n in node.input):
for o in node.output:
if o not in fed_variables:
if verbose > 1:
print("[_propagate_status] add=%r" % o)
fed_variables[o] = o
stack.append(o)
if o in scope.variables:
var = scope.variables[o]
var.init_status(is_fed=True)
| |
# model.py
from typing import Dict
from PySide2.QtCore import Signal, Slot, QObject, QTimer
import cv2, h5py, math
import numpy as np
import matplotlib.pyplot as plt
# YOLOv4 & DeepSORT code is taken from :
# https://github.com/theAIGuysCode/yolov4-deepsort
# deep sort imports
from deep_sort import preprocessing, nn_matching
from deep_sort import tracker
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
import tensorflow as tf
# Enable on-demand GPU memory growth so TensorFlow does not reserve the
# whole GPU at start-up (only applied when at least one GPU is present).
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.python.saved_model import tag_constants
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
from core.yolov4 import filter_boxes
from core.config import cfg
import core.utils as utils
from tools import generate_detections as gdet
# Upper bound on detections handled per frame.
MAX_DETECTION_NUM = 50
# DeepSORT appearance-metric gallery budget (None = unlimited).
nn_budget = None
# Non-max-suppression overlap threshold used by the detector pipeline.
nms_max_overlap = 1.0
# Square input resolution fed to the YOLOv4 network.
input_size = 416
# DeepSORT appearance-descriptor network weights.
model_filename = 'model_data/mars-small128.pb'
# Saved YOLOv4 model directory.
weights_path = './checkpoints/yolov4-416'
# Bidirectional lookup between vehicle class names and their string ids.
class_id_map = {
    'none': '0',
    'truck': '1',
    'car': '2',
    'bus': '3',
}
# Add the reverse (id -> name) entries so lookups work both ways.
class_id_map.update({v: k for k, v in class_id_map.items()})
class Model(QObject):
    # Qt signals emitted by the processing model:
    frame_update_signal = Signal(np.ndarray, int)  # (RGB frame, frame index)
    max_frame_update_signal = Signal(int)  # total number of cached frames
    process_done_signal = Signal()  # a counting/inference run finished
    error_signal = Signal(str)  # human-readable error message
    vehicle_count_signal = Signal(int, int, int, np.ndarray)  # (class id, uid, count, crop)
def __init__(self):
    """Initialize default detector/tracker parameters and counting state."""
    super().__init__()
    # TensorFlow session / model handles (populated lazily elsewhere).
    self.sess = None
    self.infer = None
    self.encoder = None
    self.saved_model_loaded = None
    # Detection / tracking thresholds.
    self.max_cosine_distance = 0.4
    self.iou_thresh = 0.45
    self.score_thresh = 0.7
    # Input/output paths and loaded resources.
    self.input_video_path = ''
    self.output_video_path = ''
    self.output_data_path = ''
    self.mask_path = ''
    self.cache_data = None
    self.vid = None
    # Per-class tracker state (filled by initialize_counting below).
    self.detected_vehicles = None
    self.frame_counter = 0
    # Finish line as (x, y, w, h); used by counting method 1.
    self.finishLine = (0, 0, 0, 0)
    self.stop_inference = True
    self.stop_counting = True
    # 0 = vector-filter counting, 1 = finish-line counting.
    self.count_method = 0
    self.imgMask = None
    self.initialize_counting()
    # Colour map used when drawing bounding boxes.
    cmap = plt.get_cmap('tab20b')
    self.colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
#======================= Setters ===========================
def initialize_counting(self):
    """Reset the per-class tracker dictionaries used for counting."""
    self.detected_vehicles = {cid: {} for cid in class_id_map.values()}
def setInputVideoPath(self, path):
    """Open the input video and emit its first frame for preview.

    Bug fix: the success flag of ``read()`` was previously ignored, so
    an unreadable video crashed in ``cv2.cvtColor``; emit error_signal
    instead.
    """
    self.input_video_path = path
    self.vid = cv2.VideoCapture(self.input_video_path)
    success, frame = self.vid.read()
    if not success:
        self.error_signal.emit('Unable to read from video: %s' % path)
        return
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    self.frame_update_signal.emit(frame, 0)
def setOutputVideoPath(self, path):
    """Record where the annotated output video should be written."""
    self.output_video_path = path
def setOutputDataPath(self, path):
    """Record where the per-run counting data should be written."""
    self.output_data_path = path
def setCacheDataPath(self, path):
    """Load cached detection data (HDF5 'dataset_1') and broadcast the
    total frame count through max_frame_update_signal.

    Bug fix: the HDF5 file handle was previously never closed; a
    context manager now guarantees it is released.
    """
    self.cache_data_path = path
    with h5py.File(self.cache_data_path, 'r') as cache:
        self.cache_data = np.array(cache.get('dataset_1'))
    self.max_frame_update_signal.emit(self.cache_data.shape[0])
def setMaskFile(self, path):
    """Load the image mask (HDF5 dataset 'mask') used to crop frames.

    Bug fix: the HDF5 file handle was previously never closed; a
    context manager now guarantees it is released.
    """
    self.mask_path = path
    with h5py.File(self.mask_path, 'r') as mask_file:
        self.imgMask = np.array(mask_file.get('mask'))
def saveMask(self, path, mask):
    """Adopt *mask* as the active image mask and persist it as HDF5."""
    self.imgMask = mask
    with h5py.File(path, 'w') as out:
        out.create_dataset('mask', data=self.imgMask)
def getMask(self):
    """Return the currently loaded image mask (or None when unset)."""
    return self.imgMask
def setParams(self, params: dict):
    """Copy counting/tracking parameters from a settings dictionary."""
    # (attribute name, key in params) pairs, applied in order.
    fields = (
        ('imgMask', 'mask'),
        ('iou_thresh', 'iou_thresh'),
        ('score_thresh', 'score_thresh'),
        ('max_cosine_distance', 'cos_dist'),
        ('filt_x_vec', 'x_vect'),
        ('filt_y_vec', 'y_vect'),
        ('filt_width', 'filt_width'),
        ('filt_dist', 'filt_dist'),
        ('filt_frame', 'filt_frames'),
        ('finishFrames', 'finish_frames'),
        ('finishLine', 'finish_line'),
        ('count_method', 'count_method'),
    )
    for attr, key in fields:
        setattr(self, attr, params[key])
#==================== Counting Functions ========================
def countVehicles(self, frame, frame_num, detection) -> bool:
    """Update tracker state for one detection and count it if it qualifies.

    detection layout (from the cached detection array):
    [class_id, uid, x_min, y_min, x_max, y_max].

    Returns True when the vehicle is counted now or was already counted,
    False otherwise.
    """
    class_id = detection[0]
    uid = str(detection[1])
    # xmin, ymin, xmax, ymax
    x_min = detection[2]
    y_min = detection[3]
    x_max = detection[4]
    y_max = detection[5]
    width = x_max - x_min
    height = y_max - y_min
    # Centre of the bounding box.
    cx = x_min + (width / 2)
    cy = y_min + (height / 2)
    centroid = [cx, cy]
    tracker_dict = self.detected_vehicles[str(class_id)]
    # First sighting of this track id: register it, never count yet.
    if uid not in tracker_dict.keys():
        tracker_dict[uid] = {
            'initial_centroid' : [cx, cy],
            'prev_centroid': [cx, cy],
            'prev_frame_num': frame_num,
            'dist': 0,
            'counted': False
        }
        return False
    # Already counted this vehicle: nothing more to do.
    elif tracker_dict[uid]['counted'] == True:
        return True
    # Method 0: count once the track has travelled far enough in the
    # configured direction (vector filter).
    if self.count_method == 0:
        # If the previous sighting is too many frames back, restart the
        # distance accumulation from the current position.
        if frame_num - tracker_dict[uid]['prev_frame_num'] > self.filt_frame:
            tracker_dict[uid]['prev_centroid'] = centroid
        # Accumulate distance travelled since the previous sighting.
        prev_centroid = tracker_dict[uid]['prev_centroid']
        tracker_dict[uid]['dist'] = tracker_dict[uid]['dist'] + math.dist(prev_centroid, centroid)
        tracker_dict[uid]['prev_centroid'] = centroid
        tracker_dict[uid]['prev_frame_num'] = frame_num
        # Count the object once the travelled distance exceeds the threshold.
        if tracker_dict[uid]['dist'] > self.filt_dist:
            # Overall displacement vector since the first sighting.
            initial_centroid = tracker_dict[uid]['initial_centroid']
            vect = [cx - initial_centroid[0], cy - initial_centroid[1]]
            # Direction filter: x-displacement within +/- filt_width of
            # filt_x_vec, and y-displacement sign matching filt_y_vec.
            # NOTE: x_min/x_max are deliberately reused here as filter
            # bounds; the box coordinates are no longer needed.
            x_min = self.filt_x_vec - self.filt_width
            x_max = self.filt_x_vec + self.filt_width
            if (x_min < vect[0] < x_max) and (vect[1] > 0) == (self.filt_y_vec > 0):
                tracker_dict[uid]['counted'] = True
                cnt = sum([param['counted'] for id, param in tracker_dict.items()])
                img = self.getVehicleImage(detection, frame)
                self.vehicle_count_signal.emit(class_id, int(uid), cnt, img)
                return True
    # Method 1: count once the centroid has stayed inside the finish-line
    # rectangle for more than finishFrames frames ('dist' reused as a
    # frame counter here).
    elif self.count_method == 1:
        bx = self.finishLine[0]
        by = self.finishLine[1]
        bw = self.finishLine[2]
        bh = self.finishLine[3]
        # Is the centroid inside the finish-line rectangle?
        if (cx > bx) and (cx < bx + bw) and (cy > by) and (cy < by + bh):
            tracker_dict[uid]['dist'] += 1
            if tracker_dict[uid]['dist'] > self.finishFrames:
                tracker_dict[uid]['counted'] = True
                cnt = sum([param['counted'] for id, param in tracker_dict.items()])
                img = self.getVehicleImage(detection, frame)
                self.vehicle_count_signal.emit(class_id, int(uid), cnt, img)
                return True
    return False
@Slot()
def startCounting(self):
    """Run the counter over every cached frame in one blocking pass."""
    if not self.validateInputFiles():
        return
    total_frames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
    # The cache must describe exactly this video.
    if total_frames != self.cache_data.shape[0]:
        self.error_signal.emit('Video and cache frame count does not match')
        return
    # Fresh tracker state for this counting run.
    self.detected_vehicles = {cid: {} for cid in class_id_map.values()}
    # Rewind to the first frame.
    self.vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
    for idx, detections in enumerate(self.cache_data):
        _, raw = self.vid.read()
        masked = cv2.cvtColor(raw, cv2.COLOR_BGR2RGB)
        masked = cv2.bitwise_and(masked, masked, mask=self.imgMask)
        for detection in detections:
            self.countVehicles(masked, idx, detection)
    self.process_done_signal.emit()
@Slot()
def analyzeFrames(self):
    """Timer-driven slot: process one video frame per tick, counting and drawing detections."""
    if not self.counting_timer.isActive():
        # first invocation: arm the timer (30 ms ticks) and wait for the next timeout
        self.counting_timer.setInterval(30)
        self.counting_timer.start()
        return
    success , frame = self.vid.read()
    if success and not self.stop_counting:
        frame_original = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # counting runs on the masked frame; drawing uses the unmasked one below
        frame = cv2.bitwise_and(frame_original, frame_original, mask=self.imgMask)
        frame_data = self.cache_data[self.frame_counter]
        for detection in frame_data:
            # cache row layout: class id, uid, x_min, y_min, x_max, y_max
            class_name = self.getClassName(str(detection[0]))
            uid = detection[1]
            x_min = detection[2]
            y_min = detection[3]
            x_max = detection[4]
            y_max = detection[5]
            detected = self.countVehicles(frame, self.frame_counter, detection)
            frame = self.drawBoundingBox(frame_original, class_name, uid, x_min, y_min, x_max, y_max, detected)
        self.frame_counter += 1
        self.frame_update_signal.emit(frame, self.frame_counter)
    else:
        # end of video or user stop: reset state and notify listeners
        self.stop_counting = True
        self.counting_timer.stop()
        self.frame_counter = 0
        self.process_done_signal.emit()
@Slot()
def stopCountingAnalysis(self):
    """Request the timer-driven analysis loop to stop after the current frame."""
    self.stop_counting = True
@Slot()
def startCountingAnalysis(self):
    """Begin timer-driven counting with live frame preview.

    Validates inputs and frame counts, resets counting state, rewinds the
    video, then kicks off analyzeFrames() which arms the timer.
    """
    # Validate BEFORE allocating the timer: the original created and
    # connected a fresh QTimer first, leaking a connected-but-unused timer
    # on every failed start attempt.
    if not self.validateInputFiles():
        return
    total_frames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
    # tally total frame num in cache data and video
    if total_frames != self.cache_data.shape[0]:
        self.error_signal.emit('Video and cache frame count does not match')
        return
    self.counting_timer = QTimer()
    self.counting_timer.timeout.connect(self.analyzeFrames)
    # reinitialize dict for counting
    self.detected_vehicles = {class_id : {} for class_name, class_id in class_id_map.items()}
    self.stop_counting = False
    # go to first frame
    self.vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
    self.analyzeFrames()
def validateInputFiles(self) -> bool:
    """Check that both the cache data and the input video are loaded.

    Emits error_signal with a human-readable message and returns False on
    the first missing input; returns True when both are present.
    """
    if self.cache_data is None:
        self.error_signal.emit('Cache data not specified!')
        return False
    if self.vid is None:
        self.error_signal.emit('No input video specified')
        return False
    return True
@Slot(int)
def previewFrame(self, frame_num):
    """Render one cached frame with its bounding boxes and emit it for preview.

    Arguments:
        frame_num : zero-based frame index into both the video and cache_data
    """
    if not self.validateInputFiles():
        return
    # go to specified frame
    self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
    success, frame = self.vid.read()
    if not success or frame is None:
        # BUG FIX: the original ignored read failures and crashed inside
        # cvtColor when frame was None (e.g. frame_num past end of video)
        self.error_signal.emit('Failed to read frame from video')
        return
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # draw a bounding box for every cached detection of this frame
    for detection in self.cache_data[frame_num]:
        # cache row layout: class id, uid, x_min, y_min, x_max, y_max
        class_name = self.getClassName(str(detection[0]))
        uid = detection[1]
        x_min = detection[2]
        y_min = detection[3]
        x_max = detection[4]
        y_max = detection[5]
        frame = self.drawBoundingBox(frame, class_name, uid, x_min, y_min, x_max, y_max)
    # update frame signal
    self.frame_update_signal.emit(frame, frame_num)
#==================== Inference Functions ========================
def stopInference(self):
    """Request the inference loop in startInference() to exit after the current frame."""
    self.stop_inference = True
@Slot()
def startInference(self):
if self.vid is None:
self.error_signal.emit('No input video specified')
return
self.stop_inference = False
self.detected_vehicles = {class_id : {} for class_name, class_id in class_id_map.items()}
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric("cosine", self.max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load standard tensorflow saved model for YOLO and Deepsort
if self.sess is None:
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
self.sess = Session(config=config)
self.saved_model_loaded = tf.saved_model.load(weights_path)
self.infer = self.saved_model_loaded.signatures['serving_default']
self.encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# begin video capture
total_frames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
self.max_frame_update_signal.emit(total_frames)
# go to first frame
self.vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
# get video ready to save locally
# by default VideoCapture returns float instead of int
width = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(self.vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(self.output_video_path, codec, fps, (width, height))
# initialize buffer to store cache
cache = []
# buffer to track and count vehicles
cars = {}
trucks = {}
car_cnt = 0
truck_cnt = 0
frame_num = 0
# while video is running
while not self.stop_inference:
frame_data = np.zeros((MAX_DETECTION_NUM, 6), dtype=int)
return_value, frame = self.vid.read()
if return_value:
frame_original = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.bitwise_and(frame_original, frame_original, mask=self.imgMask)
else:
print('Video has ended or failed, try a different video format!')
break
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
batch_data = tf.constant(image_data)
pred_bbox = self.infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=MAX_DETECTION_NUM,
max_total_size=MAX_DETECTION_NUM,
iou_threshold= self.iou_thresh,
score_threshold= self.score_thresh
)
| |
# sympy/mpmath/mptypes.py
"""
This module defines the mpf, mpc classes, and standard functions for
operating with them.
"""
__docformat__ = 'plaintext'
import re
from string import strip
from operator import gt, lt
from settings import (MP_BASE, MP_ZERO, MP_ONE, int_types, repr_dps,
round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps)
from libmpf import (
ComplexResult, to_pickable, from_pickable, normalize,
from_int, from_float, from_str, to_int, to_float, to_str,
from_rational, from_man_exp,
fone, fzero, finf, fninf, fnan,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
mpf_hash, mpf_rand,
mpf_sum,
bitcount
)
from libmpc import (
mpc_to_str,
mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
mpc_mpf_div
)
from libelefun import mpf_pow
from libmpi import (
mpi_mid, mpi_delta, mpi_str,
mpi_abs, mpi_pos, mpi_neg, mpi_add, mpi_sub,
mpi_mul, mpi_div, mpi_pow_int, mpi_pow
)
import quadrature
new = object.__new__
class PrecisionManager:
    """Temporarily remap a context's working precision.

    Usable both as a decorator factory (``manager(func)``) and as a context
    manager (``with manager: ...``).  Exactly one of *precfun*/*dpsfun* is
    consulted: *precfun* remaps the binary precision, otherwise *dpsfun*
    remaps the decimal precision.  With ``normalize_output=True`` the wrapped
    function's return value(s) are re-rounded (via unary ``+``) before the
    original precision is restored.
    """
    def __init__(self, mp, precfun, dpsfun, normalize_output=False):
        self.mp = mp
        self.precfun = precfun
        self.dpsfun = dpsfun
        self.normalize_output = normalize_output
    def __call__(self, f):
        def g(*args, **kwargs):
            saved_prec = self.mp.prec
            try:
                # apply the remapping (prec takes precedence over dps)
                if self.precfun:
                    self.mp.prec = self.precfun(self.mp.prec)
                else:
                    self.mp.dps = self.dpsfun(self.mp.dps)
                result = f(*args, **kwargs)
                if not self.normalize_output:
                    return result
                # re-round to the (still elevated) working precision
                if type(result) is tuple:
                    return tuple(+item for item in result)
                return +result
            finally:
                self.mp.prec = saved_prec
        g.__name__ = f.__name__
        g.__doc__ = f.__doc__
        return g
    def __enter__(self):
        self.origp = self.mp.prec
        if self.precfun:
            self.mp.prec = self.precfun(self.mp.prec)
        else:
            self.mp.dps = self.dpsfun(self.mp.dps)
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.mp.prec = self.origp
        return False
class Context(object):
    # Marker base class for arithmetic contexts; carries no behavior itself.
    pass
class MultiPrecisionArithmetic(Context, quadrature.QuadratureMethods):
    """
    Context for multiprecision arithmetic with a global precision.
    """
    def __init__(ctx):
        # Settings: [prec, rounding] is shared by reference with every number
        # type created below, so mutating it retunes all of them at once
        ctx._prec_rounding = [53, round_nearest]
        ctx.trap_complex = False
        ctx.pretty = False
        # Each context gets its own subclasses of the base number types so
        # per-context state never leaks between contexts
        ctx.mpf = type('mpf', (_mpf,), {})
        ctx.mpc = type('mpc', (_mpc,), {})
        ctx.mpi = type('mpi', (_mpi,), {})
        ctx.constant = type('constant', (_constant,), {})
        ctx.types = [ctx.mpf, ctx.mpc, ctx.mpi, ctx.constant]
        # For fast access
        ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
        ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding]
        ctx.mpi._ctxdata = [ctx.mpi, new, ctx._prec_rounding]
        # constants evaluate to mpf values, hence ctx.mpf (not ctx.constant) here
        ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
        ctx.constant.context = ctx
        ctx.mpf.context = ctx
        ctx.mpc.context = ctx
        ctx.mpi.context = ctx
        # Predefined data
        ctx._create_constants({})
        quadrature.QuadratureMethods.__init__(ctx)
# pi, etc — list of (name, func, description) triples registered elsewhere
_constants = []
def _create_constants(ctx, namespace):
    """Install the basic number singletons and named constants on this context."""
    # Wrap the raw singletons in this context's own mpf/mpc types
    ctx.one = ctx.make_mpf(fone)
    ctx.zero = ctx.make_mpf(fzero)
    ctx.inf = ctx.make_mpf(finf)
    ctx.ninf = ctx.make_mpf(fninf)
    ctx.nan = ctx.make_mpf(fnan)
    ctx.j = ctx.make_mpc((fzero,fone))
    # eps = 2**(1-prec), recomputed lazily at the current working precision
    eps = ctx.constant(lambda prec, rnd: (0, MP_ONE, 1-prec, 1),
        "epsilon of working precision")
    ctx.eps = eps
    import function_docs
    for name, func, descr in ctx._constants:
        # build a dedicated subclass per constant so each carries its docstring
        doc = function_docs.__dict__.get(name, descr)
        const_cls = type("_" + name, (ctx.constant,), {'__doc__':doc})
        const_inst = const_cls(func, descr)
        setattr(ctx, name, const_inst)
        # also export into the caller-supplied namespace (e.g. module globals)
        namespace[name] = const_inst
def clone(ctx):
    """
    Create a copy of the context, with the same working precision.
    """
    duplicate = MultiPrecisionArithmetic()
    duplicate.prec = ctx.prec
    return duplicate
# Several helper methods
# TODO: add more of these, make consistent, write docstrings, ...
def is_real_type(ctx, x):
    """Return False for complex-valued inputs (anything with ``_mpc_`` or a
    builtin complex), True for everything else."""
    return not (hasattr(x, '_mpc_') or type(x) is complex)
def is_complex_type(ctx, x):
    """Return True for complex-valued inputs (anything with ``_mpc_`` or a
    builtin complex), False for everything else."""
    return hasattr(x, '_mpc_') or type(x) is complex
def make_mpf(ctx, v):
    """Wrap raw mpf tuple data *v* in a fresh ctx.mpf (bypasses __init__)."""
    a = new(ctx.mpf)
    a._mpf_ = v
    return a
def make_mpc(ctx, v):
    """Wrap a raw (real, imag) mpf-data pair *v* in a fresh ctx.mpc."""
    a = new(ctx.mpc)
    a._mpc_ = v
    return a
def make_mpi(ctx, v):
    """Wrap raw interval endpoint data *v* in a fresh ctx.mpi."""
    a = new(ctx.mpi)
    a._mpi_ = v
    return a
def isnpint(ctx, x):
    """Determine whether *x* is a nonpositive integer (0, -1, -2, ...)."""
    if not x:
        # zero (and any falsy value) counts
        return True
    if hasattr(x, '_mpf_'):
        sign, man, exp, bc = x._mpf_
        # negative (sign set) with nonnegative exponent => a negative integer
        return sign and exp >= 0
    if hasattr(x, '_mpc_'):
        # complex: requires zero imaginary part and nonpositive-integer real part
        return not x.imag and ctx.isnpint(x.real)
    if type(x) in (int, long):  # NOTE: Python 2 'long'
        return x <= 0
    # anything else: convert to an mp type first, then retest
    return ctx.isnpint(ctx.convert(x))
def bad_domain(ctx, msg):
    """Raise ValueError for an argument outside a function's domain."""
    raise ValueError(msg)
def __str__(ctx):
    """Multi-line summary of the context's current settings."""
    lines = ["Mpmath settings:",
        (" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
        (" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
        (" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
    ]
    return "\n".join(lines)
def default(ctx):
    """Restore factory settings: 53-bit precision (~15 decimal digits)."""
    ctx._prec = ctx._prec_rounding[0] = 53
    ctx._dps = 15
    ctx.trap_complex = False
def set_prec(ctx, n):
    # setter backing the 'prec' property; keeps dps in sync
    ctx._prec = ctx._prec_rounding[0] = max(1, int(n))
    ctx._dps = prec_to_dps(n)
def set_dps(ctx, n):
    # setter backing the 'dps' property; keeps prec in sync
    ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n)
    ctx._dps = max(1, int(n))
# binary precision (bits) and decimal precision (digits); the setters above
# keep the two mutually consistent
prec = property(lambda ctx: ctx._prec, set_prec)
dps = property(lambda ctx: ctx._dps, set_dps)
@property
def repr_digits(ctx):
    # number of digits needed so repr() round-trips exactly
    return repr_dps(ctx._prec)
@property
def str_digits(ctx):
    # number of digits shown by str()
    return ctx._dps
verbose = False
def extraprec(ctx, n, normalize_output=False):
    """
    The block
    with extraprec(n):
    <code>
    increases the precision n bits, executes <code>, and then
    restores the precision.
    extraprec(n)(f) returns a decorated version of the function f
    that increases the working precision by n bits before execution,
    and restores the parent precision afterwards. With
    normalize_output=True, it rounds the return value to the parent
    precision.
    """
    # relative adjustment: new prec = current prec + n
    return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
def extradps(ctx, n, normalize_output=False):
    """
    This function is analogous to extraprec (see documentation)
    but changes the decimal precision instead of the number of bits.
    """
    return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
def workprec(ctx, n, normalize_output=False):
    """
    The block
    with workprec(n):
    <code>
    sets the precision to n bits, executes <code>, and then restores
    the precision.
    workprec(n)(f) returns a decorated version of the function f
    that sets the precision to n bits before execution,
    and restores the precision afterwards. With normalize_output=True,
    it rounds the return value to the parent precision.
    """
    # absolute adjustment: new prec = n regardless of current value
    return PrecisionManager(ctx, lambda p: n, None, normalize_output)
def workdps(ctx, n, normalize_output=False):
    """
    This function is analogous to workprec (see documentation)
    but changes the decimal precision instead of the number of bits.
    """
    return PrecisionManager(ctx, None, lambda d: n, normalize_output)
def nstr(ctx, x, n=6, **kwargs):
    """
    Convert an ``mpf``, ``mpc`` or ``mpi`` to a decimal string literal with *n*
    significant digits. The small default value for *n* is chosen to
    make this function useful for printing collections of numbers
    (lists, matrices, etc).
    If *x* is an ``mpi``, there are some extra options, notably *mode*, which
    can be 'brackets', 'diff', 'plusminus' or 'percent'. See ``mpi_to_str`` for
    a more complete documentation.
    If *x* is a list or tuple, :func:`nstr` is applied recursively
    to each element. For unrecognized classes, :func:`nstr`
    simply returns ``str(x)``.
    The companion function :func:`nprint` prints the result
    instead of returning it.
    >>> from mpmath import *
    >>> nstr([+pi, ldexp(1,-500)])
    '[3.14159, 3.05494e-151]'
    >>> nprint([+pi, ldexp(1,-500)])
    [3.14159, 3.05494e-151]
    """
    # NOTE(review): recursion uses the bare name `nstr` (not ctx.nstr) —
    # relies on a module-level alias being defined elsewhere; confirm
    if isinstance(x, list):
        return "[%s]" % (", ".join(nstr(c, n) for c in x))
    if isinstance(x, tuple):
        return "(%s)" % (", ".join(nstr(c, n) for c in x))
    if hasattr(x, '_mpf_'):
        return to_str(x._mpf_, n)
    if hasattr(x, '_mpc_'):
        return "(" + mpc_to_str(x._mpc_, n) + ")"
    if isinstance(x, basestring):
        return repr(x)
    # deferred import to avoid a circular dependency with matrices
    from matrices import matrix
    if isinstance(x, matrix):
        return x.__nstr__(n)
    if hasattr(x, '_mpi_'):
        return ctx.mpi_to_str(x, n, **kwargs)
    return str(x)
def nprint(ctx, x, n=6, **kwargs):
    """
    Equivalent to ``print nstr(x, n)``.
    """
    # Python 2 print statement (this module predates Python 3)
    print ctx.nstr(x, n, **kwargs)
def convert(ctx, x, strings=True):
"""
Converts *x* to an ``mpf``, ``mpc`` or ``mpi``. If *x* is of type ``mpf``,
``mpc``, ``int``, ``float``, ``complex``, the conversion
will be performed losslessly.
If *x* is a string, the result will be rounded to the present
working precision. Strings representing fractions or complex
numbers are permitted.
>>> from mpmath import *
>>> mp.dps = 15
>>> mpmathify(3.5)
mpf('3.5')
>>> mpmathify('2.1')
mpf('2.1000000000000001')
>>> mpmathify('3/4')
mpf('0.75')
>>> mpmathify('2+3j')
mpc(real='2.0', imag='3.0')
"""
if type(x) in ctx.types: return x
if isinstance(x, int_types): return ctx.make_mpf(from_int(x))
if isinstance(x, float): return ctx.make_mpf(from_float(x))
if isinstance(x, complex): return ctx.mpc(x)
prec, rounding = ctx._prec_rounding
if strings and isinstance(x, basestring):
try:
_mpf_ = from_str(x, prec, rounding)
return ctx.make_mpf(_mpf_)
except Exception, e:
if '/' in x:
fract = x.split('/')
assert len(fract) == 2
return ctx.convert(fract[0]) / ctx.convert(fract[1])
if 'j' in x.lower():
x = x.lower().replace(' ', '')
match = get_complex.match(x)
re = match.group('re')
if not re:
re = 0
im = match.group('im').rstrip('j')
return ctx.mpc(ctx.convert(re), ctx.convert(im))
if '[' in x or '(' in x or '+-' in x:
# XXX
return ctx.mpi_from_str(x)
raise e
if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_)
if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_)
if hasattr(x, '_mpmath_'):
return ctx.convert(x._mpmath_(*prec_rounding))
raise TypeError("cannot create mpf from " + repr(x))
mpmathify = convert
def _parse_prec(ctx, kwargs):
if kwargs:
if kwargs.get('exact'):
return 0, 'f'
prec, rounding = ctx._prec_rounding
if 'rounding' in kwargs:
rounding = kwargs['rounding']
if 'prec' in kwargs:
prec = kwargs['prec']
if prec == ctx.inf:
return 0, 'f'
elif 'dps' in kwargs:
dps = kwargs['dps']
if dps == ctx.inf:
return 0, 'f'
prec = dps_to_prec(dps)
return | |
list(filter(lambda x: x['name'] == 'TBox' and (x['anglez']>>9) == id, bzs['OBJS']))
if len(tboxs) == 0:
print(tboxs)
obj = tboxs[0] # anglez >> 9 is chest id
patch_tbox_item(obj, itemid)
def rando_patch_item(bzs: OrderedDict, itemid: int, id: str):
    """Patch a freestanding Item object; (params1 >> 10) & 0xFF holds its sceneflag id."""
    sceneflag = int(id)
    obj = next(x for x in bzs['OBJ ']
               if x['name'] == 'Item' and ((x['params1'] >> 10) & 0xFF) == sceneflag)
    patch_item_item(obj, itemid)
def rando_patch_chandelier(bzs: OrderedDict, itemid: int, id: str):
    """Patch the (single) chandelier object in this room; *id* is unused."""
    obj = next(x for x in bzs['OBJ '] if x['name'] == 'Chandel')
    patch_chandelier_item(obj, itemid)
def rando_patch_soil(bzs: OrderedDict, itemid: int, id: str):
    """Patch a buried (Soil) item; (params1 >> 4) & 0xFF holds its sceneflag id."""
    sceneflag = int(id)
    obj = next(x for x in bzs['OBJ ']
               if x['name'] == 'Soil' and ((x['params1'] >> 4) & 0xFF) == sceneflag)
    patch_soil_item(obj, itemid)
def rando_patch_bokoblin(bzs: OrderedDict, itemid: int, id: str):
    """Patch a key-carrying Bokoblin (EBc); *id* is parsed with auto base (hex allowed)."""
    target = int(id, 0)
    obj = next(x for x in bzs['OBJ '] if x['name'] == 'EBc' and x['id'] == target)
    patch_key_bokoblin_item(obj, itemid)
# Dispatch table: object name -> patcher. Each patcher takes the layer's bzs,
# the item id and (optionally) an object id string, and patches the object
# in place.
RANDO_PATCH_FUNCS = {
    'chest': rando_patch_chest,
    'HeartCo': rando_patch_heartco,
    'WarpObj': rando_patch_warpobj,
    'TBox': rando_patch_tbox,
    'Item': rando_patch_item,
    'Chandel': rando_patch_chandelier,
    'Soil': rando_patch_soil,
    'EBc': rando_patch_bokoblin,
    # 'Tbox' (lowercase b) appears in some check paths; same patcher as 'TBox'
    'Tbox': rando_patch_tbox,
}
def get_patches_from_location_item_list(all_checks, filled_checks):
    """Translate (check -> item) assignments into concrete patch instructions.

    Returns a triple:
      stagepatchv2 : (stage, room) -> list of (objname, layer, objid, itemid)
      stageoarcs   : (stage, layer) -> set of oarc (model archive) names
      eventpatches : eventfile -> list of (eventid, itemid)
    """
    with (RANDO_ROOT_PATH / 'items.yaml').open() as f:
        items = yaml.safe_load(f)
    by_item_name=dict((x['name'],x) for x in items)
    # make sure dungeon items exist
    DUNGEONS = ['SW', 'ET', 'LMF', 'AC', 'SS', 'FS', 'SK', 'LanayruCaves'] # caves has a key, no spaces because the randomizer splits by spaces
    for dungeon in DUNGEONS:
        by_item_name[f'{dungeon} Small Key'] = by_item_name['Small Key']
        by_item_name[f'{dungeon} Map'] = by_item_name['Map']
    # (stage, room) -> (object name, layer, id?, itemid)
    stagepatchv2 = defaultdict(list)
    # (stage, layer) -> oarc
    stageoarcs = defaultdict(set)
    # # eventfile: (line, itemid)
    eventpatches = defaultdict(list)
    # path grammar: stage/<stage>/r<room>/l<layer>/<objname>[/<objid>]
    stage_re = re.compile(r'stage/(?P<stage>[^/]+)/r(?P<room>[0-9]+)/l(?P<layer>[0-9]+)/(?P<objname>[a-zA-Z]+)(/(?P<objid>[^/]+))?')
    event_re = re.compile(r'event/(?P<eventfile>[^/]+)/(?P<eventid>[^/]+)')
    oarc_re = re.compile(r'oarc/(?P<stage>[^/]+)/l(?P<layer>[^/]+)')
    for checkname, itemname in filled_checks.items():
        # single gratitude crystals aren't randomized
        if itemname == 'Gratitude Crystal':
            continue
        check = all_checks[checkname]
        item = by_item_name[itemname]
        for path in check['Paths']:
            stage_match = stage_re.match(path)
            event_match = event_re.match(path)
            oarc_match = oarc_re.match(path)
            if stage_match:
                stage = stage_match.group('stage')
                room = int(stage_match.group('room'))
                layer = int(stage_match.group('layer'))
                objname = stage_match.group('objname')
                objid = stage_match.group('objid')
                oarc = item['oarc']
                # the item's model archive(s) must be packed into this stage/layer
                if oarc:
                    if isinstance(oarc, list):
                        for o in oarc:
                            stageoarcs[(stage, layer)].add(o)
                    else:
                        stageoarcs[(stage, layer)].add(oarc)
                stagepatchv2[(stage, room)].append((objname, layer, objid, item['id']))
            elif event_match:
                eventfile = event_match.group('eventfile')
                eventid = event_match.group('eventid')
                eventpatches[eventfile].append((eventid, item['id']))
            elif oarc_match:
                # oarc-only path: just stage the archive, no object patch
                stage = oarc_match.group('stage')
                layer = int(oarc_match.group('layer'))
                oarc = item['oarc']
                if oarc:
                    if isinstance(oarc, list):
                        for o in oarc:
                            stageoarcs[(stage, layer)].add(o)
                    else:
                        stageoarcs[(stage, layer)].add(oarc)
            else:
                print(f'ERROR: {path} didn\'t match any regex!')
    return stagepatchv2, stageoarcs, eventpatches
def get_entry_from_bzs(bzs: OrderedDict, objdef: dict, remove: bool=False) -> Optional[OrderedDict]:
    """Find (and optionally remove) a single object entry in a parsed bzs structure.

    objdef keys:
        objtype : object-list key, padded to 4 chars ('OBJ' -> 'OBJ ')
        layer   : optional layer number; list is then found under
                  bzs['LAY ']['l<layer>'][objtype] instead of the top level
        id      : match the entry whose 'id' field equals this (takes
                  precedence over index)
        index   : positional lookup into the list
    Returns the matching entry, or None (with a printed error) when the
    lookup is ambiguous, missing, or out of range.
    """
    # renamed from 'id' locally to avoid shadowing the builtin
    obj_id = objdef.get('id')
    index = objdef.get('index')
    layer = objdef.get('layer')
    objtype = objdef['objtype'].ljust(4) # OBJ has an whitespace but thats was too error prone for the yaml, so just pad it here
    if layer is None:
        objlist = bzs[objtype]
    else:
        objlist = bzs['LAY '][f'l{layer}'][objtype]
    if obj_id is not None:
        matches = [x for x in objlist if x['id'] == obj_id]
        # require exactly one match: zero means missing, >1 is ambiguous
        if len(matches) != 1:
            print(f'Error finding object: {json.dumps(objdef)}')
            return None
        obj = matches[0]
        if remove:
            objlist.remove(obj)
    elif index is not None:
        if index >= len(objlist):
            print(f'Error list index out of range: {json.dumps(objdef)}')
            return None
        if remove:
            obj = objlist.pop(index)
        else:
            obj = objlist[index]
    else:
        print(f'ERROR: neither id nor index given for object {json.dumps(objdef)}')
        return None
    return obj
def do_gamepatches(rando):
patcher = AllPatcher(
actual_extract_path=rando.actual_extract_path,
modified_extract_path=rando.modified_extract_path,
oarc_cache_path=rando.oarc_cache_path,
copy_unmodified=False)
with (RANDO_ROOT_PATH / "patches.yaml").open() as f:
patches = yaml.safe_load(f)
with (RANDO_ROOT_PATH / "eventpatches.yaml").open() as f:
eventpatches = yaml.safe_load(f)
rando.progress_callback('building arc cache...')
with (RANDO_ROOT_PATH / "extracts.yaml").open() as f:
extracts = yaml.safe_load(f)
patcher.create_oarc_cache(extracts)
def filter_option_requirement(entry):
return not (isinstance(entry, dict) and 'onlyif' in entry \
and not rando.logic.check_logical_expression_string_req(entry['onlyif']))
filtered_storyflags = []
for storyflag in patches['global']['startstoryflags']:
# conditionals are an object
if not isinstance(storyflag, int):
if filter_option_requirement(storyflag):
storyflag = storyflag['storyflag']
else:
continue
filtered_storyflags.append(storyflag)
# filter startstoryflags
patches['global']['startstoryflags'] = filtered_storyflags
# Add sword story/itemflags if required
start_sword_count = rando.starting_items.count('Progressive Sword')
for i in range(start_sword_count):
patches['global']['startstoryflags'].append(PROGRESSIVE_SWORD_STORYFLAGS[i])
if start_sword_count > 0:
patches['global']['startitems'].append(PROGRESSIVE_SWORD_ITEMIDS[start_sword_count-1])
# if 'Sailcloth' in rando.starting_items:
# patches['global']['startstoryflags'].append(32)
# patches['global']['startitems'].append(15)
if 'Progressive Pouch' in rando.starting_items:
patches['global']['startstoryflags'].append(30) # storyflag for pouch
patches['global']['startstoryflags'].append(931) # rando storyflag for progressive pouch 1
patches['global']['startitems'].append(112) # itemflag for pouch
rando_stagepatches, stageoarcs, rando_eventpatches = get_patches_from_location_item_list(rando.logic.item_locations, rando.logic.done_item_locations)
# Add required dungeon patches to eventpatches
DUNGEON_TO_EVENTFILE = {
'Skyview': '201-ForestD1',
'Earth Temple': '301-MountainD1',
'Lanayru Mining Facility': '400-Desert',
'Ancient Cistern': '202-ForestD2',
'Sandship': '401-DesertD2',
'Fire Sanctuary': '304-MountainD2',
}
REQUIRED_DUNGEON_STORYFLAGS = [902, 903, 926, 927, 928, 929]
for i, dungeon in enumerate(rando.required_dungeons):
dungeon_events = eventpatches[DUNGEON_TO_EVENTFILE[dungeon]]
required_dungeon_storyflag_event = next(filter(lambda x: x['name'] == 'rando required dungeon storyflag', dungeon_events))
required_dungeon_storyflag_event['flow']['param2'] = REQUIRED_DUNGEON_STORYFLAGS[i] # param2 is storyflag of event
required_dungeon_count = len(rando.required_dungeons)
# set flags for unrequired dungeons beforehand
for required_dungeon_storyflag in REQUIRED_DUNGEON_STORYFLAGS[required_dungeon_count:]:
patches['global']['startstoryflags'].append(required_dungeon_storyflag)
# patch required dungeon text in
if required_dungeon_count == 0:
required_dungeons_text = 'No Dungeons'
elif required_dungeon_count == 6:
required_dungeons_text = 'All Dungeons'
elif required_dungeon_count < 4:
required_dungeons_text = 'Required Dungeons:\n'+('\n'.join(rando.required_dungeons))
else:
required_dungeons_text = 'Required: ' + ', '.join(rando.required_dungeons)
# try to fit the text in as few lines as possible, breaking up at spaces if necessary
cur_line = ''
combined = ''
for part in required_dungeons_text.split(' '):
if len(cur_line + part) > 27: # limit of one line
combined += cur_line + '\n'
cur_line = part + ' '
else:
cur_line += part + ' '
combined += cur_line
required_dungeons_text = combined.strip()
eventpatches['107-Kanban'].append({
"name": "Knight Academy Billboard text",
"type": "textpatch",
"index": 18,
"text": required_dungeons_text,
})
# Add storyflags for startitems (only tablets for now)
for item in rando.starting_items:
if item in START_ITEM_STORYFLAGS:
patches['global']['startstoryflags'].append(START_ITEM_STORYFLAGS[item])
# add startflags to eventpatches
startstoryflags = patches['global'].get('startstoryflags',None)
startsceneflags = patches['global'].get('startsceneflags',None)
startitems = patches['global'].get('startitems',None)
def pop_or_default(lst, default=-1):
if len(lst) == 0:
return default
else:
return lst.pop(0)
for cs_stage, cs_room, cs_index in START_CUTSCENES:
if not cs_stage in patches:
patches[cs_stage] = []
if cs_stage.startswith('F0'):
# make sure to only set sceneflags on skyloft
patches[cs_stage].append({
'name': 'Startflags',
'type': 'objpatch',
'room': cs_room,
'index': cs_index,
'objtype': 'EVNT',
'object': {
'item': pop_or_default(startitems),
'story_flag1': pop_or_default(startstoryflags),
'story_flag2': pop_or_default(startstoryflags),
'sceneflag1': pop_or_default(startsceneflags),
'sceneflag2': pop_or_default(startsceneflags),
},
})
else:
patches[cs_stage].append({
'name': 'Startflags',
'type': 'objpatch',
'room': cs_room,
'index': cs_index,
'objtype': 'EVNT',
'object': {
'item': pop_or_default(startitems),
'story_flag1': pop_or_default(startstoryflags),
'story_flag2': pop_or_default(startstoryflags),
},
})
# for now, we can only set scene and storyflags here, so make sure all items were handled in the event
assert len(startitems) == 0, "Not all items were handled in events!"
while startsceneflags or startstoryflags:
patches['F001r'].append({
'name': 'Startflags',
'type':'objadd',
'room': 1, # Link's room
'layer': 0,
'objtype': 'STAG',
'object': {
"params1": 0xFFFFFF00 | (pop_or_default(startsceneflags) & 0xFF),
"params2": 0xFF5FFFFF,
"posx": 761,
"posy": -22,
"posz": -2260,
"sizex": 1000,
"sizey": 1000,
"sizez": 1000,
"anglex": pop_or_default(startstoryflags) & 0xFFFF,
"angley": 0,
"anglez": 65535,
"name": "SwAreaT",
}
})
def find_event(filename, name):
return next((patch for patch in eventpatches[filename] if patch['name'] == name), None)
# Trial Hints
trial_checks = {
# (getting it text patch, inventory text line)
'Skyloft Silent Realm - Stone of Trials': ('Full SotH text',659, "The song that leads you to the final trial."),
'Faron Silent Realm - Water Scale': ("Farore's Courage Text",653, "This song opens the trial located in Faron\nWoods."),
'Lanayru Silent Realm - Clawshots': ("Nayru's Wisdom Text",654, "This song opens the trial located in\nLanayru Desert."),
'Eldin Silent Realm - Fireshield Earrings': ("Din's Power Text",655, "This song opens the trial located on\nEldin Volcano."),
}
for trial_check_name, (obtain_text_name, inventory_text_idx, inventory_text) in trial_checks.items():
item = rando.logic.done_item_locations[trial_check_name]
if item in rando.logic.all_progress_items:
useful_text = '\nYou might need what it reveals...'
# print(f'{item} in {trial_check} is useful')
else:
useful_text = '\nIt\'s probably not too important...'
# print(f'{item} in {trial_check} is not useful')
find_event('003-ItemGet', obtain_text_name)["text"] += useful_text
eventpatches['003-ItemGet'].append({
'name': "Harp Text",
'type': "textpatch",
'index': inventory_text_idx,
'text': inventory_text + useful_text
})
remove_stageoarcs = defaultdict(set)
for stage, stagepatches in patches.items():
if stage == 'global':
continue
for patch in stagepatches:
if patch['type'] == 'oarcadd':
stageoarcs[(stage, patch['destlayer'])].add(patch['oarc'])
elif patch['type'] == 'oarcdelete':
remove_stageoarcs[(stage, patch['layer'])].add(patch['oarc'])
# stageoarcs[('D000',0)].add('GetSwordA')
for (stage, layer), oarcs in stageoarcs.items():
patcher.add_stage_oarc(stage, layer, oarcs)
for (stage, layer), oarcs in remove_stageoarcs.items():
patcher.delete_stage_oarc(stage, layer, oarcs)
if not | |
# tests/test_ogm.py
import sys
import unittest
import decimal
import os.path
from datetime import datetime
from pyorient import PyOrientCommandException, PyOrientSQLParsingException
from pyorient.ogm import Graph, Config
from pyorient.groovy import GroovyScripts
from pyorient.ogm.declarative import declarative_node, declarative_relationship
from pyorient.ogm.property import (
String, Date, DateTime, Decimal, Double, Integer, EmbeddedMap, EmbeddedSet,
Link, UUID)
from pyorient.ogm.what import expand, in_, out, distinct, sysdate
# Shared declarative bases: every node/relationship class below registers
# itself here, and the registries are used to create the database schema.
AnimalsNode = declarative_node()
AnimalsRelationship = declarative_relationship()
class Animal(AnimalsNode):
    # vertex class 'animal'; broker exposed as g.animals
    element_type = 'animal'
    element_plural = 'animals'
    name = String(nullable=False, unique=True)
    species = String(nullable=False)
class Food(AnimalsNode):
    # vertex class 'food'; broker exposed as g.foods
    element_type = 'food'
    element_plural = 'foods'
    name = String(nullable=False, unique=True)
    color = String(nullable=False)
class Beverage(AnimalsNode):
    # vertex class 'beverage'; broker exposed as g.beverages
    element_type = 'beverage'
    element_plural = 'beverages'
    name = String(nullable=False, unique=True)
    color = String(nullable=False)
class Eats(AnimalsRelationship):
    # edge class 'eats' with an optional free-form modifier property
    label = 'eats'
    modifier = String()
class Dislikes(AnimalsRelationship):
    # edge class 'dislikes'
    label = 'dislikes'
class Drinks(AnimalsRelationship):
    # edge class 'drinks' with an optional free-form modifier property
    label = 'drinks'
    modifier = String()
class OGMAnimalsTestCaseBase(unittest.TestCase):
    # Integration tests for the animals graph schema. Requires a reachable
    # OrientDB server; the 'animals' database is dropped and recreated for
    # every test via initial_drop.
    def __init__(self, *args, **kwargs):
        super(OGMAnimalsTestCaseBase, self).__init__(*args, **kwargs)
        self.g = None
    def setUp(self):
        # fresh database per test
        g = self.g = Graph(Config.from_url('animals', 'root', 'root', initial_drop=True))
        g.create_all(AnimalsNode.registry)
        g.create_all(AnimalsRelationship.registry)
    def testGraph(self):
        assert len(AnimalsNode.registry) == 3
        assert len(AnimalsRelationship.registry) == 3
        g = self.g
        rat = g.animals.create(name='rat', species='rodent')
        mouse = g.animals.create(name='mouse', species='rodent')
        queried_rat = g.query(Animal).filter(
            Animal.name.endswith('at') | (Animal.name == 'tiger')).one()
        assert rat == queried_rat
        # the second key is an injection attempt and must be rejected
        invalid_query_args = {'name': 'rat', 'name="rat" OR 1': 1}
        try:
            g.animals.query(**invalid_query_args).all()
        except:
            pass
        else:
            assert False and 'Invalid params did not raise an exception!'
        queried_mouse = g.query(mouse).one()
        assert mouse == queried_mouse
        assert mouse == g.get_vertex(mouse._id)
        assert mouse == g.get_element(mouse._id)
        # unique constraint on Animal.name must reject the duplicate
        try:
            rat2 = g.animals.create(name='rat', species='rodent')
        except:
            pass
        else:
            assert False and 'Uniqueness not enforced correctly'
        pea = g.foods.create(name='pea', color='green')
        queried_pea = g.foods.query(color='green', name='pea').one()
        cheese = g.foods.create(name='cheese', color='yellow')
        assert queried_pea == pea
        rat_eats_pea = g.eats.create(queried_rat, queried_pea, modifier='lots')
        mouse_eats_pea = g.eats.create(mouse, pea)
        # edges can also be created through the class-level broker
        mouse_eats_cheese = Eats.objects.create(mouse, cheese)
        assert rat_eats_pea.modifier == 'lots'
        assert rat_eats_pea == g.get_edge(rat_eats_pea._id)
        assert rat_eats_pea == g.get_element(rat_eats_pea._id)
        water = g.beverages.create(name='water', color='clear')
        mouse_drinks_water = g.drinks.create(mouse, water)
        # traversal helpers: out/outE/both/bothE filtered by edge class
        assert [water] == mouse.out(Drinks)
        assert [mouse_drinks_water] == mouse.outE(Drinks)
        assert [water] == mouse.both(Drinks)
        assert [mouse_drinks_water] == mouse.bothE(Drinks)
        nut = g.foods.create(name='nut', color='brown')
        rat_dislikes_nut = g.dislikes.create(rat, nut)
        mouse_eats_nut = g.eats.create(mouse, nut)
        assert [rat] == nut.in_(Dislikes)
        assert [rat_dislikes_nut] == nut.inE(Dislikes)
        eaters = g.in_(Food, Eats)
        assert rat in eaters
        # Who eats the peas?
        pea_eaters = g.foods.query(name='pea').what(expand(in_(Eats)))
        for animal in pea_eaters:
            print(animal.name, animal.species)
        # Which animals eat each food
        # FIXME Currently calling all() here, as iteration over expand()
        # results is currently broken.
        animal_foods = \
            g.animals.query().what(expand(distinct(out(Eats)))).all()
        for food in animal_foods:
            print(food.name, food.color,
                g.query(
                    g.foods.query(name=food.name).what(expand(in_(Eats)))) \
                    .what(Animal.name).all())
        for food_name, food_color in g.query(Food.name, Food.color):
            print(food_name, food_color) # 'pea green' # 'cheese yellow'
        # FIXME While it is nicer to use files, parser should be more
        # permissive with whitespace
        g.scripts.add(GroovyScripts.from_string(
        """
        def get_eaters_of(food_type) {
            return g.V('@class', 'food').has('name', T.eq, food_type).inE().outV();
        }
        def get_foods_eaten_by(animal) {
            return g.v(animal).outE('eats').inV()
        }
        def get_colored_eaten_foods(animal, color) {
            return g.v(animal).outE('eats').inV().has('color', T.eq, color)
        }
        """))
        pea_eaters = g.gremlin('get_eaters_of', 'pea')
        for animal in pea_eaters:
            print(animal.name, animal.species) # 'rat rodent' # 'mouse rodent'
        rat_cuisine = g.gremlin('get_foods_eaten_by', (rat,))
        for food in rat_cuisine:
            print(food.name, food.color) # 'pea green'
        # Batch API: queue creates, then commit all at once
        batch = g.batch()
        batch['zombie'] = batch.animals.create(name='zombie',species='undead')
        batch['brains'] = batch.foods.create(name='brains', color='grey')
        # Retry up to twenty times
        batch[:] = batch.eats.create(batch[:'zombie'], batch[:'brains']).retry(20)
        batch['unicorn'] = batch.animals.create(name='unicorn', species='mythical')
        batch['unknown'] = batch.foods.create(name='unknown', color='rainbow')
        batch['mystery_diet'] = batch[:'unicorn'](Eats) > batch[:'unknown']
        # Commits and clears batch
        zombie = batch['$zombie']
        assert zombie.species == 'undead'
class OGMAnimalsRegistryTestCase(OGMAnimalsTestCaseBase):
    """Checks that a schema mapping reverse-engineered from the live server
    round-trips through clear_registry()/include() and stays usable."""

    def testRegistry(self):
        g = self.g
        # Build declarative classes back from the server-side schema.
        schema_registry = g.build_mapping(declarative_node(), declarative_relationship(), auto_plural=True)
        assert all(c in schema_registry for c in ['animal', 'food', 'eats'])
        assert type(schema_registry['animal'].species) == String
        # Plurals not communicated to schema; postprocess registry before
        # include() if you have a better solution than auto_plural.
        assert schema_registry['food'].registry_plural != Food.registry_plural
        g.clear_registry()
        assert len(g.registry) == 0
        g.include(schema_registry)
        assert set(g.registry.keys()) == set(['food', 'dislikes', 'eats', 'beverage', 'animal', 'drinks'])
        # Brokers built from the rebuilt registry must support CRUD/queries.
        rat = g.animal.create(name='rat', species='rodent')
        mouse = g.animal.create(name='mouse', species='rodent')
        rat_class = g.registry['animal']
        queried_rat = g.query(rat_class).filter(
            rat_class.name.endswith('at') | (rat_class.name == 'tiger')).one()
        assert rat == queried_rat
        # try again, to make sure that brokers get cleared correctly
        schema_registry = g.build_mapping(
            declarative_node(), declarative_relationship(), auto_plural=True)
        g.clear_registry()
        g.include(schema_registry)
        assert set(g.registry.keys()) == set(['food', 'dislikes', 'eats', 'beverage', 'animal', 'drinks'])
# Fresh declarative bases for the money-graph schema; every class derived
# from these is collected into the corresponding .registry mapping.
MoneyNode = declarative_node()
MoneyRelationship = declarative_relationship()
class Person(MoneyNode):
    """Vertex type: a person who may carry wallets."""
    element_plural = 'people'  # broker exposed as g.people
    full_name = String(nullable=False)
    uuid = String(nullable=False, default=UUID())
class Wallet(MoneyNode):
    """Vertex type: a wallet storing one amount in two representations."""
    element_plural = 'wallets'  # broker exposed as g.wallets
    # Persisted under the DB property name 'amount' (see the name= override).
    amount_precise = Decimal(name='amount', nullable=False)
    amount_imprecise = Double()
class Carries(MoneyRelationship):
    """Edge type: Person -> Wallet ownership."""
    # No label set on relationship; Broker will not be attached to graph.
    pass
class OGMMoneyTestCase(unittest.TestCase):
    """Exercises Decimal/Double round-tripping, Django-style edge creation,
    Groovy scripts and query slicing against a throwaway 'money' database."""

    def __init__(self, *args, **kwargs):
        super(OGMMoneyTestCase, self).__init__(*args, **kwargs)
        self.g = None

    def setUp(self):
        # initial_drop=True gives every test a clean database.
        g = self.g = Graph(Config.from_url('money', 'root', 'root'
                                           , initial_drop=True))
        g.create_all(MoneyNode.registry)
        g.create_all(MoneyRelationship.registry)

    def testDoubleSerialization(self):
        # Using str() on a float object in Python 2 sometimes
        # returns scientific notation, which causes queries to be misapplied.
        # Similarly, many alternative approaches of turning floats to strings
        # in Python can cause loss of precision.
        g = self.g
        # Try very large values, very small values, and values with a lot of decimals.
        target_values = [1e50, 1e-50, 1.23456789012]
        for value in target_values:
            amount_imprecise = value
            amount_precise = decimal.Decimal(amount_imprecise)
            original_wallet = g.wallets.create(amount_imprecise=amount_imprecise,
                                               amount_precise=amount_precise)
            # Match within a +/- 1e-6 *relative* tolerance around value.
            # BUG FIX: the upper bound previously read (1 + 1e+6) -- a factor
            # of roughly a million -- instead of the intended 1e-6 tolerance
            # mirroring the lower bound.
            wallet = g.query(Wallet).filter(
                (Wallet.amount_imprecise > (value * (1 - 1e-6))) &
                (Wallet.amount_imprecise < (value * (1 + 1e-6)))
            ).one()
            assert wallet.amount_imprecise == original_wallet.amount_imprecise
            assert wallet.amount_precise == original_wallet.amount_precise

    def testMoney(self):
        assert len(MoneyNode.registry) == 2
        assert len(MoneyRelationship.registry) == 1
        g = self.g
        if g.server_version.major == 1:
            self.skipTest(
                'UUID method does not exists in OrientDB version < 2')
        costanzo = g.people.create(full_name='<NAME>', uuid=UUID())
        valerius = g.people.create(full_name='<NAME>'
                                   , uuid=UUID())
        if g.server_version >= (2, 1, 0):
            # Default values supported
            oliver = g.people.create(full_name='<NAME>')
        else:
            oliver = g.people.create(full_name='<NAME>', uuid=UUID())
        # If you override nullable properties to be not-mandatory, be aware that
        # OrientDB version < 2.1.0 does not count null
        assert Person.objects.query().what(distinct(Person.uuid)).count() == 3
        original_inheritance = decimal.Decimal('1520841.74309871919')
        inheritance = g.wallets.create(
            amount_precise=original_inheritance
            , amount_imprecise=original_inheritance)
        assert inheritance.amount_precise == original_inheritance
        # Double storage loses digits that Decimal keeps.
        assert inheritance.amount_precise != inheritance.amount_imprecise
        pittance = decimal.Decimal('0.1')
        poor_pouch = g.wallets.create(
            amount_precise=pittance
            , amount_imprecise=pittance)
        assert poor_pouch.amount_precise == pittance
        assert poor_pouch.amount_precise != poor_pouch.amount_imprecise
        # Django-style creation
        costanzo_claim = Carries.objects.create(costanzo, inheritance)
        valerius_claim = Carries.objects.create(valerius, inheritance)
        oliver_carries = Carries.objects.create(oliver, poor_pouch)
        # Load Gremlin helpers from a sibling groovy file into a namespace.
        g.scripts.add(GroovyScripts.from_file(
            os.path.join(
                os.path.split(
                    os.path.abspath(__file__))[0], 'money.groovy')), 'money')
        rich_list = g.gremlin('rich_list', 1000000, namespace='money')
        assert costanzo in rich_list and valerius in rich_list \
            and oliver not in rich_list
        bigwallet_query = g.query(Wallet).filter(Wallet.amount_precise > 100000)
        smallerwallet_query = g.query(Wallet).filter(
            Wallet.amount_precise < 100000)
        # Basic query slicing
        assert len(bigwallet_query[:]) == 1
        assert len(smallerwallet_query) == 1
        assert bigwallet_query.first() == inheritance
        pouch = smallerwallet_query[0]
        assert pouch == poor_pouch
        assert len(pouch.outE()) == len(pouch.out())
        assert pouch.in_() == pouch.both() and pouch.inE() == pouch.bothE()
        first_inE = pouch.inE()[0]
        assert first_inE == oliver_carries
        assert first_inE.outV() == oliver and first_inE.inV() == poor_pouch
        for i, wallet in enumerate(g.query(Wallet)):
            print(decimal.Decimal(wallet.amount_imprecise) -
                  wallet.amount_precise)
            assert i < 2  # exactly the two wallets created above
        schema_registry = g.build_mapping(MoneyNode, MoneyRelationship)
        assert all(c in schema_registry for c in ['person', 'wallet', 'carries'])
        WalletType = schema_registry['wallet']
        # Original property name, amount_precise, lost-in-translation
        assert type(WalletType.amount) == Decimal
        assert type(WalletType.amount_imprecise) == Double
        g.include(schema_registry)
        debt = decimal.Decimal(-42.0)
        WalletType.objects.create(amount=debt, amount_imprecise=0)
        assert g.query(Wallet)[2].amount == -42
class OGMClassTestCase(unittest.TestCase):
    """Checks that multiple inheritance across *distinct* declarative bases
    is rejected by the OGM metaclass."""

    def __init__(self, *args, **kwargs):
        super(OGMClassTestCase, self).__init__(*args, **kwargs)
        self.g = None

    def setUp(self):
        g = self.g = Graph(Config.from_url('classes', 'root', 'root'
                                           , initial_drop=True))

    def testGraph(self):
        g = self.g
        try:
            # The WRONG way to do multiple inheritance
            # Here, Foo.registry and Bar.registry reference different classes,
            # and therefore g.create_all() can not work.
            class Foo(declarative_node()):
                pass

            class Bar(declarative_node()):
                pass

            class Fubar(Foo, Bar):
                pass
        except TypeError:
            pass
        else:
            # BUG FIX: was `assert False and '...'` -- the `and` makes the
            # whole expression False, so the message never reached the
            # AssertionError. Pass it as the assert message instead.
            assert False, 'Failed to enforce correct vertex base classes.'
# Declarative base for the datetime/date test vertices defined below.
DateTimeNode = declarative_node()
class OGMDateTimeTestCase(unittest.TestCase):
    """Round-trips DateTime and Date properties through the server."""

    class DateTimeV(DateTimeNode):
        element_type = 'datetime'
        element_plural = 'datetime'
        name = String(nullable=False, unique=True)
        at = DateTime(nullable=False)

    class DateV(DateTimeNode):
        element_type = 'dt'
        element_plural = 'dt'
        name = String(nullable=False, unique=True)
        at = Date(nullable=False)

    def setUp(self):
        g = self.g = Graph(Config.from_url('test_datetime', 'root', 'root',
                                           initial_drop=True))
        g.create_all(DateTimeNode.registry)

    def testDateTime(self):
        g = self.g
        # orientdb does not store microseconds
        # so make sure the generated datetime has none
        at = datetime.now().replace(microsecond=0)
        g.datetime.create(name='now', at=at)
        returned_dt = g.datetime.query(name='now').one()
        assert returned_dt.at == at
        # FIXME This returns microseconds, so there's nothing wrong with
        # OrientDB's storage. What's breaking for the above case?
        server_now = g.datetime.create(name='server_now', at=sysdate())
        assert server_now.at >= returned_dt.at

    def testDate(self):
        g = self.g
        # NOTE(review): .date() already discards the time-of-day, so the
        # replace() of hour/minute/second/tzinfo is redundant but harmless.
        at = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None).date()
        g.dt.create(name='today', at=at)
        returned_dt = g.dt.query(name='today').one()
        assert returned_dt.at == at
# Declarative base and vertex type for unicode round-trip tests.
UnicodeNode = declarative_node()


class UnicodeV(UnicodeNode):
    element_type = 'unicode'
    element_plural = 'unicode'
    name = String(nullable=False, unique=True)
    value = String(nullable=False)  # arbitrary unicode payload under test
    alias = EmbeddedSet(linked_to=String(), nullable=True)
class OGMUnicodeTestCase(unittest.TestCase):
def setUp(self):
g = self.g = Graph(Config.from_url('test_unicode', 'root', 'root',
initial_drop=True))
g.create_all(UnicodeNode.registry)
def testUnicode(self):
g = self.g
data = [
(u'general_unicode', u'unicode value\u2017\u00c5'),
(u'special chars: | |
503 383
1 1 2 0 500 620
1 1 2 0 500 380
1 1 2 0 497 617
1 1 2 0 497 377
1 1 2 0 494 614
1 1 2 0 494 374
1 1 2 0 491 611
1 1 2 0 491 371
1 1 2 0 488 608
1 1 2 0 488 368
1 1 2 0 485 605
1 1 2 0 485 365
1 1 2 0 482 602
1 1 2 0 482 362
1 1 2 0 479 599
1 1 2 0 479 359
1 1 2 0 476 596
1 1 2 0 476 356
1 1 2 0 473 593
1 1 2 0 473 353
1 1 2 0 470 590
1 1 2 0 470 350
1 1 2 0 467 587
1 1 2 0 467 347
1 1 2 0 464 584
1 1 2 0 464 344
1 1 2 0 461 581
1 1 2 0 461 341
1 1 2 0 458 578
1 1 2 0 458 338
1 1 2 0 455 575
1 1 2 0 455 335
1 1 2 0 452 692
1 1 2 0 452 572
1 1 2 0 449 689
1 1 2 0 449 569
1 1 2 0 446 686
1 1 2 0 446 566
1 1 2 0 443 683
1 1 2 0 443 563
1 1 2 0 440 680
1 1 2 0 440 560
1 1 2 0 437 677
1 1 2 0 437 557
1 1 2 0 434 674
1 1 2 0 434 554
1 1 2 0 431 671
1 1 2 0 431 551
1 1 2 0 428 668
1 1 2 0 428 548
1 1 2 0 425 665
1 1 2 0 425 545
1 1 2 0 422 662
1 1 2 0 422 542
1 1 2 0 419 659
1 1 2 0 419 539
1 1 2 0 416 656
1 1 2 0 416 536
1 1 2 0 413 653
1 1 2 0 413 533
1 1 2 0 410 650
1 1 2 0 410 530
1 1 2 0 407 647
1 1 2 0 407 527
1 1 2 0 404 644
1 1 2 0 404 524
1 1 2 0 401 641
1 1 2 0 401 521
1 1 2 0 398 638
1 1 2 0 398 518
1 1 2 0 395 635
1 1 2 0 395 515
1 1 2 0 392 632
1 1 2 0 392 512
1 1 2 0 389 629
1 1 2 0 389 509
1 1 2 0 386 626
1 1 2 0 386 506
1 1 2 0 383 623
1 1 2 0 383 503
1 1 2 0 380 620
1 1 2 0 380 500
1 1 2 0 377 617
1 1 2 0 377 497
1 1 2 0 374 614
1 1 2 0 374 494
1 1 2 0 371 611
1 1 2 0 371 491
1 1 2 0 368 608
1 1 2 0 368 488
1 1 2 0 365 605
1 1 2 0 365 485
1 1 2 0 362 602
1 1 2 0 362 482
1 1 2 0 359 599
1 1 2 0 359 479
1 1 2 0 356 596
1 1 2 0 356 476
1 1 2 0 353 593
1 1 2 0 353 473
1 1 2 0 350 590
1 1 2 0 350 470
1 1 2 0 347 587
1 1 2 0 347 467
1 1 2 0 344 584
1 1 2 0 344 464
1 1 2 0 341 581
1 1 2 0 341 461
1 1 2 0 338 578
1 1 2 0 338 458
1 1 2 0 335 575
1 1 2 0 335 455
1 1 2 0 692 677
1 1 2 0 692 632
1 1 2 0 692 617
1 1 2 0 692 611
1 1 2 0 692 593
1 1 2 0 689 686
1 1 2 0 689 656
1 1 2 0 689 647
1 1 2 0 689 635
1 1 2 0 689 596
1 1 2 0 689 590
1 1 2 0 689 584
1 1 2 0 686 677
1 1 2 0 686 656
1 1 2 0 686 623
1 1 2 0 686 620
1 1 2 0 686 602
1 1 2 0 686 593
1 1 2 0 686 581
1 1 2 0 683 680
1 1 2 0 683 665
1 1 2 0 683 647
1 1 2 0 683 644
1 1 2 0 683 611
1 1 2 0 683 602
1 1 2 0 680 677
1 1 2 0 680 611
1 1 2 0 680 596
1 1 2 0 680 584
1 1 2 0 680 578
1 1 2 0 677 662
1 1 2 0 677 653
1 1 2 0 677 644
1 1 2 0 677 641
1 1 2 0 677 638
1 1 2 0 677 629
1 1 2 0 677 590
1 1 2 0 677 575
1 1 2 0 674 656
1 1 2 0 674 650
1 1 2 0 674 641
1 1 2 0 674 623
1 1 2 0 674 620
1 1 2 0 674 614
1 1 2 0 671 647
1 1 2 0 671 635
1 1 2 0 671 620
1 1 2 0 671 608
1 1 2 0 671 599
1 1 2 0 671 584
1 1 2 0 671 581
1 1 2 0 671 575
1 1 2 0 668 641
1 1 2 0 668 638
1 1 2 0 668 632
1 1 2 0 668 629
1 1 2 0 668 602
1 1 2 0 668 599
1 1 2 0 665 659
1 1 2 0 665 623
1 1 2 0 665 614
1 1 2 0 665 596
1 1 2 0 665 584
1 1 2 0 665 575
1 1 2 0 662 659
1 1 2 0 662 650
1 1 2 0 662 632
1 1 2 0 662 629
1 1 2 0 662 623
1 1 2 0 659 653
1 1 2 0 659 614
1 1 2 0 659 593
1 1 2 0 656 653
1 1 2 0 656 626
1 1 2 0 656 623
1 1 2 0 656 620
1 1 2 0 656 587
1 1 2 0 653 626
1 1 2 0 653 620
1 1 2 0 653 605
1 1 2 0 653 596
1 1 2 0 653 581
1 1 2 0 650 644
1 1 2 0 650 635
1 1 2 0 650 626
1 1 2 0 650 614
1 1 2 0 650 605
1 1 2 0 647 638
1 1 2 0 647 629
1 1 2 0 647 626
1 1 2 0 647 620
1 1 2 0 647 581
1 1 2 0 644 617
1 1 2 0 644 614
1 1 2 0 644 602
1 1 2 0 644 575
1 1 2 0 641 626
1 1 2 0 641 620
1 1 2 0 641 617
1 1 2 0 641 590
1 1 2 0 641 581
1 1 2 0 638 611
1 1 2 0 638 605
1 1 2 0 635 632
1 1 2 0 635 626
1 1 2 0 635 623
1 1 2 0 635 602
1 1 2 0 635 593
1 1 2 0 635 584
1 1 2 0 632 620
1 1 2 0 632 614
1 1 2 0 632 608
1 1 2 0 632 599
1 1 2 0 632 578
1 1 2 0 629 626
1 1 2 0 629 611
1 1 2 0 629 602
1 1 2 0 629 596
1 1 2 0 629 587
1 1 2 0 626 617
1 1 2 0 623 614
1 1 2 0 623 605
1 1 2 0 623 590
1 1 2 0 623 587
1 1 2 0 620 617
1 1 2 0 620 611
1 1 2 0 620 608
1 1 2 0 620 602
1 1 2 0 617 596
1 1 2 0 617 593
1 1 2 0 617 584
1 1 2 0 614 608
1 1 2 0 614 602
1 1 2 0 614 581
1 1 2 0 611 587
1 1 2 0 611 584
1 1 2 0 611 575
1 1 2 0 605 596
1 1 2 0 605 590
1 1 2 0 602 599
1 1 2 0 602 590
1 1 2 0 596 587
1 1 2 0 593 590
1 1 2 0 | |
<filename>tslearn/tests/sklearn_patches.py
from tslearn.generators import random_walk_blobs
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
import sklearn
from sklearn.base import clone
from sklearn.base import is_classifier, is_outlier_detector, is_regressor
from sklearn.base import ClusterMixin
from sklearn.exceptions import DataConversionWarning
from sklearn.datasets import make_blobs
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import shuffle
from sklearn.utils.testing import (
set_random_state, assert_equal, assert_greater, assert_array_equal,
assert_raises, assert_array_almost_equal, assert_greater_equal,
assert_allclose, assert_raises_regex, assert_allclose_dense_sparse
)
from sklearn.utils.estimator_checks import (
pairwise_estimator_convert_X, choose_check_classifiers_labels,
check_classifiers_predictions,
check_fit2d_1sample,
check_fit2d_1feature,
check_fit1d,
check_get_params_invariance,
check_set_params,
check_dict_unchanged,
check_dont_overwrite_parameters,
check_estimators_data_not_an_array,
check_fit2d_predict1d,
check_methods_subset_invariance,
check_regressors_int,
_boston_subset
)
from sklearn.utils.testing import ignore_warnings, SkipTest
from sklearn.exceptions import SkipTestWarning
from sklearn.utils.estimator_checks import (_yield_classifier_checks,
_yield_regressor_checks,
_yield_transformer_checks,
_yield_clustering_checks,
_yield_outliers_checks)
try:
from sklearn.utils.estimator_checks import _yield_checks
except ImportError:
from sklearn.utils.estimator_checks import _yield_non_meta_checks
_yield_checks = _yield_non_meta_checks
from sklearn.metrics import adjusted_rand_score, accuracy_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection._validation import _safe_split
from sklearn.pipeline import make_pipeline
import warnings
import numpy as np
_DEFAULT_TAGS = {
'non_deterministic': False,
'requires_positive_X': False,
'requires_positive_y': False,
'X_types': ['2darray'],
'poor_score': False,
'no_validation': False,
'multioutput': False,
"allow_nan": False,
'allow_variable_length': False,
'stateless': False,
'multilabel': False,
'_skip_test': False,
'multioutput_only': False,
'binary_only': False,
'requires_fit': True}
def _safe_tags(estimator, key=None):
# if estimator doesn't have _get_tags, use _DEFAULT_TAGS
# if estimator has tags but not key, use _DEFAULT_TAGS[key]
if hasattr(estimator, "_get_tags"):
if key is not None:
return estimator._get_tags().get(key, _DEFAULT_TAGS[key])
tags = estimator._get_tags()
return {key: tags.get(key, _DEFAULT_TAGS[key])
for key in _DEFAULT_TAGS.keys()}
if key is not None:
return _DEFAULT_TAGS[key]
return _DEFAULT_TAGS
def _create_small_ts_dataset():
    """Return a tiny labelled dataset: 3 blobs of 5 random walks each,
    series length 10, low noise, fixed seed for reproducibility."""
    return random_walk_blobs(
        n_blobs=3,
        n_ts_per_blob=5,
        sz=10,
        noise_level=0.025,
        random_state=1,
    )
def enforce_estimator_tags_y(estimator, y):
    """Adapt the target *y* to the estimator's declared tag requirements."""
    # Estimators with a `requires_positive_y` tag only accept strictly
    # positive data; the +1 keeps the minimal increment valid for int dtypes.
    if _safe_tags(estimator, "requires_positive_y"):
        y += 1 + abs(y.min())
    # `multioutput_only` estimators raise ValueError on 1-D targets, so
    # present y as a single-column 2-D array for them.
    if _safe_tags(estimator, "multioutput_only"):
        y = np.reshape(y, (-1, 1))
    return y
def multioutput_estimator_convert_y_2d(estimator, y):
    """Return *y* reshaped to a 2-D column vector for MultiTask* estimators.

    Local copy of a helper removed from sklearn in 0.22: estimators whose
    class name contains 'MultiTask' raise ValueError on 1-D targets, so
    those get a (-1, 1) view; all others receive y unchanged.
    """
    needs_2d = "MultiTask" in type(estimator).__name__
    return np.reshape(y, (-1, 1)) if needs_2d else y
# Patch BOSTON dataset of sklearn to fix _csv.Error: line contains NULL byte
# Moreover, it makes more sense to use a timeseries dataset for our estimators
BOSTON = _create_small_ts_dataset()
# Monkey-patch sklearn's module-level fixture so its checks use our data.
sklearn.utils.estimator_checks.BOSTON = BOSTON
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_clustering(name, clusterer_orig, readonly_memmap=False):
    """Patched sklearn check: fit a clusterer on a small time-series dataset
    and validate labels_, fit_predict consistency and label values."""
    clusterer = clone(clusterer_orig)
    X, y = _create_small_ts_dataset()
    X, y = shuffle(X, y, random_state=7)
    X = TimeSeriesScalerMeanVariance().fit_transform(X)
    rng = np.random.RandomState(42)
    X_noise = X + (rng.randn(*X.shape) / 5)
    # NOTE(review): for time series this unpacks as (n_ts, sz, d); the
    # middle value is series length, not feature count.
    n_samples, n_features, dim = X.shape
    # catch deprecation and neighbors warnings
    if hasattr(clusterer, "n_clusters"):
        clusterer.set_params(n_clusters=3)
    set_random_state(clusterer)
    # fit
    clusterer.fit(X)
    # with lists
    clusterer.fit(X.tolist())
    pred = clusterer.labels_
    assert_equal(pred.shape, (n_samples,))
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    if _safe_tags(clusterer, 'non_deterministic'):
        return
    # Re-seed so fit_predict can be compared against the fit above.
    set_random_state(clusterer)
    with warnings.catch_warnings(record=True):
        pred2 = clusterer.fit_predict(X)
    assert_array_equal(pred, pred2)
    # fit_predict(X) and labels_ should be of type int
    assert pred.dtype in [np.dtype('int32'), np.dtype('int64')]
    assert pred2.dtype in [np.dtype('int32'), np.dtype('int64')]
    # Add noise to X to test the possible values of the labels
    labels = clusterer.fit_predict(X_noise)
    # There should be at least one sample in every original cluster
    labels_sorted = np.unique(labels)
    assert_array_equal(labels_sorted, np.arange(0, 3))
    # Labels should be no greater than n_clusters - 1
    if hasattr(clusterer, 'n_clusters'):
        n_clusters = getattr(clusterer, 'n_clusters')
        assert_greater_equal(n_clusters - 1, labels_sorted[-1])
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_non_transf_est_n_iter(name, estimator_orig):
    """Check that non-transformer estimators exposing `max_iter` report an
    `n_iter_` attribute of at least 1 after fitting."""
    estimator = clone(estimator_orig)
    if not hasattr(estimator, 'max_iter'):
        return  # nothing to verify for estimators without an iteration cap
    X, y = _create_small_ts_dataset()
    set_random_state(estimator, 0)
    estimator.fit(X, y)
    assert estimator.n_iter_ >= 1
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_fit_idempotent(name, estimator_orig):
    """Check that est.fit(X) is the same as est.fit(X).fit(X).

    Ideally we would check that the estimated parameters during training
    (e.g. coefs_) are the same, but having a universal comparison function
    for those attributes is difficult and full of edge cases. So instead we
    check that predict(), predict_proba(), decision_function() and
    transform() return the same results.
    """
    check_methods = ["predict", "transform", "decision_function",
                     "predict_proba"]
    rng = np.random.RandomState(0)
    if _safe_tags(estimator_orig, 'non_deterministic'):
        msg = name + ' is non deterministic'
        raise SkipTest(msg)
    estimator = clone(estimator_orig)
    set_random_state(estimator)
    if 'warm_start' in estimator.get_params().keys():
        estimator.set_params(warm_start=False)
    X, _ = _create_small_ts_dataset()
    X = X.reshape((X.shape[0], X.shape[1]))  # drop trailing dim (d == 1)
    X = pairwise_estimator_convert_X(X, estimator)
    # BUG FIX: n_samples was hard-coded to 100 while the dataset holds only
    # a handful of series, so y was longer than X (the extra labels were
    # silently ignored by the index-based split). Derive it from X instead.
    n_samples = X.shape[0]
    if is_regressor(estimator_orig):
        y = rng.normal(size=n_samples)
    else:
        y = rng.randint(low=0, high=2, size=n_samples)
    train, test = next(ShuffleSplit(test_size=.2, random_state=rng).split(X))
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    # Fit for the first time
    estimator.fit(X_train, y_train)
    result = {method: getattr(estimator, method)(X_test)
              for method in check_methods
              if hasattr(estimator, method)}
    # Fit again and require (near-)identical outputs from every method.
    set_random_state(estimator)
    estimator.fit(X_train, y_train)
    for method in check_methods:
        if hasattr(estimator, method):
            new_result = getattr(estimator, method)(X_test)
            if np.issubdtype(new_result.dtype, np.floating):
                tol = 2 * np.finfo(new_result.dtype).eps
            else:
                tol = 2 * np.finfo(np.float64).eps
            assert_allclose_dense_sparse(
                result[method], new_result,
                atol=max(tol, 1e-9), rtol=max(tol, 1e-7),
                err_msg="Idempotency check failed for method {}".format(method)
            )
def check_classifiers_classes(name, classifier_orig):
    """Patched sklearn check: classifiers must handle string and
    object-dtype labels on binary and (unless binary_only) multiclass data,
    plus numeric -1/+1 labels on the binary problem."""
    # Skip shapelet models
    if name in ['ShapeletModel', 'SerializableShapeletModel']:
        raise SkipTest('Skipping check_classifiers_classes for shapelets'
                       ' due to convergence issues...')
    X_multiclass, y_multiclass = _create_small_ts_dataset()
    X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass,
                                         random_state=7)
    scaler = TimeSeriesScalerMeanVariance()
    X_multiclass = scaler.fit_transform(X_multiclass)
    # Flatten (n_ts, sz, 1) to (n_ts, sz) for tabular classifier input.
    X_multiclass = np.reshape(X_multiclass, (X_multiclass.shape[0],
                                             X_multiclass.shape[1]))
    # Binary variant: drop the third blob (class label 2).
    X_binary = X_multiclass[y_multiclass != 2]
    y_binary = y_multiclass[y_multiclass != 2]
    X_multiclass = pairwise_estimator_convert_X(X_multiclass, classifier_orig)
    X_binary = pairwise_estimator_convert_X(X_binary, classifier_orig)
    labels_multiclass = ["one", "two", "three"]
    labels_binary = ["one", "two"]
    y_names_multiclass = np.take(labels_multiclass, y_multiclass)
    y_names_binary = np.take(labels_binary, y_binary)
    problems = [(X_binary, y_binary, y_names_binary)]
    if not _safe_tags(classifier_orig, 'binary_only'):
        problems.append((X_multiclass, y_multiclass, y_names_multiclass))
    for X, y, y_names in problems:
        # Exercise both str labels and their object-dtype equivalents.
        for y_names_i in [y_names, y_names.astype('O')]:
            y_ = choose_check_classifiers_labels(name, y, y_names_i)
            check_classifiers_predictions(X, y_, name, classifier_orig)
    # Finally, numeric -1/+1 labels on the binary problem.
    labels_binary = [-1, 1]
    y_names_binary = np.take(labels_binary, y_binary)
    y_binary = choose_check_classifiers_labels(name, y_binary, y_names_binary)
    check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig, readonly_memmap=False):
# Skip shapelet models
if name in ['ShapeletModel', 'SerializableShapeletModel']:
raise SkipTest('Skipping check_classifiers_train for shapelet models'
' due to convergence issues...')
# Generate some random walk blobs, shuffle them and normalize them
X_m, y_m = _create_small_ts_dataset()
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = TimeSeriesScalerMeanVariance().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
# We will test for both binary and multiclass case
problems = [(X_b, y_b), (X_m, y_m)]
tags = _safe_tags(classifier_orig)
for (X, y) in problems:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features, dim = X.shape
classifier = clone(classifier_orig)
X = pairwise_estimator_convert_X(X, classifier)
set_random_state(classifier)
# raises error on malformed input for fit
if not tags["no_validation"]:
with assert_raises(
ValueError,
msg="The classifier {} does not "
"raise an error when incorrect/malformed input "
"data for fit is passed. The number of training "
"examples is not the same as the number of labels. "
"Perhaps use check_X_y in fit.".format(name)):
classifier.fit(X, y[:-1])
# fit with lists
classifier.fit(X.tolist(), y.tolist())
assert hasattr(classifier, "classes_")
y_pred = classifier.predict(X)
assert y_pred.shape == (n_samples,)
# training set performance
if not tags['poor_score']:
assert accuracy_score(y, y_pred) > 0.83
# raises error on malformed input for predict
msg_pairwise = (
"The classifier {} does not raise an error when shape of X in "
" {} is not equal to (n_test_samples, n_training_samples)")
msg = ("The classifier {} does not raise an error when the number of "
"features in {} is different from the number of features in "
"fit.")
if not tags["no_validation"] and not tags["allow_variable_length"]:
if bool(getattr(classifier, "_pairwise", False)):
with assert_raises(ValueError,
msg=msg_pairwise.format(name, "predict")):
classifier.predict(X.reshape(-1, 1))
else:
with assert_raises(ValueError,
msg=msg.format(name, "predict")):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
if not tags["multioutput_only"]:
assert decision.shape == (n_samples,)
else:
assert decision.shape == (n_samples, 1)
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
else:
assert decision.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
if not tags["no_validation"]:
if bool(getattr(classifier, "_pairwise", False)):
error_msg = msg_pairwise.format(name,
"decision_function")
with assert_raises(ValueError, msg=error_msg):
classifier.decision_function(X.reshape(-1, 1))
else:
error_msg = msg_pairwise.format(name,
"decision_function")
with assert_raises(ValueError, msg=error_msg):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert y_prob.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
if not tags["no_validation"] and not tags["allow_variable_length"]:
# raises error on malformed input for predict_proba
if bool(getattr(classifier_orig, "_pairwise", False)):
with assert_raises(ValueError, msg=msg_pairwise.format(
name, "predict_proba")):
classifier.predict_proba(X.reshape(-1, 1))
else:
with assert_raises(ValueError, msg=msg.format(
| |
the mouse pointer, if it belongs to
this application. Otherwise it returns None.
"""
return _misc_.FindWindowAtPointer(*args)
# NOTE: the functions below are SWIG-generated thin wrappers that forward
# directly to the compiled `_misc_` extension module.
def GetActiveWindow(*args):
    """
    GetActiveWindow() -> Window

    Get the currently active window of this application, or None
    """
    return _misc_.GetActiveWindow(*args)

def GenericFindWindowAtPoint(*args, **kwargs):
    """GenericFindWindowAtPoint(Point pt) -> Window"""
    return _misc_.GenericFindWindowAtPoint(*args, **kwargs)

def FindWindowAtPoint(*args, **kwargs):
    """FindWindowAtPoint(Point pt) -> Window"""
    return _misc_.FindWindowAtPoint(*args, **kwargs)

def GetTopLevelParent(*args, **kwargs):
    """GetTopLevelParent(Window win) -> Window"""
    return _misc_.GetTopLevelParent(*args, **kwargs)

def LaunchDefaultBrowser(*args, **kwargs):
    """
    LaunchDefaultBrowser(String url) -> bool

    Launches the user's default browser and tells it to open the location
    at ``url``. Returns ``True`` if the application was successfully
    launched.
    """
    return _misc_.LaunchDefaultBrowser(*args, **kwargs)

def GetKeyState(*args, **kwargs):
    """
    GetKeyState(int key) -> bool

    Get the state of a key (true if pressed or toggled on, false if not.)
    This is generally most useful getting the state of the modifier or
    toggle keys. On some platforms those may be the only keys that this
    function is able to detect.
    """
    return _misc_.GetKeyState(*args, **kwargs)
class MouseState(object):
    """
    `wx.MouseState` is used to hold information about mouse button and
    modifier key states and is what is returned from `wx.GetMouseState`.
    """
    # SWIG-generated proxy: every method forwards to the compiled _misc_
    # module; properties below give Pythonic attribute-style access.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """
        __init__(self) -> MouseState

        `wx.MouseState` is used to hold information about mouse button and
        modifier key states and is what is returned from `wx.GetMouseState`.
        """
        _misc_.MouseState_swiginit(self,_misc_.new_MouseState(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_MouseState
    __del__ = lambda self : None;
    # --- pointer position (screen coordinates) ---
    def GetX(*args, **kwargs):
        """GetX(self) -> int"""
        return _misc_.MouseState_GetX(*args, **kwargs)

    def GetY(*args, **kwargs):
        """GetY(self) -> int"""
        return _misc_.MouseState_GetY(*args, **kwargs)

    # --- button state queries ---
    def LeftDown(*args, **kwargs):
        """LeftDown(self) -> bool"""
        return _misc_.MouseState_LeftDown(*args, **kwargs)

    def MiddleDown(*args, **kwargs):
        """MiddleDown(self) -> bool"""
        return _misc_.MouseState_MiddleDown(*args, **kwargs)

    def RightDown(*args, **kwargs):
        """RightDown(self) -> bool"""
        return _misc_.MouseState_RightDown(*args, **kwargs)

    # --- modifier key state queries ---
    def ControlDown(*args, **kwargs):
        """ControlDown(self) -> bool"""
        return _misc_.MouseState_ControlDown(*args, **kwargs)

    def ShiftDown(*args, **kwargs):
        """ShiftDown(self) -> bool"""
        return _misc_.MouseState_ShiftDown(*args, **kwargs)

    def AltDown(*args, **kwargs):
        """AltDown(self) -> bool"""
        return _misc_.MouseState_AltDown(*args, **kwargs)

    def MetaDown(*args, **kwargs):
        """MetaDown(self) -> bool"""
        return _misc_.MouseState_MetaDown(*args, **kwargs)

    def CmdDown(*args, **kwargs):
        """CmdDown(self) -> bool"""
        return _misc_.MouseState_CmdDown(*args, **kwargs)

    # --- setters (mirror the getters above) ---
    def SetX(*args, **kwargs):
        """SetX(self, int x)"""
        return _misc_.MouseState_SetX(*args, **kwargs)

    def SetY(*args, **kwargs):
        """SetY(self, int y)"""
        return _misc_.MouseState_SetY(*args, **kwargs)

    def SetLeftDown(*args, **kwargs):
        """SetLeftDown(self, bool down)"""
        return _misc_.MouseState_SetLeftDown(*args, **kwargs)

    def SetMiddleDown(*args, **kwargs):
        """SetMiddleDown(self, bool down)"""
        return _misc_.MouseState_SetMiddleDown(*args, **kwargs)

    def SetRightDown(*args, **kwargs):
        """SetRightDown(self, bool down)"""
        return _misc_.MouseState_SetRightDown(*args, **kwargs)

    def SetControlDown(*args, **kwargs):
        """SetControlDown(self, bool down)"""
        return _misc_.MouseState_SetControlDown(*args, **kwargs)

    def SetShiftDown(*args, **kwargs):
        """SetShiftDown(self, bool down)"""
        return _misc_.MouseState_SetShiftDown(*args, **kwargs)

    def SetAltDown(*args, **kwargs):
        """SetAltDown(self, bool down)"""
        return _misc_.MouseState_SetAltDown(*args, **kwargs)

    def SetMetaDown(*args, **kwargs):
        """SetMetaDown(self, bool down)"""
        return _misc_.MouseState_SetMetaDown(*args, **kwargs)

    # Attribute-style access to the getter/setter pairs above.
    x = property(GetX, SetX)
    y = property(GetY, SetY)
    leftDown = property(LeftDown, SetLeftDown)
    middleDown = property(MiddleDown, SetMiddleDown)
    rightDown = property(RightDown, SetRightDown)
    controlDown = property(ControlDown, SetControlDown)
    shiftDown = property(ShiftDown, SetShiftDown)
    altDown = property(AltDown, SetAltDown)
    metaDown = property(MetaDown, SetMetaDown)
    cmdDown = property(CmdDown)  # read-only: no SetCmdDown wrapper exists
_misc_.MouseState_swigregister(MouseState)
# Module-level string constants exported by the C++ extension.
FileSelectorPromptStr = cvar.FileSelectorPromptStr
FileSelectorDefaultWildcardStr = cvar.FileSelectorDefaultWildcardStr
DirSelectorPromptStr = cvar.DirSelectorPromptStr
def GetMouseState(*args):
    """
    GetMouseState() -> MouseState

    Returns the current state of the mouse. Returns an instance of a
    `wx.MouseState` object that contains the current position of the mouse
    pointer in screen coordinants, as well as boolean values indicating
    the up/down status of the mouse buttons and the modifier keys.
    """
    return _misc_.GetMouseState(*args)
# Thin SWIG wrappers for GUI-thread coordination primitives in _misc_.
def WakeUpMainThread(*args):
    """WakeUpMainThread()"""
    return _misc_.WakeUpMainThread(*args)

def MutexGuiEnter(*args):
    """MutexGuiEnter()"""
    return _misc_.MutexGuiEnter(*args)

def MutexGuiLeave(*args):
    """MutexGuiLeave()"""
    return _misc_.MutexGuiLeave(*args)
class MutexGuiLocker(object):
    """Proxy of C++ MutexGuiLocker class"""
    # Acquires the GUI mutex on construction; the C++ destructor (invoked
    # via __swig_destroy__) releases it.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> MutexGuiLocker"""
        _misc_.MutexGuiLocker_swiginit(self,_misc_.new_MutexGuiLocker(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_MutexGuiLocker
    __del__ = lambda self : None;
_misc_.MutexGuiLocker_swigregister(MutexGuiLocker)
def Thread_IsMain(*args):
    """Thread_IsMain() -> bool"""
    # Thin SWIG dispatch to the C++ implementation.
    return _misc_.Thread_IsMain(*args)
#---------------------------------------------------------------------------
# SWIG-generated proxy for the C++ wxToolTip class.
class ToolTip(_core.Object):
    """Proxy of C++ ToolTip class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String tip) -> ToolTip"""
        _misc_.ToolTip_swiginit(self,_misc_.new_ToolTip(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_ToolTip
    __del__ = lambda self : None;
    def SetTip(*args, **kwargs):
        """SetTip(self, String tip)"""
        return _misc_.ToolTip_SetTip(*args, **kwargs)
    def GetTip(*args, **kwargs):
        """GetTip(self) -> String"""
        return _misc_.ToolTip_GetTip(*args, **kwargs)
    def GetWindow(*args, **kwargs):
        """GetWindow(self) -> Window"""
        return _misc_.ToolTip_GetWindow(*args, **kwargs)
    def Enable(*args, **kwargs):
        """Enable(bool flag)"""
        return _misc_.ToolTip_Enable(*args, **kwargs)
    # Reassignment turns the plain function above into a static method.
    Enable = staticmethod(Enable)
    def SetDelay(*args, **kwargs):
        """SetDelay(long milliseconds)"""
        return _misc_.ToolTip_SetDelay(*args, **kwargs)
    SetDelay = staticmethod(SetDelay)
    Tip = property(GetTip,SetTip,doc="See `GetTip` and `SetTip`")
    Window = property(GetWindow,doc="See `GetWindow`")
_misc_.ToolTip_swigregister(ToolTip)
def ToolTip_Enable(*args, **kwargs):
    """ToolTip_Enable(bool flag)"""
    # Module-level alias for the static method ToolTip.Enable.
    return _misc_.ToolTip_Enable(*args, **kwargs)
def ToolTip_SetDelay(*args, **kwargs):
    """ToolTip_SetDelay(long milliseconds)"""
    # Module-level alias for the static method ToolTip.SetDelay.
    return _misc_.ToolTip_SetDelay(*args, **kwargs)
# SWIG-generated proxy for the C++ wxCaret class.
class Caret(object):
    """Proxy of C++ Caret class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, Window window, Size size) -> Caret"""
        _misc_.Caret_swiginit(self,_misc_.new_Caret(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_Caret
    __del__ = lambda self : None;
    def Destroy(*args, **kwargs):
        """
        Destroy(self)

        Deletes the C++ object this Python object is a proxy for.
        """
        # Relinquish Python-side ownership before the C++ object is deleted,
        # so the proxy does not try to delete it a second time.
        args[0].this.own(False)
        return _misc_.Caret_Destroy(*args, **kwargs)
    def IsOk(*args, **kwargs):
        """IsOk(self) -> bool"""
        return _misc_.Caret_IsOk(*args, **kwargs)
    def IsVisible(*args, **kwargs):
        """IsVisible(self) -> bool"""
        return _misc_.Caret_IsVisible(*args, **kwargs)
    def GetPosition(*args, **kwargs):
        """GetPosition(self) -> Point"""
        return _misc_.Caret_GetPosition(*args, **kwargs)
    def GetPositionTuple(*args, **kwargs):
        """GetPositionTuple() -> (x,y)"""
        return _misc_.Caret_GetPositionTuple(*args, **kwargs)
    def GetSize(*args, **kwargs):
        """GetSize(self) -> Size"""
        return _misc_.Caret_GetSize(*args, **kwargs)
    def GetSizeTuple(*args, **kwargs):
        """GetSizeTuple() -> (width, height)"""
        return _misc_.Caret_GetSizeTuple(*args, **kwargs)
    def GetWindow(*args, **kwargs):
        """GetWindow(self) -> Window"""
        return _misc_.Caret_GetWindow(*args, **kwargs)
    def MoveXY(*args, **kwargs):
        """MoveXY(self, int x, int y)"""
        return _misc_.Caret_MoveXY(*args, **kwargs)
    def Move(*args, **kwargs):
        """Move(self, Point pt)"""
        return _misc_.Caret_Move(*args, **kwargs)
    def SetSizeWH(*args, **kwargs):
        """SetSizeWH(self, int width, int height)"""
        return _misc_.Caret_SetSizeWH(*args, **kwargs)
    def SetSize(*args, **kwargs):
        """SetSize(self, Size size)"""
        return _misc_.Caret_SetSize(*args, **kwargs)
    def Show(*args, **kwargs):
        """Show(self, int show=True)"""
        return _misc_.Caret_Show(*args, **kwargs)
    def Hide(*args, **kwargs):
        """Hide(self)"""
        return _misc_.Caret_Hide(*args, **kwargs)
    # Truthiness of a Caret mirrors IsOk() (Python 2 protocol).
    def __nonzero__(self): return self.IsOk()
    def GetBlinkTime(*args, **kwargs):
        """GetBlinkTime() -> int"""
        return _misc_.Caret_GetBlinkTime(*args, **kwargs)
    GetBlinkTime = staticmethod(GetBlinkTime)
    def SetBlinkTime(*args, **kwargs):
        """SetBlinkTime(int milliseconds)"""
        return _misc_.Caret_SetBlinkTime(*args, **kwargs)
    SetBlinkTime = staticmethod(SetBlinkTime)
    Position = property(GetPosition,doc="See `GetPosition`")
    Size = property(GetSize,SetSize,doc="See `GetSize` and `SetSize`")
    Window = property(GetWindow,doc="See `GetWindow`")
_misc_.Caret_swigregister(Caret)
def Caret_GetBlinkTime(*args):
    """Caret_GetBlinkTime() -> int"""
    # Module-level alias for the static method Caret.GetBlinkTime.
    return _misc_.Caret_GetBlinkTime(*args)
def Caret_SetBlinkTime(*args, **kwargs):
    """Caret_SetBlinkTime(int milliseconds)"""
    # Module-level alias for the static method Caret.SetBlinkTime.
    return _misc_.Caret_SetBlinkTime(*args, **kwargs)
# SWIG-generated proxy for the C++ wxBusyCursor class.
class BusyCursor(object):
    """Proxy of C++ BusyCursor class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, Cursor cursor=wxHOURGLASS_CURSOR) -> BusyCursor"""
        _misc_.BusyCursor_swiginit(self,_misc_.new_BusyCursor(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_BusyCursor
    __del__ = lambda self : None;
_misc_.BusyCursor_swigregister(BusyCursor)
# SWIG-generated proxy for the C++ wxWindowDisabler class.
class WindowDisabler(object):
    """Proxy of C++ WindowDisabler class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, Window winToSkip=None) -> WindowDisabler"""
        _misc_.WindowDisabler_swiginit(self,_misc_.new_WindowDisabler(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_WindowDisabler
    __del__ = lambda self : None;
_misc_.WindowDisabler_swigregister(WindowDisabler)
# SWIG-generated proxy for the C++ wxBusyInfo class.
class BusyInfo(_core.Object):
    """Proxy of C++ BusyInfo class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, String message, Window parent=None) -> BusyInfo"""
        _misc_.BusyInfo_swiginit(self,_misc_.new_BusyInfo(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_BusyInfo
    __del__ = lambda self : None;
    # Deliberate no-op: lifetime is managed by __swig_destroy__ above.
    def Destroy(self): pass
_misc_.BusyInfo_swigregister(BusyInfo)
# SWIG-generated proxy for the C++ wxStopWatch class.
class StopWatch(object):
    """Proxy of C++ StopWatch class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self) -> StopWatch"""
        _misc_.StopWatch_swiginit(self,_misc_.new_StopWatch(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_StopWatch
    __del__ = lambda self : None;
    def Start(*args, **kwargs):
        """Start(self, long t0=0)"""
        return _misc_.StopWatch_Start(*args, **kwargs)
    def Pause(*args, **kwargs):
        """Pause(self)"""
        return _misc_.StopWatch_Pause(*args, **kwargs)
    def Resume(*args, **kwargs):
        """Resume(self)"""
        return _misc_.StopWatch_Resume(*args, **kwargs)
    def Time(*args, **kwargs):
        """Time(self) -> long"""
        return _misc_.StopWatch_Time(*args, **kwargs)
_misc_.StopWatch_swigregister(StopWatch)
# SWIG-generated proxy for the C++ wxFileHistory class (most-recently-used
# file list that can be attached to menus).
class FileHistory(_core.Object):
    """Proxy of C++ FileHistory class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args, **kwargs):
        """__init__(self, int maxFiles=9, int idBase=ID_FILE1) -> FileHistory"""
        _misc_.FileHistory_swiginit(self,_misc_.new_FileHistory(*args, **kwargs))
    __swig_destroy__ = _misc_.delete_FileHistory
    __del__ = lambda self : None;
    def AddFileToHistory(*args, **kwargs):
        """AddFileToHistory(self, String file)"""
        return _misc_.FileHistory_AddFileToHistory(*args, **kwargs)
    def RemoveFileFromHistory(*args, **kwargs):
        """RemoveFileFromHistory(self, int i)"""
        return _misc_.FileHistory_RemoveFileFromHistory(*args, **kwargs)
    def GetMaxFiles(*args, **kwargs):
        """GetMaxFiles(self) -> int"""
        return _misc_.FileHistory_GetMaxFiles(*args, **kwargs)
    def UseMenu(*args, **kwargs):
        """UseMenu(self, Menu menu)"""
        return _misc_.FileHistory_UseMenu(*args, **kwargs)
    def RemoveMenu(*args, **kwargs):
        """RemoveMenu(self, Menu menu)"""
        return _misc_.FileHistory_RemoveMenu(*args, **kwargs)
    def Load(*args, **kwargs):
        """Load(self, ConfigBase config)"""
        return _misc_.FileHistory_Load(*args, **kwargs)
    def Save(*args, **kwargs):
        """Save(self, ConfigBase config)"""
        return _misc_.FileHistory_Save(*args, **kwargs)
    def AddFilesToMenu(*args, **kwargs):
        """AddFilesToMenu(self)"""
        return _misc_.FileHistory_AddFilesToMenu(*args, **kwargs)
    def AddFilesToThisMenu(*args, **kwargs):
        """AddFilesToThisMenu(self, Menu menu)"""
        return _misc_.FileHistory_AddFilesToThisMenu(*args, **kwargs)
    def GetHistoryFile(*args, **kwargs):
        """GetHistoryFile(self, int i) -> String"""
        return _misc_.FileHistory_GetHistoryFile(*args, **kwargs)
    def GetCount(*args, **kwargs):
        """GetCount(self) -> int"""
        return _misc_.FileHistory_GetCount(*args, **kwargs)
    # Alias of GetCount kept for older callers.
    GetNoHistoryFiles = GetCount
    Count = property(GetCount,doc="See `GetCount`")
    HistoryFile = property(GetHistoryFile,doc="See `GetHistoryFile`")
    MaxFiles = property(GetMaxFiles,doc="See `GetMaxFiles`")
| |
#
# A convenient, gym-like wrapper for pybullet
# https://github.com/liusida/PyBulletWrapper
#
# Author of this file:
# 2020 <NAME> (<EMAIL>)
# License:
# MIT
# Description:
# This wrapper provides some Python style interfaces to pybullet.
# The original pybullet returns are arrays, but dictionaries are more handy if we don't want to remember the details.
# Support Version:
# pybullet 2.8.5 (installed via `pip install pybullet`)
# Python 3.6
# Reference:
# (1) PyBullet Quickstart Guide
# (2) pybullet.c
import time
import math
import tempfile
import pybullet_data
from .base import BaseWrapperPyBullet
from .utils import *
class HandyPyBullet(BaseWrapperPyBullet):
def __init__(self, p):
    """Wrap the raw pybullet module/handle `p`.

    The client id starts at -1 (meaning "not connected"); connectPy()
    replaces it with a real id. Name mangling makes the attribute
    _HandyPyBullet__physics_client_id.
    """
    self.__physics_client_id = -1
    super().__init__(p)
# Private:
def __checkReturn(self, API, ret, length):
    """Verify that the tuple `ret` returned by `API` has the documented length.

    Returns True on a match; otherwise reports the mismatch and returns False.
    """
    # Fast path: the structure matches the documentation.
    if len(ret) == length:
        return True
    error(f"ERROR: The structure returned from {API} has {len(ret)} items, which is inconsistent with the documentation.")
    print(f"Please check PyBullet's version. Only 2.8.5 is supported.")
    print("\n")
    print(ret)
    return False
def __parseCommonReturn(self, API, retArray, keys):
    """Convert one record returned by `API` into a dict keyed by `keys`.

    Falls back to the raw record when its length is unexpected.
    """
    if not self.__checkReturn(API, retArray, len(keys)):
        return retArray
    return {key: retArray[position] for position, key in enumerate(keys)}
def __parseListReturns(self, API, retArray, keys):
    """Convert a list of records returned by `API` into a list of dicts.

    Only the first record's length is validated (matching the original
    behaviour); an empty or malformed list is returned unchanged.
    """
    if len(retArray) > 0 and self.__checkReturn(API, retArray[0], len(keys)):
        return [
            {key: record[position] for position, key in enumerate(keys)}
            for record in retArray
        ]
    return retArray
def __constructOutParameters(self, in_parameters, valid_parameters):
    """
    Build the keyword dict actually passed to pybullet.c.

    A parameter whose value is None is treated as "use pybullet's default"
    and is simply omitted. `valid_parameters` lists are copied from the
    `kwlist` arrays in pybullet.c.
    """
    return {
        name: in_parameters[name]
        for name in valid_parameters
        if in_parameters.get(name) is not None
    }
# Public:
# New handy additional inteface (by combining the functionalities in the original inteface)
def connectPy(self, withGUI=True, withPanels=False):
    """Connect to pybullet (GUI or DIRECT mode) and reset the simulation."""
    mode = self.GUI if withGUI else self.DIRECT
    self.__physics_client_id = self.connect(mode)
    if withGUI:
        # Panels are only meaningful when a visualizer window exists.
        self.showPanelsPy(withPanels)
    self.resetPy()
    return self.__physics_client_id
def resetPy(self, withGUI=True, withData=True, withGravity=True, withFloor=True, defaultTimeStep=.01):
    """Reset the simulation to a clean default world; (re)connects if needed."""
    if self.__physics_client_id < 0:
        self.__physics_client_id = self.connectPy(withGUI=withGUI)
    self.resetSimulation()
    if withData:
        # Make the assets bundled with pybullet_data (e.g. plane.urdf) loadable.
        self.setAdditionalSearchPath(pybullet_data.getDataPath())
    if withFloor:
        planeId = self.loadURDFPy("plane.urdf", globalScaling=10)
        # Slightly bouncy floor.
        self.changeDynamicsPy(bodyUniqueId=planeId, linkIndex=-1, restitution=0.3)
    if withGravity:
        self.setGravity(0, 0, -9.8)
    self.setTimeStep(defaultTimeStep)
def sleepPy(self, n=0.03):
    """time.sleep(n)"""
    # Convenience wrapper so callers can pace a simulation loop.
    time.sleep(n)
def showPanelsPy(self, on=True):
    # Toggle the debug-GUI side panels of the visualizer window.
    self.configureDebugVisualizer(self.COV_ENABLE_GUI, on)
def addUserDebugBoxPy(self, twoCorners=None, color=None, label=""):
    """
    Draw a wireframe box on the screen using addUserDebugLine() and
    optionally label it using addUserDebugText().

    Arguments:
        twoCorners : two opposite corners [(x,y,z), (x,y,z)];
                     defaults to [(0, 0, 0), (1, 1, 1)]
        color      : RGB line color; defaults to [1, 0, 0]
        label      : text drawn near the box when non-empty
    Return:
        list of the debug-line ids created
    """
    # Resolve defaults per call instead of using mutable default arguments,
    # which would be shared between calls.
    if twoCorners is None:
        twoCorners = [(0, 0, 0), (1, 1, 1)]
    if color is None:
        color = [1, 0, 0]
    a, b = twoCorners
    # The six remaining corners, mixing coordinates of a and b.
    a1 = [a[0], a[1], b[2]]
    a2 = [a[0], b[1], a[2]]
    a3 = [b[0], a[1], a[2]]
    b1 = [b[0], b[1], a[2]]
    b2 = [b[0], a[1], b[2]]
    b3 = [a[0], b[1], b[2]]
    # The 12 edges of the box.
    lines = [
        (a, a1), (a, a2), (a, a3),
        (b, b1), (b, b2), (b, b3),
        (a1, b2), (a1, b3),
        (a2, b1), (a2, b3),
        (a3, b1), (a3, b2),
    ]
    lineIds = [self.addUserDebugLine(start, end, lineColorRGB=color)
               for start, end in lines]
    if len(label) > 0:
        self.addUserDebugText(label, textPosition=a1)
    return lineIds
def getRayFromTo(self, mouseX, mouseY):
    """
    Copied from pybullet examples: addPlanarReflection.py

    Convert a mouse position (in visualizer pixels) into a world-space ray
    (rayFrom, rayTo) suitable for rayTest().
    """
    width, height, viewMat, projMat, cameraUp, camForward, horizon, vertical, _, _, dist, camTarget = self.getDebugVisualizerCamera()
    # Camera position: step back from the target along the forward vector.
    camPos = [
        camTarget[0] - dist * camForward[0], camTarget[1] - dist * camForward[1],
        camTarget[2] - dist * camForward[2]
    ]
    farPlane = 10000
    rayForward = [(camTarget[0] - camPos[0]), (camTarget[1] - camPos[1]), (camTarget[2] - camPos[2])]
    # Rescale the forward vector so the ray reaches the far plane.
    invLen = farPlane * 1. / (math.sqrt(rayForward[0] * rayForward[0] + rayForward[1] *
                                        rayForward[1] + rayForward[2] * rayForward[2]))
    rayForward = [invLen * rayForward[0], invLen * rayForward[1], invLen * rayForward[2]]
    rayFrom = camPos
    oneOverWidth = float(1) / float(width)
    oneOverHeight = float(1) / float(height)
    # Per-pixel steps along the screen's horizontal/vertical axes.
    dHor = [horizon[0] * oneOverWidth, horizon[1] * oneOverWidth, horizon[2] * oneOverWidth]
    dVer = [vertical[0] * oneOverHeight, vertical[1] * oneOverHeight, vertical[2] * oneOverHeight]
    # NOTE(review): rayToCenter is computed but unused; kept as in the
    # upstream pybullet example.
    rayToCenter = [
        rayFrom[0] + rayForward[0], rayFrom[1] + rayForward[1], rayFrom[2] + rayForward[2]
    ]
    rayTo = [
        rayFrom[0] + rayForward[0] - 0.5 * horizon[0] + 0.5 * vertical[0] + float(mouseX) * dHor[0] -
        float(mouseY) * dVer[0], rayFrom[1] + rayForward[1] - 0.5 * horizon[1] + 0.5 * vertical[1] +
        float(mouseX) * dHor[1] - float(mouseY) * dVer[1], rayFrom[2] + rayForward[2] -
        0.5 * horizon[2] + 0.5 * vertical[2] + float(mouseX) * dHor[2] - float(mouseY) * dVer[2]
    ]
    return rayFrom, rayTo
# Override pybullet functions below:
# Those ...Py() functions are proxy to the originial interface.
# These functions can give a hit of the parameters and give dictionary returns.
# Some original functions, like getKeyboardEvents(), already returns a dictionary, so there's no need to implement handy version.
def getDebugVisualizerCameraPy(self):
    """Dictionary version of getDebugVisualizerCamera()."""
    record = self.getDebugVisualizerCamera()
    field_names = ["width", "height", "viewMatrix", "projectionMatrix", "cameraUp", "cameraForward",
                   "horizontal", "vertical", "yaw", "pitch", "dist", "target"]
    return self.__parseCommonReturn("getDebugVisualizerCamera", record, field_names)
def getMouseEventsPy(self, physicsClientId=None):
    """Dictionary-list version of getMouseEvents()."""
    params = self.__constructOutParameters(locals(), ["physicsClientId"])
    records = self.getMouseEvents(**params)
    field_names = ["eventType", "mousePosX", "mousePosY", "buttonIndex", "buttonState"]
    return self.__parseListReturns("getMouseEvents", records, field_names)
def getBasePositionAndOrientationPy(self, bodyUniqueId, physicsClientId=None):
    """Dictionary version of getBasePositionAndOrientation().

    NOTE: an orientation represents the rotation from the unrotated pose
    (0,0,0,1), i.e.:
      orientation.rotate(relative_position) + translation = global_position
      relative_position = orientation.inverse.rotate(global_position - translation)
    """
    params = self.__constructOutParameters(locals(), ["bodyUniqueId", "physicsClientId"])
    record = self.getBasePositionAndOrientation(**params)
    return self.__parseCommonReturn("getBasePositionAndOrientation", record, ["position", "orientation"])
def getJointInfoPy(self, bodyUniqueId, jointIndex, physicsClientId=None):
    """Dictionary version of getJointInfo()."""
    params = self.__constructOutParameters(locals(), ["bodyUniqueId", "jointIndex", "physicsClientId"])
    record = self.getJointInfo(**params)
    field_names = ["jointIndex", "jointName", "jointType", "qIndex", "uIndex", "flags", "jointDamping",
                   "jointFriction", "jointLowerLimit", "jointUpperLimit", "jointMaxForce",
                   "jointMaxVelocity", "linkName", "jointAxis", "parentFramePos", "parentFrameOrn",
                   "parentIndex"]
    return self.__parseCommonReturn("getJointInfo", record, field_names)
def getJointStatePy(self, bodyUniqueId, jointIndex, physicsClientId=None):
    """Dictionary version of getJointState()."""
    params = self.__constructOutParameters(locals(), ["bodyUniqueId", "jointIndex", "physicsClientId"])
    record = self.getJointState(**params)
    field_names = ["jointPosition", "jointVelocity", "jointReactionForces", "appliedJointMotorTorque"]
    return self.__parseCommonReturn("getJointState", record, field_names)
def getJointStatesPy(self, bodyUniqueId, jointIndices, physicsClientId=None):
    """Dictionary-list version of getJointStates()."""
    params = self.__constructOutParameters(locals(), ["bodyUniqueId", "jointIndices", "physicsClientId"])
    records = self.getJointStates(**params)
    field_names = ["jointPosition", "jointVelocity", "jointReactionForces", "appliedJointMotorTorque"]
    return self.__parseListReturns("getJointStates", records, field_names)
# Note:
# According to the source code `pybullet.c`, the returned value has 6 items when computeLinkVelocity is 0, but 8 otherwise.
# This is different from the documentation.
def getLinkStatePy(self, bodyUniqueId, linkIndex, computeLinkVelocity=None, computeForwardKinematics=None, physicsClientId=None,):
    """Dictionary version of getLinkState().

    The raw return has 6 fields, plus 2 velocity fields when
    computeLinkVelocity is truthy (pybullet.c differs from the docs here).
    """
    params = self.__constructOutParameters(
        locals(),
        ["bodyUniqueId", "linkIndex", "computeLinkVelocity", "computeForwardKinematics", "physicsClientId"])
    record = self.getLinkState(**params)
    field_names = ["linkWorldPosition", "linkWorldOrientation", "localInertialFramePosition",
                   "localInertialFrameOrientation", "worldLinkFramePosition", "worldLinkFrameOrientation",
                   "worldLinkLinearVelocity", "worldLinkAngularVelocity"]
    if not computeLinkVelocity:
        field_names = field_names[:6]
    return self.__parseCommonReturn("getLinkState", record, field_names)
def getLinkStatesPy(self, bodyUniqueId, linkIndices, computeLinkVelocity=None, computeForwardKinematics=None, physicsClientId=None,):
    """Dictionary-list version of getLinkStates().

    Each record has 6 fields, plus 2 velocity fields when
    computeLinkVelocity is truthy.
    """
    params = self.__constructOutParameters(
        locals(),
        ["bodyUniqueId", "linkIndices", "computeLinkVelocity", "computeForwardKinematics", "physicsClientId"])
    records = self.getLinkStates(**params)
    field_names = ["linkWorldPosition", "linkWorldOrientation", "localInertialFramePosition",
                   "localInertialFrameOrientation", "worldLinkFramePosition", "worldLinkFrameOrientation",
                   "worldLinkLinearVelocity", "worldLinkAngularVelocity"]
    if not computeLinkVelocity:
        field_names = field_names[:6]
    return self.__parseListReturns("getLinkStates", records, field_names)
def getConstraintInfoPy(self, constraintUniqueId, physicsClientId=None):
    """Dictionary version of getConstraintInfo()."""
    params = self.__constructOutParameters(locals(), ["constraintUniqueId", "physicsClientId"])
    record = self.getConstraintInfo(**params)
    field_names = ["parentBodyUniqueId", "parentJointIndex", "childBodyUniqueId", "childLinkIndex",
                   "constraintType", "jointAxis", "jointPivotInParent", "jointPivotInChild",
                   "jointFrameOrientationParent", "jointFrameOrientationChild", "maxAppliedForce",
                   "gearRatio", "gearAuxLink", "relativePositionTarget", "erp"]
    return self.__parseCommonReturn("getConstraintInfo", record, field_names)
def getDynamicsInfoPy(self, bodyUniqueId, linkIndex, physicsClientId=None):
    """Dictionary version of getDynamicsInfo()."""
    params = self.__constructOutParameters(locals(), ["bodyUniqueId", "linkIndex", "physicsClientId"])
    record = self.getDynamicsInfo(**params)
    field_names = ["mass", "lateralFriction", "localInertiaDiagonal", "localInertialPos",
                   "localInertialOrn", "restitution", "rollingFriction", "spinningFriction",
                   "contactDamping", "contactStiffness", "bodyType", "collisionMargin"]
    return self.__parseCommonReturn("getDynamicsInfo", record, field_names)
def getCameraImagePy(self, width, height, viewMatrix=None, projectionMatrix=None, lightDirection=None, lightColor=None, lightDistance=None, shadow=None,
                     lightAmbientCoeff=None, lightDiffuseCoeff=None, lightSpecularCoeff=None, renderer=None, flags=None, physicsClientId=None,):
    """Dictionary version of getCameraImage().

    Returns a dict with keys: width, height, rgbPixels, depthPixels,
    segmentationMaskBuffer.
    """
    keys = ["width", "height", "rgbPixels", "depthPixels", "segmentationMaskBuffer"]
    valid_parameters = [
        "width", "height", "viewMatrix", "projectionMatrix", "lightDirection", "lightColor", "lightDistance", "shadow", "lightAmbientCoeff",
        "lightDiffuseCoeff", "lightSpecularCoeff", "renderer", "flags", "projectiveTextureView", "projectiveTextureProj", "physicsClientId", ]
    out_parameters = self.__constructOutParameters(locals(), valid_parameters)
    retArray = self.getCameraImage(**out_parameters)
    # Bug fix: the API name reported to the parser used to be
    # "getDynamicsInfo" (copy-paste), which produced a misleading error
    # message on a length mismatch.
    return self.__parseCommonReturn("getCameraImage", retArray, keys)
def getVisualShapeDataPy(self, objectUniqueId, flags=None, physicsClientId=None):
    """Dictionary-list version of getVisualShapeData().

    When the VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS flag is set, each record
    also carries a textureUniqueId field.
    """
    keys = ["objectUniqueId", "linkIndex", "visualGeometryType", "dimensions", "meshAssetFileName",
            "localVisualFramePosition", "localVisualFrameOrientation", "rgbaColor", "textureUniqueId", ]
    keys_without_VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS = ["objectUniqueId", "linkIndex", "visualGeometryType",
                                                         "dimensions", "meshAssetFileName", "localVisualFramePosition", "localVisualFrameOrientation", "rgbaColor", ]
    valid_parameters = ["objectUniqueId", "flags", "physicsClientId"]
    out_parameters = self.__constructOutParameters(locals(), valid_parameters)
    retArray = self.getVisualShapeData(**out_parameters)
    # Bug fix: `flags` defaults to None, and `None & ...` raises TypeError;
    # only test the bit when flags was actually supplied.
    if flags is not None and (flags & self.VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS):
        return self.__parseListReturns("getVisualShapeData", retArray, keys)
    return self.__parseListReturns("getVisualShapeData", retArray, keys_without_VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS,)
def getContactPointsPy(self, bodyA=None, bodyB=None, linkIndexA=None, linkIndexB=None, physicsClientId=None,):
    """Dictionary-list version of getContactPoints().

    NOTE: positionOnA and positionOnB are in global Cartesian coordinates.
    """
    keys = ["contactFlag", "bodyUniqueIdA", "bodyUniqueIdB", "linkIndexA", "linkIndexB", "positionOnA", "positionOnB", "contactNormalOnB",
            "contactDistance", "normalForce", "lateralFriction1", "lateralFrictionDir1", "lateralFriction2", "lateralFrictionDir2", ]
    # Fix: the valid_parameters assignment was duplicated verbatim in the
    # original; a single assignment suffices.
    valid_parameters = ["bodyA", "bodyB", "linkIndexA", "linkIndexB", "physicsClientId", ]
    out_parameters = self.__constructOutParameters(locals(), valid_parameters)
    retArray = self.getContactPoints(**out_parameters)
    return self.__parseListReturns("getContactPoints", retArray, keys)
def getClosestPointsPy(self, bodyA, bodyB, distance, linkIndexA=None, linkIndexB=None, physicsClientId=None,):
keys = | |
# lecture.py
from trad_chiffre_mot import tradn
import os
import numpy as np
from scipy.sparse import csr_matrix
import pandas as pd
from nltk.tag import StanfordPOSTagger
from nltk.tokenize import RegexpTokenizer
from keras import Input
from keras.layers import Bidirectional, LSTM, Dropout, RepeatVector, Concatenate, Dense, Activation, Dot, GRU
from keras.models import Model
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
def filter_length(df, n=8, phon="2_phon", sup_n=True):
    """Filter `df` rows by the length of their `phon` entry.

    Keeps rows whose entry has length >= n when sup_n is True,
    and length < n otherwise.
    """
    lengths = df[phon].map(len)
    mask = (lengths >= n) if sup_n else (lengths < n)
    return df.loc[mask, :]
def sample(df, m=1000, mots="1_ortho", phon="2_phon", occurances="10_freqlivres", ln_dist=False, seed=23):
    """
    Draw m (word, pronunciation) pairs from the lexicon, weighted by frequency.

    :param df: pd.DataFrame containing the lexicon
    :param m: number of samples to draw
    :param mots: column holding the spellings
    :param phon: column holding the phonemes
    :param occurances: column holding the word frequencies
    :param ln_dist: when True, weight by log(freq + 1) instead of freq
    :param seed: numpy random seed used for the draw
    :return: list of m (word, pronunciation) tuples
    """
    rows = df[[mots, phon, occurances]].to_numpy()
    pairs = [(row[0], row[1]) for row in rows]
    weights = np.array([row[2] for row in rows])
    # Optional log damping, then normalisation into a probability vector.
    if ln_dist:
        weights = np.log(weights + 1)
    weights = weights / np.sum(weights)
    np.random.seed(seed)
    chosen = np.random.choice(a=range(len(pairs)), size=m, p=weights.tolist()).tolist()
    return [pairs[index] for index in chosen]
def train_dev(df, test_size=0.01, m=1000, forced_train=None, mots="1_ortho", phon="2_phon",
              occurances="10_freqlivres", ln_dist=False, seed=23):
    """
    Split the lexicon into train/test and sample weighted (word, pronunciation) pairs.

    :param df: pd.DataFrame containing the lexicon
    :param test_size: fraction of rows held out for the test split
    :param m: number of training samples to draw
    :param forced_train: words that must end up in the training split
    :param mots: column holding the spellings
    :param phon: column holding the phonemes
    :param occurances: column holding the word frequencies
    :param ln_dist: when True, weight by log(freq + 1)
    :param seed: seed for both the split and the sampling
    :return: (train_samples, test_samples) lists of tuples
    """
    if forced_train is None:
        forced_train = []
    train_df, test_df = train_test_split(df, test_size=test_size, random_state=seed)
    if len(forced_train) > 0:  # move the forced words from test into train
        forced_idx = test_df[mots].apply(lambda x: x in forced_train)
        forced = test_df.loc[forced_idx, :]
        # DataFrame.append was deprecated and removed in pandas 2.0;
        # pd.concat is the supported equivalent.
        train_df = pd.concat([train_df, forced], ignore_index=True)
        # Invert a boolean mask with ~ (unary - on a bool Series raises
        # TypeError on modern pandas).
        test_df = test_df.loc[~forced_idx, :]
    train_s = sample(train_df, m=m, mots=mots, phon=phon, occurances=occurances, ln_dist=ln_dist, seed=seed)
    test_s = sample(test_df, m=int(m * test_size), mots=mots, phon=phon, occurances=occurances,
                    ln_dist=ln_dist, seed=seed)
    return train_s, test_s
def model_test(tx, ty, n_l, n_p, n_brnn1=32, n_h1=64):
    """Build a Keras grapheme-to-phoneme model with per-step attention.

    A bidirectional LSTM encodes the input sequence; for each of the ty
    output steps, an attention block scores the encoder outputs against
    the current decoder state and a GRU emits the next phoneme softmax.

    Args (inferred from layer names — confirm against callers):
        tx: input sequence length; ty: output sequence length
        n_l: input alphabet size; n_p: output phoneme alphabet size
        n_brnn1: encoder LSTM units; n_h1: decoder/attention hidden size
    Returns:
        an uncompiled keras Model with inputs [x, c0, h0] and ty outputs.
    """
    x = Input(shape=(tx, n_l))
    c0 = Input(shape=(n_h1,), name='c0')
    h0 = Input(shape=(n_h1,), name='h0')
    c = c0
    h = h0
    outputs = list()  # collects one softmax output per decoding step
    # Encoder: bidirectional LSTM over the input characters.
    a = Bidirectional(LSTM(units=n_brnn1, return_sequences=True, name="LSTM_mot"))(x)
    a = Dropout(0.2, name="dropout_LSTM_orthographe")(a)
    for t in range(ty):
        # Attention: score each encoder position against the decoder state h.
        h_rep = RepeatVector(tx, name="att_repeat_phoneme{}".format(t))(h)
        ah = Concatenate(axis=-1, name="att_concat_phoneme{}".format(t))([h_rep, a])
        energies = Dense(units=n_h1, activation="tanh", name="att_caractere_phoneme{}".format(t))(ah)
        energies = Dense(units=1, activation="relu", name="att_moyenne_phoneme{}".format(t))(energies)
        alpha = Activation("softmax", name="att_alpha_phoneme{}".format(t))(energies)
        # Context vector: attention-weighted sum of encoder outputs.
        context = Dot(axes=1, name="att_application_phoneme{}".format(t))([alpha, a])
        h, c = GRU(units=n_h1, activation='tanh', recurrent_activation='tanh', return_state=True,
                   name="LSTM_phoneme{}".format(t))(inputs=context, initial_state=c)
        h = Dropout(rate=0.1, name="dropout_phoneme{}".format(t))(h)
        c = Dropout(rate=0.1, name="dropout_memory_phoneme{}".format(t))(c)
        outy = Dense(activation="softmax", units=n_p, name="LSTM_{}".format(t))(h)
        outputs.append(outy)
    net = Model(inputs=[x, c0, h0], outputs=outputs)
    return net
def pos_tag(mots,
            jar=os.path.join(".", "models", "stanford-postagger", "stanford-postagger-3.8.0.jar"),
            mdl=os.path.join(".", "models", "stanford-postagger", "french-ud.tagger")):
    """POS-tag a list of French words with the Stanford tagger, then patch
    tags that the downstream liaison tables cannot handle."""
    try:
        pos_tagger = StanfordPOSTagger(mdl, jar, encoding='utf8')
    except LookupError:
        # Java was not found on PATH: point JAVAHOME at a known JRE and retry.
        os.environ['JAVAHOME'] = r"C:\Program Files (x86)\Java\jre1.8.0_261\bin\java.exe"
        pos_tagger = StanfordPOSTagger(mdl, jar, encoding='utf8')
    tagged = pos_tagger.tag(mots)
    forced_det = ["au", "aux"]
    absent_of_table = ["PART", "SCONJ"]
    tags = [gram for _, gram in tagged]
    needs_patching = (any(det in mots for det in forced_det)
                      or any(tag in absent_of_table for tag in tags))
    if needs_patching:
        for i, (mot, gram) in enumerate(tagged):
            # "au"/"aux" are always treated as determiners.
            if mot in forced_det:
                tagged[i] = (mot, "DET")
            # Map tags missing from the liaison tables to close equivalents.
            if gram == "PART":
                tagged[i] = (mot, "ADV")
            if gram == "SCONJ":
                tagged[i] = (mot, "CONJ")
    return tagged
def check_liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs):
    """
    Decide whether a liaison is possible between two consecutive words.

    :param ortho1: spelling of the first word
    :param ortho2: spelling of the second word
    :param phon1: phonemes of the first word
    :param phon2: phonemes of the second word
    :param nat1: POS tag of the first word
    :param nat2: POS tag of the second word
    :param phrase: sentence providing context
    :return: True when a liaison may be made
    """
    voyelles_p = kwargs.get("voyelles_p", ['a', 'E', '§', 'o', 'O', '1', 'i', '5', 'e', 'u', '@', '°', '9', 'y', '2'])
    y_p = kwargs.get("y_p", ['w', 'j', '8'])
    consonnes_liaisons = {'d': ['d'], 'p': ["p"], 'r': ["R"], 's': ['s', 'z'], 't': ['t'], 'x': ['s', 'z'],
                          'n': ['n', 'G'], 'z': ['z', 's']}
    # Word 2 must start with a (semi-)vowel sound and not with the letter 'h'.
    starts_with_vowel = (phon2[0] in y_p or phon2[0] in voyelles_p) and ortho2[0] != 'h'
    if not starts_with_vowel:
        return False
    # Word 1 must end with a liaison letter whose sound is not already pronounced.
    last_letter = ortho1[-1]
    if last_letter not in consonnes_liaisons or phon1[-1] in consonnes_liaisons[last_letter]:
        return False
    ends_in_vowel_sound = phon1[-1] in voyelles_p
    # The two words must be adjacent in the sentence (no punctuation between).
    if (" ".join([ortho1, ortho2]) not in phrase) and ("-".join([ortho1, ortho2]) not in phrase):
        return False
    # Grammar-based liaison rules, checked in order.
    if nat1 in ["NUM", "DET", "ADJ"] and nat2 in ["NOUN", "PROPN"]:
        return True
    if ortho1 in ["on", "nous", "vous", "ils", "elles", "en", "tout"] and nat2 in ["AUX", "VERB"]:
        return True
    if nat1 in ["AUX", "VERB"] and ends_in_vowel_sound:
        return True
    if nat1 in ["ADP"]:
        return True
    if nat1 in ["NOUN"] and ortho1[-1] in ['s'] and nat2 in ["ADJ"]:
        return True
    if nat1 == "ADV" and nat2 in ["ADV", "ADJ", "NOUN"]:
        return True
    if ortho1 == "quand" and nat2 not in ["AUX", "VERB"]:
        return True
    if ortho1 == "plus" and ortho2 == "ou":
        return True
    if ortho1 == "tout" and ortho2 in ["à", "autour"]:
        return True
    return False
def liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs):
    """Return phon1 with the liaison sound appended/substituted when a
    liaison applies between the two words (see check_liaison)."""
    simple_map = kwargs.get("dico_liaisons", {'d': 't', 'p': 'p', 's': 'z', 't': 't', 'x': 'z', 'z': 'z'})
    simple_nasals = kwargs.get("mots_nasale_simples", ["aucun", "bien", "en", "on", "rien", "un", "non", "mon",
                                                       "ton", "son"])
    if not check_liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs):
        return phon1
    last_letter = ortho1[-1]
    # Simple cases: a single consonant sound is appended.
    if last_letter in simple_map:
        phon1 = phon1 + simple_map[last_letter]
    # Final 'r' after an "e" sound, e.g. "premier".
    if last_letter == 'r' and phon1[-1] == "e":
        phon1 = phon1[:-1] + "ER"
    # Final 'n': nasal vowels, e.g. "bon", "certain", "commun".
    if last_letter == 'n':
        last_sound = phon1[-1]
        if ortho1 in simple_nasals or last_sound == "1":
            phon1 = phon1 + 'n'
        else:
            if last_sound == "§":
                phon1 = phon1[:-1] + "On"
            if last_sound == "@" and ortho1[-2:] == "an":
                phon1 = phon1[:-1] + "an"
            if last_sound == "5":
                if ortho1[-2:] == "en":
                    phon1 = phon1[:-1] + "En"
                if ortho1[-2:] == "in":
                    phon1 = phon1[:-1] + "in"
                if ortho1[-3:] in ["ein", "ain"]:
                    phon1 = phon1[:-2] + "En"
    return phon1
def e_final(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs):
    """Handle a final mute "e" ("-e", "-es", "-ent") between two words.

    If the word ends in a mute "e" and both the last sound of this word and
    the first sound of the next one are pronounceable consonants (or the next
    word starts with 'h'), a schwa ('°') is appended. If only the final sound
    is a consonant (next word starts with a vowel sound), the schwa is added
    and a liaison is attempted on top of it.

    Returns the (possibly modified) phonetic transcription of the first word.
    """
    e_potentiel = (ortho1[-1] == 'e') or (ortho1[-2:] == 'es') or (ortho1[-3:] == 'ent')
    son_final = phon1[-1]
    son_initial = phon2[0]
    lettre_initiale = ortho2[0]
    # pronounceable consonant phonemes; frozenset gives O(1) membership and
    # drops the duplicate 'v'/'g' entries the original list carried
    consonnes_p = frozenset(['k', 'p', 'l', 't', 'R', 'j', 'f', 's', 'd', 'Z', 'n', 'b',
                             'v', 'g', 'm', 'z', 'w', 'S', 'N', '8', 'G', 'x'])
    lien_mots = (son_final in consonnes_p) and ((son_initial in consonnes_p) or lettre_initiale == 'h')
    if e_potentiel and lien_mots:
        phon1 = "{}°".format(phon1)
    elif e_potentiel and (son_final in consonnes_p):
        # next word starts with a vowel sound: add the schwa, then see whether
        # a liaison applies on the schwa-extended form
        phon1_e = "{}°".format(phon1)
        phon1_e_liaison = liaison(ortho1, ortho2, phon1_e, phon2, nat1, nat2, phrase, **kwargs)
        if phon1_e != phon1_e_liaison:
            phon1 = phon1_e_liaison
    return phon1
def liaisons_tokens(mots, prononciation, pos_mots, phrase):
    """Apply liaison() to every consecutive pair of tokens, in place.

    Returns the (mutated) list of phonetic transcriptions.
    """
    phrase_minuscule = phrase.lower()  # liaison rules match on lowercase text
    nb_tokens = len(prononciation)
    for idx in range(nb_tokens - 1):
        prononciation[idx] = liaison(mots[idx], mots[idx + 1],
                                     prononciation[idx], prononciation[idx + 1],
                                     pos_mots[idx][1], pos_mots[idx + 1][1],
                                     phrase_minuscule)
    return prononciation
def e_final_tokens(mots, prononciation, pos_mots, phrase):
    """Apply e_final() to every consecutive pair of tokens, in place.

    NOTE(review): unlike liaisons_tokens, the sentence is NOT lowercased
    here -- presumably intentional, verify against callers.
    Returns the (mutated) list of phonetic transcriptions.
    """
    nb_tokens = len(prononciation)
    for idx in range(nb_tokens - 1):
        prononciation[idx] = e_final(mots[idx], mots[idx + 1],
                                     prononciation[idx], prononciation[idx + 1],
                                     pos_mots[idx][1], pos_mots[idx + 1][1],
                                     phrase)
    return prononciation
class Lecteur:
""""Classe definissant le lecteur
"""
    def __init__(self, tx, ty, l2idx, p2idx, dico_unique, dico_multiple, n_brnn1=90, n_h1=80, net=None, blank="_"):
        """Initialize the reader.

        Arguments (hedged -- confirm against callers):
            tx, ty        : input/output sequence lengths or tensors for the network
            l2idx, p2idx  : letter-to-index and phoneme-to-index mappings
            dico_unique   : dict of words with a single pronunciation
            dico_multiple : dict keyed by (word, ...) tuples for words with
                            several pronunciations (only the word part is kept
                            in _ortho_multiple)
            n_brnn1, n_h1 : network layer sizes, presumably
            net           : pre-built network, if any
            blank         : padding/blank symbol used by the model
        """
        self.tx = tx
        self.ty = ty
        self.l2idx = l2idx
        self.p2idx = p2idx
        self._dico_unique = dico_unique
        self._ortho_unique = dico_unique.keys()
        self._dico_multiple = dico_multiple
        # keep only the orthographic form of each (word, ...) key, deduplicated
        self._ortho_multiple = list(set([w for w, _ in dico_multiple.keys()]))
        self.n_brnn1 = n_brnn1
        self.n_h1 = n_h1
        self.net = net
        self.blank = blank
        # number of read operations performed so far
        self.count_lecture = 0
    # setters and getters
    def _get_dico_unique(self):
        """Return the single-pronunciation dictionary."""
        return self._dico_unique
    def _set_dico_unique(self, dico_unique):
        """Replace the single-pronunciation dictionary and refresh its key view."""
        self._dico_unique = dico_unique
        self._ortho_unique = dico_unique.keys()
def _get_ortho_unique(self):
return | |
# ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import binascii
from typing import Optional, Any, Mapping, Union
from requests.structures import CaseInsensitiveDict
from azure.core import MatchConditions
from azure.core.pipeline import Pipeline
from azure.core.pipeline.policies import (
UserAgentPolicy,
DistributedTracingPolicy,
HttpLoggingPolicy,
BearerTokenCredentialPolicy,
ContentDecodePolicy,
)
from azure.core.tracing.decorator import distributed_trace
from azure.core.pipeline.transport import RequestsTransport
from azure.core.exceptions import (
HttpResponseError,
ClientAuthenticationError,
ResourceExistsError,
ResourceNotFoundError,
ResourceModifiedError,
ResourceNotModifiedError,
)
from ._azure_appconfiguration_error import ResourceReadOnlyError
from ._generated import AzureAppConfiguration
from ._generated._configuration import AzureAppConfigurationConfiguration
from ._models import ConfigurationSetting
from ._azure_appconfiguration_requests import AppConfigRequestsCredentialsPolicy
from ._azure_appconfiguration_credential import AppConfigConnectionStringCredential
from ._utils import (
get_endpoint_from_connection_string,
prep_if_match,
prep_if_none_match,
)
from ._sync_token import SyncTokenPolicy
from ._user_agent import USER_AGENT
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from azure.core.paging import ItemPaged
class AzureAppConfigurationClient:
"""Represents an client that calls restful API of Azure App Configuration service.
:param str base_url: base url of the service
:param credential: An object which can provide secrets for the app configuration service
:type credential: :class:`~azure.appconfiguration.AppConfigConnectionStringCredential`
:keyword Pipeline pipeline: If omitted, the standard pipeline is used.
:keyword HttpTransport transport: If omitted, the standard pipeline is used.
:keyword list[HTTPPolicy] policies: If omitted, the standard pipeline is used.
"""
# pylint:disable=protected-access
def __init__(self, base_url, credential, **kwargs):
# type: (str, Any, **Any) -> None
try:
if not base_url.lower().startswith("http"):
base_url = "https://" + base_url
except AttributeError:
raise ValueError("Base URL must be a string.")
if not credential:
raise ValueError("Missing credential")
self._credential_scopes = base_url.strip("/") + "/.default"
self._config = AzureAppConfigurationConfiguration(
credential, base_url, credential_scopes=self._credential_scopes, **kwargs
)
self._config.user_agent_policy = UserAgentPolicy(
base_user_agent=USER_AGENT, **kwargs
)
self._sync_token_policy = SyncTokenPolicy()
pipeline = kwargs.get("pipeline")
if pipeline is None:
self._sync_token_policy = SyncTokenPolicy()
aad_mode = not isinstance(credential, AppConfigConnectionStringCredential)
pipeline = self._create_appconfig_pipeline(
credential=credential, aad_mode=aad_mode, base_url=base_url, **kwargs
)
self._impl = AzureAppConfiguration(
credential, base_url, pipeline=pipeline, credential_scopes=self._credential_scopes
)
@classmethod
def from_connection_string(cls, connection_string, **kwargs):
# type: (str, **Any) -> AzureAppConfigurationClient
"""Create AzureAppConfigurationClient from a Connection String.
:param str connection_string: Connection String
(one of the access keys of the Azure App Configuration resource)
used to access the Azure App Configuration.
:return: An AzureAppConfigurationClient authenticated with the connection string
:rtype: :class:`~azure.appconfiguration.AzureAppConfigurationClient`
Example
.. code-block:: python
from azure.appconfiguration import AzureAppConfigurationClient
connection_str = "<my connection string>"
client = AzureAppConfigurationClient.from_connection_string(connection_str)
"""
base_url = "https://" + get_endpoint_from_connection_string(connection_string)
return cls(
credential=AppConfigConnectionStringCredential(connection_string),
base_url=base_url,
**kwargs
)
def _create_appconfig_pipeline(
self, credential, base_url=None, aad_mode=False, **kwargs
):
transport = kwargs.get("transport")
policies = kwargs.get("policies")
if policies is None: # [] is a valid policy list
if aad_mode:
scope = base_url.strip("/") + "/.default"
if hasattr(credential, "get_token"):
credential_policy = BearerTokenCredentialPolicy(credential, scope)
else:
raise TypeError(
"Please provide an instance from azure-identity "
"or a class that implement the 'get_token protocol"
)
else:
credential_policy = AppConfigRequestsCredentialsPolicy(credential)
policies = [
self._config.headers_policy,
self._config.user_agent_policy,
self._config.retry_policy,
self._sync_token_policy,
credential_policy,
self._config.logging_policy, # HTTP request/response log
DistributedTracingPolicy(**kwargs),
HttpLoggingPolicy(**kwargs),
ContentDecodePolicy(**kwargs),
]
if not transport:
transport = RequestsTransport(**kwargs)
return Pipeline(transport, policies)
@distributed_trace
def list_configuration_settings(
self, key_filter=None, label_filter=None, **kwargs
): # type: (Optional[str], Optional[str], **Any) -> ItemPaged[ConfigurationSetting]
"""List the configuration settings stored in the configuration service, optionally filtered by
label and accept_datetime
:param key_filter: filter results based on their keys. '*' can be
used as wildcard in the beginning or end of the filter
:type key_filter: str
:param label_filter: filter results based on their label. '*' can be
used as wildcard in the beginning or end of the filter
:type label_filter: str
:keyword datetime accept_datetime: filter out ConfigurationSetting created after this datetime
:keyword list[str] fields: specify which fields to include in the results. Leave None to include all fields
:keyword dict headers: if "headers" exists, its value (a dict) will be added to the http request header
:return: An iterator of :class:`ConfigurationSetting`
:rtype: ~azure.core.paging.ItemPaged[ConfigurationSetting]
:raises: :class:`HttpResponseError`, :class:`ClientAuthenticationError`
Example
.. code-block:: python
from datetime import datetime, timedelta
accept_datetime = datetime.today() + timedelta(days=-1)
all_listed = client.list_configuration_settings()
for item in all_listed:
pass # do something
filtered_listed = client.list_configuration_settings(
label_filter="Labe*", key_filter="Ke*", accept_datetime=accept_datetime
)
for item in filtered_listed:
pass # do something
"""
select = kwargs.pop("fields", None)
if select:
select = ["locked" if x == "read_only" else x for x in select]
error_map = {401: ClientAuthenticationError}
try:
return self._impl.get_key_values( # type: ignore
label=label_filter,
key=key_filter,
select=select,
cls=lambda objs: [
ConfigurationSetting._from_generated(x) for x in objs
],
error_map=error_map,
**kwargs
)
except HttpResponseError as error:
e = error_map[error.status_code]
raise e(message=error.message, response=error.response)
except binascii.Error:
raise binascii.Error("Connection string secret has incorrect padding")
@distributed_trace
def get_configuration_setting(
self,
key, # type: str
label=None, # type: Optional[str]
etag="*", # type: Optional[str]
match_condition=MatchConditions.Unconditionally, # type: Optional[MatchConditions]
**kwargs # type: Any
): # type: (...) -> Union[None, ConfigurationSetting]
"""Get the matched ConfigurationSetting from Azure App Configuration service
:param key: key of the ConfigurationSetting
:type key: str
:param label: label of the ConfigurationSetting
:type label: str
:param etag: check if the ConfigurationSetting is changed. Set None to skip checking etag
:type etag: str or None
:param match_condition: The match condition to use upon the etag
:type match_condition: :class:`~azure.core.MatchConditions`
:keyword datetime accept_datetime: the retrieved ConfigurationSetting that created no later than this datetime
:keyword dict headers: if "headers" exists, its value (a dict) will be added to the http request header
:return: The matched ConfigurationSetting object
:rtype: :class:`~azure.appconfiguration.ConfigurationSetting`
:raises: :class:`HttpResponseError`, :class:`ClientAuthenticationError`, \
:class:`ResourceNotFoundError`, :class:`ResourceModifiedError`, :class:`ResourceExistsError`
Example
.. code-block:: python
fetched_config_setting = client.get_configuration_setting(
key="MyKey", label="MyLabel"
)
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError}
if match_condition == MatchConditions.IfNotModified:
error_map[412] = ResourceModifiedError
if match_condition == MatchConditions.IfModified:
error_map[304] = ResourceNotModifiedError
if match_condition == MatchConditions.IfPresent:
error_map[412] = ResourceNotFoundError
if match_condition == MatchConditions.IfMissing:
error_map[412] = ResourceExistsError
try:
key_value = self._impl.get_key_value(
key=key,
label=label,
if_match=prep_if_match(etag, match_condition),
if_none_match=prep_if_none_match(etag, match_condition),
error_map=error_map,
**kwargs
)
return ConfigurationSetting._from_generated(key_value)
except ResourceNotModifiedError:
return None
except HttpResponseError as error:
e = error_map[error.status_code]
raise e(message=error.message, response=error.response)
except binascii.Error:
raise binascii.Error("Connection string secret has incorrect padding")
@distributed_trace
def add_configuration_setting(self, configuration_setting, **kwargs):
# type: (ConfigurationSetting, **Any) -> ConfigurationSetting
"""Add a ConfigurationSetting instance into the Azure App Configuration service.
:param configuration_setting: the ConfigurationSetting object to be added
:type configuration_setting: :class:`~azure.appconfiguration.ConfigurationSetting`
:keyword dict headers: if "headers" exists, its value (a dict) will be added to the http request header
:return: The ConfigurationSetting object returned from the App Configuration service
:rtype: :class:`~azure.appconfiguration.ConfigurationSetting`
:raises: :class:`HttpResponseError`, :class:`ClientAuthenticationError`, :class:`ResourceExistsError`
Example
.. code-block:: python
config_setting = ConfigurationSetting(
key="MyKey",
label="MyLabel",
value="my value",
content_type="my content type",
tags={"my tag": "my tag value"}
)
added_config_setting = client.add_configuration_setting(config_setting)
"""
key_value = configuration_setting._to_generated()
custom_headers = CaseInsensitiveDict(kwargs.get("headers")) # type: Mapping[str, Any]
error_map = {401: ClientAuthenticationError, 412: ResourceExistsError}
try:
key_value_added = self._impl.put_key_value(
entity=key_value,
key=key_value.key, # type: ignore
label=key_value.label,
if_none_match="*",
headers=custom_headers,
error_map=error_map,
)
return ConfigurationSetting._from_generated(key_value_added)
except HttpResponseError as error:
e = error_map[error.status_code]
raise e(message=error.message, response=error.response)
except binascii.Error:
raise binascii.Error("Connection string secret has incorrect padding")
@distributed_trace
def set_configuration_setting(
self,
configuration_setting,
match_condition=MatchConditions.Unconditionally,
**kwargs
): # type: (ConfigurationSetting, Optional[MatchConditions], **Any) -> ConfigurationSetting
"""Add or update a ConfigurationSetting.
If the configuration setting identified by key and label does not exist, this is a create.
Otherwise this is an update.
:param configuration_setting: the ConfigurationSetting to be added (if not exists) \
or updated (if exists) to the service
:type configuration_setting: :class:`ConfigurationSetting`
:param match_condition: The match condition to use upon the etag
:type match_condition: :class:`~azure.core.MatchConditions`
:keyword dict headers: if "headers" exists, its value (a dict) will be added to the http request header
:return: The ConfigurationSetting returned from the service
:rtype: :class:`~azure.appconfiguration.ConfigurationSetting`
:raises: :class:`HttpResponseError`, :class:`ClientAuthenticationError`, \
:class:`ResourceReadOnlyError`, :class:`ResourceModifiedError`, :class:`ResourceNotModifiedError`, \
:class:`ResourceNotFoundError`, :class:`ResourceExistsError`
Example
.. code-block:: python
config_setting = ConfigurationSetting(
key="MyKey",
label="MyLabel",
value="my set value",
content_type="my set content type",
tags={"my set tag": "my set tag value"}
)
returned_config_setting = client.set_configuration_setting(config_setting)
"""
key_value = configuration_setting._to_generated()
custom_headers = CaseInsensitiveDict(kwargs.get("headers")) # type: Mapping[str, Any]
error_map = {401: ClientAuthenticationError, 409: ResourceReadOnlyError}
if match_condition == MatchConditions.IfNotModified:
error_map[412] = ResourceModifiedError
if match_condition == MatchConditions.IfModified:
error_map[412] = ResourceNotModifiedError
if match_condition == MatchConditions.IfPresent:
error_map[412] = ResourceNotFoundError
if match_condition == MatchConditions.IfMissing:
error_map[412] = ResourceExistsError
try:
key_value_set = self._impl.put_key_value(
entity=key_value,
key=key_value.key, # type: ignore
label=key_value.label,
if_match=prep_if_match(configuration_setting.etag, match_condition),
if_none_match=prep_if_none_match(
configuration_setting.etag, match_condition
),
headers=custom_headers,
error_map=error_map,
)
return ConfigurationSetting._from_generated(key_value_set)
except HttpResponseError as error:
e = error_map[error.status_code]
raise e(message=error.message, response=error.response)
except binascii.Error:
raise binascii.Error("Connection string secret has incorrect padding")
@distributed_trace
def delete_configuration_setting(self, key, label=None, **kwargs):
# type: (str, Optional[str], **Any) -> ConfigurationSetting
"""Delete a ConfigurationSetting if it exists
:param key: key used to identify the ConfigurationSetting
| |
> columns:
self.cursor.moveAbsolute(columns - 1, cursorY)
# get cursor position again, maybe it was moved
cursorX, cursorY = self.cursor.getPosition()
qp.setOpacity(0.8)
if self.isInEditMode():
qp.setOpacity(0.5)
# cursor on text
qp.drawRect((self.COLUMNS * 3 + self.gap + cursorX) * self.fontWidth, cursorY * self.fontHeight + 2,
self.fontWidth, self.fontHeight)
# cursor on hex
if not self.isInEditMode():
qp.drawRect(cursorX * 3 * self.fontWidth, cursorY * self.fontHeight + 2, 2 * self.fontWidth,
self.fontHeight)
else:
if self.highpart:
qp.drawRect(cursorX * 3 * self.fontWidth, cursorY * self.fontHeight + 2, 1 * self.fontWidth,
self.fontHeight)
else:
qp.drawRect(cursorX * 3 * self.fontWidth + self.fontWidth, cursorY * self.fontHeight + 2,
1 * self.fontWidth, self.fontHeight)
qp.setOpacity(1)
def keyFilter(self):
return [
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Right),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Left),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Up),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Down),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_End),
(QtCore.Qt.ControlModifier, QtCore.Qt.Key_Home),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Right),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Left),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Up),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Down),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_End),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_Home),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_PageDown),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_PageUp)
]
def anon(self, dx, dy):
self.scroll(dx, dy)
# scroll modifies datamodel offset, so we must do scroll and cursor
# operations toghether
y, x = self.dataModel.getXYInPage(self.dataModel.getDataSize() - 1)
if self.getCursorAbsolutePosition() >= self.dataModel.getDataSize():
y, x = self.dataModel.getXYInPage(self.dataModel.getDataSize() - 1)
self.cursor.moveAbsolute(x, y)
# we call draw() again because it was called before by scroll()
# and the cursor is already painted but it's not in correct position
# kinda hack, don't really like it
self.draw()
    def handleEditMode(self, modifiers, key, event):
        """Consume one hex digit in edit mode and patch the underlying byte(s).

        Writes the typed nibble into the high or low half of the byte under
        the cursor -- or, when ALT is held, of every byte in the current
        selection -- then highlights the modified range and advances the
        nibble/cursor state.
        """
        # only hexadecimal digit keys are meaningful in edit mode
        if str(event.text()).lower() in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e',
                                         'f']:
            offs = self.getCursorOffsetInPage()
            b = self.dataModel.getBYTE(self.dataModel.getOffset() + offs)
            if b is None:
                return
            z = int(str(event.text()), 16)
            # compute nibble: replace the high or low half of the byte
            if self.highpart:
                b = ((z << 4) | (b & 0x0F)) & 0xFF
            else:
                b = ((b & 0xF0) | (z & 0x0F)) & 0xFF
            # ALT + digit patches the whole current selection at once
            block = modifiers == QtCore.Qt.AltModifier and self.selector.getCurrentSelection()
            # change block or single byte
            if block:
                # multiple bytes, with ALT key
                if self.selector.getCurrentSelection():
                    u, v = self.selector.getCurrentSelection()
                    for x in range(u, v):
                        b = self.dataModel.getBYTE(x)
                        if self.highpart:
                            b = ((z << 4) | (b & 0x0F)) & 0xFF
                        else:
                            b = ((b & 0xF0) | (z & 0x0F)) & 0xFF
                        self.dataModel.setData_b(x, chr(b))
            else:
                self.dataModel.setData_b(self.dataModel.getOffset() + offs, chr(b))
            # decorate the patched range so the edit is visually highlighted
            if block:
                self.transformationEngine = RangePen(self.original_textdecorator, u, v,
                                                     QtGui.QPen(QtGui.QColor(218, 94, 242), 0, QtCore.Qt.SolidLine),
                                                     ignoreHighlights=True)
            else:
                z = self.dataModel.getOffset() + offs
                # TODO: avoid re-wrapping on every edit; reuse the original transformation engine
                self.transformationEngine = RangePen(self.original_textdecorator, z, z + 0,
                                                     QtGui.QPen(QtGui.QColor(218, 94, 242), 0, QtCore.Qt.SolidLine),
                                                     ignoreHighlights=True)
            # if we are at the end of a row, we must also redraw the previous line
            highpart = self.highpart
            # for single-byte mode only, move the cursor after the low nibble
            if not block:
                x, old_y = self.cursor.getPosition()
                if not self.highpart:
                    self.moveCursor(Directions.Right)
                x, y = self.cursor.getPosition()
            # toggle which nibble the next keystroke edits
            if highpart:
                self.highpart = False
            else:
                self.highpart = True
            if block:
                self.draw(refresh=True)
            else:
                self.draw(refresh=True, row=y, howMany=1)
                if y > old_y:
                    self.draw(refresh=True, row=y - 1, howMany=1)
    def handleKeyEvent(self, modifiers, key, event=None):
        """Dispatch a key press/release to selection, scrolling and cursor ops.

        Returns True when the event was consumed, False otherwise. Scroll and
        draw work is queued through addop() rather than executed inline.
        """
        if event.type() == QtCore.QEvent.KeyRelease:
            # releasing Shift ends an in-progress selection
            if key == QtCore.Qt.Key_Shift:
                self.stopSelection()
                return True
        if event.type() == QtCore.QEvent.KeyPress:
            if modifiers == QtCore.Qt.ShiftModifier:
                # Shift + navigation starts/extends a selection
                keys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Down, QtCore.Qt.Key_Up,
                        QtCore.Qt.Key_End, QtCore.Qt.Key_Home]
                if key in keys:
                    self.startSelection()
                if key == QtCore.Qt.Key_Question:
                    self.annotationWindow()
            if modifiers == QtCore.Qt.AltModifier:
                # Alt+A: add annotation as child of the selected tree item
                if key == QtCore.Qt.Key_A:
                    self.add_annotation(1)
                    return True
            if modifiers == QtCore.Qt.ControlModifier:
                # Ctrl+A: add annotation as a top-level tree item
                if key == QtCore.Qt.Key_A:
                    self.add_annotation(2)
                # Ctrl + arrows scroll the view by one column/row
                # NOTE(review): Right/Down go through anon() (scroll + cursor
                # clamp) while Left/Up call scroll() directly -- looks
                # intentional (only those directions can push the cursor past
                # the end of the data), but confirm
                if key == QtCore.Qt.Key_Right:
                    self.addop((self.anon, -1, 0))
                if key == QtCore.Qt.Key_Left:
                    self.addop((self.scroll, 1, 0))
                if key == QtCore.Qt.Key_Down:
                    self.addop((self.anon, 0, -1))
                if key == QtCore.Qt.Key_Up:
                    self.addop((self.scroll, 0, 1))
                if key == QtCore.Qt.Key_End:
                    self.moveCursor(Directions.CtrlEnd)
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_Home:
                    self.moveCursor(Directions.CtrlHome)
                    self.addop((self.draw,))
                return True
            else:  # no (or any other) modifier: plain navigation keys
                if key == QtCore.Qt.Key_Escape:
                    self.selector.resetSelections()
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_Left:
                    self.moveCursor(Directions.Left)
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_Right:
                    self.moveCursor(Directions.Right)
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_Down:
                    self.moveCursor(Directions.Down)
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_End:
                    self.moveCursor(Directions.End)
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_Home:
                    self.moveCursor(Directions.Home)
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_Up:
                    self.moveCursor(Directions.Up)
                    self.addop((self.draw,))
                if key == QtCore.Qt.Key_PageDown:
                    self.addop((self.scrollPages, 1))
                if key == QtCore.Qt.Key_PageUp:
                    self.addop((self.scrollPages, -1))
                if key == QtCore.Qt.Key_F6:
                    # cycle the number of hex columns and keep the cursor in range
                    self.changeHexColumns()
                    x, y = self.cursor.getPosition()
                    columns = self.HexColumns[self.idxHexColumns]
                    if x > columns:
                        self.cursor.moveAbsolute(columns - 1, y)
                    self.addop((self.draw,))
                # in edit mode, the same keystroke may also patch a nibble
                if self.isInEditMode():
                    self.handleEditMode(modifiers, key, event)
                return True
        return False
    def isEditable(self):
        """The hex view supports in-place editing (see handleEditMode)."""
        return True
def setEditMode(self, mode):
super(HexViewMode, self).setEditMode(mode)
if not mode:
self.highpart = True
self.transformationEngine = self.original_textdecorator
self.transformationEngine.reset()
self.draw(refresh=True)
    def addop(self, t):
        """Queue a deferred operation: a tuple of (callable, *args)."""
        self.Ops.append(t)
def getHeaderInfo(self):
s = ''
for i in range(self.HexColumns[self.idxHexColumns]):
s += '{0} '.format('{0:x}'.format(i).zfill(2))
s += self.gap * ' ' + 'Text'
return s
def annotationWindow(self):
w = self.ann_w.treeWidget
w.setDragEnabled(True)
w.viewport().setAcceptDrops(True)
w.setDropIndicatorShown(True)
self.ann_w.show()
@QtCore.pyqtSlot("QItemSelection, QItemSelection")
def selectionChanged(self, selected, deselected):
item = self.ann_w.treeWidget.currentItem()
if item:
offset = item.getOffset()
size = item.getSize()
u = offset
v = offset + size
self.selector.addSelection((u, v, QtGui.QBrush(QtGui.QColor(125, 255, 0)), 0.2),
type=TextSelection.SelectionType.NORMAL)
self.goTo(u)
@QtCore.pyqtSlot("QTreeWidgetItem*, int")
def itemChanged(self, item, column):
ID_NAME = 0
ID_DESCRIPTION = 4
s = str(item.text(column))
if column == ID_NAME:
item.setName(s)
if column == ID_DESCRIPTION:
item.setDescription(s)
    def add_annotation(self, mode):
        """Create an annotation row for the current selection.

        mode 1 (Alt+A): add as child of the selected tree item (or of the
        first top-level item); mode 2 (Ctrl+A): add as a top-level item.
        Does nothing when there is no current selection.
        """
        # (re)wire the tree's signals to our slots; uses the old-style
        # PyQt signal API
        QtCore.QObject.connect(self.ann_w.treeWidget.selectionModel(),
                               QtCore.SIGNAL('selectionChanged(QItemSelection, QItemSelection)'), self.selectionChanged)
        QtCore.QObject.connect(self.ann_w.treeWidget, QtCore.SIGNAL('itemChanged(QTreeWidgetItem*, int)'),
                               self.itemChanged)
        # column indices of the annotation tree
        ID_NAME = 0
        ID_OFFSET = 1
        ID_SIZE = 2
        ID_VALUE = 3
        ID_DESCRIPTION = 4
        ID_COLOR = 5
        if self.selector.getCurrentSelection():
            u, v = self.selector.getCurrentSelection()
        else:
            return
        # random color for the new annotation's highlight
        import random
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        opacity = 0.4
        if mode == 2:
            opacity = 0.25
        qcolor = QtGui.QColor(r, g, b)
        added = self.selector.addSelection((u, v, QtGui.QBrush(qcolor), opacity),
                                           type=TextSelection.SelectionType.PERMANENT)
        # if not added:
        #    return
        t = self.ann_w.treeWidget
        row = AnnonItem(None, self.ann_w.treeWidget, qcolor.name())
        row.setFlags(QtCore.Qt.ItemIsSelectable |
                     QtCore.Qt.ItemIsEnabled |
                     QtCore.Qt.ItemIsEditable |
                     QtCore.Qt.ItemIsDropEnabled |
                     QtCore.Qt.ItemIsDragEnabled)
        t.setAcceptDrops(True)
        t.setDragEnabled(True)
        t.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
        # offset/size/value/color columns are read-only
        delegate = NoEditDelegate()
        t.setItemDelegateForColumn(1, delegate)
        t.setItemDelegateForColumn(2, delegate)
        t.setItemDelegateForColumn(3, delegate)
        t.setItemDelegateForColumn(5, delegate)
        row.setName(self.ann_w.newFieldName())
        row.setOffset(u)
        # row.setText(ID_NAME, 'field_0')
        # row.setText(ID_OFFSET, hex(u))
        size = v - u
        # row.setText(ID_SIZE, hex(size))
        row.setSize(size)
        # render the selected bytes: integer view for 1/2/4-byte fields,
        # raw repr otherwise
        value = ''
        if size == 1:
            value = self.dataModel.getBYTE(u, asString=True)
        elif size == 2:
            value = self.dataModel.getWORD(u, asString=True)
        elif size == 4:
            value = self.dataModel.getDWORD(u, asString=True)
        else:
            value = repr(str(self.dataModel.getStream(u, v)))
        # row.setText(ID_VALUE, value)
        row.setValue(value)
        # cmb.setCurrentIndex(cmb.findData(w))
        if mode == 2:
            self.ann_w.treeWidget.addTopLevelItem(row)
        if mode == 1:
            # attach under the selected item, falling back to the first root
            selected = t.selectedItems()
            if len(selected) == 1:
                selected = selected[0]
            else:
                selected = t.topLevelItem(0)
            if selected:
                selected.addChild(row)
            t.expandItem(row)
        # cmb = QColorButton()
        # cmb.setColor(qcolor.name())
        # self.ann_w.treeWidget.setItemWidget(row, ID_COLOR, cmb)
        self.ann_w.treeWidget.setItemWidget(row, ID_COLOR, row.cmb)
        # self.ann_w.treeWidget.openPersistentEditor(row, 0)
        # self.ann_w.treeWidget.editItem(row, 0)
        # self.ann_w.treeWidget.editItem(row, 3)
class NoEditDelegate(QtWidgets.QStyledItemDelegate):
    """Item delegate that refuses to create editors, making a column read-only."""

    def __init__(self, parent=None):
        super(NoEditDelegate, self).__init__(parent)

    def createEditor(self, parent, option, index):
        # returning None tells Qt the cell cannot be edited
        return None
class AnnonItem(QtWidgets.QTreeWidgetItem):
    """One annotation row: name, offset, size, value, description, color.

    Each setter stores a private copy of the value and mirrors it into the
    matching tree column so the widget always displays the current state.
    """
    # column indices in the annotation tree
    ID_NAME = 0
    ID_OFFSET = 1
    ID_SIZE = 2
    ID_VALUE = 3
    ID_DESCRIPTION = 4
    ID_COLOR = 5

    def __init__(self, x, parent, color):
        super(AnnonItem, self).__init__(x)
        self._color = color
        self._t_parent = parent
        # color swatch widget for the ID_COLOR column (installed by the caller)
        self.cmb = QColorButton()
        self.cmb.setColor(self._color)

    def _sync(self, column, text):
        # mirror a stored value into its tree column
        self.setText(column, text)

    def setName(self, name):
        self._name = name
        self._sync(self.ID_NAME, name)

    def getName(self):
        return self._name

    def setOffset(self, offset):
        self._offset = offset
        self._sync(self.ID_OFFSET, hex(offset))

    def getOffset(self):
        return self._offset

    def setSize(self, size):
        self._size = size
        self._sync(self.ID_SIZE, hex(size))

    def getSize(self):
        return self._size

    def setValue(self, value):
        self._value = value
        self._sync(self.ID_VALUE, value)

    def getValue(self):
        return self._value

    def setDescription(self, description):
        self._description = description
        self._sync(self.ID_DESCRIPTION, description)

    def getDescription(self):
        return self._description
class QColorButton(QtWidgets.QPushButton):
    """
    Custom Qt Widget to show a chosen color.
    Left-clicking the button shows the color-chooser, while
    right-clicking resets the color to None (no-color).

    Based on http://martinfitzpatrick.name/article/qcolorbutton-a-color-selector-tool-for-pyqt/
    """
    colorChanged = QtCore.pyqtSignal()

    def __init__(self, *args, **kwargs):
        super(QColorButton, self).__init__(*args, **kwargs)
        self._color = None
        self.setMaximumWidth(32)
        self.pressed.connect(self.onColorPicker)

    def setColor(self, color):
        """Set the swatch color (a color-name/hex string, or None) and notify."""
        if color != self._color:
            self._color = color
            self.colorChanged.emit()
        if self._color:
            self.setStyleSheet("background-color: %s;" % self._color)
        else:
            self.setStyleSheet("")

    def color(self):
        """Return the current color string, or None."""
        return self._color

    def onColorPicker(self):
        """
        Show color-picker dialog to select color.
        Qt will use the native dialog by default.
        """
        # QColorDialog is a widget and lives in QtWidgets under Qt5; the
        # original referenced QtGui.QColorDialog, which raises AttributeError
        # in PyQt5 (this file uses QtWidgets for all other widget classes).
        dlg = QtWidgets.QColorDialog(QtGui.QColor(self._color), None)
        # if self._color:
        #    dlg.setCurrentColor(QtGui.QColor(self._color))
        if dlg.exec_():
            self.setColor(dlg.currentColor().name())

    def mousePressEvent(self, e):
        # right-click clears the color back to None
        if e.button() == QtCore.Qt.RightButton:
            self.setColor(None)
        return super(QColorButton, self).mousePressEvent(e)
class ComboBoxItem(QtWidgets.QComboBox):
    """Combo box that remembers which tree item and column it edits."""
    def __init__(self, item, column):
        super(ComboBoxItem, self).__init__()
        self.item = item      # tree item this combo belongs to
        self.column = column  # column index within that item
class Annotation(QtWidgets.QDialog):
_fieldIdx = 0
    def __init__(self, parent, view):
        """Build the annotation dialog: load annotation.ui and wire the tree.

        parent is the hosting widget; view is the hex view the annotations
        refer to (forwarded to the tree's event filter).
        """
        super(Annotation, self).__init__(parent)
        self.parent = parent
        self.view = view
        # keep a handle on the base-class show() before our override shadows it
        self.oshow = super(Annotation, self).show
        # the .ui file sits next to this source file
        root = os.path.dirname(os.path.realpath(__file__))
        self.ui = loadUi(os.path.join(root, 'annotation.ui'), baseinstance=self)
        # self.ei = ImportsEventFilter(plugin, self.ui.treeWidgetImports)
        self.ei = treeEventFilter(view, self.ui.treeWidget)
        self.ui.treeWidget.installEventFilter(self.ei)
        self.initUI()
def newFieldName(self):
name = 'field_{}'.format(self._fieldIdx)
self._fieldIdx += 1
return name
def show(self):
# TODO: remember position? resize plugin windows when parent resize?
pwidth = self.parent.parent.size().width()
pheight = self.parent.parent.size().height()
width = self.ui.treeWidget.size().width() + 15
height = self.ui.treeWidget.size().height() + 15
self.setGeometry(pwidth - width | |
of this ComputerGroup. # noqa: E501
:rtype: str
"""
return self._virtual_name
    @virtual_name.setter
    def virtual_name(self, virtual_name):
        # type: (str) -> None
        """Sets the virtual_name of this ComputerGroup.
        Name of the ComputerGroup as it exists in VMware vCloud. Ignored if the ComputerGroup is not from vCloud. Searchable as String. # noqa: E501
        :param virtual_name: The virtual_name of this ComputerGroup. # noqa: E501
        :type: str
        """
        self._virtual_name = virtual_name
    @property
    def cloud_type(self):
        # type: () -> str
        """Gets the cloud_type of this ComputerGroup. # noqa: E501
        Cloud platform of the ComputerGroup. Ignored if the ComputerGroup does not represent a cloud container. Searchable as Choice. # noqa: E501
        :return: The cloud_type of this ComputerGroup. # noqa: E501
        :rtype: str
        """
        return self._cloud_type
    @cloud_type.setter
    def cloud_type(self, cloud_type):
        # type: (str) -> None
        """Sets the cloud_type of this ComputerGroup.
        Cloud platform of the ComputerGroup. Ignored if the ComputerGroup does not represent a cloud container. Searchable as Choice. # noqa: E501
        :param cloud_type: The cloud_type of this ComputerGroup. # noqa: E501
        :type: str
        """
        # reject anything outside the API's closed choice set (including None)
        allowed_values = ["amazon", "vcloud-private-cloud", "azure", "azure-arm", "gcp"]  # noqa: E501
        if cloud_type not in allowed_values:
            raise ValueError(
                "Invalid value for `cloud_type` ({0}), must be one of {1}"  # noqa: E501
                .format(cloud_type, allowed_values)
            )
        self._cloud_type = cloud_type
    @property
    def cloud_resource_type(self):
        # type: () -> str
        """Gets the cloud_resource_type of this ComputerGroup. # noqa: E501
        Cloud container type of the ComputerGroup. This is platform dependent. Ignored if the ComputerGroup does not represent a cloud container. Searchable as Numeric. # noqa: E501
        :return: The cloud_resource_type of this ComputerGroup. # noqa: E501
        :rtype: str
        """
        return self._cloud_resource_type
    @cloud_resource_type.setter
    def cloud_resource_type(self, cloud_resource_type):
        # type: (str) -> None
        """Sets the cloud_resource_type of this ComputerGroup.
        Cloud container type of the ComputerGroup. This is platform dependent. Ignored if the ComputerGroup does not represent a cloud container. Searchable as Numeric. # noqa: E501
        :param cloud_resource_type: The cloud_resource_type of this ComputerGroup. # noqa: E501
        :type: str
        """
        # reject anything outside the API's closed choice set (including None)
        allowed_values = ["physical", "top-level", "partition", "aws-ec2-instance", "aws-workspace", "vcloud-organization", "vcloud-catalog", "vcloud-networks", "vcloud-virtual-data-center", "vcloud-virtual-application", "vcloud-virtual-application-template", "vcloud-virtual-machine", "azure-instance", "azure-arm-instance"]  # noqa: E501
        if cloud_resource_type not in allowed_values:
            raise ValueError(
                "Invalid value for `cloud_resource_type` ({0}), must be one of {1}"  # noqa: E501
                .format(cloud_resource_type, allowed_values)
            )
        self._cloud_resource_type = cloud_resource_type
@property
def cloud_id(self):
    """Cloud container ID of this ComputerGroup.

    Ignored if the ComputerGroup does not represent a cloud container.
    Searchable as Numeric.

    :return: the current cloud container ID  # noqa: E501
    :rtype: int
    """
    return self._cloud_id

@cloud_id.setter
def cloud_id(self, cloud_id):
    """Set the cloud container ID of this ComputerGroup.

    :param cloud_id: the new cloud container ID  # noqa: E501
    :type: int
    """
    self._cloud_id = cloud_id
@property
def amazon_account_id(self):
    """AWS account ID of this ComputerGroup.

    Set to `0` if the ComputerGroup does not represent an Amazon Web
    Services account. Searchable as Numeric.

    :return: the current account ID  # noqa: E501
    :rtype: int
    """
    return self._amazon_account_id

@amazon_account_id.setter
def amazon_account_id(self, amazon_account_id):
    """Set the AWS account ID of this ComputerGroup.

    :param amazon_account_id: the new account ID  # noqa: E501
    :type: int
    """
    self._amazon_account_id = amazon_account_id
@property
def amazon_region_id(self):
    """AWS region ID of this ComputerGroup.

    Set to `0` if the ComputerGroup does not represent an AWS region;
    amazonWorkspacesID is used instead for WorkSpaces nodes. Searchable
    as Numeric.

    :return: the current region ID  # noqa: E501
    :rtype: int
    """
    return self._amazon_region_id

@amazon_region_id.setter
def amazon_region_id(self, amazon_region_id):
    """Set the AWS region ID of this ComputerGroup.

    :param amazon_region_id: the new region ID  # noqa: E501
    :type: int
    """
    self._amazon_region_id = amazon_region_id
@property
def amazon_vpcid(self):
    """AWS Virtual Private Cloud ID of this ComputerGroup.

    Set to `0` if the ComputerGroup does not represent an AWS VPC.
    Searchable as Numeric.

    :return: the current VPC ID  # noqa: E501
    :rtype: int
    """
    return self._amazon_vpcid

@amazon_vpcid.setter
def amazon_vpcid(self, amazon_vpcid):
    """Set the AWS Virtual Private Cloud ID of this ComputerGroup.

    :param amazon_vpcid: the new VPC ID  # noqa: E501
    :type: int
    """
    self._amazon_vpcid = amazon_vpcid
@property
def amazon_subnet_id(self):
    """AWS subnet ID of this ComputerGroup.

    Set to `0` if the ComputerGroup does not represent an AWS subnet.
    Searchable as Numeric.

    :return: the current subnet ID  # noqa: E501
    :rtype: int
    """
    return self._amazon_subnet_id

@amazon_subnet_id.setter
def amazon_subnet_id(self, amazon_subnet_id):
    """Set the AWS subnet ID of this ComputerGroup.

    :param amazon_subnet_id: the new subnet ID  # noqa: E501
    :type: int
    """
    self._amazon_subnet_id = amazon_subnet_id
@property
def amazon_workspaces_id(self):
    """AWS WorkSpaces ID of this ComputerGroup.

    Set to `0` if the ComputerGroup does not represent an AWS WorkSpace;
    used instead of amazonRegionID for WorkSpaces nodes under a region.
    Searchable as Numeric.

    :return: the current WorkSpaces ID  # noqa: E501
    :rtype: int
    """
    return self._amazon_workspaces_id

@amazon_workspaces_id.setter
def amazon_workspaces_id(self, amazon_workspaces_id):
    """Set the AWS WorkSpaces ID of this ComputerGroup.

    :param amazon_workspaces_id: the new WorkSpaces ID  # noqa: E501
    :type: int
    """
    self._amazon_workspaces_id = amazon_workspaces_id
@property
def amazon_directory_id(self):
    """AWS directory ID of this ComputerGroup.

    Set to `0` if the ComputerGroup does not represent an AWS directory.
    Searchable as Numeric.

    :return: the current directory ID  # noqa: E501
    :rtype: int
    """
    return self._amazon_directory_id

@amazon_directory_id.setter
def amazon_directory_id(self, amazon_directory_id):
    """Set the AWS directory ID of this ComputerGroup.

    :param amazon_directory_id: the new directory ID  # noqa: E501
    :type: int
    """
    self._amazon_directory_id = amazon_directory_id
@property
def id(self):
    """ID of this ComputerGroup.

    Searchable as ID.

    :return: the current ID  # noqa: E501
    :rtype: int
    """
    return self._id

@id.setter
def id(self, id):
    """Set the ID of this ComputerGroup.

    :param id: the new ID  # noqa: E501
    :type: int
    """
    self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
| |
"estimated %sCP zenith Tsys in vacuum= %6.2f" % (pol, Tvac)
self.header.add_history(msg)
# average records here if needed.
subch_IDs = list(range(self.props['num cycles']))
self.logger.debug("fit_mean_power_to_airmass: subchannels: %s",
subch_IDs)
for subch in subch_IDs:
subchannel = subch+1
# Get the data for this subchannel.
mean_power = good_wx_data['TSYS'][sig][:,subch,beam_idx,IFidx]
elevation = good_wx_data['ELEVATIO'][sig]
if last:
E = elevation[first:last+1]
P = mean_power[first:last+1]
else:
E = elevation[first:]
P = mean_power[first:]
# fit the data
# estimate the slope and intercept:
ind_max = (1/numpy.sin(numpy.deg2rad(E))).argmax()
ind_min = (1/numpy.sin(numpy.deg2rad(E))).argmin()
x_max = (1/numpy.sin(numpy.deg2rad(E)))[ind_max]
x_min = (1/numpy.sin(numpy.deg2rad(E)))[ind_min]
y_max = P[ind_max]
y_min = P[ind_min]
slope = (y_max-y_min)/(x_max-x_min)
intercept = y_min - slope*x_min
self.logger.debug(
"fit_mean_power_to_airmass: est. intercept = %f and slope = %f",
intercept,slope)
#popt, pcov = curve_fit(opacity_fitting, E, P, p0=[intercept, slope])
#intercept, slope = popt[0], popt[1]
x = 1/numpy.sin(numpy.deg2rad(E))
slope, intercept, r_value, p_value, std_err = linregress(x, P)
errors = (0, std_err)
self.logger.info("fit_mean_power_to_airmass:" +
" B%dP%d sch%d %s intercept, slope: %f, %f",
beam_idx+1, IFidx+1, subchannel, sw_state[sig],
intercept, slope)
#self.logger.debug("fit_mean_power_to_airmass: covariance = %s", pcov)
#if pcov == numpy.inf:
# continue
#errors = numpy.sqrt(numpy.diag(pcov))
msg = "IF%d, subch%d %s gain=%9.3e +/- %9.3e counts, gain_slope=%9.3e +/- %9.3e counts/am" % \
(IFidx+1, subchannel, sw_state[sig],
intercept, errors[0], slope, errors[1])
self.header.add_history(msg)
gain = Tvac/intercept
gain_err = (gain/intercept)*errors[0]
K_per_am = gain*slope
K_per_am_err = gain*errors[1]
self.logger.info("fit_mean_power_to_airmass:" +
" B%dP%d sch%d %s gain, K/am: %6.2f +/- %5.2f, %4.2f +/- %4.2f",
beam_idx+1, IFidx+1, subchannel, sw_state[sig],
gain, gain_err, K_per_am, K_per_am_err)
if replace:
# there are conversion constants for each switch state (SIG),
# beam (6th data axis), IF (or pol, 4th data axis) and
# subchannel (CYCLE)
if len(self.data['TSYS'].shape) == 5:
# WVSR data
rowlist = list(self.select({'SIG': sig, 'CYCLE': subchannel}))
rowlist.sort()
first = rowlist[0]
last = rowlist[-1]+1
step = rowlist[1]-rowlist[0]
self.data['TSYS'][first:last:step, IFidx, 0, 0] *= gain
else:
self.logger.warning(
"fit_mean_power_to_airmass: unknown shape; Tsys not computed")
def properties(self):
    """
    Get table properties and the rank of one row of the data array.

    properties::
      num cycles       - number of distinct non-zero CYCLE values
      num chans        - number of spectrometer channels in diagnostic spectra
      num IFs          - at most two, one per pol
      full Stokes      - four Stokes parameters instead of an IF for each pol
      time axis        - True if scan is divided into records
      num beams        - number of beams
      num records      - if 'time axis', number of records in each scan
      num IFspec chans - channels of the IF spectra (full-Stokes data only)

    Return:
      (props, ndim) - the dict above and the number of axes of one data row

    Notes
    =====
    * In a DSN SDFITS file there is only one backend
    * If there is a time axis, the number of records per scan may differ
    * A subchannel is a piece of band for a spectrometer with coarse and fine
      channels
    * cycles are used for different subchannels and for position switching
    """
    props = {}
    # most parameters must be the same for all rows in a session, so the
    # first row's data shape is taken as representative
    spectrumshape = self.data[self.dataname][0].shape  # for common dims
    # CYCLE == 0 flags bad rows, so they are excluded from the cycle count
    props["num cycles"] = \
        len(numpy.unique(self.data['CYCLE'][self.data['CYCLE'] != 0]))
    if len(spectrumshape) == 3:  # (dec, RA, freq)
        # bare minimum SPECTRUM dimensions
        props["num chans"] = int(spectrumshape[-1])
        props["num IFs"] = 1   # no polarization axis
        props["num beams"] = 1  # no beam axis
    elif len(spectrumshape) >= 4:  # (at least pol, dec, RA, freq)
        # one beam with polarization at least
        props["num chans"] = int(spectrumshape[-1])
        props["num IFs"] = 2
        if spectrumshape[-4] == 4:
            # has STOKES dimension
            props["num beams"] = 1  # no beam axis
            props["full Stokes"] = True
            if 'TSYS' in self.data.columns.names:
                props["num IFs"] = 2
                IFspecshape = self.data['IFSPECTR'][0].shape
                props["num IFspec chans"] = int(IFspecshape[-1])
            else:  # must be 3 or less
                # BUGFIX: this key was misspelled "num _IFs", which left
                # "num IFs" incorrectly set to 2 when there is no IF data
                props["num IFs"] = 1
                self.logger.warning(
                    "properties: no IF data; will use Stokes I for monitoring")
                props["num IFspec chans"] = props["num chans"]
        elif spectrumshape[-4] == 2:
            # has straight pols (L,R or H,V)
            props["num IFs"] = 2
            props["full Stokes"] = False
            props["num beams"] = 1  # no beam axis
        if len(spectrumshape) >= 5:  # (time, pol, dec, RA, freq)
            props["time axis"] = True
            if len(spectrumshape) == 5:
                props["num beams"] = 1  # no beam axis
        else:
            props["time axis"] = False
        if len(spectrumshape) == 6:  # (beam, time, pol, dec, RA, freq)
            props["num beams"] = int(spectrumshape[0])
        else:
            props["num beams"] = 1
    # time axis length may vary due to corrupted records
    if len(spectrumshape) >= 5:  # (time, pol, dec, RA, freq)
        props["num records"] = {}
        cycle_indices = list(range(props["num cycles"]))
        for row in cycle_indices:  # check each cycle
            # NOTE(review): assumes cycle k's first scan sits in row k of
            # the table -- verify against the writer's row ordering
            cycle = self.data['CYCLE'][row]
            spectrumshape = self.data[self.dataname][row].shape
            # just do the first scan
            if props["num beams"] > 1:
                props["num records"][cycle] = int(spectrumshape[1])
            else:
                props["num records"][cycle] = int(spectrumshape[0])
    else:
        # no time axis: exactly one record per cycle
        props["num records"] = {}
        for cycle in self.cycle_keys:
            props["num records"][cycle] = 1
    self.logger.debug("properties:\n %s", props)
    return props, len(spectrumshape)
def remove_tones(self, rows=None):
    """
    Remove the phase-cal tones (one per MHz of bandwidth) from the spectra.

    Arguments:
      rows : row indices to process; defaults to self.acs_rows

    Fixes relative to the original:
      * channel arithmetic now uses floor division; under Python 3 the old
        true division produced floats, which break range() and indexing
      * the polarization loop iterates over range('num IFs'); the original
        assigned the bare integer count, which is not iterable
    """
    if not rows:
        # no explicit selection: process every ACS row
        rows = self.acs_rows
    for row in rows:
        # offset of the observing frequency from the nearest MHz boundary;
        # the tones sit on exact MHz multiples
        freq_offset = self.data['OBSFREQ'][row] - \
                      round(self.data['OBSFREQ'][row], -6)
        self.logger.debug("remove_tones: frequency offset = %d", freq_offset)
        num_chans = self.props['num chans']
        # the center of the band is the lowest channel of the upper half
        cntr_chan = num_chans // 2
        # the nearest tone below the center channel
        chan_offset = round(num_chans*freq_offset/self.data['BANDWIDT'][row])
        self.logger.debug("remove_tones: channel offset = %d", chan_offset)
        number_of_tones = int(self.data['BANDWIDT'][row]/1e6)
        # assumes num chans is a multiple of the tone count -- TODO confirm
        tone_spacing = self.props['num chans'] // number_of_tones
        for tone in range(-number_of_tones//2, number_of_tones//2):
            if chan_offset > 0:
                # there are more tones above the tone nearest the center
                # than below
                tone += 1
                # center freq is middle of channel N/2
                tone_channel = cntr_chan - chan_offset-1 + tone*tone_spacing
            else:
                tone_channel = cntr_chan - chan_offset + tone*tone_spacing
            self.logger.debug("remove_tones: tone %d channel = %d",
                              tone, tone_channel)
            if self.props['full Stokes']:
                pols = list(range(4))
            else:
                # BUGFIX: iterate the polarization indices, not the count
                pols = list(range(self.props['num IFs']))
            for pol in pols:
                self.data[self.dataname][row][pol, 0, 0] = support.clobber(
                    self.data[self.dataname][row][pol, 0, 0], tone_channel)
def get_rows(self, keyword, value):
    """
    Return the indices of all rows whose 'keyword' column equals 'value'.
    """
    matches = numpy.where(self.data[keyword] == value)
    return matches[0]
def select(self, selections):
    """
    Return the rows satisfying every criterion in 'selections'.

    'selections' is a dict mapping column names to required values; the
    result is the intersection of the per-column matches, as a list.
    """
    criteria = list(selections.items())
    first_key, first_value = criteria[0]
    matched = set(self.get_rows(first_key, first_value))
    for key, value in criteria[1:]:
        matched &= set(self.get_rows(key, value))
    return list(matched)
class Data(object):
    """
    A subset of data extracted from an average difference spectrum

    Attributes::
      channels - channel range selected from the original spectrum
      frame    - reference frame for the X-axis
      logger   - logging.Logger
      parent   - the Table this subset was extracted from
      rows     - rows selected from parent
      x        - X-axis values (numpy array)
      y        - Multi-dimensional data array, first axis is polarization
    """
    def __init__(self, parent, x, y):
        """
        initiate a Data object

        @param parent : object providing a .logger for naming this logger
        @param x : X-values
        @type  x : list or numpy.ndarray

        @param y : data array; presumably shaped (pol, chan) -- TODO confirm
        """
        self.logger = logging.getLogger(parent.logger.name+".Data")
        self.parent = parent
        if type(x) == list:
            self.x = numpy.array(x)
        elif type(x) == numpy.ndarray:
            self.x = x
        else:
            # NOTE(review): only logs; self.x stays unset here and later
            # attribute access would raise AttributeError -- confirm intent
            self.logger.error("__init__: type %s is not valid for X", type(x))
        self.y = y
        self.frame = None
        self.channels = None
        self.rows = None

    def __add__(self, other):
        """
        combine two data objects

        The Y arrays are concatenated along axis 0 and the row lists are
        concatenated; frame and channels are inherited from 'self'.
        Raises RuntimeError if 'other' is not a Data with the same frame.
        """
        if type(other) != DSNFITSexaminer.Table.Data:
            raise RuntimeError("__add__: other type wrong: %s", type(other))
        if other.frame != self.frame:
            raise RuntimeError("__add__: other must have frame type %s",
                               self.frame)
        new_y = numpy.append(self.y, other.y, axis=0)
        new_data = DSNFITSexaminer.Table.Data(self.parent, self.x, new_y)
        new_data.rows = self.rows + other.rows
        new_data.frame = self.frame
        new_data.channels = self.channels
        return new_data

    def remove_baseline(self, exclude=None):
        """
        Fit and subtract a quintic smoothing spline baseline per polarization.

        @param exclude : optional (min, max) channel range left out of the fit
                         (e.g. the line channels)

        Returns (x, baseline-subtracted y, per-pol residual RMS array).
        The original x and y attributes are not modified.
        """
        if exclude:
            minexcl, maxexcl = exclude
            x = numpy.append(self.x[:minexcl], self.x[maxexcl:])
            y = numpy.append(self.y[:minexcl], self.y[maxexcl:])
        else:
            x = self.x
            y = self.y
        # splrep requires monotonically increasing x, so flip if descending
        if x[0] > x[-1]:
            reverse = True
            x = x[::-1]
            y = y[:,::-1]
        else:
            reverse = False
        new_y = numpy.empty_like(self.y)
        rms = []
        for pol in range(self.y.shape[0]):
            # degree-5 spline with smoothing factor 10
            spline = interpolate.splrep(x, y[pol], k=5, s=10)
            residuals = y[pol] - interpolate.splev(x, spline, der=0)
            rms += [residuals.std()]
            # apply baseline correction; evaluate on the flipped axis and
            # flip back when the original axis was descending
            if reverse:
                new_y[pol] = self.y[pol] - interpolate.splev(self.x[::-1], spline, der=0)[::-1]
            else:
                new_y[pol] = self.y[pol] - interpolate.splev(self.x, spline, der=0)
        return self.x, new_y, numpy.array(rms)
# these are now obsolete methods of the Table class
def get_good_wx_data(self):
"""
eliminates rows which have bad data, like 0 for CYCLE, nan, etc
Note that this provides a method for flagging bad data by setting the
CYCLE value of a row to 0.
The good data flags are True if there are any good data and False if
there are no good data | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from generate_unicode_normalization_data import cccs
from generate_unicode_normalization_data import expand_decomp_canonical
from generate_unicode_normalization_data import get_decompositions
from generate_unicode_collation_data import get_frac_uca_cet
from generate_unicode_collation_data import ce_to_cpp
import re
# Template for a generated benchmark translation unit.  The single '{0}'
# placeholder receives the per-chunk benchmark definitions; doubled braces
# are literal braces in the emitted C++.  Note that 'perf_test_form' and
# 'decls' are two names for the same template string.
perf_test_form = decls = '''\
// Copyright (C) 2020 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/collate.hpp>
#include <boost/text/collation_table.hpp>
#include <benchmark/benchmark.h>
boost::text::detail::collation_trie_t const & trie()
{{
static auto const retval = boost::text::detail::make_default_trie();
return retval;
}}
boost::text::collation_table const & table()
{{
static auto const retval = boost::text::default_collation_table();
return retval;
}}
{0}
BENCHMARK_MAIN()
'''
# Template for a generated GTest file checking relative collation order.
# Placeholders: {0} code points, {1} their count, {2} (first, last) index
# pairs, {3} their count, {4} variable weighting, {5} chunk index.  Each
# test normalizes (FCC or NFD), then checks that both sort keys and direct
# collation agree that consecutive inputs are non-decreasing.
relative_collation_tests_form = '''\
// Copyright (C) 2020 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include "collation_tests.hpp"
#include <algorithm>
#include <gtest/gtest.h>
std::array<uint32_t, {1}> const g_cps = {{{{
{0}
}}}};
std::array<std::pair<uint32_t, uint32_t>, {3}> const g_cp_ranges = {{{{
{2}
}}}};
TEST(collation, relative_{4}_{5}_fcc)
{{
std::vector<uint32_t> prev_un_norm;
std::vector<uint32_t> prev_cps;
boost::text::text_sort_key prev_key;
std::vector<uint32_t> curr_un_norm;
std::vector<uint32_t> curr_cps;
boost::text::text_sort_key curr_key;
bool first = true;
std::pair<uint32_t, uint32_t> prev_r;
for (auto r : g_cp_ranges) {{
curr_un_norm.assign(
g_cps.begin() + r.first, g_cps.begin() + r.second);
curr_cps.clear();
boost::text::normalize<boost::text::nf::fcc>(
curr_un_norm, std::back_inserter(curr_cps));
curr_key = boost::text::collation_sort_key(
curr_cps.begin(),
curr_cps.end(),
table(),
boost::text::collation_strength::identical,
boost::text::case_first::off,
boost::text::case_level::off,
boost::text::variable_weighting::{4});
if (!first) {{
EXPECT_LE(compare(prev_key, curr_key), 0)
<< "prev un-norm cps: " << ce_dumper(prev_un_norm)
<< "prev_cps (FCC): " << ce_dumper(prev_cps)
<< "prev_key: " << ce_dumper(prev_key) << "\\n"
<< "curr un-norm cps: " << ce_dumper(curr_un_norm)
<< "curr_cps (FCC): " << ce_dumper(curr_cps)
<< "curr_key: " << ce_dumper(curr_key) << "\\n"
;
std::string prev = boost::text::to_string(prev_cps);
std::string curr = boost::text::to_string(curr_cps);
auto const prev_32 = boost::text::as_utf32(prev);
auto const curr_32 = boost::text::as_utf32(curr);
EXPECT_LE(
boost::text::collate(
prev_32.begin(),
prev_32.end(),
curr_32.begin(),
curr_32.end(),
table(),
boost::text::collation_strength::identical,
boost::text::case_first::off,
boost::text::case_level::off,
boost::text::variable_weighting::{4}),
0)
<< "prev un-norm cps: " << ce_dumper(prev_un_norm)
<< "prev_cps (FCC): " << ce_dumper(prev_cps)
<< "prev_key: " << ce_dumper(prev_key) << "\\n"
<< "curr un-norm cps: " << ce_dumper(curr_un_norm)
<< "curr_cps (FCC): " << ce_dumper(curr_cps)
<< "curr_key: " << ce_dumper(curr_key) << "\\n"
;
}}
std::swap(curr_un_norm, prev_un_norm);
std::swap(curr_cps, prev_cps);
std::swap(curr_key, prev_key);
first = false;
prev_r = r;
}}
}}
TEST(collation, relative_{4}_{5}_nfd)
{{
std::vector<uint32_t> prev_un_norm;
std::vector<uint32_t> prev_cps;
boost::text::text_sort_key prev_key;
std::vector<uint32_t> curr_un_norm;
std::vector<uint32_t> curr_cps;
boost::text::text_sort_key curr_key;
bool first = true;
std::pair<uint32_t, uint32_t> prev_r;
for (auto r : g_cp_ranges) {{
curr_un_norm.assign(
g_cps.begin() + r.first, g_cps.begin() + r.second);
curr_cps.clear();
boost::text::normalize<boost::text::nf::d>(
curr_un_norm, std::back_inserter(curr_cps));
curr_key = boost::text::collation_sort_key(
curr_cps.begin(),
curr_cps.end(),
table(),
boost::text::collation_strength::identical,
boost::text::case_first::off,
boost::text::case_level::off,
boost::text::variable_weighting::{4});
if (!first) {{
EXPECT_LE(compare(prev_key, curr_key), 0)
<< "prev un-norm cps: " << ce_dumper(prev_un_norm)
<< "prev_cps (NFD): " << ce_dumper(prev_cps)
<< "prev_key: " << ce_dumper(prev_key) << "\\n"
<< "curr un-norm cps: " << ce_dumper(curr_un_norm)
<< "curr_cps (NFD): " << ce_dumper(curr_cps)
<< "curr_key: " << ce_dumper(curr_key) << "\\n"
;
std::string prev = boost::text::to_string(prev_cps);
std::string curr = boost::text::to_string(curr_cps);
auto const prev_32 = boost::text::as_utf32(prev);
auto const curr_32 = boost::text::as_utf32(curr);
EXPECT_LE(
boost::text::collate(
prev_32.begin(),
prev_32.end(),
curr_32.begin(),
curr_32.end(),
table(),
boost::text::collation_strength::identical,
boost::text::case_first::off,
boost::text::case_level::off,
boost::text::variable_weighting::{4}),
0)
<< "prev un-norm cps: " << ce_dumper(prev_un_norm)
<< "prev_cps (NFD): " << ce_dumper(prev_cps)
<< "prev_key: " << ce_dumper(prev_key) << "\\n"
<< "curr un-norm cps: " << ce_dumper(curr_un_norm)
<< "curr_cps (NFD): " << ce_dumper(curr_cps)
<< "curr_key: " << ce_dumper(curr_key) << "\\n"
;
}}
std::swap(curr_un_norm, prev_un_norm);
std::swap(curr_cps, prev_cps);
std::swap(curr_key, prev_key);
first = false;
prev_r = r;
}}
}}
'''
def indices_to_list(indices, all_cps):
    """Return the sub-list of all_cps addressed by the (first, last) pair."""
    start = indices[0]
    stop = indices[1]
    return all_cps[start:stop]
def generate_lookup_perf_test(ducet):
    """Write collation_element_lookup_perf_NNN.cpp benchmark sources.

    Groups the sorted DUCET keys into chunks of 50 trie lookups each; every
    100 chunks are emitted into one .cpp file built from perf_test_form.
    """
    chunk_size = 50
    chunks_per_file = 100
    # partition the sorted keys into (cps, cp_ranges) chunks
    chunk_arrays = []
    chunk = 0
    i = 0
    cps = []
    cp_ranges = []
    for k,v in sorted(ducet.items()):
        cp_ranges.append((len(cps), len(cps) + len(k)))
        cps += list(k)
        i += 1
        if i == chunk_size:
            chunk_arrays.append((cps, cp_ranges))
            chunk += 1
            i = 0
            cps = []
            cp_ranges = []
    # NOTE(review): a final partial chunk (fewer than chunk_size keys) is
    # dropped here -- confirm that is intended
    chunk_idx = 0
    lines = ''
    for i in range(len(chunk_arrays)):
        if i != 0 and i % chunks_per_file == 0:
            # flush the accumulated benchmark functions to the next file
            cpp_file = open('collation_element_lookup_perf_{0:03}.cpp'.format(chunk_idx), 'w')
            cpp_file.write(perf_test_form.format(lines))
            chunk_idx += 1
            lines = ''
        cps = chunk_arrays[i][0]
        cp_ranges = chunk_arrays[i][1]
        # keys may hold hex strings or ints; render both as hex literals
        lines += '''\
uint32_t cps_{0:03}[] = {{
{1}
}};
void BM_collation_element_lookup_{0:03}(benchmark::State & state)
{{
while (state.KeepRunning()) {{
'''.format(i, ', '.join(map(lambda x: type(x) == str and '0x' + x or hex(x), cps)), len(cps))
        for first,last in cp_ranges:
            lines += '''\
benchmark::DoNotOptimize(trie().longest_match(cps_{0:03} + {1}, cps_{0:03} + {2}));
'''.format(i, first, last)
        lines += '''\
}}
}}
BENCHMARK(BM_collation_element_lookup_{0:03});
'''.format(i)
    # write whatever benchmarks remain after the loop
    cpp_file = open('collation_element_lookup_perf_{0:03}.cpp'.format(chunk_idx), 'w')
    cpp_file.write(perf_test_form.format(lines))
def generate_collation_perf_test(ducet):
    """Write collation_perf_NNN.cpp benchmark sources.

    Groups the sorted DUCET keys into chunks of 100; each chunk becomes one
    sort-key benchmark over the whole chunk's code points, and every 100
    chunks are emitted into one .cpp file built from perf_test_form.
    """
    chunk_size = 100
    chunks_per_file = 100
    # partition the sorted keys into (cps, cp_ranges) chunks
    chunk_arrays = []
    chunk = 0
    i = 0
    cps = []
    cp_ranges = []
    for k,v in sorted(ducet.items()):
        cp_ranges.append((len(cps), len(cps) + len(k)))
        cps += list(k)
        i += 1
        if i == chunk_size:
            chunk_arrays.append((cps, cp_ranges))
            chunk += 1
            i = 0
            cps = []
            cp_ranges = []
    chunk_idx = 0
    lines = ''
    for i in range(len(chunk_arrays)):
        if i != 0 and i % chunks_per_file == 0:
            # flush the accumulated benchmark functions to the next file
            cpp_file = open('collation_perf_{0:03}.cpp'.format(chunk_idx), 'w')
            cpp_file.write(perf_test_form.format(lines))
            chunk_idx += 1
            lines = ''
        cps = chunk_arrays[i][0]
        cp_ranges = chunk_arrays[i][1]
        # keys may hold hex strings or ints; render both as hex literals
        lines += '''\
uint32_t cps_{0:03}[] = {{
{1}
}};
void BM_collation_{0:03}(benchmark::State & state)
{{
while (state.KeepRunning()) {{
'''.format(i, ', '.join(map(lambda x: type(x) == str and '0x' + x or hex(x), cps)), len(cps))
        lines += '''\
benchmark::DoNotOptimize(boost::text::collation_sort_key(cps_{0:03}, cps_{0:03} + {2}, table(), boost::text::collation_strength::quaternary, boost::text::case_first::off, boost::text::case_level::off, boost::text::variable_weighting::shifted));
'''.format(i, cp_ranges[0][0], cp_ranges[-1][1])
        lines += '''\
}}
}}
BENCHMARK(BM_collation_{0:03});
'''.format(i)
    # write whatever benchmarks remain after the loop
    cpp_file = open('collation_perf_{0:03}.cpp'.format(chunk_idx), 'w')
    cpp_file.write(perf_test_form.format(lines))
# Matches one bracketed collation-element group: hex digits, spaces and '|'
# separators inside square brackets, e.g. "[28, 05, 05]"-style fields.
collation_elements_regex = re.compile(r'\[([ |0123456789ABCDEF]+)\]')
def generate_relative_collation_tests(filename, weighting):
    """Write relative_collation_test_<weighting>_<N>.cpp GTest sources.

    Reads a CollationTest-style data file (one code-point sequence per
    line, '#' comments) and emits one test file per 25000 usable lines,
    using relative_collation_tests_form.  Lines whose comment mentions
    'surrogate' or 'noncharacter' are skipped.
    """
    lines = open(filename, 'r').readlines()
    all_cps = []
    all_ranges = []
    chunk_idx = 0
    line_idx = 0
    for line in lines:
        if line_idx == 25000:
            # flush the current chunk to its own .cpp file
            cps_string = ', '.join(map(lambda x: '0x' + x, all_cps))
            ranges_string = ', '.join(map(lambda x: '{{{}, {}}}'.format(x[0], x[1]), all_ranges))
            cpp_file = open('relative_collation_test_{0}_{1}.cpp'.format(weighting, chunk_idx), 'w')
            cpp_file.write(relative_collation_tests_form.format(cps_string, len(all_cps), ranges_string, len(all_ranges), weighting, chunk_idx))
            chunk_idx += 1
            all_cps = []
            all_ranges = []
            line_idx = 0
        # strip the trailing newline
        line = line[:-1]
        if not line.startswith('#') and len(line) != 0:
            # split off a trailing comment, if any
            comment_start = line.find('#')
            comment = ''
            if comment_start != -1:
                comment = line[comment_start + 1:].strip()
                line = line[:comment_start]
            if 'surrogate' in comment:
                continue
            if 'noncharacter' in comment:
                continue
            # hex code points before the first ';', space separated
            cps = line.split(';')[0].split(' ')
            first = len(all_cps)
            all_cps += cps
            last = len(all_cps)
            all_ranges.append((first, last))
            line_idx += 1
    # flush the final partial chunk
    if line_idx != 0:
        cps_string = ', '.join(map(lambda x: '0x' + x, all_cps))
        ranges_string = ', '.join(map(lambda x: '{{{}, {}}}'.format(x[0], x[1]), all_ranges))
        cpp_file = open('relative_collation_test_{0}_{1}.cpp'.format(weighting, chunk_idx), 'w')
        cpp_file.write(relative_collation_tests_form.format(cps_string, len(all_cps), ranges_string, len(all_ranges), weighting, chunk_idx))
import sys

# Command-line shortcut: with '--perf', build only the benchmark sources
# from the fractional UCA collation-element table, then exit.
if '--perf' in sys.argv:
    cet = get_frac_uca_cet('FractionalUCA.txt')
    generate_lookup_perf_test(cet)
    generate_collation_perf_test(cet)
    exit(0)
# Template for a generated data-file test translation unit; the single '{0}'
# placeholder receives the concatenated single_data_file_test_form bodies.
data_file_test_form = '''\
// Copyright (C) 2020 <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/collation_table.hpp>
#include <boost/text/collate.hpp>
#include <boost/text/normalize_string.hpp>
#include <boost/text/data/all.hpp>
#include <gtest/gtest.h>
using namespace boost;
auto const error = [](std::string const & s) {{ std::cout << s; }};
auto const warning = [](std::string const & s) {{}};
{0}
'''
# Template for one TEST body: {0} comment, {1} test index, {2} expression
# producing the collation table, {3} the comparison assertions.
single_data_file_test_form = '''\
//{0}
TEST(collation_and_tailoring, data_file_test_{1})
{{
auto const table = {2};
{3}
}}
'''
def make_string(s, rules, view):
    """Render *s* as a C++ std::string / std::string_view construction.

    With rules=True the text is wrapped in a raw u8R"(...)" literal,
    otherwise in a plain u8"..." literal.  For string_views whose content
    contains an 'x00' escape, an explicit length argument is appended.
    """
    cpp_type = 'std::string_view' if view else 'std::string'
    if rules:
        call = '{}(u8R"({})"'.format(cpp_type, s)
    else:
        call = '{}(u8"{}"'.format(cpp_type, s)
    if view and 'x00' in s:
        # raw literals keep escapes verbatim, so len(s) is already the byte
        # count; otherwise evaluate the escapes to get the decoded length
        if rules:
            length = len(s)
        else:
            length = len(eval("'" + s + "'"))
        call += ', {}'.format(length)
    return call + ')'
def comparison_tests(compares, test_strength):
    """Render a sequence of pairwise comparison assertions as C++ code.

    compares is a sequence of (comparison_line, comment) pairs; each line
    holds an operator ('=', '<', '<i', '<1'..'<4') and the current string.
    Each element is compared (FCC-normalized) against the previous one at
    the strength implied by the operator.
    """
    retval = ''
    prev = ''
    for c in compares:
        # NOTE(review): relies on exactly two non-empty fields per line;
        # unpacking the filter iterator raises ValueError otherwise
        (compare, curr) = filter(lambda x: len(x) != 0, c[0].split(' '))
        if compare == '=':
            strength = test_strength
            result = 0
        else:
            result = -1
            if compare == '<':
                strength = test_strength
            if compare == '<i':
                strength = 'identical'
            if compare == '<4':
                strength = 'quaternary'
            if compare == '<3':
                strength = 'tertiary'
            if compare == '<2':
                strength = 'secondary'
            if compare == '<1':
                strength = 'primary'
            # NOTE(review): an unrecognized operator silently reuses the
            # previous iteration's strength (NameError on the first line)
        retval += '''\
{{
// {0} # {1}
std::string a = {2};
std::string b = {3};
normalize<boost::text::nf::fcc>(a);
normalize<boost::text::nf::fcc>(b);
EXPECT_EQ(text::collate(
text::utf32_range(a),
text::utf32_range(b),
table,
text::collation_strength::{4}),
{5});
}}
'''.format(c[0], c[1], make_string(prev, False, False), make_string(curr, False, False), strength, result)
        prev = curr
    return retval
# Expects a file like the collationtest.txt file in ICU.
def generate_datafile_collation_tests(lines):
test_lines = ''
test_idx = 0
line_idx = 0
test_comment = ''
rules = ''
strength = 'tertiary'
compares = []
skip_rules = False
while line_idx < len(lines):
line = lines[line_idx].strip()
if line.startswith('@'):
strength = 'tertiary'
if line.startswith('#') or len(line) == 0:
line_idx += 1
continue
elif line.startswith('** test'):
test_comment = line[len('** test:'):]
line_idx += 1
elif line.startswith('@ root'):
skip_rules = False
rules = 'default'
line_idx += 1
elif line.startswith('@ rules'):
skip_rules = False
rules = ''
line_idx += 1
line = lines[line_idx].strip()
while not line.startswith('*') and not line.startswith('%'):
comment_start = line.find('#')
comment | |
StaticLink(AbstractLink):
def __init__(self, value):
    # the constant that execute() will return for every input
    self._value = value
    super().__init__()

def execute(self, obj):
    """Ignore *obj* and return the fixed value supplied at construction."""
    return self._value
class MapSubjectLink(AbstractLink):
    """Map free-text subject terms onto their canonical synonyms."""

    # Synonym table, loaded once when the class is defined.
    with open(settings.SUBJECT_SYNONYMS_JSON) as fobj:
        MAPPING = json.load(fobj)

    def execute(self, obj):
        """Return the mapped synonym(s) for *obj*, or None if empty/unknown."""
        if not obj:
            return None
        if isinstance(obj, list):
            # map each term of a list individually
            return [self.execute(term) for term in obj]
        assert isinstance(obj, str), 'Subjects must be strings. Got {}.'.format(type(obj))
        synonym = self.MAPPING.get(obj.lower())
        if not synonym:
            logger.debug('No synonyms found for term "%s"', obj)
        return synonym
class OneOfLink(AbstractLink):
    """Try each chain in order and return the first successful result."""

    def __init__(self, *chains):
        self._chains = chains
        super().__init__()

    def execute(self, obj):
        failures = []
        for chain in self._chains:
            try:
                return chain.chain()[0].run(obj)
            except TransformError as e:
                # remember the failure and fall through to the next chain
                failures.append(e)
        raise NoneOf('All chains failed {}'.format(failures))
class AbstractIRILink(AbstractLink):
    """Normalize IRIs
    """

    SAFE_SEGMENT_CHARS = ":@-._~!$&'()*+,;="  # https://github.com/gruns/furl/blob/master/furl/furl.py#L385

    @classmethod
    def hint(cls, obj):
        """Estimate, as a float, how likely the given object parses as this class."""
        raise NotImplementedError

    def execute(self, obj):
        """Parse *obj*, normalize its components, and return the re-parsed IRI."""
        if not isinstance(obj, str):
            raise InvalidIRI('\'{}\' is not of type str.'.format(obj))
        components = self._parse(obj)
        normalized = self._process(**components)
        return iris.parse(iris.compose(**normalized))

    def _parse(self, obj):
        # default parser; subclasses override for scheme-specific formats
        return iris.parse(obj)

    def _process(self, **attrs):
        # run each component through its _process_<name> hook, if defined
        return {
            key: getattr(self, '_process_' + key)(attrs[key])
            for key in sorted(attrs.keys())
            if hasattr(self, '_process_' + key)
        }

    def _process_scheme(self, scheme):
        return scheme.lower()

    def _process_authority(self, authority):
        return authority.lower()

    def _process_path(self, path):
        return path

    def _process_query(self, query):
        return query

    def _process_fragment(self, fragment):
        return fragment or None
class ISSNLink(AbstractIRILink):
    """Normalize ISSNs into urn:ISSN IRIs."""

    ISSN_RE = re.compile(r'(?:^|\s+)(\d{4})-(\d{3}[\dxX])\s*$')

    @classmethod
    def hint(cls, obj):
        return 0.9 if re.search(cls.ISSN_RE, obj) else int('issn' in obj) * 0.35

    @classmethod
    def checksum(cls, digits):
        """Validate the ISSN check digit (weighted mod-11)."""
        checksum = digits[-1]
        total = sum((8 - position) * int(digit)
                    for position, digit in enumerate(digits[:-1]))
        actual = (11 - (total % 11)) % 11
        if actual == 10:
            actual = 'X'
        if checksum != str(actual):
            raise InvalidIRI('\'{}\' is not a valid ISSN; failed checksum.'.format(digits))

    def _parse(self, obj):
        match = re.search(self.ISSN_RE, obj.upper())
        if not match:
            raise InvalidIRI('\'{}\' cannot be expressed as an ISSN.'.format(obj))
        self.checksum(''.join(match.groups()))
        return {
            'scheme': 'urn',
            'authority': 'ISSN',
            'path': '/{}-{}'.format(*match.groups())
        }
class URNLink(AbstractIRILink):
    """Normalize URN-like identifiers (``urn:...:...`` and ``oai:...:...``)."""

    SCHEMES = {'urn', 'oai'}
    URN_RE = re.compile(r'\b({schemes}):((?:\w|[.-])+):(\S+)'.format(schemes='|'.join(SCHEMES)), flags=re.I)
    PARSED_URN_RE = re.compile(r'^({schemes})://([^/\s]+)/(\S+)$'.format(schemes='|'.join(SCHEMES)), flags=re.I)

    @classmethod
    def hint(cls, obj):
        # Both the raw urn:... form and the already-normalized urn://...
        # form are strong signals.
        for pattern in (cls.URN_RE, cls.PARSED_URN_RE):
            if pattern.search(obj) is not None:
                return 0.9
        return 0.0

    def _parse(self, obj):
        lowered = obj.lower()
        match = self.URN_RE.search(lowered) or self.PARSED_URN_RE.search(lowered)
        if match is None:
            raise InvalidIRI('\'{}\' is not a valid URN.'.format(obj))
        scheme, authority, name = match.groups()
        return {
            'scheme': scheme,
            'authority': authority,
            'path': '/{}'.format(name),
        }
class ISNILink(AbstractIRILink):
    """Normalize ISNI identifiers to ``http://isni.org/<digits>`` form."""

    DOMAIN = 'isni.org'
    SCHEME = 'http'
    FORMAT = 'ISNI'
    # ISNIs are rendered without separators between the 4-digit groups.
    FORMAT_STR = '/{}{}{}{}'

    # Numeric ranges reserved for this format; the gap between them is the
    # ORCID range (see OrcidLink, which overrides BOUNDS).
    BOUNDS = (
        # (lower, upper)
        (None, 150000007),
        (350000001, None),
    )

    ISNI_RE = re.compile(r'^(?:https?://)?[^=/\d]*/?(\d{4})-?(\d{4})-?(\d{4})-?(\d{3}(?:\d|x))\b', re.I)

    @classmethod
    def hint(cls, obj):
        # Full round trip: only claim a match when execute() would succeed,
        # which also enforces the checksum and range checks.
        try:
            cls().execute(obj)
        except InvalidIRI:
            return 0
        return 1.0

    @classmethod
    def checksum(cls, digits):
        """Validate the check digit and reserved range; raise InvalidIRI on failure.

        ``digits`` is the 16-character identifier; the last character is the
        check digit (possibly 'X').
        """
        total, checksum = 0, digits[-1]
        for digit in digits[:-1]:
            # base-36 so an 'X' appearing mid-string still converts.
            total = (total + int(digit, 36)) * 2
        check = (12 - (total % 11)) % 11
        # literal: full numeric value (body * 10 + check) for the range test.
        literal = (int(digits[:-1]) * 10) + check
        if check == 10:
            check = 'X'
        if str(check) != checksum:
            raise InvalidIRI('\'{}\' is not a valid {}; failed checksum.'.format(digits, cls.FORMAT))
        # Accept the value only if it falls inside one of BOUNDS' open ranges.
        for lower, upper in cls.BOUNDS:
            if (not lower or lower < literal) and (not upper or upper > literal):
                return
        raise InvalidIRI('\'{0}\' is outside reserved {1} range.'.format(digits, cls.FORMAT))

    def _parse(self, obj):
        match = re.search(self.ISNI_RE, obj.upper())
        if not match:
            raise InvalidIRI('\'{}\' cannot be expressed as an {}.'.format(obj, self.FORMAT))
        self.checksum(''.join(match.groups()))
        return {
            'scheme': self.SCHEME,
            'authority': self.DOMAIN,
            'path': self.FORMAT_STR.format(*match.groups())
        }
class OrcidLink(ISNILink):
    """Reformat ORCIDs to the canonical form

    https://orcid.org/xxx-xxxx-xxxx-xxxx

        0000000248692419
        0000-0002-4869-2419
        https://orcid.org/0000-0002-4869-2419

    Any of the above would be transformed into https://orcid.org/0000-0002-4869-2419

    ORCID is a subset of the International Standard Name Identifier (ISNI) in the range 0000-0001-5000-0007 to 0000-0003-5000-0001.
    """
    DOMAIN = 'orcid.org'
    SCHEME = 'http'
    FORMAT = 'ORCID'
    # Unlike plain ISNIs, ORCIDs keep dashes between the 4-digit groups.
    FORMAT_STR = '/{}-{}-{}-{}'

    # The ISNI sub-range allocated to ORCID (the gap in ISNILink.BOUNDS).
    BOUNDS = (
        (150000007, 350000001),
    )
class DOILink(AbstractIRILink):
    """Reformat DOIs to the canonical form

    * All DOIs will be valid URIs
    * All DOIs will use https
    * All DOI paths will be uppercased

    Reference:
        https://www.doi.org/doi_handbook/2_Numbering.html
        https://stackoverflow.com/questions/27910/finding-a-doi-in-a-document-or-page

    While having characters like <>[] in URLs is technically valid, rfc3987 does not seem to like.
    For that reason we escape them here using furl. The regex ensure we won't pick up invalid URLS
    """
    # NOTE(review): the docstring says https, but DOI_SCHEME is 'http' --
    # confirm which is intended.
    DOI_SCHEME = 'http'
    DOI_DOMAIN = 'dx.doi.org'
    # 10.<registrant>(.<sub>)*/<suffix>; '%2F' accepts an already
    # percent-encoded slash between prefix and suffix.
    DOI_RE = re.compile(r'^(?:https?://)?[^=/]*/?(10\.\d{4,}(?:\.\d+)*(?:/|%2F)\S+(?:(?![\"&\'<>])))\b', re.I)

    @classmethod
    def hint(cls, obj):
        if cls.DOI_RE.search(obj) is not None:
            return 0.9
        return 0

    def _process_scheme(self, _):
        # Scheme and authority are fixed for DOIs regardless of the input.
        return self.DOI_SCHEME

    def _process_authority(self, _):
        return self.DOI_DOMAIN

    def _parse(self, obj):
        # Uppercasing first means the emitted DOI path is always uppercase.
        match = self.DOI_RE.search(obj.upper())
        if not match:
            raise InvalidIRI('\'{}\' is not a valid DOI.'.format(obj))
        return {
            'scheme': None,
            'authority': None,
            # Unquote then re-quote each path segment so reserved characters
            # end up consistently percent-encoded exactly once.
            'path': '/' + '/'.join(urllib.parse.quote(x, safe=self.SAFE_SEGMENT_CHARS) for y in match.groups() for x in urllib.parse.unquote(y).split('/'))
        }
class URLLink(AbstractIRILink):
    """Normalize http/https/ftp/ftps URLs.

    Schemes are standardized on the non-secure variant and implicit ports
    (80/443) are stripped from the authority.
    """

    SCHEMES = {'http', 'https', 'ftp', 'ftps'}
    SCHEMELESS_STARTS = ('www.', 'www2.')
    IMPLICIT_PORTS = {80, 443}

    IP_RE = re.compile(r'\b({schemes})://(\d{{1,3}}.){{4}}(?:\d{{2,5}})\b([-a-z0-9@:%_\+.~#?&//=]*)'.format(schemes='|'.join(SCHEMES)), flags=re.I)
    URL_RE = re.compile(r'\b({schemes})://[-a-z0-9@:%._\+~#=]{{2,256}}\.[a-z]{{2,6}}\b([-a-z0-9@:%_\+.~#?&//=]*)'.format(schemes='|'.join(SCHEMES)), flags=re.I)
    LOCAL_URL_RE = re.compile(r'\b({schemes})://localhost:[0-9]{{2,5}}\b([-a-z0-9@:%_\+.~#?&//=]*)'.format(schemes='|'.join(SCHEMES)), flags=re.I)

    @classmethod
    def hint(cls, obj):
        # BePress double escapes in OAI feeds.
        # BUG FIX: the replacement was '&' -> '&' (a no-op); the intent is
        # to undo the extra layer of HTML entity escaping.
        obj = obj.replace('&amp;', '&')
        if settings.DEBUG and cls.LOCAL_URL_RE.search(obj) is not None:
            return 0.25
        if cls.URL_RE.search(obj) is not None:
            return 0.25
        if cls.IP_RE.search(obj) is not None:
            return 0.25
        if obj.lower().startswith(cls.SCHEMELESS_STARTS):
            return 0.1
        return 0

    def _parse(self, obj):
        # BePress double escapes in OAI feeds; undo it (see hint()).
        obj = obj.replace('&amp;', '&')
        match = None
        if settings.DEBUG:
            match = self.LOCAL_URL_RE.search(obj)
        if not match:
            match = self.URL_RE.search(obj) or self.IP_RE.search(obj)
        if not match and obj.lower().startswith(self.SCHEMELESS_STARTS):
            # Try again with an assumed http:// prefix for www.-style input.
            match = self.URL_RE.search('http://{}'.format(obj))
        # BUG FIX: fail with InvalidIRI instead of an AttributeError on
        # None.group(0) when nothing URL-like could be found.
        if not match:
            raise InvalidIRI('\'{}\' could not be identified as a URL.'.format(obj))
        return super(URLLink, self)._parse(match.group(0))

    def _process_scheme(self, scheme):
        scheme = scheme.lower()
        if scheme not in self.SCHEMES:
            raise InvalidIRI('\'{}\' is not a valid scheme for URLs.'.format(scheme))
        return scheme.rstrip('s')  # Standardize on non-secure

    def _process_query(self, query):
        return query  # TODO Order me

    def _process_authority(self, authority):
        authority = super()._process_authority(authority)
        if ':' in authority:
            host, port = authority.split(':')
            # Drop default ports so equivalent URLs normalize identically.
            if port and int(port) in self.IMPLICIT_PORTS:
                authority = host
        return authority
class EmailLink(AbstractIRILink):
    """Normalize e-mail addresses into ``mailto:`` IRIs."""

    # http://emailregex.com/
    EMAIL_RE = re.compile(r'(?P<scheme>mailto:)?(?P<mailbox>[éa-zA-Z0-9_.+-]+)@(?P<authority>[a-zA-Z0-9\u2010ü-]+\.[a-zA-Z0-9-.]+)')

    @classmethod
    def hint(cls, obj):
        # NOTE: first parameter renamed from `self` to `cls` -- this is a
        # classmethod; callers are unaffected.
        if cls.EMAIL_RE.search(obj) is not None:
            return 1.0
        return 0

    def execute(self, obj):
        """Return a dict describing exactly one e-mail address found in ``obj``.

        Raises:
            InvalidIRI: if ``obj`` is not a str, or contains zero or more
                than one e-mail address.
        """
        if not isinstance(obj, str):
            raise InvalidIRI('\'{}\' is not of type str.'.format(obj))
        # Handle unicode hyphens
        emails = self.EMAIL_RE.findall(obj.replace('\u2010', '-'))
        if len(emails) < 1:
            # BUG FIX: added the missing space after the quoted value.
            raise InvalidIRI('\'{}\' is not a valid email address.'.format(obj))
        if len(emails) > 1:
            raise InvalidIRI('Found many email addresses in \'{}\'.'.format(obj))
        # findall tuples are (scheme, mailbox, authority).
        return {
            'scheme': 'mailto',
            'authority': emails[0][2],
            'IRI': 'mailto:{1}@{2}'.format(*emails[0])
        }
class ArXivLink(AbstractIRILink):
    """Normalize arXiv identifiers to ``http://arxiv.org/abs/<id>``."""

    # https://arxiv.org/help/arxiv_identifier
    ARXIV_SCHEME = 'http'
    ARXIV_DOMAIN = 'arxiv.org'
    ARXIV_PATH = '/abs/{}'
    # BUG FIX: escape the dot (it previously matched ANY character) and
    # accept 4-digit sequence numbers (identifiers issued before 2015)
    # alongside 5-digit ones. A version suffix (v1, v2, ...) is matched
    # but intentionally dropped from the canonical form.
    ARXIV_RE = re.compile(r'\barXiv:(\d{4}\.\d{4,5})(v\d)?', flags=re.I)

    @classmethod
    def hint(cls, obj):
        if cls.ARXIV_RE.search(obj) is not None:
            return 1.0
        return 0

    def _parse(self, obj):
        match = self.ARXIV_RE.search(obj)
        if not match:
            raise InvalidIRI('\'{}\' is not a valid ArXiv Identifier.'.format(obj))
        return {
            'scheme': self.ARXIV_SCHEME,
            'authority': self.ARXIV_DOMAIN,
            'path': self.ARXIV_PATH.format(match.group(1))
        }
class ARKLink(AbstractIRILink):
    """Normalize Archival Resource Keys (ARK).

    References:
        https://en.wikipedia.org/wiki/Archival_Resource_Key
        https://wiki.ucop.edu/download/attachments/16744455/arkspec.pdf
    """

    ARK_SCHEME = 'ark'
    ARK_RE = re.compile(r'\bark://?(\d+)(/\S+)', flags=re.I)

    @classmethod
    def hint(cls, obj):
        return 0.9 if cls.ARK_RE.search(obj) is not None else 0

    def _parse(self, obj):
        match = self.ARK_RE.search(obj)
        if match is None:
            raise InvalidIRI('\'{}\' is not a valid ARK Identifier.'.format(obj))
        naan, name = match.groups()
        # The NAAN (name-assigning authority number) becomes the authority.
        return {
            'scheme': self.ARK_SCHEME,
            'authority': naan,
            'path': name,
        }
class InfoURILink(AbstractIRILink):
    """Normalize ``info:`` URIs, e.g. ``info:eu-repo/grantAgreement/EC/FP7/280632/``.

    References:
        http://info-uri.info/registry/docs/misc/faq.html
        https://tools.ietf.org/html/rfc4452
    """

    SCHEME = 'info'
    INFO_RE = re.compile(r'^\s*info:([\w-]+)(/\S+)\s*$')

    @classmethod
    def hint(cls, obj):
        return 0.9 if cls.INFO_RE.search(obj) is not None else 0

    def _parse(self, obj):
        match = self.INFO_RE.search(obj)
        if match is None:
            raise InvalidIRI('\'{}\' is not a valid Info URI.'.format(obj))
        namespace, identifier = match.groups()
        return {
            'scheme': self.SCHEME,
            'authority': namespace,
            'path': identifier,
        }
class ISBNLink(AbstractIRILink):
SCHEME = 'urn'
AUTHORITY = 'isbn'
ISBN10_RE = re.compile(r'^(?:urn:\/\/isbn\/|ISBN:? ?)?(\d\d?)-(\d{3,7})-(\d{1,6})-(\d|x)$', re.I)
ISBN13_RE = re.compile(r'^(?:urn://isbn/|ISBN:? ?)?(978|979)-(\d\d?)-(\d{3,5})-(\d{2,5})-(\d)$', re.I)
@classmethod
def hint(cls, obj):
if cls.ISBN13_RE.match(obj) or cls.ISBN10_RE.match(obj):
return 1.0
return 0
def _parse(self, obj):
match = self.ISBN13_RE.match(obj.upper()) or self.ISBN10_RE.match(obj.upper())
if not match or len(''.join(match.groups())) not in (13, 10):
raise InvalidIRI('\'{}\' cannot be expressed as an ISBN.'.format(obj))
if match.re == self.ISBN13_RE:
digits = ''.join(match.groups())
check = (10 - sum(int(x) * (i % 2 * 2 + 1) for i, x in enumerate(digits[:-1])) % 10) % 10
if str(check) != digits[-1]:
raise InvalidIRI('\'{}\' is not a valid ISBN; failed checksum.'.format(obj))
if match.re == self.ISBN10_RE:
digits = ''.join(match.groups())
check = sum(10 if x == 'X' else int(x) * (10 - i) for i, x in enumerate(digits))
if check % 11 != 0:
raise InvalidIRI('\'{}\' is not a valid ISBN; failed checksum.'.format(obj))
# Add prefix and compute new checksum
digits = '978' + digits
digits = digits[:-1] + str((10 - sum(int(x) * (i % 2 * 2 + 1) for i, x in enumerate(digits[:-1])) % 10) % 10)
| |
POST.
.. attribute:: name
Required. The short name or "slug" representing this
action. This name should not be changed at runtime.
.. attribute:: verbose_name
A string which will be rendered as the link text. (Required)
.. attribute:: url
A string or a callable which resolves to a url to be used as the link
target. You must either define the ``url`` attribute or override
the ``get_link_url`` method on the class.
.. attribute:: allowed_data_types
A list that contains the allowed data types of the action. If the
datum's type is in this list, the action will be shown on the row
for the datum.
Defaults to be an empty list (``[]``). When set to empty, the action
will accept any kind of data.
"""
# class attribute name is used for ordering of Actions in table
name = "link"
# When True, the link is refreshed via AJAX after the table loads.
ajax = False

def __init__(self, attrs=None, **kwargs):
    """Initialize the action; most attributes mirror keyword arguments so
    subclasses may declare them either as class attributes or kwargs.
    """
    super(LinkAction, self).__init__(**kwargs)
    self.method = kwargs.get('method', "GET")
    self.bound_url = kwargs.get('bound_url', None)
    self.name = kwargs.get('name', self.name)
    self.verbose_name = kwargs.get('verbose_name', self.name.title())
    self.url = kwargs.get('url', None)
    self.allowed_data_types = kwargs.get('allowed_data_types', [])
    self.icon = kwargs.get('icon', None)
    # Keep the raw kwargs around: get_link_url() forwards them to a
    # callable ``url``.
    self.kwargs = kwargs
    # NOTE(review): verbose_name must be passed explicitly even though a
    # title-cased default is derived above -- the default is never reachable.
    if not kwargs.get('verbose_name', None):
        raise NotImplementedError('A LinkAction object must have a '
                                  'verbose_name attribute.')
    if attrs:
        self.attrs.update(attrs)
    if self.ajax:
        # Tag the rendered element so client JS knows to refresh it.
        self.classes = list(self.classes) + ['ajax-update']
def get_ajax_update_url(self):
    """Build the URL the client polls to re-render this action via AJAX."""
    table_url = self.table.get_absolute_url()
    # NOTE(review): SortedDict is Django's deprecated ordered mapping;
    # collections.OrderedDict is the modern equivalent.
    params = urlencode(
        SortedDict([("action", self.name), ("table", self.table.name)])
    )
    return "%s?%s" % (table_url, params)
def render(self):
    """Render this action with the shared table-action template."""
    return render_to_string("horizon/common/_data_table_table_action.html",
                            {"action": self})
def associate_with_table(self, table):
    """Bind this action to ``table``; AJAX actions also record their update URL."""
    super(LinkAction, self).associate_with_table(table)
    if self.ajax:
        # The update URL needs the table, so it can only be set here.
        self.attrs['data-update-url'] = self.get_ajax_update_url()
def get_link_url(self, datum=None):
    """Returns the final URL based on the value of ``url``.

    A callable ``url`` is invoked directly; otherwise ``reverse`` is
    attempted, falling back to returning ``url`` unchanged when no
    reverse match exists. For row actions the current row data object
    is passed as the first parameter.
    """
    if not self.url:
        raise NotImplementedError('A LinkAction class must have a '
                                  'url attribute or define its own '
                                  'get_link_url method.')
    if callable(self.url):
        return self.url(datum, **self.kwargs)
    reverse_args = (self.table.get_object_id(datum),) if datum else ()
    try:
        return urlresolvers.reverse(self.url, args=reverse_args)
    except urlresolvers.NoReverseMatch as ex:
        LOG.info('No reverse found for "%s": %s' % (self.url, ex))
        return self.url
class FilterAction(BaseAction):
    """A base class representing a filter action for a table.

    .. attribute:: name

        The short name or "slug" representing this action. Defaults to
        ``"filter"``.

    .. attribute:: verbose_name

        A descriptive name used for display purposes. Defaults to the
        value of ``name`` with the first letter of each word capitalized.

    .. attribute:: param_name

        A string representing the name of the request parameter used for the
        search term. Default: ``"q"``.

    .. attribute:: filter_type

        A string representing the type of this filter. If this is set to
        ``"server"`` then ``filter_choices`` must also be provided.
        Default: ``"query"``.

    .. attribute:: filter_choices

        Required for server type filters. A tuple of tuples representing the
        filter options. Tuple composition should evaluate to (string, string,
        boolean), representing the filter parameter, display value, and
        whether or not it should be applied to the API request as an API
        query attribute. API type filters do not need to be accounted for in
        the filter method since the API will do the filtering. However,
        server type filters in general will need to be performed in the
        filter method. By default this attribute is not provided.

    .. attribute:: needs_preloading

        If True, the filter function will be called for the initial
        GET request with an empty ``filter_string``, regardless of the
        value of ``method``.
    """
    # TODO(gabriel): The method for a filter action should be a GET,
    # but given the form structure of the table that's currently impossible.
    # At some future date this needs to be reworked to get the filter action
    # separated from the table's POST form.

    # class attribute name is used for ordering of Actions in table
    name = "filter"

    def __init__(self, **kwargs):
        super(FilterAction, self).__init__(**kwargs)
        self.method = kwargs.get('method', "POST")
        self.name = kwargs.get('name', self.name)
        self.verbose_name = kwargs.get('verbose_name', _("Filter"))
        self.filter_type = kwargs.get('filter_type', "query")
        self.filter_choices = kwargs.get('filter_choices')
        self.needs_preloading = kwargs.get('needs_preloading', False)
        self.param_name = kwargs.get('param_name', 'q')
        self.icon = "search"
        if self.filter_type == 'server' and self.filter_choices is None:
            raise NotImplementedError(
                'A FilterAction object with the '
                'filter_type attribute set to "server" must also have a '
                'filter_choices attribute.')

    def get_param_name(self):
        """Returns the full query parameter name for this action.

        Defaults to
        ``{{ table.name }}__{{ action.name }}__{{ action.param_name }}``.
        """
        return "__".join([self.table.name, self.name, self.param_name])

    def assign_type_string(self, table, data, type_string):
        # Tag each datum with its data type so mixed-data tables can render
        # rows appropriately.
        for datum in data:
            setattr(datum, table._meta.data_type_name, type_string)

    def data_type_filter(self, table, data, filter_string):
        """Filter a mixed-data table by delegating to filter_<type>_data methods."""
        filtered_data = []
        for data_type in table._meta.data_types:
            func_name = "filter_%s_data" % data_type
            filter_func = getattr(self, func_name, None)
            # BUG FIX: combined the checks with "or" -- the previous "and"
            # let a truthy non-callable attribute slip through and crash
            # below with a TypeError instead of this explicit error.
            if not filter_func or not callable(filter_func):
                # The check of filter function implementation should happen
                # in the __init__. However, the current workflow of DataTable
                # and actions won't allow it. Need to be fixed in the future.
                cls_name = self.__class__.__name__
                raise NotImplementedError("You must define a %s method "
                                          "for %s data type in %s." %
                                          (func_name, data_type, cls_name))
            _data = filter_func(table, data, filter_string)
            self.assign_type_string(table, _data, data_type)
            filtered_data.extend(_data)
        return filtered_data

    def filter(self, table, data, filter_string):
        """Provides the actual filtering logic.

        This method must be overridden by subclasses and return
        the filtered data.
        """
        return data

    def is_api_filter(self, filter_field):
        """Determine if the given filter field should be used as an
        API filter.
        """
        if self.filter_type == 'server':
            # The third element of a choice tuple marks an API-side filter.
            for choice in self.filter_choices:
                if (choice[0] == filter_field and len(choice) > 2 and
                        choice[2] is True):
                    return True
        return False
class FixedFilterAction(FilterAction):
    """A filter action with fixed buttons."""

    def __init__(self, **kwargs):
        super(FixedFilterAction, self).__init__(**kwargs)
        self.filter_type = kwargs.get('filter_type', "fixed")
        self.needs_preloading = kwargs.get('needs_preloading', True)
        self.fixed_buttons = self.get_fixed_buttons()
        self.filter_string = ''

    def filter(self, table, images, filter_string):
        """Bucket ``images`` by category and return the selected bucket."""
        self.filter_string = filter_string
        self.categories = defaultdict(list, self.categorize(table, images))
        # Refresh the per-button counts shown in the UI.
        for button in self.fixed_buttons:
            button['count'] = len(self.categories[button['value']])
        return self.categories[filter_string] if filter_string else images

    def get_fixed_buttons(self):
        """Return the list of button-description dicts used for filtering.

        Each dict carries ``text`` (the button label), ``icon`` (icon class
        inserted before the text) and ``value`` (passed back to ``filter()``
        as ``filter_string`` when the button is clicked).
        """
        return []

    def categorize(self, table, images):
        """Override to split ``images`` into categories.

        Return a dict keyed by each fixed button's value, mapping to the
        list of images belonging to that category.
        """
        return {}
class BatchAction(Action):
"""A table action which takes batch action on one or more
objects. This action should not require user input on a
per-object basis.
.. attribute:: name
An internal name for this action.
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (PendingDeprecation)
String or tuple/list. The display forms of the name.
Should be a transitive verb, capitalized and translated. ("Delete",
"Rotate", etc.) If tuple or list - then setting
self.current_present_action = n will set the current active item
from the list(action_present[n])
You can pass a complete action name including 'data_type' by specifying
'%(data_type)s' substitution in action_present ("Delete %(data_type)s").
Otherwise a complete action name is a format of "<action> <data_type>".
<data_type> is determined based on the number of items.
By passing a complete action name you allow translators to control
the order of words as they want.
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (PendingDeprecation)
String or tuple/list. The past tense of action_present. ("Deleted",
"Rotated", etc.) If tuple or list - then
setting self.current_past_action = n will set the current active | |
querydict.has_key('buyer_email'):
data['buyer_email'] = querydict['buyer_email']
if querydict.has_key('seller_id'):
data['seller_id'] = querydict['seller_id']
if querydict.has_key('buyer_id'):
data['buyer_id'] = querydict['buyer_id']
if querydict.has_key('notify_time'):
data['notify_time'] = querydict['notify_time']
if querydict.has_key('notify_type'):
data['notify_type'] = querydict['notify_type']
if querydict.has_key('notify_id'):
data['notify_id'] = querydict['notify_id']
if querydict.has_key('out_trade_no'):
data['out_trade_no'] = querydict['out_trade_no']
if querydict.has_key('subject'):
data['subject'] = querydict['subject']
if querydict.has_key('payment_type'):
data['payment_type'] = querydict['payment_type']
if querydict.has_key('trade_no'):
data['trade_no'] = querydict['trade_no']
if querydict.has_key('trade_status'):
data['trade_status'] = querydict['trade_status']
if gSecurityConfig['alipay']['trade_status'].has_key(data['trade_status']):
data['trade_status_desc'] = gSecurityConfig['alipay']['trade_status'][data['trade_status']]
if querydict.has_key('gmt_create'):
data['gmt_create'] = querydict['gmt_create']
if querydict.has_key('gmt_payment'):
data['gmt_payment'] = querydict['gmt_payment']
if querydict.has_key('gmt_close'):
data['gmt_close'] = querydict['gmt_close']
if querydict.has_key('gmt_refund'):
data['gmt_refund'] = querydict['gmt_refund']
if querydict.has_key('body'):
data['body'] = querydict['body']
if querydict.has_key('error_code'):
data['error_code'] = querydict['error_code']
if querydict.has_key('bank_seq_no'):
data['bank_seq_no'] = querydict['bank_seq_no']
if querydict.has_key('out_channel_type'):
data['out_channel_type'] = querydict['out_channel_type']
if querydict.has_key('out_channel_amount'):
data['out_channel_amount'] = querydict['out_channel_amount']
if querydict.has_key('out_channel_inst'):
data['out_channel_inst'] = querydict['out_channel_inst']
if querydict.has_key('business_scene'):
data['business_scene'] = querydict['business_scene']
if querydict.has_key('total_fee'):
data['total_fee'] = querydict['total_fee']
if data.has_key('out_trade_no'):
g = gevent.spawn(update_pay_log, data['out_trade_no'], data, False)
def handle_alipay_notify_url(environ):
    """Handle alipay's asynchronous trade/refund notification callback.

    Parses the urlencoded WSGI request body, copies the known alipay
    fields into a flat record, and asynchronously updates the matching
    pay log (trade notifications) or refund log (batch refund
    notifications) via gevent.
    """
    global gConfig, gSecurityConfig
    buf = environ['wsgi.input'].read()
    ds_plus = urllib.unquote_plus(buf)
    ds_plus = dec_by_code(gConfig['pay_platform']['alipay']['input_charset'], ds_plus)
    querydict = {}
    data = {}
    data['pay_channel'] = 'alipay'
    try:
        # parse_qs maps each key to a list of values; keep the first only.
        parsed = urlparse.parse_qs(ds_plus)
        querydict = dict((k, v[0]) for k, v in parsed.items())
    except:
        querydict = {}
    # Copy the fields alipay may send straight through to the log record.
    # (Replaces two dozen copy-pasted has_key/assign pairs.)
    for field in ('seller_email', 'buyer_email', 'seller_id', 'buyer_id',
                  'notify_time', 'notify_id', 'notify_type', 'out_trade_no',
                  'subject', 'payment_type', 'trade_no', 'trade_status',
                  'gmt_create', 'gmt_payment', 'gmt_close', 'gmt_refund',
                  'body', 'error_code', 'bank_seq_no', 'out_channel_type',
                  'out_channel_amount', 'out_channel_inst', 'business_scene',
                  'total_fee'):
        if field in querydict:
            data[field] = querydict[field]
    # BUG FIX: only resolve the human-readable description when a
    # trade_status was actually supplied -- previously data['trade_status']
    # was read unconditionally and raised KeyError when alipay omitted it.
    if 'trade_status' in data and data['trade_status'] in gSecurityConfig['alipay']['trade_status']:
        data['trade_status_desc'] = gSecurityConfig['alipay']['trade_status'][data['trade_status']]
    if 'notify_type' in querydict and 'trade_status_' in querydict['notify_type'] and 'out_trade_no' in data:
        g = gevent.spawn(update_pay_log, data['out_trade_no'], data, False)
    if querydict.get('notify_type') == 'batch_refund_notify':
        if 'batch_no' in querydict:
            data['batch_no'] = querydict['batch_no']
        if 'success_num' in querydict:
            data['success_num'] = int(querydict['success_num'])
        if 'result_details' in querydict:
            # result_details format: trade_no^refund_fee^refund_status
            arr = querydict['result_details'].split('^')
            data['trade_no'] = arr[0]
            data['refund_fee'] = float(arr[1])
            data['refund_status'] = arr[2]
            g = gevent.spawn(update_refund_log, data['trade_no'], data, False)
def handle_alipay_error_notify_url(environ):
    """Handle alipay's asynchronous error notification callback.

    Extracts ``out_trade_no`` and ``error_code`` from the urlencoded WSGI
    body and asynchronously updates the matching pay log entry.
    """
    global gConfig, gSecurityConfig
    buf = environ['wsgi.input'].read()
    ds_plus = urllib.unquote_plus(buf)
    ds_plus = dec_by_code(gConfig['pay_platform']['alipay']['input_charset'], ds_plus)
    querydict = {}
    data = {}
    data['pay_channel'] = 'alipay'
    try:
        # parse_qs maps each key to a list of values; keep the first only.
        parsed = urlparse.parse_qs(ds_plus)
        querydict = dict((k, v[0]) for k, v in parsed.items())
    except:
        querydict = {}
    if 'out_trade_no' in querydict:
        data['out_trade_no'] = querydict['out_trade_no']
    if 'error_code' in querydict:
        data['error_code'] = querydict['error_code']
        # BUG FIX: the description lookup is now nested under the presence
        # check -- previously data['error_code'] was read unconditionally
        # and raised KeyError when alipay omitted the field.
        if data['error_code'] in gSecurityConfig['alipay']['error_code']:
            data['error_desc'] = gSecurityConfig['alipay']['error_code'][data['error_code']]
    if 'out_trade_no' in data:
        g = gevent.spawn(update_pay_log, data['out_trade_no'], data, False)
        #g.join()
def get_querydict_by_GET_POST(environ):
    """Collect request parameters from the query string AND the form body.

    Returns a tuple ``(querydict, buf)`` where ``querydict`` holds merged
    parameters (form values override query-string values on key clash) and
    ``buf`` is the raw bytes of the first uploaded file, or None.

    NOTE(review): this is Python 2 code (``has_key``, ``except Exception,e``);
    block nesting was reconstructed from a flattened source -- verify against
    the original revision.
    """
    querydict = {}
    buf = None
    if environ.has_key('QUERY_STRING'):
        querystring = environ['QUERY_STRING']
        querystring = urllib.unquote_plus(querystring)
        querystring = dec(querystring)
        try:
            # A query string that is itself a JSON object is merged key by key.
            d = json.loads(querystring)
            if isinstance(d, dict):
                for k in d.keys():
                    querydict[k] = d[k]
        except:
            # Otherwise fall back to standard urlencoded parsing,
            # flattening each value list to its first element.
            querydict = urlparse.parse_qs(querystring)
            d = {}
            for k in querydict.keys():
                d[k] = querydict[k][0]
            querydict = d
    # try:
    #     # buf = environ['wsgi.input'].read()
    #     buf = stream.read()
    #     print('buf=')
    #     print(buf)
    #     ds_plus = urllib.unquote_plus(buf)
    #     obj = json.loads(dec(ds_plus))
    #     for k in obj.keys():
    #         querydict[k] = obj[k]
    # except:
    #     pass
    stream, form, files = werkzeug.formparser.parse_form_data(environ, charset='utf-8')
    if len(form.keys()) > 0:
        for key in form.keys():
            try:
                if isinstance(key, str):
                    key = dec(key)
                # A form KEY that parses as JSON is merged (dict) or taken
                # wholesale (list); otherwise it is treated as a plain field.
                obj = json.loads(key)
                if isinstance(obj, dict):
                    for k in obj.keys():
                        querydict[k] = obj[k]
                if isinstance(obj, list):
                    querydict = obj
            except Exception,e:
                print(e)
                querydict[key] = form[key]
    file_storage_list = []
    if len(files.keys()) > 0:
        for key in files.keys():
            file_storage_list.extend(files.getlist(key))
    # Only the first uploaded file's metadata and content are returned.
    for file_storage in file_storage_list:
        if isinstance(file_storage, werkzeug.datastructures.FileStorage):
            querydict['filename'] = file_storage.filename
            querydict['content_type'] = file_storage.content_type
            querydict['mimetype'] = file_storage.mimetype
            # querydict['content_length'] = file_storage.content_length
            buf = file_storage.read()
            break
    return querydict, buf
def handle_combiz_platform(environ):
global ENCODING
global gConfig, gRequest, gFormTemplate
def get_collection(collection):
    """Return the named mongo collection, creating it on first use.

    Closure over gConfig/db_util from handle_combiz_platform.
    """
    ret = None
    db_util.mongo_init_client('combiz_platform')
    db = db_util.gClientMongo['combiz_platform'][gConfig['combiz_platform']['mongodb']['database']]
    if not collection in db.collection_names(False):
        ret = db.create_collection(collection)
    else:
        ret = db[collection]
    return ret
#Rule('/workflow_add', endpoint='workflow_add'),
#Rule('/workflow_query', endpoint='workflow_query'),
#Rule('/workflow_query/<_id>', endpoint='workflow_query'),
#Rule('/workflow_update', endpoint='workflow_update'),
#Rule('/workflow_delete', endpoint='workflow_delete'),
#Rule('/workflow_delete/<_id>', endpoint='workflow_delete'),
#Rule('/workflow_template_add', endpoint='workflow_template_add'),
#Rule('/workflow_template_query', endpoint='workflow_template_query'),
#Rule('/workflow_template_query/<_id>', endpoint='workflow_template_query'),
#Rule('/workflow_template_update', endpoint='workflow_template_update'),
#Rule('/workflow_template_delete', endpoint='workflow_template_delete'),
#Rule('/workflow_template_delete/<_id>', endpoint='workflow_template_delete'),
def workflow_add(querydict):
    """Insert a workflow document; order_id is required and must be unique.

    Returns a JSON string: the stored document on success, or a
    ``{'result': ...}`` error payload.
    """
    ret = ''
    if querydict.has_key('order_id'):
        try:
            collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
            existone = collection.find_one({'order_id':querydict['order_id']})
            if existone:
                ret = json.dumps({'result':u'workflow_add_order_id_already_exist' }, ensure_ascii=True, indent=4)
            else:
                _id = collection.save(querydict)
                o = collection.find_one({'_id':_id})
                ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
        except:
            # Best-effort error reporting; include the exception message
            # when one is available.
            if hasattr(sys.exc_info()[1], 'message'):
                ret = json.dumps({'result':u'workflow_add_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
            else:
                ret = json.dumps({'result':u'workflow_add_fail' }, ensure_ascii=True, indent=4)
    else:
        ret = json.dumps({'result':u'workflow_add_order_id_required' }, ensure_ascii=True, indent=4)
    return ret
def workflow_query(querydict):
    """Query workflow documents by _id, order_id (with ``*`` wildcard) or a
    generic search_field/search pair; supports limit/offset/order paging.

    Returns a JSON string: matching document(s) or a ``{'result': ...}``
    error payload.

    NOTE(review): block nesting was reconstructed from a flattened source --
    verify the wildcard and search branches against the original revision.
    """
    ret = ''
    o = None
    try:
        #print(querydict)
        collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
        limit = 10
        skip = 0
        ssort = None
        cond = {}
        if querydict.has_key('limit'):
            limit = int(querydict['limit'])
        if querydict.has_key('offset'):
            skip = int(querydict['offset'])
        if querydict.has_key('order'):
            ssort = []
            if querydict['order'] == 'asc':
                ssort = [('order_id', pymongo.ASCENDING),]
            if querydict['order'] == 'desc':
                ssort = [('order_id', pymongo.DESCENDING),]
        if querydict.has_key('_id'):
            o = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
        elif querydict.has_key('order_id'):
            if '*' in querydict['order_id']:
                # '*' marks a substring search on order_id.
                cond = {'order_id': {'$regex':'^.*' + querydict['order_id'].replace('*', '') + '.*$'}}
                #print(cond)
                o = list(collection.find(cond, skip=skip, limit=limit, sort=ssort))
                #print(o)
            else:
                o = collection.find_one({'order_id':querydict['order_id']})
        else:
            # Generic search over an arbitrary field.
            ssort = None
            cond = {}
            if querydict.has_key('search_field') and querydict.has_key('search'):
                cond = {str(querydict['search_field']): {'$regex':'^.*' + querydict['search'].replace('*', '') + '.*$'}}
                if querydict.has_key('order'):
                    ssort = []
                    if querydict['order'] == 'asc':
                        ssort = [(str(querydict['search_field']), pymongo.ASCENDING),]
                    if querydict['order'] == 'desc':
                        ssort = [(str(querydict['search_field']), pymongo.DESCENDING),]
            o = list(collection.find(cond, skip=skip, limit=limit, sort=ssort))
        if o:
            ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
        else:
            ret = json.dumps({'result':u'workflow_query_workflow_not_exist' }, ensure_ascii=True, indent=4)
        #if not querydict.has_key('_id') and not querydict.has_key('order_id'):
        #    ret = json.dumps({'result':u'workflow_query_id_or_order_id_required' }, ensure_ascii=True, indent=4)
    except:
        if hasattr(sys.exc_info()[1], 'message'):
            ret = json.dumps({'result':u'workflow_query_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
        else:
            ret = json.dumps({'result':u'workflow_query_fail' }, ensure_ascii=True, indent=4)
    return ret
def workflow_update(querydict):
    """Apply querydict as a ``$set`` update to the workflow with the given _id.

    Returns a JSON string: the updated document on success, or a
    ``{'result': ...}`` error payload.
    """
    ret = ''
    try:
        collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
        if querydict.has_key('_id'):
            existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
            if existone:
                collection.update({'_id':existone['_id']}, {'$set': db_util.add_mongo_id(querydict)}, multi=False, upsert=False)
                # Re-read so the caller gets the post-update document.
                one = collection.find_one(db_util.add_mongo_id({'_id':existone['_id']}))
                ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
            else:
                ret = json.dumps({'result':u'workflow_update_workflow_not_exist' }, ensure_ascii=True, indent=4)
        else:
            ret = json.dumps({'result':u'workflow_update_id_required' }, ensure_ascii=True, indent=4)
    except:
        if hasattr(sys.exc_info()[1], 'message'):
            ret = json.dumps({'result':u'workflow_update_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
        else:
            ret = json.dumps({'result':u'workflow_update_fail' }, ensure_ascii=True, indent=4)
    return ret
def workflow_delete(querydict):
    """Delete workflow document(s) by '_id'.

    querydict['_id'] may be a single (string) id or a list of ids. Returns
    the deleted document (single-id case) or the list of ids (bulk case) as
    JSON; errors are reported as a JSON {'result': ...} payload.
    """
    ret = ''
    try:
        collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
        if querydict.has_key('_id'):
            # The two id forms are mutually exclusive; elif makes that explicit
            # (previously two independent if statements re-tested a str id).
            if isinstance(querydict['_id'], (str, unicode)):
                existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
                if existone:
                    collection.remove({'_id':existone['_id']})
                    # Echo the deleted document back to the caller
                    ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
                else:
                    ret = json.dumps({'result':u'workflow_delete_workflow_not_exist' }, ensure_ascii=True, indent=4)
            elif isinstance(querydict['_id'], list):
                # Bulk delete: remove every listed id in a single query
                ids = db_util.add_mongo_id(querydict['_id'])
                collection.remove({'_id':{'$in':ids}})
                ret = json.dumps(db_util.remove_mongo_id(querydict['_id']), ensure_ascii=True, indent=4)
        else:
            ret = json.dumps({'result':u'workflow_delete_id_required' }, ensure_ascii=True, indent=4)
    except:
        if hasattr(sys.exc_info()[1], 'message'):
            ret = json.dumps({'result':u'workflow_delete_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
        else:
            ret = json.dumps({'result':u'workflow_delete_fail' }, ensure_ascii=True, indent=4)
    return ret
def workflow_template_add(querydict):
    """Insert a new workflow template ('name', 'nodes', 'edges' required).

    The template name must be unique. Returns the stored document as JSON on
    success; otherwise returns a JSON {'result': ...} payload naming the
    failure (duplicate name, missing field, or exception text).
    """
    ret = ''
    required_keys = ('name', 'nodes', 'edges')
    missing = [key for key in required_keys if not querydict.has_key(key)]
    if not missing:
        try:
            collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow_template'])
            if collection.find_one({'name':querydict['name']}):
                ret = json.dumps({'result':u'workflow_template_add_name_already_exist' }, ensure_ascii=True, indent=4)
            else:
                # Save, then re-read so the response reflects the stored form
                new_id = collection.save(db_util.add_mongo_id(querydict))
                saved = collection.find_one({'_id':new_id})
                ret = json.dumps(db_util.remove_mongo_id(saved), ensure_ascii=True, indent=4)
        except:
            exc = sys.exc_info()[1]
            if hasattr(exc, 'message'):
                ret = json.dumps({'result':u'workflow_template_add_fail:%s' % exc.message}, ensure_ascii=True, indent=4)
            else:
                ret = json.dumps({'result':u'workflow_template_add_fail' }, ensure_ascii=True, indent=4)
    else:
        # As in the original, the last missing key (in name/nodes/edges order)
        # determines the reported message.
        for key in missing:
            ret = json.dumps({'result':u'workflow_template_add_%s_required' % key}, ensure_ascii=True, indent=4)
    return ret
def workflow_template_query(querydict):
ret = ''
o = None
try:
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow_template'])
o = None
limit = 10
skip = 0
ssort = None
cond = {}
if querydict.has_key('limit'):
limit = int(querydict['limit'])
if querydict.has_key('offset'):
skip = int(querydict['offset'])
if querydict.has_key('order'):
ssort = []
if querydict['order'] == 'asc':
ssort = [('name', pymongo.ASCENDING),]
if querydict['order'] == 'desc':
ssort = [('name', pymongo.DESCENDING),]
if querydict.has_key('name'):
if '*' in querydict['name']:
cond = {'name': {'$regex':'^.*' + querydict['name'].replace('*', '') + '.*$'}}
o = list(collection.find(cond, skip=skip, limit=limit, sort=ssort))
else:
o = collection.find_one({'name':querydict['name']})
elif querydict.has_key('_id'):
o = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if o:
ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_query_workflow_not_exist' }, ensure_ascii=True, indent=4)
else:
ssort = None
cond = {}
if querydict.has_key('search_field') and querydict.has_key('search'):
cond = {str(querydict['search_field']): {'$regex':'^.*' + querydict['search'].replace('*', '') + '.*$'}}
if querydict.has_key('order'):
ssort = []
if querydict['order'] == 'asc':
ssort | |
"""
Gridder class (and directly supporting utils) for efficient and parallelized mapping of
Py-ART Radar objects to a common, regular grid. Supported by the underlying subgrid mapping
of the GatesToSubgridMapper.
Portions inspired by the map_gates_to_grid implementation of Py-ART (license reproduced in
_map_gates_to_subgrid.pyx, which more directly reuses Py-ART code).
"""
from time import sleep
import logging
from pathlib import Path
from joblib import dump, load
import warnings
from cftime import num2date
import numpy as np
import pyart
from pyart.core.radar import Radar
from pyart.filters import GateFilter, moment_based_gate_filter
import pyproj
from sklearn.linear_model import ElasticNet
import xarray as xr
from ._map_gates_to_subgrid import GatesToSubgridMapper
from .grid_utils import generate_rectangular_grid
from .vendored import (
_determine_cy_weighting_func,
_parse_gatefilters,
_determine_fields,
_parse_roi_func
)
log = logging.getLogger(__name__)
_grid_param_labels_z = ['nz', 'dz', 'z_min', 'z_max']
_grid_param_labels_y = ['ny', 'dy', 'y_min', 'y_max']
_grid_param_labels_x = ['nx', 'dx', 'x_min', 'x_max']
def radar_coords_to_grid_coords(
    gate_radar_x,
    gate_radar_y,
    site_id,
    radar_crs_kwargs,
    target_crs_cf_attrs,
    wait_for_cache=False,
    cache_dir='/tmp/'
):
    """Map radar x/y to grid x/y using a quadratic model of projection transform.

    Uses an ElasticNet regression over 2D quadratic terms to transform coordinates from radar
    azimuthal equidistant space to common grid. If the cached regression for a particular
    radar site is not available, perform the slow transform with pyproj in order to train the
    regression for later use.

    This technique can give a more than order of magnitude speed up, with errors less than
    100 m within 300 km of the radar site (also, faster and signficantly less error than
    using pyproj.Proj from Py-ART's calcuated lat/lons).

    WARNING: If you change the destination grid, you must clear the cached regressions in the
    .regressions directory.

    NOTE: fixes a prior bug where, after sleeping for ``wait_for_cache`` and loading the
    cached regressions, an unused ``save_to_cache`` flag was set instead of ``use_saved``,
    so the slow pyproj transform and training were redone (and re-dumped) anyway.
    """
    # Ensure float32 (matches the dtype the cached regressions were trained with)
    gate_x = gate_radar_x.astype('float32')
    gate_y = gate_radar_y.astype('float32')
    gate_x_r = gate_x.ravel()
    gate_y_r = gate_y.ravel()

    def _features(x, y):
        # Design matrix of 2D quadratic terms: [x, y, x^2, y^2, x*y]
        return np.stack([x, y, x**2, y**2, x * y], axis=1)

    # Check for cached version
    regression_path = Path(cache_dir) / "regressions"
    regression_path.mkdir(parents=True, exist_ok=True)
    x_regression_path = regression_path / f"{site_id}_reg_x.joblib"
    y_regression_path = regression_path / f"{site_id}_reg_y.joblib"
    use_saved = x_regression_path.exists() and y_regression_path.exists()
    if not use_saved and wait_for_cache:
        # Wait 5 seconds in case a concurrent worker is writing the cache
        sleep(5)
        use_saved = x_regression_path.exists() and y_regression_path.exists()
    if use_saved:
        x_clf = load(x_regression_path)
        y_clf = load(y_regression_path)
    else:
        # Create the transformed coordinate, and train the regression
        # (slices every 11 since subset performs better, and 11 not factor of 360 to avoid
        # periodicity)
        radar_crs = pyproj.CRS(radar_crs_kwargs)
        target_crs = pyproj.CRS.from_cf(target_crs_cf_attrs)
        transformer = pyproj.Transformer.from_crs(radar_crs, target_crs)
        grid_x, grid_y = transformer.transform(gate_x, gate_y)
        x_clf = ElasticNet().fit(_features(gate_x_r, gate_y_r)[::11], grid_x.ravel()[::11])
        y_clf = ElasticNet().fit(_features(gate_x_r, gate_y_r)[::11], grid_y.ravel()[::11])
        dump(x_clf, x_regression_path)
        dump(y_clf, y_regression_path)
    # Use the 2D quadratic model of proj transform
    grid_x = x_clf.predict(_features(gate_x_r, gate_y_r)).reshape(gate_x.shape)
    grid_y = y_clf.predict(_features(gate_x_r, gate_y_r)).reshape(gate_y.shape)
    return np.stack([grid_x, grid_y], axis=0)
def map_gates_to_subgrid(
    subgrid_shape,
    subgrid_starts,
    subgrid_steps,
    field_shape,
    field_data,
    field_mask,
    excluded_gates,
    gate_z,
    gate_y,
    gate_x,
    gate_range,
    gate_timedelta,
    toa,
    roi_func_args,
    cy_weighting_function
):
    """Map radar gates onto a subgrid, returning accumulated sums and weights.

    Functional (delayed-friendly) wrapper around the GatesToSubgridMapper
    class: allocates the accumulation arrays, runs the gate-mapping routine,
    and returns the weighted sums ('sum') and total weights ('wsum').
    """
    accumulated_sum = np.zeros(subgrid_shape, dtype=np.float32)
    accumulated_wsum = np.zeros(subgrid_shape, dtype=np.float32)
    mapper = GatesToSubgridMapper(
        subgrid_shape[:-1],  # spatial dims only; last axis presumably indexes fields — TODO confirm
        subgrid_starts,
        subgrid_steps,
        accumulated_sum,
        accumulated_wsum
    )
    # Rebuild the radius-of-influence callable from its picklable argument tuple
    roi_func = _parse_roi_func(*roi_func_args)
    mapper.map_gates_to_subgrid(
        field_shape[1],
        field_shape[0],
        gate_z.astype('float32'),
        gate_y.astype('float32'),
        gate_x.astype('float32'),
        gate_range.astype('float32'),
        gate_timedelta.astype('float32'),
        field_data,
        field_mask,
        excluded_gates,
        toa,
        roi_func,
        cy_weighting_function
    )
    return {
        'sum': accumulated_sum,
        'wsum': accumulated_wsum
    }
def prepare_single_radar(i, radar, radar_coords, grid_params, r_max, cf_attrs, cache_dir):
    """Precompute per-radar gridding info: subgrid index bounds and gate coordinates.

    Parameters
    ----------
    i : int
        Index of this radar within ``radar_coords``.
    radar : pyart.core.radar.Radar
        Radar volume whose gates will be mapped.
    radar_coords : sequence
        Indexable collection of (x, y) radar site locations in grid projection space.
    grid_params : dict
        Destination grid definition ('nx', 'dx', 'x_min' and y equivalents are read here).
    r_max : float
        Maximum radius of influence of radar data, in projection units.
    cf_attrs : dict
        CF projection attributes of the destination grid.
    cache_dir : str
        Directory holding cached coordinate-transform regressions.

    Returns
    -------
    dict or None
        Site id, subgrid index bounds, and gate coordinates on the destination
        grid; or None (with a warning) when the radar's r_max box falls
        entirely outside the destination grid.
    """
    # Define projection objects
    # Azimuthal equidistant projection centered on the radar site
    crs_kwargs = {
        'proj': 'aeqd',
        'lat_0': radar.latitude['data'].item(),
        'lon_0': radar.longitude['data'].item()
    }
    # Determine subset of grid that this radar will map data on to
    x_radar, y_radar = radar_coords[i]
    # Inclusive index of destination grid point to the left of left-most influence of
    # radar data
    xi_min = max(
        0,
        int((x_radar - r_max - grid_params['x_min']) / grid_params['dx'])
    )
    # Exclusive index (upper) of destination grid point to the right of the right-most
    # influence of radar data
    xi_max = min(
        grid_params['nx'],
        int(
            (x_radar + r_max - grid_params['x_min'])
            / grid_params['dx']
        ) + 2
    )
    # Inclusive index of destination grid point below the bottom-most influence of
    # radar data
    yi_min = max(
        0,
        int((y_radar - r_max - grid_params['y_min']) / grid_params['dy'])
    )
    # Exclusive index (upper) of destination grid point above the top-most
    # influence of radar data
    yi_max = min(
        grid_params['ny'],
        int(
            (y_radar + r_max - grid_params['y_min'])
            / grid_params['dy']
        ) + 2
    )
    # Sanity check that we didn't exceed our bounds
    if (
        xi_max <= 0
        or xi_min >= grid_params['nx']
        or yi_max <= 0
        or yi_min >= grid_params['ny']
    ):
        warnings.warn("Radar included outside of maximum range box. Skipping.")
        # Implicit None return signals "skip this radar" to the caller
        return
    # Prep coordinates of gates on destination grid
    # site_id doubles as the cache key for the coordinate-transform regression
    site_id = radar.metadata['instrument_name']
    gate_dest_xy = radar_coords_to_grid_coords(
        radar.gate_x['data'],
        radar.gate_y['data'],
        site_id=site_id,
        radar_crs_kwargs=crs_kwargs,
        target_crs_cf_attrs=cf_attrs,
        wait_for_cache=False,  # TODO validate assumption
        cache_dir=cache_dir
    )
    gate_dest_z = radar.gate_altitude['data']
    return {
        'site_id': site_id,
        'radar': radar,
        'x_radar': x_radar,
        'y_radar': y_radar,
        'xi_min': xi_min,
        'xi_max': xi_max,
        'yi_min': yi_min,
        'yi_max': yi_max,
        'gate_dest_x': gate_dest_xy[0],
        'gate_dest_y': gate_dest_xy[1],
        'gate_dest_z': gate_dest_z
    }
class Gridder:
"""Map gates from one or more radars to a common grid.
TODO: document
"""
def __init__(self, cf_projection, **grid_params):
"""Set up Gridder by defining grid.
Parameters
----------
cf_projection : dict-like
Dictionary of projection parameters as defined by the CF Conventions
pool : multiprocessing.Pool
Pool for multiprocessing parallelization where useful
**grid_params
Additional keyword arguments for controlling the grid points in the specified
projected space. Four options for each dimension (z, y, x) are available, be sure
to only specify three.
- ``nz``: number of grid points
- ``dz``: grid spacing
- ``z_min``: inclusive lower bound
- ``z_max``: inclusive upper bound
(likewise for y and x)
"""
self.cf_attrs = dict(cf_projection)
self.cache_dir = "/tmp/"
self.grid_params = {}
self.r_max = 3.0e5
self._assign_grid_params('x', {k: v for k, v in grid_params.items() if k in _grid_param_labels_x})
self._assign_grid_params('y', {k: v for k, v in grid_params.items() if k in _grid_param_labels_y})
self._assign_grid_params('z', {k: v for k, v in grid_params.items() if k in _grid_param_labels_z})
def assign_from_subbatch_and_spacing(self, subbatch, spacing):
"""Set horizontal grid params from subbatch and spacing in units of grid projection.
Parameters
----------
subbatch : pandas.Series or dict-like
Subscriptable with keys 'x_min', 'x_max', 'y_min', and 'y_max'
spacing : float
Regular horizontal grid spacing in projection space units
"""
self._assign_grid_params(
'x',
self.round_grid_params(
{'x_min': subbatch['x_min'], 'x_max': subbatch['x_max'], 'dx': spacing},
dims='x'
)
)
self._assign_grid_params(
'y',
self.round_grid_params(
{'y_min': subbatch['y_min'], 'y_max': subbatch['y_max'], 'dy': spacing},
dims='y'
)
)
def _assign_grid_params(self, dim, params):
"""Assign grid parameters for dimension."""
if not params:
# Skip if empty
return
n = params.get(f"n{dim}", None)
d = params.get(f"d{dim}", None)
min = params.get(f"{dim}_min", None)
max = params.get(f"{dim}_max", None)
if len([param for param in [n, d, min, max] if param is None]) != 1:
warnings.warn(str((n, d, min, max)))
raise ValueError(
f"Exactly three of four grid parameters must be specified for a "
f"well-defined grid for dimension {dim}."
)
if n is None:
n = int((max - min) / d) + 1
if np.abs(max - min - d * (n - 1)) > 1e-6:
raise ValueError(f"Grid min and max not evenly separated by d{dim}.")
elif d is None:
d = (max - min) / (n - 1)
elif min is None:
min = max - d * (n - 1)
elif max is None:
max = min + d * (n - 1)
self.grid_params[f"n{dim}"] = n
self.grid_params[f"d{dim}"] = d
self.grid_params[f"{dim}_min"] = min
self.grid_params[f"{dim}_max"] = max
@staticmethod
def round_grid_params(grid_params, dims=None):
"""Regularize grid params so that grid is periodic with a 0 origin.
Grid spacing (dz, dy, dx) is the unmodified parameter, with z_min rounded down and
z_max rounded up in order to fully encompass the originally specified region. Count is
then recomputed with the new bounds and fixed spacing.
Parameters
----------
dims : iterable or None, optional
Collection of 'x', 'y', and 'z' dims to regularize. Defaults to None, which rounds each dim.
"""
dims = ('z', 'y', 'x') if dims is None else dims
for dim in dims:
grid_params[f"{dim}_min"] = (
np.floor(grid_params[f"{dim}_min"] / grid_params[f"d{dim}"])
* grid_params[f"d{dim}"]
)
grid_params[f"{dim}_max"] = (
np.ceil(grid_params[f"{dim}_max"] / grid_params[f"d{dim}"])
* grid_params[f"d{dim}"]
)
if f"n{dim}" in grid_params:
grid_params[f"n{dim}"] = int(
(grid_params[f"{dim}_max"] - grid_params[f"{dim}_min"])
/ grid_params[f"d{dim}"]
) + 1
return grid_params
def prepare_radar_subgrids(self, radar_site_ids, radar_sites, r_max=3.0e5):
self.subgrid_params = {}
for radar_id in radar_site_ids:
# Get radar location
radar_info = radar_sites.loc[radar_id]
radar_location_x = radar_info['x']
radar_location_y = radar_info['y']
# Prepare naive subgrid for this radar
subgrid_params = self.round_grid_params({
'x_min': radar_location_x | |
" + output_folder + "/all_cmds_log.txt"
remove = "rm -r " + output_folder
if os.system(remove) !=0:
errors['remove_folder'] = "Could not detele output_directory"
flash("Warning: Could not delete {}".format(output_folder))
return errors, output_folder
make_dir = 'mkdir ' + output_folder
if os.system(make_dir) != 0:
errors['mkdir'] = "Failed to create output directory, please check parent path exists and has write permission"
flash("Warning: Failed to create output directory, please check parent path exists and has write permission")
return errors, output_folder
elif check_override(output_folder, override_data, skip) and os.path.exists(output_input):
errors['override'] = True
flash("Warning: Output folder is NOT empty. Please choose another folder or delete/move files in it.")
return errors, output_folder
# Make empty log file for initial progress rendering
make_log = 'touch \"' + output_folder + '\"/all_cmds_log.txt'
if os.system(make_log) != 0:
errors['touch'] = "Failed to write to output directory, please check path exists and has write permission"
flash("Warning: Failed to create log file, please check parent path exists and has write permission")
return errors, output_folder
#check length parameters are valid
if min_length.isdigit() == False:
errors['invalid_length'] = "Invalid minimum length."
if max_length.isdigit() == False:
errors['invalid_length'] = "Invalid maximum and minimum length."
elif max_length.isdigit() == False:
errors['invalid_length'] = "Invalid maximum length."
elif int(max_length) < int(min_length):
errors['invalid_length'] = "Invalid parameters: Maximum length smaller than minimum length."
return errors, output_folder
def getInputFolders(filepath):
    """List the entries of *filepath* for display as selectable input folders.

    Mirrors the previous ``cd && cd <filepath> && ls`` shell pipeline:
    relative paths are resolved against the user's home directory, hidden
    (dot-prefixed) entries are omitted, names are sorted, and a trailing
    empty string is kept for compatibility with the old newline-split
    output. Replaces the former ``subprocess.check_output(..., shell=True)``
    call, which concatenated *filepath* into a shell command and was thereby
    vulnerable to shell injection.

    Raises OSError if the directory does not exist or cannot be read.
    """
    # `cd && cd <filepath>` resolved relative paths against $HOME
    base = filepath if os.path.isabs(filepath) else os.path.join(os.path.expanduser("~"), filepath)
    print("check folders command")
    print(base)
    # `ls` skips dotfiles and sorts; the trailing "" matches the old split("\n")
    folders = sorted(entry for entry in os.listdir(base) if not entry.startswith('.')) + [""]
    return folders
@app.route("/parameters", methods = ["POST","GET"])
def parameters():
# get global variables for use
global input_filepath
global sample_csv
global schemes
# get a list of all the folders in the input and csv folders to be displayed to the user
folders = getInputFolders(input_filepath)
csvs = getInputFolders(sample_csv)
if request.method == "POST":
# get curr queue
queueList = []
if not qSys.queue.empty():
for item in qSys.queue.getItems():
queueList.append({item._job_name : url_for('progress', job_name=item._job_name, task_id = item._task_id)})
queueDict = {'jobs': queueList}
displayQueue = json.htmlsafe_dumps(queueDict)
#get parameters
job_name = request.form.get('job_name')
input_folder = request.form.get('input_folder')
read_file = request.form.get('read_file')
primer_scheme_dir = request.form.get('primer_scheme_dir')
primer_scheme = request.form.get('primer_scheme')
primer_type = request.form.get('primer_type')
other_primer_type = request.form.get('other_primer_type')
output_folder = request.form.get('output_folder')
normalise = request.form.get('normalise')
num_threads = request.form.get('num_threads')
pipeline = request.form.get('pipeline')
num_samples = request.form.get('num_samples')
min_length = request.form.get('min_length')
max_length = request.form.get('max_length')
bwa = request.form.get('bwa')
skip_nanopolish = request.form.get('skip_nanopolish')
dry_run = request.form.get('dry_run')
# num_samples = request.form.get('num_samples')
guppyplex = request.form.get('guppyplex')
barcode_type = request.form.get('barcode_type')
csv_file = request.form.get('csv_file')
virus = request.form.get('virus')
override_data = request.form.get('override_data')
# DEBUG
step = int(request.form.get('step'))
sys.stderr.write("override_data: {}\n".format(override_data))
sys.stderr.write("guppyplex: {}\n".format(guppyplex))
# set correct primer_type - if primer type is other, get the correct primer type from the tet input
# primer_select is so that on reload, the correct radio button will be selected
primer_select = primer_type
if virus == 'custom':
if other_primer_type:
primer_type = other_primer_type
else:
primer_type = "Custom-primer-scheme"
# store input_name
input_name = input_folder
#csv filepath
csv_filepath = sample_csv + '/' + csv_file
# concat /data to input folder
# global input_filepath
input_folder = input_filepath + '/' + input_folder
filename = os.path.dirname(os.path.realpath(__file__))
# if no output folder entered, creates one inside of input folder
# Do this to put output above input folder to stop fastq cross talk
# if not output_folder:
# output_folder = input_folder + "/output"
# else:
# if output_folder[0] != "/":
# output_folder = input_folder + output_folder
if not os.path.isdir(input_folder):
input_folder = ""
output_input = ""
else:
os.chdir(input_folder)
tmp_oi = os.getcwd()
output_input = tmp_oi
# get the correct input folder filepath from user input
# path = glob.glob(input_folder + '/*/*')[0]
# use fnmatch with walk to get fastq_pass, fastq_fail folders
# then split off the last bit to get the top folder for the gather command
tmp_folder_list = []
for dName, sdName, fList in os.walk(input_folder):
for fileName in sdName:
if fnmatch.fnmatch(fileName, "fastq*"):
tmp_folder_list.append(os.path.join(dName, fileName))
elif fnmatch.fnmatch(fileName, "barcode*"):
tmp_folder_list.append(os.path.join(dName, fileName))
if len(tmp_folder_list) == 0:
queueList = []
flash("Warning: Could not locate fastq files in {}. Check the file names, demultiplexing options and the directory structure is compatible".format(input_folder))
errors = {}
if qSys.queue.empty():
return render_template("parameters.html", job_name=job_name, queue=None,
input_name=input_name, input_folder=input_folder,
output_folder=output_folder, virus=virus,
pipeline=pipeline, min_length=min_length,
max_length=max_length, primer_scheme=primer_scheme,
primer_type=primer_type, num_samples=num_samples,
primer_scheme_dir=primer_scheme_dir, guppyplex=guppyplex, barcode_type=barcode_type,
errors=errors, folders=folders, csvs=csvs, csv_name=csv_file,
other_primer_type=other_primer_type, primer_select=primer_select,
schemes=schemes, override_data=override_data, VERSION=VERSION, ARTIC_VERSION=ARTIC_VERSION, DOCS=DOCS)
return render_template("parameters.html", job_name=job_name, queue=displayQueue,
input_name=input_name, input_folder=input_folder,
output_folder=output_folder, virus=virus,
pipeline=pipeline, min_length=min_length,
max_length=max_length, primer_scheme=primer_scheme,
primer_type=primer_type, num_samples=num_samples,
primer_scheme_dir=primer_scheme_dir, guppyplex=guppyplex, barcode_type=barcode_type,
errors=errors,folders=folders, csvs=csvs, csv_name=csv_file,
other_primer_type=other_primer_type, primer_select=primer_select,
schemes=schemes, override_data=override_data, VERSION=VERSION, ARTIC_VERSION=ARTIC_VERSION, DOCS=DOCS)
# this takes the first encountered (so fastq_pass or a barcode folder depending on how the user demuxed or not)
# It looks at sdName above, not filenames!!!! so here when it splits, it keeps all BUT the sdName, so it gets the containing parent directory
tmp_path = tmp_folder_list[0].split("/")[:-1]
path = "/".join(tmp_path)
os.chdir(path)
input_folder = os.getcwd()
#if user agrees output can override files with the same name in output folder
if request.form.get('override_data'):
override_data = True
else:
override_data = False
# check errors
errors = {}
errors, output_folder_checked = checkInputs(input_folder, output_folder, primer_scheme_dir,
read_file, pipeline, override_data, min_length,
max_length, job_name, output_input, csv_filepath, step, num_samples)
# if an output folder does not exist, make one
# if not output_folder:
# output_folder = output_folder_checked
output_folder = output_folder_checked
# validate csv contents.
# No special characters -
# comma separated -
# 2 columns -
# 2nd column should have NB or RB or BC-
def _detect_special(pass_string):
regex= re.compile('^[a-zA-Z0-9,_-]+$')
if(regex.search(pass_string) == None):
ret = True
else:
ret = False
return ret
sys.stderr.write("checking CSV file: {}\n".format(csv_filepath))
if os.path.isfile(csv_filepath):
sys.stderr.write("csv file exists\n")
with open(csv_filepath, 'r') as c:
for l in c:
l = l.strip("\n")
if _detect_special(l):
flash("Warning: csv file malformed: special characters detected ")
errors['csv_malformed'] = "csv is malformed, special characters detected a-zA-Z0-9,_- only"
break
l = l.split(",")
if len(l) != 2:
errors['csv_malformed'] = "csv is malformed, more or less than 2 columns"
flash("Warning: csv file malformed: more or less than 2 columns")
break
else:
if l[1][:2] not in ["NB", "RB", "BC"]:
errors['csv_malformed'] = "csv is malformed, not NB or RB or BC for barcode"
flash("Warning: csv file malformed: not NB or RB or BC for barcode")
break
sys.stderr.write("printing errors:\n")
k = ["{}: {}".format(key, errors[key]) for key in errors.keys()]
sys.stderr.write(",".join(k))
sys.stderr.write("\n")
# if queue is full, add an error to the list
if qSys.queue.full():
errors['full_queue'] = "Job queue is full."
# display errors if errors exist
if len(errors) != 0:
# k = ["{}: {}".format(key, errors[key]) for key in errors.keys()]
# sys.stderr.write(",".join(k))
# sys.stderr.write("\n")
#Update displayed queue on home page
queueList = []
if qSys.queue.empty():
return render_template("parameters.html", job_name=job_name, queue=None,
input_name=input_name, input_folder=input_folder,
output_folder=output_folder, virus=virus,
pipeline=pipeline, min_length=min_length,
max_length=max_length, primer_scheme=primer_scheme,
primer_type=primer_type, num_samples=num_samples,
primer_scheme_dir=primer_scheme_dir, guppyplex=guppyplex, barcode_type=barcode_type,
errors=errors, folders=folders, csvs=csvs, csv_name=csv_file,
other_primer_type=other_primer_type, primer_select=primer_select,
schemes=schemes, override_data=override_data, VERSION=VERSION, ARTIC_VERSION=ARTIC_VERSION, DOCS=DOCS)
return render_template("parameters.html", job_name=job_name, queue=displayQueue,
input_name=input_name, input_folder=input_folder,
output_folder=output_folder, virus=virus,
pipeline=pipeline, min_length=min_length,
max_length=max_length, primer_scheme=primer_scheme,
primer_type=primer_type, num_samples=num_samples,
primer_scheme_dir=primer_scheme_dir, guppyplex=guppyplex, barcode_type=barcode_type,
errors=errors,folders=folders, csvs=csvs, csv_name=csv_file,
other_primer_type=other_primer_type, primer_select=primer_select,
schemes=schemes, override_data=override_data, VERSION=VERSION, ARTIC_VERSION=ARTIC_VERSION, DOCS=DOCS)
#no spaces in the job name - messes up commands
job_name = job_name.replace(" ", "_")
# create new jobs
if pipeline != "both":
#Create a new instance of the Job class
new_job = qSys.newJob(job_name, input_folder, read_file, primer_scheme_dir, primer_scheme, primer_type, output_folder, normalise, num_threads, pipeline, min_length, max_length, bwa, skip_nanopolish, dry_run, override_data, num_samples, guppyplex, barcode_type, input_name, csv_filepath, primer_select, input_name)
#Add job to queue
qSys.addJob(new_job)
print("qSys has jobs: ", qSys.printQueue())
new_task = executeJob.apply_async(args=[new_job.job_name, new_job.gather_cmd, new_job.guppyplex_cmd, new_job.demult_cmd, new_job.min_cmd, new_job.plot_cmd, step])
new_job.task_id = new_task.id
#if both pipelines
else:
#Create a new medaka instance of the Job class
new_job_m = qSys.newJob(job_name + "_medaka", input_folder, read_file, primer_scheme_dir, primer_scheme, primer_type, output_folder + "/medaka", normalise, num_threads, "medaka", min_length, max_length, bwa, skip_nanopolish, dry_run, override_data, num_samples, guppyplex, barcode_type, input_name, csv_filepath, primer_select, input_name)
#Create a new nanopolish instance of the Job class
new_job_n = qSys.newJob(job_name + "_nanopolish", input_folder, read_file, primer_scheme_dir, primer_scheme, primer_type, output_folder + "/nanopolish", normalise, num_threads, "nanopolish", min_length, max_length, bwa, skip_nanopolish, dry_run, override_data, num_samples, guppyplex, barcode_type, input_name, csv_filepath, primer_select, input_name)
#Add medaka job to queue
qSys.addJob(new_job_m)
task_m = executeJob.apply_async(args=[new_job_m.job_name, new_job_m.gather_cmd, new_job_m.guppyplex_cmd, new_job_m.demult_cmd, new_job_m.min_cmd, new_job_m.plot_cmd, step])
new_job_m.task_id = task_m.id
#Add nanopolish job to queue
qSys.addJob(new_job_n)
task_n = executeJob.apply_async(args=[new_job_n.job_name, new_job_n.gather_cmd, new_job_n.guppyplex_cmd, new_job_n.demult_cmd, new_job_n.min_cmd, | |
# -*- coding: UTF-8 -*-
import logging
import math
import os
import arcpy
from pyspatialopt import version
def generate_query(unique_ids, unique_field_name, wrap_values_in_quotes=False):
    """
    Generates a select or definition query that can applied to the input layers
    :param unique_ids: (list) A list of ids to query (values may be strings or numbers)
    :param unique_field_name: (string) The name of field that the ids correspond to
    :param wrap_values_in_quotes: (bool) Should the ids be wrapped in quotes (if unique_field_name is string)
    :return: (string) A query string that can be applied to a layer
    """
    # The leading -1 keeps the IN clause non-empty/valid even with no ids
    if unique_ids:
        if wrap_values_in_quotes:
            values = ",".join("'{0}'".format(uid) for uid in unique_ids)
        else:
            # str() generalizes the previous bare join, which raised
            # TypeError when ids were numeric (e.g. integer OIDs)
            values = ",".join(str(uid) for uid in unique_ids)
        query = "{} in (-1,{})".format(unique_field_name, values)
    else:
        query = "{} in (-1)".format(unique_field_name)
    return query
def reset_layers(*args):
    """
    Clears the selection and definition query applied to each given layer
    :param args: (Feature Layers) The feature layers to reset
    :return:
    """
    for feature_layer in args:
        # Drop any active selection, then any definition query filter
        arcpy.SelectLayerByAttribute_management(feature_layer, "CLEAR_SELECTION")
        feature_layer.definitionQuery = ""
def generate_serviceable_demand(dl, dl_demand_field, dl_id_field, *args):
    """
    Finds the total serviceable coverage when one or more facility layers are used
    Merges facility polygons & dissolves them to form one big area of total coverage
    Then intersects with demand layer. Partial coverage applies to polygon demand;
    point demand is covered all-or-nothing.
    :param dl: (Feature Layer) The demand polygon or point layer
    :param dl_demand_field: (string) The field representing demand
    :param dl_id_field: (string) The name of the unique field for the demand layer
    :param args: (Feature Layer) The facility layers to use
    :return: (dictionary) A dictionary of similar format to the coverage format
    """
    # Reset DF
    # Check parameters so we get useful exceptions and messages
    reset_layers(dl)
    reset_layers(*args)
    # Validate inputs up front (previously the empty-args check sat inside the
    # per-layer loop, after arcpy.Describe had already been called on the layer)
    if not args:
        raise ValueError("No facility service area feature layers specified")
    if arcpy.Describe(dl).shapeType not in ["Polygon", "Point"]:
        # Message previously claimed polygon-only even though Point is accepted
        raise TypeError("Demand layer must have polygon or point geometry")
    dl_field_names = [f.name for f in arcpy.Describe(dl).fields]
    if dl_demand_field not in dl_field_names:
        raise ValueError("'{}' field not found in demand layer".format(dl_demand_field))
    if dl_id_field not in dl_field_names:
        raise ValueError("'{}' field not found in demand layer".format(dl_id_field))
    # Check that all facility layers are polygon
    for fl in args:
        if arcpy.Describe(fl).shapeType != "Polygon":
            # Was fl.desc.name, which is not a valid layer attribute chain
            raise TypeError("{} is not a polygon layer".format(arcpy.Describe(fl).name))
    dl_desc = arcpy.Describe(dl)
    logging.getLogger().info("Initializing output...")
    if dl_desc.shapeType == "Polygon":
        output = {
            "version": version.__version__,
            "demand": {},
            "type": {
                "mode": "serviceableDemand",
                "type": "partial"}
        }
    elif dl_desc.shapeType == "Point":
        output = {
            "version": version.__version__,
            "demand": {},
            "type": {
                "mode": "serviceableDemand",
                "type": "binary"}
        }
    else:
        raise TypeError("Demand layer must be point or polygon")
    logging.getLogger().info("Combining facilities...")
    # Union all facility service areas into one dissolved geometry
    dissolved_geom = None
    for layer in args:
        with arcpy.da.SearchCursor(layer, ['SHAPE@']) as fcursor:
            for f in fcursor:
                if dissolved_geom is None:
                    dissolved_geom = f[0]
                else:
                    dissolved_geom = dissolved_geom.union(f[0])
    logging.getLogger().info("Determining possible service coverage for each demand unit...")
    with arcpy.da.SearchCursor(dl, [dl_id_field, dl_demand_field, "SHAPE@"]) as dcursor:
        if arcpy.Describe(dl).shapeType == "Polygon":
            for d in dcursor:
                if not dissolved_geom.disjoint(d[2]):
                    # dimension=4 requests polygon output from the intersection
                    intersected = dissolved_geom.intersect(d[2], 4)
                    if intersected.area > 0:
                        # Demand is served in proportion to the covered area
                        serviceable_demand = math.ceil(
                            float(intersected.area / d[2].area) * d[1])
                    else:
                        serviceable_demand = 0.0
                else:
                    serviceable_demand = 0.0
                # Make sure serviceable is less than or equal to demand, floating point issues
                if serviceable_demand < d[1]:
                    output["demand"][str(d[0])] = {"serviceableDemand": serviceable_demand}
                else:
                    output["demand"][str(d[0])] = {"serviceableDemand": d[1]}
        else:  # Point demand: coverage is all-or-nothing
            for d in dcursor:
                # dimension=1 requests point output from the intersection
                intersected = dissolved_geom.intersect(d[2], 1)
                if intersected.centroid:  # non-empty result means the point is covered
                    serviceable_demand = d[1]
                else:
                    serviceable_demand = 0.0
                output["demand"][str(d[0])] = {"serviceableDemand": serviceable_demand}
    logging.getLogger().info("Serviceable demand successfully created.")
    reset_layers(dl)
    reset_layers(*args)
    return output
def generate_binary_coverage(dl, fl, dl_demand_field, dl_id_field, fl_id_field, fl_variable_name=None):
    """
    Generates a dictionary representing the binary coverage of a facility to demand points
    :param dl: (Feature Layer) The demand polygon or point layer
    :param fl: (Feature Layer) The facility service area polygon layer
    :param dl_demand_field: (string) The name of the field in the demand layer that describes the demand
    :param dl_id_field: (string) The name of the unique identifying field on the demand layer
    :param fl_id_field: (string) The name of the unique identifying field on the facility layer
    :param fl_variable_name: (string) The name to use to represent the facility variable
    :return: (dictionary) A nested dictionary storing the coverage relationships
    """
    # Check parameters so we get useful exceptions and messages.
    # arcpy.Describe() re-inspects the data source on every call, so hoist it:
    # the original issued five Describe calls for the same two layers.
    dl_desc = arcpy.Describe(dl)
    fl_desc = arcpy.Describe(fl)
    dl_shape_type = dl_desc.shapeType
    if dl_shape_type not in ["Polygon", "Point"]:
        raise TypeError("Demand layer must have polygon or point geometry")
    if fl_desc.shapeType != "Polygon":
        raise TypeError("Facility service area layer must have polygon geometry")
    dl_field_names = [f.name for f in dl_desc.fields]
    if dl_demand_field not in dl_field_names:
        raise ValueError("'{}' field not found in demand layer".format(dl_demand_field))
    if dl_id_field not in dl_field_names:
        raise ValueError("'{}' field not found in demand layer".format(dl_id_field))
    fl_field_names = [f.name for f in fl_desc.fields]
    if fl_id_field not in fl_field_names:
        raise ValueError("'{}' field not found in facility service area layer".format(fl_id_field))
    reset_layers(dl, fl)
    if fl_variable_name is None:
        fl_variable_name = os.path.splitext(os.path.basename(fl_desc.name))[0]
    logging.getLogger().info("Initializing facilities in output...")
    output = {
        "version": version.__version__,
        "type": {
            "mode": "coverage",
            "type": "binary",
        },
        "demand": {},
        "totalDemand": 0.0,
        "totalServiceableDemand": 0.0,
        "facilities": {fl_variable_name: []}
    }
    # List all of the facilities
    with arcpy.da.SearchCursor(fl, [fl_id_field]) as cursor:
        for row in cursor:
            output["facilities"][fl_variable_name].append(str(row[0]))
    # Build empty data structure
    with arcpy.da.SearchCursor(dl, [dl_id_field, dl_demand_field, "SHAPE@AREA"]) as cursor:
        for row in cursor:
            output["demand"][str(row[0])] = {
                "area": round(row[2]),
                "demand": round(row[1]),
                "serviceableDemand": 0,
                "coverage": {fl_variable_name: {}}
            }
    logging.getLogger().info("Determining binary coverage for each demand unit...")
    with arcpy.da.SearchCursor(fl, [fl_id_field, "SHAPE@"]) as fcursor:
        if dl_shape_type == "Point":
            # A point is covered (and fully serviceable) as soon as it touches
            # any facility service area.
            for f in fcursor:
                with arcpy.da.SearchCursor(dl, [dl_id_field, "SHAPE@"]) as dcursor:
                    for d in dcursor:
                        if not f[1].disjoint(d[1]):
                            output["demand"][str(d[0])]["serviceableDemand"] = \
                                output["demand"][str(d[0])]["demand"]
                            output["demand"][str(d[0])]["coverage"][fl_variable_name][str(f[0])] = 1
        else:  # Polygon
            # A polygon gets a coverage entry when it intersects a service area,
            # but is only fully serviceable when entirely contained by one.
            for f in fcursor:
                with arcpy.da.SearchCursor(dl, [dl_id_field, "SHAPE@"]) as dcursor:
                    for d in dcursor:
                        if not f[1].disjoint(d[1]):
                            if f[1].contains(d[1]):
                                output["demand"][str(d[0])]["serviceableDemand"] = \
                                    output["demand"][str(d[0])]["demand"]
                            output["demand"][str(d[0])]["coverage"][fl_variable_name][str(f[0])] = 1
    # Tally the grand totals from the per-unit results
    with arcpy.da.SearchCursor(dl, [dl_id_field, dl_demand_field]) as cursor:
        for row in cursor:
            output["totalServiceableDemand"] += output["demand"][str(row[0])]["serviceableDemand"]
            output["totalDemand"] += row[1]
    logging.getLogger().info("Binary coverage successfully generated.")
    reset_layers(dl, fl)
    return output
def generate_partial_coverage(dl, fl, dl_demand_field, dl_id_field="OBJECTID", fl_id_field="OBJECTID",
fl_variable_name=None):
"""
Generates a dictionary representing the partial coverage (based on area) of a facility to demand areas
:param dl: (Feature Layer) The demand polygon layer
:param fl: (Feature Layer) The facility service area polygon layer
:param dl_demand_field: (string) The name of the field in the demand layer that describes the demand
:param dl_id_field: (string) The name of the unique identifying field on the demand layer
:param fl_id_field: (string) The name of the unique identifying field on the facility layer
:param fl_variable_name: (string) The name to use to represent the facility variable
:return: (dictionary) A nested dictionary storing the coverage relationships
"""
# Reset DF
# Check parameters so we get useful exceptions and messages
if arcpy.Describe(dl).shapeType != "Polygon":
raise TypeError("Demand layer must have polygon geometry")
if arcpy.Describe(fl).shapeType != "Polygon":
raise TypeError("Facility service area layer must have polygon geometry")
dl_field_names = [f.name for f in arcpy.Describe(dl).fields]
if dl_demand_field not in dl_field_names:
raise ValueError("'{}' field not found in demand layer".format(dl_demand_field))
if dl_id_field not in dl_field_names:
raise ValueError("'{}' field not found in demand layer".format(dl_id_field))
fl_field_names = [f.name for f in arcpy.Describe(fl).fields]
if fl_id_field not in fl_field_names:
raise ValueError("'{}' field not found in facility service area layer".format(fl_id_field))
reset_layers(dl, fl)
if fl_variable_name is None:
fl_variable_name = os.path.splitext(os.path.basename(arcpy.Describe(fl).name))[0]
# Create the initial data structure
logging.getLogger().info("Initializing facilities in output...")
output = {
"version": version.__version__,
"type": {
"mode": "coverage",
"type": "partial",
},
"demand": {},
"totalDemand": 0.0,
"totalServiceableDemand": 0.0,
"facilities": {fl_variable_name: []}
}
# Populate the facility ids
with arcpy.da.SearchCursor(fl, [fl_id_field]) as cursor:
for row in cursor:
output["facilities"][fl_variable_name].append(str(row[0]))
# populate the coverage dictionary with all demand areas (i)
logging.getLogger().info("Initializing demand in output...")
with arcpy.da.SearchCursor(dl, [dl_id_field, dl_demand_field, "SHAPE@AREA"]) as cursor:
for row in cursor:
output["demand"][str(row[0])] = {
"area": round(row[2]),
"demand": round(row[1]),
"serviceableDemand": 0.0,
"coverage": {fl_variable_name: {}}
}
# Dissolve all facility service areas so we can find the total serviceable area
logging.getLogger().info("Combining facilities...")
dissovled_geom = None
with arcpy.da.SearchCursor(fl, ['SHAPE@']) as fcursor:
for f in fcursor:
if dissovled_geom is None:
dissovled_geom = f[0]
dissovled_geom = dissovled_geom.union(f[0])
logging.getLogger().info("Determining partial coverage for each demand unit...")
with arcpy.da.SearchCursor(dl, [dl_id_field, dl_demand_field, "SHAPE@"]) as dcursor:
for d in dcursor:
if not dissovled_geom.disjoint(d[2]):
intersected = dissovled_geom.intersect(d[2], 4)
if intersected.area > 0:
serviceable_demand = math.ceil(
float(intersected.area / d[2].area) * d[1])
else:
serviceable_demand = 0.0
else:
serviceable_demand = 0.0
# Make sure serviceable is less than or equal to demand, floating point issues
if serviceable_demand | |
<filename>ldap2sql.py
#!/usr/bin/python
import inspect
import os
import sys
import urllib
import urllib2
import hashlib
import logging
from sqlalchemy import create_engine
reload(sys)
sys.setdefaultencoding('UTF8')
cmd_folder = os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "contrib"))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from activedirectory import ActiveDirectory
# One-shot survey of the JIRA instance: one (metric, value) row per statistic,
# consumed by CustomUpdater.update_stats() to fill today's custom.stats row.
# NOTE(review): "users_active" counts rows of a cross join filtered on the
# previous-login attribute; presumably one row per user that ever logged in.
jira_stats = """
select 'issues' as metric, count(*) as value from jiraissue
UNION
select 'projects', count(*) from project
UNION
select 'customfields', count(*) from customfield
UNION
select 'workflows', count(distinct name) from os_wfentry
UNION
select 'users', count(*) from cwd_user
UNION
SELECT 'users_active', count(*)
FROM cwd_user, cwd_user_attributes
WHERE cwd_user_attributes.user_id = cwd_user.id
AND cwd_user_attributes.attribute_name = 'login.previousLoginMillis'
UNION
select 'roles', count(*) as roles from projectrole
UNION
select 'dashboards', count(*) as dashboards from portalpage
UNION
select 'plugins', count(*) as plugins from pluginstate where pluginenabled = 'true'
UNION
select 'actions', count(*) as actions from jiraaction
UNION
select 'issuetypes', count(*) as issuetype from issuetype
UNION
select 'statuses', count(*) as issuestatus from issuestatus
UNION
select 'issuetypescreenschemes', count(*) from issuetypescreenscheme
UNION
select 'issuelinktypes', count(*) from issuelinktype
UNION
select 'fieldscreenschemes', count(*) from fieldscreenscheme
UNION
select 'fieldscreens', count(*) from fieldscreen
UNION
select 'fieldlayouts', count(*) from fieldlayout
UNION
select 'fieldlayoutschemes', count(*) from fieldlayoutscheme
UNION
select 'fieldconfigscheme', count(*) from fieldconfigscheme
UNION
select 'changegroup', count(*) from changegroup
UNION
select 'changeitem', count(*) from changeitem
UNION
select 'agileboards', count(*) from "AO_60DB71_RAPIDVIEW"
UNION
select 'attachments', count(*) as attachments from fileattachment
UNION
select 'attachments_gb', round(sum(filesize)/1024/1024/1024) as attachments_gb from fileattachment
order by metric
;
"""
class CustomUpdater(object):
    """The methods both update and insert elements in the table as follows:
    UPDATE table SET some_column='something' WHERE another_column='something else';
    INSERT INTO table (some_column) 'something' WHERE NOT EXISTS (SELECT 1 FROM table WHERE another_column='something else')
    """
def __init__(self, stats_uri=None, activedirectory_uri=None):
if stats_uri is not None:
self.engine = create_engine(stats_uri, convert_unicode=True)
if activedirectory_uri is not None:
self.ad = ActiveDirectory(activedirectory_uri, paged_size=1000, size_limit=50000)
self.fields = ['mail', 'title', 'manager', 'distinguishedName', 'postalCode', 'telephoneNumber', 'givenName', 'name', 'facsimileTelephoneNumber',
'department', 'company', 'streetAddress', 'sAMAccountType', 'mobile', 'c', 'l', 'st', 'extensionAttribute14',
'extensionAttribute15', 'extensionAttribute3', 'sAMAccountName', 'userAccountControl']
self.sql_names = ['mail', 'title', 'managerdn', 'distinguishedname', 'postalcode', 'phone', 'givenname', 'name', 'fax',
'department', 'company', 'streetaddress', 'samaccounttype', 'mobile', 'country', 'locale', 'state', 'vp',
'region', 'office', 'username', 'useraccountcontrol']
self.sql_times = ['created', 'changed']
self.time_fields = ['whenCreated', 'whenChanged']
self.exists = None
self.elem_dict = {}
self.users = []
"""Updates all the fields in custom.stats"""
def update_stats(self):
try:
self.engine.execute('INSERT INTO custom.stats (date) (SELECT CURRENT_DATE);')
except Exception:
pass
for row in self.engine.execute(jira_stats):
self.elem_dict[str(row[0])] = row[1]
for key, value in self.elem_dict.iteritems():
update_query = 'UPDATE custom.stats SET %s=%s WHERE date=CURRENT_DATE;' % (key, value)
self.engine.execute(update_query)
"""Updates most of the fields in custom.activedirectory
The method gets all the attributes for each user whose account was modified since the day of the last update
and parses those attributes to meet the fields in the table"""
def update_activedirectory(self, full=False):
if full:
newf = None
else:
newf = "(whenChanged>=" + self.get_max_date_ad() + ")"
self.users = self.ad.get_users(new_filter=newf, attrlist=self.fields)
logging.info('Found %s users in AD using filter = %s' % (len(self.users), newf))
if not self.users:
raise NotImplemented("WTH")
for count, user in enumerate(self.users):
if count % 100 == 0:
logging.info("%s..." % count)
#print count, user
try:
atr = self.users[user]
except NotImplementedError as e:
logging.error("Skipped user %s because %s" % (user, e))
continue
update_query = 'UPDATE custom.activedirectory SET counter = counter+1 '
for i in range(len(self.fields)):
update_query = self.update_fields(update_query, atr, self.fields[i], self.sql_names[i])
update_query = self.update_times(update_query, atr)
if int(atr['userAccountControl']) & 0x02:
update_query += ', is_active=\'false\''
else:
update_query += ', is_active=\'true\''
update_query += ' WHERE username=\'' + user + '\';'
insert_query = 'INSERT INTO custom.activedirectory ('
first = True
for i in range(len(self.sql_names)):
try:
atr[self.fields[i]]
if not first:
insert_query += ','
insert_query += self.sql_names[i]
first = False
except (IndexError, KeyError):
pass
for i in range(len(self.sql_times)):
try:
atr[self.time_fields[i]]
insert_query += ', ' + self.sql_times[i]
except (IndexError, KeyError):
pass
# UPSERT implementation based on http://stackoverflow.com/a/6527838/99834
insert_query += ',is_active) SELECT '
insert_query = self.insert_fields(insert_query, atr)
insert_query = self.insert_times(insert_query, atr)
if int(atr['userAccountControl']) & 0x02:
insert_query += ',\'false\''
else:
insert_query += ',\'true\''
insert_query += ' WHERE NOT EXISTS (SELECT 1 FROM custom.activedirectory WHERE username= \''\
+ self.escape_quote(user) + '\');'
self.engine.execute(update_query)
self.engine.execute(insert_query)
# updating managers, LDAP returns DN instead of username for managers
# we look for all mana
"""Checks the deleted users from ldap by comparing the users from ldap with those from the database"""
def update_deleted(self):
sql_user = []
for row in self.engine.execute("SELECT samaccountname FROM custom.activedirectory WHERE is_deleted = 'false' ORDER BY samaccountname"):
if row[0]:
sql_user.append(row[0].encode('utf-8'))
self.users = self.ad.get_users()
for i in sql_user:
if not i in self.users:
logging.info("User %s was deleted from LDAP" % i)
self.engine.execute("UPDATE custom.activedirectory SET is_deleted = 'true' and deleted = now() where username = '%s'" % i)
"""Creates the url that should exist if the user has a gravatar picture conected with his email.
Then it checks if the url exists"""
    def check_gravatar(self):
        """Probe gravatar.com for an avatar tied to each user's mail address
        and record the result in the has_gravatar column.

        NOTE: currently disabled — the first statement returns immediately;
        everything below it is intentionally dead code kept for when the check
        is re-enabled.
        """
        return # TODO: re-enable gravatar check
        self.users = self.ad.get_users()
        for count, user in enumerate (self.users):
            atr = self.ad.get_attributes(user = user)
            try:
                email = atr['mail']
                default = 'http://www.gravatar.com/avatar/'
                size = 40
                # Gravatar URLs are keyed on the MD5 of the lower-cased address.
                gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(email.lower()).hexdigest() + "?"
                gravatar_url += urllib.urlencode({'d':default, 's':str(size)})
                try:
                    u = self.find_matches(gravatar_url)
                    # NOTE(review): an EMPTY match list (avatar differs from the
                    # known default image) is treated as "no avatar"? The branch
                    # below looks inverted — confirm before re-enabling.
                    if len(u) == 0:
                        has_avatar = 'true'
                    else:
                        has_avatar = 'false'
                except (urllib2.HTTPError, urllib2.URLError):
                    has_avatar = 'false'
            except (IndexError, KeyError, TypeError):
                has_avatar = 'false'
            self.engine.execute('UPDATE custom.activedirectory SET has_gravatar=\'%s\' WHERE username=\'%s\';' % (has_avatar, user))
def find_matches(self, newu):
urls = []
urls.append('http://www.gravatar.com/avatar/64908bc7260a8be06b142d34f83b9781?s=40&d=http%3A%2F%2Fwww.gravatar.com%2Favatar%2F')
urls.append(newu)
d = {}
url_contents = {}
matches = []
for url in urls:
c = urllib2.urlopen(url)
url_contents[url] = []
while 1:
r = c.read(4096)
if not r: break
md5 = hashlib.md5(r).hexdigest()
url_contents[url].append(md5)
if md5 in d:
url2 = d[md5]
matches.append((md5, url, url2))
else:
d[md5] = []
d[md5].append(url)
return matches
    def update_all(self, full=False):
        """Updates all the fields in all the custom tables"""
        logging.info("Updating changes from AD...")
        self.update_activedirectory(full=full)
        # Today's date (YYYY-MM-DD) taken from the database clock...
        for row in self.engine.execute('SELECT CURRENT_DATE'):
            current_date = str(row[0])
            current_date = current_date[:10]
            break
        # ...and the date of the most recent gravatar sweep.
        for row in self.engine.execute('SELECT MAX(gravatar_check_date) FROM custom.activedirectory;'):
            check_date = str(row[0])
            check_date = check_date[:10]
            break
        # NOTE(review): this runs the gravatar sweep only when it already ran
        # today — the condition looks inverted (`!=` expected); confirm intent.
        if check_date == current_date:
            self.check_gravatar()
        self.update_stats()
        logging.info("Updating deleted accounts...")
        self.update_deleted() # must be before managers!
        logging.info("Updating managers...")
        self.update_managers()
def update_managers(self):
"""
This will populate the manager field with the username of the manager, based on the managerdn (the field returned by ldap)
:return:
"""
for row in self.engine.execute("""select ad.username, ad.manager as oldmanager, ad2.username as newmanager
from custom.activedirectory ad
left join custom.activedirectory ad2 on ad.managerdn = ad2.distinguishedname and NOT ad2.is_deleted
where ad.managerdn is not NULL AND ad.manager != ad2.username
--and ad.manager != ad2.username
--limit 100;"""):
(username, oldmanager, newmanager) = row
self.engine.execute("UPDATE custom.activedirectory SET manager='%s' where username='%s'" % (newmanager, username))
def update_fields(self, update_query, atr, varname, sql_name):
"""Updates the update_query string with the fields that don't require special parsing"""
try:
atr[varname]
update_query += ', ' + sql_name + "='" + self.escape_quote(atr[varname]).encode('utf-8') + "'"
except (IndexError, KeyError):
pass
return update_query
def insert_fields(self, insert_query, atr):
"""Updates the insert_query string with the same fields as the ones above"""
first = True
for i in range(len(self.sql_names)):
try:
atr[self.fields[i]]
if not first:
insert_query += ','
insert_query += '\'' + self.escape_quote(atr[self.fields[i]]).encode('utf-8') + '\''
first = False
except (IndexError, KeyError):
pass
return insert_query
def update_times(self, update_query, atr):
"""Updates the update_query string with the fields that require special parsing (date variables)"""
for i in range(len(self.time_fields)):
try:
update_query += ', ' + self.sql_times[i] + '=\'' + self.convert_date(atr[self.time_fields[i]]).encode('utf-8') + '\''
except (IndexError, KeyError):
pass
return update_query
def insert_times(self, insert_query, atr):
"""Same as the above just for insert_query"""
for i in range(len(self.sql_times)):
try:
atr[self.time_fields[i]]
insert_query += ', \'' + self.convert_date(atr[self.time_fields[i]]).encode('utf-8') + '\''
except (IndexError, KeyError):
pass
return insert_query
def escape_quote(self, string):
"""Escapes the quotes in a string with double quote:
someone's string => someone''s string"""
new_str = string
count = 0
for i in range(len(string)):
if string[i] == '\'':
new_str = new_str[:count] + '\'' + string[i:]
count += 1
count += 1
return new_str
def get_max_date_ad(self):
"""Determines the last date at which the table was updated.
Finds the last date at which an account from the table was updated
and returns that date"""
for row in self.engine.execute("SELECT MAX(changed) FROM custom.activedirectory"):
date = row[0]
break
date = (str(date)).split('-')
if len(date) != 3 or len(date[0]) != 4 or len(date[1]) != 2 or len(date[2]) != 2:
logging.fatal("Couldn't get maximum date from custom.activedirectory")
sys.exit(1)
max_date = date[0] + date[1] + date[2] + "000000.0Z"
return max_date
def convert_date(self, string):
"""Converts date from the ldap timestamp to the sql timestamp
20010101121212.0Z => 2001-01-01 """
string = string[:8]
if | |
self._modules.keys()
@_copy_to_script_wrapper
def items(self) -> Iterable[Tuple[str, Module]]:
r"""Return an iterable of the ModuleDict key/value pairs.
"""
return self._modules.items()
@_copy_to_script_wrapper
def values(self) -> Iterable[Module]:
r"""Return an iterable of the ModuleDict values.
"""
return self._modules.values()
def update(self, modules: Mapping[str, Module]) -> None:
r"""Update the :class:`~torch.nn.ModuleDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError("ModuleDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(modules).__name__)
if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
for key, module in modules.items():
self[key] = module
else:
# modules here can be a list with two items
for j, m in enumerate(modules):
if not isinstance(m, container_abcs.Iterable):
raise TypeError("ModuleDict update sequence element "
"#" + str(j) + " should be Iterable; is" +
type(m).__name__)
if not len(m) == 2:
raise ValueError("ModuleDict update sequence element "
"#" + str(j) + " has length " + str(len(m)) +
"; 2 is required")
# modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]
# that's too cumbersome to type correctly with overloads, so we add an ignore here
self[m[0]] = m[1] # type: ignore[assignment]
# remove forward alltogether to fallback on Module's _forward_unimplemented
class ParameterList(Module):
    r"""Holds parameters in a list.
    :class:`~torch.nn.ParameterList` can be used like a regular Python
    list, but Tensors that are :class:`~torch.nn.Parameter` are properly registered,
    and will be visible by all :class:`~torch.nn.Module` methods.
    Note that the constructor, assigning an element of the list, the
    :meth:`~torch.nn.ParameterList.append` method and the :meth:`~torch.nn.ParameterList.extend`
    method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`.
    Args:
        parameters (iterable, optional): an iterable of elements to add to the list.
    Example::
        class MyModule(nn.Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
            def forward(self, x):
                # ParameterList can act as an iterable, or be indexed using ints
                for i, p in enumerate(self.params):
                    x = self.params[i // 2].mm(x) + p.mm(x)
                return x
    """
    def __init__(self, values: Optional[Iterable[Any]] = None) -> None:
        super(ParameterList, self).__init__()
        # Number of list entries; the entries themselves are stored as
        # attributes named "0", "1", ... (see __setitem__).
        self._size = 0
        if values is not None:
            self += values
    def _get_abs_string_index(self, idx):
        """Get the absolute index for the list of modules"""
        idx = operator.index(idx)
        if not (-len(self) <= idx < len(self)):
            raise IndexError('index {} is out of range'.format(idx))
        if idx < 0:
            # Normalize negative indices to their positive equivalent.
            idx += len(self)
        return str(idx)
    @overload
    def __getitem__(self, idx: int) -> Any:
        ...
    @overload
    def __getitem__(self: T, idx: slice) -> T:
        ...
    def __getitem__(self, idx):
        # Slicing returns a NEW ParameterList with the selected entries;
        # integer indexing returns the stored object itself.
        if isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            out = self.__class__()
            for i in range(start, stop, step):
                out.append(self[i])
            return out
        else:
            idx = self._get_abs_string_index(idx)
            return getattr(self, str(idx))
    def __setitem__(self, idx: int, param: Any) -> None:
        # Note that all other function that add an entry to the list part of
        # the ParameterList end up here. So this is the only place where we need
        # to wrap things into Parameter if needed.
        # Objects added via setattr() are not in the list part and thus won't
        # call into this function.
        idx = self._get_abs_string_index(idx)
        if isinstance(param, torch.Tensor) and not isinstance(param, Parameter):
            param = Parameter(param)
        return setattr(self, str(idx), param)
    def __len__(self) -> int:
        return self._size
    def __iter__(self) -> Iterator[Any]:
        return iter(self[i] for i in range(len(self)))
    def __iadd__(self, parameters: Iterable[Any]) -> 'ParameterList':
        return self.extend(parameters)
    def __dir__(self):
        # Hide the purely numeric attribute names ("0", "1", ...) from dir().
        keys = super(ParameterList, self).__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys
    def append(self, value: Any) -> 'ParameterList':
        """Appends a given value at the end of the list.
        Args:
            value (Any): value to append
        """
        new_idx = len(self)
        # Grow the size first so the new index passes the bounds check
        # performed by __setitem__ via _get_abs_string_index.
        self._size += 1
        self[new_idx] = value
        return self
    def extend(self, values: Iterable[Any]) -> 'ParameterList':
        """Appends values from a Python iterable to the end of the list.
        Args:
            values (iterable): iterable of values to append
        """
        # Tensor is an iterable but we never want to unpack it here
        if not isinstance(values, container_abcs.Iterable) or isinstance(values, torch.Tensor):
            raise TypeError("ParameterList.extend should be called with an "
                            "iterable, but got " + type(values).__name__)
        for value in values:
            self.append(value)
        return self
    def extra_repr(self) -> str:
        child_lines = []
        for k, p in enumerate(self):
            if isinstance(p, torch.Tensor):
                size_str = 'x'.join(str(size) for size in p.size())
                device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
                parastr = '{} containing: [{} of size {}{}]'.format(
                    "Parameter" if isinstance(p, Parameter) else "Tensor",
                    torch.typename(p), size_str, device_str)
                child_lines.append(' (' + str(k) + '): ' + parastr)
            else:
                # Arbitrary non-tensor entries are allowed; report type only.
                child_lines.append(' (' + str(k) + '): Object of type: ' + type(p).__name__)
        tmpstr = '\n'.join(child_lines)
        return tmpstr
    def __call__(self, *args, **kwargs):
        raise RuntimeError('ParameterList should not be called.')
class ParameterDict(Module):
r"""Holds parameters in a dictionary.
ParameterDict can be indexed like a regular Python dictionary, but Parameters it
contains are properly registered, and will be visible by all Module methods.
Other objects are treated as would be done by a regular Python dictionary
:class:`~torch.nn.ParameterDict` is an **ordered** dictionary.
:meth:`~torch.nn.ParameterDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping. On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict`
will preserve their ordering.
Note that the constructor, assigning an element of the dictionary and the
:meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into
:class:`~torch.nn.Parameter`.
Args:
values (iterable, optional): a mapping (dictionary) of
(string : Any) or an iterable of key-value pairs
of type (string, Any)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.params = nn.ParameterDict({
'left': nn.Parameter(torch.randn(5, 10)),
'right': nn.Parameter(torch.randn(5, 10))
})
def forward(self, x, choice):
x = self.params[choice].mm(x)
return x
"""
    def __init__(self, parameters: Any = None) -> None:
        """Build the dict, optionally populating it from *parameters*."""
        super(ParameterDict, self).__init__()
        # Insertion-ordered registry of keys; the values themselves are stored
        # as attributes on self (see __setitem__).
        self._keys: Dict[str, None] = {}
        if parameters is not None:
            self.update(parameters)
def _key_to_attr(self, key: str) -> str:
if not isinstance(key, str):
raise TypeError("Index given to ParameterDict cannot be used as a key as it is "
f"not a string (type is '{type(key).__name__}'). Open an issue on "
"github if you need non-string keys.")
else:
# Use the key as-is so that `.named_parameters()` returns the right thing
return key
def __getitem__(self, key: str) -> Any:
attr = self._key_to_attr(key)
return getattr(self, attr)
    def __setitem__(self, key: str, value: Any) -> None:
        """Store *value* under *key*, wrapping a plain Tensor in a Parameter."""
        # Note that all other function that add an entry to the dictionary part of
        # the ParameterDict end up here. So this is the only place where we need
        # to wrap things into Parameter if needed.
        # Objects added via setattr() are not in the dictionary part and thus won't
        # call into this function.
        self._keys[key] = None
        attr = self._key_to_attr(key)
        if isinstance(value, torch.Tensor) and not isinstance(value, Parameter):
            value = Parameter(value)
        setattr(self, attr, value)
def __delitem__(self, key: str) -> None:
del self._keys[key]
attr = self._key_to_attr(key)
delattr(self, attr)
    def __len__(self) -> int:
        """Number of entries currently registered."""
        return len(self._keys)
    def __iter__(self) -> Iterator[str]:
        """Iterate over the keys in insertion order."""
        return iter(self._keys)
    def __reversed__(self) -> Iterator[str]:
        """Iterate over the keys in reverse insertion order."""
        return reversed(list(self._keys))
def copy(self) -> 'ParameterDict':
"""Returns a copy of this :class:`~torch.nn.ParameterDict` instance.
"""
# We have to use an OrderedDict because the ParameterDict constructor
# behaves differently on plain dict vs OrderedDict
return ParameterDict(OrderedDict((k, self[k]) for k in self._keys))
    def __contains__(self, key: str) -> bool:
        """True when *key* is registered in this dict."""
        return key in self._keys
def setdefault(self, key: str, default: Optional[Any] = None) -> Any:
"""If key is in the ParameterDict, return its value.
If not, insert `key` with a parameter `default` and return `default`.
`default` defaults to `None`.
Args:
key (string): key to set default for
default (Any): the parameter set to the key
"""
if key not in self:
self[key] = default
return self[key]
def clear(self) -> None:
"""Remove all items from the ParameterDict.
"""
for k in self._keys.copy():
del self[k]
def pop(self, key: str) -> Any:
r"""Remove key from the ParameterDict and return its parameter.
Args:
key (string): key to pop from the ParameterDict
"""
v = self[key]
| |
categoryNode (nodes[proc][chan][category + branchpostfix]
Hstop = len(defineNodes[processName][decayChannel])
#Guard against histogram names already included (via keys in histNodes) as well as variables that aren't present in branches
# print("==============================> {} {} start: {} stop: {}".format(processName, decayChannel, Hstart, Hstop))
for dnode in defineNodes[processName][decayChannel][Hstart:Hstop]:
defHName = dnode[0][0]
#Need to determine which kind of histo function to use... have to be careful, this guess will be wrong if anyone ever does an unweighted histo!
if defHName in histoNodes[processName][decayChannel]:
raise RuntimeError("This histogram name already exists in memory or is intentionally being overwritten:"\
"processName - {}\t decayChannel - {}\t defHName - {}".format(processName, decayChannel, defHName))
else:
for i in xrange(1, len(dnode)):
if dnode[i] not in listOfColumns:
raise RuntimeError("This histogram's variable/weight is not defined:"\
"processName - {}\t decayChannel - {}\t variable/weight - {}".format(processName, decayChannel, dnode[i]))
guessDim = 0
if len(dnode) == 3:
guessDim = 1
histoNodes[processName][decayChannel][defHName] = categoryNode.Histo1D(dnode[0], dnode[1], dnode[2])
elif len(dnode) == 4:
guessDim = 2
histoNodes[processName][decayChannel][defHName] = categoryNode.Histo2D(dnode[0], dnode[1], dnode[2], dnode[3])
elif len(dnode) == 4:
guessDim = 3
histoNodes[processName][decayChannel][defHName] = categoryNode.Histo3D(dnode[0], dnode[1], dnode[2], dnode[3], dnode[4])
packedNodes = {}
packedNodes["filterNodes"] = filterNodes
packedNodes["defineNodes"] = defineNodes
packedNodes["countNodes"] = countNodes
packedNodes["diagnosticNodes"] = diagnosticNodes
packedNodes["nodes"] = nodes
return packedNodes
def jetMatchingEfficiency(input_df, max_eta = 2.5, min_pt = 30.0, wgtVar="wgt_SUMW_PU_L1PF", stats_dict=None,
                          isData=True):
    """Book jet gen-matching efficiency statistics per GenJet-multiplicity bin.

    :param input_df: dataframe node supporting Define/Filter/Stats (RDataFrame-like)
    :param max_eta: maximum |eta| for a GenJet to be counted
    :param min_pt: minimum pt for a GenJet to be counted
    :param wgtVar: name of the event-weight column
    :param stats_dict: dict the booked Stats proxies are inserted into; created
        on the fly when None
    :return: stats_dict (possibly newly created); unchanged when isData is True
    """
    if isData:
        # Gen-level matching is only meaningful for simulation.
        return stats_dict
    if stats_dict is None:
        # BUG FIX: the original dereferenced stats_dict unconditionally and
        # crashed with TypeError when called with its default of None.
        stats_dict = {}
    theCats = collections.OrderedDict()
    #Subtract 2 for the GenJets which are actually leptons
    theCats["nGenJet2"] = "jetmatch_nGenJet == 4"
    theCats["nGenJet3"] = "jetmatch_nGenJet == 5"
    theCats["nGenJet4"] = "jetmatch_nGenJet == 6"
    theCats["nGenJet5"] = "jetmatch_nGenJet == 7"
    theCats["nGenJet6"] = "jetmatch_nGenJet == 8"
    theCats["nGenJet7"] = "jetmatch_nGenJet == 9"
    theCats["nGenJet8"] = "jetmatch_nGenJet == 10"
    theCats["nGenJet9"] = "jetmatch_nGenJet == 11"
    theCats["nGenJet10+"] = "jetmatch_nGenJet >= 12"
    #define genjets as needed for this study
    input_df_defined = input_df.Define("jetmatch_nGenJet", "GenJet_pt[GenJet_pt >= {} && abs(GenJet_eta) <= {}].size()".format(min_pt, max_eta))
    cat_df = collections.OrderedDict()
    for ck, cs in theCats.items():
        cat_df[ck] = input_df_defined.Filter(cs, "Jet Matching Efficiency " + cs)
        stats_dict[ck] = {}
        stats_dict[ck]["nJet"] = cat_df[ck].Stats("nGJet", wgtVar)
        stats_dict[ck]["nJet_genMatched"] = cat_df[ck].Stats("nGJet_genMatched", wgtVar)
        stats_dict[ck]["nJet_puIdLoose"] = cat_df[ck].Stats("nGJet_puIdLoose", wgtVar)
        stats_dict[ck]["nJet_genMatched_puIdLoose"] = cat_df[ck].Stats("nGJet_genMatched_puIdLoose", wgtVar)
    return stats_dict
def fillHLTMeans(input_df, wgtVar="wgt_SUMW_PU_L1PF", stats_dict=None):
    """Book weighted and unweighted HLT-bit sums per jet-multiplicity category.

    :param input_df: dataframe node supporting GetColumnNames/Define/Filter/Sum/Stats/Count
    :param wgtVar: name of the event-weight column
    :param stats_dict: dict the booked result proxies are inserted into; when
        None, only the weighted helper columns are defined and nothing is booked
    :return: stats_dict (same reference that was passed in; None when None)
    """
    theCats = collections.OrderedDict()
    theCats["Inclusive"] = "nGJet >= 4"
    theCats["nJet4to5"] = "nGJet == 4 || nGJet == 5"
    theCats["nJet6+"] = "nGJet >= 6"
    # Keep only pure-hadronic HLT bits (drop electron/muon/tau paths).
    branches = [branch for branch in input_df.GetColumnNames() if "HLT_" in branch and "Ele" not in branch
                and "Mu" not in branch and "Tau" not in branch]
    #and ("PF" in branch or "HT" in branch or "MET" in branch)]
    #print(branches)
    input_df_defined = input_df
    branches_weighted = []
    for branch in branches:
        # Per-event column equal to the weight when the bit fired, else 0, so
        # a plain Sum() over it yields the weighted pass count.
        branches_weighted.append("{}_weighted".format(branch))
        input_df_defined = input_df_defined.Define("{}_weighted".format(branch),
                                                   "{} == true ? {} : 0".format(branch, wgtVar))
    cat_df = collections.OrderedDict()
    for ck, cs in theCats.items():
        cat_df[ck] = input_df_defined.Filter(cs, "HLT Report " + cs)
    if stats_dict is not None:  # idiom fix: `is not None` instead of `!= None`
        # Lazily create each result bucket so repeated calls accumulate.
        if "unweighted" not in stats_dict:
            stats_dict["unweighted"] = {}
        if "weighted" not in stats_dict:
            stats_dict["weighted"] = {}
        if "weightedStats" not in stats_dict:
            stats_dict["weightedStats"] = {}
        if "weightedStatsSMT" not in stats_dict:
            stats_dict["weightedStatsSMT"] = {}
        if "counts" not in stats_dict:
            stats_dict["counts"] = {}
        for tc, cut in theCats.items():
            if tc not in stats_dict["unweighted"]:
                stats_dict["unweighted"][tc] = {}
            if tc not in stats_dict["weighted"]:
                stats_dict["weighted"][tc] = {}
            if tc not in stats_dict["weightedStats"]:
                stats_dict["weightedStats"][tc] = {}
            if tc not in stats_dict["weightedStatsSMT"]:
                stats_dict["weightedStatsSMT"][tc] = {}
            if tc not in stats_dict["counts"]:
                stats_dict["counts"][tc] = cat_df[tc].Count()
            for branch in branches:
                stats_dict["unweighted"][tc]["{}".format(branch)] = cat_df[tc].Sum("{}".format(branch)) #instead of mean
                stats_dict["weightedStatsSMT"][tc]["{}".format(branch)] = cat_df[tc].Stats("{}".format(branch), wgtVar)
            for branch in branches_weighted:
                stats_dict["weighted"][tc]["{}".format(branch)] = cat_df[tc].Sum("{}".format(branch))
                stats_dict["weightedStats"][tc]["{}".format(branch)] = cat_df[tc].Stats("{}".format(branch))
    return stats_dict
def writeHistosForCombine(histDict, directory, levelsOfInterest, dict_key="Mountains", mode="RECREATE"):
    """Write histograms into one ROOT file per (object, histogram-name) pair.

    Walks ``histDict`` as sample -> level -> ``dict_key`` -> object -> histogram,
    opens ``<directory>/<level>/<object>_<hname>.root`` on first use, and writes
    each histogram under its sample's name.
    """
    openFiles = {}
    if not os.path.isdir(directory):
        os.makedirs(directory)
    for sampleName, perLevel in histDict.items():
        for levelName, objDict in perLevel.items():
            if levelName not in levelsOfInterest: continue
            levelDir = directory + "/" + levelName
            if not os.path.isdir(levelDir):
                os.makedirs(levelDir)
            for objName, histos in objDict[dict_key].items():
                for histName, resultPtr in histos.items():
                    fileKey = objName + "_" + histName
                    if fileKey not in openFiles:
                        openFiles[fileKey] = ROOT.TFile.Open("{}.root".format(levelDir + "/" + fileKey), mode)
                    openFiles[fileKey].cd()
                    histo = resultPtr.GetPtr()
                    previousName = histo.GetName()
                    histo.SetName("{}".format(sampleName))
                    histo.Write()
                    # Restore the original name so later writes do not clobber
                    # each other (avoids a segfault).
                    histo.SetName("{}".format(previousName))
    for openFile in openFiles.values():
        openFile.Close()
def histoCombine(directory, outDirectory="{}/Combine", globKey="*.root", stripKey=".root", internalSeperator="*",
                 systematicSeperator="__", mode="RECREATE"):
    """take list of files in <directory>, with optional <globKey>, and create individual root files containing
    each sample (nominal and systematic variation) for each histogram category. Keys can be parsed with
    <internalSeperator> (default '*') and <systematicSeperator> (default '__') such that file 'ttWH.root' with
    'Mountains*nJet4*DeepCSV_jet1__JESup' will generate a file 'nJet4_DeepCSV_jet1.root' containing the systematic
    variation histogram 'ttWH_JESup'"""
    # FIX: the original `if 'glob' not in dir():` guard never worked — dir()
    # inside a function lists locals. A plain local import is cheap and correct.
    import glob
    if "{}/" in outDirectory:
        outDirectory = outDirectory.format(directory)
    if not os.path.isdir(outDirectory):
        print("Checking for (and if necessary, creating) directory {}".format(outDirectory))
        os.makedirs(outDirectory)
    files = glob.glob("{}/{}".format(directory, globKey))
    # Deduce sample names from the filenames (strip path and <stripKey> suffix).
    names = [fname.split("/")[-1].replace(stripKey, "") for fname in files]
    fileDict = {}
    keysDict = {}
    nominalDict = {}
    keySet = set()
    for name, fname in zip(names, files):
        # Input files.
        fileDict[name] = ROOT.TFile.Open(fname)
        # Histogram names.
        keysDict[name] = [hist.GetName() for hist in fileDict[name].GetListOfKeys()]
        # Group nominal + systematic variations under the nominal key.
        nominalDict[name] = [histName.split(systematicSeperator)[0] for histName in keysDict[name]]
        keySet = keySet.union(set(nominalDict[name]))
    # Parse the collected keys to generate one output file per category.
    for outname_raw in keySet:
        splt = outname_raw.split(internalSeperator)
        if len(splt) == 1:
            # FIX: single-token keys previously raised a NameError ('cat' was
            # unbound); name the file after the lone token instead.
            outname = "{}/{}.root".format(outDirectory, splt[0]).replace("//", "/")
        elif len(splt) in (2, 3):
            # Ignore the super-category (first of three tokens) for naming;
            # guard against a doubled '/' character.
            cat, var = splt[-2], splt[-1]
            outname = "{}/{}_{}.root".format(outDirectory, cat, var).replace("//", "/")
        else:
            raise ValueError("histoCombine: key '{}' has more than 3 '{}'-separated tokens".format(
                outname_raw, internalSeperator))
        oFile = ROOT.TFile.Open(outname, mode)
        for name, hNameList_raw in keysDict.items():
            # Histograms in this sample whose nominal part matches this key.
            hNameList = [hName for hName in hNameList_raw
                         if outname_raw == hName.split(systematicSeperator)[0]]
            for hName in hNameList:
                hist = fileDict[name].Get(hName)
                original_name = hist.GetName()
                # Format the new name by replacing the non-systematic portion
                # with the sample's name.
                new_name = hName.replace(outname_raw, name)
                hist.SetName(new_name)
                hist.Write()
                hist.SetName(original_name)
        oFile.Close()
def writeHistosV1(histDict, directory, levelsOfInterest="All", samplesOfInterest="All", dict_keys="All", mode="RECREATE"):
    """Write histograms into one ROOT file per (sample, level).

    Walks ``histDict`` as sample -> level -> dict_key -> object -> histogram and
    writes every histogram (RResultPtr-of-dict or bare TH*) into
    ``<directory>/<level>/<sample>.root``. Filters accept "All" or a container
    of allowed names.
    """
    rootDict = {}
    if not os.path.isdir(directory):
        os.makedirs(directory)
    for name, levelsDict in histDict.items():
        if samplesOfInterest != "All" and name not in samplesOfInterest:
            continue
        for level, objDict in levelsDict.items():
            if levelsOfInterest != "All" and level not in levelsOfInterest:
                continue
            if not os.path.isdir(directory + "/" + level):
                os.makedirs(directory + "/" + level)
            # FIX: key on (name, level) — keying on the sample name alone
            # overwrote the handle of the previous level's file, leaking it
            # (it was never reached by the Close() loop below).
            rootDict[(name, level)] = ROOT.TFile.Open("{}.root".format(directory + "/" + level + "/" + name), mode)
            for dict_key in objDict.keys():
                if dict_keys != "All" and dict_key not in dict_keys:
                    continue
                for preObjName, objVal in objDict[dict_key].items():
                    if isinstance(objVal, dict):
                        # Nested dict of histogram-name -> lazy result pointer.
                        for hname, hist in objVal.items():
                            hist.GetPtr().Write()
                    elif "ROOT.TH" in str(type(objVal)):
                        # Bare histogram result.
                        objVal.GetPtr().Write()
            print("Wrote histogram file for {} - {}".format(name, directory + "/" + level + "/" + name))
    for f in rootDict.values():
        f.Close()
def writeHistos(histDict, directory, samplesOfInterest="All", channelsOfInterest="All", dict_keys="All", mode="RECREATE"):
rootDict = {}
if not os.path.isdir(directory):
os.makedirs(directory)
for name, channelsDict in histDict.items():
if samplesOfInterest == "All": pass
elif name not in samplesOfInterest: continue
for channel, objDict in channelsDict.items():
counter = 0
if channelsOfInterest == "All": pass
elif channel not in channelsOfInterest: continue
elif len(objDict.values()) < 1:
print("No objects to write, not creating directory or writing root file for {} {}".format(name, channel))
continue
if not os.path.isdir(directory + "/" + channel):
os.makedirs(directory + "/" + channel)
rootDict[name] = ROOT.TFile.Open("{}.root".format(directory + "/" + channel + "/"+ name), mode)
# for dict_key in objDict.keys():
# if dict_keys == "All": pass
# elif dict_key not in dict_keys: continue
for objname, obj in objDict.items():
if type(obj) == dict:
for hname, hist in obj.items():
if "ROOT.RDF.RResultPtr" in str(type(obj)):
hptr = hist.GetPtr()
else:
hptr = hist
hptr.Write()
counter += 1
elif "ROOT.RDF.RResultPtr" in str(type(obj)):
hptr = obj.GetPtr()
else:
hptr = obj
hptr.Write()
counter += 1
print("Wrote {} histograms into | |
is None else len(roots),
autonomous_exprs=_is_autonomous(self.indep, self.exprs),
**kwargs)
if self.sparse:
self.nnz = len(self._rowvals)
self.linear_invariants = linear_invariants
self.nonlinear_invariants = nonlinear_invariants
self.linear_invariant_names = linear_invariant_names or None
if self.linear_invariant_names is not None:
if len(self.linear_invariant_names) != self.linear_invariants.shape[0]:
raise ValueError("Incorrect length of linear_invariant_names: %d (expected %d)" % (
len(self.linear_invariant_names), linear_invariants.shape[0]))
self.nonlinear_invariant_names = nonlinear_invariant_names or None
if self.nonlinear_invariant_names is not None:
if len(self.nonlinear_invariant_names) != len(nonlinear_invariants):
raise ValueError("Incorrect length of nonlinear_invariant_names: %d (expected %d)" % (
len(self.nonlinear_invariant_names), len(nonlinear_invariants)))
if self.autonomous_interface is None:
self.autonomous_interface = self.autonomous_exprs
def _Symbol(self, name, be=None):
be = be or self.be
try:
return be.Symbol(name, real=True)
except TypeError:
return be.Symbol(name)
def _mk_init_indep(self, name, be=None, prefix='i_', suffix=''):
name = name or 'indep'
be = be or self.be
name = prefix + str(name) + suffix
if getattr(self, 'indep', None) is not None:
if self.indep.name == name:
raise ValueError("Name ambiguity in independent variable name")
return self._Symbol(name, be)
def _mk_init_dep(self, names=None, be=None, ny=None, prefix='i_', suffix=''):
be = be or self.be
ny = ny or self.ny
names = names or getattr(self, 'names', [str(i) for i in range(ny)])
if getattr(self, 'dep', None) is not None:
for dep in self.dep:
if dep.name.startswith(prefix):
raise ValueError("Name ambiguity in dependent variable names")
use_names = names is not None and len(names) > 0
return tuple(self._Symbol(prefix + names[idx] if use_names else str(idx) + suffix, be)
for idx in range(ny))
    def all_invariants(self, linear_invariants=None, nonlinear_invariants=None, dep=None, backend=None):
        """Return all invariant expressions of the system.

        The linear invariants (a coefficient matrix) are multiplied with the
        column vector of dependent variables to yield expressions; the
        nonlinear invariants are appended as-is. Arguments default to the
        corresponding instance attributes.
        """
        # Fall back to the instance attribute when no matrix is supplied.
        linear_invariants = linear_invariants or getattr(self, 'linear_invariants', None)
        return (([] if linear_invariants is None else (linear_invariants * (backend or self.be).Matrix(
            len(dep or self.dep), 1, dep or self.dep
        )).T.tolist()[0]) + (nonlinear_invariants or getattr(self, 'nonlinear_invariants', []) or []))
def all_invariant_names(self):
return (self.linear_invariant_names or []) + (self.nonlinear_invariant_names or [])
def __getitem__(self, key):
return self.dep[self.names.index(key)]
@staticmethod
def _to_array(cont, by_name, names, keys):
if isinstance(cont, dict) and (not by_name or names is None or len(names) == 0):
cont = [cont[k] for k in keys]
return cont
    def to_arrays(self, x, y, p, **kwargs):
        """Convert user input to arrays, unpacking name-keyed dicts first.

        ``y``/``p`` given as dicts are reordered into the declared
        ``self.dep`` / ``self.params`` order before delegating to the parent
        class's ``to_arrays``.
        """
        y = self._to_array(y, self.dep_by_name, self.names, self.dep)
        p = self._to_array(p, self.par_by_name, self.param_names, self.params)
        return super(SymbolicSys, self).to_arrays(x, y, p, **kwargs)
@staticmethod
def _kwargs_roots_from_roots_cb(roots_cb, kwargs, x, _y, _p, be):
if roots_cb is not None:
if 'roots' in kwargs:
raise ValueError("Keyword argument ``roots`` already given.")
try:
roots = roots_cb(x, _y, _p, be)
except TypeError:
roots = _ensure_4args(roots_cb)(x, _y, _p, be)
kwargs['roots'] = roots
    @classmethod
    def from_callback(cls, rhs, ny=None, nparams=None, first_step_factory=None,
                      roots_cb=None, indep_name=None, **kwargs):
        """ Create an instance from a callback.
        Parameters
        ----------
        rhs : callable
            Signature ``rhs(x, y[:], p[:], backend=math) -> f[:]``.
        ny : int
            Length of ``y`` in ``rhs``.
        nparams : int
            Length of ``p`` in ``rhs``.
        first_step_factory : callable
            Signature ``step1st(x, y[:], p[:]) -> dx0``.
        roots_cb : callable
            Callback with signature ``roots(x, y[:], p[:], backend=math) -> r[:]``.
        indep_name : str
            Default 'x' if not already in ``names``, otherwise indep0, or indep1, or ...
        dep_by_name : bool
            Make ``y`` passed to ``rhs`` a dict (keys from :attr:`names`) and convert
            its return value from dict to array.
        par_by_name : bool
            Make ``p`` passed to ``rhs`` a dict (keys from :attr:`param_names`).
        \\*\\*kwargs :
            Keyword arguments passed onto :class:`SymbolicSys`.
        Examples
        --------
        >>> def decay(x, y, p, backend=None):
        ...     rate = y['Po-210']*p[0]
        ...     return {'Po-210': -rate, 'Pb-206': rate}
        ...
        >>> odesys = SymbolicSys.from_callback(decay, dep_by_name=True, names=('Po-210', 'Pb-206'), nparams=1)
        >>> xout, yout, info = odesys.integrate([0, 138.4*24*3600], {'Po-210': 1.0, 'Pb-206': 0.0}, [5.798e-8])
        >>> import numpy as np; np.allclose(yout[-1, :], [0.5, 0.5], rtol=1e-3, atol=1e-3)
        True
        Returns
        -------
        An instance of :class:`SymbolicSys`.
        """
        ny, nparams = _get_ny_nparams_from_kw(ny, nparams, kwargs)
        be = Backend(kwargs.pop('backend', None))
        names = tuple(kwargs.pop('names', ''))
        indep_name = indep_name or _get_indep_name(names)
        # Some backends do not accept the ``real`` assumption keyword.
        try:
            x = be.Symbol(indep_name, real=True)
        except TypeError:
            x = be.Symbol(indep_name)
        y = be.real_symarray('y', ny)
        p = be.real_symarray('p', nparams)
        # Present y/p to the callback as name-keyed dicts when *_by_name is set.
        _y = dict(zip(names, y)) if kwargs.get('dep_by_name', False) else y
        _p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
        # A TypeError from the call itself means the callback was written with
        # fewer than 4 positional arguments; adapt and retry.
        try:
            exprs = rhs(x, _y, _p, be)
        except TypeError:
            exprs = _ensure_4args(rhs)(x, _y, _p, be)
        try:
            if len(exprs) != ny:
                # NOTE(review): the message interpolates (expected, got) in that order.
                raise ValueError("Callback returned unexpected (%d) number of expressions: %d" % (ny, len(exprs)))
        except TypeError:
            # len() failed: the callback did not return a sized container.
            raise ValueError("Callback did not return an array_like of expressions: %s" % str(exprs))
        cls._kwargs_roots_from_roots_cb(roots_cb, kwargs, x, _y, _p, be)
        if first_step_factory is not None:
            if 'first_step_exprs' in kwargs:
                raise ValueError("Cannot override first_step_exprs.")
            try:
                kwargs['first_step_expr'] = first_step_factory(x, _y, _p, be)
            except TypeError:
                kwargs['first_step_expr'] = _ensure_4args(first_step_factory)(x, _y, _p, be)
        if kwargs.get('dep_by_name', False):
            # Reorder the dict returned by the callback into declared name order.
            exprs = [exprs[k] for k in names]
        return cls(zip(y, exprs), x, kwargs.pop('params', None) if len(p) == 0 else p,
                   backend=be, names=names, **kwargs)
    @classmethod
    def from_other(cls, ori, **kwargs):
        """ Creates a new instance with an existing one as a template.
        Parameters
        ----------
        ori : SymbolicSys instance
        \\*\\*kwargs:
            Keyword arguments used to create the new instance.
        Returns
        -------
        A new instance of the class.
        """
        # Copy the template's attributes unless explicitly overridden.
        for k in cls._attrs_to_copy + ('params', 'roots', 'init_indep', 'init_dep'):
            if k not in kwargs:
                val = getattr(ori, k)
                if val is not None:
                    kwargs[k] = val
        if 'lower_bounds' not in kwargs and getattr(ori, 'lower_bounds') is not None:
            kwargs['lower_bounds'] = ori.lower_bounds
        if 'upper_bounds' not in kwargs and getattr(ori, 'upper_bounds') is not None:
            kwargs['upper_bounds'] = ori.upper_bounds
        # Processor lists are merged: the template's pre_processors are appended
        # after, and its post_processors prepended before, any passed-in ones.
        if len(ori.pre_processors) > 0:
            if 'pre_processors' not in kwargs:
                kwargs['pre_processors'] = []
            kwargs['pre_processors'] = kwargs['pre_processors'] + ori.pre_processors
        if len(ori.post_processors) > 0:
            if 'post_processors' not in kwargs:
                kwargs['post_processors'] = []
            kwargs['post_processors'] = ori.post_processors + kwargs['post_processors']
        if 'dep_exprs' not in kwargs:
            kwargs['dep_exprs'] = zip(ori.dep, ori.exprs)
        if 'indep' not in kwargs:
            kwargs['indep'] = ori.indep
        instance = cls(**kwargs)
        # Carry along any extra attributes copied by ``ori``'s class but not ours.
        for attr in ori._attrs_to_copy:
            if attr not in cls._attrs_to_copy:
                setattr(instance, attr, getattr(ori, attr))
        return instance
    @classmethod
    def from_other_new_params(cls, ori, par_subs, new_pars, new_par_names=None,
                              new_latex_par_names=None, **kwargs):
        """ Creates a new instance with an existing one as a template (with new parameters)
        Calls ``.from_other`` but first it replaces some parameters according to ``par_subs``
        and (optionally) introduces new parameters given in ``new_pars``.
        Parameters
        ----------
        ori : SymbolicSys instance
        par_subs : dict
            Dictionary with substitutions (mapping symbols to new expressions) for parameters.
            Parameters appearing in this instance will be omitted in the new instance.
        new_pars : iterable (optional)
            Iterable of symbols for new parameters.
        new_par_names : iterable of str
            Names of the new parameters given in ``new_pars``.
        new_latex_par_names : iterable of str
            TeX formatted names of the new parameters given in ``new_pars``.
        \\*\\*kwargs:
            Keyword arguments passed to ``.from_other``.
        Returns
        -------
        Intance of the class
        extra : dict with keys:
            - recalc_params : ``f(t, y, p1) -> p0``
        """
        # Substitute the replaced parameters in every ODE expression.
        new_exprs = [expr.subs(par_subs) for expr in ori.exprs]
        drop_idxs = [ori.params.index(par) for par in par_subs]
        params = _skip(drop_idxs, ori.params, False) + list(new_pars)
        # Numeric callback evaluating the substitution expressions; used below
        # to recover the original parameter values from the new ones.
        back_substitute = _Callback(ori.indep, ori.dep, params, list(par_subs.values()),
                                    Lambdify=ori.be.Lambdify)
        def recalc_params(t, y, p):
            # Map new parameter values back onto the original parameter layout.
            rev = back_substitute(t, y, p)
            return _reinsert(drop_idxs, np.repeat(np.atleast_2d(p), rev.shape[0], axis=0),
                             rev)[..., :len(ori.params)]
        return cls.from_other(
            ori, dep_exprs=zip(ori.dep, new_exprs),
            params=params,
            param_names=_skip(drop_idxs, ori.param_names, False) + list(new_par_names or []),
            latex_param_names=_skip(drop_idxs, ori.latex_param_names, False) + list(new_latex_par_names or []),
            **kwargs
        ), {'recalc_params': recalc_params}
    @classmethod
    def from_other_new_params_by_name(cls, ori, par_subs, new_par_names=(), **kwargs):
        """ Creates a new instance with an existing one as a template (with new parameters)
        Calls ``.from_other_new_params`` but first it creates the new instances from user provided
        callbacks generating the expressions the parameter substitutions.
        Parameters
        ----------
        ori : SymbolicSys instance
        par_subs : dict mapping str to ``f(t, y{}, p{}) -> expr``
            User provided callbacks for parameter names in ``ori``.
        new_par_names : iterable of str
        \\*\\*kwargs:
            Keyword arguments passed to ``.from_other_new_params``.
        """
        if not ori.dep_by_name:
            warnings.warn('dep_by_name is not True')
        if not ori.par_by_name:
            warnings.warn('par_by_name is not True')
        # The callbacks receive y and p as name-keyed dicts.
        dep = dict(zip(ori.names, ori.dep))
        # Allocate fresh parameter symbols beyond the existing ones.
        new_pars = ori.be.real_symarray(
            'p', len(ori.params) + len(new_par_names))[len(ori.params):]
        par = dict(chain(zip(ori.param_names, ori.params), zip(new_par_names, new_pars)))
        # Build the symbol -> expression substitution map from the callbacks.
        par_symb_subs = OrderedDict([(ori.params[ori.param_names.index(pk)], cb(
            ori.indep, dep, par, backend=ori.be)) for pk, cb in par_subs.items()])
        return cls.from_other_new_params(
            ori, par_symb_subs, new_pars, new_par_names=new_par_names, **kwargs)
    @property
    def ny(self):
        """ Number of dependent variables in the system. """
        # One ODE expression per dependent variable, hence len(exprs).
        return len(self.exprs)
def as_autonomous(self, new_indep_name=None, new_latex_indep_name=None):
if self.autonomous_exprs:
return self
old_indep_name = self.indep_name or _get_indep_name(self.names)
new_names = () if not self.names else (self.names + (old_indep_name,))
new_indep_name = new_indep_name or _get_indep_name(new_names)
new_latex_indep_name = new_latex_indep_name or new_indep_name
new_latex_names = () if not self.latex_names else (
self.latex_names + (new_latex_indep_name,))
new_indep = self.be.Symbol(new_indep_name)
new_dep = self.dep + (self.indep,)
new_exprs = self.exprs + (self.indep**0,)
new_kw = dict(
names=new_names,
indep_name=new_indep_name,
latex_names=new_latex_names,
latex_indep_name=new_latex_indep_name,
| |
Standard LanguageStandard
Cpp03
Cpp11
Auto
+ TabWidth unsigned
+ UseTab UseTabStyle
Never
ForIndentation
Always
# Clang 3.5
+ Language LanguageKind
None
Cpp
JavaScript
# Clang 3.5
- SpaceAfterControlStatementKeyword bool
+ SpaceBeforeParens SpaceBeforeParensOptions
Never
ControlStatements
Always
# Clang 3.5
+ BasedOnStyle string
LLVM
Google
Chromium
Mozilla
WebKit
GNU
+ IndentBlocks bool
# Clang 3.5
- IndentBlocks bool
# Clang 3.5
+ BreakBeforeBraces BraceBreakingStyle
Attach
Linux
Stroustrup
Allman
GNU
# Clang 3.5
+ CommentPragmas std::string
# Clang 3.5
+ SpacesInContainerLiterals bool
# Clang 3.5
+ Language LanguageKind
None
Cpp
JavaScript
Proto
# Clang 3.5
+ ObjCSpaceAfterProperty bool
# Clang 3.5
+ KeepEmptyLinesAtTheStartOfBlocks bool
# Clang 3.5
+ ForEachMacros std::vector<std::string>
# Clang 3.5
+ AllowShortFunctionsOnASingleLine ShortFunctionStyle
None
Inline
All
# Clang 3.5
+ AllowShortBlocksOnASingleLine bool
# Clang 3.5
+ DisableFormat bool
# Clang 3.5
- DerivePointerBinding bool
- PointerBindsToType bool
+ DerivePointerAlignment bool
+ PointerAlignment PointerAlignmentStyle
Left
Right
Middle
# Clang 3.5
- IndentFunctionDeclarationAfterType bool
# Clang 3.5
+ IndentWrappedFunctionNames bool
# Clang 3.6
+ AlwaysBreakAfterDefinitionReturnType bool
# Clang 3.6
+ SpacesInSquareBrackets bool
# Clang 3.6
+ SpaceAfterCStyleCast bool
# Clang 3.6
+ AllowShortCaseLabelsOnASingleLine bool
# Clang 3.6
+ BreakBeforeBinaryOperators BinaryOperatorStyle
None
NonAssignment
All
# Clang 3.6
+ Language LanguageKind
None
Cpp
Java
JavaScript
Proto
# Clang 3.6
+ BinPackArguments bool
# Clang 3.6
+ ObjCBlockIndentWidth unsigned
# Clang 3.6
+ AlignAfterOpenBracket bool
# Clang 3.6
+ AllowShortFunctionsOnASingleLine ShortFunctionStyle
None
Inline
Empty
All
# Clang 3.6
+ AlignOperands bool
# Clang 3.7
+ AlignConsecutiveAssignments bool
# Clang 3.7
+ AllowShortFunctionsOnASingleLine ShortFunctionStyle
None
Empty
Inline
All
# Clang 3.7
+ AlwaysBreakAfterDefinitionReturnType DefinitionReturnTypeBreakingStyle
None
All
TopLevel
# Clang 3.7
+ MacroBlockBegin std::string
+ MacroBlockEnd std::string
# Clang 3.7
+ BreakBeforeBraces BraceBreakingStyle
Attach
Linux
Mozilla
Stroustrup
Allman
GNU
# Clang 3.8
+ BreakBeforeBraces BraceBreakingStyle
Attach
Linux
Mozilla
Stroustrup
Allman
GNU
WebKit
# Clang 3.8
+ IncludeCategories std::vector<std::pair<std::string, unsigned>>
# Clang 3.8
+ BraceWrapping BraceWrappingFlags
bool AfterClass
bool AfterControlStatement
bool AfterEnum
bool AfterFunction
bool AfterNamespace
bool AfterObjCDeclaration
bool AfterStruct
bool AfterUnion
bool BeforeCatch
bool BeforeElse
bool IndentBraces
+ BreakBeforeBraces BraceBreakingStyle
Attach
Linux
Mozilla
Stroustrup
Allman
GNU
WebKit
Custom
# Clang 3.8
+ AlignConsecutiveDeclarations bool
# Clang 3.8
+ IncludeCategories std::vector<IncludeCategory>
# Clang 3.8
+ BreakAfterJavaFieldAnnotations bool
# Clang 3.8
+ AlignAfterOpenBracket BracketAlignmentStyle
Align
DontAlign
AlwaysBreak
# Clang 3.8
+ SortIncludes bool
# Clang 3.8
+ ReflowComments bool
# Clang 3.8
+ AlwaysBreakAfterReturnType ReturnTypeBreakingStyle
None
All
TopLevel
AllDefinitions
TopLevelDefinitions
# Clang 3.8
+ Language LanguageKind
None
Cpp
Java
JavaScript
Proto
TableGen
# Clang 3.9
+ BreakStringLiterals bool
# Clang 3.9
+ JavaScriptQuotes JavaScriptQuoteStyle
Leave
Single
Double
# Clang 3.9
+ IncludeIsMainRegex std::string
# Clang 3.9
+ UseTab UseTabStyle
Never
ForIndentation
ForContinuationAndIndentation
Always
# Clang 3.9
+ JavaScriptWrapImports bool
# Clang 4.0
+ SpaceAfterTemplateKeyword bool
# Clang 4.0
+ Language LanguageKind
None
Cpp
Java
JavaScript
ObjC
Proto
TableGen
# Clang 5
+ FixNamespaceComments bool
# Clang 5
+ BreakBeforeInheritanceComma bool
# Clang 5
- AlignEscapedNewlinesLeft bool
+ AlignEscapedNewlines EscapedNewlineAlignmentStyle
DontAlign
Left
Right
# Clang 5
+ PenaltyBreakAssignment unsigned
# Clang 5
- BreakConstructorInitializersBeforeComma bool
+ AllowShortFunctionsOnASingleLine ShortFunctionStyle
None
InlineOnly
Empty
Inline
All
+ BraceWrapping BraceWrappingFlags
bool AfterClass
bool AfterControlStatement
bool AfterEnum
bool AfterFunction
bool AfterNamespace
bool AfterObjCDeclaration
bool AfterStruct
bool AfterUnion
bool BeforeCatch
bool BeforeElse
bool IndentBraces
bool SplitEmptyFunctionBody
+ BreakConstructorInitializers BreakConstructorInitializersStyle
BeforeColon
BeforeComma
AfterColon
+ CompactNamespaces bool
# Clang 5
+ SortUsingDeclarations bool
# Clang 5
+ BraceWrapping BraceWrappingFlags
bool AfterClass
bool AfterControlStatement
bool AfterEnum
bool AfterFunction
bool AfterNamespace
bool AfterObjCDeclaration
bool AfterStruct
bool AfterUnion
bool BeforeCatch
bool BeforeElse
bool IndentBraces
bool SplitEmptyFunction
bool SplitEmptyRecord
bool SplitEmptyNamespace
# Clang 5
+ Language LanguageKind
None
Cpp
Java
JavaScript
ObjC
Proto
TableGen
TextProto
# Clang 6
+ IndentPPDirectives PPDirectiveIndentStyle
None
AfterHash
# Clang 6
+ BraceWrapping BraceWrappingFlags
bool AfterClass
bool AfterControlStatement
bool AfterEnum
bool AfterFunction
bool AfterNamespace
bool AfterObjCDeclaration
bool AfterStruct
bool AfterUnion
bool AfterExternBlock
bool BeforeCatch
bool BeforeElse
bool IndentBraces
bool SplitEmptyFunction
bool SplitEmptyRecord
bool SplitEmptyNamespace
# Clang 6
+ RawStringFormats std::vector<RawStringFormat>
# Clang 6
+ IncludeBlocks IncludeBlocksStyle
Preserve
Merge
Regroup
# Clang 7
+ ObjCBinPackProtocolList BinPackStyle
Auto
Always
Never
# Clang 7
+ SpaceBeforeCtorInitializerColon bool
+ SpaceBeforeInheritanceColon bool
+ SpaceBeforeRangeBasedForLoopColon bool
# Clang 7
- IncludeBlocks IncludeBlocksStyle
Preserve
Merge
Regroup
- IncludeCategories std::vector<IncludeCategory>
- IncludeIsMainRegex std::string
# Clang 7
+ AlwaysBreakTemplateDeclarations BreakTemplateDeclarationsStyle
No
MultiLine
Yes
+ PenaltyBreakTemplateDeclaration unsigned
# Clang 7
- BreakBeforeInheritanceComma bool
+ BreakInheritanceList BreakInheritanceListStyle
BeforeColon
BeforeComma
AfterColon
# Clang 7
+ SpaceBeforeCpp11BracedList bool
"""
class ClangFormatter(CodeFormatter):
    """Formatter for:
    clang-format: A tool to format C/C++/Java/JavaScript/Objective-C/Protobuf code.
    (http://clang.llvm.org/docs/ClangFormat.html)
    """
    # Identifier used to select this formatter.
    shortname = 'clang-format'
    # Prefer deriving from a BasedOnStyle preset before tweaking single options.
    _prefer_basestyle = True
    base_optionname = 'BasedOnStyle'
    # Enum values that clang-format dumps but does not accept as input.
    invalid_enums = {'Language': {'None'}}
    columnlimitname = 'ColumnLimit'
    configfilename = '.clang-format'
    # Argument that makes clang-format print its effective configuration.
    styledump_argument = '-dump-config'
    def __init__(self, exe, cache=None):
        # type: (str, Optional[Cache]) -> None
        super(ClangFormatter, self).__init__(exe, cache=cache)
    def register_options(self):
        # type: () -> None
        """Register the style options supported by this clang-format executable."""
        dump = self.style_dump(style_make())
        if dump is None:
            reporterror('Error: We could not get a proper dump-config from clang-format')
            return
        self.register_options_from_dump(dump)
    def register_options_from_dump(self, config_dump):
        # type: (str) -> None
        """Derive option definitions from a '-dump-config' YAML dump by
        matching it against the closest known clang version."""
        version, styledef = find_closest_clang_version(unistr(config_dump))
        self.styledefinition = self.remove_invalid_options(styledef)
    def nested_derivations(self, style):
        # type: (Style) -> List[Style]
        """Return derived styles enabling nested option groups
        (currently only ``BreakBeforeBraces: Custom``)."""
        options = [('BreakBeforeBraces', 'Custom')]
        nstyles = []
        for optionname, value in options:
            optdef = styledef_option(self.styledefinition, optionname)
            # We can only use this nested option if the clang version in use supports it.
            if optdef is None:
                continue
            if value not in option_configs(optdef):
                continue
            if style.get(optionname) != value:
                nstyle = Style(copy.deepcopy(style))
                set_option(nstyle, optionname, value)
                nstyles.append(nstyle)
        return nstyles
    def styletext(self, style):
        # type: (Style) -> str
        """Render ``style`` as YAML-like text, recursing into nested styles."""
        if not isinstance(style, Style):
            raise TypeError()
        fragments = []
        for optionname, value in self.sorted_style(style).items():
            if isinstance(value, Style):
                # Nested style: emit the key and indent its rendered body.
                text = self.styletext(value)
                fragments.append('%s:' % (optionname, ))
                for line in text.splitlines():
                    fragments.append(' %s' % line)
            else:
                fragments.append('%s: %s' % (optionname, textrepr(value)))
        return '\n'.join(fragments) + '\n'
    def cmdargs_for_style(self, formatstyle, filename=None):
        # type: (Style, Optional[str]) -> List[str]
        """Build the clang-format command line arguments for ``formatstyle``."""
        assert isinstance(formatstyle, Style)
        inlinestyle = self.inlinestyletext(formatstyle)
        cmdargs = ['-style=%s' % inlinestyle]
        if filename is not None:
            cmdargs.append('-assume-filename=' + filename)
        return cmdargs
    def effective_style(self, style):
        # type: (Style) -> Style
        """Ask clang-format which concrete option values ``style`` resolves to."""
        dump = self.style_dump(style)
        if not dump:
            # The style is probably unsuitable
            return style_make()
        return style_make(parse_clang_dump_config(dump))
    def should_report_error(self, job, jobres):
        # type: (ExeCall, ExeResult) -> bool
        """We do not report known but uncritial errors
        """
        if jobres.error is not None:
            return True
        if jobres.returncode == 0:
            if jobres.stderr:
                # Expected complaints about invalid/unsuitable styles are only
                # reported when invalid-style info was requested.
                if jobres.stderr.startswith(b'Error parsing -style: Unsuitable'):
                    return INFO_INVALIDS in args_info
                if (jobres.stderr.startswith(b'YAML:') and
                        b'Error parsing -style: Invalid ' in jobres.stderr):
                    return INFO_INVALIDS in args_info
        return super(ClangFormatter, self).should_report_error(job, jobres)
    def extra_penalty(self, style, complexity):
        # type: (Style, int) -> Tuple[int, int]
        """Trying longer and longer column limits
        without getting better results should be penalized to speed
        up the search.
        """
        standards = {'ColumnLimit': 80,
                     'MaxEmptyLinesToKeep': 2, }
        penalty = 0
        for optionname, value in standards.items():
            fvalue = style.get(optionname, value)
            if fvalue is not None and fvalue > value:
                penalty += fvalue - value
        if style.get('BreakBeforeBraces') == 'Custom':
            # Rate a commonly known brace breaking style
            # better than an equally performing custom style.
            penalty += 1
            # We would prefer an equally performing style even if we had to
            # add another 12 options.
            complexity += 12
        return complexity, penalty
    @staticmethod
    def additional_variants(stylename, configs, unextendedname, extendoptions):
        """Combine each config value with every extension option, keeping the
        ``unextendedname`` value on its own."""
        combos = []
        for c in configs:
            if c == unextendedname:
                combos.append(stylevariant(stylename, c))
            else:
                for addopt in extendoptions:
                    extopt = stylevariant(stylename, c)
                    extopt.update(addopt)
                    combos.append(extopt)
        return combos
    def variants_for(self, option):
        # type: (Option) -> List[Style]
        """Generates lists of possible values for this option.
        ('IndentCaseLabels', 'bool', ())
        -> [[('IndentCaseLabels', 'true')], [('IndentCaseLabels', 'false')]]
        ('Language', 'LanguageKind', ['Cpp', 'Java', 'JavaScript', 'Proto'])
        -> [[('Language', 'Cpp')], [('Language', 'Java')],
            [('Language', 'JavaScript')], [('Language', 'Proto')]]
        ('PointerAlignment', 'PointerAlignmentStyle',
         ('Left', 'Right', 'Middle'))
        ->
        [[('DerivePointerAlignment', 'false'), ('PointerAlignment', 'Left')],
         [('DerivePointerAlignment', 'false'), ('PointerAlignment', 'Right')],
         [('DerivePointerAlignment', 'false'), ('PointerAlignment', 'Middle')],
         [('DerivePointerAlignment', 'true')]]
        """
        def kvpairs(vs):
            # type: (Iterable[OptionValue]) -> List[Style]
            return stylevariants(stylename, vs)
        if option is None:
            return []
        stylename = option_name(option)
        styletype = option_type(option)
        configs = option_configs(option)
        nestedstyle = option_nestedstyle(option)
        if nestedstyle is not None:
            variants = []
            for nopt in styledef_options(nestedstyle):
                for nstylevariant in self.variants_for(nopt):
                    sty = stylevariant(stylename, nstylevariant)
                    if stylename == 'BraceWrapping':
                        # BraceWrapping only takes effect together with
                        # BreakBeforeBraces: Custom.
                        set_option(sty, 'BreakBeforeBraces', 'Custom')
                    variants.append(sty)
            return variants
        if stylename == 'UseTab':
            return self.additional_variants(stylename, configs, 'Never',
                                            [stylevariant('TabWidth', i)
                                             for i in inclusiverange(1, 8)])
        if stylename == 'BreakBeforeBraces':
            # The custom variant is automatically added for the
            # BraceWrapping option.
            vs = kvpairs(configs)
            return [x for x in vs if x.get('BreakBeforeBraces') != 'Custom']
        if configs:
            if stylename == 'PointerAlignment':
                return self.additional_variants('DerivePointerAlignment', [False, True], True,
                                                stylevariants(stylename, configs))
            return kvpairs(configs)
        if styletype == 'bool':
            if stylename == 'DisableFormat':
                return kvpairs([False])
            if stylename == 'DerivePointerAlignment':
                return []
            return kvpairs([True, False])
        if styletype == 'int':
            return []
        if styletype == 'unsigned':
            if stylename == 'ColumnLimit':
                return kvpairs(self.column_limit_candidates)
            elif stylename == 'TabWidth':
                return kvpairs(inclusiverange(1, 8))
            elif stylename == 'IndentWidth':
                return kvpairs(inclusiverange(0, 8))
            elif stylename.startswith('Penalty'):
                # We avoid changing large integers whose purpose
                # is not exactly clear for the moment.
                return []
            else:
                return kvpairs(inclusiverange(0, 2))
        return []
# ----------------------------------------------------------------------
class IndentFormatter(CodeFormatter):
"""Formatter for:
indent -- indent and format C | |
# Repository: wycivil08/blendocv
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import time
import copy
from mathutils import *
from math import pi,sin,degrees,radians,atan2,copysign,cos,acos
from random import random,uniform,seed,choice,getstate,setstate
from bpy.props import *
from collections import deque
# Initialise the split error and axis vectors
splitError = 0.0
zAxis = Vector((0,0,1))
yAxis = Vector((0,1,0))
xAxis = Vector((1,0,0))
# This class will contain a part of the tree which needs to be extended and the required tree parameters
class stemSpline:
    """A growing stem: a Bezier spline plus the tree parameters needed to extend it."""

    def __init__(self, spline, curvature, curvatureV, segments, maxSegs, segLength, childStems, stemRadStart, stemRadEnd, splineNum):
        self.spline = spline                  # the underlying Bezier spline
        self.p = spline.bezier_points[-1]     # current end point of the spline
        self.curv = curvature                 # curvature applied per segment
        self.curvV = curvatureV               # random curvature variation
        self.seg = segments                   # segments grown so far
        self.segMax = maxSegs                 # total segments for this stem
        self.segL = segLength                 # length of one segment
        self.children = childStems            # number of child stems to sprout
        self.radS = stemRadStart              # radius at the stem base
        self.radE = stemRadEnd                # radius at the stem tip
        self.splN = splineNum                 # index of this spline in the curve

    def quat(self):
        """Orientation (track quaternion) of the end of the spline."""
        points = self.spline.bezier_points
        if len(points) == 1:
            # Only one point: use its right handle to define the direction.
            direction = points[-1].handle_right - points[-1].co
        else:
            direction = points[-1].co - points[-2].co
        return direction.normalized().to_track_quat('Z','Y')

    def dec(self):
        """Declination (angle from the vertical Z axis) of the spline end, in radians."""
        tilted = zAxis.copy()
        tilted.rotate(self.quat())
        return zAxis.angle(tilted)

    def updateEnd(self):
        """Re-point at the new end of the spline and count the grown segment."""
        self.p = self.spline.bezier_points[-1]
        self.seg += 1

    def spreadAng(self):
        """Random spread angle for a split (sign chosen at random), in radians."""
        return radians(choice([-1,1])*(20 + 0.75*(30 + abs(degrees(self.dec()) - 90))*random()**2))

    def splitAngle(self, splitAng, splitAngV):
        """Split angle with random variation, reduced by the current declination (never negative)."""
        return max(0,splitAng+uniform(-splitAngV,splitAngV)-self.dec())

    def curvAdd(self, curvD):
        """Adjust the per-segment curvature of the spline by curvD."""
        self.curv += curvD
# This class contains the data for a point where a new branch will sprout
class childPoint:
    """Data for a point on a parent stem where a new branch will sprout."""

    def __init__(self, coords, quat, radiusPar, offset, lengthPar, parBone):
        self.co = coords            # location of the sprout point
        self.quat = quat            # orientation at the sprout point
        self.radiusPar = radiusPar  # (parent radius, radius at this point) — see interpStem
        self.offset = offset        # distance along the parent stem
        self.lengthPar = lengthPar  # total length of the parent stem
        self.parBone = parBone      # name of the parent bone (for armature binding)
# This function calculates the shape ratio as defined in the paper
def shapeRatio(shape,ratio,pruneWidthPeak=0.0,prunePowerHigh=0.0,prunePowerLow=0.0):
    """Shape ratio as defined in the Weber/Penn paper, for shape modes 0-8.

    The prune* parameters are only used by the envelope shape (mode 8).
    Returns None for an unknown shape mode (matching the original fall-through).
    """
    simple = {
        0: lambda r: 0.2 + 0.8*r,                 # conical
        1: lambda r: 0.2 + 0.8*sin(pi*r),         # spherical
        2: lambda r: 0.2 + 0.8*sin(0.5*pi*r),     # hemispherical
        3: lambda r: 1.0,                         # cylindrical
        4: lambda r: 0.5 + 0.5*r,                 # tapered cylindrical
        6: lambda r: 1.0 - 0.8*r,                 # inverse conical
    }
    if shape in simple:
        return simple[shape](ratio)
    if shape == 5:
        # flame: peak at ratio == 0.7
        return ratio/0.7 if ratio <= 0.7 else (1.0 - ratio)/0.3
    if shape == 7:
        # tend flame: softened version of shape 5
        return 0.5 + 0.5*ratio/0.7 if ratio <= 0.7 else 0.5 + 0.5*(1.0 - ratio)/0.3
    if shape == 8:
        # pruning envelope
        if 0.0 < ratio < (1 - pruneWidthPeak):
            return ((ratio/(1 - pruneWidthPeak))**prunePowerHigh)
        if (1 - pruneWidthPeak) <= ratio < 1.0:
            return (((1 - ratio)/pruneWidthPeak)**prunePowerLow)
        return 0.0
# This function determines the actual number of splits at a given point using the global error
def splits(n):
    """Number of splits to perform now.

    Dithers with the module-level running error `splitError` so that the
    average number of splits over many calls approaches n.
    """
    global splitError
    actual = round(n + splitError, 0)
    splitError -= (actual - n)
    return int(actual)
# Determine the declination from a given quaternion
def declination(quat):
    """Declination (tilt away from the vertical Z axis) of *quat*, in degrees."""
    tilted = zAxis.copy()
    tilted.rotate(quat)
    tilted.normalize()
    return degrees(acos(tilted.z))
# Determine the length of a child stem
def lengthChild(lMax,offset,lPar,shape=False,lBase=None):
    """Length of a child stem.

    With a (truthy) shape mode the length follows the paper's shape ratio of the
    relative position along the parent; otherwise the simple linear falloff is used.
    """
    if not shape:
        return lMax*(lPar - 0.6*offset)
    return lPar*lMax*shapeRatio(shape,(lPar - offset)/(lPar - lBase))
# Find the actual downAngle taking into account the special case
def downAngle(downAng,downAngV,lPar=None,offset=None,lBase=None):
    """Actual down angle with random variation.

    A negative downAngV is the paper's special case: the variation is scaled by
    the position along the parent stem (via the conical shape ratio).
    """
    variation = uniform(-downAngV,downAngV)
    if downAngV < 0:
        return downAng + variation*(1 - 2*shapeRatio(0,(lPar - offset)/(lPar - lBase)))
    return downAng + variation
# Returns the rotation matrix equivalent to i rotations by 2*pi/(n+1)
def splitRotMat(n,i):
    """Rotation matrix about Z equivalent to i rotations by 2*pi/(n+1)."""
    angle = 2*i*pi/(n+1)
    return Matrix.Rotation(angle,3,'Z')
# Returns the split angle
def angleSplit(splitAng,splitAngV,quat):
    """Split angle with random variation, reduced by the declination of *quat* (never negative)."""
    angle = splitAng + uniform(-splitAngV,splitAngV) - declination(quat)
    return max(0, angle)
# Returns number of stems a stem will sprout
def stems(stemsMax,lPar,offset,lChild=False,lChildMax=None):
    """Number of stems a stem will sprout.

    With a (truthy) child length the count scales with the child/parent length
    ratio; otherwise it falls off linearly with the offset along the parent.
    """
    if not lChild:
        return stemsMax*(1.0 - 0.5*offset/lPar)
    return stemsMax*(0.2 + 0.8*(lChild/lPar)/lChildMax)
# Returns the spreading angle
def spreadAng(dec):
    """Random spreading angle (radians) for a declination *dec* given in degrees.

    The sign is chosen at random; the magnitude grows as the stem leans away
    from horizontal (|dec - 90| large).
    """
    sign = choice([-1,1])
    magnitude = (20 + 0.75*(30 + abs(dec - 90))*random()**2)
    return radians(sign*magnitude)
# Determines the angle of upward rotation of a segment due to attractUp
def curveUp(attractUp,quat,curveRes):
    """Angle of upward rotation of one segment due to the attractUp parameter."""
    lateral = yAxis.copy()
    lateral.rotate(quat)
    lateral.normalize()
    # declination() returns degrees, converted back to radians for the result
    return attractUp*radians(declination(quat))*abs(lateral.z)/curveRes
# Evaluate a bezier curve for the parameter 0<=t<=1 along its length
def evalBez(p1,h1,h2,p2,t):
    """Point on the cubic Bezier (p1, h1, h2, p2) at parameter 0<=t<=1."""
    u = 1 - t
    return (u**3)*p1 + (3*t*u**2)*h1 + (3*(t**2)*u)*h2 + (t**3)*p2
# Evaluate the unit tangent on a bezier curve for t
def evalBezTan(p1,h1,h2,p2,t):
    """Unit tangent of the cubic Bezier (p1, h1, h2, p2) at parameter t."""
    u = 1 - t
    # derivative of the Bernstein form, then normalized to unit length
    tangent = (-3*u**2)*p1 + (-6*t*u + 3*u**2)*h1 + (-3*(t**2) + 6*t*u)*h2 + (3*t**2)*p2
    return tangent.normalized()
# Determine the range of t values along a splines length where child stems are formed
def findChildPoints(stemList,numChild):
    """Parametric t values along a stem's length where child stems are formed.

    The child density per segment is derived from all splines in stemList, then
    applied to the first stem's total segment count.
    """
    totalPoints = sum(len(s.spline.bezier_points) for s in stemList)
    numSegs = totalPoints - len(stemList)   # each spline has one more point than segments
    perSeg = numChild/numSegs
    numMain = round(perSeg*stemList[0].segMax,0)
    return [(i + 1)/numMain for i in range(int(numMain))]
# Find the coordinates, quaternion and radius for each t on the stem
def interpStem(stem,tVals,lPar,parRad):
    """For each parametric value in tVals that falls on this stem's spline,
    interpolate the coordinates, orientation and radius and collect a
    childPoint for the branch that will sprout there.

    stem   : stemSpline being sampled
    tVals  : parametric values (0..1) along the whole stem
    lPar   : length of the parent stem (scales the child offsets)
    parRad : parent radius stored in each childPoint
    Returns a deque of childPoint objects.
    """
    tempList = deque()
    addpoint = tempList.append  # local alias avoids the attribute lookup in the loop
    # Fraction of the stem already covered by earlier growth; only t values at or
    # beyond this lie on the points currently present in this spline.
    checkVal = (stem.segMax - len(stem.spline.bezier_points) + 1)/stem.segMax
    points = stem.spline.bezier_points
    numPoints = len(stem.spline.bezier_points)
    # Loop through all the parametric values to be determined
    for t in tVals:
        if (t >= checkVal) and (t < 1.0):
            scaledT = (t-checkVal)/(tVals[-1]-checkVal)
            # NOTE(review): `t` is used here although `scaledT` (left commented)
            # looks like the intended rescaled parameter — confirm before changing.
            length = (numPoints-1)*t#scaledT
            index = int(length)  # Bezier segment containing this t
            if scaledT == 1.0:
                # Exactly at the stem tip: take the last point directly.
                coord = points[-1].co
                quat = (points[-1].handle_right - points[-1].co).to_track_quat('Z','Y')
                radius = parRad#points[-2].radius
            else:
                tTemp = length - index  # parameter within the local Bezier segment
                coord = evalBez(points[index].co,points[index].handle_right,points[index+1].handle_left,points[index+1].co,tTemp)
                quat = (evalBezTan(points[index].co,points[index].handle_right,points[index+1].handle_left,points[index+1].co,tTemp)).to_track_quat('Z','Y')
                radius = (1-tTemp)*points[index].radius + tTemp*points[index+1].radius # Not sure if this is the parent radius at the child point or parent start radius
            addpoint(childPoint(coord,quat,(parRad, radius),t*lPar,lPar,'bone'+(str(stem.splN).rjust(3,'0'))+'.'+(str(index).rjust(3,'0'))))
    return tempList
# Convert a list of degrees to radians
def toRad(angles):
    """Convert a sequence of angles in degrees to a list of radians.

    (The parameter was renamed from `list`, which shadowed the builtin; all
    visible callers pass it positionally.)
    """
    return [radians(a) for a in angles]
# This is the function which extends (or grows) a given stem.
def growSpline(stem,numSplit,splitAng,splitAngV,splineList,attractUp,hType,splineToBone):
# First find the current direction of the stem
dir = stem.quat()
# If the stem splits, we need to add new splines etc
if numSplit > 0:
# Get the curve data
cuData = stem.spline.id_data.name
cu = bpy.data.curves[cuData]
# Now for each split add the new spline and adjust the growth direction
for i in range(numSplit):
newSpline = cu.splines.new('BEZIER')
newPoint = newSpline.bezier_points[-1]
(newPoint.co,newPoint.handle_left_type,newPoint.handle_right_type) = (stem.p.co,'VECTOR','VECTOR')
newPoint.radius = stem.radS*(1 - stem.seg/stem.segMax) + stem.radE*(stem.seg/stem.segMax)
# Here we make the new "sprouting" stems diverge from the current direction
angle = stem.splitAngle(splitAng,splitAngV)
divRotMat = Matrix.Rotation(angle + stem.curv + uniform(-stem.curvV,stem.curvV),3,'X')#CurveUP should go after curve is applied
dirVec = zAxis.copy()
dirVec.rotate(divRotMat)
dirVec.rotate(splitRotMat(numSplit,i+1))
dirVec.rotate(dir)
# if attractUp != 0.0: # Shouldn't have a special case as this will mess with random number generation
divRotMat = Matrix.Rotation(angle + stem.curv + uniform(-stem.curvV,stem.curvV),3,'X')
dirVec = zAxis.copy()
dirVec.rotate(divRotMat)
dirVec.rotate(splitRotMat(numSplit,i+1))
dirVec.rotate(dir)
#Different version of the commented code above. We could use the inbuilt vector rotations but given this is a special case, it can be quicker to initialise the vector to the correct value.
# angle = stem.splitAngle(splitAng,splitAngV)
# curveUpAng = curveUp(attractUp,dir,stem.segMax)
# angleX = angle + stem.curv + uniform(-stem.curvV,stem.curvV) - curveUpAng
# angleZ = 2*i*pi/(numSplit+1)
# dirVec = Vector((sin(angleX)*sin(angleZ), -sin(angleX)*cos(angleZ), cos(angleX)))
# dirVec.rotate(dir)
# Spread the stem out in a random fashion
spreadMat = Matrix.Rotation(spreadAng(degrees(dirVec.z)),3,'Z')
dirVec.rotate(spreadMat)
# Introduce upward curvature
upRotAxis = xAxis.copy()
upRotAxis.rotate(dirVec.to_track_quat('Z','Y'))
curveUpAng = curveUp(attractUp,dirVec.to_track_quat('Z','Y'),stem.segMax)
upRotMat = Matrix.Rotation(-curveUpAng,3,upRotAxis)
dirVec.rotate(upRotMat)
# Make the growth vec the length of a stem segment
dirVec.normalize()
dirVec *= stem.segL
# Get the end point position
end_co = stem.p.co.copy()
# Add the new point and adjust its coords, handles and radius
newSpline.bezier_points.add()
newPoint = newSpline.bezier_points[-1]
(newPoint.co,newPoint.handle_left_type,newPoint.handle_right_type) = (end_co + dirVec,hType,hType)
newPoint.radius = stem.radS*(1 - (stem.seg + 1)/stem.segMax) + stem.radE*((stem.seg + 1)/stem.segMax)
# If this isn't the last point on a stem, then we need to add it to the list of stems to continue growing
if stem.seg != stem.segMax:
splineList.append(stemSpline(newSpline,stem.curv-angle/(stem.segMax-stem.seg),stem.curvV,stem.seg+1,stem.segMax,stem.segL,stem.children,stem.radS,stem.radE,len(cu.splines)-1))
splineToBone.append('bone'+(str(stem.splN)).rjust(3,'0')+'.'+(str(len(stem.spline.bezier_points)-2)).rjust(3,'0'))
# The original spline also needs to keep growing so adjust its direction too
angle = stem.splitAngle(splitAng,splitAngV)
divRotMat = Matrix.Rotation(angle + stem.curv + uniform(-stem.curvV,stem.curvV),3,'X')
dirVec = zAxis.copy()
dirVec.rotate(divRotMat)
dirVec.rotate(dir)
spreadMat = Matrix.Rotation(spreadAng(degrees(dirVec.z)),3,'Z')
dirVec.rotate(spreadMat)
else:
# If there are no splits then generate the growth direction without accounting for spreading of stems
dirVec = zAxis.copy()
#curveUpAng = curveUp(attractUp,dir,stem.segMax)
divRotMat = Matrix.Rotation(stem.curv + uniform(-stem.curvV,stem.curvV),3,'X')
dirVec.rotate(divRotMat)
#dirVec = Vector((0,-sin(stem.curv - curveUpAng),cos(stem.curv - curveUpAng)))
dirVec.rotate(dir)
upRotAxis = xAxis.copy()
upRotAxis.rotate(dirVec.to_track_quat('Z','Y'))
curveUpAng = curveUp(attractUp,dirVec.to_track_quat('Z','Y'),stem.segMax)
upRotMat = Matrix.Rotation(-curveUpAng,3,upRotAxis)
dirVec.rotate(upRotMat)
dirVec.normalize()
dirVec | |
the standard detection models we have around can detect. Also, this little trick might save you if, say for example, you really had to detect the cell tower but there's no EXIF data to be found: then you'd cycle through every rotation, and every flip, spawning many derivatives of this photo and run them all through. When the percentage of confidence of detection is high enough, Bam!, you found the orientation you needed and that sneaky cell tower.
#
# Anyway, to the example code:
# In[4]:
# Run me to flip the image back and forth
# NOTE(review): `imgMirror`, `np` and `pyplot` come from earlier cells of this
# notebook script; each run mirrors the image again, so two runs restore it.
imgMirror = np.fliplr(imgMirror)
pyplot.figure()
pyplot.imshow(imgMirror)
pyplot.axis('off')
pyplot.title('Mirror image')
# In[5]:
# Run me to rotate the image 90 degrees
# NOTE(review): `imgRotated` comes from an earlier cell; each run rotates a
# further 90 degrees counter-clockwise.
imgRotated = np.rot90(imgRotated)
pyplot.figure()
pyplot.imshow(imgRotated)
pyplot.axis('off')
pyplot.title('Rotated image')
# ## Sizing
#
# Part of preprocessing is resizing. For reasons we won't get into here, images in the Caffe2 pipeline should be square. Also, to help with performance, they should be resized to a standard height and width which is usually going to be smaller than your original source. In the example below we're resizing to 256 x 256 pixels, however you might notice that the `input_height` and `input_width` is set to 224 x 224 which is then used to specify the crop. This is what several image-based models are expecting. They were trained on images sized to 224 x 224 and in order for the model to properly identify the suspect images you throw at it, these should also be 224 x 224.
#
# ** Make sure you double-check the input sizes for the model you're using!**
# In[6]:
# Model is expecting 224 x 224, so resize/crop needed.
# Here are the steps we use to preprocess the image.
# (1) Resize the image to 256*256, and crop out the center.
input_height, input_width = 224, 224
# Bug fix: the old `print("...") % (...)` applied % to print's return value
# (None) and raises a TypeError on Python 3 — format inside the call instead.
print("Model's input shape is %dx%d" % (input_height, input_width))
img256 = skimage.transform.resize(img, (256, 256))
pyplot.figure()
pyplot.imshow(img256)
pyplot.axis('on')
pyplot.title('Resized image to 256x256')
print("New image shape:" + str(img256.shape))
# Note the resizing has distorted the image a little bit. It is important to recognize this effect during your processing as it can have an effect on the results of your model. Flowers and animals might be ok with a little stretching or squeezing, but facial features may not.
#
# This can happen when the dimensions of the original image are not proportionally exact to your desired size. In this particular example it would have been better to just resize to 224x224 and not bother cropping. Let's try another strategy of rescaling the image and maintaining the aspect ratio.
#
# ### Rescaling
#
# If you imagine portrait images versus landscape images you'll know that there are a lot of things that can get messed up by doing a sloppy resize. Rescaling is assuming that you're locking down the aspect ratio to prevent distortion in the image. In this case, we'll scale down the image to the shortest side that matches with the model's input size.
#
# In our example here, the model size is 224 x 224. As you look at your monitor in 1920x1080, it is longer in width than height and if you shrunk it down to 224, you'd run out of height before you ran out of width, so...
#
# - Landscape: limit resize by the height
# - Portrait: limit resize by the width
# In[7]:
print("Original image shape:" + str(img.shape) + " and remember it should be in H, W, C!")
# Bug fix: format inside the print call; `print("...") % (...)` raises a
# TypeError on Python 3 (None % tuple).
print("Model's input shape is %dx%d" % (input_height, input_width))
aspect = img.shape[1]/float(img.shape[0])
# Typo fix in the user-facing output: "Orginal" -> "Original".
print("Original aspect ratio: " + str(aspect))
if aspect > 1:
    # landscape orientation - wide image: limit the resize by the height
    res = int(aspect * input_height)
    imgScaled = skimage.transform.resize(img, (input_height, res))
elif aspect < 1:
    # portrait orientation - tall image: limit the resize by the width
    res = int(input_width/aspect)
    imgScaled = skimage.transform.resize(img, (res, input_width))
else:
    # square image: resize straight to the model's input size
    imgScaled = skimage.transform.resize(img, (input_height, input_width))
pyplot.figure()
pyplot.imshow(imgScaled)
pyplot.axis('on')
pyplot.title('Rescaled image')
print("New image shape:" + str(imgScaled.shape) + " in HWC")
# At this point only one dimension is set to what the model's input requires. We still need to crop one side to make a square.
#
# ### Cropping
#
# There are a variety of strategies we could utilize. In fact, we could backpedal and decide to do a center crop. So instead of scaling down to the smallest we could get on at least one side, we take a chunk out of the middle. If we had done that without scaling we would have ended up with just part of a flower petal, so we still needed some resizing of the image.
#
# Below we'll try a few strategies for cropping:
#
# 1. Just grab the exact dimensions you need from the middle!
# 2. Resize to a square that's pretty close then grab from the middle.
# 3. Use the rescaled image and grab the middle.
# In[8]:
# Compare the images and cropping strategies
# Try a center crop on the original for giggles
# NOTE(review): `img` comes from an earlier cell of this notebook script.
print("Original image shape:" + str(img.shape) + " and remember it should be in H, W, C!")
def crop_center(img,cropx,cropy):
    """Return the central cropx-by-cropy window of an H, W, C image array."""
    height, width, _ = img.shape
    left = width//2 - cropx//2
    top = height//2 - cropy//2
    return img[top:top + cropy, left:left + cropx]
# yes, the function above should match resize and take a tuple...
pyplot.figure()
# 1) Center crop straight from the original (un-resized) image
imgCenter = crop_center(img,224,224)
pyplot.subplot(1,3,1)
pyplot.imshow(imgCenter)
pyplot.axis('on')
pyplot.title('Original')
# 2) Center crop of the distorted (squeezed to 256x256) image
img256Center = crop_center(img256,224,224)
pyplot.subplot(1,3,2)
pyplot.imshow(img256Center)
pyplot.axis('on')
pyplot.title('Squeezed')
# 3) Center crop of the aspect-preserving rescaled image
imgScaledCenter = crop_center(imgScaled,224,224)
pyplot.subplot(1,3,3)
pyplot.imshow(imgScaledCenter)
pyplot.axis('on')
pyplot.title('Scaled')
# As you can see that didn't work out so well, except for maybe the last one. The middle one may be just fine too, but you won't know until you try on the model and test a lot of candidate images.
# At this point we can look at the difference we have, split it in half and remove some pixels from each side. This does have a drawback, however, as an off-center subject of interest would get clipped.
# If you've run this tutorial a few times now and are on Round 3, you'll notice a pretty big problem. You're missing astronauts! You can still see the issue with the flower from Round 2 as well. Things are missing after the cropping and that could cause you problems. Think of it this way: if you don't know how the model you're using was prepared then you don't know how to conform your images, so take care to test results! If the model used a lot of different aspect ratio images and just squeezed them to conform to a square then there's a good chance that over time and lots of samples it "learned" what things look like squeezed and can make a match. However, if you're looking for details like facial features and landmarks, or really nuanced elements in any image, this could be dangerous and error-prone.
#
# #### Further Strategies?
#
# Another strategy would be to rescale to the best size you can, with real data, but then pad the rest of the image with information that you can safely ignore in your model. We'll save that for another tutorial though since you've been through enough here!
#
# ### Upscaling
#
# What do you do when the images you want to run are "tiny"? In our example we've been prepping for Input Images with the spec of 224x224. Consider this 128x128 image below.
# 
# Now we're not talking about super-resolution or the CSI-effect where we can take blurry ATM photos and identify the tattoo on a perp's neck. Although, there are [some advances](https://github.com/david-gpu/srez) along these lines that deep learning has provided, and if you're reading this in time (before 3/1/17), go [check this out](https://developer.nvidia.com/zoom-enhance-magic-image-upscaling-using-deep-learning). What we want to do is simple, but, like cropping, it does have a variety of strategies you should consider.
#
# The most basic approach is going from a small square to a bigger square and using the defaults skimage provides for you. This `resize` method defaults the interpolation order parameter to 1 which happens to be bi-linear if you even cared, but it is worth mentioning because these might be the fine-tuning knobs you need later to fix problems, such as
# -*- coding: utf-8 -*-
"""
CLI for running the commands on workers
"""
from __future__ import annotations
import sys
from concurrent.futures import ThreadPoolExecutor
from typing import Optional
import click
from pioreactor.config import config
from pioreactor.config import get_active_workers_in_inventory
from pioreactor.config import get_leader_hostname
from pioreactor.logging import create_logger
from pioreactor.utils.timing import current_utc_timestamp
from pioreactor.whoami import am_I_leader
from pioreactor.whoami import get_latest_experiment_name
from pioreactor.whoami import get_unit_name
from pioreactor.whoami import UNIVERSAL_IDENTIFIER
def universal_identifier_to_all_active_workers(units: tuple[str, ...]) -> tuple[str, ...]:
    """Expand the universal identifier into the tuple of all active workers;
    any other tuple of unit names is returned unchanged."""
    if units != (UNIVERSAL_IDENTIFIER,):
        return units
    return get_active_workers_in_inventory()
def add_leader(units: tuple[str, ...]) -> tuple[str, ...]:
    """Return *units* with the leader hostname appended if it is not already present."""
    leader = get_leader_hostname()
    return units if leader in units else units + (leader,)
def remove_leader(units: tuple[str, ...]) -> tuple[str, ...]:
    """Return *units* with the leader hostname filtered out."""
    leader = get_leader_hostname()
    if leader in units:
        return tuple(u for u in units if u != leader)
    return units
def save_config_files_to_db(units: tuple[str, ...], shared: bool, specific: bool) -> None:
    """Snapshot the deployed config files into the `config_files` table.

    units    : worker hostnames whose config_{unit}.ini files are recorded
    shared   : also record the shared config.ini
    specific : record the per-unit config_{unit}.ini files

    Fix: the sqlite connection is now closed even if reading a config file or
    executing the insert raises (previously it leaked on any exception).
    """
    import sqlite3

    conn = sqlite3.connect(config["storage"]["database"])
    try:
        cur = conn.cursor()
        timestamp = current_utc_timestamp()
        sql = "INSERT INTO config_files(timestamp,filename,data) VALUES(?,?,?)"
        if specific:
            for unit in units:
                with open(f"/home/pioreactor/.pioreactor/config_{unit}.ini") as f:
                    cur.execute(sql, (timestamp, f"config_{unit}.ini", f.read()))
        if shared:
            with open("/home/pioreactor/.pioreactor/config.ini") as f:
                cur.execute(sql, (timestamp, "config.ini", f.read()))
        conn.commit()
    finally:
        conn.close()
def sync_config_files(ftp_client, unit: str, shared: bool, specific: bool) -> None:
    """
    Moves
    1. the config.ini (if shared is True)
    2. config_{unit}.ini to the remote Pioreactor (if specific is True)

    Note: this function occurs in a thread
    """
    # move the global config.ini
    # there was a bug where if the leader == unit, the config.ini would get wiped
    if (get_leader_hostname() != unit) and shared:
        ftp_client.put(
            localpath="/home/pioreactor/.pioreactor/config.ini",
            remotepath="/home/pioreactor/.pioreactor/config.ini",
        )
    # move the specific unit config.ini; the worker reads it as unit_config.ini
    if specific:
        try:
            ftp_client.put(
                localpath=f"/home/pioreactor/.pioreactor/config_{unit}.ini",
                remotepath="/home/pioreactor/.pioreactor/unit_config.ini",
            )
        except Exception as e:
            # most likely cause: the per-unit config file does not exist locally
            click.echo(
                f"Did you forget to create a config_{unit}.ini to deploy to {unit}?",
                err=True,
            )
            raise e
    # NOTE(review): callers open ftp_client via `with client.open_sftp()`, so
    # this close makes it closed twice — harmless, but the caller owns the handle.
    ftp_client.close()
    return
@click.group()
def pios() -> None:
    """
    Command each of the worker Pioreactors with the `pios` command.

    See full documentation here: https://docs.pioreactor.com/user_guide/Advanced/Command%20line%20interface#leader-only-commands-to-control-workers

    Report errors or feedback here: https://github.com/Pioreactor/pioreactor/issues
    """
    # (removed a redundant function-local `import sys`; sys is already imported
    # at module level)

    # `pios` only makes sense on the leader — workers use `pio`.
    if not am_I_leader():
        click.echo("workers cannot run `pios` commands. Try `pio` instead.", err=True)
        sys.exit(1)

    # without any active workers there is nothing for any subcommand to do
    if len(get_active_workers_in_inventory()) == 0:
        logger = create_logger("CLI", unit=get_unit_name(), experiment=get_latest_experiment_name())
        logger.warning("No active workers. See `network.inventory` section in config.ini.")
        sys.exit(1)
@pios.command("update", short_help="update PioreactorApp on workers")
@click.option(
    "--units",
    multiple=True,
    default=(UNIVERSAL_IDENTIFIER,),
    type=click.STRING,
    help="specify a Pioreactor name, default is all active units",
)
@click.option("-b", "--branch", help="update to the github branch")
def update(units: tuple[str, ...], branch: Optional[str]) -> None:
    """
    Pulls and installs the latest code
    """
    import paramiko  # type: ignore

    logger = create_logger("update", unit=get_unit_name(), experiment=get_latest_experiment_name())

    # build the remote update command, optionally targeting a specific branch
    if branch is None:
        command = "pio update --app"
    else:
        command = f"pio update --app -b {branch}"

    def _thread_function(unit: str):
        logger.debug(f"Executing `{command}` on {unit}...")
        try:
            with paramiko.SSHClient() as client:
                client.load_system_host_keys()
                client.connect(unit, username="pioreactor", compress=True)
                (stdin, stdout, stderr) = client.exec_command(command)
                # drain stderr so the remote command runs to completion
                for line in stderr.readlines():
                    pass
            return True
        except Exception as e:
            logger.error(f"Unable to connect to unit {unit}.")
            logger.debug(e, exc_info=True)
            return False

    units = universal_identifier_to_all_active_workers(units)
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        outcomes = list(executor.map(_thread_function, units))

    if not all(outcomes):
        sys.exit(1)
@pios.command("install-plugin", short_help="install a plugin on workers")
@click.argument("plugin")
@click.option(
    "--units",
    multiple=True,
    default=(UNIVERSAL_IDENTIFIER,),
    type=click.STRING,
    help="specify a Pioreactor name, default is all active units",
)
def install_plugin(plugin: str, units: tuple[str, ...]) -> None:
    """
    Installs a plugin to worker and leader
    """
    import paramiko

    logger = create_logger(
        "install_plugin", unit=get_unit_name(), experiment=get_latest_experiment_name()
    )
    command = f"pio install-plugin {plugin}"

    def _install_on(unit: str):
        logger.debug(f"Executing `{command}` on {unit}...")
        try:
            with paramiko.SSHClient() as client:
                client.load_system_host_keys()
                client.connect(unit, username="pioreactor", compress=True)
                (stdin, stdout, stderr) = client.exec_command(command)
                # drain stderr so the remote command runs to completion
                for line in stderr.readlines():
                    pass
            return True
        except Exception as e:
            logger.error(f"Unable to connect to unit {unit}.")
            logger.debug(e, exc_info=True)
            return False

    # the plugin must be installed on the leader as well as the targeted workers
    units = add_leader(universal_identifier_to_all_active_workers(units))
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        outcomes = list(executor.map(_install_on, units))

    if not all(outcomes):
        sys.exit(1)
@pios.command("uninstall-plugin", short_help="uninstall a plugin on workers")
@click.argument("plugin")
@click.option(
    "--units",
    multiple=True,
    default=(UNIVERSAL_IDENTIFIER,),
    type=click.STRING,
    help="specify a Pioreactor name, default is all active units",
)
def uninstall_plugin(plugin: str, units: tuple[str, ...]) -> None:
    """
    Uninstalls a plugin from worker and leader
    """
    import paramiko

    logger = create_logger(
        "uninstall_plugin", unit=get_unit_name(), experiment=get_latest_experiment_name()
    )
    command = f"pio uninstall-plugin {plugin}"

    def _uninstall_on(unit: str):
        logger.debug(f"Executing `{command}` on {unit}...")
        try:
            with paramiko.SSHClient() as client:
                client.load_system_host_keys()
                client.connect(unit, username="pioreactor", compress=True)
                (stdin, stdout, stderr) = client.exec_command(command)
                # drain stderr so the remote command runs to completion
                for line in stderr.readlines():
                    pass
            return True
        except Exception as e:
            logger.error(f"Unable to connect to unit {unit}.")
            logger.debug(e, exc_info=True)
            return False

    # remove the plugin from the leader as well as the targeted workers
    units = add_leader(universal_identifier_to_all_active_workers(units))
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        outcomes = list(executor.map(_uninstall_on, units))

    if not all(outcomes):
        sys.exit(1)
@pios.command(name="sync-configs", short_help="sync config")
@click.option(
    "--units",
    multiple=True,
    default=(UNIVERSAL_IDENTIFIER,),
    type=click.STRING,
    help="specify a hostname, default is all active units",
)
@click.option(
    "--shared",
    is_flag=True,
    help="sync the shared config.ini",
)
@click.option(
    "--specific",
    is_flag=True,
    help="sync the worker specific config.ini(s)",
)
def sync_configs(units: tuple[str, ...], shared: bool, specific: bool) -> None:
    """
    Deploys the shared config.ini and worker specific config.inis to the workers.

    If neither `--shared` nor `--specific` are specified, both are set to true.
    """
    import paramiko  # lazy import: paramiko is only needed by this command

    logger = create_logger(
        "sync_configs", unit=get_unit_name(), experiment=get_latest_experiment_name()
    )
    units = universal_identifier_to_all_active_workers(units)
    if not shared and not specific:
        # default: deploy both the shared and the per-unit config files
        shared = specific = True

    def _thread_function(unit: str) -> bool:
        logger.debug(f"Syncing configs on {unit}...")
        try:
            with paramiko.SSHClient() as client:
                client.load_system_host_keys()
                client.connect(unit, username="pioreactor", compress=True)
                with client.open_sftp() as ftp_client:
                    sync_config_files(ftp_client, unit, shared, specific)
            return True
        except Exception as e:
            logger.error(f"Unable to connect to unit {unit}.")
            logger.debug(e, exc_info=True)
            return False

    # save config.inis to database (recorded even if a later transfer fails)
    save_config_files_to_db(units, shared, specific)

    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        results = executor.map(_thread_function, units)

    if not all(results):
        sys.exit(1)
@pios.command("kill", short_help="kill a job(s) on workers")
@click.argument("job", nargs=-1)
@click.option(
    "--units",
    multiple=True,
    default=(UNIVERSAL_IDENTIFIER,),
    type=click.STRING,
    help="specify a hostname, default is all active units",
)
@click.option("--all-jobs", is_flag=True, help="kill all worker jobs")
@click.option("-y", is_flag=True, help="skip asking for confirmation")
def kill(job: str, units: tuple[str, ...], all_jobs: bool, y: bool) -> None:
    """
    Send a SIGTERM signal to JOB. JOB can be any Pioreactor job name, like "stirring".

    Example:

    > pios kill stirring

    multiple jobs accepted:

    > pios kill stirring dosing_control

    Kill all worker jobs (i.e. this excludes leader jobs like watchdog). Ignores `job` argument.

    > pios kill --all-jobs

    """
    from sh import ssh  # type: ignore

    if not y:
        confirm = input(
            f"Confirm killing {str(job) if (not all_jobs) else 'all jobs'} on {units}? Y/n: "
        ).strip()
        if confirm != "Y":
            return

    command = f"pio kill {' '.join(job)}"
    # Bug fix: the flag needs a leading space, otherwise it fuses onto the last
    # job name (e.g. "pio kill stirring--all-jobs").
    command += " --all-jobs" if all_jobs else ""

    logger = create_logger("CLI", unit=get_unit_name(), experiment=get_latest_experiment_name())

    def _thread_function(unit: str):
        logger.debug(f"Executing `{command}` on {unit}.")
        try:
            ssh(unit, command)
            if all_jobs:  # tech debt: LEDs are not a job, so turn them off explicitly
                ssh(
                    unit,
                    "pio run led_intensity --A 0 --B 0 --C 0 --D 0 --no-log",
                )
            return True
        except Exception as e:
            logger.debug(e, exc_info=True)
            logger.error(f"Unable to connect to unit {unit}.")
            return False

    units = universal_identifier_to_all_active_workers(units)
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        results = executor.map(_thread_function, units)

    if not all(results):
        sys.exit(1)
@pios.command(
    name="run",
    context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
    short_help="run a job on workers",
)
@click.argument("job", type=click.STRING)
@click.option(
    "--units",
    multiple=True,
    default=(UNIVERSAL_IDENTIFIER,),
    type=click.STRING,
    help="specify a hostname, default is all active units",
)
@click.option("-y", is_flag=True, help="Skip asking for confirmation.")
@click.pass_context
def run(ctx, job: str, units: tuple[str, ...], y: bool) -> None:
    """
    Run a job on all, or specific, workers. Ex:

    > pios run stirring

    Will start stirring on all workers, after asking for confirmation.

    Each job has their own unique options:

    > pios run stirring --duty-cycle 10
    > pios run od_reading --od-angle-channel 135,0

    To specify specific units, use the `--units` keyword multiple times, ex:

    > pios run stirring --units pioreactor2 --units pioreactor3

    """
    from sh import ssh
    from shlex import quote  # https://docs.python.org/3/library/shlex.html#shlex.quote

    extra_args = list(ctx.args)

    # Bug fix: a mistyped `--unit` option arrives here as the raw token
    # "--unit" (unknown options are passed through), so the old check for the
    # bare string "unit" never caught it. Check both forms.
    if "unit" in extra_args or "--unit" in extra_args:
        click.echo("Did you mean to use 'units' instead of 'unit'? Exiting.", err=True)
        sys.exit(1)

    core_command = " ".join(["pio", "run", quote(job), *extra_args])

    # run detached on the worker and pipe all output to null
    command = " ".join(["nohup", core_command, ">/dev/null", "2>&1", "&"])

    if not y:
        confirm = input(f"Confirm running `{core_command}` on {units}? Y/n: ").strip()
        if confirm != "Y":
            return

    def _thread_function(unit: str) -> bool:
        click.echo(f"Executing `{core_command}` on {unit}.")
        try:
            ssh(unit, command)
            return True
        except Exception as e:
            logger = create_logger(
                "CLI", unit=get_unit_name(), experiment=get_latest_experiment_name()
            )
            logger.debug(e, exc_info=True)
            logger.error(f"Unable to connect to unit {unit}.")
            return False

    units = universal_identifier_to_all_active_workers(units)
    with ThreadPoolExecutor(max_workers=len(units)) as executor:
        results = executor.map(_thread_function, units)

    if not all(results):
        sys.exit(1)
@pios.command(
name="reboot",
short_help="reboot workers",
)
@click.option(
"--units",
multiple=True,
default=(UNIVERSAL_IDENTIFIER,),
type=click.STRING,
help="specify a hostname, default is all active units",
)
@click.option("-y", is_flag=True, help="Skip asking for confirmation.")
@click.pass_context
def reboot(units: tuple[str, ...], y: bool) -> None:
"""
Reboot Pioreactor / Raspberry Pi
"""
from sh import ssh
# pipe all output to null
command = " ".join(["sudo", "reboot"])
if not y:
confirm = input(f"Confirm running `{command}` on {units}? Y/n: ").strip()
if confirm | |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 16:01:32 2019
@author: jorge
"""
import re
import networkx as nx
import json
import csv
import os
import VenuesReader
###############################################
## Util functions
###############################################
def getPublicationsInfo(Venues, inside_venues, dblp_file, best_papers, i_best_papers, max_data_year):
    """
    Iterates throught the dblp file to save information for each node in the network. Each author should have a list of lists,
    where each inner list consists of [n_authors, venue score, year] for each publication

    Side effects: titles matched against the award lists are popped from
    best_papers / i_best_papers, so both caller-owned dicts shrink here.

    Returns:
        matched_best_pubs: dict, normalized title -> list of author names (award papers)
        authors_info: dict, author -> list of [n_authors, venue_score, year]
        max_pub_year: int, newest publication year seen (strictly below max_data_year)
    """
    regex = re.compile('[^a-zA-Z ]')  # strips everything but letters and spaces
    matched_best_pubs = dict()
    max_pub_year = 0
    authors_info = dict()
    with open(dblp_file, "r") as fp:
        for line in fp:  # one JSON record per line
            data = json.loads(line)
            if 'venue' in data and 'authors' in data and data['venue'].lower() in inside_venues: ##inside venue
                try:
                    year = int(data['year']) ## in case the year is not on the data
                    if year >= max_data_year:
                        continue
                    if year > max_pub_year: ##update max year for age probability calculation
                        max_pub_year = year
                except:
                    print "Publication without a year need to fix this..."
                    quit()
                n_authors = float(len(data['authors']))
                venue_score = getVenueScore(Venues, data['venue'].lower(), year)
                award_pub = False
                #title = encodeTitle(data['title'])
                title = data['title'].encode('utf8')
                title = regex.sub('',title)
                title = title.lower()
                # exact match first, then substring match for truncated titles
                if title in best_papers:
                    matched_best_pubs[title] = list()
                    best_papers.pop(title, None)
                    award_pub = True
                else:
                    for i in i_best_papers: ##some publications have incomplete names, try to match any title with the incomplete name
                        if i in title:
                            matched_best_pubs[title] = list()
                            i_best_papers.pop(i, None)  # safe: we break right after mutating
                            award_pub = True
                            break
                for author in data['authors']:
                    author = author.encode('utf8')
                    #author = encodeAuthorName(author)
                    if award_pub:
                        matched_best_pubs[title].append(author)
                    if author not in authors_info:
                        authors_info[author] = []
                    authors_info[author].append([n_authors, venue_score, year])
    print "Matched %d inside authors from dblp for venues: %s" % (len(authors_info), ";".join(inside_venues))
    return matched_best_pubs, authors_info, max_pub_year
def getAuthorsGroundTruth(file):
    """
    Read the best-paper ground-truth CSV and split it into complete and
    incomplete titles.

    Each row is [author, paper, uni, equal, pos]. Titles containing "..."
    are treated as incomplete (truncated). All titles are normalized by
    removing every character that is not a letter or a space.

    Returns:
        (best_papers, incomplete_best_papers): two dicts mapping the
        normalized title to the list of its authors.
    """
    strip_pattern = re.compile('[^a-zA-Z ]')
    best_papers = {}
    incomplete_best_papers = {}
    with open(file, 'r') as handle:
        for row in csv.reader(handle, delimiter=',', quotechar='"'):
            author, paper, uni, equal, pos = row
            # decide the bucket before normalization removes the dots
            is_incomplete = "..." in paper
            cleaned = strip_pattern.sub('', paper)
            bucket = incomplete_best_papers if is_incomplete else best_papers
            bucket.setdefault(cleaned, []).append(author)
    return best_papers, incomplete_best_papers
def getInsideVenues(folder):
    """
    Load the set of inside venue names from <folder>inside_venues.txt
    (one venue per CSV row; each row must contain exactly one field).
    """
    path = folder + "inside_venues.txt"
    venues = set()
    with open(path, 'r') as handle:
        for row in csv.reader(handle, delimiter=',', quotechar='"'):
            (venue,) = row  # intentionally raises if a row has extra columns
            venues.add(venue)
    return venues
def removeGhosts(G, authors_info):
    """
    Remove from graph G every node that has no entry in authors_info.
    Mutates G in place.
    """
    n_to_remove = []
    # collect first, then remove: avoids mutating G while iterating its nodes
    for n in G.nodes():
        if n not in authors_info:
            n_to_remove.append(n)
    print "Removed %d out of %d nodes from graph , because no author info" % (len(n_to_remove), G.number_of_nodes())
    for n in n_to_remove:
        G.remove_node(n)
def getInsideOutsideGraphs(G, inside_authors):
    """
    Split multigraph G into two graphs: edges whose citing author belongs to
    inside_authors go to Inside_G, everything else stays in Outside_G.

    G itself is cleared before returning; callers must use the two returned
    graphs instead.
    """
    Outside_G = G.copy()
    Inside_G = nx.empty_graph(0, create_using=nx.MultiDiGraph())
    for edge in G.edges(data=True, keys=True):  # edge = (u, v, key, data)
        citing_author = edge[0]
        if citing_author in inside_authors: ##the inside authors considers every author in the inside network
            year = edge[3]['year']
            author_position = edge[3]['author_position']
            venue = edge[3]['venue']
            weight = edge[3]['weight']
            venue_score = edge[3]['venue_score']
            coauthor_penalty = edge[3]['co_author_penalty']
            key = edge[2]
            # move the edge: drop it from the outside copy, add it inside
            Outside_G.remove_edge(edge[0], edge[1], key=key)
            Inside_G.add_edge(edge[0], edge[1], year=year, venue=venue, weight=weight, author_position=author_position, venue_score=venue_score, co_author_penalty=coauthor_penalty)
    # not_cited_authors = set()
    # for author in inside_authors: ##for nodes that are not cited and neither cite anybody
    #     if author not in Inside_G.nodes():
    #         Inside_G.add_node(author)
    #         not_cited_authors.add(author)
    G.clear()
    return Inside_G, Outside_G#, not_cited_authors
def getVenueScore(venue_scores, venue, year):
    """
    Look up a venue's score for a given year.

    The venue name is normalized (commas removed, lowercased) before the
    lookup. Falls back to the score stored under venue_scores.missing_year
    (the cross-year average), and finally to venue_scores.epsilon when the
    venue is entirely unknown.
    """
    normalized = venue.replace(",", "").lower()
    score = venue_scores.getVenueScoreYear(normalized, year)
    if score:
        return score
    # no score for that year: try the venue's average stored under missing_year
    fallback = venue_scores.getVenueScoreYear(normalized, venue_scores.missing_year)
    return fallback if fallback else venue_scores.epsilon
def createNetwork(Venues, citations, n_pens, uniform_weight, allow_self_citations):
    """
    Build a citation MultiDiGraph from a CSV file of author-to-author edges.

    Arguments:
        Venues: venue-score provider, forwarded to getVenueScore
        citations: path to the citations CSV file
        n_pens: number of trailing penalty columns in the file (0, 1 or 2)
        uniform_weight: if True, every edge weight is forced to 1.0
        allow_self_citations: if False, rows where citing == cited are skipped
    """
    G = nx.MultiDiGraph()
    with open(citations, "r") as fp:
        csv_reader = csv.reader(fp, delimiter=',',quotechar='"')
        for line in csv_reader:
            try:
                if n_pens == 0: #no_penalties associated to the file
                    [citing_author, cited_author, year, venue, weight, author_pos] = line
                elif n_pens == 1: ##one penalty associated to the file
                    [citing_author, cited_author, year, venue, weight, author_pos, pen1] = line
                else: ##in case that we have 2 penalties
                    [citing_author, cited_author, year, venue, weight, author_pos, pen1, pen2] = line
            except Exception as e:
                # a row with the wrong number of columns is treated as fatal
                print line
                print e
                quit()
            venue_score = getVenueScore(Venues, venue, int(year))
            if not allow_self_citations and citing_author == cited_author:
                continue
            ##for the baseline we want to allow auto-citations. On the penalty networks this will be punished with 1.0 pen so they won't count. Still the score is split differently among the authors than it would be if we just ignore the link
            # continue
            if uniform_weight:
                weight = 1.0
            # NOTE(review): add_edge(..., attr_dict=...) is the networkx 1.x API
            if n_pens == 0:
                G.add_edge(citing_author, cited_author, attr_dict={'year':int(year), 'venue':venue, 'weight':float(weight), 'author_position':int(author_pos), 'venue_score': venue_score})
            elif n_pens == 1:
                G.add_edge(citing_author, cited_author, attr_dict={'year':int(year), 'venue':venue, 'weight':float(weight), 'author_position':int(author_pos), 'venue_score': venue_score, 'pen1':float(pen1)})
            else:
                G.add_edge(citing_author, cited_author, attr_dict={'year':int(year), 'venue':venue, 'weight':float(weight), 'author_position':int(author_pos), 'venue_score': venue_score, 'pen1':float(pen1), 'pen2':float(pen2)})
    return G
def addOutsiderInfo(G, file):
    """
    Attach an 'outsider_info' attribute to every node of G.

    Every node starts at epsilon; authors listed in the CSV file get
    mean_group_score + epsilon instead. Rows naming unknown nodes are
    silently skipped.
    """
    epsilon = 1
    # NOTE(review): (name, values) argument order is the networkx 1.x API
    nx.set_node_attributes(G, 'outsider_info', dict(zip(G.nodes(), [epsilon]*G.number_of_nodes())))
    matches = {}
    with open(file, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
        for row in reader:
            [author, min_group_score, mean_group_score, max_group_score] = row
            #author = encodeAuthorName(author)
            #print author
            try:
                G.node[author]['outsider_info'] = float(mean_group_score)+epsilon
                matches[author] = 1
            except KeyError:
                continue
    print "Outsiders matched %d out of %d nodes" % (len(matches), G.number_of_nodes())
def writeNetworkToFile(G, file):
    """
    Dump every edge of multigraph G to a CSV file, one line per edge:
    "citing","cited",year,"venue",weight,author_position
    """
    # BUGFIX: the handle was opened with open()/close() and leaked if any
    # formatting raised; `with` guarantees it is closed.
    with open(file, "w") as myf:
        for edge in G.edges(data=True, keys=True):  # edge = (u, v, key, data)
            author1 = edge[0]
            author2 = edge[1]
            data = edge[3]
            #author1 = encodeAuthorName(author1)
            #author2 = encodeAuthorName(author2)
            myf.write('"%s","%s",%d,"%s",%.4f,%d\n' % (
                author1, author2, int(data['year']), data['venue'],
                float(data['weight']), int(data['author_position'])))
def generateFilename(save_folder, pr_type, prod_type, out_type, outsider_rankings_file, pr_params, out_prefix):
    """
    Build the three output filenames for a ranking run.

    Creates save_folder if needed. The base name is assembled from the
    prefix, the ranking configuration and the parameter string; "newrank"
    and "yetrank" runs skip the product/outsider components.

    Returns:
        (results_name, initial_pr_name, first_iteration_name)
    """
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    parts = [out_prefix]
    if pr_type in ("newrank", "yetrank"):
        parts.append(pr_type)
    else:
        if prod_type != "":
            parts.append("prodt_" + prod_type.upper())
        if out_type != "":
            # infer the outsider-percentage tag from the rankings filename
            if "10" in outsider_rankings_file:
                tag = "out10_"
            elif "5" in outsider_rankings_file:
                tag = "out5_"
            else:
                tag = "out0_"
            parts.append(tag + out_type.upper())
        parts.append(pr_type)
    parts.append("prt_" + pr_params)
    #file_parts.append(str(end_year))
    basename = save_folder + "_".join(parts)
    return basename + ".txt", basename + "_InitialPR.txt", basename + "_OneITPR.txt"
def prepareData(folder, dblp_file, uniform_weight, inside_authors, Venues, in_cit_file, out_cit_file, allow_self_citations, n_pens):
    """
    Load the inside and outside citation networks from CSV files and add
    never-cited inside authors as isolated nodes to the inside graph.

    Returns:
        (G, Outside_G): inside and outside citation MultiDiGraphs
    """
    # citations_list = folder + "network_before_%d.csv" % start_year
    # icitations_list = folder + "data_in_b%d_%s.csv" % (start_year, pen_name) #3for the citation with penalties
    # ocitations_list = folder + "data_out_b%d.csv" % start_year
    # ncitations_list = folder + "data_null.csv" ##to store guys that do not have any citation but are part of the inside network
    print "Creating networks weigth uniform weight = %s" % uniform_weight
    in_cit_file = folder + in_cit_file
    out_cit_file = folder + out_cit_file
    if not os.path.isfile(in_cit_file): ## read graph
        print "Did not find citation file, script is not ready to create one!"
        quit()
        # G= createNetwork(Venues, citations_list, True, uniform_weight)
        # print G.number_of_edges()
        # G, Outside_G = getInsideOutsideGraphs(G, inside_authors)
        # n_nodes = G.number_of_nodes()
        # print "%d Inside Nodes, %d Outside nodes" % (G.number_of_nodes(), Outside_G.number_of_nodes())
        # writeNetworkToFile(G, icitations_list)
        # writeNetworkToFile(Outside_G, ocitations_list)
    else:
        G = createNetwork(Venues, in_cit_file, n_pens, uniform_weight, allow_self_citations) #3create network reading penalty column from file
        Outside_G = createNetwork(Venues, out_cit_file, 0, uniform_weight, False) ##create network without reading column penalty and without self-citations
    n_nodes = G.number_of_nodes()
    print "%d inside nodes %d outside nodes" % (G.number_of_nodes(), Outside_G.number_of_nodes())
    ##add dblp authors that are never cited
    graph_nodes = {n:1 for n in G.nodes()}
    for author in inside_authors:
        if author not in graph_nodes:
            G.add_node(author)
    print "%d not_cited_authors were added" % (G.number_of_nodes() - n_nodes)
    return G, Outside_G
def createGTFile(folder, best_papers, i_best_papers, matched_pubs, eval_file, G):
filename = folder + "gt_" + eval_file
print "GT FILE is: %s" % filename
if os.path.isfile(filename):
print "GT File already exists, not creating a new one"
return
write_file = open(filename, "a")
for pub in matched_pubs: ##publications matched by title on the dblp file
for i in range(len(matched_pubs[pub])):
write_file.write('"%s","%s",%d,%.3f,%.3f\n' % (matched_pubs[pub][i], pub, 1, 1/float(len(matched_pubs[pub])), 1/float(i+1)))
best_papers.update(i_best_papers) #dict with unmatched pubs
counter = 0
for pub in best_papers:
for i in range(len(best_papers[pub])):
if best_papers[pub][i] in G.nodes():
counter | |
2*self.dC[:, :, :self.N]
elif mode == 'diagonal':
# Eigendecomposition of the A matrix
L, V = torch.linalg.eig(self.dA)
V_inv = torch.linalg.inv(V)
# Check that the eigendedecomposition is correct
if self.verbose:
print("Diagonalization error:", torch.dist(V @ torch.diag_embed(L) @ V_inv, self.dA))
# Change the parameterization to diagonalize
self.dA = L
self.dB = contract('h n m, h m -> h n', V_inv, self.dB)
self.dC = contract('h n m, c h n -> c h m', V, self.dC)
elif mode == 'dense':
pass
else: raise NotImplementedError("NPLR Kernel step mode must be {'dense' | 'linear' | 'diagonal'}")
    def default_state(self, *batch_shape):
        """
        Create a zero initial state of shape (*batch_shape, H, N) and cache
        the einsum contraction expressions used by step().
        """
        C = _r2c(self.C)
        N = C.size(-1)
        H = C.size(-2)

        # Cache the tensor contractions we will later do, for efficiency
        # These are put in this function because they depend on the batch size
        if self._step_mode !='linear':
            # non-linear step modes operate on a doubled state size --
            # presumably the expanded conjugate-pair representation; confirm
            # against the step-mode setup code
            N *= 2

            if self._step_mode == 'diagonal':
                self.state_contraction = contract_expression(
                    "h n, ... h n -> ... h n",
                    (H, N),
                    batch_shape + (H, N),
                )
            else:
                # Dense (quadratic) case: expand all terms
                self.state_contraction = contract_expression(
                    "h m n, ... h n -> ... h m",
                    (H, N, N),
                    batch_shape + (H, N),
                )

            self.input_contraction = contract_expression(
                "h n, ... h -> ... h n",
                (H, N), # self.dB.shape
                batch_shape + (H,),
            )

        self.output_contraction = contract_expression(
            "c h n, ... h n -> ... c h",
            (C.shape[0], H, N), # self.dC.shape
            batch_shape + (H, N),
        )

        state = torch.zeros(*batch_shape, H, N, dtype=C.dtype, device=C.device)
        return state
def step(self, u, state):
""" Must have called self.setup_step() and created state with self.default_state() before calling this """
if self._step_mode == 'linear':
new_state = self._step_state_linear(u, state)
else:
new_state = self._step_state(u, state)
y = self.output_contraction(self.dC, new_state)
return y, new_state
class SSKernelSlow(OptimModule):
    """Slow version of SSKernel function for illustration and benchmarking.

    - Caches discretized matrices A^(dt), B^(dt)
    - Computes K_L(A^dt, B^dt, C)

    Usage:
    ```
    krylov = SSKernelSlow(L, A, B, C, log_dt)()
    ```

    Result is expected to be equal to SSKernelNPLR(L, w, P, B, C, log_dt, P)() if A = w - PP^*
    """

    def __init__(self, L, A, B, C, log_dt, trainable=None, lr=None):
        """
        Arguments:
            L: kernel length
            A: (..., N, N) state matrix
            B: (..., N) input map
            C: output map, broadcast to (channels, H, N)
            log_dt: (H,) log of the per-feature step sizes
            trainable: None/bool or dict of per-parameter flags ('dt','A','B')
            lr: optional learning-rate hook forwarded to register()
        """
        super().__init__()
        self.L = L
        self.N = A.size(-1)
        self.H = log_dt.size(-1)

        C = C.expand(torch.broadcast_shapes(C.shape, (1, self.H, self.N))) # (C, H, N)

        # Register parameters; `train` is the default flag when `trainable`
        # is given as a single boolean instead of a dict
        train = False
        if trainable is None: trainable = {}
        if trainable == False: trainable = {}
        if trainable == True: trainable, train = {}, True
        self.register("log_dt", log_dt, trainable.get('dt', train), lr)
        self.register("A", A, trainable.get('A', train), lr)
        self.register("B", B, trainable.get('B', train), lr)
        # NOTE leaving in complex form for convenience, which means it currently won't work with DDP and might have incorrect param count
        # This class shouldn't be used for anything other than testing and simple ablations, so this is fine
        # self.register("C", C.conj().resolve_conj(), True, None, wd=None)
        self.C = nn.Parameter(_resolve_conj(C))

        # Cache if nothing is trained
        self.trainable = trainable.get('dt', train) or trainable.get('A', train) or trainable.get('B', train)
        self.K = None # Compute in forward pass since that ensures correct device

    def forward(self, state=None, rate=1.0, L=None):
        """
        Compute the length-L convolution kernel (and optionally the kernel
        contribution of an initial state).
        """
        if L is None: L = self.L
        # This class shouldn't support the more advanced sampling and variable length functionalities, since it's just for testing
        # But the code from NPLR could be pasted here if desired
        assert rate == 1.0 and L is not None

        if self.trainable:
            # recompute every call since parameters may have changed
            dA, dB = bilinear(torch.exp(self.log_dt), self.A, self.B)
            k = krylov(L, dA, dB, self.C) # (H L)
        else:
            if self.K is None:
                dA, dB = bilinear(torch.exp(self.log_dt), self.A, self.B)
                self.K = krylov(L, dA, dB) # (H N L)
            k = contract('hnl,chn->chl', self.K[..., :L], self.C)

        if state is not None:
            # NOTE(review): self.dA only exists after _setup_state()/setup_step()
            # has been called; passing `state` before that will raise -- confirm intended
            state = state.to(self.dA)
            state = contract("... n m, ... m -> ... n", self.dA, state)
            k_state = krylov(L, self.dA, state.unsqueeze(-3), self.C)
        else:
            k_state = None
        return k, k_state
        # return k.to(torch.float)

    def default_state(self, *batch_shape):
        # Zero state of shape (*batch_shape, H, N) matching C's dtype/device
        state = torch.zeros(*batch_shape, self.H, self.N, dtype=self.C.dtype, device=self.C.device)
        return state

    def _setup_state(self):
        # Cache the discretized matrices dA (H N N), dB (H N)
        self.dA, self.dB = bilinear(torch.exp(self.log_dt), self.A, self.B)

    def setup_step(self):
        self._setup_state()
        self.dC = self.C

    def step(self, u, state):
        """Single recurrent step: state' = dA state + dB u; y = dC state'."""
        next_state = contract("h m n, b h n -> b h m", self.dA, state) \
                + contract("h n, b h -> b h n", self.dB, u)
        y = contract("c h n, b h n -> b c h", self.dC, next_state)
        return y, next_state
class HippoSSKernel(nn.Module):
    """Wrapper around SSKernel that generates A, B, C, dt according to HiPPO arguments.

    The SSKernel is expected to support the interface
    forward()
    default_state()
    setup_step()
    step()
    """

    def __init__(
        self,
        H,
        N=64,
        L=1,
        measure="legs",
        rank=1,
        channels=1, # 1-dim to C-dim map; can think of C as having separate "heads"
        dt_min=0.001,
        dt_max=0.1,
        deterministic=False,
        trainable=None, # Dictionary of options to train various HiPPO parameters
        lr=None, # Hook to set LR of hippo parameters differently
        mode="nplr", # 'slow' for complex naive version, 'real' for real naive version
        length_correction=True, # Multiply by I-A|^L after initialization; can be turned off for initialization speed
        hurwitz=False,
        tie_state=False, # Tie parameters of HiPPO ODE across the H features
        precision=1, # 1 (single) or 2 (double) for the kernel
        resample=False, # If given inputs of different lengths, adjust the sampling rate. Note that L should always be provided in this case, as it assumes that L is the true underlying length of the continuous signal
        verbose=False,
        keops=False,
    ):
        super().__init__()
        self.N = N
        self.H = H
        L = L or 1
        self.precision = precision
        dtype = torch.double if self.precision == 2 else torch.float
        cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
        self.rate = None if resample else 1.0
        self.channels = channels

        # Generate dt: log-uniform samples in [dt_min, dt_max], one per feature
        log_dt = torch.rand(self.H, dtype=dtype) * (
            math.log(dt_max) - math.log(dt_min)
        ) + math.log(dt_min)

        # Compute the preprocessed representation
        if mode == "real":  # For testing and ablation purposes
            # Generate A, B
            A, B = hippo.transition(measure, self.N)
            A = torch.as_tensor(A, dtype=dtype)
            B = torch.as_tensor(B, dtype=dtype)[:, 0]

            # Generate C
            C = torch.randn(channels, self.H, self.N, dtype=dtype)

            self.kernel = SSKernelSlow(
                L, A, B, C, log_dt,
                trainable=trainable, lr=lr,
            )
        else:
            # Generate low rank correction p for the measure
            w, p, B, C, _ = hippo.nplr(measure, self.N, rank, dtype=dtype)
            if deterministic:
                C = repeat(C, 'n -> c h n', c=channels, h=self.H)
            else:
                # complex C of half size: conjugate pairs carry the other half
                C = torch.randn(channels, self.H, self.N // 2, dtype=cdtype)
            if mode == "nplr":
                self.kernel = SSKernelNPLR(
                    L, w, p, B, C,
                    log_dt,
                    hurwitz=hurwitz,
                    trainable=trainable,
                    lr=lr,
                    tie_state=tie_state,
                    length_correction=length_correction,
                    verbose=verbose,
                    keops=keops,
                )
            elif mode == "slow":  # Testing only
                # Reconstruct the dense A = diag(w) - p p^* in conjugate-pair form
                A = torch.diag_embed(_conj(w)) \
                        - contract("... r p, ... r q -> ... p q", _conj(p), _conj(p).conj())
                self.kernel = SSKernelSlow(
                    L, A, _conj(B), _conj(C), log_dt,
                    trainable=trainable, lr=lr,
                )
            # NOTE(review): any other mode leaves self.kernel unset and fails
            # later with AttributeError -- confirm whether that is intended

    def forward(self, state=None, L=None):
        # Returns the (float) convolution kernel and optional state kernel
        k, k_state = self.kernel(state=state, rate=self.rate, L=L)
        k_state = None if k_state is None else k_state.float()
        return k.float(), k_state

    @torch.no_grad()
    def forward_state(self, u, state):
        """ Forward the state through a sequence, i.e. computes the state after passing chunk through SSM

        state: (..., H, N)
        u: (..., H, L)

        Returns: (..., H, N)
        """
        self.kernel._setup_state()
        dA, dB = self.kernel.dA, self.kernel.dB # (H N N) (H N)

        # expand the state to conjugate-pair size if the kernel works doubled
        conj = state.size(-1) != dA.size(-1)
        if conj: state = _conj(state)

        v = contract('h n, ... h l -> ... h n l', dB, u.flip(-1)) # dB.unsqueeze(-1) * u.flip(-1).unsqueeze(-2)
        AL, v = power(u.size(-1), dA, v)
        next_state = contract("... m n, ... n -> ... m", AL, state)
        next_state = next_state + v

        if conj: next_state = next_state[..., : next_state.size(-1) // 2]
        return next_state

    def step(self, u, state, **kwargs):
        # Single-step the wrapped kernel, casting the output back to float
        u, state = self.kernel.step(u, state, **kwargs)
        return u.float(), state

    def default_state(self, *args, **kwargs):
        return self.kernel.default_state(*args, **kwargs)
""" Tests below """
def generate_kernel(H, N, L, measure="legs", rank=1):
A, B = hippo.transition(measure, N)
A = torch.as_tensor(A, dtype=torch.float)
B = torch.as_tensor(B, dtype=torch.float)[:, 0]
# _C = torch.ones(1, H, N)
_C = torch.randn(1, H, N)
log_dt = torch.log((1 + 10 * torch.arange(H) / H) * 1 / L)
# kernel slow real
kernel_real = SSKernelSlow(L, A, B, _C, log_dt)
kernel_real.to(device)
kernel_real.setup_step()
# kernel slow complex
w, p, B, V = hippo.nplr(measure, N, rank=rank)
C = contract(
"ij, ... j -> ... i", V.conj().transpose(-1, -2), | |
# <gh_stars>0  -- repository-metadata placeholder left by dataset extraction
#!/usr/bin/env python3
# Copyright (C) 2016 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import typing, numbers, threading, time, datetime, sys
# Lifecycle states stored in Task._state
STATE_INIT = 'INIT'
STATE_TO_START = 'TO_START'
STATE_STARTED = 'STARTED'
STATE_TO_STOP = 'TO_STOP'
STATE_STOPPED = 'STOPPED'
STATE_TO_CONTINUE = 'TO_CONTINUE'
STATE_FINISHED = 'FINISHED'

# Activity markers stored in Task._activity while a task waits or works
ACTIVITY_NONE = 'NONE'
ACTIVITY_BUSY = 'BUSY'
ACTIVITY_SLEEP = 'SLEEP'
ACTIVITY_JOIN = 'JOIN'
class ExceptionHandler:
    """
    Coordinates exception state across task threads.

    Once any thread reports an exception via put(), every thread sharing
    this handler instance exits as soon as it calls fire().
    """

    def __init__(self):
        # flips to True once any exception has been reported
        self._exc = False

    def put(self, exc: Exception):
        """
        Record that an exception occurred.

        Arguments:
            exc: the exception instance; ignored here, but subclasses may
                 inspect it to distinguish error kinds.
        """
        self._exc = True

    def fire(self, lock: threading.Lock = None):
        """
        Exit the calling thread via sys.exit(1) if an exception was
        reported, else do nothing. A held lock is released before exiting.
        """
        if not self._exc:
            return
        if lock and lock.locked():
            lock.release()
        sys.exit(1)
def concat(*tasks) -> 'Task':
    """
    Chain any number of Task objects together.

    The first task becomes the head of the chain; every following task is
    appended to it. Returns the head (None when called without arguments).
    """
    head = None
    for candidate in tasks:
        assert isinstance(candidate, Task), 'tasks must be instances of class Task'
        if head:
            head.append(candidate)
        else:
            head = candidate
    return head
class Task:
"""
Uses multithreading for tasks or chains of tasks.
In standard case it's an action, which is executed by a single callable.
Subsequent tasks or chains of tasks can be added with method append().
"""
_exc_default = ExceptionHandler()
_contained_register = {}
    def __init__(self, action: typing.Callable, **kwargs):
        """
        Construct a new 'Task' object

        Arguments:
        action: callable object (f.i. a function).

        Keyword Arguments:
        args: tuple=() -- argument list of action
        kwargs: dict={} -- keyword arguments of action
        action_stop: typing.Callable=None -- object (f.i. a function), which is called when task is stopped.
        args_stop: tuple=() -- argument list of action_stop
        kwargs_stop: dict={} -- keyword arguments of action_stop
        action_cont: typing.Callable=None -- object (f.i. a function), which is called when task is continued.
        args_cont: tuple=() -- argument list of action_cont
        kwargs_cont: dict={} -- keyword arguments of action_cont
        join: bool=False -- flag if contained task will be joined
        duration: float=None -- duration of task (if action returns earlier, task will wait)
        exc: ExceptionHandler=None -- exception handler to coordinate exceptions

        example:
        def a1():
            print("action 1 - started")
        def a2(text: str):
            print(text, "- started")
        def a3(txt: str="something"):
            print(txt, "- started")
        task.cont(
            Task(a1),
            Task(a2, args=("action 2",)),
            Task(a3, kwargs={"txt": "action 3"})
        ).start()
        """
        # the action and its call arguments
        self._action = action
        self._args = kwargs.pop('args', ())
        self._kwargs = kwargs.pop('kwargs', {})
        self._join = kwargs.pop('join', False)
        self._duration = kwargs.pop('duration', None)
        self._num = kwargs.pop('num', 0)  # NOTE(review): not documented in the docstring above
        # chain bookkeeping (every task knows its successor and its root)
        self._next = None
        self._root = self
        self._time_end = None
        self._netto_time = False
        self._cnt = 0
        # the following are root only attributes
        self._state = STATE_INIT
        self._thread = None
        self._thread_start = None
        self._restart = False
        self._thread_cont = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)
        self._actual = None
        self._last = None
        self._activity = ACTIVITY_NONE
        self._time_action = None
        self._time_called_stop = None
        self._contained = []
        self._cont_join = None
        # optional callbacks fired on stop/continue
        self._action_stop = kwargs.pop('action_stop', None)
        self._args_stop = kwargs.pop('args_stop', ())
        self._kwargs_stop = kwargs.pop('kwargs_stop', {})
        self._action_cont = kwargs.pop('action_cont', None)
        self._args_cont = kwargs.pop('args_cont', ())
        self._kwargs_cont = kwargs.pop('kwargs_cont', {})
        self._exc = kwargs.pop('exc', self._exc_default)
        # exit immediately if a shared handler already recorded an exception
        self._exc.fire()
        # argument validation (NOTE: asserts are stripped under `python -O`)
        assert not kwargs, 'unknown keyword arguments: ' + str(kwargs.keys())
        assert isinstance(self._action, typing.Callable), \
                "action needs to be a callable"
        assert isinstance(self._args, tuple), 'args needs to be a tuple'
        assert isinstance(self._kwargs, dict), 'kwargs needs to be a dictionary'
        assert self._action_stop is None or isinstance(self._action_stop, typing.Callable), \
                "action_stop needs to be a callable"
        assert isinstance(self._args_stop, tuple), 'args_stop needs to be a tuple'
        assert isinstance(self._kwargs_stop, dict), 'kwargs_stop needs to be a dictionary'
        assert self._action_cont is None or isinstance(self._action_cont, typing.Callable), \
                "action_cont needs to be a callable"
        assert isinstance(self._args_cont, tuple), 'args_cont needs to be a tuple'
        assert isinstance(self._kwargs_cont, dict), 'kwargs_cont needs to be a dictionary'
        assert isinstance (self._join, bool), 'join needs to be a bool value'
        assert not self._join or hasattr(self._action, '__self__'), 'only bounded methods can be joined'
        assert not self._join or isinstance(self._action.__self__, Task), 'only instances of Task can be joined'
        assert not self._join or self._action.__name__ in ["start", "cont"], 'only methods start or cont can be joined'
        assert self._duration is None or isinstance(self._duration, numbers.Number), \
                'duration needs to be a number'
        assert self._duration is None or self._duration >= 0, \
                'duration needs to be positive'
        assert isinstance(self._num, int), 'num must be an integer'
        assert self._num >= 0, 'num must be positive'
        assert self._exc is None or isinstance(self._exc, ExceptionHandler), \
                'exc needs to be an ExceptionHandler instance'
def append(self, task) -> 'Task':
"""
appends a task or a chain of tasks (both must be root tasks)
"""
try:
assert self._root is self, 'appending to root tasks only'
assert task._root is task, 'both tasks need to be root tasks'
assert self._state in [
STATE_INIT,
STATE_FINISHED,
STATE_STOPPED
], 'root task is actually executed'
assert task._state in [
STATE_INIT,
STATE_FINISHED,
STATE_STOPPED
], 'appended task is actually executed'
assert not (self is task and self._last is self), 'is already self-contained'
except Exception as exc:
self._root._exc.put(exc)
raise
self._exc.fire()
if self._last is None and task._last is None:
self._last = task
self._next = task
elif self._last is None:
self._last = task._last
self._next = task
elif task._last is None:
self._last._next = task
self._last = task
elif self is task:
self._last._next = self
self._last = self
else:
self._last._next = task
self._last = task._last
if not self is task:
task.root = self
return self
    def start(self, gap: float=0) -> 'Task':
        """
        starts execution of task (finished or stopped tasks may be started again)

        Keyword Arguments:
        gap: sets the waiting time, before start occurs (in seconds)

        Returns self for chaining.

        NOTE(review): the lock acquired here is intentionally NOT released on
        the success path -- ownership passes to the worker thread
        (_start2/_execute), which releases it; confirm against _execute.
        """
        self._root._exc.fire()
        self._root._lock.acquire()
        try:
            assert isinstance(gap, numbers.Number), 'gap needs to be a number'
            assert gap >= 0, 'gap needs to be positive'
            assert self._root is self, 'only root tasks can be started'
            assert self._state in [
                STATE_INIT,
                STATE_TO_STOP,
                STATE_STOPPED,
                STATE_FINISHED
            ], "can't start from state " + self._state
            assert self._thread_start is None, "starting is already in progress"
            assert self._thread_cont is None, "continuation is already in progress"
        except Exception as exc:
            self._root._exc.put(exc)
            self._root._lock.release()
            raise
        if self._state == STATE_TO_STOP or gap > 0:
            # deferred start: a worker thread waits for the stop to finish
            # and/or sleeps for the requested gap before really starting
            if self._state == STATE_TO_STOP:
                self._restart = True
            else:
                self._state = STATE_TO_START
            if gap:
                self._thread_start = threading.Thread(
                    target=self._start2,
                    args=(time.time() + gap,)
                )
            else:
                self._thread_start = threading.Thread(target=self._start2)
            self._thread_start.start()
        else:
            # immediate start
            self._start3()
            self._thread = threading.Thread(target=self._execute)
            self._thread.start()
        return self
    def _start2(self, time_action: float=None) -> None:
        # Worker entry point for deferred starts (restart after stop and/or
        # delayed start). Runs with self._lock held, acquired by start().
        if self._state == STATE_TO_STOP:
            # restart was requested while stopping: wait for the old worker
            # thread to finish before taking over
            self._lock.release()
            self._thread.join()
            self._exc.fire()
            self._lock.acquire()
            if not threading.current_thread() is self._thread_start:
                # superseded by another start/stop call in the meantime
                self._lock.release()
                return
        if time_action:
            gap = time_action - time.time()
            if gap > 0:
                # sleep (interruptibly, via the condition) until start time
                self._activity = ACTIVITY_SLEEP
                self._cond.wait(gap)
                self._activity = ACTIVITY_NONE
                self._exc.fire(self._lock)
                if not threading.current_thread() is self._thread_start:
                    self._lock.release()
                    return
        # promote this thread to the executing thread and run the chain
        self._thread = self._thread_start
        self._thread_start = None
        self._start3()
        self._execute()
    def _start3(self) -> None:
        # Transition into STARTED and reset the per-run bookkeeping.
        self._state = STATE_STARTED
        self._restart = False
        self._time_called_stop = None
        self._actual = self
        self._cnt = 0
        self._time_action = time.time()
        if self._duration != None:
            # precompute the wall-clock moment this task should end
            self._time_end = self._time_action + self._duration
def join(self) -> None:
"""
joins the thread of the task
"""
try:
assert self._root is self, "only root tasks can be joined"
assert self._state != STATE_INIT, "can't join tasks in state " + str(self._state)
except Exception as exc:
self._root._exc.put(exc)
raise
self._exc.fire()
try: self._thread_start.join()
except Exception: pass
try: self._thread_cont.join()
except Exception: pass
try: self._thread.join()
except Exception: pass
def stop(self) -> None:
"""
stops execution as fast as possible
allows to continue with method cont or restart with method start
already finished tasks silently do nothing
"""
self._root._exc.fire()
self._root._lock.acquire()
try:
assert self is self._root, 'only root tasks can be stopped'
assert self._state in [
STATE_TO_START,
STATE_STARTED,
STATE_TO_STOP,
STATE_TO_CONTINUE,
STATE_FINISHED
], "can't stop from state: " + | |
0:1024].mean(axis=0).values.reshape(1,1024))
df_EFR_a_85_label = pd.DataFrame(df_EFR_a_85_test1.iloc[2*i, 1024:1031].values.reshape(1,7))
df_EFR_a_85_avg = df_EFR_a_85_avg.append(pd.concat([df_EFR_a_85_avg_t, df_EFR_a_85_label], axis=1, ignore_index=True))
# set the title of columns
df_EFR_a_85_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_a_85_avg = df_EFR_a_85_avg.sort_values(by=["Condition", "Subject"])
df_EFR_a_85_avg = df_EFR_a_85_avg.reset_index(drop=True)
'''
##################################################
# Frequency Domain

# parameters
sampling_rate = 9606 # fs
# sampling_rate = 9596.623
n = 1024
k = np.arange(n)
T = n/sampling_rate # time of signal
frq = k/T
freq = frq[range(int(n/2))]
n2 = 9606
k2 = np.arange(n2)
T2 = n2/sampling_rate
frq2 = k2/T2
freq2 = frq2[range(int(n2/2))]

# zero padding: pad each 1024-sample epoch out to a longer record so the
# FFT bins land on round frequencies
# for df_EFR: 1024 + 95036 zeros = 96060 samples per row
df_EFR_data = df_EFR.iloc[:, :1024]
df_EFR_label = df_EFR.iloc[:, 1024:]
df_EFR_mid = pd.DataFrame(np.zeros((1408, 95036)))
df_EFR_withzero = pd.concat([df_EFR_data, df_EFR_mid, df_EFR_label], axis=1)
# rename columns
df_EFR_withzero.columns = np.append(np.arange(96060), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])

# for df_EFR_avg_85: 1024 + 8582 zeros = 9606 samples per row
df_EFR_avg_85_data = df_EFR_avg_85.iloc[:, :1024]
df_EFR_avg_85_label = df_EFR_avg_85.iloc[:, 1024:]
df_EFR_avg_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_85_withzero = pd.concat([df_EFR_avg_85_data, df_EFR_avg_85_mid, df_EFR_avg_85_label], axis=1)
# rename columns
df_EFR_avg_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])

# df_EFR_avg_win_85 (windowed averages), same padding scheme
df_EFR_avg_win_85_data = df_EFR_avg_win_85.iloc[:, :1024]
df_EFR_avg_win_85_label = df_EFR_avg_win_85.iloc[:, 1024:]
df_EFR_avg_win_85_mid = pd.DataFrame(np.zeros((176, 8582)))
df_EFR_avg_win_85_withzero = pd.concat([df_EFR_avg_win_85_data, df_EFR_avg_win_85_mid, df_EFR_avg_win_85_label], axis=1)
df_EFR_avg_win_85_withzero.columns = np.append(np.arange(9606), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])

# concatenate AENU: stitch the four vowel conditions (44 rows each)
# side by side into one 4096-sample row per subject/condition
temp1 = pd.concat([df_EFR_avg_85.iloc[0:44, 0:1024].reset_index(drop=True),df_EFR_avg_85.iloc[44:88, 0:1024].reset_index(drop=True)], axis=1)
temp2 = pd.concat([df_EFR_avg_85.iloc[88:132, 0:1024].reset_index(drop=True), df_EFR_avg_85.iloc[132:176, 0:1024].reset_index(drop=True)], axis=1)
df_EFR_avg_85_aenu = pd.concat([temp1, temp2], axis=1, ignore_index=True)
df_EFR_avg_85_aenu_withzero = pd.concat([df_EFR_avg_85_aenu, pd.DataFrame(np.zeros((44, 36864)))] , axis=1)
'''
# test##############
# test(detrend)
temp_test = np.asarray(df_EFR_avg_85_data.iloc[0, 0:1024])
temp_test_detrend = signal.detrend(temp_test)
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(temp_test)
plt.subplot(2, 1, 2)
plt.plot(temp_test_detrend)
plt.show()
# the raw data is already DC removed
# test(zero padding)
temp_EFR_1 = df_EFR_withzero.iloc[0, 0:1024]
temp_EFR_2= df_EFR_withzero.iloc[0, 0:9606]
temp_amplitude_spectrum_1 = np.abs((fftpack.fft(temp_EFR_1)/n)[range(int(n/2))])
temp_amplitude_spectrum_2 = np.abs((fftpack.fft(temp_EFR_2)/n2)[range(int(n2/2))])
plt.figure()
plt.subplot(2, 1, 1)
markers1 = [11, 21, 32, 43, 53, 64, 75]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_1, '-D', markevery=markers1)
plt.xlim(0, 100)
plt.title('without zero padding')
plt.subplot(2, 1, 2)
#markers2 = [100, 200, 300, 400, 500, 600, 700]
markers2 = [99, 199, 299, 399, 499, 599, 599]
# which corresponds to 100 200....700Hz in frequency domain
plt.plot(temp_amplitude_spectrum_2, '-D', markevery=markers2)
plt.xlim(0, 1000)
# plt.xscale('linear')
plt.title('with zero padding')
plt.show()
# #################
'''
# Calculate the Amplitude Spectrum
# create a new dataframe with zero-padding amplitude spectrum
'''
# for df_EFR
df_as_7= pd.DataFrame()
for i in range(1408):
temp_EFR = df_EFR_avg_85_withzero.iloc[i, 0:96060]
temp_as = np.abs((fftpack.fft(temp_EFR)/n2)[range(int(n2/2))])
#df_as_7 = pd.concat([df_as_7, temp_as_7_t], axis=0)
df_as_7 = df_as_7.append(pd.DataFrame(np.array([temp_as[1000], temp_as[2000], temp_as[3000], temp_as[4000], \
temp_as[5000], temp_as[6000], temp_as[7000]]).reshape(1,7)), ignore_index = True)
df_as_7 = pd.concat([df_as_7, df_EFR_label], axis=1) # add labels on it
# filter by 'a vowel and 85Db'
df_as_7_test1 = df_as_7[(df_as_7['Vowel'] == 'a vowel') & (df_as_7['Sound Level'] == '85')]
df_as_7_test1 = df_as_7_test1.reset_index(drop=True)
'''
# for df_EFR_avg_vcs_withzero
# Compute amplitude spectra per recording (176 rows = 4 vowels x 44):
#   *_no0 : FFT of the un-padded 1024-sample signal (512 positive bins)
#   *     : FFT of the zero-padded 9606-sample signal (4803 positive bins)
#   *7    : 7 sampled bins at indices 100..700 — presumably 100..700 Hz for
#           the padded FFT (bin spacing ~1 Hz); confirm against n2
# NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
# collecting rows in a list and calling pd.concat once would be faster.
df_as_85_no0= pd.DataFrame()
df_as_85= pd.DataFrame()
df_as7_85= pd.DataFrame()
df_as_win_85= pd.DataFrame()
df_as7_win_85= pd.DataFrame()
for i in range(176):
    #temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
    # un-padded spectrum: n/2 = 512 positive-frequency bins
    temp_as_no0 = np.abs((np.fft.fft(df_EFR_avg_85_data.iloc[i, :]))[range(int(n/2))])
    df_as_85_no0 = df_as_85_no0.append(pd.DataFrame(temp_as_no0.reshape(1,512)), ignore_index = True)
    # zero-padded spectrum: n2/2 = 4803 positive-frequency bins
    temp_as = np.abs((np.fft.fft(df_EFR_avg_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))])
    df_as_85 = df_as_85.append(pd.DataFrame(temp_as.reshape(1,4803)), ignore_index = True)
    df_as7_85 = df_as7_85.append(pd.DataFrame(np.array([temp_as[100], temp_as[200], temp_as[300], temp_as[400], \
                                                        temp_as[500], temp_as[600], temp_as[700]]).reshape(1,7)), ignore_index = True)
    # same two quantities for the windowed signals
    temp_as_win = np.abs((np.fft.fft(df_EFR_avg_win_85_withzero.iloc[i, 0:9606]))[range(int(n2/2))])
    df_as_win_85 = df_as_win_85.append(pd.DataFrame(temp_as_win.reshape(1,4803)), ignore_index = True)
    df_as7_win_85 = df_as7_win_85.append(pd.DataFrame(np.array([temp_as_win[100], temp_as_win[200], temp_as_win[300], temp_as_win[400], \
                                                                temp_as_win[500], temp_as_win[600], temp_as_win[700]]).reshape(1,7)), ignore_index = True)
df_as_85_no0 = pd.concat([df_as_85_no0, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_85 = pd.concat([df_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as7_85 = pd.concat([df_as7_85, df_EFR_avg_85_label], axis=1) # add labels on it
df_as_win_85 = pd.concat([df_as_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
df_as7_win_85 = pd.concat([df_as7_win_85, df_EFR_avg_win_85_label], axis=1) # add labels on it
# Without zero padding: stitch the four per-vowel spectra side by side so each
# of the 44 recordings is described by its a|e|n|u spectra in one row.
df_as_85_aenu = pd.concat([df_as_85.iloc[0:44, :4803],
                           df_as_85.iloc[44:88, :4803].reset_index(drop=True),
                           df_as_85.iloc[88:132, :4803].reset_index(drop=True),
                           df_as_85.iloc[132:176, :4803].reset_index(drop=True)], axis=1)
# Same, truncated to the first 1300 bins per vowel — presumably 0..1300 Hz at
# ~1 Hz/bin for the padded FFT; confirm the sampling rate.
df_as_85_1300_aenu = pd.concat([df_as_85.iloc[0:44, :1300],
                                df_as_85.iloc[44:88, :1300].reset_index(drop=True),
                                df_as_85.iloc[88:132, :1300].reset_index(drop=True),
                                df_as_85.iloc[132:176, :1300].reset_index(drop=True)], axis=1)
# For the un-padded spectra the equivalent cut is 139 bins (coarser bins).
df_as_85_no0_1300 = df_as_85_no0.iloc[:, :139]
df_as_85_no0_aenu = pd.concat([df_as_85_no0_1300.iloc[0:44, :],
                               df_as_85_no0_1300.iloc[44:88, :].reset_index(drop=True),
                               df_as_85_no0_1300.iloc[88:132, :].reset_index(drop=True),
                               df_as_85_no0_1300.iloc[132:176, :].reset_index(drop=True)], axis=1)
df_as7_85_aenu = pd.concat([df_as7_85.iloc[0:44, :7],
                            df_as7_85.iloc[44:88, :7].reset_index(drop=True),
                            df_as7_85.iloc[88:132, :7].reset_index(drop=True),
                            df_as7_85.iloc[132:176, :7].reset_index(drop=True)], axis=1)
# for efr_aenu: spectra of the 4096-sample concatenated a|e|n|u time signals.
# Bins 43..298 are sampled — presumably ~100..700 Hz for a 4096-point FFT;
# TODO confirm against the sampling rate.
df_aenu_as_85 = pd.DataFrame()
df_aenu_as7_85 = pd.DataFrame()
for i in range(44):
    #temp_aenu_EFR = df_EFR_avg_aenu_withzero.iloc[i, 0:9606]
    temp_as2 = np.abs((fftpack.fft(df_EFR_avg_85_aenu.iloc[i, 0:4096])/4096)[range(int(4096/2))])
    df_aenu_as_85 = df_aenu_as_85.append(pd.DataFrame(temp_as2.reshape(1,2048)), ignore_index = True)
    df_aenu_as7_85 = df_aenu_as7_85.append(pd.DataFrame(np.array([temp_as2[43], temp_as2[85], temp_as2[128], temp_as2[170], \
                                                                  temp_as2[213], temp_as2[256], temp_as2[298]]).reshape(1,7)), ignore_index = True)
#df_aenu_as_85 = pd.concat([df_aenu_as_85, df_EFR_avg_85_label], axis=1) # add labels on it
# --- disabled averaging/normalisation variants (triple-quoted, not executed) ---
'''
# average test1 and test2
df_as_7_avg = pd.DataFrame()
for i in range(44):
    df_as_7_avg1 = pd.DataFrame(df_as_7_test1.iloc[2*i: 2*i+1, 0:7].mean(axis=0).values.reshape(1,7))
    df_as_7_label = pd.DataFrame(df_as_7_test1.iloc[2*i, 7:14].values.reshape(1,7))
    df_as_7_avg_t = pd.concat([df_as_7_avg1, df_as_7_label], axis=1, ignore_index=True)
    df_as_7_avg = df_as_7_avg.append(df_as_7_avg_t)
# set the title of columns
df_as_7_avg.columns = np.append(np.arange(7), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_as_7_avg = df_as_7_avg.sort_values(by=["Condition", "Subject"])
df_as_7_avg = df_as_7_avg.reset_index(drop=True)
'''
'''
# set a normalized AS
df_as_7_avg_data= pd.DataFrame(df_as_7_avg.iloc[:, 0:7].astype(float))
df_as_7_avg_sum= pd.DataFrame(df_as_7_avg.iloc[:, 0:7]).sum(axis=1)
df_as_7_avg_label= pd.DataFrame(df_as_7_avg.iloc[:, 7:14])
# normalize
df_as_7_avg_norm = df_as_7_avg_data.div(df_as_7_avg_sum, axis=0)
# add label
df_as_7_avg_norm = pd.concat([df_as_7_avg_norm, df_as_7_avg_label], axis=1, ignore_index=True)
'''
# normalization: divide by the per-column energy (sum of squared magnitudes).
# NOTE(review): `.iloc[0:4096]` below is a ROW slice; with only 44 rows it
# selects the whole frame, so it is harmless, but `.iloc[:, 0:4096]` was
# presumably intended (compare the column slices on the next two lines).
df_EFR_avg_85_aenu_norm = df_EFR_avg_85_aenu.div((df_EFR_avg_85_aenu.iloc[0:4096].abs()**2).sum())
df_aenu_as_85_1300_norm = df_aenu_as_85.iloc[:, :535].div((df_aenu_as_85.iloc[:, :535].abs()**2).sum()/1300)
df_as_85_1300_aenu_norm = df_as_85_1300_aenu.div((df_as_85_1300_aenu.abs()**2).sum()/1300)
# Calculate correlation
# EFR (time domain): Pearson correlation between recordings. `.T.corr()`
# correlates rows; `.iloc[22:44, 0:22]` keeps the cross-block between the
# second 22 recordings and the first 22 — presumably test vs retest halves
# of the 44 rows per vowel (confirm the row ordering upstream).
corr_EFR_avg_85_a = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_e = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_n = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_u = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 0:22]
corr_EFR_avg_85_aenu = df_EFR_avg_85_aenu.iloc[:, 0:4096].T.corr(method='pearson').iloc[22:44, 0:22]
# --- disabled within-half correlation variants (not executed) ---
'''
corr_EFR_avg_85_a_t = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_e_t = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_n_t = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_u_t = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[0:22, 0:22]
corr_EFR_avg_85_a_re = df_EFR_avg_85.iloc[0:44, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_e_re = df_EFR_avg_85.iloc[44:88, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_n_re = df_EFR_avg_85.iloc[88:132, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
corr_EFR_avg_85_u_re = df_EFR_avg_85.iloc[132:176, 0:1024].T.corr(method='pearson').iloc[22:44, 22:44]
'''
# AS (frequency domain): Pearson correlation between recordings' amplitude
# spectra, cross-block between the second and first 22 rows of each vowel
# (same test/retest layout as the time-domain correlations above).
corr_as_85_a = df_as_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_e = df_as_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_n = df_as_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_u = df_as_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_a = df_as_win_85.iloc[0:44, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_e = df_as_win_85.iloc[44:88, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_n = df_as_win_85.iloc[88:132, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_win_85_u = df_as_win_85.iloc[132:176, 0:1300].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as_85_aenu = df_aenu_as_85.iloc[0:44, 0:2048].T.corr(method='pearson').iloc[22:44, 0:22]
# here we use df_aenu_as_85.iloc[:, 0:535] to limit freq into 0 to 1300Hz
corr_as_85_aenu_1300 = df_aenu_as_85.iloc[0:44, 0:535].T.corr(method='pearson').iloc[22:44, 0:22]
# FIX: the following assignment appeared twice verbatim in the original;
# the duplicate (which recomputed the same 44x44 correlation) was removed.
corr_as_85_no0_aenu = df_as_85_no0_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_aenu = df_as7_85_aenu.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
corr_aenu_as7_85 = df_aenu_as7_85.iloc[0:44, :].T.corr(method='pearson').iloc[22:44, 0:22]
# calculate the improved PCC matrix (improved_PCC is defined elsewhere in
# this file; its exact definition is not visible here)
corr_as_85_a_v2 = improved_PCC(df_as_85.iloc[0:44, 0:1300])
corr_as_85_e_v2 = improved_PCC(df_as_85.iloc[44:88, 0:1300])
corr_as_85_n_v2 = improved_PCC(df_as_85.iloc[88:132, 0:1300])
corr_as_85_u_v2 = improved_PCC(df_as_85.iloc[132:176, 0:1300])
corr_as_85_1300_aenu = improved_PCC(df_as_85_1300_aenu)
# Combined feature rows: time-domain EFR (4096 cols) + amplitude spectrum.
# df_EFR + df_aenu_AS_1300 (535 bins of the aenu spectrum)
df_aenu_sum_85 = pd.concat([df_EFR_avg_85_aenu, df_aenu_as_85.iloc[:, :535]], axis=1)
# df_aenu_sum_85 = pd.concat([df_EFR_avg_85_aenu_norm, df_aenu_as_85_1300_norm], axis=1)
corr_sum_85_aenu = df_aenu_sum_85.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22]
# df_EFR + df_aenu_no0_as (un-padded per-vowel spectra)
df_aenu_sum_85_v2 = pd.concat([df_EFR_avg_85_aenu, df_as_85_no0_aenu], axis=1)
corr_sum_85_aenu_v2 = df_aenu_sum_85_v2.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22]
# concatenate df_EFR and df_as_85_1300_aenu (padded per-vowel spectra, 1300 bins)
df_aenu_sum_85_v3 = pd.concat([df_EFR_avg_85_aenu, df_as_85_1300_aenu], axis=1)
# df_aenu_sum_85_v3 = pd.concat([df_EFR_avg_85_aenu_norm, df_as_85_1300_aenu_norm], axis=1)
corr_sum_85_aenu_v3 = df_aenu_sum_85_v3.iloc[0:44, 0:].T.corr(method='pearson').iloc[22:44, 0:22]
# improved PCC (do not remove the mean for the AS part)
# For every pair of rows in df_aenu_sum_85_v3 (44 recordings, each row =
# 4096 time samples followed by spectral bins): remove the mean from the
# time-domain part only, energy-normalise the two parts separately, then
# compute a normalised correlation over the concatenated vector.
corr_sum_85_aenu_v4 = pd.DataFrame()
signal_in = df_aenu_sum_85_v3
for i in range(44):
    row_pcc_notremovemean = []
    for j in range(44):
        sig_1 = signal_in.iloc[i, :].reset_index(drop=True)
        sig_2 = signal_in.iloc[j, :].reset_index(drop=True)
        sig_1_remove_mean = (sig_1 - sig_1.mean()).reset_index(drop=True)
        sig_2_remove_mean = (sig_2 - sig_2.mean()).reset_index(drop=True)
        # EFR part (first 4096 samples): mean removed, scaled by its energy.
        # AS part (rest): mean kept, scaled by its energy / 1300.
        sig_1_p1 = sig_1_remove_mean.iloc[0:4096].div((sig_1_remove_mean.iloc[0:4096].abs()**2).sum())
        sig_1_p2 = sig_1.iloc[4096:].div((sig_1.iloc[4096:].abs()**2).sum()/1300)
        sig_1_new = pd.concat([sig_1_p1, sig_1_p2])
        sig_2_p1 = sig_2_remove_mean.iloc[0:4096].div((sig_2_remove_mean.iloc[0:4096].abs()**2).sum())
        sig_2_p2 = sig_2.iloc[4096:].div((sig_2.iloc[4096:].abs()**2).sum()/1300)
        sig_2_new = pd.concat([sig_2_p1, sig_2_p2])
        pcc_notremovemean = np.abs(np.sum(sig_1_new * sig_2_new) / np.sqrt(np.sum(sig_1_new*sig_1_new) * np.sum(sig_2_new * sig_2_new)))
        row_pcc_notremovemean = np.append(row_pcc_notremovemean, pcc_notremovemean)
        # Example plot for one representative pair.
        # BUG FIX: the original condition was `if i==4 & j==5`, which parses
        # as the chained comparison `i == (4 & j) == 5` because bitwise `&`
        # binds tighter than `==`; since `4 & j` is always 0 or 4, the branch
        # could never run. Use the boolean `and` as intended.
        if i == 4 and j == 5:
            # Classic PCC (mean removed on the whole vector) for the second
            # subplot title; the original referenced an undefined `pcc` here
            # because this assignment had been commented out.
            pcc = np.abs(np.sum(sig_1_remove_mean * sig_2_remove_mean) /
                         np.sqrt(np.sum(sig_1_remove_mean*sig_1_remove_mean) * np.sum(sig_2_remove_mean * sig_2_remove_mean)))
            plt.figure(1)
            ax1 = plt.subplot(211)
            ax1.plot(sig_1)
            ax1.plot(sig_2)
            ax2 = plt.subplot(212)
            ax2.plot(sig_1_remove_mean)
            ax2.plot(sig_2_remove_mean)
            ax1.set_title("original signal, norm corr = %.3f" % pcc_notremovemean)
            ax2.set_title("signal with mean removed(PCC), norm corr = %.3f" % pcc)
            plt.tight_layout()
            ax1.grid(True)
            ax2.grid(True)
            plt.show()
    corr_sum_85_aenu_v4 = corr_sum_85_aenu_v4.append(pd.DataFrame(row_pcc_notremovemean.reshape(1,44)), ignore_index=True)
# keep only the cross-block (second 22 vs first 22 recordings)
corr_sum_85_aenu_v4 = corr_sum_85_aenu_v4.iloc[22:44, 0:22]
# --- disabled full-resolution / within-half AS correlations (not executed) ---
'''
corr_as_85_a_t = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_e_t = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_n_t = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_u_t = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as_85_a_re = df_as_85.iloc[0:44, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_e_re = df_as_85.iloc[44:88, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_n_re = df_as_85.iloc[88:132, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as_85_u_re = df_as_85.iloc[132:176, 0:48030].T.corr(method='pearson').iloc[22:44, 22:44]
'''
#AS7: correlations over the 7 sampled spectral bins only
corr_as7_85_a = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_e = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_n = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
corr_as7_85_u = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 0:22]
'''
corr_as7_85_a_t = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_e_t = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_n_t = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_u_t = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[0:22, 0:22]
corr_as7_85_a_re = df_as7_85.iloc[0:44, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_e_re = df_as7_85.iloc[44:88, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_n_re = df_as7_85.iloc[88:132, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
corr_as7_85_u_re = df_as7_85.iloc[132:176, 0:7].T.corr(method='pearson').iloc[22:44, 22:44]
'''
# shrink
# Remap correlation values into a narrower range; shrink_value_03_1 /
# shrink_value_05_1 are defined elsewhere in this file (exact mapping not
# visible here — presumably clamping/rescaling 0.3..1 and 0.5..1 to 0..1).
# EFR
'''
corr_EFR_avg_85_a_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_a)
corr_EFR_avg_85_e_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_e)
corr_EFR_avg_85_n_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_n)
corr_EFR_avg_85_u_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_u)
'''
corr_EFR_avg_85_aenu_shrink_03_1 = shrink_value_03_1(corr_EFR_avg_85_aenu)
# AS
'''
corr_as_win_85_a_shrink_03_1 = shrink_value_03_1(corr_as_win_85_a)
corr_as_win_85_e_shrink_03_1 = shrink_value_03_1(corr_as_win_85_e)
corr_as_win_85_n_shrink_03_1 = shrink_value_03_1(corr_as_win_85_n)
corr_as_win_85_u_shrink_03_1 = shrink_value_03_1(corr_as_win_85_u)
'''
corr_as_85_aenu_shrink_03_1 = shrink_value_03_1(corr_as_85_aenu)
# shrink the correlation range from 0.5 to 1
# EFR
corr_EFR_avg_85_aenu_shrink_05_1 = shrink_value_05_1(corr_EFR_avg_85_aenu)
# AS
corr_as_85_aenu_shrink_05_1 = shrink_value_05_1(corr_as_85_aenu)
# test
# sum of time and frequency corelation matrix (element-wise addition)
corr_sum_avg_85_aenu = (corr_EFR_avg_85_aenu + corr_as_85_aenu_1300).copy()
corr_sum_avg_85_aenu_v2 = (corr_EFR_avg_85_aenu + corr_as_85_no0_aenu).copy()
#corr_sum_avg_85_aenu = (corr_EFR_avg_85_aenu + corr_as_85_aenu).copy()
# max of time and frequency corelation matrix
# corr_max_avg_85_aenu = (corr_EFR_avg_85_aenu ? corr_as_85_aenu).copy()
# plot the figure
'''
# Correlation Matrix
# EFR
correlation_matrix(corr_EFR_avg_85_a, 'cross correlation of 85dB a_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_e, 'cross correlation of 85dB e_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_n, 'cross correlation of 85dB n_vowel in time domain')
correlation_matrix(corr_EFR_avg_85_u, 'cross correlation of 85dB u_vowel in time domain')
# AS
correlation_matrix(corr_as_85_a, 'cross correlation of 85dB a_vowel in frequency domain')
correlation_matrix(corr_as_85_e, 'cross correlation of 85dB e_vowel in frequency domain')
correlation_matrix(corr_as_85_n, 'cross correlation of 85dB n_vowel in frequency domain')
correlation_matrix(corr_as_85_u, 'cross correlation of 85dB u_vowel in frequency domain')
# AS7
correlation_matrix(corr_as7_85_a, 'cross correlation of 85dB a_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_e, 'cross correlation of 85dB e_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_n, 'cross correlation of 85dB n_vowel in frequency domain 7')
correlation_matrix(corr_as7_85_u, 'cross correlation of 85dB u_vowel in frequency domain 7')
# | |
# tests/test_oauth2_implicit.py
import time
import datetime
import httpx
import pytest
from pytest_httpx import HTTPXMock
from httpx_auth.testing import BrowserMock, create_token, token_cache, browser_mock
from tests.auth_helper import get_header
import httpx_auth
def test_oauth2_implicit_flow_url_is_mandatory():
    """Constructing OAuth2Implicit with a None authorization URL must fail."""
    with pytest.raises(Exception) as exc_info:
        httpx_auth.OAuth2Implicit(None)
    assert str(exc_info.value) == "Authorization URL is mandatory."
def test_header_value_must_contains_token():
    """A header_value lacking the {token} placeholder must be rejected."""
    with pytest.raises(Exception) as exc_info:
        httpx_auth.OAuth2Implicit("http://test_url", header_value="Bearer token")
    assert str(exc_info.value) == "header_value parameter must contains {token}."
def test_oauth2_implicit_flow_token_is_not_reused_if_a_url_parameter_is_changing(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """Changing a query parameter (fake_param) must trigger a fresh grant.

    Two authorization URLs differing only in ``fake_param`` must each open
    their own browser tab and yield distinct tokens (the second expiry is
    one second later to force a different token value). The long ``state``
    literals are precomputed — presumably hashes httpx_auth derives from the
    request parameters; verify against the library if they ever drift.
    """
    auth1 = httpx_auth.OAuth2Implicit(
        "http://provide_token?response_type=custom_token&fake_param=1",
        token_field_name="custom_token",
    )
    expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    first_token = create_token(expiry_in_1_hour)
    tab1 = browser_mock.add_response(
        opened_url="http://provide_token?response_type=custom_token&fake_param=1&state=5652a8138e3a99dab7b94532c73ed5b10f19405316035d1efdc8bf7e0713690485254c2eaff912040eac44031889ef0a5ed5730c8a111541120d64a898c31afe&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"custom_token={first_token}&state=5652a8138e3a99dab7b94532c73ed5b10f19405316035d1efdc8bf7e0713690485254c2eaff912040eac44031889ef0a5ed5730c8a111541120d64a898c31afe",
    )
    assert get_header(httpx_mock, auth1).get("Authorization") == f"Bearer {first_token}"
    # Ensure that the new token is different than previous one
    expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(
        hours=1, seconds=1
    )
    auth2 = httpx_auth.OAuth2Implicit(
        "http://provide_token?response_type=custom_token&fake_param=2",
        token_field_name="custom_token",
    )
    second_token = create_token(expiry_in_1_hour)
    tab2 = browser_mock.add_response(
        opened_url="http://provide_token?response_type=custom_token&fake_param=2&state=5c3940ccf78ac6e7d6d8d06782d9fd95a533aa5425b616eaa38dc3ec9508fbd55152c58a0d8dd8a087e76b77902559285819a41cb78ce8713e5a3b974bf07ce9&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"custom_token={second_token}&state=5c3940ccf78ac6e7d6d8d06782d9fd95a533aa5425b616eaa38dc3ec9508fbd55152c58a0d8dd8a087e76b77902559285819a41cb78ce8713e5a3b974bf07ce9",
    )
    response = httpx.get("http://authorized_only", auth=auth2)
    # Return headers received on this dummy URL
    assert response.request.headers.get("Authorization") == f"Bearer {second_token}"
    tab1.assert_success(
        "You are now authenticated on 5652a8138e3a99dab7b94532c73ed5b10f19405316035d1efdc8bf7e0713690485254c2eaff912040eac44031889ef0a5ed5730c8a111541120d64a898c31afe. You may close this tab."
    )
    tab2.assert_success(
        "You are now authenticated on 5c3940ccf78ac6e7d6d8d06782d9fd95a533aa5425b616eaa38dc3ec9508fbd55152c58a0d8dd8a087e76b77902559285819a41cb78ce8713e5a3b974bf07ce9. You may close this tab."
    )
def test_oauth2_implicit_flow_token_is_reused_if_only_nonce_differs(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """URLs differing only in ``nonce`` must share the same cached token.

    Only the first auth opens a browser tab; the second request is expected
    to reuse the cached token (the nonce is excluded from the cache key).
    """
    auth1 = httpx_auth.OAuth2Implicit(
        "http://provide_token?response_type=custom_token&nonce=1",
        token_field_name="custom_token",
    )
    expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    token = create_token(expiry_in_1_hour)
    tab = browser_mock.add_response(
        opened_url="http://provide_token?response_type=custom_token&state=67b95d2c7555751d1d72c97c7cd9ad6630c8395e0eaa51ee86ac7e451211ded9cd98a7190848789fe93632d8960425710e93f1f5549c6c6bc328bf3865a85ff2&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F&nonce=%5B%271%27%5D",
        reply_url="http://localhost:5000",
        data=f"custom_token={token}&state=67b95d2c7555751d1d72c97c7cd9ad6630c8395e0eaa51ee86ac7e451211ded9cd98a7190848789fe93632d8960425710e93f1f5549c6c6bc328bf3865a85ff2",
    )
    assert get_header(httpx_mock, auth1).get("Authorization") == f"Bearer {token}"
    auth2 = httpx_auth.OAuth2Implicit(
        "http://provide_token?response_type=custom_token&nonce=2",
        token_field_name="custom_token",
    )
    response = httpx.get("http://authorized_only", auth=auth2)
    # Return headers received on this dummy URL
    assert response.request.headers.get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        "You are now authenticated on 67b95d2c7555751d1d72c97c7cd9ad6630c8395e0eaa51ee86ac7e451211ded9cd98a7190848789fe93632d8960425710e93f1f5549c6c6bc328bf3865a85ff2. You may close this tab."
    )
def test_oauth2_implicit_flow_token_can_be_requested_on_a_custom_server_port(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """The local redirect server port is configurable via redirect_uri_port."""
    # TODO Should use a method to retrieve a free port instead
    custom_port = 5002
    state = (
        "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c46"
        "38576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    )
    auth = httpx_auth.OAuth2Implicit(
        "http://provide_token", redirect_uri_port=custom_port
    )
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A{custom_port}%2F",
        reply_url=f"http://localhost:{custom_port}",
        data=f"access_token={token}&state={state}",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        f"You are now authenticated on {state}. You may close this tab."
    )
def test_oauth2_implicit_flow_post_token_is_sent_in_authorization_header_by_default(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """A token POSTed back by the provider lands in the Authorization header."""
    state = (
        "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c46"
        "38576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    )
    auth = httpx_auth.OAuth2Implicit("http://provide_token")
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token={token}&state={state}",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        f"You are now authenticated on {state}. You may close this tab."
    )
def test_oauth2_implicit_flow_post_token_is_expired_after_30_seconds_by_default(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """A cached token expiring within the default 30s early-expiry is refreshed."""
    auth = httpx_auth.OAuth2Implicit("http://provide_token")
    # Add a token that expires in 29 seconds, so should be considered as expired when issuing the request
    expiry_in_29_seconds = datetime.datetime.utcnow() + datetime.timedelta(seconds=29)
    token_cache._add_token(
        key="42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521",
        token=create_token(expiry_in_29_seconds),
        expiry=httpx_auth.oauth2_tokens._to_expiry(expires_in=29),
    )
    # Meaning a new one will be requested (through a fresh browser tab)
    expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    token = create_token(expiry_in_1_hour)
    tab = browser_mock.add_response(
        opened_url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token={token}&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        "You are now authenticated on 42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521. You may close this tab."
    )
def test_oauth2_implicit_flow_post_token_custom_expiry(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """With early_expiry=28, a token valid for 29s is still served from cache.

    No browser tab is registered: the cached token must be reused as-is
    (create_token with the same expiry presumably yields an identical token —
    verify against httpx_auth.testing if this ever flakes).
    """
    auth = httpx_auth.OAuth2Implicit("http://provide_token", early_expiry=28)
    # Add a token that expires in 29 seconds, so should be considered as not expired when issuing the request
    expiry_in_29_seconds = datetime.datetime.utcnow() + datetime.timedelta(seconds=29)
    token = create_token(expiry_in_29_seconds)
    token_cache._add_token(
        key="42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521",
        token=create_token(expiry_in_29_seconds),
        expiry=httpx_auth.oauth2_tokens._to_expiry(expires_in=29),
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
def test_browser_opening_failure(token_cache, httpx_mock: HTTPXMock, monkeypatch):
    """If the browser cannot be opened, authentication times out."""
    import httpx_auth.oauth2_authentication_responses_server

    class NonOpeningBrowser:
        def open(self, url, new):
            return False  # simulate a browser that refuses to open the URL

    auth = httpx_auth.OAuth2Implicit("http://provide_token", timeout=0.1)
    monkeypatch.setattr(
        httpx_auth.oauth2_authentication_responses_server.webbrowser,
        "get",
        lambda *args: NonOpeningBrowser(),
    )
    httpx_mock.add_response(
        method="GET",
        url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
    )
    with pytest.raises(httpx_auth.TimeoutOccurred) as exc_info:
        httpx.get("http://authorized_only", auth=auth)
    assert (
        str(exc_info.value)
        == "User authentication was not received within 0.1 seconds."
    )
def test_browser_error(token_cache, httpx_mock: HTTPXMock, monkeypatch):
    """If opening the browser raises webbrowser.Error, authentication times out."""
    import httpx_auth.oauth2_authentication_responses_server

    class RaisingBrowser:
        def open(self, url, new):
            import webbrowser

            raise webbrowser.Error("Failure")

    auth = httpx_auth.OAuth2Implicit("http://provide_token", timeout=0.1)
    monkeypatch.setattr(
        httpx_auth.oauth2_authentication_responses_server.webbrowser,
        "get",
        lambda *args: RaisingBrowser(),
    )
    httpx_mock.add_response(
        method="GET",
        url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
    )
    with pytest.raises(httpx_auth.TimeoutOccurred) as exc_info:
        httpx.get("http://authorized_only", auth=auth)
    assert (
        str(exc_info.value)
        == "User authentication was not received within 0.1 seconds."
    )
def test_state_change(token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock):
    """A token is still accepted when the provider replies with another state."""
    auth = httpx_auth.OAuth2Implicit("http://provide_token")
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token={token}&state=123456",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success("You are now authenticated on 123456. You may close this tab.")
def test_empty_token_is_invalid(token_cache, browser_mock: BrowserMock):
    """An empty access_token in the redirect must raise InvalidToken."""
    tab = browser_mock.add_response(
        opened_url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token=&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521",
    )
    with pytest.raises(httpx_auth.InvalidToken) as exception_info:
        httpx.get(
            "http://authorized_only",
            auth=httpx_auth.OAuth2Implicit("http://provide_token"),
        )
    # the message embeds the (empty) token, hence the leading space
    assert str(exception_info.value) == " is invalid."
    tab.assert_success(
        "You are now authenticated on 42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521. You may close this tab."
    )
def test_token_without_expiry_is_invalid(token_cache, browser_mock: BrowserMock):
    """A token lacking the exp claim must raise TokenExpiryNotProvided."""
    tab = browser_mock.add_response(
        opened_url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        # create_token(None) yields a token whose exp claim is absent
        data=f"access_token={create_token(None)}&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521",
    )
    with pytest.raises(httpx_auth.TokenExpiryNotProvided) as exception_info:
        httpx.get(
            "http://authorized_only",
            auth=httpx_auth.OAuth2Implicit("http://provide_token"),
        )
    assert str(exception_info.value) == "Expiry (exp) is not provided in None."
    tab.assert_success(
        "You are now authenticated on 42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521. You may close this tab."
    )
def test_oauth2_implicit_flow_get_token_is_sent_in_authorization_header_by_default(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """A token delivered in the URL fragment (GET) lands in the Authorization header."""
    state = (
        "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c46"
        "38576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    )
    auth = httpx_auth.OAuth2Implicit("http://provide_token")
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url=f"http://localhost:5000#access_token={token}&state={state}",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        f"You are now authenticated on {state}. You may close this tab."
    )
def test_oauth2_implicit_flow_token_is_sent_in_requested_field(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """header_name/header_value control which header carries the token.

    Here the raw token goes into a custom ``Bearer`` header instead of the
    default ``Authorization: Bearer <token>``.
    """
    auth = httpx_auth.OAuth2Implicit(
        "http://provide_token", header_name="Bearer", header_value="{token}"
    )
    expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    token = create_token(expiry_in_1_hour)
    tab = browser_mock.add_response(
        opened_url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token={token}&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521",
    )
    assert get_header(httpx_mock, auth).get("Bearer") == token
    tab.assert_success(
        "You are now authenticated on 42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521. You may close this tab."
    )
def test_oauth2_implicit_flow_can_send_a_custom_response_type_and_expects_token_to_be_received_with_this_name(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """With a custom response_type, the token is read from the matching
    token_field_name in the redirect data."""
    state = "67b95d2c7555751d1d72c97c7cd9ad6630c8395e0eaa51ee86ac7e451211ded9cd98a7190848789fe93632d8960425710e93f1f5549c6c6bc328bf3865a85ff2"
    auth = httpx_auth.OAuth2Implicit(
        "http://provide_token",
        response_type="custom_token",
        token_field_name="custom_token",
    )
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=custom_token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"custom_token={token}&state={state}",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        f"You are now authenticated on {state}. You may close this tab."
    )
def test_oauth2_implicit_flow_expects_token_in_id_token_if_response_type_is_id_token(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """response_type=id_token makes the client read the token from the
    id_token field of the redirect data."""
    state = "87c4108ec0eb03599335333a40434a36674269690b6957fef684bfb6c5a849ce660ef7031aa874c44d67cd3eada8febdfce41efb1ed3bc53a0a7e716cbba025a"
    auth = httpx_auth.OAuth2Implicit("http://provide_token", response_type="id_token")
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=id_token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"id_token={token}&state={state}",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        f"You are now authenticated on {state}. You may close this tab."
    )
def test_oauth2_implicit_flow_expects_token_in_id_token_if_response_type_in_url_is_id_token(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """response_type=id_token embedded in the authorization URL itself (not in
    the auth parameters) must also make the client read the id_token field.

    Fix: the mocked redirect data contained a redaction artifact
    ``{<PASSWORD>}`` in place of ``{token}``, so the reply carried a bogus
    token that could never match the Authorization assertion below.
    """
    auth = httpx_auth.OAuth2Implicit("http://provide_token?response_type=id_token")
    expiry_in_1_hour = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    token = create_token(expiry_in_1_hour)
    tab = browser_mock.add_response(
        opened_url="http://provide_token?response_type=id_token&state=87c4108ec0eb03599335333a40434a36674269690b6957fef684bfb6c5a849ce660ef7031aa874c44d67cd3eada8febdfce41efb1ed3bc53a0a7e716cbba025a&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"id_token={token}&state=87c4108ec0eb03599335333a40434a36674269690b6957fef684bfb6c5a849ce660ef7031aa874c44d67cd3eada8febdfce41efb1ed3bc53a0a7e716cbba025a",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        "You are now authenticated on 87c4108ec0eb03599335333a40434a36674269690b6957fef684bfb6c5a849ce660ef7031aa874c44d67cd3eada8febdfce41efb1ed3bc53a0a7e716cbba025a. You may close this tab."
    )
def test_oauth2_implicit_flow_expects_token_to_be_stored_in_access_token_by_default(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """Without any customization the token is read from the access_token field."""
    state = "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    auth = httpx_auth.OAuth2Implicit("http://provide_token")
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token={token}&state={state}",
    )
    assert get_header(httpx_mock, auth).get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        f"You are now authenticated on {state}. You may close this tab."
    )
def test_oauth2_implicit_flow_token_is_reused_if_not_expired(
    token_cache, httpx_mock: HTTPXMock, browser_mock: BrowserMock
):
    """A still-valid cached token is reused by a second client without a new
    browser round-trip."""
    state = "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    first_auth = httpx_auth.OAuth2Implicit("http://provide_token")
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token={token}&state={state}",
    )
    assert get_header(httpx_mock, first_auth).get("Authorization") == f"Bearer {token}"
    # A second, freshly-created client must hit the shared token cache.
    oauth2 = httpx_auth.OAuth2Implicit("http://provide_token")
    response = httpx.get("http://authorized_only", auth=oauth2)
    # Return headers received on this dummy URL
    assert response.request.headers.get("Authorization") == f"Bearer {token}"
    tab.assert_success(
        f"You are now authenticated on {state}. You may close this tab."
    )
def test_oauth2_implicit_flow_post_failure_if_token_is_not_provided(
    token_cache, browser_mock: BrowserMock
):
    """An empty POSTed reply (no access_token) must abort authentication."""
    state = "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data="",
    )
    with pytest.raises(Exception) as exception_info:
        httpx.get(
            "http://authorized_only",
            auth=httpx_auth.OAuth2Implicit("http://provide_token"),
        )
    expected = "access_token not provided within {}."
    assert str(exception_info.value) == expected
    tab.assert_failure(f"Unable to properly perform authentication: {expected}")
def test_oauth2_implicit_flow_get_failure_if_token_is_not_provided(
    token_cache, browser_mock: BrowserMock
):
    """A GET redirect with no data at all (no access_token) must abort
    authentication."""
    state = "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
    )
    with pytest.raises(Exception) as exception_info:
        httpx.get(
            "http://authorized_only",
            auth=httpx_auth.OAuth2Implicit("http://provide_token"),
        )
    expected = "access_token not provided within {}."
    assert str(exception_info.value) == expected
    tab.assert_failure(f"Unable to properly perform authentication: {expected}")
def test_oauth2_implicit_flow_post_failure_if_state_is_not_provided(
    token_cache, browser_mock: BrowserMock
):
    """A POSTed reply carrying a token but no state raises StateNotProvided."""
    state = "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000",
        data=f"access_token={token}",
    )
    with pytest.raises(httpx_auth.StateNotProvided) as exception_info:
        httpx.get(
            "http://authorized_only",
            auth=httpx_auth.OAuth2Implicit("http://provide_token"),
        )
    expected = f"state not provided within {{'access_token': ['{token}']}}."
    assert str(exception_info.value) == expected
    tab.assert_failure(f"Unable to properly perform authentication: {expected}")
def test_oauth2_implicit_flow_get_failure_if_state_is_not_provided(
    token_cache, browser_mock: BrowserMock
):
    """A GET redirect fragment carrying a token but no state raises
    StateNotProvided (the redirect marker also appears in the reported data)."""
    state = "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    token = create_token(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url=f"http://localhost:5000#access_token={token}",
    )
    with pytest.raises(httpx_auth.StateNotProvided) as exception_info:
        httpx.get(
            "http://authorized_only",
            auth=httpx_auth.OAuth2Implicit("http://provide_token"),
        )
    expected = f"state not provided within {{'access_token': ['{token}'], 'httpx_auth_redirect': ['1']}}."
    assert str(exception_info.value) == expected
    tab.assert_failure(f"Unable to properly perform authentication: {expected}")
def test_with_invalid_token_request_invalid_request_error(
    token_cache, browser_mock: BrowserMock
):
    """error=invalid_request in the redirect fragment raises InvalidGrantRequest
    carrying the standard RFC 6749 description."""
    state = "42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521"
    tab = browser_mock.add_response(
        opened_url=f"http://provide_token?response_type=token&state={state}&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
        reply_url="http://localhost:5000#error=invalid_request",
    )
    with pytest.raises(httpx_auth.InvalidGrantRequest) as exception_info:
        httpx.get(
            "http://authorized_only",
            auth=httpx_auth.OAuth2Implicit("http://provide_token"),
        )
    expected = "invalid_request: The request is missing a required parameter, includes an invalid parameter value, includes a parameter more than once, or is otherwise malformed."
    assert str(exception_info.value) == expected
    tab.assert_failure(f"Unable to properly perform authentication: {expected}")
def test_with_invalid_token_request_invalid_request_error_and_error_description(
token_cache, browser_mock: BrowserMock
):
tab = browser_mock.add_response(
opened_url="http://provide_token?response_type=token&state=42a85b271b7a652ca3cc4c398cfd3f01b9ad36bf9c945ba823b023e8f8b95c4638576a0e3dcc96838b838bec33ec6c0ee2609d62ed82480b3b8114ca494c0521&redirect_uri=http%3A%2F%2Flocalhost%3A5000%2F",
reply_url="http://localhost:5000#error=invalid_request&error_description=desc",
)
with pytest.raises(httpx_auth.InvalidGrantRequest) as exception_info:
httpx.get(
"http://authorized_only",
auth=httpx_auth.OAuth2Implicit("http://provide_token"),
)
| |
import numpy as np
import scipy
from scipy.stats import binom
# numpy.seterr(all='raise')
#
# Decision tree: Regression
#
class Tree():
def __init__(self, X, y, maxDepth, alpha0 = None, baseline_features = None, peek_ahead_max_depth=0, split_val_quantiles = [], peek_ahead_quantiles = [], nSamples = 0, cut_middle_y=0, no_downstream_feature_repeats=0, internal_cross_val=0, internal_cross_val_nodes=0, treegrowth_CV=0, beta0 = 0, beta0_vec = [0, 0]):
self.X = X
self.y = y
self.maxDepth = maxDepth
self.alpha0 = alpha0
self.peek_ahead_max_depth = peek_ahead_max_depth
self.split_val_quantiles = split_val_quantiles
self.peek_ahead_quantiles = peek_ahead_quantiles
self.nSamples = nSamples
self.cut_middle_y = cut_middle_y
self.no_downstream_feature_repeats = no_downstream_feature_repeats
self.internal_cross_val = internal_cross_val
self.internal_cross_val_nodes = internal_cross_val_nodes
self.treegrowth_CV = treegrowth_CV
self.beta0 = beta0
self.beta0_vec = beta0_vec
self.peek_ahead_depth = 0 # Changed in loop in build_tree
self.tree_info = []
self.current_node_index = 0
self.baseline_features = baseline_features
if self.cut_middle_y == 1:
q = np.quantile(self.y, [0.33, 0.66])
self.X = self.X[(self.y < q[0]) | (self.y > q[1]), :]
self.y = self.y[(self.y < q[0]) | (self.y > q[1])]
if self.alpha0 == None:
self.f_auto_alpha()
    def f_auto_alpha(self):
        """Auto-select the cost-complexity penalty self.alpha0.

        Step-halving search: alpha starts at 0 and is nudged by current_d_alpha;
        for each candidate alpha, nIts_per_alpha trees are fit to label-permuted
        y and p is the fraction that survive pruning (false-positive trees).
        The step flips sign / halves whenever p crosses the 0.05 target, and the
        search stops once |step| < min_d_alpha. Mutates self.alpha0 in place and
        prints progress.
        """
        self.alpha0 = 0
        current_d_alpha = 0.1  # current signed step applied to alpha
        min_d_alpha = 0.003  # stop once the step magnitude falls below this
        nIts_per_alpha = 200  # permutation fits per candidate alpha
        print("Finding alpha:")
        while abs(current_d_alpha) > min_d_alpha:
            print('\tAlpha = ', self.alpha0)
            n_non_empty_tree = 0
            for iIt in range(nIts_per_alpha):
                # Fit to y with randomly permuted labels (null data).
                perm_indices = np.random.permutation(len(self.y))
                y_perm = self.y[perm_indices]
                tree0 = self.teg_tree_inner(self.X, y_perm)
                C, nodes_collapsed = self.prune_the_tree(tree0)
                # print(C)
                # print(nodes_collapsed)
                best_collapse_seq_end = np.argmin(C)
                # Best pruning short of full collapse = spurious non-empty tree.
                if (best_collapse_seq_end < (len(C) - 1)):
                    n_non_empty_tree = n_non_empty_tree + 1
            p = n_non_empty_tree / nIts_per_alpha # False-positive tree
            # Bisection on the 0.05 false-positive target.
            if current_d_alpha > 0:
                if p < 0.05:
                    current_d_alpha = -abs(current_d_alpha) / 2
            if current_d_alpha < 0:
                if p >= 0.05:
                    current_d_alpha = abs(current_d_alpha) / 2
            self.alpha0 = self.alpha0 + current_d_alpha
            if (self.alpha0 < 0):
                self.alpha0 = 0
            print('\t-> p, new alpha, new dAlpha = ', p, self.alpha0, current_d_alpha)
        print('Auto-selected alpha = ', self.alpha0);
def build_tree(self):
tree0 = []
cost_complexity_criterion = np.inf
best_peek_crit = np.NaN
best_raw_tree = []
best_C_min_v_crossval = []
best_C_min_v_null = []
p = 1
for peek_ahead_depth in range(self.peek_ahead_max_depth + 1):
print('Finding tree for peek_ahead_depth = ', peek_ahead_depth)
self.peek_ahead_depth = peek_ahead_depth
tree0_this, cost_complexity_criterion_this, raw_tree, C_min_v_crossval, C_min_v_null, p = self.teg_regression_tree()
print('Cost-Complexity Criterion = ', cost_complexity_criterion_this)
if cost_complexity_criterion_this < cost_complexity_criterion:
tree0 = tree0_this
cost_complexity_criterion = cost_complexity_criterion_this
best_peek_crit = peek_ahead_depth
best_raw_tree = raw_tree
best_C_min_v_crossval = C_min_v_crossval
best_C_min_v_null = C_min_v_null
print(" ! New best tree !")
print("\n")
print("Best tree was found at peek-ahead depth = ", best_peek_crit)
Output = {'tree': tree0, 'cost_complexity_criterion':cost_complexity_criterion, 'best_peek_crit':best_peek_crit, 'raw_tree':best_raw_tree, 'CV_distr':best_C_min_v_crossval, 'null_distr':best_C_min_v_null, 'p':p}
self.tree_info = Output
return Output
def teg_regression_tree(self):
if (self.nSamples == 0):
if self.internal_cross_val == 1:
print('Internal cross validation not used with nSamples=0.')
mean_y = np.nanmean(self.y)
sd_y = np.sqrt(np.var(self.y))
y = (self.y - mean_y) / sd_y
tree0 = self.teg_tree_inner(self.X, y)
C, nodes_collapsed = self.prune_the_tree(tree0)
C_min_v_crossval = []
C_min_v_null = []
p = 1
else:
best_mean_y = np.NaN
best_sd_y = np.NaN
best_C_min = np.inf
best_tree = []
best_C = []
best_nodes_collapsed = []
C_min_v_crossval = []
C_min_v_null = []
for iSample in range(self.nSamples):
#print(iSample)
# Random split sample into:
# Subsample to construct tree
# Independent subsample used for entropy per terminal node
# Additionally, create a randomly permuted sample to generate a null distribution over the samples
perm_indices = np.random.permutation(len(self.y))
a = int(np.floor(len(self.y) / 2))
set1_indices = perm_indices[1:a]
set2_indices = perm_indices[a:]
# Split half used to build tree
y_1 = self.y[set1_indices]
X_1 = self.X[set1_indices, :]
# Split half used for cross-validation and NHST
y_2 = self.y[set2_indices]
X_2 = self.X[set2_indices, :]
set3_indices = np.random.permutation(set2_indices)
X_3 = self.X[set3_indices, :]
mean_y_1 = np.nanmean(y_1)
sd_y_1 = np.sqrt(np.var(y_1))
y_1 = (y_1 - mean_y_1) / sd_y_1
mean_y_2 = np.nanmean(y_2)
sd_y_2 = np.sqrt(np.var(y_2))
y_2 = (y_2 - mean_y_2) / sd_y_2
# Null distribution
# y_null = np.random.permutation(y_2) # Already normalized
y_null = y_2
X_null = X_3 # Permuted X
if self.baseline_features != None:
# Baseline columns of X_null remain statistically linked to y_null; other columns are randomized
X_null[:, self.baseline_features] = X_2[:, self.baseline_features]
#
tree0 = self.teg_tree_inner(X_1, y_1)
C, nodes_collapsed = self.prune_the_tree(tree0)
best_collapse_seq_end = np.argmin(C)
nodes_collapsed_choice = nodes_collapsed[0:(best_collapse_seq_end + 1)]
tree0_CV = self.tree_copy(tree0, X_2, y_2)
min_C_CV_original_pruning = self.f_C(tree0_CV, nodes_collapsed_choice)
C_CV, nodes_collapsed_CV = self.prune_the_tree(tree0_CV)
min_C_CV = np.min(C_CV)
tree0_null = self.tree_copy(tree0, X_null, y_null)
min_C_null = self.f_C(tree0_null, nodes_collapsed_choice)
#C_null, nodes_collapsed_null = self.prune_the_tree(tree0_null)
#min_C_null = np.min(C_null)
#
C_min_v_crossval.append(min_C_CV_original_pruning)
C_min_v_null.append(min_C_null)
delta_C = min_C_CV_original_pruning - min_C_null # Find cross-validated tree must distinct from null tree
if self.internal_cross_val == 1:
best_C_min_to_use = min_C_CV
# best_C_min_to_use = delta_C
else:
best_C_min_to_use = np.min(C)
# Pick the tree that has the lowest minimal CCC found in the C vector
if best_C_min_to_use < best_C_min:
best_C_min = best_C_min_to_use
best_mean_y = mean_y_1
best_sd_y = sd_y_1
best_tree = tree0
best_C = C
if (self.internal_cross_val == 1) and (self.internal_cross_val_nodes == 1):
best_nodes_collapsed = nodes_collapsed_CV
else:
best_nodes_collapsed = nodes_collapsed
mean_y = best_mean_y
sd_y = best_sd_y
tree0 = best_tree
C = best_C
nodes_collapsed = best_nodes_collapsed
d_for_NHST = np.array(C_min_v_crossval) - np.array(C_min_v_null)
obs_CV_better = np.sum(d_for_NHST < 0)
p = 1 - binom.cdf(obs_CV_better, d_for_NHST.size, 0.5) # Ties are coded conservatively
#print(tree0)
#print(C)
#print(nodes_collapsed)
# print(len(C))
# Apply selected tree0 to full dataset to get consistent terminal nodes, independent of random split when best tree was found
mean_y = np.nanmean(self.y)
sd_y = np.sqrt(np.var(self.y))
tree0 = self.tree_copy(tree0, self.X, self.y)
self.print_tree(tree0, C, nodes_collapsed, mean_y, sd_y)
collapsed_tree = self.collapse_tree(tree0, C, nodes_collapsed, mean_y, sd_y)
if len(C) > 0:
return collapsed_tree, min(C), tree0, C_min_v_crossval, C_min_v_null, p
else:
return collapsed_tree, np.NaN, tree0, C_min_v_crossval, C_min_v_null, p
def teg_tree_inner(self, X, y, iDepth=0, prev_terminal_node_pred=np.nan, visited_features_v = None):
if not isinstance(visited_features_v, np.ndarray):
visited_features_v = np.array([])
# print("Params: ", twostep, internalEnsemble)
if (iDepth == 0):
self.current_node_index = 0
else:
self.current_node_index = self.current_node_index + 1
# print(node_index_v)
if len(y) > 0:
terminal_node_pred = np.nanmean(y)
else:
terminal_node_pred = 0
SS_pre_split = self.f_SS_assigned_to_node(y)
# Check whether maxdepth passed or y is empty
if (iDepth >= self.maxDepth) or (len(y) <= 1) or (SS_pre_split == 0):
if len(y) > 0:
terminal_node_pred = np.nanmean(y)
else:
terminal_node_pred = prev_terminal_node_pred
return [[np.NaN, terminal_node_pred, SS_pre_split, 0, 0, 0, self.current_node_index, iDepth, y], np.NaN, np.NaN]
# Create branches
# Check one step ahead
best_split_feature = np.NaN
best_split_val = np.NaN
SS_best = np.inf
if self.treegrowth_CV == 1:
perm_indices = np.random.permutation(len(y))
a = int(np.floor(len(y) / 2))
ind_for_splitval = perm_indices[1:a]
ind_for_feature_comparison = perm_indices[a:]
else:
ind_for_splitval = range(len(y))
ind_for_feature_comparison = range(len(y))
for iFeature1 in range(X.shape[1]):
if self.no_downstream_feature_repeats and (np.sum(np.array(visited_features_v) == iFeature1) > 0):
continue
best_split_val_this, SS_best_this = self.f_get_best_split_val(iFeature1, y, X, ind_for_splitval, ind_for_feature_comparison, self.maxDepth - iDepth)
if SS_best_this < SS_best:
#print("New best!")
best_split_feature = iFeature1
best_split_val = best_split_val_this
SS_best = SS_best_this
#print("> iFeature1: ", iFeature1, ", SS_best_this: ", SS_best_this)
if np.isnan(best_split_feature):
if len(y) > 0:
terminal_node_pred = np.nanmean(y)
else:
terminal_node_pred = prev_terminal_node_pred
return [[np.NaN, terminal_node_pred, SS_pre_split, 0, 0, 0, self.current_node_index, iDepth, y], np.NaN, np.NaN]
ind_left = (X[:, best_split_feature] < best_split_val)
ind_right = (X[:, best_split_feature] >= best_split_val)
SS_left = self.f_SS(y[ind_left])
SS_right = self.f_SS(y[ind_right])
best_split = [best_split_feature, best_split_val, SS_pre_split, SS_left, SS_right, len(y), self.current_node_index, iDepth, y]
branch_left = self.teg_tree_inner(X[ind_left, :], y[ind_left], iDepth + 1, prev_terminal_node_pred=terminal_node_pred, visited_features_v=np.append(visited_features_v, best_split_feature))
branch_right = self.teg_tree_inner(X[ind_right, :], y[ind_right], iDepth + 1, visited_features_v=np.append(visited_features_v, best_split_feature))
return [best_split, branch_left, branch_right]
def f_get_best_SS_peek(self, y, X, this_peek_ahead_depth, peek_ahead_maxDepth_limiter, current_peek_depth = 0):
# print(current_peek_depth, peek_ahead_depth, peek_ahead_maxDepth_limiter)
if (len(y) <= 1) or (current_peek_depth >= this_peek_ahead_depth) or (current_peek_depth >= peek_ahead_maxDepth_limiter):
return self.f_SS_for_split(y)
best_SS = np.inf
best_feature_peek = np.nan
best_val_peek = np.nan
for iFeature_this in range(X.shape[1]):
if len(self.peek_ahead_quantiles) == 0:
splitting_vals_this = np.unique(X[:, iFeature_this])
else:
splitting_vals_this = np.quantile(X[:, iFeature_this], self.peek_ahead_quantiles)
for val_this in splitting_vals_this:
ind_left = (X[:, iFeature_this] < val_this)
ind_right = (X[:, iFeature_this] >= val_this)
best_SS_left = self.f_get_best_SS_peek(y[ind_left], X[ind_left, :], this_peek_ahead_depth, peek_ahead_maxDepth_limiter, current_peek_depth + 1)
best_SS_right = self.f_get_best_SS_peek(y[ind_right], X[ind_right, :], this_peek_ahead_depth, peek_ahead_maxDepth_limiter, current_peek_depth + 1)
current_SS = best_SS_left + best_SS_right
if (current_SS < best_SS):
best_SS = current_SS
best_feature_peek = iFeature_this
best_val_peek = val_this
#print(">>> best_feature_peek: ", best_feature_peek, ", best_val_peek: ", best_val_peek, ", best_SS: ", best_SS)
return best_SS
def f_get_SS_for_split(self, iFeature1, val1, X, y, peek_ahead_maxDepth_limiter):
ind_left = (X[:, iFeature1] < val1)
ind_right = (X[:, iFeature1] >= val1)
SS_best_over_peeks = np.inf
for this_peek_ahead_depth in range(self.peek_ahead_depth + 1):
SS_left = self.f_get_best_SS_peek(y[ind_left], X[ind_left, :], this_peek_ahead_depth,
peek_ahead_maxDepth_limiter)
SS_right = self.f_get_best_SS_peek(y[ind_right], X[ind_right, :], this_peek_ahead_depth,
peek_ahead_maxDepth_limiter)
# print(iFeature1, val1, SS_left, SS_right)
SS_this = SS_left + SS_right
if (SS_this < SS_best_over_peeks):
SS_best_over_peeks = SS_this
| |
4.474091E+05, 4.670263E+05, 4.873326E+05, 5.083460E+05, 5.300849E+05,
5.525683E+05, 5.758153E+05, 5.998453E+05, 6.246787E+05, 6.503350E+05, 6.768352E+05,
7.042000E+05, 7.324508E+05, 7.616091E+05, 7.916970E+05, 8.227364E+05, 8.547503E+05,
8.877616E+05, 9.217938E+05, 9.568705E+05, 9.930163E+05, 1.030255E+06, 1.068612E+06,
1.108113E+06, 1.148782E+06, 1.190646E+06, 1.233733E+06, 1.278068E+06, 1.323678E+06,
1.370591E+06, 1.418836E+06, 1.468441E+06, 1.519434E+06, 1.571845E+06, 1.625704E+06,
1.681041E+06, 1.737886E+06, 1.796270E+06, 1.856224E+06, 1.917781E+06, 1.980972E+06,
2.045832E+06, 2.112391E+06, 2.180684E+06, 2.250746E+06, 2.322610E+06, 2.396312E+06,
2.471886E+06, 2.549370E+06, 2.628798E+06, 2.710208E+06, 2.793637E+06, 2.879123E+06,
2.966704E+06, 3.056419E+06, 3.148307E+06, 3.242406E+06, 3.338759E+06, 3.437405E+06,
3.538386E+06, 3.641742E+06, 3.747517E+06, 3.855753E+06, 3.966492E+06, 4.079779E+06,
4.195658E+06, 4.314174E+06, 4.435371E+06, 4.559296E+06, 4.685994E+06, 4.815512E+06,
4.947899E+06, 5.083202E+06, 5.221468E+06, 5.362749E+06, 5.507091E+06, 5.654546E+06,
5.805164E+06, 5.958997E+06, 6.116096E+06, 6.276514E+06, 6.440302E+06, 6.607517E+06,
6.778209E+06, 6.952435E+06, 7.130249E+06, 7.311707E+06, 7.496867E+06, 7.685785E+06,
7.878517E+06, 8.075122E+06,
])
# ---------------------- M = 19, I = 5 ---------------------------
# Auto-generated lookup data (presumably TIPS-2017 total internal partition
# sums keyed by molecule M / isotopologue I — confirm against the generator).
# All tables below share the temperature grid TIPS_2017_ISOT[2].
# Do not edit the numeric values by hand.
M = 19
I = 5
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
4.005750E+00, 7.341299E+01, 1.465248E+02, 2.196725E+02, 2.929080E+02, 3.665343E+02,
4.412543E+02, 5.180980E+02, 5.982340E+02, 6.828353E+02, 7.730027E+02, 8.697668E+02,
9.740684E+02, 1.086823E+03, 1.208879E+03, 1.341069E+03, 1.484232E+03, 1.639180E+03,
1.806745E+03, 1.987801E+03, 2.183188E+03, 2.393832E+03, 2.620622E+03, 2.864546E+03,
3.126558E+03, 3.407697E+03, 3.709043E+03, 4.031658E+03, 4.376687E+03, 4.745342E+03,
5.138807E+03, 5.558356E+03, 6.005318E+03, 6.481015E+03, 6.986846E+03, 7.524300E+03,
8.094828E+03, 8.699968E+03, 9.341331E+03, 1.002056E+04, 1.073933E+04, 1.149937E+04,
1.230251E+04, 1.315057E+04, 1.404547E+04, 1.498914E+04, 1.598358E+04, 1.703092E+04,
1.813322E+04, 1.929266E+04, 2.051152E+04, 2.179207E+04, 2.313669E+04, 2.454777E+04,
2.602783E+04, 2.757939E+04, 2.920503E+04, 3.090744E+04, 3.268941E+04, 3.455366E+04,
3.650309E+04, 3.854064E+04, 4.066930E+04, 4.289217E+04, 4.521234E+04, 4.763307E+04,
5.015761E+04, 5.278935E+04, 5.553171E+04, 5.838817E+04, 6.136237E+04, 6.445786E+04,
6.767842E+04, 7.102788E+04, 7.451010E+04, 7.812908E+04, 8.188878E+04, 8.579338E+04,
8.984714E+04, 9.405423E+04, 9.841911E+04, 1.029462E+05, 1.076400E+05, 1.125052E+05,
1.175464E+05, 1.227686E+05, 1.281765E+05, 1.337751E+05, 1.395696E+05, 1.455650E+05,
1.517667E+05, 1.581798E+05, 1.648099E+05, 1.716626E+05, 1.787433E+05, 1.860579E+05,
1.936120E+05, 2.014118E+05, 2.094632E+05, 2.177721E+05, 2.263450E+05, 2.351881E+05,
2.443078E+05, 2.537105E+05, 2.634030E+05, 2.733918E+05, 2.836841E+05, 2.942864E+05,
3.052060E+05, 3.164500E+05, 3.280255E+05, 3.399401E+05, 3.522011E+05, 3.648163E+05,
3.777931E+05, 3.911396E+05, 4.048633E+05, 4.189728E+05, 4.334758E+05, 4.483808E+05,
4.636960E+05, 4.794302E+05, 4.955917E+05, 5.121894E+05, 5.292323E+05, 5.467291E+05,
5.646892E+05, 5.831215E+05, 6.020357E+05, 6.214410E+05, 6.413472E+05, 6.617640E+05,
6.827011E+05, 7.041687E+05, 7.261769E+05, 7.487358E+05, 7.718559E+05, 7.955477E+05,
8.198219E+05, 8.446893E+05, 8.701605E+05, 8.962469E+05, 9.229596E+05, 9.503098E+05,
9.783092E+05, 1.006969E+06, 1.036302E+06, 1.066319E+06, 1.097032E+06, 1.128454E+06,
1.160596E+06, 1.193472E+06, 1.227094E+06, 1.261474E+06, 1.296626E+06, 1.332563E+06,
1.369297E+06, 1.406843E+06, 1.445213E+06, 1.484422E+06, 1.524483E+06, 1.565410E+06,
1.607217E+06, 1.649919E+06, 1.693530E+06, 1.738064E+06, 1.783536E+06, 1.829962E+06,
1.877355E+06, 1.925733E+06, 1.975109E+06, 2.025500E+06, 2.076920E+06, 2.129387E+06,
2.182916E+06, 2.237523E+06,
])
# ---------------------- M = 20, I = 1 ---------------------------
M = 20
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.091110E+00, 4.947081E+01, 1.416480E+02, 2.592507E+02, 3.983023E+02, 5.559709E+02,
7.302920E+02, 9.198257E+02, 1.123477E+03, 1.340418E+03, 1.570058E+03, 1.812038E+03,
2.066230E+03, 2.332728E+03, 2.611836E+03, 2.904051E+03, 3.210045E+03, 3.530639E+03,
3.866789E+03, 4.219566E+03, 4.590146E+03, 4.979793E+03, 5.389860E+03, 5.821776E+03,
6.277046E+03, 6.757251E+03, 7.264045E+03, 7.799159E+03, 8.364401E+03, 8.961659E+03,
9.592904E+03, 1.026019E+04, 1.096512E+04, 1.170966E+04, 1.249659E+04, 1.332840E+04,
1.420757E+04, 1.513680E+04, 1.611862E+04, 1.715616E+04, 1.825218E+04, 1.940974E+04,
2.063218E+04, 2.192282E+04, 2.328522E+04, 2.472293E+04, 2.623979E+04, 2.783973E+04,
2.952702E+04, 3.130573E+04, 3.318041E+04, 3.515575E+04, 3.723663E+04, 3.942790E+04,
4.173473E+04, 4.416289E+04, 4.671773E+04, 4.940519E+04, 5.223118E+04, 5.520223E+04,
5.832512E+04, 6.160614E+04, 6.505245E+04, 6.867173E+04, 7.247112E+04, 7.645903E+04,
8.064278E+04, 8.503157E+04, 8.963402E+04, 9.445875E+04, 9.951574E+04, 1.048142E+05,
1.103643E+05, 1.161765E+05, 1.222616E+05, 1.286309E+05, 1.352955E+05, 1.422674E+05,
1.495587E+05, 1.571827E+05, 1.651519E+05, 1.734802E+05, 1.821813E+05, 1.912699E+05,
2.007604E+05, 2.106690E+05, 2.210108E+05, 2.318027E+05, 2.430609E+05, 2.548036E+05,
2.670483E+05, 2.798136E+05, 2.931184E+05, 3.069823E+05, 3.214256E+05, 3.364693E+05,
3.521348E+05, 3.684439E+05, 3.854199E+05, 4.030855E+05, 4.214650E+05, 4.405834E+05,
4.604660E+05, 4.811396E+05, 5.026303E+05, 5.249660E+05, 5.481756E+05, 5.722878E+05,
5.973333E+05, 6.233430E+05, 6.503482E+05, 6.783817E+05, 7.074775E+05, 7.376698E+05,
7.689940E+05, 8.014861E+05, 8.351831E+05, 8.701245E+05, 9.063479E+05, 9.438955E+05,
9.828070E+05, 1.023125E+06, 1.064895E+06, 1.108158E+06, 1.152963E+06, 1.199355E+06,
1.247383E+06, 1.297095E+06, 1.348543E+06, 1.401778E+06, 1.456853E+06, 1.513823E+06,
1.572742E+06, 1.633669E+06, 1.696660E+06, 1.761777E+06, 1.829081E+06, 1.898632E+06,
1.970497E+06, 2.044741E+06, 2.121430E+06, 2.200634E+06, 2.282422E+06, 2.366867E+06,
2.454042E+06, 2.544023E+06, 2.636886E+06, 2.732710E+06, 2.831576E+06, 2.933566E+06,
3.038763E+06, 3.147255E+06, 3.259128E+06, 3.374473E+06, 3.493381E+06, 3.615946E+06,
3.742264E+06, 3.872434E+06, 4.006554E+06, 4.144726E+06, 4.287056E+06, 4.433650E+06,
4.584616E+06, 4.740066E+06, 4.900110E+06, 5.064870E+06, 5.234458E+06, 5.408999E+06,
5.588613E+06, 5.773427E+06, 5.963569E+06, 6.159169E+06, 6.360361E+06, 6.567283E+06,
6.780068E+06, 6.998864E+06,
])
# ---------------------- M = 20, I = 2 ---------------------------
M = 20
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.519830E+00, 1.041424E+02, 2.906766E+02, 5.317342E+02, 8.170004E+02, 1.140498E+03,
1.498191E+03, 1.887134E+03, 2.305068E+03, 2.750313E+03, 3.221668E+03, 3.718356E+03,
4.240095E+03, 4.787152E+03, 5.360055E+03, 5.959825E+03, 6.587858E+03, 7.245886E+03,
7.935732E+03, 8.659593E+03, 9.420003E+03, 1.021942E+04, 1.106064E+04, 1.194646E+04,
1.288013E+04, 1.386475E+04, 1.490370E+04, 1.600056E+04, 1.715893E+04, 1.838256E+04,
1.967551E+04, 2.104202E+04, 2.248633E+04, 2.401319E+04, 2.562698E+04, 2.733280E+04,
2.913575E+04, 3.104133E+04, 3.305479E+04, 3.518252E+04, 3.743016E+04, 3.980401E+04,
4.231092E+04, 4.495767E+04, 4.775159E+04, 5.069997E+04, 5.381064E+04, 5.709169E+04,
6.055190E+04, 6.419956E+04, 6.804403E+04, 7.209494E+04, 7.636229E+04, 8.085600E+04,
8.558671E+04, 9.056622E+04, 9.580554E+04, 1.013168E+05, 1.071122E+05, 1.132050E+05,
1.196093E+05, 1.263378E+05, 1.334053E+05, 1.408275E+05, 1.486191E+05, 1.567972E+05,
1.653770E+05, 1.743773E+05, 1.838157E+05, 1.937100E+05, 2.040806E+05, 2.149463E+05,
2.263282E+05, 2.382475E+05, 2.507265E+05, 2.637882E+05, 2.774557E+05, 2.917531E+05,
3.067057E+05, 3.223406E+05, 3.386834E+05, 3.557626E+05, 3.736064E+05, 3.922448E+05,
4.117074E+05, 4.320273E+05, 4.532358E+05, 4.753672E+05, 4.984549E+05, 5.225361E+05,
5.476468E+05, 5.738251E+05, 6.011099E+05, 6.295411E+05, 6.591607E+05, 6.900115E+05,
7.221373E+05, 7.555835E+05, 7.903966E+05, 8.266243E+05, 8.643161E+05, 9.035230E+05,
9.442971E+05, 9.866935E+05, 1.030765E+06, 1.076570E+06, 1.124167E+06, 1.173615E+06,
1.224977E+06, 1.278317E+06, 1.333697E+06, 1.391187E+06, 1.450855E+06, 1.512772E+06,
1.577010E+06, 1.643643E+06, 1.712747E+06, 1.784403E+06, 1.858688E+06, 1.935688E+06,
2.015486E+06, 2.098169E+06, 2.183827E+06, 2.272550E+06, 2.364432E+06, 2.459570E+06,
2.558063E+06, 2.660012E+06, 2.765517E+06, 2.874690E+06, 2.987634E+06, 3.104464E+06,
3.225293E+06, 3.350239E+06, 3.479418E+06, 3.612957E+06, 3.750979E+06, 3.893612E+06,
4.040990E+06, 4.193245E+06, 4.350513E+06, 4.512941E+06, 4.680667E+06, 4.853844E+06,
5.032618E+06, 5.217145E+06, 5.407584E+06, 5.604095E+06, 5.806845E+06, 6.016000E+06,
6.231733E+06, 6.454222E+06, 6.683645E+06, 6.920189E+06, 7.164040E+06, 7.415389E+06,
7.674437E+06, 7.941381E+06, 8.216427E+06, 8.499785E+06, 8.791667E+06, 9.092294E+06,
9.401888E+06, 9.720676E+06, 1.004889E+07, 1.038677E+07, 1.073455E+07, 1.109249E+07,
1.146083E+07, 1.183984E+07, 1.222977E+07, 1.263090E+07, 1.304350E+07, 1.346784E+07,
1.390421E+07, 1.435290E+07,
])
# ---------------------- M = 20, I = 3 ---------------------------
M = 20
I = 3
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
7.773000E-01, 5.324867E+01, 1.486677E+02, 2.719834E+02, 4.179176E+02, 5.834117E+02,
7.664008E+02, 9.653781E+02, 1.179188E+03, 1.406969E+03, 1.648109E+03, 1.902210E+03,
2.169127E+03, 2.448996E+03, 2.742088E+03, 3.048926E+03, 3.370223E+03, 3.706865E+03,
4.059785E+03, 4.430108E+03, 4.819128E+03, 5.228105E+03, 5.658465E+03, 6.111648E+03,
6.589308E+03, 7.093032E+03, 7.624553E+03, 8.185700E+03, 8.778316E+03, 9.404317E+03,
1.006578E+04, 1.076488E+04, 1.150378E+04, 1.228491E+04, 1.311052E+04, 1.398321E+04,
1.490558E+04, 1.588047E+04, 1.691055E+04, 1.799908E+04, 1.914896E+04, 2.036341E+04,
2.164594E+04, 2.300000E+04, 2.442935E+04, 2.593773E+04, 2.752913E+04, 2.920771E+04,
3.097792E+04, 3.284406E+04, 3.481086E+04, 3.688330E+04, 3.906645E+04, 4.136541E+04,
4.378561E+04, 4.633311E+04, 4.901353E+04, 5.183307E+04, 5.479795E+04, 5.791503E+04,
6.119141E+04, 6.463369E+04, 6.824939E+04, 7.204656E+04, 7.603270E+04, 8.021661E+04,
8.460600E+04, 8.921050E+04, 9.403916E+04, 9.910103E+04, 1.044066E+05, 1.099654E+05,
1.157884E+05, 1.218862E+05, 1.282704E+05, 1.349528E+05, 1.419450E+05, 1.492595E+05,
1.569092E+05, 1.649080E+05, 1.732688E+05, 1.820065E+05, 1.911353E+05, 2.006707E+05,
2.106276E+05, 2.210232E+05, 2.318735E+05, 2.431958E+05, 2.550074E+05, 2.673273E+05,
2.801738E+05, 2.935665E+05, 3.075254E+05, 3.220707E+05, 3.372240E+05, 3.530071E+05,
3.694426E+05, 3.865535E+05, 4.043638E+05, 4.228977E+05, 4.421807E+05, 4.622389E+05,
4.830988E+05, 5.047886E+05, 5.273357E+05, 5.507693E+05, 5.751197E+05, 6.004172E+05,
6.266939E+05, 6.539820E+05, 6.823147E+05, 7.117262E+05, 7.422522E+05, 7.739285E+05,
8.067927E+05, 8.408819E+05, 8.762352E+05, 9.128942E+05, 9.508983E+05, 9.902916E+05,
1.031116E+06, 1.073416E+06, 1.117238E+06, 1.162629E+06, 1.209636E+06, 1.258308E+06,
1.308697E+06, 1.360853E+06, 1.414829E+06, 1.470682E+06, 1.528464E+06, 1.588234E+06,
1.650050E+06, 1.713972E+06, 1.780059E+06, 1.848378E+06, 1.918989E+06, 1.991960E+06,
2.067358E+06, 2.145251E+06, 2.225709E+06, 2.308807E+06, 2.394615E+06, 2.483212E+06,
2.574672E+06, 2.669076E+06, 2.766504E+06, 2.867038E+06, 2.970764E+06, 3.077768E+06,
3.188136E+06, 3.301961E+06, 3.419333E+06, 3.540348E+06, 3.665102E+06, 3.793692E+06,
3.926220E+06, 4.062788E+06, 4.203501E+06, 4.348466E+06, 4.497792E+06, 4.651592E+06,
4.809980E+06, 4.973071E+06, 5.140983E+06, 5.313842E+06, 5.491767E+06, 5.674888E+06,
5.863331E+06, 6.057231E+06, 6.256719E+06, 6.461935E+06, 6.673017E+06, 6.890111E+06,
7.113357E+06, 7.342907E+06,
])
# ---------------------- M = 21, I = 1 ---------------------------
M = 21
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
3.981460E+00, 3.304167E+02, 9.292313E+02, 1.704088E+03, 2.621559E+03, 3.662421E+03,
4.814121E+03, 6.068541E+03, 7.421071E+03, 8.869781E+03, 1.041512E+04, 1.205911E+04,
1.380519E+04, 1.565730E+04, 1.762056E+04, 1.970014E+04, 2.190181E+04, 2.423163E+04,
2.669554E+04, 2.929991E+04, 3.205132E+04, 3.495586E+04, 3.802056E+04, 4.125226E+04,
4.465708E+04, 4.824259E+04, 5.201564E+04, 5.598309E+04, 6.015219E+04, 6.453072E+04,
6.912550E+04, 7.394438E+04, 7.899525E+04, 8.428610E+04, 8.982435E+04, 9.561869E+04,
1.016773E+05, 1.080083E+05, 1.146215E+05, 1.215237E+05, 1.287266E+05, 1.362371E+05,
1.440654E+05, 1.522203E+05, 1.607132E+05, 1.695523E+05, 1.787484E+05, 1.883127E+05,
1.982535E+05, 2.085833E+05, 2.193115E+05, 2.304511E+05, 2.420107E+05, 2.540037E+05,
2.664413E+05, 2.793348E+05, 2.926958E+05, 3.065372E+05, 3.208723E+05, 3.357116E+05,
3.510703E+05, 3.669592E+05, 3.833922E+05, 4.003837E+05, 4.179467E+05, 4.360939E+05,
4.548404E+05, 4.741996E+05, 4.941865E+05, 5.148171E+05, 5.361032E+05, 5.580627E+05,
5.807100E+05, 6.040616E+05, 6.281301E+05, 6.529321E+05, 6.784872E+05, 7.048083E+05,
7.319154E+05, 7.598219E+05, 7.885455E+05, 8.181023E+05, 8.485133E+05, 8.797949E+05,
9.119639E+05, 9.450366E+05, 9.790353E+05, 1.013977E+06, 1.049882E+06, 1.086768E+06,
1.124652E+06, 1.163556E+06, 1.203500E+06, 1.244505E+06, 1.286588E+06, 1.329770E+06,
1.374075E+06, 1.419521E+06, 1.466130E+06, 1.513921E+06, 1.562920E+06, 1.613148E+06,
1.664626E+06, 1.717375E+06, 1.771419E+06, 1.826780E+06, 1.883484E+06, 1.941552E+06,
2.001009E+06, 2.061877E+06, 2.124181E+06, 2.187948E+06, 2.253196E+06, 2.319956E+06,
2.388251E+06, 2.458107E+06, 2.529549E+06, 2.602607E+06, 2.677298E+06, 2.753658E+06,
2.831706E+06, 2.911473E+06, 2.992987E+06, 3.076272E+06, 3.161364E+06, 3.248277E+06,
3.337053E+06, 3.427717E+06, 3.520289E+06, 3.614807E+06, 3.711301E+06, 3.809798E+06,
3.910326E+06, 4.012918E+06, 4.117603E+06, 4.224413E+06, 4.333378E+06, 4.444528E+06,
4.557903E+06, 4.673519E+06, 4.791422E+06, 4.911642E+06, 5.034208E+06, 5.159155E+06,
5.286513E+06, 5.416319E+06, 5.548609E+06, 5.683412E+06, 5.820768E+06, 5.960706E+06,
6.103265E+06, 6.248481E+06, 6.396387E+06, 6.547017E+06, 6.700413E+06, 6.856604E+06,
7.015634E+06, 7.177539E+06, 7.342350E+06, 7.510115E+06, 7.680862E+06, 7.854632E+06,
8.031473E+06, 8.211412E+06, 8.394491E+06, 8.580756E+06, 8.770230E+06, 8.962977E+06,
9.159022E+06, 9.358408E+06, 9.561181E+06, 9.767369E+06, 9.977034E+06, 1.019021E+07,
1.040692E+07, 1.062723E+07,
])
# ---------------------- M = 21, I = 2 ---------------------------
M = 21
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
4.058120E+00, 3.362054E+02, 9.456074E+02, 1.734179E+03, 2.667896E+03, 3.727193E+03,
4.899294E+03, 6.175937E+03, 7.552431E+03, 9.026809E+03, 1.059953E+04, 1.227265E+04,
1.404968E+04, 1.593461E+04, 1.793265E+04, 2.004908E+04, 2.228977E+04, 2.466088E+04,
2.716846E+04, 2.981898E+04, 3.261916E+04, 3.557517E+04, 3.869418E+04, 4.198316E+04,
4.544832E+04, 4.909737E+04, 5.293730E+04, 5.697506E+04, 6.121805E+04, 6.567419E+04,
7.035040E+04, 7.525467E+04, 8.039507E+04, 8.577969E+04, 9.141609E+04, 9.731312E+04,
1.034791E+05, 1.099223E+05, 1.166527E+05, 1.236773E+05, 1.310079E+05, 1.386514E+05,
1.466185E+05, 1.549179E+05, 1.635614E+05, 1.725571E+05, 1.819162E+05, 1.916501E+05,
2.017671E+05, 2.122799E+05, 2.231982E+05, 2.345353E+05, 2.462997E+05, 2.585053E+05,
2.711634E+05, 2.842854E+05, 2.978833E+05, 3.119700E+05, 3.265592E+05, 3.416615E+05,
3.572924E+05, 3.734629E+05, 3.901872E+05, 4.074799E+05, 4.253541E+05, 4.438230E+05,
4.629018E+05, 4.826041E+05, 5.029453E+05, 5.239416E+05, 5.456049E+05, 5.679538E+05,
5.910025E+05, 6.147679E+05, 6.392630E+05, 6.645047E+05, 6.905128E+05, 7.173004E+05,
7.448879E+05, 7.732890E+05, 8.025217E+05, 8.326025E+05, 8.635525E+05, 8.953886E+05,
9.281278E+05, 9.617867E+05, 9.963880E+05, 1.031949E+06, 1.068491E+06, 1.106030E+06,
1.144586E+06, 1.184179E+06, 1.224831E+06, 1.266563E+06, 1.309392E+06, 1.353339E+06,
1.398430E+06, 1.444682E+06, 1.492116E+06, 1.540754E+06, 1.590622E+06, 1.641741E+06,
1.694130E+06, 1.747816E+06, 1.802817E+06, 1.859159E+06, 1.916869E+06, 1.975965E+06,
2.036476E+06, 2.098423E+06, 2.161832E+06, 2.226729E+06, 2.293134E+06, 2.361077E+06,
2.430583E+06, 2.501677E+06, 2.574385E+06, 2.648739E+06, 2.724753E+06, 2.802466E+06,
2.881898E+06, 2.963079E+06, 3.046038E+06, 3.130799E+06, 3.217399E+06, 3.305853E+06,
3.396203E+06, 3.488473E+06, 3.582687E+06, 3.678880E+06, 3.777084E+06, 3.877327E+06,
3.979638E+06, 4.084047E+06, 4.190589E+06, 4.299291E+06, 4.410188E+06, 4.523309E+06,
4.638693E+06, 4.756358E+06, 4.876351E+06, 4.998702E+06, 5.123440E+06, 5.250602E+06,
5.380218E+06, 5.512325E+06, 5.646960E+06, 5.784152E+06, 5.923943E+06, 6.066361E+06,
6.211446E+06, 6.359237E+06, 6.509766E+06, 6.663065E+06, 6.819180E+06, 6.978140E+06,
7.139989E+06, 7.304764E+06, 7.472497E+06, 7.643236E+06, 7.817009E+06, 7.993858E+06,
8.173833E+06, 8.356963E+06, 8.543287E+06, 8.732853E+06, 8.925687E+06, 9.121849E+06,
9.321369E+06, 9.524290E+06, 9.730657E+06, 9.940501E+06, 1.015388E+07, 1.037083E+07,
1.059139E+07, 1.081561E+07,
])
# ---------------------- M = 22, I = 1 ---------------------------
M = 22
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[3]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
6.029370E+00, 3.298562E+01, 6.440722E+01, 9.584644E+01, 1.272919E+02, 1.587414E+02,
1.901941E+02, 2.216498E+02, 2.531083E+02, 2.845695E+02, 3.160334E+02, 3.475000E+02,
3.789694E+02, 4.104418E+02, 4.419180E+02, 4.733988E+02, 5.048860E+02, 5.363820E+02,
5.678901E+02, 5.994148E+02, 6.309613E+02, | |
# coding=utf-8
import os
import random
import sys
from collections import namedtuple
from enum import Enum
import ccobra
from anytree import AnyNode, LevelOrderIter
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../..")))
from modular_models.util import sylutil
from modular_models.models.basic_models.interface import SyllogisticReasoningModel
class PSYCOP(SyllogisticReasoningModel):
""" PSYCOP model according to Rips (1994). """
    def __init__(self):
        """Set up the default parameter values and the parameter grid.

        Parameters control which implicatures are added to the premises /
        conclusion and which PSYCOP inference rules are available.
        """
        SyllogisticReasoningModel.__init__(self)
        # Propensity to guess instead of replying NVC if no conclusion is found.
        self.params["guess"] = 0.0
        # Whether or not existential implicatures are added to the forward propositions.
        self.params["premise_implicatures_existential"] = True
        # Whether or not Gricean implicatures are added to the forward propositions.
        self.params["premise_implicatures_grice"] = True
        # Whether or not proving conclusion implicatures is required to prove a conclusion.
        self.params["conclusion_implicatures"] = False
        # Availability of the individual PSYCOP inference rules.
        self.params["rule_transitivity"] = True
        self.params["rule_exclusivity"] = True
        self.params["rule_conversion"] = True
        self.params["rule_fw_and_elimination"] = True
        self.params["rule_bw_and_introduction"] = True
        self.params["rule_bw_conjunctive_syllogism"] = True
        self.params["rule_bw_if_elimination"] = True
        self.params["rule_bw_not_introduction"] = True
        # Candidate values searched during parameter fitting.
        self.param_grid["guess"] = [0.0, 1.0]
        self.param_grid["premise_implicatures_existential"] = [True, False]
        self.param_grid["premise_implicatures_grice"] = [True, False]
        self.param_grid["conclusion_implicatures"] = [False, True]
        self.param_grid["rule_transitivity"] = [True, False]
        self.param_grid["rule_exclusivity"] = [True, False]
        self.param_grid["rule_conversion"] = [True, False]
        self.param_grid["rule_fw_and_elimination"] = [True, False]
        self.param_grid["rule_bw_and_introduction"] = [True, False]
        self.param_grid["rule_bw_conjunctive_syllogism"] = [True, False]
        self.param_grid["rule_bw_if_elimination"] = [True, False]
        self.param_grid["rule_bw_not_introduction"] = [True, False]
class Prop:
""" abstract representation of a categorical proposition like a syllogistic premise or
conclusion.
Example:
All A are B = Prop(PT.implies, Prop(PT.atomic, Atom("A", 936), None), Prop(PT.atomic, Atom("B", 936), None))
"""
def __init__(self, type, arg1, arg2):
# proposition type like atom or conjunction
self.type = type
self.v1 = arg1
self.v2 = arg2
def __repr__(self):
if self.type == PSYCOP.PT.atomic:
if self.v1.is_name:
if self.v1.hat:
var = "â"
else:
var = "a"
else:
var = "x"
return self.v1.predicate + "(" + var + "_" + str(self.v1.arg_id) + ")"
elif self.type == PSYCOP.PT.negation:
return "NOT (" + self.v1.__repr__() + ")"
elif self.type == PSYCOP.PT.implies:
return "(" + self.v1.__repr__() + " -> " + self.v2.__repr__() + ")"
elif self.type == PSYCOP.PT.conjunction:
return "(" + self.v1.__repr__() + " AND " + self.v2.__repr__() + ")"
# proposition type
PT = Enum("PT", "atomic negation implies conjunction")
""" representation of an atom = predicate + argument. Additional info (hat, name) is required by
PSYCOP, example:
Red(â) = Atom("Red", i, True, True) where i identifies â """
Atom = namedtuple("Atom", "predicate arg_id is_name hat")
# unique identifier for objects
max_id = -1
def get_fresh_id(self):
self.max_id = self.max_id + 1
return self.max_id
def get_atomic_proposition(self, predicate, arg_id, is_name, hat):
return self.Prop(self.PT.atomic, self.Atom(predicate, arg_id, is_name, hat), None)
def encode_proposition(self, p, hat=False):
"""
>>> m = PSYCOP()
>>> m.encode_proposition("Aac")
(A(x_0) -> C(x_0))
>>> m.encode_proposition("Iac")
(A(a_1) AND C(a_1))
"""
i = self.get_fresh_id()
if p[0] == "A":
# A(x) -> B(x)
return self.Prop(self.PT.implies,
self.get_atomic_proposition(p[1].upper(), i, False, hat),
self.get_atomic_proposition(p[2].upper(), i, False, hat))
elif p[0] == "E":
# not (A(x) and B(x))
return self.Prop(self.PT.negation,
self.Prop(self.PT.conjunction,
self.get_atomic_proposition(p[1].upper(), i, False, hat),
self.get_atomic_proposition(p[2].upper(), i, False, hat)),
None)
elif p[0] == "I":
# A(a) and B(a)
return self.Prop(self.PT.conjunction,
self.get_atomic_proposition(p[1].upper(), i, True, hat),
self.get_atomic_proposition(p[2].upper(), i, True, hat))
else:
# A(a) and not B(a)
return self.Prop(self.PT.conjunction,
self.get_atomic_proposition(p[1].upper(), i, True, hat),
self.Prop(self.PT.negation,
self.get_atomic_proposition(p[2].upper(), i, True, hat),
None))
def encode_premises(self, syllogism, ex_implicatures=True, grice_implicatures=False):
""" Encode premises as propositions, possibly adding implicatures """
to = sylutil.term_order(syllogism[2])
premises = []
pr = []
for i in [0, 1]:
pr.append(syllogism[i] + to[i])
pr = sylutil.add_implicatures(pr, existential=ex_implicatures, gricean=grice_implicatures)
for p in pr:
premises.append(self.encode_proposition(p, True))
return premises
    def isomorphic(self, p1, p2, same_nameness=False):
        """ same_nameness = True <-> "notational variant", see p. 197
        >>> m = PSYCOP()
        >>> a0 = m.Prop(m.PT.atomic, m.Atom("A", 0, False, False), None)
        >>> a1 = m.Prop(m.PT.atomic, m.Atom("A", 1, False, False), None)
        >>> b = m.Prop(m.PT.atomic, m.Atom("B", 2, False, False), None)
        >>> p1 = m.Prop(m.PT.implies, a0, b)
        >>> p2 = m.Prop(m.PT.implies, a1, b)
        >>> m.isomorphic(p1,p2)
        True
        >>> m.isomorphic(m.Prop(m.PT.negation, p1, None),m.Prop(m.PT.negation, p2, None))
        True
        >>> m.isomorphic(p1,m.Prop(m.PT.negation, p2, None))
        False
        >>> p3 = m.Prop(m.PT.conjunction, a1, b)
        >>> m.isomorphic(p1,p3)
        False
        """
        # Two missing sub-propositions are trivially isomorphic; one missing is not.
        if p1 is None and p2 is None:
            return True
        if p1 is None or p2 is None:
            return False
        # Atom vs. atom: compare predicates (and nameness when requested).
        if type(p1) is self.Atom and type(p2) is self.Atom:
            if p1.predicate == p2.predicate:
                if same_nameness:
                    if p1.is_name == p2.is_name:
                        return True
                    return False
                return True
            return False
        # Atom vs. compound never matches.
        if type(p1) is self.Atom or type(p2) is self.Atom:
            return False
        # Same connective: recurse structurally.
        # NOTE(review): same_nameness is NOT propagated into the recursion, so
        # for compound Props the nameness check is never reached — confirm
        # whether this is the intended reading of "notational variant".
        if p1.type == p2.type:
            return self.isomorphic(p1.v1, p2.v1) and self.isomorphic(p1.v2, p2.v2)
        return False
def contains_isomorphic_proposition(self, domain, p):
for pd in domain:
if self.isomorphic(pd, p):
return True
return False
def atom_prop_replace_properties(self, p, new_arg_id=None, new_is_name=None, new_hat=None):
if new_arg_id is None:
new_arg_id = p.v1.arg_id
if new_is_name is None:
new_is_name = p.v1.is_name
if new_hat is None:
new_hat = p.v1.hat
return self.Prop(self.PT.atomic,
self.Atom(p.v1.predicate, new_arg_id, new_is_name, new_hat), None)
def prop_replace_properties(self, p, new_arg_id=None, new_is_name=None, new_hat=None):
if p.type == self.PT.negation:
return self.Prop(self.PT.negation,
self.atom_prop_replace_properties(p.v1, new_arg_id, new_is_name,
new_hat), None)
return self.atom_prop_replace_properties(p, new_arg_id, new_is_name, new_hat)
    def rule_transitivity(self, p1, p2, domain):
        """ PSYCOP transitivity rule
        >>> m = PSYCOP()
        >>> i = m.get_fresh_id()
        >>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
        >>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
        >>> c = m.Prop(m.PT.atomic, m.Atom("C", i, False, False), None)
        >>> p1 = m.Prop(m.PT.implies, a, b)
        >>> p2 = m.Prop(m.PT.implies, b, c)
        >>> m.rule_transitivity(p1, p2, set())
        [(A(x_1) -> C(x_1))]
        """
        # Both premises must be implications ...
        if p1.type == self.PT.implies and p2.type == self.PT.implies:
            # ... between plain atoms ...
            if p1.v1.type == self.PT.atomic and p1.v2.type == self.PT.atomic and \
                    p2.v1.type == self.PT.atomic and p2.v2.type == self.PT.atomic:
                # ... whose antecedent and consequent share one argument ...
                if p1.v1.v1.arg_id == p1.v2.v1.arg_id and p2.v1.v1.arg_id == p2.v2.v1.arg_id:
                    # ... ranging over variables, not names ...
                    if not p1.v1.v1.is_name and not p1.v2.v1.is_name and not p2.v1.v1.is_name and not p2.v2.v1.is_name:
                        # ... with p1's consequent matching p2's antecedent (the middle term).
                        if p1.v2.v1.predicate == p2.v1.v1.predicate:
                            i = self.get_fresh_id()
                            # Conclude antecedent(p1) -> consequent(p2) over a fresh variable.
                            p = self.Prop(self.PT.implies,
                                          self.atom_prop_replace_properties(p1.v1, i),
                                          self.atom_prop_replace_properties(p2.v2, i))
                            # Only yield conclusions not already known (up to isomorphism).
                            if not self.contains_isomorphic_proposition(domain, p):
                                return [p]
        return []
    def rule_exclusivity(self, p1, p2, domain):
        """ PSYCOP exclusivity rule
        >>> m = PSYCOP()
        >>> i = m.get_fresh_id()
        >>> j = m.get_fresh_id()
        >>> ai = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
        >>> bi = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
        >>> bj = m.Prop(m.PT.atomic, m.Atom("B", j, False, False), None)
        >>> cj = m.Prop(m.PT.atomic, m.Atom("C", j, False, False), None)
        >>> p1 = m.Prop(m.PT.implies, ai, bi)
        >>> p2 = m.Prop(m.PT.negation, m.Prop(m.PT.conjunction, bj, cj), None)
        >>> m.rule_exclusivity(p1, p2, set())
        [NOT ((A(x_2) AND C(x_2)))]
        """
        # p1 must be an implication, p2 a negation ...
        if p1.type == self.PT.implies and p2.type == self.PT.negation:
            # ... of a conjunction ...
            if p2.v1.type == self.PT.conjunction:
                # ... where all four leaves are plain atoms ...
                if p1.v1.type == self.PT.atomic and p1.v2.type == self.PT.atomic:
                    if p2.v1.v1.type == self.PT.atomic and p2.v1.v2.type == self.PT.atomic:
                        # ... each proposition over a single shared argument ...
                        if p1.v1.v1.arg_id == p1.v2.v1.arg_id and p2.v1.v1.v1.arg_id == p2.v1.v2.v1.arg_id:
                            # ... ranging over variables, not names ...
                            if not p1.v1.v1.is_name and not p1.v2.v1.is_name and not p2.v1.v1.v1.is_name and not p2.v1.v2.v1.is_name:
                                # ... with p1's consequent matching the first conjunct (middle term).
                                if p1.v2.v1.predicate == p2.v1.v1.v1.predicate:
                                    i = self.get_fresh_id()
                                    # Conclude NOT(antecedent(p1) AND second conjunct) over a fresh variable.
                                    p = self.Prop(self.PT.negation,
                                                  self.Prop(self.PT.conjunction,
                                                            self.atom_prop_replace_properties(p1.v1,
                                                                                              i),
                                                            self.atom_prop_replace_properties(
                                                                p2.v1.v2, i)),
                                                  None)
                                    # Only yield conclusions not already known (up to isomorphism).
                                    if not self.contains_isomorphic_proposition(domain, p):
                                        return [p]
        return []
def rule_conversion(self, p, domain):
""" PSYCOP conversion rule
>>> m = PSYCOP()
>>> i = m.get_fresh_id()
>>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
>>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
>>> p = m.Prop(m.PT.negation, m.Prop(m.PT.conjunction, a, b), None)
>>> m.rule_conversion(p, set())
[NOT ((B(x_1) AND A(x_1)))]
"""
if p.type == self.PT.negation:
if p.v1.type == self.PT.conjunction:
if p.v1.v1.type == self.PT.atomic and p.v1.v2.type == self.PT.atomic:
i = self.get_fresh_id()
p_new = self.Prop(self.PT.negation,
self.Prop(self.PT.conjunction,
self.atom_prop_replace_properties(p.v1.v2, i),
self.atom_prop_replace_properties(p.v1.v1, i)),
None)
if not self.contains_isomorphic_proposition(domain, p_new):
return [p_new]
return []
def get_leftmost_atom(self, p):
""" Returns leftmost atom in p. """
if p.type == self.PT.atomic:
return p.v1
else:
return self.get_leftmost_atom(p.v1)
def matching(self, p, g):
if self.isomorphic(p, g):
# note: the leftmost atom is equal to any atom in the proposition
pa, ga = self.get_leftmost_atom(p), self.get_leftmost_atom(g)
if pa == ga:
# Propositions are equal
return True
if not pa.is_name and not ga.is_name:
# Matching 1
return True
if pa.is_name and ga.is_name and not ga.hat:
# Matching 2
return True
if not pa.is_name and ga.is_name:
# Matching 4
return True # ?
return False
def rule_forward_and_elimination(self, p):
if p.type == self.PT.conjunction:
return [p.v1, p.v2]
return []
def rule_backward_and_introduction(self, g):
return self.rule_forward_and_elimination(g)
def rule_backward_conjunctive_syllogism(self, p, g):
"""
a = m.Prop(m.PT.atomic, v1='a', v2=None)
b = m.Prop(m.PT.atomic, v1='b', v2=None)
>>> m = PSYCOP()
>>> i = m.get_fresh_id()
>>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
>>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
>>> prop = m.Prop(m.PT.negation, m.Prop(m.PT.conjunction, a, b), None)
>>> m.rule_backward_conjunctive_syllogism(prop, m.Prop(m.PT.negation, a, None))
[B(x_0)]
"""
if g.type == self.PT.negation and | |
self.all_docs[docname] = max(
time.time(), path.getmtime(self.doc2path(docname)))
if self.versioning_condition:
old_doctree = None
if self.versioning_compare:
# get old doctree
try:
with open(self.doc2path(docname,
self.doctreedir, '.doctree'), 'rb') as f:
old_doctree = pickle.load(f)
except EnvironmentError:
pass
# add uids for versioning
if not self.versioning_compare or old_doctree is None:
list(add_uids(doctree, self.versioning_condition))
else:
list(merge_doctrees(
old_doctree, doctree, self.versioning_condition))
# make it picklable
doctree.reporter = None
doctree.transformer = None
doctree.settings.warning_stream = None
doctree.settings.env = None
doctree.settings.record_dependencies = None
# cleanup
self.temp_data.clear()
self.ref_context.clear()
roles._roles.pop('', None) # if a document has set a local default role
# save the parsed doctree
doctree_filename = self.doc2path(docname, self.doctreedir,
'.doctree')
ensuredir(path.dirname(doctree_filename))
with open(doctree_filename, 'wb') as f:
pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# utilities to use while reading a document
    @property
    def docname(self):
        """Return the docname of the document currently being parsed."""
        # temp_data is per-document scratch state, populated while reading.
        return self.temp_data['docname']
    @property
    def currmodule(self):
        """Backwards compatible alias. Will be removed."""
        # Emit a deprecation warning attributed to the current document.
        self.warn(self.docname, 'env.currmodule is being referenced by an '
                  'extension; this API will be removed in the future')
        return self.ref_context.get('py:module')
    @property
    def currclass(self):
        """Backwards compatible alias. Will be removed."""
        # Emit a deprecation warning attributed to the current document.
        self.warn(self.docname, 'env.currclass is being referenced by an '
                  'extension; this API will be removed in the future')
        return self.ref_context.get('py:class')
def new_serialno(self, category=''):
"""Return a serial number, e.g. for index entry targets.
The number is guaranteed to be unique in the current document.
"""
key = category + 'serialno'
cur = self.temp_data.get(key, 0)
self.temp_data[key] = cur + 1
return cur
def note_dependency(self, filename):
"""Add *filename* as a dependency of the current document.
This means that the document will be rebuilt if this file changes.
*filename* should be absolute or relative to the source directory.
"""
self.dependencies.setdefault(self.docname, set()).add(filename)
    def note_included(self, filename):
        """Add *filename* as a file included from another document.

        This means the document is not orphaned.
        *filename* should be absolute or relative to the source directory.
        """
        # NOTE(review): path2doc can return None for non-source files; that
        # None would be added to the set — confirm callers only pass source
        # documents.
        self.included.add(self.path2doc(filename))
    def note_reread(self):
        """Add the current document to the list of documents that will
        automatically be re-read at the next build.
        """
        # reread_always is consulted when deciding which docs to re-parse.
        self.reread_always.add(self.docname)
    def note_versionchange(self, type, version, node, lineno):
        """Record a versionadded/versionchanged/deprecated entry.

        The entry is stored per *version* as a tuple of
        (type, docname, lineno, current python module, current object, text).
        """
        self.versionchanges.setdefault(version, []).append(
            (type, self.temp_data['docname'], lineno,
             self.ref_context.get('py:module'),
             self.temp_data.get('object'), node.astext()))
# post-processing of read doctrees
def process_dependencies(self, docname, doctree):
"""Process docutils-generated dependency info."""
cwd = getcwd()
frompath = path.join(path.normpath(self.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
if not deps:
return
for dep in deps.list:
# the dependency path is relative to the working dir, so get
# one relative to the srcdir
if isinstance(dep, bytes):
dep = dep.decode(fs_encoding)
relpath = relative_path(frompath,
path.normpath(path.join(cwd, dep)))
self.dependencies.setdefault(docname, set()).add(relpath)
def process_downloads(self, docname, doctree):
"""Process downloadable file paths. """
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
rel_filename, filename = self.relfn2path(targetname, docname)
self.dependencies.setdefault(docname, set()).add(rel_filename)
if not os.access(filename, os.R_OK):
self.warn_node('download file not readable: %s' % filename,
node)
continue
uniquename = self.dlfiles.add_file(docname, filename)
node['filename'] = uniquename
    def process_images(self, docname, doctree):
        """Process and rewrite image URIs.

        For each image node, fill ``node['candidates']``, a mapping from
        mimetype to image path, so that a writer can pick the best available
        format.  Special keys: ``*`` for a single local candidate, ``?`` for
        nonlocal or data URIs.
        """
        def collect_candidates(imgpath, candidates):
            # Expand the glob pattern; for each mimetype not already present
            # in candidates, keep the shortest matching path.
            globbed = {}
            for filename in glob(imgpath):
                new_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
                                            filename)
                try:
                    mimetype = guess_mimetype(filename)
                    if mimetype not in candidates:
                        globbed.setdefault(mimetype, []).append(new_imgpath)
                except (OSError, IOError) as err:
                    # NOTE(review): `node` here is the loop variable of the
                    # enclosing for-loop (late-bound closure) — presumably
                    # intended so the warning points at the current image
                    # node; confirm.
                    self.warn_node('image file %s not readable: %s' %
                                   (filename, err), node)
            for key, files in iteritems(globbed):
                candidates[key] = sorted(files, key=len)[0]  # select by similarity
        for node in doctree.traverse(nodes.image):
            # Map the mimetype to the corresponding image. The writer may
            # choose the best image from these candidates. The special key * is
            # set if there is only single candidate to be used by a writer.
            # The special key ? is set for nonlocal URIs.
            node['candidates'] = candidates = {}
            imguri = node['uri']
            if imguri.startswith('data:'):
                self.warn_node('image data URI found. some builders might not support', node,
                               type='image', subtype='data_uri')
                candidates['?'] = imguri
                continue
            elif imguri.find('://') != -1:
                self.warn_node('nonlocal image URI found: %s' % imguri, node,
                               type='image', subtype='nonlocal_uri')
                candidates['?'] = imguri
                continue
            rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
            if self.config.language:
                # substitute figures (ex. foo.png -> foo.en.png)
                i18n_full_imgpath = search_image_for_language(full_imgpath, self)
                if i18n_full_imgpath != full_imgpath:
                    full_imgpath = i18n_full_imgpath
                    rel_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
                                                i18n_full_imgpath)
            # set imgpath as default URI
            node['uri'] = rel_imgpath
            if rel_imgpath.endswith(os.extsep + '*'):
                # glob pattern: gather every matching file as a candidate
                if self.config.language:
                    # Search language-specific figures at first
                    i18n_imguri = get_image_filename_for_language(imguri, self)
                    _, full_i18n_imgpath = self.relfn2path(i18n_imguri, docname)
                    collect_candidates(full_i18n_imgpath, candidates)
                collect_candidates(full_imgpath, candidates)
            else:
                candidates['*'] = rel_imgpath
            # map image paths to unique image names (so that they can be put
            # into a single directory)
            for imgpath in itervalues(candidates):
                self.dependencies.setdefault(docname, set()).add(imgpath)
                if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
                    self.warn_node('image file not readable: %s' % imgpath,
                                   node)
                    continue
                self.images.add_file(docname, imgpath)
    def process_metadata(self, docname, doctree):
        """Process the docinfo part of the doctree as metadata.

        Keep processing minimal -- just return what docutils says.
        """
        self.metadata[docname] = md = {}
        try:
            docinfo = doctree[0]
        except IndexError:
            # probably an empty document
            return
        if docinfo.__class__ is not nodes.docinfo:
            # nothing to see here
            return
        for node in docinfo:
            # nodes are multiply inherited...
            if isinstance(node, nodes.authors):
                md['authors'] = [author.astext() for author in node]
            elif isinstance(node, nodes.TextElement):  # e.g. author
                md[node.__class__.__name__] = node.astext()
            else:
                # generic docinfo field: first child is the name, second the body
                name, body = node
                md[name.astext()] = body.astext()
        # coerce selected metadata values to int (0 on parse failure)
        for name, value in md.items():
            if name in ('tocdepth',):
                try:
                    value = int(value)
                except ValueError:
                    value = 0
                md[name] = value
        # the docinfo node is consumed and removed from the rendered document
        del doctree[0]
def create_title_from(self, docname, document):
"""Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
titlenode = nodes.title()
longtitlenode = titlenode
# explicit title set with title directive; use this only for
# the <title> tag in HTML output
if 'title' in document:
longtitlenode = nodes.title()
longtitlenode += nodes.Text(document['title'])
# look for first section title and use that as the title
for node in document.traverse(nodes.section):
visitor = SphinxContentsFilter(document)
node[0].walkabout(visitor)
titlenode += visitor.get_entry_text()
break
else:
# document has no title
titlenode += nodes.Text('<no title>')
self.titles[docname] = titlenode
self.longtitles[docname] = longtitlenode
    def note_toctree(self, docname, toctreenode):
        """Note a TOC tree directive in a document and gather information about
        file relations from it.
        """
        # delegated to the TocTree adapter
        self.toctree.note_toctree(docname, toctreenode)
    def get_toc_for(self, docname, builder):
        """Return a TOC nodetree -- for use on the same page only!"""
        # delegated to the TocTree adapter
        return self.toctree.get_toc_for(docname, builder)
    def get_toctree_for(self, docname, builder, collapse, **kwds):
        """Return the global TOC nodetree."""
        # delegated to the TocTree adapter; extra kwargs are passed through
        return self.toctree.get_toctree_for(docname, builder, collapse, **kwds)
def get_domain(self, domainname):
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
"""
try:
return self.domains[domainname]
except KeyError:
raise ExtensionError('Domain %r is not registered' % domainname)
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
    def get_doctree(self, docname):
        """Read the doctree for a file from the pickle and return it."""
        doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
        with open(doctree_filename, 'rb') as f:
            doctree = pickle.load(f)
        # re-attach the environment and a fresh reporter (stripped before pickling)
        doctree.settings.env = self
        doctree.reporter = Reporter(self.doc2path(docname), 2, 5,
                                    stream=WarningStream(self._warnfunc))
        return doctree
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
if doctree is None:
doctree = self.get_doctree(docname)
# resolve all pending cross-references
self.resolve_references(doctree, docname, builder)
# now, resolve all toctree nodes
for toctreenode in doctree.traverse(addnodes.toctree):
result = self.resolve_toctree(docname, builder, toctreenode,
prune=prune_toctrees,
includehidden=includehidden)
if result is None:
toctreenode.replace_self([])
else:
toctreenode.replace_self(result)
return doctree
    def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
                        titles_only=False, collapse=False, includehidden=False):
        """Resolve a *toctree* node into individual bullet lists with titles
        as items, returning None (if no containing titles are found) or
        a new node.

        If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
        to the value of the *maxdepth* option on the *toctree* node.
        If *titles_only* is True, only toplevel document titles will be in the
        resulting tree.
        If *collapse* is True, all branches not containing docname will
        be collapsed.
        """
        # delegated to the TocTree adapter
        return self.toctree.resolve_toctree(docname, builder, toctree, prune,
                                            maxdepth, titles_only, collapse,
                                            includehidden)
def resolve_references(self, doctree, fromdocname, builder):
for node in doctree.traverse(addnodes.pending_xref):
contnode = node[0].deepcopy()
newnode = None
typ = node['reftype']
target = node['reftarget']
refdoc = node.get('refdoc', fromdocname)
domain = None
try:
if 'refdomain' in node and node['refdomain']:
# let the domain try to resolve the reference
try:
domain = self.domains[node['refdomain']]
except KeyError:
raise NoUri
newnode = domain.resolve_xref(self, refdoc, builder,
typ, target, node, contnode)
# really hardwired reference types
elif typ == 'any':
newnode = self._resolve_any_reference(builder, refdoc, node, contnode)
elif typ | |
# <gh_stars>1-10  (dataset artifact; commented out so the module parses)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Calculate fiberloss fractions.
Fiberloss fractions are computed as the overlap between the light profile
illuminating a fiber and the on-sky aperture of the fiber.
"""
from __future__ import print_function, division
import numpy as np
import numpy.lib.stride_tricks
import scipy.interpolate
import astropy.units as u
import astropy.table
class GalsimFiberlossCalculator(object):
    """
    Initialize a fiberloss calculator that uses GalSim.

    A fiberloss fraction is the overlap between the light profile
    illuminating a fiber and the on-sky aperture of the fiber.

    Parameters
    ----------
    fiber_diameter : float
        Fiber diameter in microns.
    wlen_grid : array
        Array of wavelengths in Angstroms where fiberloss will be calculated.
    num_pixels : int
        Number of pixels to cover the fiber aperture along each axis.
    oversampling : int
        Oversampling factor for anti-aliasing the circular fiber aperture.
    moffat_beta : float
        Beta parameter value for the atmospheric PSF Moffat profile.
    maximum_fft_size : int
        Maximum size of FFT allowed.
    """
    def __init__(self, fiber_diameter, wlen_grid, num_pixels=16,
                 oversampling=32, moffat_beta=3.5, maximum_fft_size=32767):
        self.wlen_grid = np.asarray(wlen_grid)
        self.moffat_beta = moffat_beta
        # Defer import to runtime so galsim is only required when this
        # calculator is actually instantiated.
        import galsim
        # Prepare an image of the fiber aperture for numerical integration.
        # Images are formed in the physical x-y space of the focal plane
        # rather than on-sky angles.
        scale = fiber_diameter / num_pixels
        self.image = galsim.Image(num_pixels, num_pixels, scale=scale)
        # BUG FIX: previously the literal 32767 was hardwired here, so the
        # maximum_fft_size constructor argument was silently ignored.
        self.gsparams = galsim.GSParams(maximum_fft_size=maximum_fft_size)
        # Prepare an anti-aliased image of the fiber aperture: render the
        # circular mask on an oversampled grid, then block-average back down
        # to the num_pixels x num_pixels integration grid.
        nos = num_pixels * oversampling
        dxy = (np.arange(nos) + 0.5 - 0.5 * nos) / (0.5 * nos)
        rsq = dxy ** 2 + dxy[:, np.newaxis] ** 2
        inside = (rsq <= 1).astype(float)
        # Use a strided view so the block averaging does not copy the
        # oversampled mask.
        s0, s1 = inside.strides
        blocks = numpy.lib.stride_tricks.as_strided(
            inside, shape=(num_pixels, num_pixels, oversampling, oversampling),
            strides=(oversampling * s0, oversampling * s1, s0, s1))
        self.aperture = blocks.sum(axis=(2, 3)) / oversampling ** 2

    def create_source(self, fractions, half_light_radius,
                      minor_major_axis_ratio, position_angle):
        """Create a model for the on-sky profile of a single source.

        Size and shape parameter values for any component that is not
        present (because its fraction is zero) are ignored.

        Parameters
        ----------
        fractions : array
            Array of length 2 giving the disk and bulge fractions, respectively,
            which must be in the range [0,1] (but this is not checked). If
            their sum is less than one, the remainder is modeled as a point-like
            component.
        half_light_radius : array
            Array of length 2 giving the disk and bulge half-light radii in
            arcseconds, respectively.
        minor_major_axis_ratio : array
            Array of length 2 giving the dimensionless on-sky ellipse
            minor / major axis ratio for the disk and bulge components,
            respectively.
        position_angle : array
            Array of length 2 giving the position angle in degrees of the on-sky
            disk and bulge ellipses, respectively. Angles are measured counter
            clockwise relative to the +x axis.

        Returns
        -------
        galsim.GSObject
            A object representing the sum of all requested components with its
            total flux normalized to one.
        """
        # This is a no-op but still required to define the namespace.
        import galsim
        components = []
        if fractions[0] > 0:
            # Disk component modeled as a sheared exponential profile.
            components.append(galsim.Exponential(
                flux=fractions[0], half_light_radius=half_light_radius[0])
                .shear(q=minor_major_axis_ratio[0],
                       beta=position_angle[0] * galsim.degrees))
        if fractions[1] > 0:
            # Bulge component modeled as a sheared de Vaucouleurs profile.
            components.append(galsim.DeVaucouleurs(
                flux=fractions[1], half_light_radius=half_light_radius[1])
                .shear(q=minor_major_axis_ratio[1],
                       beta=position_angle[1] * galsim.degrees))
        star_fraction = 1 - fractions.sum()
        if star_fraction > 0:
            # Model a point-like source with a tiny (0.001 arcsec) Gaussian.
            # TODO: sigma should be in arcsec here, not microns!
            components.append(galsim.Gaussian(
                flux=star_fraction, sigma=1e-3 * self.image.scale))
        # Combine the components and transform to focal-plane microns.
        return galsim.Add(components, gsparams=self.gsparams)

    def calculate(self, seeing_fwhm, scale, offset, blur_rms,
                  source_fraction, source_half_light_radius,
                  source_minor_major_axis_ratio, source_position_angle,
                  saved_images_file=None):
        """Calculate the acceptance fractions for a set of fibers.

        Parameters
        ----------
        seeing_fwhm : array
            Array of length num_wlen giving the FWHM seeing in arcseconds
            at each wavelength.
        scale : array
            Array of shape (num_fibers, 2) giving the x and y image scales in
            microns / arcsec at each fiber location.
        offset : array
            Array of shape (num_fibers, num_wlen, 2) giving the x and y offsets
            in microns at each fiber location and wavelength.
        blur_rms : array
            Array of shape (num_fibers, num_wlen) giving the RMS instrumental
            Gaussian blur at each fiber location and wavelength.
        source_fraction : array
            Array of shape (num_fibers, 2). See :meth:`create_source`
            for details.
        source_half_light_radius : array
            Array of shape (num_fibers, 2). See :meth:`create_source`
            for details.
        source_minor_major_axis_ratio : array
            Array of shape (num_fibers, 2). See :meth:`create_source`
            for details.
        source_position_angle : array
            Array of shape (num_fibers, 2). See :meth:`create_source`
            for details.
        saved_images_file : str or None
            Write a multi-extension FITS file with this name containing images
            of the atmospheric and instrument PSFs as a function of wavelength,
            as well as the source profile and the anti-aliased fiber aperture.

        Returns
        -------
        array
            Array of fiberloss fractions in the range 0-1 with shape
            (num_fibers, num_wlen).
        """
        # This is a no-op but still required to define the namespace.
        import galsim
        num_fibers, num_wlen = len(offset), len(self.wlen_grid)
        # Sanity-check that all per-fiber / per-wavelength inputs agree.
        assert seeing_fwhm.shape == (num_wlen,)
        assert scale.shape == (num_fibers, 2)
        assert offset.shape == (num_fibers, num_wlen, 2)
        assert blur_rms.shape == (num_fibers, num_wlen)
        assert source_fraction.shape == (num_fibers, 2)
        assert source_half_light_radius.shape == (num_fibers, 2)
        assert source_minor_major_axis_ratio.shape == (num_fibers, 2)
        assert source_position_angle.shape == (num_fibers, 2)
        assert np.all(source_fraction >= 0) and np.all(source_fraction <= 1)
        star_fraction = 1 - source_fraction.sum(axis=1)
        assert np.all(star_fraction >= 0) and np.all(star_fraction <= 1)
        if saved_images_file is not None:
            import astropy.io.fits
            import astropy.wcs
            hdu_list = astropy.io.fits.HDUList()
            header = astropy.io.fits.Header()
            header['COMMENT'] = 'Fiberloss calculation images.'
            hdu_list.append(astropy.io.fits.PrimaryHDU(header=header))
            # All subsequent HDUs contain images with the same WCS.
            w = astropy.wcs.WCS(naxis=2)
            w.wcs.ctype = ['x', 'y']
            ny, nx = self.image.array.shape
            w.wcs.crpix = [nx / 2. + 0.5, nx / 2. + 0.5]
            w.wcs.cdelt = [self.image.scale, self.image.scale]
            w.wcs.crval = [0., 0.]
            header = w.to_header()
            # Save the anti-aliased fiber aperture.
            header['COMMENT'] = 'Fiber aperture'
            hdu_list.append(astropy.io.fits.ImageHDU(
                data=self.aperture, header=header))
        # Convert focal-plane offsets in microns to image pixels.
        scaled_offset = offset / self.image.scale
        fiberloss = np.empty((num_fibers, num_wlen))
        source_profiles = []
        for i, wlen in enumerate(self.wlen_grid):
            # Create the atmospheric PSF for this wavelength in
            # on-sky coordinates.
            seeing = galsim.Moffat(fwhm=seeing_fwhm[i], beta=self.moffat_beta)
            # Loop over fibers.
            for j in range(num_fibers):
                # Transform the atmospheric PSF to the focal plane for
                # this fiber location.
                atmospheric_psf = seeing.transform(
                    scale[j, 0], 0, 0, scale[j, 1]).withFlux(1)
                # Create the instrument PSF for this fiber and wavelength.
                instrument_psf = galsim.Gaussian(sigma=blur_rms[j, i])
                if i == 0:
                    # Create the source profile for this fiber on the sky.
                    source_profile = self.create_source(
                        source_fraction[j], source_half_light_radius[j],
                        source_minor_major_axis_ratio[j],
                        source_position_angle[j])
                    # Transform to focal-plane coordinates.
                    source_profile = source_profile.transform(
                        scale[j, 0], 0, 0, scale[j, 1]).withFlux(1)
                    # Cache for reuse at the remaining wavelengths, since the
                    # source model does not depend on wavelength.
                    source_profiles.append(source_profile)
                else:
                    # Lookup the source model for this fiber.
                    source_profile = source_profiles[j]
                # Convolve the source + instrument + atmosphere.
                convolved = galsim.Convolve(
                    [instrument_psf, atmospheric_psf, source_profile],
                    gsparams=self.gsparams)
                # Render the convolved model with its offset.
                offsets = (scaled_offset[j, i, 0], scaled_offset[j, i, 1])
                # TODO: compare method='no_pixel' and 'auto' for
                # accuracy and speed.
                draw_args = dict(image=self.image, method='auto')
                convolved.drawImage(offset=offsets, **draw_args)
                # Calculate the fiberloss fraction for this fiber and wlen
                # as the aperture-weighted sum of the rendered image.
                fiberloss[j, i] = np.sum(self.image.array * self.aperture)
                if saved_images_file is not None:
                    header['FIBER'] = j
                    header['WLEN'] = wlen
                    header['FRAC'] = fiberloss[j, i]
                    header['COMMENT'] = 'Convolved model'
                    hdu_list.append(astropy.io.fits.ImageHDU(
                        data=self.image.array.copy(), header=header))
                    # The component models are only rendered individually if we
                    # need to save them.
                    instrument_psf.drawImage(offset=offsets, **draw_args)
                    header['COMMENT'] = 'Instrument blur model'
                    hdu_list.append(astropy.io.fits.ImageHDU(
                        data=self.image.array.copy(), header=header))
                    # Render the seeing without the instrumental offset.
                    atmospheric_psf.drawImage(**draw_args)
                    header['COMMENT'] = 'Atmospheric seeing model'
                    hdu_list.append(astropy.io.fits.ImageHDU(
                        data=self.image.array.copy(), header=header))
                    if wlen == self.wlen_grid[-1]:
                        # Render the source profile without any offset after
                        # all other postage stamps for this fiber.
                        source_profile.drawImage(**draw_args)
                        del header['WLEN']
                        del header['FRAC']
                        header['COMMENT'] = 'Source profile'
                        hdu_list.append(astropy.io.fits.ImageHDU(
                            data=self.image.array.copy(), header=header))
        if saved_images_file is not None:
            # FIX: the 'clobber' keyword was deprecated in astropy 1.3 and
            # later removed; 'overwrite' is the supported replacement.
            hdu_list.writeto(saved_images_file, overwrite=True)
        return fiberloss
def calculate_fiber_acceptance_fraction(
focal_x, focal_y, wavelength, source, atmosphere, instrument,
source_types=None, source_fraction=None, source_half_light_radius=None,
source_minor_major_axis_ratio=None, source_position_angle=None,
oversampling=32, saved_images_file=None, saved_table_file=None):
"""Calculate the acceptance fraction for a single fiber.
The behavior of this function is customized by the instrument.fiberloss
configuration parameters. When instrument.fiberloss.method == 'table',
pre-tabulated values are returned using source.type as the key and
all other parameters to this function are ignored.
When instrument.fiberloss.method == 'galsim', fiberloss is calculated
on the fly using the GalSim package via :class:`GalsimFiberlossCalculator`
to model the PSF components and source profile and perform the convolutions.
To efficiently calculate fiberloss fractions for multiple sources with
GalSim, use :class:`GalsimFiberlossCalculator` directly instead of
repeatedly calling this method. See | |
request.caller_uid
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ReleaseCommodityResponse(),
await self.do_roarequest_async('ReleaseCommodity', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/appAuth/commodities/release', 'json', req, runtime)
)
def render_batch_callback(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenderBatchCallbackHeaders()
return self.render_batch_callback_with_options(request, headers, runtime)
async def render_batch_callback_async(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.RenderBatchCallbackHeaders()
return await self.render_batch_callback_with_options_async(request, headers, runtime)
def render_batch_callback_with_options(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
headers: dingtalkyida__1__0_models.RenderBatchCallbackHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.oss_url):
body['ossUrl'] = request.oss_url
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.namespace):
body['namespace'] = request.namespace
if not UtilClient.is_unset(request.time_zone):
body['timeZone'] = request.time_zone
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.source):
body['source'] = request.source
if not UtilClient.is_unset(request.sequence_id):
body['sequenceId'] = request.sequence_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenderBatchCallbackResponse(),
self.do_roarequest('RenderBatchCallback', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printings/callbacks/batch', 'none', req, runtime)
)
async def render_batch_callback_with_options_async(
self,
request: dingtalkyida__1__0_models.RenderBatchCallbackRequest,
headers: dingtalkyida__1__0_models.RenderBatchCallbackHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.RenderBatchCallbackResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.oss_url):
body['ossUrl'] = request.oss_url
if not UtilClient.is_unset(request.corp_id):
body['corpId'] = request.corp_id
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.namespace):
body['namespace'] = request.namespace
if not UtilClient.is_unset(request.time_zone):
body['timeZone'] = request.time_zone
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.source):
body['source'] = request.source
if not UtilClient.is_unset(request.sequence_id):
body['sequenceId'] = request.sequence_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.status):
body['status'] = request.status
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.RenderBatchCallbackResponse(),
await self.do_roarequest_async('RenderBatchCallback', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/printings/callbacks/batch', 'none', req, runtime)
)
def get_open_url(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOpenUrlHeaders()
return self.get_open_url_with_options(app_type, request, headers, runtime)
async def get_open_url_async(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetOpenUrlHeaders()
return await self.get_open_url_with_options_async(app_type, request, headers, runtime)
def get_open_url_with_options(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
headers: dingtalkyida__1__0_models.GetOpenUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.file_url):
query['fileUrl'] = request.file_url
if not UtilClient.is_unset(request.timeout):
query['timeout'] = request.timeout
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOpenUrlResponse(),
self.do_roarequest('GetOpenUrl', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/temporaryUrls/{app_type}', 'json', req, runtime)
)
async def get_open_url_with_options_async(
self,
app_type: str,
request: dingtalkyida__1__0_models.GetOpenUrlRequest,
headers: dingtalkyida__1__0_models.GetOpenUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetOpenUrlResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.file_url):
query['fileUrl'] = request.file_url
if not UtilClient.is_unset(request.timeout):
query['timeout'] = request.timeout
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetOpenUrlResponse(),
await self.do_roarequest_async('GetOpenUrl', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/temporaryUrls/{app_type}', 'json', req, runtime)
)
def get_sale_user_info_by_user_id(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders()
return self.get_sale_user_info_by_user_id_with_options(request, headers, runtime)
async def get_sale_user_info_by_user_id_async(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders()
return await self.get_sale_user_info_by_user_id_with_options_async(request, headers, runtime)
def get_sale_user_info_by_user_id_with_options(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
headers: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.namespace):
query['namespace'] = request.namespace
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse(),
self.do_roarequest('GetSaleUserInfoByUserId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/saleUserInfo', 'json', req, runtime)
)
async def get_sale_user_info_by_user_id_with_options_async(
self,
request: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdRequest,
headers: dingtalkyida__1__0_models.GetSaleUserInfoByUserIdHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.corp_id):
query['corpId'] = request.corp_id
if not UtilClient.is_unset(request.namespace):
query['namespace'] = request.namespace
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.GetSaleUserInfoByUserIdResponse(),
await self.do_roarequest_async('GetSaleUserInfoByUserId', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/apps/saleUserInfo', 'json', req, runtime)
)
def validate_application_authorization_order(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders()
return self.validate_application_authorization_order_with_options(instance_id, request, headers, runtime)
async def validate_application_authorization_order_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders()
return await self.validate_application_authorization_order_with_options_async(instance_id, request, headers, runtime)
def validate_application_authorization_order_with_options(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse(),
self.do_roarequest('ValidateApplicationAuthorizationOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationOrderUpdateAuthorizations/{instance_id}', 'json', req, runtime)
)
async def validate_application_authorization_order_with_options_async(
self,
instance_id: str,
request: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderRequest,
headers: dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.access_key):
query['accessKey'] = request.access_key
if not UtilClient.is_unset(request.caller_union_id):
query['callerUnionId'] = request.caller_union_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ValidateApplicationAuthorizationOrderResponse(),
await self.do_roarequest_async('ValidateApplicationAuthorizationOrder', 'yida_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/yida/applicationOrderUpdateAuthorizations/{instance_id}', 'json', req, runtime)
)
def execute_task(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteTaskHeaders()
return self.execute_task_with_options(request, headers, runtime)
async def execute_task_async(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.ExecuteTaskHeaders()
return await self.execute_task_with_options_async(request, headers, runtime)
def execute_task_with_options(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
headers: dingtalkyida__1__0_models.ExecuteTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteTaskResponse(),
self.do_roarequest('ExecuteTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/execute', 'none', req, runtime)
)
async def execute_task_with_options_async(
self,
request: dingtalkyida__1__0_models.ExecuteTaskRequest,
headers: dingtalkyida__1__0_models.ExecuteTaskHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.ExecuteTaskResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.out_result):
body['outResult'] = request.out_result
if not UtilClient.is_unset(request.no_execute_expressions):
body['noExecuteExpressions'] = request.no_execute_expressions
if not UtilClient.is_unset(request.app_type):
body['appType'] = request.app_type
if not UtilClient.is_unset(request.form_data_json):
body['formDataJson'] = request.form_data_json
if not UtilClient.is_unset(request.system_token):
body['systemToken'] = request.system_token
if not UtilClient.is_unset(request.language):
body['language'] = request.language
if not UtilClient.is_unset(request.remark):
body['remark'] = request.remark
if not UtilClient.is_unset(request.process_instance_id):
body['processInstanceId'] = request.process_instance_id
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.ExecuteTaskResponse(),
await self.do_roarequest_async('ExecuteTask', 'yida_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/yida/tasks/execute', 'none', req, runtime)
)
def delete_instance(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteInstanceHeaders()
return self.delete_instance_with_options(request, headers, runtime)
async def delete_instance_async(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkyida__1__0_models.DeleteInstanceHeaders()
return await self.delete_instance_with_options_async(request, headers, runtime)
def delete_instance_with_options(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
headers: dingtalkyida__1__0_models.DeleteInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = request.app_type
if not UtilClient.is_unset(request.system_token):
query['systemToken'] = request.system_token
if not UtilClient.is_unset(request.user_id):
query['userId'] = request.user_id
if not UtilClient.is_unset(request.language):
query['language'] = request.language
if not UtilClient.is_unset(request.process_instance_id):
query['processInstanceId'] = request.process_instance_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkyida__1__0_models.DeleteInstanceResponse(),
self.do_roarequest('DeleteInstance', 'yida_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/yida/processes/instances', 'none', req, runtime)
)
async def delete_instance_with_options_async(
self,
request: dingtalkyida__1__0_models.DeleteInstanceRequest,
headers: dingtalkyida__1__0_models.DeleteInstanceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkyida__1__0_models.DeleteInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.app_type):
query['appType'] = | |
<filename>bagpy-sbg/sbg_genpy/_SbgStatus.py<gh_stars>0
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from sbg_driver/SbgStatus.msg. Do not edit."""
import codecs
import sys
# True when running under Python 3 (hexversion 0x03000000 is the 3.0.0 release).
python3 = sys.hexversion > 0x03000000
import genpy
import struct
import sbg_driver.msg
import std_msgs.msg
class SbgStatus(genpy.Message):
_md5sum = "1b73c890bd111d40339f4be9a7495e96"
_type = "sbg_driver/SbgStatus"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# SBG Ellipse Messages
Header header
# Time since sensor is powered up (in us)
uint32 time_stamp
# General status bitmask and enums
SbgStatusGeneral status_general
# Communication status bitmask and enums.
SbgStatusCom status_com
# Aiding equipments status bitmask and enums.
SbgStatusAiding status_aiding
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: sbg_driver/SbgStatusGeneral
# SBG Ellipse Messages
# SbgStatus submessage
# General main power
# True when main power supply is OK.
bool main_power
# General imu power
# True when IMU power supply is OK.
bool imu_power
# General gps power
# Set to True when GPS power supply is OK.
bool gps_power
# General Settings
# True if settings were correctly loaded
bool settings
# General Temperature
# True when temperature is within specified limits.
bool temperature
================================================================================
MSG: sbg_driver/SbgStatusCom
# SBG Ellipse Messages
# SbgStatus submessage
# PORT A: False in case of low level communication error.
bool port_a
# PORT B: False in case of low level communication error.
bool port_b
# PORT C: False in case of low level communication error.
bool port_c
# PORT D: False in case of low level communication error.
bool port_d
# PORT E: False in case of low level communication error.
bool port_e
# PORT A RX: False in case of saturation on PORT A input
bool port_a_rx
# PORT A TX: False in case of saturation on PORT A output
bool port_a_tx
# PORT B RX: False in case of saturation on PORT B input
bool port_b_rx
# PORT B TX: False in case of saturation on PORT B output
bool port_b_tx
# PORT C RX: False in case of saturation on PORT C input
bool port_c_rx
# PORT C TX: False in case of saturation on PORT C output
bool port_c_tx
# PORT D RX: False in case of saturation on PORT D input
bool port_d_rx
# PORT D TX: False in case of saturation on PORT D output
bool port_d_tx
# PORT E RX: False in case of saturation on PORT E input
bool port_e_rx
# PORT E TX: False in case of saturation on PORT E output
bool port_e_tx
# CAN RX: False in case of saturation on CAN Bus output buffer
bool can_rx
# CAN TX: False in case of saturation on CAN Bus input buffer
bool can_tx
# CAN BUS
# 0 CAN BUS OFF Bus OFF operation due to too much errors.
# 1 CAN BUS TX_RX_ERR Transmit or received error.
# 2 CAN BUS OK The CAN bus is working correctly.
# 3 CAN BUS ERROR A general error has occurred on the CAN bus.
uint8 can_status
================================================================================
MSG: sbg_driver/SbgStatusAiding
# SBG Ellipse Messages
# SbgStatus submessage
# AIDING_GPS1_POS_RECV true when valid GPS 1 position data is received
bool gps1_pos_recv
# AIDING_GPS1_VEL_RECV true when valid GPS 1 velocity data is received
bool gps1_vel_recv
# AIDING_GPS1_HDT_RECV true when valid GPS 1 true heading data is received
bool gps1_hdt_recv
# AIDING_GPS1_UTC_RECV true when valid GPS 1 UTC time data is received
bool gps1_utc_recv
# AIDING_MAG_RECV true when valid Magnetometer data is received
bool mag_recv
# AIDING_ODO_RECV true when Odometer pulse is received
bool odo_recv
# AIDING_DVL_RECV true when valid DVL data is received
bool dvl_recv
"""
__slots__ = ['header','time_stamp','status_general','status_com','status_aiding']
_slot_types = ['std_msgs/Header','uint32','sbg_driver/SbgStatusGeneral','sbg_driver/SbgStatusCom','sbg_driver/SbgStatusAiding']
def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       header,time_stamp,status_general,status_com,status_aiding

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
                 to set specific fields.
    """
    if not (args or kwds):
        # no explicit values supplied: initialize every field to its default
        self.header = std_msgs.msg.Header()
        self.time_stamp = 0
        self.status_general = sbg_driver.msg.SbgStatusGeneral()
        self.status_com = sbg_driver.msg.SbgStatusCom()
        self.status_aiding = sbg_driver.msg.SbgStatusAiding()
        return
    # let the genpy base class assign the provided values ...
    super(SbgStatus, self).__init__(*args, **kwds)
    # ... then backfill defaults for any field it left as None
    if self.header is None:
        self.header = std_msgs.msg.Header()
    if self.time_stamp is None:
        self.time_stamp = 0
    if self.status_general is None:
        self.status_general = sbg_driver.msg.SbgStatusGeneral()
    if self.status_com is None:
        self.status_com = sbg_driver.msg.SbgStatusCom()
    if self.status_aiding is None:
        self.status_aiding = sbg_driver.msg.SbgStatusAiding()
def _get_types(self):
    """
    internal API method

    Returns the list of ROS type strings for this message's fields,
    parallel to __slots__.
    """
    return self._slot_types
def serialize(self, buff):
    """
    serialize message into buffer

    :param buff: buffer, ``StringIO``

    Layout: Header (3x uint32 + length-prefixed frame_id string),
    then uint32 time_stamp followed by 30 single-byte fields
    (the boolean flags of the three status submessages and the
    can_status uint8).
    """
    try:
        _x = self
        # header: seq, stamp.secs, stamp.nsecs packed as three unsigned 32-bit ints
        buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
        # frame_id: little-endian uint32 length prefix, then the encoded bytes
        _x = self.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        _x = self
        # fixed-size tail: time_stamp plus every status flag, in .msg declaration order
        buff.write(_get_struct_I30B().pack(_x.time_stamp, _x.status_general.main_power, _x.status_general.imu_power, _x.status_general.gps_power, _x.status_general.settings, _x.status_general.temperature, _x.status_com.port_a, _x.status_com.port_b, _x.status_com.port_c, _x.status_com.port_d, _x.status_com.port_e, _x.status_com.port_a_rx, _x.status_com.port_a_tx, _x.status_com.port_b_rx, _x.status_com.port_b_tx, _x.status_com.port_c_rx, _x.status_com.port_c_tx, _x.status_com.port_d_rx, _x.status_com.port_d_tx, _x.status_com.port_e_rx, _x.status_com.port_e_tx, _x.status_com.can_rx, _x.status_com.can_tx, _x.status_com.can_status, _x.status_aiding.gps1_pos_recv, _x.status_aiding.gps1_vel_recv, _x.status_aiding.gps1_hdt_recv, _x.status_aiding.gps1_utc_recv, _x.status_aiding.mag_recv, _x.status_aiding.odo_recv, _x.status_aiding.dvl_recv))
    # packing failures are routed through _check_types, which raises a genpy error
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
    """
    unpack serialized message in str into this message instance

    :param str: byte array of serialized message, ``str``
    :raises: genpy.DeserializationError if the buffer is too short
    """
    if python3:
        codecs.lookup_error("rosmsg").msg_type = self._type
    try:
        # lazily create any missing submessage containers before filling them
        if self.header is None:
            self.header = std_msgs.msg.Header()
        if self.status_general is None:
            self.status_general = sbg_driver.msg.SbgStatusGeneral()
        if self.status_com is None:
            self.status_com = sbg_driver.msg.SbgStatusCom()
        if self.status_aiding is None:
            self.status_aiding = sbg_driver.msg.SbgStatusAiding()
        end = 0
        _x = self
        # header: three unsigned 32-bit ints (seq, stamp.secs, stamp.nsecs)
        start = end
        end += 12
        (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        # frame_id: uint32 length prefix followed by that many bytes
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
        else:
            self.header.frame_id = str[start:end]
        _x = self
        # fixed-size tail: uint32 time_stamp plus 30 single-byte fields (4 + 30 = 34 bytes)
        start = end
        end += 34
        (_x.time_stamp, _x.status_general.main_power, _x.status_general.imu_power, _x.status_general.gps_power, _x.status_general.settings, _x.status_general.temperature, _x.status_com.port_a, _x.status_com.port_b, _x.status_com.port_c, _x.status_com.port_d, _x.status_com.port_e, _x.status_com.port_a_rx, _x.status_com.port_a_tx, _x.status_com.port_b_rx, _x.status_com.port_b_tx, _x.status_com.port_c_rx, _x.status_com.port_c_tx, _x.status_com.port_d_rx, _x.status_com.port_d_tx, _x.status_com.port_e_rx, _x.status_com.port_e_tx, _x.status_com.can_rx, _x.status_com.can_tx, _x.status_com.can_status, _x.status_aiding.gps1_pos_recv, _x.status_aiding.gps1_vel_recv, _x.status_aiding.gps1_hdt_recv, _x.status_aiding.gps1_utc_recv, _x.status_aiding.mag_recv, _x.status_aiding.odo_recv, _x.status_aiding.dvl_recv,) = _get_struct_I30B().unpack(str[start:end])
        # the flags were unpacked as ints (0/1); coerce them back to real booleans
        self.status_general.main_power = bool(self.status_general.main_power)
        self.status_general.imu_power = bool(self.status_general.imu_power)
        self.status_general.gps_power = bool(self.status_general.gps_power)
        self.status_general.settings = bool(self.status_general.settings)
        self.status_general.temperature = bool(self.status_general.temperature)
        self.status_com.port_a = bool(self.status_com.port_a)
        self.status_com.port_b = bool(self.status_com.port_b)
        self.status_com.port_c = bool(self.status_com.port_c)
        self.status_com.port_d = bool(self.status_com.port_d)
        self.status_com.port_e = bool(self.status_com.port_e)
        self.status_com.port_a_rx = bool(self.status_com.port_a_rx)
        self.status_com.port_a_tx = bool(self.status_com.port_a_tx)
        self.status_com.port_b_rx = bool(self.status_com.port_b_rx)
        self.status_com.port_b_tx = bool(self.status_com.port_b_tx)
        self.status_com.port_c_rx = bool(self.status_com.port_c_rx)
        self.status_com.port_c_tx = bool(self.status_com.port_c_tx)
        self.status_com.port_d_rx = bool(self.status_com.port_d_rx)
        self.status_com.port_d_tx = bool(self.status_com.port_d_tx)
        self.status_com.port_e_rx = bool(self.status_com.port_e_rx)
        self.status_com.port_e_tx = bool(self.status_com.port_e_tx)
        self.status_com.can_rx = bool(self.status_com.can_rx)
        self.status_com.can_tx = bool(self.status_com.can_tx)
        # note: can_status stays a uint8 (enum), not a bool
        self.status_aiding.gps1_pos_recv = bool(self.status_aiding.gps1_pos_recv)
        self.status_aiding.gps1_vel_recv = bool(self.status_aiding.gps1_vel_recv)
        self.status_aiding.gps1_hdt_recv = bool(self.status_aiding.gps1_hdt_recv)
        self.status_aiding.gps1_utc_recv = bool(self.status_aiding.gps1_utc_recv)
        self.status_aiding.mag_recv = bool(self.status_aiding.mag_recv)
        self.status_aiding.odo_recv = bool(self.status_aiding.odo_recv)
        self.status_aiding.dvl_recv = bool(self.status_aiding.dvl_recv)
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer

    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module (unused here: this message has no
                  array fields, so the layout matches serialize())
    """
    try:
        _x = self
        # header: seq, stamp.secs, stamp.nsecs packed as three unsigned 32-bit ints
        buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
        # frame_id: little-endian uint32 length prefix, then the encoded bytes
        _x = self.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        _x = self
        # fixed-size tail: time_stamp plus every status flag, in .msg declaration order
        buff.write(_get_struct_I30B().pack(_x.time_stamp, _x.status_general.main_power, _x.status_general.imu_power, _x.status_general.gps_power, _x.status_general.settings, _x.status_general.temperature, _x.status_com.port_a, _x.status_com.port_b, _x.status_com.port_c, _x.status_com.port_d, _x.status_com.port_e, _x.status_com.port_a_rx, _x.status_com.port_a_tx, _x.status_com.port_b_rx, _x.status_com.port_b_tx, _x.status_com.port_c_rx, _x.status_com.port_c_tx, _x.status_com.port_d_rx, _x.status_com.port_d_tx, _x.status_com.port_e_rx, _x.status_com.port_e_tx, _x.status_com.can_rx, _x.status_com.can_tx, _x.status_com.can_status, _x.status_aiding.gps1_pos_recv, _x.status_aiding.gps1_vel_recv, _x.status_aiding.gps1_hdt_recv, _x.status_aiding.gps1_utc_recv, _x.status_aiding.mag_recv, _x.status_aiding.odo_recv, _x.status_aiding.dvl_recv))
    # packing failures are routed through _check_types, which raises a genpy error
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status_general is None:
self.status_general = sbg_driver.msg.SbgStatusGeneral()
if self.status_com is None:
self.status_com = sbg_driver.msg.SbgStatusCom()
if self.status_aiding is None:
self.status_aiding = sbg_driver.msg.SbgStatusAiding()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
| |
<gh_stars>1-10
#
# EOBTidalExternalC
#
# Provides interface to Bernuzzi EOB model, ** C++ implementation ++
#
# REQUIRES
# - /EOB_ihes.out # created by compiling C++ code
#
# PROVIDES
# - hlmoft
#
# COMPARE TO
# NRWaveformCatalogManager3 : very similar interface
debug_output =False
import numpy as np
import os
import sys
import shutil
import lalsimulation as lalsim
import lal
from scipy.interpolate import interp1d, UnivariateSpline
import pickle
import time
from .. import lalsimutils
rosUseArchivedWaveforms = True
rosDebug = False
#dirBaseFiles =os.environ["HOME"] + "/unixhome/Projects/LIGO-ILE-Applications/ILE-Tides/MatlabCodePolished"
# Required environment variables: location of the compiled TEOBResumS executable
# and of the archive directory for per-run working directories.
dirBaseFiles =os.environ["EOB_C_BASE"]
dirBaseFilesArchive =os.environ["EOB_C_ARCHIVE"]
# number of candidate working.dirN archive slots (EOB_C_ARCHIVE_NMAX + 1)
n_max_dirs = 1+ int(os.environ["EOB_C_ARCHIVE_NMAX"])
# PRINT GIT REPO IN LOG
print(" EOB resumS git hash ")
if os.path.exists(dirBaseFiles):
    os.system("(cd " + dirBaseFiles +"; git rev-parse HEAD)")
else:
    print(" No EOBResumS C external!")
default_interpolation_kind = 'linear'  # spline interpolation # very slow!
#internal_ModesAvailable = [(2,2), (2,1), (2,-2), (2,-1), (3,3), (3,2), (3,1), (3,-3), (3,-2), (3,-1)]
internal_ModesAvailable = [(2, 2), (2, 1), (2, -2), (2, -1), (3, 3), (3, -3), (3, 2), (3, -2), (3, 1), (3, -1)]
# Column indices [amplitude, phase] into the hlm output file for each (l, m) mode.
# A negative-m mode shares the columns of its positive-m partner.
# see TEOBResumSHlm.cpp for mode order (e.g., as in hlm_Tidal)
internal_ModeLookup = {
    (2, 1): [1, 2], (2, -1): [1, 2],
    (2, 2): [3, 4], (2, -2): [3, 4],
    (3, 1): [5, 6], (3, -1): [5, 6],
    (3, 2): [7, 8], (3, -2): [7, 8],
    (3, 3): [9, 10], (3, -3): [9, 10],
}
# One solar mass in seconds, G*Msun/c^3 (converts dimensionless t/M to physical time)
MsunInSec = lal.MSUN_SI*lal.G_SI/lal.C_SI**3
def RangeWrap1dAlt(bound, val, fn):
    """
    Return a vectorized function equal to fn(x) for bound[0] < x < bound[1]
    and equal to the constant `val` outside that open interval.

    Arguments:
      bound : two-element sequence [lo, hi]
      val   : scalar returned outside the interval
      fn    : callable applied (elementwise) inside the interval

    Built on np.piecewise so the result can be evaluated on numpy arrays.
    WARNING: piecewise with callable pieces requires numpy >= 1.8-ish:
    http://stackoverflow.com/questions/20800324/scipy-pchipinterpolator-error-array-cannot-be-safely-cast-to-required-type
    """
    def wrapped(x):
        inside = np.logical_and(x > bound[0], x < bound[1])
        # np.piecewise accepts a scalar for a piece, so `val` is honored here.
        # (The original implementation ignored `val` and always used 0 outside;
        # all visible callers passed val=0, so this fix is backward-compatible.)
        return np.piecewise(x, [inside, np.logical_not(inside)], [fn, val])
    return wrapped
import functools
def compose(*functions):
    """Return the left-to-right composition: compose(f, g)(x) == f(g(x)).

    With no arguments, returns the identity function.
    """
    def composed(x):
        # apply the rightmost function first, exactly like a reduce-based chain
        for func in reversed(functions):
            x = func(x)
        return x
    return composed
def myzero(arg):
    """Constant zero regardless of input; used as the out-of-range piece in np.piecewise."""
    return 0
def RangeWrap1d(bound, val,fn):
    """Pass-through: returns fn unwrapped (bound/val ignored; modern interp1d
    handles out-of-range extrapolation itself)."""
    return fn # IDEALLY not necessary with modern interp1d
def ModeToString(pair):
    """Concatenate (l, m) into a short label like '22'.

    Only meaningful for POSITIVE single-digit l and m.
    """
    return "{}{}".format(pair[0], pair[1])
def write_par_file(basedir, mtot_msun, q, chi1, chi2, lambda1, lambda2, fmin, dt):
    """
    Write the TEOBResumS parameter file <basedir>/my.par.

    Arguments:
      basedir    : directory receiving 'my.par'
      mtot_msun  : total mass in solar masses
      q          : mass ratio
      chi1, chi2 : dimensionless aligned spins
      lambda1,
      lambda2    : l=2 tidal deformabilities of each body
      fmin       : starting frequency (Hz)
      dt         : time step (currently unused; kept for interface stability)
    """
    # assemble the settings as one line each, then write in a single pass
    lines = [
        "Mtot %s " % mtot_msun,
        "distance 1",
        "q %s" % q,
        "chi1 %s" % chi1,
        "chi2 %s" % chi2,
        "f_min %s" % fmin,
        "tidal 1",           # must be 1 for tidal calculation
        "RWZ 0",
        "speedy 1",
        "dynamics 0",        # does nothing?
        "Yagi_fit 1",
        "multipoles 1",
        "lm 1",
        "solver_scheme 0",
        "LambdaAl2 %s" % lambda1,
        "LambdaBl2 %s" % lambda2,
        "geometric_units 0",
    ]
    with open(basedir + "/my.par", 'w') as par_file:
        par_file.write("\n".join(lines) + "\n")
class WaveformModeCatalog:
"""
Class containing EOB tidal harmonics, both in dimensionless and dimensional form
"""
def __init__(self, P, lmax=2,
align_at_peak_l2_m2_emission=True, mode_list_to_load=[],build_fourier_time_window=1000,clean_with_taper=True,use_internal_interpolation_deltaT=None,build_strain_and_conserve_memory=False,reference_phase_at_peak=None,fix_phase_zero_at_coordinate=False):
self.P = P
self.quantity = "h"
self.fOrbitLower =0. # Used to clean results. Based on the phase of the 22 mode
self.fMinMode ={}
# Mode storage convention
# - event time in first element
# - t=0 MAY be at peak, if opts.align_at_peak_l2m2_emission. Cannot promise...but raw samples show the valid range
# - t in seconds
# - h*r/M in second element
self.waveform_modes = {}
self.waveform_modes_uniform_in_time={}
self.waveform_modes_nonuniform_smallest_timestep = {}
self.waveform_modes_nonuniform_largest_timestep = {}
self.waveform_modes[(2,2)] = []
self.waveform_modes_complex = {}
self.waveform_modes_complex_interpolated = {}
self.waveform_modes_complex_interpolated_amplitude = {}
self.waveform_modes_complex_interpolated_phase = {}
self.waveform_modes_complex_padded = {}
self.waveform_modes_fourier = {}
# Require spins zero for now
if any([P.s1x,P.s1y,P.s2x,P.s2y]):
print(" FAILURE: Tidal code assumes a nonprecessing approximant for now")
# Run the external script with the necessary parameters
# Assume environment set correctly
# Delete files from previous case"
m1InMsun = P.m1/lal.MSUN_SI
m2InMsun = P.m2/lal.MSUN_SI
m1InMsun, m2InMsun = reversed(sorted([m1InMsun, m2InMsun])) # FORCE m1 > m2
# Convert lambda1, lambda2 (assumed l=2) to other terms,
# Note we are passed lambda1, lambda2. We convert to (l=2) kappaA, kappaB
# - Generate lambdatilde
# - generate lambda2
fname_base = "working.dir"+str(np.random.randint(0,n_max_dirs))
print(" Saving to file (beware collisions!) ", fname_base)
retrieve_directory = ''
cwd = os.getcwd();
while os.path.exists(dirBaseFilesArchive+"/"+fname_base):
print(" Waiting to delete file... "+fname_base)
time.sleep(10)
if False: #rosUseArchivedWaveforms and os.path.exists(dirBaseFilesArchive+"/"+fname_base) and os.path.exists(dirBaseFilesArchive+"/"+fname_base+"/test_h22.dat"):
retrieve_directory = dirBaseFilesArchive+"/"+fname_base
print(" Attempting to use archived waveform data in ", retrieve_directory)
else:
retrieve_directory = dirBaseFilesArchive+"/"+fname_base + "/"
# Create directory
if not os.path.exists(retrieve_directory):
print(" Making directory to archive this run ... ", retrieve_directory)
os.makedirs(retrieve_directory)
if not os.path.exists(retrieve_directory):
print(" FAILED TO CREATE ", retrieve_directory)
sys.exit(0)
M_sec = (P.m1+P.m2)/lal.MSUN_SI * MsunInSec
dt_over_M = P.deltaT/M_sec # needed for solver sanity at end
write_par_file(retrieve_directory, (m1InMsun+m2InMsun),m1InMsun/m2InMsun, P.s1z, P.s2z, P.lambda1,P.lambda2,P.fmin,dt_over_M)
cmd = dirBaseFiles+"/TEOBResumS.x -p my.par"
print(" Generating tidal EOB with ", cmd)
os.chdir(retrieve_directory); os.system(cmd);
# First loop: Create all the basic mode data
# This should ALREADY BE IN PHYSICAL TIME UNITS but have UNPHYSICAL distance scales
nu = lalsimutils.symRatio(P.m1,P.m2)
delta = (m1InMsun- m2InMsun)/(m1InMsun+m2InMsun)
# h_lm = A exp (- i phi)
# time/M Amp_21 phi_21 Amp_22 phi_22 Amp_33 phi_33
hlm_data_raw = np.loadtxt(retrieve_directory + "/hlm_insp.dat")
# DELETE RESULTS
print(" Deleting intermediate files...", retrieve_directory)
shutil.rmtree(retrieve_directory)
tmin = np.min(hlm_data_raw[:,0])
tmax = np.max(hlm_data_raw[:,0])
tvals = np.array(hlm_data_raw[:,0]) # copy
print(" Loading time range ", tvals[0], tvals[-1], " in dimensionless time ")
# Rescale time units (previously done in matlab code)
tvals *= (m1InMsun+m2InMsun)*MsunInSec
tmax *= (m1InMsun+m2InMsun)*MsunInSec
tmin *= (m1InMsun+m2InMsun)*MsunInSec
col_A_22 = internal_ModeLookup[(2,2)][0]
t_ref = tvals[np.argmax( np.abs(hlm_data_raw[:,col_A_22]) )] # peak of 22 mode
# shift all times, if necessary
if align_at_peak_l2_m2_emission:
tvals += -t_ref
t_ref = 0
print(" Time range after timeshift and rescaling to seconds ", tvals[0], tvals[-1])
# taper functuion:
# DISABLE: it so happens we taper *again* in hlmoft !
def fnTaperHere(x,tmax=tmax,tmin=tmin):
tTaperEnd= 10./P.fmin
return np.piecewise(x , [x<tmin, x>tmin+tTaperEnd, np.logical_and(x>tmin,x<=tmin+tTaperEnd)],
[(lambda z:0), (lambda z: 1)
(lambda z, tm=tmin,dt=tmin+tTaperEnd: 0.5-0.5*np.cos(np.pi* (z-tm)/dt))
]
)
for mode in internal_ModesAvailable:
if mode[0]<= lmax:
self.waveform_modes_uniform_in_time[mode] =False
col_t =0
col_A =internal_ModeLookup[mode][0]
col_P =internal_ModeLookup[mode][1]
datA = np.array(hlm_data_raw[:,col_A]) # important to ALLOCATE so we are not accessing a pointer / same data
datP = np.array( (-1)* hlm_data_raw[:,col_P]) # allocate so not a copy
# nan removal: occasionally, TEOBResumS code can nan-pad at end (e,g., negative spins)
if rosDebug:
print(" Mode ", mode, " nan check for phase ", np.sum(np.isnan(datP)), " out of ", len(datP))
print(" Mode ", mode, " nan check for amp ", np.sum(np.isnan(datA)), " out of ", len(datA))
datP[np.isnan(datP)] = 0 # zero this out
datA[np.isnan(datA)] = 0 # zero this out
# Create, if symmetric
if mode[1]<0: # (-1)^l conjugate
datP *= -1; # complex conjugate
datP += mode[0]*np.pi # (-1)^l factor
# # Add factor of 'nu' that was missing (historical)
if mode[1] %2 ==0 :
datA *= nu # important we are not accessing a copy
else:
datA*= nu *delta
# fnA = compose(np.exp,UnivariateSpline(tvals, np.log(datA+1e-40),ext='zeros',k=3,s=0)) # s=0 prevents horrible behavior. Interpolate logA so A>0 is preserved.
# fnA = UnivariateSpline(tvals, datA,ext='zeros',k=3,s=0) # s=0 prevents horrible behavior. Sometimes interpolation in the log behaves oddly
# fnP = UnivariateSpline(tvals, datP,ext='const',k=3,s=0) # s=0 prevents horrible behavior. 'const' uses boundary value to prevent discontinuity
# self.waveform_modes_complex_interpolated_amplitude[mode] = fnA #lambda x,s=fnA,t=fnTaperHere: t(x)*s(x)
# self.waveform_modes_complex_interpolated_phase[mode] = fnP
fnA = UnivariateSpline(tvals, datA,k=3,s=0) # s=0 prevents horrible behavior. Sometimes interpolation in the log behaves oddly
fnP = UnivariateSpline(tvals, datP,k=3,s=0) # s=0 prevents horrible behavior. 'const' uses boundary value to prevent discontinuity
self.waveform_modes_complex_interpolated_amplitude[mode] = RangeWrap1dAlt([tvals[0],tvals[-1]],0,fnA) #lambda x,s=fnA,t=fnTaperHere: t(x)*s(x)
self.waveform_modes_complex_interpolated_phase[mode] = RangeWrap1dAlt([tvals[0],tvals[-1]], 0,fnP)
# Estimate starting frequency. Historical interest
nOffsetForPhase = 0 # ad-hoc offset based on uniform sampling
nStride = 5
self.fMinMode[mode] = np.abs((datP[nOffsetForPhase+nStride]-datP[nOffsetForPhase])/(2*np.pi*(tvals[nOffsetForPhase+nStride]-tvals[nOffsetForPhase]))) # historical interest
if mode ==(2,2):
self.fOrbitLower = 0.5*self.fMinMode[mode]
if rosDebug:
print(" Identifying initial orbital frequency ", self.fOrbitLower, " which had better be related to ", P.fmin)
if rosDebug:
print(mode, self.fMinMode[mode])
# Historical/used for plotting only
datC = datA*np.exp(1j*datP)
self.waveform_modes[mode] =np.zeros( (len(datC),3),dtype=float)
self.waveform_modes[mode][:,0] = tvals
self.waveform_modes[mode][:,1] = np.real(datC)
self.waveform_modes[mode][:,2] = | |
= kwargs[kw]
if _debug: Choice._debug(" - my_kwargs: %r", my_kwargs)
if _debug: Choice._debug(" - other_kwargs: %r", other_kwargs)
# call some superclass, if there is one
super(Choice, self).__init__(**other_kwargs)
# set the attribute/property values for the ones provided
for element in self.choiceElements:
setattr(self, element.name, my_kwargs.get(element.name, None))
def encode(self, taglist):
    """
    Encode the chosen (first non-None) element onto taglist.

    Atomic choices become a single (possibly context-converted) tag;
    constructed choices are bracketed by opening/closing context tags.

    :raises TypeError: the set value is not an instance of the element's class
    :raises AttributeError: no choice element has been set

    NOTE: raises use the parenthesized form, valid under both Python 2 and 3
    (the original `raise X, "msg"` form is a SyntaxError under Python 3).
    """
    if _debug: Choice._debug("(%r)encode %r", self.__class__.__name__, taglist)

    for element in self.choiceElements:
        value = getattr(self, element.name, None)
        if value is None:
            continue

        if issubclass(element.klass, (Atomic, AnyAtomic)):
            # a helper cooperates between the atomic value and the tag
            helper = element.klass(value)

            # build a tag and encode the data into it
            tag = Tag()
            helper.encode(tag)

            # convert it to context encoding
            if element.context is not None:
                tag = tag.app_to_context(element.context)

            # now encode the tag
            taglist.append(tag)
            break

        elif isinstance(value, element.klass):
            # encode an opening tag
            if element.context is not None:
                taglist.append(OpeningTag(element.context))

            # encode the value
            value.encode(taglist)

            # encode a closing tag
            if element.context is not None:
                taglist.append(ClosingTag(element.context))
            break

        else:
            raise TypeError("'%s' must be a %s" % (element.name, element.klass.__name__))
    else:
        raise AttributeError("missing choice of %s" % (self.__class__.__name__,))
def decode(self, taglist):
    """
    Decode one of the choice alternatives from taglist.

    Scans choiceElements for the alternative matching the next tag,
    decodes it, then sets that attribute and None for all the others.

    :raises AttributeError: taglist is exhausted or no alternative matches
    :raises DecodingError: a required closing tag is missing or mismatched
    :raises NotImplementedError: a non-atomic choice lacks context encoding

    NOTE: `dict.has_key` was replaced with the `in` operator and raises use
    the parenthesized form — both valid under Python 2 AND required for
    Python 3 compatibility.
    """
    if _debug: Choice._debug("(%r)decode %r", self.__class__.__name__, taglist)

    # peek at the element
    tag = taglist.Peek()
    if tag is None:
        raise AttributeError("missing choice of %s" % (self.__class__.__name__,))
    if tag.tagClass == Tag.closingTagClass:
        raise AttributeError("missing choice of %s" % (self.__class__.__name__,))

    # keep track of which one was found
    foundElement = {}

    # figure out which choice it is
    for element in self.choiceElements:
        if _debug: Choice._debug(" - checking choice: %s", element.name)

        # check for a sequence element
        if element.klass in _sequence_of_classes:
            # check for context encoding
            if element.context is None:
                raise NotImplementedError("choice of a SequenceOf must be context encoded")
            # match the context tag number
            if tag.tagClass != Tag.contextTagClass or tag.tagNumber != element.context:
                continue
            taglist.Pop()

            # a helper cooperates between the atomic value and the tag
            helper = element.klass()
            helper.decode(taglist)

            # now save the value
            foundElement[element.name] = helper.value

            # check for context closing tag
            tag = taglist.Pop()
            if tag.tagClass != Tag.closingTagClass or tag.tagNumber != element.context:
                raise DecodingError("'%s' expected closing tag %d" % (element.name, element.context))

            # done
            if _debug: Choice._debug(" - found choice (sequence)")
            break

        # check for an atomic element
        elif issubclass(element.klass, (Atomic, AnyAtomic)):
            # convert it to application encoding
            if element.context is not None:
                if tag.tagClass != Tag.contextTagClass or tag.tagNumber != element.context:
                    continue
                tag = tag.context_to_app(element.klass._app_tag)
            else:
                if tag.tagClass != Tag.applicationTagClass or tag.tagNumber != element.klass._app_tag:
                    continue

            # consume the tag
            taglist.Pop()

            # a helper cooperates between the atomic value and the tag
            helper = element.klass(tag)

            # now save the value
            foundElement[element.name] = helper.value

            # done
            if _debug: Choice._debug(" - found choice (atomic)")
            break

        # some kind of structure
        else:
            # check for context encoding
            if element.context is None:
                raise NotImplementedError("choice of non-atomic data must be context encoded")
            if tag.tagClass != Tag.openingTagClass or tag.tagNumber != element.context:
                continue
            taglist.Pop()

            # build a value and decode it
            value = element.klass()
            value.decode(taglist)

            # now save the value
            foundElement[element.name] = value

            # check for the correct closing tag
            tag = taglist.Pop()
            if tag.tagClass != Tag.closingTagClass or tag.tagNumber != element.context:
                raise DecodingError("'%s' expected closing tag %d" % (element.name, element.context))

            # done
            if _debug: Choice._debug(" - found choice (structure)")
            break
    else:
        raise AttributeError("missing choice of %s" % (self.__class__.__name__,))

    # now save the value and None everywhere else
    for element in self.choiceElements:
        setattr(self, element.name, foundElement.get(element.name, None))
def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
    """Write a human-readable dump of the chosen (non-None) element to file."""
    pad = " " * indent
    for element in self.choiceElements:
        value = getattr(self, element.name, None)
        if value is None:
            continue
        if issubclass(element.klass, (Atomic, AnyAtomic)):
            file.write("%s%s = %r\n" % (pad, element.name, value))
            break
        if isinstance(value, element.klass):
            file.write("%s%s\n" % (pad, element.name))
            value.debug_contents(indent+1, file, _ids)
            break
        # type mismatch: report it and keep scanning (no break, mirroring
        # the original control flow so the for/else can still fire)
        file.write("%s%s must be a %s" % (pad, element.name, element.klass.__name__))
    else:
        # the loop never broke: no element was chosen at all
        file.write("%smissing choice of %s" % (pad, self.__class__.__name__))
def dict_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict."""
    if _debug: _log.debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)

    # make/extend the dictionary of content
    if use_dict is None:
        use_dict = as_class()

    # only the chosen (first non-None) element contributes an entry
    for element in self.choiceElements:
        chosen = getattr(self, element.name, None)
        if chosen is None:
            continue

        if issubclass(element.klass, Atomic):
            entry = chosen                                  ### ambiguous
        elif issubclass(element.klass, AnyAtomic):
            entry = chosen.value                            ### ambiguous
        elif isinstance(chosen, element.klass):
            entry = chosen.dict_contents(as_class=as_class)
        # NOTE: an unmatched klass leaves 'entry' unbound here, exactly as
        # the original left mapped_value unbound (NameError at runtime)
        use_dict[element.name] = entry
        break

    # return what we built/updated
    return use_dict
#
# Any
#
@bacpypes_debugging
class Any:
def __init__(self, *args):
self.tagList = TagList()
# cast in the args
for arg in args:
self.cast_in(arg)
def encode(self, taglist):
if _debug: Any._debug("encode %r", taglist)
taglist.extend(self.tagList)
def decode(self, taglist):
if _debug: Any._debug("decode %r", taglist)
lvl = 0
while len(taglist) != 0:
tag = taglist.Peek()
if tag.tagClass == Tag.openingTagClass:
lvl += 1
elif tag.tagClass == Tag.closingTagClass:
lvl -= 1
if lvl < 0: break
self.tagList.append(taglist.Pop())
# make sure everything balances
if lvl > 0:
raise DecodingError, "mismatched open/close tags"
def cast_in(self, element):
"""encode the element into the internal tag list."""
if _debug: Any._debug("cast_in %r", element)
t = TagList()
if isinstance(element, Atomic):
tag = Tag()
element.encode(tag)
t.append(tag)
elif isinstance(element, AnyAtomic):
tag = Tag()
element.value.encode(tag)
t.append(tag)
else:
element.encode(t)
self.tagList.extend(t.tagList)
def cast_out(self, klass):
"""Interpret the content as a particular class."""
if _debug: Any._debug("cast_out %r", klass)
# check for a sequence element
if _sequence_of_classes.has_key(klass):
# build a sequence helper
helper = klass()
# make a copy of the tag list
t = TagList(self.tagList[:])
# let it decode itself
helper.decode(t)
# make sure everything was consumed
if len(t) != 0:
raise DecodingError, "incomplete cast"
# return what was built
return helper.value
# check for an array element
elif _array_of_classes.has_key(klass):
# build a sequence helper
helper = klass()
# make a copy of the tag list
t = TagList(self.tagList[:])
# let it decode itself
helper.decode(t)
# make sure everything was consumed
if len(t) != 0:
raise DecodingError, "incomplete cast"
# return what was built with Python list semantics
return helper.value[1:]
elif issubclass(klass, (Atomic, AnyAtomic)):
# make sure there's only one piece
if len(self.tagList) == 0:
raise DecodingError, "missing cast component"
if len(self.tagList) > 1:
raise DecodingError, "too many cast components"
if _debug: Any._debug(" - building helper: %r", klass)
# a helper cooperates between the atomic value and the tag
helper = klass(self.tagList[0])
# return the value
return helper.value
else:
if _debug: Any._debug(" - building value: %r", klass)
# build an element
value = klass()
# make a copy of the tag list
t = TagList(self.tagList[:])
# let it decode itself
value.decode(t)
# make sure everything was consumed
if len(t) != 0:
raise DecodingError, "incomplete cast"
# return what was built
return value
def is_application_class_null(self):
if _debug: Any._debug("is_application_class_null")
return (len(self.tagList) == 1) and (self.tagList[0].tagClass == Tag.applicationTagClass) and (self.tagList[0].tagNumber == Tag.nullAppTag)
    def debug_contents(self, indent=1, file=sys.stdout, _ids=None):
        """Delegate debug output to the underlying TagList."""
        self.tagList.debug_contents(indent, file, _ids)
def dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: Any._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# result will be a list
rslt_list = []
# loop through the tags
for tag in self.tagList:
# build a tag thing
use_dict = as_class()
# save the pieces
use_dict.__setitem__('class', tag.tagClass)
use_dict.__setitem__('number', tag.tagNumber)
use_dict.__setitem__('lvt', tag.tagLVT)
use_dict.__setitem__('data', '.'.join('%02X' % ord(c) for c in tag.tagData))
# add it to the list
rslt_list = use_dict
# return what we built
return rslt_list
#
# AnyAtomic
#
@bacpypes_debugging
class AnyAtomic:
def __init__(self, arg=None):
if _debug: AnyAtomic._debug("__init__ %r", arg)
# default to no value
self.value = None
if arg is None:
pass
elif isinstance(arg, Atomic):
self.value = arg
elif isinstance(arg, Tag):
self.value = arg.app_to_object()
else:
raise TypeError, "invalid constructor datatype"
    def encode(self, tag):
        """Encode the wrapped atomic value into tag."""
        if _debug: AnyAtomic._debug("encode %r", tag)
        self.value.encode(tag)
def decode(self, tag):
if _debug: AnyAtomic._debug("decode %r", tag)
if (tag.tagClass != Tag.applicationTagClass):
raise ValueError, "application tag required"
# get the data
self.value = tag.app_to_object()
def __str__(self):
return "AnyAtomic(%s)" % (str(self.value), )
def __repr__(self):
xid = id(self)
if (xid < 0): xid += (1L << 32)
desc = self.__module__ + '.' + self.__class__.__name__
if self.value:
desc += "(" + self.value.__class__.__name__ + ")"
desc += ' ' + str(self.value)
return | |
self._debug['u32_0x18']['start'] = self._io.pos()
self.u32_0x18 = self._io.read_u4be()
self._debug['u32_0x18']['end'] = self._io.pos()
class PlatMvspn0x74(KaitaiStruct):
SEQ_FIELDS = ["u32_0x34", "u32_0x38", "u32_0x3c"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['u32_0x34']['start'] = self._io.pos()
self.u32_0x34 = self._io.read_u4be()
self._debug['u32_0x34']['end'] = self._io.pos()
self._debug['u32_0x38']['start'] = self._io.pos()
self.u32_0x38 = self._io.read_u4be()
self._debug['u32_0x38']['end'] = self._io.pos()
self._debug['u32_0x3c']['start'] = self._io.pos()
self.u32_0x3c = self._io.read_u4be()
self._debug['u32_0x3c']['end'] = self._io.pos()
class PlatOrbit(KaitaiStruct):
SEQ_FIELDS = ["u16_120", "u16_136", "u16_134", "u16_132", "u32_116", "name", "f_112", "f_108", "f_104", "f_100", "f_96", "f_92", "f_88", "f_84", "f_80", "u32_176"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['u16_120']['start'] = self._io.pos()
self.u16_120 = self._io.read_u2be()
self._debug['u16_120']['end'] = self._io.pos()
self._debug['u16_136']['start'] = self._io.pos()
self.u16_136 = self._io.read_u2be()
self._debug['u16_136']['end'] = self._io.pos()
self._debug['u16_134']['start'] = self._io.pos()
self.u16_134 = self._io.read_u2be()
self._debug['u16_134']['end'] = self._io.pos()
self._debug['u16_132']['start'] = self._io.pos()
self.u16_132 = self._io.read_u2be()
self._debug['u16_132']['end'] = self._io.pos()
self._debug['u32_116']['start'] = self._io.pos()
self.u32_116 = self._io.read_u4be()
self._debug['u32_116']['end'] = self._io.pos()
self._debug['name']['start'] = self._io.pos()
self.name = (self._io.read_bytes(8)).decode(u"ASCII")
self._debug['name']['end'] = self._io.pos()
self._debug['f_112']['start'] = self._io.pos()
self.f_112 = self._io.read_f4be()
self._debug['f_112']['end'] = self._io.pos()
self._debug['f_108']['start'] = self._io.pos()
self.f_108 = self._io.read_f4be()
self._debug['f_108']['end'] = self._io.pos()
self._debug['f_104']['start'] = self._io.pos()
self.f_104 = self._io.read_f4be()
self._debug['f_104']['end'] = self._io.pos()
self._debug['f_100']['start'] = self._io.pos()
self.f_100 = self._io.read_f4be()
self._debug['f_100']['end'] = self._io.pos()
self._debug['f_96']['start'] = self._io.pos()
self.f_96 = self._io.read_f4be()
self._debug['f_96']['end'] = self._io.pos()
self._debug['f_92']['start'] = self._io.pos()
self.f_92 = self._io.read_f4be()
self._debug['f_92']['end'] = self._io.pos()
self._debug['f_88']['start'] = self._io.pos()
self.f_88 = self._io.read_f4be()
self._debug['f_88']['end'] = self._io.pos()
self._debug['f_84']['start'] = self._io.pos()
self.f_84 = self._io.read_f4be()
self._debug['f_84']['end'] = self._io.pos()
self._debug['f_80']['start'] = self._io.pos()
self.f_80 = self._io.read_f4be()
self._debug['f_80']['end'] = self._io.pos()
self._debug['u32_176']['start'] = self._io.pos()
self.u32_176 = self._io.read_u4be()
self._debug['u32_176']['end'] = self._io.pos()
class PlatSpike(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
pass
class PlatSpecial0x8e(KaitaiStruct):
SEQ_FIELDS = ["enable"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['enable']['start'] = self._io.pos()
self.enable = self._io.read_u2be()
self._debug['enable']['end'] = self._io.pos()
class PlatActorSurfaceType(KaitaiStruct):
SEQ_FIELDS = ["value"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['value']['start'] = self._io.pos()
self.value = self._io.read_u2be()
self._debug['value']['end'] = self._io.pos()
class Plat0x9f(KaitaiStruct):
SEQ_FIELDS = ["u32_0x6c", "u32_0x70", "u32_0x1c", "u32_0x28"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['u32_0x6c']['start'] = self._io.pos()
self.u32_0x6c = self._io.read_u4be()
self._debug['u32_0x6c']['end'] = self._io.pos()
self._debug['u32_0x70']['start'] = self._io.pos()
self.u32_0x70 = self._io.read_u4be()
self._debug['u32_0x70']['end'] = self._io.pos()
self._debug['u32_0x1c']['start'] = self._io.pos()
self.u32_0x1c = self._io.read_u4be()
self._debug['u32_0x1c']['end'] = self._io.pos()
self._debug['u32_0x28']['start'] = self._io.pos()
self.u32_0x28 = self._io.read_u4be()
self._debug['u32_0x28']['end'] = self._io.pos()
class EnemyInstructionDash(KaitaiStruct):
SEQ_FIELDS = ["destination_x", "destination_y", "destination_z", "vel_magnitude"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['destination_x']['start'] = self._io.pos()
self.destination_x = self._io.read_f4be()
self._debug['destination_x']['end'] = self._io.pos()
self._debug['destination_y']['start'] = self._io.pos()
self.destination_y = self._io.read_f4be()
self._debug['destination_y']['end'] = self._io.pos()
self._debug['destination_z']['start'] = self._io.pos()
self.destination_z = self._io.read_f4be()
self._debug['destination_z']['end'] = self._io.pos()
self._debug['vel_magnitude']['start'] = self._io.pos()
self.vel_magnitude = self._io.read_f4be()
self._debug['vel_magnitude']['end'] = self._io.pos()
class EnvironmentalSound(KaitaiStruct):
SEQ_FIELDS = ["sound_id", "volume", "flags", "h_0x06", "h_0x08", "h_0x0a", "h_0x0c", "h_0x0e", "x", "y", "z", "radius"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['sound_id']['start'] = self._io.pos()
self.sound_id = self._io.read_u2be()
self._debug['sound_id']['end'] = self._io.pos()
self._debug['volume']['start'] = self._io.pos()
self.volume = self._io.read_u2be()
self._debug['volume']['end'] = self._io.pos()
self._debug['flags']['start'] = self._io.pos()
self.flags = self._io.read_u2be()
self._debug['flags']['end'] = self._io.pos()
self._debug['h_0x06']['start'] = self._io.pos()
self.h_0x06 = self._io.read_u2be()
self._debug['h_0x06']['end'] = self._io.pos()
self._debug['h_0x08']['start'] = self._io.pos()
self.h_0x08 = self._io.read_u2be()
self._debug['h_0x08']['end'] = self._io.pos()
self._debug['h_0x0a']['start'] = self._io.pos()
self.h_0x0a = self._io.read_u2be()
self._debug['h_0x0a']['end'] = self._io.pos()
self._debug['h_0x0c']['start'] = self._io.pos()
self.h_0x0c = self._io.read_u2be()
self._debug['h_0x0c']['end'] = self._io.pos()
self._debug['h_0x0e']['start'] = self._io.pos()
self.h_0x0e = self._io.read_u2be()
self._debug['h_0x0e']['end'] = self._io.pos()
self._debug['x']['start'] = self._io.pos()
self.x = self._io.read_f4be()
self._debug['x']['end'] = self._io.pos()
self._debug['y']['start'] = self._io.pos()
self.y = self._io.read_f4be()
self._debug['y']['end'] = self._io.pos()
self._debug['z']['start'] = self._io.pos()
self.z = self._io.read_f4be()
self._debug['z']['end'] = self._io.pos()
self._debug['radius']['start'] = self._io.pos()
self.radius = self._io.read_f4be()
self._debug['radius']['end'] = self._io.pos()
class PlatSetInitialPos(KaitaiStruct):
SEQ_FIELDS = ["x", "y", "z"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['x']['start'] = self._io.pos()
self.x = self._io.read_f4be()
self._debug['x']['end'] = self._io.pos()
self._debug['y']['start'] = self._io.pos()
self.y = self._io.read_f4be()
self._debug['y']['end'] = self._io.pos()
self._debug['z']['start'] = self._io.pos()
self.z = self._io.read_f4be()
self._debug['z']['end'] = self._io.pos()
class Actor0xbf(KaitaiStruct):
SEQ_FIELDS = ["mode", "child_mesh_id"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['mode']['start'] = self._io.pos()
self.mode = self._io.read_u2be()
self._debug['mode']['end'] = self._io.pos()
self._debug['child_mesh_id']['start'] = self._io.pos()
self.child_mesh_id = self._io.read_u4be()
self._debug['child_mesh_id']['end'] = self._io.pos()
class PlatMaxVelocity(KaitaiStruct):
SEQ_FIELDS = ["velocity"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['velocity']['start'] = self._io.pos()
self.velocity = self._io.read_f4be()
self._debug['velocity']['end'] = self._io.pos()
class EnemyFinalize(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
pass
class PlatMvspn0x59(KaitaiStruct):
SEQ_FIELDS = ["u16_0x24", "u32_0x20", "u32_0x28", "u32_0x2c", "u32_0x30"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['u16_0x24']['start'] = self._io.pos()
self.u16_0x24 = self._io.read_u2be()
self._debug['u16_0x24']['end'] = self._io.pos()
self._debug['u32_0x20']['start'] = self._io.pos()
self.u32_0x20 = self._io.read_u4be()
self._debug['u32_0x20']['end'] = self._io.pos()
self._debug['u32_0x28']['start'] = self._io.pos()
self.u32_0x28 = self._io.read_u4be()
self._debug['u32_0x28']['end'] = self._io.pos()
self._debug['u32_0x2c']['start'] = self._io.pos()
self.u32_0x2c = self._io.read_u4be()
self._debug['u32_0x2c']['end'] = self._io.pos()
self._debug['u32_0x30']['start'] = self._io.pos()
self.u32_0x30 = self._io.read_u4be()
self._debug['u32_0x30']['end'] = self._io.pos()
class Cameo(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
pass
class PlatConstantSpin(KaitaiStruct):
SEQ_FIELDS = ["axis", "initial_theta", "speed"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['axis']['start'] = self._io.pos()
self.axis = self._io.read_u2be()
self._debug['axis']['end'] = self._io.pos()
self._debug['initial_theta']['start'] = self._io.pos()
self.initial_theta = self._io.read_f4be()
self._debug['initial_theta']['end'] = self._io.pos()
self._debug['speed']['start'] = self._io.pos()
self.speed = self._io.read_f4be()
self._debug['speed']['end'] = self._io.pos()
class VentDutyCycle(KaitaiStruct):
SEQ_FIELDS = ["frames_off", "frames_on"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['frames_off']['start'] = self._io.pos()
self.frames_off = self._io.read_s2be()
self._debug['frames_off']['end'] = self._io.pos()
self._debug['frames_on']['start'] = self._io.pos()
self.frames_on = self._io.read_s2be()
self._debug['frames_on']['end'] = self._io.pos()
class Plat0xc3(KaitaiStruct):
SEQ_FIELDS = ["u16_0x86", "u32_0x78_0x80", "u16_0x84"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['u16_0x86']['start'] = self._io.pos()
self.u16_0x86 = self._io.read_u2be()
self._debug['u16_0x86']['end'] = self._io.pos()
self._debug['u32_0x78_0x80']['start'] = self._io.pos()
self.u32_0x78_0x80 = self._io.read_u2be()
self._debug['u32_0x78_0x80']['end'] = self._io.pos()
self._debug['u16_0x84']['start'] = self._io.pos()
self.u16_0x84 = self._io.read_u2be()
self._debug['u16_0x84']['end'] = self._io.pos()
class EndLevelData(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
pass
class SetObjectSparkle(KaitaiStruct):
SEQ_FIELDS = ["period"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['period']['start'] = self._io.pos()
self.period = self._io.read_u2be()
self._debug['period']['end'] = self._io.pos()
class PlatFan0x8a(KaitaiStruct):
SEQ_FIELDS = ["u16_0x0c", "u32_0x48", "u32_0x4c", "u32_0x50", "u32_0x10", "u32_0x14", "u32_0x18", "u32_0x1c"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
self._debug['u16_0x0c']['start'] = self._io.pos()
self.u16_0x0c = self._io.read_u2be()
self._debug['u16_0x0c']['end'] = self._io.pos()
self._debug['u32_0x48']['start'] = self._io.pos()
self.u32_0x48 = self._io.read_u4be()
self._debug['u32_0x48']['end'] = self._io.pos()
self._debug['u32_0x4c']['start'] = self._io.pos()
self.u32_0x4c = self._io.read_u4be()
self._debug['u32_0x4c']['end'] = self._io.pos()
self._debug['u32_0x50']['start'] = self._io.pos()
self.u32_0x50 = self._io.read_u4be()
self._debug['u32_0x50']['end'] = self._io.pos()
self._debug['u32_0x10']['start'] = self._io.pos()
self.u32_0x10 = self._io.read_u4be()
self._debug['u32_0x10']['end'] = self._io.pos()
self._debug['u32_0x14']['start'] = self._io.pos()
self.u32_0x14 = self._io.read_u4be()
self._debug['u32_0x14']['end'] = self._io.pos()
self._debug['u32_0x18']['start'] = self._io.pos()
self.u32_0x18 = self._io.read_u4be()
self._debug['u32_0x18']['end'] = self._io.pos()
self._debug['u32_0x1c']['start'] = self._io.pos()
self.u32_0x1c = self._io.read_u4be()
self._debug['u32_0x1c']['end'] = self._io.pos()
class PlatSpinSound0xc5(KaitaiStruct):
SEQ_FIELDS = ["sound_id", "volume", "pitch"]
def __init__(self, _io, _parent=None, _root=None):
self._io | |
= -vertices[:, 1]
triangles = np.array(self.car_model_dict[car_name]['faces']) - 1
translation = np.array(translations[gt_car_idx])
Rt = np.eye(4)
Rt[:3, 3] = translation
# project 3D points to 2d image plane
# Apollo below is correct
# https://en.wikipedia.org/wiki/Euler_angles
# Y, P, R = euler_to_Rot_YPR(eular_angle[1], eular_angle[0], eular_angle[2])
rot_mat = euler_to_Rot(eular_angle[0], eular_angle[1], eular_angle[2]).T
# check eular from rot mat
Rt[:3, :3] = rot_mat
Rt = Rt[:3, :]
P = np.ones((vertices.shape[0], vertices.shape[1] + 1))
P[:, :-1] = vertices
P = P.T
img_cor_points = np.dot(self.camera_matrix, np.dot(Rt, P))
img_cor_points = img_cor_points.T
img_cor_points[:, 0] /= img_cor_points[:, 2]
img_cor_points[:, 1] /= img_cor_points[:, 2]
x1, y1, x2, y2 = img_cor_points[:, 0].min(), img_cor_points[:, 1].min(), img_cor_points[:,
0].max(), img_cor_points[:,
1].max()
bboxes.append([x1, y1, x2, y2])
# project 3D points to 2d image plane
mask_seg = np.zeros(image.shape, dtype=np.uint8)
for t in triangles:
coord = np.array([img_cor_points[t[0]][:2], img_cor_points[t[1]][:2], img_cor_points[t[2]][:2]],
dtype=np.int32)
# This will draw the mask for segmenation
# cv2.drawContours(mask_seg, np.int32([coord]), 0, (255, 255, 255), -1)
cv2.polylines(mask_seg, np.int32([coord]), 1, (0, 255, 0))
mask_all += mask_seg
mask_all = mask_all * 255 / mask_all.max()
cv2.addWeighted(image.astype(np.uint8), 1.0, mask_all.astype(np.uint8), alpha, 0, merged_image)
im_write_file = os.path.join(draw_dir, img_name.split('/')[-1])
print("Writing image to: %s" % os.path.join(draw_dir, img_name.split('/')[-1]))
imwrite(merged_image, im_write_file)
return True
    def plot_and_examine(self, annotations, draw_dir='/data/Kaggle/wudi_data/train_image_gt_vis'):
        """Overlay ground-truth car meshes on their images and save composites.

        For each annotation the stored euler angles are converted to an
        upper-hemisphere quaternion and cross-checked against the stored
        quaternion (a mismatch prints a warning); each car model is then
        projected through self.camera_matrix and drawn as a green wireframe,
        and the blended result is written to draw_dir.

        Args:
            annotations: list of dicts with keys 'filename', 'bboxes',
                'labels', 'eular_angles', 'quaternion_semispheres' and
                'translations' (parallel per-car lists).
            draw_dir: output directory for the rendered images.

        Returns:
            True when all annotations have been processed.
        """
        # for ann in tqdm(annotations):
        # for ann in tqdm(annotations[5000: 5010]):
        for ann in tqdm(annotations):
            img_name = ann['filename']
            image = imread(img_name)
            mask_all = np.zeros(image.shape)
            merged_image = image.copy()
            alpha = 0.9  # transparency
            bboxes = ann['bboxes']
            labels = ann['labels']
            eular_angles = ann['eular_angles']
            quaternion_semispheres = ann['quaternion_semispheres']
            translations = ann['translations']
            # all per-car lists must stay parallel
            assert len(bboxes) == len(labels) == len(eular_angles) == len(quaternion_semispheres) == len(translations)
            for gt_car_idx in range(len(ann['quaternion_semispheres'])):
                eular_angle = np.array(eular_angles[gt_car_idx])
                # if 'Camera' in img_name:  # this is an apolloscape dataset
                #     eular_angle_kaggle = np.array([eular_angle[1], eular_angle[0], eular_angle[2]])
                # elif 'ID' in img_name:
                #     eular_angle_kaggle = eular_angle
                # else:
                #     print("Unidentified class")
                # round-trip euler -> quaternion -> euler and compare against
                # the euler angles derived from the stored quaternion
                quaternion = euler_angles_to_quaternions(eular_angle)
                quaternion_semisphere = quaternion_upper_hemispher(quaternion)
                ea_make = quaternion_to_euler_angle(quaternion_semisphere)
                json_q = quaternion_semispheres[gt_car_idx]
                ea_json = quaternion_to_euler_angle(json_q)
                ea_json = np.array(ea_json)
                # q1 = R.from_euler('xyz', eular_angle)
                # q2 = R.from_euler('xyz', q)
                # print('GT eular angle: ', eular_angle)
                # print('Generate eular angle:', ea_make)
                # print('Json generated eular angle', ea_json)
                # print('Generate q:', quaternion_semisphere)
                # print('Json q:', json_q)
                # print("diff is: %f" % np.sum(np.abs(ea_json-ea_make)))
                # warn if the re-derived rotation disagrees with the stored one
                if self.RotationDistance(ea_make, ea_json) > 0.01:
                    print('Wrong!!!!!!!!!!!!!')
                # rendering the car according to:
                # Augmented Reality | Kaggle
                # car_id2name is from:
                # https://github.com/ApolloScapeAuto/dataset-api/blob/master/car_instance/car_models.py
                car_name = car_id2name[labels[gt_car_idx]].name
                vertices = np.array(self.car_model_dict[car_name]['vertices'])
                # flip the model's y axis to match the image frame
                vertices[:, 1] = -vertices[:, 1]
                # faces are 1-indexed in the model file
                triangles = np.array(self.car_model_dict[car_name]['faces']) - 1
                translation = np.array(translations[gt_car_idx])
                Rt = np.eye(4)
                Rt[:3, 3] = translation
                # project 3D points to 2d image plane
                # Apollo below is correct
                # https://en.wikipedia.org/wiki/Euler_angles
                # Y, P, R = euler_to_Rot_YPR(eular_angle[1], eular_angle[0], eular_angle[2])
                rot_mat = euler_to_Rot(-eular_angle[1], -eular_angle[0], -eular_angle[2]).T
                # check eular from rot mat
                Rt[:3, :3] = rot_mat
                Rt = Rt[:3, :]
                # homogeneous model coordinates, one column per vertex
                P = np.ones((vertices.shape[0], vertices.shape[1] + 1))
                P[:, :-1] = vertices
                P = P.T
                img_cor_points = np.dot(self.camera_matrix, np.dot(Rt, P))
                img_cor_points = img_cor_points.T
                # perspective divide
                img_cor_points[:, 0] /= img_cor_points[:, 2]
                img_cor_points[:, 1] /= img_cor_points[:, 2]
                x1, y1, x2, y2 = img_cor_points[:, 0].min(), img_cor_points[:, 1].min(), img_cor_points[:,
                                                                                        0].max(), img_cor_points[:,
                                                                                                  1].max()
                # NOTE(review): this appends the projected box back into
                # ann['bboxes'], mutating the annotation in place while its
                # length was just asserted above -- confirm this is intended.
                bboxes.append([x1, y1, x2, y2])
                # project 3D points to 2d image plane
                mask_seg = np.zeros(image.shape, dtype=np.uint8)
                for t in triangles:
                    coord = np.array([img_cor_points[t[0]][:2], img_cor_points[t[1]][:2], img_cor_points[t[2]][:2]],
                                     dtype=np.int32)
                    # This will draw the mask for segmenation
                    # cv2.drawContours(mask_seg, np.int32([coord]), 0, (255, 255, 255), -1)
                    cv2.polylines(mask_seg, np.int32([coord]), 1, (0, 255, 0))
                mask_all += mask_seg
            # normalise accumulated mask to 0-255; NOTE(review): divides by
            # zero when no mesh pixels were drawn for this image -- verify.
            mask_all = mask_all * 255 / mask_all.max()
            cv2.addWeighted(image.astype(np.uint8), 1.0, mask_all.astype(np.uint8), alpha, 0, merged_image)
            im_write_file = os.path.join(draw_dir, img_name.split('/')[-1])
            print("Writing image to: %s" % os.path.join(draw_dir, img_name.split('/')[-1]))
            imwrite(merged_image, im_write_file)
        return True
def visualise_pred_postprocessing(self, outputs, args):
car_cls_coco = 2
for idx in tqdm(range(len(outputs))):
# ann = self.annotations[idx]
test_folder = '/data/home/yyj/code/kaggle/new_code/Kaggle_PKU_Baidu/data/pku_data/test_images/'
img_name = os.path.join(test_folder, os.path.basename(outputs[idx][2]['file_name']))
if not os.path.isfile(img_name):
assert "Image file does not exist!"
else:
image = imread(img_name)
output = outputs[idx]
# output is a tuple of three elements
bboxes, segms, six_dof = output[0], output[1], output[2]
car_cls_score_pred = six_dof['car_cls_score_pred']
quaternion_pred = six_dof['quaternion_pred']
trans_pred_world = six_dof['trans_pred_world'].copy()
euler_angle = np.array([quaternion_to_euler_angle(x) for x in quaternion_pred])
car_labels = np.argmax(car_cls_score_pred, axis=1)
kaggle_car_labels = [self.unique_car_mode[x] for x in car_labels]
car_names = np.array([car_id2name[x].name for x in kaggle_car_labels])
assert len(bboxes[car_cls_coco]) == len(segms[car_cls_coco]) == len(kaggle_car_labels) \
== len(trans_pred_world) == len(euler_angle) == len(car_names)
# print('change ',trans_pred_world,trans_pred_world_refined)
quaternion_semisphere_refined, flag = refine_yaw_and_roll(image, bboxes[car_cls_coco],
segms[car_cls_coco], car_names, euler_angle,
quaternion_pred, trans_pred_world,
self.car_model_dict,
self.camera_matrix)
if flag:
output[2]['quaternion_pred'] = quaternion_semisphere_refined
euler_angle = np.array([quaternion_to_euler_angle(x) for x in output[2]['quaternion_pred']])
trans_pred_world_refined = restore_x_y_from_z_withIOU(image, bboxes[car_cls_coco], segms[car_cls_coco],
car_names, euler_angle, trans_pred_world,
self.car_model_dict,
self.camera_matrix)
output[2]['trans_pred_world'] = trans_pred_world_refined
# img_box_mesh_refined = self.visualise_box_mesh(image,bboxes[car_cls_coco], segms[car_cls_coco],car_names, euler_angle,trans_pred_world_refined)
# img_box_mesh_refined, iou_flag = self.visualise_box_mesh(image,bboxes[car_cls_coco], segms[car_cls_coco],car_names, euler_angle,trans_pred_world)
# if iou_flag:
# print('iou problem',os.path.basename(img_name))
# img_kaggle = self.visualise_kaggle(image, coords)
# img_mesh = self.visualise_mesh(image, bboxes[car_cls_coco], segms[car_cls_coco], car_names, euler_angle,
# trans_pred_world)
# imwrite(img_kaggle, os.path.join(args.out[:-4] + '_kaggle_vis/' + img_name.split('/')[-1]))
# imwrite(img_mesh, os.path.join(args.out[:-4] + '_mes_vis/' + img_name.split('/')[-1]))
# img_box_mesh_half = cv2.resize(img_box_mesh,None,fx=0.5,fy=0.5)
# img_kaggle_half = cv2.resize(img_kaggle,None,fx=0.5,fy=0.5)
# img_concat = np.concatenate([img_kaggle_half,img_box_mesh_half],axis=1)
# imwrite(img_concat, os.path.join(args.out[:-4] + '_mes_box_vis/' + img_name.split('/')[-1]))
# imwrite(img_box_mesh, os.path.join(args.out[:-4] + '_mes_box_vis_10_0.7/' + img_name.split('/')[-1]))
# imwrite(img_box_mesh_refined, os.path.join(args.out[:-4] + '_mes_box_vis_10_0.7_IOU=0/' + img_name.split('/')[-1])[:-4]+'_refined.jpg')
return outputs
def distributed_visualise_pred_merge_postprocessing(self, img_id, outputs, args, vote=2, tmp_dir="./results/",
draw_flag=False):
car_cls_coco = 2
bboxes_list = []
segms_list = []
six_dof_list = []
bboxes_with_IOU_list = []
bboxes_merge = outputs[0][img_id][0].copy()
segms_merge = outputs[0][img_id][1].copy()
six_dof_merge = outputs[0][img_id][2].copy()
last_name = ""
for i, output in enumerate(outputs):
a = output[img_id]
file_name = os.path.basename(a[2]['file_name'])
if last_name != "" and file_name != last_name:
assert "Image error!"
last_name = file_name
img_name = os.path.join(self.img_prefix, file_name)
if not os.path.isfile(img_name):
assert "Image file does not exist!"
image = imread(img_name)
bboxes, segms, six_dof = a[0], a[1], a[2]
bboxes_list.append(bboxes)
segms_list.append(segms)
six_dof_list.append(six_dof)
bboxes_with_IOU = get_IOU(image, bboxes[car_cls_coco], segms[car_cls_coco], six_dof,
car_id2name, self.car_model_dict, self.unique_car_mode, self.camera_matrix)
new_bboxes_with_IOU = np.zeros((bboxes_with_IOU.shape[0], bboxes_with_IOU.shape[1] + 1))
for bbox_idx in range(bboxes_with_IOU.shape[0]):
new_bboxes_with_IOU[bbox_idx] = np.append(bboxes_with_IOU[bbox_idx], float(i))
bboxes_with_IOU_list.append(new_bboxes_with_IOU)
bboxes_with_IOU = np.concatenate(bboxes_with_IOU_list, axis=0)
inds = nms_with_IOU_and_vote(bboxes_with_IOU, vote=vote) ## IOU nms filter out processing return output indices
inds = np.array(inds)
inds_list = []
start = 0
for bboxes_iou in bboxes_with_IOU_list:
end = bboxes_iou.shape[0] + start
i = np.where((inds >= start) & (inds < end))
if i:
inds_current = inds[i] - start
else:
inds_current = []
inds_list.append(inds_current)
start = end
bboxes_merge_concat = []
segms_merge_concat = []
car_cls_score_pred_concat = []
quaternion_pred_concat = []
trans_pred_world_concat = []
for ids, bboxes, segms, six_dof in zip(inds_list, bboxes_list, segms_list, six_dof_list):
bboxes_merge_concat.append(bboxes[car_cls_coco][ids])
segms_merge_concat.append(np.array(segms[car_cls_coco])[ids])
car_cls_score_pred_concat.append(six_dof['car_cls_score_pred'][ids])
quaternion_pred_concat.append(six_dof['quaternion_pred'][ids])
trans_pred_world_concat.append(six_dof['trans_pred_world'][ids])
bboxes_merge[car_cls_coco] = np.concatenate(bboxes_merge_concat, axis=0)
segms_merge[car_cls_coco] = np.concatenate(segms_merge_concat, axis=0)
six_dof_merge['car_cls_score_pred'] = np.concatenate(car_cls_score_pred_concat, axis=0)
six_dof_merge['quaternion_pred'] = np.concatenate(quaternion_pred_concat, axis=0)
six_dof_merge['trans_pred_world'] = np.concatenate(trans_pred_world_concat, axis=0)
output_model_merge = (bboxes_merge, segms_merge, six_dof_merge)
if draw_flag:
car_cls_score_pred = six_dof_merge['car_cls_score_pred']
quaternion_pred = six_dof_merge['quaternion_pred']
trans_pred_world = six_dof_merge['trans_pred_world'].copy()
euler_angle = np.array([quaternion_to_euler_angle(x) for x in quaternion_pred])
car_labels = np.argmax(car_cls_score_pred, axis=1)
kaggle_car_labels = [self.unique_car_mode[x] for x in car_labels]
car_names = np.array([car_id2name[x].name for x in kaggle_car_labels])
# img_box_mesh_refined = self.visualise_box_mesh(image,bboxes[car_cls_coco], segms[car_cls_coco],car_names, euler_angle,trans_pred_world_refined)
img_box_mesh_refined, iou_flag = self.visualise_box_mesh(image, bboxes_merge[car_cls_coco],
segms_merge[car_cls_coco], car_names,
euler_angle, trans_pred_world)
imwrite(img_box_mesh_refined,
os.path.join(args.out[:-4] + '_mes_box_vis_merged/' + img_name.split('/')[-1])[
:-4] + '_merged.jpg')
tmp_file = os.path.join(tmp_dir, "{}.pkl".format(last_name[:-4]))
mmcv.dump(output_model_merge, tmp_file)
return output_model_merge
def distributed_visualise_pred_merge_postprocessing_weight_merge(self, img_id, outputs, args, vote=0,
tmp_dir="./results/", draw_flag=False):
car_cls_coco = 2
bboxes_list = []
segms_list = []
six_dof_list = []
bboxes_with_IOU_list = []
bboxes_merge = outputs[0][img_id][0].copy()
segms_merge = outputs[0][img_id][1].copy()
six_dof_merge = outputs[0][img_id][2].copy()
last_name = ""
if vote == 0:
vote = len(outputs)
for i, output in enumerate(outputs):
a = output[img_id]
file_name = os.path.basename(a[2]['file_name'])
if last_name != "" and file_name != last_name:
assert "Image error!"
last_name = file_name
img_name = os.path.join(self.img_prefix, file_name)
if not os.path.isfile(img_name):
assert "Image file does not exist!"
image = imread(img_name)
bboxes, segms, six_dof = a[0], a[1], a[2]
bboxes_list.append(bboxes)
segms_list.append(segms)
six_dof_list.append(six_dof)
bboxes_with_IOU = get_IOU(image, bboxes[car_cls_coco], segms[car_cls_coco], six_dof,
car_id2name, self.car_model_dict, self.unique_car_mode, self.camera_matrix)
new_bboxes_with_IOU = np.zeros((bboxes_with_IOU.shape[0], bboxes_with_IOU.shape[1] + 1))
for bbox_idx in range(bboxes_with_IOU.shape[0]):
new_bboxes_with_IOU[bbox_idx] = np.append(bboxes_with_IOU[bbox_idx], float(i))
bboxes_with_IOU_list.append(new_bboxes_with_IOU)
bboxes_with_IOU = np.concatenate(bboxes_with_IOU_list, axis=0)
inds_index = nms_with_IOU_and_vote_return_index(bboxes_with_IOU, vote=vote) ## IOU nms filter out processing return output indices
inds = np.array(list(inds_index.keys()))
trans_pred_world = np.concatenate([sd['trans_pred_world'] for sd in six_dof_list], axis=0)
# Now we weighted average of the translation
for ii in inds_index:
weight = bboxes_with_IOU[:, 5][inds_index[ii]] / np.sum(bboxes_with_IOU[:, 5][inds_index[ii]])
trans_pred_world[ii] = np.sum(trans_pred_world[inds_index[ii]] * np.expand_dims(weight, axis=1), axis=0)
inds_list = []
start = 0
for bi in range(len(bboxes_with_IOU_list)):
bboxes_iou = bboxes_with_IOU_list[bi]
end = bboxes_iou.shape[0] + start
i = np.where((inds >= start) & (inds < end))
if i:
inds_i = inds[i] - start
else:
inds_i = | |
parameter 'storefront_oid' is set
if ('storefront_oid' not in params or
params['storefront_oid'] is None):
raise ValueError("Missing the required parameter `storefront_oid` when calling `insert_email_postcard`") # noqa: E501
# verify the required parameter 'email_commseq_postcard' is set
if ('email_commseq_postcard' not in params or
params['email_commseq_postcard'] is None):
raise ValueError("Missing the required parameter `email_commseq_postcard` when calling `insert_email_postcard`") # noqa: E501
collection_formats = {}
path_params = {}
if 'storefront_oid' in params:
path_params['storefront_oid'] = params['storefront_oid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'email_commseq_postcard' in params:
body_params = params['email_commseq_postcard']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/storefront/{storefront_oid}/email/postcards', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EmailCommseqPostcardResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def insert_email_segment(self, storefront_oid, email_segment, **kwargs): # noqa: E501
"""Insert email segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_segment(storefront_oid, email_segment, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailSegment email_segment: Email segment (required)
:return: EmailSegmentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_email_segment_with_http_info(storefront_oid, email_segment, **kwargs) # noqa: E501
else:
(data) = self.insert_email_segment_with_http_info(storefront_oid, email_segment, **kwargs) # noqa: E501
return data
    def insert_email_segment_with_http_info(self, storefront_oid, email_segment, **kwargs):  # noqa: E501
        """Insert email segment  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.insert_email_segment_with_http_info(storefront_oid, email_segment, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int storefront_oid: (required)
        :param EmailSegment email_segment: Email segment (required)
        :return: EmailSegmentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of argument names this endpoint accepts; anything else
        # passed through **kwargs is a caller error and is rejected below.
        all_params = ['storefront_oid', 'email_segment']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the local namespace so positional args and kwargs can be
        # handled uniformly by name (standard swagger-codegen pattern).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method insert_email_segment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'storefront_oid' is set
        if ('storefront_oid' not in params or
                params['storefront_oid'] is None):
            raise ValueError("Missing the required parameter `storefront_oid` when calling `insert_email_segment`")  # noqa: E501
        # verify the required parameter 'email_segment' is set
        if ('email_segment' not in params or
                params['email_segment'] is None):
            raise ValueError("Missing the required parameter `email_segment` when calling `insert_email_segment`")  # noqa: E501

        collection_formats = {}

        # storefront_oid is interpolated into the URL path below.
        path_params = {}
        if 'storefront_oid' in params:
            path_params['storefront_oid'] = params['storefront_oid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The segment object is serialized as the JSON request body.
        body_params = None
        if 'email_segment' in params:
            body_params = params['email_segment']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501

        return self.api_client.call_api(
            '/storefront/{storefront_oid}/email/segments', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EmailSegmentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def insert_screen_recording_segment(self, storefront_oid, segment, **kwargs): # noqa: E501
"""Insert screen recording segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_screen_recording_segment(storefront_oid, segment, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param ScreenRecordingSegment segment: Segment (required)
:return: ScreenRecordingSegmentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_screen_recording_segment_with_http_info(storefront_oid, segment, **kwargs) # noqa: E501
else:
(data) = self.insert_screen_recording_segment_with_http_info(storefront_oid, segment, **kwargs) # noqa: E501
return data
    def insert_screen_recording_segment_with_http_info(self, storefront_oid, segment, **kwargs):  # noqa: E501
        """Insert screen recording segment  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.insert_screen_recording_segment_with_http_info(storefront_oid, segment, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int storefront_oid: (required)
        :param ScreenRecordingSegment segment: Segment (required)
        :return: ScreenRecordingSegmentResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted argument names; unknown kwargs are rejected below.
        all_params = ['storefront_oid', 'segment']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the local namespace so positional args and kwargs can be
        # handled uniformly by name (standard swagger-codegen pattern).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method insert_screen_recording_segment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'storefront_oid' is set
        if ('storefront_oid' not in params or
                params['storefront_oid'] is None):
            raise ValueError("Missing the required parameter `storefront_oid` when calling `insert_screen_recording_segment`")  # noqa: E501
        # verify the required parameter 'segment' is set
        if ('segment' not in params or
                params['segment'] is None):
            raise ValueError("Missing the required parameter `segment` when calling `insert_screen_recording_segment`")  # noqa: E501

        collection_formats = {}

        # storefront_oid is interpolated into the URL path below.
        path_params = {}
        if 'storefront_oid' in params:
            path_params['storefront_oid'] = params['storefront_oid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The segment object is serialized as the JSON request body.
        body_params = None
        if 'segment' in params:
            body_params = params['segment']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501

        return self.api_client.call_api(
            '/storefront/{storefront_oid}/screen_recordings/segments', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ScreenRecordingSegmentResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def prepare_download_email_segment(self, storefront_oid, email_segment_uuid, **kwargs): # noqa: E501
"""Prepare download of email segment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.prepare_download_email_segment(storefront_oid, email_segment_uuid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param str email_segment_uuid: (required)
:return: EmailSegmentDownloadPrepareResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.prepare_download_email_segment_with_http_info(storefront_oid, email_segment_uuid, **kwargs) # noqa: E501
else:
(data) = self.prepare_download_email_segment_with_http_info(storefront_oid, email_segment_uuid, **kwargs) # noqa: E501
return data
    def prepare_download_email_segment_with_http_info(self, storefront_oid, email_segment_uuid, **kwargs):  # noqa: E501
        """Prepare download of email segment  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.prepare_download_email_segment_with_http_info(storefront_oid, email_segment_uuid, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int storefront_oid: (required)
        :param str email_segment_uuid: (required)
        :return: EmailSegmentDownloadPrepareResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted argument names; unknown kwargs are rejected below.
        all_params = ['storefront_oid', 'email_segment_uuid']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the local namespace so positional args and kwargs can be
        # handled uniformly by name (standard swagger-codegen pattern).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method prepare_download_email_segment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'storefront_oid' is set
        if ('storefront_oid' not in params or
                params['storefront_oid'] is None):
            raise ValueError("Missing the required parameter `storefront_oid` when calling `prepare_download_email_segment`")  # noqa: E501
        # verify the required parameter 'email_segment_uuid' is set
        if ('email_segment_uuid' not in params or
                params['email_segment_uuid'] is None):
            raise ValueError("Missing the required parameter `email_segment_uuid` when calling `prepare_download_email_segment`")  # noqa: E501

        collection_formats = {}

        # Both identifiers are interpolated into the URL path; this POST
        # carries no request body.
        path_params = {}
        if 'storefront_oid' in params:
            path_params['storefront_oid'] = params['storefront_oid']  # noqa: E501
        if 'email_segment_uuid' in params:
            path_params['email_segment_uuid'] = params['email_segment_uuid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey']  # noqa: E501

        return self.api_client.call_api(
            '/storefront/{storefront_oid}/email/segments/{email_segment_uuid}/downloadPrepare', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EmailSegmentDownloadPrepareResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def publish_library_item(self, library_item_oid, publish_library_request, **kwargs): # noqa: E501
"""Publish library item. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.publish_library_item(library_item_oid, publish_library_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int library_item_oid: (required)
:param PublishLibraryItemRequest publish_library_request: Publish library item request (required)
:return: LibraryItemResponse
If | |
mask_y = mask_y_branch
else:
mask_y = torch.cat([mask_y, mask_y_branch], 1)
return self.model(mask_y)
class MultiCNNMaskRandomBGBlock(nn.Module):
    """Multi-kernel masked convolution block with randomized background.

    Parallel conv branches (kernel sizes 7, 9, 11, ...) are applied to the
    input and masked; the region outside the mask is filled from
    ``random_bg``. The concatenated branches are shrunk to ``branch_num``
    channels with a 1x1 conv, a random square patch is copy-added elsewhere
    in the feature map (``random_move_controlling_stick``), the result is
    re-spread with a wide 11x11 conv and finally passed to ``submodule``.

    NOTE(review): despite the inherited Unet-style naming, ``forward`` does
    NOT concatenate the input back onto the output — there is no identity
    skip path in this block.
    """
    def __init__(self, outer_nc, inner_nc, branch_num, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct the block.

        Parameters:
            outer_nc (int)   -- number of filters in the outer conv layer
                                (used only as the default for input_nc here)
            inner_nc (int)   -- number of filters produced by each branch conv
            branch_num (int) -- number of parallel conv branches
            input_nc (int)   -- channels in the input; defaults to outer_nc
            submodule        -- module applied to the dispersed features
            outermost (bool) -- stored on self but otherwise unused here
            innermost (bool) -- unused
            norm_layer       -- normalization layer class (or functools.partial)
            use_dropout (bool) -- unused
        """
        super(MultiCNNMaskRandomBGBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm2d carries no affine bias by default, so the convs
        # provide their own bias in that case.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        mask_models = []
        for i in range(0, branch_num):
            # Branch i: kernel 7 + 2i with matching padding 3 + i, so every
            # branch preserves the spatial size and outputs can be stacked.
            equalconv = nn.Conv2d(input_nc, inner_nc, kernel_size=7 + 2 * i,
                                  stride=1, padding=3 + i, bias=use_bias)
            # equalconv = nn.Conv2d(input_nc, inner_nc, kernel_size=3 + 2 * i,
            #                       stride=1, padding=1 + i, bias=use_bias)
            equalrelu = nn.LeakyReLU(0.2, True)
            equalnorm = norm_layer(inner_nc)
            mask_model = [equalconv, equalnorm, equalrelu]
            mask_models.append(nn.Sequential(*mask_model))
        self.mask_models = nn.ModuleList(mask_models)
        # 1x1 conv collapsing the concatenated branches to branch_num channels.
        shrinkconv = nn.Conv2d(inner_nc * branch_num, branch_num, kernel_size=1,
                               stride=1, padding=0, bias=use_bias)
        shrinkrelu = nn.LeakyReLU(0.2, True)
        shrinknorm = norm_layer(branch_num)
        # Wide conv that spreads the shrunk features back out spatially.
        disperseconv = nn.Conv2d(branch_num, branch_num, kernel_size=11,
                                 stride=1, padding=5, bias=use_bias)
        disperserelu = nn.LeakyReLU(0.2, True)
        dispersenorm = norm_layer(branch_num)
        shrinkpart = [shrinkconv, shrinknorm, shrinkrelu]
        dispersepart = [disperseconv, dispersenorm, disperserelu]
        self.shrinkpart = nn.Sequential(*shrinkpart)
        self.dispersepart = nn.Sequential(*dispersepart)
        model = [submodule]
        self.model = nn.Sequential(*model)
    def forward(self, x, mask, random_bg):
        # Run every branch and keep only the masked region of its output.
        mask_y = None
        for mask_model in self.mask_models:
            mask_y_branch = mask_model(x) * mask
            if mask_y is None:
                mask_y = mask_y_branch
            else:
                mask_y = torch.cat([mask_y, mask_y_branch], 1)
        # Fill everything outside the mask from the random background.
        # assumes random_bg has inner_nc * branch_num channels to match the
        # concatenated branch outputs -- TODO confirm against callers.
        mask_y = mask_y + random_bg * (1 - mask)
        processed_y = self.shrinkpart(mask_y)
        # Stochastic augmentation: runs on EVERY forward call, eval mode
        # included (uses Python's `random`, not the torch RNG).
        processed_y = self.random_move_controlling_stick(processed_y, mask)
        processed_y = self.dispersepart(processed_y)
        return self.model(processed_y)
    def random_move_controlling_stick(self, processed_y, mask):
        # Copy-ADD a random cut_width x cut_width patch from one location
        # onto another; x-coordinates are snapped to a 5-pixel grid (source
        # on the grid, target offset by half a gap). `mask` is accepted but
        # not used here.
        controlling_stick_gap = 5
        cut_width = 50
        # NOTE(review): both x and y bounds come from shape[3] (width), so
        # the feature map is presumably square -- confirm with callers.
        upper_bound = processed_y.shape[3] - 1 - cut_width
        src_pos_x = random.randint(0, upper_bound)
        src_pos_x = src_pos_x - src_pos_x % controlling_stick_gap
        src_pos_y = random.randint(0, upper_bound)
        tag_pos_x = random.randint(0, upper_bound - int(controlling_stick_gap / 2))
        tag_pos_x = tag_pos_x - tag_pos_x % controlling_stick_gap + int(controlling_stick_gap / 2)
        tag_pos_y = random.randint(0, upper_bound)
        # Clone so the source read below is not affected by the in-place add.
        ret_y = processed_y.clone()
        ret_y[:, :, tag_pos_y: tag_pos_y + cut_width, tag_pos_x: tag_pos_x + cut_width] += \
            processed_y[:, :, src_pos_y: src_pos_y + cut_width, src_pos_x: src_pos_x + cut_width]
        return ret_y
class MaskCollectionBlock(nn.Module):
    """Multi-kernel mask-conditioned feature collector.

    The input is pushed through several parallel conv branches with growing
    kernel sizes (7, 9, 11, ...). Each branch output is restricted to the
    masked region; the unmasked region of the concatenated result is filled
    from ``random_bg``; finally a 1x1 conv shrinks everything back down to
    ``outer_nc`` channels.
    """

    def __init__(self, outer_nc, inner_nc, branch_num, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Build the branch convs and the 1x1 shrink head.

        Parameters:
            outer_nc (int)   -- channels produced by the shrink head
            inner_nc (int)   -- channels produced by each branch conv
            branch_num (int) -- number of parallel conv branches
            input_nc (int)   -- input channels; defaults to outer_nc
            submodule        -- accepted for interface parity, unused
            outermost (bool) -- stored on self, otherwise unused
            innermost (bool) -- unused
            norm_layer       -- normalization layer class (or functools.partial)
            use_dropout (bool) -- unused
        """
        super(MaskCollectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm2d has no affine bias by default, so give the convs
        # their own bias in that case.
        if type(norm_layer) == functools.partial:
            norm_fn = norm_layer.func
        else:
            norm_fn = norm_layer
        use_bias = norm_fn == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        branches = []
        for idx in range(branch_num):
            # Kernel 7 + 2*idx with padding 3 + idx keeps the spatial size
            # identical across branches.
            branch_conv = nn.Conv2d(input_nc, inner_nc,
                                    kernel_size=7 + 2 * idx,
                                    stride=1, padding=3 + idx, bias=use_bias)
            branches.append(nn.Sequential(branch_conv,
                                          norm_layer(inner_nc),
                                          nn.LeakyReLU(0.2, True)))
        self.mask_models = nn.ModuleList(branches)
        # 1x1 conv collapsing all branch channels down to outer_nc.
        self.shrinkpart = nn.Sequential(
            nn.Conv2d(inner_nc * branch_num, outer_nc, kernel_size=1,
                      stride=1, padding=0, bias=use_bias),
            norm_layer(outer_nc),
            nn.LeakyReLU(0.2, True))

    def forward(self, x, mask, random_bg):
        # Masked output of every branch, stacked along channels.
        branch_outs = [branch(x) * mask for branch in self.mask_models]
        collected = torch.cat(branch_outs, 1)
        # Outside the mask, substitute the provided random background.
        collected = collected + random_bg * (1 - mask)
        return self.shrinkpart(collected)
class DisperseBlock(nn.Module):
    """Spread features over ``branch_num`` channels with one wide conv.

    A single 11x11 convolution (plus norm and LeakyReLU) maps the input to
    ``branch_num`` channels; the result is handed straight to ``submodule``.
    There is no skip connection.
    """

    def __init__(self, outer_nc, inner_nc, branch_num, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Build the disperse head and wrap the submodule.

        Parameters:
            outer_nc (int)   -- used only as the default for input_nc
            inner_nc (int)   -- accepted for interface parity, unused
            branch_num (int) -- channels produced by the disperse conv
            input_nc (int)   -- input channels; defaults to outer_nc
            submodule        -- module applied after the disperse head
            outermost (bool) -- stored on self, otherwise unused
            innermost (bool) -- unused
            norm_layer       -- normalization layer class (or functools.partial)
            use_dropout (bool) -- unused
        """
        super(DisperseBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm2d has no affine bias by default, so the conv needs
        # its own bias in that case.
        if type(norm_layer) == functools.partial:
            norm_fn = norm_layer.func
        else:
            norm_fn = norm_layer
        use_bias = norm_fn == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        self.dispersepart = nn.Sequential(
            nn.Conv2d(input_nc, branch_num, kernel_size=11,
                      stride=1, padding=5, bias=use_bias),
            norm_layer(branch_num),
            nn.LeakyReLU(0.2, True),
        )
        self.model = nn.Sequential(submodule)

    def forward(self, x):
        return self.model(self.dispersepart(x))
class UnetSkipConnectionInnerRandomBlock(nn.Module):
    """Unet submodule with skip connection and an extra bottleneck input.

        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|

    Unlike a plain Unet block, down path / submodule / up path are kept as
    separate attributes (not one big Sequential) because ``forward`` must
    thread an extra tensor ``inner_ap`` down to the innermost level, where
    it is concatenated with the bottleneck features before upsampling.
    """
    def __init__(self, outer_nc, inner_nc, inner_ap_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int)    -- the number of filters in the outer conv layer
            inner_nc (int)    -- the number of filters in the inner conv layer
            inner_ap_nc (int) -- channels of the appended bottleneck tensor
                                 (consumed by the innermost up-conv only)
            input_nc (int)    -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)  -- if this module is the outermost module
            innermost (bool)  -- if this module is the innermost module
            norm_layer        -- normalization layer
            use_dropout (bool) -- if use dropout layers.
        """
        super(UnetSkipConnectionInnerRandomBlock, self).__init__()
        self.outermost = outermost
        self.innermost = innermost
        # InstanceNorm2d has no affine bias by default, so the convs need
        # their own bias in that case.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost: no norm on the way down, Tanh output on the way up.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model_down = down
            model_sub = submodule
            model_up = up
        elif innermost:
            # Innermost: the up-conv consumes the bottleneck PLUS the
            # appended inner_ap channels; there is no submodule below.
            upconv = nn.ConvTranspose2d(inner_nc + inner_ap_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model_down = down
            model_sub = None
            model_up = up
        else:
            # Intermediate level: standard Unet down/up with optional dropout.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model_down = down
                model_sub = submodule
                model_up = up + [nn.Dropout(0.5)]
            else:
                model_down = down
                model_sub = submodule
                model_up = up
        self.model_down = nn.Sequential(*model_down)
        # Only register a submodule when one exists (innermost has none).
        if model_sub is not None:
            self.model_sub = model_sub
        self.model_up = nn.Sequential(*model_up)
    def forward(self, x, inner_ap):
        if self.outermost:
            # Outermost level: no skip concat on the final output.
            down_out = self.model_down(x)
            sub_out = self.model_sub(down_out, inner_ap)
            return self.model_up(sub_out)
        elif self.innermost:
            # Append inner_ap to the bottleneck features, upsample, then
            # skip-concat the block input.
            down_out = self.model_down(x)
            sub_out = torch.cat([down_out, inner_ap], 1)
            return torch.cat([x, self.model_up(sub_out)], 1)
        else:  # add skip connections
            down_out = self.model_down(x)
            sub_out = self.model_sub(down_out, inner_ap)
            return torch.cat([x, self.model_up(sub_out)], 1)
class DownsamplingResnetBranchGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from <NAME>'s neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', n_downsampling=2):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(DownsamplingResnetBranchGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
| |
cfab_cmd('interface', 'delete', ports=self.ports) \
+ cfab_cmd('lag', 'add', lag_id='3') \
+ cfab_cmd('ifgroup', 'add', ifg='1', lag_id='3', lag=True) \
+ cfab_cmd('interface', 'add', lag_id='3',
ports=self.ports, lag=True) \
+ cfab_cmd('vlan', 'add', ifg='1')
self.assert_configured(expect)
    def test_already_configured_lag_and_vlan(self):
        """Stale LAG + VLAN config must be torn down and rebuilt.

        The candidate config already binds both ports to linkaggregation 1
        (ifgroup 0) with a VLAN; the driver is expected to delete the old
        interface/vlan/lag definitions and recreate them using the next
        free lag id (2) and ifgroup (1).
        """
        mgr = self.driver.mgr
        mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
interface 1/1/0/1
type linkaggregation 1
exit
interface 1/1/0/2
type linkaggregation 1
exit
linkaggregation 1 1 type endpoint
linkaggregation 1 1 mode active
linkaggregation 1 1 cfab port-mode external
vfab 1 vlan 8 endpoint untag 0
"""
        self.driver.setup_vlan_with_lag("a", "u", "p", "1", 8,
                                        self.ports, self.mac)
        mgr.connect.assert_called_once_with("a", "u", "p")
        mgr.get_candidate_config.assert_called_once_with()
        # Expected command sequence: full teardown, then rebuild on lag 2.
        expect = cfab_cmd('interface', 'delete', ports=self.ports) \
            + cfab_cmd('vlan', 'delete') \
            + cfab_cmd('lag', 'delete') \
            + cfab_cmd('lag', 'add', lag_id='2') \
            + cfab_cmd('ifgroup', 'add', ifg='1', lag_id='2', lag=True) \
            + cfab_cmd('interface', 'add', ports=self.ports,
                       lag_id='2', lag=True) \
            + cfab_cmd('vlan', 'add', ifg='1')
        self.assert_configured(expect)
class TestCFABdriverClearVlan(BaseTestCFABdriver):
"""Test Fujitsu C-Fabric mechanism driver for VLAN configuration."""
def setUp(self):
cfg.CONF.set_override('share_pprofile', True, "fujitsu_cfab")
cfg.CONF.set_override('pprofile_prefix', "test-", "fujitsu_cfab")
super(TestCFABdriverClearVlan, self).setUp()
def test_raises(self):
mgr = self.driver.mgr
cfab = self.driver
for er in [EOFError, EnvironmentError, OSError, select.error]:
mgr.get_candidate_config.side_effect = er
self.assertRaises(er,
cfab.clear_vlan, 'a', 'u', 'p', '1', 8,
self.ports, self.mac)
self.assertEqual(4, mgr.close_session.call_count)
def test_ifgroup_ether_is_exhauted(self):
mgr = self.driver.mgr
candidate = ""
for i in range(0, 4096):
candidate += 'ifgroup {if_id} ether 1/1/0/{port}\n'.format(
if_id=i, port=(i + 1))
mgr.get_candidate_config.return_value = candidate
ret = self.driver.clear_vlan("a", "u", "p", "1", 8,
self.ports, self.mac)
self.assertIsNone(ret)
def test_clear_with_no_command(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
interface 1/1/0/1
type endponit
cfab port-mode external
lldp mode enable
exit
vfab 1 vlan 8 endpoint untag 0
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('vlan', 'delete') + cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_clear_vlan_and_assoc(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
interface 1/1/0/1
type endponit
cfab port-mode external
lldp mode enable
exit
pprofile test-1 vlan tag 8
vfab 1 vlan 8 endpoint untag 0
vfab 1 pprofile 0 vsiid mac 00:01:02:03:04:05 test-1
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('pp_assoc', 'delete') \
+ cfab_cmd('vlan', 'delete') \
+ cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_eliminate_own_definition(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
ifgroup 1 ether 1/1/0/2
interface 1/1/0/1
type endponit
cfab port-mode external
lldp mode enable
exit
interface 1/1/0/2
type endponit
cfab port-mode external
lldp mode enable
exit
vfab 1 vlan 8 endpoint untag 0,1
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('vlan', 'replace', ifg='1') \
+ cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_eliminate_own_definition_from_boundary(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/2
ifgroup 1 ether 1/1/0/3
ifgroup 2 ether 1/1/0/4
ifgroup 3 ether 1/1/0/1
ifgroup 4 ether 1/1/0/5
ifgroup 5 ether 1/1/0/6
interface 1/1/0/1
type endponit
cfab port-mode external
lldp mode enable
exit
interface 1/1/0/2
type endponit
cfab port-mode external
lldp mode enable
exit
vfab 1 vlan 8 endpoint untag 0-5
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('vlan', 'replace', ifg='0-2,4-5') \
+ cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_already_cleared_vlan(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
interface 1/1/0/1
exit
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_already_cleared_vlan_without_interface(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
interface 1/1/0/1
type endponit
cfab port-mode external
lldp mode enable
exit
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_already_cleared_all_definitions(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
interface 1/1/0/1
exit
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_exists_different_vlan(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
interface 1/1/0/1
cfab port-mode external
type endpoint
exit
vfab 1 vlan 100 endpoint untag 0
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('vlan', 'delete', vlanid=100) \
+ cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_exists_different_vlans(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
interface 1/1/0/1
cfab port-mode external
type endpoint
exit
vfab 1 vlan 100 endpoint untag 0
vfab 1 vlan 200 endpoint untag 0
vfab 1 vlan 300 endpoint untag 0
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('vlan', 'delete', vlanid=100) \
+ cfab_cmd('vlan', 'delete', vlanid=200) \
+ cfab_cmd('vlan', 'delete', vlanid=300) \
+ cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_exists_different_vlan_with_range(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1
ifgroup 1 ether 1/1/0/2
interface 1/1/0/1
cfab port-mode external
type endpoint
exit
vfab 1 vlan 100 endpoint untag 0-1
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('vlan', 'replace', vlanid=100, ifg=1) \
+ cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_exists_lag(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
interface 1/1/0/1
type linkaggregation 1
exit
linkaggregation 1 1 mode active
linkaggregation 1 1 cfab port-mode external
linkaggregation 1 1 type endpoint
vfab 1 vlan 8 endpoint untag 0
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_exists_lag_without_vlan(self):
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
interface 1/1/0/1
type linkaggregation 1
exit
linkaggregation 1 1 mode active
linkaggregation 1 1 cfab port-mode external
linkaggregation 1 1 type endpoint
"""
self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
mgr.connect.assert_called_once_with("a", "u", "p")
mgr.get_candidate_config.assert_called_once_with()
expect = cfab_cmd('interface', 'delete')
self.assert_configured(expect)
def test_exists_lag_without_interface(self):
    # The interface stanza lacks the 'type linkaggregation' line; the
    # driver is still expected to emit only the interface 'delete'.
    mgr = self.driver.mgr
    mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
interface 1/1/0/1
exit
linkaggregation 1 1 mode active
linkaggregation 1 1 cfab port-mode external
linkaggregation 1 1 type endpoint
vfab 1 vlan 8 endpoint untag 0
"""
    self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
    mgr.connect.assert_called_once_with("a", "u", "p")
    mgr.get_candidate_config.assert_called_once_with()
    expect = cfab_cmd('interface', 'delete')
    self.assert_configured(expect)
def test_illegal_exists_port_range(self):
    # ifgroup 0 illegally spans two ports (comma range); the driver is
    # expected to fall back to deleting only the interface definition.
    mgr = self.driver.mgr
    mgr.get_candidate_config.return_value = """
ifgroup 0 ether 1/1/0/1,1/1/0/2
interface 1/1/0/1
cfab port-mode external
type endpoint
exit
interface 1/1/0/2
cfab port-mode external
type endpoint
exit
vfab 1 vlan 8 endpoint untag 0
"""
    self.driver.clear_vlan("a", "u", "p", "1", 8, self.ports, self.mac)
    mgr.connect.assert_called_once_with("a", "u", "p")
    mgr.get_candidate_config.assert_called_once_with()
    expect = cfab_cmd('interface', 'delete')
    self.assert_configured(expect)
class TestCFABdriverClearVlanWithLAG(BaseTestCFABdriver):
"""Test Fujitsu C-Fabric mechanism driver for VLAN configuration."""
def setUp(self):
    """Enable shared pprofiles with a 'test-' prefix, then build the driver."""
    overrides = (('share_pprofile', True), ('pprofile_prefix', "test-"))
    for option, value in overrides:
        cfg.CONF.set_override(option, value, "fujitsu_cfab")
    super(TestCFABdriverClearVlanWithLAG, self).setUp()
    # Two physical ports forming the LAG under test.
    self.ports = "1/1/0/1,1/1/0/2"
def test_raises(self):
    """Connection-level errors must propagate and still close the session."""
    driver = self.driver
    mock_mgr = driver.mgr
    error_classes = (EOFError, EnvironmentError, OSError, select.error)
    for error_cls in error_classes:
        mock_mgr.get_candidate_config.side_effect = error_cls
        self.assertRaises(error_cls,
                          driver.clear_vlan_with_lag, 'a', 'u', 'p', '1', 8,
                          self.ports, self.mac)
    # One session close per failed attempt.
    self.assertEqual(4, mock_mgr.close_session.call_count)
def test_ifgroup_ether_is_exhauted(self):
    """clear_vlan_with_lag returns None when no free ifgroup id remains.

    The candidate config already defines all 4096 ether ifgroups, so the
    driver cannot allocate a new one and must give up (returning None).
    """
    cfab = self.driver
    mgr = self.driver.mgr
    # Build the 4096-line candidate config with a single join instead of
    # repeated '+=' string concatenation (the original loop was quadratic).
    candidate = ''.join(
        'ifgroup {if_id} ether 1/1/0/{port}\n'.format(if_id=i, port=(i + 1))
        for i in range(4096))
    mgr.get_candidate_config.return_value = candidate
    ret = cfab.clear_vlan_with_lag("a", "u", "p", "1", 8,
                                   self.ports, self.mac)
    self.assertIsNone(ret)
def test_clear_with_no_command(self):
    # Both LAG member ports and the VFAB VLAN exist in the candidate
    # config: interface, VLAN and LAG definitions must all be removed.
    cfab = self.driver
    mgr = self.driver.mgr
    mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
interface 1/1/0/1
type linkaggregation 1
lldp mode enable
exit
interface 1/1/0/2
type linkaggregation 1
lldp mode enable
exit
linkaggregation 1 1 cfab port-mode external
linkaggregation 1 1 mode active
linkaggregation 1 1 type endpoint
vfab 1 vlan 8 endpoint untag 0
"""
    cfab.clear_vlan_with_lag("a", "u", "p", "1", 8, self.ports, self.mac)
    mgr.connect.assert_called_once_with("a", "u", "p")
    mgr.get_candidate_config.assert_called_once_with()
    expect = cfab_cmd('interface', 'delete', ports=self.ports) \
        + cfab_cmd('vlan', 'delete') + cfab_cmd('lag', 'delete')
    self.assert_configured(expect)
def test_eliminate_own_definition(self):
    # Two LAGs share VLAN 8 (ifgroups 0 and 1). Clearing LAG 1 must
    # remove only its own definitions and rewrite the VFAB entry to
    # keep ifgroup 1 (the other LAG) — not delete the shared VLAN.
    cfab = self.driver
    mgr = self.driver.mgr
    mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
ifgroup 1 linkaggregation 1 2
interface 1/1/0/1
type linkaggregation 1
lldp mode enable
exit
interface 1/1/0/2
type linkaggregation 1
lldp mode enable
exit
interface 1/1/0/3
type linkaggregation 2
lldp mode enable
exit
interface 1/1/0/4
type linkaggregation 2
lldp mode enable
exit
linkaggregation 1 1 cfab port-mode external
linkaggregation 1 1 mode active
linkaggregation 1 1 type endpoint
linkaggregation 1 2 cfab port-mode external
linkaggregation 1 2 mode active
linkaggregation 1 2 type endpoint
vfab 1 vlan 8 endpoint untag 0,1
"""
    cfab.clear_vlan_with_lag("a", "u", "p", "1", 8, self.ports, self.mac)
    mgr.connect.assert_called_once_with("a", "u", "p")
    mgr.get_candidate_config.assert_called_once_with()
    expect = cfab_cmd('interface', 'delete', ports=self.ports) \
        + cfab_cmd('vlan', 'replace', ifg='1') \
        + cfab_cmd('lag', 'delete')
    self.assert_configured(expect)
def test_already_cleared_only_interface(self):
    # The interfaces no longer reference the LAG ('type linkaggregation'
    # lines absent); only the interface definitions need deleting.
    cfab = self.driver
    mgr = self.driver.mgr
    mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
interface 1/1/0/1
lldp mode enable
exit
interface 1/1/0/2
lldp mode enable
exit
linkaggregation 1 1 cfab port-mode external
linkaggregation 1 1 mode active
linkaggregation 1 1 type endpoint
vfab 1 vlan 8 endpoint untag 0
"""
    cfab.clear_vlan_with_lag("a", "u", "p", "1", 8, self.ports, self.mac)
    mgr.connect.assert_called_once_with("a", "u", "p")
    mgr.get_candidate_config.assert_called_once_with()
    expect = cfab_cmd('interface', 'delete', ports=self.ports)
    self.assert_configured(expect)
def test_already_cleared_only_vlan(self):
cfab = self.driver
mgr = self.driver.mgr
mgr.get_candidate_config.return_value = """
ifgroup 0 linkaggregation 1 1
interface 1/1/0/1
type linkaggregation 1
lldp mode enable
exit
interface 1/1/0/2
type linkaggregation 1
lldp mode enable
exit
linkaggregation 1 1 cfab port-mode external
linkaggregation 1 | |
minimum power
# #2 at the demand-charge power threshold
# #3 at the new demand rate and power threshold
# #4 at maximum power and demand rate
vertices = [Vertex(0, 0, 0), Vertex(0, 0, 0), Vertex(0, 0, 0), Vertex(0, 0, 0)]
# Evaluate the first of the four vertices
# Segment 1: First-order parameter a1.
# This could be stated directly from cost parameters, but this
# model allows for dynamic rates, accounts for losses, and models
# demand-charges, which would require defining multiple
# cost-parameter models. The first-order parameter is the
# electricity rate. In this model, the rate is meaningful at a
# neighbor node location at zero power transfer.
a1 = energy_rate # [$/kWh]
# Vertex 1: Full available power transfer at Vertex 1 is thus the
# physical transfer limit, minus losses.
vertices[0].power = (minimum_power - minimum_power_loss)
# Vertex 1: Marginal price of Vertex 1 is augmented by the value
# of energy from the neighbor that is lost. (This model assigns
# the cost of losses to the recipient (importer) of electricity.)
vertices[0].marginalPrice = a1 * (1 + self.object.lossFactor * minimum_power / maximum_power) # [$/kWh]
# Evalauate the second of four vertices
# Vertex 2: Available power at Vertex 2 is determined by the
# current peak demand charge threshold pdt and possibly scheduled
# powers prior to the indexed time interval. The demand threshold
# in the indexed time interval is at least equal to the
# parameter. NOTE this process will work only if the demand
# threshold is is updated based on actual, accurate measurements.
peak_demand_threshold = self.demandThreshold # [kW]
# Also consider, however, scheduled powers prior to the indexed
# interval that might have already set a new demand threshold.
# For simplicity, presume that new demand thresholds would occur
# only during HLH hour types. More complex code will be needed
# if only HLH hours must be considered. NOTE this process will
# work only if the load forcasts are meaningful and accurate.
# Gather scheduled powers sp
scheduled_powers = self.scheduledPowers
if len(scheduled_powers) == 0:
# Powers have been scheduled, order the scheduled powers by
# their start time
ordered_scheduled_powers = sorted(self.scheduledPowers, key=lambda x: x.timeInterval.startTime)
ordered_scheduled_powers = ordered_scheduled_powers[:i+1]
# The peak demand determinant is the greater of the monthly
# peak threshold or the prior scheduled powers.
ordered_scheduled_powers = [x.value for x in ordered_scheduled_powers]
ordered_scheduled_powers.append(peak_demand_threshold)
peak_demand_threshold = max(ordered_scheduled_powers) # kW
# Vertex 2: The power at which demand charges will begin accruing
# and therefore marks the start of Vertex 2. It is not affected
# by losses because it is based on local metering.
vertices[1].power = peak_demand_threshold # [avg.kW]
# Vertex 2: Marginal price of Vertex 2 is augmented by the value
# of energy from the neighbor that is lost.
vertices[1].marginalPrice = a1 * (1 + self.object.lossFactor * vertices[1].power / maximum_power) # [$/kWh]
# Evaluate the third of four vertices
# Look up the demand rate dr for the month_number. The second
# parameter is HLH = 1 (i.e., the first column of the table).
demand_rate = bpa_demand_rate[month_number-1][0] #bpa_demand_rate(month_number, 1) # [$/kW (per kWh)]
# Vertex 3: The power of Vertex 3 is the same as that of Vertex 2
vertices[2].power = peak_demand_threshold # [avg.kW]
# Vertex 3: The marginal price at Vertex 3 is shifted strongly by
# the demand response rate. The logic here is that cost is
# determined by rate * (power-threshold). Therefore, the
# effective marginal rate is augmented by the demand rate itself.
# NOTE: Some hand-waving is always needed to compare demand and
# energy rates. This approach assigns a meaningful production
# cost, but it is not correct to say it describes an energy
# price. The cost is assigned to the entire hour. Shorter time
# intervals should not be further incremented. Evenso, a huge
# discontinuity appears in the marginal price.
vertices[2].marginalPrice = vertices[2].marginalPrice + demand_rate # [$/kWh]
# Evaluate the fourth of four vertices
# Vertex 4: The power at Vertex 4 is the maximum power, minus losses
vertices[3].power = maximum_power - full_power_loss # [avg.kW]
# The marginal price at Vertex 4 is affected by both losses and
# demand charges.
# Marginal price at Vertex 3 from loss component
vertices[3].marginalPrice = a1 * (1 + self.object.lossFactor) # [$/kWh]
# Augment marginal price at Vertex 4 with demand-charge impact
vertices[3].marginalPrice = vertices[3].marginalPrice + demand_rate # [$/kW (per hour)]
# Assign production costs for the four vertices
# Segment 1: The second-order cost coefficient a2 on the first
# line segment is determined from the change in marginal price
# divided by change in power.
a2 = (vertices[1].marginalPrice - vertices[0].marginalPrice) # [$/kWh]
a2 = a2 / (vertices[1].power - vertices[0].power) # [$/kW^2h]
# Vertex 1: The cost at Vertex 1 can be inferred by integrating
# from p=0 to Vertex 1.
vertices[0].cost = a0 + a1 * vertices[0].power + 0.5 * a2 * (vertices[0].power) ** 2 # production cost [$/h]
# Vertex 2: The cost at Vertex 2 is on the same trajectory
vertices[1].cost = a0 + a1 * vertices[1].power + 0.5 * a2 * (vertices[1].power) ** 2 # production cost [$/h]
# Vertex 3: Both the power and production cost should be the same
# at Vertex 3 as for Vertex 2.
vertices[2].cost = vertices[1].cost # production cost [$/h]
# Vertex 4: The cost on the third line segment has a new
# trajectory that begins with the cost at Vertex 3 (an
# integration constant).
vertices[3].cost = vertices[2].cost # partial production cost [#/h]
# Segment 3: The new first-order term for the third line segment
# is the marginal price at Vertex 3. This applies only to power
# imports that exceed Vertex 3.
a1 = vertices[2].marginalPrice # first-order coefficient [$/kWh]
# Vertex 4: Add the first-order term to the Vertex-4 cost
vertices[3].cost = vertices[3].cost + a1 * (vertices[3].power - vertices[2].power) # partial production cost [$/h]
# Segment 3: NOTE: The second-order coeffiecient a2 on the second
# line segment is unchanged from the first segment
# Vertex 4: Add the second-order term to the Vertex-4 cost.
vertices[3].cost = vertices[3].cost + 0.5 * a2 * (vertices[3].power - vertices[2].power) ** 2 # production cost [$/h]
# Convert the costs to raw dollars
# NOTE: This usage of Matlab hours() toggles a duration back
# into a numerical representation, which is correct here.
interval_duration = get_duration_in_hour(time_intervals[i].duration)
vertices[0].cost = vertices[0].cost * interval_duration # [$]
vertices[1].cost = vertices[1].cost * interval_duration # [$]
vertices[2].cost = vertices[2].cost * interval_duration # [$]
vertices[3].cost = vertices[3].cost * interval_duration # [$]
# Create interval values for the active vertices
interval_values = [
IntervalValue(self, time_intervals[i], mkt, MeasurementType.ActiveVertex, vertices[0]),
IntervalValue(self, time_intervals[i], mkt, MeasurementType.ActiveVertex, vertices[1]),
IntervalValue(self, time_intervals[i], mkt, MeasurementType.ActiveVertex, vertices[2]),
IntervalValue(self, time_intervals[i], mkt, MeasurementType.ActiveVertex, vertices[3])]
# Append the active vertices to the list of active vertices
# in the indexed time interval
self.activeVertices.extend(interval_values)
else: # indexed time interval is a LLH hour
# LLH hours
# The indexed time interval is a LLH hour. The electricity rate
# is a little lower, and demand charges are not applicable.
#
# Look up the BPA energy rate for month m. The second parameter
# is LLH = 2 (i.e., column 2 of the table).
energy_rate = bpa_energy_rate[month_number-1][1] #bpa_energy_rate(month_number, 2)
# Two active vertices are created
# #1 at minimum power
# #2 at maximum power
vertices = [Vertex(0, 0, 0), Vertex(0, 0, 0)]
# Evaluate the first of two vertices
# First-order parameter a1.
a1 = energy_rate # [$/kWh]
# Vertex 1: Full | |
"""
Interactive annotation tool for 3D human pose estimation.
Given an image and a coarse 3D skeleton estimation, the user can interactively
modify the 3D parameters and save them as the ground truth.
"""
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial.transform import Rotation as R
from cv2 import projectPoints
import matplotlib.pyplot as plt
import numpy as np
import argparse
import imageio
import sys
import os
sys.path.append("../")
from libs.skeleton.anglelimits import get_basis1, normalize, gram_schmidt_columns
from libs.skeleton.anglelimits import nt_parent_indices, nt_child_indices, di_indices
from libs.skeleton.anglelimits import get_normal, di, a, to_spherical, to_xyz, bone_name
''' GLOBAL VARIABLES '''
angle_idx = 0  # Bone angle to adjust ('1'-'9' keys select it; None = global rotation)
direction = 0  # Direction to rotate, (0 - x, 1 - y, 2 - z) for upper arm only
step = 3  # 3 degrees for step size
step_radian = step * np.pi / 180
# Maps a bone index to its entry in `local_systems` — TODO confirm against to_local()
local_system_map = {1:0, 3:0, 5:1, 7:1, 2:2, 4:3, 6:4, 8:5}
# Maps a bone index to the matplotlib line index used by update_line()
line_index_map = {1:11, 3:14, 5:4, 7:1, 2:12, 4:15, 6:5, 8:2}
# Parent/child joint indices (1-based in the literature, converted to 0-based)
parent_indices = np.array([1,2,3,1,7,8,1, 13,14,15,14,18,19,14,26,27])-1
child_indices = np.array([2,3,4,7,8,9,13,14,15,16,18,19,20,26,27,28])-1
direction_name = ['x', 'y', 'z']
# translation vector of the camera
t = None
# focal length of the camera
f = None
# intrinsic matrix for camera projection
intrinsic_mat = None
# Objects for ploting
fig = None
plot_ax = None
img_ax = None
skeleton = None
lines = None
points = None
RADIUS = 1  # Space around the subject
# hierarchical representation
local_systems = None
need_to_update_lc = False
bones_global = None
bones_local = None
angles = None
# file path
annotation_path = None
annotation = None
img_name = None
# some joint correspondence
index_list = [13, 14, 129, 145]
H36M_IDS = [0, 2, 5, 8, 1, 4, 7, 3, 12, 15, 24, 16, 18, 20, 17, 19, 21]
USE_DIMS = [0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]
# keyboard inputs
bone_idx_keys = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
global_rot_key = '0'
inc_step_key = 'd'
dec_step_key = 'f'
ang_inc_key = 'up'
ang_dec_key = 'down'
ang_cw_key = 'right'
ang_ccw_key = 'left'
save_key = 'm'
def press(event):
    """
    Matplotlib key-press callback: routes keyboard input to skeleton edits.

    Keys (see the *_key globals): '1'-'9' select a limb, '0' selects global
    rotation, 'd'/'f' cycle the rotation axis, up/down/left/right apply one
    rotation step, 'm' saves the annotation, 'p' draws a debug point.
    """
    global angle_idx, direction, need_to_update_lc
    global bones_global, bones_local, skeleton, angles, local_systems
    # Debug helper: scatter a random point to verify the canvas redraws.
    if event.key == 'p':
        plot_ax.plot([np.random.rand()], [np.random.rand()], [np.random.rand()], 'ro')
        fig.canvas.draw()
    if event.key in bone_idx_keys: angle_idx = int(event.key) - 1
    if event.key == global_rot_key: angle_idx = None
    if event.key == inc_step_key: direction = (direction + 1) % 3
    if event.key == dec_step_key: direction = (direction - 1) % 3
    if event.key == ang_inc_key or event.key == ang_dec_key:
        update_skeleton(angle_idx, event.key)
    if event.key == ang_cw_key or event.key == ang_ccw_key:
        # Left/right rotation only applies to lower limbs (indices 2,4,6,8).
        if angle_idx in [2, 4, 6, 8]:
            update_skeleton(angle_idx, event.key)
    if event.key == save_key:
        save_skeleton()
    if angle_idx is not None:
        notes = 'current limb: ' + bone_name[angle_idx + 1]
        # update local coordinate systems if needed
        if need_to_update_lc:
            # compute the local coordinate system
            bones_global, bones_local, local_systems = to_local(skeleton)
            # convert the local coordinates into spherical coordinates
            angles = to_spherical(bones_local)
            angles[:,1:] *= 180/np.pi
            # only needs to happen once after a global rotation
            need_to_update_lc = False
    else:
        notes = 'global rotation: '
    if angle_idx in [None, 1, 3, 5, 7]:
        notes += ' direction: ' + direction_name[direction]
    # Show the current edit mode in the 3D axes' x-label.
    plot_ax.set_xlabel(notes)
def show3Dpose(channels,
               ax,
               lcolor="#3498db",
               rcolor="#e74c3c",
               add_labels=True,
               gt=False,
               pred=False,
               inv_z=False
               ):
    """
    Draw a 32-joint 3D skeleton on a 3D axes.

    Args:
        channels: flat array reshaped to (32, 3) joint coordinates.
        ax: mpl_toolkits.mplot3d Axes3D to draw into.
        lcolor/rcolor: colors for left/right side bones (per the LR mask).
        add_labels: if True, label the x/y/z axes.
        gt, pred: currently unused in this function.
        inv_z: if True, invert the z axis.

    Returns:
        List of the plotted line artists (one per bone).
    """
    vals = np.reshape( channels, (32, -1) )
    I = parent_indices
    J = child_indices
    # Left(1)/right(0) flag per bone, selects lcolor vs rcolor.
    LR = np.array([1,1,1,0,0,0,0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype=bool)
    lines = []
    # Draw one line segment per (parent, child) joint pair.
    for i in np.arange( len(I) ):
        x, y, z = [np.array( [vals[I[i], j], vals[J[i], j]] ) for j in range(3)]
        line = ax.plot(x,y, z, lw=2, c=lcolor if LR[i] else rcolor)
        lines.append(line)
    # Center a RADIUS-sized cube around the root joint (index 0).
    xroot, yroot, zroot = vals[0,0], vals[0,1], vals[0,2]
    ax.set_xlim3d([-RADIUS+xroot, RADIUS+xroot])
    ax.set_zlim3d([-RADIUS+zroot, RADIUS+zroot])
    ax.set_ylim3d([-RADIUS+yroot, RADIUS+yroot])
    if add_labels:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
    ax.set_aspect('auto')
    # Get rid of the panes (actually, make them white/transparent).
    # NOTE(review): Axes3D.w_xaxis etc. are deprecated/removed in newer
    # matplotlib (use ax.xaxis) — this code targets older versions; confirm.
    white = (1.0, 1.0, 1.0, 0.0)
    ax.w_xaxis.set_pane_color(white)
    ax.w_yaxis.set_pane_color(white)
    # Get rid of the axis lines in 3d (z pane is left untouched above —
    # possibly intentional, possibly an oversight; verify visually).
    ax.w_xaxis.line.set_color(white)
    ax.w_yaxis.line.set_color(white)
    ax.w_zaxis.line.set_color(white)
    if inv_z:
        ax.invert_zaxis()
    return lines
def to_local(skeleton):
    """
    Convert bone vectors of the skeleton into per-bone local coordinates.

    Also rebuilds the module-level `local_systems` list as a side effect
    (two torso systems first, then one system per dependent lower bone).

    Returns:
        bones: (9, 3) global bone vectors (parent joint minus child joint).
        bones_local: (9, 3) same bones expressed in their local frames.
        local_systems: list of 3x3 orthonormal frames (also stored globally).
    """
    global local_systems
    v1, v2, v3 = get_basis1(skeleton)
    # Compute vector of left hip to right hip
    left_hip = skeleton[6]
    right_hip = skeleton[1]
    v4 = normalize(right_hip - left_hip)
    # v5 is the cross product of v4 and v2 (front-facing vector for lower-body)
    v5 = normalize(np.cross(v4, v2))
    # Compute orthogonal coordinate systems using GramSchmidt
    # Make sure the directions roughly align
    # For upper body, we use v1, v2 and v3
    # For lower body, we use v4, v2 and v5
    system1 = gram_schmidt_columns(np.hstack([v1.reshape(3,1),
                                              v2.reshape(3,1),
                                              v3.reshape(3,1)]))
    system2 = gram_schmidt_columns(np.hstack([v4.reshape(3,1),
                                              v2.reshape(3,1),
                                              v5.reshape(3,1)]))
    local_systems = [system1, system2]
    # Bone vectors point from parent joint to child joint position
    # (computed as parent minus child here).
    bones = skeleton[nt_parent_indices, :] - skeleton[nt_child_indices, :]
    # convert bone vector to local coordinate system
    bones_local = np.zeros(bones.shape, dtype=bones.dtype)
    for bone_idx in range(len(bones)):
        # only compute bone vectors for non-torsos
        # the order of the non-torso bone vector is:
        # bone vector1: thorax to head top
        # bone vector2: left shoulder to left elbow
        # bone vector3: left elbow to left wrist
        # bone vector4: right shoulder to right elbow
        # bone vector5: right elbow to right wrist
        # bone vector6: left hip to left knee
        # bone vector7: left knee to left ankle
        # bone vector8: right hip to right knee
        # bone vector9: right knee to right ankle
        bone = bones[bone_idx]
        if bone_idx in [0, 1, 3, 5, 7]:
            # Bones directly connected to torso
            # Upper body - 0, 1, 3
            # Lower body - 5, 7
            if bone_idx in [0, 1, 3]: bones_local[bone_idx] = system1.T @ bone
            else: bones_local[bone_idx] = system2.T @ bone
        else:
            # Dependent bones (forearms/shins): frame derived from parent bone.
            if bone_idx in [2, 4]: parent_R = system1
            else: parent_R = system2
            # the parent bone's index is this bone's index minus 1
            vector_u = normalize(bones[bone_idx - 1])
            di_index = di_indices[bone_idx]
            vector_v, flag = get_normal(parent_R@di[:, di_index],
                                        parent_R@a,
                                        vector_u)
            vector_w = np.cross(vector_u, vector_v)
            local_system = gram_schmidt_columns(np.hstack([vector_u.reshape(3,1),
                                                           vector_v.reshape(3,1),
                                                           vector_w.reshape(3,1)]))
            local_systems.append(local_system)
            bones_local[bone_idx] = local_system.T @ bone
    return bones, bones_local, local_systems
def update_line(line_idx, parent_idx, child_idx):
    """Redraw one 3D line segment between two joints of the current skeleton."""
    global lines
    joint_a = skeleton[parent_idx]
    joint_b = skeleton[child_idx]
    xs, ys, zs = ([joint_a[axis], joint_b[axis]] for axis in range(3))
    line_artist = lines[line_idx][0]
    line_artist.set_data(xs, ys)
    line_artist.set_3d_properties(zs)
    fig.canvas.draw_idle()
def update_global(angle_idx):
    """Recompute the global bone vector for `angle_idx`, move the child joint
    accordingly, and refresh the corresponding plotted line."""
    global bones_global, bones_local, local_systems, skeleton
    frame = local_systems[local_system_map[angle_idx]]
    bones_global[angle_idx] = frame @ bones_local[angle_idx]
    parent_joint = nt_parent_indices[angle_idx]
    child_joint = nt_child_indices[angle_idx]
    # Child joint position = parent joint minus the global bone vector.
    skeleton[child_joint] = skeleton[parent_joint] - bones_global[angle_idx]
    update_line(line_index_map[angle_idx], parent_joint, child_joint)
def rotate_global(rot):
    """Rotate the whole skeleton about the hip joint and redraw every limb."""
    global skeleton
    hip = skeleton[0].reshape(1, 3)
    centered = skeleton - hip
    # NOTE(review): Rotation.as_dcm is deprecated in newer scipy (as_matrix);
    # kept for consistency with the rest of this file.
    skeleton = (rot.as_dcm() @ centered.T).T + hip
    for idx, (parent_idx, child_idx) in enumerate(zip(parent_indices,
                                                      child_indices)):
        update_line(idx, parent_idx, child_idx)
def update_skeleton(angle_idx, key_name):
    """
    Update the 3D skeleton for one keyboard step.

    Args:
        angle_idx: bone index to rotate (2/4/6/8 = lower limbs via spherical
            angles; 1/3/5/7 = upper limbs via axis rotation; None = rotate
            the whole skeleton globally).
        key_name: 'up'/'down'/'left'/'right' arrow key that was pressed.
    """
    global need_to_update_lc, local_systems
    # Rotate the lower-limb: adjust spherical angles (degrees) directly.
    if angle_idx in [2, 4, 6, 8]:
        if key_name == 'up': angles[angle_idx, 1] += step
        elif key_name == 'down': angles[angle_idx, 1] -= step
        elif key_name == 'left': angles[angle_idx, 2] += step
        elif key_name == 'right': angles[angle_idx, 2] -= step
        # Convert degrees back to radians before mapping to xyz.
        temp = angles[angle_idx].copy()
        temp[1:] *= np.pi / 180
        bones_local[angle_idx] = to_xyz(temp.reshape(1,3))
        update_global(angle_idx)
    # Rotate the upper-limb with respect to the torso coordinate system
    if angle_idx in [1, 3, 5, 7]:
        # Local rotation vector along the currently selected axis.
        rot_vec = np.array([0., 0., 0.])
        rot_vec[direction] = 1. if key_name == 'up' else -1.
        rot = R.from_rotvec(rot_vec*step_radian)
        bones_local[angle_idx] = rot.apply(bones_local[angle_idx])
        # Global rotation vector: same axis expressed in the limb's frame.
        rot_vec2 = local_systems[local_system_map[angle_idx]][:, direction].copy()
        rot_vec2 *= 1. if key_name == 'up' else -1.
        rot2 = R.from_rotvec(rot_vec2*step_radian)
        # Rotate the child (lower-limb) frame so the elbow/knee follows.
        temp = local_systems[local_system_map[angle_idx + 1]]
        local_systems[local_system_map[angle_idx + 1]] = rot2.as_dcm() @ temp
        # update parent and child bone
        update_global(angle_idx)
        update_global(angle_idx + 1)
    # Global rotation (only up/down arrows apply here).
    if angle_idx is None and key_name in ['up', 'down']:
        # Local frames become stale after a global rotation; flag for rebuild.
        need_to_update_lc = True
        rot_vec = np.array([0., 0., 0.])
        rot_vec[direction] = 1. if key_name == 'up' else -1.
        rot = R.from_rotvec(rot_vec*step_radian)
        rotate_global(rot)
    # Update the 2D Projection
    update_projection()
def update_projection():
    """Re-project the 3D skeleton into the image and refresh the 2D markers."""
    global points
    # Drop the previous scatter before drawing the new one.
    points.pop(0).remove()
    projected, _ = projectPoints(skeleton,
                                 np.zeros((3)),
                                 t,
                                 intrinsic_mat,
                                 np.zeros((5)))
    coords = projected.reshape(-1, 2)
    points = img_ax.plot(coords[:, 0], coords[:, 1], 'ro')
    fig.canvas.draw_idle()
def save_skeleton():
"""
Save the annotation file.
"""
global annotation
annotation[img_name]['p3d'] | |
"""
This module contains the `BinaryReader` class, a wrapper for binary I/O streams that offers functions for extracting
binary-encoded data such as ints, strings, structures etc.
"""
import struct
from typing import Union, BinaryIO, Optional, AnyStr, Iterable, Tuple
from io import BytesIO, IOBase, TextIOBase, UnsupportedOperation
from os import SEEK_SET, SEEK_CUR
from atmfjstc.lib.file_utils.fileobj import get_fileobj_size
class BinaryReader:
"""
This class wraps a binary I/O file object and offers functions for extracting binary-encoded ints, strings,
structures etc.
Note that all the functions that encounter data different than what was expected will throw an exception from a
subclass of `BinaryReaderFormatError`. This allows you to easily add specific processing for corrupted files vs
other kinds of momentary or system errors.
"""
_fileobj: BinaryIO
_big_endian: bool
_bytes_read: Optional[int] = 0
_cached_total_size: Optional[int] = None
_synthetic_eof: bool = False
def __init__(self, data_or_fileobj: Union[bytes, BinaryIO], big_endian: bool):
"""
Constructor.
Args:
data_or_fileobj: Either a `bytes` object, or a binary mode file object to read data from. For a file object,
the data will be read starting from the file object's current position.
big_endian: Set True to read ints and structures in big-endian mode, False to read them in little-endian.
This setting can be overridden in individual calls if needed.
Notes:
- The passed in file object can be either seekable or non-seekable. For non-seekable streams, some of the
functions of the `BinaryReader` will be unavailable, and others will run less efficiently.
- You should avoid manipulating the file object in between calls to the `BinaryReader`. If you must,
be aware that the reader does not automatically restore its last position, it will continue from where
you left the stream before you called again.
"""
self._fileobj = _parse_main_input_arg(data_or_fileobj)
self._big_endian = big_endian
def name(self) -> Optional[AnyStr]:
name = getattr(self._fileobj, 'name', None)
return None if ((name is None) or (name == '')) else name
def seekable(self) -> bool:
return self._fileobj.seekable()
def _require_seekable(self):
if not self.seekable():
raise UnsupportedOperation("This operation can only be performed on seekable readers")
    def seek(self, offset: int, whence: int = SEEK_SET) -> int:
        """
        Reposition the reader (seekable inputs only).

        Returns:
            The new absolute position, as reported by the file object.

        Raises:
            UnsupportedOperation: If the underlying stream is not seekable.
        """
        self._require_seekable()
        return self._fileobj.seek(offset, whence)
def tell(self) -> int:
"""
Reports the position of the reader in the binary data.
Note that, as opposed to a regular file object, the `BinaryReader` can report the position even if the
underlying stream is not seekable. In this case, the reported position represents the number of bytes read so
far from the stream, by the `BinaryReader` itself.
Returns:
The position of the binary reader within the input, in bytes
"""
return self._fileobj.tell() if self.seekable() else self._bytes_read
def total_size(self) -> int:
"""
Gets the total size of the data in the underlying file object (which must be seekeable).
Returns:
The size of the data, in bytes.
"""
self._require_seekable()
if self._cached_total_size is None:
self._cached_total_size = get_fileobj_size(self._fileobj)
return self._cached_total_size
def bytes_remaining(self) -> int:
"""
Gets the total size of the data remaining in the file object (from the current position to the end). The reader
must be seekable for this.
Returns:
The size of the data, in bytes.
"""
self._require_seekable()
return self.total_size() - self.tell()
def eof(self) -> bool:
"""
Checks whether we are at the end of the data.
If the stream is seekable, this function is always accurate. If not, this just returns a flag that is set when
we attempt to read data in the past and ran against the end.
Returns:
True if no more data is available.
"""
return (self.bytes_remaining() == 0) if self.seekable() else self._synthetic_eof
def read_at_most(self, n_bytes: int) -> bytes:
"""
Try to read `n_bytes` of data, returning fewer only if the data is exhausted.
Short reads, e.g. from a socket, are handled.
Args:
n_bytes: The number of bytes to try to read.
Returns:
The read data, at most `n_bytes` in length. Note that the function never raises a format error.
"""
if n_bytes < 0:
raise ValueError("The number of bytes to read cannot be negative")
data = b''
while len(data) < n_bytes:
new_data = self._fileobj.read(n_bytes - len(data))
if len(new_data) == 0:
self._synthetic_eof = True
break
self._bytes_read += len(new_data)
if data == b'':
data = new_data
else:
data += new_data
return data
def read_remainder(self) -> bytes:
"""
Reads all the remaining data in the input.
This is intended for use inside relatively small data structures of some bigger file, e.g. if the tail of a
record in a TLV sequence represents some path or text string.
Returns:
The data, as a `bytes` object.
"""
BUF_SIZE = 1000000
data = b''
while not self.eof():
new_data = self.read_at_most(BUF_SIZE)
if data == b'':
data = new_data
else:
data += new_data
return data
def read_amount(self, n_bytes: int, meaning: Optional[str] = None) -> bytes:
"""
Reads exactly `n_bytes` from the underlying stream.
Args:
n_bytes: The amount of bytes to read.
meaning: An indication as to the meaning of the data being read (e.g. "user ID"). It is used in the text
of any exceptions that may be thrown.
Returns:
The data, as a `bytes` object `n_bytes` in length.
Raises:
BinaryReaderMissingDataError: If we are at the end of the stream and no bytes are left at all.
BinaryReaderReadPastEndError: If we read some bytes, but reached the end of the data before we got the
full `n_bytes`.
"""
if n_bytes == 0:
return b''
original_pos = self.tell()
data = self.read_at_most(n_bytes)
if len(data) == 0:
raise BinaryReaderMissingDataError(original_pos, n_bytes, meaning)
if len(data) < n_bytes:
raise BinaryReaderReadPastEndError(original_pos, n_bytes, len(data), meaning)
return data
def maybe_read_amount(self, n_bytes: int, meaning: Optional[str] = None) -> Optional[bytes]:
"""
Like `read_amount`, but returns None if there is no more data to be read.
Note that an exception is still thrown if there is *some* data available short of the required amount.
"""
try:
return self.read_amount(n_bytes, meaning)
except BinaryReaderMissingDataError:
return None
def peek(self, n_bytes: int) -> bytes:
"""
Reads up to `n_bytes` of data without advancing the current position. Only for seekable inputs.
Args:
n_bytes: The number of bytes to read ahead.
Returns:
A byte string `n_bytes` in length, or less, if there is less data available.
"""
self._require_seekable()
data = self.read_at_most(n_bytes)
self.seek(-len(data), SEEK_CUR)
return data
    def skip_bytes(self, n_bytes: int, meaning: Optional[str] = None):
        """
        Skips over a number of bytes, ignoring the data. The bytes MUST be present.

        Seekable streams skip in O(1) via seek; non-seekable streams fall
        back to reading and discarding chunks.

        Args:
            n_bytes: The number of bytes to skip.
            meaning: An indication as to the meaning of the data being skipped (e.g. "compressed date"). It is used in
                the text of any exceptions that may be thrown.

        Raises:
            BinaryReaderMissingDataError: If we are at the end of the stream and no bytes are left at all.
            BinaryReaderReadPastEndError: If we read some bytes, but reached the end of the data before we got the
                full length required.
        """
        if n_bytes < 0:
            raise ValueError("Number of bytes to skip must be non-negative")
        if n_bytes == 0:
            return
        original_pos = self.tell()
        if self.seekable():
            bytes_avail = self.bytes_remaining()
            if bytes_avail == 0:
                raise BinaryReaderMissingDataError(original_pos, n_bytes, meaning)
            if bytes_avail < n_bytes:
                # Advance to the very end before raising, so the position
                # reflects everything that was consumed.
                self.seek(bytes_avail, SEEK_CUR)
                raise BinaryReaderReadPastEndError(original_pos, n_bytes, bytes_avail, meaning)
            self.seek(n_bytes, SEEK_CUR)
            return
        # Fall back to non-seeking algorithm: read and discard in chunks.
        BUF_SIZE = 1000000
        total_read = 0
        while total_read < n_bytes:
            to_read = min(BUF_SIZE, n_bytes - total_read)
            data = self.read_at_most(to_read)
            total_read += len(data)
            if len(data) < to_read:
                if total_read == 0:
                    raise BinaryReaderMissingDataError(original_pos, n_bytes, meaning)
                raise BinaryReaderReadPastEndError(original_pos, n_bytes, total_read, meaning)
def expect_magic(self, magic: bytes, meaning: Optional[str] = None):
"""
Verifies that a specific bytes sequence ("magic") follows in the underlying stream.
This is often used to validate that a file is of the correct type.
Args:
magic: A `bytes` object containing the expected sequence
meaning: An indication as to the meaning of the data being read (e.g. "ZIP signature"). It is used in the
text of any exceptions that may be thrown.
Raises:
BinaryReaderWrongMagicError: If the read sequence does not match the expected one.
BinaryReaderMissingDataError: If we are at the end of the stream and no bytes are left at all.
BinaryReaderReadPastEndError: If we read some | |
- m.b3004 <= 0)
# Auto-generated Pyomo linking constraints: each continuous variable x_i is
# bounded above by its group's binary indicator (x_i - b <= 0, i.e. x_i <= b),
# so x_i can only be positive when the indicator is switched on.
# Group 1: x514..x600 linked to indicator b3004.
m.c515 = Constraint(expr= m.x514 - m.b3004 <= 0)
m.c516 = Constraint(expr= m.x515 - m.b3004 <= 0)
m.c517 = Constraint(expr= m.x516 - m.b3004 <= 0)
m.c518 = Constraint(expr= m.x517 - m.b3004 <= 0)
m.c519 = Constraint(expr= m.x518 - m.b3004 <= 0)
m.c520 = Constraint(expr= m.x519 - m.b3004 <= 0)
m.c521 = Constraint(expr= m.x520 - m.b3004 <= 0)
m.c522 = Constraint(expr= m.x521 - m.b3004 <= 0)
m.c523 = Constraint(expr= m.x522 - m.b3004 <= 0)
m.c524 = Constraint(expr= m.x523 - m.b3004 <= 0)
m.c525 = Constraint(expr= m.x524 - m.b3004 <= 0)
m.c526 = Constraint(expr= m.x525 - m.b3004 <= 0)
m.c527 = Constraint(expr= m.x526 - m.b3004 <= 0)
m.c528 = Constraint(expr= m.x527 - m.b3004 <= 0)
m.c529 = Constraint(expr= m.x528 - m.b3004 <= 0)
m.c530 = Constraint(expr= m.x529 - m.b3004 <= 0)
m.c531 = Constraint(expr= m.x530 - m.b3004 <= 0)
m.c532 = Constraint(expr= m.x531 - m.b3004 <= 0)
m.c533 = Constraint(expr= m.x532 - m.b3004 <= 0)
m.c534 = Constraint(expr= m.x533 - m.b3004 <= 0)
m.c535 = Constraint(expr= m.x534 - m.b3004 <= 0)
m.c536 = Constraint(expr= m.x535 - m.b3004 <= 0)
m.c537 = Constraint(expr= m.x536 - m.b3004 <= 0)
m.c538 = Constraint(expr= m.x537 - m.b3004 <= 0)
m.c539 = Constraint(expr= m.x538 - m.b3004 <= 0)
m.c540 = Constraint(expr= m.x539 - m.b3004 <= 0)
m.c541 = Constraint(expr= m.x540 - m.b3004 <= 0)
m.c542 = Constraint(expr= m.x541 - m.b3004 <= 0)
m.c543 = Constraint(expr= m.x542 - m.b3004 <= 0)
m.c544 = Constraint(expr= m.x543 - m.b3004 <= 0)
m.c545 = Constraint(expr= m.x544 - m.b3004 <= 0)
m.c546 = Constraint(expr= m.x545 - m.b3004 <= 0)
m.c547 = Constraint(expr= m.x546 - m.b3004 <= 0)
m.c548 = Constraint(expr= m.x547 - m.b3004 <= 0)
m.c549 = Constraint(expr= m.x548 - m.b3004 <= 0)
m.c550 = Constraint(expr= m.x549 - m.b3004 <= 0)
m.c551 = Constraint(expr= m.x550 - m.b3004 <= 0)
m.c552 = Constraint(expr= m.x551 - m.b3004 <= 0)
m.c553 = Constraint(expr= m.x552 - m.b3004 <= 0)
m.c554 = Constraint(expr= m.x553 - m.b3004 <= 0)
m.c555 = Constraint(expr= m.x554 - m.b3004 <= 0)
m.c556 = Constraint(expr= m.x555 - m.b3004 <= 0)
m.c557 = Constraint(expr= m.x556 - m.b3004 <= 0)
m.c558 = Constraint(expr= m.x557 - m.b3004 <= 0)
m.c559 = Constraint(expr= m.x558 - m.b3004 <= 0)
m.c560 = Constraint(expr= m.x559 - m.b3004 <= 0)
m.c561 = Constraint(expr= m.x560 - m.b3004 <= 0)
m.c562 = Constraint(expr= m.x561 - m.b3004 <= 0)
m.c563 = Constraint(expr= m.x562 - m.b3004 <= 0)
m.c564 = Constraint(expr= m.x563 - m.b3004 <= 0)
m.c565 = Constraint(expr= m.x564 - m.b3004 <= 0)
m.c566 = Constraint(expr= m.x565 - m.b3004 <= 0)
m.c567 = Constraint(expr= m.x566 - m.b3004 <= 0)
m.c568 = Constraint(expr= m.x567 - m.b3004 <= 0)
m.c569 = Constraint(expr= m.x568 - m.b3004 <= 0)
m.c570 = Constraint(expr= m.x569 - m.b3004 <= 0)
m.c571 = Constraint(expr= m.x570 - m.b3004 <= 0)
m.c572 = Constraint(expr= m.x571 - m.b3004 <= 0)
m.c573 = Constraint(expr= m.x572 - m.b3004 <= 0)
m.c574 = Constraint(expr= m.x573 - m.b3004 <= 0)
m.c575 = Constraint(expr= m.x574 - m.b3004 <= 0)
m.c576 = Constraint(expr= m.x575 - m.b3004 <= 0)
m.c577 = Constraint(expr= m.x576 - m.b3004 <= 0)
m.c578 = Constraint(expr= m.x577 - m.b3004 <= 0)
m.c579 = Constraint(expr= m.x578 - m.b3004 <= 0)
m.c580 = Constraint(expr= m.x579 - m.b3004 <= 0)
m.c581 = Constraint(expr= m.x580 - m.b3004 <= 0)
m.c582 = Constraint(expr= m.x581 - m.b3004 <= 0)
m.c583 = Constraint(expr= m.x582 - m.b3004 <= 0)
m.c584 = Constraint(expr= m.x583 - m.b3004 <= 0)
m.c585 = Constraint(expr= m.x584 - m.b3004 <= 0)
m.c586 = Constraint(expr= m.x585 - m.b3004 <= 0)
m.c587 = Constraint(expr= m.x586 - m.b3004 <= 0)
m.c588 = Constraint(expr= m.x587 - m.b3004 <= 0)
m.c589 = Constraint(expr= m.x588 - m.b3004 <= 0)
m.c590 = Constraint(expr= m.x589 - m.b3004 <= 0)
m.c591 = Constraint(expr= m.x590 - m.b3004 <= 0)
m.c592 = Constraint(expr= m.x591 - m.b3004 <= 0)
m.c593 = Constraint(expr= m.x592 - m.b3004 <= 0)
m.c594 = Constraint(expr= m.x593 - m.b3004 <= 0)
m.c595 = Constraint(expr= m.x594 - m.b3004 <= 0)
m.c596 = Constraint(expr= m.x595 - m.b3004 <= 0)
m.c597 = Constraint(expr= m.x596 - m.b3004 <= 0)
m.c598 = Constraint(expr= m.x597 - m.b3004 <= 0)
m.c599 = Constraint(expr= m.x598 - m.b3004 <= 0)
m.c600 = Constraint(expr= m.x599 - m.b3004 <= 0)
m.c601 = Constraint(expr= m.x600 - m.b3004 <= 0)
# Group 2: x601..x695 linked to indicator b3005.
m.c602 = Constraint(expr= m.x601 - m.b3005 <= 0)
m.c603 = Constraint(expr= m.x602 - m.b3005 <= 0)
m.c604 = Constraint(expr= m.x603 - m.b3005 <= 0)
m.c605 = Constraint(expr= m.x604 - m.b3005 <= 0)
m.c606 = Constraint(expr= m.x605 - m.b3005 <= 0)
m.c607 = Constraint(expr= m.x606 - m.b3005 <= 0)
m.c608 = Constraint(expr= m.x607 - m.b3005 <= 0)
m.c609 = Constraint(expr= m.x608 - m.b3005 <= 0)
m.c610 = Constraint(expr= m.x609 - m.b3005 <= 0)
m.c611 = Constraint(expr= m.x610 - m.b3005 <= 0)
m.c612 = Constraint(expr= m.x611 - m.b3005 <= 0)
m.c613 = Constraint(expr= m.x612 - m.b3005 <= 0)
m.c614 = Constraint(expr= m.x613 - m.b3005 <= 0)
m.c615 = Constraint(expr= m.x614 - m.b3005 <= 0)
m.c616 = Constraint(expr= m.x615 - m.b3005 <= 0)
m.c617 = Constraint(expr= m.x616 - m.b3005 <= 0)
m.c618 = Constraint(expr= m.x617 - m.b3005 <= 0)
m.c619 = Constraint(expr= m.x618 - m.b3005 <= 0)
m.c620 = Constraint(expr= m.x619 - m.b3005 <= 0)
m.c621 = Constraint(expr= m.x620 - m.b3005 <= 0)
m.c622 = Constraint(expr= m.x621 - m.b3005 <= 0)
m.c623 = Constraint(expr= m.x622 - m.b3005 <= 0)
m.c624 = Constraint(expr= m.x623 - m.b3005 <= 0)
m.c625 = Constraint(expr= m.x624 - m.b3005 <= 0)
m.c626 = Constraint(expr= m.x625 - m.b3005 <= 0)
m.c627 = Constraint(expr= m.x626 - m.b3005 <= 0)
m.c628 = Constraint(expr= m.x627 - m.b3005 <= 0)
m.c629 = Constraint(expr= m.x628 - m.b3005 <= 0)
m.c630 = Constraint(expr= m.x629 - m.b3005 <= 0)
m.c631 = Constraint(expr= m.x630 - m.b3005 <= 0)
m.c632 = Constraint(expr= m.x631 - m.b3005 <= 0)
m.c633 = Constraint(expr= m.x632 - m.b3005 <= 0)
m.c634 = Constraint(expr= m.x633 - m.b3005 <= 0)
m.c635 = Constraint(expr= m.x634 - m.b3005 <= 0)
m.c636 = Constraint(expr= m.x635 - m.b3005 <= 0)
m.c637 = Constraint(expr= m.x636 - m.b3005 <= 0)
m.c638 = Constraint(expr= m.x637 - m.b3005 <= 0)
m.c639 = Constraint(expr= m.x638 - m.b3005 <= 0)
m.c640 = Constraint(expr= m.x639 - m.b3005 <= 0)
m.c641 = Constraint(expr= m.x640 - m.b3005 <= 0)
m.c642 = Constraint(expr= m.x641 - m.b3005 <= 0)
m.c643 = Constraint(expr= m.x642 - m.b3005 <= 0)
m.c644 = Constraint(expr= m.x643 - m.b3005 <= 0)
m.c645 = Constraint(expr= m.x644 - m.b3005 <= 0)
m.c646 = Constraint(expr= m.x645 - m.b3005 <= 0)
m.c647 = Constraint(expr= m.x646 - m.b3005 <= 0)
m.c648 = Constraint(expr= m.x647 - m.b3005 <= 0)
m.c649 = Constraint(expr= m.x648 - m.b3005 <= 0)
m.c650 = Constraint(expr= m.x649 - m.b3005 <= 0)
m.c651 = Constraint(expr= m.x650 - m.b3005 <= 0)
m.c652 = Constraint(expr= m.x651 - m.b3005 <= 0)
m.c653 = Constraint(expr= m.x652 - m.b3005 <= 0)
m.c654 = Constraint(expr= m.x653 - m.b3005 <= 0)
m.c655 = Constraint(expr= m.x654 - m.b3005 <= 0)
m.c656 = Constraint(expr= m.x655 - m.b3005 <= 0)
m.c657 = Constraint(expr= m.x656 - m.b3005 <= 0)
m.c658 = Constraint(expr= m.x657 - m.b3005 <= 0)
m.c659 = Constraint(expr= m.x658 - m.b3005 <= 0)
m.c660 = Constraint(expr= m.x659 - m.b3005 <= 0)
m.c661 = Constraint(expr= m.x660 - m.b3005 <= 0)
m.c662 = Constraint(expr= m.x661 - m.b3005 <= 0)
m.c663 = Constraint(expr= m.x662 - m.b3005 <= 0)
m.c664 = Constraint(expr= m.x663 - m.b3005 <= 0)
m.c665 = Constraint(expr= m.x664 - m.b3005 <= 0)
m.c666 = Constraint(expr= m.x665 - m.b3005 <= 0)
m.c667 = Constraint(expr= m.x666 - m.b3005 <= 0)
m.c668 = Constraint(expr= m.x667 - m.b3005 <= 0)
m.c669 = Constraint(expr= m.x668 - m.b3005 <= 0)
m.c670 = Constraint(expr= m.x669 - m.b3005 <= 0)
m.c671 = Constraint(expr= m.x670 - m.b3005 <= 0)
m.c672 = Constraint(expr= m.x671 - m.b3005 <= 0)
m.c673 = Constraint(expr= m.x672 - m.b3005 <= 0)
m.c674 = Constraint(expr= m.x673 - m.b3005 <= 0)
m.c675 = Constraint(expr= m.x674 - m.b3005 <= 0)
m.c676 = Constraint(expr= m.x675 - m.b3005 <= 0)
m.c677 = Constraint(expr= m.x676 - m.b3005 <= 0)
m.c678 = Constraint(expr= m.x677 - m.b3005 <= 0)
m.c679 = Constraint(expr= m.x678 - m.b3005 <= 0)
m.c680 = Constraint(expr= m.x679 - m.b3005 <= 0)
m.c681 = Constraint(expr= m.x680 - m.b3005 <= 0)
m.c682 = Constraint(expr= m.x681 - m.b3005 <= 0)
m.c683 = Constraint(expr= m.x682 - m.b3005 <= 0)
m.c684 = Constraint(expr= m.x683 - m.b3005 <= 0)
m.c685 = Constraint(expr= m.x684 - m.b3005 <= 0)
m.c686 = Constraint(expr= m.x685 - m.b3005 <= 0)
m.c687 = Constraint(expr= m.x686 - m.b3005 <= 0)
m.c688 = Constraint(expr= m.x687 - m.b3005 <= 0)
m.c689 = Constraint(expr= m.x688 - m.b3005 <= 0)
m.c690 = Constraint(expr= m.x689 - m.b3005 <= 0)
m.c691 = Constraint(expr= m.x690 - m.b3005 <= 0)
m.c692 = Constraint(expr= m.x691 - m.b3005 <= 0)
m.c693 = Constraint(expr= m.x692 - m.b3005 <= 0)
m.c694 = Constraint(expr= m.x693 - m.b3005 <= 0)
m.c695 = Constraint(expr= m.x694 - m.b3005 <= 0)
m.c696 = Constraint(expr= m.x695 - m.b3005 <= 0)
m.c697 = Constraint(expr= | |
"""
# Get model specific params
if model_params_ann:
model_params = model_params_ann
model_params["x_train"] = ast.Name(id=model_params["x_train"], ctx=ast.Store())
model_params["y_train"] = ast.Name(id=model_params["y_train"], ctx=ast.Store())
else:
model_params = self.get_model_params(elem)
# print(model_params)
# Get mutation specific params
mutation_params = self.get_mutation_params()
# print(mutation_params)
mutation_node = ast.Assign(targets=[ast.Tuple(elts=[
# ast.Name(id=model_params["x_train"], ctx=ast.Store()),
# ast.Name(id=model_params["y_train"], ctx=ast.Store()),
model_params["x_train"],
model_params["y_train"],
], ctx=ast.Store()),
],
value=ast.Call(
func=ast.Attribute(value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
attr=mutation_params["operator_name"],
ctx=ast.Load()),
args=[
# ast.Name(id=model_params["x_train"], ctx=ast.Load()),
# ast.Name(id=model_params["y_train"], ctx=ast.Load()),
model_params["x_train"],
model_params["y_train"],
ast.Name(id=mutation_params["percentage"], ctx=ast.Load()), ],
keywords=[]))
return mutation_node
def insert_mutation(self, node, elem, ind, model_params_ann=None):
    """Generate the mutation call and insert it into `node.body` at `ind`.

    Args:
        node: AST node whose body receives the generated statement.
        elem: AST element used to derive model-specific parameters.
        ind: Insertion index within `node.body`.
        model_params_ann: Optional pre-extracted model params (annotated form).
    """
    mutation_node = self.generate_mutation_node(elem, model_params_ann)
    node.body.insert(ind, mutation_node)
    # Removed the dead local flag (`is_inserted = True`) the original set
    # and never read.
    return None
def apply_mutation(self, node, elem, ind, model_params_ann=None):
    """Apply this mutation by inserting the generated call at index `ind`."""
    return self.insert_mutation(node, elem, ind, model_params_ann)
class UnbalanceTDMut(Mutation):
    """Training-data mutation that unbalances the training set.

    Rewrites the program under test so that, just before the training call,
    (x_train, y_train) are replaced by
    ``training_data_operators.unbalance_training_data(x_train, y_train, pct)``.
    """

    mutationName = "unbalance_train_data"

    def dummy(self):
        print("lalala")

    def is_target_node(self, elem):
        """Return True if `elem` is a model training call."""
        return mu.is_training_call(elem)

    def get_model_params(self, elem):
        """Extract training-data parameter names from the training call."""
        return self.get_model_params_td(elem)

    def get_mutation_params(self):
        """Return the module/operator/percentage settings for this mutation.

        Returns:
            dict with keys "module_name", "operator_name", "percentage".
        """
        # TODO: read these from a params file keyed by the mutation name.
        params = {}
        params["module_name"] = "training_data_operators"
        params["operator_name"] = "unbalance_training_data"
        params["percentage"] = "properties.unbalance_train_data['unbalance_train_data_pct']"
        return params

    def generate_mutation_node(self, elem, model_params_ann=None):
        """Build the AST for `x, y = training_data_operators.unbalance_training_data(x, y, pct)`.

        Args:
            elem: training-call element (used when params are not supplied).
            model_params_ann: optional dict with "x_train"/"y_train" names.

        Returns:
            ast.Assign node implementing the mutation.
        """
        if model_params_ann:
            model_params = model_params_ann
            model_params["x_train"] = ast.Name(id=model_params["x_train"], ctx=ast.Store())
            model_params["y_train"] = ast.Name(id=model_params["y_train"], ctx=ast.Store())
        else:
            model_params = self.get_model_params(elem)
        mutation_params = self.get_mutation_params()
        mutation_node = ast.Assign(
            targets=[ast.Tuple(elts=[
                model_params["x_train"],
                model_params["y_train"],
            ], ctx=ast.Store())],
            value=ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
                    attr=mutation_params["operator_name"],
                    ctx=ast.Load()),
                args=[
                    model_params["x_train"],
                    model_params["y_train"],
                    ast.Name(id=mutation_params["percentage"], ctx=ast.Load()),
                ],
                keywords=[]))
        return mutation_node

    def insert_mutation(self, node, elem, ind, model_params_ann=None):
        """Insert the generated mutation call into `node.body` at `ind`."""
        mutation_node = self.generate_mutation_node(elem, model_params_ann)
        node.body.insert(ind, mutation_node)
        # Removed the dead local flag (`is_inserted = True`) from the original.
        return None

    def apply_mutation(self, node, elem, ind, model_params_ann=None):
        self.insert_mutation(node, elem, ind, model_params_ann)
class AddNoiseTDMut(Mutation):
    """Training-data mutation that adds noise to the training inputs.

    Rewrites the program under test so that, just before the training call,
    x_train is replaced by
    ``training_data_operators.operator_add_noise_to_training_data(x_train, pct)``.
    """

    mutationName = "add_noise"

    def dummy(self):
        print("lalala")

    def is_target_node(self, elem):
        """Return True if `elem` is a model training call."""
        return mu.is_training_call(elem)

    def get_model_params(self, elem):
        """Extract training-data parameter names from the training call."""
        return self.get_model_params_td(elem)

    def get_mutation_params(self):
        """Return the module/operator/percentage settings for this mutation.

        Returns:
            dict with keys "module_name", "operator_name", "percentage".
        """
        # TODO: read these from a params file keyed by the mutation name.
        params = {}
        params["module_name"] = "training_data_operators"
        params["operator_name"] = "operator_add_noise_to_training_data"
        params["percentage"] = "properties.add_noise['add_noise_pct']"
        return params

    def generate_mutation_node(self, elem, model_params_ann=None):
        """Build the AST for `x = training_data_operators.operator_add_noise_to_training_data(x, pct)`.

        Args:
            elem: training-call element (used when params are not supplied).
            model_params_ann: optional dict with the "x_train" name.

        Returns:
            ast.Assign node implementing the mutation.
        """
        if model_params_ann:
            model_params = model_params_ann
            model_params["x_train"] = ast.Name(id=model_params["x_train"], ctx=ast.Store())
        else:
            model_params = self.get_model_params(elem)
        mutation_params = self.get_mutation_params()
        mutation_node = ast.Assign(
            targets=[
                model_params["x_train"],
            ],
            value=ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
                    attr=mutation_params["operator_name"],
                    ctx=ast.Load()),
                args=[
                    model_params["x_train"],
                    ast.Name(id=mutation_params["percentage"], ctx=ast.Load()),
                ],
                keywords=[]))
        return mutation_node

    def insert_mutation(self, node, elem, ind, model_params_ann=None):
        """Insert the generated mutation call into `node.body` at `ind`."""
        mutation_node = self.generate_mutation_node(elem, model_params_ann)
        node.body.insert(ind, mutation_node)
        # Removed the dead local flag (`is_inserted = True`) from the original.
        return None

    def apply_mutation(self, node, elem, ind, model_params_ann=None):
        self.insert_mutation(node, elem, ind, model_params_ann)
#########################################
############ HYPERPARAMS #############
# class ChangeLearnRateHPMut_old(Mutation):
# mutationName = "change_learning_rate"
# optimiser_definition_type = None
#
# def dummy(self):
# print("lalala")
#
# def is_target_node(self, elem):
# result = False
#
# result, self.optimiser_definition_type = mu.is_optimiser_object(elem)
#
# if not result:
# result = mu.is_specific_call(elem, "compile")
# self.optimiser_definition_type = "argument"
#
# return result
#
# def get_optimiser_name(self, elem, type):
# optimiser_name = None
#
# if type == "object":
# optimiser_name = elem.value.func.attr
# elif type == "argument":
# for keyword in elem.value.keywords:
# if keyword.arg == "optimizer":
# if isinstance(keyword.value, ast.Str):
# optimiser_name = keyword.value.s
# elif isinstance(keyword.value, ast.Call):
# optimiser_name = keyword.value.func.id
# else:
# print("Mutations-Change Learning Rate-get_optimiser_name-invalid optimiser parameter to compile call")
# else:
# print("Mutations-Change Learning Rate-get_optimiser_name- invalid type parameter")
#
# return optimiser_name
#
# def get_model_params(self, elem):
# optimiser_name = self.get_optimiser_name(elem, self.optimiser_definition_type)
#
# params = {"optimiser_name": optimiser_name}
# return params
#
# def get_mutation_params(self, optimiser_name):
# """Extract a dict of params needed for mutation from a params file
#
# Keyword arguments:
# mutation_name -- name of the mutation
#
# Returns: dics (params)
# """
#
# params = {}
# #
# # # TODO: write the param extraction
# # # FOR NOW it will be like this, after, we read from the file given the mutation name
# #
# # params["module_name"] = "hyperparams_operators"
# # params["operator_name"] = "operator_change_learning_rate"
#
# default_params = getattr(const, optimiser_name.lower())
# default_params["learning_rate"] = "properties.change_learning_rate['learning_rate']"
#
# params.update(default_params)
#
# return params
#
# def change_optimiser(self, elem, object_name):
# for keyword in elem.value.keywords:
# if keyword.arg == "optimizer":
# keyword.value = ast.Name(id=object_name, ctx=ast.Load())
#
# def generate_mutation_node(self, elem):
# """Generate a mutation node
#
# Keyword arguments:
# mutation_name -- name of a mutation (str)
# model_params -- params needed to build a mutation node. depend on the model (list)
#
# Returns: ast node (mutation_node)
# """
# optimiser_call = None
#
# model_params = self.get_model_params(elem)
# mutation_params = self.get_mutation_params(model_params["optimiser_name"])
# # mutation_params.pop("learning_rate")
#
# for opt in const.keras_optimisers:
# if opt.lower() == model_params["optimiser_name"].lower():
# optimiser_call = opt
#
# if not optimiser_call:
# print("Raise Value Error")
#
#
# object_name = "mutation_optimiser"
#
# keywords = []
#
# for param, value in mutation_params.items():
# if isinstance(value, bool):
# keywords.append(ast.keyword(arg=param, value=ast.NameConstant(value=value)))
# elif isinstance(value, str):
# keywords.append(ast.keyword(arg=param, value=ast.Name(id=value, ctx=ast.Load())))
# elif isinstance(value, (int, float)):
# keywords.append(ast.keyword(arg=param, value=ast.Num(n=value)))
#
# mutation_node = ast.Assign(
# targets=[ast.Name(id=object_name, ctx=ast.Store()),],
# value=ast.Call(
# func=ast.Attribute(value=ast.Name(id='optimizers', ctx=ast.Load()), attr=optimiser_call, ctx=ast.Load()),
# args=[],
# keywords=keywords)
# )
#
# return mutation_node, object_name
#
# def insert_mutation(self, node, elem, ind):
# #TODO: to implement
# # generate a mutation call
# mutation_node, object_name = self.generate_mutation_node(elem)
# # insert a mutation call
# node.body.insert(ind, mutation_node)
# # change optimiser argument of compile function to a newly created optimiser object
# self.change_optimiser(elem, object_name)
#
# is_inserted = True
# return None
#
# def perform_mutation(self, elem):
# for keyword in elem.value.keywords:
# if keyword.arg == "learning_rate":
# keyword.value = ast.Name(id="properties.change_learning_rate['learning_rate']", ctx=ast.Load())
#
# def apply_mutation(self, node, elem, ind, model_params = None):
# if self.optimiser_definition_type == "object":
# self.perform_mutation(elem)
# else:
# self.insert_mutation(node, elem, ind)
class ChangeLearnRateHPMut(Mutation):
    """Hyperparameter mutation: route compile()'s optimizer through the
    learning-rate-changing operator."""

    mutationName = "change_learning_rate"

    def dummy(self):
        print("lalala")

    def is_target_node(self, elem):
        """A target is any `model.compile(...)` call."""
        return mu.is_specific_call(elem, "compile")

    def get_model_params(self, elem):
        """No model-specific params are needed for this mutation."""
        return {}

    def get_mutation_params(self, optimiser_name=None):
        """Return the module/operator names implementing the mutation.

        Returns:
            dict with keys "module_name" and "operator_name".
        """
        return {
            "module_name": "hyperparams_operators",
            "operator_name": "operator_change_learning_rate",
        }

    def perform_mutation(self, elem):
        """Wrap the `optimizer=` keyword value in the operator call,
        i.e. optimizer=X becomes optimizer=hyperparams_operators.operator_change_learning_rate(X)."""
        params = self.get_mutation_params()
        for kw in elem.value.keywords:
            if kw.arg == "optimizer":
                wrapper = ast.Attribute(
                    value=ast.Name(id=params["module_name"], ctx=ast.Load()),
                    attr=params["operator_name"],
                    ctx=ast.Load())
                kw.value = ast.Call(func=wrapper, args=[kw.value], keywords=[])

    def apply_mutation(self, node, elem, ind, model_params=None):
        self.perform_mutation(elem)
class ChangeBatchSizeHPMut(Mutation):
    """Hyperparameter mutation: override the batch size passed to the
    training call (e.g. `model.fit`)."""

    mutationName = "change_batch_size"

    def dummy(self):  # __init__
        print("lalala")

    def is_target_node(self, elem):
        """A target is any model training call."""
        return mu.is_training_call(elem)

    def get_model_params(self, elem):
        """Extract hyperparameter info (including `batch_size`) from the call."""
        return self.get_model_params_hp(elem)

    def get_mutation_params(self):
        """Return the module/operator names for this mutation.

        Returns:
            dict with keys "module_name" and "operator_name".
        """
        # TODO: read these from a params file keyed by the mutation name.
        params = {}
        params["module_name"] = "hyperparams_operators"
        params["operator_name"] = "operator_change_batch_size"
        return params

    def generate_mutation_node(self, elem, model_params):
        """Build `<batch_var> = properties.change_batch_size['batch_size']` as AST.

        Used when the training call passes batch_size through a variable:
        the variable is reassigned just before the call.
        """
        mutation_node = ast.Assign(
            targets=[ast.Name(id=model_params["batch_size"], ctx=ast.Store())],
            value=ast.Subscript(
                value=ast.Attribute(
                    value=ast.Name(id='properties', ctx=ast.Load()),
                    attr='change_batch_size',
                    ctx=ast.Load()),
                slice=ast.Index(value=ast.Str(s='batch_size')),
                ctx=ast.Load()))
        return mutation_node

    def insert_mutation(self, node, elem, ind, model_params):
        """Insert the batch-size reassignment into `node.body` at `ind`."""
        mutation_node = self.generate_mutation_node(elem, model_params)
        node.body.insert(ind, mutation_node)
        # Removed the dead local flag (`is_inserted = True`) from the original.
        return None

    def perform_mutation(self, elem):
        """Replace a literal `batch_size=` keyword value with the configured property."""
        for keyword in elem.value.keywords:
            if keyword.arg == "batch_size":
                keyword.value = ast.Name(id="properties.change_batch_size['batch_size']", ctx=ast.Load())

    def apply_mutation(self, node, elem, ind, model_params=None):
        """Dispatch on how batch_size is supplied: absent, via a variable, or literal."""
        model_params = self.get_model_params(elem)
        batch_size = model_params.get("batch_size")
        if not props.change_batch_size["applicable"]:
            # fixed message typo: was "in not applicable"
            print("Change batch size is not applicable")
        elif batch_size is None:
            self.add_keyword(elem, "batch_size", "properties.change_batch_size['batch_size']")
        elif isinstance(batch_size, str):
            self.insert_mutation(node, elem, ind, model_params)
        elif isinstance(batch_size, int):
            self.perform_mutation(elem)
        else:
            print("Unknown batch size value")
class ChangeEpochsHPMut(Mutation):
mutationName = "change_epochs"
| |
# Source repository: jtran10/pyNastran
"""
This file defines:
- WriteMesh
"""
from __future__ import print_function
import io
import sys
from codecs import open
from six import string_types, StringIO, PY2, iterkeys
from numpy import array, unique, concatenate, intersect1d, where
from pyNastran.bdf.utils import print_filename
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.dev.bdf_vectorized.bdf_interface2.attributes import BDFAttributes
class WriteMesh(BDFAttributes):
"""
Defines methods for writing cards
Major methods:
- model.write_bdf(...)
"""
def __init__(self):
    """creates methods for writing cards"""
    # delegate attribute setup to the shared BDF attribute container
    BDFAttributes.__init__(self)
def get_encoding(self, encoding=None):
    """Resolve the output encoding.

    Falls back to the model's stored encoding, then to the system default.

    Parameters
    ----------
    encoding : str; default=None
        explicit encoding; None means "use the model/system default"

    Returns
    -------
    encoding : str
        the encoding to use
    """
    # original used an `if ...: pass / else:` chain; collapsed to the
    # equivalent two fallback steps
    if encoding is None:
        encoding = self._encoding
    if encoding is None:
        encoding = sys.getdefaultencoding()
    return encoding
def _output_helper(self, out_filename, interspersed, size, is_double):
    """
    Performs type checking on the write_bdf inputs

    Returns the validated output target: either a file-like object
    (returned as-is) or a filename string. Pops a save-file dialog when
    out_filename is None.
    """
    if out_filename is None:
        # no target given -> ask the user via the GUI save dialog
        from pyNastran.utils.gui_io import save_file_dialog
        wildcard_wx = "Nastran BDF (*.bdf; *.dat; *.nas; *.pch)|" \
            "*.bdf;*.dat;*.nas;*.pch|" \
            "All files (*.*)|*.*"
        wildcard_qt = "Nastran BDF (*.bdf *.dat *.nas *.pch);;All files (*)"
        title = 'Save BDF/DAT/PCH'
        out_filename = save_file_dialog(title, wildcard_wx, wildcard_qt)
        assert out_filename is not None, out_filename
    if PY2:
        # `file` is the Python 2 built-in file type (does not exist on Python 3)
        if not(hasattr(out_filename, 'read') and hasattr(out_filename, 'write')) or isinstance(out_filename, (file, StringIO)):
            return out_filename
        elif not isinstance(out_filename, string_types):
            msg = 'out_filename=%r must be a string; type=%s' % (
                out_filename, type(out_filename))
            raise TypeError(msg)
    else:
        # Python 3: any io object (or read/write duck-type) is used as-is
        if not(hasattr(out_filename, 'read') and hasattr(out_filename, 'write')) or isinstance(out_filename, io.IOBase):
            return out_filename
        elif not isinstance(out_filename, string_types):
            msg = 'out_filename=%r must be a string; type=%s' % (
                out_filename, type(out_filename))
            raise TypeError(msg)
    # small field (8) forbids is_double; large field (16) allows either
    if size == 8:
        assert is_double is False, 'is_double=%r' % is_double
    elif size == 16:
        assert is_double in [True, False], 'is_double=%r' % is_double
    else:
        assert size in [8, 16], size
    assert isinstance(interspersed, bool)
    fname = print_filename(out_filename, self._relpath)
    self.log.debug("***writing %s" % fname)
    return out_filename
def write_caero_model(self, caero_bdf_filename='caero.bdf'):
    """write the CAERO cards as CQUAD4s that can be visualized

    Not implemented in the vectorized writer; always raises
    NotImplementedError.
    """
    raise NotImplementedError()
def write_bdf(self, out_filename=None, encoding=None,
              size=8, is_double=False,
              interspersed=False, enddata=None, close=True):
    """
    Writes the BDF.

    Parameters
    ----------
    out_filename : varies; default=None
        str - the name to call the output bdf
        file - a file object
        StringIO() - a StringIO object
        None - pops a dialog
    encoding : str; default=None -> system specified encoding
        the unicode encoding
        latin1, and utf8 are generally good options
    size : int; {8, 16}
        the field size
    is_double : bool; default=False
        False : small field
        True : large field
    interspersed : bool; default=False
        Writes a bdf with properties & elements
        interspersed like how Patran writes the bdf.  This takes
        slightly longer than if interspersed=False, but makes it
        much easier to compare to a Patran-formatted bdf and is
        more clear.
    enddata : bool; default=None
        bool - enable/disable writing ENDDATA
        None - depends on input BDF
    close : bool; default=True
        should the output file be closed
    """
    #self.write_caero_model()
    out_filename = self._output_helper(out_filename,
                                       interspersed, size, is_double)
    self.log.debug('---starting BDF.write_bdf of %s---' % out_filename)
    encoding = self.get_encoding(encoding)
    #assert encoding.lower() in ['ascii', 'latin1', 'utf8'], encoding
    # a file-like object is used as-is; a path string is opened here
    if hasattr(out_filename, 'read') and hasattr(out_filename, 'write'):
        bdf_file = out_filename
    else:
        bdf_file = open(out_filename, 'w', encoding=encoding)
    # write the deck section-by-section
    self._write_header(bdf_file, encoding)
    self._write_params(bdf_file, size, is_double)
    self._write_nodes(bdf_file, size, is_double)
    self.write_elements_properties(bdf_file, size, is_double, interspersed)
    self._write_materials(bdf_file, size, is_double)
    self._write_rigid_elements(bdf_file, size, is_double)  # split out for write_bdf_symmetric
    self._write_aero(bdf_file, size, is_double)  # split out for write_bdf_symmetric
    self._write_common(bdf_file, size, is_double)
    # ENDDATA is written when requested explicitly, or (enddata=None)
    # when the input deck contained one
    if (enddata is None and 'ENDDATA' in self.card_count) or enddata:
        bdf_file.write('ENDDATA\n')
    if close:
        bdf_file.close()
def write_bdf_symmetric(self, out_filename=None, encoding=None,
                        size=8, is_double=False,
                        enddata=None, close=True, plane='xz'):
    """
    Writes the BDF.

    Not implemented in the vectorized writer; always raises
    NotImplementedError.

    Parameters
    ----------
    out_filename : varies; default=None
        str - the name to call the output bdf
        file - a file object
        StringIO() - a StringIO object
        None - pops a dialog
    encoding : str; default=None -> system specified encoding
        the unicode encoding
        latin1, and utf8 are generally good options
    size : int; {8, 16}
        the field size
    is_double : bool; default=False
        False : small field
        True : large field
    enddata : bool; default=None
        bool - enable/disable writing ENDDATA
        None - depends on input BDF
    close : bool; default=True
        should the output file be closed
    plane : str; {'xy', 'yz', 'xz'}; default='xz'
        the plane to mirror about
    """
    raise NotImplementedError()
def _write_header(self, bdf_file, encoding):
    """
    Writes the executive and case control decks.
    """
    if self.punch is None:
        # mesh was built without read_bdf; infer punch mode from the decks
        self.punch = not (self.executive_control_lines or self.case_control_deck)
    if self.nastran_format:
        header_lines = [
            '$pyNastran: version=%s' % self.nastran_format,
            '$pyNastran: punch=%s' % self.punch,
            '$pyNastran: encoding=%s' % encoding,
            '$pyNastran: nnodes=%s' % self.grid.n,
            '$pyNastran: nelements=%s' % len(self.elements),
        ]
        bdf_file.write('\n'.join(header_lines) + '\n')
    if not self.punch:
        self._write_executive_control_deck(bdf_file)
        self._write_case_control_deck(bdf_file)
def _write_executive_control_deck(self, bdf_file):
    """
    Writes the executive control deck (updating the SOL line if its
    location is known).
    """
    if not self.executive_control_lines:
        return
    if self.sol == 600:
        sol_line = 'SOL 600,%s' % self.sol_method
    else:
        sol_line = 'SOL %s' % self.sol
    if self.sol_iline is not None:
        self.executive_control_lines[self.sol_iline] = sol_line
    deck_lines = ['$EXECUTIVE CONTROL DECK']
    deck_lines.extend(self.executive_control_lines)
    bdf_file.write('\n'.join(deck_lines) + '\n')
def _write_case_control_deck(self, bdf_file):
    """
    Writes the Case Control Deck.
    """
    if self.case_control_deck:
        msg = '$CASE CONTROL DECK\n'
        msg += str(self.case_control_deck)
        assert 'BEGIN BULK' in msg, msg
        # msg is already a single string; the original's ''.join(msg) was a
        # character-wise no-op
        bdf_file.write(msg)
def _write_elements_properties(self, bdf_file, size):
    """
    Writes the elements and properties in an interspersed order

    Each property (PSHELL/PCOMP/PSHEAR/PROD/PSOLID) is written followed by
    the elements that reference it; properties with no elements and
    elements with no properties go under separate headers.

    Parameters
    ----------
    bdf_file : file-like
        the open output file
    size : int; {8, 16}
        the field size
    """
    if self.properties:
        bdf_file.write('$ELEMENTS_WITH_PROPERTIES\n')
    ptypes = [
        self.properties_shell.pshell,
        self.properties_shell.pcomp,
        self.pshear,
        self.prod,
        self.properties_solid.psolid,
        #self.properties_bar.pbar,
        #self.properties_bar.pbarl,
        #self.properties_beam.pbeam,
        #self.properties_beam.pbeaml,
    ]
    # collect every property id across the property containers
    n = 0
    pids_all = None  # the actual properties
    for t in ptypes:
        if t.n and n == 0:
            pids_all = t.property_id
            n = 1
        elif t.n:
            self.log.debug(pids_all)
            self.log.debug(t.property_id)
            try:
                # BUGFIX: numpy.concatenate takes a *sequence* of arrays;
                # the original passed two positional arrays (the second was
                # interpreted as the axis) and relied on the fallback below.
                pids_all = concatenate([pids_all, t.property_id])
            except ValueError:
                pids_all = array(list(pids_all) + list(t.property_id))
    etypes = (self.elements_shell._get_types() +
              self.elements_solid._get_types() +
              [self.crod, self.cshear])
    if pids_all is None:
        # no properties at all -> just dump every element
        bdf_file.write('$MISSING_ELEMENTS because there are no properties\n')
        for t in etypes:
            t.write_card(bdf_file, size=size)
        return
    # there are properties
    pids_set = set(list(pids_all))
    # collect element ids and the property ids they reference
    n = 0
    pids = None
    for t in etypes:
        if t.n and n == 0:
            eids = t.element_id
            pids = t.property_id
            n = 1
        elif t.n:
            try:
                # BUGFIX: sequence form of concatenate (see above)
                eids = concatenate([eids, t.element_id])
            except (TypeError, ValueError):
                eids = array(list(eids) + list(t.element_id))
            try:
                pids = concatenate([pids, t.property_id])
            except (AttributeError, TypeError, ValueError):
                pids = array(list(pids) + list(t.property_id))
    elements_by_pid = {}
    if pids is not None:
        pids_unique = unique(pids)
        self.log.debug("pids_unique = %s" % pids_unique)
        pids_unique.sort()
        if len(pids_unique) > 0:
            bdf_file.write('$ELEMENTS_WITH_PROPERTIES\n')
    # write each property followed by the elements that reference it
    for pid in pids_all:
        i = where(pid == pids)[0]
        eids2 = eids[i]
        for t in ptypes:
            if t.n and pid in t.property_id:
                self.log.debug("prop.type = %s" % t.type)
                t.write_card(bdf_file, size=size, property_ids=[pid])
                pids_set.remove(pid)
        n = 0
        for t in etypes:
            if not t.n:
                continue
            eids3 = intersect1d(t.element_id, eids2, assume_unique=False)
            if n == 0 and len(eids3):
                elements_by_pid[pid] = eids3
                n = 1
            elif len(eids3):
                try:
                    # BUGFIX: sequence form of concatenate (see above)
                    c = concatenate([elements_by_pid[pid], eids3])
                except (TypeError, ValueError):
                    c = array(list(elements_by_pid[pid]) + list(eids3))
                elements_by_pid[pid] = c
            else:
                continue
            try:
                t.write_card(bdf_file, size=size, element_ids=eids3)
            except TypeError:
                print("t.type = %s" % t.type)
                raise
            del eids3
    # missing properties
    if pids_set:
        pids_list = list(pids_set)
        bdf_file.write('$UNASSOCIATED_PROPERTIES\n')
        for pid in pids_list:
            for prop in ptypes:
                if prop.n and pid in prop.property_id:
                    prop.write_card(bdf_file, size=size, property_ids=[pid])
    #.. todo:: finish...
    bdf_file.write('$UNASSOCIATED_ELEMENTS\n')
    # missing elements...
def write_elements_properties(self, bdf_file, size, is_double, interspersed):
self.elements.write_card(bdf_file, size=size, is_double=is_double,
include_properties=True, interspersed=interspersed)
def _write_aero(self, bdf_file, size=8, is_double=False):
"""Writes the aero cards"""
if self.caeros or self.paeros or self.monitor_points or self.splines:
msg = ['$AERO\n']
for (unused_id, caero) in sorted(self.caeros.items()):
msg.append(caero.write_card(size, is_double))
for (unused_id, paero) in sorted(self.paeros.items()):
msg.append(paero.write_card(size, is_double))
for (unused_id, spline) in sorted(self.splines.items()):
msg.append(spline.write_card(size, is_double))
for monitor_point in self.monitor_points:
msg.append(monitor_point.write_card(size, is_double))
bdf_file.write(''.join(msg))
def _write_aero_control(self, bdf_file, size=8, is_double=False):
"""Writes the aero control surface cards"""
if(self.aecomps or self.aefacts or self.aeparams or self.aelinks or
self.aelists or self.aestats or self.aesurf or self.aesurfs):
msg = ['$AERO CONTROL SURFACES\n']
for unused_id, aelinks in sorted(self.aelinks.items()):
for aelink in aelinks:
msg.append(aelink.write_card(size, is_double))
for unused_id, aecomp in sorted(self.aecomps.items()):
msg.append(aecomp.write_card(size, is_double))
for unused_id, aeparam in sorted(self.aeparams.items()):
msg.append(aeparam.write_card(size, is_double))
for unused_id, aestat in sorted(self.aestats.items()):
msg.append(aestat.write_card(size, is_double))
for unused_id, aelist in sorted(self.aelists.items()):
msg.append(aelist.write_card(size, is_double))
for unused_id, aesurf in sorted(self.aesurf.items()):
msg.append(aesurf.write_card(size, | |
= kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if product is not None:
_json = product
else:
_json = None
request = build_lros_patch201_retry_with_async_header_request_initial(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
if response.content:
deserialized = response.json()
else:
deserialized = None
if response.status_code == 201:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
    @distributed_trace_async
    async def begin_patch201_retry_with_async_header(
        self, product: JSONType = None, **kwargs: Any
    ) -> AsyncLROPoller[JSONType]:
        """Long running patch request, service returns a 201 to the initial request with async header.

        :param product: Product to patch.
        :type product: JSONType
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
        :raises: ~azure.core.exceptions.HttpResponseError

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                product = {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }

                # response body for status code(s): 200, 201
                response.json() == {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }
        """
        # LRO-only options are popped off kwargs before the remaining kwargs
        # are forwarded to the initial request and the polling method, so the
        # order of these pops matters
        content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x, y, z: x returns the raw pipeline response (the
            # _initial method calls cls(pipeline_response, deserialized,
            # response_headers)), so the poller gets the full initial response
            raw_result = await self._patch201_retry_with_async_header_initial(
                product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
            )
            kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # deserializes the terminal response once polling completes
            response = pipeline_response.http_response
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; polling=False -> no polling;
        # anything else is treated as a user-supplied polling method
        if polling is True:
            polling_method = AsyncARMPolling(
                lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs
            )
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _patch202_retry_with_async_and_location_header_initial(
self, product: JSONType = None, **kwargs: Any
) -> JSONType:
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if product is not None:
_json = product
else:
_json = None
request = build_lros_patch202_retry_with_async_and_location_header_request_initial(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
if response.content:
deserialized = response.json()
else:
deserialized = None
if response.status_code == 202:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
    @distributed_trace_async
    async def begin_patch202_retry_with_async_and_location_header(
        self, product: JSONType = None, **kwargs: Any
    ) -> AsyncLROPoller[JSONType]:
        """Long running patch request, service returns a 202 to the initial request with async and
        location header.

        :param product: Product to patch.
        :type product: JSONType
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
        :raises: ~azure.core.exceptions.HttpResponseError

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                product = {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }

                # response body for status code(s): 200, 202
                response.json() == {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }
        """
        # LRO-only options are popped off kwargs before the remaining kwargs
        # are forwarded to the initial request and the polling method, so the
        # order of these pops matters
        content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x, y, z: x returns the raw pipeline response (the
            # _initial method calls cls(pipeline_response, deserialized,
            # response_headers)), so the poller gets the full initial response
            raw_result = await self._patch202_retry_with_async_and_location_header_initial(
                product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
            )
            kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # deserializes the terminal response once polling completes
            response = pipeline_response.http_response
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; polling=False -> no polling;
        # anything else is treated as a user-supplied polling method
        if polling is True:
            polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _put201_succeeded_initial(self, product: JSONType = None, **kwargs: Any) -> JSONType:
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if product is not None:
_json = product
else:
_json = None
request = build_lros_put201_succeeded_request_initial(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
@distributed_trace_async
async def begin_put201_succeeded(self, product: JSONType = None, **kwargs: Any) -> AsyncLROPoller[JSONType]:
"""Long running put request, service returns a 201 to the initial request, with an entity that
contains ProvisioningState=’Succeeded’.
:param product: Product to put.
:type product: JSONType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
product = {
"id": "str", # Optional. Resource Id.
"location": "str", # Optional. Resource Location.
"name": "str", # Optional. Resource | |
"shrug tone 2",
"unicode": "1f937-1f3fc"
},
":shrug_tone3:": {
"category": "people",
"name": "shrug tone 3",
"unicode": "1f937-1f3fd"
},
":shrug_tone4:": {
"category": "people",
"name": "shrug tone 4",
"unicode": "1f937-1f3fe"
},
":shrug_tone5:": {
"category": "people",
"name": "shrug tone 5",
"unicode": "1f937-1f3ff"
},
":signal_strength:": {
"category": "symbols",
"name": "antenna with bars",
"unicode": "1f4f6"
},
":six:": {
"category": "symbols",
"name": "keycap digit six",
"unicode": "0036-20e3",
"unicode_alt": "0036-fe0f-20e3"
},
":six_pointed_star:": {
"category": "symbols",
"name": "six pointed star with middle dot",
"unicode": "1f52f"
},
":ski:": {
"category": "activity",
"name": "ski and ski boot",
"unicode": "1f3bf"
},
":skier:": {
"category": "activity",
"name": "skier",
"unicode": "26f7",
"unicode_alt": "26f7-fe0f"
},
":skull:": {
"category": "people",
"name": "skull",
"unicode": "1f480"
},
":skull_crossbones:": {
"category": "objects",
"name": "skull and crossbones",
"unicode": "2620",
"unicode_alt": "2620-fe0f"
},
":sleeping:": {
"category": "people",
"name": "sleeping face",
"unicode": "1f634"
},
":sleeping_accommodation:": {
"category": "objects",
"name": "sleeping accommodation",
"unicode": "1f6cc"
},
":sleepy:": {
"category": "people",
"name": "sleepy face",
"unicode": "1f62a"
},
":slight_frown:": {
"category": "people",
"name": "slightly frowning face",
"unicode": "1f641"
},
":slight_smile:": {
"category": "people",
"name": "slightly smiling face",
"unicode": "1f642"
},
":slot_machine:": {
"category": "activity",
"name": "slot machine",
"unicode": "1f3b0"
},
":small_blue_diamond:": {
"category": "symbols",
"name": "small blue diamond",
"unicode": "1f539"
},
":small_orange_diamond:": {
"category": "symbols",
"name": "small orange diamond",
"unicode": "1f538"
},
":small_red_triangle:": {
"category": "symbols",
"name": "up-pointing red triangle",
"unicode": "1f53a"
},
":small_red_triangle_down:": {
"category": "symbols",
"name": "down-pointing red triangle",
"unicode": "1f53b"
},
":smile:": {
"category": "people",
"name": "smiling face with open mouth and smiling eyes",
"unicode": "1f604"
},
":smile_cat:": {
"category": "people",
"name": "grinning cat face with smiling eyes",
"unicode": "1f638"
},
":smiley:": {
"category": "people",
"name": "smiling face with open mouth",
"unicode": "1f603"
},
":smiley_cat:": {
"category": "people",
"name": "smiling cat face with open mouth",
"unicode": "1f63a"
},
":smiling_imp:": {
"category": "people",
"name": "smiling face with horns",
"unicode": "1f608"
},
":smirk:": {
"category": "people",
"name": "smirking face",
"unicode": "1f60f"
},
":smirk_cat:": {
"category": "people",
"name": "cat face with wry smile",
"unicode": "1f63c"
},
":smoking:": {
"category": "objects",
"name": "smoking symbol",
"unicode": "1f6ac"
},
":snail:": {
"category": "nature",
"name": "snail",
"unicode": "1f40c"
},
":snake:": {
"category": "nature",
"name": "snake",
"unicode": "1f40d"
},
":sneezing_face:": {
"category": "people",
"name": "sneezing face",
"unicode": "1f927"
},
":snowboarder:": {
"category": "activity",
"name": "snowboarder",
"unicode": "1f3c2"
},
":snowflake:": {
"category": "nature",
"name": "snowflake",
"unicode": "2744",
"unicode_alt": "2744-fe0f"
},
":snowman2:": {
"category": "nature",
"name": "snowman",
"unicode": "2603",
"unicode_alt": "2603-fe0f"
},
":snowman:": {
"category": "nature",
"name": "snowman without snow",
"unicode": "26c4",
"unicode_alt": "26c4-fe0f"
},
":sob:": {
"category": "people",
"name": "loudly crying face",
"unicode": "1f62d"
},
":soccer:": {
"category": "activity",
"name": "soccer ball",
"unicode": "26bd",
"unicode_alt": "26bd-fe0f"
},
":soon:": {
"category": "symbols",
"name": "soon with rightwards arrow above",
"unicode": "1f51c"
},
":sos:": {
"category": "symbols",
"name": "squared sos",
"unicode": "1f198"
},
":sound:": {
"category": "symbols",
"name": "speaker with one sound wave",
"unicode": "1f509"
},
":space_invader:": {
"category": "activity",
"name": "alien monster",
"unicode": "1f47e"
},
":spades:": {
"category": "symbols",
"name": "black spade suit",
"unicode": "2660",
"unicode_alt": "2660-fe0f"
},
":spaghetti:": {
"category": "food",
"name": "spaghetti",
"unicode": "1f35d"
},
":sparkle:": {
"category": "symbols",
"name": "sparkle",
"unicode": "2747",
"unicode_alt": "2747-fe0f"
},
":sparkler:": {
"category": "travel",
"name": "firework sparkler",
"unicode": "1f387"
},
":sparkles:": {
"category": "nature",
"name": "sparkles",
"unicode": "2728"
},
":sparkling_heart:": {
"category": "symbols",
"name": "sparkling heart",
"unicode": "1f496"
},
":speak_no_evil:": {
"category": "nature",
"name": "speak-no-evil monkey",
"unicode": "1f64a"
},
":speaker:": {
"category": "symbols",
"name": "speaker",
"unicode": "1f508"
},
":speaking_head:": {
"category": "people",
"name": "speaking head in silhouette",
"unicode": "1f5e3",
"unicode_alt": "1f5e3-fe0f"
},
":speech_balloon:": {
"category": "symbols",
"name": "speech balloon",
"unicode": "1f4ac"
},
":speech_left:": {
"category": "symbols",
"name": "left speech bubble",
"unicode": "1f5e8",
"unicode_alt": "1f5e8-fe0f"
},
":speedboat:": {
"category": "travel",
"name": "speedboat",
"unicode": "1f6a4"
},
":spider:": {
"category": "nature",
"name": "spider",
"unicode": "1f577",
"unicode_alt": "1f577-fe0f"
},
":spider_web:": {
"category": "nature",
"name": "spider web",
"unicode": "1f578",
"unicode_alt": "1f578-fe0f"
},
":spoon:": {
"category": "food",
"name": "spoon",
"unicode": "1f944"
},
":spy:": {
"category": "people",
"name": "sleuth or spy",
"unicode": "1f575",
"unicode_alt": "1f575-fe0f"
},
":spy_tone1:": {
"category": "people",
"name": "sleuth or spy tone 1",
"unicode": "1f575-1f3fb"
},
":spy_tone2:": {
"category": "people",
"name": "sleuth or spy tone 2",
"unicode": "1f575-1f3fc"
},
":spy_tone3:": {
"category": "people",
"name": "sleuth or spy tone 3",
"unicode": "1f575-1f3fd"
},
":spy_tone4:": {
"category": "people",
"name": "sleuth or spy tone 4",
"unicode": "1f575-1f3fe"
},
":spy_tone5:": {
"category": "people",
"name": "sleuth or spy tone 5",
"unicode": "1f575-1f3ff"
},
":squid:": {
"category": "nature",
"name": "squid",
"unicode": "1f991"
},
":stadium:": {
"category": "travel",
"name": "stadium",
"unicode": "1f3df",
"unicode_alt": "1f3df-fe0f"
},
":star2:": {
"category": "nature",
"name": "glowing star",
"unicode": "1f31f"
},
":star:": {
"category": "nature",
"name": "white medium star",
"unicode": "2b50",
"unicode_alt": "2b50-fe0f"
},
":star_and_crescent:": {
"category": "symbols",
"name": "star and crescent",
"unicode": "262a",
"unicode_alt": "262a-fe0f"
},
":star_of_david:": {
"category": "symbols",
"name": "star of david",
"unicode": "2721",
"unicode_alt": "2721-fe0f"
},
":stars:": {
"category": "travel",
"name": "shooting star",
"unicode": "1f320"
},
":station:": {
"category": "travel",
"name": "station",
"unicode": "1f689"
},
":statue_of_liberty:": {
"category": "travel",
"name": "statue of liberty",
"unicode": "1f5fd"
},
":steam_locomotive:": {
"category": "travel",
"name": "steam locomotive",
"unicode": "1f682"
},
":stew:": {
"category": "food",
"name": "pot of food",
"unicode": "1f372"
},
":stop_button:": {
"category": "symbols",
"name": "black square for stop",
"unicode": "23f9",
"unicode_alt": "23f9-fe0f"
},
":stopwatch:": {
"category": "objects",
"name": "stopwatch",
"unicode": "23f1",
"unicode_alt": "23f1-fe0f"
},
":straight_ruler:": {
"category": "objects",
"name": "straight ruler",
"unicode": "1f4cf"
},
":strawberry:": {
"category": "food",
"name": "strawberry",
"unicode": "1f353"
},
":stuck_out_tongue:": {
"category": "people",
"name": "face with stuck-out tongue",
"unicode": "1f61b"
},
":stuck_out_tongue_closed_eyes:": {
"category": "people",
"name": "face with stuck-out tongue and tightly-closed eyes",
"unicode": "1f61d"
},
":stuck_out_tongue_winking_eye:": {
"category": "people",
"name": "face with stuck-out tongue and winking eye",
"unicode": "1f61c"
},
":stuffed_flatbread:": {
"category": "food",
"name": "stuffed flatbread",
"unicode": "1f959"
},
":sun_with_face:": {
"category": "nature",
"name": "sun with face",
"unicode": "1f31e"
},
":sunflower:": {
"category": "nature",
"name": "sunflower",
"unicode": "1f33b"
},
":sunglasses:": {
"category": "people",
"name": "smiling face with sunglasses",
"unicode": "1f60e"
},
":sunny:": {
"category": "nature",
"name": "black sun with rays",
"unicode": "2600",
"unicode_alt": "2600-fe0f"
},
":sunrise:": {
"category": "travel",
"name": "sunrise",
"unicode": "1f305"
},
":sunrise_over_mountains:": {
"category": "travel",
"name": "sunrise over mountains",
"unicode": "1f304"
},
":surfer:": {
"category": "activity",
"name": "surfer",
"unicode": "1f3c4"
},
":surfer_tone1:": {
"category": "activity",
"name": "surfer tone 1",
"unicode": "1f3c4-1f3fb"
},
":surfer_tone2:": {
"category": "activity",
"name": "surfer tone 2",
"unicode": "1f3c4-1f3fc"
},
":surfer_tone3:": {
"category": "activity",
"name": "surfer tone 3",
"unicode": "1f3c4-1f3fd"
},
":surfer_tone4:": {
"category": "activity",
"name": "surfer tone 4",
"unicode": "1f3c4-1f3fe"
},
":surfer_tone5:": {
"category": "activity",
"name": "surfer tone 5",
"unicode": "1f3c4-1f3ff"
},
":sushi:": {
"category": "food",
"name": "sushi",
"unicode": "1f363"
},
":suspension_railway:": {
"category": "travel",
"name": "suspension railway",
"unicode": "1f69f"
},
":sweat:": {
"category": "people",
"name": "face with cold sweat",
"unicode": "1f613"
},
":sweat_drops:": {
"category": "nature",
"name": "splashing sweat symbol",
"unicode": "1f4a6"
},
":sweat_smile:": {
"category": "people",
"name": "smiling face with open mouth and cold sweat",
"unicode": "1f605"
},
":sweet_potato:": {
"category": "food",
"name": "roasted sweet potato",
"unicode": "1f360"
},
":swimmer:": {
"category": "activity",
"name": "swimmer",
"unicode": "1f3ca"
},
":swimmer_tone1:": {
"category": "activity",
"name": "swimmer tone 1",
"unicode": "1f3ca-1f3fb"
},
":swimmer_tone2:": {
"category": "activity",
"name": "swimmer tone 2",
"unicode": "1f3ca-1f3fc"
},
":swimmer_tone3:": {
"category": "activity",
"name": "swimmer tone 3",
"unicode": "1f3ca-1f3fd"
},
":swimmer_tone4:": {
"category": "activity",
"name": "swimmer tone 4",
"unicode": "1f3ca-1f3fe"
},
":swimmer_tone5:": {
"category": "activity",
"name": "swimmer tone 5",
"unicode": "1f3ca-1f3ff"
},
":symbols:": {
"category": "symbols",
"name": "input symbol for symbols",
"unicode": "1f523"
},
":synagogue:": {
"category": "travel",
"name": "synagogue",
"unicode": "1f54d"
},
":syringe:": {
"category": "objects",
"name": "syringe",
"unicode": "1f489"
},
":taco:": {
"category": "food",
"name": "taco",
"unicode": "1f32e"
},
":tada:": {
"category": "objects",
"name": "party popper",
"unicode": "1f389"
},
":tanabata_tree:": {
"category": "nature",
"name": "tanabata tree",
"unicode": "1f38b"
},
":tangerine:": {
"category": "food",
"name": "tangerine",
"unicode": "1f34a"
},
":taurus:": {
"category": "symbols",
"name": "taurus",
"unicode": "2649",
"unicode_alt": "2649-fe0f"
},
":taxi:": {
"category": "travel",
"name": "taxi",
"unicode": "1f695"
},
":tea:": {
"category": "food",
"name": | |
#
# TODO if this gets slow, use kdtree for neighbor search
# could also use some pre-sorting and heuristics, e.g. keep cells list
# sorted by x,y it's highly unlikely any daughter cell is going to be
# hundreds of pixels away from its parent, so don't need to search the
# whole list and, likely, we ought to do something else anyway if it
# is that far away
no_ancestor_found = cells[cells[:, CELL_ANCESTOR_COL] == -1]
for naf in no_ancestor_found:
x_new = naf[CELL_X_COL]
y_new = naf[CELL_Y_COL]
z_new = naf[CELL_Z_COL]
naf_id = naf[CELL_ID_COL]
min_dist = -1
nearest_ancestor = -1
for cell_id, anc2_id in ancestry.items():
ancestor_found = cells[cells[:, CELL_ID_COL] == cell_id]
if(len(ancestor_found) > 0):
x_old = ancestor_found[0, CELL_X_COL]
y_old = ancestor_found[0, CELL_Y_COL]
z_old = ancestor_found[0, CELL_Z_COL]
distance = ((x_old-x_new)*(x_old-x_new)
+ (y_old-y_new)*(y_old-y_new)
+ (z_old-z_new)*(z_old-z_new))
if((min_dist == -1) | (distance < min_dist)):
min_dist = distance
nearest_ancestor = anc2_id
# now that we've found the nearest neighbor cell with a known
# ancestor, update the ancestry dictionary
ancestry[naf_id] = nearest_ancestor
ancestor_found = cells[cells[:, CELL_ID_COL] == cell_id][0]
ancestor_found[CELL_ANCESTOR_COL] = nearest_ancestor
# probably don't need to do this update since cells is
# about to go out of scope
cells[cells[:, CELL_ID_COL] == cell_id] = ancestor_found
return(ancestry)
# TODO this family of functions really ought to have some responsibilities
# split. Basically, there's filtering which colonies we care about and there's
# determining the area(s) of the relevant colonies. As a motivating example,
# think about how separating out the filter responsibility would ease a new
# use case of 'show me only the live heterotrophs while ignoring the
# cyanobacteria and eps components'.
def get_colony_morphology_at_time(time, ancestor_id, ancestors, trajectory,
                                  scale, height, width):
    """
    Determine the apparent 2D area of a colony at a specific timestep. A
    colony is defined as all cells sharing a common ancestor. The 2D apparent
    area is the visible biomass looking from the top down. Every cell from
    the colony is projected to the x-y plane and is occluded by any non-colony
    cells above them.

    Internally, this function generates a virtual black and white image of
    the projected and occluded colony to determine the apparent area. The
    scale, height, and width parameters should be set so that the results
    are comparable to any associated micrographs from analogous wet-lab
    experiments.

    This function may be called on its own, but it is originally intended as
    the lowest level component of :func: get_colony_morphologies_at_times.

    Parameters
    ----------
    time : The numeric timestep of interest.
    ancestor_id : The numeric id of the common ancestor to all colony members.
    ancestors : A dictionary mapping each cell present in the timestep to the
        id of its ancestor.
    trajectory : The decoded hdf5 file containing the dumped run data.
        Usually from something like h5py.File(infile, 'r')
    scale : A value by which to multiply the physical coordinates.
        The intended goal is to convert from spatial coordinates to pixel
        locations so scale is generally passed a number representing pixels
        per meter.
    height : The height of the virtual image.
    width : The width of the virtual image.

    Returns
    -------
    A three-element list containing the timestep, ancestor id, and apparent 2D
    area in pixels. Although technically an ancestor id, the second item can
    also be thought of as a colony id.
    """
    dprint(f'Getting morphology of colony {ancestor_id} at time {time}')
    cells = get_cells(trajectory, time, scale)
    mask = np.zeros((height, width, 3), dtype="uint8")
    # draw cells in ascending z order so that higher cells are painted last
    # and occlude the lower ones
    sorted_array = cells[np.argsort(cells[:, CELL_Z_COL])]
    for cell in sorted_array:
        loc = (int(cell[CELL_X_COL]), int(cell[CELL_Y_COL]))
        cell_id = cell[CELL_ID_COL]
        seed_id = ancestors[cell_id]
        if seed_id == ancestor_id:
            color = (255, 255, 55)
        else:
            # non-colony cells are painted black so they occlude colony cells
            # below without contributing to the area count
            color = (0, 0, 0)
        cv2.circle(mask, loc, int(cell[CELL_RADIUS_COL]), color, -1)
    # count colony PIXELS, i.e. locations where any channel is lit.
    # BUG FIX: np.count_nonzero(mask) on the (height, width, 3) image counted
    # every colony pixel three times (all three channels of the colony color
    # are nonzero), inflating the reported area by a factor of 3.
    area = np.count_nonzero(mask.any(axis=-1))
    return [time, ancestor_id, area]
# %%
def get_colony_morphologies_at_time(time, ancestors, trajectory, scale, height,
                                    width):
    """
    Determine the apparent 2D areas of all colonies at a specific timestep. A
    colony is defined as all cells sharing a common ancestor. The 2D apparent
    area is the visible biomass looking from the top down. Every cell from
    the colony is projected to the x-y plane and is occluded by any non-colony
    cells above them.

    Internally, this function relies on a virtual black and white image of
    the projected and occluded colony to determine the apparent area. The
    scale, height, and width parameters should be set so that the results
    are comparable to any associated micrographs from analogous wet-lab
    experiments.

    This function may be called on its own, but it is originally intended as
    a mid-level component of :func: get_colony_morphologies_at_times.

    Parameters
    ----------
    time : The numeric timestep of interest.
    ancestors : A dictionary mapping each cell present in the timestep to the
        id of its ancestor.
    trajectory : The decoded hdf5 file containing the dumped run data.
        Usually from something like h5py.File(infile, 'r')
    scale : A value by which to multiply the physical coordinates.
        The intended goal is to convert from spatial coordinates to pixel
        locations so scale is generally passed a number representing pixels
        per meter.
    height : The height of the virtual image.
    width : The width of the virtual image.

    Returns
    -------
    A list of three-element lists which describes all colonies present at the
    given timestep. Each three-element list contains the timestep, ancestor id,
    and apparent 2D area. Although technically an ancestor id, the second item
    can also be thought of as a colony id.
    """
    # TODO although it's conceptually pleasing to defer to doing one colony at
    # a time, it means there are A LOT of extra total drawing calls. Almost
    # certainly more efficient to draw all colonies on one image and count
    # the number of pixels with the right color code

    # one morphology record per distinct ancestor (i.e. per colony); the old
    # enumerate() index was never used, so a comprehension replaces the loop
    return [
        get_colony_morphology_at_time(time, ancestor_id, ancestors, trajectory,
                                      scale, height, width)
        for ancestor_id in set(ancestors.values())
    ]
# %%
def get_colony_morphologies_at_times(times, ancestors, trajectory, scale,
height, width):
"""
Determine the apparent 2D areas of all colonies at the specified times.
A colony is defined as all cells sharing a common ancestor. The 2D apparent
area is the visible biomass looking from the top down. Every cell from
the colony is projected to the x-y plane and is occluded by any non-colony
cells above it.
Internally, this function relies on a virtual black and white image of
the projected and occluded colony to determine the apparent area. The
scale, height, and width parameters should be set so that the results
are comparable to any associated micrographs from analogous wet-lab
experiments.
This function is intended as the main entry point to getting all colony
areas over all timesteps of the simulation. Note that it may take a while
to run. The subordinate functions :func: get_colony_morphologies_at_time
and :func: get_colony_morphology_at_time can be called directly and may
be useful for either prototyping/debugging or for when only a subset of
colony areas (such as the areas at the final timestep) are of interest.
Parameters
----------
times : A numeric list of all timesteps of interest.
ancestors : A dictionary mapping each cell present in the timestep to the
id of its ancestor.
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
scale : A value by which to multiply the physical coordinates.
The intended goal is to convert from spatial coordinates to pixel locations,
so scale is generally passed a number representing pixels per meter.
height : The height of the virtual image.
width : The width of the virtual image.
Returns
-------
A Pandas dataframe which describes all colonies present at the
requested timesteps. Each row contains the timestep,
ancestor id, apparent 2D area in pixels, and a record of the scaling factor
between pixels and meters. Although technically an ancestor id,
the second item can also be thought of as a colony id.
We are returning a dataframe, which is unlike the finer grained related
functions for getting | |
<filename>musa/core.py
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from .utils import *
from .datasets.utils import label_parser, label_encoder, tstamps_to_dur
try:
import ahoproc_tools
from ahoproc_tools.io import *
except ImportError:
ahoproc_tools = None
from scipy.io import wavfile
import numpy as np
import tempfile
import struct
import json
import timeit
import os
def train_engine(model, dloader, opt, log_freq, train_fn, train_criterion,
                 epochs, save_path, model_savename, tr_opts=None, eval_fn=None,
                 val_dloader=None, eval_stats=None, eval_target=None,
                 eval_patience=None, cuda=False, va_opts=None, log_writer=None,
                 opt_scheduler=None):
    """Run the generic train/validate loop, with optional early stopping.

    Parameters
    ----------
    model : object exposing ``save(save_path, name, epoch, best_val=...)``.
    dloader : training data loader, forwarded to ``train_fn``.
    opt : optimizer, forwarded to ``train_fn``.
    log_freq : logging frequency, forwarded to ``train_fn``.
    train_fn : callable running one training epoch; must return a dict
        mapping loss names to scalar values.
    train_criterion : loss criterion forwarded to ``train_fn``.
    epochs : maximum number of epochs to run.
    save_path : directory where checkpoints and ``<loss_name>.npy`` loss
        curves are written.
    model_savename : base filename for model checkpoints.
    tr_opts / va_opts : optional dicts with extra options for the train /
        eval functions (a copy is passed on each call so the callee may
        pop entries safely). Defaults to ``{}``.
    eval_fn : optional callable running one validation pass; must return a
        dict of scores. Requires ``val_dloader``.
    eval_stats : forwarded to ``eval_fn`` as ``stats``.
    eval_target : key in the validation scores used for early stopping.
    eval_patience : number of non-improving epochs tolerated before
        stopping (required when ``eval_target`` is given).
    cuda : run on GPU when True (forwarded to train/eval functions).
    log_writer : optional logger forwarded to train/eval functions.
    opt_scheduler : optional LR scheduler stepped with the target score.

    Raises
    ------
    ValueError
        if ``eval_fn`` is given without ``val_dloader``, or ``eval_target``
        without ``eval_patience``.
    """
    # Fixed: mutable default arguments ({}) replaced by None sentinels;
    # passing explicit dicts behaves exactly as before.
    tr_opts = {} if tr_opts is None else tr_opts
    va_opts = {} if va_opts is None else va_opts
    tr_loss = {}
    va_loss = {}
    min_va_loss = np.inf
    patience = eval_patience
    for epoch in range(epochs):
        best_model = False
        tr_e_loss = train_fn(model, dloader, opt, log_freq, epoch,
                             criterion=train_criterion,
                             cuda=cuda, tr_opts=tr_opts.copy(),
                             log_writer=log_writer)
        # accumulate the per-epoch training losses under each loss name
        for k, v in tr_e_loss.items():
            tr_loss.setdefault(k, []).append(v)
        if eval_fn:
            if val_dloader is None:
                raise ValueError('Train engine: please specify '
                                 'a validation data loader!')
            val_scores = eval_fn(model, val_dloader,
                                 epoch, cuda=cuda,
                                 stats=eval_stats,
                                 va_opts=va_opts.copy(),
                                 log_writer=log_writer)
            if eval_target:
                if eval_patience is None:
                    # fixed typo in the original message ("whem")
                    raise ValueError('Train engine: Need a patience '
                                     'factor to be specified '
                                     'when eval_target is given')
                for k, v in val_scores.items():
                    va_loss.setdefault(k, []).append(v)
                if opt_scheduler is not None:
                    opt_scheduler.step(val_scores[eval_target])
                # early stopping on the target validation score
                if val_scores[eval_target] < min_va_loss:
                    print('Val loss improved {:.3f} -> {:.3f}'
                          ''.format(min_va_loss, val_scores[eval_target]))
                    min_va_loss = val_scores[eval_target]
                    best_model = True
                    patience = eval_patience
                else:
                    patience -= 1
                    print('Val loss did not improve. Curr '
                          'patience: {}/{}'.format(patience,
                                                   eval_patience))
                    if patience == 0:
                        # out of patience: leave the epoch loop without a
                        # final checkpoint (the best model was saved when
                        # it last improved)
                        print('Out of patience. Ending training.')
                        break
        model.save(save_path, model_savename, epoch,
                   best_val=best_model)
    # persist the loss curves as .npy files named after each loss key
    for k, v in tr_loss.items():
        np.save(os.path.join(save_path, k), v)
    if eval_target:
        for k, v in va_loss.items():
            np.save(os.path.join(save_path, k), v)
def synthesize(dur_model, aco_model, spk_id, spk2durstats, spk2acostats,
               save_path, out_fname, codebooks, lab_file, ogmios_fmt=True,
               cuda=False, force_dur=False, pf=1):
    """Synthesize a waveform from a label file with duration + acoustic models.

    Pipeline: parse and encode the labels, obtain per-phoneme durations
    (predicted or forced from the lab timestamps), expand each phoneme into
    5 ms acoustic frames, run the acoustic model, denormalize/postfilter
    the output and write ahocoder files (.cc/.lf0/.fv) plus the vocoded wav.

    Parameters
    ----------
    dur_model / aco_model : trained duration and acoustic torch models.
    spk_id : integer speaker index (or None for single-speaker models).
    spk2durstats / spk2acostats : per-speaker min/max stats dicts.
    save_path : output directory.
    out_fname : output basename (``<out_fname>.wav`` and aco files).
    codebooks : path to the label-encoder codebooks.
    lab_file : input label file.
    ogmios_fmt : whether labels follow the ogmios format.
    cuda : run on GPU when True.
    force_dur : use the lab-file durations instead of the duration model.
    pf : postfilter factor applied to the cepstrum.
    """
    beg_t = timeit.default_timer()
    if not force_dur:
        dur_model.eval()
    aco_model.eval()
    lab_parser = label_parser(ogmios_fmt=ogmios_fmt)
    lab_enc = label_encoder(codebooks_path=codebooks,
                            lab_data=None,
                            force_gen=False)
    # keep the raw integer id around: spk_id is re-bound to a tensor below
    spk_int = spk_id
    with open(lab_file, 'r') as lf:
        lab_lines = [l.rstrip() for l in lf.readlines()]
    tstamps, parsed_lab = lab_parser(lab_lines)
    lab_codes = []
    for l_n, lab in enumerate(parsed_lab, start=1):
        code = lab_enc(lab, normalize='znorm', sort_types=False)
        lab_codes.append(code)
    lab_codes = np.array(lab_codes, dtype=np.float32)
    print('lab_codes tensor shape: ', lab_codes.shape)
    # prepare input data as a time-major [seqlen, 1, feats] batch of size 1
    lab_codes = Variable(torch.from_numpy(lab_codes).unsqueeze(0))
    lab_codes = lab_codes.transpose(0, 1)
    if spk_id is not None:
        spk_id = Variable(torch.LongTensor([spk_id] * lab_codes.size(0)))
        spk_id = spk_id.view(lab_codes.size(0), 1, 1)
    if cuda:
        lab_codes = lab_codes.cuda()
        if spk_id is not None:
            spk_id = spk_id.cuda()
    durstats = spk2durstats[spk_int]
    if force_dur:
        # use durations from the lab file directly
        dur = Variable(torch.FloatTensor(tstamps_to_dur(tstamps, True)))
        dur = dur.view(-1, 1, 1)
        if cuda:
            dur = dur.cuda()
        # min-max normalize the durations
        ndurs = (dur - durstats['min']) / \
                (durstats['max'] - durstats['min'])
    else:
        # predict normalized durations and map them back to seconds
        ndurs, _ = dur_model(lab_codes, None, spk_id)
        min_dur = durstats['min']
        max_dur = durstats['max']
        # BUGFIX: proper inverse of the min-max normalization above
        # (was `ndurs * min_dur - max_dur + min_dur`)
        dur = ndurs * (max_dur - min_dur) + min_dur
    # build the acoustic batch: one input vector per 5 ms frame of each phone
    aco_inputs = []
    for t in range(ndurs.size(0)):
        # .item() replaces the deprecated np.asscalar
        ndur = ndurs[t, :, :].cpu().data.numpy().item()
        reldur_c = 0.
        dur_t = dur[t, :, :].cpu().data.numpy().item()
        # NOTE(review): att_synthesize uses a strict '<' here -- confirm
        # which frame-boundary behavior is intended
        while reldur_c <= dur_t:
            n_reldur = float(reldur_c) / dur_t
            # every 5ms, shift. TODO: change hardcode to allow speed variation
            reldur_c += 0.005
            aco_inputs.append(np.concatenate((lab_codes[t, 0, :].cpu().data.numpy(),
                                              np.array([n_reldur, ndur]))))
    aco_seqlen = len(aco_inputs)
    aco_inputs = Variable(torch.FloatTensor(aco_inputs))
    aco_inputs = aco_inputs.view(aco_seqlen, 1, -1)
    if cuda:
        aco_inputs = aco_inputs.cuda()
    yt, _, _ = aco_model(aco_inputs,
                         None, None,
                         spk_id)
    acostats = spk2acostats[spk_int]
    min_aco = acostats['min']
    max_aco = acostats['max']
    yt_npy = yt.cpu().data.squeeze(1).numpy()
    acot = denorm_minmax(yt_npy, min_aco, max_aco)
    acot = apply_pf(acot, pf, n_feats=40)
    # split the acoustic matrix: 40 cepstrum coeffs, then fv, lf0, u/v
    mfcc = acot[:, :40].reshape(-1)
    fv = acot[:, -3].reshape(-1)
    lf0 = acot[:, -2].reshape(-1)
    uv = np.round(acot[:, -1].reshape(-1))
    # force unvoiced frames to the vocoder's default excitation values
    fv[np.where(uv == 0)] = 1000.0
    fv[np.where(fv < 1000)] = 1000.0
    lf0[np.where(uv == 0)] = -10000000000.0
    assert len(uv) == len(fv), 'uv len {} != ' \
                               'fv len {}'.format(len(uv),
                                                  len(fv))
    # write the output ahocoder files
    # BUGFIX: the data arrays were previously passed to os.path.join
    # instead of write_aco_file (misplaced closing parenthesis)
    write_aco_file(os.path.join(save_path,
                                '{}.cc'.format(out_fname)), mfcc)
    write_aco_file(os.path.join(save_path,
                                '{}.lf0'.format(out_fname)), lf0)
    write_aco_file(os.path.join(save_path,
                                '{}.fv'.format(out_fname)), fv)
    aco2wav(os.path.join(save_path, out_fname))
    end_t = timeit.default_timer()
    print('[*] Synthesis completed into file: {}.wav .\n'
          'Total elapsed time: {:.4f} s'.format(out_fname,
                                                end_t - beg_t))
def att_synthesize(dur_model, aco_model, spk_id, spk2durstats, spk2acostats,
                   save_path, out_fname, codebooks, lab_file, ogmios_fmt=True,
                   cuda=False, force_dur=False, pf=1):
    """Synthesize a waveform using the attention-based acoustic model.

    Same pipeline as ``synthesize`` except: labels are min-max normalized
    (not z-normed), acoustic inference runs under ``torch.no_grad`` and the
    model is called with a ``speaker_idx`` keyword returning only ``yt``.

    Parameters
    ----------
    dur_model / aco_model : trained duration and acoustic torch models.
    spk_id : integer speaker index (or None for single-speaker models).
    spk2durstats / spk2acostats : per-speaker min/max stats dicts.
    save_path : output directory.
    out_fname : output basename (``<out_fname>.wav`` and aco files).
    codebooks : path to the label-encoder codebooks.
    lab_file : input label file.
    ogmios_fmt : whether labels follow the ogmios format.
    cuda : run on GPU when True.
    force_dur : use the lab-file durations instead of the duration model.
    pf : postfilter factor applied to the cepstrum.
    """
    beg_t = timeit.default_timer()
    if not force_dur:
        dur_model.eval()
    aco_model.eval()
    lab_parser = label_parser(ogmios_fmt=ogmios_fmt)
    lab_enc = label_encoder(codebooks_path=codebooks,
                            lab_data=None,
                            force_gen=False)
    # keep the raw integer id around: spk_id is re-bound to a tensor below
    spk_int = spk_id
    with open(lab_file, 'r') as lf:
        lab_lines = [l.rstrip() for l in lf.readlines()]
    tstamps, parsed_lab = lab_parser(lab_lines)
    lab_codes = []
    for l_n, lab in enumerate(parsed_lab, start=1):
        code = lab_enc(lab, normalize='minmax', sort_types=False)
        lab_codes.append(code)
    lab_codes = np.array(lab_codes, dtype=np.float32)
    print('lab_codes tensor shape: ', lab_codes.shape)
    # prepare input data as a time-major [seqlen, 1, feats] batch of size 1
    lab_codes = Variable(torch.from_numpy(lab_codes).unsqueeze(0))
    lab_codes = lab_codes.transpose(0, 1)
    if spk_id is not None:
        spk_id = Variable(torch.LongTensor([spk_id] * lab_codes.size(0)))
        spk_id = spk_id.view(lab_codes.size(0), 1, 1)
    if cuda:
        lab_codes = lab_codes.cuda()
        if spk_id is not None:
            spk_id = spk_id.cuda()
    durstats = spk2durstats[spk_int]
    if force_dur:
        # use durations from the lab file directly
        dur = Variable(torch.FloatTensor(tstamps_to_dur(tstamps, True)))
        dur = dur.view(-1, 1, 1)
        if cuda:
            dur = dur.cuda()
        # min-max normalize the durations
        ndurs = (dur - durstats['min']) / \
                (durstats['max'] - durstats['min'])
    else:
        # predict normalized durations and map them back to seconds
        ndurs, _ = dur_model(lab_codes, None, spk_id)
        min_dur = durstats['min']
        max_dur = durstats['max']
        # BUGFIX: proper inverse of the min-max normalization above
        # (was `ndurs * min_dur - max_dur + min_dur`)
        dur = ndurs * (max_dur - min_dur) + min_dur
    # build the acoustic batch: one input vector per 5 ms frame of each phone
    aco_inputs = []
    for t in range(ndurs.size(0)):
        # .item() replaces the deprecated np.asscalar
        ndur = ndurs[t, :, :].cpu().data.numpy().item()
        reldur_c = 0.
        dur_t = dur[t, :, :].cpu().data.numpy().item()
        # NOTE(review): strict '<' here while synthesize uses '<=' --
        # confirm which frame-boundary behavior is intended
        while reldur_c < dur_t:
            n_reldur = float(reldur_c) / dur_t
            # every 5ms, shift. TODO: change hardcode to allow speed variation
            reldur_c += 0.005
            aco_inputs.append(np.concatenate((lab_codes[t, 0, :].cpu().data.numpy(),
                                              np.array([n_reldur, ndur]))))
    aco_seqlen = len(aco_inputs)
    aco_inputs = torch.FloatTensor(aco_inputs)
    aco_inputs = aco_inputs.view(aco_seqlen, 1, -1)
    if cuda:
        aco_inputs = aco_inputs.cuda()
    with torch.no_grad():
        yt = aco_model(aco_inputs, speaker_idx=spk_id)
    print('yt size: ', yt.size())
    acostats = spk2acostats[spk_int]
    min_aco = acostats['min']
    max_aco = acostats['max']
    yt_npy = yt.cpu().data.squeeze(1).numpy()
    acot = denorm_minmax(yt_npy, min_aco, max_aco)
    acot = apply_pf(acot, pf, n_feats=40)
    # split the acoustic matrix: 40 cepstrum coeffs, then fv, lf0, u/v
    mfcc = acot[:, :40].reshape(-1)
    fv = acot[:, -3].reshape(-1)
    lf0 = acot[:, -2].reshape(-1)
    uv = np.round(acot[:, -1].reshape(-1))
    # force unvoiced frames to the vocoder's default excitation values
    fv[np.where(uv == 0)] = 1000.0
    fv[np.where(fv < 1000)] = 1000.0
    lf0[np.where(uv == 0)] = -10000000000.0
    assert len(uv) == len(fv), 'uv len {} != ' \
                               'fv len {}'.format(len(uv),
                                                  len(fv))
    # write the output ahocoder files
    write_aco_file(os.path.join(save_path,
                                '{}.cc'.format(out_fname)), mfcc)
    write_aco_file(os.path.join(save_path,
                                '{}.lf0'.format(out_fname)), lf0)
    write_aco_file(os.path.join(save_path,
                                '{}.fv'.format(out_fname)), fv)
    aco2wav(os.path.join(save_path, out_fname))
    end_t = timeit.default_timer()
    print('[*] Synthesis completed into file: {}.wav .\n'
          'Total elapsed time: {:.4f} s'.format(out_fname,
                                                end_t - beg_t))
def train_aco_epoch(model, dloader, opt, log_freq, epoch_idx,
criterion=None, cuda=False, tr_opts={},
spk2acostats=None, log_writer=None):
# When mulout is True (MO), log_freq is per round, not batch
# note that a round will have N batches
model.train()
global_step = epoch_idx * len(dloader)
# At the moment, acoustic training is always stateful
spk2acostats = None
if 'spk2acostats' in tr_opts:
print('Getting spk2acostats')
spk2acostats = tr_opts.pop('spk2acostats')
idx2spk = None
if 'idx2spk' in tr_opts:
idx2spk = tr_opts.pop('idx2spk')
mulout = False
round_N = 1
if 'mulout' in tr_opts:
print('Multi-Output aco training')
mulout = tr_opts.pop('mulout')
round_N = len(list(idx2spk.keys()))
if idx2spk is None:
raise ValueError('Specify a idx2spk in training opts '
'when using MO.')
assert len(tr_opts) == 0, 'unrecognized params passed in: '\
'{}'.format(tr_opts.keys())
epoch_losses = {}
num_batches = len(dloader)
print('num_batches: ', num_batches)
# keep stateful references by spk idx
spk2hid_states = {}
spk2out_states = {}
if mulout:
# keep track of the losses per round to make a proper log
# when MO is running
spk_loss_batch = {}
for b_idx, batch in enumerate(dloader):
# decompose the batch into the sub-batches
spk_b, lab_b, aco_b, slen_b, ph_b = batch
# build batch of curr_ph to filter out results without sil phones
# size of curr_ph_b [bsize, seqlen]
curr_ph_b = [[ph[2] for ph in ph_s] for ph_s in ph_b]
# convert all into variables and transpose (we want time-major)
spk_b = spk_b.transpose(0,1)
spk_name = idx2spk[spk_b.data[0,0].item()]
lab_b = lab_b.transpose(0,1)
aco_b = aco_b.transpose(0,1)
# get curr batch size
curr_bsz = spk_b.size(1)
if spk_name not in spk2hid_states:
# initialize hidden states for this (hidden and out) speaker
#print('Initializing states of | |
!= top:
[anomalies.append(id) for id in self[p].idx]
elif p == top:
pattern = self[p]
all_idxs = set()
for pat in pattern:
all_idxs.update(pat.idx)
[anomalies.append(ix) for ix in all_idxs if ix not in pattern.idx]
return anomalies
def __hash__(self):
    """Hash the pattern container via its canonical string rendering."""
    rendered = str(self)
    return hash(rendered)
def __eq__(self, other):
    """Containers are equal when they have the same length, pairwise-equal
    contents, and the same global frequency."""
    if len(self) != len(other):
        return False
    return (all(mine == theirs for mine, theirs in zip(self, other))
            and self.global_freq == other.global_freq)
def __repr__(self):
    """Render as ``ClassName(<concatenated item strings>)``."""
    body = ''.join(str(item) for item in self)
    return '{}({})'.format(self.__class__.__name__, body)
class RowPatterns(Patterns):
    """Token type tracker for each distinct pattern that appears in the rows."""

    def __init__(self, size_coverage: float = 1):
        """Initialize the row pattern tracker.

        Parameters
        ----------
        size_coverage : float
            threshold value for the PatternElementSizeMonitor
        """
        super(RowPatterns, self).__init__(size_coverage)

    def insert(self, row: Iterable[Token]):
        """Insert a row into the discovered patterns or update the existing
        SingularRowPattern it maps to.

        Parameters
        ----------
        row : tuple of Tokens
        """
        self.global_freq += 1
        # fixed: `types` was built with a side-effect list comprehension;
        # a plain comprehension assignment is the idiomatic form
        types = [token.regex_type for token in row]
        key = Patterns.keygen(types)
        if key not in self:
            self[key] = SingularRowPattern()
            for r in row:
                self[key].append(
                    PatternElementSizeMonitor(threshold=self.size_coverage).update(r))
            self[key].freq += 1
            # NOTE(review): `r` is the last token of the loop above --
            # presumably every token of a row shares the same rowidx;
            # an empty row raises NameError here (as in the original).
            self[key].idx.add(r.rowidx)
        else:
            self[key].update(row)

    def condense(self):
        """Execute the pattern element size monitors and replace each with a
        final PatternElement that has anomalous values excluded.

        Returns
        -------
        RowPatterns
            self, condensed in place
        """
        for pats in self.values():
            for i, monitor in enumerate(pats):
                # load the final PatternElement from the size monitor
                pats.container[i] = monitor.load()
        return self

    def distribution(self):
        """Return each pattern key mapped to its frequency.

        Returns
        -------
        dict
        """
        freqs = defaultdict(float)
        for key in self:
            freqs[key] = self[key].freq
        return freqs
class ColumnPatterns(Patterns):
    """
    Token type tracker for each supported datatype that appears in the same
    column token position, e.g.::

        123 BARCLAY AVE, NY === NUM(3) SPACE ALPHA(7) SPACE STREET PUNC(,) SPACE STATE
        23 NEWTON ST, OH ====== NUM(2) SPACE ALPHA(6) SPACE STREET PUNC(,) SPACE STATE
        ABRA, KADABRA AVE, MN = ALPHA(4) PUNC(,) SPACE ALPHA(7) SPACE STREET PUNC(,) SPACE STATE

    For column token position 0 the components are, per regex type, a
    PatternElementSizeMonitor holding one PatternElementSet per token size:
    NUM of size 3 -> values {123}, idx {0}; NUM of size 2 -> values {23},
    idx {1}; ALPHA of size 4 -> values {'abra'}, idx {2}; together with
    global_min/global_max/global_freq bookkeeping.

    The insert method routes tokens to the respective
    PatternElementSizeMonitor. The final PatternElements are loaded from
    the size monitors and condensed into a regex expression as RowPatterns.
    """

    def __init__(self, size_coverage: float = 1):
        """Initialize the column pattern tracker.

        Parameters
        ----------
        size_coverage : float
            threshold value for the PatternElementSizeMonitor
        """
        super(ColumnPatterns, self).__init__(size_coverage)

    def insert(self, row: Iterable[Token]):
        """Insert each token of ``row`` into the pattern at its column position.

        Parameters
        ----------
        row : list of Tokens
            tokens to insert / use to update the respective column patterns

        Raises
        ------
        TypeError
            if any element of ``row`` is not a Token
        """
        self.global_freq += 1
        for position, token in enumerate(row):
            if not isinstance(token, Token):
                raise TypeError("expected: openclean.function.token.base.Token, got: {}".format(token.__class__))
            if position not in self:
                self[position] = SingularColumnPattern(self.size_coverage)
            self[position].update(token)

    def condense(self):
        """Pick the top element of every column and derive the row pattern.

        Returns
        -------
        RowPatterns
        """
        # top element per column; dict insertion order preserves positions
        pattern = [col.top() for col in self.values()]
        # fixed: a `types` list was previously built here with a side-effect
        # comprehension but never used -- removed as dead code
        key = Patterns.keygen(pattern)
        patterns = RowPatterns()
        patterns[key] = SingularRowPattern()
        # add elements; the pattern's indices are the intersection of all
        # element indices, its frequency the size of that intersection
        for element in pattern:
            patterns[key].append(element)
            if len(patterns[key].idx) == 0:
                patterns[key].idx = element.idx
            else:
                patterns[key].idx = patterns[key].idx.intersection(element.idx)
        patterns[key].freq = len(patterns[key].idx)
        # global freq is the total number of rows observed
        patterns.global_freq = self.global_freq
        return patterns
# -- Pattern element / building blocks / Monitor ----------------------------------------
class PatternElementSet(object):
    """An individual set object storing the values, row indices and frequency
    of tokens that share the same regex type and size, e.g.::

        PatternElementSet{
            self.regex_type = NUM
            self.size = 2
            self.freq = 1
            self.values = set()
            self.idx = set()
        }
    """
    def __init__(self):
        """init the set"""
        self.regex_type = None  # regex type shared by every token in the set
        self.size = 0  # token size (adopted from the first token added)
        self.values = set()  # distinct raw token values seen
        self.idx = set()  # row indices the tokens came from
        self.freq = 0  # total tokens added (duplicates counted)

    def add(self, token: Token):
        # Adopt type/size from the first token; afterwards only tokens of
        # the exact same regex type may be added.
        if self.regex_type is None:
            self.regex_type = token.regex_type
            self.size = token.size
        elif token.regex_type is not self.regex_type:
            raise Exception("Incompatible Token used to update PatternElementSet")
        self.values.add(token.value)
        self.idx.add(token.rowidx)
        self.freq += 1
        # returns self so calls can be chained
        return self

    def __hash__(self):
        # hash on type+size so sets of the same kind collide in dicts/sets
        return hash(str(self.regex_type) + str(self.size))

    def __iter__(self):
        # NOTE(review): returns self but no __next__ is defined, so any
        # attempt to iterate raises TypeError -- confirm whether iteration
        # is ever used, or whether this should delegate to self.values.
        return self
class PatternElementSizeMonitor(defaultdict):
    """Keeps track of all the pattern element sets, keyed by token size.

    A set groups tokens of the same regex type and size (e.g. 4-digit
    numbers form one set, 5-digit numbers another). Only the biggest sets
    are compiled into a PatternElement; this identifies and prevents
    anomalous values from being introduced during pattern compilation.
    """

    def __init__(self, threshold: float = 1):
        """Initialize the monitor.

        Parameters
        ----------
        threshold : float (default: 1, i.e. 100%)
            the proportion of values considered non-anomalous. By default
            values of every size are included in the pattern.
        """
        # fixed: the original assigned ``self.default_factory`` without
        # ever invoking defaultdict's initializer; delegate properly
        super(PatternElementSizeMonitor, self).__init__(PatternElementSet)
        self.freq = 0  # total tokens observed across all sets
        self.threshold = threshold

    def update(self, token: Token):
        """Add ``token`` to the set matching its size.

        NOTE: intentionally shadows ``dict.update`` with a different
        signature, preserving the original API.

        Parameters
        ----------
        token : Token
            the token object to insert into the monitor

        Returns
        -------
        PatternElementSizeMonitor
            self, to allow call chaining
        """
        self[token.size].add(token)
        self.freq += 1
        return self

    def load(self):
        """Condense the tracked sets into a single PatternElement.

        1. compute the token frequency of each size bucket and sort them
        2. starting from the most frequent, merge sets into the element
        3. stop once the covered proportion exceeds ``self.threshold``

        Sets sharing the same frequency are all merged before the
        threshold check, so the stop point may overshoot the threshold.
        """
        # count total token frequency per size
        minmax = Counter()
        for pes in self.values():
            minmax[pes.size] += pes.freq
        # group sizes by identical frequency so ties are handled together
        freq_sets = defaultdict(list)
        for size, freq in minmax.most_common():
            freq_sets[freq].append(size)
        # merge sets from most to least frequent until coverage is reached
        covered = 0
        pe = None
        for freq in sorted(freq_sets.keys(), reverse=True):
            for size in freq_sets[freq]:
                if pe is None:
                    pe = PatternElement(self[size])
                else:
                    pe.update(self[size])
                covered += freq
            if covered / self.freq > self.threshold:
                break
        return pe
class PatternElement(object):
"""
Element tracker for a single supported datatype that appear in the same column token position
e.g.
231 BARCLAY AVE, NY === NUM(3) SPACE ALPHA(7) SPACE STREET PUNC(,) SPACE STATE
23 NEWTON ST, OH ====== NUM(2) SPACE ALPHA(6) SPACE STREET PUNC(,) SPACE STATE
for column token position 0, the PatternColumnElement object would be:
self.element_type = DIGIT #type of regex element
self.regex = NUMERIC #regex representation
self.len_min = 2 #min len
self.len_max = 3 #max len
self.idx = [0, 1] #list of indices that went into this element. useful to trace mismatches back to rows
self.punc_list = [] #list of punc tokens if this is a PUNCTUATION element
self.partial_regex = 23X #partial regex value
self.partial_ambiguous = False #is partial regex value too ambiguous to be used
self.freq = 2
The update_pattern method keeps adding similar type tokens to PatternColumnElement object and once all the tokens have been
exhausted, the final PatternColumnElement object is condensed by PatternColumn into a Regex Expression for the position
"""
def __init__(self, token: Token = None):
"""initializes the PatternElement and keeps track of numerous stats incrementally as it builds the regexp
Parameters
----------
token : Token
the token used to create this PatternElement object
"""
self.element_type = None # type of regex element
self.len_min = np.inf # min len
self.len_max = -np.inf # max len
self.values = set()
self.idx = set() # list of indices that went into this element. useful to trace mismatches back to rows
self.punc_list = list() # list of punc tokens if this is a PUNCTUATION elemenet
self.partial_regex = None # partial regex value
self.partial_ambiguous = False # is partial regex value too ambiguous to be used
self.freq = 0 # total frequency of tokens seen (useful for element proportions to identify anomalous patterns)
if isinstance(token, Token):
token = PatternElementSet().add(token)
if token is not None:
self.from_set(token) # init PatternElement from | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
We undertake not to change the open source license (MIT license) applicable to the current version of
the project delivered to anyone in the future.
"""
from rest_framework import serializers
from rest_framework.response import Response
from apps.generic import APIViewSet
from apps.iam import ActionEnum, ResourceEnum
from apps.iam.handlers.drf import (
ViewBusinessPermission,
BusinessActionPermission,
InstanceActionPermission,
insert_permission_field,
)
from apps.utils.drf import list_route, detail_route
from apps.log_databus.handlers.storage import StorageHandler
from apps.log_databus.constants import STORAGE_CLUSTER_TYPE
from apps.log_databus.serializers import (
StorageListSerializer,
StorageCreateSerializer,
StorageDetectSerializer,
StorageUpdateSerializer,
StorageBathcDetectSerializer,
StorageRepositorySerlalizer,
)
from apps.log_databus.exceptions import StorageNotExistException, StorageCreateException
from apps.api import BkLogApi
class StorageViewSet(APIViewSet):
lookup_field = "cluster_id"
serializer_class = serializers.Serializer
def get_permissions(self):
    """Resolve the IAM permission classes required by the current action."""
    if self.action == "update":
        return [InstanceActionPermission([ActionEnum.MANAGE_ES_SOURCE], ResourceEnum.ES_SOURCE)]
    if self.action == "create":
        return [BusinessActionPermission([ActionEnum.CREATE_ES_SOURCE])]
    return [ViewBusinessPermission()]
@list_route(methods=["GET"], url_path="cluster_groups")
@insert_permission_field(
actions=[ActionEnum.MANAGE_ES_SOURCE],
resource_meta=ResourceEnum.ES_SOURCE,
id_field=lambda d: d["storage_cluster_id"],
always_allowed=lambda d: d.get("bk_biz_id") == 0,
)
def list_cluster_groups(self, request, *args, **kwargs):
"""
@api {get} /databus/storage/cluster_groups/?bk_biz_id=$bk_biz_id 11_字段提取-存储集群
@apiName list_storage_cluster_groups
@apiGroup 09_StorageCluster
@apiDescription 拉取存储集群列表
@apiParam {Int} bk_biz_id 业务ID
@apiSuccess {Int} storage_cluster_id 存储集群id
@apiSuccess {String} storage_cluster_name 存储集群名称
@apiSuccess {String} storage_type 存储集群类型(固定elasticsearch类型)
@apiSuccess {Object} setup_config 存储设置参数
@apiSuccess {Int} setup_config.retention_days_max 存储设置参数 最大天数
@apiSuccess {Int} setup_config.retention_days_default 存储设置参数 默认天数
@apiSuccess {Int} setup_config.number_of_replicas_max 存储设置参数 最大副本数量
@apiSuccess {Int} setup_config.number_of_replicas_default 存储设置参数 默认副本书
@apiSuccess {Array} admin 管理员
@apiSuccess {String} description 描述
@apiSuccess {Boolean} is_platform 是否是平台存储集群
@apiSuccess {Boolean} enable_archive 是否启用归档
@apiSuccess {Boolean} enable_assessment 是否启用评估
@apiSuccess {Boolean} enable_hot_warm 是否启用冷热集群
@apiSuccess {Int} storage_usage 存储使用量
@apiSuccess {Int} storage_total 存储总理 bytes
@apiSuccess {Int} index_count 索引数量
@apiSuccess {Int} biz_count 业务数量
@apiSuccessExample {json} 成功返回:
{
"result": true,
"data": [
{
"storage_cluster_id": 17,
"storage_cluster_name": "bklog-test",
"storage_version": "6.4.3",
"storage_type": "elasticsearch",
"priority": 0,
"registered_system": "bklog",
"bk_biz_id": 2,
"enable_hot_warm": false,
"setup_config": {
"retention_days_max": 7,
"retention_days_default": 7,
"number_of_replicas_max": 3,
"number_of_replicas_default": 1
},
"admin": [
"xxxxx"
],
"description": "",
"enable_assessment": false,
"enable_archive": false,
"is_platform": false,
"storage_usage": 0,
"storage_total": 0,
"index_count": 0,
"biz_count": 0,
"storage_capacity": 0,
"storage_used": 0,
"permission": {
"manage_es_source": true
}
},
],
"code": 0,
"message": ""
}
"""
data = self.params_valid(StorageListSerializer)
return Response(
StorageHandler().get_cluster_groups_filter(
bk_biz_id=data["bk_biz_id"], enable_archive=data.get("enable_archive", False)
)
)
@insert_permission_field(
    id_field=lambda d: d["cluster_config"]["cluster_id"],
    actions=[ActionEnum.MANAGE_ES_SOURCE],
    resource_meta=ResourceEnum.ES_SOURCE,
    always_allowed=lambda d: d.get("bk_biz_id") == 0,
)
def list(self, request, *args, **kwargs):
    """
    @api {get} /databus/storage/?bk_biz_id=$bk_biz_id storage cluster list
    @apiName list_storage
    @apiGroup 09_StorageCluster
    @apiDescription List the storage clusters of a business. This view is
        not paginated; entries whose ``is_editable`` flag is false must not
        be edited.
    @apiParam {Int} bk_biz_id business id
    @apiSuccess {Object} cluster_config cluster configuration
    @apiSuccess {String} cluster_config.registered_system registering system
    @apiSuccess {String} cluster_config.domain_name cluster domain
    @apiSuccess {String} cluster_config.cluster_name cluster name
    @apiSuccess {Int} cluster_config.cluster_id cluster id
    @apiSuccess {Object} cluster_config.custom_option custom options
    @apiSuccess {Int} cluster_config.custom_option.bk_biz_id business id
    @apiSuccess {Int} cluster_config.port port
    @apiSuccess {String} cluster_type cluster type
    @apiSuccess {Object} auth_info credentials (username / password)
    @apiSuccess {Bool} is_editable whether the cluster may be edited
    @apiSuccess {Object} cluster_stats cluster health; absent when the
        connection fails: status (green/yellow/red), indices_count,
        indices_doc_count, indices_store (bytes)
    """
    validated = self.params_valid(StorageListSerializer)
    clusters = StorageHandler().list(
        bk_biz_id=validated["bk_biz_id"],
        enable_archive=validated.get("enable_archive", False),
    )
    return Response(clusters)
def retrieve(self, request, *args, **kwargs):
    """
    @api {get} /databus/storage/$cluster_id/?bk_biz_id=$bk_biz_id storage cluster detail
    @apiName retrieve_storage
    @apiGroup 09_StorageCluster
    @apiDescription Fetch the details of a single storage cluster.
    @apiParam {Int} cluster_id cluster id
    @apiParam {Int} bk_biz_id business id
    @apiSuccess {Object} cluster_config cluster configuration (domain, port,
        schema, cluster id/name, custom options, ...)
    @apiSuccess {String} cluster_type cluster type
    @apiSuccess {Object} auth_info credentials (username / password)
    @apiSuccess {Bool} is_editable whether the cluster may be edited
    """
    validated = self.params_valid(StorageListSerializer)
    matches = StorageHandler().list(
        cluster_id=kwargs["cluster_id"],
        bk_biz_id=validated.get("bk_biz_id"),
    )
    if not matches:
        raise StorageNotExistException()
    return Response(matches[0])
def create(self, request, *args, **kwargs):
"""
@api {post} /databus/storage/?bk_biz_id=$bk_biz_id 05_存储集群-创建
@apiName create_storage
@apiGroup 09_StorageCluster
@apiParam {String} cluster_name 集群名称
@apiParam {String} domain_name 集群域名(可以填入IP)
@apiParam {Int} port 端口
@apiParam {String} schema 协议
@apiParam {Object} auth_info 凭据信息
@apiParam {String} auth_info.username 用户
@apiParam {String} auth_info.password 密码
@apiParam {String} source_type 来源类型
@apiParam {Object} visible_config 可见业务配置
@apiParam {string} visible_config.visible_type 可见业务配置类型 current_biz 当前业务,all_biz 全部业务 biz_attr 业务属性multi_biz多个业务
@apiParam {List} [visible_config.visible_bk_biz] multi_biz类型设置该参数
@apiParam {Object} [visible_config.bk_biz_labels] biz_attr 类型设置该参数
@apiSuccess {Object} setup_config 存储设置参数
@apiSuccess {Int} setup_config.retention_days_max 存储设置参数 最大天数
@apiSuccess {Int} setup_config.retention_days_default 存储设置参数 默认天数
@apiSuccess {Int} setup_config.number_of_replicas_max 存储设置参数 最大副本数量
@apiSuccess {Int} setup_config.number_of_replicas_default 存储设置参数 默认副本书
@apiSuccess {List} admin 管理员
@apiSuccess {String} [description] 描述
@apiSuccess {Boolean} enable_archive 是否启用归档
@apiSuccess {Boolean} enable_assessment 是否启用评估
@apiParamExample {Json} 请求参数
{
"cluster_name": "ES集群",
"domain_name": "xxx",
"port": 9200,
"schema": "http",
"auth_info": {
"username": "",
"password": ""
},
"bk_biz_id": 1,
"enable_hot_warm": True,
"visible_config": {
"visible_type": "current_biz"
}
"source_type": "other",
"setup_config": {
"retention_days_max": 7,
"retention_days_default": 7,
"number_of_replicas_max": 3,
"number_of_replicas_default": 1
},
"admin": ["admin"],
"description": "xxxx",
"enable_archive": false,
"enable_assessment": false
}
@apiSuccess {Int} data 集群ID
@apiSuccessExample {json} 成功返回:
{
"result": true,
"data": 18,
"code": 0,
"message": ""
}
"""
data = self.params_valid(StorageCreateSerializer)
connect_result, version_num_str = BkLogApi.connectivity_detect( # pylint: disable=unused-variable
params={
"bk_biz_id": data["bk_biz_id"],
"domain_name": data["domain_name"],
"port": data["port"],
"version_info": True,
"schema": data["schema"],
"es_auth_info": {
"username": data["auth_info"]["username"],
"password": data["auth_info"]["password"],
},
},
)
data.update(
{
"cluster_type": STORAGE_CLUSTER_TYPE,
"custom_option": {
"bk_biz_id": data["bk_biz_id"],
"hot_warm_config": {
"is_enabled": data["enable_hot_warm"],
"hot_attr_name": data["hot_attr_name"],
"hot_attr_value": data["hot_attr_value"],
"warm_attr_name": data["warm_attr_name"],
"warm_attr_value": data["warm_attr_value"],
},
"source_type": data["source_type"],
"visible_config": data["visible_config"],
"setup_config": data["setup_config"],
"admin": data["admin"],
"description": data.get("description", ""),
"enable_archive": data["enable_archive"],
"enable_assessment": data["enable_assessment"],
},
"version": version_num_str,
}
)
return Response(StorageHandler().create(data))
def update(self, request, *args, **kwargs):
"""
@api {put} /databus/storage/$cluster_id/?bk_biz_id=$bk_biz_id 06_存储集群-更新
@apiName update_storage
@apiGroup 09_StorageCluster
@apiParam {String} domain_name 集群域名
@apiParam {Int} port 端口
@apiParam {String} schema 协议
@apiParam {Object} auth_info 凭据信息
@apiParam {String} auth_info.username 用户
@apiParam {String} auth_info.password 密码
@apiParam {String} cluster_name 集群名称
@apiParam {List} [visible_bk_biz] 可见业务范围
@apiParamExample {Json} 请求参数
{
"domain_name": "127.0.0.11",
"port":9200,
"schema": "http",
"auth_info":{
"username": "admin",
"password": "<PASSWORD>"
},
"visible_bk_biz: [1, 2, 3]
}
@apiSuccess {Int} data 集群ID
@apiSuccessExample {json} 成功返回:
{
"result": true,
"data": {
"cluster_config": {
"is_ssl_verify": false,
"registered_system": "log-search-4",
"domain_name": "127.0.0.11",
"cluster_name": "log_cluster11",
"version": "",
"cluster_id": 19,
"custom_option": "{\"bk_biz_id\": \"8\"}",
"port": 9201,
"schema": ""
},
"auth_info": {
"username": "admin",
"password": "<PASSWORD>"
},
"cluster_type": "elasticsearch"
},
"code": 0,
"message": ""
}
"""
data = self.params_valid(StorageUpdateSerializer)
data.update(
{
"custom_option": {
"bk_biz_id": data["bk_biz_id"],
"hot_warm_config": {
"is_enabled": data["enable_hot_warm"],
"hot_attr_name": data["hot_attr_name"],
"hot_attr_value": data["hot_attr_value"],
"warm_attr_name": data["warm_attr_name"],
"warm_attr_value": data["warm_attr_value"],
},
"source_type": data["source_type"],
"visible_config": data["visible_config"],
"setup_config": data["setup_config"],
"admin": data["admin"],
"description": data.get("description", ""),
"enable_archive": data["enable_archive"],
"enable_assessment": data["enable_assessment"],
},
"cluster_id": kwargs["cluster_id"],
}
)
data.pop("description", None)
return Response(StorageHandler(kwargs["cluster_id"]).update(data))
def destroy(self, request, cluster_id):
"""
@api {DELETE} /databus/storage/$cluster_id/?bk_biz_id=$bk_biz_id 06_存储集群-删除
@apiName delete_storage
@apiGroup 09_StorageCluster
@apiParam {Int} cluster_id 集群名称
@apiSuccessExample | |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Cheetah.compat import unicode
from _base_const_pyi import BaseConstPyi
##################################################
## MODULE CONSTANTS
# Short aliases for the Cheetah name-mapper lookups used throughout respond().
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler at generation time.
__CHEETAH_version__ = '3.2.6.post2'
__CHEETAH_versionTuple__ = (3, 2, 6, 'post', 2)
__CHEETAH_genTime__ = 1652553872.8408022
__CHEETAH_genTimestamp__ = 'Sat May 14 14:44:32 2022'
__CHEETAH_src__ = '_const_pyi.tmpl'
__CHEETAH_srcLastModified__ = 'Sat May 14 14:42:54 2022'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run if the installed Cheetah runtime is newer than this compiled
# template supports; the template must then be recompiled from source.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class const_pyi(BaseConstPyi):
    """Cheetah-compiled template (source: _const_pyi.tmpl).

    NOTE(review): this class is auto-generated by the Cheetah template
    engine — edit the .tmpl source and recompile instead of hand-editing.
    respond() renders a Python "const" stub module for one namespace.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        # Forward only the keyword arguments the Cheetah runtime understands.
        super(const_pyi, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        # Main render entry point: writes the generated source text into the
        # transaction buffer; returns it as a string when self-buffering.
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        _v = VFN(VFFSL(SL,"self",True),"init_data",False)() # '$self.init_data()' on line 4, col 1
        if _v is not None: write(_filter(_v, rawExpr='$self.init_data()')) # from line 4, col 1.
        # str or list or tupple
        _v = VFN(VFFSL(SL,"self",True),"load_data",False)() # '$self.load_data()' on line 21, col 1
        if _v is not None: write(_filter(_v, rawExpr='$self.load_data()')) # from line 21, col 1.
        # Main Template
        if VFFSL(SL,"write_class",True): # generated from line 23, col 1
            indt = "    "
        else: # generated from line 25, col 1
            indt = ""
        safe_name = VFN(VFFSL(SL,"self",True),"get_safe_word",False)(VFFSL(SL,"name",True))
        fullname = VFFSL(SL,"namespace",True) + '.' + VFFSL(SL,"safe_name",True)
        write('''# coding: utf-8
''')
        self._handleCheetahInclude("resources/inc_lic.txt", trans=trans, includeFrom="file", raw=True)
        write('''# Const
# this is a auto generated file generated by Cheetah
''')
        if VFFSL(SL,"libre_office_ver",True): # generated from line 34, col 1
            write('''# Libre Office Version: ''')
            _v = VFFSL(SL,"libre_office_ver",True) # '$libre_office_ver' on line 35, col 25
            if _v is not None: write(_filter(_v, rawExpr='$libre_office_ver')) # from line 35, col 25.
            write('''
''')
        write('''# Namespace: ''')
        _v = VFFSL(SL,"namespace",True) # '$namespace' on line 37, col 14
        if _v is not None: write(_filter(_v, rawExpr='$namespace')) # from line 37, col 14.
        write('''
from typing_extensions import Literal
''')
        if VFFSL(SL,"requires_typing",True): # generated from line 39, col 1
            write('''import typing
''')
        for imp in VFFSL(SL,"imports",True): # generated from line 42, col 1
            write('''import ''')
            _v = VFFSL(SL,"imp",True) # '$imp' on line 43, col 8
            if _v is not None: write(_filter(_v, rawExpr='$imp')) # from line 43, col 8.
            write('''
''')
        for frm, imp in VFFSL(SL,"from_imports",True): # generated from line 45, col 1
            write('''from ''')
            _v = VFFSL(SL,"frm",True) # '$frm' on line 46, col 6
            if _v is not None: write(_filter(_v, rawExpr='$frm')) # from line 46, col 6.
            write(''' import ''')
            _v = VFFSL(SL,"imp",True) # '$imp' on line 46, col 18
            if _v is not None: write(_filter(_v, rawExpr='$imp')) # from line 46, col 18.
            write('''
''')
        for frm, imp in VFFSL(SL,"from_typing_imports",True): # generated from line 48, col 1
            write('''from ''')
            _v = VFFSL(SL,"frm",True) # '$frm' on line 49, col 6
            if _v is not None: write(_filter(_v, rawExpr='$frm')) # from line 49, col 6.
            write(''' import ''')
            _v = VFFSL(SL,"imp",True) # '$imp' on line 49, col 18
            if _v is not None: write(_filter(_v, rawExpr='$imp')) # from line 49, col 18.
            write('''
''')
        if VFFSL(SL,"write_class",True): # generated from line 51, col 1
            write('''
class ''')
            _v = VFFSL(SL,"safe_name",True) # '${safe_name}' on line 54, col 7
            if _v is not None: write(_filter(_v, rawExpr='${safe_name}')) # from line 54, col 7.
            write('''(object):
''')
        _v = VFFSL(SL,"indt",True) # '${indt}' on line 56, col 1
        if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 56, col 1.
        write('''"""
''')
        _v = VFFSL(SL,"indt",True) # '${indt}' on line 57, col 1
        if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 57, col 1.
        write('''Const
''')
        if isinstance(VFFSL(SL,"desc",True), str): # generated from line 59, col 5
            _v = VFFSL(SL,"indt",True) # '${indt}' on line 60, col 1
            if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 60, col 1.
            _v = VFFSL(SL,"desc",True) # '${desc}' on line 60, col 8
            if _v is not None: write(_filter(_v, rawExpr='${desc}')) # from line 60, col 8.
            write('''
''')
        else: # generated from line 61, col 5
            for line in VFFSL(SL,"desc",True): # generated from line 62, col 9
                _v = VFFSL(SL,"indt",True) # '${indt}' on line 63, col 1
                if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 63, col 1.
                _v = VFFSL(SL,"line",True) # '${line}' on line 63, col 8
                if _v is not None: write(_filter(_v, rawExpr='${line}')) # from line 63, col 8.
                write('''
''')
        if VFFSL(SL,"link",True): # generated from line 66, col 1
            write('''
''')
            _v = VFFSL(SL,"indt",True) # '${indt}' on line 68, col 1
            if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 68, col 1.
            write('''See Also:
''')
            _v = VFFSL(SL,"indt",True) # '${indt}' on line 69, col 1
            if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 69, col 1.
            write('''    `API ''')
            _v = VFFSL(SL,"name",True) # '$name' on line 69, col 17
            if _v is not None: write(_filter(_v, rawExpr='$name')) # from line 69, col 17.
            write(''' <''')
            _v = VFFSL(SL,"link",True) # '$link' on line 69, col 24
            if _v is not None: write(_filter(_v, rawExpr='$link')) # from line 69, col 24.
            write('''>`_
''')
        _v = VFFSL(SL,"indt",True) # '${indt}' on line 71, col 1
        if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 71, col 1.
        write('''"""
''')
        for item in VFFSL(SL,"attribs",True): # generated from line 72, col 1
            _v = VFFSL(SL,"indt",True) # '${indt}' on line 73, col 1
            if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 73, col 1.
            _v = VFN(VFFSL(SL,"self",True),"get_safe_word",False)(VFFSL(SL,"item",True)['name']) # "$self.get_safe_word($item['name'])" on line 73, col 8
            if _v is not None: write(_filter(_v, rawExpr="$self.get_safe_word($item['name'])")) # from line 73, col 8.
            write(''': ''')
            _v = VFN(VFFSL(SL,"self",True),"get_const_type",False)(VFFSL(SL,"item",True)) # '$self.get_const_type($item)' on line 73, col 44
            if _v is not None: write(_filter(_v, rawExpr='$self.get_const_type($item)')) # from line 73, col 44.
            write('''
''')
            if len(VFFSL(SL,"item",True)['lines']) > 0: # generated from line 74, col 5
                _v = VFFSL(SL,"indt",True) # '${indt}' on line 75, col 1
                if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 75, col 1.
                write('''"""
''')
                if isinstance(VFFSL(SL,"item",True)['lines'], str): # generated from line 76, col 9
                    _v = VFFSL(SL,"indt",True) # '${indt}' on line 77, col 1
                    if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 77, col 1.
                    _v = VFFSL(SL,"item",True)['lines'] # "$item['lines']" on line 77, col 8
                    if _v is not None: write(_filter(_v, rawExpr="$item['lines']")) # from line 77, col 8.
                    write('''
''')
                else: # generated from line 78, col 9
                    for line in VFFSL(SL,"item",True)['lines']: # generated from line 79, col 13
                        _v = VFFSL(SL,"indt",True) # '${indt}' on line 80, col 1
                        if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 80, col 1.
                        _v = VFFSL(SL,"line",True) # '$line' on line 80, col 8
                        if _v is not None: write(_filter(_v, rawExpr='$line')) # from line 80, col 8.
                        write('''
''')
                _v = VFFSL(SL,"indt",True) # '${indt}' on line 83, col 1
                if _v is not None: write(_filter(_v, rawExpr='${indt}')) # from line 83, col 1.
                write('''"""
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    # Template-variable defaults; normally overridden through the Cheetah
    # search list when the template is rendered.
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    hex = False
    flags = False
    name = ""
    namespace = ""
    libre_office_ver = False
    link = ""
    base_class = ""
    quote = set()
    typings = set()
    desc = ""
    imports = []
    from_imports = []
    from_typing_imports = []
    requires_typing = False
    write_class = False
    attribs = []
    _mainCheetahMethod_for_const_pyi = 'respond'
## END CLASS DEFINITION
# Attach the Cheetah runtime plumbing (caching, search-list helpers, ...)
# to the generated class exactly once.
if not hasattr(const_pyi, '_initCheetahAttributes'):
    templateAPIClass = getattr(const_pyi,
                               '_CHEETAH_templateClass',
                               Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(const_pyi)
# CHEETAH | |
FreeMono18pt7bBitmaps = [
0x27, 0x77, 0x77, 0x77, 0x77, 0x22, 0x22, 0x20, 0x00, 0x6F, 0xF6, 0xF1,
0xFE, 0x3F, 0xC7, 0xF8, 0xFF, 0x1E, 0xC3, 0x98, 0x33, 0x06, 0x60, 0xCC,
0x18, 0x04, 0x20, 0x10, 0x80, 0x42, 0x01, 0x08, 0x04, 0x20, 0x10, 0x80,
0x42, 0x01, 0x10, 0x04, 0x41, 0xFF, 0xF0, 0x44, 0x02, 0x10, 0x08, 0x40,
0x21, 0x0F, 0xFF, 0xC2, 0x10, 0x08, 0x40, 0x21, 0x00, 0x84, 0x02, 0x10,
0x08, 0x40, 0x23, 0x00, 0x88, 0x02, 0x20, 0x02, 0x00, 0x10, 0x00, 0x80,
0x1F, 0xA3, 0x07, 0x10, 0x09, 0x00, 0x48, 0x00, 0x40, 0x03, 0x00, 0x0C,
0x00, 0x3C, 0x00, 0x1E, 0x00, 0x18, 0x00, 0x20, 0x01, 0x80, 0x0C, 0x00,
0x70, 0x05, 0xE0, 0xC9, 0xF8, 0x01, 0x00, 0x08, 0x00, 0x40, 0x02, 0x00,
0x10, 0x00, 0x1E, 0x00, 0x42, 0x01, 0x02, 0x02, 0x04, 0x04, 0x08, 0x08,
0x10, 0x08, 0x40, 0x0F, 0x00, 0x00, 0x1E, 0x01, 0xF0, 0x1F, 0x01, 0xE0,
0x0E, 0x00, 0x00, 0x3C, 0x00, 0x86, 0x02, 0x06, 0x04, 0x04, 0x08, 0x08,
0x10, 0x30, 0x10, 0xC0, 0x1E, 0x00, 0x0F, 0xC1, 0x00, 0x20, 0x02, 0x00,
0x20, 0x02, 0x00, 0x10, 0x01, 0x00, 0x08, 0x03, 0xC0, 0x6C, 0x3C, 0x62,
0x82, 0x68, 0x34, 0x81, 0xCC, 0x08, 0x61, 0xC3, 0xE7, 0xFF, 0xFF, 0xF6,
0x66, 0x66, 0x08, 0xC4, 0x62, 0x31, 0x8C, 0xC6, 0x31, 0x8C, 0x63, 0x18,
0xC3, 0x18, 0xC2, 0x18, 0xC3, 0x18, 0x86, 0x10, 0xC2, 0x18, 0xC6, 0x10,
0xC6, 0x31, 0x8C, 0x63, 0x18, 0x8C, 0x62, 0x31, 0x98, 0x80, 0x02, 0x00,
0x10, 0x00, 0x80, 0x04, 0x0C, 0x21, 0x9D, 0x70, 0x1C, 0x00, 0xA0, 0x0D,
0x80, 0xC6, 0x04, 0x10, 0x40, 0x80, 0x01, 0x00, 0x02, 0x00, 0x04, 0x00,
0x08, 0x00, 0x10, 0x00, 0x20, 0x00, 0x40, 0x00, 0x80, 0xFF, 0xFE, 0x02,
0x00, 0x04, 0x00, 0x08, 0x00, 0x10, 0x00, 0x20, 0x00, 0x40, 0x00, 0x80,
0x01, 0x00, 0x3E, 0x78, 0xF3, 0xC7, 0x8E, 0x18, 0x70, 0xC1, 0x80, 0xFF,
0xFE, 0x77, 0xFF, 0xF7, 0x00, 0x00, 0x08, 0x00, 0xC0, 0x04, 0x00, 0x60,
0x02, 0x00, 0x30, 0x01, 0x00, 0x18, 0x00, 0x80, 0x0C, 0x00, 0x40, 0x02,
0x00, 0x20, 0x01, 0x00, 0x10, 0x00, 0x80, 0x08, 0x00, 0x40, 0x04, 0x00,
0x20, 0x02, 0x00, 0x10, 0x01, 0x00, 0x08, 0x00, 0x80, 0x04, 0x00, 0x00,
0x0F, 0x81, 0x82, 0x08, 0x08, 0x80, 0x24, 0x01, 0x60, 0x0E, 0x00, 0x30,
0x01, 0x80, 0x0C, 0x00, 0x60, 0x03, 0x00, 0x18, 0x00, 0xC0, 0x06, 0x00,
0x30, 0x03, 0x40, 0x12, 0x00, 0x88, 0x08, 0x60, 0xC0, 0xF8, 0x00, 0x06,
0x00, 0x70, 0x06, 0x80, 0x64, 0x06, 0x20, 0x31, 0x00, 0x08, 0x00, 0x40,
0x02, 0x00, 0x10, 0x00, 0x80, 0x04, 0x00, 0x20, 0x01, 0x00, 0x08, 0x00,
0x40, 0x02, 0x00, 0x10, 0x00, 0x80, 0x04, 0x0F, 0xFF, 0x80, 0x0F, 0x80,
0xC3, 0x08, 0x04, 0x80, 0x24, 0x00, 0x80, 0x04, 0x00, 0x20, 0x02, 0x00,
0x10, 0x01, 0x00, 0x10, 0x01, 0x80, 0x18, 0x01, 0x80, 0x18, 0x01, 0x80,
0x18, 0x01, 0x80, 0x58, 0x03, 0x80, 0x1F, 0xFF, 0x80, 0x0F, 0xC0, 0xC0,
0x86, 0x01, 0x00, 0x02, 0x00, 0x08, 0x00, 0x20, 0x00, 0x80, 0x04, 0x00,
0x20, 0x0F, 0x00, 0x06, 0x00, 0x04, 0x00, 0x08, 0x00, 0x10, 0x00, 0x40,
0x01, 0x00, 0x04, 0x00, 0x2C, 0x01, 0x9C, 0x0C, 0x0F, 0xC0, 0x01, 0xC0,
0x14, 0x02, 0x40, 0x64, 0x04, 0x40, 0xC4, 0x08, 0x41, 0x84, 0x10, 0x42,
0x04, 0x20, 0x44, 0x04, 0x40, 0x48, 0x04, 0xFF, 0xF0, 0x04, 0x00, 0x40,
0x04, 0x00, 0x40, 0x04, 0x07, 0xF0, 0x3F, 0xF0, 0x80, 0x02, 0x00, 0x08,
0x00, 0x20, 0x00, 0x80, 0x02, 0x00, 0x0B, 0xF0, 0x30, 0x30, 0x00, 0x60,
0x00, 0x80, 0x01, 0x00, 0x04, 0x00, 0x10, 0x00, 0x40, 0x01, 0x00, 0x0E,
0x00, 0x2C, 0x01, 0x0C, 0x18, 0x0F, 0xC0, 0x01, 0xF0, 0x60, 0x18, 0x03,
0x00, 0x20, 0x04, 0x00, 0x40, 0x0C, 0x00, 0x80, 0x08, 0xF8, 0x98, 0x4A,
0x02, 0xE0, 0x3C, 0x01, 0x80, 0x14, 0x01, 0x40, 0x14, 0x03, 0x20, 0x21,
0x0C, 0x0F, 0x80, 0xFF, 0xF8, 0x01, 0x80, 0x18, 0x03, 0x00, 0x20, 0x02,
0x00, 0x20, 0x04, 0x00, 0x40, 0x04, 0x00, 0xC0, 0x08, 0x00, 0x80, 0x18,
0x01, 0x00, 0x10, 0x01, 0x00, 0x30, 0x02, 0x00, 0x20, 0x02, 0x00, 0x0F,
0x81, 0x83, 0x10, 0x05, 0x80, 0x38, 0x00, 0xC0, 0x06, 0x00, 0x30, 0x03,
0x40, 0x11, 0x83, 0x07, 0xF0, 0x60, 0xC4, 0x01, 0x60, 0x0E, 0x00, 0x30,
0x01, 0x80, 0x0E, 0x00, 0xD0, 0x04, 0x60, 0xC1, 0xFC, 0x00, 0x1F, 0x03,
0x08, 0x40, 0x4C, 0x02, 0x80, 0x28, 0x02, 0x80, 0x18, 0x03, 0xC0, 0x74,
0x05, 0x21, 0x91, 0xF1, 0x00, 0x10, 0x03, 0x00, 0x20, 0x02, 0x00, 0x40,
0x0C, 0x01, 0x80, 0x60, 0xF8, 0x00, 0x77, 0xFF, 0xF7, 0x00, 0x00, 0x00,
0x1D, 0xFF, 0xFD, 0xC0, 0x1C, 0x7C, 0xF9, 0xF1, 0xC0, 0x00, 0x00, 0x00,
0x00, 0xF1, 0xE3, 0x8F, 0x1C, 0x38, 0xE1, 0xC3, 0x06, 0x00, 0x00, 0x06,
0x00, 0x18, 0x00, 0xE0, 0x07, 0x00, 0x38, 0x01, 0xC0, 0x06, 0x00, 0x38,
0x00, 0xE0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x18, 0x00, 0x1C, 0x00, 0x0E,
0x00, 0x07, 0x00, 0x03, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x07, 0xFF, 0xFC, 0xC0, 0x00, 0xC0, 0x00, 0xE0, 0x00, 0x70,
0x00, 0x38, 0x00, 0x1C, 0x00, 0x0C, 0x00, 0x0E, 0x00, 0x0E, 0x00, 0x70,
0x03, 0x80, 0x0C, 0x00, 0x70, 0x03, 0x80, 0x1C, 0x00, 0x60, 0x00, 0x3F,
0x8E, 0x0C, 0x80, 0x28, 0x01, 0x80, 0x10, 0x01, 0x00, 0x10, 0x02, 0x00,
0xC0, 0x38, 0x06, 0x00, 0x40, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E,
0x01, 0xF0, 0x1F, 0x00, 0xE0, 0x0F, 0x01, 0x86, 0x08, 0x08, 0x80, 0x24,
0x01, 0x40, 0x0A, 0x00, 0x50, 0x1E, 0x83, 0x14, 0x20, 0xA2, 0x05, 0x10,
0x28, 0x81, 0x46, 0x0A, 0x18, 0x50, 0x3F, 0x80, 0x04, 0x00, 0x10, 0x00,
0x80, 0x02, 0x00, 0x18, 0x18, 0x3F, 0x00, 0x1F, 0xF0, 0x00, 0x06, 0x80,
0x00, 0x34, 0x00, 0x01, 0x30, 0x00, 0x18, 0x80, 0x00, 0x86, 0x00, 0x04,
0x30, 0x00, 0x60, 0x80, 0x02, 0x06, 0x00, 0x10, 0x10, 0x01, 0x80, 0x80,
0x08, 0x06, 0x00, 0x7F, 0xF0, 0x06, 0x00, 0x80, 0x20, 0x06, 0x01, 0x00,
0x10, 0x18, 0x00, 0xC0, 0x80, 0x06, 0x04, 0x00, 0x11, 0xFC, 0x0F, 0xF0,
0xFF, 0xF8, 0x04, 0x01, 0x01, 0x00, 0x20, 0x40, 0x04, 0x10, 0x01, 0x04,
0x00, 0x41, 0x00, 0x10, 0x40, 0x08, 0x10, 0x0C, 0x07, 0xFF, 0x01, 0x00,
0x70, 0x40, 0x06, 0x10, 0x00, 0x84, 0x00, 0x11, 0x00, 0x04, 0x40, 0x01,
0x10, 0x00, 0x44, 0x00, 0x21, 0x00, 0x33, 0xFF, 0xF8, 0x03, 0xF1, 0x06,
0x0E, 0x8C, 0x01, 0xC4, 0x00, 0x64, 0x00, 0x12, 0x00, 0x0A, 0x00, 0x01,
0x00, 0x00, 0x80, 0x00, 0x40, 0x00, 0x20, 0x00, 0x10, 0x00, 0x08, 0x00,
0x04, 0x00, 0x01, 0x00, 0x00, 0x80, 0x00, 0x20, 0x01, 0x88, 0x01, 0x83,
0x03, 0x80, 0x7E, 0x00, 0xFF, 0xE0, 0x20, 0x18, 0x20, 0x0C, 0x20, 0x04,
0x20, 0x02, 0x20, 0x02, 0x20, 0x01, 0x20, 0x01, 0x20, 0x01, 0x20, 0x01,
0x20, 0x01, 0x20, 0x01, 0x20, 0x01, 0x20, 0x01, 0x20, 0x02, 0x20, 0x02,
0x20, 0x04, 0x20, 0x0C, 0x20, 0x18, 0xFF, 0xE0, 0xFF, 0xFF, 0x08, 0x00,
0x84, 0x00, 0x42, 0x00, 0x21, 0x00, 0x10, 0x80, 0x00, 0x40, 0x00, 0x20,
0x40, 0x10, 0x20, 0x0F, 0xF0, 0x04, 0x08, 0x02, 0x04, 0x01, 0x00, 0x00,
0x80, 0x00, 0x40, 0x02, 0x20, 0x01, 0x10, 0x00, 0x88, 0x00, 0x44, 0x00,
0x3F, 0xFF, 0xF0, 0xFF, 0xFF, 0x88, 0x00, 0x44, 0x00, 0x22, 0x00, 0x11,
0x00, 0x08, 0x80, 0x00, 0x40, 0x00, 0x20, 0x40, 0x10, 0x20, 0x0F, 0xF0,
0x04, 0x08, 0x02, 0x04, 0x01, 0x00, 0x00, 0x80, 0x00, 0x40, 0x00, 0x20,
0x00, 0x10, 0x00, 0x08, 0x00, 0x04, 0x00, 0x1F, 0xF8, 0x00, 0x03, 0xF9,
0x06, 0x07, 0x84, 0x00, 0xC4, 0x00, 0x24, 0x00, 0x12, 0x00, 0x02, 0x00,
0x01, 0x00, 0x00, 0x80, 0x00, 0x40, 0x00, 0x20, 0x00, 0x10, 0x0F, 0xF8,
0x00, 0x14, 0x00, 0x09, 0x00, 0x04, 0x80, 0x02, 0x20, 0x01, 0x18, 0x00,
0x83, 0x01, 0xC0, 0x7F, 0x00, 0xFC, 0x3F, 0x20, 0x04, 0x20, 0x04, 0x20,
0x04, 0x20, 0x04, 0x20, 0x04, 0x20, 0x04, 0x20, 0x04, 0x20, 0x04, 0x3F,
0xFC, 0x20, 0x04, 0x20, 0x04, 0x20, 0x04, 0x20, 0x04, 0x20, 0x04, 0x20,
0x04, 0x20, 0x04, 0x20, 0x04, 0x20, 0x04, 0xFC, 0x3F, 0xFF, 0xF8, 0x10,
0x00, 0x80, 0x04, 0x00, 0x20, 0x01, 0x00, 0x08, 0x00, 0x40, 0x02, 0x00,
0x10, 0x00, 0x80, 0x04, 0x00, 0x20, 0x01, 0x00, 0x08, 0x00, 0x40, 0x02,
0x00, 0x10, 0x00, 0x81, 0xFF, 0xF0, 0x03, 0xFF, 0x80, 0x04, 0x00, 0x02,
0x00, 0x01, 0x00, 0x00, 0x80, 0x00, 0x40, 0x00, 0x20, 0x00, 0x10, 0x00,
0x08, 0x00, 0x04, 0x00, 0x02, 0x10, 0x01, 0x08, 0x00, 0x84, 0x00, 0x42,
0x00, 0x21, 0x00, 0x10, 0x80, 0x10, 0x20, 0x18, 0x0C, 0x18, 0x01, 0xF0,
0x00, 0xFF, 0x1F, 0x84, 0x01, 0x81, 0x00, 0xC0, 0x40, 0x60, 0x10, 0x30,
0x04, 0x18, 0x01, 0x0C, 0x00, | |
# <gh_stars>10-100  (repository-scraper metadata artifact, not Python code)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# build index of pali words into JavaScript Array variables
import os, sys
import shutil
import xml.dom.minidom
import json
import urllib
# Mapping from a Pali initial character to the ASCII code used as its
# on-disk folder name / index prefix. Commented-out entries are characters
# that are present in the alphabet but not indexed by this script.
prefix_code = {
    # "°" : "uc",
    # "-" : "dash",
    "a" : "a",
    "ā" : "aa",
    "b" : "b",
    "c" : "c",
    "d" : "d",
    "ḍ" : "dotd",
    "e" : "e",
    "g" : "g",
    "h" : "h",
    "i" : "i",
    "ī" : "ii",
    "j" : "j",
    "k" : "k",
    "l" : "l",
    "ḷ" : "dotl",
    "m" : "m",
    # "ṃ" : "dotm",
    "n" : "n",
    "ñ" : "tilden",
    # "ṇ" : "dotn",
    # "ṅ" : "ndot",
    # "ŋ" : "ngng",
    "o" : "o",
    "p" : "p",
    "r" : "r",
    "s" : "s",
    "t" : "t",
    "ṭ" : "dott",
    "u" : "u",
    "ū" : "uu",
    "v" : "v",
    "y" : "y",
    }
def usage():
    """Print the supported command-line invocations of this script."""
    print("Usage:")
    for command in ("index", "group", "stats", "cpdir", "js"):
        print("$ python index.py %s" % command)
def buildJSONIndex(xmlDir, savedName):
    """Scan the per-letter XML folders and save a JSON index of "true" words.

    A file ``<word>.xml`` counts as a "true" word when one of its <word>
    tags equals the file name (case-insensitively).  The resulting
    ``{prefix: [sorted words]}`` mapping is dumped to ``savedName`` and
    per-prefix statistics are printed.

    NOTE(review): Python 2 only — relies on ``unicode`` and on calling
    ``.decode``/``.encode`` on ``str`` file names; confirm before running
    under Python 3.
    """
    wordCount = {}
    wordCount['all'] = 0
    trueWordCount = {}
    trueWordCount['all'] = 0
    dicPrefixWordLists = {}
    # get words start with prefix 'key'
    for key in prefix_code.keys():
        # get all the files under folder name 'key'
        fileNames = os.listdir(xmlDir + key)
        wordCount[key] = 0
        trueWordCount[key] = 0
        dicPrefixWordLists[key] = []
        # iterate all words start with prefix 'key'
        for fileName in fileNames:
            wordCount['all'] += 1
            wordCount[key] += 1
            fileName = unicode(fileName.decode('utf8'))
            filaPath = xmlDir + key + '/' + fileName.encode('utf-8')
            fileData = open(filaPath.decode('utf-8'), 'r').read()
            # parse the xml data in the file
            dom = xml.dom.minidom.parseString(fileData)
            # iterate all the 'word' tag inside a xml file
            words = dom.getElementsByTagName('word')
            for word in words:
                wordStr = word.childNodes[0].data
                # FIXME: is lower() safe here?
                if (wordStr.lower() == fileName[:-4]):
                    # This is a "true" word (file name minus the ".xml" suffix)
                    dicPrefixWordLists[key].append(fileName[:-4])
                    trueWordCount['all'] += 1
                    trueWordCount[key] += 1
                    break
    # sort the words
    for key in dicPrefixWordLists.keys():
        dicPrefixWordLists[key].sort()
    # dicPrefixWordLists = {
    #     "a" : [ ... ]
    #     "ā" : [ ... ],
    #     "b" : [ ... ],
    #     "c" : [ ... ],
    #     ...
    # }
    # save the indexes in JSON-format to file
    fd = open(savedName, "w")
    fd.write(json.dumps(dicPrefixWordLists))
    fd.close()
    # print statistics
    for key in prefix_code.keys():
        print('word count %s: %d' % (key, wordCount[key]))
        print('true word count %s: %d' % (key, trueWordCount[key]))
    print('word count all: %d' % wordCount['all'])
    print('true word count all: %d' % trueWordCount['all'])
def stats(savedName, groupedSavedName):
    """Interactively print index statistics and the grouping report.

    Pauses with raw_input() and clears the terminal between sections.
    NOTE(review): Python 2 only (raw_input); assumes a POSIX ``clear``.
    """
    # dicPrefixWordLists = {
    #     "a" : [ ... ]
    #     "ā" : [ ... ],
    #     "b" : [ ... ],
    #     "c" : [ ... ],
    #     ...
    # }
    dicPrefixWordLists = json.loads(open(savedName).read())
    # print counts of words under each prefix
    allCount = 0
    for key in dicPrefixWordLists.keys():
        print('# of %s words: %d' %(key, len(dicPrefixWordLists[key])) )
        allCount += len(dicPrefixWordLists[key])
    print('all words count: %d' % allCount)
    raw_input('press Enter...')
    os.system('clear')
    # print words under each 'prefix' belongs to which version
    groupInfo = json.loads(open(groupedSavedName).read())
    for key in groupInfo['version'].keys():
        print('%s belongs to version #%d' % (key, groupInfo['version'][key]))
    raw_input('press Enter...')
    os.system('clear')
    showRecursiveVariable(groupInfo['dir'])
def buildWordsGroup(savedName, groupedSavedName, debug=False):
    """Partition the word index to satisfy GAE deployment limits.

    Reads the ``{prefix: [words]}`` index from ``savedName`` and writes
    ``groupedSavedName`` containing:
      * ``groupInfo['version']`` — which deploy version each prefix goes to
        (a version may hold at most ~9,900 files);
      * ``groupInfo['dir']`` — the index re-grouped so that no directory
        holds more than ~995 files (oversized buckets become nested dicts,
        see groupByPrefixUnderCountLimit).
    """
    # dicPrefixWordLists = { "a": [...], "ā": [...], ... }
    dicPrefixWordLists = json.loads(open(savedName).read())
    groupInfo = {}
    # step 1:
    # GAE allows at most 10,000 files for each version
    versionLimitCount = 9900
    groupInfo['version'] = {}
    groupIndex = 0
    wordCount = 0
    # NOTE(review): dict iteration order decides how prefixes are packed into
    # versions; any legal packing works, so the order is left unspecified.
    for prefix in dicPrefixWordLists.keys():
        prefixWordCount = len(dicPrefixWordLists[prefix])
        if (prefixWordCount > versionLimitCount):
            raise Exception("%s too large: %d" % (prefix, prefixWordCount))
        if (prefixWordCount + wordCount < versionLimitCount):
            wordCount += prefixWordCount
            groupInfo['version'][prefix] = groupIndex
        else:
            # current version is full — start a new one with this prefix
            wordCount = prefixWordCount
            groupIndex += 1
            groupInfo['version'][prefix] = groupIndex
    # step 2:
    # GAE allows at most 1,000 files for each directory
    dirCountLimit = 995
    # Iterate over a snapshot of the keys: oversized buckets are replaced in
    # place by nested dicts.  (The original code did `del` + re-insert while
    # iterating the live dict, which is undefined behaviour / RuntimeError
    # territory; plain re-assignment over a key snapshot is safe.)
    for prefix in list(dicPrefixWordLists.keys()):
        prefixWordCount = len(dicPrefixWordLists[prefix])
        if (prefixWordCount > dirCountLimit):
            if debug:
                print('%s # %d (too large)' %(prefix, prefixWordCount))
            dicPrefixWordLists[prefix] = groupByPrefixUnderCountLimit(dicPrefixWordLists[prefix], dirCountLimit, 2)
        else:
            if debug:
                print('%s # %d' %(prefix, prefixWordCount))
    if debug:
        raw_input('press Enter...')
        os.system('clear')
    # show re-grouped dicPrefixWordLists
    showRecursiveVariable(dicPrefixWordLists)
    groupInfo['dir'] = dicPrefixWordLists
    # step 3:
    # save the grouped variable
    # example:
    # groupInfo = {
    #     'version' : { 'a' : 0, 'b' : 0, ... },
    #     'dir'     : { 'a' : {}, 'b' : [], ... }
    # }
    fd = open(groupedSavedName, "w")
    fd.write(json.dumps(groupInfo))
    fd.close()
def showRecursiveVariable(var, space=1):
    """Pretty-print the nested {prefix: words} grouping.

    Lists print as ": <count>"; dict keys are indented by `space` and
    recursed into with one extra level of indentation.
    """
    if type(var) is list:
        print(': %d' % len(var))
    elif type(var) is dict:
        indent = ' ' * space
        for key in var:
            child = var[key]
            if type(child) is list:
                # leaf bucket: stay on the same line, recursion adds ": <n>"
                sys.stdout.write('%s%s ' % (indent, key))
            else:
                sys.stdout.write('%s%s + (over limit, break)\n' % (indent, key))
            showRecursiveVariable(child, space + 1)
    else:
        raise Exception('only [] or {} is allowed!')
def groupByPrefixUnderCountLimit(wordsArray, countLimit, digit, debug=False):
    """Recursively bucket words by their first `digit` characters.

    Returns ``{prefix: [words]}``; any bucket still holding more than
    `countLimit` words is replaced by a nested dict produced by recursing
    with ``digit + 1``.
    """
    group = {}
    # group by first 'digit'-letter
    for word in wordsArray:
        group.setdefault(word[:digit], []).append(word)
    # check if the length of array > countLimit
    # Iterate over a snapshot of the keys: oversized buckets are replaced in
    # place.  (The original `del group[prefix]` followed by re-insertion while
    # iterating the live dict is undefined behaviour; re-assignment over a
    # key snapshot is safe and equivalent.)
    for prefix in list(group):
        prefixWordCount = len(group[prefix])
        if (prefixWordCount > countLimit):
            # still > countLimit, recursively call self
            if debug:
                print('%s # %d (too large)' %(prefix, prefixWordCount))
            group[prefix] = groupByPrefixUnderCountLimit(group[prefix], countLimit, digit + 1, debug)
        else:
            if debug:
                print('%s # %d' %(prefix, prefixWordCount))
    if debug:
        raw_input('press Enter...')
        os.system('clear')
    return group
def buildJSONDeployDir(xmlDir, dpDirName, groupedSavedName):
    """Materialize per-version GAE deploy directories of JSON word files.

    For each deploy version listed in the grouping file, converts every
    word's XML into JSON under ``dpDirName/version<N>/json/...`` and writes
    that version's app.yaml.  Any existing ``dpDirName`` is deleted first.
    """
    if os.path.exists(dpDirName):
        # remove all dirs and sub-dirs
        shutil.rmtree(dpDirName)
    # load pre-built indexes
    groupInfo = json.loads(open(groupedSavedName).read())
    versionInfo = groupInfo['version']
    # Invert {prefix: version} into {version: [prefixes]}.
    # example:
    # versionInfo = {
    #     'a' : 0,
    #     'b' : 0,
    #     'c' : 1,
    #     'd' : 2
    # }
    # ==>
    # versions = {
    #     0 : ['a', 'b'],
    #     1 : ['c'],
    #     2 : ['d']
    # }
    versions = {}
    for prefix in versionInfo.keys():
        if versionInfo[prefix] in versions:
            versions[versionInfo[prefix]].append(prefix)
        else:
            versions[versionInfo[prefix]] = []
            versions[versionInfo[prefix]].append(prefix)
    for version in versions.keys():
        print('version %d:' % version)
        print(versions[version])
    dirInfo = groupInfo['dir']
    # iterate each version in all versions
    for version in versions:
        # destination directory of each version
        versionDir = dpDirName + 'version%d/' % version
        print(versionDir)
        # iterate all prefixes in each version
        for prefix in versions[version]:
            # source directory of words start with 'prefix'
            srcDir = xmlDir + prefix + '/'
            if not os.path.exists(srcDir):
                raise Exception('%s does not exist!' % srcDir)
            print(srcDir)
            count = iterateAllWordsInRecursiveVariable(dirInfo[prefix], prefix, versionDir, srcDir)
            print('%d' % count)
        # generate app.yaml for each version
        fd = open(versionDir + 'app.yaml', "w")
        fd.write('application: palidictionary\n')
        fd.write('version: json%d\n' % version)
        fd.write('runtime: python27\n')
        fd.write('api_version: 1\n')
        fd.write('threadsafe: true\n')
        fd.write('\n')
        fd.write('handlers:\n')
        fd.write('- url: /json\n')
        fd.write('  static_dir: json\n')
        fd.write('  mime_type: application/json\n')
        fd.write('  http_headers:\n')
        fd.write('    Access-Control-Allow-Origin: "*"\n')
        fd.close()
def iterateAllWordsInRecursiveVariable(var, prefix, versionDir, srcDir):
    """Walk the (possibly nested) word grouping and emit one JSON file per word.

    ``var`` is either a list of words or a ``{sub-prefix: ...}`` dict;
    ``prefix`` accumulates the output directory path.  Returns the number
    of words written.

    NOTE(review): Python 2 only — uses ``urllib.quote`` and str/unicode
    ``.encode`` round-trips; port to ``urllib.parse.quote`` for Python 3.
    """
    wordCount = 0
    if type(var) is type([]):
        for word in var:
            srcFile = srcDir + word + '.xml'
            if not os.path.exists(srcFile):
                raise Exception('%s does not exist!' % srcFile)
            # '%' from the percent-escapes is swapped to 'Z' — presumably to
            # keep the static file names URL/filesystem safe; TODO confirm
            # against the serving side.
            dstFile = versionDir + 'json/' + urllib.quote(prefix.encode('utf-8') + '/' + word.encode('utf-8') + '.json').replace('%', 'Z')
            if not os.path.exists(os.path.dirname(dstFile)):
                os.makedirs(os.path.dirname(dstFile))
            dstFd = open(dstFile, 'w')
            dstFd.write(json.dumps(xmlToJson(word, open(srcFile).read())))
            dstFd.close()
            wordCount += 1
    elif type(var) is type({}):
        for key in var.keys():
            wordCount += iterateAllWordsInRecursiveVariable(var[key], prefix + '/' + key, versionDir, srcDir)
    else:
        raise Exception('only [] or {} is allowed!')
    return wordCount
def xmlToJson(word, xmlFiledata):
    """Bundle a headword with its decoded dictionary entries.

    Returns ``{'word': word, 'data': <list of (dict, word, explain) tuples>}``.
    """
    return {'word': word, 'data': decodeXML(xmlFiledata)}
def decodeXML(xmlFiledata):
    """Parse one word's XML blob into a list of (dict, word, explain) 3-tuples."""
    dom = xml.dom.minidom.parseString(xmlFiledata)
    # each <item> element carries one dictionary's entry for the word
    return [decodeItem(item) for item in dom.getElementsByTagName("item")]
def decodeItem(item):
    """Extract (dictionary name, headword, explanation) from one <item> element.

    The explanation text is stored hex-encoded in the XML and is decoded
    via HexStringToString.
    """
    # Renamed the first local from `dict` — it shadowed the builtin.
    dict_el = item.getElementsByTagName("dict")[0]
    word_el = item.getElementsByTagName("word")[0]
    explain_el = item.getElementsByTagName("explain")[0]
    dictstr = dict_el.childNodes[0].data
    wordstr = word_el.childNodes[0].data
    explainstr = HexStringToString(explain_el.childNodes[0].data)
    return dictstr, wordstr, explainstr
def HexStringToString(hexString):
    """Decode a '%'-separated hex string into a UTF-8 text string.

    Example: "%2c%e3%80%80" holds the bytes 2C E3 80 80, which decode
    to u",\u3000".  Returns a fixed apology message when the bytes are
    not valid UTF-8.

    Fixes over the original: no longer shadows the `bytes` builtin, and
    decodes via bytearray -> bytes so it works on both Python 2 and 3
    (the old `''.join(chars).decode(...)` only works on Python 2 str).
    """
    # strip the '%' separators, leaving a plain run of hex digits
    hexStr = ''.join(hexString.split("%"))
    # every two hex digits form one byte of the UTF-8 encoding
    raw = bytearray(int(hexStr[i:i + 2], 16) for i in range(0, len(hexStr), 2))
    try:
        string = bytes(raw).decode("utf-8")
    except UnicodeDecodeError:
        string = u"Sorry! Something wrong with the database. We cannot get explain of this word in this dictionary."
    return string
def | |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.242508,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.393166,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.18677,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.65899,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.14113,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.654471,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.45459,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.469435,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 8.01852,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.224206,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0238889,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.268533,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.176673,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.492739,
'Execution Unit/Register Files/Runtime Dynamic': 0.200562,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.715324,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.64045,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.09415,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00124821,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00124821,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00108166,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000415701,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00253792,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.006116,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0121654,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.16984,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.373073,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.576854,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.13805,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.236658,
'L2/Runtime Dynamic': 0.0598803,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.93936,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.93155,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.119776,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.119776,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 5.50727,
'Load Store Unit/Runtime Dynamic': 2.64202,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.295348,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.590696,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.10482,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.108361,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0612,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.739707,
'Memory Management Unit/Runtime Dynamic': 0.169561,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.0326,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.782203,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0431096,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.332547,
'Renaming Unit/Int Front End RAT/Subthreshold | |
import re
import codecs
import os
import shutil
import tempfile
import sys
import zipfile
import base64
import StringIO
import urllib
"""
uglifyjs is a python wrapper for uglifyjs.
See e.g. https://bitbucket.org/chrisk/uglifyjs
It can be any javascript minifyer. The required interface is:
def minify(inpath, outpath, encoding="utf_8"):
Minify input path to outputpath, optionally using encoding
def minify_in_place(path, encoding="utf_8"):
Minify path and write it to to the same location. Optionally use
encoding
"""
try:
import uglifyjs as jsminify
except ImportError:
import jsminify
_text_exts = (".js", ".html", ".xml", ".css")
_directive_exts = (".xml", ".html", ".xhtml") # files that may have <!-- command.. directives
_keyword_exts = (".css", ".js", ".xml", ".html", ".xhtml", ".txt") # files we will try to do keyword interpolation on
_license_exts = (".js", ".css") # extensions that should get a license
_img_exts = (".png", ".jpg", ".gif")
_script_ele = u"<script src=\"%s\"/>\n"
_style_ele = u"<link rel=\"stylesheet\" href=\"%s\"/>\n"
_base_url = u"<base href=\"%s\" />\n"
_re_command = re.compile("""\s?<!--\s+command\s+(?P<command>\w+)\s+"?(?P<target>.*?)"?\s*(?:if\s+(?P<neg>not)?\s*(?P<cond>\S+?))?\s*-->""")
_re_comment = re.compile("""\s*<!--.*-->\s*""")
_re_script = re.compile("\s?<script +src=\"(?P<src>[^\"]*)\"")
_re_css = re.compile("\s?<link +rel=\"stylesheet\" +href=\"(?P<href>[^\"]*)\"/>")
_re_condition = re.compile("\s+if\s+(not)? (.*)")
_concatcomment =u"""
/* dfbuild: concatenated from: %s */
"""
def _process_directive_files(dirpath, vars):
    """Run _process_directives on every file under dirpath whose extension
    is in _directive_exts.

    Fix: join `f` onto `base` only.  os.walk already yields `base` paths
    rooted at dirpath, so the original os.path.join(dirpath, base, f)
    duplicated dirpath whenever it was a relative path (for absolute
    paths join() discards the first component, so behavior there is
    unchanged).
    """
    for base, dirs, files in os.walk(dirpath, topdown=True):
        for path in [os.path.join(base, f) for f in files if f.endswith(_directive_exts)]:
            _process_directives(dirpath, path, vars)
def _process_directives(root, filepath, vars):
    """
    Process all directives in the file filepath. The root dir is in root.

    Rewrites filepath in place: "<!-- command ... -->" directives open or
    close concat targets, matched <script>/<link> references are collected
    into those targets (and removed from the page), and blank lines are
    dropped.  Afterwards each concat target file is written under root as
    the concatenation of its collected source files, which are then deleted.
    `vars` supplies the values for conditional directives ("if [not] NAME").

    TODO: Refactor this to use separate functions for each directive and
    just pass in a context for it to keep stuff in.
    """
    file = open(filepath)
    tmpfd, tmppath = tempfile.mkstemp(".tmp", "dfbuild.")
    tmpfile = os.fdopen(tmpfd, "w")
    # maps concat target path -> list of source files collected so far
    known_files = {}
    current_css_file = None
    current_js_file = None
    for line in file:
        match_cmd = _re_command.search(line)
        match_css = _re_css.search(line)
        match_js = _re_script.search(line)
        match_comment = _re_comment.search(line)
        if match_cmd:
            cmd, target, neg, cond = match_cmd.groups()
            if cond:  # check if this directive is conditional
                c = bool(cond in vars and vars[cond])
                if neg:
                    c = not c
                if not c:  # the condition was not met, skip rule
                    continue
            # at this point the rule will be honoured
            if cmd == "concat_css":
                if target in ["off", "false", "no"]:
                    current_css_file = None
                elif target in known_files:
                    current_css_file = target
                else:
                    # first time this target is seen: emit the single
                    # <link> element that replaces all collected sheets
                    known_files[target] = []
                    current_css_file = target
                    tmpfile.write(_style_ele % target)
                continue
            elif cmd == "concat_js":
                if target in ["off", "false", "no"]:
                    current_js_file = None
                elif target in known_files:
                    current_js_file = target
                else:
                    # first time this target is seen: emit the single
                    # <script> element that replaces all collected scripts
                    known_files[target] = []
                    current_js_file = target
                    tmpfile.write(_script_ele % target)
                continue
            elif cmd == "set_rel_base_url" and \
                 vars.has_key("base_url") and vars["base_url"]:
                tmpfile.write(_base_url % vars["base_url"])
                continue
            else:  # some other unknown command! Let fall through so line is written
                pass
        elif match_comment:
            # plain HTML comments are stripped from the output
            continue
        elif match_css:
            if current_css_file:
                known_files[current_css_file].append(match_css.group("href"))
            continue
        elif match_js:
            if current_js_file:
                known_files[current_js_file].append(match_js.group("src"))
            #fixme: The following continue should have been on the same level as this comment. However, that causes lang files to be included. must fix
                continue
        elif line.isspace():
            continue
        tmpfile.write(line)
    tmpfile.close()
    # write back the temp stuff, which is now the authoritative stuff:
    shutil.copy(tmppath, filepath)
    os.unlink(tmppath)
    # materialize each concat target as the concatenation of its sources,
    # deleting the individual source files as they are consumed
    for outfile, contentfiles in known_files.items():
        outpath = os.path.join(root, outfile)
        outdir = os.path.dirname(outpath)
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        fout_path = os.path.join(root, outfile)
        fout = codecs.open(fout_path, "w", encoding="utf_8_sig")
        for infile in contentfiles:
            fout.write(_concatcomment % infile)
            fin = codecs.open(os.path.join(root, infile), "r", encoding="utf_8_sig")
            fout.write(fin.read())
            fin.close()
            os.unlink(os.path.join(root, infile))
        fout.close()
def _clean_dir(root, exclude_dirs, exclude_files):
"""
Remove anything in either of the blacklists, then remove all empty
directories under root and its children
"""
exclude_files = [os.path.normpath(os.path.join(root, f)) for f in exclude_files ]
exclude_dirs = [os.path.normpath(os.path.join(root, d)) for d in exclude_dirs ]
# first pass, remove blacklisted files
for base, dirs, files in os.walk(root, topdown=True):
if base in exclude_dirs:
shutil.rmtree(base)
continue
for file in files:
absfile = os.path.abspath(os.path.join(base, file))
if absfile in exclude_files and os.path.isfile(absfile):
os.unlink(absfile)
# second pass, remove empty dirs
for base, dirs, files in os.walk(root, topdown=True):
if not dirs and not files:
os.rmdir(base)
def _add_license(root, license_path="include-license.txt"):
    """
    Prepend the license text found at license_path to every file under
    root whose extension is in _license_exts.  Silently does nothing when
    the license file does not exist.
    """
    if not os.path.isfile(license_path):
        return
    lic_handle = codecs.open(license_path, "r", encoding="utf_8_sig")
    license_text = lic_handle.read()
    lic_handle.close()
    targets = []
    for base, dirs, files in os.walk(root):
        for name in files:
            if name.endswith(_license_exts):
                targets.append(os.path.join(base, name))
    for path in targets:
        original = codecs.open(path, "r", encoding="utf_8_sig")
        tmpfd, tmppath = tempfile.mkstemp(".tmp", "dfbuild.")
        writer = codecs.getwriter("utf_8_sig")(os.fdopen(tmpfd, "w"))
        writer.write(license_text)
        writer.write("\n")
        writer.write(original.read())
        original.close()
        writer.close()
        shutil.copy(tmppath, path)
        os.unlink(tmppath)
def _add_keywords(root, keywords):
    """
    Perform keyword interpolation on every file under root whose extension
    is in _keyword_exts.  `keywords` maps each literal key to the text it
    is replaced with.
    """
    targets = []
    for base, dirs, files in os.walk(root):
        targets.extend(os.path.join(base, f) for f in files if f.endswith(_keyword_exts))
    for path in targets:
        reader = codecs.open(path, "r", encoding="utf_8_sig")
        tmpfd, tmppath = tempfile.mkstemp(".tmp", "dfbuild.")
        writer = codecs.getwriter("utf_8_sig")(os.fdopen(tmpfd, "w"))
        for line in reader:
            for old, new in keywords.items():
                line = line.replace(old, new)
            writer.write(line)
        reader.close()
        writer.close()
        shutil.copy(tmppath, path)
        os.unlink(tmppath)
def _is_utf8(path):
"""Check if file at path is utf8. Note that this only checks for a
utf8 BOM, nothing more
"""
if not os.path.isfile(path): return None
f = open(path, "rb")
return "test-scripts" in path and True or f.read(3) == codecs.BOM_UTF8
def _minify_buildout(src):
    """
    Minify every javascript file in directory src, in place: each original
    file is replaced with its minified version via the jsminify backend.
    """
    for base, dirs, files in os.walk(src):
        for name in files:
            if name.endswith(".js"):
                jsminify.minify_in_place(os.path.join(base, name))
def _localize_buildout(src, langdir):
    """Make a localized version of the build dir. That is, with one
    script.js for each language, with a prefix suffix for each language

    src: directory containing the finished build
    language: dir containing language files. NOT in build dir!

    For each ui_strings-XX.js found in langdir this writes
    script/dragonfly-XX.js (english strings first as fallback, then the
    localized strings, then the main script) and a matching client-XX.xml
    that references it, then removes the unlocalized script/dragonfly.js.

    Note, this function knows much more than it should about the structure
    of the build. The whole thing should possibly be refactored :(

    NOTE(review): depends on a global `options` object (options.minify)
    that is not defined in this file's visible portion — confirm it is
    set (presumably parsed CLI options) before calling.
    """
    # NOTE(review): nothing in this function ever appends to tmpfiles,
    # so the cleanup loop at the end is effectively a no-op.
    tmpfiles = []
    scriptpath = os.path.normpath(os.path.join(src, "script/dragonfly.js"))
    fp = codecs.open(scriptpath, "r", encoding="utf_8_sig")
    script_data = fp.read()
    fp.close()
    clientpath = os.path.normpath(os.path.join(src, "client-en.xml"))
    fp = codecs.open(clientpath, "r", encoding="utf_8_sig")
    clientdata = fp.read()
    fp.close()
    # Grab all english data. Will be put in front of localized strings so
    # there are fallbacks
    englishfile = os.path.join(langdir, "ui_strings-en.js")
    fp = codecs.open(englishfile, "r", encoding="utf_8_sig")
    englishdata = fp.read()
    fp.close()
    # language codes are the XX part of the ui_strings-XX.js file names
    langnames = [f for f in os.listdir(langdir) if f.startswith("ui_strings-") and f.endswith(".js") ]
    langnames = [f.replace("ui_strings-", "").replace(".js", "") for f in langnames]
    for lang, newscriptpath, newclientpath, path in [ (ln, "script/dragonfly-"+ln+".js", "client-"+ln+".xml", os.path.join(langdir, "ui_strings-"+ln+".js")) for ln in langnames ]:
        newscript = codecs.open(os.path.join(src,newscriptpath), "w", encoding="utf_8_sig")
        newclient = codecs.open(os.path.join(src, newclientpath), "w", encoding="utf_8_sig")
        # the provenance comments are only written for unminified builds
        if not options.minify:
            newscript.write(_concatcomment % englishfile)
        newscript.write(englishdata)
        langfile = codecs.open(path, "r", encoding="utf_8_sig")
        if not options.minify:
            newscript.write(_concatcomment % path)
        newscript.write(langfile.read())
        newscript.write(script_data)
        # point the localized client file at the localized script
        newclient.write(clientdata.replace("dragonfly.js", "dragonfly" + "-" + lang +".js"))
        newclient.close()
        langfile.close()
        newscript.close()
    # the unlocalized script has been superseded by the per-language copies
    os.unlink(os.path.join(src, "script/dragonfly.js"))
    while tmpfiles:
        os.unlink(tmpfiles.pop())
def _get_bad_encoding_files(src):
    """Return the paths of all text files under src that fail the utf8
    check (_is_utf8: missing file or no UTF-8 BOM).

    Note: despite what the original docstring implied, this only checks
    encodings; language-string completeness is handled by
    _get_missing_strings_for_dir.

    Fixes over the original: removed the dead `files = os.walk(src)`
    statement (its result was immediately shadowed by the loop), and
    renamed locals that shadowed the `file`/`abs` builtins.
    """
    bad = []
    for base, dirs, files in os.walk(src):
        for name in [f for f in files if f.endswith(_text_exts)]:
            path = os.path.join(base, name)
            if not _is_utf8(path):
                bad.append(path)
    return bad
def _get_string_keys(path):
"""Grab all the string keys of out a language file"""
re_key = re.compile("^ *ui_strings\.([^ =]*)")
fp = codecs.open(path, "r", "utf_8_sig")
lang_keys = set()
for line in fp:
lang_keys.update(re_key.findall(line))
fp.close()
return lang_keys
def _get_missing_strings(path, master):
    """Return the keys from the `master` set that the language file at
    `path` does not define."""
    return master - _get_string_keys(path)
def _get_missing_strings_for_dir(stringsdir, masterlang):
    """For every language file in stringsdir, collect which keys of the
    master language it lacks.

    Returns {lang: set_of_missing_keys} with complete languages omitted,
    or None when the master language file itself is absent.
    """
    entries = os.listdir(stringsdir)
    masterfile = os.path.join(stringsdir, "ui_strings-%s.js" % masterlang)
    missing = {}
    if not os.path.isfile(masterfile):
        return None
    masterstrings = _get_string_keys(masterfile)
    for fname in entries:
        lang = fname[-5:-3]  # "ui_strings-en.js" -> "en"
        if lang == masterlang:
            continue
        langfile = os.path.join(stringsdir, "ui_strings-%s.js" % lang)
        if not os.path.isfile(langfile):
            continue
        diff = _get_missing_strings(langfile, masterstrings)
        if diff:
            missing[lang] = diff
    return missing
def _clobbering_copytree(src, dst, symlinks=False):
"""This is a modified version of copytree from the shutil | |
<reponame>supaflysnooka/localstack<gh_stars>1-10
import ast
import asyncio
import base64
import json
import logging
import traceback
import uuid
import requests
import six
import xmltodict
from flask import Response as FlaskResponse
from moto.sns.exceptions import DuplicateSnsEndpointError
from moto.sns.models import SNSBackend as MotoSNSBackend
from requests.models import Request, Response
from six.moves.urllib import parse as urlparse
from localstack.config import external_service_url
from localstack.constants import MOTO_ACCOUNT_ID, TEST_AWS_ACCOUNT_ID
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import RegionBackend
from localstack.services.install import SQS_BACKEND_IMPL
from localstack.utils.analytics import event_publisher
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_responses import create_sqs_system_attributes, response_regex_replace
from localstack.utils.aws.dead_letter_queue import sns_error_to_dead_letter_queue
from localstack.utils.common import (
parse_request_data,
short_uid,
start_thread,
timestamp_millis,
to_bytes,
to_str,
)
from localstack.utils.persistence import PersistingProxyListener
# set up logger
LOG = logging.getLogger(__name__)
# additional attributes used for HTTP subscriptions
HTTP_SUBSCRIPTION_ATTRIBUTES = ["UnsubscribeURL"]
class SNSBackend(RegionBackend):
    """Per-region in-memory state for the LocalStack SNS mock.

    Pure state container: holds subscriptions, subscription statuses,
    topic tags, captured platform-endpoint/SMS messages, and the list of
    actions excluded from persistence.
    """

    def __init__(self):
        # maps topic ARN to list of subscriptions
        self.sns_subscriptions = {}
        # maps subscription ARN to subscription status
        self.subscription_status = {}
        # maps topic ARN to list of tags
        self.sns_tags = {}
        # cache of topic ARN to platform endpoint messages (used primarily for testing)
        self.platform_endpoint_messages = {}
        # maps phone numbers to list of sent messages
        # NOTE(review): initialized as a list, not the dict the comment
        # suggests — confirm which shape consumers expect.
        self.sms_messages = []
        # actions to be skipped from persistence
        self.skip_persistence_actions = [
            "Subscribe",
            "ConfirmSubscription",
            "Unsubscribe",
        ]
class ProxyListenerSNS(PersistingProxyListener):
def api_name(self):
    """Name of the AWS API this proxy listener handles."""
    return "sns"
def forward_request(self, method, path, data, headers):
    """Intercept SNS API requests before they reach the moto backend.

    A subset of actions (subscription attribute get/set, publish,
    tagging, topic/subscription lifecycle) is answered directly from
    LocalStack's own SNSBackend state; everything else falls through and
    is forwarded with the account ID in the payload rewritten for moto.

    Returns: an int status (OPTIONS preflight), a Response built by
    make_response/make_error (short-circuit), a Request (forward the
    modified payload), or True (forward unchanged).
    """
    if method == "OPTIONS":
        return 200
    # check region
    try:
        aws_stack.check_valid_region(headers)
        aws_stack.set_default_region_in_headers(headers)
    except Exception as e:
        return make_error(message=str(e), code=400)
    if method == "POST":
        # parse payload and extract fields
        req_data = urlparse.parse_qs(to_str(data), keep_blank_values=True)
        # parse data from query path
        if not req_data:
            parsed_path = urlparse.urlparse(path)
            req_data = urlparse.parse_qs(parsed_path.query, keep_blank_values=True)
        req_action = req_data["Action"][0]
        # the target topic may arrive under any of three parameter names
        topic_arn = (
            req_data.get("TargetArn") or req_data.get("TopicArn") or req_data.get("ResourceArn")
        )
        if topic_arn:
            topic_arn = topic_arn[0]
            topic_arn = aws_stack.fix_account_id_in_arns(topic_arn)
        if req_action == "SetSubscriptionAttributes":
            sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
            if not sub:
                return make_error(message="Unable to find subscription for given ARN", code=400)
            attr_name = req_data["AttributeName"][0]
            attr_value = req_data["AttributeValue"][0]
            sub[attr_name] = attr_value
            return make_response(req_action)
        elif req_action == "GetSubscriptionAttributes":
            sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
            if not sub:
                return make_error(
                    message="Subscription with arn {0} not found".format(
                        req_data["SubscriptionArn"][0]
                    ),
                    code=404,
                    code_string="NotFound",
                )
            content = "<Attributes>"
            for key, value in sub.items():
                # internal-only attributes are not exposed to the caller
                if key in HTTP_SUBSCRIPTION_ATTRIBUTES:
                    continue
                content += "<entry><key>%s</key><value>%s</value></entry>\n" % (
                    key,
                    value,
                )
            content += "</Attributes>"
            return make_response(req_action, content=content)
        elif req_action == "Subscribe":
            # validation only — the request still falls through and is forwarded
            if "Endpoint" not in req_data:
                return make_error(message="Endpoint not specified in subscription", code=400)
        elif req_action == "ConfirmSubscription":
            if "TopicArn" not in req_data:
                return make_error(
                    message="TopicArn not specified in confirm subscription request",
                    code=400,
                )
            if "Token" not in req_data:
                return make_error(
                    message="Token not specified in confirm subscription request",
                    code=400,
                )
            do_confirm_subscription(req_data.get("TopicArn")[0], req_data.get("Token")[0])
        elif req_action == "Unsubscribe":
            if "SubscriptionArn" not in req_data:
                return make_error(
                    message="SubscriptionArn not specified in unsubscribe request",
                    code=400,
                )
            do_unsubscribe(req_data.get("SubscriptionArn")[0])
        elif req_action == "DeleteTopic":
            do_delete_topic(topic_arn)
        elif req_action == "Publish":
            if req_data.get("Subject") == [""]:
                return make_error(code=400, code_string="InvalidParameter", message="Subject")
            sns_backend = SNSBackend.get()
            # No need to create a topic to send SMS or single push notifications with SNS
            # but we can't mock a sending so we only return that it went well
            if "PhoneNumber" not in req_data and "TargetArn" not in req_data:
                if topic_arn not in sns_backend.sns_subscriptions:
                    return make_error(
                        code=404,
                        code_string="NotFound",
                        message="Topic does not exist",
                    )
            message_id = publish_message(topic_arn, req_data, headers)
            # return response here because we do not want the request to be forwarded to SNS backend
            return make_response(req_action, message_id=message_id)
        elif req_action == "ListTagsForResource":
            tags = do_list_tags_for_resource(topic_arn)
            content = "<Tags/>"
            if len(tags) > 0:
                content = "<Tags>"
                for tag in tags:
                    content += "<member>"
                    content += "<Key>%s</Key>" % tag["Key"]
                    content += "<Value>%s</Value>" % tag["Value"]
                    content += "</member>"
                content += "</Tags>"
            return make_response(req_action, content=content)
        elif req_action == "CreateTopic":
            sns_backend = SNSBackend.get()
            topic_arn = aws_stack.sns_topic_arn(req_data["Name"][0])
            tag_resource_success = self._extract_tags(topic_arn, req_data, True, sns_backend)
            sns_backend.sns_subscriptions[topic_arn] = (
                sns_backend.sns_subscriptions.get(topic_arn) or []
            )
            # if the tags conflict with an existing topic, answer with an
            # error; otherwise fall through and forward the request
            if not tag_resource_success:
                return make_error(
                    code=400,
                    code_string="InvalidParameter",
                    message="Topic already exists with different tags",
                )
        elif req_action == "TagResource":
            sns_backend = SNSBackend.get()
            self._extract_tags(topic_arn, req_data, False, sns_backend)
            return make_response(req_action)
        elif req_action == "UntagResource":
            tags_to_remove = []
            req_tags = {k: v for k, v in req_data.items() if k.startswith("TagKeys.member.")}
            req_tags = req_tags.values()
            for tag in req_tags:
                tags_to_remove.append(tag[0])
            do_untag_resource(topic_arn, tags_to_remove)
            return make_response(req_action)
        # forward the (account-id-rewritten) payload to the moto backend
        data = self._reset_account_id(data)
        return Request(data=data, headers=headers, method=method)
    return True
@staticmethod
def _extract_tags(topic_arn, req_data, is_create_topic_request, sns_backend):
    """Collect Tags.member.N.Key/Value pairs from the request and apply them.

    Returns False when a repeated CreateTopic carries a tag that conflicts
    with the tags already stored for the topic; True otherwise.
    """
    collected = []
    member_params = {k: v for k, v in req_data.items() if k.startswith("Tags.member.")}
    existing_tags = sns_backend.sns_tags.get(topic_arn, None)
    # TODO: use aws_responses.extract_tags(...) here!
    # Parameters come in Key/Value pairs, numbered from 1.
    for idx in range(1, len(member_params) // 2 + 1):
        entry = {
            "Key": member_params["Tags.member." + str(idx) + ".Key"][0],
            "Value": member_params["Tags.member." + str(idx) + ".Value"][0],
        }
        collected.append(entry)
        # A topic created earlier (possibly with empty tags) must not be
        # re-created with different tag values, per AWS documentation.
        if is_create_topic_request and existing_tags is not None and entry not in existing_tags:
            return False
    do_tag_resource(topic_arn, collected)
    return True
@staticmethod
def _reset_account_id(data):
    """Fix account ID in request payload. All external-facing responses contain our
    predefined account ID (defaults to 000000000000), whereas the backend endpoint
    from moto expects a different hardcoded account ID (123456789012)."""
    replacements = dict(
        colon_delimiter="%3A",
        existing=TEST_AWS_ACCOUNT_ID,
        replace=MOTO_ACCOUNT_ID,
    )
    return aws_stack.fix_account_id_in_arns(data, **replacements)
def return_response(self, method, path, data, headers, response):
    """Post-processes SNS responses from the backend: normalizes account IDs,
    strips literal 'None' values from the XML, mirrors successful Subscribe
    calls into the local backend, and fires create/delete topic events.
    """
    # persist requests to disk
    super(ProxyListenerSNS, self).return_response(method, path, data, headers, response)
    if method == "POST" and path == "/":
        # convert account IDs in ARNs
        data = aws_stack.fix_account_id_in_arns(data, colon_delimiter="%3A")
        # NOTE(review): return value discarded — presumably this mutates
        # `response` in place; confirm against aws_stack's implementation.
        aws_stack.fix_account_id_in_arns(response)
        # remove "None" strings from result
        search = r"<entry><key>[^<]+</key>\s*<value>\s*None\s*</[^>]+>\s*</entry>"
        response_regex_replace(response, search, "")
        # parse request and extract data
        req_data = urlparse.parse_qs(to_str(data))
        req_action = req_data["Action"][0]
        if req_action == "Subscribe" and response.status_code < 400:
            response_data = xmltodict.parse(response.content)
            topic_arn = (req_data.get("TargetArn") or req_data.get("TopicArn"))[0]
            filter_policy = (req_data.get("FilterPolicy") or [None])[0]
            attributes = get_subscribe_attributes(req_data)
            sub_arn = response_data["SubscribeResponse"]["SubscribeResult"]["SubscriptionArn"]
            # Mirror the subscription into the local SNS backend so that
            # later publishes can fan out to it.
            do_subscribe(
                topic_arn,
                req_data["Endpoint"][0],
                req_data["Protocol"][0],
                sub_arn,
                attributes,
                filter_policy,
            )
        if req_action == "CreateTopic" and response.status_code < 400:
            response_data = xmltodict.parse(response.content)
            topic_arn = response_data["CreateTopicResponse"]["CreateTopicResult"]["TopicArn"]
            # publish event
            event_publisher.fire_event(
                event_publisher.EVENT_SNS_CREATE_TOPIC,
                payload={"t": event_publisher.get_hash(topic_arn)},
            )
        if req_action == "DeleteTopic" and response.status_code < 400:
            # publish event
            topic_arn = (req_data.get("TargetArn") or req_data.get("TopicArn"))[0]
            event_publisher.fire_event(
                event_publisher.EVENT_SNS_DELETE_TOPIC,
                payload={"t": event_publisher.get_hash(topic_arn)},
            )
def should_persist(self, method, path, data, headers, response):
    """Skips persistence for SNS actions the backend marks as non-persistent."""
    params = parse_request_data(method, path, data)
    backend = SNSBackend.get()
    if params.get("Action", "") in backend.skip_persistence_actions:
        return False
    return super(ProxyListenerSNS, self).should_persist(
        method, path, data, headers, response
    )
def patch_moto():
    """Monkey-patches moto's SNS backend for localstack-specific behavior."""
    create_platform_endpoint_orig = MotoSNSBackend.create_platform_endpoint

    def patch_create_platform_endpoint(self, *args):
        # Delegate to moto first; only the duplicate-endpoint case needs
        # special handling.
        try:
            return create_platform_endpoint_orig(self, *args)
        except DuplicateSnsEndpointError:
            custom_user_data, token = args[2], args[3]
            for endpoint in self.platform_endpoints.values():
                if endpoint.token != token:
                    continue
                if custom_user_data and custom_user_data != endpoint.custom_user_data:
                    raise DuplicateSnsEndpointError(
                        "Endpoint already exist for token: %s with different attributes" % token
                    )
                return endpoint

    MotoSNSBackend.create_platform_endpoint = patch_create_platform_endpoint
# apply moto patches at import time, before any request is handled
patch_moto()
# instantiate listener
UPDATE_SNS = ProxyListenerSNS()
def unsubscribe_sqs_queue(queue_url):
    """Called upon deletion of an SQS queue, to remove the queue from subscriptions.

    Arguments:
        queue_url: URL of the deleted SQS queue; any subscriber whose
            `sqs_queue_url` or `Endpoint` equals it is removed.
    """
    sns_backend = SNSBackend.get()
    # Fix: the original re-fetched `sns_subscriptions[topic_arn]` inside the
    # loop, shadowing the value already yielded by .items() — iterate the
    # values directly instead.
    for subscriptions in sns_backend.sns_subscriptions.values():
        # Iterate over a copy, since matching entries are removed from the
        # live list below.
        for subscriber in list(subscriptions):
            sub_url = subscriber.get("sqs_queue_url") or subscriber["Endpoint"]
            if queue_url == sub_url:
                subscriptions.remove(subscriber)
def message_to_subscribers(
    message_id,
    message,
    topic_arn,
    req_data,
    headers,
    subscription_arn=None,
    skip_checks=False,
):
    """Fans out a published message to all subscribers of the given topic.

    Arguments:
        message_id: ID assigned to the published message.
        message: the message payload.
        topic_arn: ARN of the topic being published to.
        req_data: parsed request parameters of the Publish call.
        headers: HTTP headers of the original request.
        subscription_arn: if set, deliver only to this subscription.
        skip_checks: if True, bypass filter-policy matching.
    """
    sns_backend = SNSBackend.get()
    subscriptions = sns_backend.sns_subscriptions.get(topic_arn, [])

    async def wait_for_messages_sent():
        # Fix: asyncio.wait() requires tasks/futures — passing bare
        # coroutines is deprecated since Python 3.8 and raises a TypeError
        # from 3.11 — so wrap each coroutine explicitly.
        tasks = [
            asyncio.ensure_future(
                message_to_subscriber(
                    message_id,
                    message,
                    topic_arn,
                    req_data,
                    headers,
                    subscription_arn,
                    skip_checks,
                    sns_backend,
                    subscriber,
                    subscriptions,
                )
            )
            for subscriber in list(subscriptions)
        ]
        if tasks:
            await asyncio.wait(tasks)

    asyncio.run(wait_for_messages_sent())
async def message_to_subscriber(
message_id,
message,
topic_arn,
req_data,
headers,
subscription_arn,
skip_checks,
sns_backend,
subscriber,
subscriptions,
):
if subscription_arn not in [None, subscriber["SubscriptionArn"]]:
return
filter_policy = json.loads(subscriber.get("FilterPolicy") or "{}")
message_attributes = get_message_attributes(req_data)
if not skip_checks and not check_filter_policy(filter_policy, message_attributes):
LOG.info(
"SNS filter policy %s does not match attributes %s"
% (filter_policy, message_attributes)
)
return
if subscriber["Protocol"] == "sms":
event = {
"topic_arn": topic_arn,
"endpoint": subscriber["Endpoint"],
"message_content": req_data["Message"][0],
}
sns_backend.sms_messages.append(event)
LOG.info(
"Delivering SMS message to %s: %s",
subscriber["Endpoint"],
req_data["Message"][0],
)
return
elif subscriber["Protocol"] == "sqs":
queue_url = None
try:
endpoint = subscriber["Endpoint"]
if "sqs_queue_url" in subscriber:
queue_url = subscriber.get("sqs_queue_url")
elif "://" in endpoint:
queue_url = endpoint
else:
queue_name = endpoint.split(":")[5]
queue_url = aws_stack.get_sqs_queue_url(queue_name)
subscriber["sqs_queue_url"] = queue_url
message_group_id = (
req_data.get("MessageGroupId")[0] if req_data.get("MessageGroupId") else ""
)
sqs_client = aws_stack.connect_to_service("sqs")
# TODO remove this kwargs if we stop using ElasticMQ entirely
kwargs = {"MessageGroupId": message_group_id} if SQS_BACKEND_IMPL == "moto" else {}
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=create_sns_message_body(subscriber, req_data, message_id),
MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes),
MessageSystemAttributes=create_sqs_system_attributes(headers),
**kwargs,
)
except Exception as exc:
LOG.info("Unable to forward SNS message to SQS: %s %s" % (exc, traceback.format_exc()))
sns_error_to_dead_letter_queue(subscriber["SubscriptionArn"], req_data, str(exc))
if | |
O
0x80, 0xC2, # O OO O
0x80, 0xC2, # O OO O
0x43, 0xC4, # O OOOO O
0x43, 0x84, # O OOO O
0x23, 0x08, # O OO O
0x18, 0x30, # OO OO
0x07, 0xC0, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @552 '*' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOOOOO
0xC0, 0x00, 0x30, # OO OO
0xA0, 0x00, 0x50, # O O O O
0x90, 0x00, 0x90, # O O O O
0x88, 0x01, 0x10, # O O O O
0x84, 0x02, 0x10, # O O O O
0x82, 0x04, 0x10, # O O O O
0x81, 0x08, 0x10, # O O O O
0x82, 0x94, 0x10, # O O O O O O
0x84, 0x62, 0x10, # O O OO O O
0x88, 0x01, 0x10, # O O O O
0x90, 0x00, 0x90, # O O O O
0xA0, 0x00, 0x50, # O O O O
0xC0, 0x00, 0x30, # OO OO
0xFF, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @621 '+' (20 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xFF, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOOOOO
0x80, 0x00, 0x10, # O O
0x80, 0x00, 0xD0, # O OO O
0x80, 0x00, 0xD0, # O OO O
0x80, 0x00, 0xD0, # O OO O
0x80, 0x00, 0x10, # O O
0x83, 0xF8, 0x10, # O OOOOOOO O
0x80, 0x00, 0x10, # O O
0x81, 0xFC, 0x10, # O OOOOOOO O
0x80, 0x00, 0x10, # O O
0x80, 0x7F, 0x10, # O OOOOOOO O
0x80, 0x00, 0x10, # O O
0x80, 0x00, 0x10, # O O
0x80, 0x00, 0x10, # O O
0xFF, 0xFF, 0xF0, # OOOOOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @690 ',' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x03, 0xE0, # OOOOO
0x00, 0x3C, 0x30, # OOOO OO
0x03, 0xC0, 0x18, # OOOO OO
0x3C, 0x00, 0x08, # OOOO O
0x63, 0x00, 0x08, # OO OO O
0xC0, 0x80, 0x08, # OO O O
0x80, 0x40, 0x08, # O O O
0x80, 0x40, 0xE8, # O O OOO O
0x9C, 0x5F, 0xE8, # O OOO O OOOOOOOO O
0x80, 0x79, 0xE8, # O OOOO OOOO O
0x80, 0x41, 0xE8, # O O OOOO O
0x80, 0x41, 0x08, # O O O O
0x80, 0x40, 0xF0, # O O OOOO
0x80, 0x4F, 0x00, # O O OOOO
0xFF, 0xF0, 0x00, # OOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @759 '-' (21 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x0E, 0x00, # OOO
0x00, 0x3F, 0xC0, # OOOOOOOO
0x00, 0x3E, 0x30, # OOOOO OO
0x00, 0xFE, 0x10, # OOOOOOO O
0x0F, 0x3E, 0x08, # OOOO OOOOO O
0x32, 0x30, 0x08, # OO O OO O
0x41, 0xB0, 0x08, # O OO OO O
0x80, 0x70, 0x08, # O OOO O
0x80, 0x70, 0x08, # O OOO O
0x80, 0x70, 0x08, # O OOO O
0x9C, 0x70, 0x08, # O OOO OOO O
0x80, 0x70, 0x08, # O OOO O
0x80, 0x40, 0x08, # O O O
0x80, 0x40, 0x08, # O O O
0x80, 0x40, 0xF0, # O O OOOO
0x80, 0x4F, 0x00, # O O OOOO
0xFF, 0xF0, 0x00, # OOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @828 '.' (27 pixels wide)
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x38, 0x00, # OOO
0x00, 0x00, 0xFF, 0x00, # OOOOOOOO
0x00, 0x00, 0xF8, 0xC0, # OOOOO OO
0x00, 0x07, 0xF8, 0x40, # OOOOOOOO O
0x00, 0x38, 0xE0, 0x20, # OOO OOO O
0x00, 0xC8, 0xC0, 0x20, # OO O OO O
0x01, 0x04, 0xC0, 0x20, # O O OO O
0x02, 0x1A, 0xC0, 0x20, # O OO O OO O
0x7A, 0x22, 0xC0, 0x20, # OOOO O O O OO O
0x86, 0x46, 0xC0, 0x20, # O OO O OO OO O
0x81, 0x5A, 0xC0, 0x20, # O O O OO O OO O
0x80, 0xD2, 0x80, 0x20, # O OO O O O O
0x4C, 0x52, 0x00, 0x20, # O OO O O O O
0x20, 0x22, 0x00, 0x60, # O O O OO
0x18, 0x16, 0x07, 0x80, # OO O OO OOOO
0x0C, 0x0A, 0x78, 0x00, # OO O O OOOO
0x07, 0xFF, 0x80, 0x00, # OOOOOOOOOOOO
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
# @920 '/' (27 pixels wide)
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x0F, 0x80, # OOOOO
0x00, 0x00, 0xF0, 0xC0, # OOOO OO
0x00, 0x07, 0x00, 0x60, # OOO OO
0x00, 0x78, 0x00, 0x20, # OOOO O
0x01, 0x8C, 0x00, 0x20, # OO OO O
0x01, 0x04, 0x00, 0x20, # O O O
0x02, 0x02, 0x00, 0xA0, # O O O O
0x7A, 0x02, 0x07, 0xA0, # OOOO O O OOOO O
0x86, 0x02, 0x7F, 0xA0, # O OO O OOOOOOOO O
0x81, 0x02, 0xC7, 0xA0, # O O O OO OOOO O
0x4C, 0xC2, 0x07, 0xA0, # O OO OO O OOOO O
0x60, 0x22, 0x08, 0x60, # OO O O O OO
0x10, 0x12, 0x07, 0x80, # O O O OOOO
0x0C, 0x0A, 0x78, 0x00, # OO O O OOOO
0x07, 0xFF, 0x80, 0x00, # OOOOOOOOOOOO
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
# @1012 '0' (19 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x1F, 0xC0, 0x00, # OOOOOOO
0x10, 0x40, 0x00, # O O
0xFF, 0xFF, 0xE0, # OOOOOOOOOOOOOOOOOOO
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0x80, 0x00, 0x20, # O O
0xFF, 0xFF, 0xE0, # OOOOOOOOOOOOOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @1081 '1' (25 pixels wide)
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, 0x00, #
0x1F, 0xC0, 0x00, 0x00, # OOOOOOO
0x10, 0x40, 0x00, 0x00, # O O
0xF0, 0x3F, 0xF0, 0x00, # OOOO OOOOOOOOOO
0x80, 0x00, 0x10, 0x00, # O O
0x83, 0xFF, 0xFF, 0x80, # O OOOOOOOOOOOOOOOOOOO
0x84, 0x00, 0x00, 0x80, # O O O
0x88, 0x00, 0x01, 0x00, # O O O
0x88, | |
<gh_stars>0
"""
Title: Graph representation learning with node2vec
Author: [<NAME>](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/05/15
Last modified: 2021/05/15
Description: Implementing the node2vec model to generate embeddings for movies from the MovieLens dataset.
"""
"""
## Introduction
Learning useful representations from objects structured as graphs benefits
a variety of machine learning (ML) applications—such as social and communication network analysis,
biomedicine studies, and recommendation systems.
[Graph representation Learning](https://www.cs.mcgill.ca/~wlh/grl_book/) aims to
learn embeddings for the graph nodes, which can be used for a variety of ML tasks
such as node label prediction (e.g. categorizing an article based on its citations)
and link prediction (e.g. recommending an interest group to a user in a social network).
[node2vec](https://arxiv.org/abs/1607.00653) is a simple, yet scalable and effective
technique for learning low-dimensional embeddings for nodes in a graph by optimizing
a neighborhood-preserving objective. The aim is to learn similar embeddings for
neighboring nodes, with respect to the graph structure.
Given your data items structured as a graph (where the items are represented as
nodes and the relationship between items are represented as edges),
node2vec works as follows:
1. Generate item sequences using (biased) random walk.
2. Create positive and negative training examples from these sequences.
3. Train a [word2vec](https://www.tensorflow.org/tutorials/text/word2vec) model
(skip-gram) to learn embeddings for the items.
In this example, we demonstrate the node2vec technique on the
[small version of the Movielens dataset](https://files.grouplens.org/datasets/movielens/ml-latest-small-README.html)
to learn movie embeddings. Such a dataset can be represented as a graph by treating
the movies as nodes, and creating edges between movies that have similar ratings
by the users. The learnt movie embeddings can be used for tasks such as movie recommendation,
or movie genres prediction.
This example requires `networkx` package, which can be installed using the following command:
```shell
pip install networkx
```
"""
"""
## Setup
"""
import os
from collections import defaultdict
import math
import networkx as nx
import random
from tqdm import tqdm
from zipfile import ZipFile
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
"""
## Download the MovieLens dataset and prepare the data
The small version of the MovieLens dataset includes around 100k ratings
from 610 users on 9,742 movies.
First, let's download the dataset. The downloaded folder will contain
three data files: `users.csv`, `movies.csv`, and `ratings.csv`. In this example,
we will only need the `movies.csv` and `ratings.csv` data files.
"""
# Download the small MovieLens archive and extract it into the working directory.
urlretrieve(
    "http://files.grouplens.org/datasets/movielens/ml-latest-small.zip", "movielens.zip"
)
ZipFile("movielens.zip", "r").extractall()
"""
Then, we load the data into a Pandas DataFrame and perform some basic preprocessing.
"""
# Load movies to a DataFrame.
movies = pd.read_csv("ml-latest-small/movies.csv")
# Create a `movieId` string — prefix with "movie_" so node IDs are string tokens.
movies["movieId"] = movies["movieId"].apply(lambda x: f"movie_{x}")
# Load ratings to a DataFrame.
ratings = pd.read_csv("ml-latest-small/ratings.csv")
# Convert the `ratings` to floating point.
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Create the `movie_id` string (same prefix as in `movies`, so keys match).
ratings["movieId"] = ratings["movieId"].apply(lambda x: f"movie_{x}")
print("Movies data shape:", movies.shape)
print("Ratings data shape:", ratings.shape)
"""
Let's inspect a sample instance of the `ratings` DataFrame.
"""
ratings.head()
"""
Next, let's check a sample instance of the `movies` DataFrame.
"""
movies.head()
"""
Implement two utility functions for the `movies` DataFrame.
"""
def get_movie_title_by_id(movieId):
    """Return the title of the movie with the given `movieId`."""
    matching_rows = movies[movies.movieId == movieId]
    return matching_rows.title.iloc[0]
def get_movie_id_by_title(title):
    """Return the `movieId` of the movie with the given title."""
    matching_rows = movies[movies.title == title]
    return matching_rows.movieId.iloc[0]
"""
## Construct the Movies graph
We create an edge between two movie nodes in the graph if both movies are rated
by the same user >= `min_rating`. The weight of the edge will be based on the
[pointwise mutual information](https://en.wikipedia.org/wiki/Pointwise_mutual_information)
between the two movies, which is computed as: `log(xy) - log(x) - log(y) + log(D)`, where:
* `xy` is how many users rated both movie `x` and movie `y` with >= `min_rating`.
* `x` is how many users rated movie `x` >= `min_rating`.
* `y` is how many users rated movie `y` >= `min_rating`.
* `D` total number of movie ratings >= `min_rating`.
"""
"""
### Step 1: create the weighted edges between movies.
"""
min_rating = 5
# pair_frequency[(x, y)]: number of users who rated both x and y >= min_rating.
pair_frequency = defaultdict(int)
# item_frequency[x]: number of users who rated x >= min_rating.
item_frequency = defaultdict(int)
# Filter instances where rating is greater than or equal to min_rating.
rated_movies = ratings[ratings.rating >= min_rating]
# Group instances by user.
movies_grouped_by_users = list(rated_movies.groupby("userId"))
for group in tqdm(
    movies_grouped_by_users,
    position=0,
    leave=True,
    desc="Compute movie rating frequencies",
):
    # Get a list of movies rated by the user.
    current_movies = list(group[1]["movieId"])
    for i in range(len(current_movies)):
        item_frequency[current_movies[i]] += 1
        for j in range(i + 1, len(current_movies)):
            # Canonicalize the pair ordering so (a, b) and (b, a) share a key.
            x = min(current_movies[i], current_movies[j])
            y = max(current_movies[i], current_movies[j])
            pair_frequency[(x, y)] += 1
"""
### Step 2: create the graph with the nodes and the edges
To reduce the number of edges between nodes, we only add an edge between movies
if the weight of the edge is greater than `min_weight`.
"""
min_weight = 10
# D = log of the total number of ratings >= min_rating; constant term of the PMI.
D = math.log(sum(item_frequency.values()))
# Create the movies undirected graph.
movies_graph = nx.Graph()
# Add weighted edges between movies.
# This automatically adds the movie nodes to the graph.
for pair in tqdm(
    pair_frequency, position=0, leave=True, desc="Creating the movie graph"
):
    x, y = pair
    xy_frequency = pair_frequency[pair]
    x_frequency = item_frequency[x]
    y_frequency = item_frequency[y]
    # Pointwise mutual information: log(xy) - log(x) - log(y) + log(D_total).
    pmi = math.log(xy_frequency) - math.log(x_frequency) - math.log(y_frequency) + D
    # Scale by co-occurrence count so frequent pairs weigh more.
    weight = pmi * xy_frequency
    # Only include edges with weight >= min_weight.
    if weight >= min_weight:
        movies_graph.add_edge(x, y, weight=weight)
"""
Let's display the total number of nodes and edges in the graph.
Note that the number of nodes is less than the total number of movies,
since only the movies that have edges to other movies are added.
"""
print("Total number of graph nodes:", movies_graph.number_of_nodes())
print("Total number of graph edges:", movies_graph.number_of_edges())
"""
Let's display the average node degree (number of neighbours) in the graph.
"""
# Comprehension instead of the original manual append loop.
degrees = [movies_graph.degree[node] for node in movies_graph.nodes]
print("Average node degree:", round(sum(degrees) / len(degrees), 2))
"""
### Step 3: Create vocabulary and a mapping from tokens to integer indices
The vocabulary is the nodes (movie IDs) in the graph.
"""
# Index 0 is reserved for the out-of-vocabulary token "NA".
vocabulary = ["NA"] + list(movies_graph.nodes)
vocabulary_lookup = {token: idx for idx, token in enumerate(vocabulary)}
"""
## Implement the biased random walk
A random walk starts from a given node, and randomly picks a neighbour node to move to.
If the edges are weighted, the neighbour is selected *probabilistically* with
respect to weights of the edges between the current node and its neighbours.
This procedure is repeated for `num_steps` to generate a sequence of *related* nodes.
The [*biased* random walk](https://en.wikipedia.org/wiki/Biased_random_walk_on_a_graph) balances between **breadth-first sampling**
(where only local neighbours are visited) and **depth-first sampling**
(where distant neighbours are visited) by introducing the following two parameters:
1. **Return parameter** (`p`): Controls the likelihood of immediately revisiting
a node in the walk. Setting it to a high value encourages moderate exploration,
while setting it to a low value would keep the walk local.
2. **In-out parameter** (`q`): Allows the search to differentiate
between *inward* and *outward* nodes. Setting it to a high value biases the
random walk towards local nodes, while setting it to a low value biases the walk
to visit nodes which are further away.
"""
def next_step(graph, previous, current, p, q):
    """Probabilistically pick the next node of a biased random walk.

    Arguments:
        graph: weighted graph; edges carry a "weight" attribute.
        previous: node visited just before `current` (None at walk start).
        current: node the walk is currently at.
        p: return parameter — higher values discourage revisiting `previous`.
        q: in-out parameter — higher values bias the walk toward local nodes.

    Returns:
        The sampled neighbor node.
    """
    neighbors = list(graph.neighbors(current))
    weights = []
    # Adjust the weights of the edges to the neighbors with respect to p and q.
    for neighbor in neighbors:
        if neighbor == previous:
            # Control the probability to return to the previous node.
            weights.append(graph[current][neighbor]["weight"] / p)
        elif graph.has_edge(neighbor, previous):
            # The probability of visiting a local node.
            weights.append(graph[current][neighbor]["weight"])
        else:
            # Control the probability to move forward.
            weights.append(graph[current][neighbor]["weight"] / q)
    # Compute the probabilities of visiting each neighbor.
    weight_sum = sum(weights)
    probabilities = [weight / weight_sum for weight in weights]
    # Probabilistically select a neighbor to visit. Renamed from `next`,
    # which shadowed the builtin.
    next_node = np.random.choice(neighbors, size=1, p=probabilities)[0]
    return next_node
def random_walk(graph, num_walks, num_steps, p, q):
    """Generate biased random-walk node sequences over the graph.

    Arguments:
        graph: weighted graph of items to walk over.
        num_walks: number of walk iterations; each iteration starts one
            walk from every node (in shuffled order).
        num_steps: length of each generated walk.
        p: return parameter of the biased walk.
        q: in-out parameter of the biased walk.

    Returns:
        A list of walks, each a list of token indices (node ids mapped
        through the module-level `vocabulary_lookup`).
    """
    walks = []
    nodes = list(graph.nodes())
    # Perform multiple iterations of the random walk.
    for walk_iteration in range(num_walks):
        random.shuffle(nodes)
        for node in tqdm(
            nodes,
            position=0,
            leave=True,
            desc=f"Random walks iteration {walk_iteration + 1} of {num_walks}",
        ):
            # Start the walk with a random node from the graph.
            walk = [node]
            # Randomly walk for num_steps.
            while len(walk) < num_steps:
                current = walk[-1]
                previous = walk[-2] if len(walk) > 1 else None
                # Compute the next node to visit. Renamed from `next`,
                # which shadowed the builtin.
                next_node = next_step(graph, previous, current, p, q)
                walk.append(next_node)
            # Replace node ids (movie ids) in the walk with token ids.
            walk = [vocabulary_lookup[token] for token in walk]
            # Add the walk to the generated sequence.
            walks.append(walk)
    return walks
"""
## Generate training data using the biased random walk
You can explore different configurations of `p` and `q` to different results of
related movies.
"""
# Random walk return parameter (p): divides the weight of returning to the
# previous node, so higher p discourages immediate backtracking.
p = 1
# Random walk in-out parameter (q): divides the weight of moving to non-local
# nodes, so higher q keeps the walk local.
q = 1
# Number of iterations of random walks.
num_walks = 5
# Number of steps of each random walk.
num_steps = 10
walks = random_walk(movies_graph, num_walks, num_steps, p, q)
print("Number of walks generated:", len(walks))
"""
## Generate positive and negative examples
To train a skip-gram model, we use the generated walks to | |
<filename>core/domain/suggestion_services_test.py
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suggestion related services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from constants import constants
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import question_domain
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import state_domain
from core.domain import story_domain
from core.domain import story_services
from core.domain import suggestion_registry
from core.domain import suggestion_services
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
# Import the storage models these tests exercise: suggestion, feedback, user.
(suggestion_models, feedback_models, user_models) = (
    models.Registry.import_models(
        [models.NAMES.suggestion, models.NAMES.feedback, models.NAMES.user]
    )
)
class SuggestionServicesUnitTests(test_utils.GenericTestBase):
"""Test the functions in suggestion_services."""
# Score category for content suggestions in the 'Algebra' category, in the
# '<score_type><delimiter><category>' format.
score_category = (
    suggestion_models.SCORE_TYPE_CONTENT +
    suggestion_models.SCORE_CATEGORY_DELIMITER + 'Algebra')
# IDs of the target explorations used across these tests.
target_id = 'exp1'
target_id_2 = 'exp2'
target_id_3 = 'exp3'
# Exploration version the suggestions are submitted against.
target_version_at_submission = 1
# Default change dict: edit the content of 'state_1'.
change = {
    'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
    'property_name': exp_domain.STATE_PROPERTY_CONTENT,
    'state_name': 'state_1',
    'new_value': {
        'content_id': 'content',
        'html': 'new suggestion content'
    }
}
AUTHOR_EMAIL = '<EMAIL>'
REVIEWER_EMAIL = '<EMAIL>'
NORMAL_USER_EMAIL = '<EMAIL>'
# Thread IDs double as suggestion IDs, in the
# '<entity_type>.<entity_id>.<thread_id>' format.
THREAD_ID = 'exploration.exp1.thread_1'
COMMIT_MESSAGE = 'commit message'
EMPTY_COMMIT_MESSAGE = ' '
suggestion_id = THREAD_ID
suggestion_id_2 = 'exploration.exp2.thread_2'
suggestion_id_3 = 'exploration.exp3.thread_3'
def setUp(self):
    """Registers author/reviewer/normal users and a target exploration."""
    super(SuggestionServicesUnitTests, self).setUp()
    self.signup(self.AUTHOR_EMAIL, 'author')
    self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
    self.signup(self.REVIEWER_EMAIL, 'reviewer')
    self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
    self.signup(self.NORMAL_USER_EMAIL, 'normaluser')
    self.normal_user_id = self.get_user_id_from_email(
        self.NORMAL_USER_EMAIL)
    # Suggestions in these tests target an Algebra exploration owned by the
    # author user, matching score_category above.
    self.save_new_valid_exploration(
        self.target_id, self.author_id, category='Algebra')
def assert_suggestion_status(self, suggestion_id, status):
    """Checks that the suggestion with the given ID has the given status."""
    fetched_suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
    self.assertEqual(fetched_suggestion.status, status)
def mock_accept_suggestion(
        self, suggestion_id, reviewer_id, commit_message, review_message):
    """Sets up the appropriate mocks to successfully call
    accept_suggestion.
    """
    # Build all swap context managers up front, then apply them together
    # instead of nesting the with-statements.
    update_exp_swap = self.swap(
        exp_services, 'update_exploration', self.mock_update_exploration)
    get_exp_swap = self.swap(
        exp_fetchers, 'get_exploration_by_id',
        self.mock_get_exploration_by_id)
    pre_accept_swap = self.swap(
        suggestion_registry.SuggestionEditStateContent,
        'pre_accept_validate',
        self.mock_pre_accept_validate_does_nothing)
    change_list_swap = self.swap(
        suggestion_registry.SuggestionEditStateContent,
        'get_change_list_for_accepting_suggestion',
        self.mock_get_change_list_does_nothing)
    with update_exp_swap, get_exp_swap, pre_accept_swap, change_list_swap:
        suggestion_services.accept_suggestion(
            suggestion_id, reviewer_id, commit_message, review_message)
def mock_create_suggestion(self, target_id):
    """Sets up the appropriate mocks to successfully call
    create_suggestion.
    """
    # Apply both swaps in a single with-statement instead of nesting.
    thread_id_swap = self.swap(
        feedback_models.GeneralFeedbackThreadModel,
        'generate_new_thread_id', self.mock_generate_new_thread_id)
    get_exp_swap = self.swap(
        exp_fetchers, 'get_exploration_by_id',
        self.mock_get_exploration_by_id)
    with thread_id_swap, get_exp_swap:
        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            target_id, self.target_version_at_submission,
            self.author_id, self.change, 'test description')
def mock_generate_new_thread_id(self, entity_type, exp_id):
    """Deterministic thread-ID generator for tests: the thread suffix is
    derived from the last character of the exploration ID."""
    thread_suffix = 'thread_%s' % exp_id[-1]
    return '%s.%s.%s' % (entity_type, exp_id, thread_suffix)
class MockExploration(python_utils.OBJECT):
    """Mocks an exploration. To be used only for testing."""

    def __init__(self, exploration_id, states):
        self.id = exploration_id
        self.states = states
        # Category matches the one used in score_category ('Algebra').
        self.category = 'Algebra'

# All mock explorations created for testing.
explorations = [
    MockExploration('exp1', {'state_1': {}, 'state_2': {}}),
    MockExploration('exp2', {'state_1': {}, 'state_2': {}}),
    MockExploration('exp3', {'state_1': {}, 'state_2': {}})
]
def mock_get_exploration_by_id(self, exp_id):
    """Returns the mock exploration with the matching ID, or None."""
    matching = [exp for exp in self.explorations if exp.id == exp_id]
    return matching[0] if matching else None
def mock_pre_accept_validate_does_nothing(self):
    """No-op stand-in for pre_accept_validate, used to skip validation."""
    pass

def mock_get_change_list_does_nothing(self):
    """No-op stand-in for get_change_list_for_accepting_suggestion."""
    pass

def mock_accept_does_nothing(self, unused_arg):
    """No-op stand-in taking one ignored argument."""
    pass
def test_create_new_suggestion_successfully(self):
    """A newly created suggestion carries the expected field values."""
    expected_suggestion_dict = {
        'suggestion_id': 'exploration.exp1.thread_1',
        'suggestion_type': (
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
        'target_type': suggestion_models.TARGET_TYPE_EXPLORATION,
        'target_id': self.target_id,
        'target_version_at_submission': self.target_version_at_submission,
        'status': suggestion_models.STATUS_IN_REVIEW,
        'author_name': 'author',
        'change': {
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': exp_domain.STATE_PROPERTY_CONTENT,
            'state_name': 'state_1',
            'new_value': {
                'content_id': 'content',
                'html': 'new suggestion content'
            },
            'old_value': None
        },
        'score_category': self.score_category,
        'language_code': None
    }
    self.mock_create_suggestion(self.target_id)
    observed_suggestion = suggestion_services.get_suggestion_by_id(
        self.suggestion_id)
    # Only the listed keys are compared; other fields (e.g. timestamps) vary.
    self.assertDictContainsSubset(
        expected_suggestion_dict, observed_suggestion.to_dict())
def test_cannot_create_suggestion_with_invalid_suggestion_type(self):
    """An unknown suggestion type is rejected at creation time."""
    with self.assertRaisesRegexp(Exception, 'Invalid suggestion type'):
        suggestion_services.create_suggestion(
            'invalid_suggestion_type',
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            self.author_id, self.change, 'test description')

def test_cannot_create_suggestion_with_invalid_author_id(self):
    """An author ID that is not a valid user ID is rejected."""
    with self.assertRaisesRegexp(
        Exception, 'Expected author_id to be in a valid user ID format'):
        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            'invalid author ID', self.change, 'test description')

def test_cannot_create_translation_suggestion_with_invalid_content_html_raise_error(self):  # pylint: disable=line-too-long
    """A translation suggestion whose content_html does not match the
    exploration content is rejected."""
    add_translation_change_dict = {
        'cmd': 'add_translation',
        'state_name': 'Introduction',
        'content_id': 'content',
        'language_code': 'hi',
        'content_html': '<p>The invalid content html</p>',
        'translation_html': '<p>Translation for invalid content.</p>'
    }
    with self.assertRaisesRegexp(
        Exception,
        'The given content_html does not match the content of the '
        'exploration.'):
        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            self.author_id, add_translation_change_dict, 'test description')
def test_get_all_stale_suggestion_ids(self):
    """A suggestion counts as stale once the acceptance-threshold time
    has elapsed."""
    suggestion_services.create_suggestion(
        suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
        suggestion_models.TARGET_TYPE_EXPLORATION,
        self.target_id, self.target_version_at_submission,
        self.author_id, self.change, 'test description')
    # With a zero threshold, the just-created suggestion is already stale.
    with self.swap(
        suggestion_models, 'THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS', 0):
        self.assertEqual(
            len(suggestion_services.get_all_stale_suggestion_ids()), 1)
    # With a 7-day threshold, it is not stale yet.
    with self.swap(
        suggestion_models, 'THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS',
        7 * 24 * 60 * 60 * 1000):
        self.assertEqual(
            len(suggestion_services.get_all_stale_suggestion_ids()), 0)
def mock_update_exploration(
        self, unused_user_id, unused_exploration_id, unused_change_list,
        commit_message, is_suggestion):
    """Mock for exp_services.update_exploration that asserts the call comes
    from a suggestion and carries the expected commit message."""
    self.assertTrue(is_suggestion)
    self.assertEqual(
        commit_message, 'Accepted suggestion by %s: %s' % (
            'author', self.COMMIT_MESSAGE))
    def test_cannot_reject_suggestion_with_empty_review_message(self):
        """Rejecting with an empty review message must fail and leave the
        suggestion in review.
        """
        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            self.author_id, self.change, 'test description')
        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id), (
                'target_id', self.target_id)])[0]
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        with self.assertRaisesRegexp(
            Exception, 'Review message cannot be empty.'):
            suggestion_services.reject_suggestion(
                suggestion.suggestion_id, self.reviewer_id, '')
        # Assert that the suggestion was not rejected.
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
    def test_accept_suggestion_and_send_email_to_author(self):
        """Accepting a suggestion whose score increment reaches the review
        threshold marks it accepted, bumps the author's proficiency score,
        and sends the onboarding-reviewer email.
        """
        change_list = [exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_ADD_STATE,
            'state_name': 'state 1',
        })]
        exp_services.update_exploration(
            self.author_id, self.target_id, change_list, 'Add state.')
        new_suggestion_content = state_domain.SubtitledHtml(
            'content', '<p>new suggestion content html</p>').to_dict()
        change_dict = {
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': exp_domain.STATE_PROPERTY_CONTENT,
            'state_name': 'state 1',
            'new_value': new_suggestion_content
        }
        suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, self.target_version_at_submission,
            self.author_id, change_dict, 'test description')
        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id), (
                'target_id', self.target_id)])[0]
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        # Create a user proficiency model to verify that the
        # score and onboarding_email_sent fields have changed after the
        # suggestion has been accepted.
        user_models.UserContributionProficiencyModel.create(
            self.author_id, suggestion.score_category, 0)
        # An email is sent to users the first time that they pass the score
        # required to review a suggestion category. By default, when a
        # suggestion is accepted and the recording of scores is enabled, the
        # score of the author of that suggestion is increased by 1. Therefore,
        # by setting that increment to minimum score required to review, we can
        # ensure that the email is sent.
        with self.swap(feconf, 'ENABLE_RECORDING_OF_SCORES', True):
            with self.swap(
                feconf, 'SEND_SUGGESTION_REVIEW_RELATED_EMAILS', True):
                with self.swap(
                    suggestion_models, 'INCREMENT_SCORE_OF_AUTHOR_BY',
                    feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW):
                    suggestion_services.accept_suggestion(
                        suggestion.suggestion_id, self.reviewer_id,
                        self.COMMIT_MESSAGE, 'review message')
        # Assert that the suggestion is now accepted.
        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id), (
                'target_id', self.target_id)])[0]
        self.assert_suggestion_status(
            suggestion.suggestion_id, suggestion_models.STATUS_ACCEPTED)
        # Assert that the email was sent and that the score increased by the
        # correct amount.
        user_proficiency_model = (
            user_models.UserContributionProficiencyModel.get(
                self.author_id, suggestion.score_category
            )
        )
        self.assertTrue(user_proficiency_model.onboarding_email_sent)
        self.assertEqual(
            user_proficiency_model.score,
            feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW)
    def test_accept_suggestion_does_not_send_email_if_users_score_is_too_low(
            self):
        """The default score increment (1) is below the review threshold,
        so accepting must update the score without sending the onboarding
        email.
        """
        self.mock_create_suggestion(self.target_id)
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        # Create the user proficiency model to verify the score and
        # that the onboarding_email_sent field does not change after the
        # suggestion is accepted.
        user_models.UserContributionProficiencyModel.create(
            self.author_id, self.score_category, 0)
        # An email is sent to users the first time that they pass the score
        # required to review a suggestion category. By default, when a
        # suggestion is accepted and the recording of scores is enabled, the
        # score of the author of that suggestion is increased by 1. This is
        # less than the minimum score required to review so an email should not
        # be sent.
        with self.swap(feconf, 'ENABLE_RECORDING_OF_SCORES', True):
            with self.swap(
                feconf, 'SEND_SUGGESTION_REVIEW_RELATED_EMAILS', True):
                self.mock_accept_suggestion(
                    self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
                    'review message')
        # Assert that the suggestion is now accepted.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
        user_proficiency_model = (
            user_models.UserContributionProficiencyModel.get(
                self.author_id, self.score_category
            )
        )
        # Assert that the users score was updated correctly.
        self.assertEqual(
            user_proficiency_model.score,
            suggestion_models.INCREMENT_SCORE_OF_AUTHOR_BY)
        # Assert that their score is not high enough to review the category.
        self.assertLess(
            user_proficiency_model.score,
            feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW)
        # Assert that the onboarding new reviewer email was not sent.
        self.assertFalse(user_proficiency_model.onboarding_email_sent)
    def test_accept_suggestion_creates_user_proficiency_model_if_it_is_none(
            self):
        """Accepting with score recording enabled creates the author's
        proficiency model when none exists yet.
        """
        self.mock_create_suggestion(self.target_id)
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        # Verify that a user proficiency model does not exist.
        self.assertIsNone(user_models.UserContributionProficiencyModel.get(
            self.author_id, self.score_category))
        with self.swap(feconf, 'ENABLE_RECORDING_OF_SCORES', True):
            self.mock_accept_suggestion(
                self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
                'review message')
        # Verify that a user proficiency model now exists.
        self.assertIsNotNone(user_models.UserContributionProficiencyModel.get(
            self.author_id, self.score_category))
    def test_accept_suggestion_successfully(self):
        """Happy path: accepting sets the status, records the reviewer, and
        posts the review message to the feedback thread.
        """
        self.mock_create_suggestion(self.target_id)
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_IN_REVIEW)
        self.mock_accept_suggestion(
            self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
            'review message')
        # Assert that the suggestion is now accepted.
        self.assert_suggestion_status(
            self.suggestion_id, suggestion_models.STATUS_ACCEPTED)
        suggestion = suggestion_services.get_suggestion_by_id(
            self.suggestion_id)
        self.assertEqual(
            suggestion.final_reviewer_id, self.reviewer_id)
        # The review message should be the most recent thread message.
        thread_messages = feedback_services.get_messages(self.THREAD_ID)
        last_message = thread_messages[len(thread_messages) - 1]
        self.assertEqual(
            last_message.text, 'review message')
    def test_accept_suggestion_raises_exception_if_suggestion_does_not_exist(
            self):
        """Accepting a non-existent suggestion id must raise."""
        expected_exception_regexp = (
            'You cannot accept the suggestion with id %s because it does not '
            'exist.' % (self.suggestion_id)
        )
        with self.assertRaisesRegexp(Exception, expected_exception_regexp):
            self.mock_accept_suggestion(
                self.suggestion_id, self.reviewer_id, self.COMMIT_MESSAGE,
                'review message')
def test_accept_suggestion_with_invalid_math_fails(self):
"""Test that the method for accepting suggestions raises error when
a suggestion with invalid math-tags is tried to be accepted.
"""
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': (
'<oppia-noninteractive-math raw_latex-with-value="&am'
'p;quot;(x - a_1)(x - a_2)(x - a_3)...(x - a_n)&q'
'uot;"></oppia-noninteractive-math>')
}
}
with self.swap(
feedback_models.GeneralFeedbackThreadModel,
'generate_new_thread_id', self.mock_generate_new_thread_id):
with self.swap(
exp_fetchers, 'get_exploration_by_id',
self.mock_get_exploration_by_id):
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION,
self.target_id, self.target_version_at_submission,
self.author_id, change_dict, 'test | |
: variable_property")
def chaining_meth_base(self, p):
return ChainingStuff([], [p[0]])
@pg.production("chaining_dereference : "
"chaining_dereference [ dim_offset ]")
def chaining_index(self, p):
lst = p[0]
assert isinstance(lst, ChainingStuff)
lst.indices.append(p[2])
return lst
@pg.production("chaining_dereference : [ dim_offset ]")
def chaining_index_base(self, p):
return ChainingStuff([p[1]], [])
@pg.production("chaining_instance_call : "
"chaining_dereference chaining_method_or_property")
def chaining_both(self, p):
index_list = p[0]
prop_list = p[1]
assert isinstance(index_list, ChainingStuff)
assert isinstance(prop_list, ChainingStuff)
return ChainingStuff(index_list.indices, prop_list.props)
@pg.production("chaining_instance_call : chaining_dereference")
@pg.production("chaining_instance_call : chaining_method_or_property")
def chaining_instance_call(self, p):
return p[0]
@pg.production("new_expr : T_NEW class_name_reference ctor_arguments")
def new_expr(self, p):
return New(p[1], p[2].getstmtlist(), lineno=p[0].getsourcepos())
@pg.production("lexical_vars : empty")
def lexical_vars_empty(self, p):
return None
@pg.production("lexical_vars : T_USE ( lexical_var_list )")
def lexical_vars_use_lexical_var_list(self, p):
return p[2]
@pg.production("lexical_var_list : lexical_var")
def lexical_var_list_lexical_var(self, p):
return ListOfVars([p[0]])
@pg.production("lexical_var_list : & lexical_var")
def lexical_var_list_ref(self, p):
return ListOfVars([Reference(p[1])])
@pg.production("lexical_var_list : lexical_var_list , lexical_var")
def lexical_var_list_more_vars(self, p):
varlist = p[0]
assert isinstance(varlist, ListOfVars)
return ListOfVars(varlist.varlist + [p[2]])
@pg.production("lexical_var_list : lexical_var_list , & lexical_var")
def lexical_var_list_more_refs(self, p):
varlist = p[0]
assert isinstance(varlist, ListOfVars)
return ListOfVars(varlist.varlist + [Reference(p[3])])
@pg.production("lexical_var : T_VARIABLE")
def lexical_var_variable(self, p):
return NamedVariable(p[0].getstr()[1:])
@pg.production("assignment_list : "
"assignment_list , assignment_list_element")
def assignment_list_assignment_list_assignment_list_element(self, p):
x = p[0]
assert isinstance(x, ListOfVars)
return ListOfVars(x.varlist + [p[2]])
@pg.production("assignment_list : assignment_list_element")
def assignment_list_assignment_list_element(self, p):
return ListOfVars(p)
@pg.production("assignment_list_element : variable")
def assignment_list_element_variable(self, p):
return p[0]
@pg.production("assignment_list_element : T_LIST ( assignment_list )")
def assignment_list_element_t_list_assignment_list(self, p):
return p[2]
@pg.production("assignment_list_element : empty")
def assignment_list_element_empty(self, p):
return None
@pg.production("internal_functions_in_yacc : T_ISSET ( isset_variables )")
def internal_f_isset(self, p):
return p[2]
@pg.production("internal_functions_in_yacc : T_EMPTY ( variable )")
#@pg.production("internal_functions_in_yacc : "
# "T_EMPTY ( expr_without_variable )")
def internal_f_empty(self, p):
return Empty(p[2], lineno=p[0].getsourcepos())
@pg.production("isset_variables : isset_variables , isset_variable")
def issetvs_issetvs_issetv(self, p):
return And(p[0], p[2], lineno=p[1].getsourcepos())
@pg.production("isset_variables : isset_variable")
def issetvs_issetv(self, p):
return p[0]
@pg.production("isset_variable : variable")
<EMAIL>("isset_variable : expr_without_variable")
def isset_variable(self, p):
return IsSet(p[0], lineno=p[0].lineno)
@pg.production("expr : T_PRINT expr")
def expr_t_print_expr(self, p):
return Print(p[1], lineno=p[0].getsourcepos())
# what's that?
# we are missing
# http://php.net/manual/en/language.types.string.php#language.types.string.parsing.complex
    #@pg.production("scalar : T_STRING_VARNAME")
#def scalar_t_string_varname(self, p):
# raise NotImplementedError(p)
@pg.production("scalar : class_constant")
def scalar_class_constant(self, p):
return p[0]
@pg.production("scalar : namespace_name")
def scalar_namespace_name(self, p):
return NamedConstant(p[0].getstr(), p[0].getsourcepos())
@pg.production("scalar : T_NAMESPACE T_NS_SEPARATOR namespace_name")
def scalar_namespace_sep_namespace(self, p):
raise ParseError("not implemented", p[0].getsourcepos())
@pg.production("scalar : T_NS_SEPARATOR namespace_name")
def scalar_sep_namespace(self, p):
raise ParseError("not implemented", p[0].getsourcepos())
@pg.production("scalar : common_scalar")
def scalar_common_scalar(self, p):
return p[0]
    @pg.production("scalar : T_START_HEREDOC encaps_list T_END_HEREDOC")
    def scalar_heredoc_encaps_list(self, p):
        """Build a DoubleQuotedStr from an interpolated heredoc body."""
        lst = p[1]
        l = []
        if isinstance(lst, LinkedList):
            # Interpolation produced a chain of pieces; flatten into a list.
            lst.flatten(l)
        else:
            l.append(p[1])
        # NOTE(review): lineno comes from p[0].lineno, while most handlers in
        # this file use p[0].getsourcepos() -- confirm the token exposes
        # .lineno.
        return DoubleQuotedStr(l, lineno=p[0].lineno)
    @pg.production('scalar : " encaps_list "')
    def scalar_encaps_list(self, p):
        """Build a DoubleQuotedStr from an interpolated "..." literal."""
        lst = p[1]
        l = []
        if isinstance(lst, LinkedList):
            # Interpolation produced a chain of pieces; flatten into a list.
            lst.flatten(l)
        else:
            l.append(p[1])
        # NOTE(review): p[0].lineno vs the getsourcepos() used elsewhere --
        # confirm the token exposes .lineno.
        return DoubleQuotedStr(l, lineno=p[0].lineno)
@pg.production('encaps_list : encaps_list encaps_var')
def encaps_list_encaps_list_encaps_var(self, p):
return LinkedList(p[0], p[1])
    @pg.production('encaps_list : encaps_list T_ENCAPSED_AND_WHITESPACE')
    def encaps_list_encaps_list_encapsed_and_whitespace(self, p):
        # NOTE(review): _parse_doublequoted's second positional parameter is
        # lineno, so False lands in lineno here (and escape_quotes stays
        # True).  Confirm this is intended rather than
        # (..., <lineno>, False) as used by the heredoc handler.
        v = self._parse_doublequoted(p[1].getstr(), False)
        return LinkedList(p[0], v)
@pg.production('encaps_list : encaps_list T_CONSTANT_ENCAPSED_STRING')
def encaps_list_encaps_list_encapsed_and_t_constant_encapsed(self, p):
v = self._parse_doublequoted(p[1].getstr(), False)
return LinkedList(p[0], v)
@pg.production('encaps_list : encaps_var')
def encaps_list_encaps_var(self, p):
return p[0]
@pg.production('encaps_list : T_ENCAPSED_AND_WHITESPACE encaps_var')
def encaps_list_encaps_list_encapsed_and_whitespce_encaps_var(self, p):
v = self._parse_doublequoted(p[0].getstr(), False)
return LinkedList(v, p[1])
@pg.production('encaps_list : T_STRING encaps_var')
def encaps_list_encaps_list_string(self, p):
v = self._parse_doublequoted(p[0].getstr(), False)
return LinkedList(v, p[1])
@pg.production('encaps_list : T_CONSTANT_ENCAPSED_STRING encaps_var')
def encaps_list_encaps_list_encapsed_and_t_constant_encapsed_encaps_var(self, p):
v = self._parse_doublequoted(p[0].getstr(), False)
return LinkedList(v, p[1])
@pg.production('encaps_var : T_VARIABLE')
def encaps_var_variable(self, p):
return NamedVariable(p[0].getstr()[1:])
@pg.production('encaps_var : T_VARIABLE T_OBJECT_OPERATOR T_STRING')
def encaps_var_obj_operator(self, p):
return GetAttr(NamedVariable(p[0].getstr()[1:]),
ConstantStr(p[2].getstr()), lineno=p[0].getsourcepos())
@pg.production('encaps_var : T_VARIABLE [ encaps_var_offset ]')
def encaps_var_variable_brackets(self, p):
return GetItem(NamedVariable(p[0].getstr()[1:]), p[2])
@pg.production('encaps_var_offset : T_STRING')
def encaps_var_offset_string(self, p):
return ConstantStr(p[0].getstr())
@pg.production('encaps_var_offset : T_NUM_STRING')
def encaps_var_offset_numeric(self, p):
return ConstantStr(p[0].getstr())
@pg.production('encaps_var_offset : T_VARIABLE')
def encaps_var_offset_variable(self, p):
return NamedVariable(p[0].getstr()[1:])
@pg.production('encaps_var : T_DOLLAR_OPEN_CURLY_BRACES variable }')
def encaps_var_curly_braces(self, p):
return p[1]
    @pg.production("common_scalar : T_LNUMBER")
    def common_scalar_lnumber(self, p):
        """Integer literal; PHP literals may be octal and literals too
        large for an int come back as floats from the converter.
        """
        from hippy.objects.convert import convert_string_to_number
        lineno = p[0].getsourcepos()
        num_str = p[0].getstr()
        w_num, _ = convert_string_to_number(num_str, can_be_octal=True)
        if w_num.tp == self.space.tp_int:
            return ConstantInt(self.space.int_w(w_num), lineno=lineno)
        else:
            return ConstantFloat(self.space.float_w(w_num), lineno=lineno)
@pg.production("common_scalar : T_DNUMBER")
def common_scalar_dnumber(self, p):
lineno = p[0].getsourcepos()
return ConstantFloat(float(p[0].getstr()), lineno=lineno)
    @staticmethod
    def _parse_doublequoted(s, lineno, escape_quotes=True, skip_borders=False):
        """Decode PHP double-quoted/heredoc backslash escapes in s.

        Handles \n \r \t \v \e \f, \\, \$, optional \" (escape_quotes),
        \xHH hex escapes and 1-3 digit octal escapes; anything else keeps
        the literal backslash.  skip_borders drops the first and last
        character (the surrounding quotes).  Returns a ConstantStr node.
        """
        if skip_borders:
            i = 1
            end = len(s) - 1
        else:
            i = 0
            end = len(s)
        r = []
        while i < end:
            c = s[i]
            if c == '\\':
                # A trailing lone backslash is kept literally.
                if i == end - 1:
                    r.append(c)
                    break
                next = s[i + 1]
                if next == 'n':
                    r.append('\n') # \x0A
                elif next == 'r':
                    r.append('\r') # \x0D
                elif next == 't':
                    r.append('\t') # \x09
                elif next == 'v':
                    r.append('\v') # \x0B
                elif next == 'e':
                    r.append('\x1B')
                elif next == 'f':
                    r.append('\f') # \x0C
                elif next == "$":
                    r.append("$")
                # NOTE: the '$' test below is unreachable -- the branch above
                # already consumed it.
                elif next == '\\' or next == '$' or (
                        escape_quotes and next == '"'):
                    r.append(next)
                elif next == 'x' and i < end - 2 and is_hexdigit(s[i + 2]):
                    # \xH or \xHH hex escape.
                    charvalue = hexdigit(s[i + 2])
                    if i < end - 3 and is_hexdigit(s[i + 3]):
                        charvalue <<= 4
                        charvalue |= hexdigit(s[i + 3])
                        i += 1
                    i += 1
                    r.append(chr(charvalue))
                elif '0' <= next <= '7':
                    # 1-3 digit octal escape; the third digit is masked to a
                    # byte first (PHP truncates octal escapes to 8 bits).
                    charvalue = ord(next) - ord('0')
                    if i < end - 2 and '0' <= s[i + 2] <= '7':
                        charvalue <<= 3
                        charvalue |= (ord(s[i + 2]) - ord('0'))
                        i += 1
                        if i < end - 2 and '0' <= s[i + 2] <= '7':
                            charvalue <<= 3
                            charvalue &= 0xFF
                            charvalue |= (ord(s[i + 2]) - ord('0'))
                            i += 1
                    r.append(chr(charvalue))
                else:
                    # Unknown escape: keep the backslash and the character.
                    r.append('\\')
                    r.append(next)
                i += 2
            else:
                r.append(c)
                i += 1
        return ConstantStr(''.join(r), lineno=lineno)
    @pg.production("common_scalar : T_CONSTANT_ENCAPSED_STRING")
    def common_scalar_constant_escaped_string(self, p):
        """Single- or double-quoted string literal with no interpolation.

        Single-quoted strings only honour \\' and \\\\; double-quoted
        strings get the full escape handling of _parse_doublequoted.
        """
        lineno = p[0].getsourcepos()
        s = p[0].getstr()
        #
        # A leading 'b' marks a (PHP) binary string literal; drop it.
        if s[0] == 'b':
            s = s[1:]
        last = len(s) - 1
        assert last >= 0
        got = []
        if s[0] == "'":
            assert s[last] == "'"
            i = 1
            while i < last:
                if s[i] == "\\" and (s[i + 1] == "\\" or s[i + 1] == "'"):
                    got.append(s[i + 1])
                    i += 2
                else:
                    got.append(s[i])
                    i += 1
        #
        if s[0] == '"':
            assert s[last] == '"'
            return self._parse_doublequoted(s, lineno, True, True)
        # remove "\'" and "\\"
        return ConstantStr(''.join(got), lineno=lineno)
#
@pg.production("common_scalar : T_LINE")
def magic_line(self, p):
lineno = p[0].getsourcepos()
return ConstantInt(lineno, lineno=lineno)
@pg.production("common_scalar : T_FILE")
def magic_file(self, p):
return FileMagic(lineno=p[0].getsourcepos())
@pg.production("common_scalar : T_DIR")
def magic_dir(self, p):
return DirMagic(lineno=p[0].getsourcepos())
@pg.production("common_scalar : T_CLASS_C")
def magic_class(self, p):
return ClassMagic(lineno=p[0].getsourcepos())
@pg.production("common_scalar : T_METHOD_C")
def magic_method(self, p):
return MethodMagic(lineno=p[0].getsourcepos())
@pg.production("common_scalar : T_FUNC_C")
def magic_func(self, p):
return FunctionMagic(lineno=p[0].getsourcepos())
@pg.production("common_scalar : T_NS_C")
def common_scalar_nsmagic(self, p):
raise ParseError("not implemented", p[0].getsourcepos())
    @pg.production("common_scalar : T_START_HEREDOC "
                   "T_ENCAPSED_AND_WHITESPACE T_END_HEREDOC")
    def heredoc(self, p):
        """Heredoc with no interpolation; quotes are not escaped inside."""
        lineno = p[1].getsourcepos()
        return self._parse_doublequoted(p[1].getstr(), lineno, False)
@pg.production("common_scalar : T_START_HEREDOC T_END_HEREDOC")
def heredoc_empty(self, p):
return ConstantStr("")
@pg.production("static_scalar : common_scalar")
def static_scalar_common_scalar(self, p):
return p[0]
@pg.production("static_scalar : namespace_name")
def static_scalar_namespace_name(self, p):
return NamedConstant(p[0].getstr(), lineno=p[0].getsourcepos())
@pg.production("static_scalar : static_class_constant")
def static_scalar_static_class_constant(self, p):
return p[0]
# static_scalar : T_NAMESPACE T_NS_SEPARATOR namespace_name
# static_scalar : T_NS_SEPARATOR namespace_name
@pg.production("static_scalar : - static_scalar")
def static_scalar_minus_static_scalar(self, p):
return p[1].uminus()
@pg.production("static_scalar : + static_scalar")
def static_scalar_plus_static_scalar(self, p):
return p[1]
@pg.production("static_class_constant : class_name "
"T_PAAMAYIM_NEKUDOTAYIM T_STRING")
def static_class_constant_class_name(self, p):
return ClassConstant(p[0], p[2].getstr())
@pg.production('static_scalar : T_ARRAY ( static_array_pair_list )')
def static_array(self, p):
hash = p[2]
hash.lineno = p[0].getsourcepos()
return hash
@pg.production('static_array_pair_list : empty')
def empty_static_array_pl(self, p):
return Hash([])
@pg.production('static_array_pair_list : non_empty_array_pair_list '
'possible_comma')
def nonempty_static_array_pl(self, p):
return p[0]
    @pg.production("variable : "
                   "base_variable_with_function_calls T_OBJECT_OPERATOR "
                   "object_property method_or_not variable_properties")
    def variable_object_operator(self, p):
        """Build nested attribute/index/call accesses for $a->b(...)->c[...]."""
        # NOTE(review): this first assignment is immediately overwritten
        # below and appears redundant.
        result = p[0]
        lineno = p[1].getsourcepos()
        result = self._apply_objprop(p[0], p[2], p[3], lineno)
        # Each trailing ->prop / ->method(...) segment wraps the previous
        # result.
        for x in p[4].getstmtlist():
            assert isinstance(x, TupleWrapper)
            prop, arguments = x.p1, x.p2
            result = self._apply_objprop(result, prop, arguments, lineno)
        return result
    def _apply_objprop(self, base, prop, arguments, lineno):
        """Apply one ->prop segment (with optional [..] indices and an
        optional call) to the AST node base, returning the new node.
        """
        if isinstance(prop, ObjectDimList):
            # Property followed by one or more [index] accesses.
            result = GetAttr(base, prop.head, lineno=prop.lineno)
            for index in prop.tail:
                result = GetItem(result, index, lineno=prop.lineno)
        else:
            result = GetAttr(base, prop, lineno=prop.lineno)
        if arguments is not None:
            # Method call: arguments.head holds the call args, arguments.tail
            # any [index] accesses applied to the call's result.
            assert isinstance(arguments, ObjectDimList)
            result = SimpleCall(result, arguments.head.getstmtlist(),
                                result.lineno)
            for index in arguments.tail:
                result = GetItem(result, index, lineno=prop.lineno)
        return result
@pg.production("array_function_dereference : "
"function_call [ dim_offset ]")
@pg.production("array_function_dereference : "
"array_function_dereference [ dim_offset ]")
def array_function_dereference_def(self, p):
return GetItem(p[0], p[2], lineno=p[1].getsourcepos())
@pg.production("variable : base_variable_with_function_calls")
def variable_base_variable_with_function_calls(self, p):
return p[0]
@pg.production("base_variable_with_function_calls : base_variable")
def base_variable_with_function_calls_base_variable(self, p):
return p[0]
@pg.production("base_variable_with_function_calls : "
"array_function_dereference")
def base_variable_with_function_calls_array(self, p):
return p[0]
@pg.production("base_variable_with_function_calls : function_call")
def base_variable_with_function_calls_function_call(self, p):
return p[0]
@pg.production("base_variable : reference_variable")
def | |
# <gh_stars>1-10
"""
Copyright 2013 IO Rodeo Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
# GCode program
# -----------------------------------------------------------------------------
class GCodeProg(object):
    """An ordered sequence of g-code commands that renders to program text."""

    def __init__(self):
        self.listOfCmds = []
        self.lineNumbers = False
        self.lineNumberStep = 2

    def add(self, obj, comment=False):
        """Append a single command, or splice in another program's commands."""
        if not isinstance(obj, GCodeCmd):
            self.listOfCmds.extend(obj.listOfCmds)
            return
        if comment:
            obj.comment = True
        self.listOfCmds.append(obj)

    def __str__(self):
        lines = [str(cmd) for cmd in self.listOfCmds]
        if self.lineNumbers:
            step = self.lineNumberStep
            lines = [
                'N{0} {1}'.format(i * step, text)
                for i, text in enumerate(lines)
            ]
        # Trailing newline at end of program.
        lines.append('')
        return '\n'.join(lines)

    def write(self, filename):
        """Write the rendered program to filename."""
        with open(filename, 'w') as f:
            f.write(str(self))
# Basic program starts (TODO: move this to separate module)
# ----------------------------------------------------------------------------------
class GenericStart(GCodeProg):
    """
    Simple startup routine ... cancels tool offset, cutter compensation,
    puts system in absolute mode, set units, sets feedrate (optional).
    """
    def __init__(self,feedrate=None, units='in',coord=1,comment=True):
        # comment=True annotates each emitted command with its description.
        super(GenericStart,self).__init__()
        self.add(Space())
        self.add(Comment('Generic Start'))
        self.add(CancelCutterCompensation(),comment=comment)
        self.add(CancelToolLengthOffset(),comment=comment)
        self.add(CancelCannedCycle(),comment=comment)
        self.add(CoordinateSystem(coord),comment=comment)
        self.add(AbsoluteMode(),comment=comment)
        self.add(Units(units),comment=comment)
        self.add(ExactPathMode(),comment=comment)
        # Feedrate is only emitted when explicitly requested.
        if feedrate is not None:
            self.add(FeedRate(feedrate),comment=comment)
# Base classes
# -----------------------------------------------------------------------------
class GCodeCmd(object):
    """
    Base class for all gcode commands.

    Subclasses set ``code`` and ``commentStr``; ``comment`` controls whether
    the description is appended in parentheses when rendering.
    """

    def __init__(self):
        self.motionDict = {}
        self.code = ';NONE'
        self.comment = False
        self.commentStr = ''

    def __str__(self):
        words = self.getCmdList()
        if self.comment and self.commentStr:
            words.append('({0})'.format(self.commentStr))
        return ' '.join(words)

    def getCmdList(self):
        """Return the command words as a list of strings."""
        return [self.code]
class GCodeSingleArgCmd(GCodeCmd):
    """
    Base class for gcode commands with a single argument.

    The argument is coerced with ``valueType`` (float, int, or str) when the
    command is rendered.
    """

    def __init__(self, value, valueType=float):
        super(GCodeSingleArgCmd, self).__init__()
        self.valueType = valueType  # float, int, str
        self.value = value

    def getCmdList(self):
        words = super(GCodeSingleArgCmd, self).getCmdList()
        words.append('{0}'.format(self.valueType(self.value)))
        return words
class GCodeAxisArgCmd(GCodeCmd):
    """
    Base class for gcode commands with axis arguments, such as RapidMotion,
    LinearFeed, etc.
    """
    # Canonical emission order for axis words.
    axisNames = ('x','y','z','a','b','c','u','v','w')
    def __init__(self, *args, **kwargs):
        super(GCodeAxisArgCmd,self).__init__()
        self.motionDict = normalizeToKwargs(self.axisNames,args,kwargs)
        # Require at least one axis value.  Fixed: used dict.iteritems(),
        # which does not exist on Python 3; .values() works on both (the
        # file already imports print_function for 2/3 compatibility).
        if all(v is None for v in self.motionDict.values()):
            raise RuntimeError('missing commands')
    def getCmdList(self):
        cmdList = super(GCodeAxisArgCmd,self).getCmdList()
        for axis in self.axisNames: # Use order in axisNames list
            motion = self.motionDict[axis]
            if motion is not None:
                cmdList.append('{0}{1:1.8f}'.format(axis.upper(),float(motion)))
        return cmdList
class GCodeHelicalMotion(GCodeCmd):
    """
    Base class for gcode commands involving helical motion (G2/G3).

    Subclasses must define ``motionArgs`` (ordered argument names to emit)
    and ``requiredKeys`` (at least one must be supplied).
    """
    def __init__(self,*args, **kwargs):
        super(GCodeHelicalMotion,self).__init__()
        self.direction = kwargs.pop('d').lower()
        if self.direction == 'cw':
            self.code = 'G2'
        elif self.direction == 'ccw':
            self.code = 'G3'
        else:
            # Fixed: message formatted an unbound name 'direction'
            # (NameError at raise time) and misspelled "direction".
            raise ValueError('unknown direction {0}'.format(self.direction))
        self.motionDict = kwargs
        # Make sure we have at least one of the required arguments
        test = False
        for k in self.requiredKeys:
            if k in kwargs:
                test = True
        if not test:
            raise RuntimeError('missing required key: {0}'.format(self.requiredKeys))
    def getCmdList(self):
        cmdList = super(GCodeHelicalMotion,self).getCmdList()
        for name in self.motionArgs:
            value = self.motionDict[name]
            if value is not None:
                if name == 'p':
                    # Turn count P must be an integer word.
                    cmdList.append('{0}{1}'.format(name.upper(),int(value)))
                else:
                    cmdList.append('{0}{1:1.8f}'.format(name.upper(),float(value)))
        return cmdList
# Motion commands
# -----------------------------------------------------------------------------
class RapidMotion(GCodeAxisArgCmd):
    """G0: rapid (non-cutting) motion to the given axis positions."""
    def __init__(self, *args, **kwargs):
        super(RapidMotion,self).__init__(*args, **kwargs)
        self.code = 'G0'
        self.commentStr = 'Rapid motion'
class LinearFeed(GCodeAxisArgCmd):
    """G1: linear feed (cutting) motion to the given axis positions."""
    def __init__(self, *args, **kwargs):
        super(LinearFeed,self).__init__(*args, **kwargs)
        self.code = 'G1'
        self.commentStr = 'Linear feed'
class Dwell(GCodeSingleArgCmd):
    """G4: dwell (pause); value is the dwell time."""
    def __init__(self,value):
        super(Dwell,self).__init__(value,valueType=float)
        self.code = 'G4'
        self.commentStr = 'Dwell'
    def getCmdList(self):
        # Deliberately skips GCodeSingleArgCmd.getCmdList (which would emit
        # the bare value) by super()-ing past it, so the value is emitted
        # exactly once with the required 'P' prefix.
        cmdList = super(GCodeSingleArgCmd,self).getCmdList()
        cmdList.append('P{0}'.format(self.valueType(self.value)))
        return cmdList
class HelicalMotionXY(GCodeHelicalMotion):
    """Helical/arc motion in the xy-plane (G2/G3 with i,j offsets)."""
    motionArgs = ('x', 'y', 'z', 'i', 'j', 'p')
    kwargsKeys = ('d' ,) + motionArgs
    requiredKeys = ('i', 'j') # Must have at least one of these
    def __init__(self,*args,**kwargs):
        """
        Arguments (positional or keyword)
        d = 'cw' or 'ccw'
        x = (optional) x end position
        y = (optional) y end position
        z = (optional) z end position helical motion
        i = x offset
        j = y offset
        p = (optional) number of turns
        Note, if x,y given then distances from arc center to start and end
        positions must be equal.
        """
        kwargs = normalizeToKwargs(self.kwargsKeys, args, kwargs)
        super(HelicalMotionXY,self).__init__(*args,**kwargs)
        self.commentStr = 'Helical motion xy-plane, {0}'.format(self.direction)
class HelicalMotionXZ(GCodeHelicalMotion):
    """Helical/arc motion in the xz-plane (G2/G3 with i,k offsets)."""
    motionArgs = ('x', 'z', 'y', 'i', 'k', 'p')
    kwargsKeys = ('d',) + motionArgs
    requiredKeys = ('i', 'k') # Must have at least one of these
    def __init__(self,*args,**kwargs):
        """
        Arguments (positional or keyword)
        d = 'cw' or 'ccw'
        x = (optional) x end position
        z = (optional) z end position
        y = (optional) y end position for helical motion
        i = x offset
        k = z offset
        p = (optional) number of turns
        Note, if x,z given then distances from arc center to start and end
        positions must be equal.
        """
        kwargs = normalizeToKwargs(self.kwargsKeys,args,kwargs)
        super(HelicalMotionXZ,self).__init__(*args, **kwargs)
        self.commentStr = 'Helical motion xz-plane, {0}'.format(self.direction)
class HelicalMotionYZ(GCodeHelicalMotion):
    """Helical/arc motion in the yz-plane (G2/G3 with j,k offsets)."""
    motionArgs = ('y', 'z', 'x', 'j', 'k', 'p')
    kwargsKeys = ('d',) + motionArgs
    # Fixed: was ('i','k'), but 'i' is not a yz-plane argument (the yz
    # offsets are j and k -- compare HelicalMotionXY's ('i','j') and
    # HelicalMotionXZ's ('i','k')), so the 'i' requirement could never
    # be satisfied.
    requiredKeys = ('j', 'k') # Must have at least one of these
    def __init__(self,*args, **kwargs):
        """
        Arguments (positional or keyword)
        d = 'cw' or 'ccw'
        y = (optional) y end position
        z = (optional) z end position
        x = (optional) x end position for helical motion
        j = y offset
        k = z offset
        p = (optional) number of turns
        Note, if y,z given then distances from arc center to start and end
        positions must be equal.
        """
        kwargs = normalizeToKwargs(self.kwargsKeys,args,kwargs)
        # Fixed: the base-class __init__ was called with no arguments, so
        # kwargs.pop('d') raised KeyError for every construction; forward
        # the normalized arguments like the XY/XZ variants do.
        super(HelicalMotionYZ,self).__init__(*args, **kwargs)
        self.commentStr = 'Helical motion yz-plane, {0}'.format(self.direction)
class CancelCannedCycle(GCodeCmd):
    """G80: cancel any active canned (drilling) cycle."""
    def __init__(self):
        super(CancelCannedCycle,self).__init__()
        self.code = 'G80'
        self.commentStr = 'Cancel canned cycle'
class QuadraticBSplineXY(GCodeCmd):
    """G5.1: quadratic B-spline in the xy-plane.

    All four arguments (x, y end point; i, j control-point offsets) are
    required.
    """
    kwargsKeys = ('x', 'y', 'i', 'j')
    def __init__(self,*args,**kwargs):
        kwargs = normalizeToKwargs(self.kwargsKeys,args,kwargs)
        for k in self.kwargsKeys:
            if k not in kwargs:
                raise RuntimeError('missing required argument {0}'.format(k))
        super(QuadraticBSplineXY,self).__init__()
        self.code = 'G5.1'
        self.commentStr = 'Quadratic B-Spline'
        self.splineArgs = kwargs
    def getCmdList(self):
        cmdList = super(QuadraticBSplineXY,self).getCmdList()
        for key in self.kwargsKeys: # Use order in axisNames list
            value = self.splineArgs[key]
            if value is not None:
                # NOTE(review): values are formatted via float()'s default
                # repr rather than the 1.8f format used elsewhere -- confirm
                # this is intended.
                cmdList.append('{0}{1}'.format(key.upper(),float(value)))
        return cmdList
# Canned Cycles
# -----------------------------------------------------------------------------
class DrillCycleBase(GCodeCmd):
    """ Base class for drilling cycles """
    # Subclasses define the accepted kwargs and which are mandatory.
    kwargsKeys = ()
    requiredKeys = ()
    def __init__(self,*args, **kwargs):
        super(DrillCycleBase,self).__init__()
        kwargs = normalizeToKwargs(self.kwargsKeys,args,kwargs)
        checkRequiredKwargs(self.requiredKeys,kwargs)
        self.params = kwargs
    def getCmdList(self):
        cmdList = super(DrillCycleBase,self).getCmdList()
        for name in self.kwargsKeys:
            value = self.params[name]
            if value is not None:
                # 'l' is a repetition count and must be an integer word;
                # all other parameters are coordinates/times (floats).
                if name != 'l':
                    value = float(value)
                else:
                    value = int(value)
                cmdList.append('{0}{1}'.format(name.upper(),value))
        return cmdList
class DrillCycle(DrillCycleBase):
    """G81 plain drill cycle, or G82 (drill with dwell) when p is given."""
    kwargsKeys = ('x','y','z','r','l','p')
    requiredKeys = ('x','y','z','r')
    def __init__(self,*args,**kwargs):
        """
        x = drill x position
        y = drill y position
        z = drill z position (final)
        r = feed start/retract z position
        l = (optional) number of repetitions
        p = (optional) dwell time in secs
        """
        super(DrillCycle,self).__init__(*args,**kwargs)
        # The presence of a dwell time selects the G82 variant.
        if self.params['p'] is None:
            self.code = 'G81'
            self.commentStr = 'Drill cycle'
        else:
            self.code = 'G82'
            self.commentStr = 'Drill cycle w/ dwell'
class PeckDrillCycle(DrillCycleBase):
    """G83 peck drill cycle: drills in increments of q, retracting between
    pecks.
    """
    kwargsKeys = ('x','y','z','r','l','q')
    # Restored: the source contained a corrupted placeholder
    # ('x','<KEY>').  Mirrors DrillCycle's required position/retract
    # parameters plus the mandatory peck increment q.
    requiredKeys = ('x','y','z','r','q')
    def __init__(self,*args,**kwargs):
        """
        x = drill x position
        y = drill y position
        z = drill z position (final)
        r = feed start/retract z position
        l = (optional) number of repetitions
        q = increment along z axis (must be > 0)
        """
        super(PeckDrillCycle,self).__init__(*args,**kwargs)
        if self.params['q'] <= 0:
            # Fixed: message said '>= 0', contradicting the q <= 0 check.
            raise ValueError('increment q must be > 0')
        self.code = 'G83'
        self.commentStr = 'Peck drill cycle'
# Distance Mode
# -----------------------------------------------------------------------------
class AbsoluteMode(GCodeCmd):
    """G90: absolute distance mode."""
    def __init__(self):
        super(AbsoluteMode,self).__init__()
        self.code = 'G90'
        self.commentStr = 'Set absolute distance mode'
class IncrementalMode(GCodeCmd):
    """G91: interpret axis words as increments from the current position."""

    def __init__(self):
        super(IncrementalMode, self).__init__()
        self.commentStr = 'Set incremental distance mode'
        self.code = 'G91'
# Feedrate Mode
# -----------------------------------------------------------------------------
class InverseTimeMode(GCodeCmd):
    """G93: inverse-time feedrate mode."""

    def __init__(self):
        super(InverseTimeMode, self).__init__()
        self.commentStr = 'Set feedrate mode to inverse time'
        self.code = 'G93'
class UnitsPerMinuteMode(GCodeCmd):
    """G94: units-per-minute feedrate mode."""

    def __init__(self):
        super(UnitsPerMinuteMode, self).__init__()
        self.commentStr = 'Set feedrate mode to units per minute'
        self.code = 'G94'
class UnitsPerRevMode(GCodeCmd):
    """G95: units-per-revolution feedrate mode."""

    def __init__(self):
        super(UnitsPerRevMode, self).__init__()
        self.commentStr = 'Set feedrate mode to units per revolution'
        self.code = 'G95'
# Coolant
# -----------------------------------------------------------------------------
class MistCoolantOn(GCodeCmd):
    """M7: switch mist coolant on."""

    def __init__(self):
        super(MistCoolantOn, self).__init__()
        self.commentStr = 'Turn mist coolant on'
        self.code = 'M7'
class FloodCoolantOn(GCodeCmd):
    """M8: switch flood coolant on."""

    def __init__(self):
        super(FloodCoolantOn, self).__init__()
        self.commentStr = 'Turn flood coolant on'
        self.code = 'M8'
class CoolantOff(GCodeCmd):
    """M9: switch all coolant (mist and flood) off."""

    def __init__(self):
        super(CoolantOff, self).__init__()
        self.code = 'M9'
        # Bug fix: the attribute was misspelled 'commantStr', so the comment
        # text was never picked up when the command was rendered.
        self.commentStr = 'Turn all coolant off'
# Tool length offset
# -----------------------------------------------------------------------------
class EnableToolLengthOffset(GCodeCmd):
    """G43: enable tool length compensation, optionally naming the tool."""

    def __init__(self, tool=None):
        super(EnableToolLengthOffset, self).__init__()
        self.code = 'G43'
        self.tool = tool
        if tool is None:
            self.commentStr = "Tool length offset enabled"
        else:
            self.commentStr = "Tool length offset enabled for tool {1}".format(
                "Tool length offset enabled", tool)

    def getCmdList(self):
        """Append the H word selecting the tool's offset register, if any."""
        cmdList = super(EnableToolLengthOffset, self).getCmdList()
        if self.tool is not None:
            cmdList.append('H{0}'.format(int(self.tool)))
        return cmdList
class SetToolLengthOffset(GCodeAxisArgCmd):
    """G43.1: dynamic tool length offset from the supplied axis values."""

    def __init__(self, *arg, **kwarg):
        super(SetToolLengthOffset, self).__init__(*arg, **kwarg)
        self.commentStr = "Set tool length offset"
        self.code = "G43.1"
class CancelToolLengthOffset(GCodeCmd):
    """G49: cancel any active tool length offset."""

    def __init__(self):
        super(CancelToolLengthOffset, self).__init__()
        self.commentStr = "Cancel tool length offset"
        self.code = 'G49'
# Cutter compensation
# -----------------------------------------------------------------------------
class CancelCutterCompensation(GCodeCmd):
    """G40: cancel cutter radius compensation."""

    def __init__(self):
        super(CancelCutterCompensation, self).__init__()
        self.commentStr = 'Cancel cutter radius compensation'
        self.code = 'G40'
class CutterCompensation(GCodeCmd):
| |
deque = Deque()
>>> deque += 'ab'
>>> deque.popleft()
'a'
>>> deque.popleft()
'b'
>>> deque.popleft()
Traceback (most recent call last):
...
IndexError: pop from an empty deque
:return: value at front of deque
:raises IndexError: if deque is empty
"""
default = None, ENOVAL
_, value = self._cache.pull(default=default, retry=True)
if value is ENOVAL:
raise IndexError('pop from an empty deque')
return value
def remove(self, value):
    """Remove first occurrence of `value` in deque.

    >>> deque = Deque()
    >>> deque += 'aab'
    >>> deque.remove('a')
    >>> list(deque)
    ['a', 'b']
    >>> deque.remove('b')
    >>> list(deque)
    ['a']
    >>> deque.remove('c')
    Traceback (most recent call last):
        ...
    ValueError: deque.remove(value): value not in deque

    :param value: value to remove
    :raises ValueError: if value not in deque
    """
    cache = self._cache
    for key in cache.iterkeys():
        # Entries may vanish concurrently; treat a missing key as never
        # seen and keep scanning.
        try:
            stored = cache[key]
        except KeyError:
            continue
        if value == stored:
            try:
                del cache[key]
            except KeyError:
                # Another thread/process deleted it first; keep scanning.
                continue
            return
    raise ValueError('deque.remove(value): value not in deque')
def reverse(self):
    """Reverse deque in place.

    >>> deque = Deque()
    >>> deque += 'abc'
    >>> deque.reverse()
    >>> list(deque)
    ['c', 'b', 'a']
    """
    # A swap-based algorithm (exchanging values at paired keys through a
    # forward and a reverse iterator) could avoid copying values; for now,
    # materialize a reversed scratch deque on disk and copy it back.
    scratch = Deque(iterable=reversed(self))
    self.clear()
    self.extend(scratch)
    scratch_dir = scratch.directory
    del scratch  # release the backing cache before removing its directory
    rmtree(scratch_dir)
def rotate(self, steps=1):
    """Rotate deque right by `steps`.

    If steps is negative then rotate left.

    >>> deque = Deque()
    >>> deque += range(5)
    >>> deque.rotate(2)
    >>> list(deque)
    [3, 4, 0, 1, 2]
    >>> deque.rotate(-1)
    >>> list(deque)
    [4, 0, 1, 2, 3]

    :param int steps: number of steps to rotate (default 1)
    """
    if not isinstance(steps, int):
        type_name = type(steps).__name__
        raise TypeError('integer argument expected, got %s' % type_name)
    size = len(self)
    if not size:
        return
    if steps >= 0:
        # Rotate right: repeatedly move the tail element to the front.
        for _ in range(steps % size):
            try:
                tail = self.pop()
            except IndexError:
                return
            self.appendleft(tail)
    else:
        # Rotate left: repeatedly move the head element to the back.
        for _ in range((-steps) % size):
            try:
                head = self.popleft()
            except IndexError:
                return
            self.append(head)
__hash__ = None # type: ignore
@contextmanager
def transact(self):
    """Context manager to perform a transaction by locking the deque.

    While the deque is locked, no other write operation is permitted.
    Transactions should therefore be as short as possible. Read and write
    operations performed in a transaction are atomic. Read operations may
    occur concurrent to a transaction.

    Transactions may be nested and may not be shared between threads.

    >>> from diskcache import Deque
    >>> deque = Deque()
    >>> deque += range(5)
    >>> with deque.transact():  # Atomically rotate elements.
    ...     value = deque.pop()
    ...     deque.appendleft(value)
    >>> list(deque)
    [4, 0, 1, 2, 3]

    :return: context manager for use in `with` statement
    """
    # Delegate locking to the backing cache; retry=True waits for the lock
    # instead of failing fast.
    with self._cache.transact(retry=True):
        yield
def save(self, path: "str | None" = None, compressed: bool = False):
    """Persist the backing cache to *path*; see ``Cache.save`` for details."""
    return self._cache.save(path, compressed=compressed)
async def async_save(self, path: "str | None" = None, compressed: bool = False):
    """Asynchronously persist the backing cache; see ``Cache.async_save``."""
    return await self._cache.async_save(path, compressed=compressed)
@classmethod
def load(cls, src_path: str, directory: str, table_name: str = CachezConfigz.default_table, **settings) -> 'Deque':
    """Restore a deque from a previously saved cache file.

    :param src_path: path of the saved cache to load
    :param directory: directory in which to materialize the loaded cache
    :param table_name: cache table to use (defaults to the configured table)
    :param settings: additional settings forwarded to ``Cache.load``
    :return: a new deque backed by the loaded cache
    """
    _cache = Cache.load(src_path=src_path, directory=directory, table_name=table_name, **settings)
    # Bug fix: construct via `cls` (not hard-coded Deque) so subclasses of
    # Deque get instances of their own type.
    return cls.fromcache(_cache)
@classmethod
async def async_load(cls, src_path: str, directory: str, table_name: str = CachezConfigz.default_table, **settings) -> 'Deque':
    """Asynchronously restore a deque from a previously saved cache file.

    :param src_path: path of the saved cache to load
    :param directory: directory in which to materialize the loaded cache
    :param table_name: cache table to use (defaults to the configured table)
    :param settings: additional settings forwarded to ``Cache.async_load``
    :return: a new deque backed by the loaded cache
    """
    _cache = await Cache.async_load(src_path=src_path, directory=directory, table_name=table_name, **settings)
    # Bug fix: construct via `cls` (not hard-coded Deque) so subclasses of
    # Deque get instances of their own type.
    return cls.fromcache(_cache)
class Index(MutableMapping):
"""Persistent mutable mapping with insertion order iteration.
Items are serialized to disk. Index may be initialized from directory path
where items are stored.
Hashing protocol is not used. Keys are looked up by their serialized
format. See ``diskcache.Disk`` for details.
>>> index = Index()
>>> index.update([('a', 1), ('b', 2), ('c', 3)])
>>> index['a']
1
>>> list(index)
['a', 'b', 'c']
>>> len(index)
3
>>> del index['b']
>>> index.popitem()
('c', 3)
"""
def __init__(self, *args, **kwargs):
    """Initialize index in directory and update items.

    Optional first argument may be string specifying directory where items
    are stored. When None or not given, temporary directory is created.

    >>> index = Index({'a': 1, 'b': 2, 'c': 3})
    >>> len(index)
    3
    >>> directory = index.directory
    >>> inventory = Index(directory, d=4)
    >>> inventory['b']
    2
    >>> len(inventory)
    4
    """
    directory = None
    if args:
        first = args[0]
        if isinstance(first, (bytes, str)):
            # Leading string/bytes argument names the storage directory.
            directory = first
            args = args[1:]
        elif first is None:
            # Explicit None placeholder: consume it, use a temp directory.
            args = args[1:]
    # Eviction is disabled so the index never silently drops items.
    self._cache = Cache(directory, eviction_policy='none')
    self.update(*args, **kwargs)
@classmethod
def fromcache(cls, cache, *args, **kwargs):
    """Initialize index using `cache` and update items.

    >>> cache = Cache()
    >>> index = Index.fromcache(cache, {'a': 1, 'b': 2, 'c': 3})
    >>> index.cache is cache
    True
    >>> len(index)
    3
    >>> 'b' in index
    True
    >>> index['c']
    3

    :param Cache cache: cache to use
    :param args: mapping or sequence of items
    :param kwargs: mapping of items
    :return: initialized Index
    """
    # pylint: disable=no-member,protected-access
    # Bypass __init__ so the supplied cache is adopted as-is instead of a
    # fresh one being created.
    instance = cls.__new__(cls)
    instance._cache = cache
    instance.update(*args, **kwargs)
    return instance
@property
def cache(self):
    "Cache used by index."
    # Exposed read-only so callers can share it (see Index.fromcache).
    return self._cache
@property
def directory(self):
    "Directory path where items are stored."
    # Delegates to the backing cache's storage location.
    return self._cache.directory
def __getitem__(self, key):
    """index.__getitem__(key) <==> index[key]

    Return corresponding value for `key` in index.

    >>> index = Index()
    >>> index.update({'a': 1, 'b': 2})
    >>> index['a']
    1
    >>> index['b']
    2
    >>> index['c']
    Traceback (most recent call last):
        ...
    KeyError: 'c'

    :param key: key for item
    :return: value for item in index with given key
    :raises KeyError: if key is not found
    """
    # Lookup (and KeyError for missing keys) delegates to the backing cache.
    return self._cache[key]
def __setitem__(self, key, value):
    """index.__setitem__(key, value) <==> index[key] = value

    Set `key` and `value` item in index.

    >>> index = Index()
    >>> index['a'] = 1
    >>> index[0] = None
    >>> len(index)
    2

    :param key: key for item
    :param value: value for item
    """
    # Storage delegates to the backing cache; an existing value for the
    # key is overwritten.
    self._cache[key] = value
def __delitem__(self, key):
    """index.__delitem__(key) <==> del index[key]

    Delete corresponding item for `key` from index.

    >>> index = Index()
    >>> index.update({'a': 1, 'b': 2})
    >>> del index['a']
    >>> del index['b']
    >>> len(index)
    0
    >>> del index['c']
    Traceback (most recent call last):
        ...
    KeyError: 'c'

    :param key: key for item
    :raises KeyError: if key is not found
    """
    # Deletion (and KeyError for missing keys) delegates to the backing cache.
    del self._cache[key]
def setdefault(self, key, default=None):
    """Set and get value for `key` in index using `default`.

    If `key` is not in index then set corresponding value to `default`. If
    `key` is in index then ignore `default` and return existing value.

    >>> index = Index()
    >>> index.setdefault('a', 0)
    0
    >>> index.setdefault('a', 1)
    0

    :param key: key for item
    :param default: value if key is missing (default None)
    :return: value for item in index with given key
    """
    cache = self._cache
    # Loop closes the race between a failed lookup and a concurrent delete
    # of the key we just added.
    while True:
        try:
            return cache[key]
        except KeyError:
            cache.add(key, default, retry=True)
def peekitem(self, last=True):
    """Peek at key and value item pair in index based on iteration order.

    >>> index = Index()
    >>> for num, letter in enumerate('xyz'):
    ...     index[letter] = num
    >>> index.peekitem()
    ('z', 2)
    >>> index.peekitem(last=False)
    ('x', 0)

    :param bool last: last item in iteration order (default True)
    :return: key and value item pair
    :raises KeyError: if cache is empty
    """
    # Non-destructive: delegates to the cache without removing the item.
    return self._cache.peekitem(last, retry=True)
def pop(self, key, default=ENOVAL):
    """Remove corresponding item for `key` from index and return value.

    If `key` is missing then return `default`. If `default` is `ENOVAL`
    then raise KeyError.

    >>> index = Index({'a': 1, 'b': 2})
    >>> index.pop('a')
    1
    >>> index.pop('b')
    2
    >>> index.pop('c', default=3)
    3
    >>> index.pop('d')
    Traceback (most recent call last):
        ...
    KeyError: 'd'

    :param key: key for item
    :param default: return value if key is missing (default ENOVAL)
    :return: value for item if key is found else default
    :raises KeyError: if key is not found and default is ENOVAL
    """
    # ENOVAL doubles as the "no default given" sentinel: the cache echoes
    # it back when the key is absent, which we translate into KeyError.
    result = self._cache.pop(key, default=default, retry=True)
    if result is ENOVAL:
        raise KeyError(key)
    return result
def popitem(self, last=True):
"""Remove and return item pair.
Item pairs are returned in last-in-first-out (LIFO) order if last is
True else first-in-first-out (FIFO) order. LIFO order imitates a stack
and FIFO order imitates a queue.
>>> index = Index()
>>> index.update([('a', 1), ('b', 2), ('c', 3)])
| |
__init__(self,
req,
substrate,
flow_values,
relative_decomposition_abortion_epsilon,
absolute_decomposition_abortion_epsilon,
decomposition_epsilon,
extended_graph=None, # optional, can be provided by the caller to save time
substrate_resources=None, # optional, can be provided by the caller to save time
logger=None):
if logger is None:
logger = util.get_logger("Decomposition", make_file=False, propagate=True)
self.logger = logger
# TODO: Replace with new SubstrateX
self.substrate = substrate
if substrate_resources is None:
substrate_resources = list(self.substrate.edges)
for ntype in self.substrate.get_types():
for snode in self.substrate.get_nodes_by_type(ntype):
substrate_resources.append((ntype, snode))
self.substrate_resources = substrate_resources
self.flow_values = flow_values
self.request = req
if extended_graph is None:
extended_graph = extendedcactusgraph.ExtendedCactusGraph(req, self.substrate)
self.ext_graph = extended_graph
self._mapping_count = None
self.relative_decomposition_abortion_epsilon = relative_decomposition_abortion_epsilon
self.absolute_decomposition_abortion_epsilon = absolute_decomposition_abortion_epsilon
self.decomposition_epsilon = decomposition_epsilon
self._used_ext_graph_edge_resources = None # set of edges in the extended graph that are used in a single iteration of the decomposition algorithm
self._used_ext_graph_node_resources = None # same for nodes
self._abort_decomposition_based_on_numerical_trouble = False
self.lost_flow_in_the_decomposition = 0.0
self._predecessor = {node : None for node in self.ext_graph.nodes}
self._changed_predecessors = []
self._stack = []
self._propagating_neighbors = deque()
self.original_embedding_flow_value = None
self.scaling_factor = None
self.inverse_scaling_factor = None
def scale_flow_to_unit_flow(self):
    """Scale all flow values so that the total embedding flow becomes 1.0.

    Stores the inverse factor so extracted mapping flows can later be
    reported in original units. Scaling is skipped when the original flow
    is below the absolute abortion epsilon, or when the factor is within
    0.1% of 1 (treated as negligible).
    """
    self.original_embedding_flow_value = self.flow_values["embedding"]
    if self.original_embedding_flow_value < self.absolute_decomposition_abortion_epsilon:
        self.logger.info("Performing no scaling in the first place, as the original flow value {} is negligible.".format(self.original_embedding_flow_value))
        self.inverse_scaling_factor = 1.0
        self.scaling_factor = 1.0
        return
    self.inverse_scaling_factor = self.original_embedding_flow_value
    self.scaling_factor = 1.0 / self.original_embedding_flow_value
    if self.scaling_factor > 1.001:
        self.logger.info("Scaling all flows for the decomposition by factor {}.".format(self.scaling_factor))
    else:
        self.logger.info("Not scaling flows, as the scaling factor {} is negligible.".format(self.scaling_factor))
        self.scaling_factor = 1.0
        self.inverse_scaling_factor = 1.0
        return
    # min(1.0, ...) guards against scaled values creeping above 1 through
    # floating-point error.
    self.flow_values["embedding"] = min(1.0, self.flow_values["embedding"] * self.scaling_factor)
    for vnode in self.flow_values["node"].keys():
        for snode in self.flow_values["node"][vnode].keys():
            self.flow_values["node"][vnode][snode] = min(1.0, self.flow_values["node"][vnode][snode] * self.scaling_factor)
    for eedge in self.flow_values["edge"].keys():
        self.flow_values["edge"][eedge] = min(1.0, self.flow_values["edge"][eedge] * self.scaling_factor)
def compute_mappings(self):
    """Decompose the fractional embedding flow into a list of mappings.

    Repeatedly extracts one mapping from the current flow solution,
    subtracts the bottleneck flow along all used extended-graph resources,
    and collects the result, until the remaining flow drops below the
    relative or absolute abortion epsilon. Flow lost to numerical trouble
    is accumulated in ``self.lost_flow_in_the_decomposition``.

    :return: list of (mapping, flow_value, substrate_load) triples
    """
    result = []
    self._mapping_count = 0
    self.logger.info("\n")
    self.logger.info(
        "\t:Starting decomposition for request {} having a total flow of {}".format(self.request.name, self.flow_values["embedding"]))
    # NOTE(review): initial_flow_value is never read afterwards.
    initial_flow_value = self.flow_values["embedding"]
    # Normalize flows so that the total embedding flow is (at most) 1.0.
    self.scale_flow_to_unit_flow()
    while self.flow_values["embedding"] > self.relative_decomposition_abortion_epsilon and \
            self.flow_values["embedding"] * self.inverse_scaling_factor > self.absolute_decomposition_abortion_epsilon:
        self._used_ext_graph_edge_resources = set()  # use the request's original root to store the maximal possible flow according to embedding value
        self._used_ext_graph_node_resources = set()
        mapping = self._decomposition_iteration()
        # diminish the flow on the used edges
        node_flow_list = []
        for ext_node in self._used_ext_graph_node_resources:
            i, u = self.ext_graph.get_associated_original_resources(ext_node)
            value = self.flow_values["node"][i][u]
            if value > 0:
                node_flow_list.append(value)
        # Bottleneck flow: the extracted mapping can carry at most the
        # minimum remaining flow across all used node and edge resources.
        flow = min(
            min(node_flow_list),
            self.flow_values["embedding"],
            min(self.flow_values["edge"][eedge] for eedge in self._used_ext_graph_edge_resources)
        )
        if flow < 0:
            self.logger.error("ERROR: Decomposition Termination: Flow should never be negative! {}".format(flow))
            break
        self.flow_values["embedding"] -= flow
        for eedge in self._used_ext_graph_edge_resources:
            self.flow_values["edge"][eedge] -= flow
        for enode in self._used_ext_graph_node_resources:
            i, u = self.ext_graph.get_associated_original_resources(enode)
            self.flow_values["node"][i][u] -= flow
        if self._abort_decomposition_based_on_numerical_trouble:
            # The iteration failed part-way: account the flow as lost,
            # reset the flag, and keep decomposing the remainder.
            self.lost_flow_in_the_decomposition += flow * self.inverse_scaling_factor
            self.logger.warning("Based on numerical trouble only a partial mapping of value {} was extracted.".format(flow))
            self.logger.warning("Trying to continue with {} flow to decompose".format(self.flow_values["embedding"]))
            self._abort_decomposition_based_on_numerical_trouble = False
        else:
            #self.logger.info("Extracted mapping has flow {}".format(flow))
            load = self._calculate_substrate_load_for_mapping(mapping)
            result.append((mapping, flow * self.inverse_scaling_factor, load))
    remaining_flow = self.flow_values["embedding"]
    if remaining_flow > self.relative_decomposition_abortion_epsilon and remaining_flow * self.inverse_scaling_factor > self.absolute_decomposition_abortion_epsilon:
        self.logger.error("ERROR: Aborted decomposition with {} flow left, which does not meet relative or absolute termination criterion {}, resp. {}".format(
            remaining_flow * self.inverse_scaling_factor, self.relative_decomposition_abortion_epsilon, self.absolute_decomposition_abortion_epsilon
        ))
    else:
        self.logger.info(
            "Aborted decomposition with only {} flow left (either less than the specified relative abortion epsilon {} or the absolute abortion epsilon {})".format(
                remaining_flow * self.inverse_scaling_factor, self.relative_decomposition_abortion_epsilon, self.absolute_decomposition_abortion_epsilon
            ))
    self.logger.info("Given partial mappings of flow {}, the total lost flow is {}.".format(
        self.lost_flow_in_the_decomposition, self.lost_flow_in_the_decomposition + remaining_flow * self.inverse_scaling_factor))
    self.lost_flow_in_the_decomposition += remaining_flow * self.inverse_scaling_factor
    if self.original_embedding_flow_value > self.absolute_decomposition_abortion_epsilon:
        self.logger.info("Overall, {}% of the flow was successfully decomposed.".format(100.0*((self.original_embedding_flow_value - self.lost_flow_in_the_decomposition)/self.original_embedding_flow_value)))
    else:
        self.logger.info("Due to the negligible initial flow ({}), no percentage of lost flow is given.".format(self.original_embedding_flow_value))
    return result
def _decomposition_iteration(self):
    """Extract a single mapping from the remaining flow.

    Maps the extended-graph root first, then processes all cycles and
    paths of the extended cactus graph that start at already-processed
    request nodes, recording used extended nodes/edges in the
    ``_used_ext_graph_*`` sets as a side effect.

    :return: a solutions.Mapping, or None if no root mapping was found
    """
    self._mapping_count += 1
    mapping_name = modelcreator.construct_name("mapping_", req_name=self.request.name, other=self._mapping_count)
    mapping = solutions.Mapping(mapping_name, self.request, self.substrate, True)
    ext_root = self._map_root_node_on_node_with_nonzero_flow(mapping)
    if ext_root is None:
        self._abort_decomposition_based_on_numerical_trouble = True
        return None
    self._used_ext_graph_node_resources.add(ext_root)
    queue = {self.ext_graph.root}
    while queue:
        i = queue.pop()
        # Cycles rooted at i: both branches are resolved and must agree on
        # the sink node's mapping.
        for cycle in self.ext_graph.ecg_cycles:
            if cycle.start_node != i:
                continue
            branch_1 = cycle.ext_graph_branches[0]
            branch_2 = cycle.ext_graph_branches[1]
            flow_path_1, ext_path_1, sink = self._choose_flow_path_in_extended_cycle(branch_1, mapping)
            if flow_path_1 is None:
                self._abort_decomposition_based_on_numerical_trouble = True
                break
            # The second branch is constrained to the sink chosen above.
            flow_path_2, ext_path_2, sink_2 = self._choose_flow_path_in_extended_cycle(branch_2, mapping, sink=sink)
            if flow_path_2 is None:
                self._abort_decomposition_based_on_numerical_trouble = True
                break
            if sink_2 != sink:
                msg = "Both branches of cycle need to start & end in the same node: {}, {}".format(sink, sink_2)
                raise CactusDecompositionError(msg)
            self._process_path(ext_path_1, flow_path_1, mapping, queue)
            self._process_path(ext_path_2, flow_path_2, mapping, queue)
        # Simple paths rooted at i.
        for ext_path in self.ext_graph.ecg_paths:
            if self._abort_decomposition_based_on_numerical_trouble:
                break
            if ext_path.start_node != i:
                continue
            flow_path, sink = self._choose_flow_path_in_extended_path(ext_path, mapping)
            if flow_path is not None:
                self._process_path(ext_path, flow_path, mapping, queue)
            else:
                self._abort_decomposition_based_on_numerical_trouble = True
    return mapping
def _map_root_node_on_node_with_nonzero_flow(self, mapping):
    """Map the request root onto the substrate node whose extended source
    node carries the largest outgoing edge flow.

    :return: the chosen extended source node, or None (with the numerical
        trouble flag set) when no candidate exceeds decomposition_epsilon
    """
    root = self.ext_graph.root

    def best_outflow(candidate):
        _, ext_source = candidate
        return max(self.flow_values["edge"].get(eedge, 0.0)
                   for eedge in self.ext_graph.out_edges[ext_source])

    substrate_node, ext_source = max(iter(self.ext_graph.source_nodes[root].items()), key=best_outflow)
    if best_outflow((substrate_node, ext_source)) > self.decomposition_epsilon:
        mapping.map_node(root, substrate_node)
        return ext_source
    self.logger.warning("Decomposition Termination: No valid root mapping found for {}.".format(self.request.name))
    self._abort_decomposition_based_on_numerical_trouble = True
    return None
def _choose_flow_path_in_extended_cycle(self, branch, mapping, sink=None):
    """Choose a flow-carrying path through one branch of an extended cycle.

    For the first branch (``sink is None``) the target-node mapping is
    still free and the best branch copy is selected; for the second branch
    the sink is fixed and the matching branch copy must be used.

    :return: (edge path, extended path, sink node), or (None, None, None)
        when the first branch could not be chosen
    """
    if sink is None:  # this is the case for the first processed branch
        ext_path = self._choose_path_for_cycle_branch(branch, self.ext_graph, mapping)
        if ext_path is None:
            self._abort_decomposition_based_on_numerical_trouble = True
            return None, None, None
        sink_nodes = [sink_node for (last_layer_node, sink_node) in ext_path.extended_path[ext_path.end_node]]
    else:  # for the second processed branch, the sink mapping is already fixed and this branch must be chosen accordingly
        u_sink = self.ext_graph.node[sink]["substrate_node"]
        ext_path = branch[u_sink]
        sink_nodes = [sink]
    eedge_path, sink = self._choose_flow_path_in_extended_path(ext_path, mapping, sink_nodes=sink_nodes)
    return eedge_path, ext_path, sink
def _choose_path_for_cycle_branch(self, branch, ext_graph, mapping):
    """Select the branch copy (one per possible target-node mapping) whose
    extended source carries the most outgoing flow into the path.

    :param branch: dict mapping end node -> extended path copy
    :return: the chosen extended path, or None (with the numerical trouble
        flag set) when no copy exceeds decomposition_epsilon
    """
    chosen_path = None

    # Iterate over the branch copies corresponding to different target node mappings
    def outgoing_flow_of_path(value):
        end_node, path = value
        ext_source = ext_graph.source_nodes[path.start_node][mapping.mapping_nodes[path.start_node]]
        return max(self.flow_values["edge"].get((eetail, eehead), 0.0) for (eetail, eehead) in ext_graph.out_edges[ext_source] if eehead in path.extended_nodes)

    # Removed: an unused `value_list` that re-evaluated outgoing_flow_of_path
    # for every branch copy a second time with no effect.
    end_node, path = max(iter(branch.items()), key=outgoing_flow_of_path)
    if outgoing_flow_of_path((end_node, path)) > self.decomposition_epsilon:
        chosen_path = path
    if chosen_path is None:
        self.logger.warning("Decomposition Termination: Couldn't determine a branch to start the decomposition")
        self._abort_decomposition_based_on_numerical_trouble = True
    return chosen_path
def _choose_flow_path_in_extended_path(self, path, mapping, sink_nodes=None):
    """Depth-first search through `path`'s extended edges, following only
    edges carrying more than decomposition_epsilon flow, until one of the
    viable sink nodes is reached.

    :param sink_nodes: restrict the search to these sinks; when None, all
        sink nodes of the path's end node are viable
    :return: (list of extended edges from source to sink, sink node), or
        (None, None) when no flow-carrying path exists (numerical trouble)
    """
    # if a sink node is specified, we search the extended graph until that node is reached. Otherwise, all sink nodes are viable:
    if sink_nodes is None:
        sink_nodes = list(self.ext_graph.sink_nodes[path.end_node].values())
    ext_source = self.ext_graph.source_nodes[path.start_node][mapping.mapping_nodes[path.start_node]]
    # Reset the predecessor bookkeeping left over from the previous search;
    # only entries recorded in _changed_predecessors need clearing.
    for node in self._changed_predecessors:
        self._predecessor[node] = None
    del self._changed_predecessors[:]
    del self._stack[:]
    self._stack.append(ext_source)
    sink = None
    while len(self._stack) > 0:
        current_enode = self._stack.pop()
        if current_enode in sink_nodes:
            sink = current_enode
            break
        max_flow = 0.0
        for eedge in self.ext_graph.out_edges[current_enode]:
            if eedge not in path.extended_edges:
                continue
            ee_tail, ee_head = eedge
            # ignore flow-less edges:
            flow = self.flow_values["edge"].get(eedge, 0.0)
            if flow > self.decomposition_epsilon and self._predecessor[ee_head] is None:
                # Keep the highest-flow neighbor seen so far at the back of
                # the deque so it lands on top of the stack below.
                if flow > max_flow:
                    self._propagating_neighbors.append(ee_head)
                    max_flow = flow
                else:
                    self._propagating_neighbors.appendleft(ee_head)
        while len(self._propagating_neighbors) > 0:
            ee_head = self._propagating_neighbors.popleft()
            self._stack.append(ee_head)
            self._changed_predecessors.append(ee_head)
            self._predecessor[ee_head] = current_enode
    if sink is None:
        # Record every reached edge/node so the caller can subtract the
        # stranded flow from those resources.
        for node in self.ext_graph.nodes:
            # Idiom fix: was `!= None`; identity comparison is correct here.
            if self._predecessor[node] is not None:
                self._used_ext_graph_edge_resources.add((self._predecessor[node], node))
        self._used_ext_graph_node_resources.add(ext_source)
        self._abort_decomposition_based_on_numerical_trouble = True
        self.logger.warning("Decomposition Termination: Couldn't find a path in the decomposition process")
        return None, None
    eedge_path = Decomposition._dfs_assemble_path_from_predecessor_dictionary(self._predecessor, ext_source, sink)
    for edge in eedge_path:
        self._used_ext_graph_edge_resources.add(edge)
    self._used_ext_graph_node_resources.add(ext_source)
    self._used_ext_graph_node_resources.add(sink)
    return eedge_path, sink
@staticmethod
def _dfs_assemble_path_from_predecessor_dictionary(predecessor, ext_source_node, ext_sink_node):
    """Walk predecessor links from sink back to source and return the edge
    list oriented from source to sink."""
    backwards = []
    node = ext_sink_node
    while node != ext_source_node:
        parent = predecessor[node]
        backwards.append((parent, node))
        node = parent
    # Collected sink-to-source; flip so the path runs source-to-sink.
    backwards.reverse()
    return backwards
def _process_path(self, extended_path, flow_path, mapping, queue):
for ij in extended_path.original_path:
i, j = ij
for uu_ext in extended_path.extended_path[j]:
# extended_path.extended_path[j] should contain all inter-layer edges associated with the node mapping of j
if uu_ext in flow_path:
u1_ext, u2_ext = uu_ext
u1 = self.ext_graph.node[u1_ext]["substrate_node"]
u2 = self.ext_graph.node[u2_ext]["substrate_node"]
if u1 != u2:
msg = "Inter-layer edge should connect nodes corresponding to the same substrate node! Instead: {} -> {} (= {}, {})".format(u1_ext, u2_ext, u1, u2)
raise CactusDecompositionError(msg)
if j in mapping.mapping_nodes:
u_prev = mapping.mapping_nodes[j] # previous mapping of
if u_prev != u1:
msg = "Tried remapping node | |
units_btn.style.button_color = 'tan'
row = [name_btn, self.float717, units_btn, ]
box765 = Box(children=row, layout=box_layout)
name_btn = Button(description='calcification_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float718 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float718, units_btn, ]
box766 = Box(children=row, layout=box_layout)
name_btn = Button(description='relative_rupture_volume', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float719 = FloatText(value='2.0', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float719, units_btn, ]
box767 = Box(children=row, layout=box_layout)
# -------------------------
div_row51 = Button(description='phenotype:volume', disabled=True, layout=divider_button_layout)
div_row51.style.button_color = 'orange'
name_btn = Button(description='total', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float720 = FloatText(value='478', step='10', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float720, units_btn, ]
box768 = Box(children=row, layout=box_layout)
name_btn = Button(description='fluid_fraction', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float721 = FloatText(value='0.75', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float721, units_btn, ]
box769 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float722 = FloatText(value='47.8', step='1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float722, units_btn, ]
box770 = Box(children=row, layout=box_layout)
name_btn = Button(description='fluid_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float723 = FloatText(value='0.05', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float723, units_btn, ]
box771 = Box(children=row, layout=box_layout)
name_btn = Button(description='cytoplasmic_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float724 = FloatText(value='0.0045', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float724, units_btn, ]
box772 = Box(children=row, layout=box_layout)
name_btn = Button(description='nuclear_biomass_change_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float725 = FloatText(value='0.0055', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float725, units_btn, ]
box773 = Box(children=row, layout=box_layout)
name_btn = Button(description='calcified_fraction', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float726 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float726, units_btn, ]
box774 = Box(children=row, layout=box_layout)
name_btn = Button(description='calcification_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float727 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float727, units_btn, ]
box775 = Box(children=row, layout=box_layout)
name_btn = Button(description='relative_rupture_volume', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float728 = FloatText(value='2.0', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float728, units_btn, ]
box776 = Box(children=row, layout=box_layout)
# -------------------------
div_row52 = Button(description='phenotype:mechanics', disabled=True, layout=divider_button_layout)
div_row52.style.button_color = 'orange'
name_btn = Button(description='cell_cell_adhesion_strength', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float729 = FloatText(value='0.4', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float729, units_btn, ]
box777 = Box(children=row, layout=box_layout)
name_btn = Button(description='cell_cell_repulsion_strength', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float730 = FloatText(value='10.0', step='1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float730, units_btn, ]
box778 = Box(children=row, layout=box_layout)
name_btn = Button(description='relative_maximum_adhesion_distance', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float731 = FloatText(value='1.25', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float731, units_btn, ]
box779 = Box(children=row, layout=box_layout)
self.bool30 = Checkbox(description='enabled', value=False,layout=name_button_layout)
name_btn = Button(description='set_relative_equilibrium_distance', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float732 = FloatText(value='1.8', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [self.bool30, name_btn, self.float732, units_btn, ]
box780 = Box(children=row, layout=box_layout)
self.bool31 = Checkbox(description='enabled', value=False,layout=name_button_layout)
name_btn = Button(description='set_absolute_equilibrium_distance', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float733 = FloatText(value='15.12', step='1', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [self.bool31, name_btn, self.float733, units_btn, ]
box781 = Box(children=row, layout=box_layout)
# -------------------------
div_row53 = Button(description='phenotype:motility', disabled=True, layout=divider_button_layout)
div_row53.style.button_color = 'orange'
name_btn = Button(description='speed', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float734 = FloatText(value='4', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='micron/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float734, units_btn]
box782 = Box(children=row, layout=box_layout)
name_btn = Button(description='persistence_time', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float735 = FloatText(value='5', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float735, units_btn]
box783 = Box(children=row, layout=box_layout)
name_btn = Button(description='migration_bias', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float736 = FloatText(value='0.70', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float736, units_btn]
box784 = Box(children=row, layout=box_layout)
self.bool32 = Checkbox(description='enabled', value=True,layout=name_button_layout)
self.bool33 = Checkbox(description='use_2D', value=True,layout=name_button_layout)
chemotaxis_btn = Button(description='chemotaxis', disabled=True, layout={'width':'30%'})
chemotaxis_btn.style.button_color = '#ffde6b'
self.bool34 = Checkbox(description='enabled', value=False,layout=name_button_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.chemotaxis_substrate7 = Text(value='chemokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.chemotaxis_substrate7]
box785 = Box(children=row, layout=box_layout)
name_btn = Button(description='direction', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.chemotaxis_direction7 = Text(value='1', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.chemotaxis_direction7]
box786 = Box(children=row, layout=box_layout)
# -------------------------
div_row54 = Button(description='phenotype:secretion', disabled=True, layout=divider_button_layout)
div_row54.style.button_color = 'orange'
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.text36 = Text(value='interferon 1', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text36]
box787 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float737 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float737, units_btn]
box788 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float738 = FloatText(value='0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float738, units_btn]
box789 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.text37 = Text(value='pro-inflammatory cytokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text37]
box790 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float739 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float739, units_btn]
box791 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float740 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float740, units_btn]
box792 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.text38 = Text(value='chemokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text38]
box793 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float741 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float741, units_btn]
box794 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float742 = FloatText(value='0.01', step='0.001', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float742, units_btn]
box795 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.text39 = Text(value='debris', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text39]
box796 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float743 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float743, units_btn]
box797 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float744 = FloatText(value='0.1', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float744, units_btn]
box798 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.text40 = Text(value='anti-inflammatory cytokine', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text40]
box799 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float745 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float745, units_btn]
box800 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float746 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float746, units_btn]
box801 = Box(children=row, layout=box_layout)
name_btn = Button(description='substrate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.text41 = Text(value='collagen', disabled=False, style=style, layout=widget_layout_long)
row = [name_btn, self.text41]
box802 = Box(children=row, layout=box_layout)
name_btn = Button(description='secretion_target', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'tan'
self.float747 = FloatText(value='1', step='0.1', style=style, layout=widget_layout)
units_btn = Button(description='dimensionless substrate concentration', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'tan'
row = [name_btn, self.float747, units_btn]
box803 = Box(children=row, layout=box_layout)
name_btn = Button(description='uptake_rate', disabled=True, layout=name_button_layout)
name_btn.style.button_color = 'lightgreen'
self.float748 = FloatText(value='0.0', step='0.01', style=style, layout=widget_layout)
units_btn = Button(description='1/min', disabled=True, layout=name_button_layout)
units_btn.style.button_color = 'lightgreen'
row = [name_btn, self.float748, units_btn]
box804 = Box(children=row, layout=box_layout)
# -------------------------
div_row55 = Button(description='phenotype:molecular', disabled=True, layout=divider_button_layout)
div_row55.style.button_color = 'orange'
# ================== <custom_data>, if present ==================
div_row56 = Button(description='Custom Data',disabled=True, layout=divider_button_layout)
div_row56.style.button_color = 'cyan'
name_btn | |
# autoclass/autorepr_.py
# Authors: <NAME> <<EMAIL>>
#
# Copyright (c) Schneider Electric Industries, 2019. All right reserved.
from warnings import warn
try: # python 3+
from inspect import signature
except ImportError:
from funcsigs import signature
try:
from typing import Any, Tuple, Union, Dict, TypeVar, Callable, Iterable, Sized
try:
from typing import Type
except ImportError:
pass
T = TypeVar('T')
except ImportError:
pass
from autoclass.utils import is_attr_selected, method_already_there, check_known_decorators, read_fields, \
__AUTOCLASS_OVERRIDE_ANNOTATION, iterate_on_vars
from decopatch import class_decorator, DECORATED
@class_decorator
def autorepr(include=None,             # type: Union[str, Tuple[str]]
             exclude=None,             # type: Union[str, Tuple[str]]
             only_known_fields=True,   # type: bool
             only_public_fields=True,  # type: bool
             curly_string_repr=False,  # type: bool
             cls=DECORATED
             ):
    """
    A decorator that generates ``__str__`` and ``__repr__`` for class `cls` if not already implemented.

    The parameters customize which fields appear in the generated representation.

    :param include: a tuple of explicit attribute names to include (None means all)
    :param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
    :param only_known_fields: if True (default), only known fields (constructor arguments or pyfields fields) will be
        exposed through the str/repr view, not any other field that would be created in the constructor or
        dynamically. If set to False, the representation is a direct view of *all* public object fields. This view can
        be filtered with include/exclude and private fields can be made visible by setting only_public_fields to false
    :param only_public_fields: this parameter is only used when only_constructor_args is set to False. If
        only_public_fields is set to False, all fields are visible. Otherwise (default), class-private fields will be
        hidden
    :param curly_string_repr: turn this to `True` to get the curly string representation `{%r: %r, ...}` instead of
        the default one `(%s=%r, ...)`
    :return: the decorated class
    """
    # the decorator is a thin wrapper: all real work happens in the plain function below
    return autorepr_decorate(cls,
                             include=include,
                             exclude=exclude,
                             only_known_fields=only_known_fields,
                             only_public_fields=only_public_fields,
                             curly_string_repr=curly_string_repr)
def autorepr_decorate(cls,                     # type: Type[T]
                      include=None,            # type: Union[str, Tuple[str]]
                      exclude=None,            # type: Union[str, Tuple[str]]
                      only_known_fields=True,  # type: bool
                      only_public_fields=True, # type: bool
                      curly_string_repr=False, # type: bool
                      ):
    # type: (...) -> Type[T]
    """
    Generates the appropriate ``__str__`` and ``__repr__`` methods on `cls`, without using the
    @autorepr decorator syntax.

    :param cls: the class on which to execute. Note that it won't be wrapped.
    :param include: a tuple of explicit attribute names to include (None means all)
    :param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
    :param only_known_fields: if True (default), only known fields (constructor arguments or pyfields fields) will be
        exposed through the str/repr view, not any other field that would be created in the constructor or
        dynamically. If set to False, the representation is a direct view of *all* public object fields. This view can
        be filtered with include/exclude and private fields can be made visible by setting only_public_fields to false
    :param only_public_fields: this parameter is only used when only_constructor_args is set to False. If
        only_public_fields is set to False, all fields are visible. Otherwise (default), class-private fields will be
        hidden
    :param curly_string_repr: turn this to `True` to get the curly string representation `{%r: %r, ...}` instead of
        the default one `(%s=%r, ...)`
    :return: the same class, modified in place
    """
    # refuse to stack with incompatible/duplicate decorators
    check_known_decorators(cls, '@autorepr')

    if not only_known_fields:
        # dynamic view over vars(self), possibly filtered by include/exclude/visibility
        execute_autorepr_on_class(cls, include=include, exclude=exclude,
                                  public_fields_only=only_public_fields,
                                  curly_string_repr=curly_string_repr)
    else:
        # fixed view: field list comes from pyfields or the constructor signature
        names, _ = read_fields(cls, include=include, exclude=exclude, caller="@autorepr")
        execute_autorepr_on_class(cls, selected_names=names, curly_string_repr=curly_string_repr)
    return cls
def execute_autorepr_on_class(cls,                     # type: Type[T]
                              selected_names=None,     # type: Iterable[str]
                              include=None,            # type: Union[str, Tuple[str]]
                              exclude=None,            # type: Union[str, Tuple[str]]
                              public_fields_only=True, # type: bool
                              curly_string_repr=False, # type: bool
                              ):
    """
    This method overrides str and repr method if not already implemented
    Parameters allow to customize the list of fields that will be visible.

    :param cls: the class on which to execute.
    :param selected_names: an explicit list of attribute names that should be used in the dict. If this is provided,
        `include`, `exclude` and `public_fields_only` should be left as default as they are not used.
    :param include: a tuple of explicit attribute names to include (None means all). This parameter is only used when
        `selected_names` is not provided.
    :param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None. This
        parameter is only used when `selected_names` is not provided.
    :param public_fields_only: this parameter is only used when `selected_names` is not provided. If
        public_fields_only is set to False, all fields are visible. Otherwise (default), class-private fields will be
        hidden from the exposed str/repr view.
    :param curly_string_repr: turn this to `True` to get the curly string representation `{%r: %r, ...}` instead of
        the default one `(%s=%r, ...)`
    :return:
    """
    if selected_names is not None:
        # case (a) hardcoded list - easy: we know the exact list of fields to make visible
        if include is not None or exclude is not None or public_fields_only is not True:
            # the two selection mechanisms are mutually exclusive
            raise ValueError("`selected_names` can not be used together with `include`, `exclude` or "
                             "`public_fields_only`")
        str_repr_methods = create_repr_methods_for_hardcoded_list(selected_names, curly_mode=curly_string_repr)
    else:
        # case (b) the list of fields is not predetermined, it will depend on vars(self)
        if include is None and exclude is None and not public_fields_only:
            # easy: all vars() are exposed
            str_repr_methods = create_repr_methods_for_object_vars(curly_mode=curly_string_repr)
        else:
            # harder: all fields are allowed, but there are filters on this dynamic list
            # private_name_prefix = '_' + object_type.__name__ + '_'
            private_name_prefix = '_' if public_fields_only else None
            str_repr_methods = create_repr_methods_for_object_vars_with_filters(curly_mode=curly_string_repr,
                                                                                include=include, exclude=exclude,
                                                                                private_name_prefix=private_name_prefix)
    # install __str__: an existing definition is replaced (with a warning) unless the user
    # explicitly marked it with @autoclass_override, in which case the user's version is kept
    if method_already_there(cls, '__str__', this_class_only=True):
        if not hasattr(cls.__str__, __AUTOCLASS_OVERRIDE_ANNOTATION):
            warn('__str__ is already defined on class %s, it will be overridden with the one generated by '
                 '@autorepr/@autoclass ! If you want to use your version, annotate it with @autoclass_override'
                 % cls)
            cls.__str__ = str_repr_methods.str
    else:
        cls.__str__ = str_repr_methods.str
    # install __repr__: same policy as __str__ above
    if method_already_there(cls, '__repr__', this_class_only=True):
        if not hasattr(cls.__repr__, __AUTOCLASS_OVERRIDE_ANNOTATION):
            warn('__repr__ is already defined on class %s, it will be overridden with the one generated by '
                 '@autorepr/@autoclass ! If you want to use your version, annotate it with @autoclass_override'
                 % cls)
            cls.__repr__ = str_repr_methods.repr
    else:
        cls.__repr__ = str_repr_methods.repr
class ReprMethods(object):
    """
    Lightweight container used in @autorepr to carry the generated ``__str__`` and
    ``__repr__`` implementations between the factory functions and the class patcher.
    """
    __slots__ = 'str', 'repr'

    def __init__(self, str, repr):
        # store the two generated callables; slots keep instances tiny
        self.repr = repr
        self.str = str
def create_repr_methods_for_hardcoded_list(selected_names, # type: Union[Sized, Iterable[str]]
                                           curly_mode      # type: bool
                                           ):
    # type: (...) -> ReprMethods
    """
    Creates the str/repr implementations for a fixed, known list of attribute names.

    :param selected_names: the attribute names to render, in order
    :param curly_mode: if True, produce the `ClassName(**{%r: %r, ...})` form; otherwise `ClassName(%s=%r, ...)`
    :return: a ReprMethods container (the same function serves as both str and repr)
    """
    if curly_mode:
        def __repr__(self):
            """
            Generated by @autorepr. Relies on the hardcoded list of field names and "getattr" (object) for the value.
            """
            contents = ', '.join('%r: %r' % (k, getattr(self, k)) for k in selected_names)
            return '%s(**{%s})' % (self.__class__.__name__, contents)
    else:
        def __repr__(self):
            """
            Generated by @autorepr. Relies on the hardcoded list of field names and "getattr" (object) for the value.
            """
            contents = ', '.join('%s=%r' % (k, getattr(self, k)) for k in selected_names)
            return '%s(%s)' % (self.__class__.__name__, contents)
    return ReprMethods(str=__repr__, repr=__repr__)
def create_repr_methods_for_object_vars(curly_mode  # type: bool
                                        ):
    # type: (...) -> ReprMethods
    """
    Creates the str/repr implementations exposing *all* vars() of the object, unfiltered.

    :param curly_mode: if True, produce the `ClassName(**{%r: %r, ...})` form; otherwise `ClassName(%s=%r, ...)`
    :return: a ReprMethods container (the same function serves as both str and repr)
    """
    # select the per-item and outer format once, outside the generated method
    item_fmt = '%r: %r' if curly_mode else '%s=%r'
    outer_fmt = '%s(**{%s})' if curly_mode else '%s(%s)'

    def __repr__(self):
        """
        Generated by @autorepr. Relies on the list of vars() and "getattr" (object) for the value.
        """
        pieces = (item_fmt % (k, getattr(self, k)) for k in iterate_on_vars(self))
        return outer_fmt % (self.__class__.__name__, ', '.join(pieces))

    return ReprMethods(str=__repr__, repr=__repr__)
def create_repr_methods_for_object_vars_with_filters(curly_mode, # type: bool
include, # type: Union[str, Tuple[str]]
exclude, # type: Union[str, Tuple[str]]
private_name_prefix=None # type: str
):
# type: (...) -> ReprMethods
"""
:param curly_mode:
:param include:
:param exclude:
:param private_name_prefix:
:return:
"""
public_fields_only = private_name_prefix is not None
def _vars_iterator(self):
"""
Filters the vars(self) according to include/exclude/public_fields_only
:param self:
:return:
"""
for att_name in iterate_on_vars(self):
# filter based on the name (include/exclude + private/public)
if is_attr_selected(att_name, include=include, exclude=exclude) and \
(not public_fields_only or not att_name.startswith(private_name_prefix)):
# use it
yield att_name, getattr(self, att_name)
if not curly_mode:
def __repr__(self):
"""
Generated by @autorepr. Relies | |
cnt += 1
if cnt % 1000 == 0:
print cnt, zl.tock()
zl.tick()
ok = False
for r in doc['relationships']:
pre = r['predicate']
sub_name = r['subject']['name']
obj_name = r['object']['name']
spo = sub_name+'_'+pre+'_'+obj_name
if spo in spo_list:
rcnt+=1
ok = True
if ok:
total_test_cnt += 1
print rcnt,total_train_cnt,total_test_cnt
def vg_count_predicate_per_object():
client = MongoClient("mongodb://localhost:27017")
db = client.visual_genome_1_2
db_results = db.relationships_all_train.find(no_cursor_timeout=True)
cnt = 0
spo_infos = {}
zl.tick()
for doc in db_results:
id = doc['image_id']
cnt += 1
if cnt % 1000 == 0:
print cnt, zl.tock()
zl.tick()
rcnt = 0
for r in doc['relationships']:
pre = r['predicate']
sub_name = r['subject']['name']
obj_name = r['object']['name']
if obj_name not in spo_infos:
spo_info = {'predicates':[]}
spo_infos[obj_name] = spo_info
if sub_name not in spo_infos:
spo_info = {'predicates':[]}
spo_infos[sub_name] = spo_info
sub_spo_info = spo_infos[sub_name]
obj_spo_info = spo_infos[obj_name]
if pre not in sub_spo_info['predicates']:
sub_spo_info['predicates'].append(pre)
if pre not in obj_spo_info['predicates']:
obj_spo_info['predicates'].append(pre)
db_results_2 = db.relationships_all_test.find(no_cursor_timeout=True)
for doc in db_results_2:
id = doc['image_id']
cnt += 1
if cnt % 1000 == 0:
print cnt, zl.tock()
zl.tick()
rcnt = 0
for r in doc['relationships']:
pre = r['predicate']
sub_name = r['subject']['name']
obj_name = r['object']['name']
if obj_name not in spo_infos:
spo_info = {'predicates':[]}
spo_infos[obj_name] = spo_info
if sub_name not in spo_infos:
spo_info = {'predicates':[]}
spo_infos[sub_name] = spo_info
sub_spo_info = spo_infos[sub_name]
obj_spo_info = spo_infos[obj_name]
if pre not in sub_spo_info['predicates']:
sub_spo_info['predicates'].append(pre)
if pre not in obj_spo_info['predicates']:
obj_spo_info['predicates'].append(pre)
total_predicates = 0
for k in spo_infos.keys():
spo_info = spo_infos[k]
print len(spo_info['predicates'])
total_predicates+= len(spo_info['predicates'])
print total_predicates/200.
def vg_count_only_one_triplet():
client = MongoClient("mongodb://localhost:27017")
db = client.visual_genome_1_2
db_results = db.relationships_all_train.find(no_cursor_timeout=True)
cnt = 0
spo_info = {}
spo_list = []
zl.tick()
for doc in db_results:
id = doc['image_id']
cnt += 1
if cnt % 1000 == 0:
print cnt, zl.tock()
zl.tick()
rcnt = 0
for r in doc['relationships']:
pre = r['predicate']
sub_name = r['subject']['name']
obj_name = r['object']['name']
spo = sub_name+'_'+pre+'_'+obj_name
if spo not in spo_info:
spo_info[spo]= 0
spo_info[spo]+=1
db_results_2 = db.relationships_all_test.find(no_cursor_timeout=True)
for doc in db_results_2:
id = doc['image_id']
cnt += 1
if cnt % 1000 == 0:
print cnt, zl.tock()
zl.tick()
rcnt = 0
for r in doc['relationships']:
pre = r['predicate']
sub_name = r['subject']['name']
obj_name = r['object']['name']
spo = sub_name+'_'+pre+'_'+obj_name
if spo not in spo_info:
spo_info[spo]= 0
spo_info[spo]+=1
zl.save('output/spo_info_vg.pkl',spo_info)
#total_pairs = len(sub_obj_info.keys())+0.0
total_spo = len(spo_info.keys())+0.0
one_count = 0
for k in spo_info.keys():
if spo_info[k]>=5:
spo_list.append(k)
one_count += 1
#print total_spo,one_count
vg_total_annotation_count(spo_list)
def relation_make_meta_add_imid2path():
    """Populate meta/imid2path/<image_id> entries in the VG1.2 meta HDF5 file.

    Reads every document of the visual_genome_1_2.image_data Mongo
    collection and stores, per image id, the image path relative to the
    VG root (e.g. 'VG_100K/2365839.jpg'), derived from the Stanford
    download URL.
    """
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    client = MongoClient("mongodb://localhost:27017")
    db = client.visual_genome_1_2
    # Collect docs keyed by image_id (also dedupes any repeated ids
    # coming out of the cursor).
    imdatas = {}
    for imdata in db.image_data.find(no_cursor_timeout=True):
        imdatas[imdata['image_id']] = imdata
    for k, im_data in imdatas.items():
        # URL looks like https://cs.stanford.edu/people/rak248/VG_100K/2.jpg;
        # keep only '<folder>/<file>'.
        im_path_full = im_data['url'].replace('https://cs.stanford.edu/people/rak248/','')
        parts = im_path_full.split('/')
        m['meta/imid2path/%s'%k] = parts[0]+'/'+parts[1]
    # Close explicitly so the new datasets are flushed to disk
    # (original leaked the handle).
    m.close()
def _relation_make_meta_split(m, db_results, imdatas, spo_set, blacklist, split, cnt):
    """Write gt/<split>/<imid>/{rlp_labels,sub_boxes,obj_boxes} datasets for
    every document in db_results that passes the size and blacklist filters.
    Returns the updated progress counter so the caller can continue it."""
    for doc in db_results:
        if cnt %1000 ==0:
            print(cnt)
        cnt += 1
        imid = doc['image_id']
        im_data = imdatas[imid]
        # Skip tiny images.
        if im_data['width']<100 or im_data['height']<100:continue
        im_path_full = im_data['url'].replace('https://cs.stanford.edu/people/rak248/','')
        im_path_folder = im_path_full.split('/')[0]
        im_path_file = im_path_full.split('/')[1]
        im_index = im_path_folder+'/'+im_path_file.replace('.jpg','')
        # Skip known-bad images.
        if im_index in blacklist:continue
        obj_boxes = []
        sub_boxes = []
        rlp_labels = []
        for r in doc['relationships']:
            pre = r['predicate']
            sub_name = r['subject']['name']
            obj_name = r['object']['name']
            spo = sub_name+'_'+pre+'_'+obj_name
            # Keep only triplets that passed the frequency filter.
            if spo in spo_set:
                sidx = zl.name2idx_cls(m,sub_name)
                oidx = zl.name2idx_cls(m,obj_name)
                pidx = zl.name2idx_pre(m,pre)
                # VG stores boxes as x,y,w,h; convert to x1,y1,x2,y2.
                ox1,oy1,ow,oh = r['object']['x'],r['object']['y'],r['object']['w'],r['object']['h']
                sx1,sy1,sw,sh = r['subject']['x'],r['subject']['y'],r['subject']['w'],r['subject']['h']
                ox2,oy2 = ox1+ow,oy1+oh
                sx2,sy2 = sx1+sw,sy1+sh
                rlp_labels.append([sidx,pidx,oidx])
                sub_boxes.append([sx1,sy1,sx2,sy2])
                obj_boxes.append([ox1,oy1,ox2,oy2])
        # Datasets are written even when empty; remove_empty_from_metadata
        # prunes those groups afterwards.
        m.create_dataset('gt/%s/%s/rlp_labels'%(split,imid),data=np.array(rlp_labels).astype(np.int16))
        m.create_dataset('gt/%s/%s/sub_boxes'%(split,imid),data=np.array(sub_boxes).astype(np.int16))
        m.create_dataset('gt/%s/%s/obj_boxes'%(split,imid),data=np.array(obj_boxes).astype(np.int16))
    return cnt

def relation_make_meta():
    """Build gt/train and gt/test relationship annotations in the VG1.2
    meta HDF5 file, keeping only subject_predicate_object triplets seen at
    least 5 times (counts come from output/spo_info_vg.pkl).

    Fixes vs. original: the train and test loops were verbatim duplicates
    (now one helper), and the frequency-filtered triplets were kept in a
    dict with dummy values purely for membership tests (now a set).
    """
    spo_info = zl.load('output/spo_info_vg.pkl')
    # Triplets occurring >= 5 times; set gives O(1) membership tests.
    spo_set = set(k for k in spo_info.keys() if spo_info[k]>=5)
    # Images with unusable annotations, excluded from both splits.
    blacklist =[
        'VG_100K/2363098',
        'VG_100K_2/2402233',
        'VG_100K/2365839',
        'VG_100K_2/2398948',
        'VG_100K/2315697',
        'VG_100K_2/2403354',
    ]
    client = MongoClient("mongodb://localhost:27017")
    db = client.visual_genome_1_2
    # Cache image_data docs by image_id for width/height/url lookups.
    imdatas = {}
    for imdata in db.image_data.find(no_cursor_timeout=True):
        imdatas[imdata['image_id']] = imdata
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    cnt = 0
    cnt = _relation_make_meta_split(
        m, db.relationships_all_train.find(no_cursor_timeout=True),
        imdatas, spo_set, blacklist, 'train', cnt)
    _relation_make_meta_split(
        m, db.relationships_all_test.find(no_cursor_timeout=True),
        imdatas, spo_set, blacklist, 'test', cnt)
def _prune_empty_split(m, split, cnt, del_cnt):
    """Delete gt/<split>/<imid> groups whose sub_boxes dataset is empty.
    Returns the updated (progress, deleted) counters."""
    # Snapshot the key list before deleting: removing members while
    # iterating the live h5py key collection is unsafe.
    for k in list(m['gt/%s'%split].keys()):
        if cnt%1000==0:
            print(cnt)
        cnt +=1
        # Original misleadingly named this 'rlp_labels'; it is sub_boxes.
        sub_boxes = m['gt/%s/%s/sub_boxes'%(split,k)][...]
        if sub_boxes.shape[0]==0:
            del m['gt/%s/%s'%(split,k)]
            del_cnt+=1
    return cnt, del_cnt

def remove_empty_from_metadata():
    """Remove images with zero surviving relationships from gt/test and
    gt/train in the VG1.2 meta file (relation_make_meta leaves empty
    groups behind when no triplet passes its frequency filter)."""
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta (3rd copy).h5')
    del_cnt =0
    cnt = 0
    cnt, del_cnt = _prune_empty_split(m, 'test', cnt, del_cnt)
    cnt, del_cnt = _prune_empty_split(m, 'train', cnt, del_cnt)
def vg_vphrase_make_voc_format(split_type):
    """Export one split of the visual-phrase ground truth to PASCAL-VOC
    layout: copies each image under Data/<split>/ and writes one
    Annotations/<split>/<folder>/<id>.xml per image, where every 'object'
    is a subject-predicate-object triplet box.

    Arguments:
        split_type : 'train' or 'test' (anything else aborts).
    """
    if split_type !='train' and split_type!='test':
        print 'error'
        exit(0)
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    m_vp = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_vp_meta.h5')
    vg_root = '/media/zawlin/ssd/data_vrd/vg_1.2/'
    root = '/media/zawlin/ssd/data_vrd/vg_1.2/voc_format_vp/'
    anno_root= root+'Annotations/'+split_type+'/'
    data_root= root+'Data/'+split_type+'/'
    # VG images live in two folders; mirror both in the output tree.
    zl.make_dirs(anno_root+'VG_100K_2')
    zl.make_dirs(anno_root+'VG_100K')
    zl.make_dirs(data_root+'VG_100K_2')
    zl.make_dirs(data_root+'VG_100K')
    client = MongoClient("mongodb://localhost:27017")
    db = client.visual_genome_1_2
    # image_id (as str) -> image_data doc, for width/height lookups.
    imdatas = {}
    for imdata in db.image_data.find(no_cursor_timeout=True):
        imid =str(imdata['image_id'])
        imdatas[imid] = imdata
    imid2path = {}
    for k in m['meta/imid2path'].keys():
        imid2path[k] = str(m['meta/imid2path/%s'%k][...])
    cnt = 0
    zl.tick()
    for k in m_vp['gt/%s'%split_type].keys():
        if cnt%1000==0:
            print cnt,zl.tock()  # progress: images done, seconds since tick
            zl.tick()
        cnt+=1
        # todo for vg
        im_path = imid2path[k]
        im_src_path = vg_root+im_path
        im_dst_path = data_root+im_path
        zl.copy_file(im_src_path,im_dst_path)
        voc_datum = {"folder": im_path.split('/')[0],
                     "source": {"database":"sg vrd visual phrase"},
                     "filename":im_path.split('/')[1]
                     }
        #todo,remove mongodb from this processing stage
        imdata = imdatas[k]
        w, h =imdata['width'],imdata['height']
        voc_datum['size']={'width':w,'height':h}
        objs = []
        # Boxes are stored x1,y1,x2,y2 (see index order below).
        gt_boxes = m_vp['gt/%s/%s/boxes'%(split_type,k)][...]
        gt_labels = m_vp['gt/%s/%s/labels'%(split_type,k)][...]
        for i in xrange(gt_boxes.shape[0]):
            gt_box = gt_boxes[i]
            gt_label = gt_labels[i]
            ymin, ymax, xmin, xmax = gt_box[1],gt_box[3],gt_box[0],gt_box[2]
            bbox = {'ymin':ymin,'ymax':ymax,'xmin':xmin,'xmax':xmax}
            # Label index -> triplet name string, e.g. 'person_ride_horse'.
            name = zl.idx2name_tri(m_vp,gt_label)
            obj = {'name':name,
                   'bndbox':bbox}
            objs.append(obj)
        voc_datum['object']=objs
        #write to xml
        dst_path = os.path.join(anno_root,voc_datum["folder"], voc_datum["filename"][:voc_datum["filename"].rfind('.')]+'.xml')
        voc_datum={'annotation':voc_datum}
        f = open(dst_path,'w')
        f.write(dict2xml(voc_datum)+'\n')
        f.close()
    print 'images with annotation=%d\n'%cnt
def vg_vphrase_make_imagesets():
    """Write the visual-phrase train ImageSet file: one
    '<relative image path without .jpg> <running index>' line per image in
    gt/train of the vp meta file."""
    imageset_root= '/media/zawlin/ssd/data_vrd/vg_1.2/voc_format_vp/ImageSets/train.txt'
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    # image_id -> relative path, read from the main meta file.
    imid2path = {}
    for k in m['meta/imid2path'].keys():
        imid2path[k] = str(m['meta/imid2path/%s'%k][...])
    m.close()
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_vp_meta.h5')
    # 'with' guarantees the output is flushed and closed; the original
    # leaked the handle, risking a truncated ImageSet file.
    with open(imageset_root,'w') as out:
        cnt = 1
        for k in m['gt/train'].keys():
            out.write('%s %d\n'%(imid2path[k].replace('.jpg',''),cnt))
            cnt+=1
    m.close()
def vg_make_meta_for_obj_evaluation():
    """Export the 200 VG1.2 object classes as an ILSVRC-style 'synsets'
    record array in vg1_2_meta.mat; WNID, name and description all reuse
    the class-name string read from meta/cls/idx2name."""
    from numpy.core.records import fromarrays
    from scipy.io import savemat
    m = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    # Class indices 1..200 and their names, in index order.
    ids = list(xrange(1,201))
    names = [str(m['meta/cls/idx2name/%d'%i][...]) for i in ids]
    meta_synset = fromarrays([ids, names, names, names],
                             names=['VG1_2_ID', 'WNID', 'name', 'description'])
    savemat('/home/zawlin/Dropbox/proj/vg1_2_meta.mat', {'synsets': meta_synset})
def vg_make_relation_gt_for_evaluation():
    """Export the gt/test relationship annotations to vg_gt.mat in the
    format the MATLAB VRD evaluation scripts consume: per-image object
    boxes, subject boxes, [sub,pred,obj] label triples, and image paths,
    all ordered by sorted image id.
    """
    from numpy.core.records import fromarrays
    from scipy.io import savemat
    h5f = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    cnt = 1
    gt_obj_bboxes = []
    gt_sub_bboxes = []
    gt_rlp_labels = []
    # Deterministic image order so the .mat cell arrays line up across runs.
    keys_sorted = sorted(h5f['gt/test'].keys())
    imid2path = {}
    for k in h5f['meta/imid2path'].keys():
        imid2path[k] = str(h5f['meta/imid2path/%s'%k][...])
    ccnt=0
    imagePath = []
    for k in keys_sorted:
        if cnt%1000==0:
            print cnt
        cnt+=1
        # float64: presumably what the MATLAB side expects -- confirm.
        obj_boxes = h5f['gt/test/%s/obj_boxes'%k][...].astype(np.float64)
        rlp_labels = h5f['gt/test/%s/rlp_labels'%k][...].astype(np.float64)
        sub_boxes = h5f['gt/test/%s/sub_boxes'%k][...].astype(np.float64)
        # for i in xrange(rlp_labels.shape[0]):
        #     if rlp_labels[i][1] == 76 or rlp_labels[i][1]==85:
        #         ccnt += 1
        gt_obj_bboxes.append(obj_boxes)
        gt_sub_bboxes.append(sub_boxes)
        gt_rlp_labels.append(rlp_labels)
        imagePath.append(imid2path[k])
    # print ccnt
    savemat('/home/zawlin/Dropbox/proj/vg_gt.mat',{'gt_obj_bboxes':np.array(gt_obj_bboxes),'gt_sub_bboxes':np.array(gt_sub_bboxes),'gt_rlp_labels':np.array(gt_rlp_labels),'imagePath':np.array(imagePath,dtype=np.object)})
def vg_meta_add_predicate_types():
    """Tag every predicate in meta/pre/name2idx with a 'type' attribute
    (single-letter codes such as 'v'/'p'/'s'/'c'; consumed by
    vg_generate_type_idx), read from the predicate mapping file.
    """
    # Original shadowed the path string with the File object; keep them
    # separate for clarity.
    h5f = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    # 'with' closes the mapping file deterministically (original leaked
    # the handle opened inside the list comprehension).
    with open('/media/zawlin/ssd/Dropbox/cvpr17/_relation_mappings/vg_predicates_for_processing.txt') as f:
        lines = [line.strip() for line in f]
    type_mappings={}
    for l in lines:
        # Comma-separated line: first non-empty field is the predicate
        # name, last non-empty field is its type code.
        ls = [i.strip() for i in l.split(',') if i.strip() != '']
        type_mappings[ls[0]]=ls[-1]
    for k in h5f['meta/pre/name2idx/']:
        h5f['meta/pre/name2idx/'+k].attrs['type']=type_mappings[k]
def vg_generate_type_idx():
    """Print, for each predicate type code ('v', 'p', 's', 'c'), the list
    of predicate index strings carrying that code in their 'type'
    attribute, formatted as ready-to-paste python assignments."""
    h5f = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_meta.h5')
    # Bucket predicate indices by type code.  Replaces the original
    # copy-paste if-chain that re-read attrs['type'] four times per key.
    buckets = {'v': [], 'p': [], 's': [], 'c': []}
    grp = h5f['meta/pre/name2idx/']
    for k in grp:
        node = grp[k]
        t = node.attrs['type']
        if t in buckets:
            buckets[t].append(str(node[...]))
    print('v= ' +str(buckets['v']))
    print('p= ' +str(buckets['p']))
    print('s= ' +str(buckets['s']))
    print('c= ' +str(buckets['c']))
def vg_vp_meta_add_predicate_types():
    """Tag every visual-phrase triplet in meta/tri/name2idx with the type
    code of its predicate, looked up in the predicate mapping file.
    Triplet names have the form '<subject>_<predicate>_<object>'."""
    h5f = h5py.File('/home/zawlin/Dropbox/proj/vg1_2_vp_meta.h5')
    # 'with' closes the mapping file deterministically.
    with open('/home/zawlin/Dropbox/cvpr17/_relation_mappings/vg_predicates_for_processing.txt') as f:
        lines = [line.strip() for line in f]
    type_mappings={}
    for l in lines:
        ls = [i.strip() for i in l.split(',') if i.strip() != '']
        type_mappings[ls[0]]=ls[-1]
    for k in h5f['meta/tri/name2idx/']:
        pre_orig = k
        try:
            if pre_orig != '__background__':
                # NOTE(review): assumes the subject name contains no '_'
                # and the predicate is a single token -- a multi-word
                # predicate would pick the wrong token; confirm against
                # the triplet vocabulary.
                pre = pre_orig.split('_')[1]
                h5f['meta/tri/name2idx/'+k].attrs['type']=type_mappings[pre]
        except (KeyError, IndexError):
            # Narrowed from a bare 'except:' -- only unmapped predicates
            # or malformed names should abort, not e.g. KeyboardInterrupt.
            print(pre_orig)
            exit(0)
def check_c_type_img_in_train():
    """Diagnostic: print index and name of every 'c'-type predicate, then
    print the ids of test images whose ground truth uses predicate index
    76 or 85.

    NOTE(review): the inline comment below labels 76/85 as 'small than' /
    'tall than' -- confirm against meta/pre/idx2name before relying on it.
    """
    h5f = '/home/zawlin/Dropbox/proj/vg1_2_meta.h5'
    h5f = h5py.File(h5f)
    # Dump all predicates tagged with type code 'c'.
    for k in h5f['meta/pre/name2idx/']:
        if k !='__background__':
            if h5f['meta/pre/name2idx/%s'%k].attrs['type']=='c':
                print str(h5f['meta/pre/name2idx/%s'%k][...])
                print k
    for k in h5f['gt/test']:
        #print h5f['gt/train'][k].keys()
        # Predicate column of the (N,3) [sub,pred,obj] label array.
        p_labels = h5f['gt/test'][k]['rlp_labels'][...][:,1]
        # 76 == small than, 85==tall than
        if np.any(np.in1d(p_labels,np.array([76,85]))):
            # h5f.create_dataset('gt/test/%s/obj_boxes'%k,data = h5f['gt/train'][k]['obj_boxes'][...].astype(np.short))
            # h5f.create_dataset('gt/test/%s/sub_boxes'%k,data = h5f['gt/train'][k]['sub_boxes'][...].astype(np.short))
            # h5f.create_dataset('gt/test/%s/rlp_labels'%k,data = h5f['gt/train'][k]['rlp_labels'][...].astype(np.short))
            print k
            # print p_labels
    # print
559831, 559841, 559849, 559859, 559877,
559883, 559901, 559907, 559913, 559939, 559967, 559973, 559991,
560017, 560023, 560029, 560039, 560047, 560081, 560083, 560089,
560093, 560107, 560113, 560117, 560123, 560137, 560149, 560159,
560171, 560173, 560179, 560191, 560207, 560213, 560221, 560227,
560233, 560237, 560239, 560243, 560249, 560281, 560293, 560297,
560299, 560311, 560317, 560341, 560353, 560393, 560411, 560437,
560447, 560459, 560471, 560477, 560479, 560489, 560491, 560501,
560503, 560531, 560543, 560551, 560561, 560597, 560617, 560621,
560639, 560641, 560653, 560669, 560683, 560689, 560701, 560719,
560737, 560753, 560761, 560767, 560771, 560783, 560797, 560803,
560827, 560837, 560863, 560869, 560873, 560887, 560891, 560893,
560897, 560929, 560939, 560941, 560969, 560977, 561019, 561047,
561053, 561059, 561061, 561079, 561083, 561091, 561097, 561101,
561103, 561109, 561161, 561173, 561181, 561191, 561199, 561229,
561251, 561277, 561307, 561313, 561343, 561347, 561359, 561367,
561373, 561377, 561389, 561409, 561419, 561439, 561461, 561521,
561529, 561551, 561553, 561559, 561599, 561607, 561667, 561703,
561713, 561733, 561761, 561767, 561787, 561797, 561809, 561829,
561839, 561907, 561917, 561923, 561931, 561943, 561947, 561961,
561973, 561983, 561997, 562007, 562019, 562021, 562043, 562091,
562103, 562129, 562147, 562169, 562181, 562193, 562201, 562231,
562259, 562271, 562273, 562283, 562291, 562297, 562301, 562307,
562313, 562333, 562337, 562349, 562351, 562357, 562361, 562399,
562403, 562409, 562417, 562421, 562427, 562439, 562459, 562477,
562493, 562501, 562517, 562519, 562537, 562577, 562579, 562589,
562591, 562607, 562613, 562621, 562631, 562633, 562651, 562663,
562669, 562673, 562691, 562693, 562699, 562703, 562711, 562721,
562739, 562753, 562759, 562763, 562781, 562789, 562813, 562831,
562841, 562871, 562897, 562901, 562909, 562931, 562943, 562949,
562963, 562967, 562973, 562979, 562987, 562997, 563009, 563011,
563021, 563039, 563041, 563047, 563051, 563077, 563081, 563099,
563113, 563117, 563119, 563131, 563149, 563153, 563183, 563197,
563219, 563249, 563263, 563287, 563327, 563351, 563357, 563359,
563377, 563401, 563411, 563413, 563417, 563419, 563447, 563449,
563467, 563489, 563501, 563503, 563543, 563551, 563561, 563587,
563593, 563599, 563623, 563657, 563663, 563723, 563743, 563747,
563777, 563809, 563813, 563821, 563831, 563837, 563851, 563869,
563881, 563887, 563897, 563929, 563933, 563947, 563971, 563987,
563999, 564013, 564017, 564041, 564049, 564059, 564061, 564089,
564097, 564103, 564127, 564133, 564149, 564163, 564173, 564191,
564197, 564227, 564229, 564233, 564251, 564257, 564269, 564271,
564299, 564301, 564307, 564313, 564323, 564353, 564359, 564367,
564371, 564373, 564391, 564401, 564407, 564409, 564419, 564437,
564449, 564457, 564463, 564467, 564491, 564497, 564523, 564533,
564593, 564607, 564617, 564643, 564653, 564667, 564671, 564679,
564701, 564703, 564709, 564713, 564761, 564779, 564793, 564797,
564827, 564871, 564881, 564899, 564917, 564919, 564923, 564937,
564959, 564973, 564979, 564983, 564989, 564997, 565013, 565039,
565049, 565057, 565069, 565109, 565111, 565127, 565163, 565171,
565177, 565183, 565189, 565207, 565237, 565241, 565247, 565259,
565261, 565273, 565283, 565289, 565303, 565319, 565333, 565337,
565343, 565361, 565379, 565381, 565387, 565391, 565393, 565427,
565429, 565441, 565451, 565463, 565469, 565483, 565489, 565507,
565511, 565517, 565519, 565549, 565553, 565559, 565567, 565571,
565583, 565589, 565597, 565603, 565613, 565637, 565651, 565661,
565667, 565723, 565727, 565769, 565771, 565787, 565793, 565813,
565849, 565867, 565889, 565891, 565907, 565909, 565919, 565921,
565937, 565973, 565979, 565997, 566011, 566023, 566047, 566057,
566077, 566089, 566101, 566107, 566131, 566149, 566161, 566173,
566179, 566183, 566201, 566213, 566227, 566231, 566233, 566273,
566311, 566323, 566347, 566387, 566393, 566413, 566417, 566429,
566431, 566437, 566441, 566443, 566453, 566521, 566537, 566539,
566543, 566549, 566551, 566557, 566563, 566567, 566617, 566633,
566639, 566653, 566659, 566677, 566681, 566693, 566701, 566707,
566717, 566719, 566723, 566737, 566759, 566767, 566791, 566821,
566833, 566851, 566857, 566879, 566911, 566939, 566947, 566963,
566971, 566977, 566987, 566999, 567011, 567013, 567031, 567053,
567059, 567067, 567097, 567101, 567107, 567121, 567143, 567179,
567181, 567187, 567209, 567257, 567263, 567277, 567319, 567323,
567367, 567377, 567383, 567389, 567401, 567407, 567439, 567449,
567451, 567467, 567487, 567493, 567499, 567527, 567529, 567533,
567569, 567601, 567607, 567631, 567649, 567653, 567659, 567661,
567667, 567673, 567689, 567719, 567737, 567751, 567761, 567767,
567779, 567793, 567811, 567829, 567841, 567857, 567863, 567871,
567877, 567881, 567883, 567899, 567937, 567943, 567947, 567949,
567961, 567979, 567991, 567997, 568019, 568027, 568033, 568049,
568069, 568091, 568097, 568109, 568133, 568151, 568153, 568163,
568171, 568177, 568187, 568189, 568193, 568201, 568207, 568231,
568237, 568241, 568273, 568279, 568289, 568303, 568349, 568363,
568367, 568387, 568391, 568433, 568439, 568441, 568453, 568471,
568481, 568493, 568523, 568541, 568549, 568577, 568609, 568619,
568627, 568643, 568657, 568669, 568679, 568691, 568699, 568709,
568723, 568751, 568783, 568787, 568807, 568823, 568831, 568853,
568877, 568891, 568903, 568907, 568913, 568921, 568963, 568979,
568987, 568991, 568999, 569003, 569011, 569021, 569047, 569053,
569057, 569071, 569077, 569081, 569083, 569111, 569117, 569137,
569141, 569159, 569161, 569189, 569197, 569201, 569209, 569213,
569237, 569243, 569249, 569251, 569263, 569267, 569269, 569321,
569323, 569369, 569417, 569419, 569423, 569431, 569447, 569461,
569479, 569497, 569507, 569533, 569573, 569579, 569581, 569599,
569603, 569609, 569617, 569623, 569659, 569663, 569671, 569683,
569711, 569713, 569717, 569729, 569731, 569747, 569759, 569771,
569773, 569797, 569809, 569813, 569819, 569831, 569839, 569843,
569851, 569861, 569869, 569887, 569893, 569897, 569903, 569927,
569939, 569957, 569983, 570001, 570013, 570029, 570041, 570043,
570047, 570049, 570071, 570077, 570079, 570083, 570091, 570107,
570109, 570113, 570131, 570139, 570161, 570173, 570181, 570191,
570217, 570221, 570233, 570253, 570329, 570359, 570373, 570379,
570389, 570391, 570403, 570407, 570413, 570419, 570421, 570461,
570463, 570467, 570487, 570491, 570497, 570499, 570509, 570511,
570527, 570529, 570539, 570547, 570553, 570569, 570587, 570601,
570613, 570637, 570643, 570649, 570659, 570667, 570671, 570677,
570683, 570697, 570719, 570733, 570737, 570743, 570781, 570821,
570827, 570839, 570841, 570851, 570853, 570859, 570881, 570887,
570901, 570919, 570937, 570949, 570959, 570961, 570967, 570991,
571001, 571019, 571031, 571037, 571049, 571069, 571093, 571099,
571111, 571133, 571147, 571157, 571163, 571199, 571201, 571211,
571223, 571229, 571231, 571261, 571267, 571279, 571303, 571321,
571331, 571339, 571369, 571381, 571397, 571399, 571409, 571433,
571453, 571471, 571477, 571531, 571541, 571579, 571583, 571589,
571601, 571603, 571633, 571657, 571673, 571679, 571699, 571709,
571717, 571721, 571741, 571751, 571759, 571777, 571783, 571789,
571799, 571801, 571811, 571841, 571847, 571853, 571861, 571867,
571871, 571873, 571877, 571903, 571933, 571939, 571969, 571973,
572023, 572027, 572041, 572051, 572053, 572059, 572063, 572069,
572087, 572093, 572107, 572137, 572161, 572177, 572179, 572183,
572207, 572233, 572239, 572251, 572269, 572281, 572303, 572311,
572321, 572323, 572329, 572333, 572357, 572387, 572399, 572417,
572419, 572423, 572437, 572449, 572461, 572471, 572479, 572491,
572497, 572519, 572521, 572549, 572567, 572573, 572581, 572587,
572597, 572599, 572609, 572629, 572633, 572639, 572651, 572653,
572657, 572659, 572683, 572687, 572699, 572707, 572711, 572749,
572777, 572791, 572801, 572807, 572813, 572821, 572827, 572833,
572843, 572867, 572879, 572881, 572903, 572909, 572927, 572933,
572939, 572941, 572963, 572969, 572993, 573007, 573031, 573047,
573101, 573107, 573109, 573119, 573143, 573161, 573163, 573179,
573197, 573247, 573253, 573263, 573277, 573289, 573299, 573317,
573329, 573341, 573343, 573371, 573379, 573383, 573409, 573437,
573451, 573457, 573473, 573479, 573481, 573487, 573493, 573497,
573509, 573511, 573523, 573527, 573557, 573569, 573571, 573637,
573647, 573673, 573679, 573691, 573719, 573737, 573739, 573757,
573761, 573763, 573787, 573791, 573809, 573817, 573829, 573847,
573851, 573863, 573871, 573883, 573887, 573899, 573901, 573929,
573941, 573953, 573967, 573973, 573977, 574003, 574031, 574033,
574051, 574061, 574081, 574099, 574109, 574127, 574157, 574159,
574163, 574169, 574181, 574183, 574201, 574219, 574261, 574279,
574283, 574289, 574297, 574307, 574309, 574363, 574367, 574373,
574393, 574423, 574429, 574433, 574439, 574477, 574489, 574493,
574501, 574507, 574529, 574543, 574547, 574597, 574619, 574621,
574627, 574631, 574643, 574657, 574667, 574687, 574699, 574703,
574711, 574723, 574727, 574733, 574741, 574789, 574799, 574801,
574813, 574817, 574859, 574907, 574913, 574933, 574939, 574949,
574963, 574967, 574969, 575009, 575027, 575033, 575053, 575063,
575077, 575087, 575119, 575123, 575129, 575131, 575137, 575153,
575173, 575177, 575203, 575213, 575219, 575231, 575243, 575249,
575251, 575257, 575261, 575303, 575317, 575359, 575369, 575371,
575401, 575417, 575429, 575431, 575441, 575473, 575479, 575489,
575503, 575513, 575551, 575557, 575573, 575579, 575581, 575591,
575593, 575611, 575623, 575647, 575651, 575669, 575677, 575689,
575693, 575699, 575711, 575717, 575723, 575747, 575753, 575777,
575791, 575821, 575837, 575849, 575857, 575863, 575867, 575893,
575903, 575921, 575923, 575941, 575957, 575959, 575963, 575987,
576001, 576013, 576019, 576029, 576031, 576041, 576049, 576089,
576101, 576119, 576131, 576151, 576161, 576167, 576179, 576193,
576203, 576211, 576217, 576221, 576223, 576227, 576287, 576293,
576299, 576313, 576319, 576341, 576377, 576379, 576391, 576421,
576427, 576431, 576439, 576461, 576469, 576473, 576493, 576509,
576523, 576529, 576533, 576539, 576551, 576553, 576577, 576581,
576613, 576617, 576637, 576647, 576649, 576659, 576671, 576677,
576683, 576689, 576701, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.