The file must contain the SMILES separated by new lines.
"""
with open(smiles_file, 'r') as smiles_file_fd:
for line in smiles_file_fd:
self.smiles.append(line.strip())
return
def obtain_SMILES_from_table(self, drugbankids, drugbank_smiles_file):
"""
Obtains the SMILES of a drug from an input table and stores them into a list.
"""
drugbank_smiles_df = pd.read_csv(drugbank_smiles_file, sep='\t', index_col=None)
self.smiles = set(drugbank_smiles_df.loc[drugbank_smiles_df['#drugbankid'].isin(drugbankids), 'smiles'].tolist())
return
def obtain_SMILES_from_pickle(self, smiles_pickle_file, output_file):
"""
Obtains the SMILES from an input pickle file and stores them into a list.
"""
with open(smiles_pickle_file, 'rb') as pickle_fd:
drug2smiles = pickle.load(pickle_fd)
drug_id = self.drug_name.upper()
if drug_id in drug2smiles:
self.smiles = drug2smiles[drug_id]
else:
self.smiles = set()
if len(self.smiles) > 0:
with open(output_file, 'w') as smiles_fd:
for result in self.smiles:
smiles_fd.write('{}\n'.format(result))
else:
print('No SMILES found for the drug {}.\n'.format(self.drug_name))
return
def obtain_SMILES_from_BIANA(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the SMILES from the BIANA database using the name of the drug as query.
"biana_cnx" parameter stands for the variable containing the connection to the MySQL database.
Stores the SMILES in an output file.
If there is more than one different SMILES, they are printed separated by new lines.
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
type_name_table = self.return_drug_biana_table(self.type_name) # Obtain the table containing the type of name introduced
query = (''' SELECT S.value FROM {} N, {} U1, {} U2, externalEntitySMILES S
WHERE N.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = S.externalEntityID AND N.value = %s
'''.format(type_name_table, up_table, up_table))
cursor = biana_cnx.cursor() # Start cursor to MySQL
cursor.execute(query, (self.drug_name,))
smiles = set()
for items in cursor:
for result in items:
smiles.add(result)
cursor.close()
if len(smiles) > 0:
self.smiles = list(smiles)
with open(output_file, 'w') as smiles_fd:
for result in self.smiles:
smiles_fd.write('{}\n'.format(result))
else:
print('No SMILES found for the drug {}.\n'.format(self.drug_name))
return
def obtain_ATCs_from_file(self, ATCs_file):
"""
Obtains the ATCs from an input file and stores them into a list.
The file must contain the names of the ATCs separated by new lines.
"""
with open(ATCs_file, 'r') as ATC_file_fd:
for line in ATC_file_fd:
self.ATCs.append(line.strip())
self.level_to_ATCs = obtain_ATC_levels(self.ATCs)
return
def obtain_ATCs_from_table(self, drugbankids, drugbank_atc_file):
"""
Obtains the ATCs of a drug from an input table and stores them into a list.
"""
drugbank_atc_df = pd.read_csv(drugbank_atc_file, sep='\t', index_col=None)
atcs = drugbank_atc_df.loc[drugbank_atc_df['#drugbankid'].isin(drugbankids), 'atc'].tolist()
self.ATCs = set([atc.upper() for atc in atcs])
self.level_to_ATCs = obtain_ATC_levels(self.ATCs)
return
def obtain_ATCs_from_BIANA(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the ATCs from the BIANA database using the name of the drug as query.
"biana_cnx" parameter stands for the variable containing the connection to the MySQL database.
Stores the ATCs found in an output file.
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
type_name_table = self.return_drug_biana_table(self.type_name) # Obtain the table containing the type of name introduced
query = (''' SELECT A.value FROM {} N, {} U1, {} U2, externalEntityATC A
WHERE N.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = A.externalEntityID AND N.value = %s
'''.format(type_name_table, up_table, up_table))
cursor = biana_cnx.cursor()
cursor.execute(query, (self.drug_name,))
ATCs = set()
for items in cursor:
for ATC in items:
ATCs.add(ATC.upper())
cursor.close()
if len(ATCs) > 0:
self.ATCs = list(ATCs)
with open(output_file, 'w') as ATCs_fd:
for ATC in self.ATCs:
ATCs_fd.write('{}\n'.format(ATC))
else:
print(' DIANA INFO:\tNo ATCs for the drug introduced: {}.\n'.format(self.drug_name))
return
def obtain_ATCs_from_pickle(self, atc_pickle_file, output_file):
"""
Obtains the ATCs from an input pickle file and stores them into a list.
"""
with open(atc_pickle_file, 'rb') as pickle_fd:
drug2atcs = pickle.load(pickle_fd)
drug_id = self.drug_name.upper()
if drug_id in drug2atcs:
self.ATCs = drug2atcs[drug_id]
else:
self.ATCs = set()
if len(self.ATCs) > 0:
with open(output_file, 'w') as atc_fd:
for result in self.ATCs:
atc_fd.write('{}\n'.format(result))
else:
print('No ATCs found for the drug {}.\n'.format(self.drug_name))
return
def obtain_SE_from_file(self, SE_file):
"""
Obtains the SEs from an input file and stores them into a list.
The file must contain the names of the SEs separated by new lines.
"""
with open(SE_file, 'r') as SE_file_fd:
for line in SE_file_fd:
self.SEs.append(line.strip())
return
def obtain_SE_from_table(self, drugbankids, drugbank_side_effects_file):
"""
Obtains the side effects of a drug from an input table and stores them into a list.
"""
drugbank_side_effects_df = pd.read_csv(drugbank_side_effects_file, sep='\t', index_col=None)
self.SEs = set(drugbank_side_effects_df.loc[drugbank_side_effects_df['#drugbankid'].isin(drugbankids), 'umls_id'].tolist())
return
def obtain_SE_from_BIANA(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the SEs from the BIANA database using the name of the drug as query.
"biana_cnx" parameter stands for the variable containing the connection to the MySQL database.
Stores the SE found in an output file.
"""
target_type_id_table = self.return_targets_biana_table(self.target_type_id)
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
query = (''' SELECT UD.value FROM {} D, {} U1, {} U2, externalEntityPubChemCompound PC, externalEntityRelationParticipant P1, externalEntityRelationParticipant P2, externalEntityRelation R, externalEntityUMLS_diseaseID UD
WHERE D.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = PC.externalEntityID AND PC.externalEntityID = P1.externalEntityID AND D.value = %s
AND P1.externalEntityRelationID = R.externalEntityRelationID AND P2.externalEntityRelationID = R.externalEntityRelationID AND P1.externalEntityID != P2.externalEntityID AND R.type = "drug_phenotype_association"
AND P2.externalEntityID = UD.externalEntityID
'''.format(target_type_id_table, up_table, up_table))
cursor = biana_cnx.cursor()
cursor.execute(query, (self.drug_name,))
SEs = set()
for items in cursor:
for SE in items:
SEs.add(SE.upper())
cursor.close()
if len(SEs) > 0:
self.SEs = list(SEs)
with open(output_file, 'w') as SEs_fd:
for SE in self.SEs:
SEs_fd.write('{}\n'.format(SE))
else:
print(' DIANA INFO:\tNo Side Effects for the drug introduced: {}.\n'.format(self.drug_name))
return
def obtain_SE_from_pickle(self, se_pickle_file, output_file):
"""
Obtains the side effects from an input pickle file and stores them into a list.
"""
with open(se_pickle_file, 'rb') as pickle_fd:
drug2side_effects = pickle.load(pickle_fd)
drug_id = self.drug_name.upper()
if drug_id in drug2side_effects:
self.SEs = drug2side_effects[drug_id]
else:
self.SEs = set()
if len(self.SEs) > 0:
with open(output_file, 'w') as se_fd:
for result in self.SEs:
se_fd.write('{}\n'.format(result))
else:
print('No SEs found for the drug {}.\n'.format(self.drug_name))
return
class InsufficientTargets(Exception):
"""
Exception raised when the number of targets is below the required
minimum (`limit_targets`), because GUILD analyses with too few targets
are not reliable.
"""
def __init__(self, targets, limit_targets=1):
self.targets = targets
self.limit_targets = limit_targets
def __str__(self):
return 'The number of targets provided ({}) is insufficient.\nGUILD must have at least {} target(s) to run a reliable analysis.\n'.format(len(self.targets), self.limit_targets)
class DrugNameNotFound(Exception):
"""
Exception raised when the drug name is not found in BIANA.
"""
def __init__(self, drug_name, type_name):
self.drug_name = drug_name
self.type_name = type_name
def __str__(self):
return 'The drug {} (of type {}) has not been found in BIANA.\nTherefore, no targets could be found. Please introduce another name or provide the targets of the drug.\n'.format(self.drug_name, self.type_name)
class IncorrectTypeID(Exception):
"""
Exception raised when the type of protein IDs provided is not admitted
by the program.
"""
def __init__(self, target_type_id, target_type_id_to_table):
self.target_type_id = target_type_id
self.target_type_id_to_table = target_type_id_to_table
def __str__(self):
return 'The type of protein IDs provided ({}) is not admitted.\nThe ID types admitted in DIANA are: {}\n'.format(self.target_type_id, ', '.join(self.target_type_id_to_table.keys()))
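# Hedged usage sketch (illustrative, not part of the original module;
# `targets` and `limit_targets` are the caller's values): how code running
# GUILD might guard against too few targets with the exception above.
def _check_enough_targets(targets, limit_targets=1):
    if len(targets) < limit_targets:
        raise InsufficientTargets(targets, limit_targets=limit_targets)
    return targets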
def generate_diana_id(drug_name, targets, network_name):
"""
Generates an ID for the drug using the drug name, the sorted targets and
the network name.
"""
id_str = 'diana_'
drug_str = ''.join(drug_name.split()) # Obtain the drug name, and remove any whitespace in the name
id_str += drug_str.lower() # Add the drug name in the ID string
targets = [str(x) for x in targets] # Transform the targets to strings
targets_str = ''.join(sorted(targets)) # Join the targets in one string
id_str += targets_str.lower() # Add the targets in the ID string
id_str += network_name.lower() # Add the network name in the ID string
id_str=id_str.encode('utf-8') # Encode it by utf-8
m = hashlib.md5()
m.update(id_str) # Introduce the string in the hashlib instance
unique_id = m.hexdigest()[:12] # Obtain a unique ID from the string. Only get the first 12 characters
return unique_id
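# Hedged usage sketch (illustrative; the drug, targets and network name are
# made up): the generated ID is deterministic and independent of target
# order, since the targets are sorted before hashing.
def _demo_generate_diana_id():
    id_a = generate_diana_id('Aspirin', ['P23219', 'P35354'], 'network')
    id_b = generate_diana_id('Aspirin', ['P35354', 'P23219'], 'network')
    assert id_a == id_b
    assert len(id_a) == 12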
def old_generate_drug_id(drug_name, targets, network_name):
"""
Generates an ID for the drug using the drug name, the sorted targets and
the network name.
"""
id_str = ''
drug_str = ''.join(drug_name.split()) # Obtain the drug name, and remove any whitespace in the name
id_str += drug_str.lower() # Add the drug name in the ID string
targets = [str(x) for x in targets] # Transform the targets to strings
targets_str = ''.join(sorted(targets)) # Join the targets in one string
id_str += targets_str.lower() # Add the targets in the ID string
id_str += network_name.lower() # Add the network name in the ID string
id_str=id_str.encode('utf-8') # Encode it by utf-8
m = hashlib.md5()
m.update(id_str) # Introduce the string in the hashlib instance
unique_id = m.hexdigest()[:12] # Obtain a unique ID from the string. Only get the first 12 characters
return unique_id
along each of the leading diagonals.
1=lst, 2=days, 3=triads.
'dlst' [scalar] LST interval (in mins) or difference between LST
pairs which will be determined and used for
cross-power spectrum. Will only apply if values under
'axes' contains the LST axis(=1).
'dlst_range'
[scalar, numpy array, or NoneType] Specifies the LST
difference(s) in minutes that are to be used in the
computation of cross-power spectra. If a scalar, only
the diagonal consisting of pairs with that LST
difference will be computed. If a numpy array, those
diagonals consisting of pairs with that LST difference
will be computed. If set to None (default), the main
diagonal (LST difference of 0) and the first off-main
diagonal (LST difference of 1 unit) corresponding to
pairs with 0 and 1 unit LST difference are computed.
Applies only if key 'axes' contains LST axis (=1).
'avgcov'
[boolean] It specifies if the collapsed square
covariance matrix is to be reduced further to a single
number after applying 'postX' weights. If not set or
set to False (default), this late-stage collapse will
not be performed. Otherwise, a weighted average is
computed, where the 'postX' weights will already have
been applied during the collapsing operation.
'wts' [NoneType or Dictionary] If not set, a default
dictionary (see default values below) will be created.
It must have the following keys and values:
'preX' [list of numpy arrays] It contains pre-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'preX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
is done after the application of 'preX' weights. If
set to True, the delay spectrum will be
normalized by the sum of the weights.
'postX' [list of numpy arrays] It contains post-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'postX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'postXnorm'
[boolean] If False (default), no normalization
is done after the application of postX weights.
If set to True, the delay cross power spectrum
will be normalized by the sum of the weights.
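For illustration, a plausible 'xinfo' specification consistent
with the description above (the values are examples, not
defaults, and NP is assumed to be the numpy import alias):

    xinfo = {'axes': [1, 3], 'dlst': 10.0, 'dlst_range': None,
             'avgcov': False,
             'wts': {'preX': [NP.ones(1), NP.ones(1)],
                     'preXnorm': False,
                     'postX': [NP.ones(1), NP.ones(1)],
                     'postXnorm': False}}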
cosmo [instance of cosmology class from astropy] An instance of class
FLRW or default_cosmology of astropy cosmology module. Default
uses Planck 2015 cosmology, with H0=100 h km/s/Mpc
units [string] Specifies the units of the output power spectrum. Accepted
values are 'Jy' and 'K' (default), and the power spectrum will
be in corresponding squared units.
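For instance, the default cosmology described above could be
constructed as follows (a sketch using astropy; clone() adjusts
H0 to 100 km/s/Mpc so that distances come out in h-units):

    from astropy.cosmology import Planck15
    cosmo = Planck15.clone(H0=100.0)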
Output:
Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array),
'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday'
((ndays,) array), 'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function FT().
Values under keys 'triads_ind' and 'lst_ind' are numpy array
corresponding to triad and time indices used in selecting the data.
Values under keys 'oversampled' and 'resampled' each contain a
dictionary with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to
'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
each of which is a dictionary. 'whole' contains power spectrum info
about the input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure phase) from
the 'whole' model. 'residual' contains power spectrum info about the
closure phases obtained as a difference between 'whole' and 'submodel'.
It contains the following keys and values:
'mean' [numpy array] Delay power spectrum incoherently estimated over
the axes specified in xinfo['axes'] using the 'mean' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged over
the axes specified in incohax using the 'median' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets for
those axes. If 'avgcov' was set, those entries will be removed
from 'diagoffsets' since all the leading diagonal elements have
been collapsed (averaged) further. Value under each key is a
numpy array where each element in the array corresponds to the
index of that leading diagonal. This should match the size of
the output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in collapse_axes and
the value is a numpy array of weights corresponding to the
diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated but
#!/usr/bin/env python3
import calendar
import json
import os
import pickle
import platform
import sys
import unittest
import warnings
import yaml
from io import StringIO
from contextlib import redirect_stderr, redirect_stdout
from collections import OrderedDict
from random import randint, shuffle
from jsonargparse import (
ActionConfigFile,
ActionJsonnet,
ActionJsonSchema,
ActionParser,
ActionYesNo,
ArgumentParser,
Namespace,
ParserError,
Path,
set_config_read_mode,
SUPPRESS,
strip_meta,
usage_and_exit_error_handler,
)
from jsonargparse.namespace import meta_keys
from jsonargparse.optionals import (
docstring_parser_support,
dump_preserve_order_support,
fsspec_support,
jsonnet_support,
jsonschema_support,
ruyaml_support,
url_support,
)
from jsonargparse.typing import NotEmptyStr, Path_fc, Path_fr, PositiveFloat, PositiveInt
from jsonargparse.util import CaptureParserException, capture_parser, DebugException
from jsonargparse_tests.base import TempDirTestCase, responses, responses_activate
def example_parser():
"""Creates a simple parser for doing tests."""
parser = ArgumentParser(prog='app', default_meta=False, error_handler=None)
group_one = parser.add_argument_group('Group 1', name='group1')
group_one.add_argument('--bools.def_false',
default=False,
nargs='?',
action=ActionYesNo)
group_one.add_argument('--bools.def_true',
default=True,
nargs='?',
action=ActionYesNo)
group_two = parser.add_argument_group('Group 2', name='group2')
group_two.add_argument('--lev1.lev2.opt1',
default='opt1_def')
group_two.add_argument('--lev1.lev2.opt2',
default='opt2_def')
group_three = parser.add_argument_group('Group 3')
group_three.add_argument('--nums.val1',
type=int,
default=1)
group_three.add_argument('--nums.val2',
type=float,
default=2.0)
return parser
example_yaml = '''
lev1:
lev2:
opt1: opt1_yaml
opt2: opt2_yaml
nums:
val1: -1
'''
example_env = {
'APP_LEV1__LEV2__OPT1': 'opt1_env',
'APP_NUMS__VAL1': '0'
}
class ParsersTests(TempDirTestCase):
def test_parse_args(self):
parser = example_parser()
self.assertEqual('opt1_arg', parser.parse_args(['--lev1.lev2.opt1', 'opt1_arg']).lev1.lev2.opt1)
self.assertEqual(9, parser.parse_args(['--nums.val1', '9']).nums.val1)
self.assertEqual(6.4, parser.parse_args(['--nums.val2', '6.4']).nums.val2)
self.assertRaises(ParserError, lambda: parser.parse_args(['--nums.val1', '7.5']))
self.assertRaises(ParserError, lambda: parser.parse_args(['--nums.val2', 'eight']))
self.assertEqual(9, parser.parse_args(['--nums.val1', '9'])['nums.val1'])
def test_parse_object(self):
parser = example_parser()
base = Namespace(**{'nums.val2': 3.4})
cfg = parser.parse_object(yaml.safe_load(example_yaml), cfg_base=base)
self.assertEqual('opt1_yaml', cfg.lev1.lev2.opt1)
self.assertEqual('opt2_yaml', cfg.lev1.lev2.opt2)
self.assertEqual(-1, cfg.nums.val1)
self.assertEqual(3.4, cfg.nums.val2)
self.assertEqual(False, cfg.bools.def_false)
self.assertEqual(True, cfg.bools.def_true)
self.assertRaises(ParserError, lambda: parser.parse_object({'undefined': True}))
def test_parse_env(self):
parser = example_parser()
cfg = parser.parse_env(example_env)
self.assertEqual('opt1_env', cfg.lev1.lev2.opt1)
self.assertEqual(0, cfg.nums.val1)
cfg = parser.parse_env(example_env, defaults=False)
self.assertFalse(hasattr(cfg, 'bools'))
self.assertTrue(hasattr(cfg, 'nums'))
parser.add_argument('--cfg', action=ActionConfigFile)
env = OrderedDict(example_env)
env['APP_NUMS__VAL1'] = '"""'
self.assertRaises(ParserError, lambda: parser.parse_env(env))
env = OrderedDict(example_env)
env['APP_CFG'] = '{"nums": {"val1": 1}}'
self.assertEqual(0, parser.parse_env(env).nums.val1)
parser.add_argument('req', nargs='+')
env['APP_REQ'] = 'abc'
self.assertEqual(['abc'], parser.parse_env(env).req)
env['APP_REQ'] = '["abc", "xyz"]'
self.assertEqual(['abc', 'xyz'], parser.parse_env(env).req)
env['APP_REQ'] = '[""","""]'
self.assertEqual(['[""","""]'], parser.parse_env(env).req)
with self.assertRaises(ValueError):
parser.default_env = 'invalid'
with self.assertRaises(ValueError):
parser.env_prefix = lambda: 'invalid'
def test_default_env(self):
parser = ArgumentParser()
self.assertFalse(parser.default_env)
parser.default_env = True
self.assertTrue(parser.default_env)
parser = ArgumentParser(default_env=True)
self.assertTrue(parser.default_env)
parser.default_env = False
self.assertFalse(parser.default_env)
def test_parse_string(self):
parser = example_parser()
cfg1 = parser.parse_string(example_yaml)
self.assertEqual('opt1_yaml', cfg1.lev1.lev2.opt1)
self.assertEqual('opt2_yaml', cfg1.lev1.lev2.opt2)
self.assertEqual(-1, cfg1.nums.val1)
self.assertEqual(2.0, cfg1.nums.val2)
self.assertEqual(False, cfg1.bools.def_false)
self.assertEqual(True, cfg1.bools.def_true)
cfg2 = parser.parse_string(example_yaml, defaults=False)
self.assertFalse(hasattr(cfg2, 'bools'))
self.assertTrue(hasattr(cfg2, 'nums'))
self.assertRaises(ParserError, lambda: parser.parse_string('"""'))
def test_parse_path(self):
parser = example_parser()
cfg1 = parser.parse_string(example_yaml)
cfg2 = parser.parse_string(example_yaml, defaults=False)
yaml_file = os.path.realpath(os.path.join(self.tmpdir, 'example.yaml'))
with open(yaml_file, 'w') as output_file:
output_file.write(example_yaml)
self.assertEqual(cfg1, parser.parse_path(yaml_file, defaults=True))
self.assertEqual(cfg2, parser.parse_path(yaml_file, defaults=False))
self.assertNotEqual(cfg2, parser.parse_path(yaml_file, defaults=True))
self.assertNotEqual(cfg1, parser.parse_path(yaml_file, defaults=False))
with open(yaml_file, 'w') as output_file:
output_file.write(example_yaml+' val2: eight\n')
self.assertRaises(ParserError, lambda: parser.parse_path(yaml_file))
with open(yaml_file, 'w') as output_file:
output_file.write(example_yaml+' val3: key_not_defined\n')
self.assertRaises(ParserError, lambda: parser.parse_path(yaml_file))
def test_cfg_base(self):
parser = ArgumentParser()
parser.add_argument('--op1')
parser.add_argument('--op2')
cfg = parser.parse_args(['--op1=abc'], Namespace(op2='xyz'))
self.assertEqual('abc', cfg.op1)
self.assertEqual('xyz', cfg.op2)
def test_precedence_of_sources(self):
input1_config_file = os.path.realpath(os.path.join(self.tmpdir, 'input1.yaml'))
input2_config_file = os.path.realpath(os.path.join(self.tmpdir, 'input2.yaml'))
default_config_file = os.path.realpath(os.path.join(self.tmpdir, 'default.yaml'))
parser = ArgumentParser(prog='app',
default_env=True,
default_config_files=[default_config_file])
parser.add_argument('--op1', default='from parser default')
parser.add_argument('--op2')
parser.add_argument('--cfg', action=ActionConfigFile)
with open(input1_config_file, 'w') as output_file:
output_file.write('op1: from input config file')
with open(input2_config_file, 'w') as output_file:
output_file.write('op2: unused')
## check parse_env precedence ##
self.assertEqual('from parser default', parser.parse_env().op1)
with open(default_config_file, 'w') as output_file:
output_file.write('op1: from default config file')
self.assertEqual('from default config file', parser.parse_env().op1)
env = {'APP_CFG': '{"op1": "from env config"}'}
self.assertEqual('from env config', parser.parse_env(env).op1)
env['APP_OP1'] = 'from env var'
self.assertEqual('from env var', parser.parse_env(env).op1)
## check parse_path precedence ##
os.remove(default_config_file)
for key in [k for k in ['APP_CFG', 'APP_OP1'] if k in os.environ]:
del os.environ[key]
self.assertEqual('from parser default', parser.parse_path(input2_config_file).op1)
with open(default_config_file, 'w') as output_file:
output_file.write('op1: from default config file')
self.assertEqual('from default config file', parser.parse_path(input2_config_file).op1)
os.environ['APP_CFG'] = input1_config_file
self.assertEqual('from input config file', parser.parse_path(input2_config_file).op1)
os.environ['APP_OP1'] = 'from env var'
self.assertEqual('from env var', parser.parse_path(input2_config_file).op1)
os.environ['APP_CFG'] = input2_config_file
self.assertEqual('from input config file', parser.parse_path(input1_config_file).op1)
## check parse_args precedence ##
os.remove(default_config_file)
for key in ['APP_CFG', 'APP_OP1']:
del os.environ[key]
self.assertEqual('from parser default', parser.parse_args([]).op1)
with open(default_config_file, 'w') as output_file:
output_file.write('op1: from default config file')
self.assertEqual('from default config file', parser.parse_args([]).op1)
os.environ['APP_CFG'] = input1_config_file
self.assertEqual('from input config file', parser.parse_args([]).op1)
os.environ['APP_OP1'] = 'from env var'
self.assertEqual('from env var', parser.parse_args([]).op1)
os.environ['APP_CFG'] = input2_config_file
self.assertEqual('from arg', parser.parse_args(['--op1', 'from arg']).op1)
self.assertEqual('from arg', parser.parse_args(['--cfg', input1_config_file, '--op1', 'from arg']).op1)
self.assertEqual('from input config file', parser.parse_args(['--op1', 'from arg', '--cfg', input1_config_file]).op1)
cfg = parser.parse_args(['--cfg', input1_config_file])
cfg_list = parser.get_config_files(cfg)
self.assertEqual(default_config_file, str(cfg_list[0]))
self.assertEqual(input1_config_file, str(cfg_list[1]))
for key in ['APP_CFG', 'APP_OP1']:
del os.environ[key]
class ArgumentFeaturesTests(unittest.TestCase):
def test_positionals(self):
parser = ArgumentParser(error_handler=None)
parser.add_argument('pos1')
parser.add_argument('pos2', nargs='?')
self.assertRaises(ParserError, lambda: parser.parse_args([]))
self.assertIsNone(parser.parse_args(['v1']).pos2)
self.assertEqual('v1', parser.parse_args(['v1']).pos1)
self.assertEqual('v2', parser.parse_args(['v1', 'v2']).pos2)
parser = ArgumentParser(error_handler=None)
parser.add_argument('pos1')
parser.add_argument('pos2', nargs='+')
self.assertRaises(ParserError, lambda: parser.parse_args(['v1']).pos2)
self.assertEqual(['v2', 'v3'], parser.parse_args(['v1', 'v2', 'v3']).pos2)
parser.add_argument('--opt')
parser.add_argument('--cfg',
action=ActionConfigFile)
cfg = parser.parse_args(['--cfg', '{"pos2": ["v2", "v3"], "opt": "v4"}', 'v1'])
self.assertEqual('v1', cfg.pos1)
self.assertEqual(['v2', 'v3'], cfg.pos2)
self.assertEqual('v4', cfg.opt)
def test_required(self):
parser = ArgumentParser(env_prefix='APP', error_handler=None)
group = parser.add_argument_group('Group 1')
group.add_argument('--req1', required=True)
parser.add_argument('--lev1.req2', required=True)
cfg = parser.parse_args(['--req1', 'val1', '--lev1.req2', 'val2'])
self.assertEqual('val1', cfg.req1)
self.assertEqual('val2', cfg.lev1.req2)
cfg = parser.parse_string('{"req1":"val3","lev1":{"req2":"val4"}}')
self.assertEqual('val3', cfg.req1)
self.assertEqual('val4', cfg.lev1.req2)
cfg = parser.parse_env({'APP_REQ1': 'val5', 'APP_LEV1__REQ2': 'val6'})
self.assertEqual('val5', cfg.req1)
self.assertEqual('val6', cfg.lev1.req2)
self.assertRaises(ParserError, lambda: parser.parse_args(['--req1', 'val1']))
self.assertRaises(ParserError, lambda: parser.parse_string('{"lev1":{"req2":"val4"}}'))
self.assertRaises(ParserError, lambda: parser.parse_env({}))
parser = ArgumentParser(default_env=True)
parser.add_argument('--req1', required=True)
parser.add_argument('--cfg', action=ActionConfigFile)
cfg = parser.parse_args(['--cfg', '{"req1": "val1"}'])
self.assertEqual('val1', cfg.req1)
def test_choices(self):
parser = ArgumentParser(error_handler=None)
parser.add_argument('--ch1',
choices='ABC')
parser.add_argument('--ch2',
choices=['v1', 'v2'])
cfg = parser.parse_args(['--ch1', 'C', '--ch2', 'v1'])
self.assertEqual(cfg.as_dict(), {'ch1': 'C', 'ch2': 'v1'})
self.assertRaises(ParserError, lambda: parser.parse_args(['--ch1', 'D']))
self.assertRaises(ParserError, lambda: parser.parse_args(['--ch2', 'v0']))
def test_nargs(self):
parser = ArgumentParser(error_handler=None)
parser.add_argument('--val', nargs='+', type=int)
self.assertEqual([9], parser.parse_args(['--val', '9']).val)
self.assertEqual([3, 6, 2], parser.parse_args(['--val', '3', '6', '2']).val)
self.assertRaises(ParserError, lambda: parser.parse_args(['--val']))
parser = ArgumentParser()
parser.add_argument('--val', nargs='*', type=float)
self.assertEqual([5.2, 1.9], parser.parse_args(['--val', '5.2', '1.9']).val)
self.assertEqual([], parser.parse_args(['--val']).val)
parser = ArgumentParser()
parser.add_argument('--val', nargs='?', type=str)
self.assertEqual('~', parser.parse_args(['--val', '~']).val)
self.assertEqual(None, parser.parse_args(['--val']).val)
parser = ArgumentParser()
parser.add_argument('--val', nargs=2)
self.assertEqual(['q', 'p'], parser.parse_args(['--val', 'q', 'p']).val)
parser = ArgumentParser()
parser.add_argument('--val', nargs=1)
self.assertEqual(['-'], parser.parse_args(['--val', '-']).val)
class AdvancedFeaturesTests(unittest.TestCase):
def test_link_arguments(self):
parser = ArgumentParser(error_handler=None)
parser.add_argument('--a.v1', default=2)
parser.add_argument('--a.v2', type=int, default=3)
parser.add_argument('--b.v2', type=int, default=4)
def a_prod(a):
return a['v1'] * a['v2']
parser.link_arguments('a', 'b.v2', a_prod)
cfg = parser.parse_args(['--a.v2=-5'])
self.assertEqual(cfg.b.v2, cfg.a.v1*cfg.a.v2)
self.assertRaises(ParserError, lambda: parser.parse_args(['--a.v1=x']))
dump = yaml.safe_load(parser.dump(cfg))
self.assertEqual(dump, {'a': {'v1': 2, 'v2': -5}})
def test_subcommands(self):
parser_a = ArgumentParser(error_handler=None)
parser_a.add_argument('ap1')
parser_a.add_argument('--ao1',
default='ao1_def')
parser = ArgumentParser(prog='app', error_handler=None)
parser.add_argument('--o1',
default='o1_def')
subcommands = parser.add_subcommands()
subcommands.add_subcommand('a', parser_a)
subcommands.add_subcommand('b', example_parser(),
aliases=['B'],
help='b help')
self.assertRaises(NotImplementedError, lambda: parser.add_subparsers())
self.assertRaises(NotImplementedError, lambda: subcommands.add_parser(''))
self.assertRaises(ParserError, lambda: parser.parse_args(['c']))
cfg = parser.get_defaults().as_dict()
self.assertEqual(cfg, {'o1': 'o1_def', 'subcommand': None})
parser.add_argument('--cfg', action=ActionConfigFile)
cfg = parser.parse_args(['--cfg={"o1": "o1_arg"}', 'a', 'ap1_arg']).as_dict()
self.assertEqual(cfg, {'a': {'ao1': 'ao1_def', 'ap1': 'ap1_arg'}, 'cfg': [None], 'o1': 'o1_arg', 'subcommand': 'a'})
cfg = parser.parse_args(['--o1', 'o1_arg', 'a', 'ap1_arg'])
self.assertEqual(cfg['o1'], 'o1_arg')
self.assertEqual(cfg['subcommand'], 'a')
self.assertEqual(cfg['a'].as_dict(), {'ap1': 'ap1_arg', 'ao1': 'ao1_def'})
cfg = parser.parse_args(['a', 'ap1_arg', '--ao1', 'ao1_arg'])
self.assertEqual(cfg['a'].as_dict(), {'ap1': 'ap1_arg', 'ao1': 'ao1_arg'})
self.assertRaises(KeyError, lambda: cfg['b'])
cfg = parser.parse_args(['b', '--lev1.lev2.opt2', 'opt2_arg']).as_dict()
cfg_def = example_parser().get_defaults().as_dict()
cfg_def['lev1']['lev2']['opt2'] = 'opt2_arg'
self.assertEqual(cfg['o1'], 'o1_def')
self.assertEqual(cfg['subcommand'], 'b')
self.assertEqual(cfg['b'], cfg_def)
self.assertRaises(KeyError, lambda: cfg['a'])
parser.parse_args(['B'])
self.assertRaises(ParserError, lambda: parser.parse_args(['A']))
self.assertRaises(ParserError, lambda: parser.parse_args())
self.assertRaises(ParserError, lambda: parser.parse_args(['a']))
self.assertRaises(ParserError, lambda: parser.parse_args(['b', '--unk']))
self.assertRaises(ParserError, lambda: parser.parse_args(['c']))
cfg = parser.parse_string('{"a": {"ap1": "ap1_cfg"}}').as_dict()
self.assertEqual(cfg['subcommand'], 'a')
self.assertEqual(cfg['a'], {'ap1': 'ap1_cfg', 'ao1': 'ao1_def'})
self.assertRaises(ParserError, lambda: parser.parse_string('{"a": {"ap1": "ap1_cfg", "unk": "unk_cfg"}}'))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cfg = parser.parse_string('{"a": {"ap1": "ap1_cfg"}, "b": {"nums": {"val1": 2}}}')
self.assertEqual(cfg.subcommand, 'a')
self.assertFalse(hasattr(cfg, 'b'))
self.assertEqual(len(w), 1)
self.assertIn('Subcommand "a" will be used', str(w[0].message))
cfg = parser.parse_string('{"subcommand": "b", "a": {"ap1": "ap1_cfg"}, "b": {"nums": {"val1": 2}}}')
self.assertFalse(hasattr(cfg, 'a'))
cfg = parser.parse_args(['--cfg={"a": {"ap1": "ap1_cfg"}, "b": {"nums": {"val1": 2}}}', 'a'])
cfg = cfg.as_dict()
self.assertEqual(cfg, {'o1': 'o1_def', 'subcommand': 'a', 'cfg': [None], 'a': {'ap1': 'ap1_cfg', 'ao1': 'ao1_def'}})
cfg = parser.parse_args(['--cfg={"a": {"ap1": "ap1_cfg"}, "b": {"nums": {"val1": 2}}}', 'b'])
self.assertFalse(hasattr(cfg, 'a'))
self.assertTrue(hasattr(cfg, 'b'))
os.environ['APP_O1'] = 'o1_env'
os.environ['APP_A__AP1'] = 'ap1_env'
os.environ['APP_A__AO1'] = 'ao1_env'
os.environ['APP_B__LEV1__LEV2__OPT2'] = 'opt2_env'
cfg = parser.parse_args(['a'], env=True).as_dict()
self.assertEqual(cfg['o1'], 'o1_env')
self.assertEqual(cfg['subcommand'], 'a')
self.assertEqual(cfg['a'], {'ap1': 'ap1_env', 'ao1': 'ao1_env'})
parser.default_env = True
cfg = parser.parse_args(['b']).as_dict()
cfg_def['lev1']['lev2']['opt2'] = 'opt2_env'
self.assertEqual(cfg['subcommand'], 'b')
self.assertEqual(cfg['b'], cfg_def)
os.environ['APP_SUBCOMMAND'] = 'a'
cfg = parser.parse_env().as_dict()
self.assertEqual(cfg['o1'], 'o1_env')
self.assertEqual(cfg['subcommand'], 'a')
self.assertEqual(cfg['a'], {'ap1': 'ap1_env', 'ao1': 'ao1_env'})
for key in ['APP_O1', 'APP_A__AP1', 'APP_A__AO1', 'APP_B__LEV1__LEV2__OPT2', 'APP_SUBCOMMAND']:
del os.environ[key]
def test_subsubcommands(self):
parser_s1_a = ArgumentParser(error_handler=None)
parser_s1_a.add_argument('--os1a',
default='os1a_def')
parser_s2_b = ArgumentParser(error_handler=None)
parser_s2_b.add_argument('--os2b',
default='os2b_def')
parser = ArgumentParser(prog='app', error_handler=None, default_meta=False)
subcommands1 = parser.add_subcommands()
subcommands1.add_subcommand('a', parser_s1_a)
subcommands2 = parser_s1_a.add_subcommands()
subcommands2.add_subcommand('b', parser_s2_b)
self.assertRaises(ParserError, lambda: parser.parse_args([]))
self.assertRaises(ParserError, lambda: parser.parse_args(['a']))
cfg = parser.parse_args(['a', 'b']).as_dict()
self.assertEqual(cfg, {'subcommand': 'a', 'a': {'subcommand': 'b', 'os1a': 'os1a_def', 'b': {'os2b': 'os2b_def'}}})
cfg = parser.parse_args(['a', '--os1a=os1a_arg', 'b']).as_dict()
self.assertEqual(cfg, {'subcommand': 'a', 'a': {'subcommand': 'b', 'os1a': 'os1a_arg', 'b': {'os2b': 'os2b_def'}}})
cfg = parser.parse_args(['a', 'b', '--os2b=os2b_arg']).as_dict()
self.assertEqual(cfg, {'subcommand': 'a', 'a': {'subcommand': 'b', 'os1a': 'os1a_def', 'b': {'os2b': 'os2b_arg'}}})
def test_subsubcommands_bad_order(self):
parser_s1_a = ArgumentParser()
parser_s2_b = ArgumentParser()
parser = ArgumentParser()
subcommands2 = parser_s1_a.add_subcommands()
subcommands2.add_subcommand('b', parser_s2_b)
subcommands1 = parser.add_subcommands()
self.assertRaises(ValueError, lambda: subcommands1.add_subcommand('a', parser_s1_a))
def test_optional_subcommand(self):
parser = ArgumentParser(error_handler=None)
subcommands = parser.add_subcommands(required=False)
subparser = ArgumentParser()
subcommands.add_subcommand('foo', subparser)
cfg = parser.parse_args([])
self.assertEqual(cfg, Namespace(subcommand=None))
def test_subcommand_print_config_default_env_issue_126(self):
"""Input and output (IO) of information from and to the persistence layer.
Currently, input and output of both datasets and recipes can be handled.
Datasets
========
Both data and metadata contained in datasets as well as the information
stored in recipes for recipe-driven data analysis can be read and written.
For datasets, two generic classes are provided:
* :class:`aspecd.io.DatasetImporter`
* :class:`aspecd.io.DatasetExporter`
As the name says, these classes should be used to implement import and
export functionality for your own purposes in applications derived from the
ASpecD framework.
Generally, both import and export should be handled via the respective
methods of the :class:`aspecd.dataset.Dataset` class, thus first
instantiating an object of that class and an appropriate importer or
exporter, and afterwards only operating on the dataset using its methods.
In its most generic form, this may look something like:
.. code-block::
dataset = aspecd.dataset.Dataset()
importer = aspecd.io.DatasetImporter(source="/path/to/your/data")
dataset.import_from(importer)
Similarly, you would handle the export of your data (and metadata)
contained in a dataset object using an exporter object, respectively.
.. code-block::
dataset = aspecd.dataset.Dataset()
exporter = aspecd.io.DatasetExporter(target="/path/to/destination")
dataset.export_to(exporter)
However, if you use :ref:`recipe-driven data analysis <recipes>`, things
become much simpler:
* Imports will be automatically taken care of.
* Exports can be specified as simple task.
A simple example of a recipe only loading datasets and afterwards exporting
them could look like this:
.. code-block:: yaml
datasets:
- /path/to/first/dataset
- /path/to/second/dataset
tasks:
- kind: export
type: AdfExporter
properties:
target:
- dataset1
- dataset2
What is happening here? Two datasets are imported, and afterwards exported
to the ASpecD Dataset Format (ADF) using the :class:`aspecd.io.AdfExporter`.
Another frequent use case, although one that admittedly pretty much opposes
the whole idea of the ASpecD framework in terms of reproducibility and
traceability: Your collaboration partners require you to provide them with
raw data they can import into their favourite program for creating plots.
The only viable way: export to plain text (ouch!) - saying good-bye to all
your metadata and history:
.. code-block:: yaml
datasets:
- /path/to/first/cool/dataset
- /path/to/second/cool/dataset
- /path/to/another/cool/dataset
tasks:
- kind: export
type: TxtExporter
properties:
target:
- cool-dataset1
- cool-dataset2
- cool-dataset3
In this case, you can as well add whatever processing necessary to your
datasets before exporting them, and you see that recipes come in quite handy
here.
Importers for specific file formats
-----------------------------------
There exists a series of importers for specific file formats:
* :class:`aspecd.io.AdfImporter`
Importer for data in ASpecD Dataset Format (ADF)
* :class:`aspecd.io.AsdfImporter`
Importer for data in asdf format
* :class:`aspecd.io.TxtImporter`
Importer for data in plain text format
For details, see the respective class documentation.
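As a quick orientation, using one of these importers follows the generic
pattern shown above; for instance, for a plain text file (the file name
being just an example):

.. code-block::

    dataset = aspecd.dataset.Dataset()
    importer = aspecd.io.TxtImporter(source="/path/to/your/data.txt")
    dataset.import_from(importer)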
Exporters for specific file formats
-----------------------------------
Datasets need to be persisted sometimes, and currently, there exist two
exporters for specific file formats whose output can be imported again
using the respective importers, with the full information contained in a
dataset being retained.
* :class:`aspecd.io.AdfExporter`
Exporter for datasets to ASpecD Dataset Format (ADF)
* :class:`aspecd.io.AsdfExporter`
Exporter for datasets to asdf format
For details, see the respective class documentation.
A bit of a special case is the exporter to plain text files, as this file
format does *not* preserve the metadata stored within the dataset and should
only be used as last resort:
* :class:`aspecd.io.TxtExporter`
Exporter for data to plain text format
.. warning::
All metadata contained within a dataset (including the full history)
are lost when exporting to plain text. Therefore, using this
exporter will usually result in you losing reproducibility. Hence,
better think twice before using this exporter, and use it entirely at
your own risk and only if you *really* know what you are doing (and
why).
Writing importers for data
--------------------------
When writing importer classes for your own data, there are a number of
pitfalls, some of which shall be described here together with solutions and
"best practices".
Dimensions of data
~~~~~~~~~~~~~~~~~~
Usually, we assign axes in the order *x*, *y*, *z*, and assume the *x* axis
to be the horizontal axis in a plot. However, numpy (as well as other
software), follows a different convention, with the first index referring to
the *row* of your matrix, the second index to the *column*. That boils down
to having the first index correspond to the *y* axis, and the second index
referring to the *x* axis.
As long as your data are one-dimensional, resulting in two axes objects in
your dataset, everything is fine, and the second axis will have no values.
However, if your data to be imported are two-dimensional, your first
dimension will be the index of rows (along a column), hence the *y* axis,
and the second dimension the index of your columns (along a row), *i.e.* the
*x* axis. This is perfectly fine, and it is equally fine to revert this
order, as long as you ensure your axis objects to be consistent with the
dimensions of your data.
If you assign numeric data to the :attr:`aspecd.dataset.Data.data` property,
the corresponding axes values will initially be set to the indices of the
data points along the corresponding dimension, with the first axis (index 0)
corresponding to the first dimension (row indices along a column) and
similar for each of the following dimensions of your data. Note that there
will always be one axis more than dimensions of your data. This last axis
will not have values, and usually its quantity is something like "intensity".
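To make this concrete, here is a short sketch (the array shape is chosen
arbitrarily):

.. code-block::

    import numpy as np

    import aspecd.dataset

    dataset = aspecd.dataset.Dataset()
    # 5 rows (first index, *y* axis), 100 columns (second index, *x* axis)
    dataset.data.data = np.random.random([5, 100])
    # Three axes objects result: axes[0] with 5 values, axes[1] with
    # 100 values, and a last axis (axes[2]) without values.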
Backup of the data
~~~~~~~~~~~~~~~~~~
One essential concept of the ASpecD dataset is to store the original data
together with their axes in a separate, non-public property. This is done
automatically by the importer after calling out to its non-public method
:meth:`aspecd.io.DatasetImporter._import`. Hence, usually you need not take
care of this at all.
Handling of metadata
~~~~~~~~~~~~~~~~~~~~
Data without information about these data are usually pretty useless. Hence,
an ASpecD dataset is always a unit of numerical data and corresponding
metadata. While you will need to come up with your own structure for
metadata of your datasets and create a hierarchy of classes derived from
:class:`aspecd.metadata.DatasetMetadata`, your importers need to ensure that
these metadata are populated respectively. Of course, which metadata can be
populated depends strongly on the file format you are about to import.
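Putting the above together, a minimal sketch of an importer for a plain
text format could look as follows (the metadata field used here is just
an example):

.. code-block::

    import numpy as np

    import aspecd.io


    class MyTxtImporter(aspecd.io.DatasetImporter):

        def _import(self):
            # Populate the numerical data; the backup of the original
            # data is taken care of automatically afterwards.
            self.dataset.data.data = np.loadtxt(self.source)
            # Populate the metadata as far as the file format allows.
            self.dataset.metadata.from_dict(
                {'measurement': {'purpose': 'demonstration'}})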
Handling different file formats for importing data
--------------------------------------------------
Often, data are available in different formats, and deciding which importer
is appropriate for a given format can be quite involved. To free other
classes from having to contain the relevant code, a factory can be used:
* :class:`aspecd.io.DatasetImporterFactory`
Currently, the sole information provided to decide about the appropriate
importer is the source (a string). A concrete importer object is returned
by the method :meth:`get_importer`. Thus, using the factory in another
class may look like the following::
importer_factory = aspecd.io.DatasetImporterFactory()
importer = importer_factory.get_importer(source="/path/to/your/data")
dataset = aspecd.dataset.Dataset()
dataset.import_from(importer)
Here, as in the example above, "source" refers to a (unique) identifier of
your dataset, be it a filename, path, URL/URI, LOI, or alike.
.. important::
For recipe-driven data analysis to work with an ASpecD-derived package,
you need to implement a :class:`aspecd.io.DatasetImporterFactory` class
there as well that can be obtained by instantiating
``<your_package>.io.DatasetImporterFactory()``.
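A minimal sketch of such a package-specific factory, assuming (as in the
ASpecD framework itself) that the non-public method ``_get_importer`` is
the hook returning the concrete importer, might look like:

.. code-block::

    import aspecd.io


    class DatasetImporterFactory(aspecd.io.DatasetImporterFactory):

        def _get_importer(self, source):
            # Decide upon the concrete importer based on the source,
            # here simply via the file extension.
            if source.endswith('.txt'):
                return aspecd.io.TxtImporter(source=source)
            return aspecd.io.AdfImporter(source=source)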
Recipes
=======
For recipes, a similar set of classes is provided:
* :class:`aspecd.io.RecipeImporter`
* :class:`aspecd.io.RecipeExporter`
For additional concrete classes handling import and export from and to YAML
files see below.
The same general principles laid out above for the datasets applies to
these classes as well. In particular, both import and export should be
handled via the respective methods of the :class:`aspecd.tasks.Recipe`
class, thus first instantiating an object of that class and an appropriate
importer or exporter, and afterwards only operating on the recipe using
its methods.
In its most generic form, this may look something like::
recipe = aspecd.tasks.Recipe()
importer = aspecd.io.RecipeImporter(source="/path/to/your/recipe")
recipe.import_from(importer)
Similarly, you would handle the export of the information contained in a
recipe object using an exporter object, respectively.
To simplify the input and output of recipes, and due to recipe-driven data
analysis being an intrinsic property of the ASpecD framework, two classes
handling the import and export from and to YAML files are provided as well:
* :class:`aspecd.io.RecipeYamlImporter`
* :class:`aspecd.io.RecipeYamlExporter`
These classes can directly be used to work with YAML files containing
information for recipe-driven data analysis. For details of the YAML file
structure, see the :class:`aspecd.tasks.Recipe` class and its attributes.
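Using them follows the same pattern as above::

    recipe = aspecd.tasks.Recipe()
    importer = aspecd.io.RecipeYamlImporter(source="/path/to/your/recipe.yaml")
    recipe.import_from(importer)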
Module documentation
====================
"""
import copy
import os
import tempfile
import zipfile
import asdf
import numpy as np
import aspecd.exceptions
import aspecd.metadata
import aspecd.utils
class DatasetImporter:
"""Base class for dataset importer.
Each class actually importing data and metadata into a dataset should
inherit from this class.
To perform the import, call the
:meth:`~aspecd.dataset.Dataset.import_from` method of the dataset
the import should be performed for, and provide a reference to the
actual importer object to it.
the FLT file.
"""
import pyregion
mask_files = glob.glob(flt_file.replace('_flt.fits','.*.mask.reg').replace('_flc.fits','.*.mask.reg').replace('_c0m.fits','.*.mask.reg').replace('_c0f.fits','.*.mask.reg'))
if len(mask_files) == 0:
return True
logstr = '# Region mask for {0}: {1}'.format(flt_file, mask_files)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
flt = pyfits.open(flt_file, mode='update')
for mask_file in mask_files:
ext = int(mask_file.split('.')[-3])
try:
reg = pyregion.open(mask_file).as_imagecoord(flt['SCI',ext].header)
mask = reg.get_mask(hdu=flt['SCI',ext])
except:
# Above fails for lookup-table distortion (ACS / UVIS)
# Here just assume the region file is defined in image coords
reg = pyregion.open(mask_file)
mask = reg.get_mask(shape=flt['SCI',ext].data.shape)
flt['DQ',ext].data[mask] |= dq_value
flt.flush()
return True
def apply_saturated_mask(flt_file, dq_value=1024, verbose=True):
"""Saturated WFC3/IR pixels have some pulldown in the opposite amplifier
Parameters
----------
flt_file : str
Filename of the FLT exposure
dq_value : int
DQ bit to flip for affected pixels
Returns
-------
Nothing, modifies DQ extension of `flt_file` in place.
"""
import scipy.ndimage as nd
flt = pyfits.open(flt_file, mode='update')
sat = (((flt['DQ'].data & 256) > 0) & ((flt['DQ'].data & 4) == 0))
## Don't flag pixels in lower right corner
sat[:80,-80:] = False
## Flag only if a number of nearby pixels also saturated
kern = np.ones((3,3))
sat_grow = nd.convolve(sat*1, kern)
sat_mask = (sat & (sat_grow > 2))[::-1,:]*1
NSAT = sat_mask.sum()
logstr = '# {0}: flagged {1:d} pixels affected by saturation pulldown'
logstr = logstr.format(flt_file, NSAT)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
if NSAT > 0:
flt['DQ'].data[sat_mask > 0] |= dq_value
flt.flush()
def clip_lists(input, output, clip=20):
"""TBD
Clip [x,y] arrays of objects that don't have a match within `clip` pixels
in either direction
"""
import scipy.spatial
tree = scipy.spatial.cKDTree(input, 10)
### Forward
N = output.shape[0]
dist, ix = np.zeros(N), np.zeros(N, dtype=int)
for j in range(N):
dist[j], ix[j] = tree.query(output[j,:], k=1,
distance_upper_bound=np.inf)
ok = dist < clip
out_arr = output[ok]
if ok.sum() == 0:
print('No matches within `clip={0:f}`'.format(clip))
return False
### Backward
tree = scipy.spatial.cKDTree(out_arr, 10)
N = input.shape[0]
dist, ix = np.zeros(N), np.zeros(N, dtype=int)
for j in range(N):
dist[j], ix[j] = tree.query(input[j,:], k=1,
distance_upper_bound=np.inf)
ok = dist < clip
in_arr = input[ok]
return in_arr, out_arr
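# Hedged usage sketch (illustrative, not part of the original module):
# feed two nearly identical coordinate lists through clip_lists and report
# how many objects survive the clipping.
def _demo_clip_lists():
    import numpy as np
    xy_in = np.random.uniform(0, 1000, size=(50, 2))
    xy_out = xy_in + np.random.normal(0, 1.0, size=xy_in.shape)
    result = clip_lists(xy_in, xy_out, clip=20)
    if result is not False:
        in_arr, out_arr = result
        print('Kept {0} of {1} objects'.format(len(in_arr), len(xy_in)))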
def match_lists(input, output, transform=None, scl=3600., simple=True,
outlier_threshold=5, toler=5, triangle_size_limit=[5, 800],
triangle_ba_max=0.9, assume_close=False):
"""TBD
Compute matched objects and transformation between two [x,y] lists.
If `transform` is None, use Similarity transform (shift, scale, rot)
"""
import copy
from astropy.table import Table
import skimage.transform
from skimage.measure import ransac
import stsci.stimage
if transform is None:
transform = skimage.transform.SimilarityTransform
#print 'xyxymatch'
if (len(output) == 0) | (len(input) == 0):
print('No entries!')
return input, output, None, transform()
try:
from tristars import match
pair_ix = match.match_catalog_tri(input, output, maxKeep=10, auto_keep=3, auto_transform=transform, auto_limit=outlier_threshold, size_limit=triangle_size_limit, ignore_rot=False, ignore_scale=True, ba_max=triangle_ba_max)
input_ix = pair_ix[:,0]
output_ix = pair_ix[:,1]
print(' tristars.match: Nin={0}, Nout={1}, match={2}'.format(len(input), len(output), len(output_ix)))
#print('xxx Match from tristars!')
if False:
fig = match.match_diagnostic_plot(input, output, pair_ix, tf=None, new_figure=True)
fig.savefig('/tmp/xtristars.png')
plt.close(fig)
tform = match.get_transform(input, output, pair_ix, transform=transform, use_ransac=True)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# ! tristars failed")
match = stsci.stimage.xyxymatch(copy.copy(input), copy.copy(output),
origin=np.median(input, axis=0),
mag=(1.0, 1.0), rotation=(0.0, 0.0),
ref_origin=np.median(input, axis=0),
algorithm='tolerance', tolerance=toler,
separation=0.5, nmatch=10, maxratio=10.0,
nreject=10)
m = Table(match)
output_ix = m['ref_idx'].data
input_ix = m['input_idx'].data
print(' xyxymatch.match: Nin={0}, Nout={1}, match={2}'.format(len(input), len(output), len(output_ix)))
tf = transform()
tf.estimate(input[input_ix,:], output[output_ix])
if not simple:
model, inliers = ransac((input[input_ix,:], output[output_ix,:]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
# Iterate
if inliers.sum() > 2:
m_i, in_i = ransac((input[input_ix[inliers],:], output[output_ix[inliers],:]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
if in_i.sum() > 2:
model = m_i
inliers[np.arange(len(inliers), dtype=int)[inliers][in_i]] = False
outliers = ~inliers
mout = model(input[input_ix,:])
dx = mout - output[output_ix]
else:
model = tf
### Compute statistics
if len(input_ix) > 10:
mout = tf(input[input_ix,:])
dx = mout - output[output_ix]
dr = np.sqrt(np.sum(dx**2, axis=1))
outliers = dr > outlier_threshold
else:
outliers = np.zeros(len(input_ix), dtype=bool)
return input_ix, output_ix, outliers, model
def align_drizzled_image(root='', mag_limits=[14,23], radec=None, NITER=3,
clip=20, log=True, outlier_threshold=5,
verbose=True, guess=[0., 0., 0., 1], simple=True,
rms_limit=2, use_guess=False,
triangle_size_limit=[5,1800], max_sources=200,
triangle_ba_max=0.9, max_err_percentile=99,
catalog_mask_pad=0.05, match_catalog_density=None,
assume_close=False):
"""TBD
"""
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.align_drizzled_image')
if not os.path.exists('{0}.cat.fits'.format(root)):
#cat = make_drz_catalog(root=root)
cat = make_SEP_catalog(root=root)
else:
cat = utils.read_catalog('{0}.cat.fits'.format(root))
if hasattr(radec, 'upper'):
rd_ref = np.loadtxt(radec)
radec_comment = radec
if match_catalog_density is None:
match_catalog_density = '.cat.radec' not in radec
elif radec is False:
# Align to self, i.e., do nothing
so = np.argsort(cat['MAG_AUTO'])
rd_ref = np.array([cat['X_WORLD'], cat['Y_WORLD']]).T[so[:50],:]
radec_comment = 'self catalog'
if match_catalog_density is None:
match_catalog_density = False
else:
rd_ref = radec*1
radec_comment = 'input arrays (N={0})'.format(rd_ref.shape)
if match_catalog_density is None:
match_catalog_density = False
### Clip obviously distant files to speed up match
# rd_cat = np.array([cat['X_WORLD'], cat['Y_WORLD']])
# rd_cat_center = np.median(rd_cat, axis=1)
# cosdec = np.array([np.cos(rd_cat_center[1]/180*np.pi),1])
# dr_cat = np.sqrt(np.sum((rd_cat.T-rd_cat_center)**2*cosdec**2, axis=1))
#
# #print('xxx', rd_ref.shape, rd_cat_center.shape, cosdec.shape)
#
# dr = np.sqrt(np.sum((rd_ref-rd_cat_center)**2*cosdec**2, axis=1))
#
# rd_ref = rd_ref[dr < 1.1*dr_cat.max(),:]
ok = (cat['MAG_AUTO'] > mag_limits[0]) & (cat['MAG_AUTO'] < mag_limits[1])
if len(mag_limits) > 2:
ok &= cat['MAGERR_AUTO'] < mag_limits[2]
else:
ok &= cat['MAGERR_AUTO'] < 0.05
if ok.sum() == 0:
print('{0}.cat: no objects found in magnitude range {1}'.format(root,
mag_limits))
return False
ok &= utils.catalog_mask(cat, max_err_percentile=max_err_percentile, pad=catalog_mask_pad, pad_is_absolute=False, min_flux_radius=1.)
xy_drz = np.array([cat['X_IMAGE'][ok], cat['Y_IMAGE'][ok]]).T
drz_file = glob.glob('{0}_dr[zc]_sci.fits'.format(root))[0]
drz_im = pyfits.open(drz_file)
sh = drz_im[0].data.shape
drz_wcs = pywcs.WCS(drz_im[0].header, relax=True)
orig_wcs = drz_wcs.copy()
if use_guess:
drz_wcs = utils.transform_wcs(drz_wcs, guess[:2], guess[2], guess[3])
return orig_wcs, drz_wcs, guess[:2], guess[2]/np.pi*180, guess[3]
##########
# Only include reference objects in the DRZ footprint
pix_origin = 1
ref_x, ref_y = drz_wcs.all_world2pix(rd_ref, pix_origin).T
if hasattr(drz_wcs, '_naxis1'):
nx1, nx2 = drz_wcs._naxis1, drz_wcs._naxis2
else:
nx1, nx2 = drz_wcs._naxis
ref_cut = (ref_x > -100) & (ref_x < nx1+100)
ref_cut &= (ref_y > -100) & (ref_y < nx2+100)
if ref_cut.sum() == 0:
print('{0}: no reference objects found in the DRZ footprint'.format(root))
return False
rd_ref = rd_ref[ref_cut,:]
########
# Match surface density of drizzled and reference catalogs
if match_catalog_density:
icut = np.minimum(ok.sum()-2, int(2*ref_cut.sum()))
# acat = utils.hull_area(cat['X_WORLD'][ok], cat['Y_WORLD'][ok])
# aref = utils.hull_area(rd_ref[:,0], rd_ref[:,1])
cut = np.argsort(cat['MAG_AUTO'][ok])[:icut]
xy_drz = np.array([cat['X_IMAGE'][ok][cut], cat['Y_IMAGE'][ok][cut]]).T
else:
# Limit to brightest X objects
icut = 400
cut = np.argsort(cat['MAG_AUTO'][ok])[:icut]
xy_drz = np.array([cat['X_IMAGE'][ok][cut], cat['Y_IMAGE'][ok][cut]]).T
logstr = '# wcs {0} radec="{1}"; Ncat={2}; Nref={3}'
logstr = logstr.format(root, radec, xy_drz.shape[0], rd_ref.shape[0])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
#out_shift, out_rot, out_scale = np.zeros(2), 0., 1.
out_shift, out_rot, out_scale = guess[:2], guess[2], guess[3]
drz_wcs = utils.transform_wcs(drz_wcs, out_shift, out_rot, out_scale)
logstr = '# wcs {0} (guess) : {1:6.2f} {2:6.2f} {3:7.3f} {4:7.3f}'.format(root, guess[0], guess[1], guess[2]/np.pi*180, 1./guess[3])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
drz_crpix = drz_wcs.wcs.crpix
NGOOD, rms = 0, 0
for iter in range(NITER):
#print('xx iter {0} {1}'.format(iter, NITER))
xy = np.array(drz_wcs.all_world2pix(rd_ref, pix_origin))
pix = np.cast[int](np.round(xy)).T
### Find objects where drz pixels are non-zero
okp = (pix[0,:] > 0) & (pix[1,:] > 0)
okp &= (pix[0,:] < sh[1]) & (pix[1,:] < sh[0])
ok2 = drz_im[0].data[pix[1,okp], pix[0,okp]] != 0
N = ok2.sum()
if clip > 0:
status = clip_lists(xy_drz-drz_crpix, xy+0-drz_crpix, clip=clip)
if not status:
print('Problem clipping matched lists; falling back to unclipped lists')
input, output = xy_drz+0.-drz_crpix, xy+0-drz_crpix
else:
input, output = status
else:
input, output = xy_drz+0.-drz_crpix, xy+0-drz_crpix
#print np.sum(input) + np.sum(output)
if len(input) > max_sources:
print('Clip input list ({0}) to {1} objects'.format(len(input), max_sources))
ix = np.argsort(np.arange(len(input)))[:max_sources]
input = input[ix,:]
if len(output) > max_sources:
print('Clip output list ({0}) to {1} objects'.format(len(output), max_sources))
ix = np.argsort(np.arange(len(output)))[:max_sources]
output = output[ix,:]
toler = 5
titer = 0
while (titer < 3):
try:
res = match_lists(output, input, scl=1., simple=simple,
outlier_threshold=outlier_threshold, toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max,
assume_close=assume_close)
output_ix, input_ix, outliers, tf = res
break
except Exception:
toler += 5
titer += 1
#print(output.shape, output_ix.shape, output_ix.min(), output_ix.max(), titer, toler, input_ix.shape, input.shape)
titer = 0
while (len(input_ix)*1./len(input) < 0.1) & (titer < 3):
titer += 1
toler += 5
try:
res = match_lists(output, input, scl=1., simple=simple,
outlier_threshold=outlier_threshold,
toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max,
assume_close=assume_close)
except Exception:
pass
output_ix, input_ix, outliers, tf = res
#print(output.shape, output_ix.shape, output_ix.min(), output_ix.max(), titer, toler, input_ix.shape, input.shape)
x_ax_lab = x_ax_lab.replace('^%s' % follow_val_x, '$\mathregular{^%s}\!$' % follow_val_x) #
# Deal with formatting of y axis labels
y_ax_lab = ''
y_ax_lab_components = plot_dict['y_label'].split(' \\n ')
if len(y_ax_lab_components) > 1:
for k in range(len(y_ax_lab_components)):
if k < len(y_ax_lab_components) - 1:
y_ax_lab += y_ax_lab_components[k] + '\n'
else:
y_ax_lab += y_ax_lab_components[k]
else:
y_ax_lab = plot_dict['y_label']
# Adjust so powers ("^") show correctly in y-axis label
try:
power_index_y = y_ax_lab.index('^')
follow_val_y = y_ax_lab[power_index_y+1]
except ValueError:
power_index_y = None
follow_val_y = None
if power_index_y is not None:
y_ax_lab = y_ax_lab.replace('^%s' % follow_val_y, '$\mathregular{^%s}\!$' % follow_val_y) #
if preference_arrow == 'Yes':
if invert_axis is not None:
if invert_axis[plt_order[0]] == 'No':
# Arrows point to right/up
ax_arr[sp_slot_tuple].set_xlabel('$\\longrightarrow$' + '\n' + x_ax_lab,
fontsize=all_label_font)
else:
ax_arr[sp_slot_tuple].set_xlabel('$\\longrightarrow$' + '\n' + x_ax_lab,
fontsize=all_label_font)
if invert_axis[plt_order[1]] == 'No':
# Arrows point to right/up
ax_arr[sp_slot_tuple].set_ylabel(y_ax_lab + '\n' + '$\\longrightarrow$',
fontsize=all_label_font)
else:
ax_arr[sp_slot_tuple].set_ylabel(y_ax_lab + '\n' + '$\\longrightarrow$',
fontsize=all_label_font)
else:
# Arrows point to right/up
ax_arr[sp_slot_tuple].set_xlabel('$\\longrightarrow$' + '\n' + x_ax_lab,
fontsize=all_label_font)
ax_arr[sp_slot_tuple].set_ylabel(y_ax_lab + '\n' + '$\\longrightarrow$',
fontsize=all_label_font)
else:
ax_arr[sp_slot_tuple].set_xlabel(x_ax_lab, fontsize=all_label_font)
ax_arr[sp_slot_tuple].set_ylabel(y_ax_lab, fontsize=all_label_font)
except KeyError:
ax_arr[sp_slot_tuple].set_ylabel(objs_to_plot[plt_order[1]], fontsize=all_label_font)
ax_arr[sp_slot_tuple].set_xlabel(objs_to_plot[plt_order[0]], fontsize=all_label_font)
# Axis Limits
if ax_lim is not None:
ax_arr[sp_slot_tuple].set_xlim(ax_lim[plt_order[0]][0], ax_lim[plt_order[0]][1])
ax_arr[sp_slot_tuple].set_ylim(ax_lim[plt_order[1]][0], ax_lim[plt_order[1]][1])
# Set fontsize for x tick labels
for tick in ax_arr[sp_slot_tuple].xaxis.get_major_ticks():
tick.label.set_fontsize(all_label_font)
# Set fontsize for y tick labels
for tick in ax_arr[sp_slot_tuple].yaxis.get_major_ticks():
tick.label.set_fontsize(all_label_font)
if invert_axis is not None:
if invert_axis[plt_order[0]] == 'Yes':
ax_arr[sp_slot_tuple].invert_xaxis()
if invert_axis[plt_order[1]] == 'Yes':
ax_arr[sp_slot_tuple].invert_yaxis()
# Turn top-most and right-most ticks off
ax_arr[sp_slot_tuple].tick_params(axis='x', which='both', top='off')
ax_arr[sp_slot_tuple].tick_params(axis='y', which='both', right='off')
# Turn top and right axes off (no line appears, so axes are not a box)
turn_off_top_right_axes = 'No'
if turn_off_top_right_axes == 'Yes':
ax_arr[sp_slot_tuple].spines["top"].set_visible(False)
ax_arr[sp_slot_tuple].spines["right"].set_visible(False)
# Add Axis arrows on x-axis as annotated text, indicating direction of increasing preference.
place_arrow_set_position = 'No' # Default is to place arrows under axis labels
if preference_arrow == 'Yes':
if place_arrow_set_position == 'Yes':
ax_arr[sp_slot_tuple].annotate('', xy=(0.25,-1.15), xycoords = 'axes fraction', xytext=(0.75,-1.15),
arrowprops=dict(arrowstyle='<-, head_width=0.15', color='k'))
# Add Axis arrows on y-axis as annotated text, indicating direction of increasing preference.
ax_arr[sp_slot_tuple].annotate('', xy=(-1.25,0.25), xycoords = 'axes fraction', xytext=(-1.25,0.75),
arrowprops=dict(arrowstyle='<-, head_width=0.15', color='k'))
try:
ax_arr[sp_slot_tuple].set_title(plot_dict['subplot_title'], fontweight='bold')
except KeyError:
pass
#fig.colorbar(ax_arr[sp_slot_tuple], shrink=0.75)
#fig.axes[-1].set_ylabel(objs_to_plot[plt_order[2]], fontsize=18)
#p.show()
if save_fig is not None:
save_fig = save_fig + os_fold + objs_to_plot[plt_order[0]] + '-' + objs_to_plot[
plt_order[1]] + '-' + objs_to_plot[plt_order[2]] + ".png"
elif num_objs_to_plot == 4:
if three_d_plot == 'Yes':
fig = pyplot.figure() # create the figure
else:
fig, ax_arr = axis_test
cmap = pyplot.cm.get_cmap("jet_r")
if three_d_plot == 'Yes':
ax3D = Axes3D(fig)
pts = ax3D.scatter(objective_values[plt_order[0]], objective_values[plt_order[1]], objective_values[plt_order[2]])
if plt_order is None:
plt_order = [0, 1, 2, 3]
pts = ax3D.scatter(objective_values[0], objective_values[1], objective_values[2], c= objective_values[3],
cmap=cmap, linewidth=0)
fig.colorbar(pts,ax=ax3D,shrink=0.75)
fig.axes[-1].set_ylabel(objs_to_plot[3])
else:
# Order of plotting specified.
pts = ax3D.scatter(objective_values[plt_order[0]], objective_values[plt_order[1]], objective_values[
plt_order[2]], c= objective_values[plt_order[3]], cmap=cmap, linewidth=0)
fig.colorbar(pts, ax=ax3D, shrink=0.75)
try:
fig.axes[-1].set_ylabel(plot_dict['c_label'])
except KeyError:
fig.axes[-1].set_ylabel(objs_to_plot[plt_order[3]])
else:
if plot_specs is not None:
rcParams.update(plot_specs)
ax_arr[sp_slot_tuple].scatter(objective_values[plt_order[0]], objective_values[plt_order[1]],
c=objective_values[plt_order[2]], cmap=cmap, marker='o',
s = objective_values[plt_order[3]]/100, linewidth=0)
# Axis labels.
try:
x_ax_lab = ''
x_ax_lab_components = plot_dict['x_label'].split(' \\n ')
y_ax_lab = ''
y_ax_lab_components = plot_dict['y_label'].split(' \\n ')
if len(x_ax_lab_components) > 1:
for k in range(len(x_ax_lab_components)):
if k < len(x_ax_lab_components) - 1:
x_ax_lab += x_ax_lab_components[k] + '\n'
else:
x_ax_lab += x_ax_lab_components[k]
else:
x_ax_lab = plot_dict['x_label']
if len(y_ax_lab_components) > 1:
for k in range(len(y_ax_lab_components)):
if k < len(y_ax_lab_components) - 1:
y_ax_lab += y_ax_lab_components[k] + '\n'
else:
y_ax_lab += y_ax_lab_components[k]
else:
y_ax_lab = plot_dict['y_label']
ax_arr[sp_slot_tuple].set_ylabel(y_ax_lab)
ax_arr[sp_slot_tuple].set_xlabel(x_ax_lab)
except KeyError:
ax_arr[sp_slot_tuple].set_ylabel(objs_to_plot[plt_order[1]])
ax_arr[sp_slot_tuple].set_xlabel(objs_to_plot[plt_order[0]])
# Axis limits
if ax_lim is not None:
ax_arr[sp_slot_tuple].set_xlim(ax_lim[plt_order[0]][0],ax_lim[plt_order[0]][1])
ax_arr[sp_slot_tuple].set_ylim(ax_lim[plt_order[1]][0],ax_lim[plt_order[1]][1])
# Plot title
try:
ax_arr[sp_slot_tuple].set_title(plot_dict['subplot_title'], fontweight='bold')
except KeyError:
pass
# Set fontsize for x tick labels
for tick in ax_arr[sp_slot_tuple].xaxis.get_major_ticks():
tick.label.set_fontsize(all_label_font)
# Set fontsize for y tick labels
for tick in ax_arr[sp_slot_tuple].yaxis.get_major_ticks():
tick.label.set_fontsize(all_label_font)
elif num_objs_to_plot == 5:
ax3D = Axes3D(fig)
cmap = pyplot.cm.get_cmap("jet_r")
if plt_order is None:
plt_order = [0, 1, 2, 3, 4]
pts = ax3D.scatter(objective_values[0], objective_values[1], objective_values[2], c= objective_values[3],
cmap=cmap, marker='o', s=objective_values[4]/10, linewidth=0)
fig.colorbar(pts,ax=ax3D,shrink=0.75)
fig.axes[-1].set_ylabel(objs_to_plot[3])
else:
# Order of plotting specified.
pts = ax3D.scatter(objective_values[plt_order[0]], objective_values[plt_order[1]], objective_values[
plt_order[2]], c = objective_values[plt_order[3]], cmap=cmap, marker='o', s=objective_values[
plt_order[4]]/10, linewidth=0)
fig.colorbar(pts, ax=ax3D, shrink=0.75)
try:
fig.axes[-1].set_ylabel(plot_dict['c_label'])
except KeyError:
fig.axes[-1].set_ylabel(objs_to_plot[plt_order[3]])
if (three_d_plot == 'Yes') or num_objs_to_plot == 5:
# Label axes, title plot, and save resulting plot.
try:
ax3D.set_xlabel(plot_dict['x_label'], fontsize=16, labelpad=10)
ax3D.set_ylabel(plot_dict['y_label'], fontsize=16, labelpad=10)
ax3D.set_zlabel(plot_dict['z_label'], fontsize=16, labelpad=10)
except KeyError:
ax3D.set_xlabel(objs_to_plot[plt_order[0]], fontsize=16, labelpad=10)
ax3D.set_ylabel(objs_to_plot[plt_order[1]], fontsize=16, labelpad=10)
ax3D.set_zlabel(objs_to_plot[plt_order[2]], fontsize=16, labelpad=10)
if ax_lim is not None:
# User set axis limits.
ax3D.set_xlim3d(ax_lim[plt_order[0]][0], ax_lim[plt_order[0]][1])
ax3D.set_ylim3d(ax_lim[plt_order[1]][0], ax_lim[plt_order[1]][1])
ax3D.set_zlim3d(ax_lim[plt_order[2]][0], ax_lim[plt_order[2]][1])
#p.show()
if save_fig is not None:
if type(save_fig) not in [str, unicode]:
# Appropriate figure name not given. Give figure a title and save in pysedsim current working directory.
fig.savefig("scatter_plot.png", dpi=1200, bbox_inches='tight')
else:
fig.savefig(save_fig, dpi=1200, bbox_inches='tight')
# Create text files of operating policy parameters for particular points on pareto front, if user desires.
if gen_op_policy_file == 'Yes':
Grab_Pareto_Min_Max(ref_set_array, objective_values, num_objs, num_dec_vars)
if ref_set_file_name is not None and tradeoff_plotting_subplot is None:
# Return reference set information if user is internally processing reference set here.
return ref_set_array, objective_values, dec_var_values
if ref_set_file_name is not None and tradeoff_plotting_subplot is not None:
# Return reference set information if user is internally processing reference set here.
return ref_set_array, objective_values, dec_var_values, fig, ax_arr
def Scatter_Plot_Objectives(ref_set_pref_dict, objs_to_plot, parse_objs=None, plot_dict=None,
save_fig = None, gen_op_policy_file='No', num_objs_to_plot=None,
tradeoff_plotting_subplot=None, axis_test = None, provided_3D_fig_axis=None):
'''
Purpose: Produces a scatter plot of objective function values up to 5D (the 4th dimension is color; the 5th, point size).
Args (if not defined here, it is defined in Reference_Set() method):
3. objs_to_plot: list of objective names as they appear in the pysedsim input file and in the reference set
file, e.g. ['Objective 1', 'Objective 2']. Only list the names of objectives that you want to plot.
4. gen_op_policy_file: Optional, specify 'Yes' or 'No'. default = 'No'. Calls Grab_Pareto_Min_Max()
function to generate text file of operating policy for minimum and maximum points in the reference set.
5. parse_objs: List of objective numbers (column numbers in 'PySedSim_reference.txt' to plot from
reference set
6. ref_set_pref_dict: A dictionary containing the following keys:
a. num_objs: Number of objective function values
b. num_dec_vars: Number of decision variable values
c. ref_set_file_name: name of reference set file (e.g., 'ref_set_file.txt')
d. unit_conv: List of factors by which to multiply values in each array of objective values.
e. perc_conv: List of 'Yes' or 'No' for each objective, for converting objective values to percent from
fraction.
f. invert: List of 'Yes' or 'No' for each objective, for reversing percentage (doing 100-value).
9. plot_dict: Dictionary of plotting preferences. Keys include:
a. 'Axis Range': List of lists (sub-lists), containing axis limits for 2 or 3 primary axes: x, y,
z. e.g., [[2,3],[3,5],[4,5]]
b. 'x_label': string to label x-axis.
c. 'y_label': string to label y-axis.
d. 'z_label': string to label z-axis.
e. 'c_label': string to label "colored" axis (if there is a 4th dimension).
f. 'Plot Specifications': dictionary, follows format of rcParams module from matplotlib. An example is:
'Plot Specifications': {'axes.labelsize': 18, 'xtick.labelsize': 18, 'ytick.labelsize': 18,
'text.usetex': False, 'figure.figsize': [6.5, 5],
'legend.fontsize': 22}
g. '3d_plot': 'Yes' or 'No', indicates whether user wants to plot 3 objectives in 3D space, or in 2D space
using color as the third objective.
10. plt_order: Optional, this is a list that specifies in which order the objectives are plotted. Used if you
want to be able to determine which of the objectives become a color/point size/orientation in the >3D case.
Example: plt_order = [0,3,2,1] plots objectives 0, 3 and 2 on the primary axes, with objective 1
mapped to the fourth dimension (color, or point size in the 5-objective case).
11. num_objs_to_plot: integer number of objectives you actually wish to plot. Will be computed as length of
objs_to_plot list if nothing is specified.
tradeoff_plotting_subplot: indicates whether call is being made from tradeoff_plotting module for a subplot,
in which case the figure object should be returned so it can be included in a subplot.
Returns:
Nothing by default, though several figures may be saved; when ref_set_file_name is provided, the
reference set arrays (and, for subplot calls, the figure and axes) are returned.
'''
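# A minimal usage sketch (the file name, objective names and key values below
# are illustrative assumptions, not taken from a real PySedSim run):
# ref_set_pref_dict = {'num_objs': 3, 'num_dec_vars': 10,
#                      'ref_set_file_name': 'ref_set_file.txt',
#                      'unit_conv': [1, 1, 1], 'perc_conv': ['No']*3,
#                      'invert': ['No']*3}
# Scatter_Plot_Objectives(ref_set_pref_dict,
#                         ['Objective 1', 'Objective 2', 'Objective 3'],
#                         plot_dict={'x_label': 'Objective 1',
#                                    'y_label': 'Objective 2',
#                                    'c_label': 'Objective 3', '3d_plot': 'No'})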
# Load basic information about the simulated scenarios, including input file directory and simulation names.
os_fold = Op_Sys_Folder_Operator()
# Unpack ref_set_pref_dict
try:
ref_set_file_name = ref_set_pref_dict['ref_set_file_name']
# ... outstanding perturbations in this iteration
mapped_x, mapped_unnormalized_x = compute_x_after_mapping_back(
domain,
url_id,
html,
html_fname,
working_dir=working_dir,
browser_id=browser_id,
final_domain=final_domain
)
return mapped_x, mapped_unnormalized_x, original_url, url
def _deprocess_x(self, x, feature_types, verbal=False):
FEATURE_TYPES = {'F', 'B', 'C', 'S', 'D', 'L', 'FF'}
def deprocess_float_feature(val, ratio, ffloat=True):
val = float(val)
maxn = float(ratio[0])
minn = float(ratio[1])
if ffloat:
return float(val * (maxn - minn) + minn)
else:
return round(val * (maxn - minn) + minn)
def deprocess_nominal_feature(val, category_name):
val = float(val)
if val == 1.0:
return category_name
elif val == 0.0:
return None
else:
print("[ERROR] WTF? val: %s %s" % (str(val), category_name))
raise Exception
def deprocess_shift_feature(val, offset):
val = float(val)
offset = float(offset)
return int(math.ceil(val + offset))
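# Worked examples (illustrative values): deprocess_float_feature(0.25, (100, 0))
# -> 0.25*(100 - 0) + 0 = 25.0; the same call with ffloat=False rounds to 25.
# deprocess_shift_feature(2.4, 10) -> ceil(2.4 + 10) = 13.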
features = x
deprocessed_features = []
for j in range(len(feature_types)):
assert feature_types[j]['type'] in FEATURE_TYPES, "[ERROR] Feature type not supported!"
if feature_types[j]['type'] == 'F':
ratio = feature_types[j]['val']
deprocessed_features.append(
deprocess_float_feature(features[j], ratio, ffloat=False))
elif feature_types[j]['type'] == 'FF':
ratio = feature_types[j]['val']
deprocessed_features.append(
deprocess_float_feature(features[j], ratio, ffloat=True))
elif feature_types[j]['type'] == 'C':
category_name = feature_types[j]['val']
new_val = deprocess_nominal_feature(features[j], category_name)
if new_val is not None:
deprocessed_features.append(new_val)
elif feature_types[j]['type'] == 'S':
offset = feature_types[j]['val']
deprocessed_features.append(
deprocess_shift_feature(features[j], offset))
elif feature_types[j]['type'] == 'B':
val = features[j]
deprocessed_features.append(int(float(val)))
elif feature_types[j]['type'] == 'D':
val = features[j]
deprocessed_features.append(val)
# label column
elif feature_types[j]['type'] == 'L':
label = features[j]
deprocessed_features.append(label)
else:
print("???")
return deprocessed_features
def _compare_x(self, x1, x2, tags=["X1", "X2"], it=None, X3=None):
assert len(x1) == len(x2), "[ERROR] Two inputs have different sizes!"
if it is not None:
print("Iter #%d" % it)
for i in range(len(x1)):
if x1[i] != x2[i]:
if X3 is not None:
print("i:", i, tags[0], ":", x1[i], tags[1], ":", x2[i], "original:", X3[i])
else:
print("i:", i, tags[0], ":", x1[i], tags[1], ":", x2[i])
def _retrain_local_model(self, model, x, y):
def __generate_mini_batch(x, y, whole_train_set):
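# Builds a 100-sample batch: 50 rows sampled from the full training set
# plus 50 copies of the adversarial point (x, y), so retraining nudges
# the local surrogate toward the remote model's label without drifting
# far from the original training distribution.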
# import random
train_set = whole_train_set.sample(n=50)
train_y = train_set.pop('CLASS').to_numpy().tolist()
train_x = train_set.to_numpy().tolist()
for _ in range(50):
train_x.append(x)
train_y.append(y)
return np.array(train_x), np.array(train_y)
train_x, train_y = __generate_mini_batch(x, y, self._train_data_set)
model.fit(
x=train_x,
y=train_y,
batch_size=100,
epochs=1
)
def _get_label(self, logits):
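# `logits` is assumed to be a length-2 score vector ordered [NONAD, AD].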
if logits[1] > logits[0]:
return "AD"
else:
return "NONAD"
def _run_one(self, a, epsilon, stepsize, iterations,
random_start, targeted, class_, return_early, model, browser_id,
perturbable_idx_set=None, only_increase_idx_set=None,
feature_defs=None, normalization_ratios=None,
enforce_interval=1, request_id="URL_dummy", dynamic_interval=False,
debug=False, draw=False, map_back_mode=True, remote_model=True,
check_correctness=False, check_init_correctness=True, feature_idx_map=None, logger=None):
if draw:
x_axis, y_l2, y_lf = [], [], []
domain, url_id = request_id.split(',')
final_domain = urlparse(domain)[1]
original_domain = self.final_domain_to_original_domain_mapping[final_domain]
min_, max_ = a.bounds()
s = max_ - min_
original = a.unperturbed.copy()
if random_start:
# using uniform noise even if the perturbation clipping uses
# a different norm because cleverhans does it the same way
noise = nprng.uniform(
-epsilon * s, epsilon * s, original.shape).astype(
original.dtype)
x = original + self._clip_perturbation(a, noise, epsilon)
strict = False # because we don't enforce the bounds here
else:
x = original
strict = True # we don't care about the bounds because we are not attacking image clf
success_cent = False
if enforce_interval == iterations - 1:
only_post_process = True
else:
only_post_process = False
is_first_iter = True
for i in range(iterations):
if dynamic_interval:
curr_interval = self._get_geometric_enforce_interval(enforce_interval, iterations, i)
else:
curr_interval = enforce_interval
if i != 0 and (i % curr_interval == 0 or i == iterations - 1):
should_enforce_policy = True
else:
should_enforce_policy = False
gradient = self._gradient(a, x, class_, strict=strict)
# non-strict only for the first call and
# only if random_start is True
if targeted:
gradient = -gradient
# untargeted: gradient ascent on cross-entropy to original class
# targeted: gradient descent on cross-entropy to target class
# (this is the actual attack step)
x = x + stepsize * gradient
if should_enforce_policy:
x_before_projection = x.copy()
# phase 1: reject disallowed perturbations by changing feature values
# back to original
if only_post_process:
if should_enforce_policy and perturbable_idx_set is not None:
x = self._reject_imperturbable_features(
x,
original,
perturbable_idx_set)
else:
if perturbable_idx_set is not None:
x = self._reject_imperturbable_features(
x,
original,
perturbable_idx_set)
# phase 1: reject decreasing perturbations by changing only-increase
# feature values back to original
if should_enforce_policy and only_increase_idx_set is not None:
x = self._reject_only_increase_features(
x,
original,
only_increase_idx_set)
# phase 2: change values back to allowed ranges
if should_enforce_policy and feature_defs is not None:
x = self._legalize_candidate(
x,
original,
feature_defs)
if should_enforce_policy:
l2_dist = self._compute_l2_distance([x_before_projection], [x])
lf_dist = self._compute_lp_distance([x_before_projection], [x])
if draw:
x_axis.append(i)
y_l2.append(l2_dist[0])
y_lf.append(lf_dist)
if debug:
print("Step #%d, L2 distance: %f | LP distance: %f" % (i, l2_dist, lf_dist))
x = original + self._clip_perturbation(a, x - original, original, epsilon, feature_defs)
x = np.clip(x, min_, max_)
if should_enforce_policy and map_back_mode:
diff = self._get_diff(
x,
original,
perturbable_idx_set,
normalization_ratios,
)
print("Delta at iter #%d: %s" % (i, str(diff)))
print("Domain: %s" % original_domain)
print("URL ID: %s" % url_id)
x_before_mapping_back = x.copy()
try:
x_cent, unnorm_x_cent, original_url, url = self._get_x_after_mapping_back(
original_domain,
final_domain,
url_id,
diff,
browser_id=browser_id,
feature_idx_map=feature_idx_map,
first_time=is_first_iter
)
except Exception as err:
print("Error occured mapping: %s" % err)
return False
is_first_iter = False
del unnorm_x_cent[-1] # remove label
for j in range(len(x_cent)):
if j in diff:
if j not in NORM_MAP:
continue
if diff[j] == 1.0:
x_cent[j] = 1.0
unnorm_x_cent[NORM_MAP[j]] = "1"
if diff[j] == -1.0:
x_cent[j] = 0.0
unnorm_x_cent[NORM_MAP[j]] = "0"
print("unnorm_x_cent:", unnorm_x_cent)
mapping_diff = self._get_diff(
x_cent,
original,
perturbable_idx_set,
normalization_ratios,
)
print("Delta between before and after mapping-back: %s" % mapping_diff)
if self._is_diff_zero(mapping_diff):
print("[ERROR] Xs before and after mapping back did not change!")
return False
if should_enforce_policy and remote_model and not map_back_mode:
unnorm_x = self._deprocess_x(x, normalization_ratios)
unnorm_unperturbed = self._deprocess_x(original, normalization_ratios)
if i == 0 and check_init_correctness:
unnorm_unperturbed = self._deprocess_x(original, normalization_ratios)
prediction_unperturbed_local = self._get_label(model.predict(np.array([original]))[0])
prediction_unperturbed_remote = predict(unnorm_unperturbed, self._remote_model)[0]
if prediction_unperturbed_local == prediction_unperturbed_remote == "AD":
print("Sanity check passed!")
else:
print("Local:", prediction_unperturbed_local)
print("Remote:", prediction_unperturbed_remote)
print("Sanity check failed!")
return False
if should_enforce_policy:
logits, is_adversarial = a.forward_one(x)
if logging.getLogger().isEnabledFor(logging.DEBUG):
if targeted:
ce = crossentropy(a.original_class, logits)
logging.debug('crossentropy to {} is {}'.format(
a.original_class, ce))
ce = crossentropy(class_, logits)
logging.debug('crossentropy to {} is {}'.format(class_, ce))
# Use X before mapping back as the starting point for next iteration
# as there might be feature perturbations that we cannot incorporate in
# feature space. That is, we should only "descend" by changing "perturbable"
# features in a "legitimate" fashion
if remote_model:
if map_back_mode:
prediction_remote_cent = predict(unnorm_x_cent, self._remote_model)[0]
else:
prediction_remote = predict(unnorm_x, self._remote_model)[0]
prediction_original = predict(unnorm_unperturbed, self._remote_model)[0]
if map_back_mode:
prediction_local_cent = self._get_label(model.predict(np.array([x_cent]))[0])
else:
prediction_local = self._get_label(model.predict(np.array([x]))[0])
if not only_post_process:
if map_back_mode:
retrain_cnt_cent = 0
print("Remote (cent): %s / local (cent): %s" %
(prediction_remote_cent, prediction_local_cent))
while prediction_remote_cent != prediction_local_cent and retrain_cnt_cent < 10:
retrain_cnt_cent += 1
self._retrain_local_model(model, x_cent, LABLE[prediction_remote_cent])
prediction_local_cent = self._get_label(model.predict(np.array([x_cent]))[0])
print("(cent) iter #%d, has retrained %d time(s)" % (i, retrain_cnt_cent))
else:
retrain_cnt = 0
print("Remote: %s / local: %s" % (prediction_remote, prediction_local))
while prediction_remote != prediction_local and retrain_cnt < 10:
retrain_cnt += 1
self._retrain_local_model(model, x, LABLE[prediction_remote])
prediction_local = self._get_label(model.predict(np.array([x]))[0])
print("iter #%d, has retrained %d time(s)" % (i, retrain_cnt))
if map_back_mode:
if prediction_original == "AD" and prediction_remote_cent == "NONAD":
success_cent = True
if success_cent:
msg = "SUCCESS, iter_%d, %s, %s, %s, %s, %s, %s" % (i, original_domain, final_domain, str(mapping_diff), url_id, original_url, url)
print(msg)
logger.info(msg)
cmd = "cp %s %s" % (
self.BASE_HTML_DIR + '/' + original_domain + '.html',
self.BASE_EVAL_HTML_DIR + '/original_' + original_domain + '.html'
)
os.system(cmd)
cmd = "cp %s %s" % (
self.BASE_HTML_DIR + '/modified_' + original_domain + '.html',
self.BASE_EVAL_HTML_DIR + '/' + original_domain + '_' + url_id + '.html'
)
os.system(cmd)
return True
x = x_before_mapping_back
else:
if prediction_original == "AD" and prediction_remote == "NONAD":
msg = "SUCCESS, iter_%d, %s, %s, %s" % (i, domain, url_id, str(diff))
print(msg)
logger.info(msg)
return True
if draw:
plt.plot(x_axis, y_l2, linewidth=3)
plt.plot(x_axis, y_lf, linewidth=3)
plt.show()
msg = "FAIL, iter_%d, %s, %s, %s, %s, %s, %s" % (i, original_domain, final_domain, str(mapping_diff), url_id, original_url, url)
print(msg)
logger.info(msg)
return False
class LinfinityGradientMixin(object):
def _gradient(self, a, x, class_, strict=True):
gradient = a.gradient_one(x, class_, strict=strict)
gradient = np.sign(gradient)
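# L-infinity steepest-ascent direction: keep only the sign of each gradient
# component, then rescale by the feature range (max_ - min_) below so the
# step size is interpreted relative to the bounds, as in FGSM-style attacks.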
min_, max_ = a.bounds()
gradient = (max_ - min_) * gradient
return gradient
class L1GradientMixin(object):
def _gradient(self, a, x, class_, strict=True):
gradient = a.gradient_one(x, class_, strict=strict)
# using mean to ...
result2 = other + va
assert isinstance(result2, Other)
assert_equal(tuple(result2),
(self.Vec2(1,0), self.Vec2(4,1), self.Vec2(6,1)))
def test_add_vector_to_array(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)]) + self.Vec2(-2,3)
assert isinstance(va, self.Vec2Array), repr(va)
assert_equal(tuple(va),
(self.Vec2(-2,4), self.Vec2(0,6), self.Vec2(1,7)))
va = va + (2,-1)
assert_equal(tuple(va),
(self.Vec2(0,3), self.Vec2(2,5), self.Vec2(3,6)))
@raises(ValueError)
def test_add_arrays_of_different_length(self):
self.Vec2Array([(0,0), (0,1)]) + self.Vec2Array([(1,2)])
@raises(TypeError)
def test_add_incompatible(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) + [1, 2, 3]
def test_iadd_arrays(self):
va = a = self.Vec2Array([(1,2), (3,4)])
va += self.Vec2Array([(-1,-1), (1,-2)])
assert va is a
assert_equal(tuple(va), (self.Vec2(0,1), self.Vec2(4,2)))
def test_iadd_array_to_other(self):
class Other(self.Seq2):
pass
va = a = self.Vec2Array([(0,1), (2,3), (3,4)])
va += Other([(1,-1), (2,-2), (3,-3)])
assert va is a
assert_equal(tuple(va),
(self.Vec2(1,0), self.Vec2(4,1), self.Vec2(6,1)))
def test_iadd_vector_to_array(self):
va = a = self.Vec2Array([(0,1), (2,3), (3,4)])
va += self.Vec2(-2,3)
assert va is a
assert_equal(tuple(va),
(self.Vec2(-2,4), self.Vec2(0,6), self.Vec2(1,7)))
va += (-1,1)
assert va is a
assert_equal(tuple(va),
(self.Vec2(-3,5), self.Vec2(-1,7), self.Vec2(0,8)))
@raises(ValueError)
def test_iadd_arrays_of_different_length(self):
va = self.Vec2Array([(0,0), (0,1)])
va += self.Vec2Array([(1,2), (2,2), (3,2)])
@raises(TypeError)
def test_iadd_incompatible(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va += None
def test_sub_arrays(self):
va1 = self.Vec2Array([(1,2), (3,4)])
va2 = self.Vec2Array([(-1,-1), (1,-2)])
va3 = va1 - va2
assert va3 is not va1
assert va3 is not va2
assert_equal(tuple(va3), (self.Vec2(2,3), self.Vec2(2,6)))
def test_sub_array_from_other(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(0,1), (2,3), (3,4)])
other = Other([(1,-1), (2,-2), (3,-3)])
result2 = other - va
assert isinstance(result2, Other)
assert_equal(tuple(result2),
(self.Vec2(1,-2), self.Vec2(0,-5), self.Vec2(0,-7)))
@raises(TypeError)
def test_sub_other_from_array(self):
class Other(self.Seq2):
pass
self.Vec2Array([(0,1), (2,3), (3,4)]) - Other([(1,-1), (2,-2), (3,-3)])
def test_sub_vector_from_array(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)]) - self.Vec2(-2,3)
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(2,-2), self.Vec2(4,0), self.Vec2(5,1)))
va = va - (2,-1)
assert_equal(tuple(va),
(self.Vec2(0,-1), self.Vec2(2,1), self.Vec2(3,2)))
@raises(TypeError)
def test_sub_array_from_vector(self):
self.Vec2(9, 2) - self.Vec2Array([(0,1), (2,3)])
@raises(ValueError)
def test_sub_arrays_of_different_length(self):
self.Vec2Array([(0,0), (0,1)]) - self.Vec2Array([(1,2)])
@raises(ValueError)
def test_rsub_arrays_of_different_length(self):
class Other(self.Seq2):
pass
Other([(0,0), (0,1)]) - self.Vec2Array([(1,2)])
@raises(TypeError)
def test_sub_incompatible(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) - [1, 2, 3]
@raises(TypeError)
def test_rsub_incompatible(self):
None - self.Vec2Array([(0,1), (2,3), (3,4)])
def test_isub_arrays(self):
va = a = self.Vec2Array([(1,2), (3,4)])
va -= self.Vec2Array([(-1,-1), (1,-2)])
assert va is a
assert_equal(tuple(va), (self.Vec2(2,3), self.Vec2(2,6)))
@raises(TypeError)
def test_isub_other_from_array(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va -= Other([(1,-1), (2,-2), (3,-3)])
def test_isub_vector_from_array(self):
va = a = self.Vec2Array([(0,1), (2,3), (3,4)])
va -= self.Vec2(-2,3)
assert va is a
assert_equal(tuple(va),
(self.Vec2(2,-2), self.Vec2(4,0), self.Vec2(5,1)))
va -= (-1,1)
assert va is a
assert_equal(tuple(va),
(self.Vec2(3,-3), self.Vec2(5,-1), self.Vec2(6,0)))
@raises(ValueError)
def test_isub_arrays_of_different_length(self):
va = self.Vec2Array([(0,0), (0,1)])
va -= self.Vec2Array([(1,2), (2,2), (3,2)])
@raises(TypeError)
def test_isub_incompatible(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va -= None
def test_mul_arrays(self):
va1 = self.Vec2Array([(1,2), (3,4)])
va2 = self.Vec2Array([(-1,-1), (1,-2)])
va3 = va1 * va2
assert va3 is not va1
assert va3 is not va2
assert_equal(tuple(va3), (self.Vec2(-1,-2), self.Vec2(3,-8)))
def test_mul_array_with_other(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(0,1), (2,3), (3,4)])
other = Other([(1,-1), (2,-2), (3,-3)])
result = va * other
assert result is not other
assert isinstance(result, Other)
assert_equal(tuple(result),
(self.Vec2(0,-1), self.Vec2(4,-6), self.Vec2(9,-12)))
result2 = other * va
assert isinstance(result2, Other)
assert_equal(tuple(result2),
(self.Vec2(0,-1), self.Vec2(4,-6), self.Vec2(9,-12)))
def test_mul_vector_with_array(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)]) * self.Vec2(-2,3)
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(0,3), self.Vec2(-4,9), self.Vec2(-6,12)))
va = self.Vec2(-2,3) * self.Vec2Array([(0,1), (2,3), (3,4)])
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(0,3), self.Vec2(-4,9), self.Vec2(-6,12)))
# XXX doesn't work in py3
# va = va * (2,-1)
# assert_equal(tuple(va),
# (self.Vec2(0,-3), self.Vec2(-8,-9), self.Vec2(-12,-12)))
def test_mul_vector_with_scalar(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)]) * 4
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(0,4), self.Vec2(8,12), self.Vec2(12, 16)))
va = 4 * self.Vec2Array([(0,1), (2,3), (3,4)])
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(0,4), self.Vec2(8,12), self.Vec2(12, 16)))
@raises(ValueError)
def test_mul_arrays_of_different_length(self):
self.Vec2Array([(0,0), (0,1)]) * self.Vec2Array([(1,2)])
@raises(TypeError)
def test_mul_incompatible(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) * [1, 2, 3]
def test_imul_arrays(self):
va = a = self.Vec2Array([(1,2), (3,4)])
va *= self.Vec2Array([(-1,-1), (1,-2)])
assert va is a
assert_equal(tuple(va), (self.Vec2(-1,-2), self.Vec2(3,-8)))
@raises(TypeError)
def test_imul_array_with_other(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va *= Other([(1,-1), (2,-2), (3,-3)])
def test_imul_vector_with_array(self):
va = a = self.Vec2Array([(0,1), (2,3), (3,4)])
va *= self.Vec2(-2,3)
assert va is a
assert_equal(tuple(va),
(self.Vec2(0,3), self.Vec2(-4,9), self.Vec2(-6,12)))
def test_imul_vector_with_scalar(self):
va = a = self.Vec2Array([(0,1), (2,3), (3,4)])
va *= 4
assert va is a
assert_equal(tuple(va),
(self.Vec2(0,4), self.Vec2(8,12), self.Vec2(12, 16)))
@raises(ValueError)
def test_imul_arrays_of_different_length(self):
va = self.Vec2Array([(0,0), (0,1)])
va *= self.Vec2Array([(1,2)])
@raises(TypeError)
def test_imul_incompatible(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va *= [1, 2, 3]
def test_truediv_arrays(self):
va1 = self.Vec2Array([(1,2), (3,5)])
va2 = self.Vec2Array([(-1,-1), (1,-2)])
va3 = va1 / va2
assert va3 is not va1
assert va3 is not va2
assert_equal(tuple(va3), (self.Vec2(-1,-2), self.Vec2(3,-2.5)))
def test_truediv_other_by_array(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(2,1), (2,3), (3,4)])
other = Other([(1,-1), (2,-2), (3,-3)])
result = other / va
assert isinstance(result, Other)
assert_equal(tuple(result),
(self.Vec2(0.5,-1), self.Vec2(1,-2/3), self.Vec2(1,-0.75)))
def test_truediv_array_by_vector(self):
va = self.Vec2Array([(2,1), (1,3), (3,4)]) / self.Vec2(-2,3)
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(-1,1/3), self.Vec2(-0.5,1), self.Vec2(-1.5,4/3)))
@raises(TypeError)
def test_truediv_vector_by_array(self):
self.Vec2(-2,3) / self.Vec2Array([(0,1), (2,3), (3,4)])
def test_truediv_vector_with_scalar(self):
va = self.Vec2Array([(0,1), (5,3), (3,4)]) / 4
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(0,0.25), self.Vec2(1.25,0.75), self.Vec2(0.75, 1)))
@raises(ValueError)
def test_rtruediv_arrays_of_different_length(self):
class Other(self.Seq2):
pass
Other([(1,2)]) / self.Vec2Array([(0,0), (0,1)])
@raises(ValueError)
def test_truediv_arrays_of_different_length(self):
self.Vec2Array([(0,0), (0,1)]) / self.Vec2Array([(1,2)])
@raises(TypeError)
def test_truediv_incompatible(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) / [1, 2, 3]
@raises(ZeroDivisionError)
def test_truediv_by_zero_scalar(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) / 0
@raises(ZeroDivisionError)
def test_truediv_by_zero_vector(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) / self.Vec2(-1,0)
@raises(ZeroDivisionError)
def test_truediv_by_zero_vector_array(self):
self.Vec2Array([(0,1), (2,3)]) / self.Vec2Array([(1,1), (0,3)])
def test_itruediv_arrays(self):
va = a = self.Vec2Array([(1,2), (3,4)])
va /= self.Vec2Array([(-1,-5), (1,-3)])
assert va is a
assert_equal(tuple(va), (self.Vec2(-1,-2/5), self.Vec2(3,-4/3)))
def test_itruediv_other_by_array(self):
class Other(self.Seq2):
pass
other = Other([(1,-1), (2,-2), (3,-3)])
other /= self.Vec2Array([(3,1), (2,3), (3,4)])
assert isinstance(other, Other)
assert_equal(tuple(other),
(self.Vec2(1/3, -1), self.Vec2(1,-2/3), self.Vec2(1,-0.75)))
@raises(TypeError)
def test_itruediv_array_by_other(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va /= Other([(1,-1), (2,-2), (3,-3)])
def test_itruediv_array_by_vector(self):
va = a = self.Vec2Array([(0,1), (2,3), (3,4)])
va /= self.Vec2(-2,3)
assert va is a
assert_equal(tuple(va),
(self.Vec2(0,1/3), self.Vec2(-1,1), self.Vec2(-1.5,4/3)))
def test_itruediv_vector_with_scalar(self):
va = a = self.Vec2Array([(0,1), (2,3), (3,4)])
va /= 4
assert va is a
assert_equal(tuple(va),
(self.Vec2(0,0.25), self.Vec2(0.5,0.75), self.Vec2(0.75, 1)))
@raises(ValueError)
def test_itruediv_arrays_of_different_length(self):
va = self.Vec2Array([(0,0), (0,1)])
va /= self.Vec2Array([(1,2)])
@raises(TypeError)
def test_itruediv_incompatible(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va /= [1, 2, 3]
@raises(ZeroDivisionError)
def test_itruediv_by_zero_scalar(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va /= 0
@raises(ZeroDivisionError)
def test_itruediv_by_zero_vector(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va /= self.Vec2(-1,0)
@raises(ZeroDivisionError)
def test_itruediv_by_zero_vector_array(self):
va = self.Vec2Array([(0,1), (2,3)])
va /= self.Vec2Array([(1,1), (0,3)])
def test_floordiv_arrays(self):
va1 = self.Vec2Array([(1,2), (3,5)])
va2 = self.Vec2Array([(-1,-1), (1,-2)])
va3 = va1 // va2
assert va3 is not va1
assert va3 is not va2
assert_equal(tuple(va3), (self.Vec2(-1,-2), self.Vec2(3,-3)))
def test_floordiv_other_by_array(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(2,1), (2,3), (3,4)])
other = Other([(1,-1), (2,-2), (3,-3)])
result = other // va
assert isinstance(result, Other)
assert_equal(tuple(result),
(self.Vec2(0,-1), self.Vec2(1,-1), self.Vec2(1,-1)))
def test_floordiv_array_by_vector(self):
va = self.Vec2Array([(2,1), (1,3), (3,4)]) // self.Vec2(-2,3)
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(-1,0), self.Vec2(-1,1), self.Vec2(-2,1)))
@raises(TypeError)
def test_floordiv_vector_by_array(self):
self.Vec2(-2,3) // self.Vec2Array([(0,1), (2,3), (3,4)])
def test_floordiv_vector_with_scalar(self):
va = self.Vec2Array([(0,1), (5,3), (3,4)]) // 4
assert isinstance(va, self.Vec2Array)
assert_equal(tuple(va),
(self.Vec2(0,0), self.Vec2(1,0), self.Vec2(0, 1)))
@raises(ValueError)
def test_floordiv_arrays_of_different_length(self):
self.Vec2Array([(0,0), (0,1)]) // self.Vec2Array([(1,2)])
@raises(ValueError)
def test_rfloordiv_arrays_of_different_length(self):
class Other(self.Seq2):
pass
Other([(1,2)]) // self.Vec2Array([(0,0), (0,1)])
@raises(TypeError)
def test_floordiv_incompatible(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) // [1, 2, 3]
@raises(ZeroDivisionError)
def test_floordiv_by_zero_scalar(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) // 0
@raises(ZeroDivisionError)
def test_floordiv_by_zero_vector(self):
self.Vec2Array([(0,1), (2,3), (3,4)]) // self.Vec2(-1,0)
@raises(ZeroDivisionError)
def test_floordiv_by_zero_vector_array(self):
self.Vec2Array([(0,1), (2,3)]) // self.Vec2Array([(1,1), (0,3)])
def test_ifloordiv_arrays(self):
va = a = self.Vec2Array([(1,2), (3,4)])
va //= self.Vec2Array([(-1,-5), (1,-3)])
assert va is a
assert_equal(tuple(va), (self.Vec2(-1,-1), self.Vec2(3,-2)))
def test_ifloordiv_other_by_array(self):
class Other(self.Seq2):
pass
other = Other([(1,-1), (2,-2), (3,-9)])
other //= self.Vec2Array([(3,1), (2,3), (3,4)])
assert isinstance(other, Other)
assert_equal(tuple(other),
(self.Vec2(0, -1), self.Vec2(1,-1), self.Vec2(1,-3)))
@raises(TypeError)
def test_ifloordiv_array_by_other(self):
class Other(self.Seq2):
pass
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va //= Other([(1,-1), (2,-2), (3,-3)])
def test_ifloordiv_array_by_vector(self):
va = a = self.Vec2Array([(0,1), (2,3), (3,7)])
va //= self.Vec2(-2,3)
assert va is a
assert_equal(tuple(va),
(self.Vec2(0,0), self.Vec2(-1,1), self.Vec2(-2,2)))
def test_ifloordiv_vector_with_scalar(self):
va = a = self.Vec2Array([(0,1), (2,4), (3,9)])
va //= 4
assert va is a
assert_equal(tuple(va),
(self.Vec2(0,0), self.Vec2(0,1), self.Vec2(0,2)))
@raises(ValueError)
def test_ifloordiv_arrays_of_different_length(self):
va = self.Vec2Array([(0,0), (0,1)])
va //= self.Vec2Array([(1,2)])
@raises(TypeError)
def test_ifloordiv_incompatible(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va //= [1, 2, 3]
@raises(ZeroDivisionError)
def test_ifloordiv_by_zero_scalar(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va //= 0
@raises(ZeroDivisionError)
def test_ifloordiv_by_zero_vector(self):
va = self.Vec2Array([(0,1), (2,3), (3,4)])
va //= self.Vec2(-1,0)
@raises(ZeroDivisionError)
def test_ifloordiv_by_zero_vector_array(self):
va = self.Vec2Array([(0,1), (2,3)]) | |
# scripts/PT_WebExtractor_v2_0/extras/Merge_Inventories/Merge_PT_to_XL.py
import os
import sys
import argparse
import glob
import traceback
import codecs
import fileinput
import datetime
import time
import dateutil.parser as parser
import csv
import collections
import re
from StringIO import StringIO
home_folder = os.path.abspath(os.path.join(__file__, "..\\..\\.."))
scripts_folder = os.path.join(os.sep, home_folder, 'scripts')
sys.path.append(scripts_folder)
from common import shared
from common import spreadsheet as sh
def copy_csv(csv_f, outcsv_file):
row_lst = []
# Grab each line in the CSV file
csv_lines = csv_f.readlines()
total_lines = len(csv_lines)
for idx, line in enumerate(csv_lines[1:]):
msg = "Copying %s of %s lines" % (idx + 1, len(csv_lines[1:]))
shared.print_oneliner(msg)
#print line.encode(sys.stdout.encoding, errors='replace')
items = line.split(',')
csv_bname = os.path.basename(csv_f.name).replace("_results.csv", "")
out_items = ['"%s"' % csv_bname] + items
out_line = ','.join(out_items)
out_line = shared.filter_unicode(out_line)
outcsv_file.write(out_line)
# Store the copied output line in a list
row_lst.append(out_line)
print
def get_csv(csv_f, in_header=None):
# Grab each line in the CSV file
csv_lines = csv_f.readlines()
row_lst = []
#print csv_lines[0].replace(u'\ufeff', '')
#print "\nHeader: %s" % header
if in_header is None:
in_header = shared.get_header(csv_lines)
#print "\nHeader: %s" % header
for idx, line in enumerate(csv_lines[1:]):
msg = "Extracting %s of %s lines" % (idx + 1, len(csv_lines[1:]))
shared.print_oneliner(msg)
#print "header: %s" % header
# Get the source based on the file
source = os.path.basename(csv_f.name).replace("_results.csv", "")
dict_header = in_header[:]
if 'Source' not in in_header:
line = '"%s",%s' % (source, line)
#print "line: %s" % line
# Add the 'Source' to the header
dict_header.insert(0, 'Source')
#print "header: %s" % header
# Convert the line to a dictionary
#print "line: %s" % line
row_dict = row_to_dict(line, dict_header)
if row_dict is None: continue
if row_dict['Source'] == '':
row_dict['Source'] = source
# Store the title and description in a list
row_lst.append(row_dict)
print
return row_lst
def row_to_dict(row, header):
#print header
#answer = raw_input("Press enter...")
# Convert each row in the row list to a CSV reader object
try:
filtered_row = shared.filter_unicode(row, french=True)
filtered_row = filtered_row.replace('\r', ' ')
#csv_reader = sh.UnicodeReader(StringIO(filtered_row))
entry_lst = shared.parse_csv_row(filtered_row)
#entry_lst = [item for item in csv_reader]
except Exception, e:
# If an error occurs, filter the row to remove unicode characters
filtered_row = shared.filter_unicode(row, french=True)
#csv_reader = csv.reader(StringIO(shared.filter_unicode(filtered_row)))
entry_lst = shared.parse_csv_row(filtered_row)
if len(entry_lst) == 0:
print "\n"
print "\nWARNING: The current row is empty."
answer = raw_input("Press enter...")
return None
if not len(entry_lst) == len(header):
print "\n"
print
print "Header: %s" % header
print
print "Number of columns: %s" % len(header)
print
print "Input Row: %s" % row
print
print "Filtered Row: %s" % filtered_row
print
print "Entry List: %s" % entry_lst
print
print len(entry_lst)
print "\nWARNING: The current row does not contain the correct " \
"number of columns."
answer = raw_input("Press enter...")
# Convert entry to dictionary
entry_dict = collections.OrderedDict()
for idx, h in enumerate(header):
entry_dict[h] = entry_lst[idx]
return entry_dict
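# Illustrative example (hypothetical row): with header ['Source', 'Title'] and
# row '"NB","Roads"', this returns an OrderedDict mapping 'Source' -> 'NB' and
# 'Title' -> 'Roads' (assuming shared.parse_csv_row splits quoted CSV fields
# the way csv.reader does).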
def run(juris):
''' Merges all the CSV files in a province/territory to a single CSV file
:param juris: The province/territory to merge.
'''
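# Example (hypothetical jurisdiction name): run('Nova Scotia') would merge all
# CSV files under results\Nova_Scotia into _Nova_Scotia_merged.xlsx.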
work_folder = shared.get_home_folder() + '\\results\\%s' % juris.replace(" ", "_")
os.system("title Merging PT XLs - %s" % juris)
#csv_header = ['Title', 'Description', 'Type', 'Start Date', 'Recent Date',
# 'Update Frequency', 'Publisher',
# 'Licensing', 'Available Formats', 'Access', 'Download',
# 'Web Page URL', 'Web Map URL', 'Data URL',
# 'Spatial Reference', 'Service', 'Service Name', 'Service URL',
# 'Metadata URL', 'Metadata Type', 'Notes']
csv_header = sh.get_header_info()['csv']
# Get a list of the CSV files
csv_files = glob.glob(work_folder + "\\*.csv")
# Find the location of the 'err_log.csv' file and move it to the end of the file list
err_log = [(idx, fn) for idx, fn in enumerate(csv_files) if fn.find('err_log') > -1]
if len(err_log) > 0:
csv_files.append(csv_files.pop(err_log[0][0]))
csv_count = len(csv_files)
items_lst = []
# Go through each CSV file
row_lst = []
for idx, csv_fn in enumerate(csv_files[:csv_count - 2]):
print "\nCopying '%s':" % os.path.basename(csv_fn)
# Avoid a merged file if it exists
if csv_fn.find('merged') > -1: continue
if csv_fn.find('duplicate') > -1: continue
csv_f = codecs.open(csv_fn, encoding='utf-8', mode='r')
# Store the rows in a list
row_lst += get_csv(csv_f)
csv_f.close()
# Merge any duplicate records
dup_tuples = shared.find_duplicates(row_lst)
all_dups = [x for t in dup_tuples for x in t]
merged_rows = shared.merge_duplicates(row_lst, dup_tuples)
merged_indices = [v[0] for v in merged_rows]
# Go through each item in the row_lst, ignore duplicates indices
final_rows = []
for idx, row in enumerate(row_lst):
if idx in merged_indices:
# If the current index is contained in the merged_rows,
# make the output row the merged row
new_row = [v[1] for v in merged_rows if v[0] == idx][0]
final_rows.append(new_row)
else:
if idx not in all_dups:
# If the current index is not part of a duplicate,
# make the output row the original row
new_row = row
final_rows.append(new_row)
xl_header = sh.get_header_info()['xl']
outcsv_fn = "%s\\_%s_merged.xlsx" % (work_folder, juris.replace(" ", "_"))
pt_xl = sh.PT_XL(fn=outcsv_fn, header=xl_header, write_only=True,
replace_ws=True, ws_title='Merged Datasets')
for row in final_rows:
for k, v in row.items():
#print "%s: %s" % (k, v)
pt_xl.add_item(k, v)
pt_xl.write_row()
#answer = raw_input("Press enter...")
pt_xl.save_file()
def main():
try:
parser = argparse.ArgumentParser()
parser.add_argument("-j", "--jurisdiction", help="The province or territory to be extracted.",
metavar="Province or Territory")
args = parser.parse_args()
juris = args.jurisdiction
print "\n\n########################################################"\
"################################"
print
print " FGP P/T Web Extractor Merger version 1.1"
answer = ''
while not answer.lower() == 'quit' and \
not answer.lower() == 'exit':
answer = 'debug'
print
print "##########################################################"\
"##############################"
juris = shared.prompt_juris(juris)
if juris is None: continue
elif juris == 'help':
parser.print_help()
juris = None
continue
elif juris == 'exit':
print "\nExiting P/T Web Extractor Merger."
sys.exit(0)
run(juris)
print "\nMerge completed successfully."
# Reset parameters
juris = None
except Exception, err:
print 'ERROR: %s\n' % str(err)
print traceback.format_exc()
if __name__ == '__main__':
sys.exit(main())
# def merge_duplicates(row_lst, header):
# # Create the titles and descriptions to keep track of
# # what has already been seen
# # These lists will contain tuples with the current row index as
# # the first entry and the value as the second
# seen_lst = []
# dup_indices = []
# for idx, row in enumerate(row_lst):
# msg = "Checking for duplicates: %s of %s lines" % (idx + 1, len(row_lst))
# shared.print_oneliner(msg)
# row_dict = row_to_dict(row, header)
# #print entry_dict
# #answer = raw_input("Press enter...")
# title = row_dict['Title']
# desc = row_dict['Description']
# # Check for duplicates based on Title and Description
# for idx, s in enumerate(seen_lst):
# if title == s['Title'] and desc[50:] == s['Description'][50:]:
# #dup_idx = s['Index']
# # Merge the two duplicates together:
# # - Use the most recent date
# # - If one cell is blank but the other isn't, use the one
# # with the value
# # Determine the date
# date1 = s['Date']
# date2 = row_dict['Date']
# new_date = get_recent_date(date1, date2)
# row_dict['Date'] = '"%s"' % new_date
# # Go through each column and determine is the value is empty
# for h in header:
# # Skip the items already checked and updated
# if h == 'Title' or h == 'Description' or h == 'Date' or \
# h == 'Source':
# continue
# val1 = s[h]
# val2 = row_dict[h]
# #print "\n%s" % h
# #print "val1: %s" % val1
# #print "val2: %s" % val2
# # Check to see if the MapServer has a date
# ms_date = re.search('\d{4}\d{2}\d{2}', val1)
# if ms_date is not None:
# # Get the most recent MapServer dataset
# ms_date1 = re.search('\d{4}\d{2}\d{2}', val1)
# ms_date2 = re.search('\d{4}\d{2}\d{2}', val2)
# #print "ms_date1: %s" % ms_date1
# #print "ms_date2: %s" % ms_date2
# ms_newdate = get_recent_date(ms_date1.group(0),
# ms_date2.group(0),
# '%Y%m%d')
# if val1.find(ms_newdate) > -1:
# ms_newest = val1
# else:
# ms_newest = val2
# #print ms_newest
# #answer = raw_input("Press enter...")
# new_val = '"%s"' % val1
# if val1.strip('"').strip() == '':
# new_val = val2
# if val2.strip('"').strip() == '':
# new_val = val1
# #print "new_val: %s" % new_val
# row_dict[h] = new_val
# dup_indices.append(idx)
# #answer = raw_input("Press enter...")
# break
# # Add the current row dictionary to the seen list
# #entry_dict['Index'] = idx
# seen_lst.append(row_dict)
# print
# # FOR DEBUG ONLY:
# out_f = codecs.open('all_rows.txt', encoding='utf-8', mode='w')
# for s in seen_lst:
# row_lst = s.values()
# out_f.write("%s\n" % '\t'.join(row_lst))
# out_f.write('%s' % ", ".join([str(d) for d in dup_indices]))
# out_f.close()
# print dup_indices
# # Remove any duplicates from the seen list
# out_list = []
# for idx, row in enumerate(seen_lst):
# if idx not in dup_indices:
# out_list.append(row)
# print len(out_list)
# return out_list
# def remove_duplicates_old(csv_fn):
# # seen = set() # set for fast O(1) amortized lookup
# # for line in fileinput.FileInput(csv_fn, inplace=1):
# # # Split the line by ','
# # split_line = line.split(',')
# # # Remove first column from the line for duplicate check
# # check_line = ','.join(split_line[1:])
# # if check_line not in seen:
# # seen.add(check_line)
# # print line,
# print csv_fn
# # Open the CSV and add all lines to a list
# csv_f = codecs.open(csv_fn, encoding='utf-8', mode='rb')
# # Read the lines from the merged file
# lines = csv_f.readlines()
# # Get the header from the first line in the file
# header = lines[0].strip().split(',')
# # The output lines converted to dictionaries
# out_lines = []
# # Any lines that don't contain the same number
# # of columns as the header
# extra_lines = []
# # For Debugging:
# #tmp_lines = []
# indices = []
# # Go through each line to convert it to a dictionary
# f = codecs.open('err.txt', encoding='utf-8', mode='w')
# for idx, row in enumerate(lines[1:]):
# msg = "Checking for duplicates: %s of %s lines" % (idx + 1, len(lines[1:]))
# shared.print_oneliner(msg)
# try:
# row = shared.filter_unicode(row, french=True)
# csv_reader = sh.UnicodeReader(StringIO(row))
# row_lst = [item for item in csv_reader]
# except Exception, e:
# # If an error occurs, filter the row to remove unicode | |
REGEXP attached.
"""
self.__test_clean_regexp_good_h(self.STR, "1e.+")
def test_clean_regexp_int_good(self):
"""
Testing clean() on an int with a good REGEXP attached.
"""
# Note that this would be a pretty dumb regexp to put on an integer!
self.__test_clean_regexp_good_h(self.INT, "bar")
def test_clean_regexp_bool_good(self):
"""
Testing clean() on a Boolean with a good REGEXP attached.
"""
self.__test_clean_regexp_good_h(self.BOOL, "T|F")
####
def __test_clean_regexp_bad_h(self, builtin_type, pattern):
"""
Helper to create bad REGEXP-constraint test cases.
"""
regexped_DT = Datatype(name="RegexpedDT", description="Datatype with bad REGEXP attached",
user=kive_user())
regexped_DT.full_clean()
regexped_DT.save()
regexped_DT.restricts.add(builtin_type)
regexp_constr = regexped_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP, rule="{}".format(pattern))
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexp_constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexped_DT.clean)
def test_clean_regexp_str_bad(self):
"""
Testing clean() on a string with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.STR, "(.+")
def test_clean_regexp_float_bad(self):
"""
Testing clean() on a float with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.FLOAT, "[a-z")
def test_clean_regexp_int_bad(self):
"""
Testing clean() on an int with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.INT, "1)")
def test_clean_regexp_bool_bad(self):
"""
Testing clean() on a Boolean with a bad REGEXP attached.
"""
self.__test_clean_regexp_bad_h(self.BOOL, "1919)")
####
def __test_clean_dtf_good_h(self, format_string):
"""
Helper for testing clean() on good DATETIMEFORMATs.
"""
dtf_DT = Datatype(name="GoodDTF", description="String with a DTF constraint attached",
user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(self.STR)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule=format_string)
self.assertEquals(dtf.clean(), None)
# Propagation check.
self.assertEquals(dtf_DT.clean(), None)
def test_clean_dtf_good(self):
"""
Testing clean() on a good DATETIMEFORMAT BasicConstraint.
"""
self.__test_clean_dtf_good_h("%Y %b %d")
def test_clean_dtf_good_2(self):
"""
Testing clean() on a second good DATETIMEFORMAT BasicConstraint.
"""
self.__test_clean_dtf_good_h("%A, %Y-%m-%d %H:%M:%S %z")
def test_clean_dtf_good_3(self):
"""
Testing clean() on a third good DATETIMEFORMAT BasicConstraint.
"""
self.__test_clean_dtf_good_h("FOOBAR")
def __test_clean_dtf_bad_h(self, builtin_type, format_string):
"""
Helper for testing clean() on DATETIMEFORMATs applied to non-strings.
"""
dtf_DT = Datatype(name="BadDTF", description="Non-string with a DTF constraint attached",
user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(builtin_type)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule=format_string)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, builtin_type)),
dtf.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, builtin_type)),
dtf_DT.clean)
def test_clean_dtf_float_bad(self):
"""
Testing clean() on a DATETIMEFORMAT applied to a float.
"""
self.__test_clean_dtf_bad_h(self.FLOAT, "%Y %b %d")
def test_clean_dtf_int_bad(self):
"""
Testing clean() on a DATETIMEFORMAT applied to an int.
"""
self.__test_clean_dtf_bad_h(self.INT, "FOOBAR")
def test_clean_dtf_bool_bad(self):
"""
Testing clean() on a DATETIMEFORMAT applied to a Boolean.
"""
self.__test_clean_dtf_bad_h(self.BOOL, "2014-%m-%d %H:%M:%S %z")
########
def __test_clean_incomplete_parent_bad_h(self, BC_type, constr_val):
"""
Helper for clean() on a BasicConstraint attached to an incomplete Datatype.
"""
incomplete_DT = Datatype(name="IncompleteDT", description="Datatype that does not restrict any builtin",
user=kive_user())
incomplete_DT.full_clean()
incomplete_DT.save()
constr = incomplete_DT.basic_constraints.create(ruletype=BC_type, rule="{}".format(constr_val))
err_msg_key = "BC_DT_not_complete"
self.assertRaisesRegexp(ValidationError,
re.escape('Parent Datatype "{}" of BasicConstraint "{}" is not complete'
.format(incomplete_DT, constr)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('Parent Datatype "{}" of BasicConstraint "{}" is not complete'
.format(incomplete_DT, constr)),
incomplete_DT.clean)
def test_clean_incomplete_parent_regexp_bad(self):
"""
Testing clean() on a REGEXP BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.REGEXP, ".*")
def test_clean_incomplete_parent_dtf_bad(self):
"""
Testing clean() on a DATETIMEFORMAT BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.DATETIMEFORMAT, "%Y %b %d")
def test_clean_incomplete_parent_min_val_bad(self):
"""
Testing clean() on a MIN_VAL BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MIN_VAL, 16)
def test_clean_incomplete_parent_max_val_bad(self):
"""
Testing clean() on a MAX_VAL BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MAX_VAL, 333333)
def test_clean_incomplete_parent_min_length_bad(self):
"""
Testing clean() on a MIN_LENGTH BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MIN_LENGTH, 2)
def test_clean_incomplete_parent_max_length_bad(self):
"""
Testing clean() on a MAX_LENGTH BasicConstraint attached to an incomplete Datatype.
"""
self.__test_clean_incomplete_parent_bad_h(BasicConstraint.MAX_LENGTH, 27)
########
# Some "greatest hits" from the above testing cases where the
# parent Datatype does not directly inherit from a builtin.
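# Example chain used below: ConstrDT restricts "Middleman DT", which in turn
# restricts a builtin (e.g. INT), so constraints on ConstrDT are checked
# against the builtin reached through the chain.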
def test_clean_second_gen_min_val_int_good(self):
"""
Testing clean() on a well-defined MIN_VAL constraint on a second-generation integer.
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.INT)
constr_DT = Datatype(name="ConstrDT", description="Constrained Datatype", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MIN_VAL, rule="{}".format(-7.5))
self.assertEquals(constr.clean(), None)
# Propagation check
self.assertEquals(constr_DT.clean(), None)
def test_clean_second_gen_max_val_float_bad(self):
"""
Testing clean() on a badly-defined MAX_VAL constraint (second-gen float).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.FLOAT)
constr_DT = Datatype(name="ConstrDT", description="Constrained Datatype", user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MAX_VAL, rule="foo")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on numeric value, '
'which is not a number'.format(constr, "foo")),
constr_DT.clean)
def test_clean_second_gen_min_length_bool_bad(self):
"""
Testing clean() on a badly-defined MIN_LENGTH constraint (second-gen Boolean).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.BOOL)
constr_DT = Datatype(name="BooleanWithLengthConstraint",
description="Incorrectly length-constrained Datatype",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MIN_LENGTH, rule="{}".format(12))
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on string length, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound on string length, '
'but its parent Datatype "{}" has builtin type {}'
.format(constr, constr_DT, self.BOOL)),
constr_DT.clean)
def test_clean_second_gen_str_max_length_bad(self):
"""
Testing clean() on a badly-defined (str) MAX_LENGTH constraint (second-gen Datatype).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.STR)
constr_DT = Datatype(name="NonIntegerLengthConstraint",
description="String with poorly-formed length constraint",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MAX_LENGTH, rule="bar")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not an integer'.format(constr, "bar")),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not an integer'.format(constr, "bar")),
constr_DT.clean)
def test_clean_second_gen_min_length_non_positive_edge(self):
"""
Testing clean() on an edge-case non-positive (0) MIN_LENGTH constraint (second-gen Datatype).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.STR)
constr_DT = Datatype(name="TooSmallLengthConstraint",
description="String with too-small length constraint",
user=kive_user())
constr_DT.full_clean()
constr_DT.save()
constr_DT.restricts.add(parent_DT)
constr = constr_DT.basic_constraints.create(ruletype=BasicConstraint.MIN_LENGTH, rule="{}".format(0))
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not positive'.format(constr, 0)),
constr.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a bound of "{}" on string length, '
'which is not positive'.format(constr, 0)),
constr_DT.clean)
def test_clean_second_gen_regexp_good(self):
"""
Testing clean() on a second-gen Datatype with good REGEXP attached.
"""
mother_DT = Datatype(name="Mother", description="Mother", user=kive_user())
mother_DT.full_clean()
mother_DT.save()
mother_DT.restricts.add(self.STR)
father_DT = Datatype(name="Father", description="Father", user=kive_user())
father_DT.full_clean()
father_DT.save()
father_DT.restricts.add(self.STR)
milkman_DT = Datatype(name="Milkman", description="Milkman", user=kive_user())
milkman_DT.full_clean()
milkman_DT.save()
milkman_DT.restricts.add(self.STR)
regexped_DT = Datatype(name="RegexpedDT",
description="Datatype with good REGEXP attached",
user=kive_user())
regexped_DT.full_clean()
regexped_DT.save()
regexped_DT.restricts.add(mother_DT)
regexped_DT.restricts.add(father_DT)
regexped_DT.restricts.add(milkman_DT)
regexp_constr = regexped_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule="foo")
self.assertEquals(regexp_constr.clean(), None)
# Propagation check.
self.assertEquals(regexped_DT.clean(), None)
def test_clean_second_gen_regexp_bad(self):
"""
Testing clean() on a second-gen Datatype with a bad REGEXP constraint.
"""
Danny_DT = Datatype(name="<NAME>", description="Ostensible father", user=kive_user())
# Danny_DT.full_house()
Danny_DT.full_clean()
Danny_DT.save()
Danny_DT.restricts.add(self.INT)
Joey_DT = Datatype(name="<NAME>", description="Popeye imitator", user=kive_user())
Joey_DT.full_clean()
Joey_DT.save()
Joey_DT.restricts.add(self.INT)
Jesse_DT = Datatype(name="<NAME>", description="Mercy-haver", user=kive_user())
Jesse_DT.full_clean()
Jesse_DT.save()
Jesse_DT.restricts.add(self.INT)
# The bad regexp pattern.
pattern = "(.+"
regexped_DT = Datatype(name="RegexpedDT",
description="Datatype with bad REGEXP attached",
user=kive_user())
regexped_DT.full_clean()
regexped_DT.save()
regexped_DT.restricts.add(Danny_DT)
regexped_DT.restricts.add(Joey_DT)
regexped_DT.restricts.add(Jesse_DT)
regexp_constr = regexped_DT.basic_constraints.create(ruletype=BasicConstraint.REGEXP,
rule=pattern)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexp_constr.clean)
# Propagation check.
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies an invalid regular expression "{}"'
.format(regexp_constr, pattern)),
regexped_DT.clean)
def test_clean_second_gen_dtf_good(self):
"""
Testing clean() on a good DATETIMEFORMAT (second-gen Datatype).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.STR)
dtf_DT = Datatype(name="GoodDTF", description="String with a DTF constraint attached", user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(parent_DT)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y %b %d")
self.assertEquals(dtf.clean(), None)
# Propagation check.
self.assertEquals(dtf_DT.clean(), None)
def test_clean_second_gen_dtf_bad(self):
"""
Testing clean() on a DATETIMEFORMAT applied to a float (second-gen).
"""
parent_DT = Datatype(name="Middleman DT", description="Middleman DT", user=kive_user())
parent_DT.full_clean()
parent_DT.save()
parent_DT.restricts.add(self.FLOAT)
dtf_DT = Datatype(name="BadDTF", description="Float with a DTF constraint attached", user=kive_user())
dtf_DT.full_clean()
dtf_DT.save()
dtf_DT.restricts.add(parent_DT)
dtf = dtf_DT.basic_constraints.create(ruletype=BasicConstraint.DATETIMEFORMAT,
rule="%Y %b %d")
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, self.FLOAT)),
dtf.clean)
self.assertRaisesRegexp(ValidationError,
re.escape('BasicConstraint "{}" specifies a date/time format, but its parent Datatype '
'"{}" has builtin type "{}"'.format(dtf, dtf_DT, self.FLOAT)),
dtf_DT.clean)
class BasicConstraintGetEffectiveNumConstraintTests(BasicConstraintTestSetup):
def test_get_effective_min_val_builtins(self):
"""
get_effective_num_constraint, when used to retrieve MIN_VAL restrictions,
should give (None, -float("Inf")) for all builtins.
"""
self.assertEquals(self.STR.get_effective_num_constraint(BasicConstraint.MIN_VAL), (None, -float("inf")))
self.assertEquals(self.INT.get_effective_num_constraint(BasicConstraint.MIN_VAL), (None, -float("inf")))
self.assertEquals(self.FLOAT.get_effective_num_constraint(BasicConstraint.MIN_VAL), (None, -float("inf")))
self.assertEquals(self.BOOL.get_effective_num_constraint(BasicConstraint.MIN_VAL), (None, -float("inf")))
def test_get_effective_max_val_builtins(self):
"""
get_effective_num_constraint, when used to retrieve MAX_VAL restrictions,
should give (None, float("Inf")) for all builtins.
"""
self.assertEquals(self.STR.get_effective_num_constraint(BasicConstraint.MAX_VAL), (None, float("inf")))
self.assertEquals(self.INT.get_effective_num_constraint(BasicConstraint.MAX_VAL), (None, float("inf")))
self.assertEquals(self.FLOAT.get_effective_num_constraint(BasicConstraint.MAX_VAL), (None, float("inf")))
self.assertEquals(self.BOOL.get_effective_num_constraint(BasicConstraint.MAX_VAL), (None, float("inf")))
<gh_stars>0
# encoding=utf8
"""Particle swarm algorithm module."""
import numpy as np
from niapy.algorithms.algorithm import Algorithm
from niapy.util import full_array
from niapy.util.repair import reflect
__all__ = [
'ParticleSwarmAlgorithm',
'ParticleSwarmOptimization',
'CenterParticleSwarmOptimization',
'MutatedParticleSwarmOptimization',
'MutatedCenterParticleSwarmOptimization',
'ComprehensiveLearningParticleSwarmOptimizer',
'MutatedCenterUnifiedParticleSwarmOptimization',
'OppositionVelocityClampingParticleSwarmOptimization'
]
class ParticleSwarmAlgorithm(Algorithm):
r"""Implementation of Particle Swarm Optimization algorithm.
Algorithm:
Particle Swarm Optimization algorithm
Date:
2018
Authors:
<NAME>, <NAME>, <NAME>. and <NAME>
License:
MIT
Reference paper:
<NAME>. and <NAME>. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995.
Attributes:
Name (List[str]): List of strings representing algorithm names
c1 (float): Cognitive component.
c2 (float): Social component.
w (Union[float, numpy.ndarray[float]]): Inertial weight.
min_velocity (Union[float, numpy.ndarray[float]]): Minimal velocity.
max_velocity (Union[float, numpy.ndarray[float]]): Maximal velocity.
repair (Callable[[numpy.ndarray, numpy.ndarray, numpy.ndarray, Optional[numpy.random.Generator]], numpy.ndarray]): Repair method for velocity.
See Also:
* :class:`niapy.algorithms.Algorithm`
"""
Name = ['WeightedVelocityClampingParticleSwarmAlgorithm', 'WVCPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""<NAME>. and <NAME>. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995."""
def __init__(self, population_size=25, c1=2.0, c2=2.0, w=0.7, min_velocity=-1.5, max_velocity=1.5, repair=reflect,
*args, **kwargs):
"""Initialize ParticleSwarmAlgorithm.
Args:
population_size (int): Population size
c1 (float): Cognitive component.
c2 (float): Social component.
w (Union[float, numpy.ndarray]): Inertial weight.
min_velocity (Union[float, numpy.ndarray]): Minimal velocity.
max_velocity (Union[float, numpy.ndarray]): Maximal velocity.
repair (Callable[[np.ndarray, np.ndarray, np.ndarray, dict], np.ndarray]): Repair method for velocity.
See Also:
* :func:`niapy.algorithms.Algorithm.__init__`
"""
super().__init__(population_size, *args, **kwargs)
self.c1 = c1
self.c2 = c2
self.w = w
self.min_velocity = min_velocity
self.max_velocity = max_velocity
self.repair = repair
def set_parameters(self, population_size=25, c1=2.0, c2=2.0, w=0.7, min_velocity=-1.5, max_velocity=1.5,
repair=reflect, **kwargs):
r"""Set Particle Swarm Algorithm main parameters.
Args:
population_size (int): Population size
c1 (float): Cognitive component.
c2 (float): Social component.
w (Union[float, numpy.ndarray]): Inertial weight.
min_velocity (Union[float, numpy.ndarray]): Minimal velocity.
max_velocity (Union[float, numpy.ndarray]): Maximal velocity.
repair (Callable[[np.ndarray, np.ndarray, np.ndarray, dict], np.ndarray]): Repair method for velocity.
See Also:
* :func:`niapy.algorithms.Algorithm.set_parameters`
"""
super().set_parameters(population_size=population_size, **kwargs)
self.c1 = c1
self.c2 = c2
self.w = w
self.min_velocity = min_velocity
self.max_velocity = max_velocity
self.repair = repair
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.Algorithm.get_parameters`
"""
d = super().get_parameters()
d.update({
'c1': self.c1,
'c2': self.c2,
'w': self.w,
'min_velocity': self.min_velocity,
'max_velocity': self.max_velocity
})
return d
def init(self, task):
r"""Initialize dynamic arguments of Particle Swarm Optimization algorithm.
Args:
task (Task): Optimization task.
Returns:
Dict[str, Union[float, numpy.ndarray]]:
* w (numpy.ndarray): Inertial weight.
* min_velocity (numpy.ndarray): Minimal velocity.
* max_velocity (numpy.ndarray): Maximal velocity.
* v (numpy.ndarray): Initial velocity of particle.
"""
return {
'w': full_array(self.w, task.dimension),
'min_velocity': full_array(self.min_velocity, task.dimension),
'max_velocity': full_array(self.max_velocity, task.dimension),
'v': np.zeros((self.population_size, task.dimension))
}
def init_population(self, task):
r"""Initialize population and dynamic arguments of the Particle Swarm Optimization algorithm.
Args:
task: Optimization task.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, dict]:
1. Initial population.
2. Initial population fitness/function values.
3. Additional keyword arguments:
* personal_best (numpy.ndarray): particles best population.
* personal_best_fitness (numpy.ndarray[float]): particles best positions function/fitness value.
* w (numpy.ndarray): Inertial weight.
* min_velocity (numpy.ndarray): Minimal velocity.
* max_velocity (numpy.ndarray): Maximal velocity.
* v (numpy.ndarray): Initial velocity of particle.
See Also:
* :func:`niapy.algorithms.Algorithm.init_population`
"""
pop, fpop, d = super().init_population(task)
d.update(self.init(task))
d.update({'personal_best': pop.copy(), 'personal_best_fitness': fpop.copy()})
return pop, fpop, d
def update_velocity(self, v, p, pb, gb, w, min_velocity, max_velocity, task, **kwargs):
r"""Update particle velocity.
Args:
v (numpy.ndarray): Current velocity of particle.
p (numpy.ndarray): Current position of particle.
pb (numpy.ndarray): Personal best position of particle.
gb (numpy.ndarray): Global best position of particle.
w (Union[float, numpy.ndarray]): Weights for velocity adjustment.
min_velocity (numpy.ndarray): Minimal velocity allowed.
max_velocity (numpy.ndarray): Maximal velocity allowed.
task (Task): Optimization task.
kwargs: Additional arguments.
Returns:
numpy.ndarray: Updated velocity of particle.
"""
return self.repair(
w * v + self.c1 * self.random(task.dimension) * (pb - p) + self.c2 * self.random(task.dimension) * (gb - p),
min_velocity, max_velocity)
def run_iteration(self, task, pop, fpop, xb, fxb, **params):
r"""Core function of Particle Swarm Optimization algorithm.
Args:
task (Task): Optimization task.
pop (numpy.ndarray): Current populations.
fpop (numpy.ndarray): Current population fitness/function values.
xb (numpy.ndarray): Current best particle.
fxb (float): Current best particle fitness/function value.
params (dict): Additional function keyword arguments.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
1. New population.
2. New population fitness/function values.
3. New global best position.
4. New global best positions function/fitness value.
5. Additional keyword arguments:
* personal_best (numpy.ndarray): Particles best population.
* personal_best_fitness (numpy.ndarray[float]): Particles best positions function/fitness value.
* w (numpy.ndarray): Inertial weight.
* min_velocity (numpy.ndarray): Minimal velocity.
* max_velocity (numpy.ndarray): Maximal velocity.
* v (numpy.ndarray): Initial velocity of particle.
See Also:
* :class:`niapy.algorithms.algorithm.Algorithm.run_iteration`
"""
personal_best = params.pop('personal_best')
personal_best_fitness = params.pop('personal_best_fitness')
w = params.pop('w')
min_velocity = params.pop('min_velocity')
max_velocity = params.pop('max_velocity')
v = params.pop('v')
for i in range(len(pop)):
v[i] = self.update_velocity(v[i], pop[i], personal_best[i], xb, w, min_velocity, max_velocity, task)
pop[i] = task.repair(pop[i] + v[i], rng=self.rng)
fpop[i] = task.eval(pop[i])
if fpop[i] < personal_best_fitness[i]:
personal_best[i], personal_best_fitness[i] = pop[i].copy(), fpop[i]
if fpop[i] < fxb:
xb, fxb = pop[i].copy(), fpop[i]
return pop, fpop, xb, fxb, {'personal_best': personal_best, 'personal_best_fitness': personal_best_fitness,
'w': w, 'min_velocity': min_velocity, 'max_velocity': max_velocity, 'v': v}
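# A minimal usage sketch (assuming niapy's Task/problem API; the Sphere
# problem and iteration budget are illustrative, not part of this module):
#
#     from niapy.task import Task
#     from niapy.problems import Sphere
#
#     algo = ParticleSwarmAlgorithm(population_size=40, c1=2.0, c2=2.0, w=0.7,
#                                   min_velocity=-1.5, max_velocity=1.5)
#     best_x, best_fitness = algo.run(Task(problem=Sphere(dimension=10),
#                                          max_iters=100))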
class ParticleSwarmOptimization(ParticleSwarmAlgorithm):
r"""Implementation of Particle Swarm Optimization algorithm.
Algorithm:
Particle Swarm Optimization algorithm
Date:
2018
Authors:
<NAME>, <NAME>, <NAME>. and <NAME>
License:
MIT
Reference paper:
Kennedy, J. and <NAME>. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995.
Attributes:
Name (List[str]): List of strings representing algorithm names
See Also:
* :class:`niapy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm`
"""
Name = ['ParticleSwarmAlgorithm', 'PSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""<NAME>. and <NAME>. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995."""
def __init__(self, *args, **kwargs):
"""Initialize ParticleSwarmOptimization."""
super().__init__(*args, **kwargs)
self.w = 1.0
self.min_velocity = -np.inf
self.max_velocity = np.inf
def set_parameters(self, **kwargs):
r"""Set core parameters of algorithm.
See Also:
* :func:`niapy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm.set_parameters`
"""
kwargs.pop('w', None)
kwargs.pop('min_velocity', None)
kwargs.pop('max_velocity', None)
super().set_parameters(w=1.0, min_velocity=-np.inf, max_velocity=np.inf, **kwargs)
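# Note: ParticleSwarmOptimization is the original 1995 variant of the
# algorithm above, i.e. WVCPSO with the inertia weight fixed at 1.0 and
# velocity clamping disabled, so e.g. ParticleSwarmOptimization(
# population_size=40) needs no w/min_velocity/max_velocity arguments.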
class OppositionVelocityClampingParticleSwarmOptimization(ParticleSwarmAlgorithm):
r"""Implementation of Opposition-Based Particle Swarm Optimization with Velocity Clamping.
Algorithm:
Opposition-Based Particle Swarm Optimization with Velocity Clamping
Date:
2019
Authors:
<NAME>
License:
MIT
Reference paper:
<NAME>, et al. "Opposition-based particle swarm optimization with velocity clamping (OVCPSO)." Advances in Computational Intelligence. Springer, Berlin, Heidelberg, 2009. 339-348
Attributes:
p0: Probability of opposite learning phase.
w_min: Minimum inertial weight.
w_max: Maximum inertial weight.
sigma: Velocity scaling factor.
See Also:
* :class:`niapy.algorithms.basic.ParticleSwarmAlgorithm`
"""
Name = ['OppositionVelocityClampingParticleSwarmOptimization', 'OVCPSO']
@staticmethod
def info():
r"""Get basic information of algorithm.
Returns:
str: Basic information of algorithm.
See Also:
* :func:`niapy.algorithms.Algorithm.info`
"""
return r"""<NAME>, et al. "Opposition-based particle swarm optimization with velocity clamping (OVCPSO)." Advances in Computational Intelligence. Springer, Berlin, Heidelberg, 2009. 339-348"""
def __init__(self, p0=.3, w_min=.4, w_max=.9, sigma=.1, c1=1.49612, c2=1.49612, *args, **kwargs):
"""Initialize OppositionVelocityClampingParticleSwarmOptimization.
Args:
p0 (float): Probability of running Opposite learning.
w_min (numpy.ndarray): Minimal value of weights.
w_max (numpy.ndarray): Maximum value of weights.
sigma (numpy.ndarray): Velocity range factor.
c1 (float): Cognitive component.
c2 (float): Social component.
See Also:
* :func:`niapy.algorithm.basic.ParticleSwarmAlgorithm.__init__`
"""
kwargs.pop('w', None)
super().__init__(w=w_max, c1=c1, c2=c2, *args, **kwargs)
self.p0 = p0
self.w_min = w_min
self.w_max = w_max
self.sigma = sigma
def set_parameters(self, p0=.3, w_min=.4, w_max=.9, sigma=.1, c1=1.49612, c2=1.49612, **kwargs):
r"""Set core algorithm parameters.
Args:
p0 (float): Probability of running Opposite learning.
w_min (numpy.ndarray): Minimal value of weights.
w_max (numpy.ndarray): Maximum value of weights.
sigma (numpy.ndarray): Velocity range factor.
c1 (float): Cognitive component.
c2 (float): Social component.
See Also:
* :func:`niapy.algorithm.basic.ParticleSwarmAlgorithm.set_parameters`
"""
kwargs.pop('w', None)
super().set_parameters(w=w_max, c1=c1, c2=c2, **kwargs)
self.p0 = p0
self.w_min = w_min
self.w_max = w_max
self.sigma = sigma
def get_parameters(self):
r"""Get value of parameters for this instance of algorithm.
Returns:
Dict[str, Union[int, float, numpy.ndarray]]: Dictionary which has parameters mapped to values.
See Also:
* :func:`niapy.algorithms.basic.ParticleSwarmAlgorithm.get_parameters`
"""
d = ParticleSwarmAlgorithm.get_parameters(self)
d.pop('min_velocity', None)
d.pop('max_velocity', None)
d.update({
'p0': self.p0, 'w_min': self.w_min, 'w_max': self.w_max, 'sigma': self.sigma
})
return d
@staticmethod
def opposite_learning(s_l, s_h, pop, fpop, task):
r"""Run opposite learning phase.
Args:
s_l (numpy.ndarray): lower limit of opposite particles.
s_h (numpy.ndarray): upper limit of opposite particles.
pop (numpy.ndarray): Current populations positions.
fpop (numpy.ndarray): Current populations functions/fitness values.
task (Task): Optimization task.
"""
'session_token',
'uid',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
'payload':
(StringId,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.v1_admin_user_uid_status_get_endpoint = _Endpoint(
settings={
'response_type': (UserStatus,),
'auth': [],
'endpoint_path': '/v1/admin/user/{uid}/status',
'operation_id': 'v1_admin_user_uid_status_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
],
'required': [
'session_token',
'uid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.v1_admin_user_uid_status_update_post_endpoint = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/admin/user/{uid}/status/update',
'operation_id': 'v1_admin_user_uid_status_update_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
'payload',
],
'required': [
'session_token',
'uid',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
'payload':
(UserStatus,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.v1_admin_user_user_id_suspension_update_put_endpoint = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/admin/user/{userId}/suspension/update',
'operation_id': 'v1_admin_user_user_id_suspension_update_put',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'session_token',
'user_id',
'payload',
],
'required': [
'session_token',
'user_id',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'user_id':
(int,),
'payload':
(UserSuspension,),
},
'attribute_map': {
'session_token': 'sessionToken',
'user_id': 'userId',
},
'location_map': {
'session_token': 'header',
'user_id': 'path',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.v1_user_uid_follow_post_endpoint = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/user/{uid}/follow',
'operation_id': 'v1_user_uid_follow_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
'uid_list',
],
'required': [
'session_token',
'uid',
'uid_list',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
'uid_list':
(FollowersList,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
'uid_list': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.v1_user_uid_followers_get_endpoint = _Endpoint(
settings={
'response_type': (FollowersListResponse,),
'auth': [],
'endpoint_path': '/v1/user/{uid}/followers',
'operation_id': 'v1_user_uid_followers_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
'limit',
'before',
'after',
],
'required': [
'session_token',
'uid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
'limit':
(int,),
'before':
(str,),
'after':
(str,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
'limit': 'limit',
'before': 'before',
'after': 'after',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
'limit': 'query',
'before': 'query',
'after': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.v1_user_uid_following_get_endpoint = _Endpoint(
settings={
'response_type': (FollowingListResponse,),
'auth': [],
'endpoint_path': '/v1/user/{uid}/following',
'operation_id': 'v1_user_uid_following_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
'limit',
'before',
'after',
],
'required': [
'session_token',
'uid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
'limit':
(int,),
'before':
(str,),
'after':
(str,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
'limit': 'limit',
'before': 'before',
'after': 'after',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
'limit': 'query',
'before': 'query',
'after': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.v1_user_uid_unfollow_post_endpoint = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/user/{uid}/unfollow',
'operation_id': 'v1_user_uid_unfollow_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
'uid_list',
],
'required': [
'session_token',
'uid',
'uid_list',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
'uid_list':
(FollowersList,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
'uid_list': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.v2_admin_user_create_post_endpoint = _Endpoint(
settings={
'response_type': (V2UserDetail,),
'auth': [],
'endpoint_path': '/v2/admin/user/create',
'operation_id': 'v2_admin_user_create_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'payload',
],
'required': [
'session_token',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'payload':
(V2UserCreate,),
},
'attribute_map': {
'session_token': 'sessionToken',
},
'location_map': {
'session_token': 'header',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.v2_admin_user_list_get_endpoint = _Endpoint(
settings={
'response_type': (V2UserDetailList,),
'auth': [],
'endpoint_path': '/v2/admin/user/list',
'operation_id': 'v2_admin_user_list_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
'skip',
'limit',
],
'required': [
'session_token',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'skip':
(int,),
'limit':
(int,),
},
'attribute_map': {
'session_token': 'sessionToken',
'skip': 'skip',
'limit': 'limit',
},
'location_map': {
'session_token': 'header',
'skip': 'query',
'limit': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.v2_admin_user_uid_get_endpoint = _Endpoint(
settings={
'response_type': (V2UserDetail,),
'auth': [],
'endpoint_path': '/v2/admin/user/{uid}',
'operation_id': 'v2_admin_user_uid_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
],
'required': [
'session_token',
'uid',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
},
'attribute_map': {
'session_token': 'sessionToken',
'uid': 'uid',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.v2_admin_user_uid_update_post_endpoint = _Endpoint(
settings={
'response_type': (V2UserDetail,),
'auth': [],
'endpoint_path': '/v2/admin/user/{uid}/update',
'operation_id': 'v2_admin_user_uid_update_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'uid',
'payload',
],
'required': [
'session_token',
'uid',
'payload',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'uid':
(int,),
'payload':
(V2UserAttributes,),
},
'attribute_map': {
'session_token': '<PASSWORD>',
'uid': 'uid',
},
'location_map': {
'session_token': 'header',
'uid': 'path',
'payload': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
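# Each _Endpoint above bundles the request metadata (path, HTTP method, and
# the type/location of every parameter) that the wrapper methods below hand
# to call_with_http_info(). A hedged usage sketch (client construction and
# token are illustrative, not part of this module):
#
#     pod_api = PodApi(api_client)
#     detail = pod_api.v2_admin_user_uid_get('my-session-token', 123)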
def v1_admin_system_roles_list_get(
self,
session_token,
**kwargs
):
"""Get a list of all roles available in the company (pod) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_admin_system_roles_list_get(session_token, async_req=True)
>>> result = thread.get()
Args:
session_token (str): Session authentication token.
Keyword Args:
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
RoleDetailList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
kwargs['_preload_content'] = kwargs.get('_preload_content', True)
kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
kwargs['_spec_property_naming'] = kwargs.get('_spec_property_naming', False)
kwargs['_content_type'] = kwargs.get('_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['session_token'] = session_token
return self.v1_admin_system_roles_list_get_endpoint.call_with_http_info(**kwargs)
equivalences that go away because of cexs in cex_list"""
#### global f_name
#### abc('&r %s_gores.aig'%f_name)
#### for j in range(len(cex_list)):
#### cex_put(cex_list[j])
#### run_command('&resim') #put the jth cex into the cex space and use it to refine the equivs
#### abc('&w %s_gores.aig'%f_name)
#### return
####
#### def generate_srms():
#### """generates a synthesized reduced model (srms) from the gores file"""
#### global f_name, po_map
#### abc('&r %s_gores.aig; &srm -sf; r gsrms.aig; w %s_gsrms.aig'%(f_name,f_name))
#### print 'New srms = ',ps()
#### po_map = range(n_pos())
#### return 'OK'
####
#### def merge_proved_equivalences():
#### #this only changes the gores file.
#### run_command('&r %s_gores.aig; &equiv_mark -vf %s_gsrms.aig; &reduce -v; &w %s_gores.aig'%(f_name,f_name,f_name))
#### return
####
#### def generate_equivalences():
#### set_globals()
#### t = max(1,.5*G_T)
#### r = max(1,int(t))
#### cmd = "&get; &equiv2 -C %d -F 200 -T %f -S 1 -R %d"%((G_C),t,r)
#### abc(cmd)
#### #run_command('&ps')
#### eq_simulate(.5*t)
#### #run_command('&ps')
#### cmd = '&semi -W 63 -S 5 -C 500 -F 20 -T %d'%(.5*t)
#### abc(cmd)
#### #run_command('&ps')
#### run_command('&w %s_gores.aig'%f_name)
####
#### l=remove_const_v_pos() #makes sure no 0 pos to start
#### cex_list = []
#### n_pos_before = n_pos()
#### n_latches_before = n_latches()
###### print 'Generating equivalences'
#### generate_equivalences()
###### print 'Generating srms file'
#### generate_srms() #this should not create new 0 pos
###### if n_pos()>100:
###### removed
#### l=remove_const_v_pos()
#### n_pos_last = n_pos()
#### if n_pos_before == n_pos():
#### print 'No equivalences found. Quitting synculate'
#### return Undecided_no_reduction
#### print 'Initial synculation: ',ps()
###### ps()
#### set_globals()
#### simp_sw = init = True
#### simp_sw = False #temporary
#### print '\nIterating synculation refinement'
#### abc('w initial_sync.aig')
#### max_verify_time = t
#### print 'max_verify_time = %d'%max_verify_time
#### """
#### in the following loop we increase max_verify_time by twice time spent to find last cexs or Unsat's
#### We iterate only when we have proved cex + unsat > 1/2 n_pos. Then we update srms and repeat.
#### """
#### while True: # refinement loop
#### t = max_verify_time #this may have been increased since the last loop
###### print 'max_verify_time = %d'%max_verify_time
#### set_globals()
#### if not init:
#### generate_srms() #generates a new gsrms file and leaves it in workspace
###### print 'generate_srms done'
#### if n_pos() == n_pos_before:
#### break
#### if n_pos() == n_pos_last: #if nothing new, then quit if max_verification time is reached.
#### if t > max_verify_time:
#### break
#### if simp_sw: #Warning: If this holds then simplify could create some 0 pos
#### na = n_ands()
#### simplify()
#### while True:
#### npo = n_pos()
###### print 'npos = %d'%npo
#### merge_proved_equivalences() #So we need to merge them here. Can merging create more???
#### generate_srms()
#### if npo == n_pos():
#### break
#### if n_ands() > .7*na: #if not significant reduction, stop simplification
#### simp_sw = False #simplify only once.
#### if n_latches() == 0:
#### return check_sat()
#### n_pos_last = n_pos()
#### init = False # make it so that next time it is not the first time through
#### syn_par(t)
#### if (len(cex_list)+len(result)) == 0: #nothing happened aand ran out of time.
#### break
#### abc('w %s_gsrms.aig'%f_name)
#### #print 'No. of cexs after syn_parallel = %d'%len(cex_list)
#### merge_proved_equivalences() #changes the underlying gores file by merging fanouts of proved eqs
#### #print 'merge done'
#### refine_with_cexs() #changes the gores file by refining the equivalences in it using cex_list.
#### #print 'refine_with_cexs done'
#### continue
#### extract(0,n_pos_before) #get rid of unproved outputs
#### return
####
####def syn_par(t):
#### """prove n outputs at once and quit at first cex. Otherwise if no cex found return aig
#### with the unproved outputs"""
#### global trim_allowed,max_verify_time, n_pos_before
#### global cex_list, result
#### b_time = time.time()
#### n = n_pos()
#### if n == n_pos_before:
#### return
#### mx = n_pos()
#### if n_pos() - n_pos_before > 50:
#### mx = n_pos_before + 50
#### r = range(n_pos_before, mx)
#### N = max(1,(mx-n_pos_before)/2)
#### abc('w %s__ysavetemp.aig'%f_name)
#### F = [eval(FUNCS[18])] #create a timer function
#### #print r
#### for i in r:
#### F = F + [eval('(pyabc_split.defer(verify_only)(%d,%d))'%(i,t))]
#### cex_list = result = []
#### outcome = ''
#### #redirect printout here
###### with redirect.redirect( redirect.null_file, sys.stdout ):
###### with redirect.redirect( redirect.null_file, sys.stderr ):
#### for i,res in pyabc_split.abc_split_all(F):
#### status = get_status()
###### print i
#### if i == 0: #timed out
#### outcome = 'time expired after = %d'%(time.time() - b_time)
#### break
#### if status < Unsat:
#### cex_list = cex_list + [cex_get()]
#### if status == Unsat:
#### result = result + [r[i-1]]
#### if (len(result)+len(cex_list))>= N:
#### T = time.time() - b_time
#### if T > max_verify_time/2:
#### max_verify_time = 2*T
#### break
#### continue
#### if not outcome == '':
#### print outcome
###### print 'cex_list,prove_list = ',cex_list,result
#### abc('r %s__ysavetemp.aig'%f_name) #restore initial aig so that pos can be 0'ed out
#### if not result == []: # found some unsat's
###### min_r = min(result)
###### max_r = max(result)
###### no = n_pos()
###### assert (0 <= min_r and max_r < no), 'min_r, max_r, length = %d, %d, %d'%(min_r,max_r,len(result))
#### zero(result)
#### return
#### #print "Number PO's proved = %d"%len(result)
####
####def absec(n):
#### #abc('w t.aig')
#### for j in range(n):
#### print '\nFrame %d'%(j+1)
#### run_command('absec -F %d'%(j+1))
#### if is_unsat():
#### print 'UNSAT'
#### break
####
####
####"""
#### we might be proving some original pos as we go, and on the other hand we might have some equivalences that we
#### can't prove. There are two uses, in verification
#### verification - we want to remove the proved pos whether they are original or not. But if a cex for an original, then need to
#### remember this.
#### synthesis - the original outputs need to be kept and ignored in terms of cex's - supposedly they can't be proved.
####"""
####
####""" Experimental"""
####
####def csec():
#### global f_name
#### if os.path.exists('%s_part0.aig'%f_name):
#### os.remove('%s_part0.aig'%f_name)
#### run_command('demiter')
#### if not os.path.exists('%s_part0.aig'%f_name):
#### return
#### run_command('r %s_part0.aig'%f_name)
#### ps()
#### run_command('comb')
#### ps()
#### abc('w %s_part0comb.aig'%f_name)
#### run_command('r %s_part1.aig'%f_name)
#### ps()
#### run_command('comb')
#### ps()
#### abc('w %s_part1comb.aig'%f_name)
#### run_command('&get; &cec %s_part0comb.aig'%(f_name))
#### if is_sat():
#### return 'SAT'
#### if is_unsat():
#### return 'UNSAT'
#### else:
#### return 'UNDECIDED'
###########################
#### we will verify outputs ORed in groups of g[i]
#### here we take div = N so no ORing
## div = max(1,N/1)
## g = distribute(N,div)
## if len(g) <= 1:
## t = tt
## g.reverse()
#### print g
## x = 0
## G = []
## for i in range(div):
## y = x+g[i]
## F = F + [eval('(pyabc_split.defer(verify_range)(%d,%d,%s))'%(x,y,convert(t)))]
## G = G + [range(x,y)]
## x = y
#### print G
###########################
def get_lib():
run_command('read_genlib vsclib013.genlib')
def map_a_iter():
run_command('map -a')
mm = n_area()
abc('write_blif %s_min_map.blif'%f_name)
while True:
run_command('st; dch; map -a; ps')
if n_area() < mm:
mm = n_area()
abc('write_blif %s_min_map.blif'%f_name)
else:
break
abc('read_blif %s_min_map.blif'%f_name)
return n_area()
""" These commands map into luts and leave the result in mapped format. To return to aig format, you
have to do 'st'
"""
def sop_balance(k=4):
'''minimizes LUT logic levels '''
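# Strategy sketch (as coded below): first map with a LUT size two larger
# than requested (capped at 15) to minimize depth, then remap with choices
# (dch) at that size to recover area while holding the level count.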
## kmax = k
kmax=min(k+2,15)
abc('st; if -K %d;ps'%kmax)
print nl(),
## for i in range(1):
## abc('st; if -K %d;ps'%kmax)
## run_command('ps')
kmax=min(k+2,15)
abc('st; if -g -C %d -K %d -F 2;ps'%(10,kmax)) #balance
print nl(),
for i in range(1):
abc('st;dch; if -C %d -K %d;ps'%(10,kmax))
print nl(),
def speedup(k=6):
run_command('&get;&st;&syn2;&st;&if -K %d -C 16 -g;&ps;&st;&synch2;&st;&if -K %d -C 16;&ps;&mfs;&ps;&put'%(k,k))
print nl()
#+++++++++++++++++ start SCC +++++++++++++++++++++++++++++++
def get_SCCs():
global fi,fo,all_seen
global n_init_pis, n_init_latches,n_init_pos
## fi = fi_sig(n_init_pis,n_init_pos)
## fo = fo_sig(fi)
check_fifo(fi,fo)
all_seen = set([]) #indicates it is already in an SCC
sccs = []
for i in range(n_pos()): # do each of the POs
if i in all_seen: # already in some scc
continue
if fo[i] == [] or fi[i] == []:
scci = set([-i]) # -i means scc singleton with no self loop
else:
tfoi = tfo(i) # list of nodes in transitive fanout of i
#excluding all_seen
## print len(tfoi)
tfii = tfi(i) # list of nodes in transitive fanin of i
#excluding all_seen
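# A CO j is in the same SCC as i iff i reaches j and j reaches i,
# i.e. j lies in both tfo(i) and tfi(i); nodes in all_seen are skipped
# because they already belong to an earlier SCC.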
scci = tfoi&tfii
## print [len(tfii),len(tfoi),len(scci)],
if len(scci) == 1 and not i in fo[i]: #i is always in tfii and tfoi
scci = set([-i]) #case when no self loop on node i
sccs.append(list(scci)) # list of co's in same scc as co i
if len(scci) > 1:
print 'SCC size %d '%len(scci),
if scci == [-i]:
all_seen.add(i) # a singeton scc
else:
all_seen = all_seen | scci
return sccs
def fo_sig(fi=[]):
""" get fanout signals of each comb PO"""
global n_init_pis, n_init_latches,n_init_pos,lengths
if fi == []:
fis = fi_sig(n_init_pis,n_init_pos)
else:
fis = fi
fos = [set([]) for i in range(n_pos())]
for i in range(len(fos)):
for j in fis[i]:
if j > -1:
fos[j].add(i)
fos = [list(fos[j]) for j in xrange(len(fos))]
return fos
def fo_sig_pi(fi=[]):
""" get fanouts of the pis"""
global n_init_pis, n_init_latches,n_init_pos,lengths
if fi == []:
fis = fi_sig(n_init_pis,n_init_pos)
else:
fis = fi
fos = [set([]) for i in range(n_init_pis)]
for i in range(n_pos()):
fisi= [(abs(j)-1) for j in fis[i] if j < 0] #restrict to PIs
for j in fisi:
fos[j].add(i)
fos = [list(fos[j]) for j in xrange(len(fos))]
return fos
<gh_stars>0
# -*- coding: utf-8 -*-
"""
main
====
Typical GUI screen, adapted from prior pyqt work
Attributes:
na
Todo:
* clean up docstrings (ideally to Sphinx format, minimally to
numpy/scipy style)
Related projects:
Adapted from initial toy project https://github.com/sonlexqt/whack-a-mole
which is under MIT license
@author: DZLR3
"""
from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
import pickle
import sys
from os import listdir
from wam.game import GameManager
import pygame
from time import time
class QT_Basic(QtWidgets.QMainWindow):
def __init__(self, ui_file_loc, *args, **kwargs):
super().__init__(*args, **kwargs) # QT GUI
self.ui_file_loc = ui_file_loc
self.window = uic.loadUi(self.ui_file_loc, self)
self.tab_count = self.window.tabs.count()
# Default button hide conditions (lists so appendable)
self.next_btn_hide_tabs = [self.tab_count - 1]
self.back_btn_hide_tabs = [0]
def back_button_clicked(self):
"""
Moves to the previous tab and refreshes the navigation buttons,
hiding the back button on tabs listed in back_btn_hide_tabs
"""
self.window.tabs.setCurrentIndex(self.window.tabs.currentIndex() - 1)
self.window.next_btn.show()
self.window.back_btn.show()
if self.window.tabs.currentIndex() in self.back_btn_hide_tabs:
self.window.back_btn.hide()
def next_button_clicked(self):
"""
Moves to the next tab and refreshes the navigation buttons,
hiding the next button on tabs listed in next_btn_hide_tabs
"""
self.window.tabs.setCurrentIndex(self.window.tabs.currentIndex() + 1)
self.window.next_btn.show()
self.window.back_btn.show()
if self.window.tabs.currentIndex() in self.next_btn_hide_tabs:
self.window.next_btn.hide()
def refresh_nav_buttons(self):
"""
Refreshes the navigation buttons upon tab clicks to ensure only the
relevant buttons are shown
"""
if self.window.tabs.currentIndex() in self.back_btn_hide_tabs:
self.window.next_btn.show()
self.window.back_btn.hide()
elif self.window.tabs.currentIndex() in self.next_btn_hide_tabs:
self.window.next_btn.hide()
self.window.back_btn.show()
else:
self.window.next_btn.show()
self.window.back_btn.show()
def set_combo_types(self, combobox, valid_list):
for item in valid_list:
combobox.addItem(item)
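# A minimal subclass sketch (the .ui path and button wiring are illustrative,
# following the pattern the subclasses below actually use):
#
#     class MyScreen(QT_Basic):
#         def __init__(self, *args, **kwargs):
#             QT_Basic.__init__(self, 'ui\\MyScreen.ui', *args, **kwargs)
#             self.window.next_btn.clicked.connect(self.next_button_clicked)
#             self.window.back_btn.clicked.connect(self.back_button_clicked)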
class QT_Config(QT_Basic):
def __init__(self, *args, **kwargs):
QT_Basic.__init__(self, 'ui\\QT_Config.ui', *args, **kwargs)
# Import the QT designer UI and name the window
self.setWindowTitle('BDM Whack-A-Mole')
self.file_config_loc = 'config\\Default.pkl'
self.save_dict = {}
# Connect the buttons and tabs to the relevant functions
self.window.back_btn.clicked.connect(self.back_button_clicked)
self.window.next_btn.clicked.connect(self.next_button_clicked)
self.window.save_btn.clicked.connect(self.save_button_clicked)
self.window.load_btn.clicked.connect(self.load_button_clicked)
self.window.tabs.currentChanged.connect(self.refresh_nav_buttons)
# Set the default visibility for the nav buttons and show the screen
self.window.back_btn.hide()
# Set the combo box valid values
self.set_combo_types(self.window.skill_type, ['Normal',
'lin_dist_skill',
'nonlin_dist_skill'])
self.set_combo_types(self.window.hit_type, ['Standard',
'Binomial'])
self.set_combo_types(self.window.stage_type, ['Standard',
'Attempts'])
self.set_combo_types(self.window.rand_type, ['uniform',
'normal'])
self.set_combo_types(self.window.drift_type, ['static',
'sin',
'linear',
'linear+sin',
'random'])
# Import the configuration
self.set_config_dict()
self.populate_main_game(self.master_dict['main_game'])
self.populate_scorer(self.master_dict['scorer'])
self.populate_hit_checker(self.master_dict['hit_checker'])
self.populate_margin_drifter(self.master_dict['margin_drifter'])
self.window.show()
def load_button_clicked(self):
self.file_config_loc = self.window.cond_set_file_path.text()
try:
self.set_config_dict()
self.populate_main_game(self.master_dict['main_game'])
except IOError:
print('File not found, make sure to use the format c:\\path\\..')
def set_config_dict(self):
self.master_dict = pickle.load(open(self.file_config_loc, 'rb'))
def populate_conditions_meta(self, cond_meta_dict):
# Screen Setup
self.window.cond_set_name.setText(str(cond_meta_dict['cond_set_name']))
self.window.cond_set_user.setText(str(cond_meta_dict['cond_set_user']))
self.window.cond_set_notes.setText(str(cond_meta_dict[
'cond_set_notes']))
def populate_main_game(self, main_game_dict):
# Screen Setup
self.window.screen_width.setText(str(main_game_dict['SCREEN_WIDTH']))
self.window.screen_height.setText(str(main_game_dict['SCREEN_HEIGHT']))
self.window.comm_bar_height.setText(str(main_game_dict[
'COMM_BAR_HEIGHT']))
self.window.twobytwo_len.setText(str(main_game_dict['TWO_X_TWO_LEN']))
self.window.twobytwo_x.setText(str(main_game_dict['TWO_X_TWO_LOC'][0]))
self.window.twobytwo_y.setText(str(main_game_dict['TWO_X_TWO_LOC'][1]))
self.window.font_size.setValue(main_game_dict['FONT_SIZE'])
# Animation Setup
self.window.fps.setValue(main_game_dict['FPS'])
self.window.mole_width.setValue(main_game_dict['MOLE_WIDTH'])
self.window.mole_height.setValue(main_game_dict['MOLE_HEIGHT'])
self.window.post_whack_interval.setValue(main_game_dict[
'post_whack_interval'])
self.window.mole_pause_interval.setValue(main_game_dict[
'mole_pause_interval'])
self.window.animation_interval.setValue(main_game_dict[
'animation_interval'])
self.window.mole_down_interval.setValue(main_game_dict[
'mole_down_interval'])
# Stage setup
self.window.stage_score_gap.setValue(main_game_dict['STAGE_SCORE_GAP'])
self.window.stages.setValue(main_game_dict['stages'])
self.window.stage_length.setValue(main_game_dict['stage_length'])
self.window.stage_type.setCurrentText(main_game_dict['stage_type'])
self.window.demo_2.setChecked(main_game_dict['demo'])
self.window.demo_len.setValue(main_game_dict['demo_len'])
self.window.update_delay.setValue(main_game_dict['update_delay'])
self.window.feedback_limit.setValue(main_game_dict['feedback_limit'])
self.window.feedback.setChecked(main_game_dict['FEEDBACK'])
self.window.stage_time_change.setChecked(main_game_dict[
'stage_time_change'])
# Mole hits and scoring
self.window.mole_radius.setValue(main_game_dict['MOLE_RADIUS'])
self.window.margin_start.setValue(main_game_dict['MARGIN_START'])
def populate_scorer(self, scorer_dict):
# Screen Setup
self.window.skill_type.setCurrentText(scorer_dict['skill_type'])
self.window.adjust.setChecked(scorer_dict['adjust'])
self.window.min_score.setValue(scorer_dict['min_score'])
self.window.max_score.setValue(scorer_dict['max_score'])
self.window.skill_luck_rat.setValue(scorer_dict['skill_luck_rat'])
self.window.rand_mean.setValue(scorer_dict['rand_mean'])
self.window.rand_sd.setValue(scorer_dict['rand_sd'])
self.window.rand_type.setCurrentText(scorer_dict['rand_type'])
def populate_hit_checker(self, hit_checker_dict):
# Screen Setup
self.window.hit_type.setCurrentText(hit_checker_dict['hit_type'])
self.window.diff_fact.setValue(hit_checker_dict['diff_fact'])
self.window.luck_mean.setText(str(hit_checker_dict['luck_mean']))
self.window.luck_sd.setValue(hit_checker_dict['luck_sd'])
self.window.luck_low_bnd.setText(str(hit_checker_dict['luck_low_bnd']))
self.window.luck_high_bnd.setText(
str(hit_checker_dict['luck_high_bnd']))
def populate_margin_drifter(self, margin_drifter_dict):
# Screen Setup
self.window.drift_type.setCurrentText(
margin_drifter_dict['drift_type'])
self.window.gradient.setText(str(margin_drifter_dict['gradient']))
self.window.amplitude.setValue(
margin_drifter_dict['amplitude'])
self.window.noise_truncated.setChecked(
margin_drifter_dict['noise_truncated'])
self.window.noise_mean.setText(str(margin_drifter_dict['noise_mean']))
self.window.noise_sd.setValue(margin_drifter_dict['noise_sd'])
self.window.noise_low_bnd.setText(
str(margin_drifter_dict['noise_low_bnd']))
self.window.noise_high_bnd.setText(
str(margin_drifter_dict['noise_high_bnd']))
self.window.always_pos.setChecked(margin_drifter_dict['always_pos'])
self.window.drift_clip.setChecked(margin_drifter_dict['drift_clip'])
self.window.clip_high_bnd.setText(
str(margin_drifter_dict['clip_high_bnd']))
self.window.clip_low_bnd.setText(
str(margin_drifter_dict['clip_low_bnd']))
@property
def save_check(self):
return True, 'No Errors'
def save_button_clicked(self):
self.fill_condition_meta_dict()
self.fill_main_game_dict()
self.fill_scorer_dict()
self.fill_hit_checker_dict()
self.fill_margin_drifter_dict()
can_save, errors = self.save_check
if can_save:
self.save()
else:
self.window.error_textbox.setText(errors)
def save(self):
file_path = (self.save_folder_path.text() + '\\' +
self.window.cond_set_name.text() +
'.pkl')
with open(file_path, 'wb') as f:
pickle.dump(self.save_dict, f)
self.window.error_textbox.setText('File - ' +
file_path +
' Saved Successfully')
def fill_condition_meta_dict(self):
tmp = {'cond_set_name': (self.window.cond_set_name.text()),
'cond_set_user': (self.window.cond_set_user.text()),
'cond_set_notes': (self.window.cond_set_notes.toPlainText())}
self.save_dict['conditions_meta'] = tmp
def fill_main_game_dict(self):
tmp = { # Screen Setup
'SCREEN_WIDTH': int(self.window.screen_width.text()),
'SCREEN_HEIGHT': int(self.window.screen_height.text()),
'COMM_BAR_HEIGHT': int(self.window.comm_bar_height.text()),
'TWO_X_TWO_LEN': int(self.window.twobytwo_len.text()),
'TWO_X_TWO_LOC': (int(self.window.twobytwo_x.text()),
int(self.window.twobytwo_y.text())),
'FONT_SIZE': int(self.window.font_size.value()),
# Animation Setup
'FPS': self.window.fps.value(),
'MOLE_WIDTH': self.window.mole_width.value(),
'MOLE_HEIGHT': self.window.mole_height.value(),
'post_whack_interval': self.window.post_whack_interval.value(),
'mole_pause_interval': self.window.mole_pause_interval.value(),
'animation_interval': self.window.animation_interval.value(),
'mole_down_interval': self.window.mole_down_interval.value(),
# Stage setup
'STAGE_SCORE_GAP': self.window.stage_score_gap.value(),
'stages': self.window.stages.value(),
'stage_length': self.window.stage_length.value(),
'stage_type': self.window.stage_type.currentText(),
'demo': self.window.demo_2.isChecked(),
'demo_len': self.window.demo_len.value(),
'update_delay': self.window.update_delay.value(),
'feedback_limit': self.window.feedback_limit.value(),
'FEEDBACK': self.window.feedback.isChecked(),
'stage_time_change': self.window.stage_time_change.isChecked(),
# Mole hits and scoring
'MOLE_RADIUS': self.window.mole_radius.value(),
'MARGIN_START': self.window.margin_start.value()}
self.save_dict['main_game'] = tmp
def fill_scorer_dict(self):
tmp = {'skill_type': self.window.skill_type.currentText(),
'adjust': self.window.adjust.isChecked(),
'max_score': self.window.max_score.value(),
'min_score': self.window.min_score.value(),
'rand_type': self.window.rand_type.currentText(),
'rand_mean': float(self.window.rand_mean.text()),
'rand_sd': self.window.rand_sd.value(),
'skill_luck_rat': self.window.skill_luck_rat.value()
}
self.save_dict['scorer'] = tmp
def fill_hit_checker_dict(self):
tmp = {'hit_type': self.window.hit_type.currentText(),
'diff_fact': self.window.diff_fact.value(),
'luck_mean': float(self.window.luck_mean.text()),
'luck_sd': self.window.luck_sd.value(),
'luck_low_bnd': float(self.window.luck_low_bnd.text()),
'luck_high_bnd': float(self.window.luck_high_bnd.text())
}
self.save_dict['hit_checker'] = tmp
def fill_margin_drifter_dict(self):
tmp = {'drift_type': self.window.drift_type.currentText(),
'gradient': float(self.window.gradient.text()),
'amplitude': self.window.amplitude.value(),
'noise': self.window.noise.isChecked(),
'noise_mean': float(self.window.noise_mean.text()),
'noise_sd': self.window.noise_sd.value(),
'noise_truncated': self.window.noise_truncated.isChecked(),
'noise_high_bnd': float(self.window.noise_high_bnd.text()),
'noise_low_bnd': float(self.window.noise_low_bnd.text()),
'always_pos': self.window.always_pos.isChecked(),
'drift_clip': self.window.drift_clip.isChecked(),
'clip_high_bnd': float(self.window.clip_high_bnd.text()),
'clip_low_bnd': float(self.window.clip_low_bnd.text())
}
self.save_dict['margin_drifter'] = tmp
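# Config round-trip sketch: populate_*() pushes a pickled config dict into
# the widgets, fill_*_dict() reads the widgets back into save_dict, and
# save() pickles save_dict to disk. E.g., inside a running QApplication:
#
#     cfg = QT_Config()            # loads config\Default.pkl into the form
#     cfg.save_button_clicked()    # validates, rebuilds save_dict, pickles it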
class QT_GUI(QT_Basic):
def __init__(self, *args, **kwargs):
QT_Basic.__init__(self, 'ui\\QT_Screen.ui', *args, **kwargs)
# File locations
self.intro_text_file_loc = 'text\\Introduction.txt'
self.disclaimer_text_file_loc = 'text\\Disclaimer.txt'
self.instruct_text_file_loc = 'text\\Instructions.txt'
self.debrief_text_file_loc = 'text\\Debrief.txt'
# Import the QT designer UI and name the window
self.window = uic.loadUi(self.ui_file_loc, self)
self.setWindowTitle('BDM Whack-A-Mole')
# Import images & set front screen image
self.pixmap_dict = {}
self.set_image_dict()
self.window.front_screen.setPixmap(self.pixmap_dict["Front_Screen"])
# Adjust the combobox content to support the valid values
self.set_combo_types(self.window.gender_combobox, ['',
'Prefer Not To Say',
'Female',
'Male',
'Other'])
self.set_combo_types(self.window.edu_combobox, ['',
'High School',
'Bachelors',
'Masters',
'PhD',
'Other'])
# Connect the buttons and tabs to the relevant functions
self.window.back_btn.clicked.connect(self.back_button_clicked)
self.window.next_btn.clicked.connect(self.next_button_clicked)
self.window.next_btn.clicked.connect(self.show_launch_check)
self.window.next_btn.clicked.connect(self.show_save_check)
self.window.launch_btn.clicked.connect(self.launch_button_clicked)
self.window.save_btn.clicked.connect(self.save_button_clicked)
self.window.tabs.currentChanged.connect(self.refresh_nav_buttons)
self.window.tabs.currentChanged.connect(self.check_disclaimer_nav)
# Open the 'database' table, set relevant file loc
self.csv_user_log_db = open('logs\\csv_user_log_db.csv', 'a')
# Import all the text from external sources (simplifies future changes)
# adjust with the appropriate text and fill the text boxes
# and set to read only to prevent user edits
self.get_set_text()
# Set the default visibility for the nav buttons and show the screen
self.launched = False
self.window.back_btn.hide()
self.window.launch_btn.hide()
self.window.save_btn.hide()
self.window.error_textbox.hide()
self.window.show()
def set_image_dict(self):
"""
Import the images from the images folder into the pixmap_dict
dictionary, scaled for display in the GUI.
"""
files = listdir('images')
for file in files:
if file == 'Front_Screen.png':
pixmap = QPixmap('images\\' + file)
pixmap = pixmap.scaled(1001, 811,
Qt.KeepAspectRatio,
Qt.SmoothTransformation)
self.pixmap_dict['Front_Screen'] = pixmap
else:
    print('FYI - Unhandled image file detected in image folder - ', file)
def set_cond_all(self):
"""
tbc
"""
pass
def check_disclaimer_nav(self):
"""
Ensures navigation cannot happen past the disclaimer screen unless
consent has been provided via the consent_checkbox
"""
if not self.window.consent_checkbox.isChecked() and self.window.tabs.currentIndex() > 1:
    # Bounce back to the disclaimer tab until consent is provided
    self.window.tabs.setCurrentIndex(1)
    self.window.launch_btn.hide()
    self.window.back_btn.show()
    self.window.next_btn.show()
self.refresh_nav_buttons()
def refresh_nav_buttons(self):
"""
Refreshes the navigation buttons upon tab clicks to ensure only the
relevant buttons are shown
"""
if self.window.tabs.currentIndex() == 0:
self.window.launch_btn.hide()
self.window.back_btn.hide()
elif self.window.tabs.currentIndex() == 3:
self.show_launch_check()
self.window.next_btn.hide()
self.window.back_btn.show()
else:
self.window.next_btn.show()
self.window.back_btn.show()
self.show_debrief_check()
self.window.launch_btn.hide()
def check_task_complete(self):
"""
Checks all activities, demographics etc have been submitted prior to
allowing the participant to save and exit. Should tasks not be
complete an error message will be supplied to the user detailing
the issue(s)
"""
complete = True
error_message = 'The following errors are preventing saving: '
# Check all the required attributes have been captured
if len(self.window.username_textbox.text()) > 0:
complete *= True
else:
complete *= False
error_message += 'username is blank, '
if self.window.consent_checkbox.isChecked():
complete *= True
else:
complete *= False
error_message += 'consent was not provided, '
if self.window.age_spinbox.value() > 17:
complete *= True
else:
complete *= False
error_message += 'must be an adult (18+) to participate, '
if str(self.window.edu_combobox.currentText()) != '':
complete *= True
else:
complete *= False
error_message += 'education level was not provided, '
print(self.window.edu_combobox.currentText())
if str(self.window.gender_combobox.currentText()) != '':
complete *= True
else:
complete *= False
error_message += 'gender was not provided, '
print(self.window.gender_combobox.currentText())
return (complete, error_message)
def get_save_details(self):
"""
Get the all the details from the experiment (incl. demographics and
consent), and cast them into a csv ready string, then return the
content as a list
"""
self.username = str(self.window.username_textbox.text())
self.consent = str(self.window.consent_checkbox.isChecked())
self.age = str(self.window.age_spinbox.value())
self.education = str(self.window.edu_combobox.currentText())
self.gender = str(self.window.gender_combobox.currentText())
save_details = [(self.username + ', ' +
self.consent + ', ' +
self.age + ', ' +
self.education + ', ' +
self.gender)]
return save_details
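# Illustrative example (not part of the original code): for a hypothetical
# participant, the returned list holds a single CSV-ready row such as
#     ['alice, True, 30, Masters, Female']
# presumably for appending to the csv_user_log_db log opened in __init__.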
def show_launch_check(self):
"""
Check whether the launch button should be shown, based upon the
completion of all the relevant criteria (consent, demographics, test)
"""
if (self.window.consent_checkbox.isChecked() and
        not self.launched and
        self.window.tabs.currentIndex() == 3):
self.window.launch_btn.show()
else:
self.window.launch_btn.hide()
def show_save_check(self):
"""
Check whether the save button should be shown, based upon the
completion of all the relevant criteria (consent, demographics, test)
"""
if (self.window.consent_checkbox.isChecked() and
        self.launched and
        self.window.tabs.currentIndex() == 4):
self.window.save_btn.show()
else:
self.window.save_btn.hide()
def show_debrief_check(self):
"""
Check whether the save button should be shown, based upon the
completion of all the relevant criteria (consent, demographics, test)
"""
"Dragon Tooth+4",
854005: "Dragon Tooth+5",
855000: "Large Club",
855001: "Large Club+1",
855002: "Large Club+2",
855003: "Large Club+3",
855004: "Large Club+4",
855005: "Large Club+5",
855006: "Large Club+6",
855007: "Large Club+7",
855008: "Large Club+8",
855009: "Large Club+9",
855010: "Large Club+10",
855011: "Large Club+11",
855012: "Large Club+12",
855013: "Large Club+13",
855014: "Large Club+14",
855015: "Large Club+15",
855100: "Crystal Large Club",
855101: "Crystal Large Club+1",
855102: "Crystal Large Club+2",
855103: "Crystal Large Club+3",
855104: "Crystal Large Club+4",
855105: "Crystal Large Club+5",
855200: "Lightning Large Club",
855201: "Lightning Large Club+1",
855202: "Lightning Large Club+2",
855203: "Lightning Large Club+3",
855204: "Lightning Large Club+4",
855205: "Lightning Large Club+5",
855300: "Raw Large Club",
855301: "Raw Large Club+1",
855302: "Raw Large Club+2",
855303: "Raw Large Club+3",
855304: "Raw Large Club+4",
855305: "Raw Large Club+5",
855400: "Magic Large Club",
855401: "Magic Large Club+1",
855402: "Magic Large Club+2",
855403: "Magic Large Club+3",
855404: "Magic Large Club+4",
855405: "Magic Large Club+5",
855406: "Magic Large Club+6",
855407: "Magic Large Club+7",
855408: "Magic Large Club+8",
855409: "Magic Large Club+9",
855410: "Magic Large Club+10",
855500: "Enchanted Large Club",
855501: "Enchanted Large Club+1",
855502: "Enchanted Large Club+2",
855503: "Enchanted Large Club+3",
855504: "Enchanted Large Club+4",
855505: "Enchanted Large Club+5",
855600: "Divine Large Club",
855601: "Divine Large Club+1",
855602: "Divine Large Club+2",
855603: "Divine Large Club+3",
855604: "Divine Large Club+4",
855605: "Divine Large Club+5",
855606: "Divine Large Club+6",
855607: "Divine Large Club+7",
855608: "Divine Large Club+8",
855609: "Divine Large Club+9",
855610: "Divine Large Club+10",
855700: "Occult Large Club",
855701: "Occult Large Club+1",
855702: "Occult Large Club+2",
855703: "Occult Large Club+3",
855704: "Occult Large Club+4",
855705: "Occult Large Club+5",
855800: "Fire Large Club",
855801: "Fire Large Club+1",
855802: "Fire Large Club+2",
855803: "Fire Large Club+3",
855804: "Fire Large Club+4",
855805: "Fire Large Club+5",
855806: "Fire Large Club+6",
855807: "Fire Large Club+7",
855808: "Fire Large Club+8",
855809: "Fire Large Club+9",
855810: "Fire Large Club+10",
855900: "Chaos Large Club",
855901: "Chaos Large Club+1",
855902: "Chaos Large Club+2",
855903: "Chaos Large Club+3",
855904: "Chaos Large Club+4",
855905: "Chaos Large Club+5",
856000: "Smough's Hammer",
856001: "Smough's Hammer+1",
856002: "Smough's Hammer+2",
856003: "Smough's Hammer+3",
856004: "Smough's Hammer+4",
856005: "Smough's Hammer+5",
856100: "Smough's Hammer",
856101: "Smough's Hammer+1",
856102: "Smough's Hammer+2",
856103: "Smough's Hammer+3",
856104: "Smough's Hammer+4",
856105: "Smough's Hammer+5",
856200: "Smough's Hammer",
856201: "Smough's Hammer+1",
856202: "Smough's Hammer+2",
856203: "Smough's Hammer+3",
856204: "Smough's Hammer+4",
856205: "Smough's Hammer+5",
856300: "Smough's Hammer",
856301: "Smough's Hammer+1",
856302: "Smough's Hammer+2",
856303: "Smough's Hammer+3",
856304: "Smough's Hammer+4",
856305: "Smough's Hammer+5",
856400: "Smough's Hammer",
856401: "Smough's Hammer+1",
856402: "Smough's Hammer+2",
856403: "Smough's Hammer+3",
856404: "Smough's Hammer+4",
856405: "Smough's Hammer+5",
856500: "Smough's Hammer",
856501: "Smough's Hammer+1",
856502: "Smough's Hammer+2",
856503: "Smough's Hammer+3",
856504: "Smough's Hammer+4",
856505: "Smough's Hammer+5",
856600: "Smough's Hammer",
856601: "Smough's Hammer+1",
856602: "Smough's Hammer+2",
856603: "Smough's Hammer+3",
856604: "Smough's Hammer+4",
856605: "Smough's Hammer+5",
856700: "Smough's Hammer",
856701: "Smough's Hammer+1",
856702: "Smough's Hammer+2",
856703: "Smough's Hammer+3",
856704: "Smough's Hammer+4",
856705: "Smough's Hammer+5",
856800: "Smough's Hammer",
856801: "Smough's Hammer+1",
856802: "Smough's Hammer+2",
856803: "Smough's Hammer+3",
856804: "Smough's Hammer+4",
856805: "Smough's Hammer+5",
856900: "Smough's Hammer",
856901: "Smough's Hammer+1",
856902: "Smough's Hammer+2",
856903: "Smough's Hammer+3",
856904: "Smough's Hammer+4",
856905: "Smough's Hammer+5",
857000: "Smough's Hammer",
857001: "Smough's Hammer+1",
857002: "Smough's Hammer+2",
857003: "Smough's Hammer+3",
857004: "Smough's Hammer+4",
857005: "Smough's Hammer+5",
857100: "Smough's Hammer",
857101: "Smough's Hammer+1",
857102: "Smough's Hammer+2",
857103: "Smough's Hammer+3",
857104: "Smough's Hammer+4",
857105: "Smough's Hammer+5",
900000: "Fists",
901000: "Caestus",
901001: "Caestus+1",
901002: "Caestus+2",
901003: "Caestus+3",
901004: "Caestus+4",
901005: "Caestus+5",
901006: "Caestus+6",
901007: "Caestus+7",
901008: "Caestus+8",
901009: "Caestus+9",
901010: "Caestus+10",
901011: "Caestus+11",
901012: "Caestus+12",
901013: "Caestus+13",
901014: "Caestus+14",
901015: "Caestus+15",
901100: "Crystal Caestus",
901101: "Crystal Caestus+1",
901102: "Crystal Caestus+2",
901103: "Crystal Caestus+3",
901104: "Crystal Caestus+4",
901105: "Crystal Caestus+5",
901200: "Lightning Caestus",
901201: "Lightning Caestus+1",
901202: "Lightning Caestus+2",
901203: "Lightning Caestus+3",
901204: "Lightning Caestus+4",
901205: "Lightning Caestus+5",
901300: "Raw Caestus",
901301: "Raw Caestus+1",
901302: "Raw Caestus+2",
901303: "Raw Caestus+3",
901304: "Raw Caestus+4",
901305: "Raw Caestus+5",
901400: "Magic Caestus",
901401: "Magic Caestus+1",
901402: "Magic Caestus+2",
901403: "Magic Caestus+3",
901404: "Magic Caestus+4",
901405: "Magic Caestus+5",
901406: "Magic Caestus+6",
901407: "Magic Caestus+7",
901408: "Magic Caestus+8",
901409: "Magic Caestus+9",
901410: "Magic Caestus+10",
901500: "Enchanted Caestus",
901501: "Enchanted Caestus+1",
901502: "Enchanted Caestus+2",
901503: "Enchanted Caestus+3",
901504: "Enchanted Caestus+4",
901505: "Enchanted Caestus+5",
901600: "Divine Caestus",
901601: "Divine Caestus+1",
901602: "Divine Caestus+2",
901603: "Divine Caestus+3",
901604: "Divine Caestus+4",
901605: "Divine Caestus+5",
901606: "Divine Caestus+6",
901607: "Divine Caestus+7",
901608: "Divine Caestus+8",
901609: "Divine Caestus+9",
901610: "Divine Caestus+10",
901700: "Occult Caestus",
901701: "Occult Caestus+1",
901702: "Occult Caestus+2",
901703: "Occult Caestus+3",
901704: "Occult Caestus+4",
901705: "Occult Caestus+5",
901800: "Fire Caestus",
901801: "Fire Caestus+1",
901802: "Fire Caestus+2",
901803: "Fire Caestus+3",
901804: "Fire Caestus+4",
901805: "Fire Caestus+5",
901806: "Fire Caestus+6",
901807: "Fire Caestus+7",
901808: "Fire Caestus+8",
901809: "Fire Caestus+9",
901810: "Fire Caestus+10",
901900: "Chaos Caestus",
901901: "Chaos Caestus+1",
901902: "Chaos Caestus+2",
901903: "Chaos Caestus+3",
901904: "Chaos Caestus+4",
901905: "Chaos Caestus+5",
902000: "Claw",
902001: "Claw+1",
902002: "Claw+2",
902003: "Claw+3",
902004: "Claw+4",
902005: "Claw+5",
902006: "Claw+6",
902007: "Claw+7",
902008: "Claw+8",
902009: "Claw+9",
902010: "Claw+10",
902011: "Claw+11",
902012: "Claw+12",
902013: "Claw+13",
902014: "Claw+14",
902015: "Claw+15",
902100: "Crystal Claw",
902101: "Crystal Claw+1",
902102: "Crystal Claw+2",
902103: "Crystal Claw+3",
902104: "Crystal Claw+4",
902105: "Crystal Claw+5",
902200: "Lightning Claw",
902201: "Lightning Claw+1",
902202: "Lightning Claw+2",
902203: "Lightning Claw+3",
902204: "Lightning Claw+4",
902205: "Lightning Claw+5",
902300: "Raw Claw",
902301: "Raw Claw+1",
902302: "Raw Claw+2",
902303: "Raw Claw+3",
902304: "Raw Claw+4",
902305: "Raw Claw+5",
902400: "Magic Claw",
902401: "Magic Claw+1",
902402: "Magic Claw+2",
902403: "Magic Claw+3",
902404: "Magic Claw+4",
902405: "Magic Claw+5",
902406: "Magic Claw+6",
902407: "Magic Claw+7",
902408: "Magic Claw+8",
902409: "Magic Claw+9",
902410: "Magic Claw+10",
902500: "Enchanted Claw",
902501: "Enchanted Claw+1",
902502: "Enchanted Claw+2",
902503: "Enchanted Claw+3",
902504: "Enchanted Claw+4",
902505: "Enchanted Claw+5",
902600: "Divine Claw",
902601: "Divine Claw+1",
902602: "Divine Claw+2",
902603: "Divine Claw+3",
902604: "Divine Claw+4",
902605: "Divine Claw+5",
902606: "Divine Claw+6",
902607: "Divine Claw+7",
902608: "Divine Claw+8",
902609: "Divine Claw+9",
902610: "Divine Claw+10",
902700: "Occult Claw",
902701: "Occult Claw+1",
902702: "Occult Claw+2",
902703: "Occult Claw+3",
902704: "Occult Claw+4",
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Configuration file (powered by YACS)."""
import os
from pycls.core.io import pathmgr
from yacs.config import CfgNode
# Global config object (example usage: from core.config import cfg)
_C = CfgNode()
cfg = _C
# ---------------------------------- Model options ----------------------------------- #
_C.MODEL = CfgNode()
# Model type
_C.MODEL.TYPE = ""
# Number of weight layers
_C.MODEL.DEPTH = 0
# Number of classes
_C.MODEL.NUM_CLASSES = 10
# Loss function (see pycls/models/loss.py for options)
_C.MODEL.LOSS_FUN = "cross_entropy"
# Activation function (relu or silu/swish)
_C.MODEL.ACTIVATION_FUN = "relu"
# Perform activation inplace if implemented
_C.MODEL.ACTIVATION_INPLACE = True
# Model scaling parameters, see models/scaler.py (has no effect unless scaler is used)
_C.MODEL.SCALING_TYPE = ""
_C.MODEL.SCALING_FACTOR = 1.0
# ---------------------------------- ResNet options ---------------------------------- #
_C.RESNET = CfgNode()
# Transformation function (see pycls/models/resnet.py for options)
_C.RESNET.TRANS_FUN = "basic_transform"
# Number of groups to use (1 -> ResNet; > 1 -> ResNeXt)
_C.RESNET.NUM_GROUPS = 1
# Width of each group (64 -> ResNet; 4 -> ResNeXt)
_C.RESNET.WIDTH_PER_GROUP = 64
# Apply stride to 1x1 conv (True -> MSRA; False -> fb.torch)
_C.RESNET.STRIDE_1X1 = True
# ---------------------------------- AnyNet options ---------------------------------- #
_C.ANYNET = CfgNode()
# Stem type
_C.ANYNET.STEM_TYPE = "simple_stem_in"
# Stem width
_C.ANYNET.STEM_W = 32
# Block type
_C.ANYNET.BLOCK_TYPE = "res_bottleneck_block"
# Depth for each stage (number of blocks in the stage)
_C.ANYNET.DEPTHS = []
# Width for each stage (width of each block in the stage)
_C.ANYNET.WIDTHS = []
# Strides for each stage (applies to the first block of each stage)
_C.ANYNET.STRIDES = []
# Bottleneck multipliers for each stage (applies to bottleneck block)
_C.ANYNET.BOT_MULS = []
# Group widths for each stage (applies to bottleneck block)
_C.ANYNET.GROUP_WS = []
# Head width for first conv in head (if 0 conv is omitted, as is the default)
_C.ANYNET.HEAD_W = 0
# Whether SE is enabled for res_bottleneck_block
_C.ANYNET.SE_ON = False
# SE ratio
_C.ANYNET.SE_R = 0.25
# ---------------------------------- RegNet options ---------------------------------- #
_C.REGNET = CfgNode()
# Stem type
_C.REGNET.STEM_TYPE = "simple_stem_in"
# Stem width
_C.REGNET.STEM_W = 32
# Block type
_C.REGNET.BLOCK_TYPE = "res_bottleneck_block"
# Stride of each stage
_C.REGNET.STRIDE = 2
# Squeeze-and-Excitation (RegNetY)
_C.REGNET.SE_ON = False
_C.REGNET.SE_R = 0.25
# Depth
_C.REGNET.DEPTH = 10
# Initial width
_C.REGNET.W0 = 32
# Slope
_C.REGNET.WA = 5.0
# Quantization
_C.REGNET.WM = 2.5
# Group width
_C.REGNET.GROUP_W = 16
# Bottleneck multiplier (bm = 1 / b from the paper)
_C.REGNET.BOT_MUL = 1.0
# Head width for first conv in head (if 0 conv is omitted, as is the default)
_C.REGNET.HEAD_W = 0
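# Illustrative sketch (not part of the upstream config): how DEPTH, W0, WA,
# and WM above expand into per-block widths via the quantized linear
# parameterization u_j = W0 + WA * j from the RegNet paper. The helper name
# is hypothetical; the real implementation lives in pycls/models/regnet.py.
def _example_regnet_widths(w_a=5.0, w_0=32, w_m=2.5, depth=10, q=8):
    import numpy as np  # numpy is already a pycls dependency
    ws_cont = np.arange(depth) * w_a + w_0  # continuous widths u_j
    ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))  # per-block exponents
    ws = w_0 * np.power(w_m, ks)  # widths quantized to W0 * WM^k
    return (np.round(ws / q) * q).astype(int).tolist()  # multiples of q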
# ------------------------------- EfficientNet options ------------------------------- #
_C.EN = CfgNode()
# Stem width
_C.EN.STEM_W = 32
# Depth for each stage (number of blocks in the stage)
_C.EN.DEPTHS = []
# Width for each stage (width of each block in the stage)
_C.EN.WIDTHS = []
# Expansion ratios for MBConv blocks in each stage
_C.EN.EXP_RATIOS = []
# Squeeze-and-Excitation (SE) ratio
_C.EN.SE_R = 0.25
# Strides for each stage (applies to the first block of each stage)
_C.EN.STRIDES = []
# Kernel sizes for each stage
_C.EN.KERNELS = []
# Head width
_C.EN.HEAD_W = 1280
# Drop connect ratio
_C.EN.DC_RATIO = 0.0
# Dropout ratio
_C.EN.DROPOUT_RATIO = 0.0
# ---------------------------- Vision Transformer options ---------------------------- #
_C.VIT = CfgNode()
# Patch Size (TRAIN.IM_SIZE must be divisible by PATCH_SIZE)
_C.VIT.PATCH_SIZE = 16
# Type of stem select from {'patchify', 'conv'}
_C.VIT.STEM_TYPE = "patchify"
# C-stem conv kernel sizes (https://arxiv.org/abs/2106.14881)
_C.VIT.C_STEM_KERNELS = []
# C-stem conv strides (the product of which must equal PATCH_SIZE)
_C.VIT.C_STEM_STRIDES = []
# C-stem conv output dims (last dim must equal HIDDEN_DIM)
_C.VIT.C_STEM_DIMS = []
# Number of layers in the encoder
_C.VIT.NUM_LAYERS = 12
# Number of self attention heads
_C.VIT.NUM_HEADS = 12
# Hidden dimension
_C.VIT.HIDDEN_DIM = 768
# Dimension of the MLP in the encoder
_C.VIT.MLP_DIM = 3072
# Type of classifier select from {'token', 'pooled'}
_C.VIT.CLASSIFIER_TYPE = "token"
# -------------------------------- Batch norm options -------------------------------- #
_C.BN = CfgNode()
# BN epsilon
_C.BN.EPS = 1e-5
# BN momentum (BN momentum in PyTorch = 1 - BN momentum in Caffe2)
_C.BN.MOM = 0.1
# Precise BN stats
_C.BN.USE_PRECISE_STATS = True
_C.BN.NUM_SAMPLES_PRECISE = 8192
# Initialize the gamma of the final BN of each block to zero
_C.BN.ZERO_INIT_FINAL_GAMMA = False
# Use a different weight decay for BN layers
_C.BN.USE_CUSTOM_WEIGHT_DECAY = False
_C.BN.CUSTOM_WEIGHT_DECAY = 0.0
# -------------------------------- Layer norm options -------------------------------- #
_C.LN = CfgNode()
# LN epsilon
_C.LN.EPS = 1e-5
# Use a different weight decay for LN layers
_C.LN.USE_CUSTOM_WEIGHT_DECAY = False
_C.LN.CUSTOM_WEIGHT_DECAY = 0.0
# -------------------------------- Optimizer options --------------------------------- #
_C.OPTIM = CfgNode()
# Type of optimizer select from {'sgd', 'adam', 'adamw'}
_C.OPTIM.OPTIMIZER = "sgd"
# Learning rate ranges from BASE_LR to MIN_LR*BASE_LR according to the LR_POLICY
_C.OPTIM.BASE_LR = 0.1
_C.OPTIM.MIN_LR = 0.0
# Learning rate policy select from {'cos', 'exp', 'lin', 'steps'}
_C.OPTIM.LR_POLICY = "cos"
# Steps for 'steps' policy (in epochs)
_C.OPTIM.STEPS = []
# Learning rate multiplier for 'steps' policy
_C.OPTIM.LR_MULT = 0.1
# Maximal number of epochs
_C.OPTIM.MAX_EPOCH = 200
# Momentum
_C.OPTIM.MOMENTUM = 0.9
# Momentum dampening
_C.OPTIM.DAMPENING = 0.0
# Nesterov momentum
_C.OPTIM.NESTEROV = True
# Betas (for Adam/AdamW optimizer)
_C.OPTIM.BETA1 = 0.9
_C.OPTIM.BETA2 = 0.999
# L2 regularization
_C.OPTIM.WEIGHT_DECAY = 5e-4
# Use a different weight decay for all biases (excluding those in BN/LN layers)
_C.OPTIM.BIAS_USE_CUSTOM_WEIGHT_DECAY = False
_C.OPTIM.BIAS_CUSTOM_WEIGHT_DECAY = 0.0
# Start the warm up from OPTIM.BASE_LR * OPTIM.WARMUP_FACTOR
_C.OPTIM.WARMUP_FACTOR = 0.1
# Gradually warm up the OPTIM.BASE_LR over this number of epochs
_C.OPTIM.WARMUP_EPOCHS = 0.0
# Exponential Moving Average (EMA) update value
_C.OPTIM.EMA_ALPHA = 1e-5
# Iteration frequency with which to update EMA weights
_C.OPTIM.EMA_UPDATE_PERIOD = 32
# WARM-UP Iter
_C.OPTIM.WARMUP_ITER = False
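# Illustrative sketch (not part of the upstream config) of how the options
# above combine for the default 'cos' policy with linear warmup; the real
# schedule lives in pycls/core/optimizer.py and this mirrors it only in spirit.
def _example_lr_at_epoch(cur_epoch, base_lr=0.1, min_lr=0.0, max_epoch=200,
                         warmup_epochs=0.0, warmup_factor=0.1):
    import math
    # Cosine decay from BASE_LR down to MIN_LR * BASE_LR
    lr = (min_lr + (1.0 - min_lr) *
          0.5 * (1.0 + math.cos(math.pi * cur_epoch / max_epoch))) * base_lr
    # Linear warmup from WARMUP_FACTOR * BASE_LR over the first WARMUP_EPOCHS
    if cur_epoch < warmup_epochs:
        alpha = cur_epoch / warmup_epochs
        lr *= warmup_factor * (1.0 - alpha) + alpha
    return lr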
# --------------------------------- Training options --------------------------------- #
_C.TRAIN = CfgNode()
# Dataset and split
_C.TRAIN.DATASET = ""
_C.TRAIN.SPLIT = "train"
# Total mini-batch size
_C.TRAIN.BATCH_SIZE = 128
# Image size
_C.TRAIN.IM_SIZE = 224
# Resume training from the latest checkpoint in the output directory
_C.TRAIN.AUTO_RESUME = True
# Weights to start training from
_C.TRAIN.WEIGHTS = ""
# If True train using mixed precision
_C.TRAIN.MIXED_PRECISION = False
# Label smoothing value in 0 to 1 (0 gives no smoothing)
_C.TRAIN.LABEL_SMOOTHING = 0.0
# Batch mixup regularization value in 0 to 1 (0 gives no mixup)
_C.TRAIN.MIXUP_ALPHA = 0.0
# Batch cutmix regularization value in 0 to 1 (0 gives no cutmix)
_C.TRAIN.CUTMIX_ALPHA = 0.0
# Standard deviation for AlexNet-style PCA jitter (0 gives no PCA jitter)
_C.TRAIN.PCA_STD = 0.1
# Data augmentation to use ("", "AutoAugment", "RandAugment_N2_M0.5", etc.)
_C.TRAIN.AUGMENT = ""
# --------------------------------- Testing options ---------------------------------- #
_C.TEST = CfgNode()
# Dataset and split
_C.TEST.DATASET = ""
_C.TEST.SPLIT = "val"
# Total mini-batch size
_C.TEST.BATCH_SIZE = 200
# Image size
_C.TEST.IM_SIZE = 256
# Weights to use for testing
_C.TEST.WEIGHTS = ""
# ------------------------------- Data loader options -------------------------------- #
_C.DATA_LOADER = CfgNode()
# Number of data loader workers per process
_C.DATA_LOADER.NUM_WORKERS = 8
# Load data to pinned host memory
_C.DATA_LOADER.PIN_MEMORY = True
# ---------------------------------- CUDNN options ----------------------------------- #
_C.CUDNN = CfgNode()
# Perform benchmarking to select fastest CUDNN algorithms (best for fixed input sizes)
_C.CUDNN.BENCHMARK = True
# ------------------------------- Precise time options ------------------------------- #
_C.PREC_TIME = CfgNode()
# Number of iterations to warm up the caches
_C.PREC_TIME.WARMUP_ITER = 3
# Number of iterations to compute avg time
_C.PREC_TIME.NUM_ITER = 30
# ---------------------------------- Launch options ---------------------------------- #
_C.LAUNCH = CfgNode()
# The launch mode, may be 'local' or 'slurm' (or 'submitit_local' for debugging)
# The 'local' mode uses a multi-GPU setup via torch.multiprocessing.start_processes.
# The 'slurm' mode uses submitit to launch a job on a SLURM cluster and provides
# support for MULTI-NODE jobs (and is the only way to launch MULTI-NODE jobs).
# In 'slurm' mode, the LAUNCH options below can be used to control the SLURM options.
# Note that NUM_GPUS (not part of LAUNCH options) determines total GPUs requested.
_C.LAUNCH.MODE = "local"
# Launch options that are only used if LAUNCH.MODE is 'slurm'
_C.LAUNCH.MAX_RETRY = 3
_C.LAUNCH.NAME = "pycls_job"
_C.LAUNCH.COMMENT = ""
_C.LAUNCH.CPUS_PER_GPU = 10
_C.LAUNCH.MEM_PER_GPU = 60
_C.LAUNCH.PARTITION = "devlab"
_C.LAUNCH.GPU_TYPE = "volta"
_C.LAUNCH.TIME_LIMIT = 4200
_C.LAUNCH.EMAIL = ""
# ----------------------------------- Misc options ----------------------------------- #
# Optional description of a config
_C.DESC = ""
# If True output additional info to log
_C.VERBOSE = True
# Number of GPUs to use (applies to both training and testing)
_C.NUM_GPUS = 1
# Maximum number of GPUs available per node (unlikely to need to be changed)
_C.MAX_GPUS_PER_NODE = 8
# Output directory
_C.OUT_DIR = "/tmp"
# Config destination (in OUT_DIR)
_C.CFG_DEST = "config.yaml"
# Note that non-determinism may still be present due to non-deterministic GPU ops
_C.RNG_SEED = 1
# Log destination ('stdout' or 'file')
_C.LOG_DEST = "stdout"
# Log period in iters
_C.LOG_PERIOD = 10
# Distributed backend
_C.DIST_BACKEND = "nccl"
# Hostname and port range for multi-process groups (actual port selected randomly)
_C.HOST = "localhost"
_C.PORT_RANGE = [10000, 65000]
# Models weights referred to by URL are downloaded to this local cache
_C.DOWNLOAD_CACHE = "/tmp/pycls-download-cache"
# ---------------------------------- Default config ---------------------------------- #
_CFG_DEFAULT = _C.clone()
_CFG_DEFAULT.freeze()
# --------------------------------- Deprecated keys ---------------------------------- #
_C.register_deprecated_key("MEM")
_C.register_deprecated_key("MEM.RELU_INPLACE")
_C.register_deprecated_key("OPTIM.GAMMA")
_C.register_deprecated_key("PREC_TIME.BATCH_SIZE")
_C.register_deprecated_key("PREC_TIME.ENABLED")
_C.register_deprecated_key("PORT")
_C.register_deprecated_key("TRAIN.EVAL_PERIOD")
_C.register_deprecated_key("TRAIN.CHECKPOINT_PERIOD")
def assert_cfg():
"""Checks config values invariants."""
err_str = "The first lr step must start at 0"
assert not _C.OPTIM.STEPS or _C.OPTIM.STEPS[0] == 0, err_str
data_splits = ["train", "val", "test"]
err_str = "Data split '{}' not supported"
assert _C.TRAIN.SPLIT in data_splits, err_str.format(_C.TRAIN.SPLIT)
assert _C.TEST.SPLIT in data_splits, err_str.format(_C.TEST.SPLIT)
err_str = "Mini-batch | |
UnaryExpression(Keyword('waypointloiterradius'), Array, Number),
UnaryExpression(Keyword('waypointloitertype'), Array, String),
UnaryExpression(Keyword('waypointname'), Array, String),
UnaryExpression(Keyword('waypointposition'), Array, Array),
UnaryExpression(Keyword('waypoints'), Object, Array),
UnaryExpression(Keyword('waypoints'), Group, Array),
UnaryExpression(Keyword('waypointscript'), Array, String),
UnaryExpression(Keyword('waypointsenableduav'), Object, Boolean),
UnaryExpression(Keyword('waypointshow'), Array, String),
UnaryExpression(Keyword('waypointspeed'), Array, String),
UnaryExpression(Keyword('waypointstatements'), Array, Array),
UnaryExpression(Keyword('waypointtimeout'), Array, Array),
UnaryExpression(Keyword('waypointtimeoutcurrent'), Group, Number),
UnaryExpression(Keyword('waypointtype'), Array, String),
UnaryExpression(Keyword('waypointvisible'), Array, Number),
UnaryExpression(Keyword('weaponcargo'), Object, Array),
UnaryExpression(Keyword('weaponinertia'), Object, Array),
UnaryExpression(Keyword('weaponlowered'), Object, Boolean),
UnaryExpression(Keyword('weapons'), Object, Array),
UnaryExpression(Keyword('weaponsitems'), Object, Array),
UnaryExpression(Keyword('weaponsitemscargo'), Object, Array),
UnaryExpression(Keyword('weaponstate'), Object, Array),
UnaryExpression(Keyword('weaponstate'), Array, Array),
UnaryExpression(Keyword('weightrtd'), Object, Array),
UnaryExpression(Keyword('wfsidetext'), Side, String),
UnaryExpression(Keyword('wfsidetext'), Object, String),
UnaryExpression(Keyword('wfsidetext'), Group, String),
UnaryExpression(Keyword('while'), Code, WhileType),
UnaryExpression(Keyword('wingsforcesrtd'), Object, Array),
UnaryExpression(Keyword('with'), Namespace, WithType),
UnaryExpression(Keyword('worldtoscreen'), Array, Array),
BinaryExpression(Object, Keyword('action'), Array, Nothing),
BinaryExpression(Object, Keyword('actionparams'), Number, Array),
BinaryExpression(Number, Keyword('add3denlayer'), String, Number),
BinaryExpression(Object, Keyword('addaction'), Array, Number),
BinaryExpression(Object, Keyword('addbackpack'), String, Nothing),
BinaryExpression(Object, Keyword('addbackpackcargo'), Array, Nothing),
BinaryExpression(Object, Keyword('addbackpackcargoglobal'), Array, Nothing),
BinaryExpression(Object, Keyword('addbackpackglobal'), String, Nothing),
BinaryExpression(Object, Keyword('addcuratoraddons'), Array, Nothing),
BinaryExpression(Object, Keyword('addcuratorcameraarea'), Array, Nothing),
BinaryExpression(Object, Keyword('addcuratoreditableobjects'), Array, Nothing),
BinaryExpression(Object, Keyword('addcuratoreditingarea'), Array, Nothing),
BinaryExpression(Object, Keyword('addcuratorpoints'), Number, Nothing),
BinaryExpression(Control, Keyword('addeditorobject'), Array, String),
BinaryExpression(Object, Keyword('addeventhandler'), Array, Number),
BinaryExpression(Object, Keyword('addforce'), Array, Nothing),
BinaryExpression(Object, Keyword('addgoggles'), String, Nothing),
BinaryExpression(Group, Keyword('addgroupicon'), Array, Number),
BinaryExpression(Object, Keyword('addhandgunitem'), String, Nothing),
BinaryExpression(Object, Keyword('addheadgear'), String, Nothing),
BinaryExpression(Object, Keyword('additem'), String, Nothing),
BinaryExpression(Object, Keyword('additemcargo'), Array, Nothing),
BinaryExpression(Object, Keyword('additemcargoglobal'), Array, Nothing),
BinaryExpression(Object, Keyword('additemtobackpack'), String, Nothing),
BinaryExpression(Object, Keyword('additemtouniform'), String, Nothing),
BinaryExpression(Object, Keyword('additemtovest'), String, Nothing),
BinaryExpression(Object, Keyword('addlivestats'), Number, Nothing),
BinaryExpression(Object, Keyword('addmagazine'), String, Nothing),
BinaryExpression(Object, Keyword('addmagazine'), Array, Nothing),
BinaryExpression(Object, Keyword('addmagazineammocargo'), Array, Nothing),
BinaryExpression(Object, Keyword('addmagazinecargo'), Array, Nothing),
BinaryExpression(Object, Keyword('addmagazinecargoglobal'), Array, Nothing),
BinaryExpression(Object, Keyword('addmagazineglobal'), String, Nothing),
BinaryExpression(Object, Keyword('addmagazines'), Array, Nothing),
BinaryExpression(Object, Keyword('addmagazineturret'), Array, Nothing),
BinaryExpression(Control, Keyword('addmenu'), Array, Number),
BinaryExpression(Control, Keyword('addmenuitem'), Array, Number),
BinaryExpression(Object, Keyword('addmpeventhandler'), Array, Number),
BinaryExpression(Object, Keyword('addownedmine'), Object, Nothing),
BinaryExpression(Object, Keyword('addplayerscores'), Array, Nothing),
BinaryExpression(Object, Keyword('addprimaryweaponitem'), String, Nothing),
BinaryExpression(String, Keyword('addpublicvariableeventhandler'), Code, Nothing),
BinaryExpression(String, Keyword('addpublicvariableeventhandler'), Array, Nothing),
BinaryExpression(Object, Keyword('addrating'), Number, Nothing),
BinaryExpression(TeamMember, Keyword('addresources'), Array, Nothing),
BinaryExpression(Object, Keyword('addscore'), Number, Nothing),
BinaryExpression(Side, Keyword('addscoreside'), Number, Nothing),
BinaryExpression(Object, Keyword('addsecondaryweaponitem'), String, Nothing),
BinaryExpression(TeamMember, Keyword('addteammember'), TeamMember, Nothing),
BinaryExpression(Object, Keyword('addtorque'), Array, Nothing),
BinaryExpression(Object, Keyword('adduniform'), String, Nothing),
BinaryExpression(Group, Keyword('addvehicle'), Object, Nothing),
BinaryExpression(Object, Keyword('addvest'), String, Nothing),
BinaryExpression(Group, Keyword('addwaypoint'), Array, Array),
BinaryExpression(Object, Keyword('addweapon'), String, Nothing),
BinaryExpression(Object, Keyword('addweaponcargo'), Array, Nothing),
BinaryExpression(Object, Keyword('addweaponcargoglobal'), Array, Nothing),
BinaryExpression(Object, Keyword('addweaponglobal'), String, Nothing),
BinaryExpression(Object, Keyword('addweaponitem'), Array, Nothing),
BinaryExpression(Object, Keyword('addweaponturret'), Array, Nothing),
BinaryExpression(Object, Keyword('aimedattarget'), Array, Number),
BinaryExpression(Control, Keyword('allow3dmode'), Boolean, Nothing),
BinaryExpression(Object, Keyword('allowcrewinimmobile'), Boolean, Nothing),
BinaryExpression(Object, Keyword('allowcuratorlogicignoreareas'), Boolean, Nothing),
BinaryExpression(Object, Keyword('allowdamage'), Boolean, Nothing),
BinaryExpression(Object, Keyword('allowdammage'), Boolean, Nothing),
BinaryExpression(Control, Keyword('allowfileoperations'), Boolean, Nothing),
BinaryExpression(Object, Keyword('allowfleeing'), Number, Nothing),
BinaryExpression(Group, Keyword('allowfleeing'), Number, Nothing),
BinaryExpression(Array, Keyword('allowgetin'), Boolean, Nothing),
BinaryExpression(Object, Keyword('allowsprint'), Boolean, Nothing),
BinaryExpression(Object, Keyword('ammo'), String, Number),
BinaryExpression(Object, Keyword('ammoonpylon'), String, Number),
BinaryExpression(Object, Keyword('ammoonpylon'), Number, Number),
BinaryExpression(Boolean, Keyword('and'), Boolean, Boolean),
BinaryExpression(Boolean, Keyword('and'), Code, Boolean),
BinaryExpression(Object, Keyword('animate'), Array, Nothing),
BinaryExpression(Object, Keyword('animatebay'), Array, Nothing),
BinaryExpression(Object, Keyword('animatedoor'), Array, Nothing),
BinaryExpression(Object, Keyword('animatepylon'), Array, Nothing),
BinaryExpression(Object, Keyword('animatesource'), Array, Nothing),
BinaryExpression(Object, Keyword('animationphase'), String, Number),
BinaryExpression(Object, Keyword('animationsourcephase'), String, Number),
BinaryExpression(Array, Keyword('append'), Array, Nothing),
BinaryExpression(Array, Keyword('apply'), Code, Array),
BinaryExpression(Array, Keyword('arrayintersect'), Array, Array),
BinaryExpression(Object, Keyword('assignascargo'), Object, Nothing),
BinaryExpression(Object, Keyword('assignascargoindex'), Array, Nothing),
BinaryExpression(Object, Keyword('assignascommander'), Object, Nothing),
BinaryExpression(Object, Keyword('assignasdriver'), Object, Nothing),
BinaryExpression(Object, Keyword('assignasgunner'), Object, Nothing),
BinaryExpression(Object, Keyword('assignasturret'), Array, Nothing),
BinaryExpression(Object, Keyword('assigncurator'), Object, Nothing),
BinaryExpression(Object, Keyword('assignitem'), String, Nothing),
BinaryExpression(Object, Keyword('assignteam'), String, Nothing),
BinaryExpression(Object, Keyword('assigntoairport'), Object, Nothing),
BinaryExpression(Object, Keyword('assigntoairport'), Number, Nothing),
BinaryExpression(Number, Keyword('atan2'), Number, Number),
BinaryExpression(Location, Keyword('attachobject'), Object, Nothing),
BinaryExpression(Object, Keyword('attachto'), Array, Nothing),
BinaryExpression(Object, Keyword('backpackspacefor'), String, Array),
BinaryExpression(Type, Keyword('breakout'), String, Anything),
BinaryExpression(Object, Keyword('buildingexit'), Number, Array),
BinaryExpression(Object, Keyword('buildingpos'), Number, Array),
BinaryExpression(Control, Keyword('buttonsetaction'), String, Nothing),
BinaryExpression(Type, Keyword('call'), Code, Anything),
BinaryExpression(String, Keyword('callextension'), String, String),
BinaryExpression(String, Keyword('callextension'), Array, Array),
BinaryExpression(Object, Keyword('camcommand'), String, Nothing),
BinaryExpression(Object, Keyword('camcommit'), Number, Nothing),
BinaryExpression(Object, Keyword('camcommitprepared'), Number, Nothing),
BinaryExpression(Object, Keyword('camconstuctionsetparams'), Array, Nothing),
BinaryExpression(String, Keyword('camcreate'), Array, Object),
BinaryExpression(Object, Keyword('cameraeffect'), Array, Nothing),
BinaryExpression(Object, Keyword('campreload'), Number, Nothing),
BinaryExpression(Object, Keyword('campreparebank'), Number, Nothing),
BinaryExpression(Object, Keyword('campreparedir'), Number, Nothing),
BinaryExpression(Object, Keyword('campreparedive'), Number, Nothing),
BinaryExpression(Object, Keyword('campreparefocus'), Array, Nothing),
BinaryExpression(Object, Keyword('campreparefov'), Number, Nothing),
BinaryExpression(Object, Keyword('campreparefovrange'), Array, Nothing),
BinaryExpression(Object, Keyword('campreparepos'), Array, Nothing),
BinaryExpression(Object, Keyword('campreparerelpos'), Array, Nothing),
BinaryExpression(Object, Keyword('campreparetarget'), Object, Nothing),
BinaryExpression(Object, Keyword('campreparetarget'), Array, Nothing),
BinaryExpression(Object, Keyword('camsetbank'), Number, Nothing),
BinaryExpression(Object, Keyword('camsetdir'), Array, Nothing),
BinaryExpression(Object, Keyword('camsetdive'), Number, Nothing),
BinaryExpression(Object, Keyword('camsetfocus'), Array, Nothing),
BinaryExpression(Object, Keyword('camsetfov'), Number, Nothing),
BinaryExpression(Object, Keyword('camsetfovrange'), Array, Nothing),
BinaryExpression(Object, Keyword('camsetpos'), Array, Nothing),
BinaryExpression(Object, Keyword('camsetrelpos'), Array, Nothing),
BinaryExpression(Object, Keyword('camsettarget'), Object, Nothing),
BinaryExpression(Object, Keyword('camsettarget'), Array, Nothing),
BinaryExpression(Object, Keyword('canadd'), String, Boolean),
BinaryExpression(Object, Keyword('canadd'), Array, Boolean),
BinaryExpression(Object, Keyword('canadditemtobackpack'), String, Boolean),
BinaryExpression(Object, Keyword('canadditemtobackpack'), Array, Boolean),
BinaryExpression(Object, Keyword('canadditemtouniform'), String, Boolean),
BinaryExpression(Object, Keyword('canadditemtouniform'), Array, Boolean),
BinaryExpression(Object, Keyword('canadditemtovest'), String, Boolean),
BinaryExpression(Object, Keyword('canadditemtovest'), Array, Boolean),
BinaryExpression(Object, Keyword('canslingload'), Object, Boolean),
BinaryExpression(Object, Keyword('canvehiclecargo'), Object, Array),
BinaryExpression(TryType, Keyword('catch'), Code, Anything),
BinaryExpression(Control, Keyword('cbsetchecked'), Boolean, Nothing),
BinaryExpression(Array, Keyword('checkvisibility'), Array, Number),
BinaryExpression(Type, Keyword('clear3denattribute'), String, Nothing),
BinaryExpression(Display, Keyword('closedisplay'), Number, Nothing),
BinaryExpression(Object, Keyword('commandartilleryfire'), Array, Nothing),
BinaryExpression(Array, Keyword('commandartilleryfire'), Array, Nothing),
BinaryExpression(Object, Keyword('commandchat'), String, Nothing),
BinaryExpression(Array, Keyword('commandchat'), String, Nothing),
BinaryExpression(Object, Keyword('commandfire'), Object, Nothing),
BinaryExpression(Array, Keyword('commandfire'), Object, Nothing),
BinaryExpression(Object, Keyword('commandfollow'), Object, Nothing),
BinaryExpression(Array, Keyword('commandfollow'), Object, Nothing),
BinaryExpression(Object, Keyword('commandfsm'), Array, Nothing),
BinaryExpression(Array, Keyword('commandfsm'), Array, Nothing),
BinaryExpression(Object, Keyword('commandmove'), Array, Nothing),
BinaryExpression(Array, Keyword('commandmove'), Array, Nothing),
BinaryExpression(Object, Keyword('commandradio'), String, Nothing),
BinaryExpression(Array, Keyword('commandradio'), String, Nothing),
BinaryExpression(Object, Keyword('commandsuppressivefire'), Object, Nothing),
BinaryExpression(Object, Keyword('commandsuppressivefire'), Array, Nothing),
BinaryExpression(Array, Keyword('commandsuppressivefire'), Object, Nothing),
BinaryExpression(Array, Keyword('commandsuppressivefire'), Array, Nothing),
BinaryExpression(Object, Keyword('commandtarget'), Object, Nothing),
BinaryExpression(Array, Keyword('commandtarget'), Object, Nothing),
BinaryExpression(Object, Keyword('commandwatch'), Array, Nothing),
BinaryExpression(Array, Keyword('commandwatch'), Array, Nothing),
BinaryExpression(Object, Keyword('commandwatch'), Object, Nothing),
BinaryExpression(Array, Keyword('commandwatch'), Object, Nothing),
BinaryExpression(String, Keyword('configclasses'), Config, Array),
BinaryExpression(Object, Keyword('confirmsensortarget'), Array, Nothing),
BinaryExpression(Object, Keyword('connectterminaltouav'), Object, Boolean),
BinaryExpression(Control, Keyword('controlsgroupctrl'), Number, Control),
BinaryExpression(Group, Keyword('copywaypoints'), Group, Nothing),
BinaryExpression(Code, Keyword('count'), Array, Number),
BinaryExpression(Object, Keyword('countenemy'), Array, Number),
BinaryExpression(Object, Keyword('countfriendly'), Array, Number),
BinaryExpression(Side, Keyword('countside'), Array, Number),
BinaryExpression(String, Keyword('counttype'), Array, Number),
BinaryExpression(Object, Keyword('countunknown'), Array, Number),
BinaryExpression(Group, Keyword('create3denentity'), Array, Anything),
BinaryExpression(Object, Keyword('creatediaryrecord'), Array, DiaryReport),
BinaryExpression(Object, Keyword('creatediarysubject'), Array, Number),
BinaryExpression(Display, Keyword('createdisplay'), String, Display),
BinaryExpression(Control, Keyword('createmenu'), Number, Nothing),
BinaryExpression(Display, Keyword('createmissiondisplay'), String, Display),
BinaryExpression(Display, Keyword('createmissiondisplay'), Array, Display),
BinaryExpression(Display, Keyword('creatempcampaigndisplay'), String, Nothing),
BinaryExpression(Object, Keyword('createsimpletask'), Array, Task),
BinaryExpression(String, Keyword('createsite'), Array, Object),
BinaryExpression(TeamMember, Keyword('createtask'), Array, Task),
BinaryExpression(String, Keyword('createunit'), Array, Nothing),
BinaryExpression(Group, Keyword('createunit'), Array, Object),
BinaryExpression(String, Keyword('createvehicle'), Array, Object),
BinaryExpression(String, Keyword('createvehiclelocal'), Array, Object),
BinaryExpression(Control, Keyword('ctdata'), Number, String),
BinaryExpression(Control, Keyword('ctfindheaderrows'), Number, Array),
BinaryExpression(Control, Keyword('ctfindrowheader'), Number, Number),
BinaryExpression(Control, Keyword('ctheadercontrols'), Number, Array),
BinaryExpression(Control, Keyword('ctremoveheaders'), Array, Nothing),
BinaryExpression(Control, Keyword('ctremoverows'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrladdeventhandler'), Array, Number),
BinaryExpression(Control, Keyword('ctrlanimatemodel'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlanimationphasemodel'), String, Number),
BinaryExpression(Control, Keyword('ctrlchecked'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlcommit'), Number, Nothing),
BinaryExpression(Display, Keyword('ctrlcreate'), Array, Control),
BinaryExpression(Control, Keyword('ctrlenable'), Boolean, Nothing),
BinaryExpression(Control, Keyword('ctrlmapanimadd'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlmapcursor'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlmapscreentoworld'), Array, Array),
BinaryExpression(Control, Keyword('ctrlmapworldtoscreen'), Array, Array),
BinaryExpression(Control, Keyword('ctrlremovealleventhandlers'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlremoveeventhandler'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetactivecolor'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetangle'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetautoscrolldelay'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetautoscrollrewind'), Boolean, Nothing),
BinaryExpression(Control, Keyword('ctrlsetautoscrollspeed'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetbackgroundcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetchecked'), Boolean, Nothing),
BinaryExpression(Control, Keyword('ctrlsetchecked'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetdisabledcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlseteventhandler'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfade'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfont'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth1'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth1b'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth2'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth2b'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth3'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth3b'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth4'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth4b'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth5'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth5b'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth6'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfonth6b'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheight'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheighth1'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheighth2'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheighth3'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheighth4'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheighth5'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheighth6'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontheightsecondary'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontp'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontp'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontpb'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetfontsecondary'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetforegroundcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetmodel'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetmodeldirandup'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetmodelscale'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetpixelprecision'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetpixelprecision'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsetposition'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsetscale'), Number, Nothing),
BinaryExpression(Control, Keyword('ctrlsetstructuredtext'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsettext'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsettextcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsettextcolorsecondary'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsettextsecondary'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsettooltip'), String, Nothing),
BinaryExpression(Control, Keyword('ctrlsettooltipcolorbox'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsettooltipcolorshade'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlsettooltipcolortext'), Array, Nothing),
BinaryExpression(Control, Keyword('ctrlshow'), Boolean, Nothing),
BinaryExpression(Control, Keyword('ctrowcontrols'), Number, Array),
BinaryExpression(Control, Keyword('ctsetcursel'), Number, Nothing),
BinaryExpression(Control, Keyword('ctsetdata'), Array, Nothing),
BinaryExpression(Control, Keyword('ctsetheadertemplate'), Config, Nothing),
BinaryExpression(Control, Keyword('ctsetrowtemplate'), Config, Nothing),
BinaryExpression(Control, Keyword('ctsetvalue'), Array, Nothing),
BinaryExpression(Control, Keyword('ctvalue'), Number, Number),
BinaryExpression(Object, Keyword('curatorcoef'), String, Number),
BinaryExpression(Object, Keyword('currentmagazinedetailturret'), Array, String),
BinaryExpression(Object, Keyword('currentmagazineturret'), Array, String),
BinaryExpression(Object, Keyword('currentweaponturret'), Array, String),
BinaryExpression(Object, Keyword('customchat'), Array, Nothing),
BinaryExpression(Object, Keyword('customradio'), Array, Nothing),
BinaryExpression(String, Keyword('cutfadeout'), Number, Number),
BinaryExpression(Number, Keyword('cutfadeout'), Number, Nothing),
BinaryExpression(String, Keyword('cutobj'), Array, Number),
BinaryExpression(Number, Keyword('cutobj'), Array, Nothing),
BinaryExpression(String, Keyword('cutrsc'), Array, Number),
BinaryExpression(Number, Keyword('cutrsc'), Array, Nothing),
BinaryExpression(String, Keyword('cuttext'), Array, Number),
BinaryExpression(Number, Keyword('cuttext'), Array, Nothing),
BinaryExpression(Number, Keyword('debugfsm'), Boolean, Nothing),
BinaryExpression(Array, Keyword('deleteat'), Number, Anything),
BinaryExpression(Control, Keyword('deleteeditorobject'), String,
'mwot',
'n': 'wikinews',
'netvillage': 'netvillage',
'nkcells': 'nkcells',
'nomcom': 'nomcom',
'nosmoke': 'nosmoke',
'nost': 'nost',
'oeis': 'oeis',
'oldwikisource': 'oldwikisource',
'olpc': 'olpc',
'onelook': 'onelook',
'openfacts': 'openfacts',
'openstreetmap': 'openstreetmap',
'openwetware': 'openwetware',
'openwiki': 'openwiki',
'opera7wiki': 'opera7wiki',
'organicdesign': 'organicdesign',
'orgpatterns': 'orgpatterns',
'orthodoxwiki': 'orthodoxwiki',
'osi reference model': 'osi reference model',
'otrs': 'otrs',
'otrswiki': 'otrswiki',
'ourmedia': 'ourmedia',
'paganwiki': 'paganwiki',
'panawiki': 'panawiki',
'pangalacticorg': 'pangalacticorg',
'patwiki': 'patwiki',
'perlconfwiki': 'perlconfwiki',
'perlnet': 'perlnet',
'personaltelco': 'personaltelco',
'phpwiki': 'phpwiki',
'phwiki': 'phwiki',
'planetmath': 'planetmath',
'pmeg': 'pmeg',
'pmwiki': 'pmwiki',
'psycle': 'psycle',
'purlnet': 'purlnet',
'pythoninfo': 'pythoninfo',
'pythonwiki': 'pythonwiki',
'pywiki': 'pywiki',
'q': 'wikiquote',
'qcwiki': 'qcwiki',
'quality': 'quality',
'qwiki': 'qwiki',
'r3000': 'r3000',
'raec': 'raec',
'rakwiki': 'rakwiki',
'reuterswiki': 'reuterswiki',
'rev': 'rev',
'revo': 'revo',
'rfc': 'rfc',
'rheinneckar': 'rheinneckar',
'robowiki': 'robowiki',
'rowiki': 'rowiki',
's': 'wikisource',
's23wiki': 's23wiki',
'scholar': 'scholar',
'schoolswp': 'schoolswp',
'scores': 'scores',
'scoutwiki': 'scoutwiki',
'scramble': 'scramble',
'seapig': 'seapig',
'seattlewiki': 'seattlewiki',
'seattlewireless': 'seattlewireless',
'senseislibrary': 'senseislibrary',
'silcode': 'silcode',
'slashdot': 'slashdot',
'slwiki': 'slwiki',
'smikipedia': 'smikipedia',
'sourceforge': 'sourceforge',
'spcom': 'spcom',
'species': 'species',
'squeak': 'squeak',
'stable': 'stable',
'strategywiki': 'strategywiki',
'sulutil': 'sulutil',
'susning': 'susning',
'svgwiki': 'svgwiki',
'svn': 'svn',
'swinbrain': 'swinbrain',
'swingwiki': 'swingwiki',
'swtrain': 'swtrain',
'tabwiki': 'tabwiki',
'takipedia': 'takipedia',
'tavi': 'tavi',
'tclerswiki': 'tclerswiki',
'technorati': 'technorati',
'tejo': 'tejo',
'tesoltaiwan': 'tesoltaiwan',
'testwiki': 'testwiki',
'thelemapedia': 'thelemapedia',
'theopedia': 'theopedia',
'theppn': 'theppn',
'thinkwiki': 'thinkwiki',
'tibiawiki': 'tibiawiki',
'ticket': 'ticket',
'tmbw': 'tmbw',
'tmnet': 'tmnet',
'tmwiki': 'tmwiki',
'tokyonights': 'tokyonights',
'tools': 'tools',
'translatewiki': 'translatewiki',
'trash!italia': 'trash!italia',
'tswiki': 'tswiki',
'turismo': 'turismo',
'tviv': 'tviv',
'tvtropes': 'tvtropes',
'twiki': 'twiki',
'twistedwiki': 'twistedwiki',
'tyvawiki': 'tyvawiki',
'uncyclopedia': 'uncyclopedia',
'unreal': 'unreal',
'urbandict': 'urbandict',
'usej': 'usej',
'usemod': 'usemod',
'v': 'wikiversity',
'valuewiki': 'valuewiki',
'veropedia': 'veropedia',
'vinismo': 'vinismo',
'vkol': 'vkol',
'vlos': 'vlos',
'voipinfo': 'voipinfo',
'w': 'wikipedia',
'warpedview': 'warpedview',
'webdevwikinl': 'webdevwikinl',
'webisodes': 'webisodes',
'webseitzwiki': 'webseitzwiki',
'wg': 'wg',
'wiki': 'wiki',
'wikia': 'wikia',
'wikianso': 'wikianso',
'wikiasite': 'wikiasite',
'wikible': 'wikible',
'wikibooks': 'wikibooks',
'wikichat': 'wikichat',
'wikichristian': 'wikichristian',
'wikicities': 'wikicities',
'wikicity': 'wikicity',
'wikif1': 'wikif1',
'wikifur': 'wikifur',
'wikihow': 'wikihow',
'wikiindex': 'wikiindex',
'wikilemon': 'wikilemon',
'wikilivres': 'wikilivres',
'wikimac-de': 'wikimac-de',
'wikimac-fr': 'wikimac-fr',
'wikimedia': 'wikimedia',
'wikinews': 'wikinews',
'wikinfo': 'wikinfo',
'wikinurse': 'wikinurse',
'wikinvest': 'wikinvest',
'wikipaltz': 'wikipaltz',
'wikipedia': 'wikipedia',
'wikipediawikipedia': 'wikipediawikipedia',
'wikiquote': 'wikiquote',
'wikireason': 'wikireason',
'wikischool': 'wikischool',
'wikisophia': 'wikisophia',
'wikisource': 'wikisource',
'wikispecies': 'wikispecies',
'wikispot': 'wikispot',
'wikiti': 'wikiti',
'wikitravel': 'wikitravel',
'wikitree': 'wikitree',
'wikiversity': 'wikiversity',
'wikiwikiweb': 'wikiwikiweb',
'wikt': 'wiktionary',
'wiktionary': 'wiktionary',
'wipipedia': 'wipipedia',
'wlug': 'wlug',
'wm2005': 'wm2005',
'wm2006': 'wm2006',
'wm2007': 'wm2007',
'wm2008': 'wm2008',
'wm2009': 'wm2009',
'wm2010': 'wm2010',
'wmania': 'wmania',
'wmcz': 'wmcz',
'wmf': 'wmf',
'wmrs': 'wmrs',
'wmse': 'wmse',
'wookieepedia': 'wookieepedia',
'world66': 'world66',
'wowwiki': 'wowwiki',
'wqy': 'wqy',
'wurmpedia': 'wurmpedia',
'wznan': 'wznan',
'xboxic': 'xboxic',
'zh-cfr': 'zh-cfr',
'zrhwiki': 'zrhwiki',
'zum': 'zum',
'zwiki': 'zwiki',
'zzz wiki': 'zzz wiki',
}
# A list of category redirect template names in different languages
# Note: It *is* necessary to list template redirects here
self.category_redirect_templates = {
'_default': []
}
# A list of languages that use hard (instead of soft) category redirects
self.use_hard_category_redirects = []
# A list of disambiguation template names in different languages
self.disambiguationTemplates = {
'_default': []
}
# A list of projects that share cross-project sessions.
self.cross_projects = []
# A list with the name for cross-project cookies.
# default for wikimedia centralAuth extensions.
self.cross_projects_cookies = ['centralauth_Session',
'centralauth_Token',
'centralauth_User']
self.cross_projects_cookie_username = 'centralauth_User'
# A list with the name in the cross-language flag permissions
self.cross_allowed = []
# A list with the name of the category containing disambiguation
# pages for the various languages. Only one category per language,
# and without the namespace, so add things like:
# 'en': "Disambiguation"
self.disambcatname = {}
# On most wikis page names must start with a capital letter, but some
# languages don't use this.
self.nocapitalize = []
# attop is a list of languages that prefer to have the interwiki
# links at the top of the page.
self.interwiki_attop = []
# on_one_line is a list of languages that want the interwiki links
# one-after-another on a single line
self.interwiki_on_one_line = []
# String used as separator between interwiki links and the text
self.interwiki_text_separator = config.line_separator * 2
# Similar for category
self.category_attop = []
# on_one_line is a list of languages that want the category links
# one-after-another on a single line
self.category_on_one_line = []
# String used as separator between category links and the text
self.category_text_separator = config.line_separator * 2
# When both at the bottom should categories come after interwikilinks?
self.categories_last = []
# Which languages have a special order for putting interlanguage
# links, and what order is it? If a language is not in
# interwiki_putfirst, alphabetical order on language code is used.
# For languages that are in interwiki_putfirst, interwiki_putfirst
# is checked first, and languages are put in the order given there.
# All other languages are put after those, in code-alphabetical
# order.
self.interwiki_putfirst = {}
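# Illustrative example (hypothetical values): put German and French links
# first on the English wiki, with the rest in code-alphabetical order:
#     self.interwiki_putfirst = {'en': ['de', 'fr']}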
# Languages in interwiki_putfirst_doubled should have a number plus
# a list of languages. If there are at least the number of interwiki
# links, all languages in the list should be placed at the front as
# well as in the normal list.
self.interwiki_putfirst_doubled = {} # THIS APPEARS TO BE UNUSED!
# Some families, e. g. commons and meta, are not multilingual and
# forward interlanguage links to another family (wikipedia).
# These families can set this variable to the name of the target
# family.
self.interwiki_forward = None
# Some families, e. g. wikipedia, receive forwarded interlanguage
# links from other families, e. g. incubator, commons, or meta.
# These families can set this variable to the names of their source
# families.
self.interwiki_forwarded_from = {}
# Which language codes no longer exist and by which language code
# should they be replaced. If for example the language with code xx:
# now should get code yy:, add {'xx':'yy'} to obsolete. If all
# links to language xx: should be removed, add {'xx': None}.
self.obsolete = {}
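# Illustrative example (hypothetical values, following the rule above):
# replace the retired code 'xx' with 'yy' and remove all links to 'zz':
#     self.obsolete = {'xx': 'yy', 'zz': None}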
# Language codes of the largest wikis. They should be roughly sorted
# by size.
self.languages_by_size = []
# Some languages belong to a group where the possibility is high that
# equivalent articles have identical titles among the group.
self.language_groups = {
# languages using the arabic script (incomplete)
'arab': [
'ar', 'arz', 'ps', 'sd', 'ur', 'bjn', 'ckb',
# languages using multiple scripts, including arabic
'kk', 'ku', 'tt', 'ug', 'pnb'
],
# languages that use chinese symbols
'chinese': [
'wuu', 'zh', 'zh-classical', 'zh-yue', 'gan', 'ii',
# languages using multiple/mixed scripts, including chinese
'ja', 'za'
],
# languages that use the cyrillic alphabet
'cyril': [
'ab', 'av', 'ba', 'be', 'be-x-old', 'bg', 'bxr', 'ce', 'cu',
'cv', 'kbd', 'koi', 'kv', 'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo',
'myv', 'mhr', 'mrj', 'os', 'ru', 'rue', 'sah', 'tg', 'tk',
'udm', 'uk', 'xal',
# languages using multiple scripts, including cyrillic
'ha', 'kk', 'sh', 'sr', 'tt'
],
# languages that use a greek script
'grec': [
'el', 'grc', 'pnt'
# languages using multiple scripts, including greek
],
# languages that use the latin alphabet
'latin': [
'aa', 'ace', 'af', 'ak', 'als', 'an', 'ang', 'ast', 'ay', 'bar',
'bat-smg', 'bcl', 'bi', 'bm', 'br', 'bs', 'ca', 'cbk-zam',
'cdo', 'ceb', 'ch', 'cho', 'chy', 'co', 'crh', 'cs', 'csb',
'cy', 'da', 'de', 'diq', 'dsb', 'ee', 'eml', 'en', 'eo', 'es',
'et', 'eu', 'ext', 'ff', 'fi', 'fiu-vro', 'fj', 'fo', 'fr',
'frp', 'frr', 'fur', 'fy', 'ga', 'gag', 'gd', 'gl', 'gn', 'gv',
'hak', 'haw', 'hif', 'ho', 'hr', 'hsb', 'ht', 'hu', 'hz', 'ia',
'id', 'ie', 'ig', 'ik', 'ilo', 'io', 'is', 'it', 'jbo', 'jv',
'kaa', 'kab', 'kg', 'ki', 'kj', 'kl', 'kr', 'ksh', 'kw', 'la',
'lad', 'lb', 'lg', 'li', 'lij', 'lmo', 'ln', 'lt', 'ltg', 'lv',
'map-bms', 'mg', 'mh', 'mi', 'ms', 'mt', 'mus', 'mwl', 'na',
'nah', 'nap', 'nds', 'nds-nl', 'ng', 'nl', 'nn', 'no', 'nov',
'nrm', 'nv', 'ny', 'oc', 'om', 'pag', 'pam', 'pap', 'pcd',
'pdc', 'pfl', 'pih', 'pl', 'pms', 'pt', 'qu', 'rm', 'rn', 'ro',
'roa-rup', 'roa-tara', 'rw', 'sc', 'scn', 'sco', 'se', 'sg',
'simple', 'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'srn', 'ss', 'st',
'stq', 'su', 'sv', 'sw', 'szl', 'tet', 'tl', 'tn', 'to', 'tpi',
'tr', 'ts', 'tum', 'tw', 'ty', 'uz', 've', 'vec', 'vi', 'vls',
'vo', 'wa', 'war', 'wo', 'xh',
import math
from PySide2 import QtGui
from .core import Object
__author__ = "<NAME>"
class Point(Object):
def __init__(self, x = 0, y = 0):
super(Point, self).__init__()
if not isinstance(x, (int, float)):
x = 0
if not isinstance(y, (int, float)):
y = 0
self.x = x
self.y = y
def __str__(self):
return "Point(%s, %s)" % (self.x, self.y)
def distance(p1, p2):
return Point.distance2(p1.x, p1.y, p2.x, p2.y)
def distance2(x1, y1, x2, y2):
x = x1 - x2
y = y1 - y2
return math.sqrt(x ** 2 + y ** 2)
def interpolate(p1, p2, f):
return Point(p1.x + (p2.x - p1.x) * (1 - f), p1.y + (p2.y - p1.y) * (1 - f))
def polar(l, a):
return Point(l * math.cos(a), l * math.sin(a))
def length(self):
return Point.distance2(self.x, self.y, 0, 0)
def add(self, v):
return Point(self.x + v.x, self.y + v.y)
def setTo(self, x, y):
if not isinstance(x, (int, float)):
x = 0
if not isinstance(y, (int, float)):
y = 0
self.x = x
self.y = y
def copyFrom(self, s):
self.setTo(s.x, s.y)
def equals(self, t):
return self.x == t.x and self.y == t.y
def normalize(self, t):
scale = t / self.length()
self.x *= scale
self.y *= scale
def offset(self, dx, dy):
self.x += dx
self.y += dy
def substract(self, v):
return Point(self.x - v.x, self.y - v.y)
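# Illustrative usage sketch (not part of the original module): exercises the
# Point helpers above; assumes only the classes defined in this file.
def _point_demo():
    a = Point(0, 0)
    b = Point(3, 4)
    assert Point.distance(a, b) == 5.0      # 3-4-5 triangle
    mid = Point.interpolate(a, b, 0.5)      # f=1 returns p1, f=0 returns p2
    assert (mid.x, mid.y) == (1.5, 2.0)
    east = Point.polar(2, 0)                # length 2 at angle 0
    assert (east.x, east.y) == (2.0, 0.0)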
class Rectangle(Object):
def __init__(self, x = 0, y = 0, w = 0, h = 0):
super(Rectangle, self).__init__()
if not isinstance(x, (int, float)):
x = 0
if not isinstance(y, (int, float)):
y = 0
if not isinstance(w, (int, float)):
w = 0
if not isinstance(h, (int, float)):
h = 0
self.left = 0
self.right = 0
self.top = 0
self.bottom = 0
self.__x = x
self.__y = y
self.__width = w
self.__height = h
self.x = x
self.y = y
self.width = w
self.height = h
@property
def x(self):
return self.__x
@x.setter
def x(self, v):
self.__x = v
self.left = v
self.right = v + self.width
@property
def y(self):
return self.__y
@y.setter
def y(self, v):
self.__y = v
self.top = v
self.bottom = v + self.height
@property
def width(self):
return self.__width
@width.setter
def width(self, v):
self.__width = v
self.right = v + self.x
@property
def height(self):
return self.__height
@height.setter
def height(self, v):
self.__height = v
self.bottom = v + self.y
def contains(self, x, y):
return self.x <= x <= self.right and self.y <= y <= self.bottom
def containsRect(self, rect):
return rect.x >= self.x and rect.right <= self.right and rect.y >= self.y and rect.bottom <= self.bottom
def equals(self, v):
return v.x == self.x and v.width == self.width and v.y == self.y and v.height == self.height
def inflate(self, dx, dy):
self.width += dx
self.height += dy
def intersection(self, t):
ix = self.x if self.x > t.x else t.x
iy = self.y if self.y > t.y else t.y
ax = t.right if self.right > t.right else self.right
ay = t.bottom if self.bottom > t.bottom else self.bottom
if ix <= ax and iy <= ay:
return Rectangle(ix, iy, ax - ix, ay - iy)  # constructor takes (x, y, w, h), not corner pairs
else:
return Rectangle(0, 0, 0, 0)
def intersects(self, t):
ix = self.x if self.x > t.x else t.x
iy = self.y if self.y > t.y else t.y
ax = t.right if self.right > t.right else self.right
ay = t.bottom if self.bottom > t.bottom else self.bottom
return ix <= ax and iy <= ay
def isEmpty(self):
return self.x == 0 and self.y == 0 and self.width == 0 and self.height == 0
def offset(self, dx, dy):
self.x += dx
self.y += dy
def setEmpty(self):
self.x = 0
self.y = 0
self.width = 0
self.height = 0
def setTo(self, x, y, w, h):
if not isinstance(x, (int, float)):
x = 0
if not isinstance(y, (int, float)):
y = 0
if not isinstance(w, (int, float)):
w = 0
if not isinstance(h, (int, float)):
h = 0
self.x = x
self.y = y
self.width = w
self.height = h
def union(self, t):
ux = t.x if self.x > t.x else self.x
uy = t.y if self.y > t.y else self.y
uright = self.right if self.right > t.right else t.right
ubottom = self.bottom if self.bottom > t.bottom else t.bottom
return Rectangle(ux, uy, uright - ux, ubottom - uy)  # convert corners back to (x, y, w, h)
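# Illustrative usage sketch (not part of the original module): demonstrates
# that intersection() and union() return rectangles in (x, y, w, h) form.
def _rectangle_demo():
    a = Rectangle(0, 0, 4, 4)
    b = Rectangle(2, 2, 4, 4)
    inter = a.intersection(b)
    assert (inter.x, inter.y, inter.width, inter.height) == (2, 2, 2, 2)
    uni = a.union(b)
    assert (uni.x, uni.y, uni.width, uni.height) == (0, 0, 6, 6)
    assert a.intersects(b) and a.contains(1, 1)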
class Circle(Object):
def __init__(self, x = 0, y = 0, r = 0):
super(Circle, self).__init__()
self.x = x
self.y = y
self.r = r
def getTransform(self, m):
v1 = m.toArray([self.x, self.y, 1])
v2 = m.toArray([self.x + self.r, self.y, 1])
dx = v1[0] - v2[0]
dy = v1[1] - v2[1]
return Circle(v1[0], v1[1], math.sqrt(dx ** 2 + dy ** 2))
def getProjection(self, axis):
pro = Vec2.dot(Vec2(self.x, self.y), axis) / axis.length()
return {"min" : pro - self.r, "max" : pro + self.r}
class Polygon(Object):
def __init__(self, o = None):
super(Polygon, self).__init__()
if isinstance(o, list):
self.vertices = o
elif isinstance(o, Rectangle):
x = o.x
y = o.y
w = o.width
h = o.height
self.vertices = [Vec2(x, y), Vec2(x + w, y), Vec2(x + w, y + h), Vec2(x, y + h)]
else:
self.vertices = []
def getTransform(self, m):
res = []
for p in self.vertices:
arr = m.toArray([p.x, p.y, 1])
res.append(Vec2(arr[0], arr[1]))
return Polygon(res)
def getSides(self):
vtx = self.vertices
l = len(vtx)
res = []
if l >= 3:
preP = vtx[0]
for p in vtx[1:]:
res.append(Vec2.substract(p, preP))
preP = p
res.append(Vec2.substract(vtx[0], vtx[l - 1]))
return res
def getProjection(self, axis):
vtx = self.vertices
mini = None
maxi = None
for p in vtx:
pro = Vec2.dot(p, axis) / axis.length()
if mini is None or pro < mini:
mini = pro
if maxi is None or pro > maxi:
maxi = pro
return {"min" : mini, "max" : maxi}
def getNearestPoint(self, p1):
vtx = self.vertices
rP = vtx[0]
minDis = Vec2.distance(p1, rP)
for p2 in vtx[1:]:
d = Vec2.distance(p1, p2)
if d < minDis:
minDis = d
rP = p2
return rP
class Vec2(Object):
def __init__(self, x = 0, y = 0):
super(Vec2, self).__init__()
self.x = x
self.y = y
def __str__(self):
return "Vec2(%s, %s)" % (self.x, self.y)
def __add__(self, v):
return Vec2.add(self, v)
def __sub__(self, v):
return Vec2.substract(self, v)
def distance(v1, v2):
dx = v1.x - v2.x
dy = v1.y - v2.y
return math.sqrt(dx ** 2 + dy ** 2)
def add(v1, v2):
return Vec2(v1.x + v2.x, v1.y + v2.y)
def substract(v1, v2):
return Vec2(v1.x - v2.x, v1.y - v2.y)
def dot(v1, v2):
return v1.x * v2.x + v1.y * v2.y
def cross(v1, v2):
return v1.x * v2.y - v1.y * v2.x
def length(self):
return math.sqrt(self.x ** 2 + self.y ** 2)
def normalize(self):
l = self.length()
return Vec2(self.x / l, self.y / l)
def normL(self):
return Vec2(self.y, -self.x)
def normR(self):
return Vec2(-self.y, self.x)
class SAT(object):
def __init__(self):
raise Exception("SAT cannot be instantiated.")
def hitTest(A, B):
res = None
if isinstance(A, Polygon) and isinstance(B, Polygon):
res = SAT.hitTestPolygons(A, B)
elif isinstance(A, Circle) and isinstance(B, Circle):
res = SAT.hitTestCircles(A, B)
else:
c = None
p = None
if isinstance(A, Circle):
c = A
p = B
else:
c = B
p = A
res = SAT.hitTestCircleAndPolygon(c, p)
return res
def hitTestPolygons(a, b):
sides = a.getSides()
sides.extend(b.getSides())
axises = []
for side in sides:
axises.append(side.normL())
return SAT.__isGap(axises, a, b)
def hitTestCircles(a, b):
axis = Vec2(a.x - b.x, a.y - b.y)
proA = a.getProjection(axis)
proB = b.getProjection(axis)
if SAT.__isOverlay(proA, proB):
return False
return True
def hitTestCircleAndPolygon(c, p):
sides = p.getSides()
axises = []
for side in sides:
axises.append(side.normL())
p1 = p.getNearestPoint(Vec2(c.x, c.y))
axises.append(Vec2(p1.x - c.x, p1.y - c.y))
return SAT.__isGap(axises, c, p)
def __isGap(axises, a, b):
for axis in axises:
proA = a.getProjection(axis)
proB = b.getProjection(axis)
if SAT.__isOverlay(proA, proB):
return False
return True
def __isOverlay(proA, proB):
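# Note: despite its name, this helper returns True when there IS a gap
# between the projections (combined extents smaller than the total span);
# the callers above therefore treat True as "no collision".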
mini = None
maxi = None
if proA["min"] < proB["min"]:
mini = proA["min"]
else:
mini = proB["min"]
if proA["max"] > proB["max"]:
maxi = proA["max"]
else:
maxi = proB["max"]
return (proA["max"] - proA["min"]) + (proB["max"] - proB["min"]) < maxi - mini
class Matrix(Object):
def __init__(self, a = None, b = None, c = None, d = None, tx = None, ty = None, u = None, v = None, w = None):
super(Matrix, self).__init__()
self.a = 1
self.b = 0
self.u = 0
self.c = 0
self.d = 1
self.v = 0
self.tx = 0
self.ty = 0
self.w = 1
self.setTo(a, b, c, d, tx, ty, u, v, w)
def setTo(self, a = None, b = None, c = None, d = None, tx = None, ty = None, u = None, v = None, w = None):
if a is not None:
self.a = a
if b is not None:
self.b = b
if c is not None:
self.c = c
if d is not None:
self.d = d
if tx is not None:
self.tx = tx
if ty is not None:
self.ty = ty
if u is not None:
self.u = u
if v is not None:
self.v = v
if w is not None:
self.w = w
def isIdentity(self):
return (self.a == 1 and self.b == 0 and self.c == 0 and self.d == 1 and self.tx == 0 and self.ty == 0 and self.u == 0 and self.v == 0 and self.w == 1)
def transform(self, c):
c.setTransform(self.toQTransform(), True)
def identity(self):
self.setTo(1, 0, 0, 1, 0, 0, 0, 0, 1)
def rotate(self, q):
radian = q * math.pi / 180
cos = math.cos(radian)
sin = math.sin(radian)
mtx = Matrix(cos, sin, -sin, cos, 0, 0, 0, 0, 1)
self.add(mtx)
return self
def scale(self, sx, sy):
mtx = Matrix(sx, 0, 0, sy, 0, 0, 0, 0, 1)
self.add(mtx)
return self
def translate(self, tx, ty):
mtx = Matrix(1, 0, 0, 1, tx, ty, 0, 0, 1)
self.add(mtx)
return self
def skew(self, kx, ky):
mtx = Matrix(1, ky, kx, 1, 0, 0, 0, 0, 1)
self.add(mtx)
return self
def add(self, mtx):
a = self.a * mtx.a + self.b * mtx.c + self.u * mtx.tx
b = self.a * mtx.b + self.b * mtx.d + self.u * mtx.ty
u = self.a * mtx.u + self.b * mtx.v + self.u * mtx.w
c = self.c * mtx.a + self.d * mtx.c + self.v * mtx.tx
d = self.c * mtx.b + self.d * mtx.d + self.v * mtx.ty
v = self.c * mtx.u + self.d * mtx.v + self.v * mtx.w
tx = self.tx * mtx.a + self.ty * mtx.c + self.w * mtx.tx
# remaining terms follow the same row pattern as a/b/u and c/d/v above
ty = self.tx * mtx.b + self.ty * mtx.d + self.w * mtx.ty
w = self.tx * mtx.u + self.ty * mtx.v + self.w * mtx.w
self.setTo(a, b, c, d, tx, ty, u, v, w)
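# Illustrative usage sketch (not part of the original module): the transform
# methods mutate the matrix in place and return self, so they can be chained.
def _matrix_demo():
    mtx = Matrix()
    mtx.translate(10, 20).rotate(90).scale(2, 2)
    assert not mtx.isIdentity()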
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
The `DistArray` data structure.
`DistArray` objects are proxies for collections of `LocalArray` objects. They
are meant to roughly emulate NumPy `ndarray`\s.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from __future__ import absolute_import, division
import operator
from itertools import product
from functools import reduce
import numpy as np
import distarray.localapi
from distarray.metadata_utils import sanitize_indices
from distarray.globalapi.maps import Distribution, asdistribution
from distarray.utils import _raise_nie
from distarray.metadata_utils import normalize_reduction_axes
__all__ = ['DistArray']
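# Illustrative usage sketch (not part of the original module). The exact
# Context/Distribution construction below is an assumption based on the
# imports above; a typical session looks roughly like:
#
#     from distarray.globalapi import Context
#     context = Context()                          # connect to the engines
#     dist = Distribution(context, shape=(4, 4))
#     da = DistArray(dist, dtype=float)            # empty distributed array
#     da.fill(1.0)
#     total = da.sum().tondarray()                 # reduce, then gather locally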
# ---------------------------------------------------------------------------
# Code
# ---------------------------------------------------------------------------
class DistArray(object):
__array_priority__ = 20.0
def __init__(self, distribution, dtype=float):
"""Creates an empty DistArray according to the `distribution` given."""
def _local_create(comm, ddpr, dtype):
from distarray.localapi import empty
from distarray.localapi.maps import Distribution
if len(ddpr):
dim_data = ddpr[comm.Get_rank()]
else:
dim_data = ()
dist = Distribution(comm=comm, dim_data=dim_data)
return proxyize(empty(dist, dtype))
ctx = distribution.context
ddpr = distribution.get_dim_data_per_rank()
da_key = ctx.apply(_local_create, (distribution.comm, ddpr, dtype),
targets=distribution.targets)
self.distribution = distribution
self.key = da_key[0]
self._dtype = dtype
@classmethod
def from_localarrays(cls, key, context=None, targets=None, distribution=None,
dtype=None):
"""The caller has already created the LocalArray objects. `key` is
their name on the engines. This classmethod creates a DistArray that
refers to these LocalArrays.
Either a `context` or a `distribution` must also be provided. If
`context` is provided, a ``dim_data_per_rank`` will be pulled from
the existing ``LocalArray``\s and a ``Distribution`` will be created
from it. If `distribution` is provided, it should accurately
reflect the distribution of the existing ``LocalArray``\s.
If `dtype` is not provided, it will be fetched from the engines.
"""
def get_dim_datas_and_dtype(arr):
return (arr.dim_data, arr.dtype)
da = cls.__new__(cls)
da.key = key
if (context is None) == (distribution is None):
errmsg = "Must provide `context` or `distribution` but not both."
raise RuntimeError(errmsg)
# has context, get dist and dtype
elif (distribution is None) and (dtype is None):
res = context.apply(get_dim_datas_and_dtype, args=(key,),
targets=targets)
dim_datas = [i[0] for i in res]
dtypes = [i[1] for i in res]
da._dtype = dtypes[0]
da.distribution = Distribution.from_dim_data_per_rank(context,
dim_datas,
targets)
# has context and dtype, get dist
elif (distribution is None) and (dtype is not None):
da._dtype = dtype
dim_datas = context.apply(getattr, args=(key, 'dim_data'),
targets=targets)
da.distribution = Distribution.from_dim_data_per_rank(context,
dim_datas,
targets)
# has distribution, get dtype
elif (distribution is not None) and (dtype is None):
da.distribution = distribution
da._dtype = distribution.context.apply(getattr,
args=(key, 'dtype'),
targets=[0])[0]
# has distribution and dtype
elif (distribution is not None) and (dtype is not None):
da.distribution = distribution
da._dtype = dtype
# sanity check that I didn't miss any cases above, because this is a
# confusing function
else:
assert False
return da
def __del__(self):
try:
self.context.delete_key(self.key, self.targets)
except Exception:
pass
def __repr__(self):
s = '<DistArray(shape=%r, targets=%r)>' % \
(self.shape, self.targets)
return s
def _get_view(self, index):
# to be run locally
def get_view(arr, index, ddpr, comm):
from distarray.localapi.maps import Distribution
if len(ddpr) == 0:
dim_data = ()
else:
dim_data = ddpr[comm.Get_rank()]
local_distribution = Distribution(comm=comm, dim_data=dim_data)
result = arr.global_index.get_slice(index, local_distribution)
return proxyize(result)
new_distribution = self.distribution.slice(index)
ddpr = new_distribution.get_dim_data_per_rank()
args = [self.key, index, ddpr, new_distribution.comm]
targets = new_distribution.targets
result = self.context.apply(get_view, args=args, targets=targets)[0]
return DistArray.from_localarrays(key=result,
targets=targets,
distribution=new_distribution,
dtype=self.dtype)
def _get_value(self, index):
# to be run locally
def get_value(arr, index):
return arr.global_index[index]
args = [self.key, index]
targets = self.distribution.owning_targets(index)
result = self.context.apply(get_value, args=args, targets=targets)
return [i for i in result if i is not None][0]
def _checked_getitem(self, index):
# to be run locally
def checked_getitem(arr, index):
return arr.global_index.checked_getitem(index)
args = [self.key, index]
targets = self.distribution.owning_targets(index)
result = self.context.apply(checked_getitem, args=args, targets=targets)
somethings = [i for i in result if i is not None]
if len(somethings) == 0:
# all return None
raise IndexError("Index %r is is not present." % (index,))
elif len(somethings) == 1:
return somethings[0]
else:
return result
def __getitem__(self, index):
return_type, index = sanitize_indices(index, ndim=self.ndim,
shape=self.shape)
if not self.distribution.has_precise_index:
result = self._checked_getitem(index)
elif return_type == 'view':
result = self._get_view(index)
elif return_type == 'value':
result = self._get_value(index)
else:
assert False
return result
def _set_value(self, index, value):
# to be run locally
def set_value(arr, index, value):
arr.global_index[index] = value
args = [self.key, index, value]
targets = self.distribution.owning_targets(index)
self.context.apply(set_value, args=args, targets=targets)
def _set_view(self, index, value):
# to be run locally
def set_view(arr, index, value, ddpr, comm):
from distarray.localapi.localarray import LocalArray
from distarray.localapi.maps import Distribution
if len(ddpr) == 0:
dim_data = ()
else:
dim_data = ddpr[comm.Get_rank()]
dist = Distribution(comm=comm, dim_data=dim_data)
if isinstance(value, LocalArray):
arr.global_index[index] = value.ndarray
else:
arr.global_index[index] = value[dist.global_slice]
new_distribution = self.distribution.slice(index)
if isinstance(value, DistArray):
if not value.distribution.is_compatible(new_distribution):
msg = "rvalue Distribution not compatible."
raise ValueError(msg)
value = value.key
else:
value = np.asarray(value) # convert to array
if value.shape != new_distribution.shape:
msg = "Slice shape does not equal rvalue shape."
raise ValueError(msg)
ddpr = new_distribution.get_dim_data_per_rank()
comm = new_distribution.comm
targets = new_distribution.targets
args = [self.key, index, value, ddpr, comm]
self.context.apply(set_view, args=args, targets=targets)
def _checked_setitem(self, index, value):
# to be run locally
def checked_setitem(arr, index, value):
return arr.global_index.checked_setitem(index, value)
args = [self.key, index, value]
targets = self.distribution.owning_targets(index)
result = self.context.apply(checked_setitem, args=args,
targets=targets)
result = [i for i in result if i is not None]
if len(result) > 1:
raise IndexError("Setting more than one result (%s) is "
"not supported yet." % (result,))
elif result == []:
raise IndexError("Index %s is out of bounds" % (index,))
def __setitem__(self, index, value):
set_type, index = sanitize_indices(index, ndim=self.ndim,
shape=self.shape)
if not self.distribution.has_precise_index:
self._checked_setitem(index, value)
elif set_type == 'view':
self._set_view(index, value)
elif set_type == 'value':
self._set_value(index, value)
else:
assert False
@property
def context(self):
return self.distribution.context
@property
def shape(self):
return self.distribution.shape
@property
def global_size(self):
return reduce(operator.mul, self.shape)
@property
def dist(self):
return self.distribution.dist
@property
def grid_shape(self):
return self.distribution.grid_shape
@property
def ndim(self):
return len(self.shape)
@property
def nbytes(self):
return self.global_size * self.itemsize
@property
def dtype(self):
return np.dtype(self._dtype)
@property
def itemsize(self):
return self.dtype.itemsize  # go through the dtype property so plain types like float are normalized
@property
def targets(self):
return self.distribution.targets
@property
def __array_interface__(self):
return {'shape': self.shape,
'typestr': self.dtype.str,
'data': self.tondarray(),
'version': 3}
def tondarray(self):
"""Returns the distributed array as an ndarray."""
arr = np.empty(self.shape, dtype=self.dtype)
local_arrays = self.get_localarrays()
try:
for local_array in local_arrays:
gslice = local_array.distribution.global_slice
arr[gslice] = local_array.ndarray
except AttributeError:
# do it the slow way
for local_array in local_arrays:
maps = (list(ax_map.global_iter) for ax_map in
local_array.distribution)
for index in product(*maps):
arr[index] = local_array.global_index[index]
return arr
toarray = tondarray
def fill(self, value):
def inner_fill(arr, value):
arr.fill(value)
self.context.apply(inner_fill, args=(self.key, value), targets=self.targets)
def _reduce(self, local_reduce_name, axes=None, dtype=None, out=None):
if any(0 in localshape for localshape in self.localshapes()):
raise NotImplementedError("Reduction not implemented for empty "
"LocalArrays")
if out is not None:
_raise_nie()
dtype = dtype or self.dtype
out_dist = self.distribution.reduce(axes=axes)
ddpr = out_dist.get_dim_data_per_rank()
def _local_reduce(local_name, larr, out_comm, ddpr, dtype, axes):
import distarray.localapi.localarray as la
local_reducer = getattr(la, local_name)
res = proxyize(la.local_reduction(out_comm, local_reducer, larr, # noqa
ddpr, dtype, axes))
return res
local_reduce_args = (local_reduce_name, self.key, out_dist.comm, ddpr,
dtype, normalize_reduction_axes(axes, self.ndim))
out_key = self.context.apply(_local_reduce, local_reduce_args,
targets=self.targets)[0]
return DistArray.from_localarrays(key=out_key, distribution=out_dist,
dtype=dtype)
def sum(self, axis=None, dtype=None, out=None):
"""Return the sum of array elements over the given axis."""
if dtype is None and self.dtype == np.bool_:  # np.bool alias was removed from modern NumPy
dtype = np.uint64
return self._reduce('sum_reducer', axis, dtype, out)
def mean(self, axis=None, dtype=float, out=None):
"""Return the mean of array elements over the given axis."""
return self._reduce('mean_reducer', axis, dtype, out)
def var(self, axis=None, dtype=float, out=None):
"""Return the variance of array elements over the given axis."""
return self._reduce('var_reducer', axis, dtype, out)
def std(self, axis=None, dtype=float, out=None):
"""Return the standard deviation of array elements over the given axis."""
return self._reduce('std_reducer', axis, dtype, out)
def min(self, axis=None, dtype=None, out=None):
"""Return the minimum of array elements over the given axis."""
return self._reduce('min_reducer', axis, dtype, out)
def max(self, axis=None, dtype=None, out=None):
"""Return the maximum of array elements over the given axis."""
return self._reduce('max_reducer', axis, dtype, out)
def get_ndarrays(self):
"""Pull the local ndarrays from the engines.
Returns
-------
list of ndarrays
one ndarray per process
"""
def get(key):
return key.ndarray
return self.context.apply(get, args=(self.key,), targets=self.targets)
def get_localarrays(self):
"""Pull the LocalArray objects from the engines.
Returns
-------
list of localarrays
one localarray per process
"""
def get(key):
return key.copy()
return self.context.apply(get, args=(self.key,), targets=self.targets)
def localshapes(self):
return self.distribution.localshapes()
def view(self, dtype=None):
"""
New view of array with the same data.
+ m.x2328 + m.x2428 + m.x2528 + m.x2628 + m.x2728
+ m.x2828 + m.x2928 == 1)
m.c3030 = Constraint(expr= m.x29 + m.x129 + m.x229 + m.x329 + m.x429 + m.x529 + m.x629 + m.x729 + m.x829 + m.x929
+ m.x1029 + m.x1129 + m.x1229 + m.x1329 + m.x1429 + m.x1529 + m.x1629 + m.x1729 + m.x1829
+ m.x1929 + m.x2029 + m.x2129 + m.x2229 + m.x2329 + m.x2429 + m.x2529 + m.x2629 + m.x2729
+ m.x2829 + m.x2929 == 1)
m.c3031 = Constraint(expr= m.x30 + m.x130 + m.x230 + m.x330 + m.x430 + m.x530 + m.x630 + m.x730 + m.x830 + m.x930
+ m.x1030 + m.x1130 + m.x1230 + m.x1330 + m.x1430 + m.x1530 + m.x1630 + m.x1730 + m.x1830
+ m.x1930 + m.x2030 + m.x2130 + m.x2230 + m.x2330 + m.x2430 + m.x2530 + m.x2630 + m.x2730
+ m.x2830 + m.x2930 == 1)
m.c3032 = Constraint(expr= m.x31 + m.x131 + m.x231 + m.x331 + m.x431 + m.x531 + m.x631 + m.x731 + m.x831 + m.x931
+ m.x1031 + m.x1131 + m.x1231 + m.x1331 + m.x1431 + m.x1531 + m.x1631 + m.x1731 + m.x1831
+ m.x1931 + m.x2031 + m.x2131 + m.x2231 + m.x2331 + m.x2431 + m.x2531 + m.x2631 + m.x2731
+ m.x2831 + m.x2931 == 1)
m.c3033 = Constraint(expr= m.x32 + m.x132 + m.x232 + m.x332 + m.x432 + m.x532 + m.x632 + m.x732 + m.x832 + m.x932
+ m.x1032 + m.x1132 + m.x1232 + m.x1332 + m.x1432 + m.x1532 + m.x1632 + m.x1732 + m.x1832
+ m.x1932 + m.x2032 + m.x2132 + m.x2232 + m.x2332 + m.x2432 + m.x2532 + m.x2632 + m.x2732
+ m.x2832 + m.x2932 == 1)
m.c3034 = Constraint(expr= m.x33 + m.x133 + m.x233 + m.x333 + m.x433 + m.x533 + m.x633 + m.x733 + m.x833 + m.x933
+ m.x1033 + m.x1133 + m.x1233 + m.x1333 + m.x1433 + m.x1533 + m.x1633 + m.x1733 + m.x1833
+ m.x1933 + m.x2033 + m.x2133 + m.x2233 + m.x2333 + m.x2433 + m.x2533 + m.x2633 + m.x2733
+ m.x2833 + m.x2933 == 1)
m.c3035 = Constraint(expr= m.x34 + m.x134 + m.x234 + m.x334 + m.x434 + m.x534 + m.x634 + m.x734 + m.x834 + m.x934
+ m.x1034 + m.x1134 + m.x1234 + m.x1334 + m.x1434 + m.x1534 + m.x1634 + m.x1734 + m.x1834
+ m.x1934 + m.x2034 + m.x2134 + m.x2234 + m.x2334 + m.x2434 + m.x2534 + m.x2634 + m.x2734
+ m.x2834 + m.x2934 == 1)
m.c3036 = Constraint(expr= m.x35 + m.x135 + m.x235 + m.x335 + m.x435 + m.x535 + m.x635 + m.x735 + m.x835 + m.x935
+ m.x1035 + m.x1135 + m.x1235 + m.x1335 + m.x1435 + m.x1535 + m.x1635 + m.x1735 + m.x1835
+ m.x1935 + m.x2035 + m.x2135 + m.x2235 + m.x2335 + m.x2435 + m.x2535 + m.x2635 + m.x2735
+ m.x2835 + m.x2935 == 1)
m.c3037 = Constraint(expr= m.x36 + m.x136 + m.x236 + m.x336 + m.x436 + m.x536 + m.x636 + m.x736 + m.x836 + m.x936
+ m.x1036 + m.x1136 + m.x1236 + m.x1336 + m.x1436 + m.x1536 + m.x1636 + m.x1736 + m.x1836
+ m.x1936 + m.x2036 + m.x2136 + m.x2236 + m.x2336 + m.x2436 + m.x2536 + m.x2636 + m.x2736
+ m.x2836 + m.x2936 == 1)
m.c3038 = Constraint(expr= m.x37 + m.x137 + m.x237 + m.x337 + m.x437 + m.x537 + m.x637 + m.x737 + m.x837 + m.x937
+ m.x1037 + m.x1137 + m.x1237 + m.x1337 + m.x1437 + m.x1537 + m.x1637 + m.x1737 + m.x1837
+ m.x1937 + m.x2037 + m.x2137 + m.x2237 + m.x2337 + m.x2437 + m.x2537 + m.x2637 + m.x2737
+ m.x2837 + m.x2937 == 1)
m.c3039 = Constraint(expr= m.x38 + m.x138 + m.x238 + m.x338 + m.x438 + m.x538 + m.x638 + m.x738 + m.x838 + m.x938
+ m.x1038 + m.x1138 + m.x1238 + m.x1338 + m.x1438 + m.x1538 + m.x1638 + m.x1738 + m.x1838
+ m.x1938 + m.x2038 + m.x2138 + m.x2238 + m.x2338 + m.x2438 + m.x2538 + m.x2638 + m.x2738
+ m.x2838 + m.x2938 == 1)
m.c3040 = Constraint(expr= m.x39 + m.x139 + m.x239 + m.x339 + m.x439 + m.x539 + m.x639 + m.x739 + m.x839 + m.x939
+ m.x1039 + m.x1139 + m.x1239 + m.x1339 + m.x1439 + m.x1539 + m.x1639 + m.x1739 + m.x1839
+ m.x1939 + m.x2039 + m.x2139 + m.x2239 + m.x2339 + m.x2439 + m.x2539 + m.x2639 + m.x2739
+ m.x2839 + m.x2939 == 1)
m.c3041 = Constraint(expr= m.x40 + m.x140 + m.x240 + m.x340 + m.x440 + m.x540 + m.x640 + m.x740 + m.x840 + m.x940
+ m.x1040 + m.x1140 + m.x1240 + m.x1340 + m.x1440 + m.x1540 + m.x1640 + m.x1740 + m.x1840
+ m.x1940 + m.x2040 + m.x2140 + m.x2240 + m.x2340 + m.x2440 + m.x2540 + m.x2640 + m.x2740
+ m.x2840 + m.x2940 == 1)
m.c3042 = Constraint(expr= m.x41 + m.x141 + m.x241 + m.x341 + m.x441 + m.x541 + m.x641 + m.x741 + m.x841 + m.x941
+ m.x1041 + m.x1141 + m.x1241 + m.x1341 + m.x1441 + m.x1541 + m.x1641 + m.x1741 + m.x1841
+ m.x1941 + m.x2041 + m.x2141 + m.x2241 + m.x2341 + m.x2441 + m.x2541 + m.x2641 + m.x2741
+ m.x2841 + m.x2941 == 1)
m.c3043 = Constraint(expr= m.x42 + m.x142 + m.x242 + m.x342 + m.x442 + m.x542 + m.x642 + m.x742 + m.x842 + m.x942
+ m.x1042 + m.x1142 + m.x1242 + m.x1342 + m.x1442 + m.x1542 + m.x1642 + m.x1742 + m.x1842
+ m.x1942 + m.x2042 + m.x2142 + m.x2242 + m.x2342 + m.x2442 + m.x2542 + m.x2642 + m.x2742
+ m.x2842 + m.x2942 == 1)
m.c3044 = Constraint(expr= m.x43 + m.x143 + m.x243 + m.x343 + m.x443 + m.x543 + m.x643 + m.x743 + m.x843 + m.x943
+ m.x1043 + m.x1143 + m.x1243 + m.x1343 + m.x1443 + m.x1543 + m.x1643 + m.x1743 + m.x1843
+ m.x1943 + m.x2043 + m.x2143 + m.x2243 + m.x2343 + m.x2443 + m.x2543 + m.x2643 + m.x2743
+ m.x2843 + m.x2943 == 1)
m.c3045 = Constraint(expr= m.x44 + m.x144 + m.x244 + m.x344 + m.x444 + m.x544 + m.x644 + m.x744 + m.x844 + m.x944
+ m.x1044 + m.x1144 + m.x1244 + m.x1344 + m.x1444 + m.x1544 + m.x1644 + m.x1744 + m.x1844
+ m.x1944 + m.x2044 + m.x2144 + m.x2244 + m.x2344 + m.x2444 + m.x2544 + m.x2644 + m.x2744
+ m.x2844 + m.x2944 == 1)
m.c3046 = Constraint(expr= m.x45 + m.x145 + m.x245 + m.x345 + m.x445 + m.x545 + m.x645 + m.x745 + m.x845 + m.x945
+ m.x1045 + m.x1145 + m.x1245 + m.x1345 + m.x1445 + m.x1545 + m.x1645 + m.x1745 + m.x1845
+ m.x1945 + m.x2045 + m.x2145 + m.x2245 + m.x2345 + m.x2445 + m.x2545 + m.x2645 + m.x2745
+ m.x2845 + m.x2945 == 1)
m.c3047 = Constraint(expr= m.x46 + m.x146 + m.x246 + m.x346 + m.x446 + m.x546 + m.x646 + m.x746 + m.x846 + m.x946
+ m.x1046 + m.x1146 + m.x1246 + m.x1346 + m.x1446 + m.x1546 + m.x1646 + m.x1746 + m.x1846
+ m.x1946 + m.x2046 + m.x2146 + m.x2246 + m.x2346 + m.x2446 + m.x2546 + m.x2646 + m.x2746
+ m.x2846 + m.x2946 == 1)
m.c3048 = Constraint(expr= m.x47 + m.x147 + m.x247 + m.x347 + m.x447 + m.x547 + m.x647 + m.x747 + m.x847 + m.x947
+ m.x1047 + m.x1147 + m.x1247 + m.x1347 + m.x1447 + m.x1547 + m.x1647 + m.x1747 + m.x1847
+ m.x1947 + m.x2047 + m.x2147 + m.x2247 + m.x2347 + m.x2447 + m.x2547 + m.x2647 + m.x2747
+ m.x2847 + m.x2947 == 1)
m.c3049 = Constraint(expr= m.x48 + m.x148 + m.x248 + m.x348 + m.x448 + m.x548 + m.x648 + m.x748 + m.x848 + m.x948
+ m.x1048 + m.x1148 + m.x1248 + m.x1348 + m.x1448 + m.x1548 + m.x1648 + m.x1748 + m.x1848
+ m.x1948 + m.x2048 + m.x2148 + m.x2248 + m.x2348 + m.x2448 + m.x2548 + m.x2648 + m.x2748
+ m.x2848 + m.x2948 == 1)
m.c3050 = Constraint(expr= m.x49 + m.x149 + m.x249 + m.x349 + m.x449 + m.x549 + m.x649 + m.x749 + m.x849 + m.x949
+ m.x1049 + m.x1149 + m.x1249 + m.x1349 + m.x1449 + m.x1549 + m.x1649 + m.x1749 + m.x1849
+ m.x1949 + m.x2049 + m.x2149 + m.x2249 + m.x2349 + m.x2449 + m.x2549 + m.x2649 + m.x2749
+ m.x2849 + m.x2949 == 1)
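# Illustrative sketch (not part of the generated model above): each c30xx
# constraint says "column j is chosen exactly once" over variables named
# x{100*i + j}. Hand-written Pyomo would express the same family with an
# indexed rule instead of flat, machine-generated expressions; the index
# bounds here are inferred from the constraints shown and may not match
# the full model.
from pyomo.environ import ConcreteModel, Var, Constraint, RangeSet, Binary

demo = ConcreteModel()
demo.I = RangeSet(0, 29)                 # 30 rows, matching x49 .. x2949
demo.J = RangeSet(0, 49)                 # columns covered by the c30xx family
demo.x = Var(demo.I, demo.J, domain=Binary)

def _one_per_column(model, j):
    # exactly one row i is assigned to column j
    return sum(model.x[i, j] for i in model.I) == 1

demo.assign = Constraint(demo.J, rule=_one_per_column)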
% l)
self.save_flow(up_flow_f, '%s_flow_f_upflow' % l)
self.save_flow(up_flow_b, '%s_flow_b_upflow' % l)
x2_warp = self.warping_layer(x2, flow_f)
x1_warp = self.warping_layer(x1, flow_b)
# if norm feature
if self.if_norm_before_cost_volume:
x1, x2_warp = network_tools.normalize_features((x1, x2_warp), normalize=True, center=True, moments_across_channels=self.norm_moments_across_channels,
moments_across_images=self.norm_moments_across_images)
x2, x1_warp = network_tools.normalize_features((x2, x1_warp), normalize=True, center=True, moments_across_channels=self.norm_moments_across_channels,
moments_across_images=self.norm_moments_across_images)
# correlation
out_corr_f = Correlation(pad_size=self.search_range, kernel_size=1, max_displacement=self.search_range, stride1=1, stride2=1, corr_multiply=1)(x1, x2_warp)
out_corr_b = Correlation(pad_size=self.search_range, kernel_size=1, max_displacement=self.search_range, stride1=1, stride2=1, corr_multiply=1)(x2, x1_warp)
out_corr_relu_f = self.leakyRELU(out_corr_f)
out_corr_relu_b = self.leakyRELU(out_corr_b)
x_intm_f, flow_res_f = self.flow_estimators(torch.cat([out_corr_relu_f, x1_1by1, flow_f], dim=1))
x_intm_b, flow_res_b = self.flow_estimators(torch.cat([out_corr_relu_b, x2_1by1, flow_b], dim=1))
flow_f = flow_f + flow_res_f
flow_b = flow_b + flow_res_b
flow_fine_f = self.context_networks(torch.cat([x_intm_f, flow_f], dim=1))
flow_fine_b = self.context_networks(torch.cat([x_intm_b, flow_b], dim=1))
flow_f = flow_f + flow_fine_f
flow_b = flow_b + flow_fine_b
flows.append([flow_f, flow_b])
if l == self.output_level:
break
flow_f_out = upsample2d_flow_as(flow_f, x1_raw, mode="bilinear", if_rate=True)
flow_b_out = upsample2d_flow_as(flow_b, x1_raw, mode="bilinear", if_rate=True)
if self.if_save_running_process:
self.save_flow(flow_f_out, 'out_flow_f_up2d')
self.save_flow(flow_b_out, 'out_flow_b_up2d')
self.save_image(x1_raw, 'image1')
self.save_image(x2_raw, 'image2')
if self.if_upsample_flow_output:
if self.if_upsample_flow_mask:
_, up_flow_f, up_flow_f_mask = self.upsample_model(torch.cat((up_flow_f, up_flow_f_mask), dim=1), x1_raw, if_output_level=True)
_, up_flow_b, up_flow_b_mask = self.upsample_model(torch.cat((up_flow_b, up_flow_b_mask), dim=1), x2_raw, if_output_level=True)
# flow_f_out = flow_f_out * up_flow_f_mask + self.warping_layer(flow_f_out, up_flow_f) * (1 - up_flow_f_mask)
# flow_b_out = flow_b_out * up_flow_b_mask + self.warping_layer(flow_b_out, up_flow_b) * (1 - up_flow_b_mask)
flow_f_out = flow_f_out * up_flow_f_mask + tools.torch_warp(flow_f_out, up_flow_f) * (1 - up_flow_f_mask)
flow_b_out = flow_b_out * up_flow_b_mask + tools.torch_warp(flow_b_out, up_flow_b) * (1 - up_flow_b_mask)
if self.if_save_running_process:
self.save_flow(flow_f_out, 'out_flow_f_upbyflow')
self.save_flow(flow_b_out, 'out_flow_b_upbyflow')
self.save_flow(up_flow_f, '%s_flow_f_upflow' % 'out')
self.save_flow(up_flow_b, '%s_flow_b_upflow' % 'out')
self.save_mask(up_flow_f_mask, '%s_flow_f_upmask' % 'out')
self.save_mask(up_flow_b_mask, '%s_flow_b_upmask' % 'out')
else:
up_flow_f = self.upsample_model(up_flow_f, x1_raw, if_output_level=True)
up_flow_b = self.upsample_model(up_flow_b, x2_raw, if_output_level=True)
# flow_f_out = self.warping_layer(flow_f_out, up_flow_f)
# flow_b_out = self.warping_layer(flow_b_out, up_flow_b)
flow_f_out = tools.torch_warp(flow_f_out, up_flow_f)
flow_b_out = tools.torch_warp(flow_b_out, up_flow_b)
if self.if_save_running_process:
self.save_flow(flow_f_out, 'out_flow_f_upbyflow')
self.save_flow(flow_b_out, 'out_flow_b_upbyflow')
self.save_flow(up_flow_f, '%s_flow_f_upflow' % 'out')
self.save_flow(up_flow_b, '%s_flow_b_upflow' % 'out')
return flow_f_out, flow_b_out, flows[::-1]
def app_refine(self, img, flow, mask):
# occlusion mask: 0-1, where occlusion area is 0
input_im = img * mask
app_flow = self.appflow_model(torch.cat((input_im, img, mask), dim=1))
# app_flow = upsample2d_as(app_flow, input_im, mode="bilinear") * (1.0 / self._div_flow)
app_flow = app_flow * (1 - mask)
# img_restore = self.warping_layer_inpaint(input_im, app_flow)
flow_restore = self.warping_layer_inpaint(flow, app_flow)
# img_restore = self.warping_layer_inpaint(input_im, app_flow)
# flow_restore = tools.torch_warp(flow, app_flow)
img_restore = tools.torch_warp(input_im, app_flow)
return flow_restore, app_flow, input_im, img_restore
def app_loss(self, img_ori, img_restore, occ_mask):
diff = img_ori - img_restore
loss_mask = 1 - occ_mask # only take care about the inpainting area
diff = (torch.abs(diff) + 0.01).pow(0.4)
diff = diff * loss_mask
diff_sum = torch.sum(diff)
loss_mean = diff_sum / (torch.sum(loss_mask) * 2 + 1e-6)
return loss_mean
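# Illustrative sketch (not part of the original class): the same masked
# "abs robust" penalty used by app_loss above, written as a standalone
# photometric loss between an image and its warped counterpart; the
# channel-count normalization here is an assumption.
def _photo_loss_abs_robust_sketch(im_ori, im_warp, occ_mask):
    # occ_mask is 1 where a pixel is visible in both frames
    diff = (torch.abs(im_ori - im_warp) + 0.01).pow(0.4)
    diff = diff * occ_mask
    return torch.sum(diff) / (torch.sum(occ_mask) * im_ori.shape[1] + 1e-6)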
def forward(self, input_dict: dict):
'''
:param input_dict: im1, im2, im1_raw, im2_raw, start, if_loss
:return: output_dict: flows, flow_f_out, flow_b_out, photo_loss
'''
im1_ori, im2_ori = input_dict['im1'], input_dict['im2']
if input_dict['if_loss']:
sp_im1_ori, sp_im2_ori = input_dict['im1_sp'], input_dict['im2_sp']
if self.input_or_sp_input >= 1:
im1, im2 = im1_ori, im2_ori
elif self.input_or_sp_input > 0:
if tools.random_flag(threshold_0_1=self.input_or_sp_input):
im1, im2 = im1_ori, im2_ori
else:
im1, im2 = sp_im1_ori, sp_im2_ori
else:
im1, im2 = sp_im1_ori, sp_im2_ori
else:
im1, im2 = im1_ori, im2_ori
# whether this is a test run
if 'if_test' in input_dict.keys():
if_test = input_dict['if_test']
else:
if_test = False
# whether to return some intermediate results for display
if 'save_running_process' in input_dict.keys():
self.if_save_running_process = input_dict['save_running_process']
else:
self.if_save_running_process = False
if self.if_save_running_process:
if 'process_dir' in input_dict.keys():
self.save_running_process_dir = input_dict['process_dir']
else:
self.if_save_running_process = False  # if no save directory is specified, stay False and do not save
self.save_running_process_dir = None
# whether to save intermediate results during testing or validation
if 'if_show' in input_dict.keys():
if_show = input_dict['if_show']
else:
if_show = False
output_dict = {}
# flow_f_pwc_out, flow_b_pwc_out, flows = self.forward_2_frame(im1, im2)
flow_f_pwc_out, flow_b_pwc_out, flows = self.forward_2_frame_v2(im1, im2)
occ_fw, occ_bw = self.occ_check_model(flow_f=flow_f_pwc_out, flow_b=flow_b_pwc_out)
if self.app_loss_weight > 0:
if self.app_occ_stop_gradient:
occ_fw, occ_bw = occ_fw.clone().detach(), occ_bw.clone().detach()
# tools.check_tensor(occ_fw, '%s' % (torch.sum(occ_fw == 1) / torch.sum(occ_fw)))
flow_f, app_flow_1, masked_im1, im1_restore = self.app_refine(img=im1, flow=flow_f_pwc_out, mask=occ_fw)
# tools.check_tensor(app_flow_1, 'app_flow_1')
flow_b, app_flow_2, masked_im2, im2_restore = self.app_refine(img=im2, flow=flow_b_pwc_out, mask=occ_bw)
app_loss = self.app_loss(im1, im1_restore, occ_fw)
app_loss += self.app_loss(im2, im2_restore, occ_bw)
app_loss *= self.app_loss_weight
# tools.check_tensor(app_loss, 'app_loss')
# print(' ')
if input_dict['if_loss']:
output_dict['app_loss'] = app_loss
if self.app_distilation_weight > 0:
flow_fw_label = flow_f.clone().detach()
flow_bw_label = flow_b.clone().detach()
appd_loss = network_tools.photo_loss_multi_type(x=flow_fw_label, y=flow_f_pwc_out, occ_mask=1 - occ_fw, photo_loss_type='abs_robust', photo_loss_use_occ=True)
appd_loss += network_tools.photo_loss_multi_type(x=flow_bw_label, y=flow_b_pwc_out, occ_mask=1 - occ_bw, photo_loss_type='abs_robust', photo_loss_use_occ=True)
appd_loss *= self.app_distilation_weight
if input_dict['if_loss']:
output_dict['appd_loss'] = appd_loss
if if_test:
flow_f_out = flow_f_pwc_out # use pwc output
flow_b_out = flow_b_pwc_out
else:
flow_f_out = flow_f_pwc_out # use pwc output
flow_b_out = flow_b_pwc_out
# flow_f_out = flow_f # use app refine output
# flow_b_out = flow_b
else:
if input_dict['if_loss']:
output_dict['appd_loss'] = None
flow_f_out = flow_f
flow_b_out = flow_b
if if_show:
output_dict['app_flow_1'] = app_flow_1
output_dict['masked_im1'] = masked_im1
output_dict['im1_restore'] = im1_restore
else:
if input_dict['if_loss']:
output_dict['app_loss'] = None
flow_f_out = flow_f_pwc_out
flow_b_out = flow_b_pwc_out
if if_show:
output_dict['app_flow_1'] = None
output_dict['masked_im1'] = None
output_dict['im1_restore'] = None
output_dict['flow_f_out'] = flow_f_out
output_dict['flow_b_out'] = flow_b_out
output_dict['occ_fw'] = occ_fw
output_dict['occ_bw'] = occ_bw
if self.if_test:
output_dict['flows'] = flows
if input_dict['if_loss']:
# compute the smoothness loss
if self.smooth_level == 'final':
s_flow_f, s_flow_b = flow_f_out, flow_b_out
s_im1, s_im2 = im1_ori, im2_ori
elif self.smooth_level == '1/4':
s_flow_f, s_flow_b = flows[0]
_, _, temp_h, temp_w = s_flow_f.size()
s_im1 = F.interpolate(im1_ori, (temp_h, temp_w), mode='area')
s_im2 = F.interpolate(im2_ori, (temp_h, temp_w), mode='area')
# tools.check_tensor(s_im1, 's_im1') # TODO
else:
raise ValueError('wrong smooth level choosed: %s' % self.smooth_level)
smooth_loss = 0
if self.smooth_order_1_weight > 0:
if self.smooth_type == 'edge':
smooth_loss += self.smooth_order_1_weight * network_tools.edge_aware_smoothness_order1(img=s_im1, pred=s_flow_f)
smooth_loss += self.smooth_order_1_weight * network_tools.edge_aware_smoothness_order1(img=s_im2, pred=s_flow_b)
elif self.smooth_type == 'delta':
smooth_loss += self.smooth_order_1_weight * network_tools.flow_smooth_delta(flow=s_flow_f, if_second_order=False)
smooth_loss += self.smooth_order_1_weight * network_tools.flow_smooth_delta(flow=s_flow_b, if_second_order=False)
else:
raise ValueError('wrong smooth_type: %s' % self.smooth_type)
# compute the second-order smoothness loss
if self.smooth_order_2_weight > 0:
if self.smooth_type == 'edge':
smooth_loss += self.smooth_order_2_weight * network_tools.edge_aware_smoothness_order2(img=s_im1, pred=s_flow_f)
smooth_loss += self.smooth_order_2_weight * network_tools.edge_aware_smoothness_order2(img=s_im2, pred=s_flow_b)
elif self.smooth_type == 'delta':
smooth_loss += self.smooth_order_2_weight * network_tools.flow_smooth_delta(flow=s_flow_f, if_second_order=True)
smooth_loss += self.smooth_order_2_weight * network_tools.flow_smooth_delta(flow=s_flow_b, if_second_order=True)
else:
raise ValueError('wrong smooth_type: %s' % self.smooth_type)
output_dict['smooth_loss'] = smooth_loss
# compute the photometric loss
im1_s, im2_s, start_s = input_dict['im1_raw'], input_dict['im2_raw'], input_dict['start']
im1_warp = tools.nianjin_warp.warp_im(im2_s, flow_f_out, start_s)  # estimate of im1: warp im2 with the forward flow
im2_warp = tools.nianjin_warp.warp_im(im1_s, flow_b_out, start_s)
# im_diff_fw = im1 - im1_warp
# im_diff_bw = im2 - im2_warp
# photo loss
if self.stop_occ_gradient:
occ_fw, occ_bw = occ_fw.clone().detach(), occ_bw.clone().detach()
photo_loss = network_tools.photo_loss_multi_type(im1_ori, im1_warp, occ_fw, photo_loss_type=self.photo_loss_type,
photo_loss_delta=self.photo_loss_delta, photo_loss_use_occ=self.photo_loss_use_occ)
photo_loss += network_tools.photo_loss_multi_type(im2_ori, im2_warp, occ_bw, photo_loss_type=self.photo_loss_type,
photo_loss_delta=self.photo_loss_delta, photo_loss_use_occ=self.photo_loss_use_occ)
output_dict['photo_loss'] = photo_loss
output_dict['im1_warp'] = im1_warp
output_dict['im2_warp'] = im2_warp
# compute the census loss
if self.photo_loss_census_weight > 0:
census_loss = loss_functions.census_loss_torch(img1=im1_ori, img1_warp=im1_warp, mask=occ_fw, q=self.photo_loss_delta,
charbonnier_or_abs_robust=False, if_use_occ=self.photo_loss_use_occ, averge=True) + \
loss_functions.census_loss_torch(img1=im2_ori, img1_warp=im2_warp, mask=occ_bw, q=self.photo_loss_delta,
charbonnier_or_abs_robust=False, if_use_occ=self.photo_loss_use_occ, averge=True)
census_loss *= self.photo_loss_census_weight
else:
census_loss = None
output_dict['census_loss'] = census_loss
# compute the multi-scale distillation (msd) loss
if self.multi_scale_distillation_weight > 0:
flow_fw_label = flow_f_out.clone().detach()
flow_bw_label = flow_b_out.clone().detach()
msd_loss_ls = []
for i, (scale_fw, scale_bw) in enumerate(flows):
if self.multi_scale_distillation_style == 'down':
flow_fw_label_scale = upsample_flow(flow_fw_label, target_flow=scale_fw)
occ_scale_fw = F.interpolate(occ_fw, [scale_fw.size(2), scale_fw.size(3)], mode='nearest')
flow_bw_label_scale = upsample_flow(flow_bw_label, target_flow=scale_bw)
occ_scale_bw = F.interpolate(occ_bw, [scale_bw.size(2), scale_bw.size(3)], mode='nearest')
elif self.multi_scale_distillation_style == 'upup':
flow_fw_label_scale = flow_fw_label
scale_fw = upsample_flow(scale_fw, target_flow=flow_fw_label_scale)
occ_scale_fw = occ_fw
flow_bw_label_scale = flow_bw_label
scale_bw = upsample_flow(scale_bw, target_flow=flow_bw_label_scale)
occ_scale_bw = occ_bw
elif self.multi_scale_distillation_style == 'updown':
scale_fw = upsample_flow(scale_fw, target_size=(scale_fw.size(2) * 4, scale_fw.size(3) * 4)) #
flow_fw_label_scale = upsample_flow(flow_fw_label, target_flow=scale_fw) #
occ_scale_fw = F.interpolate(occ_fw, [scale_fw.size(2), scale_fw.size(3)], mode='nearest') # occ
scale_bw = upsample_flow(scale_bw, target_size=(scale_bw.size(2) * 4, scale_bw.size(3) * 4))
flow_bw_label_scale = upsample_flow(flow_bw_label, target_flow=scale_bw)
occ_scale_bw = F.interpolate(occ_bw, [scale_bw.size(2), scale_bw.size(3)], mode='nearest')
else:
raise ValueError('wrong multi_scale_distillation_style: %s' % self.multi_scale_distillation_style)
msd_loss_scale_fw = network_tools.photo_loss_multi_type(x=scale_fw, y=flow_fw_label_scale, occ_mask=occ_scale_fw, photo_loss_type='abs_robust',
photo_loss_use_occ=self.multi_scale_distillation_occ)
msd_loss_ls.append(msd_loss_scale_fw)
msd_loss_scale_bw = network_tools.photo_loss_multi_type(x=scale_bw, y=flow_bw_label_scale, occ_mask=occ_scale_bw, photo_loss_type='abs_robust',
photo_loss_use_occ=self.multi_scale_distillation_occ)
msd_loss_ls.append(msd_loss_scale_bw)
msd_loss = sum(msd_loss_ls)
msd_loss = self.multi_scale_distillation_weight * msd_loss
else:
# instead compute a multi-scale photo loss: multi_scale_photo_weight
if self.multi_scale_photo_weight > 0:
_, _, h_raw, w_raw = im1_s.size()
_, _, h_temp_crop, w_temp_crop = im1_ori.size()
msd_loss_ls = []
for i, (scale_fw, scale_bw) in enumerate(flows):
if self.multi_scale_distillation_style == 'down':  # downscale the original images and compute the photo loss at flow resolution
_, _, h_temp, w_temp = scale_fw.size()
rate = h_temp_crop / h_temp
occ_f_resize, occ_b_resize = self.occ_check_model(flow_f=scale_fw, flow_b=scale_bw)
im1_crop_resize = F.interpolate(im1_ori, [h_temp, w_temp], mode="bilinear", align_corners=True)
im2_crop_resize = F.interpolate(im2_ori, [h_temp, w_temp], mode="bilinear", align_corners=True)
im1_raw_resize = F.interpolate(im1_s, [int(h_raw / rate), int(w_raw / rate)], mode="bilinear", align_corners=True)
im2_raw_resize = F.interpolate(im2_s, [int(h_raw / rate), int(w_raw / rate)], mode="bilinear", align_corners=True)
im1_resize_warp = tools.nianjin_warp.warp_im(im2_raw_resize, scale_fw, start_s / rate) # use forward flow to warp im2
im2_resize_warp = tools.nianjin_warp.warp_im(im1_raw_resize, scale_bw, start_s / rate)
elif self.multi_scale_distillation_style == 'upup':  # upscale the multi-scale flow and compute the photo loss at full resolution
occ_f_resize =
# roboticstoolbox/blocks/arm.py
import numpy as np
from math import sin, cos, atan2, tan, sqrt, pi
import matplotlib.pyplot as plt
import time
from spatialmath import base, SE3
from bdsim.components import TransferBlock, FunctionBlock, SourceBlock
from bdsim.graphics import GraphicsBlock
from roboticstoolbox import tpoly_func, lspb_func
"""
Robot blocks:
- have inputs and outputs
- are a subclass of ``FunctionBlock`` |rarr| ``Block`` for kinematics and have no states
- are a subclass of ``TransferBlock`` |rarr| ``Block`` for dynamics and have states
"""
# The constructor of each class ``MyClass`` with a ``@block`` decorator becomes a method ``MYCLASS()`` of the BlockDiagram instance.
# ------------------------------------------------------------------------ #
class FKine(FunctionBlock):
"""
:blockname:`FKINE`
.. table::
:align: left
+------------+---------+---------+
| inputs | outputs | states |
+------------+---------+---------+
| 1 | 1 | 0 |
+------------+---------+---------+
| ndarray | SE3 | |
+------------+---------+---------+
"""
nin = 1
nout = 1
def __init__(self, robot=None, args={}, *inputs, **kwargs):
"""
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:param robot: Robot model, defaults to None
:type robot: Robot subclass, optional
:param args: Options for fkine, defaults to {}
:type args: dict, optional
:param ``**kwargs``: common Block options
:return: a FORWARD_KINEMATICS block
:rtype: Foward_Kinematics instance
Robot arm forward kinematic model.
**Block ports**
:input q: Joint configuration vector as an ndarray.
:output T: End-effector pose as an SE(3) object
"""
super().__init__(inputs=inputs, **kwargs)
self.type = "forward-kinematics"
self.robot = robot
self.args = args
self.inport_names(("q",))
self.outport_names(("T",))
def output(self, t=None):
return [self.robot.fkine(self.inputs[0], **self.args)]
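# Illustrative usage sketch (not part of the original file; the robot model
# and wiring are assumptions). Per the note near the top of this file, the
# @block decorator exposes this class as bd.FKINE() on a BlockDiagram:
#
#     import roboticstoolbox as rtb
#     from bdsim import BDSim
#
#     sim = BDSim()
#     bd = sim.blockdiagram()
#     puma = rtb.models.DH.Puma560()
#     q = bd.CONSTANT(puma.qn)     # joint-configuration source
#     fk = bd.FKINE(puma)          # end-effector pose as SE3
#     bd.connect(q, fk)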
class IKine(FunctionBlock):
"""
:blockname:`IKINE`
.. table::
:align: left
+------------+---------+---------+
| inputs | outputs | states |
+------------+---------+---------+
| 1 | 1 | 0 |
+------------+---------+---------+
| SE3 | ndarray | |
+------------+---------+---------+
"""
nin = 1
nout = 1
def __init__(
self, robot=None, q0=None, useprevious=True, ik=None, *inputs, **kwargs
):
"""
:param robot: Robot model, defaults to None
:type robot: Robot subclass, optional
:param q0: Initial joint angles, defaults to None
:type q0: array_like(n), optional
:param useprevious: Use previous IK solution as q0, defaults to True
:type useprevious: bool, optional
:param ik: Specify an IK function, defaults to 'ikine_LM'
:type ik: callable f(T)
:param ``**kwargs``: common Block options
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:return: an INVERSE_KINEMATICS block
:rtype: Inverse_Kinematics instance
Robot arm inverse kinematic model.
The block has one input port:
1. End-effector pose as an SE(3) object
and one output port:
1. Joint configuration vector as an ndarray.
"""
super().__init__(inputs=inputs, **kwargs)
self.type = "inverse-kinematics"
self.robot = robot
self.q0 = q0
self.qprev = q0
self.useprevious = useprevious
self.ik = ik  # honor a user-supplied IK function; output() falls back to ikine_LM when None
self.inport_names(("T",))
self.outport_names(("q",))
def start(self):
super().start()
if self.useprevious:
self.qprev = self.q0
def output(self, t=None):
if self.useprevious:
q0 = self.qprev
else:
q0 = self.q0
if self.ik is None:
sol = self.robot.ikine_LM(self.inputs[0], q0=q0)
else:
sol = self.ik(self.inputs[0])
if not sol.success:
raise RuntimeError("inverse kinematic failure for pose", self.inputs[0])
if self.useprevious:
self.qprev = sol.q
return [sol.q]
# ------------------------------------------------------------------------ #
class Jacobian(FunctionBlock):
"""
:blockname:`JACOBIAN`
.. table::
:align: left
+------------+---------+---------+
| inputs | outputs | states |
+------------+---------+---------+
| 1 | 1 | 0 |
+------------+---------+---------+
| ndarray | ndarray | |
+------------+---------+---------+
"""
nin = 1
nout = 1
def __init__(
self,
robot,
*inputs,
frame="0",
inverse=False,
pinv=False,
transpose=False,
**kwargs
):
"""
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:param robot: Robot model
:type robot: Robot subclass
:param frame: Frame to compute Jacobian for, one of: '0' [default], 'e'
:type frame: str, optional
:param inverse: output inverse of Jacobian, defaults to False
:type inverse: bool, optional
:param pinv: output pseudo-inverse of Jacobian, defaults to False
:type pinv: bool, optional
:param transpose: output transpose of Jacobian, defaults to False
:type transpose: bool, optional
:param ``**kwargs``: common Block options
:return: a JACOBIAN block
:rtype: Jacobian instance
Robot arm Jacobian.
The block has one input port:
1. Joint configuration vector as an ndarray.
and one output port:
1. Jacobian matrix as an ndarray(6,n)
.. notes::
- Only one of ``inverse`` or ``pinv`` can be True
- ``inverse`` or ``pinv`` can be used in conjunction with ``transpose``
- ``inverse`` requires that the Jacobian is square
- If ``inverse`` is True and the Jacobian is singular a runtime
error will occur.
"""
super().__init__(inputs=inputs, **kwargs)
self.robot = robot
if frame == "0":
self.jfunc = robot.jacob0
elif frame == "e":
self.jfunc = robot.jacobe
else:
raise ValueError("unknown frame")
if inverse and robot.n != 6:
raise ValueError("cannot invert a non square Jacobian")
if inverse and pinv:
raise ValueError("can only set one of inverse and pinv")
self.inverse = inverse
self.pinv = pinv
self.transpose = transpose
self.inport_names(("q",))
self.outport_names(("J",))
def output(self, t=None):
J = self.jfunc(self.inputs[0])
if self.inverse:
J = np.linalg.inv(J)
if self.pinv:
J = np.linalg.pinv(J)
if self.transpose:
J = J.T
return [J]
class Tr2Delta(FunctionBlock):
"""
:blockname:`TR2DELTA`
.. table::
:align: left
+------------+------------+---------+
| inputs | outputs | states |
+------------+------------+---------+
| 2 | 1 | 0 |
+------------+------------+---------+
| SE3, SE3 | ndarray(6) | |
+------------+------------+---------+
"""
nin = 2
nout = 1
def __init__(self, *inputs, **kwargs):
"""
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:param ``**kwargs``: common Block options
:return: a TR2DELTA block
:rtype: Tr2Delta instance
Difference between T1 and T2 as a 6-vector
The block has two input port:
1. T1 as an SE3.
2. T2 as an SE3.
and one output port:
1. delta as an ndarray(6,n)
:seealso: :func:`spatialmath.base.tr2delta`
"""
super().__init__(inputs=inputs, **kwargs)
self.inport_names(("T1", "T2"))
self.outport_names(("$\delta$",))
def output(self, t=None):
return [base.tr2delta(self.inputs[0].A, self.inputs[1].A)]
# ------------------------------------------------------------------------ #
class Delta2Tr(FunctionBlock):
"""
:blockname:`DELTA2TR`
.. table::
:align: left
+------------+----------+---------+
| inputs | outputs | states |
+------------+----------+---------+
| 1 | 1 | 0 |
+------------+----------+---------+
| ndarray(6) | SE3 | |
+------------+----------+---------+
"""
nin = 1
nout = 1
def __init__(self, *inputs, **kwargs):
"""
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:param ``**kwargs``: common Block options
:return: a DELTA2TR block
:rtype: Delta2Tr instance
Delta to SE(3)
The block has one input port:
1. delta as an ndarray(6,n)
and one output port:
1. T as an SE3
:seealso: :func:`spatialmath.base.delta2tr`
"""
super().__init__(inputs=inputs, **kwargs)
self.inport_names(("$\delta$",))
self.outport_names(("T",))
def output(self, t=None):
return [SE3.Delta(self.inputs[0])]
# ------------------------------------------------------------------------ #
class Point2Tr(FunctionBlock):
"""
:blockname:`POINT2TR`
.. table::
:align: left
+------------+----------+---------+
| inputs | outputs | states |
+------------+----------+---------+
| 1 | 1 | 0 |
+------------+----------+---------+
| ndarray(3) | SE3 | |
+------------+----------+---------+
"""
nin = 1
nout = 1
def __init__(self, T, *inputs, **kwargs):
"""
:param T: the transform
:type T: SE3
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:param ``**kwargs``: common Block options
:return: a POINT2TR block
:rtype: Point2Tr instance
The block has one input port:
1. a 3D point as an ndarray(3)
and one output port:
1. T as an SE3 with its position part replaced by the input
:seealso: :func:`spatialmath.base.delta2tr`
"""
super().__init__(inputs=inputs, **kwargs)
self.inport_names(("t",))
self.outport_names(("T",))
self.pose = T
def output(self, t=None):
T = SE3.SO3(self.pose.R, t=self.inputs[0])
return [T]
# ------------------------------------------------------------------------ #
class TR2T(FunctionBlock):
"""
:blockname:`TR2T`
.. table::
:align: left
+------------+----------+---------+
| inputs | outputs | states |
+------------+----------+---------+
| 1 | 3 | 0 |
+------------+----------+---------+
| SE3 | float | |
+------------+----------+---------+
"""
nin = 1
nout = 3
def __init__(self, *inputs, **kwargs):
"""
:param T: the transform
:type T: SE3
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:param ``**kwargs``: common Block options
:return: a POINT2TR block
:rtype: Point2Tr instance
The block has one input port:
1. a 3D point as an ndarray(3)
and one output port:
1. T as an SE3 with its position part replaced by the input
:seealso: :func:`spatialmath.base.delta2tr`
"""
super().__init__(inputs=inputs, **kwargs)
self.inport_names(("T",))
self.outport_names(("x", "y", "z"))
def output(self, t=None):
t = self.inputs[0].t
return list(t)
# ------------------------------------------------------------------------ #
class FDyn(TransferBlock):
"""
:blockname:`FDYN`
.. table::
:align: left
+------------+---------+---------+
| inputs | outputs | states |
+------------+---------+---------+
| 1 | 3 | 0 |
+------------+---------+---------+
| ndarray | ndarray,| |
| | ndarray,| |
| | ndarray | |
+------------+---------+---------+
"""
nin = 1
nout = 3
def __init__(self, robot, *inputs, q0=None, **kwargs):
"""
:param ``*inputs``: Optional incoming connections
:type ``*inputs``: Block or Plug
:param robot: Robot model
:type robot: Robot subclass
:param end: Link to compute pose of, defaults to end-effector
:type end: Link or
# tools/upgrade/errors.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Union, cast
import libcst
import libcst.matchers as libcst_matchers
from . import UserError, ast
LOG: logging.Logger = logging.getLogger(__name__)
MAX_LINES_PER_FIXME: int = 4
PyreError = Dict[str, Any]
PathsToErrors = Dict[Path, List[PyreError]]
class LineBreakTransformer(libcst.CSTTransformer):
def leave_SimpleWhitespace(
self,
original_node: libcst.SimpleWhitespace,
updated_node: libcst.SimpleWhitespace,
) -> Union[libcst.SimpleWhitespace, libcst.ParenthesizedWhitespace]:
whitespace = original_node.value.replace("\\", "")
if "\n" in whitespace:
first_line = libcst.TrailingWhitespace(
whitespace=libcst.SimpleWhitespace(
value=whitespace.split("\n")[0].rstrip()
),
comment=None,
newline=libcst.Newline(),
)
last_line = libcst.SimpleWhitespace(value=whitespace.split("\n")[1])
return libcst.ParenthesizedWhitespace(
first_line=first_line, empty_lines=[], indent=True, last_line=last_line
)
return updated_node
@staticmethod
def basic_parenthesize(
node: libcst.CSTNode,
whitespace: Optional[libcst.BaseParenthesizableWhitespace] = None,
) -> libcst.CSTNode:
if not hasattr(node, "lpar"):
return node
if whitespace:
return node.with_changes(
lpar=[libcst.LeftParen(whitespace_after=whitespace)],
rpar=[libcst.RightParen()],
)
return node.with_changes(lpar=[libcst.LeftParen()], rpar=[libcst.RightParen()])
def leave_Assert(
self, original_node: libcst.Assert, updated_node: libcst.Assert
) -> libcst.Assert:
test = updated_node.test
if not test:
return updated_node
assert_whitespace = updated_node.whitespace_after_assert
if isinstance(assert_whitespace, libcst.ParenthesizedWhitespace):
return updated_node.with_changes(
test=LineBreakTransformer.basic_parenthesize(test, assert_whitespace),
whitespace_after_assert=libcst.SimpleWhitespace(value=" "),
)
return updated_node.with_changes(
test=LineBreakTransformer.basic_parenthesize(test)
)
def leave_Assign(
self, original_node: libcst.Assign, updated_node: libcst.Assign
) -> libcst.Assign:
assign_value = updated_node.value
assign_whitespace = updated_node.targets[-1].whitespace_after_equal
if libcst_matchers.matches(
assign_whitespace, libcst_matchers.ParenthesizedWhitespace()
):
adjusted_target = updated_node.targets[-1].with_changes(
whitespace_after_equal=libcst.SimpleWhitespace(value=" ")
)
updated_targets = list(updated_node.targets[:-1])
updated_targets.append(adjusted_target)
return updated_node.with_changes(
targets=tuple(updated_targets),
value=LineBreakTransformer.basic_parenthesize(
assign_value, assign_whitespace
),
)
return updated_node.with_changes(
value=LineBreakTransformer.basic_parenthesize(assign_value)
)
def leave_Del(
self, original_node: libcst.Del, updated_node: libcst.Del
) -> libcst.Del:
delete_target = updated_node.target
delete_whitespace = updated_node.whitespace_after_del
if isinstance(delete_whitespace, libcst.ParenthesizedWhitespace):
return updated_node.with_changes(
target=LineBreakTransformer.basic_parenthesize(
delete_target, delete_whitespace
),
whitespace_after_del=libcst.SimpleWhitespace(value=" "),
)
return updated_node.with_changes(
target=LineBreakTransformer.basic_parenthesize(delete_target)
)
def leave_Raise(
self, original_node: libcst.Raise, updated_node: libcst.Raise
) -> libcst.Raise:
exception = updated_node.exc
if not exception:
return updated_node
raise_whitespace = updated_node.whitespace_after_raise
if isinstance(raise_whitespace, libcst.ParenthesizedWhitespace):
return updated_node.with_changes(
exc=LineBreakTransformer.basic_parenthesize(
exception, raise_whitespace
),
whitespace_after_raise=libcst.SimpleWhitespace(value=" "),
)
return updated_node.with_changes(
exc=LineBreakTransformer.basic_parenthesize(exception)
)
def leave_Return(
self, original_node: libcst.Return, updated_node: libcst.Return
) -> libcst.Return:
return_value = updated_node.value
if not return_value:
return updated_node
return_whitespace = updated_node.whitespace_after_return
if isinstance(return_whitespace, libcst.ParenthesizedWhitespace):
return updated_node.with_changes(
value=LineBreakTransformer.basic_parenthesize(
return_value, return_whitespace
),
whitespace_after_return=libcst.SimpleWhitespace(value=" "),
)
return updated_node.with_changes(
value=LineBreakTransformer.basic_parenthesize(return_value)
)
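# Hedged usage sketch (an illustration, not part of the original tool): apply
# LineBreakTransformer to a module that uses backslash continuations, which it
# rewrites into parenthesized line breaks so suppression comments attach safely.
def _example_line_break_transform() -> str:
    source = "def f(x):\n    return x + \\\n        1\n"
    module = libcst.parse_module(source)
    transformed = module.visit(LineBreakTransformer())
    return transformed.code  # the return expression is now parenthesized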
class PartialErrorSuppression(Exception):
def __init__(self, message: str, unsuppressed_paths: List[str]) -> None:
super().__init__(message)
self.unsuppressed_paths: List[str] = unsuppressed_paths
def error_path(error: Dict[str, Any]) -> str:
return error["path"]
class Errors:
@classmethod
def empty(cls) -> "Errors":
return cls([])
@staticmethod
def from_json(
json_string: str,
only_fix_error_code: Optional[int] = None,
from_stdin: bool = False,
) -> "Errors":
try:
errors = json.loads(json_string)
return Errors(_filter_errors(errors, only_fix_error_code))
except json.decoder.JSONDecodeError:
if from_stdin:
raise UserError(
"Received invalid JSON as input. "
"If piping from `pyre check` be sure to use `--output=json`."
)
else:
raise UserError(
f"Encountered invalid output when checking for pyre errors: `{json_string}`."
)
@staticmethod
def from_stdin(only_fix_error_code: Optional[int] = None) -> "Errors":
input = sys.stdin.read()
return Errors.from_json(input, only_fix_error_code, from_stdin=True)
def __init__(self, errors: List[Dict[str, Any]]) -> None:
self.errors: List[Dict[str, Any]] = errors
def __len__(self) -> int:
return len(self.errors)
def __eq__(self, other: "Errors") -> bool:
return self.errors == other.errors
@property
def paths_to_errors(self) -> Dict[str, List[PyreError]]:
return {
path: list(errors)
for path, errors in itertools.groupby(
sorted(self.errors, key=error_path), key=error_path
)
}
def suppress(
self,
comment: Optional[str] = None,
max_line_length: Optional[int] = None,
truncate: bool = False,
unsafe: bool = False,
) -> None:
unsuppressed_paths_and_exceptions = []
for path_to_suppress, errors in self.paths_to_errors.items():
LOG.info("Processing `%s`", path_to_suppress)
try:
path = Path(path_to_suppress)
input = path.read_text()
output = _suppress_errors(
input,
_build_error_map(errors),
comment,
max_line_length
if max_line_length and max_line_length > 0
else None,
truncate,
unsafe,
)
path.write_text(output)
except SkippingGeneratedFileException:
LOG.warning(f"Skipping generated file at {path_to_suppress}")
except SkippingUnparseableFileException:
LOG.warning(f"Skipping unparseable file at {path_to_suppress}")
except (ast.UnstableAST, SyntaxError) as exception:
unsuppressed_paths_and_exceptions.append((path_to_suppress, exception))
if unsuppressed_paths_and_exceptions:
exception_messages = "\n".join(
f"{path} - {str(exception)}"
for path, exception in unsuppressed_paths_and_exceptions
)
raise PartialErrorSuppression(
"Could not fully suppress errors due to the following exceptions: "
f"{exception_messages}\n Run with `--unsafe` to suppress anyway.",
[path for path, _ in unsuppressed_paths_and_exceptions],
)
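def _example_errors_from_json() -> "Errors":
    # Hedged sketch: building an Errors collection from a pyre-style JSON
    # payload and grouping it by path; the field values are hypothetical.
    payload = json.dumps(
        [
            {"path": "a.py", "line": 1, "column": 0, "code": 7, "description": "..."},
            {"path": "a.py", "line": 5, "column": 4, "code": 16, "description": "..."},
        ]
    )
    errors = Errors.from_json(payload)
    # errors.paths_to_errors == {"a.py": [<two error dicts>]}
    return errors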
def _filter_errors(
errors: List[Dict[str, Any]], only_fix_error_code: Optional[int] = None
) -> List[Dict[str, Any]]:
if only_fix_error_code is not None:
errors = [error for error in errors if error["code"] == only_fix_error_code]
return errors
def errors_from_targets(
project_directory: Path,
path: str,
targets: List[str],
check_alternate_names: bool = True,
) -> Errors:
buck_test_command = (
["buck", "test", "--show-full-json-output"] + targets + ["--", "--run-disabled"]
)
buck_test = subprocess.run(
buck_test_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
errors = Errors.empty()
if buck_test.returncode == 0:
# Successful run with no type errors
LOG.info("No errors in %s...", path)
elif buck_test.returncode == 32:
buck_test_output = buck_test.stdout.decode().split("\n")
pyre_error_pattern = re.compile(r"\W*(.*\.pyi?):(\d*):(\d*) (.* \[(\d*)\]: .*)")
errors = {}
for output_line in buck_test_output:
matched = pyre_error_pattern.match(output_line)
if matched:
path = matched.group(1)
line = int(matched.group(2))
column = int(matched.group(3))
description = matched.group(4)
code = matched.group(5)
error = {
"line": line,
"column": column,
"path": project_directory / path,
"code": code,
"description": description,
"concise_description": description,
}
errors[(line, column, path, code)] = error
errors = Errors(list(errors.values()))
elif check_alternate_names and buck_test.returncode == 5:
# Generated type check target was not named as expected.
LOG.warning("Could not find buck test targets: %s", targets)
LOG.info("Looking for similar targets...")
targets_to_retry = []
for target in targets:
query_command = ["buck", "query", target]
similar_targets = subprocess.run(
query_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output = similar_targets.stdout.decode()
error_output = similar_targets.stderr.decode()
if output:
targets_to_retry.append(output.strip())
elif error_output:
typecheck_targets = [
target.strip()
for target in error_output.split("\n")
if target.strip().endswith("-pyre-typecheck")
]
targets_to_retry += typecheck_targets
if targets_to_retry:
LOG.info("Retrying similar targets: %s", targets_to_retry)
errors = errors_from_targets(
project_directory, path, targets_to_retry, check_alternate_names=False
)
else:
LOG.error("No similar targets to retry.")
else:
LOG.error(
"Failed to run buck test command:\n\t%s\n\n%s",
" ".join(buck_test_command),
buck_test.stderr.decode(),
)
return errors
def _remove_comment_preamble(lines: List[str]) -> None:
# Deprecated: leaving remove logic until live old-style comments are cleaned up.
while lines:
old_line = lines.pop()
new_line = re.sub(r"# pyre: .*$", "", old_line).rstrip()
if old_line == "" or new_line != "":
# The preamble has ended.
lines.append(new_line)
return
def _add_error_to_line_break_block(lines: List[str], errors: List[List[str]]) -> None:
# Gather unbroken lines.
line_break_block = [lines.pop() for _ in range(0, len(errors))]
line_break_block.reverse()
# Transform line break block to use parenthesis.
indent = len(line_break_block[0]) - len(line_break_block[0].lstrip())
line_break_block = [line[indent:] for line in line_break_block]
statement = "\n".join(line_break_block)
transformed_statement = libcst.Module([]).code_for_node(
cast(
libcst.CSTNode,
libcst.parse_statement(statement).visit(LineBreakTransformer()),
)
)
transformed_lines = transformed_statement.split("\n")
transformed_lines = [" " * indent + line for line in transformed_lines]
# Add to lines.
for line, comment in zip(transformed_lines, errors):
lines.extend(comment)
lines.append(line)
def _split_across_lines(
comment: str, indent: int, max_line_length: Optional[int]
) -> List[str]:
if not max_line_length or len(comment) <= max_line_length:
return [comment]
comment = comment.lstrip()
available_columns = max_line_length - indent - len("# ")
buffered_line = ""
result = []
prefix = " " * indent
for token in comment.split():
if buffered_line and (
len(buffered_line) + len(token) + len(" ") > available_columns
):
# This new token would make the line exceed the limit,
# hence terminate what we have accumulated.
result.append(("{}{}".format(prefix, buffered_line)).rstrip())
# The first line already has a comment token on it, so don't prefix #. For
# the rest, we need to add the comment symbol manually.
prefix = "{}# ".format(" " * indent)
buffered_line = ""
buffered_line = buffered_line + token + " "
result.append(("{}{}".format(prefix, buffered_line)).rstrip())
return result
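def _example_split_across_lines() -> List[str]:
    # Hedged illustration: a long fixme comment wrapped at 40 columns with a
    # 4-space indent; continuation lines receive an explicit "# " prefix since
    # only the first line already carries its comment marker.
    comment = "# pyre-fixme[7]: Expected `int` but got `Optional[int]` instead."
    return _split_across_lines(comment, indent=4, max_line_length=40)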
class SkippingGeneratedFileException(Exception):
pass
class SkippingUnparseableFileException(Exception):
pass
def _get_unused_ignore_codes(errors: List[Dict[str, str]]) -> List[int]:
unused_ignore_codes: List[int] = []
ignore_errors = [error for error in errors if error["code"] == "0"]
for error in ignore_errors:
match = re.search(
r"The `pyre-ignore\[(.*?)\]` or `pyre-fixme\[.*?\]`", error["description"]
)
if match:
unused_ignore_codes.extend(
[int(code.strip()) for code in match.group(1).split(",")]
)
unused_ignore_codes.sort()
return unused_ignore_codes
def _remove_unused_ignores(line: str, errors: List[Dict[str, str]]) -> str:
unused_ignore_codes = _get_unused_ignore_codes(errors)
match = re.search(r"pyre-(ignore|fixme) *\[([0-9, ]*)\]", line)
stripped_line = re.sub(r"# pyre-(ignore|fixme).*$", "", line).rstrip()
if not match:
return stripped_line
# One or more codes are specified in the ignore comment.
# Remove only the codes that are erroring as unused.
ignore_codes_string = match.group(2)
ignore_codes = [int(code.strip()) for code in ignore_codes_string.split(",")]
remaining_ignore_codes = set(ignore_codes) - set(unused_ignore_codes)
if len(remaining_ignore_codes) == 0 or len(unused_ignore_codes) == 0:
return stripped_line
else:
return line.replace(
ignore_codes_string,
", ".join([str(code) for code in remaining_ignore_codes]),
)
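def _example_remove_unused_ignores() -> str:
    # Hedged sketch: code 7 is reported unused via a code-"0" error whose
    # description names it, so it is dropped while code 16 is kept.
    line = "x = f()  # pyre-fixme[7, 16]: result type"
    errors = [
        {
            "code": "0",
            "description": "The `pyre-ignore[7]` or `pyre-fixme[7]` comment is not suppressing type errors.",
        }
    ]
    return _remove_unused_ignores(line, errors)  # "x = f()  # pyre-fixme[16]: result type"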
def _suppress_errors(
input: str,
errors: Dict[int, List[Dict[str, str]]],
custom_comment: Optional[str] = None,
max_line_length: Optional[int] = None,
truncate: bool = False,
unsafe: bool = False,
) -> str:
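    # "@" "generated" is written as two adjacent string literals so this file
    # does not itself contain the generated-file marker it searches for.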
if not unsafe and "@" "generated" in input:
raise SkippingGeneratedFileException()
lines: List[str] = input.split("\n")
# Do not suppress parse errors.
if any(
error["code"] == "404" | |
<filename>model_lr_and_gdbt_and_xgboost/feature_construct/feature_construct_part_1.py
import pandas as pd
import numpy as np
'''
this file for data_set_part_1
'''
##### file path
# input
path_df_D = "tianchi_fresh_comp_train_user.csv"
path_df_part_1 = "df_part_1.csv"
path_df_part_2 = "df_part_2.csv"
path_df_part_3 = "df_part_3.csv"
path_df_part_1_tar = "df_part_1_tar.csv"
path_df_part_2_tar = "df_part_2_tar.csv"
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# output
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
##========================================================##
##======================== Part 1 ========================##
##========================================================##
###########################################
'''Step 1.1 feature data set U of df_part_1
(1)
u_b1_count_in_6
u_b2_count_in_6
u_b3_count_in_6
u_b4_count_in_6
u_b_count_in_6
(2)
u_b1_count_in_3
u_b2_count_in_3
u_b3_count_in_3
u_b4_count_in_3
u_b_count_in_3
(3)
u_b1_count_in_1
u_b2_count_in_1
u_b3_count_in_1
u_b4_count_in_1
u_b_count_in_1
(4)
u_b4_rate (in_6)
u_b4_diff_hours (in_6)
'''
#loading data
path_df = path_df_part_1
try:
df_part_1 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_1.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
except Exception as e:
print(e)
# u_b_count_in_6
df_part_1['cumcount'] = df_part_1.groupby(['user_id', 'behavior_type']).cumcount()
df_part_1_u_b_count_in_6 = df_part_1.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_1_u_b_count_in_6 = pd.get_dummies(df_part_1_u_b_count_in_6['behavior_type']).join(
df_part_1_u_b_count_in_6[['user_id', 'cumcount']])
df_part_1_u_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_u_b_count_in_6['u_b1_count_in_6'] = df_part_1_u_b_count_in_6['behavior_type_1'] * (
df_part_1_u_b_count_in_6['cumcount'] + 1)
df_part_1_u_b_count_in_6['u_b2_count_in_6'] = df_part_1_u_b_count_in_6['behavior_type_2'] * (
df_part_1_u_b_count_in_6['cumcount'] + 1)
df_part_1_u_b_count_in_6['u_b3_count_in_6'] = df_part_1_u_b_count_in_6['behavior_type_3'] * (
df_part_1_u_b_count_in_6['cumcount'] + 1)
df_part_1_u_b_count_in_6['u_b4_count_in_6'] = df_part_1_u_b_count_in_6['behavior_type_4'] * (
df_part_1_u_b_count_in_6['cumcount'] + 1)
df_part_1_u_b_count_in_6 = df_part_1_u_b_count_in_6.groupby('user_id').agg({'u_b1_count_in_6': np.sum,
'u_b2_count_in_6': np.sum,
'u_b3_count_in_6': np.sum,
'u_b4_count_in_6': np.sum})
df_part_1_u_b_count_in_6.reset_index(inplace=True)
df_part_1_u_b_count_in_6['u_b_count_in_6'] = df_part_1_u_b_count_in_6[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6']].apply(lambda x: x.sum(),
axis=1)
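# Note on the counting trick used above: groupby(...).cumcount() numbers the
# rows of each (user_id, behavior_type) group from 0, so the cumcount on the
# *last* row of a group is the group size minus one; drop_duplicates(..., 'last')
# keeps exactly that row, and adding 1 restores the count. A hedged, minimal
# demonstration on toy data (names are illustrative only):
def _demo_cumcount_counts():
    toy = pd.DataFrame({'user_id': [1, 1, 1, 2],
                        'behavior_type': [1, 1, 4, 1]})
    toy['cumcount'] = toy.groupby(['user_id', 'behavior_type']).cumcount()
    last = toy.drop_duplicates(['user_id', 'behavior_type'], 'last')
    return last.assign(count=last['cumcount'] + 1)  # counts: 2, 1, 1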
# u_b_count_in_3
df_part_1_in_3 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-25')]
df_part_1_in_3['cumcount'] = df_part_1_in_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_1_u_b_count_in_3 = df_part_1_in_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
    ['user_id', 'behavior_type', 'cumcount']]
df_part_1_u_b_count_in_3 = pd.get_dummies(df_part_1_u_b_count_in_3['behavior_type']).join(
df_part_1_u_b_count_in_3[['user_id', 'cumcount']])
df_part_1_u_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_u_b_count_in_3['u_b1_count_in_3'] = df_part_1_u_b_count_in_3['behavior_type_1'] * (
df_part_1_u_b_count_in_3['cumcount'] + 1)
df_part_1_u_b_count_in_3['u_b2_count_in_3'] = df_part_1_u_b_count_in_3['behavior_type_2'] * (
df_part_1_u_b_count_in_3['cumcount'] + 1)
df_part_1_u_b_count_in_3['u_b3_count_in_3'] = df_part_1_u_b_count_in_3['behavior_type_3'] * (
df_part_1_u_b_count_in_3['cumcount'] + 1)
df_part_1_u_b_count_in_3['u_b4_count_in_3'] = df_part_1_u_b_count_in_3['behavior_type_4'] * (
df_part_1_u_b_count_in_3['cumcount'] + 1)
df_part_1_u_b_count_in_3 = df_part_1_u_b_count_in_3.groupby('user_id').agg({'u_b1_count_in_3': np.sum,
'u_b2_count_in_3': np.sum,
'u_b3_count_in_3': np.sum,
'u_b4_count_in_3': np.sum})
df_part_1_u_b_count_in_3.reset_index(inplace=True)
df_part_1_u_b_count_in_3['u_b_count_in_3'] = df_part_1_u_b_count_in_3[['u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3']].apply(lambda x: x.sum(),
axis=1)
# u_b_count_in_1
df_part_1_in_1 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-27')]
df_part_1_in_1['cumcount'] = df_part_1_in_1.groupby(['user_id', 'behavior_type']).cumcount()
df_part_1_u_b_count_in_1 = df_part_1_in_1.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_1_u_b_count_in_1 = pd.get_dummies(df_part_1_u_b_count_in_1['behavior_type']).join(
df_part_1_u_b_count_in_1[['user_id', 'cumcount']])
df_part_1_u_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_u_b_count_in_1['u_b1_count_in_1'] = df_part_1_u_b_count_in_1['behavior_type_1'] * (
df_part_1_u_b_count_in_1['cumcount'] + 1)
df_part_1_u_b_count_in_1['u_b2_count_in_1'] = df_part_1_u_b_count_in_1['behavior_type_2'] * (
df_part_1_u_b_count_in_1['cumcount'] + 1)
df_part_1_u_b_count_in_1['u_b3_count_in_1'] = df_part_1_u_b_count_in_1['behavior_type_3'] * (
df_part_1_u_b_count_in_1['cumcount'] + 1)
df_part_1_u_b_count_in_1['u_b4_count_in_1'] = df_part_1_u_b_count_in_1['behavior_type_4'] * (
df_part_1_u_b_count_in_1['cumcount'] + 1)
df_part_1_u_b_count_in_1 = df_part_1_u_b_count_in_1.groupby('user_id').agg({'u_b1_count_in_1': np.sum,
'u_b2_count_in_1': np.sum,
'u_b3_count_in_1': np.sum,
'u_b4_count_in_1': np.sum})
df_part_1_u_b_count_in_1.reset_index(inplace=True)
df_part_1_u_b_count_in_1['u_b_count_in_1'] = df_part_1_u_b_count_in_1[['u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1']].apply(lambda x: x.sum(),
axis=1)
# merge the result of count_in_6, count_in_3, count_in_1
df_part_1_u_b_count = pd.merge(df_part_1_u_b_count_in_6,
df_part_1_u_b_count_in_3, on=['user_id'], how='left').fillna(0)
df_part_1_u_b_count = pd.merge(df_part_1_u_b_count,
df_part_1_u_b_count_in_1, on=['user_id'], how='left').fillna(0)
df_part_1_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']] = df_part_1_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']].astype(int)
# u_b4_rate
df_part_1_u_b_count['u_b4_rate'] = df_part_1_u_b_count['u_b4_count_in_6'] / df_part_1_u_b_count['u_b_count_in_6']
# u_b4_diff_time
df_part_1 = df_part_1.sort_values(by=['user_id', 'time'])
df_part_1_u_b4_time = df_part_1[df_part_1['behavior_type'] == 4].drop_duplicates(['user_id'], 'first')[
['user_id', 'time']]
df_part_1_u_b4_time.columns = ['user_id', 'b4_first_time']
df_part_1_u_b_time = df_part_1.drop_duplicates(['user_id'], 'first')[['user_id', 'time']]
df_part_1_u_b_time.columns = ['user_id', 'b_first_time']
df_part_1_u_b_b4_time = pd.merge(df_part_1_u_b_time, df_part_1_u_b4_time, on=['user_id'])
df_part_1_u_b_b4_time['u_b4_diff_time'] = df_part_1_u_b_b4_time['b4_first_time'] - df_part_1_u_b_b4_time['b_first_time']
df_part_1_u_b_b4_time = df_part_1_u_b_b4_time[['user_id', 'u_b4_diff_time']]
df_part_1_u_b_b4_time['u_b4_diff_hours'] = df_part_1_u_b_b4_time['u_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
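# The conversion above flattens a Timedelta to whole hours by hand; a hedged,
# minimal check of the arithmetic (illustrative only):
def _demo_timedelta_to_hours():
    delta = pd.Timedelta(days=2, hours=5, minutes=30)
    return delta.days * 24 + delta.seconds // 3600  # -> 53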
# generating feature set U
f_U_part_1 = pd.merge(df_part_1_u_b_count,
df_part_1_u_b_b4_time,
on=['user_id'], how='left')[['user_id',
'u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1',
'u_b4_rate',
'u_b4_diff_hours']]
# write to csv file
f_U_part_1 = f_U_part_1.round({'u_b4_rate': 3})
f_U_part_1.to_csv(path_df_part_1_U, index = False)
###########################################
'''Step 1.2 feature data set I of df_part_1
(1)
i_u_count_in_6
i_u_count_in_3
i_u_count_in_1
(2)
i_b1_count_in_6
i_b2_count_in_6
i_b3_count_in_6
i_b4_count_in_6
i_b_count_in_6
i_b1_count_in_3
i_b2_count_in_3
i_b3_count_in_3
i_b4_count_in_3
i_b_count_in_3
i_b1_count_in_1
i_b2_count_in_1
i_b3_count_in_1
i_b4_count_in_1
i_b_count_in_1
(3)
i_b4_rate (in_6)
i_b4_diff_hours (in_6)
'''
#loading data
path_df = path_df_part_1
try:
df_part_1 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
except Exception as e:
print(e)
# i_u_count_in_6
df_part_1_in_6 = df_part_1.drop_duplicates(['item_id', 'user_id'])
df_part_1_in_6['i_u_count_in_6'] = df_part_1_in_6.groupby('item_id').cumcount() + 1
df_part_1_i_u_count_in_6 = df_part_1_in_6.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_6']]
# i_u_count_in_3
df_part_1_in_3 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-25')].drop_duplicates(['item_id', 'user_id'])
df_part_1_in_3['i_u_count_in_3'] = df_part_1_in_3.groupby('item_id').cumcount() + 1
df_part_1_i_u_count_in_3 = df_part_1_in_3.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_3']]
# i_u_count_in_1
df_part_1_in_1 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-27')].drop_duplicates(['item_id', 'user_id'])
df_part_1_in_1['i_u_count_in_1'] = df_part_1_in_1.groupby('item_id').cumcount() + 1
df_part_1_i_u_count_in_1 = df_part_1_in_1.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_1']]
# merge for generation of i_u_count
df_part_1_i_u_count = pd.merge(df_part_1_i_u_count_in_6,
df_part_1_i_u_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_1_i_u_count = pd.merge(df_part_1_i_u_count,
df_part_1_i_u_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_1_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']] = df_part_1_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']].astype(int)
# i_b_count_in_6
df_part_1['cumcount'] = df_part_1.groupby(['item_id', 'behavior_type']).cumcount()
df_part_1_i_b_count_in_6 = df_part_1.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_1_i_b_count_in_6 = pd.get_dummies(df_part_1_i_b_count_in_6['behavior_type']).join(
df_part_1_i_b_count_in_6[['item_id', 'cumcount']])
df_part_1_i_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_i_b_count_in_6['i_b1_count_in_6'] = df_part_1_i_b_count_in_6['behavior_type_1'] * (
df_part_1_i_b_count_in_6['cumcount'] + 1)
df_part_1_i_b_count_in_6['i_b2_count_in_6'] = df_part_1_i_b_count_in_6['behavior_type_2'] * (
df_part_1_i_b_count_in_6['cumcount'] + 1)
df_part_1_i_b_count_in_6['i_b3_count_in_6'] = df_part_1_i_b_count_in_6['behavior_type_3'] * (
df_part_1_i_b_count_in_6['cumcount'] + 1)
df_part_1_i_b_count_in_6['i_b4_count_in_6'] = df_part_1_i_b_count_in_6['behavior_type_4'] * (
df_part_1_i_b_count_in_6['cumcount'] + 1)
df_part_1_i_b_count_in_6 = df_part_1_i_b_count_in_6[['item_id',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6']]
df_part_1_i_b_count_in_6 = df_part_1_i_b_count_in_6.groupby('item_id').agg({'i_b1_count_in_6': np.sum,
'i_b2_count_in_6': np.sum,
'i_b3_count_in_6': np.sum,
'i_b4_count_in_6': np.sum})
df_part_1_i_b_count_in_6.reset_index(inplace=True)
df_part_1_i_b_count_in_6['i_b_count_in_6'] = df_part_1_i_b_count_in_6['i_b1_count_in_6'] + \
df_part_1_i_b_count_in_6['i_b2_count_in_6'] + \
df_part_1_i_b_count_in_6['i_b3_count_in_6'] + \
df_part_1_i_b_count_in_6['i_b4_count_in_6']
# i_b_count_in_3
df_part_1_in_3 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-25')]
df_part_1_in_3['cumcount'] = df_part_1_in_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_1_i_b_count_in_3 = df_part_1_in_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
    ['item_id', 'behavior_type', 'cumcount']]
df_part_1_i_b_count_in_3 = pd.get_dummies(df_part_1_i_b_count_in_3['behavior_type']).join(
df_part_1_i_b_count_in_3[['item_id', 'cumcount']])
df_part_1_i_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_i_b_count_in_3['i_b1_count_in_3'] = df_part_1_i_b_count_in_3['behavior_type_1'] * (
df_part_1_i_b_count_in_3['cumcount'] + 1)
df_part_1_i_b_count_in_3['i_b2_count_in_3'] = df_part_1_i_b_count_in_3['behavior_type_2'] * (
df_part_1_i_b_count_in_3['cumcount'] + 1)
df_part_1_i_b_count_in_3['i_b3_count_in_3'] = df_part_1_i_b_count_in_3['behavior_type_3'] * (
df_part_1_i_b_count_in_3['cumcount'] + 1)
df_part_1_i_b_count_in_3['i_b4_count_in_3'] = df_part_1_i_b_count_in_3['behavior_type_4'] * (
df_part_1_i_b_count_in_3['cumcount'] + 1)
df_part_1_i_b_count_in_3 = df_part_1_i_b_count_in_3[['item_id',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3']]
df_part_1_i_b_count_in_3 = df_part_1_i_b_count_in_3.groupby('item_id').agg({'i_b1_count_in_3': np.sum,
'i_b2_count_in_3': np.sum,
'i_b3_count_in_3': np.sum,
'i_b4_count_in_3': np.sum})
df_part_1_i_b_count_in_3.reset_index(inplace=True)
df_part_1_i_b_count_in_3['i_b_count_in_3'] = df_part_1_i_b_count_in_3['i_b1_count_in_3'] + \
df_part_1_i_b_count_in_3['i_b2_count_in_3'] + \
df_part_1_i_b_count_in_3['i_b3_count_in_3'] + \
df_part_1_i_b_count_in_3['i_b4_count_in_3']
# i_b_count_in_1
df_part_1_in_1 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-27')]
df_part_1_in_1['cumcount'] = df_part_1_in_1.groupby(['item_id', 'behavior_type']).cumcount()
df_part_1_i_b_count_in_1 = df_part_1_in_1.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_1_i_b_count_in_1 = pd.get_dummies(df_part_1_i_b_count_in_1['behavior_type']).join(
df_part_1_i_b_count_in_1[['item_id', 'cumcount']])
df_part_1_i_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_i_b_count_in_1['i_b1_count_in_1'] = df_part_1_i_b_count_in_1['behavior_type_1'] * (
df_part_1_i_b_count_in_1['cumcount'] + 1)
df_part_1_i_b_count_in_1['i_b2_count_in_1'] = df_part_1_i_b_count_in_1['behavior_type_2'] * (
df_part_1_i_b_count_in_1['cumcount'] + 1)
df_part_1_i_b_count_in_1['i_b3_count_in_1'] = df_part_1_i_b_count_in_1['behavior_type_3'] * (
df_part_1_i_b_count_in_1['cumcount'] + 1)
df_part_1_i_b_count_in_1['i_b4_count_in_1'] = df_part_1_i_b_count_in_1['behavior_type_4'] * (
df_part_1_i_b_count_in_1['cumcount'] + 1)
df_part_1_i_b_count_in_1 = df_part_1_i_b_count_in_1[['item_id',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1']]
df_part_1_i_b_count_in_1 = df_part_1_i_b_count_in_1.groupby('item_id').agg({'i_b1_count_in_1': np.sum,
'i_b2_count_in_1': np.sum,
'i_b3_count_in_1': np.sum,
'i_b4_count_in_1': np.sum})
df_part_1_i_b_count_in_1.reset_index(inplace=True)
df_part_1_i_b_count_in_1['i_b_count_in_1'] = df_part_1_i_b_count_in_1['i_b1_count_in_1'] + \
df_part_1_i_b_count_in_1['i_b2_count_in_1'] + \
df_part_1_i_b_count_in_1['i_b3_count_in_1'] + \
df_part_1_i_b_count_in_1['i_b4_count_in_1']
# merge for generation of i_b_count
df_part_1_i_b_count = pd.merge(df_part_1_i_b_count_in_6,
df_part_1_i_b_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_1_i_b_count = pd.merge(df_part_1_i_b_count,
df_part_1_i_b_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_1_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']] = df_part_1_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']].astype(int)
# i_b4_rate
df_part_1_i_b_count['i_b4_rate'] = df_part_1_i_b_count['i_b4_count_in_6'] / df_part_1_i_b_count['i_b_count_in_6']
# i_b4_diff_time
df_part_1 = df_part_1.sort_values(by=['item_id', 'time'])
df_part_1_i_b4_time = df_part_1[df_part_1['behavior_type'] == 4].drop_duplicates(['item_id'], 'first')[
['item_id', 'time']]
df_part_1_i_b4_time.columns = ['item_id', 'b4_first_time']
df_part_1_i_b_time = df_part_1.drop_duplicates(['item_id'], 'first')[['item_id', 'time']]
df_part_1_i_b_time.columns = ['item_id', 'b_first_time']
df_part_1_i_b_b4_time = pd.merge(df_part_1_i_b_time, df_part_1_i_b4_time, on=['item_id'])
df_part_1_i_b_b4_time['i_b4_diff_time'] = df_part_1_i_b_b4_time['b4_first_time'] - df_part_1_i_b_b4_time['b_first_time']
df_part_1_i_b_b4_time['i_b4_diff_hours'] = df_part_1_i_b_b4_time['i_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_1_i_b_b4_time = df_part_1_i_b_b4_time[['item_id', 'i_b4_diff_hours']]
# generating feature set I
f_I_part_1 = pd.merge(df_part_1_i_b_count,
df_part_1_i_b_b4_time,
on=['item_id'], how='left')
f_I_part_1 = pd.merge(f_I_part_1,
df_part_1_i_u_count,
on=['item_id'], how='left')[['item_id',
'i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1',
'i_b4_rate',
'i_b4_diff_hours']]
# write to csv file
f_I_part_1 = f_I_part_1.round({'i_b4_rate': 3})
f_I_part_1.to_csv(path_df_part_1_I, index=False)
###########################################
'''Step 1.3 feature data set C of df_part_1
(1)
c_u_count_in_6
c_u_count_in_3
c_u_count_in_1
(2)
c_b1_count_in_6
c_b2_count_in_6
c_b3_count_in_6
c_b4_count_in_6
c_b_count_in_6
c_b1_count_in_3
c_b2_count_in_3
c_b3_count_in_3
c_b4_count_in_3
c_b_count_in_3
c_b1_count_in_1
c_b2_count_in_1
c_b3_count_in_1
c_b4_count_in_1
c_b_count_in_1
(3)
c_b4_rate (in_6)
c_b4_diff_hours (in_6)
'''
#loading data
path_df = path_df_part_1
try:
df_part_1 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_1.columns = ['time','user_id','item_id','behavior_type','item_category']
except Exception as e:
print(e)
# c_u_count_in_6
df_part_1_in_6 = df_part_1.drop_duplicates(['item_category', 'user_id'])
df_part_1_in_6['c_u_count_in_6'] = df_part_1_in_6.groupby('item_category').cumcount() + 1
df_part_1_c_u_count_in_6 = df_part_1_in_6.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_6']]
# c_u_count_in_3
df_part_1_in_3 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-25')].drop_duplicates(
['item_category', 'user_id'])
df_part_1_in_3['c_u_count_in_3'] = df_part_1_in_3.groupby('item_category').cumcount() + 1
df_part_1_c_u_count_in_3 = df_part_1_in_3.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_3']]
# c_u_count_in_1
df_part_1_in_1 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-27')].drop_duplicates(
['item_category', 'user_id'])
df_part_1_in_1['c_u_count_in_1'] = df_part_1_in_1.groupby('item_category').cumcount() + 1
df_part_1_c_u_count_in_1 = df_part_1_in_1.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_1']]
df_part_1_c_u_count = pd.merge(df_part_1_c_u_count_in_6, df_part_1_c_u_count_in_3, on=['item_category'],
how='left').fillna(0)
df_part_1_c_u_count = pd.merge(df_part_1_c_u_count, df_part_1_c_u_count_in_1, on=['item_category'], how='left').fillna(
0)
df_part_1_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']] = df_part_1_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']].astype(int)
# c_b_count_in_6
df_part_1['cumcount'] = df_part_1.groupby(['item_category', 'behavior_type']).cumcount()
df_part_1_c_b_count_in_6 = df_part_1.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_1_c_b_count_in_6 = pd.get_dummies(df_part_1_c_b_count_in_6['behavior_type']).join(
df_part_1_c_b_count_in_6[['item_category', 'cumcount']])
df_part_1_c_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_c_b_count_in_6['c_b1_count_in_6'] = df_part_1_c_b_count_in_6['behavior_type_1'] * (
df_part_1_c_b_count_in_6['cumcount'] + 1)
df_part_1_c_b_count_in_6['c_b2_count_in_6'] = df_part_1_c_b_count_in_6['behavior_type_2'] * (
df_part_1_c_b_count_in_6['cumcount'] + 1)
df_part_1_c_b_count_in_6['c_b3_count_in_6'] = df_part_1_c_b_count_in_6['behavior_type_3'] * (
df_part_1_c_b_count_in_6['cumcount'] + 1)
df_part_1_c_b_count_in_6['c_b4_count_in_6'] = df_part_1_c_b_count_in_6['behavior_type_4'] * (
df_part_1_c_b_count_in_6['cumcount'] + 1)
df_part_1_c_b_count_in_6 = df_part_1_c_b_count_in_6[['item_category',
'c_b1_count_in_6',
'c_b2_count_in_6',
'c_b3_count_in_6',
'c_b4_count_in_6']]
df_part_1_c_b_count_in_6 = df_part_1_c_b_count_in_6.groupby('item_category').agg({'c_b1_count_in_6': np.sum,
'c_b2_count_in_6': np.sum,
'c_b3_count_in_6': np.sum,
'c_b4_count_in_6': np.sum})
df_part_1_c_b_count_in_6.reset_index(inplace=True)
df_part_1_c_b_count_in_6['c_b_count_in_6'] = df_part_1_c_b_count_in_6['c_b1_count_in_6'] + \
df_part_1_c_b_count_in_6['c_b2_count_in_6'] + \
df_part_1_c_b_count_in_6['c_b3_count_in_6'] + \
df_part_1_c_b_count_in_6['c_b4_count_in_6']
# c_b_count_in_3
df_part_1_in_3 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-25')]
df_part_1_in_3['cumcount'] = df_part_1_in_3.groupby(['item_category', 'behavior_type']).cumcount()
df_part_1_c_b_count_in_3 = df_part_1_in_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_1_c_b_count_in_3 = pd.get_dummies(df_part_1_c_b_count_in_3['behavior_type']).join(
df_part_1_c_b_count_in_3[['item_category', 'cumcount']])
df_part_1_c_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_c_b_count_in_3['c_b1_count_in_3'] = df_part_1_c_b_count_in_3['behavior_type_1'] * (
df_part_1_c_b_count_in_3['cumcount'] + 1)
df_part_1_c_b_count_in_3['c_b2_count_in_3'] = df_part_1_c_b_count_in_3['behavior_type_2'] * (
df_part_1_c_b_count_in_3['cumcount'] + 1)
df_part_1_c_b_count_in_3['c_b3_count_in_3'] = df_part_1_c_b_count_in_3['behavior_type_3'] * (
df_part_1_c_b_count_in_3['cumcount'] + 1)
df_part_1_c_b_count_in_3['c_b4_count_in_3'] = df_part_1_c_b_count_in_3['behavior_type_4'] * (
df_part_1_c_b_count_in_3['cumcount'] + 1)
df_part_1_c_b_count_in_3 = df_part_1_c_b_count_in_3[['item_category',
'c_b1_count_in_3',
'c_b2_count_in_3',
'c_b3_count_in_3',
'c_b4_count_in_3']]
df_part_1_c_b_count_in_3 = df_part_1_c_b_count_in_3.groupby('item_category').agg({'c_b1_count_in_3': np.sum,
'c_b2_count_in_3': np.sum,
'c_b3_count_in_3': np.sum,
'c_b4_count_in_3': np.sum})
df_part_1_c_b_count_in_3.reset_index(inplace=True)
df_part_1_c_b_count_in_3['c_b_count_in_3'] = df_part_1_c_b_count_in_3['c_b1_count_in_3'] + \
df_part_1_c_b_count_in_3['c_b2_count_in_3'] + \
df_part_1_c_b_count_in_3['c_b3_count_in_3'] + \
df_part_1_c_b_count_in_3['c_b4_count_in_3']
# c_b_count_in_1
df_part_1_in_1 = df_part_1[df_part_1['time'] >= np.datetime64('2014-11-27')]
df_part_1_in_1['cumcount'] = df_part_1_in_1.groupby(['item_category', 'behavior_type']).cumcount()
df_part_1_c_b_count_in_1 = df_part_1_in_1.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_1_c_b_count_in_1 = pd.get_dummies(df_part_1_c_b_count_in_1['behavior_type']).join(
df_part_1_c_b_count_in_1[['item_category', 'cumcount']])
df_part_1_c_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_1_c_b_count_in_1['c_b1_count_in_1'] = df_part_1_c_b_count_in_1['behavior_type_1'] * (
df_part_1_c_b_count_in_1['cumcount'] + 1)
df_part_1_c_b_count_in_1['c_b2_count_in_1'] = df_part_1_c_b_count_in_1['behavior_type_2'] * (
df_part_1_c_b_count_in_1['cumcount'] + 1)
df_part_1_c_b_count_in_1['c_b3_count_in_1'] = df_part_1_c_b_count_in_1['behavior_type_3'] * (
df_part_1_c_b_count_in_1['cumcount'] + 1)
df_part_1_c_b_count_in_1['c_b4_count_in_1'] = df_part_1_c_b_count_in_1['behavior_type_4'] * (
df_part_1_c_b_count_in_1['cumcount'] + 1)
df_part_1_c_b_count_in_1 = df_part_1_c_b_count_in_1[['item_category',
'c_b1_count_in_1',
'c_b2_count_in_1',
'c_b3_count_in_1',
'c_b4_count_in_1']]
df_part_1_c_b_count_in_1 = df_part_1_c_b_count_in_1.groupby('item_category').agg({'c_b1_count_in_1': np.sum,
'c_b2_count_in_1': np.sum,
'c_b3_count_in_1': np.sum,
'c_b4_count_in_1': np.sum})
df_part_1_c_b_count_in_1.reset_index(inplace=True)
df_part_1_c_b_count_in_1['c_b_count_in_1']
annotate the item.
"""
return pulumi.get(self, "comment")
@pulumi.output_type
class LoadBalancerMonitorHeader(dict):
def __init__(__self__, *,
header: str,
values: Sequence[str]):
"""
:param str header: The header name.
:param Sequence[str] values: A list of string values for the header.
"""
pulumi.set(__self__, "header", header)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def header(self) -> str:
"""
The header name.
"""
return pulumi.get(self, "header")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
A list of string values for the header.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class LoadBalancerPoolLoadShedding(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "defaultPercent":
suggest = "default_percent"
elif key == "defaultPolicy":
suggest = "default_policy"
elif key == "sessionPercent":
suggest = "session_percent"
elif key == "sessionPolicy":
suggest = "session_policy"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LoadBalancerPoolLoadShedding. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LoadBalancerPoolLoadShedding.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LoadBalancerPoolLoadShedding.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
default_percent: Optional[float] = None,
default_policy: Optional[str] = None,
session_percent: Optional[float] = None,
session_policy: Optional[str] = None):
"""
:param float default_percent: Percent of traffic to shed 0 - 100.
:param str default_policy: Method of shedding traffic "", "hash" or "random".
:param float session_percent: Percent of session traffic to shed 0 - 100.
:param str session_policy: Method of shedding session traffic "" or "hash".
"""
if default_percent is not None:
pulumi.set(__self__, "default_percent", default_percent)
if default_policy is not None:
pulumi.set(__self__, "default_policy", default_policy)
if session_percent is not None:
pulumi.set(__self__, "session_percent", session_percent)
if session_policy is not None:
pulumi.set(__self__, "session_policy", session_policy)
@property
@pulumi.getter(name="defaultPercent")
def default_percent(self) -> Optional[float]:
"""
Percent of traffic to shed 0 - 100.
"""
return pulumi.get(self, "default_percent")
@property
@pulumi.getter(name="defaultPolicy")
def default_policy(self) -> Optional[str]:
"""
Method of shedding traffic "", "hash" or "random".
"""
return pulumi.get(self, "default_policy")
@property
@pulumi.getter(name="sessionPercent")
def session_percent(self) -> Optional[float]:
"""
Percent of session traffic to shed 0 - 100.
"""
return pulumi.get(self, "session_percent")
@property
@pulumi.getter(name="sessionPolicy")
def session_policy(self) -> Optional[str]:
"""
Method of shedding session traffic "" or "hash".
"""
return pulumi.get(self, "session_policy")
@pulumi.output_type
class LoadBalancerPoolOrigin(dict):
def __init__(__self__, *,
address: str,
name: str,
enabled: Optional[bool] = None,
headers: Optional[Sequence['outputs.LoadBalancerPoolOriginHeader']] = None,
weight: Optional[float] = None):
"""
:param str address: The IP address (IPv4 or IPv6) of the origin, or the publicly addressable hostname. Hostnames entered here should resolve directly to the origin, and not be a hostname proxied by Cloudflare.
:param str name: A human-identifiable name for the origin.
:param bool enabled: Whether to enable (the default) this origin within the Pool. Disabled origins will not receive traffic and are excluded from health checks. The origin will only be disabled for the current pool.
:param Sequence['LoadBalancerPoolOriginHeaderArgs'] headers: HTTP headers (such as a Host header) to send to this origin, each entry carrying a header name and its list of values.
:param float weight: The weight (0.01 - 1.00) of this origin, relative to other origins in the pool. Equal values mean equal weighting. A weight of 0 means traffic will not be sent to this origin, but health is still checked. Default: 1.
"""
pulumi.set(__self__, "address", address)
pulumi.set(__self__, "name", name)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if headers is not None:
pulumi.set(__self__, "headers", headers)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def address(self) -> str:
"""
The IP address (IPv4 or IPv6) of the origin, or the publicly addressable hostname. Hostnames entered here should resolve directly to the origin, and not be a hostname proxied by Cloudflare.
"""
return pulumi.get(self, "address")
@property
@pulumi.getter
def name(self) -> str:
"""
A human-identifiable name for the origin.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Whether to enable (the default) this origin within the Pool. Disabled origins will not receive traffic and are excluded from health checks. The origin will only be disabled for the current pool.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def headers(self) -> Optional[Sequence['outputs.LoadBalancerPoolOriginHeader']]:
"""
HTTP headers to send to this origin, each with a header name and a list of values.
"""
return pulumi.get(self, "headers")
@property
@pulumi.getter
def weight(self) -> Optional[float]:
"""
The weight (0.01 - 1.00) of this origin, relative to other origins in the pool. Equal values mean equal weighting. A weight of 0 means traffic will not be sent to this origin, but health is still checked. Default: 1.
"""
return pulumi.get(self, "weight")
@pulumi.output_type
class LoadBalancerPoolOriginHeader(dict):
def __init__(__self__, *,
header: str,
values: Sequence[str]):
"""
:param str header: The header name.
:param Sequence[str] values: A list of string values for the header.
"""
pulumi.set(__self__, "header", header)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def header(self) -> str:
"""
The header name.
"""
return pulumi.get(self, "header")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
A list of string values for the header.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class LoadBalancerPopPool(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "poolIds":
suggest = "pool_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LoadBalancerPopPool. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LoadBalancerPopPool.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LoadBalancerPopPool.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
pool_ids: Sequence[str],
pop: str):
"""
:param Sequence[str] pool_ids: A list of pool IDs in failover priority to use for traffic reaching the given PoP.
:param str pop: A 3-letter code for the Point-of-Presence. Allowed values can be found in the list of datacenters on the [status page](https://www.cloudflarestatus.com/). Multiple entries should not be specified with the same PoP.
"""
pulumi.set(__self__, "pool_ids", pool_ids)
pulumi.set(__self__, "pop", pop)
@property
@pulumi.getter(name="poolIds")
def pool_ids(self) -> Sequence[str]:
"""
A list of pool IDs in failover priority to use for traffic reaching the given PoP.
"""
return pulumi.get(self, "pool_ids")
@property
@pulumi.getter
def pop(self) -> str:
"""
A 3-letter code for the Point-of-Presence. Allowed values can be found in the list of datacenters on the [status page](https://www.cloudflarestatus.com/). Multiple entries should not be specified with the same PoP.
"""
return pulumi.get(self, "pop")
@pulumi.output_type
class LoadBalancerRegionPool(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "poolIds":
suggest = "pool_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LoadBalancerRegionPool. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LoadBalancerRegionPool.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LoadBalancerRegionPool.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
pool_ids: Sequence[str],
region: str):
"""
:param Sequence[str] pool_ids: A list of pool IDs in failover priority to use for traffic reaching the given region.
:param str region: A region code which must be in the list defined [here](https://support.cloudflare.com/hc/en-us/articles/115000540888-Load-Balancing-Geographic-Regions). Multiple entries should not be specified with the same region.
"""
pulumi.set(__self__, "pool_ids", pool_ids)
pulumi.set(__self__, "region", region)
@property
@pulumi.getter(name="poolIds")
def pool_ids(self) -> Sequence[str]:
"""
A list of pool IDs in failover priority to use for traffic reaching the given region.
"""
return pulumi.get(self, "pool_ids")
@property
@pulumi.getter
def region(self) -> str:
"""
A region code which must be in the list defined [here](https://support.cloudflare.com/hc/en-us/articles/115000540888-Load-Balancing-Geographic-Regions). Multiple entries should not be specified with the same region.
"""
return pulumi.get(self, "region")
@pulumi.output_type
class LoadBalancerRule(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "fixedResponse":
suggest = "fixed_response"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LoadBalancerRule. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LoadBalancerRule.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LoadBalancerRule.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
condition: Optional[str] = None,
disabled: Optional[bool] = None,
fixed_response: Optional['outputs.LoadBalancerRuleFixedResponse'] = None,
overrides: Optional[Sequence['outputs.LoadBalancerRuleOverride']] = None,
priority: Optional[int] = None,
terminates: Optional[bool] = None):
"""
:param str name: Human readable name for this rule.
:param str condition: The statement to evaluate to determine if this rule's effects should be applied. An empty condition is always true. See [load balancing rules](https://developers.cloudflare.com/load-balancing/understand-basics/load-balancing-rules).
:param bool disabled: A disabled rule will not be executed.
:param 'LoadBalancerRuleFixedResponseArgs' fixed_response: Settings for an HTTP response to return directly to the eyeball if the condition is true. Note: overrides or fixed_response must be set. See the field documentation below.
:param Sequence['LoadBalancerRuleOverrideArgs'] overrides: The Load Balancer settings to alter if this rule's condition is true. Note: overrides or fixed_response must be set. See the field documentation below.
"""
Distributed Cloud Emulator (dcemulator)
(c) 2015 by <NAME> <<EMAIL>>
"""
import logging
import site
import time
from subprocess import Popen
import os
import re
import urllib2
from functools import partial
from mininet.net import Containernet
from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
from mininet.cli import CLI
from mininet.link import TCLink
import networkx as nx
from emuvim.dcemulator.monitoring import DCNetworkMonitor
from emuvim.dcemulator.node import Datacenter, EmulatorCompute
from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
class DCNetwork(Containernet):
"""
Wraps the original Mininet/Containernet class and provides
methods to add data centers, switches, etc.
This class is used by topology definition scripts.
"""
def __init__(self, controller=RemoteController, monitor=False,
enable_learning = True, # in case of RemoteController (Ryu), learning switch behavior can be turned off/on
dc_emulation_max_cpu=1.0, # fraction of overall CPU time for emulation
dc_emulation_max_mem=512, # emulation max mem in MB
**kwargs):
"""
Create an extended version of a Containernet network
:param dc_emulation_max_cpu: max. CPU time used by containers in data centers
:param kwargs: path through for Mininet parameters
:return:
"""
self.dcs = {}
# call original Docker.__init__ and setup default controller
Containernet.__init__(
self, switch=OVSKernelSwitch, controller=controller, **kwargs)
# Ryu management
self.ryu_process = None
if controller == RemoteController:
# start Ryu controller
self.startRyu(learning_switch=enable_learning)
# add the specified controller
self.addController('c0', controller=controller)
# graph of the complete DC network
self.DCNetwork_graph = nx.MultiDiGraph()
# initialize pool of vlan tags to setup the SDN paths
self.vlans = range(4096)[::-1]
# link to Ryu REST_API
ryu_ip = '0.0.0.0'
ryu_port = '8080'
self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
# monitoring agent
if monitor:
self.monitor_agent = DCNetworkMonitor(self)
else:
self.monitor_agent = None
# initialize resource model registrar
self.rm_registrar = ResourceModelRegistrar(
dc_emulation_max_cpu, dc_emulation_max_mem)
def addDatacenter(self, label, metadata={}, resource_log_path=None):
"""
Create and add a logical cloud data center to the network.
"""
if label in self.dcs:
raise Exception("Data center label already exists: %s" % label)
dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
dc.net = self # set reference to network
self.dcs[label] = dc
dc.create() # finally create the data center in our Mininet instance
logging.info("added data center: %s" % label)
return dc
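    # Hedged usage sketch for topology scripts (names are illustrative):
    #   net = DCNetwork(monitor=False, enable_learning=True)
    #   dc1 = net.addDatacenter("dc1")
    #   dc2 = net.addDatacenter("dc2")
    #   s1 = net.addSwitch("s1")
    #   net.addLink(dc1, s1)
    #   net.addLink(s1, dc2)
    #   net.start(); net.CLI(); net.stop()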
def addLink(self, node1, node2, **params):
"""
Able to handle Datacenter objects as link
end points.
"""
assert node1 is not None
assert node2 is not None
logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
# ensure type of node1
if isinstance( node1, basestring ):
if node1 in self.dcs:
node1 = self.dcs[node1].switch
if isinstance( node1, Datacenter ):
node1 = node1.switch
# ensure type of node2
if isinstance( node2, basestring ):
if node2 in self.dcs:
node2 = self.dcs[node2].switch
if isinstance( node2, Datacenter ):
node2 = node2.switch
# try to give containers a default IP
if isinstance( node1, Docker ):
if "params1" not in params:
params["params1"] = {}
if "ip" not in params["params1"]:
params["params1"]["ip"] = self.getNextIp()
if isinstance( node2, Docker ):
if "params2" not in params:
params["params2"] = {}
if "ip" not in params["params2"]:
params["params2"]["ip"] = self.getNextIp()
# ensure that we allow TCLinks between data centers
# TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
# see Containernet issue: https://github.com/mpeuster/containernet/issues/3
if "cls" not in params:
params["cls"] = TCLink
link = Containernet.addLink(self, node1, node2, **params)
# try to give container interfaces a default id
node1_port_id = node1.ports[link.intf1]
if isinstance(node1, Docker):
if "id" in params["params1"]:
node1_port_id = params["params1"]["id"]
node1_port_name = link.intf1.name
node2_port_id = node2.ports[link.intf2]
if isinstance(node2, Docker):
if "id" in params["params2"]:
node2_port_id = params["params2"]["id"]
node2_port_name = link.intf2.name
# add edge and assigned port number to graph in both directions between node1 and node2
# port_id: id given in descriptor (if available, otherwise same as port)
# port: portnumber assigned by Containernet
attr_dict = {}
# possible weight metrics allowed by TClink class:
weight_metrics = ['bw', 'delay', 'jitter', 'loss']
edge_attributes = [p for p in params if p in weight_metrics]
for attr in edge_attributes:
# if delay: strip ms (need number as weight in graph)
match = re.search('([0-9]*\.?[0-9]+)', params[attr])
if match:
attr_number = match.group(1)
else:
attr_number = None
attr_dict[attr] = attr_number
attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
'src_port_name': node1_port_name,
'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
'dst_port_name': node2_port_name}
attr_dict2.update(attr_dict)
self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
'src_port_name': node2_port_name,
'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
'dst_port_name': node1_port_name}
attr_dict2.update(attr_dict)
self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
return link
def addDocker( self, label, **params ):
"""
Wrapper for addDocker method to use custom container class.
"""
self.DCNetwork_graph.add_node(label)
return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
def removeDocker( self, label, **params ):
"""
Wrapper for removeDocker method to update graph.
"""
self.DCNetwork_graph.remove_node(label)
return Containernet.removeDocker(self, label, **params)
def addSwitch( self, name, add_to_graph=True, **params ):
"""
Wrapper for addSwitch method to store switch also in graph.
"""
if add_to_graph:
self.DCNetwork_graph.add_node(name)
return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
def getAllContainers(self):
"""
Returns a list with all containers within all data centers.
"""
all_containers = []
for dc in self.dcs.itervalues():
all_containers += dc.listCompute()
return all_containers
def start(self):
# start
for dc in self.dcs.itervalues():
dc.start()
Containernet.start(self)
def stop(self):
# stop the monitor agent
if self.monitor_agent is not None:
self.monitor_agent.stop()
# stop emulator net
Containernet.stop(self)
# stop Ryu controller
self.stopRyu()
def CLI(self):
CLI(self)
# to remove chain do setChain( src, dst, cmd='del-flows')
def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
cmd = kwargs.get('cmd')
if cmd == 'add-flow':
ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
elif cmd == 'del-flows': # TODO: del-flow to be implemented
ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
ret = ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
else:
ret = "Command unknown"
return ret
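    # Hedged example of chaining two VNFs bidirectionally (names illustrative):
    #   net.setChain('vnf1', 'vnf2',
    #                vnf_src_interface='port-a', vnf_dst_interface='port-b',
    #                cmd='add-flow', bidirectional=True, cookie=10)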
def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
# TODO: this needs to be cleaned up
#check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
vnf_src_interface = link_dict[0]['src_port_id']
for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if link_dict[link]['src_port_id'] == vnf_src_interface:
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
break
if vnf_dst_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
vnf_dst_interface = link_dict[0]['dst_port_id']
vnf_dst_name = vnf_dst_name.split(':')[0]
for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
for link in link_dict:
if link_dict[link]['dst_port_id'] == vnf_dst_interface:
# found the right link and connected switch
dst_sw = connected_sw
dst_sw_outport_nr = link_dict[link]['src_port_nr']
break
# get shortest path
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
except:
# nx raises NetworkXNoPath; a NameError here also means no matching
# link/switch was found in the loops above
logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
# choose free vlan if path contains more than 1 switch
cmd = kwargs.get('cmd')
vlan = None
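# a fresh VLAN tag distinguishes this chain's flows from other chains that
# traverse the same intermediate switches; it is handed to the flow-entry
# helpers below via kwargs['vlan'] (only needed when the path spans more
# than one switch)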
if cmd == 'add-flow':
if len(path) > 1:
vlan = self.vlans.pop()
for i in range(0,len(path)):
current_node = self.getNodeByName(current_hop)
if path.index(current_hop) < len(path)-1:
next_hop = path[path.index(current_hop)+1]
else:
#last switch reached
next_hop = vnf_dst_name
next_node = self.getNodeByName(next_hop)
if next_hop == vnf_dst_name:
switch_outport_nr = dst_sw_outport_nr
logging.info("end node reached: {0}".format(vnf_dst_name))
elif not isinstance( next_node, OVSSwitch ):
logging.info("Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
# take first link between switches by default
index_edge_out = 0
switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
# set of entry via ovs-ofctl
if isinstance( current_node, OVSSwitch ):
kwargs['vlan'] = vlan
kwargs['path'] = path
kwargs['current_hop'] = current_hop
if self.controller == RemoteController:
## set flow entry via ryu rest api
self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
else:
## set flow entry via ovs-ofctl
self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
# take first link between switches by default
if isinstance( next_node, OVSSwitch ):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
return "path {2} between {0} and {1}".format(vnf_src_name, vnf_dst_name, cmd)
def _set_flow_entry_ryu_rest(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
match = 'in_port=%s' % switch_inport_nr
cookie = kwargs.get('cookie')
match_input = kwargs.get('match')
cmd = kwargs.get('cmd')
path = kwargs.get('path')
current_hop = kwargs.get('current_hop')
vlan = kwargs.get('vlan')
s = ','
if match_input:
match = s.join([match, match_input])
flow = {}
flow['dpid'] = int(node.dpid, 16)
if cookie:
flow['cookie'] = int(cookie)
flow['actions'] = []
# possible Ryu
import os
import time
import PIL
import numpy as np
import scipy.sparse
import cv2
from sklearn.cluster import DBSCAN
from collections import Counter
#from utils.cython_bbox import bbox_overlaps
#from utils.boxes_grid import get_boxes_grid
#import subprocess
#import cPickle
#from fast_rcnn.config import cfg
#import math
#from rpn_msr.generate_anchors import generate_anchors_bv
from utils.transform import lidar_point_to_img, lidar_cnr_to_img, camera_to_lidar_cnr, lidar_to_corners_single,\
computeCorners3D, lidar_3d_to_bv, lidar_cnr_to_3d,lidar_cnr_to_camera,corners_to_boxes
import mayavi.mlab as mlab
def load_kitti_calib(index):
"""
load projection matrix
"""
data_path = '../data/KITTI/object/'
prefix = 'training/calib'
calib_dir = os.path.join(data_path, prefix, index + '.txt')
with open(calib_dir) as fi:
lines = fi.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
def load_kitti_annotation(index):
"""
Load image and bounding boxes info from txt file in the KITTI
format.
"""
classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
num_classes = len(classes)
class_to_ind = dict(zip(classes, xrange(num_classes)))
# filename = '$Faster-RCNN_TF/data/KITTI/object/training/label_2/000000.txt'
data_path = '../data/KITTI/object/'
filename = os.path.join(data_path, 'training/label_2', index + '.txt')
# print("Loading: ", filename)
# calib
calib = load_kitti_calib(index)
Tr = calib['Tr_velo2cam']
# print 'Loading: {}'.format(filename)
with open(filename, 'r') as f:
lines = f.readlines()
num_objs = len(lines)
translation = np.zeros((num_objs, 3), dtype=np.float32)
rys = np.zeros((num_objs), dtype=np.float32)
lwh = np.zeros((num_objs, 3), dtype=np.float32)
boxes = np.zeros((num_objs, 4), dtype=np.float32)
boxes_bv = np.zeros((num_objs, 4), dtype=np.float32)
boxes3D = np.zeros((num_objs, 6), dtype=np.float32)
boxes3D_lidar = np.zeros((num_objs, 6), dtype=np.float32)
boxes3D_cam_cnr = np.zeros((num_objs, 24), dtype=np.float32)
boxes3D_corners = np.zeros((num_objs, 24), dtype=np.float32)
alphas = np.zeros((num_objs), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, num_classes), dtype=np.float32)
# print(boxes3D.shape)
# Load object bounding boxes into a data frame.
ix = -1
for line in lines:
obj = line.strip().split(' ')
try:
cls = class_to_ind[obj[0].strip()]
# print cls
except KeyError:
# skip labels we do not train on (e.g. 'DontCare', 'Van', 'Truck')
continue
# ignore objects with undetermined difficult level
# level = self._get_obj_level(obj)
# if level > 3:
# continue
ix += 1
# 0-based coordinates
alpha = float(obj[3])
x1 = float(obj[4])
y1 = float(obj[5])
x2 = float(obj[6])
y2 = float(obj[7])
h = float(obj[8])
w = float(obj[9])
l = float(obj[10])
tx = float(obj[11])
ty = float(obj[12])
tz = float(obj[13])
ry = float(obj[14])
rys[ix] = ry
lwh[ix, :] = [l, w, h]
alphas[ix] = alpha
translation[ix, :] = [tx, ty, tz]
boxes[ix, :] = [x1, y1, x2, y2]
boxes3D[ix, :] = [tx, ty, tz, l, w, h]
# convert boxes3D cam to 8 corners(cam)
boxes3D_cam_cnr_single = computeCorners3D(boxes3D[ix, :], ry)
boxes3D_cam_cnr[ix, :] = boxes3D_cam_cnr_single.reshape(24)
# convert 8 corners(cam) to 8 corners(lidar)
boxes3D_corners[ix, :] = camera_to_lidar_cnr(boxes3D_cam_cnr_single, Tr)
# convert 8 corners(cam) to lidar boxes3D
boxes3D_lidar[ix, :] = lidar_cnr_to_3d(boxes3D_corners[ix, :], lwh[ix,:])
# convert 8 corners(lidar) to lidar bird view
boxes_bv[ix, :] = lidar_3d_to_bv(boxes3D_lidar[ix, :])
# boxes3D_corners[ix, :] = lidar_to_corners_single(boxes3D_lidar[ix, :])
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
rys.resize(ix+1)
lwh.resize(ix+1, 3)
translation.resize(ix+1, 3)
alphas.resize(ix+1)
boxes.resize(ix+1, 4)
boxes_bv.resize(ix+1, 4)
boxes3D.resize(ix+1, 6)
boxes3D_lidar.resize(ix+1, 6)
boxes3D_cam_cnr.resize(ix+1, 24)
boxes3D_corners.resize(ix+1, 24)
gt_classes.resize(ix+1)
# print(self.num_classes)
overlaps.resize(ix+1, num_classes)
# if index == '000142':
# print(index)
# print(overlaps)
overlaps = scipy.sparse.csr_matrix(overlaps)
# if index == '000142':
# print(overlaps)
return {'ry' : rys,
'lwh' : lwh,
'boxes' : boxes,
'boxes_bv' : boxes_bv,
'boxes_3D_cam' : boxes3D,
'boxes_3D' : boxes3D_lidar,
'boxes3D_cam_corners' : boxes3D_cam_cnr,
'boxes_corners' : boxes3D_corners,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'xyz' : translation,
'alphas' :alphas,
'flipped' : False}
def load_image_set_index():
"""
Load the indexes listed in this dataset's image set file.
"""
image_set_file = '../data/KITTI/ImageSets/test.txt'
#image_set_file = '../start.txt'
#image_set_file = os.path.join(kitti_path, 'ImageSets',self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.rstrip('\n') for x in f.readlines()]
print 'image sets length: ', len(image_index)
return image_index
def lidar3D_path_from_index(index):
"""
Construct an image path from the image's "index" identifier.
"""
# set the prefix
prefix = 'training/velodyne'
data_path = '../data/KITTI/object/'
# lidar_bv_path = '$Faster-RCNN_TF/data/KITTI/object/training/lidar_bv/000000.npy'
lidar3D_path = os.path.join(data_path, prefix, index + '.bin')
assert os.path.exists(lidar3D_path), \
'Path does not exist: {}'.format(lidar3D_path)
return lidar3D_path
def image_path_from_index(index):
"""
Construct an image path from the image's "index" identifier.
"""
# set the prefix
prefix = 'training/image_2/'
data_path = '../data/KITTI/object/'
# lidar_bv_path = '$Faster-RCNN_TF/data/KITTI/object/training/lidar_bv/000000.npy'
image_path = os.path.join(data_path, prefix, index + '.png')
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def draw_lidar(lidar, is_grid=False, is_top_region=False, fig=None):
pxs=lidar[:,0]
pys=lidar[:,1]
pzs=lidar[:,2]
prs=lidar[:,3]
if fig is None: fig = mlab.figure(figure=None, bgcolor=(0,0,0), fgcolor=None, engine=None, size=(1000, 500))
mlab.points3d(
#pxs, pys, pzs, prs,
pxs, pys, pzs,
mode='point', # 'point' 'sphere'
colormap='spectral', #'gnuplot', 'bone', #'spectral', #'copper',
scale_factor=10,
figure=fig)
mlab.orientation_axes()
mlab.view(azimuth=180,elevation=None,distance=50,focalpoint=[ 12.0909996 , -1.04700089, -2.03249991])#2.0909996 , -1.04700089, -2.03249991
if 1:
mlab.points3d(0, 0, 0, color=(1,1,1), mode='sphere', scale_factor=0.2)
axes=np.array([
[2.,0.,0.,0.],
[0.,2.,0.,0.],
[0.,0.,2.,0.],
],dtype=np.float64)
fov=np.array([ ##<todo> : now is 45 deg. use actual setting later ...
[20., 20., 0.,0.],
[20.,-20., 0.,0.],
],dtype=np.float64)
mlab.plot3d([0, axes[0,0]], [0, axes[0,1]], [0, axes[0,2]], color=(1,0,0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[1,0]], [0, axes[1,1]], [0, axes[1,2]], color=(0,1,0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[2,0]], [0, axes[2,1]], [0, axes[2,2]], color=(0,0,1), tube_radius=None, figure=fig)
mlab.plot3d([0, fov[0,0]], [0, fov[0,1]], [0, fov[0,2]], color=(1,1,1), tube_radius=None, line_width=1, figure=fig)
mlab.plot3d([0, fov[1,0]], [0, fov[1,1]], [0, fov[1,2]], color=(1,1,1), tube_radius=None, line_width=1, figure=fig)
def draw_gt_boxes3d(gt_boxes3d, fig, color=(1,1,1), line_width=0.1):
num = len(gt_boxes3d)
for n in range(num):
b = gt_boxes3d[n]
#mlab.text3d(b[0,0], b[0,1], b[0,2], '%d'%n, scale=(1, 1, 1), color=color, figure=fig)
for k in range(0,4):
#http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i,j=k,(k+1)%4
mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
i,j=k+4,(k+1)%4 + 4
mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
i,j=k,k+4
mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
mlab.view(azimuth=180,elevation=None,distance=50,focalpoint=[ 12.0909996 , -1.04700089, -2.03249991])#2.0909996 , -1.04700089, -2.03249991
def dbscan(point_can, point):
# Stage 1: cluster the candidate points and keep only the largest cluster
db = DBSCAN(eps=0.13, min_samples=1).fit(point_can)
labels = db.labels_
dict_labels = Counter(labels)
max_label_num = np.max(dict_labels.values())
for key, value in dict_labels.iteritems():
if value == max_label_num:
key_label = key
index_labels = labels == key_label
point_can = point_can[index_labels]
# centroid of the dominant cluster
x = point_can[:, 0].mean()
y = point_can[:, 1].mean()
z = point_can[:, 2].mean()
low_z = 0
#print x,y,z
# crop a pedestrian-sized box around the centroid
index = (point[:, 0] > x-0.6) * (point[:, 0] < x+0.6) * (point[:, 1] > y-0.6) * (point[:, 1] < y+0.6)\
* (point[:, 2] > z-1.5) * (point[:, 2] < z+2)
point_pede = point[index,:]
# find the lowest z in the crop, then drop points near it (ground removal)
for k in range(point_pede.shape[0]):
if point_pede[k, 2]< low_z:
low_z = point_pede[k, 2]
index = point_pede[:, 2]>low_z+0.1
point_pede = point_pede[index,:]
# Stage 2: re-cluster the cropped points and keep the largest cluster again
db2 = DBSCAN(eps=0.3, min_samples=1).fit(point_pede)
db2_labels = db2.labels_
db2_dict_labels = Counter(db2_labels)
db2_max_label_num = np.max(db2_dict_labels.values())
for key, value in db2_dict_labels.iteritems():
if value == db2_max_label_num:
db2_key_label = key
db2_index_labels = db2_labels == db2_key_label
point_pede_single = point_pede[db2_index_labels]
return point_pede_single
def calculate_corner(point):
# initial bounds assume points within ~100 m; note x_max starts at 0,
# since lidar x (forward) is positive for these crops
x_min = 100
x_max = 0
y_min = 100
y_max = -100
z_min = 100
z_max = -100
for i in range(point.shape[0]):
if point[i, 0]<x_min:
x_min = point[i, 0]
if point[i, 0]>x_max:
x_max = point[i, 0]
if point[i, 1]<y_min:
y_min = point[i, 1]
if point[i, 1]>y_max:
y_max = point[i, 1]
if point[i, 2]<z_min:
z_min = point[i, 2]
if point[i, 2]>z_max:
z_max = point[i, 2]
corners = np.zeros((1, 8, 3))
z_max = z_max+0.1
# if z_min>-1.6:
#     z_min = -1.6
# 8 corners: top face (z_max) first, then bottom face (z_min)
corners[0] = np.array([[x_max, y_min, z_max],
[x_min, y_min, z_max],
[x_min, y_max, z_max],
[x_max, y_max, z_max],
[x_max, y_min, z_min],
[x_min, y_min, z_min],
[x_min, y_max, z_min],
[x_max, y_max, z_min]])
#print corners.shape
return corners
def calib_at(index):
"""
Return the calib sequence.
"""
calib_ori = load_kitti_calib(index)
calib = np.zeros((4, 12))
calib[0,:] = calib_ori['P2'].reshape(12)
calib[1,:] = calib_ori['P3'].reshape(12)
calib[2,:9] = calib_ori['R0'].reshape(9)
calib[3,:] = calib_ori['Tr_velo2cam'].reshape(12)
return calib
# def write_result():
# type = "pedestrian"
# truncation = -1
# occlusion = -1
# alpha = -10
# for setline in setlines:
# print "setline: ",setline.strip()
# for line in lines:
# line = line.split(",")
# number = int(line[0])-1
# if number
or 'application/octet-stream').split('/', 1)
if maintype == 'image':
att = MIMEImage(data, _subtype=subtype)
else:
att = MIMEBase(maintype, subtype)
att.set_payload(data)
encoders.encode_base64(att)
att.add_header('Content-Disposition', 'attachment',
filename=os.path.basename(fn))
return att
def add_attachments(self, session, filenames, filedata=None):
if not self.is_editable():
raise NotEditableError(_('Mailbox is read-only.'))
msg = self.get_msg()
if 'x-mp-internal-readonly' in msg:
raise NotEditableError(_('Message is read-only.'))
for fn in filenames:
msg.attach(self.make_attachment(fn, filedata=filedata))
return self.update_from_msg(session, msg)
def update_from_string(self, session, data, final=False):
if not self.is_editable():
raise NotEditableError(_('Mailbox is read-only.'))
oldmsg = self.get_msg()
if 'x-mp-internal-readonly' in oldmsg:
raise NotEditableError(_('Message is read-only.'))
if not data:
outmsg = oldmsg
else:
newmsg = email.parser.Parser().parsestr(data.encode('utf-8'))
outmsg = MIMEMultipart()
# Copy over editable headers from the input string, skipping blanks
for hdr in newmsg.keys():
if hdr.startswith('Attachment-'):
pass
else:
encoded_hdr = self.encoded_hdr(newmsg, hdr)
if len(encoded_hdr.strip()) > 0:
outmsg[hdr] = encoded_hdr
# Copy over the uneditable headers from the old message
for hdr in oldmsg.keys():
if ((hdr.lower() not in self.MIME_HEADERS)
and (hdr.lower() in self.UNEDITABLE_HEADERS)):
outmsg[hdr] = oldmsg[hdr]
# Copy the message text
new_body = newmsg.get_payload().decode('utf-8')
try:
new_body.encode('us-ascii')
charset = 'us-ascii'
except UnicodeEncodeError:
charset = 'utf-8'
textbody = MIMEText(new_body, _subtype='plain', _charset=charset)
outmsg.attach(textbody)
del textbody['MIME-Version']
# FIXME: Use markdown and template to generate fancy HTML part
# Copy the attachments we are keeping
attachments = [h for h in newmsg.keys()
if h.startswith('Attachment-')]
if attachments:
oldtree = self.get_message_tree()
for att in oldtree['attachments']:
hdr = 'Attachment-%s' % att['count']
if hdr in attachments:
# FIXME: Update the filename to match whatever
# the user typed
outmsg.attach(att['part'])
attachments.remove(hdr)
# Attach some new files?
for hdr in attachments:
try:
outmsg.attach(self.make_attachment(newmsg[hdr]))
except:
pass # FIXME: Warn user that failed...
# Save result back to mailbox
if final:
sender, rcpts, outmsg, ev = PrepareMessage(self.config, outmsg)
return self.update_from_msg(session, outmsg)
def update_from_msg(self, session, newmsg):
if not self.is_editable():
raise NotEditableError(_('Mailbox is read-only.'))
mbx, ptr, fd = self.get_mbox_ptr_and_fd()
# OK, adding to the mailbox worked
newptr = ptr[:MBX_ID_LEN] + mbx.add(newmsg)
# Remove the old message...
mbx.remove(ptr[MBX_ID_LEN:])
# FIXME: We should DELETE the old version from the index first.
# Update the in-memory-index
self.msg_info[self.index.MSG_PTRS] = newptr
self.index.set_msg_at_idx_pos(self.msg_idx_pos, self.msg_info)
self.index.index_email(session, Email(self.index, self.msg_idx_pos))
self.msg_parsed = None
return self
def get_msg_info(self, field=None):
if not self.msg_info:
self.msg_info = self.index.get_msg_at_idx_pos(self.msg_idx_pos)
if field is None:
return self.msg_info
else:
return self.msg_info[field]
def get_mbox_ptr_and_fd(self):
for msg_ptr in self.get_msg_info(self.index.MSG_PTRS).split(','):
try:
mbox = self.config.open_mailbox(None, msg_ptr[:MBX_ID_LEN])
fd = mbox.get_file_by_ptr(msg_ptr)
# FIXME: How do we know we have the right message?
return mbox, msg_ptr, fd
except (IOError, OSError, KeyError):
# FIXME: If this pointer is wrong, should we fix the index?
print 'WARNING: %s not found' % msg_ptr
return None, None, None
def get_file(self):
return self.get_mbox_ptr_and_fd()[2]
def get_msg_size(self):
mbox, ptr, fd = self.get_mbox_ptr_and_fd()
fd.seek(0, 2)
return fd.tell()
def get_msg(self, pgpmime=True):
if not self.msg_parsed:
fd = self.get_file()
if fd:
self.msg_parsed = ParseMessage(fd, pgpmime=pgpmime)
if not self.msg_parsed:
raise IndexError(_('Message not found?'))
return self.msg_parsed
def get_headerprint(self):
return HeaderPrint(self.get_msg())
def is_thread(self):
return ((self.get_msg_info(self.index.MSG_THREAD_MID)) or
(0 < len(self.get_msg_info(self.index.MSG_REPLIES))))
def get(self, field, default=''):
"""Get one (or all) indexed fields for this mail."""
field = field.lower()
if field == 'subject':
return self.get_msg_info(self.index.MSG_SUBJECT)
elif field == 'from':
return self.get_msg_info(self.index.MSG_FROM)
else:
raw = ' '.join(self.get_msg().get_all(field, default))
return self.index.hdr(0, 0, value=raw) or raw
def get_msg_summary(self):
# We do this first to make sure self.msg_info is loaded
msg_mid = self.get_msg_info(self.index.MSG_MID)
return [
msg_mid,
self.get_msg_info(self.index.MSG_ID),
self.get_msg_info(self.index.MSG_FROM),
self.index.expand_to_list(self.msg_info),
self.get_msg_info(self.index.MSG_SUBJECT),
self.get_msg_info(self.index.MSG_BODY),
self.get_msg_info(self.index.MSG_DATE),
self.get_msg_info(self.index.MSG_TAGS).split(','),
self.is_editable()
]
def extract_attachment(self, session, att_id,
name_fmt=None, mode='download'):
msg = self.get_msg()
count = 0
extracted = 0
filename, attributes = '', {}
for part in (msg.walk() if msg else []):
mimetype = part.get_content_type()
if mimetype.startswith('multipart/'):
continue
content_id = part.get('content-id', '')
pfn = part.get_filename() or ''
count += 1
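# att_id may be '*' (all parts), '#<n>' or 'part:<n>' (by position), a
# Content-Id, a MIME type, a file extension, or an exact filename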
if (('*' == att_id)
or ('#%s' % count == att_id)
or ('part:%s' % count == att_id)
or (content_id == att_id)
or (mimetype == att_id)
or (pfn.lower().endswith('.%s' % att_id))
or (pfn == att_id)):
payload = part.get_payload(None, True) or ''
attributes = {
'msg_mid': self.msg_mid(),
'count': count,
'length': len(payload),
'content-id': content_id,
'filename': pfn,
}
if pfn:
if '.' in pfn:
pfn, attributes['att_ext'] = pfn.rsplit('.', 1)
attributes['att_ext'] = attributes['att_ext'].lower()
attributes['att_name'] = pfn
if mimetype:
attributes['mimetype'] = mimetype
filesize = len(payload)
if mode.startswith('inline'):
attributes['data'] = payload
session.ui.notify(_('Extracted attachment %s') % att_id)
elif mode.startswith('preview'):
attributes['thumb'] = True
attributes['mimetype'] = 'image/jpeg'
attributes['disposition'] = 'inline'
thumb = StringIO.StringIO()
if thumbnail(payload, thumb, height=250):
session.ui.notify(_('Wrote preview to: %s') % filename)
attributes['length'] = thumb.tell()
filename, fd = session.ui.open_for_data(
name_fmt=name_fmt, attributes=attributes)
thumb.seek(0)
fd.write(thumb.read())
fd.close()
else:
session.ui.notify(_('Failed to generate thumbnail'))
raise UrlRedirectException('/static/img/image-default.png')
else:
filename, fd = session.ui.open_for_data(
name_fmt=name_fmt, attributes=attributes)
fd.write(payload)
session.ui.notify(_('Wrote attachment to: %s') % filename)
fd.close()
extracted += 1
if 0 == extracted:
session.ui.notify(_('No attachments found for: %s') % att_id)
return None, None
else:
return filename, attributes
def get_message_tags(self):
tids = self.get_msg_info(self.index.MSG_TAGS).split(',')
return [self.config.get_tag(t) for t in tids]
def get_message_tree(self, want=None):
msg = self.get_msg()
tree = {
'id': self.get_msg_info(self.index.MSG_ID)
}
for p in 'text_parts', 'html_parts', 'attachments':
if want is None or p in want:
tree[p] = []
if want is None or 'summary' in want:
tree['summary'] = self.get_msg_summary()
if want is None or 'tags' in want:
tree['tags'] = self.get_msg_info(self.index.MSG_TAGS).split(',')
if want is None or 'conversation' in want:
tree['conversation'] = {}
conv_id = self.get_msg_info(self.index.MSG_THREAD_MID)
if conv_id:
conv = Email(self.index, int(conv_id, 36))
tree['conversation'] = convs = [conv.get_msg_summary()]
for rid in conv.get_msg_info(self.index.MSG_REPLIES
).split(','):
if rid:
convs.append(Email(self.index, int(rid, 36)
).get_msg_summary())
if (want is None
or 'headers' in want
or 'editing_string' in want
or 'editing_strings' in want):
tree['headers'] = {}
for hdr in msg.keys():
tree['headers'][hdr] = self.index.hdr(msg, hdr)
if want is None or 'headers_lc' in want:
tree['headers_lc'] = {}
for hdr in msg.keys():
tree['headers_lc'][hdr.lower()] = self.index.hdr(msg, hdr)
if want is None or 'header_list' in want:
tree['header_list'] = [(k, self.index.hdr(msg, k, value=v))
for k, v in msg.items()]
# FIXME: Decide if this is strict enough or too strict...?
html_cleaner = Cleaner(page_structure=True, meta=True, links=True,
javascript=True, scripts=True, frames=True,
embedded=True, safe_attrs_only=True)
# Note: count algorithm must match that used in extract_attachment
# above
count = 0
for part in msg.walk():
mimetype = part.get_content_type()
if (mimetype.startswith('multipart/')
or mimetype == "application/pgp-encrypted"):
continue
try:
if (mimetype == "application/octet-stream"
and part.cryptedcontainer is True):
continue
except:
pass
count += 1
if (part.get('content-disposition', 'inline') == 'inline'
and mimetype in ('text/plain', 'text/html')):
payload, charset = self.decode_payload(part)
if (mimetype == 'text/html'
or '<html>' in payload
or '</body>' in payload):
if want is None or 'html_parts' in want:
tree['html_parts'].append({
'charset': charset,
'type': 'html',
'data': ((payload.strip()
and html_cleaner.clean_html(payload))
or '')
})
elif want is None or 'text_parts' in want:
text_parts = self.parse_text_part(payload, charset)
tree['text_parts'].extend(text_parts)
elif want is None or 'attachments' in want:
tree['attachments'].append({
'mimetype': mimetype,
'count': count,
'part': part,
'length': len(part.get_payload(None, True) or ''),
'content-id': part.get('content-id', ''),
'filename': part.get_filename() or ''
})
if self.is_editable():
if not want or 'editing_strings' in want:
tree['editing_strings'] = self.get_editing_strings(tree)
if not want or 'editing_string' in want:
tree['editing_string'] = self.get_editing_string(tree)
if want is None or 'crypto' in want:
if 'crypto' not in tree:
tree['crypto'] = {'encryption': msg.encryption_info,
'signature': msg.signature_info}
else:
tree['crypto']['encryption'] = msg.encryption_info
tree['crypto']['signature'] = msg.signature_info
return tree
# FIXME: This should be configurable by the user, depending on where
# he lives and what kind of e-mail he gets.
CHARSET_PRIORITY_LIST = ['utf-8', 'iso-8859-1']
def decode_text(self, payload, charset='utf-8', binary=True):
if charset:
charsets = [charset]
else:
charsets = self.CHARSET_PRIORITY_LIST
for charset in charsets:
try:
payload = payload.decode(charset)
return payload, charset
except (UnicodeDecodeError, TypeError):
pass
if binary:
return payload, '8bit'
else:
return _('[Binary data suppressed]\n'), 'utf-8'
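# e.g. decode_text('caf\xe9', charset=None) fails utf-8 but succeeds with
# iso-8859-1, returning (u'caf\xe9', 'iso-8859-1'); with binary=True,
# undecodable payloads come back unchanged, tagged as '8bit'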
def decode_payload(self, part):
charset = part.get_content_charset() or None
payload = part.get_payload(None, True) or ''
return self.decode_text(payload, charset=charset)
def parse_text_part(self, data, charset):
current = {
'type': 'bogus',
'charset': charset,
}
parse = []
block = 'body'
clines = []
for line in data.splitlines(True):
block, ltype = self.parse_line_type(line, block)
if ltype != current['type']:
# This is not great, it's a hack to move the preamble
# before a quote section into the quote itself.
if ltype == 'quote' and clines and '@' in clines[-1]:
current['data'] = ''.join(clines[:-1])
clines = clines[-1:]
elif (ltype == 'quote' and len(clines) > 2
and '@' in clines[-2] and '' == clines[-1].strip()):
current['data']
# be populated by parsing raw responses
# - these are aggregated into project_campaign_response_summary
#
#tablename = "project_campaign_response"
#define_table(tablename,
# message_id(),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
# super_link("parameter_id", "stats_parameter",
# label = T("Keyword"),
# instance_types = ("project_campaign_keyword",),
# represent = S3Represent(lookup="stats_parameter"),
# readable = True,
# writable = True,
# empty = False,
# ),
# Getting this without TERA may be hard!
#location_id(writable = False),
# @ToDo: Link to the raw Message received
#self.msg_message_id(),
# s3_datetime(),
# s3_comments(),
# *s3_meta_fields())
# CRUD Strings
#crud_strings[tablename] = Storage(
# label_create = T("Add Response"),
# title_display = T("Response Details"),
# title_list = T("Responses"),
# title_update = T("Edit Response"),
# title_report = T("Response Report"),
# label_list_button = T("List Responses"),
# msg_record_created = T("Response Added"),
# msg_record_modified = T("Response Updated"),
# msg_record_deleted = T("Response Deleted"),
# msg_list_empty = T("No Responses Found")
#)
# ---------------------------------------------------------------------
# Project Campaign Response Summary
# - aggregated responses (by Keyword/Location)
# - TERA data comes in here
#
tablename = "project_campaign_response_summary"
define_table(tablename,
message_id(),
# Instance
super_link("data_id", "stats_data"),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
label = T("Keyword"),
instance_types = ("project_campaign_keyword",),
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
empty = False,
),
# Populated automatically (by TERA)
# & will be a msg_basestation?
location_id(writable = False),
Field("value", "integer",
label = T("Number of Responses"),
represent = IS_INT_AMOUNT.represent,
requires = IS_INT_IN_RANGE(0, None),
),
# @ToDo: Populate automatically from time Message is sent?
s3_date("date",
label = T("Start Date"),
#empty = False,
),
s3_date("end_date",
label = T("End Date"),
start_field = "project_campaign_response_summary_date",
default_interval = 1,
#empty = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Response Summary"),
title_display = T("Response Summary Details"),
title_list = T("Response Summaries"),
title_update = T("Edit Response Summary"),
title_report = T("Response Summary Report"),
label_list_button = T("List Response Summaries"),
msg_record_created = T("Response Summary Added"),
msg_record_modified = T("Response Summary Updated"),
msg_record_deleted = T("Response Summary Deleted"),
msg_list_empty = T("No Response Summaries Found")
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class S3ProjectFrameworkModel(S3Model):
"""
Project Framework Model
"""
names = ("project_framework",
"project_framework_organisation",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
messages = current.messages
ORGANISATION = messages.ORGANISATION
ORGANISATIONS = T("Organization(s)")
# ---------------------------------------------------------------------
# Project Frameworks
#
tablename = "project_framework"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
Field("name", length=255, unique=True,
label = T("Name"),
requires = [IS_LENGTH(255),
IS_NOT_IN_DB(db,
"project_framework.name"),
],
),
s3_comments("description",
label = T("Description"),
comment = None,
),
Field("time_frame",
label = T("Time Frame"),
represent = lambda v: v or messages.NONE,
),
*s3_meta_fields())
# CRUD Strings
if current.deployment_settings.get_auth_record_approval():
msg_record_created = T("Policy or Strategy added, awaiting administrator's approval")
else:
msg_record_created = T("Policy or Strategy added")
crud_strings[tablename] = Storage(
label_create = T("Create Policy or Strategy"),
title_display = T("Policy or Strategy"),
title_list = T("Policies & Strategies"),
title_update = T("Edit Policy or Strategy"),
title_upload = T("Import Policies & Strategies"),
label_list_button = T("List Policies & Strategies"),
msg_record_created = msg_record_created,
msg_record_modified = T("Policy or Strategy updated"),
msg_record_deleted = T("Policy or Strategy deleted"),
msg_list_empty = T("No Policies or Strategies found")
)
crud_form = S3SQLCustomForm(
"name",
S3SQLInlineComponent(
"framework_organisation",
label = ORGANISATIONS,
fields = ["organisation_id"],
),
"description",
"time_frame",
S3SQLInlineComponent(
"document",
label = T("Files"),
fields = ["file"],
filterby = dict(field = "file",
options = "",
invert = True,
)
),
)
#filter_widgets = [
# S3TextFilter(["name",
# "description",
# ],
# label = T("Name"),
# comment = T("Search for a Policy or Strategy by name or description."),
# ),
#]
self.configure(tablename,
super_entity="doc_entity",
crud_form = crud_form,
#filter_widgets = filter_widgets,
list_fields = ["name",
(ORGANISATIONS, "framework_organisation.organisation_id"),
"description",
"time_frame",
(T("Files"), "document.file"),
]
)
represent = S3Represent(lookup=tablename)
framework_id = S3ReusableField("framework_id", "reference %s" % tablename,
label = ORGANISATION,
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_framework.id",
represent
)),
)
self.add_components(tablename,
project_framework_organisation = "framework_id",
)
# ---------------------------------------------------------------------
# Project Framework Organisations
#
tablename = "project_framework_organisation"
define_table(tablename,
framework_id(),
self.org_organisation_id(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Organization"),
title_display = ORGANISATION,
title_list = T("Organizations"),
title_update = T("Edit Organization"),
label_list_button = T("List Organizations"),
msg_record_created = T("Organization added to Policy/Strategy"),
msg_record_modified = T("Organization updated"),
msg_record_deleted = T("Organization removed from Policy/Strategy"),
msg_list_empty = T("No Organizations found for this Policy/Strategy")
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class S3ProjectHazardModel(S3Model):
"""
Project Hazard Model
"""
names = ("project_hazard",
"project_hazard_project",
"project_hazard_id", # Exported for translation
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
NONE = current.messages["NONE"]
# ---------------------------------------------------------------------
# Hazard
#
tablename = "project_hazard"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = IS_NOT_EMPTY(),
),
s3_comments(
represent = lambda v: T(v) if v is not None \
else NONE,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Hazard"),
title_display = T("Hazard Details"),
title_list = T("Hazards"),
title_update = T("Edit Hazard"),
title_upload = T("Import Hazards"),
label_list_button = T("List Hazards"),
label_delete_button = T("Delete Hazard"),
msg_record_created = T("Hazard added"),
msg_record_modified = T("Hazard updated"),
msg_record_deleted = T("Hazard deleted"),
msg_list_empty = T("No Hazards currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename, translate=True)
hazard_id = S3ReusableField("hazard_id", "reference %s" % tablename,
sortby = "name",
label = T("Hazards"),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_hazard.id",
represent,
sort=True)),
represent = represent,
ondelete = "CASCADE",
)
# ---------------------------------------------------------------------
# Projects <> Hazards Link Table
#
tablename = "project_hazard_project"
define_table(tablename,
hazard_id(),
self.project_project_id(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Hazard"),
title_display = T("Hazard"),
title_list = T("Hazards"),
title_update = T("Edit Hazard"),
title_upload = T("Import Hazard data"),
label_list_button = T("List Hazards"),
msg_record_created = T("Hazard added to Project"),
msg_record_modified = T("Hazard updated"),
msg_record_deleted = T("Hazard removed from Project"),
msg_list_empty = T("No Hazards found for this Project"))
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("project_id",
"hazard_id",
),
),
)
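# S3Duplicate with primary=("project_id", "hazard_id") makes imports treat a
# link row as a duplicate when both foreign keys match an existing row, so
# re-importing the same Project<>Hazard pair should update rather than
# create a second link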
# Pass names back to global scope (s3.*)
return {"project_hazard_id": hazard_id,
}
# =============================================================================
class S3ProjectHRModel(S3Model):
"""
Optionally link Projects <> Human Resources
"""
names = ("project_human_resource_project",)
def model(self):
T = current.T
settings = current.deployment_settings
status_opts = {1: T("Assigned"),
#2: T("Standing By"),
#3: T("Active"),
4: T("Left"),
#5: T("Unable to activate"),
}
community_volunteers = settings.get_project_community_volunteers()
# ---------------------------------------------------------------------
# Projects <> Human Resources
#
tablename = "project_human_resource_project"
self.define_table(tablename,
# Instance table
self.super_link("cost_item_id", "budget_cost_item"),
self.project_project_id(empty = False,
ondelete = "CASCADE",
),
self.project_location_id(ondelete = "CASCADE",
readable = community_volunteers,
writable = community_volunteers,
),
#self.org_sector_id(ondelete = "SET NULL",
# readable = project_hrs_use_sectors,
# writable = project_hrs_use_sectors,
# ),
self.hrm_human_resource_id(empty = False,
ondelete = "CASCADE",
),
Field("status", "integer",
default = 1,
label = T("Status"),
represent = lambda opt: \
status_opts.get(opt, current.messages.UNKNOWN_OPT),
requires = IS_IN_SET(status_opts),
),
*s3_meta_fields()
)
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Assign Human Resource"),
title_display = T("Human Resource Details"),
title_list = T("Assigned Human Resources"),
title_update = T("Edit Human Resource"),
label_list_button = T("List Assigned Human Resources"),
label_delete_button = T("Remove Human Resource from this project"),
msg_record_created = T("Human Resource assigned"),
msg_record_modified = T("Human Resource Assignment updated"),
msg_record_deleted = T("Human Resource unassigned"),
msg_list_empty = T("No Human Resources currently assigned to this project"))
self.configure(tablename,
context = {"project": "project_id",
},
onvalidation = self.project_human_resource_onvalidation,
super_entity = "budget_cost_item",
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_human_resource_onvalidation(form):
"""
Prevent the same human_resource record being added more than once
"""
hr = current.s3db.project_human_resource_project
# Fetch the first row that has the same project and human resource ids
# (which isn't this record!)
form_vars = form.request_vars
query = (hr.human_resource_id == form_vars.human_resource_id) & \
(hr.project_id == form_vars.project_id) & \
(hr.id != form_vars.id)
row = current.db(query).select(hr.id,
limitby=(0, 1)).first()
if row:
# We have a duplicate. Return an error to the user.
form.errors.human_resource_id = current.T("Record already exists")
# =============================================================================
class S3ProjectIndicatorModel(S3Model):
"""
Project Indicator Model
- depends on Stats module
Unused...instead use ProjectPlanningModel since Indicators are not reused across Projects
"""
names = ("project_indicator",
"project_indicator_data",
)
def model(self):
if
import os
import sys
import json
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from nn import *
from myLSTM import *
class BaseModel(object):
def __init__(self, params, mode, num_words, word2vec, idx2word):
self.params = params
self.mode = mode
self.batch_size = params.batch_size if mode=='train' else 1
self.cnn_model = params.cnn_model
self.train_cnn = params.train_cnn
self.init_lstm_with_fc_feats = False
self.img_shape = [224, 224, 3]
self.save_dir = os.path.join(params.save_dir, self.cnn_model+'/')
self.global_step = tf.Variable(0, name = 'global_step', trainable = False)
self.num_words = num_words
self.word2vec = word2vec
self.idx2word = idx2word
self.build()
self.saver = tf.train.Saver(max_to_keep=100)
def build(self):
raise NotImplementedError()
def get_feed_dict(self, batch, is_train, contexts=None, feats=None):
raise NotImplementedError()
def train(self, sess, train_data, valid_data, valid_period=200, valid_num_batches=50):
""" Train the model. """
print("Training the model...")
params = self.params
num_epochs = params.num_epochs
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(params.logs_dir + '/train/', sess.graph)
minimum_valid_loss = 10  # large sentinel; any real validation loss should beat it
first = True             # tolerate one non-improving validation before stopping
for epoch_no in range(num_epochs):
for idx in range(train_data.num_batches):
batch = train_data.next_batch()
feed_dict = self.get_feed_dict(batch, is_train=True)
if self.train_cnn:
# Train CNN and RNN
summary, _, loss0, loss1, global_step = sess.run([merged,
self.opt_op_cnn_rnn,
self.loss0,
self.loss1_cnn_rnn,
self.global_step],
feed_dict=feed_dict)
else:
summary, _, loss0, loss1, global_step = sess.run([merged,
self.opt_op_rnn,
self.loss0,
self.loss1_rnn,
self.global_step],
feed_dict=feed_dict)
train_writer.add_summary(summary, idx)
print(" Loss0=%f Loss1=%f Batch=%d" %(loss0, loss1, idx))
if idx > 1500 and idx % valid_period == 0:
print("Start validation! idx = {}".format(idx))
# Calculate the loss on validate data
valid_loss_list = []
# valid_data.current_index = 0
valid_data.reset()
for i in range(valid_num_batches):
valid_batch = valid_data.next_batch()
valid_feed_dict = self.get_feed_dict(valid_batch, is_train=True)
valid_loss0, valid_loss1_rnn = sess.run([self.loss0, self.loss1_rnn],
feed_dict=valid_feed_dict)
valid_loss_list.append(valid_loss0)
mean_valid_loss = sum(valid_loss_list) / float(len(valid_loss_list))
print("Mean valid loss = {}".format(mean_valid_loss))
if mean_valid_loss > minimum_valid_loss:
# early stopping: break after two consecutive non-improving validations
if not first:
break
first = False
else:
first = True
minimum_valid_loss = mean_valid_loss
self.save(sess)  # checkpoint whenever validation loss improves
# if (global_step + 1) % params.save_period == 0:
# self.save(sess)
# self.save(sess)
print("Training complete.")
def test(self, sess, test_data):
""" Test the model. """
print("Testing the model ...")
result_file = self.params.test_result_file
imagefile_caption = {}
# Generate the captions for the images
for k in tqdm(list(range(test_data.num_images))):
batch = test_data.next_batch()
img_files, imgs = batch
img_file = img_files[0]
feed_dict = self.get_feed_dict(batch, is_train=False)
result = sess.run(self.results, feed_dict=feed_dict)
sentence = test_data.indices_to_sent(result.squeeze())
imagefile_caption[img_file] = sentence
with open(result_file, 'w') as fw:  # text mode: json.dump writes str
json.dump(imagefile_caption, fw)
print("Testing complete.")
def save(self, sess):
""" Save the model. """
print(("Saving model to %s" % self.save_dir))
self.saver.save(sess, self.save_dir + '/model.ckpt', self.global_step)
def load(self, sess):
""" Load the model. """
print("Loading model...")
checkpoint = tf.train.get_checkpoint_state(self.save_dir)
if checkpoint is None:
print("Error: No saved model found. Please train first.")
sys.exit(0)
self.saver.restore(sess, checkpoint.model_checkpoint_path)
def load2(self, data_path, session, ignore_missing=True):
""" Load a pretrained CNN model. """
print("Loading CNN model from %s..." %data_path)
# data_dict = np.load(data_path).item()
data_dict = np.load(data_path, encoding="bytes").item()
count = 0
miss_count = 0
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
param_name = param_name.decode("utf-8")
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
count += 1
# print("Variable %s:%s loaded" %(op_name, param_name))
except ValueError:
miss_count += 1
# print("Variable %s:%s missed" %(op_name, param_name))
if not ignore_missing:
raise
print("%d variables loaded. %d variables missed." %(count, miss_count))
class CaptionGenerator(BaseModel):
def build(self):
""" Build the model. """
self.build_cnn()
self.build_rnn()
def build_cnn(self):
""" Build the CNN. """
print("Building the CNN part...")
self.build_resnet152()
print("CNN part built.")
def basic_block(self, input_feats, name1, name2, is_train, bn, c, s=2):
""" A basic block of ResNets. """
branch1_feats = convolution_no_bias(input_feats, 1, 1, 4*c, s, s, name1+'_branch1')
branch1_feats = batch_norm(branch1_feats, name2+'_branch1', is_train, bn, None)
branch2a_feats = convolution_no_bias(input_feats, 1, 1, c, s, s, name1+'_branch2a')
branch2a_feats = batch_norm(branch2a_feats, name2+'_branch2a', is_train, bn, 'relu')
branch2b_feats = convolution_no_bias(branch2a_feats, 3, 3, c, 1, 1, name1+'_branch2b')
branch2b_feats = batch_norm(branch2b_feats, name2+'_branch2b', is_train, bn, 'relu')
branch2c_feats = convolution_no_bias(branch2b_feats, 1, 1, 4*c, 1, 1, name1+'_branch2c')
branch2c_feats = batch_norm(branch2c_feats, name2+'_branch2c', is_train, bn, None)
output_feats = branch1_feats + branch2c_feats
output_feats = nonlinear(output_feats, 'relu')
return output_feats
def basic_block2(self, input_feats, name1, name2, is_train, bn, c):
""" Another basic block of ResNets. """
branch2a_feats = convolution_no_bias(input_feats, 1, 1, c, 1, 1, name1+'_branch2a')
branch2a_feats = batch_norm(branch2a_feats, name2+'_branch2a', is_train, bn, 'relu')
branch2b_feats = convolution_no_bias(branch2a_feats, 3, 3, c, 1, 1, name1+'_branch2b')
branch2b_feats = batch_norm(branch2b_feats, name2+'_branch2b', is_train, bn, 'relu')
branch2c_feats = convolution_no_bias(branch2b_feats, 1, 1, 4*c, 1, 1, name1+'_branch2c')
branch2c_feats = batch_norm(branch2c_feats, name2+'_branch2c', is_train, bn, None)
output_feats = input_feats + branch2c_feats
output_feats = nonlinear(output_feats, 'relu')
return output_feats
def build_resnet152(self):
""" Build the ResNet152 net. """
bn = self.params.batch_norm
imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)
is_train = tf.placeholder(tf.bool)
conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')
conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, bn, 'relu')
pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')
res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, bn, 64, 1)
res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, bn, 64)
res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, bn, 64)
res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, bn, 128)
temp = res3a_feats
for i in range(1, 8):
temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, bn, 128)
res3b7_feats = temp
res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, bn, 256)
temp = res4a_feats
for i in range(1, 36):
temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, bn, 256)
res4b35_feats = temp
res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, bn, 512)
res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, bn, 512)
res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, bn, 512)
res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])
self.conv_feats = res5c_feats_flat
self.conv_feat_shape = [49, 2048]
self.imgs = imgs
self.is_train = is_train
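# Shape check: conv1 (stride 2), pool1 (stride 2) and the three stride-2
# blocks (res3a, res4a, res5a) downsample 224x224 by 2^5 = 32, giving a
# 7x7x2048 feature map, i.e. the 49 x 2048 contexts reshaped above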
def build_rnn(self):
""" Build the RNN. """
print("Building the RNN part...")
params = self.params
bn = params.batch_norm
batch_size = self.batch_size
num_ctx = self.conv_feat_shape[0]
dim_ctx = self.conv_feat_shape[1]
num_words = self.num_words
max_sent_len = params.max_sent_len
num_lstm = params.num_lstm
dim_embed = params.dim_embed
dim_hidden = params.dim_hidden
dim_dec = params.dim_dec
with tf.variable_scope(tf.get_variable_scope()) as vscope:
contexts = self.conv_feats
if self.init_lstm_with_fc_feats:
feats = self.fc_feats
sentences = tf.placeholder(tf.int32, [batch_size, max_sent_len])
masks = tf.placeholder(tf.float32, [batch_size, max_sent_len])
is_train = self.is_train
# self.word_weight = np.exp(-np.array(self.word_table.word_freq)*self.class_balancing_factor)
self.position_weight = np.exp(-np.array(list(range(max_sent_len)))*0.003)
# initialize the word embedding
idx2vec = np.array([self.word2vec[self.idx2word[i]] for i in range(self.num_words)])
emb_w = weight('emb_w', [self.num_words, dim_embed], init_val=idx2vec, group_id=1)
# initialize the decoding layer
dec_w = weight('dec_w', [dim_dec, self.num_words], group_id=1)
dec_b = bias('dec_b', [self.num_words], init_val=0.0)
# compute the mean context
context_mean = tf.reduce_mean(contexts, 1)
# initialize the LSTMs
# lstm = tf.nn.rnn_cell.LSTMCell(dim_hidden, initializer=tf.random_normal_initializer(stddev=0.03))
lstm = myBasicLSTMCell(dim_hidden, initializer=tf.random_normal_initializer(stddev=0.03))
if self.init_lstm_with_fc_feats:
init_feats = feats
else:
init_feats = context_mean
if num_lstm == 1:
temp = init_feats
for i in range(params.num_init_layers):
temp = fully_connected(temp, dim_hidden, 'init_lstm_fc1'+str(i), group_id=1)
temp = batch_norm(temp, 'init_lstm_bn1'+str(i), is_train, bn, 'tanh')
memory = tf.identity(temp)
temp = init_feats
for i in range(params.num_init_layers):
temp = fully_connected(temp, dim_hidden, 'init_lstm_fc2'+str(i), group_id=1)
temp = batch_norm(temp, 'init_lstm_bn2'+str(i), is_train, bn, 'tanh')
output = tf.identity(temp)
state = tf.nn.rnn_cell.LSTMStateTuple(memory, output)
else:
temp = init_feats
for i in range(params.num_init_layers):
temp = fully_connected(temp, dim_hidden, 'init_lstm_fc11'+str(i), group_id=1)
temp = batch_norm(temp, 'init_lstm_bn11'+str(i), is_train, bn, 'tanh')
memory1 = tf.identity(temp)
temp = init_feats
for i in range(params.num_init_layers):
temp = fully_connected(temp, dim_hidden, 'init_lstm_fc12'+str(i), group_id=1)
temp = batch_norm(temp, 'init_lstm_bn12'+str(i), is_train, bn, 'tanh')
output1 = tf.identity(temp)
temp = init_feats
for i in range(params.num_init_layers):
temp = fully_connected(temp, dim_hidden, 'init_lstm_fc21'+str(i), group_id=1)
temp = batch_norm(temp, 'init_lstm_bn21'+str(i), is_train, bn, 'tanh')
memory2 = tf.identity(temp)
temp = init_feats
for i in range(params.num_init_layers):
temp = fully_connected(temp, dim_hidden, 'init_lstm_fc22'+str(i), group_id=1)
temp = batch_norm(temp, 'init_lstm_bn22'+str(i), is_train, bn, 'tanh')
output = tf.identity(temp)
state1 = tf.nn.rnn_cell.LSTMStateTuple(memory1, output1)
state2 = tf.nn.rnn_cell.LSTMStateTuple(memory2, output)
loss0 = 0.0
results = []
scores = []
channel_context_flat = tf.reshape(contexts, [-1, num_ctx])
# Generate the words one by one
for idx in range(max_sent_len):
# idx = 0
# Using C-S model channel-wise attention first then spatial-wise
# Channel-wise Attention mechanism
channel_context_encode1 = fully_connected(channel_context_flat, num_ctx, 'chanl_att_fc11', group_id=1)
channel_context_encode1 = batch_norm(channel_context_encode1, 'chanl_att_bn11', is_train, bn, None)
channel_context_encode2 = fully_connected_no_bias(output, num_ctx, 'chanl_att_fc12', group_id=1)
channel_context_encode2 = batch_norm(channel_context_encode2, 'chanl_att_bn12', is_train, bn, None)
channel_context_encode2 = tf.tile(tf.expand_dims(channel_context_encode2, 1), [1, dim_ctx, 1])
channel_context_encode2 = tf.reshape(channel_context_encode2, [-1, num_ctx])
# Why use relu???
channel_context_encode = channel_context_encode1 + channel_context_encode2
channel_context_encode = nonlinear(channel_context_encode, 'relu')
channel_context_encode = dropout(channel_context_encode, 0.5, is_train)
beta = fully_connected(channel_context_encode, 1, 'chanl_att_fc2', group_id=1)
beta = batch_norm(beta, 'chanl_att_bn2', is_train, bn, None)
beta = tf.reshape(beta, [-1, dim_ctx])
beta = tf.nn.softmax(beta)
channel_weighted_contexts = contexts * tf.expand_dims(beta, 1)
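# beta is a softmax over the 2048 channels (shape [batch, dim_ctx]);
# expanding it on axis 1 broadcasts one weight per channel across all 49
# spatial positions, re-scaling the contexts before any spatial attention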
if idx == 0:
word_emb = tf.zeros([batch_size, dim_embed])
# weighted_context = tf.identity(context_mean)
else:
word_emb = tf.cond(is_train, lambda: tf.nn.embedding_lookup(emb_w, sentences[:, idx-1]), lambda: | |
#!/usr/bin/env python
# reinvent-2019/drone-zone/ground-control.py
import argparse
import base64
import csv
import json
import logging
import os
import sys
import tempfile
import time
import uuid
import cv2
import numpy as np
import olympe
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
from AWSIoTPythonSDK.core.greengrass.discovery.providers import DiscoveryInfoProvider
from AWSIoTPythonSDK.core.protocol.connection.cores import ProgressiveBackOffCore
from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryInvalidRequestException
from olympe.messages import gimbal
from olympe.messages.ardrone3.Piloting import TakeOff, moveBy, Landing, PCMD
from olympe.messages.ardrone3.PilotingSettings import MaxTilt, MaxAltitude
from olympe.messages.ardrone3.PilotingSettingsState import MaxTiltChanged, MaxAltitudeChanged
from olympe.messages.ardrone3.PilotingState import AltitudeChanged, AttitudeChanged, FlyingStateChanged, \
AlertStateChanged, MotionState, WindStateChanged, VibrationLevelChanged, SpeedChanged
from olympe.messages.ardrone3.SpeedSettings import MaxVerticalSpeed, MaxRotationSpeed
from olympe.messages.ardrone3.SpeedSettingsState import MaxRotationSpeedChanged, MaxVerticalSpeedChanged
AllowedActions = ['both', 'publish', 'subscribe']
CLASSES = [
"ferrari-red",
"lamborghini-white",
"porsche-yellow",
"lamborghini-orange"
]
CarModels = {
'ferrari-red': 0,
'lamborghini-white': 1,
'porsche-yellow': 2,
'lamborghini-orange': 3
}
MAX_DISCOVERY_RETRIES = 10
GROUP_CA_PATH = "./groupCA/"
# General message notification callback
def customOnMessage(message):
print('Received message on topic %s: %s\n' % (message.topic, message.payload))
# Custom Shadow callback
def customShadowCallback_Update(payload, responseStatus, token):
# payload is a JSON string ready to be parsed using json.loads(...)
# in both Py2.x and Py3.x
if responseStatus == "timeout":
print("Update request " + token + " time out!")
if responseStatus == "accepted":
payloadDict = json.loads(payload)
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Update request with token: " + token + " accepted!")
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Update request " + token + " rejected!")
def customShadowCallback_Delete(payload, responseStatus, token):
if responseStatus == "timeout":
print("Delete request " + token + " time out!")
if responseStatus == "accepted":
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Delete request with token: " + token + " accepted!")
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Delete request " + token + " rejected!")
# Custom Shadow callback
def customShadowCallback_Delta(payload, responseStatus, token):
# payload is a JSON string ready to be parsed using json.loads(...)
# in both Py2.x and Py3.x
print(responseStatus)
payloadDict = json.loads(payload)
print("++++++++DELTA++++++++++")
print("state: " + str(payloadDict["state"]))
print("version: " + str(payloadDict["version"]))
print("+++++++++++++++++++++++\n\n")
if 'gimbal_pitch' in payloadDict['state'].keys():
droneThing.drone(gimbal.set_target(
gimbal_id=0,
control_mode="position",
yaw_frame_of_reference="none", # None instead of absolute
yaw=0.0,
pitch_frame_of_reference="absolute",
pitch=payloadDict['state']['gimbal_pitch'],
roll_frame_of_reference="none", # None instead of absolute
roll=0.0,
)).wait()
print('updated gimbal_pitch')
if 'gimbal_speed' in payloadDict['state'].keys():
droneThing.drone(gimbal.set_max_speed(
gimbal_id=0,
yaw=0.0,
pitch=payloadDict['state']['gimbal_speed'],
roll=0.0,
)).wait()
print('updated gimbal_speed')
if 'max_vertical_speed' in payloadDict['state'].keys():
droneThing.drone(MaxVerticalSpeed(payloadDict['state']['max_vertical_speed'])).wait()
print('updated max_vertical_speed')
if 'max_rotation_speed' in payloadDict['state'].keys():
droneThing.drone(MaxRotationSpeed(payloadDict['state']['max_rotation_speed'])).wait()
print('updated max_rotation_speed')
if 'max_tilt' in payloadDict['state'].keys():
droneThing.drone(MaxTilt(payloadDict['state']['max_tilt'])).wait()
print('updated max_tilt')
if 'flight_altitude' in payloadDict['state'].keys():
droneThing.flight_altitude = payloadDict['state']['flight_altitude']
print('updated flight_altitude')
if 'detection_enabled' in payloadDict['state'].keys():
droneThing.detection_enabled = payloadDict['state']['detection_enabled']
print('updated detection_enabled')
if 'detection_mode' in payloadDict['state'].keys():
droneThing.detection_mode = payloadDict['state']['detection_mode']
print('updated detection_mode')
if 'targeted_car' in payloadDict['state'].keys():
droneThing.targeted_car = payloadDict['state']['targeted_car']
droneThing.initBB = None
droneThing.trackerInitialized = False
print('updated targeted_car')
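# A shadow delta document of the shape handled above might look like
# (illustrative values only):
#   {"state": {"targeted_car": "ferrari-red", "gimbal_pitch": -45.0,
#              "detection_enabled": true}, "version": 7}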
class DroneThing:
frameCount = 0
yaw_status = 0
def __init__(self):
# Create the olympe.Drone object from its IP address
self.drone = olympe.Drone(
"192.168.42.1",
loglevel=0,
)
self.state = {}
# default settings
self.IMAGE_WIDTH = int(1280*0.3)
self.IMAGE_HEIGHT = int(720*0.3)
self.gimbal_pitch = -90.0
self.max_altitude = 2.0
self.flight_altitude = 2.0
self.detection_enabled = True
self.detection_mode = 'cars'
self.targeted_car = 'porsche-yellow'
self.initBB = None
self.tracker = None
self.inferenceFrame = None
self.trackerInitialized = False
self.tempd = tempfile.mkdtemp(prefix="olympe_streaming_test_")
print("Olympe streaming example output dir: {}".format(self.tempd))
self.h264_frame_stats = []
self.h264_stats_file = open(
os.path.join(self.tempd, 'h264_stats.csv'), 'w+')
self.h264_stats_writer = csv.DictWriter(
self.h264_stats_file, ['fps', 'bitrate'])
self.h264_stats_writer.writeheader()
def start(self):
print('<< start() >>')
# Connect the the drone
self.drone.connection()
# Setup your callback functions to do some live video processing
self.drone.set_streaming_callbacks(
raw_cb=self.yuv_frame_cb,
h264_cb=self.h264_frame_cb
)
self.drone(MaxAltitude(self.max_altitude)).wait()
self.drone(gimbal.set_target(
gimbal_id=0,
control_mode="position",
yaw_frame_of_reference="none", # None instead of absolute
yaw=0.0,
pitch_frame_of_reference="absolute",
pitch=self.gimbal_pitch,
roll_frame_of_reference="none", # None instead of absolute
roll=0.0
)).wait()
def stop(self):
print('<< stop() >>')
# Properly stop the video stream and disconnect
self.drone.stop_video_streaming()
self.drone.disconnection()
self.h264_stats_file.close()
def yuv_frame_cb(self, yuv_frame):
"""
This function will be called by Olympe for each decoded YUV frame.
:type yuv_frame: olympe.VideoFrame
"""
# Run inference every LIMIT frames (the stream runs at ~30 fps, so roughly every 2 seconds)
LIMIT = 60
if self.frameCount < LIMIT:
self.frameCount = self.frameCount + 1
else:  # frameCount >= LIMIT: reset the counter
self.frameCount = 0
# the VideoFrame.info() dictionary contains some useful information
# such as the video resolution
info = yuv_frame.info()
height, width = info["yuv"]["height"], info["yuv"]["width"]
# print(str(width) + " x " + str(height))
# convert pdraw YUV flag to OpenCV YUV flag
cv2_cvt_color_flag = {
olympe.PDRAW_YUV_FORMAT_I420: cv2.COLOR_YUV2BGR_I420,
olympe.PDRAW_YUV_FORMAT_NV12: cv2.COLOR_YUV2BGR_NV12,
}[info["yuv"]["format"]]
# yuv_frame.as_ndarray() is a 2D numpy array with the proper "shape"
# i.e (3 * height / 2, width) because it's a YUV I420 or NV12 frame
# Use OpenCV to convert the yuv frame to RGB
cv2frame = cv2.cvtColor(yuv_frame.as_ndarray(), cv2_cvt_color_flag)
cv2frame = cv2.resize(cv2frame, None, fx=0.3, fy=0.3)
payload = {
'b64': self.frameToBase64String(cv2frame, 1.0),
'mode': self.detection_mode,
'target': CarModels[self.targeted_car]
}
# print("frameCount{}".format(self.frameCount))
if self.detection_enabled and self.frameCount == 0:
print("try inference")
self.inferenceFrame = cv2frame
myAWSIoTMQTTShadowClient.getMQTTConnection().publish('detections/{}/infer/input'.format(topic),
json.dumps(payload), 0)
# roll - right axis (left/right)
dY = 'HOLD' # direction
mY = 30.0 # magnitude
# pitch - front axis (forward/backward)
dX = 'HOLD' # direction
mX = 30.0 # magnitude
# gaz - down axis (up/down)
dZ = 'HOLD' # direction
mZ = 30.0 # magnitude
# check to see if we are currently tracking an object
if self.initBB is not None and self.trackerInitialized and self.detection_enabled:
# grab the new bounding box coordinates of the object
(success, box) = self.tracker.update(cv2frame)
# check to see if the tracking was a success
if success:
(x, y, w, h) = [int(v) for v in box]
x1 = x
y1 = y
x2 = x + w
y2 = y + h
detectionWidth = w
offsetX = self.IMAGE_WIDTH/2-(x1+x2)/2
offsetY = self.IMAGE_HEIGHT/2-(y1+y2)/2
# proportionately derived from 25/640 and 25/360
offsetYPercentage = 0.07
offsetXPercentage = 0.04
if self.detection_mode == 'cars':
if offsetY < -offsetYPercentage*self.IMAGE_HEIGHT:
dX = 'BACKWARD'
elif offsetY > offsetYPercentage*self.IMAGE_HEIGHT:
dX = 'FORWARD'
if abs(offsetY) > offsetYPercentage*self.IMAGE_HEIGHT * 3:
mX = 40.0
elif abs(offsetY) > offsetYPercentage*self.IMAGE_HEIGHT * 2:
mX = 35.0
if offsetX < -offsetXPercentage*self.IMAGE_WIDTH:
dY = 'RIGHT'
elif offsetX > offsetXPercentage*self.IMAGE_WIDTH:
dY = 'LEFT'
if abs(offsetX) > offsetXPercentage*self.IMAGE_WIDTH * 3:
mY = 40.0
elif abs(offsetX) > offsetXPercentage*self.IMAGE_WIDTH * 2:
mY = 35.0
cv2.putText(cv2frame, str(CLASSES[self.last_detected_class]) + ' - ' + str(self.last_confidence), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), lineType=cv2.LINE_AA)
self.performPilotingCommands(dX, dY, dZ, mX, mY, mZ)
elif self.detection_mode == 'drones':
if offsetX < -offsetXPercentage*self.IMAGE_WIDTH:
dY = 'RIGHT'
elif offsetX > offsetXPercentage*self.IMAGE_WIDTH:
dY = 'LEFT'
if offsetY < -offsetYPercentage*self.IMAGE_HEIGHT:
dZ = 'DOWN'
elif offsetY > offsetYPercentage*self.IMAGE_HEIGHT:
dZ = 'UP'
if detectionWidth < self.IMAGE_HEIGHT*0.10:
dX = 'FORWARD'
elif detectionWidth > self.IMAGE_HEIGHT*0.40:
dX = 'BACKWARD'
cv2.putText(cv2frame, 'anafi-drone - ' + str(self.last_confidence), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), lineType=cv2.LINE_AA)
self.performPilotingCommands(dX, dY, dZ, mX, mY, mZ)
cv2.rectangle(cv2frame,(x1,y1),(x2,y2),(245,185,66),2)
cv2.putText(cv2frame, str(self.IMAGE_WIDTH) + ', ' + str(self.IMAGE_HEIGHT) + ' | ' + str(offsetX) + ', ' + str(offsetY) + ', ' + str(detectionWidth), (0, self.IMAGE_HEIGHT-10), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), lineType=cv2.LINE_AA)
cv2.putText(cv2frame, dX + ', ' + dY + ', ' + dZ, (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), lineType=cv2.LINE_AA)
# Use OpenCV to show this frame
cv2.imshow("Frame", cv2frame)
key = cv2.waitKey(1) & 0xFF
ui_payload = {
'b64': self.frameToBase64String(cv2frame, 1.0)
}
myAWSIoTMQTTShadowClient.getMQTTConnection().publish('{}/frames'.format(topic), json.dumps(ui_payload), 0)
def frameToBase64String(self, cv2frame, resizeRatio):
# resize by resizeRatio, JPEG-encode, and return as a base64 string
cv2frame_small = cv2.resize(cv2frame, None, fx=resizeRatio, fy=resizeRatio)
retval, buffer = cv2.imencode('.jpg', cv2frame_small)
jpg_bytes = base64.b64encode(buffer).decode()
return jpg_bytes
def h264_frame_cb(self, h264_frame):
"""
This function will be called by Olympe for each new h264 frame.
:type h264_frame: olympe.VideoFrame
"""
def mission(self):
print('<< mission() >>')
self.drone(
TakeOff()
>> FlyingStateChanged("hovering", _timeout=5)
).wait()
# # up/down
# self.drone(moveBy(0,0,-1.0,0) >> FlyingStateChanged("hovering", _timeout=5)).wait()
# self.drone(moveBy(0,0,1.0,0) >> FlyingStateChanged("hovering", _timeout=5)).wait()
# # forward/backward
# self.drone(moveBy(-1.0,0,0,0) >> FlyingStateChanged("hovering", _timeout=5)).wait()
# self.drone(moveBy(1.0,0,0,0) >> FlyingStateChanged("hovering", _timeout=5)).wait()
# # left/right
# self.drone(moveBy(0,-1.0,0,0) >> FlyingStateChanged("hovering", _timeout=5)).wait()
# self.drone(moveBy(0,1.0,0,0) >> FlyingStateChanged("hovering", _timeout=5)).wait()
self.drone(Landing()).wait()
def land(self):
print('<< land() >>')
self.drone(Landing()).wait()
def takeOff(self):
print('<< takeOff() >>')
self.drone(
TakeOff()
>> FlyingStateChanged("hovering", _timeout=5)
).wait()
self.drone(
moveBy(0, 0, (1.0 - self.flight_altitude), 0)
>> FlyingStateChanged("hovering", _timeout=5)
).wait()
def startStreaming(self):
print('<< startStreaming() >>')
# Start video streaming
self.drone.start_video_streaming()
# self.drone.start_piloting()
def stopStreaming(self):
print('<< stopStreaming() >>')
# Stop video streaming
self.drone.stop_video_streaming()
self.drone.stop_piloting()
def notSupportedHandler(self):
print('<< notSupportedHandler() >>')
def predictionCallback(self, client, userdata, message):
print('<< predictionCallback() >>')
data = json.loads(message.payload.decode())
if len(data['prediction']) > 0 and data['prediction'][0][0] > -1:
# assuming the prediction layout [class_id, score, x1, y1, x2, y2]:
# index 1 is the confidence score; indices 2-5 are used below as the box
self.last_confidence = round(data['prediction'][0][1]*100, 2)
self.last_detected_class = int(data['prediction'][0][0])
x1 = data['prediction'][0][2]*self.IMAGE_WIDTH
y1 = data['prediction'][0][3]*self.IMAGE_HEIGHT
x2 = data['prediction'][0][4]*self.IMAGE_WIDTH
y2 = data['prediction'][0][5]*self.IMAGE_HEIGHT
w = x2-x1
h = y2-y1
print("({},{},{},{})".format(x1,x2,y1,y2))
print("({},{})".format(w,h))
self.initBB = (int(x1), int(y1), int(w), int(h))
# self.tracker = cv2.TrackerMOSSE_create()
# self.tracker =
: 2 * mini_batch_size]
# ...
# ```
#
# Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be $m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$.
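# For example, with $m = 148$ examples and `mini_batch_size = 64`: `math.floor(148/64) = 2` full mini-batches of 64 examples, and a final mini-batch of $148 - 64 \times 2 = 20$ examples. This matches the expected shapes in the test cell below.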
# In[4]:
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, k * mini_batch_size : (k + 1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k + 1) * mini_batch_size].reshape((1, mini_batch_size))
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m].reshape((1, m - num_complete_minibatches * mini_batch_size))
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
# In[5]:
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# **Expected Output**:
#
# <table style="width:50%">
# <tr>
# <td > **shape of the 1st mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 2nd mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 3rd mini_batch_X** </td>
# <td > (12288, 20) </td>
# </tr>
# <tr>
# <td > **shape of the 1st mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 2nd mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 3rd mini_batch_Y** </td>
# <td > (1, 20) </td>
# </tr>
# <tr>
# <td > **mini batch sanity check** </td>
# <td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
# </tr>
#
# </table>
# <font color='blue'>
# **What you should remember**:
# - Shuffling and Partitioning are the two steps required to build mini-batches
# - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
# ## 3 - Momentum
#
# Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
#
# Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
#
# <img src="images/opt_momentum.png" style="width:400px;height:250px;">
# <caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows show the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
#
#
# **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
# for $l =1,...,L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# ```
# **Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
# In[6]:
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
v['db' + str(l)] = velocity of dbl
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = np.zeros(parameters["W" + str(l+1)].shape)
v["db" + str(l+1)] = np.zeros(parameters["b" + str(l+1)].shape)
### END CODE HERE ###
return v
# In[7]:
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# **Expected Output**:
#
# <table style="width:40%">
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
# </table>
#
# **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
#
# $$ \begin{cases}
# v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
# W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
# \end{cases}\tag{3}$$
#
# $$\begin{cases}
# v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
# b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
# \end{cases}\tag{4}$$
#
# where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
# In[10]:
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
"""
Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameter:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
    v -- python dictionary containing the current velocity:
        v['dW' + str(l)] = velocity of dWl
        v['db' + str(l)] = velocity of dbl
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar
    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """
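    # A minimal body following update rules (3) and (4) above (a sketch,
    # not necessarily the graded solution):
    L = len(parameters) // 2  # number of layers in the neural network
    for l in range(L):
        # compute the velocities (exponentially weighted average of gradients)
        v["dW" + str(l + 1)] = beta * v["dW" + str(l + 1)] + (1 - beta) * grads["dW" + str(l + 1)]
        v["db" + str(l + 1)] = beta * v["db" + str(l + 1)] + (1 - beta) * grads["db" + str(l + 1)]
        # update the parameters with a step in the direction of the velocity
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v["dW" + str(l + 1)]
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v["db" + str(l + 1)]
    return parameters, v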
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# observingconditions - Implements ASCOM Alpaca ObservingConditions device class
#
# Part of the Alpyca Client application interface package
#
# Author: <NAME> <<EMAIL>> (rbd)
#
# Python Compatibility: Requires Python 3.7 or later
# Doc Environment: Sphinx v4.5.0 with autodoc, autosummary, napoleon, and autoenum
# GitHub: https://github.com/BobDenny/alpyca-client
#
# -----------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2022 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
# Edit History:
# 02-May-22 (rbd) Initial Edit
# -----------------------------------------------------------------------------
from alpaca.device import Device
class ObservingConditions(Device):
"""ASCOM Standard IObservingConditions Interface
Provides measurements of meterological conditions as apply
to astronomy. Determination of safe/unsafe is made by a separate
:py:class:`~alpaca.safetymonitor.SafetyMonitor` device.
"""
def __init__(
self,
address: str,
device_number: int,
protocol: str = "http"
):
"""Initialize the ObservingConditions object.
Args:
address (str): IP address and port of the device (x.x.x.x:pppp)
device_number (int): The index of the device (usually 0)
protocol (str, optional): Only if device needs https. Defaults to "http".
"""
super().__init__(address, "observingconditions", device_number, protocol)
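    # Minimal usage sketch (hypothetical address/port; assumes an Alpaca
    # server exposing an ObservingConditions device is reachable):
    #
    #   oc = ObservingConditions('127.0.0.1:11111', 0)
    #   print(oc.Temperature, oc.Humidity, oc.WindSpeed)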
@property
def AveragePeriod(self) -> float:
"""(read/write) Gets And sets the time period (hours) over which observations will be averaged
Raises:
InvalidValueException: If the value set is out of bounds for this device.
All devices must accept 0.0 to specify that an instantaneous value
is to be made available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* AveragePeriod returns the time period (hours) over which sensor readings will be
averaged. If the device is delivering instantaneous sensor readings this property
will return a value of 0.0.
* Though discouraged in the specification, it is possible you will receive an
  exception if you read a sensor property before enough time has passed to
  compute a true average reading.
"""
return self._get("averagePeriod")
@property
def CloudCover(self) -> float:
"""Amount of sky obscured by cloud (0.0-1.0)
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("cloudcover")
@property
def DewPoint(self) -> float:
"""Atmospheric dew point temperature (deg C) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("dewpoint")
@property
def Humidity(self) -> float:
"""Atmospheric relative humidity (0-100%) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("humidity")
@property
def Pressure(self) -> float:
"""Atmospheric pressure (hPa) at the observatory altitude
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
Not "corrected to sea level" as often encountered in weather reports.
The ConvertPressure() method may be used to get "sea level" pressure
"""
return self._get("pressure")
@property
def RainRate(self) -> float:
"""Rain rate (mm/hr) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("rainrate")
@property
def SkyBrightness(self) -> float:
"""Sky brightness (Lux) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("skybrightness")
@property
def SkyQuality(self) -> float:
"""Sky quality (mag per sq-arcsec) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("skyquality")
@property
def SkyTemperature(self) -> float:
"""Sky temperature (deg C) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("skytemperature")
@property
def StarFWHM(self) -> float:
"""Seeing (FWHM in arc-sec) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("starfwhm")
@property
def Temperature(self) -> float:
"""Atmospheric temperature (deg C) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("temperature")
@property
def WindDirection(self) -> float:
"""Direction (deg) from which the wind is blowing at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* **Meteorological standards** Wind direction is that from which the wind
  is blowing, measured in degrees clockwise from *true* North=0.0,
  East=90.0, South=180.0, West=270.0. If the wind velocity is 0 then
  direction is reported as 0.
"""
return self._get("winddirection")
@property
def WindGust(self) -> float:
"""Peak 3 second wind gust (m/s) at the observatory over the last 2 minutes
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("windgust")
@property
def WindSpeed(self) -> float:
"""Wind speed (m/s) at the observatory
Raises:
NotImplementedException: This property is not available.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
"""
return self._get("windspeed")
def Refresh(self) -> None:
"""Forces the device to immediately query its attached hardware to refresh sensor values
Raises:
NotImplementedException: This method is not supported.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
                The device did not *successfully* complete the request.
        """
        # Sketch of the request, assuming the base Device class exposes a
        # _put() helper mirroring the _get() used above.
        self._put("refresh")
# -*- coding: utf-8 -*-
"""
Survey_Estimate3dCoord.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = '2020-02-07'
__copyright__ = '(C) 2020, <NAME>'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import *
import qgis.utils
from numpy import radians, array, sin, cos, sqrt, matrix, zeros, floor, identity, diag
from numpy.linalg import pinv, norm
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.topogeo import str2HTML, String2CoordList, String2StringList, dms2dd
import os
from qgis.PyQt.QtGui import QIcon
class Estimate3dCoord(QgsProcessingAlgorithm):
COC = 'COC'
AZIMUTH = 'AZIMUTH'
ZENITH = 'ZENITH'
OUTPUT = 'OUTPUT'
WEIGHT = 'WEIGHT'
OPENOUTPUT = 'OPENOUTPUT'
HTML = 'HTML'
OPEN = 'OPEN'
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
# Translate to Portuguese: arg[0] - English (translated), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return Estimate3dCoord()
def name(self):
return 'estimate3dcoord'
def displayName(self):
return self.tr('Estimate 3D coordinates', 'Estimar coordenadas 3D')
def group(self):
return self.tr('Survey', 'Agrimensura')
def groupId(self):
return 'survey'
def tags(self):
return self.tr('survey,agrimensura,3D,coordinate,azimuth,zenith,angle,least squares,minimum distance,adjustment,slant').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/total_station.png'))
txt_en = 'This tool calculates the coordinates (X, Y, Z) of a point from azimuth and zenith angle measurements observed from two or more stations with known coordinates using the Foward Intersection Method adjusted by the Minimum Distances.'
txt_pt = 'Esta ferramenta calcula as coordenadas (X,Y,Z) de um ponto a partir de medições de azimute e ângulo zenital observados de duas ou mais estações de coordenadas conhecidas utilizando o Método de Interseção à Vante ajustado pelas Distâncias Mínimas.'
figure = 'images/tutorial/survey_3D_coord.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
nota_en = '''Notes: Data collected in the discipline of <i>Geodetic Surveys</i> in the Graduate Program at UFPE, in field work coordinated by <b>Prof. Dr. <NAME></b>.
For more information on the methodology used, please read the article at the link below:'''
nota_pt = '''Notas: Dados coletados na disciplina de <i>Levantamentos Geodésicos</i> no programa de Pós-Graduação da UFPE, em trabalho de campo coordenado pela <b>Profa. Dra. Andrea de Seixas</b>.
Para mais informações sobre a metodologia utilizada, por favor leia o artigo no link abaixo:'''
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<div>''' + self.tr(nota_en, nota_pt) + '''
</div>
<p align="right">
<b><a href="https://www.researchgate.net/publication/352817150_OPTIMIZED_DETERMINATION_OF_3D_COORDINATES_IN_THE_SURVEY_OF_INACCESSIBLE_POINTS_OF_BUILDINGS_-_EXAMPLE_OF_APPLICATION_IMPLEMENTED_IN_FREE_SOFTWARE_Determinacao_otimizada_de_coordenadas_3D_no_levantamen" target="_blank">'''+self.tr('<NAME>.; <NAME>.; <NAME>.; <NAME>. Optimized determination of 3D coordinates in the survey of inaccessible points of buildings - example of application implemented in free software. Bulletin of Geodetic Sciences. 27(2): e2021017, 2021. ') + '''</b>
''' +'</a><br><b>'+ self.tr('Author: <NAME>', 'Autor: <NAME>')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
def initAlgorithm(self, config=None):
# 'INPUTS'
self.addParameter(
QgsProcessingParameterString(
self.COC,
self.tr('Coordinates of Optical Centers', 'Coordenadas dos Centros Ópticos'),
defaultValue = '149867.058, 249817.768, 1.825; 149988.309, 249782.867, 1.962; 150055.018, 249757.128, 1.346; 150085.600, 249877.691, 1.559',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterString(
self.AZIMUTH,
self.tr('Azimuths', 'Azimutes'),
defaultValue = '''46°10'06.37”, 359°12'12.21”, 338°32'59.40”, 298°46'22.93”''',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterString(
self.ZENITH,
self.tr('Zenith Angles', 'Ângulos Zenitais'),
defaultValue = '''72°24'22.25”, 70°43'01.75", 74°17'54.17", 65°04'27.25"''',
multiLine = True
)
)
self.addParameter(
QgsProcessingParameterBoolean(
self.WEIGHT,
self.tr('Use Weight Matrix (W)', 'Usar Matrix Peso (P)'),
defaultValue = False
)
)
# 'OUTPUT'
self.addParameter(
QgsProcessingParameterFileDestination(
self.OUTPUT,
self.tr('Adjusted 3D Coordinates', 'Coordenadas 3D Ajustadas'),
fileFilter = 'CSV (*.csv)'
)
)
self.addParameter(
QgsProcessingParameterFileDestination(
'HTML',
self.tr('Adjustment Report', 'Relatório de Ajustamento'),
self.tr('HTML files (*.html)')
)
)
self.addParameter(
QgsProcessingParameterBoolean(
self.OPEN,
self.tr('Open output file after executing the algorithm', 'Abrir arquivo de saída com coordenadas 3D'),
defaultValue= True
)
)
def CosDir(self, Az, Z):
k = sin(Z)*sin(Az)
m = sin(Z)*cos(Az)
n = cos(Z)
return array([[k],[m],[n]])
def processAlgorithm(self, parameters, context, feedback):
COs = self.parameterAsString(
parameters,
self.COC,
context
)
Azimutes = self.parameterAsString(
parameters,
self.AZIMUTH,
context
)
ÂngulosZenitais = self.parameterAsString(
parameters,
self.ZENITH,
context
)
usar_peso = self.parameterAsBool(
parameters,
self.WEIGHT,
context
)
abrir_arquivo = self.parameterAsBool(
parameters,
self.OPEN,  # the boolean parameter declared in initAlgorithm (OPENOUTPUT is never added)
context
)
output = self.parameterAsFileOutput(
parameters,
self.OUTPUT,
context
)
if output[-3:] != 'csv':
output += '.csv'
html_output = self.parameterAsFileOutput(
parameters,
self.HTML,
context
)
# Points
Coords = String2CoordList(COs)
# Azimuths (radians)
Az = []
for item in String2StringList(Azimutes):
Az += [dms2dd(item)]
Az = radians(array(Az))
# Zenith angles (radians)
Z = []
for item in String2StringList(ÂngulosZenitais):
Z += [dms2dd(item)]
Z = radians(array(Z))
# Input data validation
if not (len(Coords) == len(Az) and len(Az) == len(Z)):
raise QgsProcessingException(self.tr('Wrong number of parameters!', 'Número de parâmetros errado!'))
else:
n = len(Coords)
# there should be no null values
# angles should be between 0 and 360 degrees
# Assemble vector L
L = []
for k in range(len(Coords)):
L+= [[Coords[k][0]], [Coords[k][1]], [Coords[k][2]]]
L = array(L)
# Assemble matrix A
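        # Geometry behind A (sketch of the reasoning): each station k at
        # optical center C_k sees the unknown point P along the unit vector
        # u_k = CosDir(Az_k, Z_k), i.e. P - t_k * u_k = C_k for an unknown
        # slant range t_k. Stacking the n stations gives A @ [P; t_1..t_n] = L,
        # where each 3-row block of A is [ I3 | 0 ... -u_k ... 0 ], with
        # e = 3n equations and p = 3 + n unknowns.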
e = 3*n
p = 3 + n
A = matrix(zeros([e, p]))
for k in range(n):
A[3*k:3*k+3, 0:3] = identity(3)
A[3*k:3*k+3, 3+k] = -1*self.CosDir(Az[k], Z[k])
# Least-squares (MMQ) adjustment
X = pinv(A.T*A)*A.T*L
V = A*X - L
sigma2 = (V.T*V)/(e-p)
SigmaX = sigma2[0,0]*pinv(A.T*A)
if usar_peso:
Ponto = array(X[0:3, :].T)[0]
d = []
for coord in Coords:
dist = norm(array(coord)-Ponto)
d += [1/dist, 1/dist, 1/dist]
P = diag(d)
X = pinv(A.T*P*A)*A.T*P*L
V = A*X - L
sigma2 = (V.T*P*V)/(e-p)
SigmaX = sigma2[0,0]*pinv(A.T*P*A)
VAR = str(round(sigma2[0,0],5))
x = round(float(X[0, 0]),3)
y = round(float(X[1, 0]),3)
z = round(float(X[2, 0]),3)
s_x = round(float(sqrt(SigmaX[0, 0])),3)
s_y = round(float(sqrt(SigmaX[1, 1])),3)
s_z = round(float(sqrt(SigmaX[2, 2])),3)
# Slant Range
slant_range = []
s_t = []
for k in range(len(Coords)):
slant_range += [round(float(X[k+3, 0]),3)]
s_t += [round(float(sqrt(SigmaX[k+3, k+3])),3)]
# Results
arq = open(output, 'w')
arq.write('X,Y,Z,'+ self.tr('type', 'tipo') + '\n')
arq.write('{},{},{},'.format(x,y,z) + self.tr('Adjusted 3D Coordinates', 'Coordenadas 3D Ajustadas') + '\n')
for k in range(len(Coords)):
arq.write('{},{},{},{}'.format(Coords[k][0],Coords[k][1],Coords[k][2], self.tr('Station', 'Estacao') + ' ' + str(k+1) ) + '\n')
arq.close()
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html; charset=ISO-8859-1" http-equiv="content-type">
<title>''' + self.tr('Estimate 3D Coordinates', str2HTML('Estimação de Coordenadas 3D')) + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftools.png?raw=true" type = "image/x-icon">
</head>
<body style="color: rgb(0, 0, 0); background-color: rgb(255, 255, 204);" alink="#000099" link="#000099" vlink="#990099">
<p class="MsoNormal" style="text-align: center;" align="center"><b><span style="font-size: 12pt; line-height: 107%;">''' + self.tr('ESTIMATE 3D COORDINATES', 'ESTIMAÇÃO DE COORDENADAS 3D') + '''<o:p></o:p></span></b></p>
<p class="MsoNormal" style="text-align: center;" align="center"><i>''' + self.tr('Minimum Distance Method', 'Método das Distâncias Mínimas') + '''</i></p>
<p class="MsoNormal" style="text-align: center;" align="center"><b><u>''' + self.tr('REPORT','RELATÓRIO') + '''<o:p></o:p></u></b></p>
<div>
<table style="text-align: center; width: 100%;" border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td width="50%"><b>''' + self.tr('Inputs', 'Dados de Entrada') + '''</b></td>
<td width="50%"><b>'''+ self.tr('Adjustment','Ajustamento') + '''</b></td>
</tr>
<tr>
<td style="text-align: center;">
<p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Coordinates of the Optical Centers','Coordenadas dos Centros Ópticos')+ '''</span><o:p></o:p></p>
<div align="center">
<table class="MsoTableGrid" style="border: medium none ; border-collapse: collapse;" border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr style="">
<td
style="border: 1pt solid windowtext; padding: 0cm 5.4pt; width: 39.3pt;"
valign="top" width="52">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>'''+self.tr('Station', str2HTML('Estação')) + '''</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>X</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>Y</i><o:p></o:p></p>
</td>
<td
style="border-style: solid solid solid none; border-color: windowtext windowtext windowtext -moz-use-text-color; border-width: 1pt 1pt 1pt medium; padding: 0cm 5.4pt; width: 46.05pt;"
valign="top" width="61">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i>Z</i><o:p></o:p></p>
</td>
</tr>
[tabela 1]
</tbody>
</table>
</div>
<p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;"></span></p>
<p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Azimuths','Azimutes') + '''</span><o:p></o:p></p>
<div align="center">
<table class="MsoTableGrid" style="border: medium none ; border-collapse: collapse;" border="0" cellpadding="0" cellspacing="0">
<tbody>
[tabela 2]
</tbody>
</table>
</div>
<p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;"></span></p>
<p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Zenith Angles', 'Ângulos Zenitais') + '''</span><o:p></o:p></p>
<div align="center">
<table class="MsoTableGrid" style="border: medium none ; border-collapse: collapse;" border="0" cellpadding="0" cellspacing="0">
<tbody>
[tabela 3]
</tbody>
</table>
<p class="MsoNormal" style="text-align: center;" align="center"><span style="font-style: italic;">''' + self.tr('Weight | |
QubitHamiltonian = bravyi_kitaev_tree(FermionicHamiltonian)
else:
raise ValueError('unknown transformation')
if threshold is None:
return QubitHamiltonian
else:
from openfermion.ops import QubitOperator
reduced_QubitHamiltonian = QubitOperator()
for key in QubitHamiltonian.terms:
if np.abs(QubitHamiltonian.terms[key]) > threshold:
reduced_QubitHamiltonian += QubitOperator(key, QubitHamiltonian.terms[key])
return reduced_QubitHamiltonian
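    # Usage sketch (hypothetical Hamiltonian object; signature as in the
    # commented-out documentation further below). Dropping terms with
    # coefficients below 1e-2 would discard e.g. a (0.00003+0j) [Y0 X1 X2 Y3]
    # term:
    #
    #   H_qubit = mol.Get_Qubit_Hamiltonian(threshold=1e-2, transformation='JW')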
def Get_sparse_Qubit_Hamiltonian_matrix(self, QubitOperator):
"""
Get sparse matrix of qubit Hamiltonian
Args:
QubitOperator (openfermion.ops._qubit_operator.QubitOperator): Qubit operator
Returns:
sparse matrix of Hamiltonian
e.g.
input:
(-0.09706626861762581+0j) [] +
(-0.045302615508689394+0j) [X0 X1 Y2 Y3] +
(0.045302615508689394+0j) [X0 Y1 Y2 X3] +
(0.045302615508689394+0j) [Y0 X1 X2 Y3] +
(-0.045302615508689394+0j) [Y0 Y1 X2 X3] +
(0.17141282639402383+0j) [Z0]
"""
from openfermion import qubit_operator_sparse
return qubit_operator_sparse(QubitOperator)
def Get_HF_spin_orbital_occupation(self):
#https://sunqm.github.io/pyscf/scf.html
if self.molecule is None:
self.Run_PySCF()
return self.molecule._pyscf_data['scf'].get_occ()
### psi4 implementation
# from openfermionpsi4 import run_psi4
# class Hamiltonian():
# """
#
# Object that can perform PSI4 quantum chemistry calculation and converts results for use
# on quantum computer.
#
# Args:
# MoleculeName (str): Name of Molecule
# run_scf (int): Boolean to run SCF calculation.
# run_mp2 (int): Boolean to run MP2 calculation.
# run_cisd (int): Boolean to run CISD calculation.
# run_ccsd (int): Boolean to run CCSD calculation.
# run_fci (int): Boolean to FCI calculation.
# multiplicity(int): Multiplicity of molecule (1=singlet, 2=doublet ... etc)
# geometry (list, optional): Geometry of Molecule (if None will find it online)
# delete_input (bool, optional): Optional boolean to delete psi4 input file.
# delete_output: Optional boolean to delete psi4 output file
#
# Attributes:
# molecule (openfermion.hamiltonians._molecular_data.MolecularData): An instance of the MolecularData class
#
# molecule.single_cc_amplitudes (numpy.array): h_pq (n_qubits x n_qubits) numpy array
# molecule.double_cc_amplitudes (numpy.array): h_pqrs (n_qubits x n_qubits x n_qubits x n_qubits) numpy array
#
# singles_hamiltonian (numpy.ndarray): h_pq (n_qubits x n_qubits) matrix.
# doubles_hamiltonian (numpy.ndarray): h_pqrs (n_qubits x n_qubits x n_qubits x n_qubits) matrix
# MolecularHamiltonian (openfermion.ops._interaction_operator.InteractionOperator):
# MolecularHamiltonianMatrix (scipy.sparse.csc.csc_matrix): Sparse matrix of Molecular Hamiltonian
# num_theta_parameters(int): Number of theta parameters in for ia_and_ijab_terms
#
# """
# def __init__(self, MoleculeName,
# run_scf = 1, run_mp2 = 1, run_cisd = 1, run_ccsd = 1, run_fci=1,
# basis='sto-3g', multiplicity=1, geometry=None, delete_input=False, delete_output=False):
#
# # basis = 'cc-pvtz'
#
# self.MoleculeName = MoleculeName
# self.run_scf = bool(run_scf)
# self.run_mp2 = bool(run_mp2)
# self.run_cisd = bool(run_cisd)
# self.run_ccsd = bool(run_ccsd)
# self.run_fci = bool(run_fci)
# self.geometry = geometry
# self.multiplicity = multiplicity
# self.basis = basis
# self.molecule = None
# self.delete_input = delete_input
# self.delete_output = delete_output
# self.num_theta_parameters = None
# self.MolecularHamiltonianMatrix = None
#
# def Run_Psi4(self):
#
# if self.geometry is None:
# self.Get_Geometry()
#
# # input
# self.molecule = MolecularData(
# self.geometry,
# self.basis,
# self.multiplicity,
# description=self.MoleculeName)
#
# #output file
# self.molecule.filename = self.MoleculeName
#
# # Run Psi4.
# self.molecule = run_psi4(self.molecule,
# run_scf=self.run_scf,
# run_mp2=self.run_mp2,
# run_cisd=self.run_cisd,
# run_ccsd=self.run_ccsd,
# run_fci=self.run_fci,
# delete_input=False,
# delete_output=False)
# # note template: ~/envs/ENV_NAME/lib/python3.7/site-packages/openfermionpsi4 : _psi4_template
# # https://github.com/quantumlib/OpenFermion-Psi4/blob/master/openfermionpsi4/_psi4_template
#
# def Get_Geometry(self):
#
# """
# Function to get molecule's geometry from the PubChem database.
#
# Args:
# MoleculeName: a string giving the molecule's name as required by the PubChem
# database.
#
# Returns:
# geometry: a list of tuples giving the coordinates of each atom with
# distances in Angstrom.
# """
#
# from openfermion.utils import geometry_from_pubchem
# self.geometry = geometry_from_pubchem(self.MoleculeName)
#
# if self.geometry is None:
# raise ValueError('Unable to find molecule in the PubChem database.')
#
# def PrintInfo(self):
# if self.molecule is None:
# self.Run_Psi4()
#
# print('Geometry: ', self.geometry)
# print('No Qubits: ', self.molecule.n_qubits)
# print('No. Spin Orbitals: ', self.molecule.n_orbitals * 2)
# print('multiplicity: ', self.multiplicity)
#
# print('HF Energy: ', self.molecule.hf_energy)
# print('CCSD: ', self.molecule.ccsd_energy)
# print('FCI: ', self.molecule.fci_energy)
#
# def Get_Molecular_Hamiltonian(self, Get_H_matrix=False):
# """
# Get sparse matrix of molecular Hamiltonian
#
# Attributes:
# singles_hamiltonian (numpy.ndarray): h_pq (n_qubits x n_qubits) matrix.
# doubles_hamiltonian (numpy.ndarray): h_pqrs (n_qubits x n_qubits x n_qubits x n_qubits) matrix
# MolecularHamiltonian (openfermion.ops._interaction_operator.InteractionOperator):
# MolecularHamiltonianMatrix (scipy.sparse.csc.csc_matrix): Sparse matrix of Molecular Hamiltonian
#
# """
# if self.molecule is None:
# self.Run_Psi4()
#
# # H = constant + ∑_pq (h_pq a†_p a_q) + ∑_pqrs (h_pqrs a†_p a†_q a_r a_s)
# self.MolecularHamiltonian = self.molecule.get_molecular_hamiltonian() # instance of the MolecularOperator class
# self.singles_hamiltonian = self.MolecularHamiltonian.one_body_tensor # h_pq (n_qubits x n_qubits numpy array)
# self.doubles_hamiltonian = self.MolecularHamiltonian.two_body_tensor # h_pqrs (n_qubits x n_qubits x n_qubits x n_qubits numpy array
#
#
# # Get Matrix Form of QubitHamiltonian
# if Get_H_matrix is True:
# from openfermion.transforms import get_sparse_operator
# self.MolecularHamiltonianMatrix = get_sparse_operator(self.MolecularHamiltonian)
#
# def Get_CCSD_Amplitudes(self):
# """
# Parse coupled cluster singles and doubles amplitudes from psi4 file
# where: H = constant + ∑_pq (h_pq a†_p a_q) + ∑_pqrs (h_pqrs a†_p a†_q a_r a_s)
# note that:
# - single amplitudes: h_pq is a (n_qubits x n_qubits) numpy array
# - doubles amplitudes: h_pqrs is a (n_qubits x n_qubits x n_qubits x n_qubits) numpy array
#
# indexing for single_cc_amplitudes = a, i --> (un_occupied=a and occupied=i)
# indexing for molecule = a,i, b,j --> (occupied=i,j and un_occupied=a,b)
#
# e.g. for H2 expect double_cc_amplitudes[2,0,3,1] (e- excited from 0-->2 and 1-->3)
#
# """
#
# from openfermionpsi4._psi4_conversion_functions import parse_psi4_ccsd_amplitudes
# # https://github.com/quantumlib/OpenFermion-Psi4/blob/master/openfermionpsi4/_psi4_conversion_functions.py
# if self.molecule is None:
# self.Run_Psi4()
#
# self.molecule.single_cc_amplitudes, self.molecule.double_cc_amplitudes = (
# parse_psi4_ccsd_amplitudes(
# 2 * self.molecule.n_orbitals,
# self.molecule.get_n_alpha_electrons(),
# self.molecule.get_n_beta_electrons(),
# self.molecule.filename + ".out"))
#
# def Get_FCI_from_MolecularHamialtonian(self):
#
# if self.MolecularHamiltonianMatrix is None:
# self.Get_Molecular_Hamiltonian(Get_H_matrix=True)
#
# from scipy.sparse.linalg import eigs
# eig_values, eig_vectors = eigs(self.MolecularHamiltonianMatrix)
# FCI_Energy = min(eig_values)
#
# if not np.isclose(FCI_Energy.real, self.molecule.fci_energy, rtol=1e-09, atol=0.0):
# raise ValueError('Calculated FCI energy from Molecular Hamiltonian Operator not equivalent to PSI4 calculation')
# return FCI_Energy
#
# def Get_NOON(self):
# """
# See https://journals.aps.org/prx/pdf/10.1103/PhysRevX.8.031022 appendix C for further details
#
# Taking the 1-RDM (in the canonical orbital basis from a CISD calculation) which are arranged as "spin-up,
# spin-down, spin-up, spin-down..." combining the spin up and down terms. Diagonalizing the resultant matrix
# gives the 1-RDM fermionic natural molecular orbitals (NMO) basis. Eigenvalues of this matrix are the
# natural orbital occupation number (NOON). Orbitals with a small NOON can be assumed to be UNFILLED and REMOVED
# from the Hamiltonian! Orbitals with large NOON (close to 2) can assumed to be FILLED and also removed!
#
#
# returns:
# NOON (np.array): natural orbital occupation number
# NMO_basis (np.array): natural molecular orbitals (NMO) basis
#
# e.g. for LiH with a bond length of 1.45 A:
#
# NOON =
# array([1.99991759e+00,
# 1.96200679e+00,
# 3.45854731e-02,
# 4.91748520e-05,
# 1.72048547e-03,
# 1.72048547e-03])
#
# shows natural orbital occupation!
#
# therefore here we can see that the orbital with index 0 has an occupation close to TWO, so we can consider
# it always doubly occupied... and remove any terms in the hamiltonian containing: a†0, a0, a†1, a1.
#
# Also have a look at index 3... occupation is: 4.91748520e-05 ... VERY SMALL NOON... therefore can assume
# these orbitals are never occupied. Again can remove the fermion operators from the Hamiltonian too:
# a†6, a6, a†7, a7.
# """
# if self.molecule is None:
# self.Run_Psi4()
#
# # one_RDM = self.molecule.cisd_one_rdm
# one_RDM = self.molecule.fci_one_rdm
#
# one_rdm_a = one_RDM[np.arange(0, self.molecule.n_qubits, 2)][:, np.arange(0, self.molecule.n_qubits, 2)]
# one_rdm_b = one_RDM[np.arange(1, self.molecule.n_qubits, 2)][:, np.arange(1, self.molecule.n_qubits, 2)]
#
# one_RDM_combined = one_rdm_a + one_rdm_b #spin up + spin down
# from numpy.linalg import eig
# # diagonalizing gives 1-RDM in terms of natural molecular orbitals (NMOs)
# NOON, NMO_basis = eig(one_RDM_combined) # NOON = natural orbital occupation numbers = eigenvalues
#
# # ordering
# idx = NOON.argsort()[::-1]
# NOON_ordered = NOON[idx]
# NMO_basis_ordered = NMO_basis[:, idx]
#
#
# # Diag_matix = C^{-1} A C
# # Diag_matix = np.dot(np.linalg.inv(NMO_basis_ordered) , np.dot(np.diag(one_RDM_combined), NMO_basis_ordered))
#
# # basis_transformation_matrix = eig_vectors.transpose() #<--- here!
# # The Molecular Hamiltonian must also be rotated, using the same unitary matrixused to diagonalise the 1 - RDM.
# # equivalent to performing a change of basis, from the canonical orbital basis to the natural molecular orbital basis
#
#
# return NOON_ordered, NMO_basis_ordered #NOON, NMO_basis #NOON_ordered, NMO_basis_ordered#, basis_transformation_matrix
#
# def Get_Fermionic_Hamiltonian(self):
#
# """
# Returns the second quantised Fermionic Molecular Hamiltonian of the Molecular Hamiltonian
#
# e.g. H = h00 a†0a0 + h11a†1a1 + h22a†2a2 + h33a†3a3 + h0110 a†0a†1a1a0 +h2332a†2a†3a3a2 + ... etc etc
#
# note can get integrals (h_ia and h_ijab) from Get_CCSD_Amplitudes method of Hamiltonian class!
#
# returns:
# FermionicHamiltonian (openfermion.ops._fermion_operator.FermionOperator): Fermionic Operator
#
# e.g. for H2:
# 0.7151043387432434 [] +
# -1.2533097864345657 [0^ 0] +
# 0.3373779633738658 [0^ 0^ 0 0] +
# 0.09060523101737843 [0^ 0^ 2 2] +
# 0.3373779633738658 [0^ 1^ 1 0] +
# 0.09060523101737843 [0^ 1^ 3 2] + ... etc ... etc
#
# """
# if self.MolecularHamiltonianMatrix is None:
# self.Get_Molecular_Hamiltonian(Get_H_matrix=False)
#
# from openfermion.transforms import get_fermion_operator
# FermionicHamiltonian = get_fermion_operator(self.MolecularHamiltonian)
#
# return FermionicHamiltonian
#
# def Get_Qubit_Hamiltonian(self, threshold=None, transformation='JW'):
#
# """
# Returns the second quantised Qubit Molecular Hamiltonian of the Molecular Hamiltonian using the
# Jordan Wigner / Bravyi kitaev transformation. First gets the fermionic Hamiltonian and than performs transform.
#
# e.g. H = h0 I + h1 Z0 + h2 Z1 +h3 Z2 + h4 Z3 + h5 Z0Z1 ... etc etc
# note can get integrals (h_ia and h_ijab) from Get_CCSD_Amplitudes method of Hamiltonian class!
#
# args:
# threshold (optional, float): gives threshold of terms to ignore... e.g. the term
# (0.00003+0j) [Y0 X1 X2 Y3]] would be ignored if threshold = 1e-2
# returns:
# QubitHamiltonian (openfermion.ops._qubit_operator.QubitOperator):
# albaceaScript.py
#!/usr/bin/env python3
################################################################################################
#
# AlbaceaScript: based on the Glacier Protocol (http://glacierprotocol.org)
#
# It is designed specifically for use in the context of executing the broader Glacier
# Protocol, a step-by-step procedure for high-security cold storage of Bitcoin. It is not
# intended to be used as standalone software.
#
# AlbaceaScript primarily replaces tasks that users would otherwise be doing manually, such as
# typing things on the command line, copying-and-pasting strings, and hand-editing JSON. It
# mostly consists of print statements, user input, string & JSON manipulation, and command-line
# wrappers around Bitcoin Core and other applications (e.g. those involved in reading and writing
# QR codes.)
#
# AlbaceScript avoids cryptographic and other security-sensitive operations as much as possible.
#
# AlbaceaScript depends on the following command-line applications:
# - Bitcoin Core (http://bitcoincore.org)
# - qrencode (QR code writer: http://packages.ubuntu.com/xenial/qrencode)
# - zbarimg (QR code reader: http://packages.ubuntu.com/xenial/zbar-tools)
#
################################################################################################
# standard Python libraries
import argparse
from collections import OrderedDict
from decimal import Decimal
from hashlib import sha256, md5
import json
import os
import shlex
import subprocess
import sys
import time
import random
# Taken from https://github.com/keis/base58
from base58 import b58encode_check
SATOSHI_PLACES = Decimal("0.00000001")
verbose_mode = 0
################################################################################################
#
# Minor helper functions
#
################################################################################################
def hash_sha256(s):
"""A thin wrapper around the hashlib SHA256 library to provide a more functional interface"""
m = sha256()
m.update(s.encode('ascii'))
return m.hexdigest()
def hash_md5(s):
"""A thin wrapper around the hashlib md5 library to provide a more functional interface"""
m = md5()
m.update(s.encode('ascii'))
return m.hexdigest()
def satoshi_to_btc(satoshi):
"""
Converts a value in satoshi to a value in BTC
outputs => Decimal
satoshi: <int>
"""
value = Decimal(satoshi) / Decimal(100000000)
return value.quantize(SATOSHI_PLACES)
def btc_to_satoshi(btc):
"""
Converts a value in BTC to satoshi
outputs => <int>
btc: <Decimal> or <Float>
"""
value = btc * 100000000
return int(value)
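# Worked example of the pair of conversions above:
#   satoshi_to_btc(123456789)             -> Decimal('1.23456789')
#   btc_to_satoshi(Decimal('1.23456789')) -> 123456789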
################################################################################################
#
# Subprocess helper functions
#
################################################################################################
def verbose(content):
if verbose_mode: print(content)
def run_subprocess(exe, *args):
"""
Run a subprocess (bitcoind or bitcoin-cli)
Returns => (command, return code, output)
exe: executable file name (e.g. bitcoin-cli)
args: arguments to exe
"""
cmd_list = [exe] + cli_args + list(args)
verbose("bitcoin cli call:\n {0}\n".format(" ".join(shlex.quote(x) for x in cmd_list)))
with subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1024) as pipe:
output, _ = pipe.communicate()
output = output.decode('ascii')
retcode = pipe.returncode
verbose("bitcoin cli call return code: {0} output:\n {1}\n".format(retcode, output))
return (cmd_list, retcode, output)
def bitcoin_cli_call(*args):
"""
Run `bitcoin-cli`, return OS return code
"""
_, retcode, _ = run_subprocess("bitcoin-cli", *args)
return retcode
def bitcoin_cli_checkoutput(*args):
"""
Run `bitcoin-cli`, fail if OS return code nonzero, return output
"""
cmd_list, retcode, output = run_subprocess("bitcoin-cli", *args)
if retcode != 0: raise subprocess.CalledProcessError(retcode, cmd_list, output=output)
return output
def bitcoin_cli_json(*args):
"""
Run `bitcoin-cli`, parse output as JSON
"""
return json.loads(bitcoin_cli_checkoutput(*args))
def bitcoind_call(*args):
"""
Run `bitcoind`, return OS return code
"""
_, retcode, _ = run_subprocess("bitcoind", *args)
return retcode
################################################################################################
#
# Read & validate random data from the user
#
################################################################################################
def validate_rng_seed(seed, min_length):
"""
Validates random hexadecimal seed
returns => <boolean>
seed: <string> hex string to be validated
min_length: <int> number of characters required. > 0
"""
if len(seed) < min_length:
print("Error: Computer entropy must be at least {0} characters long".format(min_length))
return False
if len(seed) % 2 != 0:
print("Error: Computer entropy must contain an even number of characters.")
return False
try:
int(seed, 16)
except ValueError:
print("Error: Illegal character. Computer entropy must be composed of hexadecimal characters only (0-9, a-f).")
return False
return True
def read_rng_seed_interactive(min_length):
"""
Reads random seed (of at least min_length hexadecimal characters) from standard input
returns => string
min_length: <int> minimum number of bytes in the seed.
"""
char_length = min_length * 2
def ask_for_rng_seed(length):
print("Enter at least {0} characters of computer entropy. Spaces are OK, and will be ignored:".format(length))
ask_for_rng_seed(char_length)
seed = input()
seed = unchunk(seed)
while not validate_rng_seed(seed, char_length):
ask_for_rng_seed(char_length)
seed = input()
seed = unchunk(seed)
return seed
def validate_dice_seed(dice, min_length):
"""
Validates dice data (i.e. ensures all digits are between 1 and 6).
returns => <boolean>
dice: <string> representing list of dice rolls (e.g. "5261435236...")
"""
if len(dice) < min_length:
print("Error: You must provide at least {0} dice rolls".format(min_length))
return False
for die in dice:
try:
i = int(die)
if i < 1 or i > 6:
print("Error: Dice rolls must be between 1 and 6.")
return False
except ValueError:
print("Error: Dice rolls must be numbers between 1 and 6")
return False
return True
def read_dice_seed_interactive(min_length):
"""
Reads min_length dice rolls from standard input, as a string of consecutive integers
Returns a string representing the dice rolls
returns => <string>
min_length: <int> number of dice rolls required. > 0.
"""
def ask_for_dice_seed(x):
print("You can either: ")
print("- Enter {0} dice rolls Spaces are OK, and will be ignored:".format(x))
print("OR")
print("- You can use this randomly generated string: \n", dice_throw_generator())
ask_for_dice_seed(min_length)
dice = input("Your input: ")
dice = unchunk(dice)
while not validate_dice_seed(dice, min_length):
ask_for_dice_seed(min_length)
dice = input()
dice = unchunk(dice)
return dice
################################################################################################
#
# private key generation
#
################################################################################################
def dice_throw_generator():
"""
Emulates the result of throwing a die 62 times.
To generate cryptographically secure random numbers, we are using SystemRandom, which uses os.urandom()
More info: https://realpython.com/python-random/
"""
throw = ""
for i in range(0, 62):
throw += str(random.SystemRandom().randint(1, 6))
return throw
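# Why 62 rolls: each roll of a fair six-sided die contributes log2(6) ~= 2.585
# bits, so 62 rolls yield roughly 160 bits of entropy:
#
#   >>> import math
#   >>> round(62 * math.log2(6), 1)
#   160.3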
def xor_hex_strings(str1, str2):
"""
Return xor of two hex strings.
An XOR of two pieces of data will be as random as the input with the most randomness.
We can thus combine two entropy sources in this way as a safeguard against one source being
compromised in some way.
For details, see http://crypto.stackexchange.com/a/17660
returns => <string> in hex format
"""
if len(str1) != len(str2):
raise Exception("tried to xor strings of unequal length")
str1_dec = int(str1, 16)
str2_dec = int(str2, 16)
xored = str1_dec ^ str2_dec
return "{:0{}x}".format(xored, len(str1))
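# A minimal, non-secret example of the combination (values chosen for clarity):
#
#   >>> xor_hex_strings("0f0f", "00ff")
#   '0ff0'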
def hex_private_key_to_WIF_private_key(hex_key):
"""
Converts a raw 256-bit hex private key to WIF format
returns => <string> in WIF (Base58Check) format
"""
hex_key_with_prefix = wif_prefix + hex_key + "01"
wif_key = b58encode_check(bytes.fromhex(hex_key_with_prefix))
return wif_key.decode('ascii')
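# Sketch of a call; the key below is an all-ones placeholder, NOT a usable
# secret. `wif_prefix` is defined elsewhere in this script (conventionally "80"
# for mainnet), and the appended "01" marks the key as compressed:
#
#   wif = hex_private_key_to_WIF_private_key("11" * 32)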
################################################################################################
#
# Bitcoin helper functions
#
################################################################################################
def ensure_bitcoind_running():
"""
Start bitcoind (if it's not already running) and ensure it's functioning properly
"""
# start bitcoind. If another bitcoind process is already running, this will just print an error
# message (to /dev/null) and exit.
#
# -connect=0.0.0.0 because we're doing local operations only (and have no network connection anyway)
bitcoind_call("-daemon", "-connect=0.0.0.0")
# verify bitcoind started up and is functioning correctly
times = 0
while times <= 20:
times += 1
if bitcoin_cli_call("getnetworkinfo") == 0:
return
time.sleep(0.5)
raise Exception("Timeout while starting bitcoin server")
def require_minimum_bitcoind_version(min_version):
"""
Fail if the bitcoind version in use is older than required
<min_version> - required minimum version in format of getnetworkinfo, i.e. 150100 for v0.15.1
"""
networkinfo = bitcoin_cli_json("getnetworkinfo")
if int(networkinfo["version"]) < min_version:
print("ERROR: Your bitcoind version is too old. You have {}, I need {} or newer. Exiting...".format(networkinfo["version"], min_version))
sys.exit()
def get_address_for_wif_privkey(privkey):
"""A method for retrieving the address associated with a private key from bitcoin core
<privkey> - a bitcoin private key in WIF format"""
# Bitcoin Core doesn't have an RPC for "get the addresses associated w/this private key"
# just "get the addresses associated with this label"
# where "label" corresponds to an arbitrary tag we can associate with each private key
# so, we'll generate a unique "label" to attach to this private key.
label = hash_sha256(privkey)
ensure_bitcoind_running()
bitcoin_cli_call("importprivkey", privkey, label)
addresses = bitcoin_cli_json("getaddressesbylabel", label)
# getaddressesbylabel returns multiple addresses associated with
# this one privkey; since we use it only for communicating the
# pubkey to addmultisigaddress, it doesn't matter which one we
# choose; they are all associated with the same pubkey.
return next(iter(addresses))
def addmultisigaddress(m, addresses_or_pubkeys, address_type='p2sh-segwit'):
"""
Call `bitcoin-cli addmultisigaddress`
returns => JSON response from bitcoin-cli
m: <int> number of multisig keys required for withdrawal
addresses_or_pubkeys: List<string> either addresses or hex pubkeys for each of the N keys
"""
address_string = json.dumps(addresses_or_pubkeys)
return bitcoin_cli_json("addmultisigaddress", str(m), address_string, "", address_type)
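# Illustrative 2-of-3 usage (the address variables are placeholders); the RPC
# returns a JSON object that includes at least an "address" field:
#
#   result = addmultisigaddress(2, [addr_1, addr_2, addr_3])
#   multisig_address = result["address"]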
def get_utxos(tx, address):
"""
Given a transaction, find all the outputs that were sent to an address
returns => List<Dictionary> list of UTXOs in bitcoin core format
tx - <Dictionary> in bitcoin core format
address - <string>
"""
utxos = []
for output in tx["vout"]:
if "addresses" not in output["scriptPubKey"]:
# In Bitcoin Core versions older than v0.16, native segwit outputs have no addresses decoded
import numpy as np
import scipy.io as scio
import scipy.sparse as scsp
import h5py as hp
from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, \
load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset
from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss
from models.DeepCCAModels import DeepCCA, cca_loss, gcca, cca
from models.DBN import mv_DBN
from torch.utils.data import DataLoader
from torch.nn import MSELoss, CrossEntropyLoss
from torch import optim
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch
import random
from img_config import epochs_set, layer_size_set, \
batch_size_set_mvae, lr_set_mvae, wd_set_mvae, \
batch_size_set_dcca, lr_set_dcca, wd_set_dcca, alpha_set_dcca, \
batch_size_set_dsvdd, lr_set_dsvdd, wd_set_dsvdd, \
batch_size_set_fused, lr_set_fused, wd_set_fused, pt_epochs_set, \
batch_size_set_split, lr_set_split, wd_set_split, \
batch_size_set_tf, lr_set_tf, wd_set_tf, r_set_tf, \
batch_size_set_corr, lr_set_corr, wd_set_corr, alpha_set_corr, \
batch_size_set_sim, lr_set_sim, wd_set_sim, alpha_set_sim, m_set_sim, \
batch_size_set_dgcca, lr_set_dgcca, wd_set_dgcca, alpha_set_dgcca, \
batch_size_set_dbn, lr_set_dbn, wd_set_dbn
import os
from transformations import get_rp_num, trans_mv_data_new
def get_radius(dist: torch.Tensor, nu: float):
"""Optimally solve for radius R via the (1-nu)-quantile of distances."""
return np.quantile(np.sqrt(dist.clone().data.cpu().numpy()), 1 - nu)
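# Quick sanity check of the quantile logic: squared distances 0, 1, 4, 9 have
# square roots 0, 1, 2, 3, and their 0.75-quantile is 2.25:
#
#   >>> get_radius(torch.tensor([0.0, 1.0, 4.0, 9.0]), nu=0.25)
#   2.25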
# Baselines:
def simple_mvae(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
torch.set_default_tensor_type(torch.DoubleTensor)
roc_results = [[] for i in range(repeat_times)]
pr_anom_results = [[] for i in range(repeat_times)]
pr_norm_results = [[] for i in range(repeat_times)]
tnr_results = [[] for i in range(repeat_times)]
single_best_roc_results = [[] for i in range(repeat_times)]
single_best_pr_anom_results = [[] for i in range(repeat_times)]
single_best_pr_norm_results = [[] for i in range(repeat_times)]
single_best_tnr_results = [[] for i in range(repeat_times)]
# training config
layer_size = layer_size_set[dataset_name]
batch_size = batch_size_set_mvae[dataset_name]
lr = lr_set_mvae[dataset_name]
wd = wd_set_mvae[dataset_name]
print('layer_size: {}, batch size: {}, lr: {}, wd:{}'.format(layer_size, batch_size, lr, wd))
loss_func = MSELoss()
input_size_set = [x.shape[0] for x in X]
for t in range(repeat_times):
print('The {}-th run begins!'.format(t))
for i in range(min([10, len(qualified_Y)])):
partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
X_train, X_test = process_ad_dataset(X, partition)
# -----------------------------------------model training
seed = 0
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # benchmark autotuning would undermine the deterministic setup above
model = mvae_ad(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
# specific training procedure
epochs = epochs_set[dataset_name]
print('training for up to {} epochs'.format(max(epochs)))
losses_set = [AverageMeter() for i in range(len(X_train))]
optimizer = [optim.Adam(model.ae_set[i].parameters(), lr=lr, weight_decay=wd) for i in range(len(X_train))]
# writer = SummaryWriter()
model.train()
cout = 0
for epoch in range(max(epochs)):
trainloader = tqdm(trainloader)
if ADdataset_mode == 'majority':
trainloader.set_description('mvae, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
else:
trainloader.set_description('mvae, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
for idx, batch in enumerate(trainloader):
postfix = ''
if batch[0].size(0) > 1: # for bn, batchsize > 1
batch = [batch[i].to(device) for i in range(len(batch))]
outputs_set = model(batch)
for view in range(len(X_train)):
if epoch < epochs[view]:
y = batch[view]
loss = loss_func(outputs_set[view], y)
optimizer[view].zero_grad()
loss.backward()
optimizer[view].step()
losses_set[view].update(loss.item(), batch[0].size(0))
cur_postfix = ' view{}, rec_loss: {:.4f} '.format(view, losses_set[view].avg)
# writer.add_scalar('runs/{}_loss_clasId_{}_view_{}'.format(dataset_name, y_set[i], view), loss.item(), cout)
postfix += cur_postfix
cout += 1
trainloader.set_postfix(log=postfix)
# writer.close()
# del writer
# # ----------------------------------------model testing
testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
scores = [[] for ss in range(len(X_test))]
with torch.no_grad():
model.eval()
for idx, batch in enumerate(tqdm(testloader)):
batch = [batch[i].to(device) for i in range(len(batch))]
cur_batch_scores = model.get_ad_scores(batch)
for ss in range(len(X_test)):
scores[ss].append(cur_batch_scores[ss])
scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
# To find a single-best model
scores_set = torch.cat(scores, dim=-1)
single_best_roc = 0
single_best_pr_norm = 0
single_best_pr_anom = 0
single_best_tnr = 0  # initialize so the appends below never see an unbound name
for v in range(scores_set.shape[-1]):  # fresh loop variable; `i` indexes qualified_Y in the outer loop
cur_view_scores = scores_set[:, v].cpu().detach().numpy()
cur_roc, cur_pr_anom, cur_pr_norm, cur_tnr = save_roc_pr_curve_data(scores=cur_view_scores, labels=Y_test, file_path=None, verbose=False)
if cur_roc > single_best_roc:
single_best_roc = cur_roc
single_best_pr_anom = cur_pr_anom
single_best_pr_norm = cur_pr_norm
single_best_tnr = cur_tnr
single_best_roc_results[t].append(single_best_roc)
single_best_pr_norm_results[t].append(single_best_pr_norm)
single_best_pr_anom_results[t].append(single_best_pr_anom)
single_best_tnr_results[t].append(single_best_tnr)
scores = late_fusion(scores, merge='avg')
# ----------------------------------------model eval
roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
roc_results[t].append(roc)
pr_anom_results[t].append(pr_anom)
pr_norm_results[t].append(pr_norm)
tnr_results[t].append(tnr)
del model
# save the results
file_name = results_path + dataset_name + '_mvae'
np.savez(file=file_name, ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
load_print_results(file_name)
file_name = results_path + dataset_name + '_mvae_single_best'
np.savez(file=file_name, ROC=single_best_roc_results, PR_norm=single_best_pr_norm_results, PR_anom=single_best_pr_anom_results, tnr=single_best_tnr_results)
load_print_results(file_name)
print('dataset: {}, layer_size: {}, batch size: {}, lr: {}, wd:{}, epochs: {}'.format(dataset_name, layer_size, batch_size, lr, wd, max(epochs)))
def deepCCA(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
torch.set_default_tensor_type(torch.DoubleTensor)
roc_results = [[] for i in range(repeat_times)]
pr_anom_results = [[] for i in range(repeat_times)]
pr_norm_results = [[] for i in range(repeat_times)]
tnr_results = [[] for i in range(repeat_times)]
# training config
batch_size = batch_size_set_dcca[dataset_name]
layer_size = layer_size_set[dataset_name]
lr = lr_set_dcca[dataset_name]
wd = wd_set_dcca[dataset_name]
loss_func = MSELoss()
cca_loss_func = cca(outdim_size=layer_size[-1])
input_size_set = [x.shape[0] for x in X]
if results_path in ['./results/']:
alpha_set = [alpha_set_dcca[dataset_name]]
else:
alpha_set = [0.01, 0.1, 0.5, 0.9, 0.99]
for alpha in alpha_set:
for t in range(repeat_times):
print('The {}-th run begins!'.format(t))
for i in range(min([10, len(qualified_Y)])):
partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
X_train, X_test = process_ad_dataset(X, partition)
# -----------------------------------------model training
seed = 0
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # benchmark autotuning would undermine the deterministic setup above
model = mv_corrAE(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
# specific training procedure
epochs = epochs_set[dataset_name]
cca_losses = AverageMeter()
rec_losses = AverageMeter()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
model.train()
cout = 0
for epoch in range(max(epochs)):
trainloader = tqdm(trainloader)
if ADdataset_mode == 'majority':
trainloader.set_description('dcca, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
else:
trainloader.set_description('dcca, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
for idx, batch in enumerate(trainloader):
try:
if batch[0].size(0) > 1: # for bn, batchsize > 1
batch = [batch[i].to(device) for i in range(len(batch))]
latent_set, outputs_set = model(batch)
recon_loss = torch.zeros(1).to(device)
cca_loss_ = torch.zeros(1).to(device)
for view in range(len(X_train)):
if epoch < epochs[view]:
y = batch[view]
loss = loss_func(outputs_set[view], y)
recon_loss += loss
for v_idx in range(view + 1, len(X_train)):
try:
cur_cca_loss = cca_loss_func.loss(latent_set[view], latent_set[v_idx])
except Exception:  # the CCA objective can fail on degenerate batches; fall back to a zero loss
cur_cca_loss = torch.zeros(1).to(device)
cca_loss_ += cur_cca_loss
rec_losses.update(recon_loss.item(), batch[0].size(0))
cca_losses.update(cca_loss_.item(), batch[0].size(0))
tot_loss = (1 - alpha) * recon_loss + alpha * cca_loss_
postfix = ' rec_loss: {:.4f}, cca_loss: {:.4f} '.format(rec_losses.avg, cca_losses.avg)
optimizer.zero_grad()
tot_loss.backward()
filter_nan_grad(optimizer)  # if the gradient contains NaN values, the batch is not used to update the parameters
optimizer.step()
cout += 1
trainloader.set_postfix(log=postfix)
except Exception:
print('The error idx is {}'.format(idx))
# # ----------------------------------------model testing
testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
with torch.no_grad():
model.eval()
scores = [[] for ss in range(len(X_test))]
for idx, batch in enumerate(tqdm(testloader)):
batch = [batch[i].to(device) for i in range(len(batch))]
cur_batch_scores = model.get_ad_scores(batch)
for ss in range(len(X_test)):
scores[ss].append(cur_batch_scores[ss])
scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
scores = late_fusion(scores, merge='avg')
# ----------------------------------------model eval
roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
roc_results[t].append(roc)
pr_anom_results[t].append(pr_anom)
pr_norm_results[t].append(pr_norm)
tnr_results[t].append(tnr)
del model
# save the results
if results_path in ['./results/']:
np.savez(file=results_path + dataset_name + '_dcca', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
load_print_results(results_path + dataset_name + '_dcca')
else:
np.savez(file=results_path + dataset_name + '_dcca_alpha_{}'.format(alpha), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
load_print_results(results_path + dataset_name + '_dcca_alpha_{}'.format(alpha))
def dgcca(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
torch.set_default_tensor_type(torch.DoubleTensor)
roc_results = [[] for i in range(repeat_times)]
pr_anom_results = [[] for i in range(repeat_times)]
pr_norm_results = [[] for i in range(repeat_times)]
tnr_results = [[] for i in range(repeat_times)]
# training config
batch_size = batch_size_set_dgcca[dataset_name]
lr = lr_set_dgcca[dataset_name]
wd = wd_set_dgcca[dataset_name]
layer_size = layer_size_set[dataset_name]
loss_func = MSELoss()
gcca_loss_func = gcca(outdim_size=layer_size[-1])
input_size_set = [x.shape[0] for x in X]
if results_path in ['./results/']:
alpha_set = [alpha_set_dgcca[dataset_name]]
else:
alpha_set = [0.01, 0.1, 0.5, 0.9, 0.99]
for alpha in alpha_set:
for t in range(repeat_times):
print('The {}-th run begins!'.format(t))
for i in range(min([10, len(qualified_Y)])):
partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
X_train, X_test = process_ad_dataset(X, partition)
# -----------------------------------------model training
seed = 0
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # benchmark autotuning would undermine the deterministic setup above
model = mv_corrAE(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
# specific training procedure
epochs = epochs_set[dataset_name]
gcca_losses = AverageMeter()
rec_losses = AverageMeter()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
model.train()
cout = 0
for epoch in range(max(epochs)):
trainloader = tqdm(trainloader)
if ADdataset_mode == 'majority':
trainloader.set_description('dgcca, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
else:
trainloader.set_description('dgcca, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
for idx, batch in enumerate(trainloader):
# try:
if batch[0].size(0) > 1: # for bn, batchsize > 1
batch = [batch[i].to(device) for i in range(len(batch))]
latent_set, outputs_set = model(batch)
recon_loss = torch.zeros(1).to(device)
try:
gcca_loss_ = gcca_loss_func.loss(latent_set)
except Exception:
gcca_loss_ = torch.zeros(1).to(device)
for view in range(len(X_train)):
if epoch < epochs[view]:
y = batch[view]
loss = loss_func(outputs_set[view], y)
recon_loss += loss
rec_losses.update(recon_loss.item(), batch[0].size(0))
gcca_losses.update(gcca_loss_.item(), batch[0].size(0))
tot_loss = (1 - alpha) * recon_loss + alpha * gcca_loss_
postfix = ' rec_loss: {:.4f}, gcca_loss: {:.4f} '.format(rec_losses.avg, gcca_losses.avg)
# based on x360_imports.idc by xorloser
# work out which name generating function to use
#
# args: name of the library to get name from
# version of the library to get name from
# id that represents the function/data in the library
# returns: generated name
def DoNameGen(libName, version, id):
# remove the extension from the library name
libName = libName.rsplit('.', 1)[0]
funcName = ""
if "connectx" == libName:
funcName = connectxNameGen(libName, version, id)
elif "createprofile" == libName:
funcName = createprofileNameGen(libName, version, id)
elif "vk" == libName:
funcName = vkNameGen(libName, version, id)
elif "xam" == libName:
funcName = xamNameGen(libName, version, id)
elif "xapi" == libName:
funcName = xapiNameGen(libName, version, id)
elif "xbdm" == libName:
funcName = xbdmNameGen(libName, version, id)
elif "xboxkrnl" == libName:
funcName = xboxkrnlNameGen(libName, version, id)
elif "syscall" == libName:
funcName = syscallNameGen(libName, version, id)
else:
funcName = unknownNameGen(libName, version, id)
return funcName
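# The if/elif chain above could also be expressed as a dict dispatch; a
# hypothetical equivalent (not part of the original script):
#
#   NAME_GENERATORS = {
#       "connectx": connectxNameGen, "createprofile": createprofileNameGen,
#       "vk": vkNameGen, "xam": xamNameGen, "xapi": xapiNameGen,
#       "xbdm": xbdmNameGen, "xboxkrnl": xboxkrnlNameGen,
#       "syscall": syscallNameGen,
#   }
#
#   def DoNameGen(libName, version, id):
#       libName = libName.rsplit('.', 1)[0]
#       return NAME_GENERATORS.get(libName, unknownNameGen)(libName, version, id)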
def connectxNameGen(libName, version, id):
funcName = "%s_%08X" % (libName, id)
if id == 0x00000001: funcName = "CxGetVersion"
elif id == 0x00000002: funcName = "NbtNetbios"
elif id == 0x00000003: funcName = "SmbCloseHandle"
elif id == 0x00000004: funcName = "SmbCreateDirectoryW"
elif id == 0x00000005: funcName = "SmbCreateFileW"
elif id == 0x00000006: funcName = "SmbDeleteFileW"
elif id == 0x00000007: funcName = "SmbFindClose"
elif id == 0x00000008: funcName = "SmbFindFirstFileW"
elif id == 0x00000009: funcName = "SmbFindNextFile"
elif id == 0x0000000A: funcName = "SmbFlushFileBuffers"
elif id == 0x0000000B: funcName = "SmbGetDiskFreeSpaceW"
elif id == 0x0000000C: funcName = "SmbGetFileAttributesW"
elif id == 0x0000000D: funcName = "SmbGetFileInformationByHandle"
elif id == 0x0000000E: funcName = "SmbGetFileSize"
elif id == 0x0000000F: funcName = "SmbGetFileTime"
elif id == 0x00000010: funcName = "SmbMoveFileW"
elif id == 0x00000011: funcName = "SmbReadFile"
elif id == 0x00000012: funcName = "SmbRemoveDirectoryW"
elif id == 0x00000013: funcName = "SmbSetEndOfFile"
elif id == 0x00000014: funcName = "SmbSetFileAttributesW"
elif id == 0x00000015: funcName = "SmbSetFilePointer"
elif id == 0x00000016: funcName = "SmbSetFileTime"
elif id == 0x00000017: funcName = "SmbStartup"
elif id == 0x00000018: funcName = "SmbWriteFile"
return funcName
def createprofileNameGen(libName, version, id):
funcName = "%s_%08X" % (libName, id)
if id == 0x00000001: funcName = "CreateProfile_Register"
elif id == 0x00000002: funcName = "CreateProfile_Unregister"
return funcName
def vkNameGen(libName, version, id):
funcName = "%s_%08X" % (libName, id)
if id == 0x00000001: funcName = "RegisterXuiClasses"
elif id == 0x00000002: funcName = "VK_UnInit"
elif id == 0x00000003: funcName = "VK_CreateScene"
elif id == 0x00000004: funcName = "VK_GetUserCancelled"
return funcName
def xamNameGen(libName, version, id):
funcName = "%s_%08X" % (libName, id)
if id == 0x00000001: funcName = "NetDll_WSAStartup"
elif id == 0x00000002: funcName = "NetDll_WSACleanup"
elif id == 0x00000003: funcName = "NetDll_socket"
elif id == 0x00000004: funcName = "NetDll_closesocket"
elif id == 0x00000005: funcName = "NetDll_shutdown"
elif id == 0x00000006: funcName = "NetDll_ioctlsocket"
elif id == 0x00000007: funcName = "NetDll_setsockopt"
elif id == 0x00000008: funcName = "NetDll_getsockopt"
elif id == 0x00000009: funcName = "NetDll_getsockname"
elif id == 0x0000000A: funcName = "NetDll_getpeername"
elif id == 0x0000000B: funcName = "NetDll_bind"
elif id == 0x0000000C: funcName = "NetDll_connect"
elif id == 0x0000000D: funcName = "NetDll_listen"
elif id == 0x0000000E: funcName = "NetDll_accept"
elif id == 0x0000000F: funcName = "NetDll_select"
elif id == 0x00000010: funcName = "NetDll_WSAGetOverlappedResult"
elif id == 0x00000011: funcName = "NetDll_WSACancelOverlappedIO"
elif id == 0x00000012: funcName = "NetDll_recv"
elif id == 0x00000013: funcName = "NetDll_WSARecv"
elif id == 0x00000014: funcName = "NetDll_recvfrom"
elif id == 0x00000015: funcName = "NetDll_WSARecvFrom"
elif id == 0x00000016: funcName = "NetDll_send"
elif id == 0x00000017: funcName = "NetDll_WSASend"
elif id == 0x00000018: funcName = "NetDll_sendto"
elif id == 0x00000019: funcName = "NetDll_WSASendTo"
elif id == 0x0000001A: funcName = "NetDll_inet_addr"
elif id == 0x0000001B: funcName = "NetDll_WSAGetLastError"
elif id == 0x0000001C: funcName = "NetDll_WSASetLastError"
elif id == 0x0000001D: funcName = "NetDll_WSACreateEvent"
elif id == 0x0000001E: funcName = "NetDll_WSACloseEvent"
elif id == 0x0000001F: funcName = "NetDll_WSASetEvent"
elif id == 0x00000020: funcName = "NetDll_WSAResetEvent"
elif id == 0x00000021: funcName = "NetDll_WSAWaitForMultipleEvents"
elif id == 0x00000022: funcName = "NetDll___WSAFDIsSet"
elif id == 0x00000023: funcName = "NetDll_WSAEventSelect"
elif id == 0x00000024: funcName = "NetDll_WSAStartupEx"
elif id == 0x00000033: funcName = "NetDll_XNetStartup"
elif id == 0x00000034: funcName = "NetDll_XNetCleanup"
elif id == 0x00000035: funcName = "NetDll_XNetRandom"
elif id == 0x00000036: funcName = "NetDll_XNetCreateKey"
elif id == 0x00000037: funcName = "NetDll_XNetRegisterKey"
elif id == 0x00000038: funcName = "NetDll_XNetUnregisterKey"
elif id == 0x00000039: funcName = "NetDll_XNetXnAddrToInAddr"
elif id == 0x0000003A: funcName = "NetDll_XNetServerToInAddr"
elif id == 0x0000003B: funcName = "NetDll_XNetTsAddrToInAddr"
elif id == 0x0000003C: funcName = "NetDll_XNetInAddrToXnAddr"
elif id == 0x0000003D: funcName = "NetDll_XNetInAddrToServer"
elif id == 0x0000003E: funcName = "NetDll_XNetInAddrToString"
elif id == 0x0000003F: funcName = "NetDll_XNetUnregisterInAddr"
elif id == 0x00000040: funcName = "NetDll_XNetXnAddrToMachineId"
elif id == 0x00000041: funcName = "NetDll_XNetConnect"
elif id == 0x00000042: funcName = "NetDll_XNetGetConnectStatus"
elif id == 0x00000043: funcName = "NetDll_XNetDnsLookup"
elif id == 0x00000044: funcName = "NetDll_XNetDnsRelease"
elif id == 0x00000045: funcName = "NetDll_XNetQosListen"
elif id == 0x00000046: funcName = "NetDll_XNetQosLookup"
elif id == 0x00000047: funcName = "NetDll_XNetQosServiceLookup"
elif id == 0x00000048: funcName = "NetDll_XNetQosRelease"
elif id == 0x00000049: funcName = "NetDll_XNetGetTitleXnAddr"
elif id == 0x0000004A: funcName = "NetDll_XNetGetDebugXnAddr"
elif id == 0x0000004B: funcName = "NetDll_XNetGetEthernetLinkStatus"
elif id == 0x0000004C: funcName = "NetDll_XNetGetBroadcastVersionStatus"
elif id == 0x0000004D: funcName = "NetDll_XNetQosGetListenStats"
elif id == 0x0000004E: funcName = "NetDll_XNetGetOpt"
elif id == 0x0000004F: funcName = "NetDll_XNetSetOpt"
elif id == 0x00000050: funcName = "NetDll_XNetStartupEx"
elif id == 0x00000051: funcName = "NetDll_XNetReplaceKey"
elif id == 0x00000052: funcName = "NetDll_XNetGetXnAddrPlatform"
elif id == 0x00000053: funcName = "NetDll_XNetGetSystemLinkPort"
elif id == 0x00000054: funcName = "NetDll_XNetSetSystemLinkPort"
elif id == 0x00000065: funcName = "NetDll_XnpLoadConfigParams"
elif id == 0x00000066: funcName = "NetDll_XnpSaveConfigParams"
elif id == 0x00000067: funcName = "NetDll_XnpConfigUPnP"
elif id == 0x00000068: funcName = "NetDll_XnpConfig"
elif id == 0x00000069: funcName = "NetDll_XnpGetConfigStatus"
elif id == 0x0000006A: funcName = "NetDll_XnpLoadMachineAccount"
elif id == 0x0000006B: funcName = "NetDll_XnpSaveMachineAccount"
elif id == 0x0000006C: funcName = "NetDll_XnpCapture"
elif id == 0x0000006D: funcName = "NetDll_XnpEthernetInterceptSetCallbacks"
elif id == 0x0000006E: funcName = "NetDll_XnpEthernetInterceptXmit"
elif id == 0x0000006F: funcName = "NetDll_XnpEthernetInterceptRecv"
elif id == 0x00000070: funcName = "NetDll_XnpLogonGetStatus"
elif id == 0x00000071: funcName = "NetDll_XnpLogonGetQFlags"
elif id == 0x00000072: funcName = "NetDll_XnpLogonSetQFlags"
elif id == 0x00000073: funcName = "NetDll_XnpLogonSetQEvent"
elif id == 0x00000074: funcName = "NetDll_XnpLogonClearQEvent"
elif id == 0x00000075: funcName = "NetDll_XnpLogonGetQVals"
elif id == 0x00000076: funcName = "NetDll_XnpLogonSetQVals"
elif id == 0x00000077: funcName = "NetDll_XnpLogonSetPState"
elif id == 0x00000078: funcName = "NetDll_XnpGetVlanXboxName"
elif id == 0x00000079: funcName = "NetDll_XnpSetVlanXboxName"
elif id == 0x0000007A: funcName = "NetDll_XnpGetActiveSocketList"
elif id == 0x0000007B: funcName = "NetDll_XnpNoteSystemTime"
elif id == 0x0000007C: funcName = "NetDll_XnpRegisterKeyForCallerType"
elif id == 0x0000007D: funcName = "NetDll_XnpUnregisterKeyForCallerType"
elif id == 0x0000007E: funcName = "NetDll_XnpLogonGetChallenge"
elif id == 0x0000007F: funcName = "NetDll_XnpLogonClearChallenge"
elif id == 0x00000080: funcName = "NetDll_XnpLogonSetChallengeResponse"
elif id == 0x00000081: funcName = "NetDll_XnpGetSecAssocList"
elif id == 0x00000082: funcName = "NetDll_XnpGetKeyList"
elif id == 0x00000083: funcName = "NetDll_XnpGetQosLookupList"
elif id == 0x00000084: funcName = "NetDll_XnpPersistTitleState"
elif id == 0x00000085: funcName = "NetDll_XnpReplaceKeyForCallerType"
elif id == 0x00000086: funcName = "NetDll_XnpEthernetInterceptSetExtendedReceiveCallback"
elif id == 0x00000087: funcName = "NetDll_XnpQosHistoryLoad"
elif id == 0x00000088: funcName = "NetDll_XnpQosHistorySaveMeasurements"
elif id == 0x00000089: funcName = "NetDll_XnpQosHistoryGetEntries"
elif id == 0x0000008A: funcName = "NetDll_XnpQosHistoryGetAggregateMeasurement"
elif id == 0x0000008B: funcName = "NetDll_XnpToolSetCallbacks"
elif id == 0x0000008C: funcName = "NetDll_XnpToolIpProxyInject"
elif id == 0x0000008D: funcName = "NetDll_XnpUpdateConfigParams"
elif id == 0x0000008E: funcName = "NetDll_XnpEthernetInterceptXmitAsIp"
elif id == 0x00000097: funcName = "NetDll_XmlDownloadStart"
elif id == 0x00000098: funcName = "NetDll_XmlDownloadContinue"
elif id == 0x00000099: funcName = "NetDll_XmlDownloadStop"
elif id == 0x0000009A: funcName = "NetDll_XmlDownloadGetParseTime"
elif id == 0x0000009B: funcName = "NetDll_XmlDownloadGetReceivedDataSize"
elif id == 0x000000C6: funcName = "XnpGetXwppMemoryLogSnapshot"
elif id == 0x000000C7: funcName = "XnpGetXwppRuntimeFilter"
elif id == 0x000000C9: funcName = "NetDll_XHttpStartup"
elif id == 0x000000CA: funcName = "NetDll_XHttpShutdown"
elif id == 0x000000CB: funcName = "NetDll_XHttpOpen"
elif id == 0x000000CC: funcName = "NetDll_XHttpCloseHandle"
elif id == 0x000000CD: funcName = "NetDll_XHttpConnect"
elif id == 0x000000CE: funcName = "NetDll_XHttpSetStatusCallback"
from typing import Callable, Dict, Optional, Sequence, Tuple
from functools import partial
import numpy as np
import torch
from torch import Tensor
from torch.nn import functional as F
# @TODO:
# after full classification metrics re-implementation, make a reference to
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics
# as a baseline
def process_multiclass_components(
outputs: torch.Tensor,
targets: torch.Tensor,
argmax_dim: int = -1,
num_classes: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, int]:
"""
Preprocess input in case multiclass classification task.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., (num_classes or 1)]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
argmax_dim: int, that specifies dimension for argmax transformation
in case of scores/probabilities in ``outputs``
num_classes: int that specifies the number of classes, if it is known
Returns:
preprocessed outputs, targets and num_classes
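Example (illustrative):
>>> outputs = torch.rand(4, 3)  # per-class scores for 4 samples
>>> targets = torch.tensor([0, 2, 1, 2])
>>> outputs, targets, num_classes = process_multiclass_components(
...     outputs, targets
... )
>>> outputs.shape, targets.shape, num_classes
(torch.Size([4, 1]), torch.Size([4, 1]), 3)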
"""
# @TODO: better multiclass preprocessing, label -> class_id mapping
if not torch.is_tensor(outputs):
outputs = torch.from_numpy(np.array(outputs))
if not torch.is_tensor(targets):
targets = torch.from_numpy(np.array(targets))
if outputs.dim() == targets.dim() + 1:
# looks like we have scores/probabilities in our outputs
# let's convert them to final model predictions
num_classes = max(
outputs.shape[argmax_dim], int(targets.max().detach().item() + 1)
)
outputs = torch.argmax(outputs, dim=argmax_dim)
if num_classes is None:
# since we expect the outputs/targets tensors to be int64,
# we can infer the number of classes from the maximum label value
num_classes = max(
int(outputs.max().detach().item() + 1),
int(targets.max().detach().item() + 1),
)
if outputs.dim() == 1:
outputs = outputs.view(-1, 1)
elif outputs.dim() == 2 and outputs.size(0) == 1:
# transpose case
outputs = outputs.permute(1, 0)
else:
assert outputs.size(1) == 1 and outputs.dim() == 2, (
"Wrong `outputs` shape, "
"expected 1D or 2D with size 1 in the second dim, "
"got {}".format(outputs.shape)
)
if targets.dim() == 1:
targets = targets.view(-1, 1)
elif targets.dim() == 2 and targets.size(0) == 1:
# transpose case
targets = targets.permute(1, 0)
else:
assert targets.size(1) == 1 and targets.dim() == 2, (
"Wrong `targets` shape, "
"expected 1D or 2D with size 1 in the second dim"
)
return outputs, targets, num_classes
def process_multilabel_components(
outputs: torch.Tensor,
targets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""General preprocessing for multi-label-based metrics.
Args:
outputs: NxK tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model.
targets: binary NxK tensor that encodes which of the K
classes are associated with the N-th input
(eg: a row [0, 1, 0, 1] indicates that the example is
associated with classes 2 and 4)
weights: importance for each sample
Returns:
processed ``outputs`` and ``targets``
with [batch_size; num_classes] shape
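Example (illustrative):
>>> outputs = torch.rand(4, 3)             # multi-class scores
>>> targets = torch.tensor([0, 2, 1, 2])   # class ids, expanded to one-hot
>>> outputs, targets, _ = process_multilabel_components(outputs, targets)
>>> outputs.shape, targets.shape
(torch.Size([4, 3]), torch.Size([4, 3]))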
"""
if not torch.is_tensor(outputs):
outputs = torch.from_numpy(outputs)
if not torch.is_tensor(targets):
targets = torch.from_numpy(targets)
if weights is not None:
if not torch.is_tensor(weights):
weights = torch.from_numpy(weights)
weights = weights.squeeze()
if outputs.dim() == 1:
outputs = outputs.view(-1, 1)
else:
assert outputs.dim() == 2, (
"wrong `outputs` size "
"(should be 1D or 2D with one column per class)"
)
if targets.dim() == 1:
if outputs.shape[1] > 1:
# multi-class case
num_classes = outputs.shape[1]
targets = F.one_hot(targets, num_classes).float()
else:
# binary case
targets = targets.view(-1, 1)
else:
assert targets.dim() == 2, (
"wrong `targets` size "
"(should be 1D or 2D with one column per class)"
)
if weights is not None:
assert weights.dim() == 1, "Weights dimension should be 1"
assert weights.numel() == targets.size(
0
), "Weights dimension 1 should be the same as that of target"
assert torch.min(weights) >= 0, "Weights should be non-negative"
assert torch.equal(
targets ** 2, targets
), "targets should be binary (0 or 1)"
return outputs, targets, weights
def get_binary_statistics(
outputs: Tensor, targets: Tensor, label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Computes the number of true negative, false positive,
false negative, true positive and support
for a binary classification problem for a given label.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., 1]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
label: integer, that specifies label of interest for statistics compute
Returns:
Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats
Example:
>>> y_pred = torch.tensor([[0, 0, 1, 1, 0, 1, 0, 1]])
>>> y_true = torch.tensor([[0, 1, 0, 1, 0, 0, 1, 1]])
>>> tn, fp, fn, tp, support = get_binary_statistics(y_pred, y_true)
tensor(2) tensor(2) tensor(2) tensor(2) tensor(4)
"""
tn = ((outputs != label) * (targets != label)).to(torch.long).sum()
fp = ((outputs == label) * (targets != label)).to(torch.long).sum()
fn = ((outputs != label) * (targets == label)).to(torch.long).sum()
tp = ((outputs == label) * (targets == label)).to(torch.long).sum()
support = (targets == label).to(torch.long).sum()
return tn, fp, fn, tp, support
def get_multiclass_statistics(
outputs: Tensor,
targets: Tensor,
argmax_dim: int = -1,
num_classes: Optional[int] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Computes the number of true negative, false positive,
false negative, true positive and support
for a multi-class classification problem.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., (num_classes or 1)]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
argmax_dim: int, that specifies dimension for argmax transformation
in case of scores/probabilities in ``outputs``
num_classes: int that specifies the number of classes, if it is known
Returns:
Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats
Example:
>>> y_pred = torch.tensor([1, 2, 3, 0])
>>> y_true = torch.tensor([1, 3, 4, 0])
>>> tn, fp, fn, tp, support = get_multiclass_statistics(y_pred, y_true)
tensor([3., 3., 3., 2., 3.]), tensor([0., 0., 1., 1., 0.]),
tensor([0., 0., 0., 1., 1.]), tensor([1., 1., 0., 0., 0.]),
tensor([1., 1., 0., 1., 1.])
"""
outputs, targets, num_classes = process_multiclass_components(
outputs=outputs,
targets=targets,
argmax_dim=argmax_dim,
num_classes=num_classes,
)
tn = torch.zeros((num_classes,), device=outputs.device)
fp = torch.zeros((num_classes,), device=outputs.device)
fn = torch.zeros((num_classes,), device=outputs.device)
tp = torch.zeros((num_classes,), device=outputs.device)
support = torch.zeros((num_classes,), device=outputs.device)
for class_index in range(num_classes):
(
tn[class_index],
fp[class_index],
fn[class_index],
tp[class_index],
support[class_index],
) = get_binary_statistics(
outputs=outputs, targets=targets, label=class_index
)
return tn, fp, fn, tp, support
def get_multilabel_statistics(
outputs: Tensor, targets: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Computes the number of true negative, false positive,
false negative, true positive and support
for a multi-label classification problem.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., (num_classes or 1)]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
Returns:
Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats
Example:
>>> y_pred = torch.tensor([[0, 0, 1, 1], [0, 1, 0, 1]])
>>> y_true = torch.tensor([[0, 1, 0, 1], [0, 0, 1, 1]])
>>> tn, fp, fn, tp, support = get_multilabel_statistics(y_pred, y_true)
tensor([2., 0., 0., 0.]) tensor([0., 1., 1., 0.]),
tensor([0., 1., 1., 0.]) tensor([0., 0., 0., 2.]),
tensor([0., 1., 1., 2.])
>>> y_pred = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> y_true = torch.tensor([0, 1, 2])
>>> tn, fp, fn, tp, support = get_multilabel_statistics(y_pred, y_true)
tensor([2., 2., 2.]) tensor([0., 0., 0.])
tensor([0., 0., 0.]) tensor([1., 1., 1.])
tensor([1., 1., 1.])
>>> y_pred = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> y_true = torch.nn.functional.one_hot(torch.tensor([0, 1, 2]))
>>> tn, fp, fn, tp, support = get_multilabel_statistics(y_pred, y_true)
tensor([2., 2., 2.]) tensor([0., 0., 0.])
tensor([0., 0., 0.]) tensor([1., 1., 1.])
tensor([1., 1., 1.])
"""
outputs, targets, _ = process_multilabel_components(
outputs=outputs, targets=targets
)
assert outputs.shape == targets.shape
num_classes = outputs.shape[-1]
tn = torch.zeros((num_classes,), device=outputs.device)
fp = torch.zeros((num_classes,), device=outputs.device)
fn = torch.zeros((num_classes,), device=outputs.device)
tp = torch.zeros((num_classes,), device=outputs.device)
support = torch.zeros((num_classes,), device=outputs.device)
for class_index in range(num_classes):
class_outputs = outputs[..., class_index]
class_targets = targets[..., class_index]
(
tn[class_index],
fp[class_index],
fn[class_index],
tp[class_index],
support[class_index],
) = get_binary_statistics(
outputs=class_outputs, targets=class_targets, label=1
)
return tn, fp, fn, tp, support
def get_default_topk_args(num_classes: int) -> Sequence[int]:
"""Calculate list params for ``Accuracy@k`` and ``mAP@k``.
Args:
num_classes: number of classes
Returns:
iterable: array of accuracy arguments
Examples:
>>> get_default_topk_args(num_classes=4)
[1, 3]
>>> get_default_topk_args(num_classes=8)
[1, 3, 5]
"""
result = [1]
if num_classes is None:
return result
if num_classes > 3:
result.append(3)
if num_classes > 5:
result.append(5)
return result
def wrap_class_metric2dict(
metric_fn: Callable, class_args: Sequence[str] = None
) -> Callable:
"""# noqa: D202
Logging wrapper for metrics with torch.Tensor output
and [num_classes] shape.
# Source file: kubespec/certmanager/v1alpha3.py (repo: machinezone/kubespec)
# Code is generated: DO NOT EDIT
# Copyright 2019 Machine Zone, Inc. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from kubespec import context
from kubespec import types
from kubespec.certmanager.acme import v1alpha3 as acmev1alpha3
from kubespec.k8s import base
from kubespec.k8s import v1 as k8sv1
from typeguard import check_type, typechecked
from typing import Any, Dict, List, Optional
KeyAlgorithm = base.Enum("KeyAlgorithm", {"ECDSA": "ecdsa", "RSA": "rsa"})
KeyEncoding = base.Enum("KeyEncoding", {"PKCS1": "pkcs1", "PKCS8": "pkcs8"})
# KeyUsage specifies valid usage contexts for keys.
# See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
# https://tools.ietf.org/html/rfc5280#section-4.2.1.12
# Valid KeyUsage values are as follows:
# "signing",
# "digital signature",
# "content commitment",
# "key encipherment",
# "key agreement",
# "data encipherment",
# "cert sign",
# "crl sign",
# "encipher only",
# "decipher only",
# "any",
# "server auth",
# "client auth",
# "code signing",
# "email protection",
# "s/mime",
# "ipsec end system",
# "ipsec tunnel",
# "ipsec user",
# "timestamping",
# "ocsp signing",
# "microsoft sgc",
# "netscape sgc"
KeyUsage = base.Enum(
"KeyUsage",
{
"Any": "any",
"CRLSign": "crl sign",
"CertSign": "cert sign",
"ClientAuth": "client auth",
"CodeSigning": "code signing",
"ContentCommitment": "content commitment",
"DataEncipherment": "data encipherment",
"DecipherOnly": "decipher only",
"DigitalSignature": "digital signature",
"EmailProtection": "email protection",
"EncipherOnly": "encipher only",
"IPsecEndSystem": "ipsec end system",
"IPsecTunnel": "ipsec tunnel",
"IPsecUser": "ipsec user",
"KeyAgreement": "key agreement",
"KeyEncipherment": "key encipherment",
"MicrosoftSGC": "microsoft sgc",
"NetscapeSGC": "netscape sgc",
"OCSPSigning": "ocsp signing",
"SMIME": "s/mime",
"ServerAuth": "server auth",
"Signing": "signing",
"Timestamping": "timestamping",
},
)
class CAIssuer(types.Object):
@context.scoped
@typechecked
def __init__(self, secret_name: str = ""):
super().__init__()
self.__secret_name = secret_name
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
secret_name = self.secret_name()
check_type("secret_name", secret_name, str)
v["secretName"] = secret_name
return v
def secret_name(self) -> str:
"""
SecretName is the name of the secret used to sign Certificates issued
by this Issuer.
"""
return self.__secret_name
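# Illustrative use of the serialization pattern above (a sketch; the exact
# output may include fields added by the kubespec base classes):
#
#   CAIssuer(secret_name="ca-key-pair")._root()
#   # -> {..., "secretName": "ca-key-pair"}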
class X509Subject(types.Object):
"""
X509Subject Full X509 name specification
"""
@context.scoped
@typechecked
def __init__(
self,
countries: List[str] = None,
organizational_units: List[str] = None,
localities: List[str] = None,
provinces: List[str] = None,
street_addresses: List[str] = None,
postal_codes: List[str] = None,
serial_number: str = None,
):
super().__init__()
self.__countries = countries if countries is not None else []
self.__organizational_units = (
organizational_units if organizational_units is not None else []
)
self.__localities = localities if localities is not None else []
self.__provinces = provinces if provinces is not None else []
self.__street_addresses = (
street_addresses if street_addresses is not None else []
)
self.__postal_codes = postal_codes if postal_codes is not None else []
self.__serial_number = serial_number
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
countries = self.countries()
check_type("countries", countries, Optional[List[str]])
if countries: # omit empty
v["countries"] = countries
organizational_units = self.organizational_units()
check_type("organizational_units", organizational_units, Optional[List[str]])
if organizational_units: # omit empty
v["organizationalUnits"] = organizational_units
localities = self.localities()
check_type("localities", localities, Optional[List[str]])
if localities: # omit empty
v["localities"] = localities
provinces = self.provinces()
check_type("provinces", provinces, Optional[List[str]])
if provinces: # omit empty
v["provinces"] = provinces
street_addresses = self.street_addresses()
check_type("street_addresses", street_addresses, Optional[List[str]])
if street_addresses: # omit empty
v["streetAddresses"] = street_addresses
postal_codes = self.postal_codes()
check_type("postal_codes", postal_codes, Optional[List[str]])
if postal_codes: # omit empty
v["postalCodes"] = postal_codes
serial_number = self.serial_number()
check_type("serial_number", serial_number, Optional[str])
if serial_number: # omit empty
v["serialNumber"] = serial_number
return v
def countries(self) -> Optional[List[str]]:
"""
Countries to be used on the Certificate.
"""
return self.__countries
def organizational_units(self) -> Optional[List[str]]:
"""
Organizational Units to be used on the Certificate.
"""
return self.__organizational_units
def localities(self) -> Optional[List[str]]:
"""
Cities to be used on the Certificate.
"""
return self.__localities
def provinces(self) -> Optional[List[str]]:
"""
State/Provinces to be used on the Certificate.
"""
return self.__provinces
def street_addresses(self) -> Optional[List[str]]:
"""
Street addresses to be used on the Certificate.
"""
return self.__street_addresses
def postal_codes(self) -> Optional[List[str]]:
"""
Postal codes to be used on the Certificate.
"""
return self.__postal_codes
def serial_number(self) -> Optional[str]:
"""
Serial number to be used on the Certificate.
"""
return self.__serial_number
class CertificateSpec(types.Object):
"""
CertificateSpec defines the desired state of Certificate.
A valid Certificate requires at least one of a CommonName, DNSName, or
URISAN to be valid.
"""
@context.scoped
@typechecked
def __init__(
self,
subject: "X509Subject" = None,
common_name: str = None,
organization: List[str] = None,
duration: "base.Duration" = None,
renew_before: "base.Duration" = None,
dns_names: List[str] = None,
ip_addresses: List[str] = None,
uri_sans: List[str] = None,
secret_name: str = "",
issuer_ref: "k8sv1.TypedLocalObjectReference" = None,
is_ca: bool = None,
usages: List[KeyUsage] = None,
key_size: int = None,
key_algorithm: KeyAlgorithm = None,
key_encoding: KeyEncoding = None,
):
super().__init__()
self.__subject = subject
self.__common_name = common_name
self.__organization = organization if organization is not None else []
self.__duration = duration
self.__renew_before = renew_before
self.__dns_names = dns_names if dns_names is not None else []
self.__ip_addresses = ip_addresses if ip_addresses is not None else []
self.__uri_sans = uri_sans if uri_sans is not None else []
self.__secret_name = secret_name
self.__issuer_ref = (
issuer_ref if issuer_ref is not None else k8sv1.TypedLocalObjectReference()
)
self.__is_ca = is_ca
self.__usages = usages if usages is not None else []
self.__key_size = key_size
self.__key_algorithm = key_algorithm
self.__key_encoding = key_encoding
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
subject = self.subject()
check_type("subject", subject, Optional["X509Subject"])
if subject is not None: # omit empty
v["subject"] = subject
common_name = self.common_name()
check_type("common_name", common_name, Optional[str])
if common_name: # omit empty
v["commonName"] = common_name
organization = self.organization()
check_type("organization", organization, Optional[List[str]])
if organization: # omit empty
v["organization"] = organization
duration = self.duration()
check_type("duration", duration, Optional["base.Duration"])
if duration is not None: # omit empty
v["duration"] = duration
renew_before = self.renew_before()
check_type("renew_before", renew_before, Optional["base.Duration"])
if renew_before is not None: # omit empty
v["renewBefore"] = renew_before
dns_names = self.dns_names()
check_type("dns_names", dns_names, Optional[List[str]])
if dns_names: # omit empty
v["dnsNames"] = dns_names
ip_addresses = self.ip_addresses()
check_type("ip_addresses", ip_addresses, Optional[List[str]])
if ip_addresses: # omit empty
v["ipAddresses"] = ip_addresses
uri_sans = self.uri_sans()
check_type("uri_sans", uri_sans, Optional[List[str]])
if uri_sans: # omit empty
v["uriSANs"] = uri_sans
secret_name = self.secret_name()
check_type("secret_name", secret_name, str)
v["secretName"] = secret_name
issuer_ref = self.issuer_ref()
check_type("issuer_ref", issuer_ref, "k8sv1.TypedLocalObjectReference")
v["issuerRef"] = issuer_ref
is_ca = self.is_ca()
check_type("is_ca", is_ca, Optional[bool])
if is_ca: # omit empty
v["isCA"] = is_ca
usages = self.usages()
check_type("usages", usages, Optional[List[KeyUsage]])
if usages: # omit empty
v["usages"] = usages
key_size = self.key_size()
check_type("key_size", key_size, Optional[int])
if key_size: # omit empty
v["keySize"] = key_size
key_algorithm = self.key_algorithm()
check_type("key_algorithm", key_algorithm, Optional[KeyAlgorithm])
if key_algorithm: # omit empty
v["keyAlgorithm"] = key_algorithm
key_encoding = self.key_encoding()
check_type("key_encoding", key_encoding, Optional[KeyEncoding])
if key_encoding: # omit empty
v["keyEncoding"] = key_encoding
return v
def subject(self) -> Optional["X509Subject"]:
"""
Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name).
"""
return self.__subject
def common_name(self) -> Optional[str]:
"""
CommonName is a common name to be used on the Certificate.
The CommonName should have a length of 64 characters or fewer to avoid
generating invalid CSRs.
"""
return self.__common_name
def organization(self) -> Optional[List[str]]:
"""
Organization is the organization to be used on the Certificate
"""
return self.__organization
def duration(self) -> Optional["base.Duration"]:
"""
Certificate default Duration
"""
return self.__duration
def renew_before(self) -> Optional["base.Duration"]:
"""
Certificate renew before expiration duration
"""
return self.__renew_before
def dns_names(self) -> Optional[List[str]]:
"""
DNSNames is a list of subject alt names to be used on the Certificate.
"""
return self.__dns_names
def ip_addresses(self) -> Optional[List[str]]:
"""
IPAddresses is a list of IP addresses to be used on the Certificate
"""
return self.__ip_addresses
def uri_sans(self) -> Optional[List[str]]:
"""
URISANs is a list of URI Subject Alternative Names to be set on this
Certificate.
"""
return self.__uri_sans
def secret_name(self) -> str:
"""
SecretName is the name of the secret resource to store this secret in
"""
return self.__secret_name
def issuer_ref(self) -> "k8sv1.TypedLocalObjectReference":
"""
IssuerRef is a reference to the issuer for this certificate.
If the 'kind' field is not set, or set to 'Issuer', an Issuer resource
with the given name in the same namespace as the Certificate will be used.
If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer with the
provided name will be used.
The 'name' field in this stanza is required at all times.
"""
return self.__issuer_ref
def is_ca(self) -> Optional[bool]:
"""
IsCA will mark this Certificate as valid for signing.
This implies that the 'cert sign' usage is set
"""
return self.__is_ca
def usages(self) -> Optional[List[KeyUsage]]:
"""
Usages is the set of x509 actions
import os, math, ctypes
import numpy as np
from OpenGL import GL
from art import VERT_LENGTH
from palette import MAX_COLORS
# inactive layer alphas
LAYER_VIS_FULL = 1
LAYER_VIS_DIM = 0.25
LAYER_VIS_NONE = 0
class TileRenderable:
"""
3D visual representation of an Art. Each layer is rendered as grids of
rectangular OpenGL triangle-pairs. Animation frames are uploaded into our
buffers from source Art's numpy arrays.
"""
vert_shader_source = 'renderable_v.glsl'
"vertex shader: includes view projection matrix, XYZ camera uniforms."
frag_shader_source = 'renderable_f.glsl'
"Pixel shader: handles FG/BG colors."
log_create_destroy = False
log_animation = False
log_buffer_updates = False
grain_strength = 0.
alpha = 1.
"Alpha (0 to 1) for entire Renderable."
bg_alpha = 1.
"Alpha (0 to 1) *only* for tile background colors."
default_move_rate = 1
use_art_offset = True
"Use game object's art_off_pct values."
def __init__(self, app, art, game_object=None):
"Create Renderable with given Art, optionally bound to given GameObject"
self.app = app
"Application that renders us."
self.art = art
"Art we get data from."
self.art.renderables.append(self)
self.go = game_object
"GameObject we're attached to."
self.exporting = False
"Set True momentarily by image export process; users shouldn't touch."
self.visible = True
"Boolean for easy render / don't-render functionality."
self.frame = self.art.active_frame or 0
"Frame of our art's animation we're currently on"
self.animating = False
self.anim_timer, self.last_frame_time = 0, 0
# world space position and scale
self.x, self.y, self.z = 0, 0, 0
self.scale_x, self.scale_y, self.scale_z = 1, 1, 1
if self.go:
self.scale_x = self.go.scale_x
self.scale_y = self.go.scale_y
self.scale_z = self.go.scale_z
# width and height in XY render space
self.width, self.height = 1, 1
self.reset_size()
# TODO: object rotation matrix, if needed
self.goal_x, self.goal_y, self.goal_z = 0, 0, 0
self.move_rate = self.default_move_rate
# marked True when UI is interpolating it
self.ui_moving = False
self.camera = self.app.camera
# bind VAO etc before doing shaders etc
if self.app.use_vao:
self.vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self.vao)
self.shader = self.app.sl.new_shader(self.vert_shader_source, self.frag_shader_source)
self.proj_matrix_uniform = self.shader.get_uniform_location('projection')
self.view_matrix_uniform = self.shader.get_uniform_location('view')
self.position_uniform = self.shader.get_uniform_location('objectPosition')
self.scale_uniform = self.shader.get_uniform_location('objectScale')
self.charset_width_uniform = self.shader.get_uniform_location('charMapWidth')
self.charset_height_uniform = self.shader.get_uniform_location('charMapHeight')
self.char_uv_width_uniform = self.shader.get_uniform_location('charUVWidth')
self.char_uv_height_uniform = self.shader.get_uniform_location('charUVHeight')
self.charset_tex_uniform = self.shader.get_uniform_location('charset')
self.palette_tex_uniform = self.shader.get_uniform_location('palette')
self.grain_tex_uniform = self.shader.get_uniform_location('grain')
self.palette_width_uniform = self.shader.get_uniform_location('palTextureWidth')
self.grain_strength_uniform = self.shader.get_uniform_location('grainStrength')
self.alpha_uniform = self.shader.get_uniform_location('alpha')
self.brightness_uniform = self.shader.get_uniform_location('brightness')
self.bg_alpha_uniform = self.shader.get_uniform_location('bgColorAlpha')
self.create_buffers()
# finish
if self.app.use_vao:
GL.glBindVertexArray(0)
if self.log_create_destroy:
self.app.log('created: %s' % self)
def __str__(self):
"for debug purposes, return a concise unique name"
for i,r in enumerate(self.art.renderables):
if r is self:
break
return '%s %s %s' % (self.art.get_simple_name(), self.__class__.__name__, i)
def create_buffers(self):
# vertex positions and elements
# determine vertex count needed for render
self.vert_count = int(len(self.art.elem_array))
self.vert_buffer, self.elem_buffer = GL.glGenBuffers(2)
self.update_buffer(self.vert_buffer, self.art.vert_array,
GL.GL_ARRAY_BUFFER, GL.GL_STATIC_DRAW, GL.GL_FLOAT, 'vertPosition', VERT_LENGTH)
self.update_buffer(self.elem_buffer, self.art.elem_array,
GL.GL_ELEMENT_ARRAY_BUFFER, GL.GL_STATIC_DRAW, GL.GL_UNSIGNED_INT, None, None)
# tile data buffers
# use GL_DYNAMIC_DRAW since these change every time a char/color changes
self.char_buffer, self.uv_buffer = GL.glGenBuffers(2)
# character indices (which become vertex UVs)
self.update_buffer(self.char_buffer, self.art.chars[self.frame],
GL.GL_ARRAY_BUFFER, GL.GL_DYNAMIC_DRAW, GL.GL_FLOAT, 'charIndex', 1)
# UV "mods" - modify UV derived from character index
self.update_buffer(self.uv_buffer, self.art.uv_mods[self.frame],
GL.GL_ARRAY_BUFFER, GL.GL_DYNAMIC_DRAW, GL.GL_FLOAT, 'uvMod', 2)
self.fg_buffer, self.bg_buffer = GL.glGenBuffers(2)
# foreground/background color indices (which become rgba colors)
self.update_buffer(self.fg_buffer, self.art.fg_colors[self.frame],
GL.GL_ARRAY_BUFFER, GL.GL_DYNAMIC_DRAW, GL.GL_FLOAT, 'fgColorIndex', 1)
self.update_buffer(self.bg_buffer, self.art.bg_colors[self.frame],
GL.GL_ARRAY_BUFFER, GL.GL_DYNAMIC_DRAW, GL.GL_FLOAT, 'bgColorIndex', 1)
def update_geo_buffers(self):
self.update_buffer(self.vert_buffer, self.art.vert_array, GL.GL_ARRAY_BUFFER, GL.GL_STATIC_DRAW, GL.GL_FLOAT, None, None)
self.update_buffer(self.elem_buffer, self.art.elem_array, GL.GL_ELEMENT_ARRAY_BUFFER, GL.GL_STATIC_DRAW, GL.GL_UNSIGNED_INT, None, None)
# total vertex count probably changed
self.vert_count = int(len(self.art.elem_array))
def update_tile_buffers(self, update_chars, update_uvs, update_fg, update_bg):
"Update GL data arrays for tile characters, fg/bg colors, transforms."
updates = {}
if update_chars:
updates[self.char_buffer] = self.art.chars
if update_uvs:
updates[self.uv_buffer] = self.art.uv_mods
if update_fg:
updates[self.fg_buffer] = self.art.fg_colors
if update_bg:
updates[self.bg_buffer] = self.art.bg_colors
for update in updates:
self.update_buffer(update, updates[update][self.frame],
GL.GL_ARRAY_BUFFER, GL.GL_DYNAMIC_DRAW,
GL.GL_FLOAT, None, None)
def update_buffer(self, buffer_index, array, target, buffer_type, data_type,
attrib_name, attrib_size):
if self.log_buffer_updates:
self.app.log('update_buffer: %s, %s, %s, %s, %s, %s, %s' % (buffer_index, array, target, buffer_type, data_type, attrib_name, attrib_size))
GL.glBindBuffer(target, buffer_index)
GL.glBufferData(target, array.nbytes, array, buffer_type)
if attrib_name:
attrib = self.shader.get_attrib_location(attrib_name)
GL.glEnableVertexAttribArray(attrib)
GL.glVertexAttribPointer(attrib, attrib_size, data_type,
GL.GL_FALSE, 0, ctypes.c_void_p(0))
# unbind each buffer before binding next
GL.glBindBuffer(target, 0)
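    # A minimal usage sketch (mirroring the calls in create_buffers above):
    # re-uploading foreground color data for the current frame looks like
    #   self.update_buffer(self.fg_buffer, self.art.fg_colors[self.frame],
    #                      GL.GL_ARRAY_BUFFER, GL.GL_DYNAMIC_DRAW,
    #                      GL.GL_FLOAT, 'fgColorIndex', 1)
    # while passing attrib_name=None (as the element buffer does) skips the
    # vertex attribute setup and only re-uploads the data.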
def advance_frame(self):
"Advance to our Art's next animation frame."
self.set_frame(self.frame + 1)
def rewind_frame(self):
"Rewind to our Art's previous animation frame."
self.set_frame(self.frame - 1)
def set_frame(self, new_frame_index):
"Set us to display our Art's given animation frame."
if new_frame_index == self.frame:
return
old_frame = self.frame
self.frame = new_frame_index % self.art.frames
self.update_tile_buffers(True, True, True, True)
if self.log_animation:
            self.app.log('%s animating from frame %s to %s' % (self, old_frame, self.frame))
def start_animating(self):
"Start animation playback."
self.animating = True
self.anim_timer = 0
def stop_animating(self):
"Pause animation playback on current frame (in game mode)."
self.animating = False
# restore to active frame if stopping
if not self.app.game_mode:
self.set_frame(self.art.active_frame)
def set_art(self, new_art):
"Display and bind to given Art."
if self.art:
self.art.renderables.remove(self)
self.art = new_art
self.reset_size()
self.art.renderables.append(self)
# make sure frame is valid
self.frame %= self.art.frames
self.update_geo_buffers()
self.update_tile_buffers(True, True, True, True)
#print('%s now uses Art %s' % (self, self.art.filename))
def reset_size(self):
self.width = self.art.width * self.art.quad_width * abs(self.scale_x)
self.height = self.art.height * self.art.quad_height * self.scale_y
def move_to(self, x, y, z, travel_time=None):
"""
Start simple linear interpolation to given destination over given time.
Not very useful in Game Mode, mainly used for document switch UI effect
in Art Mode.
"""
# for fixed travel time, set move rate accordingly
if travel_time:
frames = (travel_time * 1000) / max(self.app.framerate, 30)
dx = x - self.x
dy = y - self.y
dz = z - self.z
dist = math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
self.move_rate = dist / frames
else:
self.move_rate = self.default_move_rate
self.ui_moving = True
self.goal_x, self.goal_y, self.goal_z = x, y, z
if self.log_animation:
self.app.log('%s will move to %s,%s' % (self.art.filename, self.goal_x, self.goal_y))
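    # Worked example (hypothetical numbers): moving 10 units with
    # travel_time=0.5 at 60fps gives frames = (0.5 * 1000) / 60 ~= 8.3
    # updates, so move_rate ~= 1.2 units per update; with no travel_time
    # the constant default_move_rate is used instead.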
def snap_to(self, x, y, z):
self.x, self.y, self.z = x, y, z
self.goal_x, self.goal_y, self.goal_z = x, y, z
self.ui_moving = False
def update_transform_from_object(self, obj):
"Update our position & scale based on that of given game object."
self.z = obj.z
if self.scale_x != obj.scale_x or self.scale_y != obj.scale_y:
self.reset_size()
self.x, self.y = obj.x, obj.y
if self.use_art_offset:
if obj.flip_x:
self.x += self.width * obj.art_off_pct_x
else:
self.x -= self.width * obj.art_off_pct_x
self.y += self.height * obj.art_off_pct_y
self.scale_x, self.scale_y = obj.scale_x, obj.scale_y
if obj.flip_x:
self.scale_x *= -1
self.scale_z = obj.scale_z
def update_loc(self):
# TODO: probably time to bust out the ol' vector module for this stuff
# get delta
dx = self.goal_x - self.x
dy = self.goal_y - self.y
dz = self.goal_z - self.z
dist = math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
# close enough?
if dist <= self.move_rate:
self.x = self.goal_x
self.y = self.goal_y
self.z = self.goal_z
self.ui_moving = False
return
# normalize
inv_dist = 1 / dist
dir_x = dx * inv_dist
dir_y = dy * inv_dist
dir_z = dz * inv_dist
self.x += self.move_rate * dir_x
self.y += self.move_rate * dir_y
self.z += self.move_rate * dir_z
#self.app.log('%s moved to %s,%s' % (self, self.x, self.y))
def update(self):
if self.go:
self.update_transform_from_object(self.go)
if self.ui_moving:
self.update_loc()
if not self.animating:
return
if self.app.game_mode and self.app.gw.paused:
return
elapsed = self.app.get_elapsed_time() - self.last_frame_time
self.anim_timer += elapsed
new_frame = self.frame
this_frame_delay = self.art.frame_delays[new_frame] * 1000
while self.anim_timer >= this_frame_delay:
self.anim_timer -= this_frame_delay
# iterate through frames, but don't call set_frame until we're done
new_frame += 1
new_frame %= self.art.frames
this_frame_delay = self.art.frame_delays[new_frame] * 1000
# TODO: if new_frame < self.frame, count anim loop?
self.set_frame(new_frame)
self.last_frame_time = self.app.get_elapsed_time()
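    # Worked example of the loop above (hypothetical numbers): with
    # frame_delays = [0.1, 0.2] seconds and 250ms banked in anim_timer, the
    # loop consumes 100ms advancing frame 0 -> 1, then stops because the
    # remaining 150ms is less than frame 1's 200ms delay - the leftover stays
    # banked, so a long hitch can advance several frames without drift.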
def destroy(self):
if self.app.use_vao:
GL.glDeleteVertexArrays(1, [self.vao])
GL.glDeleteBuffers(6, [self.vert_buffer, self.elem_buffer, self.char_buffer, self.uv_buffer, self.fg_buffer, self.bg_buffer])
if self.art and self in self.art.renderables:
self.art.renderables.remove(self)
if self.log_create_destroy:
self.app.log('destroyed: %s' % self)
def get_projection_matrix(self):
"""
UIRenderable overrides this so it doesn't have to override
Renderable.render and duplicate lots of code.
"""
return np.eye(4, 4) if self.exporting else self.camera.projection_matrix
def get_view_matrix(self):
return np.eye(4, 4) if self.exporting else self.camera.view_matrix
def get_loc(self):
"Returns world space location as (x, y, z) tuple."
export_loc = (-1, 1, 0)
return export_loc if self.exporting else (self.x, self.y, self.z)
def get_scale(self):
"Returns world space scale | |
<gh_stars>0
from collections import defaultdict
from enum import Enum, auto
import logging
import numpy as np
from PfgUtil import debug_mode
class TransformationOption(Enum):
NONE = auto()
AGGREGATE_CALLS = auto()
VERTICAL_STACK_CPU = auto()
COLLAPSE_GROUPS = auto()
class ColourMode(Enum):
BY_PARENT = auto()
BY_CPU = auto()
def get_durations_for_entities(
entities
):
wallclock_duration_by_cpu = defaultdict(int)
parallelism_intervals = defaultdict(int)
for entity in entities:
for cpu, cpu_interval in entity.per_cpu_intervals.items():
wallclock_duration_by_cpu[cpu] += cpu_interval
# Consider top of stack or whole stack?
# TODO make this a configurable option
#for parallelism, interval in entity.top_of_stack_parallelism_intervals.items():
for parallelism, interval in entity.parallelism_intervals.items():
parallelism_intervals[parallelism] += interval
cpus = []
wallclock_durations = []
for cpu, duration in wallclock_duration_by_cpu.items():
cpus.append(cpu)
wallclock_durations.append(duration)
if len(cpus) > 1:
sorted_indices = np.argsort(wallclock_durations)[::-1]
wallclock_durations = np.array(wallclock_durations)[sorted_indices].tolist()
cpus = np.array(cpus)[sorted_indices].tolist()
return cpus, wallclock_durations, parallelism_intervals
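# Illustrative example (hypothetical values): if the entities ran for 30 time
# units on cpu 0 and 70 on cpu 1, this returns cpus=[1, 0] and
# wallclock_durations=[70, 30] - CPUs are sorted by descending duration so
# callers can treat index 0 as the dominant CPU.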
"""
This is what will actually be plotted as a sub-bar
These are built from the execution intervals of the node
"""
class PFGTreeNodeElement:
def __init__(self,
name,
wallclock_duration,
parallelism_intervals,
cpu_time=None
):
self.name = name
self.wallclock_duration = wallclock_duration # this is truly wallclock duration, must be careful to choose the correct one when merging
self.parallelism_intervals = parallelism_intervals # total CPU time spent in each parallelism state, this should sum to CPU time
if cpu_time is None:
self.cpu_time = wallclock_duration
else:
self.cpu_time = cpu_time
"""
This is a specific execution interval
Adding these up should give total CPU time of the node in each function
"""
class PFGTreeNodeExecutionInterval:
def __init__(self,
name,
cpu,
duration,
parallelism_intervals
):
self.cpu = cpu
self.name = name
self.duration = duration
self.parallelism_intervals = parallelism_intervals # these are how long this CPU spent in each parallelism state, should sum to duration
class PFGTreeNode:
def __init__(self,
parent_node,
name,
cpu,
wallclock_duration,
parallelism_intervals,
depth,
alignment_node=None
):
# TODO a node should maintain an aggregate name from its partitions
self.original_parent_node = parent_node
self.original_depth = depth
self.cpus = [cpu]
self.wallclock_durations = [wallclock_duration] # currently this is only ever of length 1 (corresponding to the total wallclock duration of the node partitions)
self.parent_node = parent_node
if alignment_node is None:
self.ancestor_alignment_node = parent_node
else:
self.ancestor_alignment_node = alignment_node
self.child_nodes = []
first_element = PFGTreeNodeElement(name, wallclock_duration, parallelism_intervals)
self.node_partitions = [first_element]
first_interval = PFGTreeNodeExecutionInterval(name, cpu, wallclock_duration, parallelism_intervals)
self.execution_intervals = [first_interval]
# for plotting
self.start_x = None
self.width = None
self.start_y = None
self.height = None
def build_node_partitions_from_intervals(self):
self.node_partitions.clear()
wallclock_durations_by_group = defaultdict(int)
cpu_time_by_group = defaultdict(int)
parallelism_intervals_by_group = {}
for execution_interval in self.execution_intervals:
if execution_interval.cpu == self.cpus[0]:
wallclock_durations_by_group[execution_interval.name] += execution_interval.duration
if execution_interval.cpu not in self.cpus:
self.cpus.append(execution_interval.cpu)
			if execution_interval.name in parallelism_intervals_by_group:
				for parallelism, interval in execution_interval.parallelism_intervals.items():
					parallelism_intervals_by_group[execution_interval.name][parallelism] += interval
			else:
				parallelism_intervals_by_group[execution_interval.name] = execution_interval.parallelism_intervals
cpu_time_by_group[execution_interval.name] += execution_interval.duration
for group, duration in cpu_time_by_group.items():
if group not in wallclock_durations_by_group:
# this means it is an appended node part from a different CPU
wallclock_durations_by_group[group] += duration
total_wallclock_duration = 0
for group, duration in wallclock_durations_by_group.items():
cpu_time = cpu_time_by_group[group]
parallelism_intervals_for_group = parallelism_intervals_by_group[group]
part = PFGTreeNodeElement(group, duration, parallelism_intervals_for_group, cpu_time)
total_wallclock_duration += duration
self.node_partitions.append(part)
self.wallclock_durations[0] = total_wallclock_duration
	# Return the durations over all intervals. For each CPU, we assume the intervals occur sequentially in wallclock time.
# TODO fix what happens in a recursive function, where one CPU calls a function many times - the wallclocks of these functions will be summed!
def get_per_cpu_wallclock_durations(self):
wallclock_durations_by_cpu = defaultdict(int)
for execution_interval in self.execution_intervals:
wallclock_durations_by_cpu[execution_interval.cpu] += execution_interval.duration
return wallclock_durations_by_cpu
class PFGTree:
def __init__(self, root_entities, aggregate_stack_frames_by_default=False):
self.merged_siblings_by_cpu = False
self.root_nodes = []
self.stack_frames_aggregated = aggregate_stack_frames_by_default
self.colour_mode = ColourMode.BY_CPU
# As part of the constructor, build the tree
self.gen_tree_process_entities_into_child_nodes(None, root_entities, self.root_nodes, 0, aggregate_stack_frames_by_default)
"""
The basic tree has each node representing an entity, i.e., a single stack
frame on a single CPU. A child node is thus a single function call from the
parent. Child nodes are therefore of the same CPU (unless there was a fork)
	Optionally, the tree can be built by default with the entities aggregated
such that each node represents all entities of one function called by one
parent on one CPU
Only upon transformation are nodes potentially composed of entities from
multiple groups or CPUs, through merging
"""
def gen_tree_process_entities_into_child_nodes(self, parent_node, entities, parent_node_list, depth, aggregate_stack_frames_by_default):
# split the entities by CPU
entities_by_cpu = defaultdict(list)
for entity in entities:
entities_by_cpu[entity.cpu].append(entity)
# for each CPU's entities, split them into their groups
for cpu, entities_for_cpu in entities_by_cpu.items():
entities_by_group = defaultdict(list)
for entity in entities_for_cpu:
entities_by_group[entity.group].append(entity)
# for each group, create a node
for group, entities_for_group in entities_by_group.items():
if aggregate_stack_frames_by_default == False:
for entity in entities_for_group:
entity_as_list = [entity]
cpus, wallclock_durations, parallelism_intervals = get_durations_for_entities(entity_as_list)
node = PFGTreeNode(
parent_node=parent_node,
name=group,
cpu=cpu,
wallclock_duration=wallclock_durations[0],
parallelism_intervals=parallelism_intervals,
depth=depth
)
parent_node_list.append(node)
if len(entity.child_entities) > 0:
self.gen_tree_process_entities_into_child_nodes(node, entity.child_entities, node.child_nodes, depth+1, aggregate_stack_frames_by_default)
else:
cpus, wallclock_durations, parallelism_intervals = get_durations_for_entities(entities_for_group)
node = PFGTreeNode(
parent_node=parent_node,
name=group,
cpu=cpu,
wallclock_duration=wallclock_durations[0],
parallelism_intervals=parallelism_intervals,
depth=depth
)
parent_node_list.append(node)
# collect all children of these entities, and convert them to nodes
child_entities = []
for entity in entities_for_group:
for child_entity in entity.child_entities:
child_entities.append(child_entity)
if len(child_entities) > 0:
self.gen_tree_process_entities_into_child_nodes(node, child_entities, node.child_nodes, depth+1, aggregate_stack_frames_by_default)
"""
Transform the tree such that each node represents a set of entities,
grouped by symbol and CPU, which were executed with a common parent node
(set of function calls)
Child nodes are then the set of entities (again grouped by symbol/CPU) that
any entity of the parent node called.
"""
def transform_tree_aggregate_stack_frames(self):
self.stack_frames_aggregated = True
node_sets_queue = [self.root_nodes]
while len(node_sets_queue) > 0:
sibling_nodes = node_sets_queue[0]
sibling_nodes_by_group_by_cpu = {}
for node in sibling_nodes:
if node.cpus[0] in sibling_nodes_by_group_by_cpu:
sibling_nodes_by_group_by_cpu[node.cpus[0]][node.node_partitions[0].name].append(node)
else:
sibling_nodes_by_group_by_cpu[node.cpus[0]] = defaultdict(list)
sibling_nodes_by_group_by_cpu[node.cpus[0]][node.node_partitions[0].name].append(node)
for cpu, nodes_by_group in sibling_nodes_by_group_by_cpu.items():
for group, nodes in nodes_by_group.items():
# Because we are merging intra-CPU, there is no need to sort them to ensure we retain maximum wallclock
merged_node = self.merge_node_set(nodes)
node_sets_queue.append(merged_node.child_nodes)
node_sets_queue.pop(0)
"""
	This function goes through all sibling node sets, collects nodes (of any group) by CPU, and merges each collection
	If a CPU executes 2 function calls sequentially, this merges them into one node with 2 node partitions
	The reason for this is to ensure that their sequential execution is not broken when transforming the tree,
	as they are then treated as a single atomic node in the graph
"""
def transform_tree_merge_sibling_nodes_by_cpu(self):
self.merged_siblings_by_cpu = True
node_sets_queue = [self.root_nodes]
while len(node_sets_queue) > 0:
sibling_nodes = node_sets_queue[0]
sibling_nodes_by_cpu = defaultdict(list)
for node in sibling_nodes:
sibling_nodes_by_cpu[node.cpus[0]].append(node)
# Now merge all of the siblings into one node, regardless of group
for cpu, nodes in sibling_nodes_by_cpu.items():
# Because we are merging intra-CPU, there is no need to sort them to ensure we retain maximum wallclock
merged_node = self.merge_node_set(nodes)
node_sets_queue.append(merged_node.child_nodes)
node_sets_queue.pop(0)
"""
Take a set of nodes and merge them into a single node with the union of all execution intervals
If sort is False, the *first* node in the list is the base node that others will be merged into
	- meaning its wallclock will be used for node width when the tree is plotted
If sort is True, the function will find the node with the maximum wallclock on its CPU to be the base node
"""
def merge_node_set(self, nodes_to_merge, sort=True):
if len(nodes_to_merge) == 1:
return nodes_to_merge[0]
if len(nodes_to_merge) == 0:
logging.error("Cannot merge a node set of length 0.")
raise ValueError()
# If sort, then have the nodes merged into the longest wallclock_duration node
if sort:
longest_wallclock_duration = 0
longest_wallclock_duration_node_idx = 0
for node_idx, node in enumerate(nodes_to_merge):
if node.wallclock_durations[0] > longest_wallclock_duration:
longest_wallclock_duration = node.wallclock_durations[0]
longest_wallclock_duration_node_idx = node_idx
new_nodes_to_merge = [nodes_to_merge[longest_wallclock_duration_node_idx]]
new_nodes_to_merge.extend([n for i,n in enumerate(nodes_to_merge) if i != longest_wallclock_duration_node_idx])
nodes_to_merge = new_nodes_to_merge
merged_node = nodes_to_merge[0]
if len(nodes_to_merge) > 1:
while len(nodes_to_merge) > 1:
node_to_merge = nodes_to_merge[1]
node_to_merge.original_parent_node = merged_node.original_parent_node
merged_node.execution_intervals.extend(node_to_merge.execution_intervals)
merged_node.child_nodes.extend(node_to_merge.child_nodes)
if node_to_merge.parent_node:
node_to_merge.parent_node.child_nodes.remove(node_to_merge)
else:
self.root_nodes.remove(node_to_merge)
for child_node in node_to_merge.child_nodes:
child_node.parent_node = merged_node
child_node.ancestor_alignment_node = merged_node
child_node.original_parent_node = merged_node
nodes_to_merge.remove(node_to_merge)
merged_node.build_node_partitions_from_intervals()
return merged_node
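	# Sketch of the merge semantics (hypothetical values): merging node A
	# (wallclock 50 on cpu 0) with node B (wallclock 80 on cpu 1) under
	# sort=True picks B as the base node, so the plotted width comes from the
	# 80-unit duration; A's execution intervals and children are folded into
	# B, and A is unlinked from its parent (or from root_nodes if orphaned).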
"""
This function aims to aggregate the tree nodes, such that each child node
is accurately displayed within the wallclock of its parent, even if it is
executed in parallel
The nodes are merged into single nodes with multiple CPUs if possible, with
those sequential function calls that do not fit side-by-side, instead
stacked vertically
To do this, we process outgoing-edge nodes (starting at the root):
- We find the set of nodes with the longest wallclock on a single CPU
- These nodes are merged into one base node, with multiple partitions
- We then try to merge any of the other CPU's node sets into this base
node.
- The condition is, for the nodes of each CPU independently:
-- Are all corresponding groups of the CPU < wallclock of the base groups
-- AND can all of the non-corresponding groups be appended sequentially
with the base node (without extending past the parent)
- If yes, then add the intervals of the CPU to the base node
- If not, then change the parent node of these CPU's nodes to be the base
node, to be processed as children of the base node (i.e. above the base
node, rather than adjacent)
- After all CPUs' nodes have been considered, process the outgoing-edges
of the base node
"""
def transform_tree_collapse_groups(self):
self.colour_mode = ColourMode.BY_PARENT
# Assumes each node has one cpu and one group, and that node.cpus and node.wallclock_durations is correct
parent_nodes_to_process = [node for node in self.root_nodes]
while len(parent_nodes_to_process) > 0:
parent_node = parent_nodes_to_process.pop(0)
if len(parent_node.child_nodes) == 0:
continue
child_nodes_by_group_by_cpu = {}
for child_node in parent_node.child_nodes:
if child_node.cpus[0] not in child_nodes_by_group_by_cpu:
child_nodes_by_group_by_cpu[child_node.cpus[0]] = defaultdict(list)
child_nodes_by_group_by_cpu[child_node.cpus[0]][child_node.node_partitions[0].name].append(child_node)
longest_wallclock_duration = 0
longest_wallclock_duration_cpu = 0
longest_wallclock_duration_duration_by_group = {} # i.e. for the set of nodes with the total longest duration, what is the duration of each group?
for cpu, child_nodes_by_group in child_nodes_by_group_by_cpu.items():
total_wallclock_duration = 0
wallclock_duration_by_group = {}
for group, child_nodes in child_nodes_by_group.items():
wallclock_duration_for_group = 0
for child_node in child_nodes:
wallclock_duration_for_group += child_node.wallclock_durations[child_node.cpus.index(cpu)]
wallclock_duration_by_group[group] = wallclock_duration_for_group
total_wallclock_duration += wallclock_duration_for_group
if total_wallclock_duration > longest_wallclock_duration:
longest_wallclock_duration = total_wallclock_duration
longest_wallclock_duration_cpu = cpu
longest_wallclock_duration_duration_by_group = wallclock_duration_by_group
# We now have the nodes (of multiple groups) with the longest wallclock duration on one CPU
# This becomes our single merged base node
sibling_nodes_to_merge = []
		for group, nodes in
"""
Code taken from <NAME>'s excellent python tracing library: https://github.com/donkirkby/live-py-plugin
"""
import argparse
from ast import (fix_missing_locations, iter_fields, parse, Add, Assign, AST,
Attribute, BitAnd, BitOr, BitXor, Call, Div, Ellipsis,
ExceptHandler, Expr, ExtSlice, FloorDiv, ImportFrom, Index,
List, Load, LShift, Mod, Mult, Name, NodeTransformer, Num,
Pow, Raise, Return, RShift, Slice, Store, Str, Sub, Subscript,
Tuple, Yield)
from contextlib import contextmanager
from copy import deepcopy
import __future__
import imp
from inspect import currentframe
import sys
import traceback
import types
import os
from importlib import import_module
try:
builtins = import_module('__builtin__')
except ImportError:
import builtins
from report_builder import ReportBuilder
from random import seed
# Import some classes that are only available in Python 3.
try:
from ast import arg, Starred
except ImportError:
arg = Starred = None
try:
from ast import FormattedValue
except ImportError:
FormattedValue = None
try:
from ast import TryExcept, TryFinally
except ImportError:
# Make Python 3.3 try class compatible with old versions.
from ast import Try as TryExcept
TryFinally = TryExcept
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
CONTEXT_NAME = '__live_coding_context__'
RESULT_NAME = '__live_coding_result__'
PSEUDO_FILENAME = '<live coding source>'
SCOPE_NAME = '__live_coding__'
OPERATOR_CHARS = {Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
Pow: '**',
RShift: '>>',
LShift: '<<',
BitAnd: '&',
BitXor: '^',
BitOr: '|'}
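# Example of how OPERATOR_CHARS is used (see visit_AugAssign below): for a
# statement like "total += n", the reported format string becomes roughly
# "total += {!r} ", with the assigned value substituted when the report is
# built; unrecognized operator types fall back to '?'.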
# noinspection PyPep8Naming
class Tracer(NodeTransformer):
def _set_statement_line_numbers(self,
statements,
previous_line_number=None):
""" Make sure that a series of statements have line numbers in order.
previous_line_number is the line number to start with, or None."""
for statement in statements:
line_number = getattr(statement, 'lineno', None)
if (line_number is None and statement is not None and
previous_line_number is not None):
statement.lineno = previous_line_number
else:
line_numbers = set()
self._find_line_numbers(statement, line_numbers)
previous_line_number = max(line_numbers)
def visit(self, node):
new_node = super(Tracer, self).visit(node)
body = getattr(new_node, 'body', None)
if body is not None:
previous_line_number = getattr(new_node, 'lineno', None)
try:
statements = iter(body)
except TypeError:
# body doesn't contain statements
statements = []
self._set_statement_line_numbers(statements, previous_line_number)
return new_node
@staticmethod
def _get_attribute_names(attribute_node):
names = []
while isinstance(attribute_node, Attribute):
names.insert(0, attribute_node.attr)
attribute_node = attribute_node.value
if not names:
return None
names.insert(0, getattr(attribute_node, 'id', '<?>'))
return names
def _wrap_subscript_target(self, subscript, index_to_get=None):
""" Build string describing subscript target and wrap indexes.
For example, "x[{!r}]" for one index. Each index will be wrapped in a
call to context.add_assignment_index() or
context.get_assignment_index().
@param index_to_get: if this is None, wrap in add_assignment_index(),
otherwise, wrap in get_assignment_index(index_to_get).
@return: string, or None if no assignment can be reported.
"""
slice_text, next_index = self._wrap_slice(subscript.slice,
index_to_get)
value = subscript.value
if isinstance(value, Name):
value_text = value.id
elif isinstance(value, Subscript):
value_text = self._wrap_subscript_target(value,
index_to_get=next_index)
elif isinstance(value, Attribute):
value_text = '.'.join(self._get_attribute_names(value))
else:
value_text = None
if value_text is None:
format_text = None
else:
format_text = '{}[{}]'.format(value_text, slice_text)
return format_text
def _wrap_assignment_index(self, index_node, index_to_get):
""" Wrap an index node in an assignment index method.
@param index_node: the node to read when setting the index.
@param index_to_get: None when setting the index, or an integer when
getting the index.
@return: the wrapped node
"""
if index_to_get is None:
return self._create_bare_context_call(
'add_assignment_index',
[index_node])
return self._create_bare_context_call(
'get_assignment_index',
[Num(n=index_to_get)])
def _wrap_slice(self, sliceNode, index_to_get=None):
""" Wrap a slice in calls to assignment index methods.
Also build a format string for the slice.
@param index_to_get: if this is None, wrap in add_assignment_index(),
otherwise, wrap in get_assignment_index(index_to_get).
@return: format_text, next_index_to_get
"""
if isinstance(sliceNode, (Index, Ellipsis)):
if (isinstance(sliceNode, Ellipsis) or
isinstance(sliceNode.value, Ellipsis)):
index_to_get = None
format_text = '...'
else:
sliceNode.value = self._wrap_assignment_index(
sliceNode.value,
index_to_get)
format_text = '{!r}'
if index_to_get is not None:
index_to_get -= 1
elif isinstance(sliceNode, Slice):
index_to_get = None
if sliceNode.step is None:
step_text = ''
else:
step_text = ':{!r}'
sliceNode.step = self._wrap_assignment_index(
sliceNode.step,
index_to_get)
if sliceNode.upper is None:
upper_text = ''
else:
upper_text = '{!r}'
sliceNode.upper = self._wrap_assignment_index(
sliceNode.upper,
index_to_get)
if sliceNode.lower is None:
lower_text = ''
else:
lower_text = '{!r}'
sliceNode.lower = self._wrap_assignment_index(
sliceNode.lower,
index_to_get)
format_text = '{}:{}{}'.format(lower_text, upper_text, step_text)
else:
assert isinstance(sliceNode, ExtSlice)
index_to_get = None
format_text = ', '.join(self._wrap_slice(subslice)[0]
for subslice in sliceNode.dims)
return format_text, index_to_get
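    # Worked example: for an assignment target like x[1:n], this produces the
    # format text '{!r}:{!r}' (lower and upper present, no step), which
    # _wrap_subscript_target combines into 'x[{!r}:{!r}]' so the report can
    # show the concrete index values observed at run time.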
def visit_Call(self, node):
existing_node = self.generic_visit(node)
func_node = existing_node.func
if self._is_untraceable_attribute(func_node):
return existing_node
comparisons = [] # [(name, node)]
names = self._get_attribute_names(func_node)
if names is not None:
comparisons.append(('.'.join(names[:-1]),
existing_node.func.value))
for arg_node in existing_node.args:
if isinstance(arg_node, Name):
comparisons.append((arg_node.id, arg_node))
if not comparisons:
return existing_node
args = [List(elts=[], ctx=Load()),
List(elts=[], ctx=Load()),
existing_node,
List(elts=[], ctx=Load()),
Num(n=existing_node.lineno)]
for name, node in comparisons:
args[0].elts.append(Str(s=name)) # name
args[1].elts.append( # repr() before
self._create_bare_context_call('get_repr', [node]))
args[3].elts.append( # repr() after
self._create_bare_context_call('get_repr', [node]))
new_node = self._create_bare_context_call('record_call', args)
return new_node
def visit_Delete(self, node):
existing_node = self.generic_visit(node)
for target in existing_node.targets:
attribute_names = self._get_attribute_names(target)
if attribute_names:
target_name = '.'.join(attribute_names[:-1])
else:
target_value = getattr(target, 'value', None)
attribute_names = self._get_attribute_names(target_value)
if attribute_names:
target_name = '.'.join(attribute_names)
else:
target_name = getattr(target_value, 'id', None)
if target_name is not None:
args = [Str(s=target_name), target.value, Num(n=target.lineno)]
target.value = self._create_bare_context_call('record_delete',
args)
return existing_node
def _is_untraceable_attribute(self, node):
if isinstance(node, Attribute):
if isinstance(node.value, Name):
return False
if isinstance(node.value, Attribute):
return self._is_untraceable_attribute(node.value)
return True
return False
def visit_Assign(self, node):
existing_node = self.generic_visit(node)
if any(map(self._is_untraceable_attribute, existing_node.targets)):
return existing_node
line_numbers = set()
self._find_line_numbers(existing_node, line_numbers)
first_line_number = min(line_numbers)
last_line_number = max(line_numbers)
new_nodes = []
format_string = self._wrap_assignment_targets(
existing_node.targets)
if (len(existing_node.targets) == 1 and
isinstance(existing_node.targets[0], Tuple)):
existing_node.value = Call(func=Name(id='tuple', ctx=Load()),
args=[existing_node.value],
keywords=[],
starargs=None,
kwargs=None)
existing_node.value = self._create_bare_context_call(
'set_assignment_value',
[existing_node.value])
new_nodes.append(self._create_context_call('start_assignment'))
try_body = [existing_node]
if format_string is not None:
try_body.append(self._create_context_call(
'report_assignment',
[Str(s=format_string), Num(n=existing_node.lineno)]))
end_assignment = self._create_context_call('end_assignment')
finally_body = [end_assignment]
new_nodes.append(TryFinally(body=try_body,
finalbody=finally_body,
handlers=[],
orelse=[],
lineno=first_line_number))
self._set_statement_line_numbers(try_body, first_line_number)
self._set_statement_line_numbers(finally_body, last_line_number)
return new_nodes
def visit_AugAssign(self, node):
read_target = deepcopy(node.target)
existing_node = self.generic_visit(node)
line_numbers = set()
self._find_line_numbers(existing_node, line_numbers)
first_line_number = min(line_numbers)
last_line_number = max(line_numbers)
new_nodes = []
try_body = [existing_node]
new_nodes.append(self._create_context_call('start_assignment'))
format_string = self._wrap_assignment_target(existing_node.target)
if format_string is not None:
if ':' in format_string:
existing_node.value = self._create_bare_context_call(
'set_assignment_value',
[existing_node.value])
operator_char = OPERATOR_CHARS.get(type(existing_node.op), '?')
format_string += ' {}= {{!r}} '.format(operator_char)
else:
self._wrap_assignment_target(read_target, index_to_get=-1)
read_target.ctx = Load()
set_assignment_value = self._create_context_call(
'set_assignment_value',
[read_target])
try_body.append(set_assignment_value)
format_string += ' = {!r}'
try_body.append(self._create_context_call(
'report_assignment',
[Str(s=format_string), Num(n=existing_node.lineno)]))
end_assignment = self._create_context_call('end_assignment')
finally_body = [end_assignment]
new_nodes.append(TryFinally(body=try_body,
finalbody=finally_body,
handlers=[],
orelse=[],
lineno=first_line_number))
self._set_statement_line_numbers(try_body, first_line_number)
self._set_statement_line_numbers(finally_body, last_line_number)
return new_nodes
def _find_line_numbers(self, node, line_numbers):
""" Populates a set containing all line numbers used by the node and its
descendants.
line_numbers is a set that all the line numbers will be added to."""
if FormattedValue is not None and isinstance(node, FormattedValue):
# FormattedValue is a separate code block with its own line nums.
return
line_number = getattr(node, 'lineno', None)
if line_number is not None:
line_numbers.add(line_number)
for _, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self._find_line_numbers(item, line_numbers)
elif isinstance(value, AST):
self._find_line_numbers(value, line_numbers)
def visit_For(self, node):
new_node = self.generic_visit(node)
line_numbers = set()
self._find_line_numbers(new_node, line_numbers)
args = [Num(n=min(line_numbers)),
Num(n=max(line_numbers))]
new_body = [self._create_context_call('start_block', args)]
new_body.extend(self._trace_assignment_list(new_node.target))
new_body.extend(new_node.body)
new_node.body = new_body
return new_node
def visit_While(self, node):
new_node = self.generic_visit(node)
line_numbers = set()
self._find_line_numbers(new_node, line_numbers)
args = [Num(n=min(line_numbers)),
Num(n=max(line_numbers))]
new_node.body.insert(0,
self._create_context_call('start_block', args))
return new_node
# noinspection PyTypeChecker
def visit_FunctionDef(self, node):
""" Instrument a function definition by creating a new report builder
for this stack frame and putting it in a local variable. The local
variable has the same name as the global variable so all calls can
use the same CONTEXT_NAME symbol, but it means that I had to use this:
x = globals()['x'].start_frame()
Kind of ugly, but I think it was worth it to handle recursive calls.
"""
new_node = self.generic_visit(node)
line_numbers = set()
self._find_line_numbers(new_node, line_numbers)
first_line_number = min(line_numbers)
last_line_number = max(line_numbers)
args = [Num(n=first_line_number),
Num(n=last_line_number)]
try_body = new_node.body
globals_call = Call(func=Name(id='globals', ctx=Load()),
args=[],
keywords=[],
starargs=None,
kwargs=None)
global_context = Subscript(value=globals_call,
slice=Index(value=Str(s=CONTEXT_NAME)),
ctx=Load())
start_frame_call = Call(func=Attribute(value=global_context,
attr='start_frame',
ctx=Load()),
args=args,
keywords=[],
starargs=None,
kwargs=None)
context_assign = Assign(targets=[Name(id=CONTEXT_NAME, ctx=Store())],
value=start_frame_call)
new_node.body = [context_assign]
if isinstance(try_body[0], Expr) and isinstance(try_body[0].value, Str):
# Move docstring back to top of function.
new_node.body.insert(0, try_body.pop(0))
# trace function parameter values
for target in new_node.args.args:
if isinstance(target, Name) and target.id == 'self':
continue
if arg and isinstance(target, arg) and target.arg == 'self':
continue
new_node.body.append(self._trace_assignment(target, node.lineno))
if new_node.args.vararg is not None:
new_node.body.append(
self._trace_assignment(new_node.args.vararg, node.lineno))
if new_node.args.kwarg is not None:
new_node.body.append(
self._trace_assignment(new_node.args.kwarg, node.lineno))
if try_body:
handler_body = [self._create_context_call('exception'),
Raise()]
new_node.body.append(
TryExcept(body=try_body,
handlers=[ExceptHandler(body=handler_body)],
orelse=[],
finalbody=[]))
self._set_statement_line_numbers(try_body, first_line_number)
self._set_statement_line_numbers(handler_body, last_line_number)
return new_node
@staticmethod
def _is_module_header(statement):
if isinstance(statement, ImportFrom):
have a cache, return *None*
"""
if node in self.model.cache:
return self.model.cache[node].has(content)
def local_cache_lookup(self, node, content):
"""Check if the local cache of a node has a content object, without
changing the internal state of the cache.
The local cache is an area of the cache of a node reserved for
uncoordinated caching. This is currently used only by hybrid
hash-routing strategies.
This method is meant to be used by data collectors to calculate
metrics. It should not be used by strategies to look up for contents
during the simulation. Instead they should use
`NetworkController.get_content_local_cache`.
Parameters
----------
node : any hashable type
The node identifier
content : any hashable type
The content identifier
Returns
-------
has_content : bool
*True* if the cache of the node has the content, *False* otherwise.
If the node does not have a cache, return *None*
"""
if node in self.model.local_cache:
return self.model.local_cache[node].has(content)
else:
return False
def cache_dump(self, node):
"""Returns the dump of the content of a cache in a specific node
Parameters
----------
node : any hashable type
The node identifier
Returns
-------
dump : list
List of contents currently in the cache
"""
if node in self.model.cache:
return self.model.cache[node].dump()
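# Typical read-only usage from a data collector (sketch; `view` and node 3 are
# hypothetical):
#   if view.cache_lookup(3, content) or view.local_cache_lookup(3, content):
#       ...  # count a potential hit without mutating cache state
#   cached_contents = view.cache_dump(3)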
class NetworkModel(object):
"""Models the internal state of the network.
This object should never be edited by strategies directly, but only through
calls to the network controller.
"""
def __init__(self, topology, cache_policy, sched_policy, n_services, rate, seed=0, shortest_path=None):
"""Constructor
Parameters
----------
topology : fnss.Topology
The topology object
cache_policy : dict or Tree
cache policy descriptor. It has the name attribute which identify
the cache policy name and keyworded arguments specific to the
policy
shortest_path : dict of dict, optional
The all-pair shortest paths of the network
"""
# Filter inputs
if not isinstance(topology, fnss.Topology):
raise ValueError('The topology argument must be an instance of '
'fnss.Topology or any of its subclasses.')
# Shortest paths of the network
self.shortest_path = shortest_path if shortest_path is not None \
else symmetrify_paths(nx.all_pairs_dijkstra_path(topology))
# Network topology
self.topology = topology
self.topology_depth = 0
# Dictionary mapping each content object to its source
# dict of location of contents keyed by content ID
self.content_source = {}
# Dictionary mapping the reverse, i.e. nodes to set of contents stored
self.source_node = {}
# A heap with events (see Event class above)
self.eventQ = []
# Dictionary of link types (internal/external)
self.link_type = nx.get_edge_attributes(topology, 'type')
self.link_delay = fnss.get_delays(topology)
# Instead of this manual assignment, I could have converted the
# topology to directed before extracting type and link delay but that
# requires a deep copy of the topology that can take long time if
# many content source mappings are included in the topology
if not topology.is_directed():
for (u, v), link_type in list(self.link_type.items()):
self.link_type[(v, u)] = link_type
for (u, v), delay in list(self.link_delay.items()):
self.link_delay[(v, u)] = delay
cache_size = {}
comp_size = {}
service_size = {}
self.rate = rate
for node in topology.nodes_iter():
stack_name, stack_props = fnss.get_stack(topology, node)
# get the depth of the tree
            if stack_name == 'router' and 'depth' in self.topology.node[node].keys():
depth = self.topology.node[node]['depth']
if depth > self.topology_depth:
self.topology_depth = depth
# get computation size per depth
if stack_name == 'router':
if 'cache_size' in stack_props:
cache_size[node] = stack_props['cache_size']
if 'computation_size' in stack_props:
comp_size[node] = stack_props['computation_size']
if 'service_size' in stack_props:
service_size[node] = stack_props['service_size']
elif stack_name == 'source': # A Cloud with infinite resources
comp_size[node] = float('inf')
service_size[node] = float('inf')
contents = stack_props['contents']
self.source_node[node] = contents
for content in contents:
self.content_source[content] = node
if any(c < 1 for c in cache_size.values()):
            logger.warn('Some content caches have size equal to 0. '
                        'I am setting them to 1 and running the experiment anyway')
for node in cache_size:
if cache_size[node] < 1:
cache_size[node] = 1
policy_name = cache_policy['name']
policy_args = {k: v for k, v in cache_policy.items() if k != 'name'}
# The actual cache objects storing the content
self.cache = {node: CACHE_POLICY[policy_name](cache_size[node], **policy_args)
for node in cache_size}
# Generate the actual services processing requests
self.services = []
self.n_services = n_services
internal_link_delay = 0.001 # This is the delay from receiver to router
service_time_min = 0.10 # used to be 0.001
service_time_max = 0.10 # used to be 0.1
#delay_min = 0.005
delay_min = 2*topology.graph['receiver_access_delay'] + service_time_max
delay_max = delay_min + 2*topology.graph['depth']*topology.graph['link_delay'] + 0.005
service_indx = 0
random.seed(seed)
for service in range(0, n_services):
service_time = random.uniform(service_time_min, service_time_max)
#service_time = 2*random.uniform(service_time_min, service_time_max)
deadline = random.uniform(delay_min, delay_max) + 2*internal_link_delay
#deadline = service_time + 1.5*(random.uniform(delay_min, delay_max) + 2*internal_link_delay)
s = Service(service_time, deadline)
#print ("Service " + str(service) + " has a deadline of " + str(deadline))
self.services.append(s)
#""" #END OF Generating Services
### Prepare input for the optimizer
if False:
aFile = open('inputToOptimizer.txt', 'w')
aFile.write("# 1. ServiceIDs\n")
first = True
tostr = ""
for service in range(0, n_services):
if first:
tostr += str(service)
first = False
else:
tostr += "," + str(service)
            tostr += '\n'
            aFile.write(tostr)
aFile.write("# 2. Set of APs:\n")
first = True
tostr = ""
for ap in topology.graph['receivers']:
if first:
tostr = str(ap)
first = False
else:
tostr += "," + str(ap)
tostr += '\n'
aFile.write(tostr)
aFile.write("# 3. Set of nodes:\n")
first = True
tostr = ""
for node in topology.nodes_iter():
if node in topology.graph['receivers']:
continue
if first:
tostr = str(node)
first = False
else:
tostr = "," + str(node)
tostr += '\n'
aFile.write(tostr)
aFile.write("# 4. NodeID, serviceID, numCores\n")
if topology.graph['type'] == 'TREE':
ap_node_to_services = {}
ap_node_to_delay = {}
for ap in topology.graph['receivers']:
node_to_delay = {}
node_to_services = {}
node_to_delay[ap] = 0.0
ap_node_to_services[ap] = node_to_services
ap_node_to_delay[ap] = node_to_delay
for node in topology.nodes_iter():
for egress, ingress in topology.edges_iter():
#print str(ingress) + " " + str(egress)
if ingress in node_to_delay.keys() and egress not in node_to_delay.keys():
node_to_delay[egress] = node_to_delay[ingress] + topology.edge[ingress][egress]['delay']
node_to_services[egress] = []
service_indx = 0
for s in self.services:
if s.deadline >= (s.service_time + 2*node_to_delay[egress]):
node_to_services[egress].append(service_indx)
service_indx += 1
aFile.write("# 4. Ap,Node,service1,service2, ....]\n")
for ap in topology.graph['receivers']:
node_to_services = ap_node_to_services[ap]
node_to_delay = ap_node_to_delay[ap]
for node, services in node_to_services.items():
s = str(ap) + "," + str(node) #+ "," + str(node_to_delay[node])
for serv in services:
s += "," + str(serv)
s += '\n'
aFile.write(s)
aFile.write("# 5. AP, rate_service1, rate_service2, ... rate_serviceN\n")
rate = 1.0/(len(topology.graph['receivers'])*len(self.services))
for ap in topology.graph['receivers']:
s = str(ap) + ","
for serv in self.services:
s += str(rate)
s += '\n'
aFile.write(s)
aFile.close()
ComputationSpot.services = self.services
self.compSpot = {node: ComputationSpot(self, comp_size[node], service_size[node], self.services, node, sched_policy, None)
for node in comp_size}
#print ("Generated Computation Spot Objects")
sys.stdout.flush()
# This is for a local un-coordinated cache (currently used only by
# Hashrouting with edge cache)
self.local_cache = {}
# Keep track of nodes and links removed to simulate failures
self.removed_nodes = {}
# This keeps track of neighbors of a removed node at the time of removal.
# It is needed to ensure that when the node is restored only links that
# were removed as part of the node removal are restored and to prevent
# restoring nodes that were removed manually before removing the node.
self.disconnected_neighbors = {}
self.removed_links = {}
self.removed_sources = {}
self.removed_caches = {}
self.removed_local_caches = {}
class NetworkController(object):
"""Network controller
This class is in charge of executing operations on the network model on
behalf of a strategy implementation. It is also in charge of notifying
data collectors of relevant events.
"""
def __init__(self, model):
"""Constructor
Parameters
----------
model : NetworkModel
Instance of the network model
"""
self.session = {}
self.model = model
self.collector = None
def attach_collector(self, collector):
"""Attach a data collector to which all events will be reported.
Parameters
----------
collector : DataCollector
The data collector
"""
self.collector = collector
def detach_collector(self):
"""Detach the data collector."""
self.collector = None
def start_session(self, timestamp, receiver, content, log, flow_id=0, deadline=0):
"""Instruct the controller to start a new session (i.e. the retrieval
of a content).
Parameters
----------
timestamp : int
The timestamp of the event
receiver : any hashable type
            The
import importlib
import os.path
import warnings
import numpy as np
import pysb
from pysb.simulator import ScipyOdeSimulator
from scipy.stats import norm, uniform
from ..sampled_parameter import SampledParameter
default_solver = ScipyOdeSimulator
def is_numbers(inputString):
return all(char.isdigit() for char in inputString)
def parse_directive(directive, priors, no_sample, Keq_sample):
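    # NOTE: the numeric-index branches below resolve indices against `model`,
    # which is assumed to be a module-level PySB model in scope wherever
    # parse_directive is called.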
words = directive.split()
if words[1] == 'prior':
if is_numbers(words[2]):
par_idx = int(words[2])
par = model.parameters[par_idx].name
else:
par = words[2]
priors[par] = words[3]
elif words[1] == 'no-sample':
if is_numbers(words[2]):
par_idx = int(words[2])
par = model.parameters[par_idx].name
else:
par = words[2]
no_sample.append(par)
elif words[1] == 'sample_Keq':
if is_numbers(words[2]):
par_idx = int(words[2])
par_f = model.parameters[par_idx].name
if is_numbers(words[3]):
par_idx = int(words[3])
par_r = model.parameters[par_idx].name
if is_numbers(words[4]):
par_idx = int(words[4])
par_rep = model.parameters[par_idx].name
no_sample.append(par_rep)
Keq_sample.append((par_f, par_r, par_rep))
return
def prune_no_samples(parameters, no_sample):
pruned_pars = [parameter for parameter in parameters if parameter[0].name not in no_sample]
return pruned_pars
def update_with_Keq_samples(parameters, Keq_sample):
k_reps = [sample[2] for sample in Keq_sample]
pruned_pars = [parameter for parameter in parameters if parameter[0].name not in k_reps]
return pruned_pars
def write_norm_param(p_name, p_val):
line = "sp_{} = SampledParameter(\'{}\', norm(loc=np.log10({}), scale=2.0))\n".format(p_name, p_name, p_val)
return line
def write_uniform_param(p_name, p_val):
line = "sp_{} = SampledParameter(\'{}\', uniform(loc=np.log10({})-1.0, scale=2.0))\n".format(p_name, p_name, p_val)
return line
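# For instance (hypothetical parameter), write_norm_param('kf', 1.0e-2)
# returns the source line
#   sp_kf = SampledParameter('kf', norm(loc=np.log10(0.01), scale=2.0))
# i.e. these helpers emit code for a generated sampling script, with priors
# defined in log10 space.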
class NestIt(object):
"""Container to store parameters and priors for sampling.
This object is geared towards flagging PySB model parameters at the level of
model definition for Nested Sampling. However, it could be used outside
model definition.
Attributes:
parms (dict of :obj:): A dictionary keyed to the parameter names. The
values are the parameter priors as given in the call function.
"""
def __init__(self):
self.parms = dict()
self._default_prior = 'uniform'
return
def __call__(self, parameter, prior=None):
"""Add a parameter to the list.
Args:
parameter (:obj:pysb.Parameter): The parameter to be registered for
Nested Sampling.
prior (:obj:scipy.stats.RVS): The prior for the parameter. Should
Should be defined using log10 scale. Default: None
If None, a uniform prior centered on the parameter.value
is used with a scale 4 orders of magnitude.
"""
if prior is None:
if self._default_prior == 'norm':
# Default to norm distribution
prior = norm(loc=np.log10(parameter.value), scale=2.0)
else:
# Default to uniform distribution
prior = uniform(loc=np.log10(parameter.value)-2.0, scale=4.0)
self.parms[parameter.name] = prior
return parameter
def __getitem__(self, key):
return self.parms[key]
def __setitem__(self, key, prior):
self.parms[key] = prior
def __delitem__(self, key):
del self.parms[key]
def __contains__(self, key):
        return key in self.parms
def __iadd__(self, parm):
self.__call__(parm)
return self
def __isub__(self, parm):
try:
name = parm.name
self.__delitem__(name)
except:
self.__delitem__(parm)
return self
def names(self):
return list(self.parms.keys())
def keys(self):
return self.parms.keys()
def mask(self, model_parameters):
names = self.names()
return [(parm.name in names) for parm in model_parameters]
def priors(self):
        return [self.parms[name] for name in self.parms.keys()]
def sampled_parameters(self):
return [SampledParameter(name, self.parms[name]) for name in self.keys()]
def default_to_norm_prior(self):
self._default_prior = 'norm'
return
def default_to_uniform_prior(self):
self._default_prior = 'uniform'
return
def add_all_kinetic_params(self, pysb_model):
for rule in pysb_model.rules:
if rule.rate_forward:
try:
self.__call__(rule.rate_forward)
except:
warnings.warn('Ignoring kinetic rate defined as a PySB Expression.')
if rule.rate_reverse:
try:
self.__call__(rule.rate_reverse)
except:
warnings.warn('Ignoring kinetic rate defined as a PySB Expression.')
return
def add_all_nonkinetic_params(self, pysb_model):
kinetic_params = list()
for rule in pysb_model.rules:
if rule.rate_forward:
kinetic_params.append(rule.rate_forward)
if rule.rate_reverse:
kinetic_params.append(rule.rate_reverse)
        for param in pysb_model.parameters:
if param not in kinetic_params:
self.__call__(param)
return
def add_by_name(self, pysb_model, name_or_list):
if isinstance(name_or_list, (list, tuple)):
for param in pysb_model.parameters:
if param.name in name_or_list:
self.__call__(param)
else:
for param in pysb_model.parameters:
if param.name == name_or_list:
self.__call__(param)
return
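# Minimal usage sketch at model definition time (hypothetical PySB model):
#   nest_it = NestIt()
#   Parameter('kf', 1e-2)                       # not sampled
#   kr = nest_it(Parameter('kr', 1e-3))         # sampled, default prior
#   nest_it['kr'] = norm(loc=-3.0, scale=1.0)   # optionally override it
# The populated NestIt is then passed to NestedSampleIt below via nest_it=.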
class NestedSampleIt(object):
"""Create instances of Nested Samling objects for PySB models.
Args:
model (pysb.Model): The instance of the PySB model that you want to run
Nested Sampling on.
observable_data (dict of tuple): Defines the observable data to
use when computing the loglikelihood function. It is a dictionary
keyed to the model Observables (or species names) that the
data corresponds to. Each element is a 3 item tuple of format:
(:numpy.array:data, None or :numpy.array:data_standard_deviations,
None or :list like:time_idxs or :list like:time_mask).
timespan (numpy.array): The timespan for model simulations.
solver (:obj:): The ODE solver to use when running model simulations.
Defaults to pysb.simulator.ScipyOdeSimulator.
solver_kwargs (dict): Dictionary of optional keyword arguments to
pass to the solver when it is initialized. Defaults to dict().
nest_it (:obj:gleipnir.pysb_utilities.nestedsample_it.NestIt): An
instance of the NestIt class with the data about the parameters to
be sampled. If None (and builder is None), the default parameters to be sampled
are the kinetic rate parameters with uniform priors of four orders of
magnitude. Default: None
builder (:obj:pysb.builder.Builder): An instance of the Builder class
with the data about the parameters to be sampled. If None
(and nest_it is None), the default parameters to be sampled
are the kinetic rate parameters with uniform priors of four orders
of magnitude. Default: None
Attributes:
model
observable_data
timespan
solver
solver_kwargs
"""
def __init__(self, model, observable_data, timespan,
solver=default_solver,
solver_kwargs=None, nest_it=None, builder=None):
"""Inits the NestedSampleIt."""
if solver_kwargs is None:
solver_kwargs = dict()
self.model = model
self.observable_data = observable_data
self.timespan = timespan
self.solver = solver
self.solver_kwargs = solver_kwargs
# self.ns_version = None
self._ns_kwargs = None
self._like_data = dict()
self._data = dict()
self._data_mask = dict()
for observable_key in observable_data.keys():
self._like_data[observable_key] = norm(loc=observable_data[observable_key][0],
scale=observable_data[observable_key][1])
self._data[observable_key] = observable_data[observable_key][0]
self._data_mask[observable_key] = observable_data[observable_key][2]
# print(observable_data[observable_key][2])
if observable_data[observable_key][2] is None:
self._data_mask[observable_key] = range(len(self.timespan))
self._model_solver = solver(self.model, tspan=self.timespan, **solver_kwargs)
if nest_it is not None:
parm_mask = nest_it.mask(model.parameters)
self._sampled_parameters = [SampledParameter(parm.name, nest_it[parm.name]) for i,parm in enumerate(model.parameters) if parm_mask[i]]
self._rate_mask = parm_mask
elif builder is not None:
pnames = [parm.name for parm in builder.estimate_params]
self._rate_mask = [(parm.name in pnames) for parm in model.parameters]
self._sampled_parameters = [SampledParameter(parm.name, builder.priors[pnames.index(parm.name)]) for i,parm in enumerate(model.parameters) if self._rate_mask[i]]
else:
params = list()
for rule in model.rules:
if rule.rate_forward:
params.append(rule.rate_forward)
if rule.rate_reverse:
params.append(rule.rate_reverse)
rate_mask = [model.parameters.index(param) for param in params]
self._sampled_parameters = [SampledParameter(param.name, uniform(loc=np.log10(param.value)-2.0, scale=4.0)) for i,param in enumerate(model.parameters) if i in rate_mask]
self._rate_mask = rate_mask
self._param_values = np.array([param.value for param in model.parameters])
self._custom_loglikelihood = None
return
def sum_norm_logpdfs_loglikelihood(self, position):
"""Compute the loglikelihood using the normal distribution estimator.
Args:
            position (numpy.array): The parameter vector to compute the
                loglikelihood of.
Returns:
float: The natural logarithm of the likelihood estimate.
"""
Y = np.copy(position)
params = self._param_values.copy()
params[self._rate_mask] = 10**Y
sim = self._model_solver.run(param_values=[params]).all
logl = 0.
for observable in self._like_data.keys():
sim_vals = sim[observable][self._data_mask[observable]]
logl += np.sum(self._like_data[observable].logpdf(sim_vals))
if np.isnan(logl):
return -np.inf
return logl
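    # In effect the estimator above computes
    #   logL = sum_obs sum_t log N(sim_obs(t) | data_obs(t), sigma_obs(t))
    # using the normal distributions frozen from observable_data in __init__,
    # after mapping the position vector out of log10 space.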
def mse_loglikelihood(self, position):
"""Compute the loglikelihood using the negative mean squared error estimator.
Args:
            position (numpy.array): The parameter vector to compute the loglikelihood of.
Returns:
float: The natural logarithm of the likelihood estimate.
"""
Y = np.copy(position)
params = self._param_values.copy()
params[self._rate_mask] = 10**Y
sim = self._model_solver.run(param_values=[params]).all
logl = 0.0
for observable in self._like_data.keys():
sim_vals = sim[observable][self._data_mask[observable]]
logl -= np.mean((self._data[observable]-sim_vals)**2)
if np.isnan(logl):
return -np.inf
return logl
def sse_loglikelihood(self, position):
"""Compute the loglikelihood using the negative sum of squared errors estimator.
Args:
            position (numpy.array): The parameter vector to compute the
                loglikelihood of.
Returns:
float: The natural logarithm of the likelihood estimate.
"""
Y = np.copy(position)
params = self._param_values.copy()
params[self._rate_mask] = 10**Y
sim = self._model_solver.run(param_values=[params]).all
logl = 0.0
for observable in self._like_data.keys():
sim_vals = sim[observable][self._data_mask[observable]]
logl -= np.sum((self._data[observable]-sim_vals)**2)
if np.isnan(logl):
return -np.inf
return logl
def custom_loglikelihood(self, position):
"""Compute the cost using the negative sum of squared errors estimator.
Args:
            position (numpy.array): The parameter vector to compute the
                loglikelihood of.
Returns:
float: The natural logarithm of the likelihood estimate.
"""
Y = np.copy(position)
params = self._param_values.copy()
params[self._rate_mask] = 10.**Y
sim = self._model_solver.run(param_values=[params]).all
logl = self._custom_loglikelihood(self.model, sim)
if np.isnan(logl):
return -np.inf
return logl
def __call__(self, ns_version='built-in',
ns_population_size=1000, ns_kwargs=None,
log_likelihood_type='snlpdf',
custom_loglikelihood=None):
"""Call the NestedSampleIt instance to construct to instance of the NestedSampling object.
Args:
ns_version (str): Defines which version of Nested Sampling to use.
Options are 'built-in'=>Gleipnir's built-in implementation
of the classic Nested Sampling algorithm, 'multinest'=>Use the
MultiNest code via Gleipnir, 'polychord'=>Use the PolyChord code
via Gleipnir, or 'dnest4'=>Use the DNest4 program via Gleipnir.
Defaults to 'built-in'.
ns_population_size (int): Set the size of the active population
of sample points to use during Nested Sampling runs.
Defaults to 1000.
ns_kwargs (dict): Dictionary of any additional optional keyword
arguments to pass to NestedSampling object constructor.
Defaults to dict().
log_likelihood_type (str): Define the type of loglikelihood estimator
to use. Options are 'snlpdf'=>Compute the loglikelihood using
the normal distribution estimator, 'mse'=>Compute the
loglikelihood using the negative mean squared error estimator,
                'sse'=>Compute the loglikelihood using the negative sum of squared errors estimator.
a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = self.freq
return PeriodIndex(self.values, freq=freq)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
        Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = datetools.to_offset(freq)
snapped = np.empty(len(self), dtype='M8[us]')
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = np.datetime64(s)
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
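    # Example: snapping to weekly frequency ('W', anchored on Sunday) moves
    # each stamp to whichever of rollback/rollforward is nearer, so a Thursday
    # snaps forward to the coming Sunday while a Monday snaps back to the
    # previous one.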
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if isinstance(freq, basestring):
freq = datetools.to_offset(freq)
return Index.shift(self, n, freq)
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shift with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name)
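# Sketch: on a regular daily index, shift moves both endpoints by n periods
# and preserves the frequency:
# idx = DatetimeIndex(start='2012-01-01', end='2012-01-05', freq='D')
# idx.shift(2)  # 2012-01-03 through 2012-01-07, still daily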
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self.values.take(indices, axis=axis)
return DatetimeIndex(taken, tz=self.tz)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if self._can_fast_union(other):
return self._fast_union(other)
else:
return Index.union(self, other)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if not isinstance(other, DatetimeIndex) and len(other) > 0:
try:
other = DatetimeIndex(other)
except ValueError:
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
return DatetimeIndex(joined, name=name)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None:
return False
if len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_start = right[0]
# Only need to "adjoin", not overlap
return (left_end + offset) >= right_start
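# Worked example of the adjoin test above: daily ranges Jan 1-3 and Jan 4-6
# pass the check because left_end + offset (Jan 4) >= right_start (Jan 4),
# so they can be fast-unioned even though they never overlap.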
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = np.concatenate((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = getattr(obj, 'offset', None)
self.tz = getattr(obj, 'tz', None)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
return Index.intersection(self, other)
elif other.offset != self.offset:
return Index.intersection(self, other)
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._view_like(left_chunk)
def _partial_date_slice(self, reso, parsed):
if not self.is_monotonic:
raise TimeSeriesError('Partial indexing only valid for ordered time'
' series')
if reso == 'year':
t1 = Timestamp(datetime(parsed.year, 1, 1))
t2 = Timestamp(datetime(parsed.year, 12, 31))
elif reso == 'month':
d = lib.monthrange(parsed.year, parsed.month)[1]
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, parsed.month, d))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = lib.monthrange(parsed.year, qe)[1] # at end of month
t1 = Timestamp(datetime(parsed.year, parsed.month, 1))
t2 = Timestamp(datetime(parsed.year, qe, d))
else:
raise KeyError
stamps = self.asi8
left = stamps.searchsorted(t1.value, side='left')
right = stamps.searchsorted(t2.value, side='right')
return slice(left, right)
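# Worked example of the quarter arithmetic above: for a quarter starting in
# October (parsed.month == 10), qe = (((10 - 1) + 2) % 12) + 1 == 12, so the
# slice spans Oct 1 through Dec 31 (monthrange gives d == 31) of parsed.year.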
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
return Index.get_value(self, series, key)
except KeyError:
try:
asdt, parsed, reso = datetools.parse_time_string(key)
key = asdt
loc = self._partial_date_slice(reso, parsed)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
stamp = Timestamp(key)
try:
return self._engine.get_value(series, stamp)
except KeyError:
raise KeyError(stamp)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
try:
return self._get_string_slice(key)
except (TypeError, KeyError):
pass
stamp = Timestamp(key)
try:
return self._engine.get_loc(stamp)
except KeyError:
raise KeyError(stamp)
def _get_string_slice(self, key):
asdt, parsed, reso = datetools.parse_time_string(key)
key = asdt
loc = self._partial_date_slice(reso, parsed)
return loc
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if isinstance(start, basestring) or isinstance(end, basestring):
try:
if start:
start_loc = self._get_string_slice(start).start
else:
start_loc = 0
if end:
end_loc = self._get_string_slice(end).stop
else:
end_loc = len(self)
return start_loc, end_loc
except KeyError:
pass
return Index.slice_locs(self, start, end)
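# Sketch: on a daily 2012 index, slice_locs('2012-01', '2012-02') resolves the
# partial ISO-8601 strings via _get_string_slice, yielding integer bounds that
# cover Jan 1 through the end of February (the stop of the February slice).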
def __getitem__(self, key):
"""Override numpy.ndarray's __getitem__ method to work as desired"""
arr_idx = self.view(np.ndarray)
if np.isscalar(key):
val = arr_idx[key]
return Timestamp(val, offset=self.offset, tz=self.tz)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
key = lib.maybe_booleans_to_slice(key)
new_offset = None
if isinstance(key, slice):
if self.offset is not None and key.step is not None:
new_offset = key.step * self.offset
else:
new_offset = self.offset
result = arr_idx[key]
if result.ndim > 1:
return result
return self._simple_new(result, self.name, new_offset, self.tz)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, f):
try:
return f(self)
except Exception:
return Index.map(self, f)
# alias to offset
@property
def freq(self):
return self.offset
# Fast field accessors for periods of datetime index
# --------------------------------------------------------------
@property
def year(self):
return lib.fast_field_accessor(self.asi8, 'Y')
@property
def month(self):
return lib.fast_field_accessor(self.asi8, 'M')
@property
def day(self):
return lib.fast_field_accessor(self.asi8, 'D')
@property
def hour(self):
return lib.fast_field_accessor(self.asi8, 'h')
@property
def minute(self):
return lib.fast_field_accessor(self.asi8, 'm')
@property
def second(self):
return lib.fast_field_accessor(self.asi8, 's')
@property
def microsecond(self):
return lib.fast_field_accessor(self.asi8, 'us')
@property
def weekofyear(self):
return lib.fast_field_accessor(self.asi8, 'woy')
week = weekofyear
@property
def dayofweek(self):
return lib.fast_field_accessor(self.asi8, 'dow')
weekday = dayofweek
@property
def dayofyear(self):
return lib.fast_field_accessor(self.asi8, 'doy')
@property
def quarter(self):
return lib.fast_field_accessor(self.asi8, 'q')
def __iter__(self):
return iter(self.asobject)
def searchsorted(self, key, side='left'):
if isinstance(key, np.ndarray):
key = np.array(key, dtype='M8[us]', copy=False)
else:
key = _to_m8(key)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
# hack to workaround argmin failure
def argmin(self):
return (-self).argmax()
@property
def inferred_type(self):
# b/c datetime is represented as microseconds since the epoch, make
# sure we can't have ambiguous indexing
return 'datetime64'
@property
def _constructor(self):
return DatetimeIndex
@property
def dtype(self):
return np.dtype('M8')
@property
def is_all_dates(self):
return True
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'datetime64'):
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except Exception:
return False
return np.array_equal(self.asi8, other.asi8)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if type(item) == datetime:
item = _to_m8(item)
if self.offset is not None and not self.offset.onOffset(item):
raise ValueError("Cannot insert value at non-conforming time")
return super(DatetimeIndex, self).insert(loc, item)
def _view_like(self, ndarray):
result = ndarray.view(type(self))
result.offset = self.offset
result.tz = self.tz
return result
returned by getList."""
result = _convert_dict(
asset, {
'name': 'id',
},
defaults={'type': 'Image'})
return result
def _convert_asset_type_for_get_list_result(asset_type):
"""Converts an EarthEngineAsset.Type to the format returned by getList."""
return _convert_value(
asset_type, {
'IMAGE': 'Image',
'IMAGE_COLLECTION': 'ImageCollection',
'TABLE': 'Table',
'FOLDER': 'Folder'
}, 'Unknown')
def convert_asset_type_for_create_asset(asset_type):
"""Converts a createAsset asset type to an EarthEngineAsset.Type."""
return _convert_value(
asset_type, {
'Image': 'IMAGE',
'ImageCollection': 'IMAGE_COLLECTION',
'Table': 'TABLE',
'Folder': 'FOLDER'
}, asset_type)
def convert_asset_id_to_asset_name(asset_id):
"""Converts an internal asset ID to a Cloud API asset name.
If asset_id already matches the format 'projects/*/assets/**', it is returned
as-is.
Args:
asset_id: The asset ID to convert.
Returns:
An asset name string in the format 'projects/*/assets/**'.
"""
if re.match(ASSET_NAME_PATTERN, asset_id) or is_asset_root(asset_id):
return asset_id
elif asset_id.split('/')[0] in ['users', 'projects']:
return 'projects/earthengine-legacy/assets/{}'.format(asset_id)
else:
return 'projects/earthengine-public/assets/{}'.format(asset_id)
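# Illustrative mappings for the branches above (asset IDs are hypothetical):
# convert_asset_id_to_asset_name('projects/p/assets/img')  # returned as-is
# convert_asset_id_to_asset_name('users/alice/img')
#     -> 'projects/earthengine-legacy/assets/users/alice/img'
# convert_asset_id_to_asset_name('COPERNICUS/S2')
#     -> 'projects/earthengine-public/assets/COPERNICUS/S2'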
def split_asset_name(asset_name):
"""Splits an asset name into the parent and ID parts.
Args:
asset_name: The asset ID to split, in the form 'projects/*/assets/**'.
Returns:
The parent ('projects/*') and ID ('**') parts of the name.
"""
projects, parent, _, remainder = asset_name.split('/', 3)
return projects + '/' + parent, remainder
def convert_operation_name_to_task_id(operation_name):
"""Converts an Operation name to a task ID."""
found = re.search(r'^.*operations/(.*)$', operation_name)
return found.group(1) if found else operation_name
def convert_task_id_to_operation_name(task_id):
"""Converts a task ID to an Operation name."""
return 'projects/{}/operations/{}'.format(_cloud_api_user_project, task_id)
def convert_params_to_image_manifest(params):
"""Converts params to an ImageManifest for ingestion."""
return _convert_dict(
params, {
'id': ('name', convert_asset_id_to_asset_name),
'tilesets': ('tilesets', convert_tilesets_to_one_platform_tilesets)
},
retain_keys=True)
def convert_params_to_table_manifest(params):
"""Converts params to a TableManifest for ingestion."""
return _convert_dict(
params, {
'id': ('name', convert_asset_id_to_asset_name),
'sources': ('sources', convert_sources_to_one_platform_sources),
},
retain_keys=True)
def convert_tilesets_to_one_platform_tilesets(tilesets):
"""Converts a tileset to a one platform representation of a tileset."""
converted_tilesets = []
for tileset in tilesets:
converted_tileset = _convert_dict(
tileset,
{'sources': ('sources', convert_sources_to_one_platform_sources)},
retain_keys=True)
converted_tilesets.append(converted_tileset)
return converted_tilesets
def convert_sources_to_one_platform_sources(sources):
"""Converts the sources to one platform representation of sources."""
converted_sources = []
for source in sources:
converted_source = copy.deepcopy(source)
if 'primaryPath' in converted_source:
file_sources = [converted_source['primaryPath']]
if 'additionalPaths' in converted_source:
file_sources += converted_source['additionalPaths']
del converted_source['additionalPaths']
del converted_source['primaryPath']
converted_source['uris'] = file_sources
if 'maxError' in converted_source:
converted_source['maxErrorMeters'] = converted_source['maxError']
del converted_source['maxError']
converted_sources.append(converted_source)
return converted_sources
def encode_number_as_cloud_value(number):
# Numeric values in constantValue-style nodes end up stored in doubles. If the
# input is an integer that loses precision as a double, use the int64 slot
# ("integerValue") in ValueNode.
if (isinstance(number, six.integer_types) and float(number) != number):
return {'integerValue': str(number)}
else:
return {'constantValue': number}
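# Worked example: 2**53 + 1 == 9007199254740993 rounds to 2**53 as a double,
# so float(n) != n and it becomes {'integerValue': '9007199254740993'}, while
# 42 survives the double round-trip and stays {'constantValue': 42}.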
def convert_algorithms(algorithms):
"""Converts a ListAlgorithmsResult to the internal format.
The internal code expects a dict mapping each algorithm's name to a dict
containing:
- description: string
- returns: string
- arguments: list of dicts, each containing
- name: argument name
- type: argument type
- description: argument description (optional)
- optional: bool (optional)
- default: default value (optional)
- hidden: bool (optional)
- preview: bool (optional)
- deprecated: string containing deprecation reason (optional)
Args:
algorithms: A ListAlgorithmResult.
Returns:
A version of that algorithms list that can be interpreted by
apifunction.initialize().
"""
return dict(
_convert_algorithm(algorithm) for algorithm in algorithms['algorithms'])
def _convert_algorithm(algorithm):
"""Converts an Algorithm to the internal format."""
# Strip leading 'algorithms/' from the name.
algorithm_name = algorithm['name'][11:]
converted_algorithm = _convert_dict(
algorithm, {
'description': 'description',
'returnType': 'returns',
'arguments': ('args', _convert_algorithm_arguments),
'hidden': 'hidden',
'preview': 'preview'
},
defaults={
'description': '',
'returns': '',
'args': []
})
if algorithm.get('deprecated'):
converted_algorithm['deprecated'] = algorithm.get('deprecationReason', '')
return algorithm_name, converted_algorithm
def _convert_algorithm_arguments(args):
return [_convert_algorithm_argument(arg) for arg in args]
def _convert_algorithm_argument(arg):
return _convert_dict(
arg, {
'argumentName': 'name',
'type': 'type',
'description': 'description',
'optional': 'optional',
'defaultValue': 'default'
},
defaults={
'description': '',
'type': ''
})
def convert_to_image_file_format(format_str):
"""Converts a legacy file format string to an ImageFileFormat enum value.
Args:
format_str: A string describing an image file format that was passed to
one of the functions in ee.data that takes image file formats.
Returns:
A best guess at the corresponding ImageFileFormat enum name.
"""
if format_str is None:
return 'AUTO_JPEG_PNG'
format_str = format_str.upper()
if format_str == 'JPG':
return 'JPEG'
elif format_str == 'AUTO':
return 'AUTO_JPEG_PNG'
elif format_str == 'GEOTIFF':
return 'GEO_TIFF'
elif format_str == 'TFRECORD':
return 'TF_RECORD_IMAGE'
else:
# It's probably "JPEG" or "PNG", but might be some other supported format.
# Let the server validate it.
return format_str
def convert_to_table_file_format(format_str):
"""Converts a legacy file format string to a TableFileFormat enum value.
Args:
format_str: A string describing a table file format that was passed to
one of the functions in ee.data that takes table file formats.
Returns:
A best guess at the corresponding TableFileFormat enum name.
"""
format_str = format_str.upper()
if format_str == 'GEOJSON':
return 'GEO_JSON'
elif format_str == 'TFRECORD':
return 'TF_RECORD_TABLE'
else:
# It's probably "CSV" or "KML" or one of the others.
# Let the server validate it.
return format_str
def convert_to_band_list(bands):
"""Converts a band list, possibly as CSV, to a real list of bands.
Args:
bands: A list of strings containing band names, or a string containing
a comma-separated list of band names, or None.
Returns:
A list of band names.
"""
if bands is None:
return []
elif isinstance(bands, six.string_types):
return bands.split(',')
elif isinstance(bands, list):
return bands
else:
raise ee_exception.EEException('Invalid band list ' + bands)
def convert_to_visualization_options(params):
"""Extracts a VisualizationOptions from a param dict.
Args:
params: See ee.data.getMapId() for the description of the keys and values
that might appear here.
Returns:
A VisualizationOptions proto, in dict form.
"""
result = {}
if 'palette' in params:
palette = params['palette']
if isinstance(palette, six.string_types):
palette = palette.split(',')
result['paletteColors'] = palette
value_range = len(palette) - 1
else:
value_range = 255
ranges = []
if 'gain' in params or 'bias' in params:
if 'min' in params or 'max' in params:
raise ee_exception.EEException(
'Gain and bias can\'t be specified together with min and max')
# The Cloud API doesn't support gain/bias, only min/max. Extract and
# convert.
gains = _convert_csv_numbers_to_list(params.get('gain'))
biases = _convert_csv_numbers_to_list(params.get('bias'))
if not gains:
gains = [1.0] * len(biases)
elif not biases:
biases = [0.0] * len(gains)
elif len(gains) != len(biases):
raise ee_exception.EEException('Length of gain and bias must match.')
for gain, bias in zip(gains, biases):
# The transformation equations are
# x -> x * gain + bias
# x -> range * (x - min) / (max - min)
# Solving for (min, max) given (gain, bias) gives:
range_min = -bias / gain
range_max = value_range / gain + range_min
ranges.append({'min': range_min, 'max': range_max})
elif 'min' in params or 'max' in params:
mins = _convert_csv_numbers_to_list(params.get('min'))
maxes = _convert_csv_numbers_to_list(params.get('max'))
if not mins:
mins = [0.0] * len(maxes)
elif not maxes:
maxes = [1.0] * len(mins)
elif len(mins) != len(maxes):
raise ee_exception.EEException('Length of min and max must match.')
for range_min, range_max in zip(mins, maxes):
ranges.append({'min': range_min, 'max': range_max})
if ranges:
result['ranges'] = ranges
gammas = _convert_csv_numbers_to_list(params.get('gamma'))
if len(gammas) > 1:
raise ee_exception.EEException('Only one gamma value is supported.')
elif gammas:
result['gamma'] = {'value': gammas[0]}
return result
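# Worked example of the gain/bias -> min/max conversion above: with gain=2.0,
# bias=-1.0 and no palette (value_range == 255), range_min = -(-1.0)/2.0 = 0.5
# and range_max = 255/2.0 + 0.5 = 128.0, giving {'min': 0.5, 'max': 128.0}.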
def _convert_csv_numbers_to_list(value):
"""Converts a string containing CSV numbers to a list."""
if not value:
return []
return [float(x) for x in value.split(',')]
def convert_operation_to_task(operation):
"""Converts an Operation to a legacy Task."""
result = _convert_dict(
operation['metadata'], {
'createTime': ('creation_timestamp_ms', _convert_timestamp_to_msec),
'updateTime': ('update_timestamp_ms', _convert_timestamp_to_msec),
'startTime': ('start_timestamp_ms', _convert_timestamp_to_msec),
'attempt': 'attempt',
'state': ('state', _convert_operation_state_to_task_state),
'description': 'description',
'type': 'task_type',
'destinationUris': 'destination_uris',
})
if operation.get('done'):
if 'error' in operation:
result['error_message'] = operation['error']['message']
result['id'] = convert_operation_name_to_task_id(operation['name'])
result['name'] = operation['name']
return result
def _convert_operation_state_to_task_state(state):
"""Converts a state string from an Operation to the Task equivalent."""
return _convert_value(
state, {
'PENDING': 'READY',
'RUNNING': 'RUNNING',
'CANCELLING': 'CANCEL_REQUESTED',
'SUCCEEDED': 'COMPLETED',
'CANCELLED': 'CANCELLED',
'FAILED': 'FAILED'
}, 'UNKNOWN')
def convert_iam_policy_to_acl(policy):
"""Converts an IAM Policy proto to the legacy ACL format."""
bindings = {
binding['role']: binding.get('members', [])
for binding in policy.get('bindings', [])
}
owners = bindings.get('roles/owner', [])
readers = bindings.get('roles/viewer', [])
writers = bindings.get('roles/editor', [])
if 'allUsers' in readers:
all_users_can_read = True
readers.remove('allUsers')
else:
all_users_can_read = False
result = {'owners': owners, 'readers': readers, 'writers': writers}
if all_users_can_read:
result['all_users_can_read'] = True
return result
def convert_acl_to_iam_policy(acl):
"""Converts the legacy ACL format to an IAM Policy proto."""
owners = acl.get('owners', [])
readers = acl.get('readers', [])
if acl.get('all_users_can_read', False):
readers.append('allUsers')
writers = acl.get('writers', [])
bindings = []
if owners:
bindings.append({'role': 'roles/owner', 'members': owners})
if readers:
bindings.append({'role': 'roles/viewer', 'members': readers})
if writers:
bindings.append({'role': 'roles/editor', 'members': writers})
return {'bindings': bindings}
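# Round-trip sketch (hypothetical principals): the ACL
# {'owners': ['user:a@example.com'], 'readers': [], 'all_users_can_read': True}
# yields bindings for roles/owner and roles/viewer (the latter holding
# 'allUsers'), and convert_iam_policy_to_acl maps that policy back to an
# equivalent ACL.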
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portal_members_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portal_members_id_get`")
collection_formats = {}
resource_path = '/PortalMembers/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalMember',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
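# Usage sketch (client setup is hypothetical; the plain wrapper follows the
# generated *_with_http_info pattern shown above):
# api = PortalMembersApi(api_client)
# member = api.portal_members_id_get('42', filter='{"include": "member"}')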
def portal_members_id_head(self, id, **kwargs):
"""
Check whether a model instance exists in the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_members_id_head(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_members_id_head_with_http_info(id, **kwargs)
else:
(data) = self.portal_members_id_head_with_http_info(id, **kwargs)
return data
def portal_members_id_head_with_http_info(self, id, **kwargs):
"""
Check whether a model instance exists in the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_members_id_head_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portal_members_id_head" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portal_members_id_head`")
collection_formats = {}
resource_path = '/PortalMembers/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'HEAD',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2002',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portal_members_id_member_get(self, id, **kwargs):
"""
Fetches belongsTo relation member.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_members_id_member_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalMember id (required)
:param bool refresh:
:return: TeamMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_members_id_member_get_with_http_info(id, **kwargs)
else:
(data) = self.portal_members_id_member_get_with_http_info(id, **kwargs)
return data
def portal_members_id_member_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation member.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_members_id_member_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalMember id (required)
:param bool refresh:
:return: TeamMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portal_members_id_member_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portal_members_id_member_get`")
collection_formats = {}
resource_path = '/PortalMembers/{id}/member'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamMember',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portal_members_id_patch(self, id, **kwargs):
"""
Patch attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_members_id_patch(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalMember id (required)
:param PortalMember data: An object of model property name/value pairs
:return: PortalMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_members_id_patch_with_http_info(id, **kwargs)
else:
(data) = self.portal_members_id_patch_with_http_info(id, **kwargs)
return data
def portal_members_id_patch_with_http_info(self, id, **kwargs):
"""
Patch attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_members_id_patch_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalMember id (required)
:param PortalMember data: An object of model property name/value pairs
:return: PortalMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portal_members_id_patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portal_members_id_patch`")
collection_formats = {}
resource_path = '/PortalMembers/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalMember',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portal_members_id_portal_get(self, id, **kwargs):
"""
Fetches belongsTo relation portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_members_id_portal_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalMember id (required)
:param bool refresh:
:return: Portal
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_members_id_portal_get_with_http_info(id, **kwargs)
else:
(data) = self.portal_members_id_portal_get_with_http_info(id, **kwargs)
return data
def portal_members_id_portal_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> thread = api.portal_members_id_portal_get_with_http_info(id, callback=callback_function)
# works for 2min and 30 min data
two_minute = [327587572,247923021,149505899,147921014,471015233,383553764,166698220,
41232189,298165335,198456033,181240911,165370459,219752116,27533327,
39464221,441120034,140282069,32869782,365653206,229980646]
thirty_minute = [219820925,229945862,440765193,8591766,417544924,144599609,207440438,
219094190,233095291,219114641,233067231,233075513,219897252,233205654]
times = []
fluxes = []
if tic_number in two_minute:
lc_all = lk.search_lightcurve('TIC {}'.format(tic_number), author='SPOC', exptime=120).download_all()
for lc in lc_all:
# Correcting for systematics:
# CBV corrector (cotrending basis vectors)
cbvCorrector = CBVCorrector(lc, interpolate_cbvs=True, extrapolate_cbvs=True)
cbvCorrector.cbvs
cbv_type = ['MultiScale.1', 'MultiScale.2', 'MultiScale.3','Spike']
cbv_indices = [np.arange(1,9), np.arange(1,9), np.arange(1,9), 'ALL']
cbvCorrector.correct_gaussian_prior(cbv_type=cbv_type, cbv_indices=cbv_indices)
cbvCorrector_lc = cbvCorrector.corrected_lc
# Regression corrector
a = lk.correctors.RegressionCorrector(cbvCorrector_lc)
dm1 = create_spline_matrix(np.arange(len(cbvCorrector_lc)), n_knots=5, name='spline')
dm2 = DesignMatrix(np.arange(len(cbvCorrector_lc)), name='slope')
dmc = DesignMatrixCollection([dm1, dm2])
corrected_lc = a.correct(design_matrix_collection=dmc)
# PLD corrector (pixel level decorrelation)
corrector = corrected_lc.to_corrector()
lc = corrector.correct()
lc2 = lc.normalize().remove_nans().remove_outliers()
times.extend(lc2.time.value)
fluxes.extend(lc2.flux.value)
elif tic_number in thirty_minute:
lc_all = eleanor.multi_sectors(tic=tic_number, sectors='all')
for sector in lc_all:
sector_number = sector.sector
filename = ('/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/' + str(tic_number) +
'_sector_' + str(sector_number) + '.fits')
star=eleanor.Source(tic=tic_number)
data=eleanor.TargetData(star)
data.load(directory='/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/', fn=filename)
lc = data.to_lightkurve()
lc2 = lc.normalize().remove_nans().remove_outliers()
times.extend(lc2.time.value)
fluxes.extend(lc2.flux.value)
else:
pass
lc3 = lk.LightCurve(time=times, flux=fluxes)
pgram=lc3.to_periodogram(method='lombscargle', normalization='amplitude')
N=len(pgram.frequency)
sigma_rms = np.nanstd(lc3.flux)
# mean noise level in amplitude spectrum
sigma_amp = np.sqrt(np.pi/N)*sigma_rms
freq=pgram.frequency
amp=pgram.power
microhertz=11.574074074*freq
hertz = freq*0.000011574074074
per_hour = freq*0.041666666667
fig, ax1 = plt.subplots()
ax1.plot(microhertz, amp*100, color='k', label='TIC {}'.format(tic_number))
plt.xlim(min_freq, max_freq)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('Frequency [uHz]', fontsize=15)
plt.ylabel('Amplitude [percent]', fontsize=15)
plt.hlines(5*sigma_amp*100, 0, max_freq, colors='r', linestyles='dashed', label='5 x Mean\nNoise Level')
plt.legend(loc='upper left', fontsize=10)
ax2 = ax1.twiny()
plt.xlabel('Period [hours]', fontsize=15)
ax2.set_xticklabels([round(24*11.574074074/x, 1) for x in ax1.get_xticks()])
plt.xticks(fontsize=15)
def fold_lc(tic_number, sector_number):
# Plots folded, normalized lightcurve (in percent);
# zero at center with positive and negative values for other data points
# Works for 2min and 30min stars
# Need two inputs : TIC number and Sector number
# The filename variable should be in the local directory where the fits files are stored on your computer.
two_minute = [327587572,247923021,149505899,147921014,471015233,383553764,166698220,
41232189,298165335,198456033,181240911,165370459,219752116,27533327,
39464221,441120034,140282069,32869782,365653206,229980646]
thirty_minute = [219820925,229945862,440765193,8591766,417544924,144599609,207440438,
219094190,233095291,219114641,233067231,233075513,219897252,233205654]
if tic_number in two_minute:
lc = lk.search_lightcurve('TIC {}'.format(tic_number), author='SPOC',
sector = sector_number, exptime=120).download()
# Correcting for systematics:
# CBV corrector (cotrending basis vectors)
cbvCorrector = CBVCorrector(lc, interpolate_cbvs=True, extrapolate_cbvs=True)
cbvCorrector.cbvs
cbv_type = ['MultiScale.1', 'MultiScale.2', 'MultiScale.3','Spike']
cbv_indices = [np.arange(1,9), np.arange(1,9), np.arange(1,9), 'ALL']
cbvCorrector.correct_gaussian_prior(cbv_type=cbv_type, cbv_indices=cbv_indices)
cbvCorrector_lc = cbvCorrector.corrected_lc
# Regression corrector
a = lk.correctors.RegressionCorrector(cbvCorrector_lc)
dm1 = create_spline_matrix(np.arange(len(cbvCorrector_lc)), n_knots=5, name='spline')
dm2 = DesignMatrix(np.arange(len(cbvCorrector_lc)), name='slope')
dmc = DesignMatrixCollection([dm1, dm2])
corrected_lc = a.correct(design_matrix_collection=dmc)
# PLD corrector (pixel level decorrelation)
corrector = corrected_lc.to_corrector()
lc = corrector.correct()
elif tic_number in thirty_minute:
filename = ('/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/' + str(tic_number) +
'_sector_' + str(sector_number) + '.fits')
star=eleanor.Source(tic=tic_number)
data=eleanor.TargetData(star)
data.load(directory='/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/', fn=filename)
lc = data.to_lightkurve()
else:
print('TIC not a JWST standard')
return
lc2 = lc.remove_nans().remove_outliers().normalize()
pgram=lc2.to_periodogram(method='lombscargle', normalization='amplitude')
best_fit_period = pgram.period_at_max_power
lc3 = lc2.fold(period=best_fit_period, normalize_phase=True)
times=lc3.time.value
fluxes=lc3.flux.value
plt.scatter(times, (fluxes*100-100), s=3, c='k', label='TIC {}'.format(tic_number)+
'\nSector {}'.format(sector_number))
plt.xlabel('Phase', fontsize=15)
plt.ylabel('Normalized Flux [percent]', fontsize=15)
plt.legend(loc='lower right', fontsize=11)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
plt.show()
plt.close()
def crowd_sap_1star_1sector(tic_number, sector_number):
# returns crowd sap value of one sector of one star
# Function works for 2min data
# example:
lc = lk.search_lightcurve('TIC {}'.format(tic_number), author='SPOC',
sector = sector_number, exptime=120).download()
return lc.crowdsap
def crowd_sap_all_2min(array):
# Function works for 2min data
# saves all crowd saps for all sectors of all 2min stars to csv in excel format
# example:
# crowd_sap(two_minute)
with open('crwdsaps.csv', 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', dialect='excel')
spamwriter.writerow(['TIC', 'sector', 'crowd sap'])
for tic_number in array:
lc = lk.search_lightcurve('TIC {}'.format(tic_number),
author='SPOC', exptime=120).download_all()
z=0
while z < len(lc):
a = tic_number
b = lc[z].sector
c = lc[z].crowdsap
spamwriter.writerow([a] + [b] + [c])
z+=1
def bootstrap(tic_number, sector_number):
# Getting the noise level for 1 sector of 1 star; function returns 5 x avg noise level in percent!
# one in a thousand false alarm probability
# output is 5 times the noise level reported from previous equation
# previous equation -> sigma_amp = np.sqrt(np.pi/N)*sigma_rms,
# where sigma_amp = 1 * noise level, sigma_rms = stdev of fluxes, and N = number of data points
# the function does the following:
# takes all of the times and fluxes,
# shuffle the fluxes,
# takes a periodogram,
# measures the highest peak,
# keeps track of it,
# performs this 10K times
# ie does 10,000 shuffles
# report "99 % of the time never see a peak higher than _____ due to noise alone"
# since the fluxes are shuffled the time series has no periodicity
# therefore the max amplitude of the periodogram is due to noise alone
# period range for the periodogram is 2 hours (.0833 days) to 10 days
# max_amp_list = list of the highest amps for each of the 10K shuffles
# np.percentile(max_amp_list, 99.9)
two_minute = [327587572,247923021,149505899,147921014,471015233,383553764,166698220,
41232189,298165335,198456033,181240911,165370459,219752116,27533327,
39464221,441120034,140282069,32869782,365653206,229980646]
thirty_minute = [219820925,229945862,440765193,8591766,417544924,144599609,207440438,
219094190,233095291,219114641,233067231,233075513,219897252,233205654]
if tic_number in two_minute:
lc = lk.search_lightcurve('TIC {}'.format(tic_number), author='SPOC',
sector = sector_number, exptime=120).download()
# Correcting for systematics:
# CBV corrector (cotrending basis vectors)
cbvCorrector = CBVCorrector(lc, interpolate_cbvs=True, extrapolate_cbvs=True)
cbvCorrector.cbvs
cbv_type = ['MultiScale.1', 'MultiScale.2', 'MultiScale.3','Spike']
cbv_indices = [np.arange(1,9), np.arange(1,9), np.arange(1,9), 'ALL']
cbvCorrector.correct_gaussian_prior(cbv_type=cbv_type, cbv_indices=cbv_indices)
cbvCorrector_lc = cbvCorrector.corrected_lc
# Regression corrector
a = lk.correctors.RegressionCorrector(cbvCorrector_lc)
dm1 = create_spline_matrix(np.arange(len(cbvCorrector_lc)), n_knots=5, name='spline')
dm2 = DesignMatrix(np.arange(len(cbvCorrector_lc)), name='slope')
dmc = DesignMatrixCollection([dm1, dm2])
corrected_lc = a.correct(design_matrix_collection=dmc)
# PLD corrector (pixel level decorrelation)
corrector = corrected_lc.to_corrector()
lc = corrector.correct()
elif tic_number in thirty_minute:
filename = ('/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/' + str(tic_number) +
'_sector_' + str(sector_number) + '.fits')
star=eleanor.Source(tic=tic_number)
data=eleanor.TargetData(star)
data.load(directory='/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/', fn=filename)
lc = data.to_lightkurve()
else:
print('TIC not a JWST standard')
return
#########################################
def shuffle(aList):
# random.shuffle shuffles the list in place and returns None
random.shuffle(aList)
###########################################
lc2 = lc.remove_nans().remove_outliers().normalize()
times=lc2.time.value
fluxes=lc2.flux.value
max_amp_list = []
N = 10000 # number of shuffles
n = 0
while n < N:
shuffle(fluxes)
lc3 = lk.LightCurve(time = times, flux = fluxes)
pgram=lc3.to_periodogram(method='lombscargle', normalization='amplitude',
minimum_period = .0833, maximum_period = 10)
max_amp = pgram.max_power*100 # times 100 to turn into percent
max_amp_list.append(max_amp)
n+=1
# use 99.9 percentile because 10K shuffles and 1/1000 probability of amp = 5*(avg noise level)
five_times_noise_level = np.percentile(max_amp_list, 99.9)
return five_times_noise_level
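# Usage sketch: each call runs 10,000 shuffled periodograms, so it is slow.
# five_sigma = bootstrap(41232189, 3)  # 5 x mean noise level, in percent
# print('5x noise level: {:.4f} %'.format(five_sigma))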
def stitch(tic_number):
# makes a light curve of ALL available sectors for one star
# works for 2min and 30 min data
two_minute = [327587572,247923021,149505899,147921014,471015233,383553764,166698220,
41232189,298165335,198456033,181240911,165370459,219752116,27533327,
39464221,441120034,140282069,32869782,365653206,229980646]
thirty_minute = [219820925,229945862,440765193,8591766,417544924,144599609,207440438,
219094190,233095291,219114641,233067231,233075513,219897252,233205654]
times = []
fluxes = []
if tic_number in two_minute:
lc_all = lk.search_lightcurve('TIC {}'.format(tic_number), author='SPOC', exptime=120).download_all()
for lc in lc_all:
# Correcting for systematics:
# CBV corrector (cotrending basis vectors)
cbvCorrector = CBVCorrector(lc, interpolate_cbvs=True, extrapolate_cbvs=True)
cbvCorrector.cbvs
cbv_type = ['MultiScale.1', 'MultiScale.2', 'MultiScale.3','Spike']
cbv_indices = [np.arange(1,9), np.arange(1,9), np.arange(1,9), 'ALL']
cbvCorrector.correct_gaussian_prior(cbv_type=cbv_type, cbv_indices=cbv_indices)
cbvCorrector_lc = cbvCorrector.corrected_lc
# Regression corrector
a = lk.correctors.RegressionCorrector(cbvCorrector_lc)
dm1 = create_spline_matrix(np.arange(len(cbvCorrector_lc)), n_knots=5, name='spline')
dm2 = DesignMatrix(np.arange(len(cbvCorrector_lc)), name='slope')
dmc = DesignMatrixCollection([dm1, dm2])
corrected_lc = a.correct(design_matrix_collection=dmc)
# PLD corrector (pixel level decorrelation)
corrector = corrected_lc.to_corrector()
lc = corrector.correct()
lc2 = lc.normalize().remove_nans().remove_outliers()
times.extend(lc2.time.value)
fluxes.extend(lc2.flux.value)
elif tic_number in thirty_minute:
lc_all = eleanor.multi_sectors(tic=tic_number, sectors='all')
for sector in lc_all:
sector_number = sector.sector
filename = ('/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/' + str(tic_number) +
'_sector_' + str(sector_number) + '.fits')
star=eleanor.Source(tic=tic_number)
data=eleanor.TargetData(star)
data.load(directory='/Users/mkunz/Tess_JWST_calibration_stars/thirty_minz/', fn=filename)
lc = data.to_lightkurve()
lc2 = lc.normalize().remove_nans().remove_outliers()
times.extend(lc2.time.value)
fluxes.extend(lc2.flux.value)
else:
pass
lc3 = lk.LightCurve(time=times, flux=fluxes)
lc3.scatter(label='TIC {}'.format(tic_number))
def one_noise_level(N, fluxes):
# Function returns 1*average noise level (in percent!) in time series data
# This is an alternative to using the bootstrap method
# N = len(lc.time) ; ie number of data points
# sigma_amp = np.sqrt(np.pi/N)*sigma_rms,
# where sigma_amp = 1 * noise level, sigma_rms = stdev of fluxes, and N = number of data points
# Example of using this function:
# lc = lc_1sector(41232189, 3)
# N = len(lc.time)
# fluxes = lc.flux.value
# noise = one_noise_level(N, fluxes)
# print(noise)
sigma_rms = np.std(fluxes) # root mean square is standard deviation of flux values
sigma_amp = np.sqrt(np.pi/N)*sigma_rms
return sigma_amp*100 # multiply by 100 to be in percent units
def bootstrap_all_standards(name_of_csv):
# bootstraps all JWST standards and saves the data to csv in excel format
# info reported is TIC, sector, and 5*NoiseLevel
# Input is the name of the output csv file (name_of_csv)
import codecs
import csv
from io import StringIO, BytesIO
from unittest.mock import MagicMock
from zipfile import ZipFile
import pytest
from export.enums import FileFormat
from logs.logic.reporting.filters import (
DateDimensionFilter,
ForeignKeyDimensionFilter,
ExplicitDimensionFilter,
)
from logs.logic.reporting.slicer import FlexibleDataSlicer, SlicerConfigError
from logs.logic.reporting.export import (
FlexibleDataSimpleCSVExporter,
FlexibleDataZipCSVExporter,
FlexibleDataExcelExporter,
)
from logs.models import Metric, DimensionText, AccessLog, Dimension
from organizations.models import Organization
from publications.models import Platform, Title
def remap_row_keys_to_short_names(row: dict, primary_dimension, dimensions: list) -> dict:
result = {}
for key, value in row.items():
if key.startswith('grp-'):
parts = key[4:].split(',')
new_key_parts = []
for part, dimension in zip(parts, dimensions):
obj = dimension.objects.get(pk=int(part))
if hasattr(obj, 'short_name'):
new_key_parts.append(obj.short_name)
else:
new_key_parts.append(str(obj))
new_key = '-'.join(new_key_parts)
result[new_key] = value
elif key == 'pk':
new_value = primary_dimension.objects.get(pk=value).short_name
result[key] = new_value
return result
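# Illustration (hypothetical pks): with primary_dimension=Organization and
# dimensions=[Platform], a slicer row {'pk': 1, 'grp-3': 100} becomes
# {'pk': 'org1', 'pl1': 100}, assuming Organization(pk=1).short_name == 'org1'
# and Platform(pk=3).short_name == 'pl1'.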
@pytest.mark.django_db
class TestFlexibleDataSlicerComputations:
def test_data_generator(self, flexible_slicer_test_data):
# the 1 + 4 below is 1 for the report type with 1 dimension and 4 for the
# report type with 2 dimensions, which contributes 4x the number of records
assert AccessLog.objects.count() == 3 * 3 * 3 * 3 * 4 * 3 * (1 + 4)
# # uncomment the following to get the test data in a CSV file
# # it is useful when you want to use pivot table in a spreadsheet to check the calculations
# # it will produce a table with primary keys for all dimensions
# cursor = connection.cursor()
#
# with open('/tmp/TestFlexibleDataSlicer.csv', 'w') as out:
# cursor.copy_expert(
# "COPY (SELECT report_type_id, organization_id, platform_id, metric_id, target_id, "
# "date, dim1, dim2, value FROM logs_accesslog) TO stdout WITH CSV HEADER",
# out,
# )
def test_org_sum_all(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by:
DimensionFilter:
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
# without any groups/columns, the result would not make much sense
with pytest.raises(SlicerConfigError):
slicer.get_data()
def test_org_sum_by_platform(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by: platform
DimensionFilter:
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
slicer.add_group_by('platform')
data = list(slicer.get_data())
assert len(data) == Organization.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Organization, [Platform]) for row in data] == [
{'pk': 'org1', 'pl1': 519318, 'pl2': 717606, 'pl3': 915894},
{'pk': 'org2', 'pl1': 1114182, 'pl2': 1312470, 'pl3': 1510758},
{'pk': 'org3', 'pl1': 1709046, 'pl2': 1907334, 'pl3': 2105622},
]
def test_org_sum_by_platform_filter_metric(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by: platform
DimensionFilter: metric
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
slicer.add_group_by('platform')
metric = flexible_slicer_test_data['metrics'][0]
slicer.add_filter(ForeignKeyDimensionFilter('metric', [metric.pk]))
data = list(slicer.get_data())
assert len(data) == Organization.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Organization, [Platform]) for row in data] == [
{'pk': 'org1', 'pl1': 151074, 'pl2': 217170, 'pl3': 283266},
{'pk': 'org2', 'pl1': 349362, 'pl2': 415458, 'pl3': 481554},
{'pk': 'org3', 'pl1': 547650, 'pl2': 613746, 'pl3': 679842},
]
def test_org_sum_by_platform_filter_platform(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by: platform
DimensionFilter: platform
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
platforms = flexible_slicer_test_data['platforms'][1:]
slicer.add_filter(ForeignKeyDimensionFilter('platform', platforms), add_group=True)
data = list(slicer.get_data())
assert len(data) == Organization.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Organization, [Platform]) for row in data] == [
{'pk': 'org1', 'pl2': 717606, 'pl3': 915894},
{'pk': 'org2', 'pl2': 1312470, 'pl3': 1510758},
{'pk': 'org3', 'pl2': 1907334, 'pl3': 2105622},
]
def test_org_sum_by_platform_filter_platform_metric(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by: platform
DimensionFilter: platform, metric
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
platforms = flexible_slicer_test_data['platforms'][1:]
metrics = flexible_slicer_test_data['metrics'][1:]
slicer.add_filter(ForeignKeyDimensionFilter('platform', platforms), add_group=True)
slicer.add_filter(ForeignKeyDimensionFilter('metric', metrics), add_group=False)
data = list(slicer.get_data())
assert len(data) == Organization.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Organization, [Platform]) for row in data] == [
{'pk': 'org1', 'pl2': 500436, 'pl3': 632628},
{'pk': 'org2', 'pl2': 897012, 'pl3': 1029204},
{'pk': 'org3', 'pl2': 1293588, 'pl3': 1425780},
]
def test_org_sum_by_platform_filter_organization(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by: platform
DimensionFilter: organization
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
organizations = flexible_slicer_test_data['organizations'][1:]
slicer.add_filter(
ForeignKeyDimensionFilter('organization', [org.pk for org in organizations]),
add_group=False,
)
slicer.add_group_by('platform')
data = list(slicer.get_data())
assert len(data) == len(organizations)
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Organization, [Platform]) for row in data] == [
{'pk': 'org2', 'pl1': 1114182, 'pl2': 1312470, 'pl3': 1510758},
{'pk': 'org3', 'pl1': 1709046, 'pl2': 1907334, 'pl3': 2105622},
]
def test_org_sum_by_platform_filter_platform_organization(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by: platform
DimensionFilter: organization, platform
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
platforms = flexible_slicer_test_data['platforms'][1:]
organizations = flexible_slicer_test_data['organizations'][1:]
slicer.add_filter(
ForeignKeyDimensionFilter('platform', [pl.pk for pl in platforms]), add_group=True
)
slicer.add_filter(
ForeignKeyDimensionFilter('organization', [org.pk for org in organizations]),
add_group=False,
)
data = list(slicer.get_data())
assert len(data) == len(organizations)
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Organization, [Platform]) for row in data] == [
{'pk': 'org2', 'pl2': 1312470, 'pl3': 1510758},
{'pk': 'org3', 'pl2': 1907334, 'pl3': 2105622},
]
def test_org_sum_by_platform_metric_filter_platform_metric(self, flexible_slicer_test_data):
"""
Primary dimension: organization
Group by: platform, metric
DimensionFilter: platform, metric
"""
slicer = FlexibleDataSlicer(primary_dimension='organization')
platforms = flexible_slicer_test_data['platforms'][1:]
metrics = flexible_slicer_test_data['metrics'][1:]
slicer.add_filter(
ForeignKeyDimensionFilter('platform', [pl.pk for pl in platforms]), add_group=True
)
slicer.add_filter(
ForeignKeyDimensionFilter('metric', [met.pk for met in metrics]), add_group=True
)
data = list(slicer.get_data())
assert len(data) == Organization.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [
remap_row_keys_to_short_names(row, Organization, [Platform, Metric]) for row in data
] == [
{'pk': 'org1', 'pl2-m2': 239202, 'pl3-m2': 305298, 'pl2-m3': 261234, 'pl3-m3': 327330},
{'pk': 'org2', 'pl2-m2': 437490, 'pl3-m2': 503586, 'pl2-m3': 459522, 'pl3-m3': 525618},
{'pk': 'org3', 'pl2-m2': 635778, 'pl3-m2': 701874, 'pl2-m3': 657810, 'pl3-m3': 723906},
]
def test_platform_sum_by_metric_filter_dim1(self, flexible_slicer_test_data):
"""
Primary dimension: platform
Group by: metric
DimensionFilter: dim1
"""
slicer = FlexibleDataSlicer(primary_dimension='platform')
texts = flexible_slicer_test_data['dimension_values'][0][:2]
dim1_ids = DimensionText.objects.filter(text__in=texts).values_list('pk', flat=True)
slicer.add_filter(ExplicitDimensionFilter('dim1', dim1_ids), add_group=False)
slicer.add_group_by('metric')
data = list(slicer.get_data())
assert len(data) == Platform.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Platform, [Metric]) for row in data] == [
{'pk': 'pl1', 'm1': 698112, 'm2': 742176, 'm3': 786240},
{'pk': 'pl2', 'm1': 830304, 'm2': 874368, 'm3': 918432},
{'pk': 'pl3', 'm1': 962496, 'm2': 1006560, 'm3': 1050624},
]
@pytest.mark.parametrize(['order_by'], (('platform',), ('-platform',)))
def test_platform_order_by_platform(self, flexible_slicer_test_data, order_by):
"""
Primary dimension: platform
Group by: metric
DimensionFilter: None
Order by: platform (primary dimension)
"""
slicer = FlexibleDataSlicer(primary_dimension='platform')
slicer.add_group_by('metric')
slicer.order_by = [order_by]
data = list(slicer.get_data())
assert len(data) == Platform.objects.count()
remapped = [remap_row_keys_to_short_names(row, Platform, [Metric]) for row in data]
if order_by.startswith('-'):
assert remapped[0]['pk'] == 'pl3'
else:
assert remapped[0]['pk'] == 'pl1'
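# NOTE: a minimal sketch of the ordering convention exercised above - it
# mirrors Django's order_by syntax, where a bare dimension name sorts
# ascending and a leading '-' sorts descending:
#
#   slicer = FlexibleDataSlicer(primary_dimension='platform')
#   slicer.order_by = ['-platform']  # descending by the primary dimension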
@pytest.fixture(params=["-", ""])
def order_by_sign(self, request):
return request.param
@pytest.mark.parametrize(
['primary_dim'],
[
("platform",),
("metric",),
("target",),
("organization",),
("dim1",),
("dim2",),
("date",),
("date__year",),
],
)
def test_order_by_primary_dim(self, flexible_slicer_test_data, primary_dim, order_by_sign):
"""
Test that ordering by primary dimension works for any primary dimension - the purpose
of the test is to make sure it does not crash
"""
slicer = FlexibleDataSlicer(primary_dimension=primary_dim)
report_type = flexible_slicer_test_data['report_types'][1]
slicer.add_filter(ForeignKeyDimensionFilter('report_type', report_type))
slicer.add_group_by('metric')
slicer.order_by = [order_by_sign + primary_dim]
data = list(slicer.get_data())
assert len(data) > 0
def test_platform_sum_by_metric_dim1_filter_dim1(self, flexible_slicer_test_data):
"""
Primary dimension: platform
Group by: metric, dim1
DimensionFilter: dim1
This is not possible because dimensions have different meaning under different report types
so we do not allow group by explicit dimension unless report type is fixed to exactly one
"""
slicer = FlexibleDataSlicer(primary_dimension='platform')
texts = flexible_slicer_test_data['dimension_values'][0][:2]
dim1_ids = DimensionText.objects.filter(text__in=texts).values_list('pk', flat=True)
slicer.add_filter(ExplicitDimensionFilter('dim1', dim1_ids), add_group=True)
slicer.add_group_by('metric')
with pytest.raises(SlicerConfigError):
slicer.get_data()
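# A note on the failure mode above: grouping by an explicit dimension (dim1)
# is rejected with SlicerConfigError unless the report type is pinned down,
# because explicit dimensions carry different meanings under different report
# types; the next test shows the same query succeeding once a report_type
# filter is added.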
def test_platform_sum_by_metric_filter_dim1_rt(self, flexible_slicer_test_data):
"""
Primary dimension: platform
Group by: metric
DimensionFilter: dim1, report_type
"""
slicer = FlexibleDataSlicer(primary_dimension='platform')
texts = flexible_slicer_test_data['dimension_values'][0][:2]
report_type = flexible_slicer_test_data['report_types'][0]
dim1_ids = DimensionText.objects.filter(text__in=texts).values_list('pk', flat=True)
slicer.add_filter(ExplicitDimensionFilter('dim1', dim1_ids), add_group=False)
slicer.add_filter(ForeignKeyDimensionFilter('report_type', report_type))
slicer.add_group_by('metric')
data = list(slicer.get_data())
assert len(data) == Platform.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
assert [remap_row_keys_to_short_names(row, Platform, [Metric]) for row in data] == [
{'pk': 'pl1', 'm1': 24624, 'm2': 27216, 'm3': 29808},
{'pk': 'pl2', 'm1': 32400, 'm2': 34992, 'm3': 37584},
{'pk': 'pl3', 'm1': 40176, 'm2': 42768, 'm3': 45360},
]
def test_platform_sum_by_metric_dim1_filter_dim1_rt(self, flexible_slicer_test_data):
"""
Primary dimension: platform
Group by: metric, dim1
DimensionFilter: dim1, report_type
"""
slicer = FlexibleDataSlicer(primary_dimension='platform')
texts = flexible_slicer_test_data['dimension_values'][0][:2]
report_type = flexible_slicer_test_data['report_types'][0]
dim1_ids = DimensionText.objects.filter(text__in=texts).values_list('pk', flat=True)
slicer.add_filter(ExplicitDimensionFilter('dim1', dim1_ids), add_group=True)
slicer.add_filter(ForeignKeyDimensionFilter('report_type', report_type))
slicer.add_group_by('metric')
data = list(slicer.get_data())
assert len(data) == Platform.objects.count()
data.sort(key=lambda rec: rec['pk'])
# the following numbers were obtained by a separate calculation in a spreadsheet pivot table
# code/multidop_time.py
import matplotlib
# Needed for Blues
matplotlib.use('agg')
from matplotlib import rcParams
from matplotlib import pyplot as plt
# PyART
import pyart
import gzip
import sys
from scipy import ndimage
import shutil, os
from datetime import timedelta, datetime
import numpy as np
import tempfile
import re
import time
from copy import deepcopy
from IPython.display import Image, display
import multidop
import math
from ipyparallel import Client
from time import sleep
import time_procedures
def do_multidop_for_time(frame_time):
import matplotlib
# Needed for Blues
matplotlib.use('agg')
from matplotlib import rcParams
# PyART
import sys
from scipy import ndimage
import shutil, os
from datetime import timedelta, datetime
import tempfile
import glob
import re
import time
from copy import deepcopy
from IPython.display import Image, display
import multidop
import math
from netCDF4 import Dataset
import time_procedures
import numpy as np
year_str = "%04d" % frame_time.year
day_str = "%02d" % frame_time.day
month_str = "%02d" % frame_time.month
hour_str = "%02d" % frame_time.hour
minute_str = "%02d" % frame_time.minute
fname = (time_procedures.out_data_path + 'ddop/cf_compliant_grid'
+ year_str
+ month_str
+ day_str
+ hour_str
+ minute_str +'.nc')
print('Does file ' + fname + ' exist?')
if(not os.path.isfile(fname)):
one_day_ago = frame_time-timedelta(days=1, minutes=1)
sounding_times = time_procedures.get_sounding_times(one_day_ago.year,
one_day_ago.month,
one_day_ago.day,
one_day_ago.hour,
one_day_ago.minute,
frame_time.year,
frame_time.month,
frame_time.day,
frame_time.hour,
frame_time.minute,
minute_interval=60)
print(sounding_times)
sounding_time = sounding_times[len(sounding_times)-1]
Sounding_netcdf = time_procedures.get_sounding(sounding_time)
# Convert timestamps to datetime format
Time = Sounding_netcdf.variables['time_offset'][:]
base_time = Sounding_netcdf.variables['base_time'][:]
alt = Sounding_netcdf.variables['alt'][:]
u = Sounding_netcdf.variables['u_wind'][:]
v = Sounding_netcdf.variables['v_wind'][:]
base_timestamp = timedelta(seconds=float(base_time)) + datetime(1970,1,1)
Sounding_netcdf.close()
time_delta = datetime(frame_time.year,
frame_time.month,
frame_time.day,
frame_time.hour,
frame_time.minute,
frame_time.second) - base_timestamp
seconds_in_file = time_delta.days*(24*60*60) + time_delta.seconds
one_minute_later = frame_time+timedelta(minutes=5)
ten_minutes_ago = frame_time-timedelta(minutes=10)
one_minute_earlier = frame_time-timedelta(minutes=5)
times_berr, dates = time_procedures.get_radar_times_berr_cfradial(one_minute_earlier.year,
one_minute_earlier.month,
one_minute_earlier.day,
one_minute_earlier.hour,
one_minute_earlier.minute,
one_minute_later.year,
one_minute_later.month,
one_minute_later.day,
one_minute_later.hour,
one_minute_later.minute,
minute_interval=0)
sounding_file_name = (time_procedures.out_data_path + 'soundings/'
+ year_str
+ month_str
+ day_str
+ hour_str
+ minute_str)
file = open(sounding_file_name, 'w')
# Subsample the sounding to roughly 500 evenly spaced levels and write them into the file
us = u[u > -75]
vs = v[u > -75]
alts = alt[u > -75]
step = int(math.floor(len(us)/500))
if(step > 0):
for i in range(0, len(us), step):
input_string = (str(alts[i]) + ' ' + str(us[i]) + ' ' + str(vs[i]) + '\n')
file.write(input_string)
# If the balloon popped below 15 km (approximate tropopause), don't use the sounding
if(alts[-1] < 15000 or step == 0):
use_sounding = 0
else:
use_sounding = 1
file.close()
# Calculate texture of velocity field for Berrima and CPOL
# if previous frame is not available, just use (u,v) = 0
try:
Radar = time_procedures.get_radar_from_cpol_cfradial(frame_time)
except:
print('Could not load CPOL radar data file!')
return
try:
Radar_berr = time_procedures.get_radar_from_berr_cfradial(times_berr[0])
except:
print('Cannot find matching time from Berrima radar, skipping')
return
if(frame_time.year <= 2007):
cpol_ref_field = 'reflectivity'
cpol_vel_field = 'velocity'
else:
cpol_ref_field = 'Refl'
cpol_vel_field = 'Vel'
bt = time.time()
print('Calculating texture....')
try:
nyq_Gunn = Radar.instrument_parameters['nyquist_velocity']['data'][0]
nyq_Berr = Radar_berr.instrument_parameters['nyquist_velocity']['data'][0]
data = ndimage.filters.generic_filter(Radar.fields['corrected_velocity']['data'],
pyart.util.interval_std, size = (4,4),
extra_arguments = (-nyq_Gunn, nyq_Gunn))
filtered_data = ndimage.filters.median_filter(data, size = (4,4))
texture_field = pyart.config.get_metadata('corrected_velocity')
texture_field['data'] = filtered_data
Radar.add_field('velocity_texture', texture_field, replace_existing = True)
data = ndimage.filters.generic_filter(Radar_berr.fields['corrected_velocity']['data'],
pyart.util.interval_std, size = (4,4),
extra_arguments = (-nyq_Berr, nyq_Berr))
filtered_data = ndimage.filters.median_filter(data, size = (4,4))
texture_field = pyart.config.get_metadata('corrected_velocity')
texture_field['data'] = filtered_data
Radar_berr.add_field('velocity_texture', texture_field, replace_existing = True)
print('Done!')
print((time.time()-bt)/60.0, 'minutes to process')
except:
print('No unfolded velocities! Skipping!')
return
# Apply gatefilter based on velocity and despeckling
gatefilter_Gunn = pyart.correct.despeckle_field(Radar,
cpol_ref_field,
size=6)
gatefilter_Gunn.exclude_above('velocity_texture', 3)
gatefilter_Gunn.exclude_below(cpol_ref_field, 1)
gatefilter_Berr = pyart.correct.despeckle_field(Radar_berr,
'Refl',
size=6)
gatefilter_Berr.exclude_above('velocity_texture', 4)
gatefilter_Berr.exclude_below('Refl', 1)
# Change variable names to DT (reflectivity) and VT (velocity) expected by multidop
# If you needed to dealias or perform other corrections,
# this would be the time to start doing that.
# Both datasets already have aliasing corrections
cp = deepcopy(Radar.fields[cpol_ref_field]['data'])
texture = Radar.fields['velocity_texture']['data']
Radar.add_field_like(cpol_ref_field, 'DT', cp, replace_existing=True)
cp = deepcopy(Radar.fields['corrected_velocity']['data'])
Radar.add_field_like('corrected_velocity', 'VT', cp, replace_existing=True)
cp = deepcopy(Radar_berr.fields['Refl']['data'])
Radar_berr.add_field_like('Refl', 'DT',
cp, replace_existing=True)
cp = deepcopy(Radar_berr.fields['corrected_velocity']['data'])
Radar_berr.add_field_like('corrected_velocity', 'VT',
cp, replace_existing=True)
# The analysis engine currently expects the "missing_value" attribute
Radar.fields['DT']['missing_value'] = 1.0 * Radar.fields['DT']['_FillValue']
Radar_berr.fields['DT']['missing_value'] = 1.0 * Radar_berr.fields['DT']['_FillValue']
Radar.fields['VT']['missing_value'] = 1.0 * Radar.fields['VT']['_FillValue']
Radar_berr.fields['VT']['missing_value'] = 1.0 * Radar_berr.fields['VT']['_FillValue']
# Grid the data to a Cartesian grid. The dual-Doppler domain does not extend
# more than ~60 km from either radar, so there is no need to store more data than that.
grid_cpol = time_procedures.grid_radar(Radar,
origin=(Radar.latitude['data'][0],
Radar.longitude['data'][0]),
xlim=(-60000, 50000),
ylim=(-50000, 30000),
fields=['DT', 'VT'],
min_radius=750.0,
bsp=1.0, nb=1.5,
h_factor=3.0,
gatefilter=gatefilter_Gunn,
zlim=(500, 20000),
grid_shape=(40, 81, 111))
grid_Berr = time_procedures.grid_radar(Radar_berr,
origin=(Radar.latitude['data'][0],
Radar.longitude['data'][0]),
fields=['DT', 'VT'],
xlim=(-60000, 50000),
ylim=(-50000, 30000),
zlim=(500, 20000),
min_radius=750.0,
grid_shape=(40, 81, 111),
gatefilter=gatefilter_Berr,
bsp=1.0, nb=1.5,
h_factor=4.0)
# Berrima reflectivities are corrupt for many scans -- prefer CPOL reflectivities for grid
grid_Berr.fields['DT']['data'] = grid_cpol.fields['DT']['data']
# The analysis engine requires azimuth and elevation to be part of the grid.
# This information is computed from the grid geometry.
grid_cpol = multidop.angles.add_azimuth_as_field(grid_cpol)
grid_Berr = multidop.angles.add_azimuth_as_field(grid_Berr)
grid_cpol = multidop.angles.add_elevation_as_field(grid_cpol)
grid_Berr = multidop.angles.add_elevation_as_field(grid_Berr)
cpol_grid_name = (time_procedures.out_data_path + 'cpol/cpol_'
+ year_str
+ month_str
+ day_str
+ hour_str
+ minute_str +'.nc')
berr_grid_name = (time_procedures.out_data_path + 'berr/berr_'
+ year_str
+ month_str
+ day_str
+ hour_str
+ minute_str +'.nc')
# Save the input grids for later.
pyart.io.write_grid(cpol_grid_name, grid_cpol)
pyart.io.write_grid(berr_grid_name, grid_Berr)
# Load previous time period for storm motion (use 0 if no previous frame)
try:
Radar_prev = time_procedures.get_radar_from_cpol_cfradial(ten_minutes_ago)
print('Calculating storm motion....')
nyq_Gunn = Radar_prev.instrument_parameters['nyquist_velocity']['data'][0]
data = ndimage.filters.generic_filter(Radar_prev.fields['corrected_velocity']['data'],
pyart.util.interval_std, size = (4,4),
extra_arguments = (-nyq_Gunn, nyq_Gunn))
filtered_data = ndimage.filters.median_filter(data, size = (4,4))
texture_field = pyart.config.get_metadata('corrected_velocity')
texture_field['data'] = filtered_data
print('Gridding previous frame...')
cp = deepcopy(Radar_prev.fields['corrected_reflectivity']['data'])
cp = np.ma.masked_where(texture_field['data'] > 4, cp)
Radar_prev.add_field_like('corrected_reflectivity', 'DT', cp,
replace_existing=True)
grid_prev = time_procedures.grid_radar(Radar_prev,
origin=(Radar_prev.latitude['data'][0],
Radar_prev.longitude['data'][0]),
xlim=(-60000, 60000),
ylim=(-50000, 30000),
fields=['DT'],
zlim=(500, 20000),
grid_shape=(40, 121, 81))
(vt,ut) = pyart.retrieve.grid_displacement_pc(grid_prev, grid_cpol,
'DT', 9,
return_value='velocity')
except:
(vt,ut) = (0,0)
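# The displacement between the previous and current reflectivity grids
# (estimated by phase correlation above) is interpreted as the storm motion
# and fed to the analysis engine as the prescribed (UT, VT) vector below.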
# You don't have to define everything.
# Most of these keywords are default values.
# If you don't define something the program will provide a default value.
# Check parameters.py for what keyword default values are.
calc_file_name = (time_procedures.out_data_path + '/dda_files/cpol_calc'
+ year_str
+ month_str
+ day_str
+ hour_str
+ minute_str +'.dda')
frprmn_out_name = (time_procedures.out_data_path + '/fprmn/frprmn_out'
+ year_str
+ month_str
+ day_str
+ hour_str
+ minute_str +'.nc')
localfile = tempfile.NamedTemporaryFile()
# If sounding is available, favor constraint based on sounding
# vs. data, otherwise favor data more
if(use_sounding == 0):
C8b = 0.0
C1b = 1.0
sounding_file_name = None
else:
C1b = 0.1
C8b = 0.01
pd = {'dir': './',
'x': [-60000.0, 1000.0, 111], # start, step, count; max = start + step*(count-1)
'y': [-50000.0, 1000.0, 81],
'z': [500.0, 500.0, 40],
'grid': [grid_cpol.origin_longitude['data'][0],
grid_cpol.origin_latitude['data'][0],
50.0],
'files': [berr_grid_name,
cpol_grid_name],
'radar_names': ['Berrima', 'CPOL'],
'refl': 'DT', # Name of reflectivity field. Must be common between radars.
'vt': 'VT', # Name of velocity field. Must be common between radars.
'bgfile': sounding_file_name, # Name of sounding file
'writeout': localfile.name, # Name of output grid file
'frprmn_out': frprmn_out_name,
'min_cba': 30.0, # Minimum beam-crossing angle
'calc_params': calc_file_name, # .dda file for parameters
# related to minimization routine
'anel': 1, # 0 = Boussinesq approximation 1 = anelastic
'laplace': 0, # 0 = 1st order derivatives for smoothing, 1 = second
'read_dataweights': 2, # 0 = calculate data constraint weights/output,
# 1 = read from file, 2 = weigh all equally
'max_dist': 10.0, # How much distance analysis and observational
# grid must match in m
'cutoff': 0.0, # Deny observations below this level from analysis (m)
'UT': ut, # U of prescribed storm motion vector
'VT': vt, # V of prescribed storm motion vector
'output_error': 0, # 1 = output verification stats after each iteration
'weak_height': -1, # Sounding height constraint weakened in regions
# > 10 dBZ below this height (-1 = disabled)
'upper_bc': 1, # 1 = w = 0 as upper boundary
# FileName:AWGboard.py
# Author:
# E-mail:
# All right reserved.
# Modified: 2019.2.18
# Description:The class of AWG
import os
import socket
import time
import numpy as np
from . import AWGBoardDefines
# import AWGBoardDefines
import struct
from itertools import repeat
awg_para = {
'000423' : [[1.030,1.025,1.018,1.010], [79, 467, 154, 327]],
'000106' : [[1.045,1.045,1.049,1.038], [ 400, 315, 510, 435]],
'C18EFFFE1':[[1.162,1.16,1.144,1.138], [193,259,411,279]],
'C18EFFFE2':[[1.118,1.116,1.146,1.146], [46,106,14,-135]],
'C18EFFFE3':[[1.162,1.166,1.13,1.138], [493,352,396,422]],
'C18EFFFE4':[[1.316,1.252,1.262,1.248], [-576,215,-612,146]],
'C18EFFFCD':[[0.93,0.93,0.91,0.91], [226,315,260,123]],
'C18EFFFDA':[[0.93,0.92,0.92,0.91], [330,220,191,213]],
'C18EFFFDC':[[0.94,0.94,0.92,0.9], [160,312,162,116]],
'C18EFFFDD':[[0.91,0.92,0.9,0.92], [186,164,78,134]],
}
class RAWBoard(object):
"""
AWG board object.
Implements the connection to the AWG hardware and provides the basic access interface.
5 basic functions::
- ``init`` initialize the AWG board object
- ``connect`` connect to the AWG board; on success the board reports its ID/version string
- ``disconnect`` disconnect from the AWG board
- ``receive_data`` receive network data sent by the AWG
- ``send_data`` send network data to the AWG
4 basic commands::
- ``Write_Reg`` write register: set the configuration parameters of the AWG modules
- ``Read_Reg`` read register: read back the configuration parameters of the AWG modules
- ``Read_RAM`` read memory: read out the waveform memory of an AWG channel
- ``Write_RAM`` write memory: write the waveform memory of an AWG channel
1 basic status read::
- ``Read_Status_Block`` read an AWG status packet
"""
def __init__(self):
self.board_def = AWGBoardDefines.AWGBoardDefines()
self.port = 80
self.dev_id = 'AWG01'
self.dev_ip = '0.0.0.0'
# Create a TCP/IP socket
self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sockfd.settimeout(5.0)
self.soft_version = 1.1
def connect(self, host, dev_id=None):
"""
:param dev_id: optional device identifier; if not given, the last two octets of the IP address are used as the identifier
:param host: IP address of the AWG board
:return:
:notes::
Connect to the AWG board; on success, read the version/ID string from the board.
On failure, return an error, print the error message and close the socket.
"""
self.dev_ip = host
try:
self.sockfd.connect((host, self.port))
print('Connection established')
# Read the hardware ID
self.dev_id = self.Read_Reg(8, 0, 0)
print(f'AWG IP: {self.dev_ip}, ID: {self.dev_id}')
return 1
except socket.error as msg:
self.sockfd.close()
self.sockfd = None
print(f'ERROR:{msg}')
if self.sockfd is None:
print(f'ERROR:{host}: failed to open socket')
return -1
def disconnect(self):
"""
:return: None
:notes::
Disconnect from the current AWG object.
"""
if self.sockfd is not None:
print('Closing socket')
self.sockfd.close()
def Write_Reg(self, bank, addr, data):
"""
:param bank: BANK (device or module) the register belongs to, 4 bytes
:param addr: offset address, 4 bytes
:param data: data to write, 4 bytes
:return: 4 bytes indicating whether the write succeeded; "success" here means success at the TCP/IP level, treated as equivalent to the data having been written
:notes::
For this command the AWG returns 8 bytes: 4 bytes of status and 4 bytes of data.
"""
cmd = self.board_def.CMD_WRITE_REG
packet = struct.pack(">LLLL", cmd, bank, addr, data)
try:
self.send_data(packet)
except socket.timeout:
print("Timeout raised and caught")
stat = 0
try:
stat, data, data2 = self.receive_data()
except socket.timeout:
print("Timeout raised and caught")
if stat != 0x0:
print('Issue with Write Command stat: {}'.format(stat))
return self.board_def.STAT_ERROR
return self.board_def.STAT_SUCCESS
def Read_Reg(self, bank, addr, data=0):
"""
:param bank: BANK (device or module) the register belongs to, 4 bytes
:param addr: offset address, 4 bytes
:param data: data word sent with the request, 4 bytes
:return: 4 bytes; on success the value read back, otherwise an error status
"""
cmd = self.board_def.CMD_READ_REG
packet = struct.pack(">LLLL", cmd, bank, addr, data)
try:
self.send_data(packet)
except socket.timeout:
print("Timeout raised and caught")
recv_stat = 0
recv_data = 0
try:
recv_stat, recv_data, recv_data2 = self.receive_data()
except socket.timeout:
print("Timeout raised and caught")
if recv_stat != 0x0:
print('Issue with Reading Register stat={}!!!'.format(recv_stat))
return self.board_def.STAT_ERROR
if bank == 8:
return '{:X}'.format((recv_data2<<16)+(recv_data>>16))
return recv_data
def Read_RAM(self, bank, addr, length):
"""
:param bank: BANK (device or module) the data region belongs to, 4 bytes
:param addr: start address of the memory region to read
:param length: number of bytes to read
:return: the data read on success, or an error status on failure
:notes::
"""
cmd = self.board_def.CMD_READ_MEM
packet = struct.pack(">LLLL", cmd, bank, addr, length)
self.send_data(packet)
# next read from the socket, read length has 20 byte head
ram_data, recv_stat = self.receive_RAM(int(length + 20))
if recv_stat != 0x0:
print('Issue with Reading RAM stat: {}'.format(recv_stat))
return self.board_def.STAT_ERROR
return ram_data, recv_stat
def Write_RAM(self, bank, start_addr, data, length):
"""
:param bank: BANK (device or module) the data region belongs to, 4 bytes
:param start_addr: start address of the memory region to write
:param data: data to write, as a list of bytes
:param length: length of the data to write, in bytes
:return: an error status if the write failed
"""
cmd = self.board_def.CMD_WRITE_MEM
packet = struct.pack(">LLLL", cmd, bank, start_addr, length)
packet = packet + data
self.send_data(packet)
recv_stat, recv_data, recv_data2 = self.receive_data()
if recv_stat != 0x0:
print('Ram Write cmd Error stat={}!!!'.format(recv_stat))
return self.board_def.STAT_ERROR
def send_data(self, data):
"""
:param data: byte stream to send
:return: send status (number of bytes sent)
"""
totalsent = 0
while totalsent < len(data):
sent = self.sockfd.send(data[totalsent:])  # send the remaining tail, not the whole buffer
if sent == 0:
raise RuntimeError("Socket connection broken")
totalsent = totalsent + sent
return totalsent
def receive_data(self):
"""
:return: 20 bytes received from the network interface; only the responses to the 4 basic commands, i.e. five 4-byte words
:notes::
Receives data from the network interface; the packet echoes the sender's request words plus the value returned by a read request.
+------------------+----------------------+-----------------+---------------+-----------+
| write-reg marker | module ID            | register offset | written value | status    |
+------------------+----------------------+-----------------+---------------+-----------+
| 0xAAAAAAAA       | 32 bit (see mod IDs) | 32 bit          | 32 bit        | 0 = OK    |
+------------------+----------------------+-----------------+---------------+-----------+
| read-reg marker  | module ID            | register offset | written value | status    |
+------------------+----------------------+-----------------+---------------+-----------+
| 0x55555555       | 32 bit (see mod IDs) | 32 bit          | 32 bit        | 0 = OK    |
+------------------+----------------------+-----------------+---------------+-----------+
| write-RAM marker | module ID            | register offset | written value | status    |
+------------------+----------------------+-----------------+---------------+-----------+
| 0x55AAAA55       | 32 bit (see mod IDs) | 32 bit          | 32 bit        | 0 = OK    |
+------------------+----------------------+-----------------+---------------+-----------+
"""
chunks = b''
bytes_recd = 0
while bytes_recd < 20:
tmp = self.sockfd.recv(min(20 - bytes_recd, 20))
if not tmp:  # recv returns b'' when the peer closes the connection
raise RuntimeError("Socket connection broken")
chunks += tmp
bytes_recd = bytes_recd + len(tmp)
stat_tuple = struct.unpack('>LLLLL', chunks)
stat = stat_tuple[-1]
data = stat_tuple[-2]
data2 = stat_tuple[-3]
return stat, data, data2
def receive_RAM(self, length):
"""
:param length: number of bytes to read
:return: length bytes received from the network interface; only used when reading RAM or status packets
:notes::
Receives data from the network interface; length is in bytes.
Used together with the ``Read_RAM`` or ``Read_Status_RAM`` commands to read bulk data.
+-----------------+----------------------+-----------------+-------------+-----------+
| read-RAM marker | module ID            | register offset | data read   | status    |
+-----------------+----------------------+-----------------+-------------+-----------+
| 0xAA5555AA      | 32 bit (see mod IDs) | 32 bit          | up to 1 MB  | 0 = OK    |
+-----------------+----------------------+-----------------+-------------+-----------+
"""
ram_data = b''
bytes_recd = 0
self.sockfd.settimeout(5)
while bytes_recd < length:
chunk = self.sockfd.recv(min(length - bytes_recd, length))
ram_data += chunk
if not chunk:  # recv returns b'' when the peer closes the connection
raise RuntimeError("Socket connection broken")
bytes_recd = bytes_recd + len(chunk)
# The trailing 4 bytes are assumed to be a big-endian status word
return ram_data[:-4], struct.unpack('>L', ram_data[-4:])[0]
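# A minimal usage sketch of the raw board interface; the IP address and the
# register bank/offset below are hypothetical placeholders, not values any
# particular board is known to use:
#
#   board = RAWBoard()
#   if board.connect('192.168.1.100') == 1:
#       board.Write_Reg(1, 0x10, 0x1234)  # write a register
#       value = board.Read_Reg(1, 0x10)   # read it back
#       board.disconnect()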
class AWGBoard(RAWBoard):
def __init__(self):
super().__init__()
# Initialize core AWG parameters
self.zeros = list(repeat(0, 1024))
self.channel_count = 4
self.frequency = 2e9
self.isTrigSource = 0
self.awgTrigDelayOffset = 0
# Voltage headroom factor: 0.1x reserve so the waveform output does not clip
# when the user sets a large offset voltage
self.coe = [1.1] * 4
# DAC gain factors, used to calibrate output consistency across channels
self.channel_gain = [1] * self.channel_count
# Amplifier gain factors, used to calibrate output consistency across channels
self.diffampgain = [1] * self.channel_count
# Factory calibration values: at DAC code 32768 the overall AWG output
# is 0 V (DAC output at half of full scale)
self._calibrated_offset = [0] * self.channel_count
# User-defined offset voltages that shift the AWG output to the requested level
self.offset_volt = [0] * self.channel_count
# Default zero-level DAC codes
self.defaultcode = [32768] * self.channel_count
self.amp_gain = [32767] * self.channel_count
# Number of times each channel's command sequence is executed
self.loopcnt = [60000] * self.channel_count
# Whether each channel holds its output after the last command; default: no hold
self.holdoutput = [0] * self.channel_count
# wave length is the maximum data samples for each channel
self.max_sample_cnt = 200000
# Voltage range is the maximum voltage seen by a 50-ohm matched load
self.voltrange = [1.0] * self.channel_count
# Command sequences
self.commands = [None] * self.channel_count
# Waveform sets
self.waves = [None] * self.channel_count
self.bank_dic = {'awg': [self.board_def.CHIP_AWG_1,
self.board_def.CHIP_AWG_1,
self.board_def.CHIP_AWG_2,
self.board_def.CHIP_AWG_2],
'dac': [self.board_def.CHIP_9136_1,
self.board_def.CHIP_9136_1,
self.board_def.CHIP_9136_2,
self.board_def.CHIP_9136_2]
}
# Commonly used AWG methods follow
def setAWG_ID(self, dev_id):
"""
Set the ID string of the AWG.
:return: None
"""
self.dev_id = dev_id
def set_channel_gain(self, channel, gain=1.0):
"""
Set the gain of an AWG channel.
:param channel: AWG channel [1, 2, 3, 4]
:param gain: gain factor; must be <= 1
:return: None
"""
assert gain <= 1.0
self.channel_gain[channel - 1] = gain
def _commit_para(self, channel):
"""
Commit the configuration parameters to the AWG.
:param self:
:param channel: AWG channel [1, 2, 3, 4]
:return: None; a network communication failure means the command was not set
"""
self._channel_check(channel)
bank = self.bank_dic['awg'][channel - 1]
sub_ch = (((channel - 1) & 0x01) << 3)
reg_low = self.holdoutput[channel - 1] | (self.amp_gain[channel - 1] << 16)
reg_high = self.loopcnt[channel - 1]
addr = self.board_def.REG_CNFG_REG0 + sub_ch
self.Write_Reg(bank, addr, reg_low)
addr = addr + 4
self.Write_Reg(bank, addr, reg_high)
def SetLoop(self, channel, loopcnt):
"""
:param self: the AWG object
:param loopcnt: number of waveform output loops for channel ch, 2 bytes
:param channel: AWG channel (1, 2, 3, 4)
:return: None; a network communication failure means the command was not set
"""
self._channel_check(channel)
self.loopcnt[channel - 1] = loopcnt
self._commit_para(channel)
def SetAmpGain(self, channel, amp_gain):
"""
:param self: the AWG object
:param amp_gain: output gain factor for channel ch, in the range [-1, 1]
:param channel: AWG channel (1, 2, 3, 4)
:return: None; a network communication failure means the command was not set
"""
assert -1 <= amp_gain <= 1
self._channel_check(channel)
_b = struct.pack('<h', int(amp_gain*32767))
_hex = struct.unpack('<H', _b)[0]
self.amp_gain[channel - 1] = _hex
self._commit_para(channel)
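# Worked example of the two's-complement packing above (illustrative only):
# amp_gain = -0.5 -> int(-0.5 * 32767) = -16383; struct.pack('<h', -16383)
# reinterpreted as unsigned yields 0xC001 = 49153, the 16-bit code that is
# written into the configuration register by _commit_para.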
def Start(self, channel):
"""
:param self: the AWG object
:param channel: channel whose output to enable (1, 2, 3, 4)
:return: None; a network communication failure means the command was not set
:notes::
"""
assert channel in [1, 2, 3, 4]
self._channel_check(channel)
_channel = 1 << ((channel - 1) & 0x01)
_bank = self.bank_dic['awg'][channel - 1]
self.Write_Reg(_bank, self.board_def.REG_CTRL_REG, _channel)
def Stop(self, channel):
"""
:param self: the AWG object
:param channel: channel whose output to disable (1, 2, 3, 4)
:return: None; a network communication failure means the command was not set
:notes::
"""
assert channel in [1, 2, 3, 4]
self._channel_check(channel)
_channel = 16 << ((channel - 1) & 0x01)
_bank = self.bank_dic['awg'][channel - 1]
self.Write_Reg(_bank, self.board_def.REG_CTRL_REG, _channel)
@staticmethod
def _channel_check(channel):
"""
:param channel: channel to be checked
:return:
"""
assert channel in [1, 2, 3, 4]
def _SetDACMaxVolt(self, channel, volt):
"""
Set the maximum output voltage of the DAC chip.
:param channel: AWG channel (1, 2, 3, 4)
:param volt: maximum voltage value
:return:
:notes::
Full-scale current formula:
IOUTFS = 20.48 + (DACFSC_x × 13.1 mA) / 2^10
"""
assert volt <= 1.351
assert volt >= 0.696
cur = volt / 0.05
code = (cur - 20.48) * 1024 / 13.1
code = int(code) & 0x3FF
self._SetDACFullScale(channel, code)
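# Worked example (illustrative numbers only): volt = 1.0 V gives
# cur = 1.0 / 0.05 = 20 mA, so code = int((20 - 20.48) * 1024 / 13.1)
# = int(-37.52) = -37, and -37 & 0x3FF = 987 is the 10-bit value
# handed to _SetDACFullScale.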
def _SetDACFullScale(self, channel, data):
"""
Write the given full-scale code into the DAC chip.
:param self: the AWG object
:param channel: AWG channel [1,2,3,4]
:param data: gain value, a 10-bit two's-complement gain code
:return:
"""
self._channel_check(channel)
_bank = self.bank_dic['dac'][channel - 1]
addr = 0x40 + 4 * ((channel-1)&0x01)
self.Write_Reg(_bank, addr+1, data & 0xFF) # write the low byte of the full-scale value
self.Write_Reg(_bank, addr, (data >> 8) & 0x03) # write the high bits of the full-scale value
def SetOutputHold(self, channel, is_hold):
"""
:param self: the AWG object
:param channel: channel number (1, 2, 3, 4)
:param is_hold: whether the channel output is held: 1 = hold, 0 = do not hold
:return: None; a network communication failure means the command was not set
:notes::
While holding, the output keeps repeating the last 8 samples of the
waveform (be aware of this behaviour). This makes it possible to produce
very long plateau waveforms from very few sample values.
Without holding, the output returns to the configured default voltage once the waveform has finished.
"""
self._channel_check(channel)
self.holdoutput[channel - 1] = is_hold
self._commit_para(channel)
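# Usage sketch: holding lets a short waveform end in a long plateau, since
# the last 8 samples repeat until the channel is stopped (the instance name
# and channel below are illustrative):
#
#   awg.SetOutputHold(1, 1)  # channel 1 keeps repeating its tail
#   awg.SetOutputHold(1, 0)  # channel 1 falls back to the default level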
def SetOffsetVolt(self, channel, offset_volt):
"""
Set the offset voltage of a channel; the offset is applied to the waveform
of the current channel. Since it is implemented through DAC codes, it
consumes part of the DAC's usable code range.
:param self: the AWG object
:param channel: channel number (1, 2, 3, 4)
:param offset_volt: the offset voltage to apply
:return: None; a network communication failure means the command was not set
"""
self._channel_check(channel)
assert abs(offset_volt) < 0.1  # the offset voltage must stay within ±0.1 V
volt = offset_volt
if abs(volt) > self.voltrange[channel - 1]:
print(f'Offset voltage {volt} is outside the AWG range [{-self.voltrange[channel-1]}, {self.voltrange[channel-1]}]; it will be set to 0')
volt = 0
code = int(volt * 65535 / self.coe[channel - 1] / (2 * self.voltrange[channel - 1]))
self._SetDacOffset(channel, code)
self.offset_volt[channel - 1] = offset_volt
def _SetCalibratedOffsetCode(self, channel, offset_code):
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 09:10:58 2020
Module for visualising Metrica tracking and event data
Data can be found at: https://github.com/metrica-sports/sample-data
@author: <NAME> (@EightyFivePoint)
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
import Metrica_PitchControl as mpc
def plot_pitch( field_dimen = (106.0,68.0), field_color ='green', linewidth=2, markersize=20):
""" plot_pitch
Plots a soccer pitch. All distance units converted to meters.
Parameters
-----------
field_dimen: (length, width) of field in meters. Default is (106,68)
field_color: color of field. options are {'green','white'}
linewidth : width of lines. default = 2
markersize : size of markers (e.g. penalty spot, centre spot, posts). default = 20
Returns
-----------
fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
"""
fig,ax = plt.subplots(figsize=(12,8)) # create a figure
# decide what color we want the field to be. Default is green, but can also choose white
if field_color=='green':
ax.set_facecolor('mediumseagreen')
lc = 'whitesmoke' # line color
pc = 'w' # 'spot' colors
elif field_color=='white':
lc = 'k'
pc = 'k'
# ALL DIMENSIONS IN m
border_dimen = (3,3) # include a border around the field of width 3m
meters_per_yard = 0.9144 # unit conversion from yards to meters
half_pitch_length = field_dimen[0]/2. # length of half pitch
half_pitch_width = field_dimen[1]/2. # width of half pitch
signs = [-1,1]
# Soccer field dimensions typically defined in yards, so we need to convert to meters
goal_line_width = 8*meters_per_yard
box_width = 20*meters_per_yard
box_length = 6*meters_per_yard
area_width = 44*meters_per_yard
area_length = 18*meters_per_yard
penalty_spot = 12*meters_per_yard
corner_radius = 1*meters_per_yard
D_length = 8*meters_per_yard
D_radius = 10*meters_per_yard
D_pos = 12*meters_per_yard
centre_circle_radius = 10*meters_per_yard
# plot half way line # center circle
ax.plot([0,0],[-half_pitch_width,half_pitch_width],lc,linewidth=linewidth)
ax.scatter(0.0,0.0,marker='o',facecolor=lc,linewidth=0,s=markersize)
y = np.linspace(-1,1,50)*centre_circle_radius
x = np.sqrt(centre_circle_radius**2-y**2)
ax.plot(x,y,lc,linewidth=linewidth)
ax.plot(-x,y,lc,linewidth=linewidth)
for s in signs: # plots each line separately
# plot pitch boundary
ax.plot([-half_pitch_length,half_pitch_length],[s*half_pitch_width,s*half_pitch_width],lc,linewidth=linewidth)
ax.plot([s*half_pitch_length,s*half_pitch_length],[-half_pitch_width,half_pitch_width],lc,linewidth=linewidth)
# goal posts & line
ax.plot( [s*half_pitch_length,s*half_pitch_length],[-goal_line_width/2.,goal_line_width/2.],pc+'s',markersize=6*markersize/20.,linewidth=linewidth)
# 6 yard box
ax.plot([s*half_pitch_length,s*half_pitch_length-s*box_length],[box_width/2.,box_width/2.],lc,linewidth=linewidth)
ax.plot([s*half_pitch_length,s*half_pitch_length-s*box_length],[-box_width/2.,-box_width/2.],lc,linewidth=linewidth)
ax.plot([s*half_pitch_length-s*box_length,s*half_pitch_length-s*box_length],[-box_width/2.,box_width/2.],lc,linewidth=linewidth)
# penalty area
ax.plot([s*half_pitch_length,s*half_pitch_length-s*area_length],[area_width/2.,area_width/2.],lc,linewidth=linewidth)
ax.plot([s*half_pitch_length,s*half_pitch_length-s*area_length],[-area_width/2.,-area_width/2.],lc,linewidth=linewidth)
ax.plot([s*half_pitch_length-s*area_length,s*half_pitch_length-s*area_length],[-area_width/2.,area_width/2.],lc,linewidth=linewidth)
# penalty spot
ax.scatter(s*half_pitch_length-s*penalty_spot,0.0,marker='o',facecolor=lc,linewidth=0,s=markersize)
# corner flags
y = np.linspace(0,1,50)*corner_radius
x = np.sqrt(corner_radius**2-y**2)
ax.plot(s*half_pitch_length-s*x,-half_pitch_width+y,lc,linewidth=linewidth)
ax.plot(s*half_pitch_length-s*x,half_pitch_width-y,lc,linewidth=linewidth)
# draw the D
y = np.linspace(-1,1,50)*D_length # D_length is the chord of the circle that defines the D
x = np.sqrt(D_radius**2-y**2)+D_pos
ax.plot(s*half_pitch_length-s*x,y,lc,linewidth=linewidth)
# remove axis labels and ticks
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
# set axis limits
xmax = field_dimen[0]/2. + border_dimen[0]
ymax = field_dimen[1]/2. + border_dimen[1]
ax.set_xlim([-xmax,xmax])
ax.set_ylim([-ymax,ymax])
ax.set_axisbelow(True)
return fig,ax
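# A minimal usage sketch (requires only matplotlib, no tracking data):
#
#   fig, ax = plot_pitch(field_color='white')
#   fig.savefig('pitch.png')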
def plot_frame( hometeam, awayteam, figax=None, team_colors=('r','b'), field_dimen = (106.0,68.0), include_player_velocities=False, PlayerMarkerSize=10, PlayerAlpha=0.7, annotate=False ):
""" plot_frame( hometeam, awayteam )
Plots a frame of Metrica tracking data (player positions and the ball) on a football pitch. All distances should be in meters.
Parameters
-----------
hometeam: row (i.e. instant) of the home team tracking data frame
awayteam: row of the away team tracking data frame
fig,ax: Can be used to pass in the (fig,ax) objects of a previously generated pitch. Set to (fig,ax) to use an existing figure, or None (the default) to generate a new pitch plot,
team_colors: Tuple containing the team colors of the home & away team. Default is 'r' (red, home team) and 'b' (blue away team)
field_dimen: tuple containing the length and width of the pitch in meters. Default is (106,68)
include_player_velocities: Boolean variable that determines whether player velocities are also plotted (as quivers). Default is False
PlayerMarkerSize: size of the individual player markers. Default is 10
PlayerAlpha: alpha (transparency) of player markers. Default is 0.7
annotate: Boolean variable that determines whether player jersey numbers are added to the plot (default is False)
Returns
-----------
fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
"""
if figax is None: # create new pitch
fig,ax = plot_pitch( field_dimen = field_dimen )
else: # overlay on a previously generated pitch
fig,ax = figax # unpack tuple
# plot home & away teams in order
for team,color in zip( [hometeam,awayteam], team_colors) :
x_columns = [c for c in team.keys() if c[-2:].lower()=='_x' and c!='ball_x'] # column header for player x positions
y_columns = [c for c in team.keys() if c[-2:].lower()=='_y' and c!='ball_y'] # column header for player y positions
ax.plot( team[x_columns], team[y_columns], color+'o', markersize=PlayerMarkerSize, alpha=PlayerAlpha ) # plot player positions
if include_player_velocities:
vx_columns = ['{}_vx'.format(c[:-2]) for c in x_columns] # column headers for player x velocities
vy_columns = ['{}_vy'.format(c[:-2]) for c in y_columns] # column headers for player y velocities
ax.quiver( team[x_columns], team[y_columns], team[vx_columns], team[vy_columns], color=color, scale_units='inches', scale=10.,width=0.0015,headlength=5,headwidth=3,alpha=PlayerAlpha)
if annotate:
[ ax.text( team[x]+0.5, team[y]+0.5, x.split('_')[1], fontsize=10, color=color ) for x,y in zip(x_columns,y_columns) if not ( np.isnan(team[x]) or np.isnan(team[y]) ) ]
# plot ball
ax.plot( hometeam['ball_x'], hometeam['ball_y'], 'ko', markersize=6, alpha=1.0, linewidth=0)
return fig,ax
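# Usage sketch, assuming the Metrica tracking data has already been read into
# home/away DataFrames elsewhere (variable and frame names are illustrative):
#
#   fig, ax = plot_frame(tracking_home.loc[10000], tracking_away.loc[10000],
#                        include_player_velocities=True, annotate=True)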
def save_match_clip(hometeam,awayteam, fpath, fname='clip_test', figax=None, frames_per_second=25, team_colors=('r','b'), field_dimen = (106.0,68.0), include_player_velocities=False, PlayerMarkerSize=10, PlayerAlpha=0.7):
""" save_match_clip( hometeam, awayteam, fpath )
Generates a movie from Metrica tracking data, saving it in the 'fpath' directory with name 'fname'
Parameters
-----------
hometeam: home team tracking data DataFrame. Movie will be created from all rows in the DataFrame
awayteam: away team tracking data DataFrame. The indices *must* match those of the hometeam DataFrame
fpath: directory to save the movie
fname: movie filename. Default is 'clip_test.mp4'
fig,ax: Can be used to pass in the (fig,ax) objects of a previously generated pitch. Set to (fig,ax) to use an existing figure, or None (the default) to generate a new pitch plot,
frames_per_second: frames per second to assume when generating the movie. Default is 25.
team_colors: Tuple containing the team colors of the home & away team. Default is 'r' (red, home team) and 'b' (blue away team)
field_dimen: tuple containing the length and width of the pitch in meters. Default is (106,68)
include_player_velocities: Boolean variable that determines whether player velocities are also plotted (as quivers). Default is False
PlayerMarkerSize: size of the individual player markers. Default is 10
PlayerAlpha: alpha (transparency) of player markers. Default is 0.7
Returns
-----------
fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
"""
# check that indices match first
assert np.all( hometeam.index==awayteam.index ), "Home and away team Dataframe indices must be the same"
# in which case use home team index
index = hometeam.index
# Set figure and movie settings
FFMpegWriter = animation.writers['ffmpeg']
metadata = dict(title='Tracking Data', artist='Matplotlib', comment='Metrica tracking data clip')
writer = FFMpegWriter(fps=frames_per_second, metadata=metadata)
fname = fpath + '/' + fname + '.mp4' # path and filename
# create football pitch
if figax is None:
fig,ax = plot_pitch(field_dimen=field_dimen)
else:
fig,ax = figax
fig.set_tight_layout(True)
# Generate movie
print("Generating movie...",end='')
with writer.saving(fig, fname, 100):
for i in index:
figobjs = [] # this is used to collect up all the axis objects so that they can be deleted after each iteration
for team,color in zip( [hometeam.loc[i],awayteam.loc[i]], team_colors) :
x_columns = [c for c in team.keys() if c[-2:].lower()=='_x' and c!='ball_x'] # column header for player x positions
y_columns = [c for c in team.keys() if c[-2:].lower()=='_y' and c!='ball_y'] # column header for player y positions
objs, = ax.plot( team[x_columns], team[y_columns], color+'o', markersize=PlayerMarkerSize, alpha=PlayerAlpha ) # plot player positions
figobjs.append(objs)
if include_player_velocities:
vx_columns = ['{}_vx'.format(c[:-2]) for c in x_columns] # column headers for player x velocities
vy_columns = ['{}_vy'.format(c[:-2]) for c in y_columns] # column headers for player y velocities
objs = ax.quiver( team[x_columns], team[y_columns], team[vx_columns], team[vy_columns], color=color, scale_units='inches', scale=10.,width=0.0015,headlength=5,headwidth=3,alpha=PlayerAlpha)
figobjs.append(objs)
# plot ball
objs, = ax.plot( team['ball_x'], team['ball_y'], 'ko', markersize=6, alpha=1.0, linewidth=0)
figobjs.append(objs)
# include match time at the top
frame_minute = int( team['Time [s]']/60. )
frame_second = ( team['Time [s]']/60. - frame_minute ) * 60.
timestring = "%d:%1.2f" % ( frame_minute, frame_second )
objs = ax.text(-2.5,field_dimen[1]/2.+1., timestring, fontsize=14 )
figobjs.append(objs)
writer.grab_frame()
# Delete all axis objects (other than pitch lines) in preparation for the next frame
for figobj in figobjs:
figobj.remove()
print("done")
plt.clf()
plt.close(fig)
def plot_events( events, figax=None, field_dimen = (106.0,68), indicators
from smartvadhis2.cli import remove_keys, print_error_categories
def test_remove_keys():
metadata = {
"programStages": [
{
"lastUpdated": "2018-04-19T13:38:20.185",
"id": "pQ8gaWKD3pi",
"created": "2018-03-07T19:31:51.060",
"name": "<NAME>",
"executionDateLabel": "Date of death",
"allowGenerateNextVisit": False,
"validCompleteOnly": False,
"preGenerateUID": False,
"openAfterEnrollment": False,
"repeatable": False,
"captureCoordinates": False,
"remindCompleted": False,
"displayGenerateEventBox": False,
"generatedByEnrollmentDate": False,
"autoGenerateEvent": True,
"sortOrder": 1,
"hideDueDate": False,
"blockEntryForm": False,
"minDaysFromStart": 0,
"program": {
"id": "HPrJOsYuM1K"
},
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"notificationTemplates": [],
"programStageDataElements": [
{
"lastUpdated": "2018-04-19T13:38:20.042",
"id": "kI1px9KVtbz",
"created": "2018-03-07T19:31:51.060",
"displayInReports": False,
"externalAccess": False,
"renderOptionsAsRadio": False,
"allowFutureDate": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 0,
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "C2OT4YktNGX"
},
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": []
},
{
"lastUpdated": "2018-04-19T13:38:20.049",
"id": "mxZzEsvsdcF",
"created": "2018-03-07T19:31:51.061",
"displayInReports": False,
"externalAccess": False,
"renderOptionsAsRadio": False,
"allowFutureDate": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 1,
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "t6O2A1gou0g"
},
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": []
}
],
"translations": [],
"attributeValues": [],
"programStageSections": []
}
],
"categoryOptionCombos": [],
"programs": [
{
"lastUpdated": "2018-04-19T13:20:49.440",
"id": "HPrJOsYuM1K",
"created": "2018-03-07T19:31:51.129",
"name": "<NAME>",
"shortName": "VA",
"publicAccess": "rw------",
"completeEventsExpiryDays": 0,
"description": "Data integration program from smartvadhis2",
"ignoreOverdueEvents": False,
"skipOffline": False,
"captureCoordinates": False,
"displayFrontPageList": False,
"onlyEnrollOnce": False,
"programType": "WITHOUT_REGISTRATION",
"version": 4,
"selectIncidentDatesInFuture": False,
"displayIncidentDate": False,
"selectEnrollmentDatesInFuture": False,
"expiryDays": 0,
"useFirstStageDuringRegistration": False,
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"user": {
"id": "rJYzV7B0Md5"
},
"programTrackedEntityAttributes": [],
"notificationTemplates": [],
"translations": [],
"organisationUnits": [],
"userGroupAccesses": [],
"attributeValues": [],
"validationCriterias": [],
"programStages": [
{
"id": "pQ8gaWKD3pi"
}
],
"userAccesses": []
}
],
"optionSets": [
{
"code": "VA-CoD",
"created": "2018-04-19T13:07:32.552",
"lastUpdated": "2018-04-19T13:07:32.724",
"name": "VA- Cause of Death",
"id": "tMkHCKkuxpD",
"publicAccess": "rw------",
"version": 0,
"valueType": "TEXT",
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"user": {
"id": "rJYzV7B0Md5"
},
"userGroupAccesses": [],
"attributeValues": [],
"translations": [],
"userAccesses": [],
"options": [
{
"id": "cjrgMGLMwbu"
}
]
},
{
"code": "VA-Sex",
"created": "2018-04-19T06:55:22.425",
"lastUpdated": "2018-04-19T06:55:22.442",
"name": "VA- Sex",
"id": "fXYBfOoLXgh",
"publicAccess": "rw------",
"version": 0,
"valueType": "TEXT",
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"user": {
"id": "rJYzV7B0Md5"
},
"userGroupAccesses": [],
"attributeValues": [],
"translations": [],
"userAccesses": [],
"options": [
{
"id": "xwtfJn76te7"
},
{
"id": "QgHSjOft8v4"
},
{
"id": "LbjOq1YRCW2"
},
{
"id": "K7wBJlg8eRQ"
},
{
"id": "k8sIQ2bEaYT"
}
]
}
],
"categories": [],
"programStageDataElements": [
{
"created": "2018-04-19T12:05:57.227",
"lastUpdated": "2018-04-19T13:38:20.167",
"id": "hGHimNEVl0B",
"displayInReports": False,
"renderOptionsAsRadio": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 15,
"allowFutureDate": False,
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "NsmhGfGhFRO"
}
},
{
"created": "2018-03-07T19:31:51.062",
"lastUpdated": "2018-04-19T13:38:20.131",
"id": "Z06nEMh81QI",
"displayInReports": False,
"renderOptionsAsRadio": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 10,
"allowFutureDate": False,
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "NM9CFZmYq9S"
}
}
],
"dataElements": [
{
"code": "gen_5_0b",
"lastUpdated": "2018-04-19T13:42:10.250",
"id": "VEGzj76HCEN",
"created": "2018-04-19T13:42:10.250",
"name": "VA- Second Surname",
"shortName": "Surname 2",
"aggregationType": "NONE",
"domainType": "TRACKER",
"publicAccess": "rw------",
"description": "Second surname",
"valueType": "TEXT",
"formName": "Second Surname",
"zeroIsSignificant": False,
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"user": {
"id": "rJYzV7B0Md5"
},
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": [],
"legendSets": [],
"aggregationLevels": []
},
{
"lastUpdated": "2018-04-19T13:42:10.256",
"id": "lFKqfDj9Rhk",
"created": "2018-04-19T13:42:10.256",
"name": "VA- Age category",
"shortName": "Age category",
"aggregationType": "COUNT",
"domainType": "TRACKER",
"publicAccess": "rw------",
"description": "Age category",
"valueType": "INTEGER_POSITIVE",
"formName": "Age category",
"zeroIsSignificant": False,
"optionSet": {
"id": "AXlVIlSE9zB"
},
"lastUpdatedBy": {
"id": "rJYzV7B0Md5"
},
"user": {
"id": "rJYzV7B0Md5"
},
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": [],
"legendSets": [],
"aggregationLevels": []
}
],
"options": [
{
"lastUpdated": "2018-04-19T13:07:32.584",
"code": "107",
"created": "2018-04-19T13:07:32.551",
"name": "Adult - Colorectal Cancer (C18)",
"id": "U3gJ2b6mW1k",
"sortOrder": 6,
"optionSet": {
"id": "tMkHCKkuxpD"
},
"translations": [],
"attributeValues": []
},
{
"lastUpdated": "2018-04-19T13:07:32.584",
"code": "135",
"created": "2018-04-19T13:07:32.552",
"name": "Adult - Chronic Respiratory (J44)",
"id": "UgfoWoGUBO5",
"sortOrder": 32,
"optionSet": {
"id": "tMkHCKkuxpD"
},
"translations": [],
"attributeValues": []
}
]
}
expected = {
"programStages": [
{
"lastUpdated": "2018-04-19T13:38:20.185",
"id": "pQ8gaWKD3pi",
"created": "2018-03-07T19:31:51.060",
"name": "<NAME>",
"executionDateLabel": "Date of death",
"allowGenerateNextVisit": False,
"validCompleteOnly": False,
"preGenerateUID": False,
"openAfterEnrollment": False,
"repeatable": False,
"captureCoordinates": False,
"remindCompleted": False,
"displayGenerateEventBox": False,
"generatedByEnrollmentDate": False,
"autoGenerateEvent": True,
"sortOrder": 1,
"hideDueDate": False,
"blockEntryForm": False,
"minDaysFromStart": 0,
"program": {
"id": "HPrJOsYuM1K"
},
"notificationTemplates": [],
"programStageDataElements": [
{
"lastUpdated": "2018-04-19T13:38:20.042",
"id": "kI1px9KVtbz",
"created": "2018-03-07T19:31:51.060",
"displayInReports": False,
"externalAccess": False,
"renderOptionsAsRadio": False,
"allowFutureDate": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 0,
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "C2OT4YktNGX"
},
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": []
},
{
"lastUpdated": "2018-04-19T13:38:20.049",
"id": "mxZzEsvsdcF",
"created": "2018-03-07T19:31:51.061",
"displayInReports": False,
"externalAccess": False,
"renderOptionsAsRadio": False,
"allowFutureDate": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 1,
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "t6O2A1gou0g"
},
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": []
}
],
"translations": [],
"attributeValues": [],
"programStageSections": []
}
],
"categoryOptionCombos": [],
"programs": [
{
"lastUpdated": "2018-04-19T13:20:49.440",
"id": "HPrJOsYuM1K",
"created": "2018-03-07T19:31:51.129",
"name": "<NAME>",
"shortName": "VA",
"publicAccess": "rw------",
"completeEventsExpiryDays": 0,
"description": "Data integration program from smartvadhis2",
"ignoreOverdueEvents": False,
"skipOffline": False,
"captureCoordinates": False,
"displayFrontPageList": False,
"onlyEnrollOnce": False,
"programType": "WITHOUT_REGISTRATION",
"version": 4,
"selectIncidentDatesInFuture": False,
"displayIncidentDate": False,
"selectEnrollmentDatesInFuture": False,
"expiryDays": 0,
"useFirstStageDuringRegistration": False,
"programTrackedEntityAttributes": [],
"notificationTemplates": [],
"translations": [],
"organisationUnits": [],
"userGroupAccesses": [],
"attributeValues": [],
"validationCriterias": [],
"programStages": [
{
"id": "pQ8gaWKD3pi"
}
],
"userAccesses": []
}
],
"optionSets": [
{
"code": "VA-CoD",
"created": "2018-04-19T13:07:32.552",
"lastUpdated": "2018-04-19T13:07:32.724",
"name": "VA- Cause of Death",
"id": "tMkHCKkuxpD",
"publicAccess": "rw------",
"version": 0,
"valueType": "TEXT",
"userGroupAccesses": [],
"attributeValues": [],
"translations": [],
"userAccesses": [],
"options": [
{
"id": "cjrgMGLMwbu"
}
]
},
{
"code": "VA-Sex",
"created": "2018-04-19T06:55:22.425",
"lastUpdated": "2018-04-19T06:55:22.442",
"name": "VA- Sex",
"id": "fXYBfOoLXgh",
"publicAccess": "rw------",
"version": 0,
"valueType": "TEXT",
"userGroupAccesses": [],
"attributeValues": [],
"translations": [],
"userAccesses": [],
"options": [
{
"id": "xwtfJn76te7"
},
{
"id": "QgHSjOft8v4"
},
{
"id": "LbjOq1YRCW2"
},
{
"id": "K7wBJlg8eRQ"
},
{
"id": "k8sIQ2bEaYT"
}
]
}
],
"categories": [],
"programStageDataElements": [
{
"created": "2018-04-19T12:05:57.227",
"lastUpdated": "2018-04-19T13:38:20.167",
"id": "hGHimNEVl0B",
"displayInReports": False,
"renderOptionsAsRadio": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 15,
"allowFutureDate": False,
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "NsmhGfGhFRO"
}
},
{
"created": "2018-03-07T19:31:51.062",
"lastUpdated": "2018-04-19T13:38:20.131",
"id": "Z06nEMh81QI",
"displayInReports": False,
"renderOptionsAsRadio": False,
"compulsory": False,
"allowProvidedElsewhere": False,
"sortOrder": 10,
"allowFutureDate": False,
"programStage": {
"id": "pQ8gaWKD3pi"
},
"dataElement": {
"id": "NM9CFZmYq9S"
}
}
],
"dataElements": [
{
"code": "gen_5_0b",
"lastUpdated": "2018-04-19T13:42:10.250",
"id": "VEGzj76HCEN",
"created": "2018-04-19T13:42:10.250",
"name": "VA- Second Surname",
"shortName": "Surname 2",
"aggregationType": "NONE",
"domainType": "TRACKER",
"publicAccess": "rw------",
"description": "Second surname",
"valueType": "TEXT",
"formName": "Second Surname",
"zeroIsSignificant": False,
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": [],
"legendSets": [],
"aggregationLevels": []
},
{
"lastUpdated": "2018-04-19T13:42:10.256",
"id": "lFKqfDj9Rhk",
"created": "2018-04-19T13:42:10.256",
"name": "VA- Age category",
"shortName": "Age category",
"aggregationType": "COUNT",
"domainType": "TRACKER",
"publicAccess": "rw------",
"description": "Age category",
"valueType": "INTEGER_POSITIVE",
"formName": "Age category",
"zeroIsSignificant": False,
"optionSet": {
"id": "AXlVIlSE9zB"
},
"translations": [],
"userGroupAccesses": [],
"attributeValues": [],
"userAccesses": [],
"legendSets": [],
"aggregationLevels": []
}
],
"options": [
{
"lastUpdated": "2018-04-19T13:07:32.584",
"code": "107",
"created": "2018-04-19T13:07:32.551",
"name": "Adult - Colorectal Cancer (C18)",
"id": "U3gJ2b6mW1k",
"sortOrder": 6,
"optionSet": {
"id": "tMkHCKkuxpD"
},
"translations": [],
"attributeValues": []
},
{
"lastUpdated": "2018-04-19T13:07:32.584",
"code": "135",
"created": "2018-04-19T13:07:32.552",
"name": "Adult - Chronic Respiratory (J44)",
"id": "UgfoWoGUBO5",
"sortOrder": 32,
"optionSet": {
"id": "tMkHCKkuxpD"
},
"translations": [],
"attributeValues": []
}
]
}
assert remove_keys(metadata, ['lastUpdatedBy', 'user']) == expected
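# A sketch of the behaviour under test: remove_keys is expected to strip the
# given keys recursively at every nesting level, so a minimal case would be
#
#   assert remove_keys({'a': 1, 'user': {'id': 'x'}}, ['user']) == {'a': 1}
#
# (this minimal case is illustrative and not part of the upstream suite).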
def test_print_error_categories(capsys):
expected = "Validation Errors (600-699)\n" \
"- ID:600 - Could not parse [birth_date]\n" \
"- ID:601 - Could not parse [death_date]\n" \
"- ID:602 - Could not parse [age] as Integer\n" \
"- ID:603 - [age] is not between 0 and 120 years\n" \
"- ID:604 - [age] is missing\n" \
"- ID:605 - [icd10] does not match mapping\n" \
"- ID:606 - [icd10] missing\n" \
"- ID:607 - [sex] is not an Integer in (1, 2, 3, 8, 9)\n" \
"- ID:608 - [sex] is missing\n" \
"- ID:609 - [sid] does not match regex expression\n" \
"- ID:610 - [sid] is missing\n" \
"- ID:611 - orgunit is missing\n" \
"- ID:612 - orgunit UID is not a valid UID\n" \
"Import Errors (700-799)\n" \
"- ID:700 - OrgUnit is not a valid UID\n" \
"- ID:701 - | |
#!/usr/bin/env python
"""
Read in two extracted light curves (interest band and reference band), split
into segments, compute the power spectra per band and cross spectrum of each
segment, averages cross spectrum of all the segments, and computes frequency
lags between the two bands.
Example call:
python simple_cross_spectra.py ./cygx1_i.lc ./cygx1_ref.lc -o "./cygx1"
Enter python simple_cross_spectra.py -h at the command line for help.
"""
from __future__ import print_function
from astropy.table import Table, Column
from astropy.io import fits
import numpy as np
from scipy import fftpack
import argparse
import subprocess
from datetime import datetime
__author__ = "<NAME> <A.L.Stevens at uva.nl>"
__year__ = "2016"
class Band(object):
def __init__(self, n_bins=8192, dt=0.0078125):
self.power = np.zeros(n_bins, dtype=np.float64)
self.mean_rate = 0.0
self.rms = 0.0
self.freq = fftpack.fftfreq(n_bins, d=dt)
################################################################################
def type_power_of_two(num):
"""
Check if an input is a power of 2 (1 <= num < 2147483648), as an argparse
type.
Parameters
----------
num : int
The number in question.
Returns
-------
n : int
The number in question, if it's a power of two
Raises
------
ArgumentTypeError if n isn't a power of two.
"""
n = int(num)
x = 2
assert n > 0
if n == 1:
return n
else:
while x <= n and x < 2147483648:
if n == x:
return n
x *= 2
message = "%d is not a power of two." % n
raise argparse.ArgumentTypeError(message)
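# A sketch of how this validator plugs into argparse (the argument name and
# default are illustrative):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('-n', '--n_bins', type=type_power_of_two, default=8192)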
################################################################################
def get_key_val(fits_file, ext, keyword):
"""
Get the value of a keyword from a FITS header. Keyword does not seem to be
case-sensitive.
Parameters
----------
fits_file : str
File name of the FITS file.
ext : int
The FITS extension in which to search for the given keyword.
keyword : str
The keyword for which you want the associated value.
Returns
-------
any type
Value of the given keyword.
Raises
------
IOError if the input file isn't actually a FITS file.
"""
ext = np.int8(ext)
assert (ext >= 0 and ext <= 2)
keyword = str(keyword)
try:
hdulist = fits.open(fits_file)
except IOError:
print("\tERROR: File does not exist: %s" % fits_file)
exit()
key_value = hdulist[ext].header[keyword]
hdulist.close()
return key_value
################################################################################
def raw_to_absrms(power, mean_rate, n_bins, dt, noisy=True):
"""
Normalize the power spectrum to absolute rms^2 normalization.
TODO: cite paper.
Parameters
----------
power : np.array of floats
The raw power at each Fourier frequency, as a 1-D or 2-D array.
Size = (n_bins) or (n_bins, detchans).
mean_rate : float
The mean count rate for the light curve, in cts/s.
n_bins : int
Number of bins per segment of light curve.
dt : float
Timestep between bins in n_bins, in seconds.
noisy : boolean
True if there is Poisson noise in the power spectrum (i.e., from real
data), False if there is no noise in the power spectrum (i.e.,
simulations without Poisson noise). Default is True.
Returns
-------
np.array of floats
The noise-subtracted power spectrum in absolute rms^2 units, in the
same size array as the input power.
"""
if noisy:
noise = 2.0 * mean_rate
else:
noise = 0.0
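# In formula form (matching the code below): P_abs(f) = (2*dt/N) * |FFT|^2 - 2*<rate>,
# where the 2*<rate> Poisson noise level is subtracted only when noisy=True.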
return power * (2.0 * dt / float(n_bins)) - noise
################################################################################
def var_and_rms(power, df):
"""
Computes the variance and rms (root mean square) of a power spectrum.
Assumes the negative-frequency powers have been removed. DOES NOT WORK ON
2-D POWER ARRAYS! Not sure why.
TODO: cite textbook or paper.
Parameters
----------
power : np.array of floats
1-D array (size = n_bins/2+1) of the raw power at each of the *positive*
Fourier frequencies.
df : float
The step size between Fourier frequencies.
Returns
-------
variance : float
The variance of the power spectrum.
rms : float
The rms of the power spectrum.
"""
variance = np.sum(power * df, axis=0)
rms = np.where(variance >= 0, np.sqrt(variance), np.nan)
return variance, rms
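# Usage sketch: integrating an absolute-rms-normalized power spectrum over
# frequency recovers the light-curve variance, e.g.
#
#   var, rms = var_and_rms(abs_power[0:n_bins // 2 + 1], df)
#
# (the slice is shown only to stress that negative frequencies must be
# removed first, as noted in the docstring).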
################################################################################
def cs_out(out_base, meta_dict, cs_avg, ci, ref):
"""
Saving header data, the cross spectrum, CoI power spectrum, and reference
band power spectrum to a FITS file to use in the program make_lags.py to get
cross-spectral lags. Cross spectra and power spectra are raw, as in un-
normalized.
Parameters
----------
out_base : str
The name the FITS file to write the cross spectrum and power spectra to,
for computing the lags.
meta_dict : dict
Dictionary of necessary meta-parameters for data analysis.
cs_avg : np.array of complex numbers
2-D array of the averaged cross spectrum. Size = (n_bins, detchans).
ci : ccf_lc.Lightcurve object
The channel of interest light curve. Must already have freq, mean_rate,
and power assigned.
ref : ccf_lc.Lightcurve object
The reference band light curve. Must already have mean_rate, rms, and
power assigned.
Returns
-------
nothing, but writes to the file "*_cs.fits"
"""
out_file = out_base + "_cs.fits"
out_dir = out_file[0:out_file.rfind("/")+1]
if len(out_dir) >= 2:
subprocess.call(['mkdir', '-p', out_dir])
print("Output sent to: %s" % out_file)
out_table = Table()
out_table.add_column(Column(data=ci.freq, name='FREQUENCY', unit='Hz'))
out_table.add_column(Column(data=cs_avg, name='CROSS'))
out_table.add_column(Column(data=ci.power, name='POWER_CI'))
out_table.add_column(Column(data=ref.power, name='POWER_REF'))
out_table.meta['TYPE'] = "Cross spectrum and power spectra, saved for lags."
out_table.meta['DATE'] = str(datetime.now())
out_table.meta['EVTLIST'] = " "
out_table.meta['DT'] = meta_dict['dt']
out_table.meta['DF'] = meta_dict['df']
out_table.meta['N_BINS'] = meta_dict['n_bins']
out_table.meta['SEGMENTS'] = meta_dict['n_seg']
out_table.meta['SEC_SEG'] = meta_dict['n_seconds']
out_table.meta['EXPOSURE'] = meta_dict['exposure']
out_table.meta['DETCHANS'] = meta_dict['detchans']
out_table.meta['RATE_CI'] = ci.mean_rate
out_table.meta['RATE_REF'] = ref.mean_rate
out_table.meta['RMS_REF'] = float(ref.rms)
out_table.meta['NYQUIST'] = meta_dict['nyquist']
out_table.write(out_file, overwrite=True)
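## Sketch of reading the "*_cs.fits" product back, e.g. in make_lags.py (the
## file name here is hypothetical):
##     from astropy.table import Table
##     tbl = Table.read("myobs_cs.fits")
##     freq, cross = tbl["FREQUENCY"], tbl["CROSS"]
##     n_seg = tbl.meta["SEGMENTS"]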
################################################################################
def make_cs(rate_ci, rate_ref, meta_dict):
"""
Generate the power spectra for each band and the cross spectrum for one
segment of the light curve.
Parameters
----------
rate_ci : np.array of floats
1-D array of the channel of interest light curve. Size = (n_bins, ).
rate_ref : np.array of floats
1-D array of the reference band light curve. Size = (n_bins, ).
meta_dict : dict
Dictionary of necessary meta-parameters for data analysis.
Returns
-------
cs_seg : np.array of complex numbers
2-D array of the cross spectrum of each channel of interest with the
reference band.
ci_seg : Band object
The channel of interest light curve.
ref_seg : Band object
The reference band light curve.
"""
assert np.shape(rate_ci) == (meta_dict['n_bins'], ),\
"ERROR: CoI light curve has wrong dimensions. Must have size (n_bins, "\
")."
assert np.shape(rate_ref) == (meta_dict['n_bins'], ), "ERROR: Reference "\
"light curve has wrong dimensions. Must have size (n_bins, )."
ci_seg = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
ref_seg = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
## Computing the mean count rate of the segment
ci_seg.mean_rate = np.mean(rate_ci)
ref_seg.mean_rate = np.mean(rate_ref)
## Subtracting the mean off each value of 'rate'
rate_sub_mean_ci = np.subtract(rate_ci, ci_seg.mean_rate)
rate_sub_mean_ref = np.subtract(rate_ref, ref_seg.mean_rate)
## Taking the FFT of the time-domain photon count rate
## SciPy is faster than NumPy or pyFFTW for my array sizes
fft_data_ci = fftpack.fft(rate_sub_mean_ci)
fft_data_ref = fftpack.fft(rate_sub_mean_ref)
## Computing the power from the fourier transform
ci_seg.power = np.absolute(fft_data_ci) ** 2
ref_seg.power = np.absolute(fft_data_ref) ** 2
## Computing the cross spectrum from the fourier transform
cs_seg = np.multiply(fft_data_ci, np.conj(fft_data_ref))
return cs_seg, ci_seg, ref_seg
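## Usage sketch on synthetic Poisson light curves (assumes the Band class and
## meta_dict conventions used throughout this module):
##     import numpy as np
##     meta_dict = {'n_bins': 8192, 'dt': 1. / 128.}
##     rate_ci = np.random.poisson(100., meta_dict['n_bins']).astype(float)
##     rate_ref = np.random.poisson(500., meta_dict['n_bins']).astype(float)
##     cs_seg, ci_seg, ref_seg = make_cs(rate_ci, rate_ref, meta_dict)
##     ## cs_seg is complex with shape (n_bins, ); powers are |FFT|^2 per band.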
################################################################################
def lc_in(interest_file, ref_file, meta_dict):
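"""
Open the channel of interest and reference band light curve files, align
their start times, and step through both in segments of n_bins bins,
accumulating the per-segment cross spectrum and band powers.
"""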
n_seg = 0
interest_band = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
ref_band = Band(n_bins=meta_dict['n_bins'], dt=meta_dict['dt'])
cross_spec = np.zeros(meta_dict['n_bins'], dtype=np.complex128)
## Open the light curve files and load the data as astropy tables
try:
interest_table = Table.read(interest_file)
except IOError:
print("\tERROR: File does not exist: %s" % interest_file)
exit()
try:
ref_table = Table.read(ref_file)
except IOError:
print("\tERROR: File does not exist: %s" % ref_file)
exit()
start_time_i = interest_table['TIME'][0]
end_time_i = interest_table['TIME'][-1]
start_time_r = ref_table['TIME'][0]
end_time_r = ref_table['TIME'][-1]
len_i = len(interest_table['TIME'])
len_r = len(ref_table['TIME'])
# print("i: %.15f \t %.15f" % (start_time_i, end_time_i))
# print("r: %.15f \t %.15f" % (start_time_r, end_time_r))
# print("len i:", len_i)
# print("len r:", len_r)
# assert len_i == len_r
# assert start_time_i == start_time_r
# assert end_time_i == end_time_r
## The following is in case the two files aren't the exact same length.
a = 0 ## start of bin index to make segment of data
c = 0
b = meta_dict['n_bins'] ## end of bin index to make segment of data for
## inner for-loop
d = meta_dict['n_bins']
if start_time_i > start_time_r:
bin_diff = int((start_time_i - start_time_r) / meta_dict['dt'])
assert bin_diff < len_r
c += bin_diff
d += bin_diff
elif start_time_r > start_time_i:
bin_diff = int((start_time_r - start_time_i) / meta_dict['dt'])
assert bin_diff < len_i
a += bin_diff
b += bin_diff
## Loop through segments of the light curves
while b <= len_i and d <= len_r:
n_seg += 1
""" Runs various smoke tests for the data.cuahsi.org """
from hc_macros import (
Search,
Marker,
Services,
Keywords,
Advanced,
Filter,
About,
QuickStart,
Zendesk,
Workspace,
ResourceCreator,
)
from hc_elements import ZendeskArticlePage
from cuahsi_base.utils import External, TestSystem
from cuahsi_base.cuahsi_base import BaseTest, parse_args_run_tests
from config import BASE_URL
# Test cases definition
class HydroclientTestSuite(BaseTest):
"""Test suite for the HydroClient software system"""
def setUp(self):
super(HydroclientTestSuite, self).setUp()
self.driver.maximize_window()
self.driver.get(BASE_URL)
def test_A_000002(self):
"""Confirms Archbold service metadata is available through
HydroClient and that a sample of the data downloads successfully
"""
def oracle():
"""The Lake Annie Florida data can be successfully sent to the
workspace, and then is processed successfully in the
workspace ("completed" status)
"""
self.assertEqual(Workspace.count_complete(self.driver), 1)
Search.search_location(self.driver, "Lake Annie Highlands County")
Services.filters(self.driver, orgs="Archbold Biological Station")
Filter.open(self.driver)
Filter.to_workspace_cell(self.driver, 1, 1)
oracle()
def test_A_000003(self):
"""Confirms repeated search for Lake Annie data does not result
in problematic behavior
"""
def oracle():
"""51 results are returned for a Lake Annie Florida data search,
when the search is filtered to only include "Archbold Biological
Center" service
"""
self.assertEqual(51, Search.count_results(self.driver))
Search.search_location(self.driver, "Lake Annie Highlands County")
Services.filters(self.driver, orgs="Archbold Biological Station")
Search.search(self.driver, 60)
oracle()
def test_A_000004(self):
"""Confirms the start and end date in a NWIS Unit Values
data search are applied throughout search and workspace export
workflow
"""
def oracle():
"""Start date and end date in workspace match the initial
date filtering values established in the Search interface
"""
self.assertTrue(
Workspace.is_in_results(self.driver, ["2015-12-01", "2015-12-30"], 10)
)
Search.search_location(self.driver, "Tampa ")
Services.filters(self.driver, titles="NWIS Unit Values")
Search.filter_dates(self.driver, "12/01/2015", "12/30/2015")
Filter.open(self.driver)
Filter.to_workspace_text(self.driver, "Derived Value")
oracle()
def test_A_000005(self):
"""Confirms metadata and data availability for the NASA Goddard
Earth Sciences services, using the New Haven CT Site X416-Y130.
The two associated services are NLDAS Hourly NOAH Data and NLDAS
Hourly Primary Forcing Data
"""
def oracle():
"""The time series are sent to the workspace and processed,
resulting in a "completed" status for all time series
"""
self.assertEqual(Workspace.count_complete(self.driver, 6), 3)
Search.search_location(self.driver, "New Haven ")
Services.filters(
self.driver,
titles=["NLDAS Hourly NOAH Data", "NLDAS Hourly Primary Forcing Data"],
)
Filter.open(self.driver)
Filter.search_field(self.driver, "X416-Y130")
Filter.to_workspace_cell_multi(self.driver, [1, 5, 9]) # rows 1, 5, 9
oracle()
def test_A_000006(self):
"""Confirms metadata availability and the capability to download data
near Köln Germany
"""
def oracle():
"""Results are exported to workspace and number of successfully
processed time series is above 0
"""
self.assertNotEqual(Workspace.count_complete(self.driver), 0)
Search.search_location(self.driver, "Köln ")
Search.search(self.driver)
Search.to_random_map_marker(self.driver)
Marker.to_workspace_one(self.driver)
oracle()
def test_A_000008(self):
"""Confirms that map layer naming, as defined in the HydroClient
user interface, is consistent with the Quick Start and help pages
documentation
"""
def oracle_1():
"""Map layer naming in the Quick Start modal matches the
naming within the HydroClient map search interface
"""
for layer in layers:
self.assertIn(
"<li>{}</li>".format(layer), TestSystem.page_source(driver)
)
def oracle_2():
"""Map layer naming in the help documentation page matches the
naming within the HydroClient search interface
"""
for layer in layers:
self.assertIn(layer, TestSystem.page_source(driver))
driver = self.driver
layers = ["USGS Stream Gages", "Nationalmap Hydrology", "EPA Watersheds"]
for layer in layers:  # Turn all layers on
Search.toggle_layer(driver, layer)
for layer in layers: # Turn all layers off
Search.toggle_layer(driver, layer)
Search.to_quickstart(driver)
QuickStart.section(driver, "Using the Layer Control")
oracle_1()
num_windows_opened = len(driver.window_handles)
QuickStart.more(driver, "Click for more information on the Layer Control")
External.switch_new_page(
driver, num_windows_opened, ZendeskArticlePage.article_header_locator
)
oracle_2()
def test_A_000009(self):
"""Confirms that additional help on layer control can be
accessed using the Zendesk widget
"""
def oracle():
"""A valid help center page is opened from the Zendesk
widget, and the page contains the word "Layers"
"""
self.assertIn("Help Center", TestSystem.title(driver))
self.assertIn("Layer", TestSystem.title(driver))
driver = self.driver
Search.search_location(driver, "San Diego")
Search.search_location(driver, "Amsterdam")
num_windows_opened = len(driver.window_handles)
Zendesk.to_help(driver, "Layers", "Using the Layer Control")
External.switch_new_page(
driver, num_windows_opened, ZendeskArticlePage.article_header_locator
)
oracle()
def test_A_000010(self):
"""Confirms that filtering by all keywords and all data types
returns the same number of results as if no search parameters
were applied. This test is applied near both the Dallas Texas
and Rio De Janeiro Brazil areas
"""
def oracle():
"""A search which filters for all keywords and all data types
returns the same number of results as a search without any
filters
"""
for rio_count in rio_counts:
self.assertEqual(rio_count, rio_counts[0])
for dallas_count in dallas_counts:
self.assertEqual(dallas_count, dallas_counts[0])
driver = self.driver
rio_counts = []
dallas_counts = []
Search.search_location(driver, "Rio De Janeiro")
Keywords.filter_root(driver, ["Biological", "Chemical", "Physical"])
rio_counts.append(Search.count_results(driver))
Advanced.filter_all_value_types(driver)
rio_counts.append(Search.count_results(driver))
Search.reset(driver)
Search.search(self.driver)
rio_counts.append(Search.count_results(driver))
Search.search_location(driver, "Dallas")
Keywords.filter_root(driver, ["Biological", "Chemical", "Physical"])
dallas_counts.append(Search.count_results(driver))
Advanced.filter_all_value_types(driver)
dallas_counts.append(Search.count_results(driver))
Search.reset(driver)
Search.search(self.driver)
dallas_counts.append(Search.count_results(driver))
oracle()
def test_A_000011(self):
"""Confirms "About" modal dropdown links successfully open up
the associated resource in a new tab and do not show a 404 Error
"""
def oracle():
"""None of the resource pages contain the text "404 Error" """
self.assertNotIn(
"404 Error",
external_sources,
msg='"{}" page was not found.'.format(page),
)
driver = self.driver
page = "Help Center"
for to_helpcenter_link in [About.to_helpcenter, About.to_contact]:
to_helpcenter_link(driver)
external_sources = External.source_new_page(driver)
External.close_new_page(driver)
oracle()
About.contact_close(driver)
page = "CUAHSI GitHub repository"
# opens in new window
About.to_license_repo_top(driver)
external_sources = External.source_new_page(driver)
External.close_new_page(driver)
oracle()
About.licensing_close(driver)
# opens in the same window
About.to_license_repo_inline(driver)
external_sources = External.source_new_page(driver)
# TODO Brian fix of _blank target inconsistency in the works
# External.close_new_page(driver)
oracle()
def test_A_000012(self):
"""Confirms repeated map scrolls, followed by a location search,
returns nonzero results for an area which normally has nonzero
search results. Effectively, this test confirms that viewing
duplicate map instances to the left and right of the main (starting)
map instance does not cause problems during location searching.
"""
def oracle():
"""Results count is nonzero after map navigations and a map
location search (in that order)
"""
self.assertNotEqual(Search.count_results(self.driver), 0)
Search.scroll_map(self.driver, 25)
Search.search_location(self.driver, "Raleigh")
Search.search(self.driver)
oracle()
def test_A_000013(self):
"""Confirms operations within the Workspace user interface
do not result in errors when the workspace is empty
"""
def oracle():
"""The browser is still on using the HydroClient system
after Workspace operations are used on an empty Workspace
"""
self.assertIn("HydroClient", TestSystem.title(driver))
driver = self.driver
Search.to_workspace(driver)
Workspace.select_all(driver)
Workspace.clear_select(driver)
Workspace.remove_select(driver)
Workspace.clear_select(driver)
Workspace.select_all(driver)
Workspace.remove_select(driver)
Workspace.to_csv(driver)
Workspace.to_viewer(driver)
Workspace.to_none(driver)
Workspace.to_viewer(driver)
Workspace.to_csv(driver)
oracle()
def test_A_000014(self):
"""Confirms NLDAS data is not available over the ocean (from
either of the two services)"""
def oracle():
"""The results count over Cape Cod Bay (no land in view)
is 0 after filtering for only NLDAS services
"""
self.assertEqual(Search.count_results(self.driver), 0)
Search.search_location(self.driver, "Cape Cod Bay")
Search.zoom_in(self.driver, 3)
Services.filters(
self.driver,
titles=["NLDAS Hourly NOAH Data", "NLDAS Hourly Primary Forcing Data"],
)
oracle()
def test_A_000015(self):
"""Confirms no date filters returns the same number of results as
applying a date filter from 01/01/0100 to 01/01/9000"""
def oracle(init_count, final_count):
self.assertEqual(init_count, final_count)
Search.search_location(self.driver, "United States")
Search.search(self.driver)
Search.filter_dates(self.driver, "01/01/0100", "01/01/9000")
init_count = Search.count_results(self.driver)
Search.clear_date_filter(self.driver)
Search.search(self.driver)
final_count = Search.count_results(self.driver)
oracle(init_count, final_count)
def test_A_000016(self):
"""Austin, TX search successfully pulls metadata, which is then viewable
within the Filter Results dialog.
"""
def oracle(result_nums):
"""Results count is between 1k and 10k, as seen from Filter Results
dialog reporting
"""
self.assertEqual(result_nums[0], 1) # first results page is active
self.assertEqual(result_nums[1], 10) # 10 results on first page
self.assertTrue(1000 < result_nums[2] and result_nums[2] < 10000)
Search.search_location(self.driver, "<NAME>")
Search.search(self.driver)
Filter.open(self.driver)
TestSystem.wait(10)
result_nums = Filter.count_results(self.driver)
result_nums = [int(result_num) for result_num in result_nums]
oracle(result_nums)
def test_A_000017(self):
"""Confirm Reset button clears Filter Results text and categorical
filters"""
def oracle_results_count(expected_results, should_match):
if should_match:
self.assertEqual(Search.count_results(self.driver), expected_results)
else:
self.assertNotEqual(Search.count_results(self.driver), expected_results)
def oracle_data_prop_selection(data_props, should_be_selected):
"""Checks that filter options not selected"""
for data_prop in data_props:
if should_be_selected:
self.assertTrue(
Filter.data_prop_is_selected(self.driver, data_prop)
)
else:
self.assertFalse(
Filter.data_prop_is_selected(self.driver, data_prop)
)
def oracle_data_service_selection(data_services, should_be_selected):
"""Checks that filter options not selected"""
for data_service in data_services:
if should_be_selected:
self.assertTrue(
Filter.data_service_is_selected(self.driver, data_service)
)
else:
self.assertFalse(
Filter.data_service_is_selected(self.driver, data_service)
)
data_props = ["Data Type", "Sample Medium"]
data_services = ["Community Collaborative Rain, Hail and Snow Network"]
Search.search_location(self.driver, "Montreal ")
Search.search(self.driver)
expected_results = Search.count_results(self.driver)
Filter.open(self.driver)
Filter.selection(self.driver)
Filter.close(self.driver)
TestSystem.wait(5)
Search.reset(self.driver)
Search.search(self.driver)
oracle_results_count(expected_results, should_match=True)
Filter.open(self.driver)
Filter.find_in_table(self.driver, "DOLLARD")
oracle_results_count(expected_results, should_match=False)
Search.reset(self.driver)
Search.search(self.driver)
oracle_results_count(expected_results, should_match=True)
Filter.open(self.driver)
Filter.set_data_props(self.driver, data_props)
Filter.open(self.driver)
Filter.set_data_services(self.driver, data_services)
oracle_results_count(expected_results, should_match=False)
Filter.open(self.driver)
oracle_data_prop_selection(data_props, should_be_selected=True)
oracle_data_service_selection(data_services, should_be_selected=True)
Filter.close(self.driver)
Search.reset(self.driver)
Search.search(self.driver)
oracle_results_count(expected_results, should_match=True)
Filter.open(self.driver)
oracle_data_prop_selection(data_props, should_be_selected=False)
oracle_data_service_selection(data_services, should_be_selected=False)
def test_A_000018(self):
"""Search results for Antarctica is greater than 0"""
def oracle():
"""Search result is greater than zero"""
self.assertGreater(Search.count_results(self.driver), 0)
Search.search_location(self.driver, "Antarctica")
Search.search(self.driver)
oracle()
def test_A_000019(self):
"""Confirm empty operations on the filter modals don't affect the
results set"""
# coding=utf-8
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for FSMT."""
import json
import os
import re
import unicodedata
from typing import Dict, List, Optional, Tuple
import sacremoses as sm
from ...file_utils import add_start_docstrings
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"src_vocab_file": "vocab-src.json",
"tgt_vocab_file": "vocab-tgt.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"src_vocab_file": {"stas/tiny-wmt19-en-de": "https://cdn.huggingface.co/stas/tiny-wmt19-en-de/vocab-src.json"},
"tgt_vocab_file": {"stas/tiny-wmt19-en-de": "https://cdn.huggingface.co/stas/tiny-wmt19-en-de/vocab-tgt.json"},
"merges_file": {"stas/tiny-wmt19-en-de": "https://cdn.huggingface.co/stas/tiny-wmt19-en-de/merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"stas/tiny-wmt19-en-de": 1024}
PRETRAINED_INIT_CONFIGURATION = {
"stas/tiny-wmt19-en-de": {
"langs": ["en", "de"],
"model_max_length": 1024,
"special_tokens_map_file": None,
"full_tokenizer_file": None,
}
}
def get_pairs(word):
"""
Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
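# Illustrative example:
#     get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}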
def replace_unicode_punct(text):
"""
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
"""
text = text.replace("，", ",")
text = re.sub(r"。\s*", ". ", text)
text = text.replace("、", ",")
text = text.replace("”", '"')
text = text.replace("“", '"')
text = text.replace("∶", ":")
text = text.replace("：", ":")
text = text.replace("？", "?")
text = text.replace("《", '"')
text = text.replace("》", '"')
text = text.replace("）", ")")
text = text.replace("！", "!")
text = text.replace("（", "(")
text = text.replace("；", ";")
text = text.replace("１", "1")
text = text.replace("」", '"')
text = text.replace("「", '"')
text = text.replace("０", "0")
text = text.replace("３", "3")
text = text.replace("２", "2")
text = text.replace("５", "5")
text = text.replace("６", "6")
text = text.replace("９", "9")
text = text.replace("７", "7")
text = text.replace("８", "8")
text = text.replace("４", "4")
text = re.sub(r"．\s*", ". ", text)
text = text.replace("～", "~")
text = text.replace("’", "'")
text = text.replace("…", "...")
text = text.replace("━", "-")
text = text.replace("〈", "<")
text = text.replace("〉", ">")
text = text.replace("【", "[")
text = text.replace("】", "]")
text = text.replace("％", "%")
return text
def remove_non_printing_char(text):
"""
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
"""
output = []
for char in text:
cat = unicodedata.category(char)
if cat.startswith("C"):
continue
output.append(char)
return "".join(output)
# Porting notes:
# this one is modeled after XLMTokenizer
#
# added:
# - src_vocab_file,
# - tgt_vocab_file,
# - langs,
class FSMTTokenizer(PreTrainedTokenizer):
"""
Construct a FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
- Moses preprocessing and tokenization.
- Normalizing all inputs text.
- The arguments ``special_tokens`` and the function ``set_special_tokens`` can be used to add additional symbols
(like "__classify__") to a vocabulary.
- The argument :obj:`langs` defines a pair of languages.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
langs (:obj:`List[str]`):
A list of two languages to translate from and to, for instance :obj:`["en", "ru"]`.
src_vocab_file (:obj:`str`):
File containing the vocabulary for the source language.
tgt_vocab_file (:obj:`str`):
File containing the vocabulary for the target language.
merges_file (:obj:`str`):
File containing the merges.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to lowercase the input when tokenizing.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
langs=None,
src_vocab_file=None,
tgt_vocab_file=None,
merges_file=None,
do_lower_case=False,
unk_token="<unk>",
bos_token="<s>",
sep_token="</s>",
pad_token="<pad>",
**kwargs
):
super().__init__(
langs=langs,
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
bos_token=bos_token,
sep_token=sep_token,
pad_token=pad_token,
**kwargs,
)
self.src_vocab_file = src_vocab_file
self.tgt_vocab_file = tgt_vocab_file
self.merges_file = merges_file
self.do_lower_case = do_lower_case
# cache of sm.MosesPunctNormalizer instance
self.cache_moses_punct_normalizer = dict()
# cache of sm.MosesTokenizer instance
self.cache_moses_tokenizer = dict()
self.cache_moses_detokenizer = dict()
if langs and len(langs) == 2:
self.src_lang, self.tgt_lang = langs
else:
raise ValueError(
f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
"Usually that means that tokenizer can't find a mapping for the given model path "
"in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer."
)
with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
self.encoder = json.load(src_vocab_handle)
with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
tgt_vocab = json.load(tgt_vocab_handle)
self.decoder = {v: k for k, v in tgt_vocab.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
# hack override
def get_vocab(self) -> Dict[str, int]:
return self.get_src_vocab()
# hack override
@property
def vocab_size(self) -> int:
return self.src_vocab_size
def moses_punct_norm(self, text, lang):
if lang not in self.cache_moses_punct_normalizer:
punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
self.cache_moses_punct_normalizer[lang] = punct_normalizer
return self.cache_moses_punct_normalizer[lang].normalize(text)
def moses_tokenize(self, text, lang):
if lang not in self.cache_moses_tokenizer:
moses_tokenizer = sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
return self.cache_moses_tokenizer[lang].tokenize(
text, aggressive_dash_splits=True, return_str=False, escape=True
)
def moses_detokenize(self, tokens, lang):
if lang not in self.cache_moses_detokenizer:
moses_detokenizer = sm.MosesDetokenizer(lang=lang)
self.cache_moses_detokenizer[lang] = moses_detokenizer
return self.cache_moses_detokenizer[lang].detokenize(tokens)
def moses_pipeline(self, text, lang):
text = replace_unicode_punct(text)
text = self.moses_punct_norm(text, lang)
text = remove_non_printing_char(text)
return text
@property
def src_vocab_size(self):
return len(self.encoder)
@property
def tgt_vocab_size(self):
return len(self.decoder)
def get_src_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def get_tgt_vocab(self):
return dict(self.decoder, **self.added_tokens_decoder)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + "</w>",)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
if word == "\n </w>":
word = "\n</w>"
self.cache[token] = word
return word
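# Worked trace of the merge loop above, with hypothetical merge ranks:
#     bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
#     token "low" -> ("l", "o", "w</w>")
#     merge ("l", "o")      -> ("lo", "w</w>")
#     merge ("lo", "w</w>") -> ("low</w>",)
#     bpe("low") returns "low</w>"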
def _tokenize(self, text, lang="en", bypass_tokenizer=False):
"""
Tokenize a string given language code using Moses.
Details of tokenization:
- [sacremoses](https://github.com/alvations/sacremoses): port of Moses
- Install with `pip install sacremoses`
Args:
- lang: ISO language code (default = 'en') (string). Languages should be among the languages supported by
the model. However, we don't enforce it.
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
(bool). If True, we only apply BPE.
Returns:
List of tokens.
"""
# ignore `lang` which currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
# if lang != self.src_lang:
# raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
lang = self.src_lang
if self.do_lower_case:
text = text.lower()
if bypass_tokenizer:
text = text.split()
else:
text = self.moses_pipeline(text, lang=lang)
text = self.moses_tokenize(text, lang=lang)
split_tokens = []
for token in text:
if token:
split_tokens.extend([t for t in self.bpe(token).split(" ")])
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) into a single string."""
# remove the BPE end-of-word markers and re-split on the restored spaces
tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
tokens = "".join(tokens).split()
# detokenize with Moses
text = self.moses_detokenize(tokens, self.tgt_lang)
return text
settings are in the default_hparams() function in mask_rcnn_params.py.
labels: The input labels in a dictionary. The labels include groundtruth
masks, boxes and classes.
class_targets: A tensor with a shape of [batch_size, batch_size_per_im,
num_classes] representing the classification target of each object.
box_targets: A tensor with a shape of [batch_size, batch_size_per_im,
num_classes] representing the box regression target of each object.
box_rois: A tensor with a shape of [batch_size, batch_size_per_im, 4]
representing the proposal boxes.
proposal_to_label_map: A tensor with a shape of [batch_size, K]. This tensor
keeps the mapping between proposal to labels.
detections: A tensor with a shape of [batch_size, num_detections, 7],
representing the class and box predictions in PREDICT mode.
Returns:
mask_outputs: A tensor with a shape of [batch_size, max_num_fg,
mrcnn_resolution, mrcnn_resolution] representing the mask prediction of
each object.
The following is for training only.
mask_targets: A tensor with a shape of [batch_size, max_num_fg,
mrcnn_resolution, mrcnn_resolution] representing the mask target of each
object.
class_targets: A tensor with a shape of [batch_size, max_num_fg]
representing the classification target of each sampled foreground object.
Raises:
ValueError: `detections` must be present in PREDICT mode, while all other
optional arguments must be present in TRAIN.
"""
def _is_missing(tensors):
for tensor in tensors:
if tensor is None:
return True
return False
if (mode == tf.estimator.ModeKeys.TRAIN and
_is_missing([labels, class_targets, box_targets, box_rois,
proposal_to_label_map])):
raise ValueError('One of the required arguments is missing in TRAIN mode.')
if mode == tf.estimator.ModeKeys.PREDICT and detections is None:
raise ValueError('detections must be provided in PREDICT mode.')
if mode == tf.estimator.ModeKeys.TRAIN:
(class_targets, box_targets, box_rois,
proposal_to_label_map) = select_fg_for_masks(
class_targets, box_targets, box_rois, proposal_to_label_map,
max_num_fg=int(params['batch_size_per_im'] * params['fg_fraction']))
mask_targets = get_mask_targets(
box_rois, proposal_to_label_map, box_targets,
labels['cropped_gt_masks'], params['mrcnn_resolution'],
core_assignment=core_assignment_utils.get_core_assignment(
core_assignment_utils.CORE_0, params['num_cores_per_replica']))
mask_outputs = mask_rcnn_heads(
features, box_rois, num_classes=params['num_classes'],
mrcnn_resolution=params['mrcnn_resolution'],
core_assignment=core_assignment_utils.get_core_assignment(
core_assignment_utils.CORE_1, params['num_cores_per_replica']))
class_indices = tf.to_int32(class_targets)
else:
box_rois = detections[:, :, 1:5]
mask_outputs = mask_rcnn_heads(
features, box_rois, num_classes=params['num_classes'],
mrcnn_resolution=params['mrcnn_resolution'])
class_indices = tf.to_int32(detections[:, :, 6])
if mode == tf.estimator.ModeKeys.TRAIN:
device = core_assignment_utils.get_core_assignment(
core_assignment_utils.CORE_1, params['num_cores_per_replica'])
else:
device = None
with tf.device(device):
mask_outputs = mask_post_processing(class_indices, params['num_classes'],
mask_outputs)
if mode == tf.estimator.ModeKeys.TRAIN:
return mask_outputs, class_targets, mask_targets
else:
return mask_outputs
def get_mask_targets(fg_boxes, fg_proposal_to_label_map, fg_box_targets,
mask_gt_labels, output_size=28, core_assignment=None):
"""Crop and resize on ground truth masks.
Args:
fg_boxes: A 3-D tensor of shape [batch_size, num_masks, 4]. Each row
represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
fg_proposal_to_label_map: A tensor of shape [batch_size, num_masks].
fg_box_targets: a float tensor representing the box label for each box
with a shape of [batch_size, num_masks, 4].
mask_gt_labels: A tensor with a shape of [batch_size, M, H+4, W+4]. M is
NUM_MAX_INSTANCES (i.e., 100 in this implementation) in each image, while
H and W are ground truth mask size. The `+4` comes from padding of two
zeros in both directions of height and width dimension.
output_size: A scalar to indicate the output crop size.
core_assignment: A TensorFlow device to specify where the op is placed.
`None` means no specification.
Returns:
A 4-D tensor representing ground truth masks with a shape of [batch_size,
num_boxes, output_size, output_size].
"""
with tf.name_scope('get_mask_targets'):
(batch_size, num_instances, max_feature_height,
max_feature_width) = mask_gt_labels.get_shape().as_list()
_, num_masks = fg_proposal_to_label_map.get_shape().as_list()
# Use a convolution to pre-compute a box of four values (for interpolation)
# and stack four values in the channel dimension.
features_all = tf.reshape(
mask_gt_labels,
[batch_size * num_instances, max_feature_height, max_feature_width, 1])
value = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
features_all = tf.layers.conv2d(
features_all, filters=4, kernel_size=(2, 2), strides=(1, 1),
padding='same', data_format='channels_last', use_bias=False,
kernel_initializer=tf.constant_initializer(value), trainable=False,
name='get_mask_targets_conv2d')
# Transposes tensor dimension for TPU performance.
features_all = tf.transpose(features_all, [1, 2, 0, 3])
features_all = tf.reshape(features_all, [-1, 4])
batch_dim_size = num_instances
width_dim_size = batch_size * batch_dim_size
height_dim_size = max_feature_width * width_dim_size
# proposal_to_label_map might have -1 paddings.
levels = tf.maximum(fg_proposal_to_label_map, 0)
# Projects box location and sizes to corresponding cropped ground truth
# mask coordinates.
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=fg_boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=fg_box_targets, num_or_size_splits=4, axis=2)
valid_feature_width = max_feature_width - 4
valid_feature_height = max_feature_height - 4
y_transform = (bb_y_min - gt_y_min) * valid_feature_height / (
gt_y_max - gt_y_min + _EPSILON) + 2
x_transform = (bb_x_min - gt_x_min) * valid_feature_width / (
gt_x_max - gt_x_min + _EPSILON) + 2
h_transform = (bb_y_max - bb_y_min) * valid_feature_height / (
gt_y_max - gt_y_min + _EPSILON)
w_transform = (bb_x_max - bb_x_min) * valid_feature_width / (
gt_x_max - gt_x_min + _EPSILON)
# Compute y and x coordinate indices.
box_grid_y = []
box_grid_x = []
for i in range(output_size):
box_grid_y.append(y_transform + (0.5 + i) * h_transform / output_size)
box_grid_x.append(x_transform + (0.5 + i) * w_transform / output_size)
box_grid_y = tf.stack(box_grid_y, axis=2)
box_grid_x = tf.stack(box_grid_x, axis=2)
box_grid_y0 = tf.floor(box_grid_y)
box_grid_x0 = tf.floor(box_grid_x)
# Check boundary: clamp y against the feature height, x against the width.
box_y0 = tf.minimum(
tf.to_float(max_feature_height-1), tf.maximum(0., box_grid_y0))
box_x0 = tf.minimum(
tf.to_float(max_feature_width-1), tf.maximum(0., box_grid_x0))
y_indices = tf.cast(
tf.reshape(box_y0,
[batch_size, num_masks, output_size]), dtype=tf.int32)
x_indices = tf.cast(
tf.reshape(box_x0,
[batch_size, num_masks, output_size]), dtype=tf.int32)
indices = tf.reshape(
tf.tile(tf.reshape(tf.range(batch_size) * batch_dim_size,
[batch_size, 1, 1, 1]),
[1, num_masks, output_size, output_size]) +
tf.tile(tf.reshape(levels,
[batch_size, num_masks, 1, 1]),
[1, 1, output_size, output_size]) +
tf.tile(tf.reshape(y_indices * height_dim_size,
[batch_size, num_masks, output_size, 1]),
[1, 1, 1, output_size]) +
tf.tile(tf.reshape(x_indices * width_dim_size,
[batch_size, num_masks, 1, output_size]),
[1, 1, output_size, 1]), [-1])
with tf.device(core_assignment):
features_per_box = tf.reshape(
tf.gather(features_all, indices),
[batch_size, num_masks, output_size, output_size, 4])
# The output can be computed by bilinear interpolation of four neighboring
# points f0, f1, f2, and f3.
# f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
# [f10, f11]]
# f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
# f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
ly = box_grid_y - box_grid_y0
lx = box_grid_x - box_grid_x0
ly = tf.tile(
tf.reshape(ly, [batch_size, num_masks, output_size, 1, 1]),
[1, 1, 1, output_size, 1])
lx = tf.tile(
tf.reshape(lx, [batch_size, num_masks, 1, output_size, 1]),
[1, 1, output_size, 1, 1])
hy = 1.0 - ly
hx = 1.0 - lx
interpolation_kernel = tf.concat([hy*hx, hy*lx, ly*hx, ly*lx], axis=4)
# Interpolate the gathered features with computed interpolation kernels.
features_per_box *= tf.cast(interpolation_kernel,
dtype=features_per_box.dtype)
features_per_box = tf.reduce_sum(features_per_box, axis=4)
# Masks are binary outputs.
features_per_box = tf.where(
tf.greater_equal(features_per_box, 0.5),
tf.ones_like(features_per_box),
tf.zeros_like(features_per_box))
# mask_targets depend on box RoIs, which have gradients. This
# stop_gradient prevents the flow of gradient to box RoIs.
features_per_box = tf.stop_gradient(features_per_box)
return features_per_box
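# Standalone sketch (plain Python, illustrative only) of the bilinear kernel
# f = hy*hx*f00 + hy*lx*f01 + ly*hx*f10 + ly*lx*f11 used above:
#     def bilinear(f00, f01, f10, f11, ly, lx):
#         hy, hx = 1.0 - ly, 1.0 - lx
#         return hy*hx*f00 + hy*lx*f01 + ly*hx*f10 + ly*lx*f11
#     bilinear(1., 2., 3., 4., 0.0, 0.0)  # -> 1.0 (exactly on f00)
#     bilinear(1., 2., 3., 4., 0.5, 0.5)  # -> 2.5 (mean of the four neighbors)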
def multilevel_crop_and_resize(features, boxes, output_size=7,
core_assignment=None):
"""Crop and resize on multilevel feature pyramid.
Following the ROIAlign technique (see https://arxiv.org/pdf/1703.06870.pdf,
figure 3 for reference), we want to sample pixel level feature information
from our feature map at the box boundaries. For each feature map, we select
an (output_size, output_size) set of pixels corresponding to our box
location, and then use bilinear interpolation to select the feature value
for each pixel.
For performance, we perform the gather and interpolation on all layers as a
single operation. To do this, the multi-level features are first stacked and
gathered into [2*output_size, 2*output_size] feature points. Then bilinear
interpolation is performed on the gathered feature points to generate
[output_size, output_size] RoIAlign feature map.
Here is the step-by-step algorithm:
1. Pad all multi-level features to a fixed spatial dimension and stack them
into a Tensor of shape [batch_size, level, height, width, num_filters].
2. The multi-level features are then gathered into a
[batch_size, num_boxes, output_size*2, output_size*2, num_filters]
Tensor. The Tensor contains four neighboring feature points for each
vertex in the output grid.
Instead of performing gather operation in one-step, a two-step gather
algorithm is performed. First, the Tensor containing multi-level
features is gathered into
[batch_size, num_boxes, output_size*2, width, num_filters].
Then the tensor is transposed to
[batch_size, num_boxes, width, output_size*2, num_filters]
then gathered to
[batch_size, num_boxes, output_size*2, output_size*2, num_filters].
The 2-step gather algorithm makes sure each gather operation performs on
large contiguous memory.
3. Compute the interpolation kernel of shape
[batch_size, num_boxes, output_size*2, output_size*2]. The last 2 axis
can be seen as stacking 2x2 interpolation kernels for all vertices in the
output grid.
4. Element-wise multiply the gathered features and interpolation kernel.
Then apply 2x2 average pooling to reduce the spatial dimension to output_size.
def test_str_density(self):
"""Test resolve state with density sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="density"
)
sensor.sensor_value.payload = DPTArray(
(
0x44,
0xA5,
0xCB,
0x27,
)
)
self.assertEqual(sensor.resolve_state(), 1326.3485107421875)
self.assertEqual(sensor.unit_of_measurement(), "kg/m³")
self.assertEqual(sensor.ha_device_class(), None)
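# The 4-byte payloads in these tests are big-endian IEEE-754 singles (KNX
# DPT 14); a quick decode of the payload above for reference:
#     import struct
#     struct.unpack(">f", bytes((0x44, 0xA5, 0xCB, 0x27)))[0]
#     # -> 1326.3485107421875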
def test_str_electrical_conductivity(self):
"""Test resolve state with electrical_conductivity sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electrical_conductivity",
)
sensor.sensor_value.payload = DPTArray(
(
0xC4,
0xC6,
0xF5,
0x6E,
)
)
self.assertEqual(sensor.resolve_state(), -1591.669677734375)
self.assertEqual(sensor.unit_of_measurement(), "S/m")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_charge(self):
"""Test resolve state with electric_charge sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_charge",
)
sensor.sensor_value.payload = DPTArray(
(
0x46,
0x14,
0xF6,
0xA0,
)
)
self.assertEqual(sensor.resolve_state(), 9533.65625)
self.assertEqual(sensor.unit_of_measurement(), "C")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_current(self):
"""Test resolve state with electric_current sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_current",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xAD,
0x45,
0x90,
)
)
self.assertEqual(sensor.resolve_state(), 5544.6953125)
self.assertEqual(sensor.unit_of_measurement(), "A")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_current_density(self):
"""Test resolve state with electric_current_density sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_current_density",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x7C,
0x57,
0xF6,
)
)
self.assertEqual(sensor.resolve_state(), 4037.49755859375)
self.assertEqual(sensor.unit_of_measurement(), "A/m²")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_dipole_moment(self):
"""Test resolve state with electric_dipole_moment sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_dipole_moment",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x58,
0xF1,
0x73,
)
)
self.assertEqual(sensor.resolve_state(), 3471.090576171875)
self.assertEqual(sensor.unit_of_measurement(), "C m")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_displacement(self):
"""Test resolve state with electric_displacement sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_displacement",
)
sensor.sensor_value.payload = DPTArray(
(
0xC5,
0x34,
0x8B,
0x0,
)
)
self.assertEqual(sensor.resolve_state(), -2888.6875)
self.assertEqual(sensor.unit_of_measurement(), "C/m²")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_field_strength(self):
"""Test resolve state with electric_field_strength sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_field_strength",
)
sensor.sensor_value.payload = DPTArray(
(
0xC6,
0x17,
0x1C,
0x39,
)
)
self.assertEqual(sensor.resolve_state(), -9671.0556640625)
self.assertEqual(sensor.unit_of_measurement(), "V/m")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_flux(self):
"""Test resolve state with electric_flux sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="electric_flux"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x8F,
0x6C,
0xFD,
)
)
self.assertEqual(sensor.resolve_state(), 4589.62353515625)
self.assertEqual(sensor.unit_of_measurement(), "c")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_flux_density(self):
"""Test resolve state with electric_flux_density sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_flux_density",
)
sensor.sensor_value.payload = DPTArray(
(
0xC6,
0x0,
0x50,
0xA8,
)
)
self.assertEqual(sensor.resolve_state(), -8212.1640625)
self.assertEqual(sensor.unit_of_measurement(), "C/m²")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_polarization(self):
"""Test resolve state with electric_polarization sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_polarization",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xF8,
0x89,
0xC6,
)
)
self.assertEqual(sensor.resolve_state(), 7953.2216796875)
self.assertEqual(sensor.unit_of_measurement(), "C/m²")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_potential(self):
"""Test resolve state with electric_potential sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_potential",
)
sensor.sensor_value.payload = DPTArray(
(
0xC6,
0x18,
0xA4,
0xAF,
)
)
self.assertEqual(sensor.resolve_state(), -9769.1708984375)
self.assertEqual(sensor.unit_of_measurement(), "V")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electric_potential_difference(self):
"""Test resolve state with electric_potential_difference sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electric_potential_difference",
)
sensor.sensor_value.payload = DPTArray(
(
0xC6,
0xF,
0x1D,
0x6,
)
)
self.assertEqual(sensor.resolve_state(), -9159.255859375)
self.assertEqual(sensor.unit_of_measurement(), "V")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electromagnetic_moment(self):
"""Test resolve state with electromagnetic_moment sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electromagnetic_moment",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x82,
0x48,
0xAE,
)
)
self.assertEqual(sensor.resolve_state(), 4169.0849609375)
self.assertEqual(sensor.unit_of_measurement(), "A m²")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_electromotive_force(self):
"""Test resolve state with electromotive_force sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="electromotive_force",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xBC,
0xEF,
0xEB,
)
)
self.assertEqual(sensor.resolve_state(), 6045.98974609375)
self.assertEqual(sensor.unit_of_measurement(), "V")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_energy(self):
"""Test resolve state with energy sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="energy"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x4B,
0xB3,
0xF8,
)
)
self.assertEqual(sensor.resolve_state(), 3259.248046875)
self.assertEqual(sensor.unit_of_measurement(), "J")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_enthalpy(self):
"""Test resolve state with enthalpy sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="enthalpy"
)
sensor.sensor_value.payload = DPTArray(
(
0x76,
0xDD,
)
)
self.assertEqual(sensor.resolve_state(), 287866.88)
self.assertEqual(sensor.unit_of_measurement(), "H")
self.assertEqual(sensor.ha_device_class(), None)
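# The 2-byte payloads (such as the enthalpy value above) are KNX half-floats:
# value = 0.01 * mantissa * 2**exponent. Decode sketch for 0x76DD:
#     raw = (0x76 << 8) | 0xDD
#     exponent = (raw >> 11) & 0x0F   # 14
#     mantissa = raw & 0x07FF         # 1757 (sign bit is clear here)
#     0.01 * mantissa * 2 ** exponent # -> 287866.88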
def test_str_flow_rate_m3h(self):
"""Test resolve state with flow_rate_m3h sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="flow_rate_m3h"
)
sensor.sensor_value.payload = DPTArray(
(
0x99,
0xEA,
0xC0,
0x55,
)
)
self.assertEqual(sensor.resolve_state(), -1712668587)
self.assertEqual(sensor.unit_of_measurement(), "m³/h")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_force(self):
"""Test resolve state with force sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="force"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x9E,
0x2C,
0xE1,
)
)
self.assertEqual(sensor.resolve_state(), 5061.60986328125)
self.assertEqual(sensor.unit_of_measurement(), "N")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_frequency(self):
"""Test resolve state with frequency sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="frequency"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xC2,
0x3C,
0x44,
)
)
self.assertEqual(sensor.resolve_state(), 6215.533203125)
self.assertEqual(sensor.unit_of_measurement(), "Hz")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_heatcapacity(self):
"""Test resolve state with heatcapacity sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="heatcapacity"
)
sensor.sensor_value.payload = DPTArray(
(
0xC5,
0xB3,
0x56,
0x7E,
)
)
self.assertEqual(sensor.resolve_state(), -5738.8115234375)
self.assertEqual(sensor.unit_of_measurement(), "J/K")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_heatflowrate(self):
"""Test resolve state with heatflowrate sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="heatflowrate"
)
sensor.sensor_value.payload = DPTArray(
(
0x44,
0xEC,
0x80,
0x7A,
)
)
self.assertEqual(sensor.resolve_state(), 1892.014892578125)
self.assertEqual(sensor.unit_of_measurement(), "W")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_heat_quantity(self):
"""Test resolve state with heat_quantity sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="heat_quantity"
)
sensor.sensor_value.payload = DPTArray(
(
0xC5,
0xA6,
0xB6,
0xD5,
)
)
self.assertEqual(sensor.resolve_state(), -5334.85400390625)
self.assertEqual(sensor.unit_of_measurement(), "J")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_humidity(self):
"""Test resolve state with humidity sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="humidity"
)
sensor.sensor_value.payload = DPTArray(
(
0x7E,
0xE1,
)
)
self.assertEqual(sensor.resolve_state(), 577044.48)
self.assertEqual(sensor.unit_of_measurement(), "%")
self.assertEqual(sensor.ha_device_class(), "humidity")
def test_str_impedance(self):
"""Test resolve state with impedance sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="impedance"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xDD,
0x79,
0x6D,
)
)
self.assertEqual(sensor.resolve_state(), 7087.17822265625)
self.assertEqual(sensor.unit_of_measurement(), "Ω")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_illuminance(self):
"""Test resolve state with illuminance sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="illuminance"
)
sensor.sensor_value.payload = DPTArray(
(
0x7C,
0x5E,
)
)
self.assertEqual(sensor.resolve_state(), 366346.24)
self.assertEqual(sensor.unit_of_measurement(), "lx")
self.assertEqual(sensor.ha_device_class(), "illuminance")
def test_str_kelvin_per_percent(self):
"""Test resolve state with kelvin_per_percent sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="kelvin_per_percent",
)
sensor.sensor_value.payload = DPTArray(
(
0xFA,
0xBD,
)
)
self.assertEqual(sensor.resolve_state(), -441384.96)
self.assertEqual(sensor.unit_of_measurement(), "K/%")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_length(self):
"""Test resolve state with length sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="length"
)
sensor.sensor_value.payload = DPTArray(
(
0xC5,
0x9D,
0xAE,
0xC5,
)
)
self.assertEqual(sensor.resolve_state(), -5045.84619140625)
self.assertEqual(sensor.unit_of_measurement(), "m")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_length_mm(self):
"""Test resolve state with length_mm sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="length_mm"
)
sensor.sensor_value.payload = DPTArray(
(
0x56,
0xB9,
)
)
self.assertEqual(sensor.resolve_state(), 22201)
self.assertEqual(sensor.unit_of_measurement(), "mm")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_light_quantity(self):
"""Test resolve state with light_quantity sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="light_quantity"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x4A,
0xF5,
0x68,
)
)
self.assertEqual(sensor.resolve_state(), 3247.337890625)
self.assertEqual(sensor.unit_of_measurement(), "lm s")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_long_delta_timesec(self):
"""Test resolve state with long_delta_timesec sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="long_delta_timesec",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xB2,
0x17,
0x54,
)
)
self.assertEqual(sensor.resolve_state(), 1169299284)
self.assertEqual(sensor.unit_of_measurement(), "s")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_luminance(self):
"""Test resolve state with luminance sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="luminance"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0x18,
0xD9,
0x76,
)
)
self.assertEqual(sensor.resolve_state(), 2445.59130859375)
self.assertEqual(sensor.unit_of_measurement(), "cd/m²")
self.assertEqual(sensor.ha_device_class(), "illuminance")
def test_str_luminous_flux(self):
"""Test resolve state with luminous_flux sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="luminous_flux"
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xBD,
0x16,
0x9,
)
)
self.assertEqual(sensor.resolve_state(), 6050.75439453125)
self.assertEqual(sensor.unit_of_measurement(), "lm")
self.assertEqual(sensor.ha_device_class(), "illuminance")
def test_str_luminous_intensity(self):
"""Test resolve state with luminous_intensity sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="luminous_intensity",
)
sensor.sensor_value.payload = DPTArray(
(
0x46,
0xB,
0xBE,
0x7E,
)
)
self.assertEqual(sensor.resolve_state(), 8943.623046875)
self.assertEqual(sensor.unit_of_measurement(), "cd")
self.assertEqual(sensor.ha_device_class(), "illuminance")
def test_str_magnetic_field_strength(self):
"""Test resolve state with magnetic_field_strength sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="magnetic_field_strength",
)
sensor.sensor_value.payload = DPTArray(
(
0x44,
0x15,
0xF1,
0xAD,
)
)
self.assertEqual(sensor.resolve_state(), 599.7761840820312)
self.assertEqual(sensor.unit_of_measurement(), "A/m")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_magnetic_flux(self):
"""Test resolve state with magnetic_flux sensor."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="magnetic_flux"
)
sensor.sensor_value.payload = DPTArray(
(
0xC5,
0xCB,
0x3C,
0x98,
)
)
self.assertEqual(sensor.resolve_state(), -6503.57421875)
self.assertEqual(sensor.unit_of_measurement(), "Wb")
self.assertEqual(sensor.ha_device_class(), None)
def test_str_magnetic_flux_density(self):
"""Test resolve state with magnetic_flux_density sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type="magnetic_flux_density",
)
sensor.sensor_value.payload = DPTArray(
(
0x45,
0xB6,
0xBD,
0x42,
)
)
self.assertEqual(sensor.resolve_state(), 5847.6572265625)
self.assertEqual(sensor.unit_of_measurement(), "T")
from django.db import transaction
from django.db.models import Q
from rest_framework import exceptions
from rest_framework.serializers import (
CharField,
DateField,
FloatField,
IntegerField,
ListField,
PrimaryKeyRelatedField,
)
from ..core import serializers
from . import models, validators
from .jexl import QuestionJexl
class QuestionJexlField(serializers.JexlField):
def __init__(self, **kwargs):
super().__init__(QuestionJexl(), **kwargs)
class SaveFormSerializer(serializers.ModelSerializer):
class Meta:
model = models.Form
fields = ("slug", "name", "description", "meta", "is_archived", "is_published")
class CopyFormSerializer(serializers.ModelSerializer):
source = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Form.objects, required=True
)
@transaction.atomic
def create(self, validated_data):
user = self.context["request"].user
source = validated_data["source"]
validated_data["meta"] = dict(source.meta)
form = super().create(validated_data)
new_form_questions = [
models.FormQuestion(
sort=sort,
form=form,
question=form_question.question,
created_by_user=user.username,
created_by_group=user.group,
)
for sort, form_question in enumerate(
reversed(models.FormQuestion.objects.filter(form=source)), start=1
)
]
models.FormQuestion.objects.bulk_create(new_form_questions)
return form
class Meta:
model = models.Form
fields = ("slug", "name", "description", "source", "is_published")
class AddFormQuestionSerializer(serializers.ModelSerializer):
form = serializers.GlobalIDField(source="slug")
question = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Question.objects
)
def update(self, instance, validated_data):
        # The default sort is 0; since form questions are ordered by
        # descending sort by default, a newly created entry lands at the end.
_, created = models.FormQuestion.objects.get_or_create(
form=self.instance, question=validated_data["question"]
)
if created:
            # Reassign sort values starting from 1 so a newly added item with
            # sort 0 ends up at the end again (see _demo_sort_reassignment below).
for sort, question in enumerate(
self.instance.questions.all().order_by("formquestion__sort"), start=1
):
models.FormQuestion.objects.filter(
form=instance, question=question
).update(sort=sort)
return instance
class Meta:
fields = ("form", "question")
model = models.Form
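# Hedged sketch (added for clarity, not part of the original module): form
# questions are ordered by *descending* sort, so enumerating the desired order
# in reverse and assigning 1..n keeps a freshly created sort=0 entry at the end.
def _demo_sort_reassignment():
    questions = ["c", "b", "a"]  # current order, highest sort first
    sorts = {q: s for s, q in enumerate(reversed(questions), start=1)}
    assert sorts == {"a": 1, "b": 2, "c": 3}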
class RemoveFormQuestionSerializer(serializers.ModelSerializer):
form = serializers.GlobalIDField(source="slug")
question = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Question.objects
)
def update(self, instance, validated_data):
models.FormQuestion.objects.filter(
form=instance, question=validated_data["question"]
).delete()
return instance
class Meta:
fields = ("form", "question")
model = models.Form
class FormQuestionRelatedField(serializers.GlobalIDPrimaryKeyRelatedField):
def get_queryset(self):
form = self.parent.parent.instance
return form.questions.all()
class ReorderFormQuestionsSerializer(serializers.ModelSerializer):
form = serializers.GlobalIDField(source="slug")
questions = FormQuestionRelatedField(many=True)
def update(self, instance, validated_data):
questions = validated_data["questions"]
curr_questions = set(instance.questions.all())
        if len(questions) != len(curr_questions) or set(questions) - curr_questions:
raise exceptions.ValidationError(
"Input questions are not the same as current form questions"
)
for sort, question in enumerate(reversed(questions), start=1):
models.FormQuestion.objects.filter(form=instance, question=question).update(
sort=sort
)
return instance
class Meta:
fields = ("form", "questions")
model = models.Form
class CopyQuestionSerializer(serializers.ModelSerializer):
source = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Question.objects, required=True
)
@transaction.atomic
def create(self, validated_data):
user = self.context["request"].user
source = validated_data["source"]
validated_data["type"] = source.type
validated_data["is_required"] = source.is_required
validated_data["is_hidden"] = source.is_hidden
validated_data["configuration"] = dict(source.configuration)
validated_data["meta"] = dict(source.meta)
validated_data["row_form"] = source.row_form
question = super().create(validated_data)
new_question_options = [
models.QuestionOption(
sort=sort,
question=question,
option=question_option.option,
created_by_user=user.username,
created_by_group=user.group,
)
for sort, question_option in enumerate(
reversed(models.QuestionOption.objects.filter(question=source)), start=1
)
]
models.QuestionOption.objects.bulk_create(new_question_options)
return question
class Meta:
model = models.Question
fields = ("slug", "label", "source")
class SaveQuestionSerializer(serializers.ModelSerializer):
is_hidden = QuestionJexlField(required=False)
is_required = QuestionJexlField(required=False)
class Meta:
model = models.Question
fields = (
"slug",
"label",
"info_text",
"is_required",
"is_hidden",
"meta",
"is_archived",
)
class SaveTextQuestionSerializer(SaveQuestionSerializer):
max_length = IntegerField(min_value=1, required=False, allow_null=True)
def validate(self, data):
data["type"] = models.Question.TYPE_TEXT
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("max_length", "placeholder")
class SaveTextareaQuestionSerializer(SaveQuestionSerializer):
max_length = IntegerField(min_value=1, required=False, allow_null=True)
def validate(self, data):
data["type"] = models.Question.TYPE_TEXTAREA
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("max_length", "placeholder")
class SaveDateQuestionSerializer(SaveQuestionSerializer):
def validate(self, data):
data["type"] = models.Question.TYPE_DATE
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields
class SaveQuestionOptionsMixin(object):
def create_question_options(self, question, options):
user = self.context["request"].user
question_option = [
models.QuestionOption(
sort=sort,
question=question,
option=option,
created_by_user=user.username,
created_by_group=user.group,
)
for sort, option in enumerate(reversed(options), start=1)
]
models.QuestionOption.objects.bulk_create(question_option)
@transaction.atomic
def create(self, validated_data):
options = validated_data.pop("options")
instance = super().create(validated_data)
self.create_question_options(instance, options)
return instance
@transaction.atomic
def update(self, instance, validated_data):
options = validated_data.pop("options")
models.QuestionOption.objects.filter(question=instance).delete()
instance = super().update(instance, validated_data)
self.create_question_options(instance, options)
return instance
class SaveMultipleChoiceQuestionSerializer(
SaveQuestionOptionsMixin, SaveQuestionSerializer
):
options = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Option.objects.all(), many=True, required=True
)
def validate(self, data):
data["type"] = models.Question.TYPE_MULTIPLE_CHOICE
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("options",)
class SaveChoiceQuestionSerializer(SaveQuestionOptionsMixin, SaveQuestionSerializer):
options = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Option.objects.all(), many=True, required=True
)
def validate(self, data):
data["type"] = models.Question.TYPE_CHOICE
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("options",)
class SaveDynamicChoiceQuestionSerializer(SaveQuestionSerializer):
data_source = CharField()
def validate(self, data):
data["type"] = models.Question.TYPE_DYNAMIC_CHOICE
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("data_source",)
class SaveDynamicMultipleChoiceQuestionSerializer(SaveQuestionSerializer):
data_source = CharField()
def validate(self, data):
data["type"] = models.Question.TYPE_DYNAMIC_MULTIPLE_CHOICE
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("data_source",)
class SaveFloatQuestionSerializer(SaveQuestionSerializer):
min_value = FloatField(required=False, allow_null=True)
max_value = FloatField(required=False, allow_null=True)
def validate(self, data):
min_value = (
data.get("min_value")
if data.get("min_value") is not None
else float("-inf")
)
max_value = (
data.get("max_value") if data.get("max_value") is not None else float("inf")
)
if min_value > max_value:
raise exceptions.ValidationError(
f"max_value {max_value} is smaller than {min_value}"
)
data["type"] = models.Question.TYPE_FLOAT
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + (
"min_value",
"max_value",
"placeholder",
)
class SaveIntegerQuestionSerializer(SaveQuestionSerializer):
min_value = IntegerField(required=False, allow_null=True)
max_value = IntegerField(required=False, allow_null=True)
def validate(self, data):
min_value = (
data.get("min_value")
if data.get("min_value") is not None
else float("-inf")
)
max_value = (
data.get("max_value") if data.get("max_value") is not None else float("inf")
)
if min_value > max_value:
raise exceptions.ValidationError(
f"max_value {max_value} is smaller than {min_value}"
)
data["type"] = models.Question.TYPE_INTEGER
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + (
"min_value",
"max_value",
"placeholder",
)
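# Hedged sketch (added for clarity, not part of the original module): the
# float/integer validators above substitute -inf/+inf for missing bounds, so
# the min/max comparison stays well defined for partially specified ranges.
def _demo_minmax_defaults():
    data = {"max_value": 10}
    min_value = data.get("min_value") if data.get("min_value") is not None else float("-inf")
    max_value = data.get("max_value") if data.get("max_value") is not None else float("inf")
    assert not (min_value > max_value)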
class SaveTableQuestionSerializer(SaveQuestionSerializer):
row_form = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Form.objects,
required=True,
help_text=models.Question._meta.get_field("row_form").help_text,
)
def validate(self, data):
data["type"] = models.Question.TYPE_TABLE
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("row_form",)
class SaveFormQuestionSerializer(SaveQuestionSerializer):
sub_form = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Form.objects, required=True
)
def validate(self, data):
data["type"] = models.Question.TYPE_FORM
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields + ("sub_form",)
class SaveFileQuestionSerializer(SaveQuestionSerializer):
def validate(self, data):
data["type"] = models.Question.TYPE_FILE
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = SaveQuestionSerializer.Meta.fields
class SaveStaticQuestionSerializer(SaveQuestionSerializer):
static_content = CharField(required=True)
def validate(self, data):
data["type"] = models.Question.TYPE_STATIC
return super().validate(data)
class Meta(SaveQuestionSerializer.Meta):
fields = (
"label",
"slug",
"info_text",
"is_hidden",
"meta",
"is_archived",
"static_content",
)
class SaveOptionSerializer(serializers.ModelSerializer):
class Meta:
fields = ("slug", "label", "meta")
model = models.Option
class CopyOptionSerializer(serializers.ModelSerializer):
source = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Option.objects, required=True
)
def create(self, validated_data):
source = validated_data["source"]
validated_data["meta"] = dict(source.meta)
return super().create(validated_data)
class Meta:
fields = ("slug", "label", "source")
model = models.Option
class RemoveOptionSerializer(serializers.ModelSerializer):
option = serializers.GlobalIDField(source="slug")
def update(self, instance, validated_data):
models.Option.objects.filter(pk=instance).delete()
return instance
class Meta:
fields = ("option",)
model = models.Option
class DocumentSerializer(serializers.ModelSerializer):
@transaction.atomic
def create(self, validated_data):
"""
Create a document family.
Create a document for the given form, as well as
subdocuments for all the child FormQuestions, and
attach them as answers
"""
form = validated_data.get("form")
main_document = super().create(validated_data)
models.Document.objects.create_and_link_child_documents(form, main_document)
return main_document
class Meta:
model = models.Document
fields = ("id", "form", "meta")
class SaveAnswerSerializer(serializers.ModelSerializer):
def validate(self, data):
validators.AnswerValidator().validate(**data, info=self.context["info"])
return super().validate(data)
class Meta:
model = models.Answer
fields = ("question", "document", "meta", "value")
class SaveDocumentStringAnswerSerializer(SaveAnswerSerializer):
value = CharField(allow_blank=True)
class Meta(SaveAnswerSerializer.Meta):
pass
class SaveDocumentListAnswerSerializer(SaveAnswerSerializer):
value = ListField(child=CharField())
class Meta(SaveAnswerSerializer.Meta):
pass
class SaveDocumentIntegerAnswerSerializer(SaveAnswerSerializer):
value = IntegerField()
class Meta(SaveAnswerSerializer.Meta):
pass
class SaveDocumentFloatAnswerSerializer(SaveAnswerSerializer):
value = FloatField()
class Meta(SaveAnswerSerializer.Meta):
pass
class SaveDocumentDateAnswerSerializer(SaveAnswerSerializer):
value = DateField(source="date")
class Meta(SaveAnswerSerializer.Meta):
pass
def _get_document_tree(document_id):
answers = models.AnswerDocument.objects.filter(document_id=document_id).values(
"answer"
)
child_documents = models.Document.objects.filter(
Q(answers=answers) | Q(parent_answers=answers)
).distinct()
for child_document in child_documents:
yield from _get_document_tree(child_document.pk)
yield document_id
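# Hedged sketch (added for clarity, not part of the original module):
# _get_document_tree yields child document ids depth-first and the root id
# last; the same traversal on a plain adjacency dict looks like this.
def _demo_document_tree_order():
    tree = {1: [2, 3], 2: [4], 3: [], 4: []}

    def walk(doc_id):
        for child in tree[doc_id]:
            yield from walk(child)
        yield doc_id

    assert list(walk(1)) == [4, 2, 3, 1]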
class SaveDocumentTableAnswerSerializer(SaveAnswerSerializer):
value = serializers.GlobalIDPrimaryKeyRelatedField(
source="documents",
queryset=models.Document.objects,
many=True,
required=True,
help_text="List of document IDs representing the rows in the table.",
)
def validate(self, data):
documents = (
data.get("documents")
or self.instance
and self.instance.documents.all()
or []
)
question = data.get("question") or self.instance and self.instance.question
for document in documents:
if document.form_id != question.row_form_id:
raise exceptions.ValidationError(
f"Document {document.pk} is not of form type {question.form.pk}."
)
return super().validate(data)
def create_answer_documents(self, answer, documents):
family = answer.document.family
document_ids = [document.pk for document in documents]
for sort, document_id in enumerate(reversed(document_ids), start=1):
models.AnswerDocument.objects.create(
answer=answer, document_id=document_id, sort=sort
)
# attach document answers to root document family
models.Document.objects.filter(
family__in=models.Document.objects.filter(pk__in=document_ids).values(
"family"
)
).update(family=family)
@transaction.atomic
def create(self, validated_data):
documents = validated_data.pop("documents")
instance = super().create(validated_data)
self.create_answer_documents(instance, documents)
return instance
@transaction.atomic
def update(self, instance, validated_data):
documents = validated_data.pop("documents")
# detach answers to its own family tree
answer_documents = models.AnswerDocument.objects.filter(answer=instance)
for answer_document in models.Document.objects.filter(
pk__in=answer_documents.values("document")
):
children = _get_document_tree(answer_document.pk)
models.Document.objects.filter(pk__in=children).update(
family=answer_document.pk
)
answer_documents.delete()
instance = super().update(instance, validated_data)
self.create_answer_documents(instance, documents)
return instance
class Meta(SaveAnswerSerializer.Meta):
pass
class SaveDocumentFormAnswerSerializer(SaveAnswerSerializer):
value = serializers.GlobalIDPrimaryKeyRelatedField(
queryset=models.Document.objects,
required=True,
help_text="Document IDs representing the content of the form.",
source="value_document",
)
def validate(self, data):
document = (
data.get("value_document") or self.instance and self.instance.value_document
)
question = data.get("question") or self.instance and self.instance.question
if document.form_id != question.sub_form_id:
raise exceptions.ValidationError(
f"Document {document.pk} is of form type {document.form_id}, but should be of type {question.sub_form_id}."
)
return super().validate(data)
def set_family(self, answer, document):
family = answer.document.family
# attach document answers to root document family
models.Document.objects.filter(
family=models.Document.objects.get(pk=document.id).family
).update(family=family)
@transaction.atomic
def create(self, validated_data):
        document = validated_data.get("value_document")
instance = super().create(validated_data)
self.set_family(instance, document)
return instance
@transaction.atomic
def update(self, instance, validated_data):
value_document = validated_data.get("value_document")
# detach current answer document to its own family tree
answer_document = instance.value_document
if answer_document:
children = _get_document_tree(answer_document.pk)
models.Document.objects.filter(pk__in=children).update(
family=answer_document.pk
)
instance = super().update(instance, validated_data)
self.set_family(instance, value_document)
return instance
class Meta(SaveAnswerSerializer.Meta):
pass
class SaveDocumentFileAnswerSerializer(SaveAnswerSerializer):
value = CharField(write_only=True, source="file")
value_id = PrimaryKeyRelatedField(read_only=True, source="file")
shape[axis] = mult.shape[axis]
mult = mult.reshape(shape)
return (digits * mult).sum(axis=axis, keepdims=keepdims)
def numbers_to_digits(numbers, n_digits, base=10):
""" Convert number to array of digits, assumed little-endian. """
numbers = numbers.copy()
digits = []
for i in range(n_digits):
digits.append(numbers % base)
numbers //= base
return np.stack(digits, -1)
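# Hedged sketch (added for clarity, not part of the original module): digits
# come out little-endian, least significant digit first.
def _demo_numbers_to_digits():
    digits = numbers_to_digits(np.array([302]), n_digits=3)
    assert digits.tolist() == [[2, 0, 3]]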
def pformat(v):
return pretty(v)
def _list_repr_pretty(lst, p, cycle):
if cycle:
p.text('[...]')
else:
with p.group(4, '['):
p.breakable('')
for idx, item in enumerate(lst):
if idx:
p.text(',')
p.breakable()
p.pretty(item)
p.breakable('')
p.text(']')
for_type(list, _list_repr_pretty)
def _ndarray_repr_pretty(a, p, cycle):
if cycle:
p.text(f'ndarray(..., shape={a.shape}, dtype={a.dtype})')
else:
with p.group(4, 'ndarray('):
p.breakable('')
x = str(a)
for idx, line in enumerate(x.split('\n')):
if idx:
p.breakable()
p.text(line)
p.text(',')
p.breakable()
p.text(f'shape={a.shape}, dtype={a.dtype}')
p.breakable('')
p.text(')')
for_type(np.ndarray, _ndarray_repr_pretty)
def _named_dict_repr_pretty(name, d, p, cycle):
if cycle:
p.text(f'{name}(...)')
else:
with p.group(4, f'{name}({{'):
p.breakable('')
for idx, (k, v) in enumerate(sorted(d.items())):
if idx:
p.text(',')
p.breakable()
p.pretty(k)
p.text(': ')
p.pretty(v)
p.breakable('')
p.text('})')
NotSupplied = object()
class Param:
def __init__(self, default=NotSupplied, aliases=None, help="", type=None):
""" aliases are different ways to fill the value (i.e. from config or kwargs),
but should not be used to access the value as a class attribute. """
self.default = default
if isinstance(aliases, str):
aliases = aliases.split()
self.aliases = aliases or []
self.help = help
self.type = type
class Parameterized:
""" An object that can have `Param` class attributes. These class attributes will be
turned into instance attributes at instance creation time. To set a value for the instance
attributes, we perform the following checks (the first value that is found is used):
1. check the kwargs passed into class constructor for a value with key "<param-name>"
2. check dps.cfg for values with keys of the form "<class-name>:<param-name>". `class-name`
can be the class of the object or any base class thereof. More derived/specific classes
will be checked first (specifically, we iterate through classes in order of the MRO).
3. check dps.cfg for values with name "<param-name>"
4. fallback on the param's default value, if one was supplied.
If no value is found by this point, an AttributeError is raised.
A note on deep copies: at instance creation time, we store values for all the parameters.
Deep copies are created by creating a new object using those creation-time parameter values.
"""
_resolved = False
def __new__(cls, *args, **kwargs):
obj = super(Parameterized, cls).__new__(cls)
obj._resolve_params(**kwargs)
# Stored for copying purposes, to get parameter as they are before __init__ is called.
obj._params_at_creation_time = obj.param_values()
return obj
def __getnewargs_ex__(self):
""" Required for pickling to work properly, ensures that when __new__ is called, the parameters are all present.
"""
return ((), self.param_values())
def __init__(self, *args, **kwargs):
pass
def __str__(self):
return repr(self)
def __repr__(self):
return pformat(self)
def _repr_pretty_(self, p, cycle):
d = self.param_values()
_named_dict_repr_pretty(self.__class__.__name__, d, p, cycle)
@classmethod
def _get_param_value(cls, name, param, kwargs):
aliases = list([name] + param.aliases)
# Check kwargs
for alias in aliases:
value = kwargs.get(alias, NotSupplied)
if value is not NotSupplied:
return value
# Check cfg with class name label
for _cls in cls.__mro__:
for alias in aliases:
key = _cls.__name__ + ":" + alias
value = getattr(dps.cfg, key, NotSupplied)
if value is not NotSupplied:
return value
# Check cfg
for alias in aliases:
value = getattr(dps.cfg, alias, NotSupplied)
if value is not NotSupplied:
return value
        # Fall back on the default value; at this point nothing was found in
        # kwargs or the config.
        if param.default is not NotSupplied:
            return param.default
        raise AttributeError(
            "Could not find value for parameter `{}` for class `{}` "
            "in either kwargs or config, and no default was provided.".format(
                name, cls.__name__))
def _resolve_params(self, **kwargs):
if not self._resolved:
for k, v in self._capture_param_values(**kwargs).items():
setattr(self, k, v)
self._resolved = True
@classmethod
def _capture_param_values(cls, **kwargs):
""" Return the params that would be created if an object of the
current class were constructed in the current context with the given kwargs. """
param_values = dict()
for name in cls.param_names():
param = getattr(cls, name)
value = cls._get_param_value(name, param, kwargs)
if param.type is not None:
value = param.type(value)
param_values[name] = value
return param_values
@classmethod
def param_names(cls):
params = []
for p in dir(cls):
try:
if p != 'params' and isinstance(getattr(cls, p), Param):
params.append(p)
except Exception:
pass
return params
def param_values(self, original=False):
if not self._resolved:
raise Exception("Parameters have not yet been resolved.")
if original:
return {k: self._params_at_creation_time[k] for k in self._params_at_creation_time}
else:
return {n: getattr(self, n) for n in self.param_names()}
def __deepcopy__(self, memo):
cls = self.__class__
kwargs = self._params_at_creation_time
result = cls.__new__(cls, **kwargs)
result.__init__(**kwargs)
memo[id(self)] = result
return result
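# Hedged usage sketch (added for clarity, not part of the original module):
# kwargs resolve first, so dps.cfg is only consulted for parameters left
# unspecified; the default fallback assumes dps.cfg is importable, as in the
# surrounding module.
def _demo_parameterized_resolution():
    class Worker(Parameterized):
        n_jobs = Param(default=1, aliases="jobs")
    assert Worker(jobs=4).n_jobs == 4  # resolved through the "jobs" alias
    assert Worker().n_jobs == 1        # falls back to the declared default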
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du', '-sh', str(path)]).split()[0].decode('utf-8')
class launch_pdb_on_exception:
def __init__(self, context=11):
self.context = context
def __enter__(self):
pass
def __exit__(self, type_, value, tb):
from bdb import BdbQuit
if type_ and type_ is not BdbQuit:
traceback.print_exc()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
ipdb_postmortem(tb, self.context)
return True
def ipdb_postmortem(tb=None, context=10):
from ipdb.__main__ import _init_pdb, wrap_sys_excepthook
wrap_sys_excepthook()
p = _init_pdb(context)
p.reset()
if tb is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
tb = sys.exc_info()[2]
if tb:
p.interaction(None, tb)
def camel_to_snake(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
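# Hedged sketch (added for clarity, not part of the original module): the two
# regex passes above handle acronym tails and lower/upper boundaries.
def _demo_camel_to_snake():
    assert camel_to_snake("CamelCaseHTTPServer") == "camel_case_http_server"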
def process_path(path, real_path=False):
path = os.path.expandvars(os.path.expanduser(str(path)))
if real_path:
path = os.path.realpath(path)
return path
def path_stem(path):
no_ext = os.path.splitext(path)[0]
return os.path.basename(no_ext)
@contextmanager
def catch(exception_types, action=None):
""" A try-except block as a context manager. """
try:
yield
except exception_types as e:
if isinstance(action, str):
print(action)
elif action:
action(e)
class Alarm(BaseException):
pass
def raise_alarm(*args, **kwargs):
raise Alarm("Raised by `raise_alarm`.")
class time_limit(object):
""" Example use:
with time_limit(seconds=5) as tl:
while True:
pass
if tl.ran_out:
print("Time ran out.")
"""
_stack = []
def __init__(self, seconds, verbose=False, timeout_callback=None):
self.seconds = seconds
self.verbose = verbose
self.ran_out = False
self.timeout_callback = timeout_callback
def __str__(self):
return (
"time_limit(seconds={}, verbose={}, ran_out={}, "
"timeout_callback={})".format(
self.seconds, self.verbose, self.ran_out, self.timeout_callback))
def __enter__(self):
if time_limit._stack:
raise Exception(
"Only one instance of `time_limit` may be active at once. "
"Another time_limit instance {} was already active.".format(
time_limit._stack[0]))
self.old_handler = signal.signal(signal.SIGALRM, raise_alarm)
if self.seconds <= 0:
raise_alarm("Didn't get started.")
if not np.isinf(self.seconds):
signal.alarm(int(np.floor(self.seconds)))
self.then = time.time()
time_limit._stack.append(self)
return self
def __exit__(self, exc_type, exc, exc_tb):
self.elapsed_time = time.time() - self.then
signal.signal(signal.SIGALRM, self.old_handler)
time_limit._stack.pop()
if exc_type is Alarm:
self.ran_out = True
if self.verbose:
print("Block ran for {} seconds (limit was {}).".format(
self.elapsed_time, self.seconds))
if self.timeout_callback:
self.timeout_callback(self)
return True
else:
signal.alarm(0) # Cancel the alarm.
return False
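# Hedged usage sketch (added for clarity, not part of the original module).
# SIGALRM-based, so it only works in the main thread on POSIX; note that
# sub-second limits floor to alarm(0), which disables the alarm entirely.
def _demo_time_limit():
    with time_limit(seconds=1) as tl:
        while True:
            pass
    assert tl.ran_out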
def timed_func(func):
@wraps(func)
def f(*args, **kwargs):
with timed_block(func.__name__):
return func(*args, **kwargs)
return f
_timer_stats = {}
@contextmanager
def timed_block(name=None, flag=True, record_stats=False, reset_stats=False):
if name is None:
frame = inspect.stack()[1]
name = "{}:{}".format(frame.filename, frame.lineno)
start_time = time.time()
yield
duration = time.time() - start_time
if record_stats:
if name not in _timer_stats or reset_stats:
_timer_stats[name] = RunningStats()
_timer_stats[name].add(duration)
if flag:
print(f"Call to block <{name}> took {duration} seconds.")
if record_stats:
mean, var, mn, mx = _timer_stats[name].get_stats()
std = np.sqrt(var)
print(f"Duration stats - mean={mean}, std={std}, mn={mn}, mx={mx}")
# From py.test
class KeywordMapping(object):
""" Provides a local mapping for keywords.
Can be used to implement user-friendly name selection
using boolean expressions.
names=[orange], pattern = "ora and e" -> True
names=[orange], pattern = "orang" -> True
names=[orange], pattern = "orane" -> False
names=[orange], pattern = "ora and z" -> False
names=[orange], pattern = "ora or z" -> True
Given a list of names, map any string that is a substring
of one of those names to True.
``names`` are the things we are trying to select, ``pattern``
is the thing we are using to select them. Note that supplying
multiple names does not mean "apply the pattern to each one
separately". Rather, we are selecting the list as a whole,
which doesn't seem that useful. The different names should be
thought of as different names for a single object.
"""
def __init__(self, names):
self._names = names
def __getitem__(self, subname):
if subname == "_":
return True
for name in self._names:
if subname in name:
return True
return False
def eval(self, pattern):
return eval(pattern, {}, self)
@staticmethod
def batch(batch, pattern):
""" Apply a single pattern to a batch of names. """
return [KeywordMapping([b]).eval(pattern) for b in batch]
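# Hedged sketch (added for clarity, not part of the original module): patterns
# are evaluated as boolean expressions over substring membership.
def _demo_keyword_mapping():
    km = KeywordMapping(["orange"])
    assert km.eval("ora and e")
    assert not km.eval("ora and z")
    assert KeywordMapping.batch(["orange", "apple"], "app or ora") == [True, True]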
class SigTerm(Exception):
pass
class NumpySeed(object):
def __init__(self, seed):
if seed < 0:
seed = None
        self.seed = seed
"""
Removes a template.
Returns:
dict:
"""
return self._router_request(
self._make_request_data(
'deleteTemplate',
dict(uid=self.uid),
)
)
def get_data_sources(self):
"""
Gets data sources configured for a template.
Returns:
list(ZenossDataSource):
"""
ds_data = self._router_request(
self._make_request_data(
'getDataSources',
dict(uid=self.uid),
)
)
datasources = []
for ds in ds_data['data']:
datasources.append(
ZenossDataSource(
self.api_url,
self.api_headers,
self.ssl_verify,
ds
)
)
return datasources
def list_data_sources(self):
"""
        Returns data sources configured for a template as a list.
Returns:
list(str):
"""
ds_data = self._router_request(
self._make_request_data(
'getDataSources',
dict(uid=self.uid),
)
)
datasources = []
for ds in ds_data['data']:
datasources.append(ds['uid'].replace('/zport/dmd/', '', 1))
return datasources
def get_data_source(self, datasource):
"""
Get a particular data source.
Arguments:
datasource (str): Name of the data source to get
Returns:
ZenossDataSource:
"""
datasource_uid = '{}/datasources/{}'.format(self.uid, datasource)
return self._get_data_source_by_uid(datasource_uid)
def add_data_source(self, datasource, type):
"""
Adds a data source to a template.
Arguments:
datasource (str): Name of the new data source
type (str): Type of the new data source, must be one of the types
returned by get_data_source_types()
Returns:
ZenossDataSource:
"""
response_data = self._router_request(
self._make_request_data(
'addDataSource',
dict(
templateUid=self.uid,
name=datasource,
type=type,
)
)
)
return self._get_data_source_by_uid(
'{}/datasources/{}'.format(self.uid, datasource)
)
def delete_data_source(self, datasource):
"""
Deletes a data source from a template.
Arguments:
            datasource (str): Name of the data source to remove
Returns:
dict:
"""
datasource_uid = '{}/datasources/{}'.format(self.uid, datasource)
return self._router_request(
self._make_request_data(
'deleteDataSource',
dict(uid=datasource_uid),
)
)
def get_data_points(self):
"""
Get all the data points in a template.
Returns:
list(ZenossDataPoint):
"""
dp_data = self._router_request(
self._make_request_data(
'getDataPoints',
dict(uid=self.uid)
)
)
datapoints = []
for dp in dp_data['data']:
datapoints.append(
ZenossDataPoint(
self.api_url,
self.api_headers,
self.ssl_verify,
dp
)
)
return datapoints
def list_data_points(self):
"""
Returns all the data points in a template as a list.
Returns:
list(str):
"""
dp_data = self._router_request(
self._make_request_data(
'getDataPoints',
dict(uid=self.uid)
)
)
datapoints = []
for dp in dp_data['data']:
datapoints.append(dp['uid'].replace('/zport/dmd/', '', 1))
return datapoints
def get_thresholds(self):
"""
Gets the thresholds of a template.
Returns:
list(ZenossThresholds):
"""
threshold_data = self._router_request(
self._make_request_data(
'getThresholds',
dict(uid=self.uid),
)
)
thresholds = []
for t in threshold_data['data']:
thresholds.append(
ZenossThreshold(
self.api_url,
self.api_headers,
self.ssl_verify,
t
)
)
return thresholds
def list_thresholds(self):
"""
Returns the thresholds of a template as a list.
Returns:
list(str):
"""
threshold_data = self._router_request(
self._make_request_data(
'getThresholds',
dict(uid=self.uid),
)
)
thresholds = []
for t in threshold_data['data']:
thresholds.append(t['uid'].replace('/zport/dmd/', '', 1))
return thresholds
def get_threshold(self, threshold):
"""
Get a particular threshold.
Arguments:
threshold (str): Name of the threshold to get details on
Returns:
ZenossThreshold:
"""
threshold_uid = '{}/thresholds/{}'.format(self.uid, threshold)
return self._get_threshold_by_uid(threshold_uid)
def add_threshold(self, threshold, threshold_type, datapoints):
"""
Adds a threshold to a template.
Arguments:
threshold (str): Name of the new threshold
threshold_type (str): Type of the new threshold, must be one of the types
returned by get_threshold_types()
datapoints (list): List of datapoints to select for the threshold
Returns:
ZenossThreshold:
"""
if not isinstance(datapoints, list):
raise ZenossAPIClientError('Type error: datapoints to add to a threshold must be a list')
response_data = self._router_request(
self._make_request_data(
'addThreshold',
dict(
uid=self.uid,
thresholdId=threshold,
thresholdType=threshold_type,
dataPoints=datapoints,
)
)
)
return self._get_threshold_by_uid(
'{}/thresholds/{}'.format(self.uid, threshold)
)
def delete_threshold(self, threshold):
"""
Deletes a threshold.
Arguments:
threshold (str): Name of the threshold to remove
Returns:
dict:
"""
threshold_uid = '{}/thresholds/{}'.format(self.uid, threshold)
return self._router_request(
self._make_request_data(
'removeThreshold',
dict(uid=threshold_uid),
)
)
def get_graphs(self):
"""
Get the graphs defined for a template.
Returns:
list(ZenossGraph):
"""
graphs_data = self._router_request(
self._make_request_data(
'getGraphs',
dict(uid=self.uid),
)
)
graphs = []
for g in graphs_data:
graphs.append(
ZenossGraph(
self.api_url,
self.api_headers,
self.ssl_verify,
g
)
)
return graphs
def list_graphs(self):
"""
Returns the graphs defined for a template as a list.
Returns:
list(str):
"""
graphs_data = self._router_request(
self._make_request_data(
'getGraphs',
dict(uid=self.uid),
)
)
graphs = []
for g in graphs_data:
graphs.append(g['uid'].replace('/zport/dmd/', '', 1))
return graphs
def get_graph(self, graph):
"""
Get a particular graph.
Arguments:
graph (str): Name of the graph to get the definition of
Returns:
ZenossGraph:
"""
graph_uid = '{}/graphDefs/{}'.format(self.uid, graph)
return self._get_graph_by_uid(graph_uid)
def add_graph(self, graph):
"""
Add a new graph to a template.
Arguments:
graph (str): Name for the new graph
Returns:
ZenossGraph:
"""
response_data = self._router_request(
self._make_request_data(
'addGraphDefinition',
dict(
templateUid=self.uid,
graphDefinitionId=graph,
)
)
)
return self._get_graph_by_uid(
'{}/graphDefs/{}'.format(self.uid, graph)
)
def delete_graph(self, graph):
"""
Delete a particular graph.
Arguments:
graph (str): The name of the graph to delete.
Returns:
dict:
"""
graph_uid = '{0}/graphDefs/{1}'.format(self.uid, graph)
return self._router_request(
self._make_request_data(
'deleteGraphDefinition',
data=dict(
uid=graph_uid,
)
)
)
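# Hedged usage sketch (added for clarity, not part of the original module):
# assumes an authenticated template object from this client; invoking it
# performs live API calls against Zenoss.
def _demo_template_graph_roundtrip(template):
    template.add_graph('demoGraph')
    assert any(g.endswith('/graphDefs/demoGraph') for g in template.list_graphs())
    template.delete_graph('demoGraph')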
class ZenossDataSource(TemplateRouter):
"""
Class for Zenoss template data sources
"""
def __init__(self, url, headers, ssl_verify, ds_data):
super(ZenossDataSource, self).__init__(url, headers, ssl_verify)
uid = ds_data.pop('uid')
self.uid = uid.replace('/zport/dmd/', '', 1)
self.name = ds_data.pop('name')
self.id = ds_data.pop('id')
self.eventClass = ds_data.pop('eventClass')
self.eventKey = ds_data.pop('eventKey')
self.severity = ds_data.pop('severity')
self.type = ds_data.pop('type')
self.component = ds_data.pop('component')
self.properties = ds_data
self.parent = self.uid.split('/datasources/')[0]
def delete(self):
"""
Deletes a data source from a template.
Returns:
dict:
"""
return self._router_request(
self._make_request_data(
'deleteDataSource',
dict(uid=self.uid),
)
)
def get_data_points(self):
"""
Get all the data points for a datasource.
Returns:
list(ZenossDataPoint):
"""
dp_data = self._router_request(
self._make_request_data(
'getDataPoints',
dict(uid=self.parent)
)
)
datapoints = []
for dp in dp_data['data']:
if dp['name'].startswith(self.name):
datapoints.append(
ZenossDataPoint(
self.api_url,
self.api_headers,
self.ssl_verify,
dp
)
)
return datapoints
def list_data_points(self):
"""
Returns all the data points for a datasource as a list.
Returns:
list(str):
"""
dp_data = self._router_request(
self._make_request_data(
'getDataPoints',
dict(uid=self.parent)
)
)
datapoints = []
for dp in dp_data['data']:
if dp['name'].startswith(self.name):
datapoints.append(dp['uid'].replace('/zport/dmd/', '', 1))
return datapoints
def get_data_point(self, datapoint):
"""
Get a particular data point.
Arguments:
datapoint (str): Name of the data point to get details for
Returns:
ZenossDataPoint:
"""
datapoint_uid = '{}/datapoints/{}'.format(self.uid, datapoint)
return self._get_data_point_by_uid(datapoint_uid)
def add_data_point(self, datapoint):
"""
Adds a data point to a data source.
Arguments:
datapoint (str): Name of the new data point
Returns:
ZenossDataPoint:
"""
response_data = self._router_request(
self._make_request_data(
'addDataPoint',
dict(
dataSourceUid=self.uid,
name=datapoint,
)
)
)
return self._get_data_point_by_uid(
'{}/datapoints/{}'.format(self.uid, datapoint)
)
def delete_data_point(self, datapoint):
"""
Deletes a data point from a template.
Arguments:
datapoint (str): Name of the data point to remove
Returns:
dict:
"""
datapoint_uid = '{}/datapoints/{}'.format(self.uid, datapoint)
return self._router_request(
self._make_request_data(
'deleteDataPoint',
dict(uid=datapoint_uid),
)
)
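# Hedged offline sketch (added for clarity, not part of the original module):
# shows how ZenossDataSource derives its uid and parent template path from a
# raw API payload; the keys mirror the attributes popped in __init__, and no
# network calls are made, assuming TemplateRouter.__init__ only stores the
# connection settings.
def _demo_datasource_uid_parsing():
    ds_data = dict(
        uid='/zport/dmd/Devices/Server/rrdTemplates/Device/datasources/sysUpTime',
        name='sysUpTime', id='sysUpTime', eventClass='/Cmd/Fail', eventKey='',
        severity=3, type='SNMP', component='',
    )
    ds = ZenossDataSource('https://zenoss.example.com', {}, False, ds_data)
    assert ds.uid == 'Devices/Server/rrdTemplates/Device/datasources/sysUpTime'
    assert ds.parent == 'Devices/Server/rrdTemplates/Device'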
class ZenossDataPoint(TemplateRouter):
"""
Class for Zenoss data points
"""
def __init__(self, url, headers, ssl_verify, dp_data):
super(ZenossDataPoint, self).__init__(url, headers, ssl_verify)
self.isrow = dp_data['isrow']
self.leaf = dp_data['leaf']
self.description = dp_data['description']
self.rrdmin = dp_data['rrdmin']
self.name = dp_data['name']
self.rate = dp_data['rate']
self.newId = dp_data['newId']
self.createCmd = dp_data['createCmd']
self.rrdtype = dp_data['rrdtype']
self.rrdmax = dp_data['rrdmax']
self.aliases = dp_data['aliases']
self.type = dp_data['type']
self.id = dp_data['id']
self.uid = dp_data['uid'].replace('/zport/dmd/', '', 1)
self.parent = self.uid.split('/datapoints/')[0]
def set_threshold(self, threshold, threshold_type):
"""
Adds a threshold for the data point
Arguments:
threshold (str): Name of the threshold to add
threshold_type (str): Type of the new threshold, must be one of the
types returned by get_threshold_types()
Returns:
ZenossThreshold:
"""
template = self.uid.split('/datasources/')[0]
response_data = self._router_request(
self._make_request_data(
'addThreshold',
dict(
uid=template,
thresholdId=threshold,
thresholdType=threshold_type,
dataPoints=[self.uid],
)
)
)
return self._get_threshold_by_uid(
            '{}/thresholds/{}'.format(template, threshold)
)
def make_counter(self):
"""
Sets the RRD Type of the data point to COUNTER
Returns:
bool:
"""
self.set_properties(dict(rrdtype='COUNTER'))
self.rrdtype = 'COUNTER'
return True
def make_gauge(self):
"""
Sets the RRD Type of the data point to GAUGE
Returns:
bool:
"""
self.set_properties(dict(rrdtype='GAUGE'))
self.rrdtype = 'GAUGE'
return True
def add_to_graph(self, graph, include_thresholds=False):
"""
Adds a data point to a graph.
Arguments:
graph (str): Name of the graph to add the data point to
include_thresholds (bool): Set to True to include the related
thresholds for the data point
Returns:
dict:
"""
graph_uid = '{0}/graphDefs/{1}'.format(self.uid.split('/datasources/')[0], graph)
return self.add_data_point_to_graph(self.uid, graph_uid, include_thresholds)
def delete(self):
"""
Deletes a data point from a template.
Returns:
dict:
"""
return self._router_request(
self._make_request_data(
'deleteDataPoint',
dict(uid=self.uid),
)
)
class ZenossThreshold(TemplateRouter):
"""
Class for Zenoss thresholds
"""
def __init__(self, url, headers, ssl_verify, threshold_data):
super(ZenossThreshold, self).__init__(url, headers, ssl_verify)
self.description = threshold_data.pop('description')
self.enabled = threshold_data.pop('enabled')
self.name = threshold_data.pop('name')
self.dataPoints = threshold_data.pop('dataPoints')
self.eventClass = threshold_data.pop('eventClass')
self.type = threshold_data.pop('type')
self.id = threshold_data.pop('id')
self.severity = threshold_data.pop('severity')
uid = threshold_data.pop('uid')
self.uid = uid.replace('/zport/dmd/', '', 1)
self.parent = self.uid.split('/thresholds/')[0]
self.properties = threshold_data
def set_max(self, maxval):
"""
Sets the threshold value for a MinMaxThreshold checking the max
value of a data point.
Arguments:
maxval (str): Maximum value for the data point before alerting
Returns:
bool:
"""
self.set_properties(dict(maxval=maxval))
self.properties['maxval'] = maxval
return True
def set_min(self, minval):
"""
Sets the threshold value for a MinMaxThreshold checking the minimum
        value of a data point.
        Arguments:
            minval (str): Minimum value for the data point before alerting
        Returns:
            bool:
        """
        self.set_properties(dict(minval=minval))
        self.properties['minval'] = minval
        return True
# don't want to modify the original (there's a big
# chance that we break it).
# Copy it every time, because the source file might
# have changed since last time we ran.
assert file.filename.exists(), "File doesn't exist: %s" % file.filename
tmpfile = Filename.temporary('', "p3d_" + file.filename.getBasename())
tmpfile.setBinary()
file.filename.copyTo(tmpfile)
file.filename = tmpfile
file.deleteTemp = True
# Alter the dependencies to have a relative path rather than absolute
for filename in framework_deps:
loc = self.__locateFrameworkLibrary(filename)
if loc == file.filename:
os.system('install_name_tool -id "%s" "%s"' % (os.path.basename(filename), file.filename.toOsSpecific()))
elif "/System/" in loc.toOsSpecific():
# Let's keep references to system frameworks absolute
os.system('install_name_tool -change "%s" "%s" "%s"' % (filename, loc.toOsSpecific(), file.filename.toOsSpecific()))
else:
os.system('install_name_tool -change "%s" "%s" "%s"' % (filename, os.path.basename(filename), file.filename.toOsSpecific()))
def __addImplicitDependenciesOSX(self):
""" Walks through the list of files, looking for dylib's
and executables that might include implicit dependencies
on other dylib's. Tries to determine those dependencies,
and adds them back into the filelist. """
# We walk through the list as we modify it. That's OK,
# because we want to follow the transitive closure of
# dependencies anyway.
for file in self.files:
if not file.executable:
continue
if file.isExcluded(self):
# Skip this file.
continue
origFilename = Filename(file.filename)
tempFile = Filename.temporary('', 'p3d_', '.txt')
command = '/usr/bin/otool -arch all -L "%s" >"%s"' % (
origFilename.toOsSpecific(),
tempFile.toOsSpecific())
if self.arch:
arch = self.arch
if arch == "amd64":
arch = "x86_64"
command = '/usr/bin/otool -arch %s -L "%s" >"%s"' % (
arch,
origFilename.toOsSpecific(),
tempFile.toOsSpecific())
exitStatus = os.system(command)
if exitStatus != 0:
self.notify.warning('Command failed: %s' % (command))
filenames = None
if tempFile.exists():
filenames = self.__parseDependenciesOSX(tempFile)
tempFile.unlink()
if filenames is None:
self.notify.warning("Unable to determine dependencies from %s" % (origFilename))
continue
# Attempt to resolve the dependent filename relative
# to the original filename, before we resolve it along
# the PATH.
path = DSearchPath(Filename(origFilename.getDirname()))
# Find the dependencies that are referencing a framework
framework_deps = []
for filename in filenames:
if '.framework/' in filename:
framework_deps.append(filename)
if len(framework_deps) > 0:
# Fixes dependencies like @executable_path/../Library/Frameworks/Cg.framework/Cg
self.__alterFrameworkDependencies(file, framework_deps)
for filename in filenames:
if '@loader_path' in filename:
filename = filename.replace('@loader_path', origFilename.getDirname())
            if False and '.framework/' in filename:  # branch disabled via the False guard
# It references a framework, and besides the fact
# that those often contain absolute paths, they
# aren't commonly on the library path either.
filename = self.__locateFrameworkLibrary(filename)
filename.setBinary()
else:
# It's just a normal library - find it on the path.
filename = Filename.fromOsSpecific(filename)
filename.setBinary()
if filename.isLocal():
filename.resolveFilename(path)
else:
# It's a fully-specified filename; look
# for it under the system root first.
if self.packager.systemRoot:
f2 = Filename(self.packager.systemRoot, filename)
if f2.exists():
filename = f2
# Skip libraries and frameworks in system directory
if "/System/" in filename.toOsSpecific():
continue
newName = Filename(file.dependencyDir, filename.getBasename())
self.addFile(filename, newName = str(newName),
explicit = False, executable = True)
def __parseDependenciesOSX(self, tempFile):
""" Reads the indicated temporary file, the output from
otool -L, to determine the list of dylibs this
executable file depends on. """
        with open(tempFile.toOsSpecific(), 'r') as otoolOutput:
            lines = otoolOutput.readlines()
filenames = []
for line in lines:
if not line[0].isspace():
continue
line = line.strip()
s = line.find(' (compatibility')
if s != -1:
line = line[:s]
else:
s = line.find('.dylib')
if s != -1:
line = line[:s + 6]
else:
continue
filenames.append(line)
return filenames
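    # Hedged note (added for clarity, not in the original source): a typical
    # "otool -L" line looks like
    #     /usr/lib/libz.1.dylib (compatibility version 1.0.0, current version 1.2.11)
    # so the parser above keeps only the indented lines and truncates each at
    # ' (compatibility', falling back to the '.dylib' suffix when that note
    # is absent.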
def __readAndStripELF(self, file):
""" Reads the indicated ELF binary, and returns a list with
dependencies. If it contains data that should be stripped,
it writes the stripped library to a temporary file. Returns
None if the file failed to read (e.g. not an ELF file). """
# Read the first 16 bytes, which identify the ELF file.
elf = open(file.filename.toOsSpecific(), 'rb')
try:
ident = elf.read(16)
except IOError:
elf.close()
return None
        if not ident.startswith(b"\177ELF"):
            # No elf magic! Beware of orcs.
            elf.close()
            return None
# Make sure we read in the correct endianness and integer size
byteOrder = "<>"[ord(ident[5:6]) - 1]
elfClass = ord(ident[4:5]) - 1 # 0 = 32-bits, 1 = 64-bits
headerStruct = byteOrder + ("HHIIIIIHHHHHH", "HHIQQQIHHHHHH")[elfClass]
sectionStruct = byteOrder + ("4xI8xIII8xI", "4xI16xQQI12xQ")[elfClass]
dynamicStruct = byteOrder + ("iI", "qQ")[elfClass]
type, machine, version, entry, phoff, shoff, flags, ehsize, phentsize, phnum, shentsize, shnum, shstrndx \
= struct.unpack(headerStruct, elf.read(struct.calcsize(headerStruct)))
dynamicSections = []
stringTables = {}
# Seek to the section header table and find the .dynamic section.
elf.seek(shoff)
for i in range(shnum):
type, offset, size, link, entsize = struct.unpack_from(sectionStruct, elf.read(shentsize))
if type == 6 and link != 0: # DYNAMIC type, links to string table
dynamicSections.append((offset, size, link, entsize))
stringTables[link] = None
# Read the relevant string tables.
for idx in stringTables.keys():
elf.seek(shoff + idx * shentsize)
type, offset, size, link, entsize = struct.unpack_from(sectionStruct, elf.read(shentsize))
if type != 3: continue
elf.seek(offset)
stringTables[idx] = elf.read(size)
# Loop through the dynamic sections and rewrite it if it has an rpath/runpath.
rewriteSections = []
filenames = []
rpath = []
for offset, size, link, entsize in dynamicSections:
elf.seek(offset)
data = elf.read(entsize)
tag, val = struct.unpack_from(dynamicStruct, data)
newSectionData = b""
startReplace = None
pad = 0
# Read tags until we find a NULL tag.
while tag != 0:
if tag == 1: # A NEEDED entry. Read it from the string table.
filenames.append(stringTables[link][val : stringTables[link].find(b'\0', val)])
                elif tag == 15 or tag == 29:
                    # An RPATH or RUNPATH entry: record its paths and mark this
                    # span of the section for removal.
                    rpath += stringTables[link][val : stringTables[link].find(b'\0', val)].split(b':')
                    if startReplace is None:
                        startReplace = elf.tell() - entsize
                    pad += entsize
elif startReplace is not None:
newSectionData += data
data = elf.read(entsize)
tag, val = struct.unpack_from(dynamicStruct, data)
if startReplace is not None:
newSectionData += data + (b"\0" * pad)
rewriteSections.append((startReplace, newSectionData))
elf.close()
# No rpaths/runpaths found, so nothing to do any more.
if len(rewriteSections) == 0:
return filenames
# Attempt to resolve any of the directly
# dependent filenames along the RPATH.
for f in range(len(filenames)):
filename = filenames[f]
for rdir in rpath:
if os.path.isfile(os.path.join(rdir, filename)):
filenames[f] = os.path.join(rdir, filename)
break
if not file.deleteTemp:
# Copy the file to a temporary location because we
# don't want to modify the original (there's a big
# chance that we break it).
tmpfile = Filename.temporary('', "p3d_" + file.filename.getBasename())
tmpfile.setBinary()
file.filename.copyTo(tmpfile)
file.filename = tmpfile
file.deleteTemp = True
# Open the temporary file and rewrite the dynamic sections.
elf = open(file.filename.toOsSpecific(), 'r+b')
        for offset, data in rewriteSections:
            # Each rewritten blob already carries its trailing zero padding,
            # so no extra pad bytes are written after it.
            elf.seek(offset)
            elf.write(data)
        elf.close()
return filenames
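    # Hedged note (added for clarity, not in the original source): the 16-byte
    # ELF ident parsed above encodes the word size at offset 4 (1 = 32-bit,
    # 2 = 64-bit) and the byte order at offset 5 (1 = little-endian,
    # 2 = big-endian); e.g. an ident starting with b"\177ELF\x02\x01" selects
    # elfClass 1 (the 64-bit layouts) and byteOrder "<" (little-endian
    # struct formats).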
def __addImplicitDependenciesPosix(self):
""" Walks through the list of files, looking for so's
and executables that might include implicit dependencies
on other so's. Tries to determine those dependencies,
and adds them back into the filelist. """
# We walk through the list as we modify it. That's OK,
# because we want to follow the transitive closure of
# dependencies anyway.
for file in self.files:
if not file.executable:
continue
if file.isExcluded(self):
# Skip this file.
continue
# Check if this is an ELF binary.
filenames = self.__readAndStripELF(file)
# If that failed, perhaps ldd will help us.
if filenames is None:
self.notify.warning("Reading ELF library %s failed, using ldd instead" % (file.filename))
tempFile = Filename.temporary('', 'p3d_', '.txt')
command = 'ldd "%s" >"%s"' % (
file.filename.toOsSpecific(),
tempFile.toOsSpecific())
try:
os.system(command)
except:
pass
if tempFile.exists():
filenames = self.__parseDependenciesPosix(tempFile)
tempFile.unlink()
if filenames is None:
self.notify.warning("Unable to determine dependencies from %s" % (file.filename))
continue
# Attempt to resolve the dependent filename relative
# to the original filename, before we resolve it along
# the PATH.
path = DSearchPath(Filename(file.filename.getDirname()))
for filename in filenames:
# These vDSO's provided by Linux aren't
# supposed to be anywhere on the system.
if filename in ["linux-gate.so.1", "linux-vdso.so.1"]:
continue
filename = Filename.fromOsSpecific(filename)
filename.resolveFilename(path)
filename.setBinary()
newName = Filename(file.dependencyDir, filename.getBasename())
self.addFile(filename, newName = str(newName),
explicit = False, executable = True)
def __parseDependenciesPosix(self, tempFile):
""" Reads the indicated temporary file, the output from
ldd, to determine the list of so's this executable file
depends on. """
        with open(tempFile.toOsSpecific(), 'r') as lddOutput:
            lines = lddOutput.readlines()
filenames = []
for line in lines:
line = line.strip()
s = line.find(' => ')
if s == -1:
continue
line = line[:s].strip()
filenames.append(line)
        return filenames
list of locales for available DisplayText translations
that can be performed using this form.
return: (osid.locale.LocaleList) - a list of available locales
or an empty list if no translation operations are
available
compliance: mandatory - This method must be implemented.
"""
raise Unimplemented()
def set_locale(self, language_type=None, script_type=None):
"""Specifies a language and script type for DisplayText fields in
this form.
Setting a locale to something other than the default locale may
affect the Metadata in this form.
If multiple locales are available for managing translations, the
Metadata indicates the fields are unset as they may be returning
        a default value based on the default locale.
arg: languageType (osid.type.Type): the language type
arg: scriptType (osid.type.Type): the script type
raise: NullArgument - languageType or scriptType is null
raise: Unsupported - languageType and scriptType not available
from get_locales()
compliance: mandatory - This method must be implemented.
"""
raise Unsupported()
def get_journal_comment_metadata(self):
"""Gets the metadata for the comment corresponding to this form
submission.
The comment is used for describing the nature of the change to
the corresponding object for the purposes of logging and
auditing.
return: (osid.Metadata) - metadata for the comment
compliance: mandatory - This method must be implemented.
"""
return Metadata(**settings.METADATA['journalComment'])
def set_journal_comment(self, comment=None):
"""Sets a comment.
arg: comment (string): the new comment
raise: InvalidArgument - comment is invalid
raise: NoAccess - metadata.is_readonly() is true
raise: NullArgument - comment is null
compliance: mandatory - This method must be implemented.
"""
if comment is None:
raise NullArgument()
        metadata = Metadata(**settings.METADATA['journalComment'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(comment, metadata, array=False):
self._my_map['journalComment']['text'] = comment
else:
raise InvalidArgument()
def is_valid(self):
"""Tests if ths form is in a valid state for submission.
A form is valid if all required data has been supplied compliant
with any constraints.
return: (boolean) - false if there is a known error in this
form, true otherwise
raise: OperationFailed - attempt to perform validation failed
compliance: mandatory - This method must be implemented.
"""
validity = True
for element in self._validity_map:
if self._validity_map[element] is not VALID:
validity = False
return validity
def get_validation_messages(self):
"""Gets text messages corresponding to additional instructions to
pass form validation.
return: (osid.locale.DisplayText) - a list of messages
compliance: mandatory - This method must be implemented.
"""
pass
def get_invalid_metadata(self):
"""Gets a list of metadata for the elements in this form which are
not valid.
return: (osid.Metadata) - invalid metadata
compliance: mandatory - This method must be implemented.
"""
pass
default_locale = property(get_default_locale)
locales = property(get_locales)
journal_comment_metadata = property(get_journal_comment_metadata)
validation_messages = property(get_validation_messages)
invalid_metadata = property(get_invalid_metadata)
journal_comment = property(fset=set_journal_comment)
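# Hedged sketch (added for clarity, not part of the original module): is_valid
# above effectively ANDs the per-element validity map, so a single non-VALID
# entry makes the whole form invalid.
def _demo_validity_map_semantics():
    validity_map = {'displayName': VALID, 'description': 'INVALID'}
    assert not all(v is VALID for v in validity_map.values())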
class OsidIdentifiableForm(abc_osid_objects.OsidIdentifiableForm, OsidForm):
"""The OsidIdentifiableForm is used to create and update identifiable
objects.
The form is a container for data to be sent to an update or create
method of a session.
"""
class OsidExtensibleForm(abc_osid_objects.OsidExtensibleForm, OsidForm, markers.Extensible):
"""The OsidExtensibleForm is used to create and update extensible
objects.
The form is a container for data to be sent to an update or create
method of a session.
"""
def get_required_record_types(self):
"""Gets the required record types for this form.
The required records may change as a result of other data in
this form and should be checked before submission.
return: (osid.type.TypeList) - a list of required record types
compliance: mandatory - This method must be implemented.
"""
pass
required_record_types = property(get_required_record_types)
class OsidBrowsableForm(abc_osid_objects.OsidBrowsableForm, OsidForm):
"""The OsidBrowsableForm is used to create and update browsable
objects.
The form is a container for data to be sent to an update or create
method of a session.
"""
class OsidTemporalForm(abc_osid_objects.OsidTemporalForm, OsidForm):
"""This form is used to create and update temporals."""
def get_start_date_metadata(self):
"""Gets the metadata for a start date.
return: (osid.Metadata) - metadata for the date
*compliance: mandatory -- This method must be implemented.*
"""
pass
start_date_metadata = property(get_start_date_metadata)
def set_start_date(self, date=None):
"""Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def clear_start_date(self):
"""Clears the start date.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_end_date_metadata(self):
"""Gets the metadata for an end date.
return: (osid.Metadata) - metadata for the date
*compliance: mandatory -- This method must be implemented.*
"""
pass
def set_end_date(self, date=None):
"""Sets the end date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def clear_end_date(self):
"""Clears the end date.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
pass
class OsidAggregateableForm(OsidForm):
"""This form is used to create and update assemblages."""
class OsidSourceableForm(abc_osid_objects.OsidSourceableForm, OsidForm):
"""This form is used to create and update sourceables."""
def get_provider_metadata(self):
"""Gets the metadata for a provider.
return: (osid.Metadata) - metadata for the provider
compliance: mandatory - This method must be implemented.
"""
pass
def set_provider(self, provider_id=None):
"""Sets a provider.
arg: providerId (osid.id.Id): the new provider
raise: INVALID_ARGUMENT - providerId is invalid
raise: NO_ACCESS - metadata.is_read_only() is true
raise: NULL_ARGUMENT - providerId is null
compliance: mandatory - This method must be implemented.
"""
pass
def clear_provider(self):
"""Removes the provider.
raise: NO_ACCESS - metadata.is_required() is true or
metadata.is_read_only() is true
compliance: mandatory - This method must be implemented.
"""
pass
def get_branding_metadata(self):
"""Gets the metadata for the asset branding.
return: (osid.Metadata) - metadata for the asset branding.
compliance: mandatory - This method must be implemented.
"""
pass
def set_branding(self, asset_ids=None):
"""Sets the branding.
arg: assetIds (osid.id.Id): the new assets
raise: INVALID_ARGUMENT - assetIds is invalid
raise: NO_ACCESS - metadata.is_read_only() is true
raise: NULL_ARGUMENT - assetIds is null
compliance: mandatory - This method must be implemented.
"""
pass
def clear_branding(self):
"""Removes the branding.
raise: NO_ACCESS - metadata.is_required() is true or
metadata.is_read_only() is true
compliance: mandatory - This method must be implemented.
"""
pass
def get_license_metadata(self):
"""Gets the metadata for the license.
return: (osid.Metadata) - metadata for the license
compliance: mandatory - This method must be implemented.
"""
pass
def set_license(self, license=None):
"""Sets the license.
arg: license (string): the new license
raise: INVALID_ARGUMENT - license is invalid
raise: NO_ACCESS - metadata.is_read_only() is true
raise: NULL_ARGUMENT - license is null
compliance: mandatory - This method must be implemented.
"""
pass
def clear_license(self):
"""Removes the license.
raise: NO_ACCESS - metadata.is_required() is true or
metadata.is_read_only() is true
compliance: mandatory - This method must be implemented.
"""
pass
class OsidFederateableForm(abc_osid_objects.OsidFederateableForm, OsidForm):
"""This form is used to create and update federateables."""
class OsidObjectForm(abc_osid_objects.OsidObjectForm, OsidIdentifiableForm, OsidExtensibleForm, OsidBrowsableForm):
"""The OsidObjectForm is used to create and update OsidObjects.
The form is not an OsidObject but merely a container for data to be
sent to an update or create method of a session. A provider may or
may not combine the OsidObject and OsidObjectForm interfaces into a
single object.
Generally, a set method parallels each get method of an OsidObject.
Additionally, Metadata may be examined for each data element to
assist in understanding particular rules concerning acceptable data.
The form may provide some feedback as to the validity of certain
data updates before the update transaction is issued to the
corresponding session but a successful modification of the form is
not a guarantee of success for the update transaction. A consumer
may elect to perform all updates within a single update transaction
or break up a large update into smaller units. The tradeoff is the
granularity of error feedback vs. the performance gain of a single
transaction.
As with all aspects of the OSIDs, nulls cannot be used. Methods to
clear values are also defined in the form.
A new OsidForm should be acquired for each transaction upon an
OsidObject. Forms should not be reused from one object to another
even if the supplied data is the same as the data being updated.
"""
# Repository: xuuuuuuchen/PASTA
import tensorflow as tf
def Combining_Affine_Para(tensors):
imgs = tensors[0]
array = tensors[1]
n_batch = tf.shape(imgs)[0]
tx = tf.squeeze(tf.slice(array, [0,0], [n_batch, 1]), 1)
ty = tf.squeeze(tf.slice(array, [0,1], [n_batch, 1]), 1)
sin0 = tf.squeeze(tf.slice(array, [0,2], [n_batch, 1]),1)
cos0 = tf.sqrt(1.0-tf.square(sin0))
sx = tf.squeeze(tf.slice(array, [0,3], [n_batch, 1]), 1)
sy = tf.squeeze(tf.slice(array, [0,4], [n_batch, 1]), 1)
cx = tf.squeeze(tf.slice(array, [0,5], [n_batch, 1]), 1)
cy = tf.squeeze(tf.slice(array, [0,6], [n_batch, 1]), 1)
"""
CORE
"""
x1 = cos0 * cx - sin0 * cx * sx
x2 = sin0 * cx + cos0 * cx * sx
x3 = cos0 * cy * sy - sin0 * cy
x4 = sin0 * cy * sy + cos0 * cy
x5 = tx
x6 = ty
x1 = tf.expand_dims(x1, 1)
x2 = tf.expand_dims(x2, 1)
x3 = tf.expand_dims(x3, 1)
x4 = tf.expand_dims(x4, 1)
x5 = tf.expand_dims(x5, 1)
x6 = tf.expand_dims(x6, 1)
# print(" >>>>>>>>>> x1: "+str(x1.shape))
# print(" >>>>>>>>>> x2: "+str(x2.shape))
# print(" >>>>>>>>>> x3: "+str(x3.shape))
# print(" >>>>>>>>>> x4: "+str(x4.shape))
# print(" >>>>>>>>>> x5: "+str(x5.shape))
# print(" >>>>>>>>>> x6: "+str(x6.shape))
array = tf.concat([x1,x2,x5,x3,x4,x6], 1)
# print(" >>>>>>>>>> matrix: "+str(array.shape))
return array
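# Minimal sanity sketch (an assumption, not from the original repo): with
# identity parameters (tx = ty = sin0 = sx = sy = 0, cx = cy = 1) the layer
# should emit the flattened identity affine [1, 0, 0, 0, 1, 0] per batch item.
def _identity_affine_check():
    imgs = tf.zeros([2, 1, 8, 8])  # dummy NCHW batch; only the batch size is read
    params = tf.constant([[0., 0., 0., 0., 0., 1., 1.]] * 2)  # [tx,ty,sin0,sx,sy,cx,cy]
    return Combining_Affine_Para([imgs, params])  # -> tensor of shape [2, 6]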
def Combining_Affine_Para3D(tensors):
imgs = tensors[0]
array = tensors[1]
n_batch = tf.shape(imgs)[0]
tx = tf.squeeze(tf.slice(array, [0,0], [n_batch, 1]), 1)
ty = tf.squeeze(tf.slice(array, [0,1], [n_batch, 1]), 1)
tz = tf.squeeze(tf.slice(array, [0,2], [n_batch, 1]), 1)
sin0x = tf.squeeze(tf.slice(array, [0,3], [n_batch, 1]),1)
sin0y = tf.squeeze(tf.slice(array, [0,4], [n_batch, 1]),1)
sin0z = tf.squeeze(tf.slice(array, [0,5], [n_batch, 1]),1)
cos0x = tf.sqrt(1.0-tf.square(sin0x))
cos0y = tf.sqrt(1.0-tf.square(sin0y))
cos0z = tf.sqrt(1.0-tf.square(sin0z))
shxy = tf.squeeze(tf.slice(array, [0,6], [n_batch, 1]), 1)
shyx = tf.squeeze(tf.slice(array, [0,7], [n_batch, 1]), 1)
shxz = tf.squeeze(tf.slice(array, [0,8], [n_batch, 1]), 1)
shzx = tf.squeeze(tf.slice(array, [0,9], [n_batch, 1]), 1)
shzy = tf.squeeze(tf.slice(array, [0,10], [n_batch, 1]), 1)
shyz = tf.squeeze(tf.slice(array, [0,11], [n_batch, 1]), 1)
scx = tf.squeeze(tf.slice(array, [0,12], [n_batch, 1]), 1)
scy = tf.squeeze(tf.slice(array, [0,13], [n_batch, 1]), 1)
scz = tf.squeeze(tf.slice(array, [0,14], [n_batch, 1]), 1)
"""
CORE
"""
x1 = -shxz*scx*sin0y+scx*cos0y*cos0z+shxy*scx*cos0y*sin0z
x2 = shxz*scx*sin0x*cos0y+shxy*scx*cos0x*cos0z +scx*sin0x*sin0y*cos0z-scx*cos0x*sin0z+shxy*scx*sin0x*sin0y*sin0z
x3 = shxz*scx*cos0x*cos0y-shxy*scx*sin0x*cos0z+scx*cos0x*sin0y*cos0z+scx*sin0x*sin0z+shxy*scx*cos0x*sin0y*sin0z
x4 = scy*cos0y*sin0z+scy*cos0y*cos0z*shyx-scy*sin0y*shyz
x5 = scy*cos0x*cos0z+scy*sin0x*sin0y*sin0z+scy*sin0x*sin0y*cos0z*shyx-scy*cos0x*sin0z*shyx+scy*sin0x*cos0y*shyz
x6 = -scy*sin0x*cos0z+scy*cos0x*sin0y*sin0z+scy*cos0x*sin0y*cos0z*shyx+scy*sin0x*sin0z*shyx+scy*cos0x*cos0y*shyz
x7 = -scz*sin0y+shzx*scz*cos0y*cos0z+shzy*scz*cos0y*sin0z
x8 = scz*sin0x*cos0y+shzy*scz*cos0x*cos0z+shzx*scz*sin0x*sin0y*cos0z-shzx*scz*cos0x*sin0z+shzy*scz*sin0x*sin0y*sin0z
x9 = scz*cos0x*cos0y-shzy*scz*sin0x*cos0z+shzx*scz*cos0x*sin0y*cos0z+shzx*scz*sin0x*sin0z+shzy*scz*cos0x*sin0y*sin0z
x10 = tx
x11 = ty
x12 = tz
x1 = tf.expand_dims(x1, 1)
x2 = tf.expand_dims(x2, 1)
x3 = tf.expand_dims(x3, 1)
x4 = tf.expand_dims(x4, 1)
x5 = tf.expand_dims(x5, 1)
x6 = tf.expand_dims(x6, 1)
x7 = tf.expand_dims(x7, 1)
x8 = tf.expand_dims(x8, 1)
x9 = tf.expand_dims(x9, 1)
x10 = tf.expand_dims(x10, 1)
x11 = tf.expand_dims(x11, 1)
x12 = tf.expand_dims(x12, 1)
array = tf.concat([ x1,x2,x3,x10,
x4,x5,x6,x11,
x7,x8,x9,x12,], 1)
# print(" >>>>>>>>>> matrix: "+str(array.shape))
return array
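# Parameter layout consumed above, as read off the slices: columns 0-2 are
# translation (tx, ty, tz), 3-5 per-axis rotation sines, 6-11 the six shear
# terms, 12-14 per-axis scale. Cosines are reconstructed as sqrt(1 - sin^2),
# which implicitly restricts each rotation to [-90, 90] degrees.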
def affine_flow(tensors):
# print(" >>>>>>>>>> affine_flow_layer")
imgs = tensors[0]
array = tensors[1]
# print(" >>>>>>>>>> imgs: "+str(imgs.shape))
# print(" >>>>>>>>>> array: "+str(array.shape))
n_batch = tf.shape(imgs)[0]
xlen = tf.shape(imgs)[2]
ylen = tf.shape(imgs)[3]
grids = batch_mgrid(n_batch, xlen, ylen)
coords = tf.reshape(grids, [n_batch, 2, -1])
theta = tf.reshape(array, [-1, 2, 3])
matrix = tf.slice(theta, [0, 0, 0], [-1, -1, 2])
t = tf.slice(theta, [0, 0, 2], [-1, -1, -1])
T_g = tf.matmul(matrix, coords) + t
T_g = tf.reshape(T_g, [n_batch, 2, xlen, ylen])
output = batch_warp2d(imgs, T_g)
return output
def affine_flow_3D(tensors):
imgs = tensors[0]
array = tensors[1]
print(imgs.shape)
n_batch = tf.shape(imgs)[0]
xlen = tf.shape(imgs)[2]
ylen = tf.shape(imgs)[3]
zlen = tf.shape(imgs)[4]
grids = batch_mgrid(n_batch, xlen, ylen, zlen)
grids = tf.reshape(grids, [n_batch, 3, -1])
theta = tf.reshape(array, [-1, 3, 4])
matrix = tf.slice(theta, [0, 0, 0], [-1, -1, 3])
t = tf.slice(theta, [0, 0, 3], [-1, -1, -1])
T_g = tf.matmul(matrix, grids) + t
T_g = tf.reshape(T_g, [n_batch, 3, xlen, ylen, zlen])
output = batch_warp3d(imgs, T_g)
return output
def affine_flow_output_shape(input_shapes):
shape1 = list(input_shapes[0])
return (shape1[0],1,shape1[2],shape1[3])
def affine_flow_3D_output_shape(input_shapes):
shape1 = list(input_shapes[0])
return (shape1[0], shape1[2],shape1[3],shape1[4])
def batch_mgrid(n_batch, *args, **kwargs):
"""
create batch of orthogonal grids
similar to np.mgrid
Parameters
----------
n_batch : int
number of grids to create
args : int
number of points on each axis
low : float
minimum coordinate value
high : float
maximum coordinate value
Returns
-------
grids : tf.Tensor [n_batch, len(args), args[0], ...]
batch of orthogonal grids
"""
grid = mgrid(*args, **kwargs)
grid = tf.expand_dims(grid, 0)
grids = tf.tile(grid, [n_batch] + [1 for _ in range(len(args) + 1)])
return grids
def mgrid(*args, **kwargs):
"""
create orthogonal grid
similar to np.mgrid
Parameters
----------
args : int
number of points on each axis
low : float
minimum coordinate value
high : float
maximum coordinate value
Returns
-------
grid : tf.Tensor [len(args), args[0], ...]
orthogonal grid
"""
low = kwargs.pop("low", -1)
high = kwargs.pop("high", 1)
low = tf.to_float(low)
high = tf.to_float(high)
coords = (tf.linspace(low, high, arg) for arg in args)
grid = tf.stack(tf.meshgrid(*coords, indexing='ij'))
return grid
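# Shape sketch (illustrative): mgrid(4, 4) -> tensor of shape [2, 4, 4] with
# coordinates spanning [-1, 1] along each axis ('ij' indexing: channel 0
# varies along the first axis), and batch_mgrid(2, 4, 4) tiles the result
# to [2, 2, 4, 4].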
def batch_warp2d(imgs, mappings):
n_batch = tf.shape(imgs)[0]
coords = tf.reshape(mappings, [n_batch, 2, -1])
x_coords = tf.slice(coords, [0, 0, 0], [-1, 1, -1])
y_coords = tf.slice(coords, [0, 1, 0], [-1, 1, -1])
x_coords_flat = tf.reshape(x_coords, [-1])
y_coords_flat = tf.reshape(y_coords, [-1])
# print(" >>>>>>>>>> imgs: "+str(imgs.shape))
# imgs = tf.transpose(imgs,[0,2,3,1])
# print(" >>>>>>>>>> imgs: "+str(imgs.shape))
output = _interpolate2d(imgs, x_coords_flat, y_coords_flat)
return output
def batch_warp3d(imgs, mappings):
n_batch = tf.shape(imgs)[0]
coords = tf.reshape(mappings, [n_batch, 3, -1])
x_coords = tf.slice(coords, [0, 0, 0], [-1, 1, -1])
y_coords = tf.slice(coords, [0, 1, 0], [-1, 1, -1])
z_coords = tf.slice(coords, [0, 2, 0], [-1, 1, -1])
x_coords_flat = tf.reshape(x_coords, [-1])
y_coords_flat = tf.reshape(y_coords, [-1])
z_coords_flat = tf.reshape(z_coords, [-1])
output = _interpolate3d(imgs, x_coords_flat, y_coords_flat, z_coords_flat)
return output
def batch_displacement_warp3d(tensors):
print(" batch_displacement_warp3d")
imgs = tensors[0]
vector_fields = tensors[1]
print(" >>>>>>>>>> imgs: "+str(imgs.shape))
print(" >>>>>>>>>> vector_fields: "+str(vector_fields.shape))
n_batch = tf.shape(imgs)[0]
xlen = tf.shape(imgs)[2]
ylen = tf.shape(imgs)[3]
grids = batch_mgrid(n_batch, xlen, ylen)
print(" >>>>>>>>>> grids: "+str(grids.shape))
# T_g = grids + vector_fields
# print(" >>>>>>>>>> T_g: "+str(T_g.shape))
output = batch_warp3d(imgs, vector_fields)
return output
def warp_3d_layer_output_shape(input_shapes):
shape1 = list(input_shapes[0])
# print(" >>>>>>>>>> shape1: "+str(tuple(shape1).shape))
return tuple(shape1)
def _interpolate2d(imgs, x, y):
# print("interpolate2d")
n_batch = tf.shape(imgs)[0]
n_channel = tf.shape(imgs)[1]
xlen = tf.shape(imgs)[2]
ylen = tf.shape(imgs)[3]
x = tf.cast(x, tf.float32)
y = tf.cast(y, tf.float32)
xlen_f = tf.cast(xlen, tf.float32)
ylen_f = tf.cast(ylen, tf.float32)
zero = tf.zeros([], dtype='int32')
max_x = tf.cast(xlen - 1, 'int32')
max_y = tf.cast(ylen - 1, 'int32')
# scale indices from [-1, 1] to [0, xlen - 1] / [0, ylen - 1]
x = (x + 1.) * (xlen_f - 1.) * 0.5
y = (y + 1.) * (ylen_f - 1.) * 0.5
# do sampling
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
x0 = tf.clip_by_value(x0, zero, max_x)
x1 = tf.clip_by_value(x1, zero, max_x)
y0 = tf.clip_by_value(y0, zero, max_y)
y1 = tf.clip_by_value(y1, zero, max_y)
def _repeat(base_indices, n_repeats):
base_indices = tf.matmul(
tf.reshape(base_indices, [-1, 1]),
tf.ones([1, n_repeats], dtype='int32'))
return tf.reshape(base_indices, [-1])
base = _repeat(tf.range(n_batch) * xlen * ylen, ylen * xlen)
base_x0 = base + x0 * ylen
base_x1 = base + x1 * ylen
index00 = base_x0 + y0
index01 = base_x0 + y1
index10 = base_x1 + y0
index11 = base_x1 + y1
# use indices to lookup pixels in the flat image and restore
# n_channel dim
imgs_flat = tf.reshape(imgs, [-1, n_channel])
imgs_flat = tf.to_float(imgs_flat)
I00 = tf.gather(imgs_flat, index00)
I01 = tf.gather(imgs_flat, index01)
I10 = tf.gather(imgs_flat, index10)
I11 = tf.gather(imgs_flat, index11)
# and finally calculate interpolated values
dx = x - tf.to_float(x0)
dy = y - tf.to_float(y0)
w00 = tf.expand_dims((1. - dx) * (1. - dy), 1)
w01 = tf.expand_dims((1. - dx) * dy, 1)
w10 = tf.expand_dims(dx * (1. - dy), 1)
w11 = tf.expand_dims(dx * dy, 1)
output = tf.add_n([w00*I00, w01*I01, w10*I10, w11*I11])
# reshape
output = tf.reshape(output, [n_batch, n_channel, xlen, ylen])
return output
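# Note on the weights above: for fractional offsets dx, dy in [0, 1],
# (1-dx)(1-dy) + (1-dx)dy + dx(1-dy) + dx*dy == 1, so the four bilinear
# weights form a partition of unity and interpolation preserves intensity.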
def _interpolate3d(imgs, x, y, z):
n_batch = tf.shape(imgs)[0]
n_channel = tf.shape(imgs)[1]
xlen = tf.shape(imgs)[2]
ylen = tf.shape(imgs)[3]
zlen = tf.shape(imgs)[4]
x = tf.cast(x, tf.float32)
y = tf.cast(y, tf.float32)
z = tf.cast(z, tf.float32)
xlen_f = tf.cast(xlen, tf.float32)
ylen_f = tf.cast(ylen, tf.float32)
zlen_f = tf.cast(zlen, tf.float32)
zero = tf.zeros([], dtype='int32')
max_x = tf.cast(xlen - 1, 'int32')
#!/bin/env python3
"""Main executable for running puppetmaster."""
import bisect
import json
import os
import random
import statistics
from argparse import ArgumentParser, Namespace
from multiprocessing import Pool
from pathlib import PurePath
from typing import Dict, List, Sequence, Set
try:
import matplotlib.pyplot as plt
import numpy as np
except ImportError:
plt = np = None # type: ignore
from api import ObjSetMakerFactory, TransactionExecutor, TransactionSchedulerFactory
from executors import RandomExecutor
from generator import TransactionGeneratorFactory
from pmtypes import SimulationParams, Transaction
from schedulers import (
GreedySchedulerFactory,
MaximalSchedulerFactory,
TournamentSchedulerFactory,
)
from sets import (
ApproximateObjSetMakerFactory,
FiniteObjSetMakerFactory,
IdealObjSetMakerFactory,
)
from simulator import Simulator
DEFAULT_EXECUTOR = RandomExecutor()
DEFAULT_OBJ_SET_MAKER_FACTORY = IdealObjSetMakerFactory()
def get_args() -> Namespace:
"""Parse and print command-line arguments."""
parser = ArgumentParser(
description="Puppetmaster, a hardware accelerator for transactional memory"
)
subparsers = parser.add_subparsers(title="commands")
# Options for tabulation script.
tpar_parser = subparsers.add_parser("tpar", help="tabulate parallelism")
tpar_parser.add_argument(
"--log-max-period",
help="Log-2 of the maximum clock period",
default=6,
type=int,
)
tpar_parser.add_argument(
"--log-max-cores",
help="Log-2 of the maximum number of cores",
default=10,
type=int,
)
tpar_parser.set_defaults(func=make_parallelism_table)
if plt is not None:
# Options for statistics plotting script.
stat_parser = subparsers.add_parser("stat", help="plot system statistics")
stat_parser.add_argument(
"-t",
"--clock-period",
help="clock period in nanoseconds",
default=1,
type=int,
)
stat_parser.add_argument(
"-c", "--num-cores", help="number of execution cores", default=4, type=int
)
stat_parser.set_defaults(func=make_stats_plot)
# Options for latency plotting script.
latency_parser = subparsers.add_parser(
"latency", help="plot transaction latencies"
)
latency_parser.add_argument(
"-t",
"--clock-period",
help="length of one hardware operation (clock period)",
default=1,
type=int,
)
latency_parser.add_argument(
"-c", "--num-cores", help="number of execution cores", default=4, type=int
)
latency_parser.set_defaults(func=make_latency_plot)
# Options for pool size finder.
psfind_parser = subparsers.add_parser("psfind", help="tabulate optimal pool sizes")
psfind_parser.add_argument(
"--log-max-period",
help="Log-2 of the maximum clock period",
default=6,
type=int,
)
psfind_parser.add_argument(
"--log-max-cores",
help="Log-2 of the maximum number of cores",
default=10,
type=int,
)
psfind_parser.set_defaults(func=make_ps_table)
# General options.
parser.add_argument("template", help="transaction template file", type=str)
parser.add_argument(
"-m", "--memsize", help="memory size (# of objects)", default=1024, type=int
)
parser.add_argument(
"-p",
"--poolsize",
help="size of scheduling pool (lookahead)",
type=int,
default=16,
)
parser.add_argument(
"-q",
"--queuesize",
help="size of queue for transactions waiting to be executed",
type=int,
)
parser.add_argument(
"-z",
"--zipf-param",
help="parameter of the Zipf's law distribution",
default=0,
type=float,
)
parser.add_argument(
"-r",
"--repeats",
help="number of times the experiment with a given set of parameters is re-run",
default=10,
type=int,
)
parser.add_argument(
"-v", "--verbose", help="print debugging information", action="count", default=0
)
args = parser.parse_args()
args.n = 2 ** (getattr(args, "log_max_cores", 10) + 1)
print(
f"Template: {os.path.basename(args.template)}\n"
f"No. of transactions: {args.n}\n"
f"Memory size (-m): {args.memsize}\n"
f"Scheduling pool size or lookahead (-p): {args.poolsize or 'infinite'}\n"
f"Execution queue size (-q): {args.queuesize or 'infinite'}\n"
f"Object address distribution's Zipf parameter (-z): {args.zipf_param:.2f}\n"
f"Runs per configuration (-r): {args.repeats}\n"
)
return args
def run_prl_sims(
core_counts, clock_period, sched_factory, executor, obj_set_maker_factory
):
"""Return average parallelism for all core counts and the given clock period."""
prls: List[float] = []
for core_count in core_counts:
results = []
params = SimulationParams(
clock_period, core_count, ARGS.poolsize, ARGS.queuesize
)
for _, path in run_sim(
params,
tr_factory,
sched_factory,
executor,
obj_set_maker_factory,
):
start = end = t_prev = t_cur = None
total = 0
for state in path:
# Skip over warm-up phase (until first transaction completes).
if (
len(state.incoming)
+ len(state.pending)
+ len(state.scheduled)
+ len(state.cores)
== ARGS.n
):
continue
if start is None:
start = state.clock
t_prev = start
else:
t_cur = state.clock
total += len(state.cores) * (t_cur - t_prev)
t_prev = t_cur
# Skip over tail (when pool is empty).
if not state.incoming and len(state.pending) < ARGS.poolsize:
end = state.clock
break
assert start is not None and end is not None
if start == end:
results.append(len(state.cores) + 1)
else:
results.append(total / (end - start))
if ARGS.verbose >= 1:
rename_steps = path[-1].obj_set_maker.history
print(
f"Rename steps: {statistics.mean(rename_steps):.2f} (avg), "
f"{statistics.median(rename_steps)} (median), "
f"{max(rename_steps)} (max)"
)
prls.append(statistics.mean(results))
return prls
def make_parallelism_table(tr_factory: TransactionGeneratorFactory) -> None:
"""Print parallelism as a function of scheduling time and core count."""
clock_periods = [0, *(2 ** logp for logp in range(ARGS.log_max_period + 1))]
core_counts = [2 ** logcores for logcores in range(ARGS.log_max_cores + 1)]
thead, tbody = get_table_templates(
varname="Steady-state parallelism",
xname="Number of cores",
yname="Clock period",
xvals=core_counts,
yvals=clock_periods,
max_value=max(core_counts),
precision=1,
)
def run_sims(
sched_factory: TransactionSchedulerFactory,
obj_set_maker_factory: ObjSetMakerFactory = DEFAULT_OBJ_SET_MAKER_FACTORY,
executor: TransactionExecutor = DEFAULT_EXECUTOR,
):
print(get_title(sched_factory, executor, obj_set_maker_factory))
print()
print(thead)
sim_params = [
(
core_counts,
clock_period,
sched_factory,
executor,
obj_set_maker_factory,
)
for clock_period in clock_periods
]
for clock_period, prls in zip(
clock_periods, PROCESS_POOL.starmap(run_prl_sims, sim_params)
):
print(tbody.format(clock_period, *prls))
print()
for log_size in (7, 8, 9, 10):
run_sims(
TournamentSchedulerFactory(comparator_limit=1),
ApproximateObjSetMakerFactory(2 ** log_size),
)
run_sims(
TournamentSchedulerFactory(), ApproximateObjSetMakerFactory(2 ** log_size)
)
run_sims(
TournamentSchedulerFactory(comparator_limit=1),
FiniteObjSetMakerFactory(2 ** log_size),
)
run_sims(TournamentSchedulerFactory(), FiniteObjSetMakerFactory(2 ** log_size))
run_sims(TournamentSchedulerFactory(comparator_limit=1))
run_sims(TournamentSchedulerFactory())
run_sims(GreedySchedulerFactory())
run_sims(MaximalSchedulerFactory())
def make_stats_plot(tr_factory: TransactionGeneratorFactory) -> None:
"""Plot number of scheduled transactions as a function of time."""
filename = (
f"{PurePath(ARGS.template).stem}_{ARGS.n}_m{ARGS.memsize}_p{ARGS.poolsize}"
f"_q{ARGS.queuesize if ARGS.queuesize is not None else 'inf'}"
f"_z{ARGS.zipf_param}_t{ARGS.clock_period}_c{ARGS.num_cores}.pdf"
)
params = SimulationParams(
ARGS.clock_period, ARGS.num_cores, ARGS.poolsize, ARGS.queuesize
)
fig, axes = plt.subplots(ARGS.repeats, 4)
j = 0
def run_sims(sched_factory: TransactionSchedulerFactory):
nonlocal j
print(get_title(sched_factory))
print()
lines = []
for i, path in run_sim(params, tr_factory, sched_factory):
scheduled_counts = {}
for state in path:
if state.clock not in scheduled_counts:
scheduled_counts[state.clock] = len(state.scheduled)
times = np.array(list(scheduled_counts.keys()))
stats = np.array(list(scheduled_counts.values()))
axis = axes[i][j]
lines.append(axis.plot(times, stats))
if i == 0:
axis.set_title(get_title(sched_factory))
j += 1
return lines
run_sims(TournamentSchedulerFactory(comparator_limit=1))
midlines = run_sims(TournamentSchedulerFactory())
run_sims(GreedySchedulerFactory())
if ARGS.poolsize <= 20:
run_sims(MaximalSchedulerFactory())
axes[-1][1].legend(
handles=midlines[-1],
labels=["scheduled"],
loc="upper center",
bbox_to_anchor=(0.5, -0.2),
fancybox=False,
shadow=False,
ncol=3,
)
plt.show()
fig.savefig(filename)
def make_latency_plot(tr_factory: TransactionGeneratorFactory) -> None:
"""Plot number of scheduled transactions as a function of time."""
filename = (
f"{PurePath(ARGS.template).stem}_latency_{ARGS.n}_m{ARGS.memsize}"
f"_p{ARGS.poolsize}_q{ARGS.queuesize if ARGS.queuesize is not None else 'inf'}"
f"_z{ARGS.zipf_param}_t{ARGS.clock_period}_c{ARGS.num_cores}.pdf"
)
params = SimulationParams(
ARGS.clock_period, ARGS.num_cores, ARGS.poolsize, ARGS.queuesize
)
fig, axes = plt.subplots(ARGS.repeats, 4)
j = 0
def run_sims(sched_factory: TransactionSchedulerFactory):
nonlocal j
print(get_title(sched_factory))
print()
lines = []
for i, path in run_sim(params, tr_factory, sched_factory):
start_times = {}
end_times = {}
prev_pending: Set[Transaction] = set()
prev_executing: Set[Transaction] = set()
for state in path:
for tr in prev_pending - state.pending:
start_times[tr] = state.clock
prev_pending = state.pending
for tr in prev_executing - {c.transaction for c in state.cores}:
end_times[tr] = state.clock + tr.time
prev_executing = {c.transaction for c in state.cores}
latencies = [end_times[t] - start_times[t] for t in start_times]
hist, bin_edges = np.histogram(list(latencies), bins=32)
print(hist)
axis = axes[i][j]
lines.append(axis.plot(bin_edges[1:], hist))
if i == 0:
axis.set_title(get_title(sched_factory))
j += 1
return lines
run_sims(TournamentSchedulerFactory(comparator_limit=1))
midlines = run_sims(TournamentSchedulerFactory())
run_sims(GreedySchedulerFactory())
if ARGS.poolsize <= 20:
run_sims(MaximalSchedulerFactory())
axes[-1][1].legend(
handles=midlines[-1],
labels=["scheduled"],
loc="upper center",
bbox_to_anchor=(0.5, -0.2),
fancybox=False,
shadow=False,
ncol=3,
)
plt.show()
fig.savefig(filename)
def make_ps_table(tr_factory: TransactionGeneratorFactory) -> None:
"""Print minimum pool size as a function of scheduling time and core count."""
sched_factory = TournamentSchedulerFactory()
executor = RandomExecutor()
class ScheduledCountSequence(Sequence[int]):
def __init__(self, clock_period, core_num):
self.clock_period = clock_period
self.core_num = core_num
def __getitem__(self, key):
pool_size = key
if ARGS.verbose == 1:
print("Trying pool size of", pool_size, end="...")
if ARGS.verbose >= 2:
print("Trying pool size of", pool_size)
min_sched_counts = []
params = SimulationParams(
self.clock_period, self.core_num, pool_size, ARGS.queuesize
)
for _, path in run_sim(params, tr_factory, sched_factory, executor):
scheduled_counts = {}
for state in path:
# Skip over warm-up phase (until first transaction completes).
if (
len(state.incoming)
+ len(state.pending)
+ len(state.scheduled)
+ len(state.cores)
== ARGS.n
):
continue
if state.clock not in scheduled_counts:
scheduled_counts[state.clock] = (
len(state.scheduled) - state.core_count + len(state.cores)
)
# Skip over tail (when pool is empty).
if not state.incoming and len(state.pending) < ARGS.poolsize:
break
min_sched_count = min(scheduled_counts.values())
min_sched_counts.append(min_sched_count)
if ARGS.verbose == 1:
print(min_sched_count, end=", ")
if min_sched_count == 0:
break
if ARGS.verbose == 1:
print()
if ARGS.verbose >= 2:
print("Results:", ", ".join(map(str, min_sched_counts)))
return min_sched_count
def __len__(self):
return ARGS.n
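# Note: ScheduledCountSequence is a lazily evaluated virtual sequence, so the
# bisect_left call below binary-searches over pool sizes without simulating
# every candidate. This relies on the min scheduled count being (roughly)
# monotone in pool size: negative while cores starve, non-negative once the
# pool is large enough, and the first non-negative index is the answer.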
clock_periods = [2 ** logp for logp in range(ARGS.log_max_period + 1)]
core_counts = [2 ** logcores for logcores in range(ARGS.log_max_cores + 1)]
thead, tbody = get_table_templates(
varname="Minimum pool size for keeping the cores busy",
xname="Number of cores",
yname="Clock period",
xvals=core_counts,
yvals=clock_periods,
max_value=ARGS.n,
precision=0,
)
print(get_title(sched_factory, executor))
print()
print(thead)
for clock_period in clock_periods:
min_poolsizes = []
for core_count in core_counts:
scheduled_counts = ScheduledCountSequence(clock_period, core_count)
min_poolsize = bisect.bisect_left(scheduled_counts, 0, lo=1)
min_poolsizes.append(min_poolsize)
print(tbody.format(clock_period, *min_poolsizes))
print()
def run_sim(
params: SimulationParams,
gen_factory: TransactionGeneratorFactory,
sched_factory: TransactionSchedulerFactory,
executor: TransactionExecutor = DEFAULT_EXECUTOR,
obj_set_maker_factory: ObjSetMakerFactory = DEFAULT_OBJ_SET_MAKER_FACTORY,
):
"""Yield index and path through the state space found by the simulator."""
for i in range(ARGS.repeats):
gen = gen_factory()
obj_set_maker = obj_set_maker_factory()
scheduler = sched_factory(
params.clock_period, params.pool_size, params.queue_size
)
sim = Simulator(gen, obj_set_maker, scheduler, executor, params.core_num)
yield i, sim.run(ARGS.verbose)
def get_table_templates(
varname: str,
import moderngl
from .p4dyna.equation import Equation
from numpy.random import random
class ParticleSystem:
def __init__(self, game, brender, fname, target, miss, move_data):
self.game = game
self.brender = brender # meh
self.N = 500000
self.particles = 0
self.fname = fname
self.target = target
self.miss = miss
self.move_data = move_data
# TODO: convert to proper change
self.basis = (-1.0, 1.0, 1.0) if self.target[0] == 0 else (1.0, 1.0, 1.0)
print("Target:", self.target, self.basis)
# TODO: temp
self.time_alive = -0.8
self.step_count = self.time_alive
self.warp = 1
self.step_warp = 1
self.step_size = self.game.m_par.step_size * self.step_warp
self.emitters = []
self.renderers = []
self.miscs = []
self.equations = {}
self.transformer = None
self.load_context_objects()
self.spawn_elements()
def on_tick(self, time, frame_time):
if (not self.particles) and self.time_alive > 1:
return False
for eq in self.equations.values():
eq.reset_eval()
frame_time *= self.warp * (3 if self.game.m_par.fast_forward else 1)
self.step_size = self.game.m_par.step_size * self.step_warp
self.time_alive += frame_time
time = self.time_alive
self.step_count += frame_time
steps = 0
if self.step_count > self.step_size:
# # Matrix
# self.warp = max(0.1, self.warp * 0.995)
# self.step_warp = self.warp
steps = int(self.step_count // self.step_size)
self.step_count = self.step_count % self.step_size
for _ in range(steps):
self.transformer.render(time, self.step_size)
for emitter in self.emitters:
emitter.render(time, self.step_size)
self.switch_buffers()
for misc in self.miscs:
misc.render(time)
self.render(time, frame_time)
# print(time)
# if steps:
# self.switch_buffers()
return True
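# Note on on_tick above: it is a fixed-timestep accumulator. Frame time is
# banked in step_count and the simulation advances in whole step_size
# increments, keeping the GPU transform passes deterministic regardless of
# the rendering frame rate.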
def render(self, time, frame_time):
# Solid
self.game.m_par.set_render(1)
for renderer in (x for x in self.renderers if x.equation == 1):
renderer.render(time, frame_time)
# Alpha
self.game.m_par.set_render(2)
for renderer in (x for x in self.renderers if x.equation == 2):
renderer.render(time, frame_time)
# Anti
self.game.m_par.set_render(4)
for renderer in (x for x in self.renderers if x.equation == 3):
renderer.render(time, frame_time)
self.game.m_par.set_render(3)
def r(self, owner, name, parse=True, throw=True):
name = f"field_{name}"
if name in owner.params:
v = owner.params[name]
if parse and "!" in str(v):
return self.p(v)
elif "?" in str(v):
return v.replace("?key?", "key")
return v
if throw:
raise UnboundLocalError(f"Missing entry named: {name}.")
return "0"
def switch_buffers(self):
self.vbo1, self.vbo2 = self.vbo2, self.vbo1
for renderer in self.renderers:
renderer.switch_buffers()
self.transformer.switch_buffers()
err = self.game.ctx.error
if err != "GL_NO_ERROR":
print(err)
def load_context_objects(self):
self.vbo1 = self.game.ctx.buffer(reserve=self.N * self.game.m_par.stride)
self.vbo2 = self.game.ctx.buffer(reserve=self.N * self.game.m_par.stride)
def spawn_elements(self):
js = self.game.m_res.get_particle(self.fname, self.move_data)
self.transformer = Transformer(self.game, self)
stage_dict = {}
for node in js["nodes"]:
if node["title"] == "Stage":
stage = node["content"]["field_stage"]
for output in node["outputs"]:
stage_dict[output["id"]] = stage
filters = set()
for node in js["nodes"]:
if node["title"][:6].lower() == "filter":
for output in node["outputs"]:
filters.add(output["id"])
filters = list(filters)
filter_edge_dict = {}
for edge in js["edges"]:
if edge["start"] in filters:
filter_edge_dict[edge["end"]] = edge["start"]
if edge["end"] in filters:
filter_edge_dict[edge["start"]] = edge["end"]
edge_dict = {}
for edge in js["edges"]:
if edge["start"] in stage_dict.keys():
edge_dict[edge["end"]] = stage_dict[edge["start"]]
if edge["end"] in stage_dict.keys():
edge_dict[edge["start"]] = stage_dict[edge["end"]]
for i, letter in enumerate("xyz"):
self.equations[f"user_{letter}"] = Equation(
self,
f"user_{letter}",
f"self.system.brender.get_loc_fighter({1 if self.target[0] == 0 else 0}, {self.basis})[{i}]",
)
self.equations[f"target_{letter}"] = Equation(
self,
f"target_{letter}",
f"self.system.brender.get_loc_fighter({0 if self.target[0] == 0 else 1}, {self.basis})[{i}]",
)
for node in js["nodes"]:
if node["title"] == "Equation":
# print(node["title"])
self.add_equation(node)
for node in js["nodes"]:
# print(node)
if node["title"] == "Emit":
# print(node["title"])
stage = int(edge_dict[node["inputs"][0]["id"]])
self.emitters.append(Emitter(self.game, self, node, stage))
elif node["title"] == "Trigger":
# print(node["title"])
self.miscs.append(Trigger(self.game, self, node))
elif node["title"] == "Actor":
# print(node["title"])
self.miscs.append(Actor(self.game, self, node))
elif node["title"] == "Camera":
# print(node["title"])
self.miscs.append(Camera(self.game, self, node))
elif node["title"] == "Equation":
pass
elif node["title"] == "Render":
# print(node["title"])
stage = int(edge_dict[node["inputs"][0]["id"]])
self.renderers.append(Renderer(self.game, self, node, stage))
for node in js["nodes"]:
if node["title"].lower()[:3] == "geo":
# print("geo")
# print(node["title"])
id = node["inputs"][0]["id"]
if id in edge_dict:
# print("stagebound")
stage = int(edge_dict[id])
self.transformer.add_geoblock(node, None, stage)
else:
# print("filterbound")
filter = int(filter_edge_dict[id])
self.transformer.add_geoblock(node, filter, None)
for node in js["nodes"]:
if node["title"][:6].lower() == "filter":
# print(node["title"])
# print(node["inputs"])
stage_in = int(
edge_dict[
[x for x in node["inputs"] if x["socket_type"] == 3][0]["id"]
]
)
stage_out = int(
edge_dict[
[x for x in node["inputs"] if x["socket_type"] == 0][0]["id"]
]
)
self.transformer.add_geoblock(node, None, stage_in, stage_out)
self.transformer.load_programs()
self.transformer.load_context_objects()
def add_equation(self, params):
self.params = params["content"] # meh
self.equations[self.r(self, "label")] = Equation(
self,
self.r(self, "label", parse=False),
self.r(self, "equation", parse=False),
)
def p(self, q, parse=True):
if "!" in q:
for key, value in self.equations.items():
if f"!{key}!" in q:
q = q.replace(f"!{key}!", str(value.get(self.time_alive)))
if parse:
return eval(q)
return q
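# Illustrative sketch of the substitution performed by p() above (assumed
# values, not from the repo): with an Equation registered under the label
# "amp" currently evaluating to 2.0, the string "0.5 * !amp!" is rewritten
# to "0.5 * 2.0" and eval() returns 1.0.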
class Emitter:
def __init__(self, game, system, params, stage):
self.game = game
self.system = system
self.N = self.system.N
self.params = params["content"]
self.title = "Emitter"
self.stage = stage
self.emit_want = 0
self.counters = 0
self.alive = True
self.load_programs()
self.load_context_objects()
self.set_fields()
def set_fields(self):
self.delay = float(self.system.r(self, "delay"))
self.emit_count = float(self.system.r(self, "count"))
self.duration = float(self.system.r(self, "duration"))
self.prog_emit["Stage"] = self.stage
self.prog_emit["Position"] = (
float(self.system.r(self, "pos_x"))
+ float(self.system.r(self, "vel_x")) / 45,
float(self.system.r(self, "pos_y"))
+ float(self.system.r(self, "vel_y")) / 45,
float(self.system.r(self, "pos_z"))
+ float(self.system.r(self, "vel_z")) / 45,
)
self.prog_emit["Position_sway"] = (
float(self.system.r(self, "pos_range_x"))
+ float(self.system.r(self, "vel_x")) / 45,
float(self.system.r(self, "pos_range_y"))
+ float(self.system.r(self, "vel_y")) / 45,
float(self.system.r(self, "pos_range_z"))
+ float(self.system.r(self, "vel_z")) / 45,
)
self.prog_emit["Position_radial"] = False
self.prog_emit["Velocity"] = (
float(self.system.r(self, "vel_x")),
float(self.system.r(self, "vel_y")),
float(self.system.r(self, "vel_z")),
)
self.prog_emit["Velocity_sway"] = (
float(self.system.r(self, "vel_range_x")),
float(self.system.r(self, "vel_range_y")),
float(self.system.r(self, "vel_range_z")),
)
self.prog_emit["Velocity_radial"] = False
self.prog_emit["Size"] = float(self.system.r(self, "size"))
self.prog_emit["Size_sway"] = float(self.system.r(self, "size_range"))
self.prog_emit["Colour"] = (
float(self.system.r(self, "col_r")),
float(self.system.r(self, "col_g")),
float(self.system.r(self, "col_b")),
)
self.prog_emit["Colour_sway"] = (
float(self.system.r(self, "col_range_r")),
float(self.system.r(self, "col_range_g")),
float(self.system.r(self, "col_range_b")),
)
self.prog_emit["Rotation"] = float(self.system.r(self, "rot"))
self.prog_emit["Rotation_sway"] = float(self.system.r(self, "rot_range"))
self.prog_emit["Rotation_velocity"] = float(self.system.r(self, "rot_vel"))
self.prog_emit["Rotation_velocity_sway"] = float(
self.system.r(self, "rot_vel_range")
)
self.prog_emit["Life"] = float(self.system.r(self, "life"))
self.prog_emit["Life_sway"] = float(self.system.r(self, "life_range"))
def on_tick(self, time, frame_time):
return self.render(time, frame_time)
def on_exit(self):
pass
def load_programs(self):
self.prog_emit = self.game.m_res.get_program_varyings(
"p4_emit_ver", varyings=self.game.m_par.get_varyings(),
)
def load_context_objects(self):
self.vao_emit = self.game.ctx._vertex_array(self.prog_emit, [])
def render(self, time, frame_time):
self.set_fields()
emit_count = 0
self.counters += frame_time
# self.active_particles = 0
if self.delay < self.counters < self.delay + self.duration:
self.emit_want += self.emit_count * frame_time
emit_count = int(min(self.N - self.system.particles, self.emit_want))
if emit_count > 0: # and not int(time * 6) % 10:
self.emit_want -= emit_count
self.prog_emit["time"].value = max(time, 0) + random() / 50
with self.game.query:
self.vao_emit.transform(
self.system.vbo2,
vertices=emit_count,
buffer_offset=self.system.particles * self.game.m_par.stride,
)
# print(self.system.particles, emit_count, self.game.query.primitives)
self.system.particles += self.game.query.primitives
# print(self.system.particles)
# print(
# f"Emitting on {self.counters} for {frame_time}, {emit_count} particles {self.active_particles}."
# )
class Renderer:
def __init__(self, game, system, params, stage):
self.game = game
self.system = system
self.N = self.system.N
self.params = params["content"]
self.stage = stage
self.title = "Renderer"
eqname = self.system.r(self, "equation").strip().lower()
self.equation = 1 if eqname == "solid" else 3 if eqname == "anti" else 2
# TODO: temp
self.texture = self.game.m_res.get_texture(
"particle", self.system.r(self, "file")
)
self.load_programs()
self.load_context_objects()
self.set_fields()
self.step_quantity = self.game.m_par.step_quantity
# TODO TEMP
self.texture_noise = self.game.m_res.get_noise()
self.prog["Size"].value = 1.0
self.prog["Stage"] = stage
self.prog["Basis"] = self.system.basis
self.vao_receive_dict = {}
self.buffer_requests = []
self.current_active_buffer = 1
self.prog["texture0"] = 0
self.prog["texturearray1"] = 10
self.prog["Usenoise"] = self.equation != 1
self.rotvel = bool(self.system.r(self, "rotvel"))
self.noise_speed = 0
self.noise_id = 0
def set_fields(self):
self.opacity = float(self.system.r(self, "opacity"))
self.noise_speed = float(self.system.r(self, "noise"))
def load_programs(self):
# Renders particle to the screen
self.prog = self.game.m_res.get_program("p4_render")
def load_context_objects(self):
# Render VAOs: the render-to-screen counterparts of the transform VAOs above
self.vao1_rend = self.game.ctx.vertex_array(
self.prog, [self.game.m_par.vao_def(self.system.vbo1, render=True)],
)
self.vao2_rend = self.game.ctx.vertex_array(
self.prog, [self.game.m_par.vao_def(self.system.vbo2, render=True)],
)
def render(self, time, frame_time):
self.set_fields()
self.prog["CameraPosition"] = self.game.m_cam.pos
self.noise_id = (
time * self.step_quantity * 2 * self.noise_speed
) % 5680 # 710 * 8
self.prog["noise_id"] = self.noise_id // 8
self.prog["projection"].write(self.game.m_cam.mvp)
self.prog["BillboardFace"].write(
self.game.m_cam.bill_rot.astype("f4").tobytes()
)
self.emit_gpu(time, frame_time)
def switch_buffers(self):
self.vao1_rend, self.vao2_rend = self.vao2_rend, self.vao1_rend
def emit_gpu(self, time, frame_time):
self.prog["opacity"] = self.opacity
self.prog["Usenoise"] = (self.equation != 1) and (self.noise_speed != 0)
self.prog["Rotvel"] = self.rotvel
self.texture.use(0)
self.texture_noise.use(10)
self.vao1_rend.render(moderngl.POINTS, vertices=self.system.particles)
class Transformer:
def __init__(self, game, system):
self.game = game
self.system = system
self.geo_declarations = set(["\n"])
self.geo_code = ""
self.geo_target = {}
self.uniforms = set()
def add_geoblock(self, params, target_filter, stage_in, stage_out=None):
title = (
params["title"].strip().lower().replace(" ", "_")
if "filter" in params["title"].lower()
else params["title"][4:].strip().lower().replace(" ", "_")
)
block = self.game.m_res.get_geoblock(title)
self.params = params["content"]
constants = block.split("// CONSTANTS")[1].split("// CONSTANTS_END")[0].strip()
dict_block = {}
if stage_out is not None:
dict_block["%TARGET_STAGE%"] = str(stage_out)
for line in constants.split("\n"):
if line[:5] == "// --":
continue
perc_parts = line.split("%")
typ = perc_parts[0]
varn = perc_parts[1]
if typ in ("float", "int",):
dict_block[f"%{varn}%"] = str(self.r(varn))
if typ in ("bool",):
| |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-off jobs for activities."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core import jobs
from core.domain import collection_services
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import question_services
from core.domain import search_services
from core.domain import skill_services
from core.platform import models
import feconf
import python_utils
(
collection_models, exp_models, question_models,
skill_models, story_models, topic_models,
subtopic_models
) = models.Registry.import_models([
models.NAMES.collection, models.NAMES.exploration, models.NAMES.question,
models.NAMES.skill, models.NAMES.story, models.NAMES.topic,
models.NAMES.subtopic
])
transaction_services = models.Registry.import_transaction_services()
class ActivityContributorsSummaryOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""One-off job that computes the number of commits done by contributors for
each collection and exploration.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [collection_models.CollectionModel, exp_models.ExplorationModel]
@staticmethod
def map(model):
if model.deleted:
return
if isinstance(model, collection_models.CollectionModel):
summary = collection_services.get_collection_summary_by_id(model.id)
summary.contributors_summary = (
collection_services.compute_collection_contributors_summary(
model.id))
summary.contributor_ids = list(summary.contributors_summary)
collection_services.save_collection_summary(summary)
else:
summary = exp_fetchers.get_exploration_summary_by_id(model.id)
summary.contributors_summary = (
exp_services.compute_exploration_contributors_summary(model.id))
summary.contributor_ids = list(summary.contributors_summary)
exp_services.save_exploration_summary(summary)
yield ('SUCCESS', model.id)
@staticmethod
def reduce(key, values):
yield (key, len(values))
class AuditContributorsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""Audit job that compares the contents of contributor_ids and
contributors_summary.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExpSummaryModel,
collection_models.CollectionSummaryModel]
@staticmethod
def map(model):
ids_set = set(model.contributor_ids)
summary_set = set(model.contributors_summary)
if len(ids_set) != len(model.contributor_ids):
# When the contributor_ids contain duplicate ids.
yield (
'DUPLICATE_IDS',
(model.id, model.contributor_ids, model.contributors_summary)
)
if ids_set - summary_set:
# When the contributor_ids contain id that is not in
# contributors_summary.
yield (
'MISSING_IN_SUMMARY',
(model.id, model.contributor_ids, model.contributors_summary)
)
if summary_set - ids_set:
# When the contributors_summary contains id that is not in
# contributor_ids.
yield (
'MISSING_IN_IDS',
(model.id, model.contributor_ids, model.contributors_summary)
)
yield ('SUCCESS', model.id)
@staticmethod
def reduce(key, values):
if key == 'SUCCESS':
yield (key, len(values))
else:
yield (key, values)
class IndexAllActivitiesJobManager(jobs.BaseMapReduceOneOffJobManager):
"""Job that indexes all explorations and collections and compute their
ranks.
"""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExpSummaryModel,
collection_models.CollectionSummaryModel]
@staticmethod
def map(item):
if not item.deleted:
if isinstance(item, exp_models.ExpSummaryModel):
search_services.index_exploration_summaries([item])
else:
search_services.index_collection_summaries([item])
@staticmethod
def reduce(key, values):
pass
class AddContentUserIdsContentJob(jobs.BaseMapReduceOneOffJobManager):
"""For every snapshot content of a rights model, merge the data from all
the user id fields in content together and put them in the
content_user_ids field of an appropriate RightsSnapshotMetadataModel.
"""
@staticmethod
def _add_collection_user_ids(snapshot_content_model):
"""Merge the user ids from the snapshot content and put them in
the snapshot metadata content_user_ids field.
"""
content_dict = (
collection_models.CollectionRightsModel.convert_to_valid_dict(
snapshot_content_model.content))
reconstituted_rights_model = (
collection_models.CollectionRightsModel(**content_dict))
snapshot_metadata_model = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
snapshot_content_model.id))
snapshot_metadata_model.content_user_ids = list(sorted(
set(reconstituted_rights_model.owner_ids) |
set(reconstituted_rights_model.editor_ids) |
set(reconstituted_rights_model.voice_artist_ids) |
set(reconstituted_rights_model.viewer_ids)))
snapshot_metadata_model.update_timestamps(
update_last_updated_time=False)
snapshot_metadata_model.put()
@staticmethod
def _add_exploration_user_ids(snapshot_content_model):
"""Merge the user ids from the snapshot content and put them in
the snapshot metadata content_user_ids field.
"""
content_dict = (
exp_models.ExplorationRightsModel.convert_to_valid_dict(
snapshot_content_model.content))
reconstituted_rights_model = (
exp_models.ExplorationRightsModel(**content_dict))
snapshot_metadata_model = (
exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
snapshot_content_model.id))
snapshot_metadata_model.content_user_ids = list(sorted(
set(reconstituted_rights_model.owner_ids) |
set(reconstituted_rights_model.editor_ids) |
set(reconstituted_rights_model.voice_artist_ids) |
set(reconstituted_rights_model.viewer_ids)))
snapshot_metadata_model.update_timestamps(
update_last_updated_time=False)
snapshot_metadata_model.put()
@staticmethod
def _add_topic_user_ids(snapshot_content_model):
"""Merge the user ids from the snapshot content and put them in
the snapshot metadata content_user_ids field.
"""
reconstituted_rights_model = topic_models.TopicRightsModel(
**snapshot_content_model.content)
snapshot_metadata_model = (
topic_models.TopicRightsSnapshotMetadataModel.get_by_id(
snapshot_content_model.id))
snapshot_metadata_model.content_user_ids = list(sorted(set(
reconstituted_rights_model.manager_ids)))
snapshot_metadata_model.update_timestamps(
update_last_updated_time=False)
snapshot_metadata_model.put()
@classmethod
def enqueue(cls, job_id, additional_job_params=None):
# We can raise the number of shards for this job, since it goes only
# over three types of entity class.
super(AddContentUserIdsContentJob, cls).enqueue(
job_id, shard_count=64)
@classmethod
def entity_classes_to_map_over(cls):
"""Return a list of datastore class references to map over."""
return [collection_models.CollectionRightsSnapshotContentModel,
exp_models.ExplorationRightsSnapshotContentModel,
topic_models.TopicRightsSnapshotContentModel]
@staticmethod
def map(rights_snapshot_model):
"""Implements the map function for this job."""
class_name = rights_snapshot_model.__class__.__name__
if isinstance(
rights_snapshot_model,
collection_models.CollectionRightsSnapshotContentModel):
AddContentUserIdsContentJob._add_collection_user_ids(
rights_snapshot_model)
elif isinstance(
rights_snapshot_model,
exp_models.ExplorationRightsSnapshotContentModel):
AddContentUserIdsContentJob._add_exploration_user_ids(
rights_snapshot_model)
elif isinstance(
rights_snapshot_model,
topic_models.TopicRightsSnapshotContentModel):
AddContentUserIdsContentJob._add_topic_user_ids(
rights_snapshot_model)
yield ('SUCCESS-%s' % class_name, rights_snapshot_model.id)
@staticmethod
def reduce(key, ids):
"""Implements the reduce function for this job."""
yield (key, len(ids))
class AuditSnapshotMetadataModelsJob(jobs.BaseMapReduceOneOffJobManager):
"""Job that audits commit_cmds field of the snapshot metadata models. We log
the length of the commit_cmd, the possible 'cmd' values, and all the other
keys.
"""
@classmethod
def enqueue(cls, job_id, additional_job_params=None):
# We can raise the number of shards for this job, since it goes only
# over three types of entity class.
super(AuditSnapshotMetadataModelsJob, cls).enqueue(
job_id, shard_count=64)
@classmethod
def entity_classes_to_map_over(cls):
"""Return a list of datastore class references to map over."""
return [collection_models.CollectionRightsSnapshotMetadataModel,
exp_models.ExplorationRightsSnapshotMetadataModel,
topic_models.TopicRightsSnapshotMetadataModel]
@staticmethod
def map(snapshot_model):
"""Implements the map function for this job."""
if isinstance(
snapshot_model,
collection_models.CollectionRightsSnapshotMetadataModel):
model_type_name = 'collection'
elif isinstance(
snapshot_model,
exp_models.ExplorationRightsSnapshotMetadataModel):
model_type_name = 'exploration'
elif isinstance(
snapshot_model,
topic_models.TopicRightsSnapshotMetadataModel):
model_type_name = 'topic'
if snapshot_model.deleted:
yield ('%s-deleted' % model_type_name, 1)
return
first_commit_cmd = None
for commit_cmd in snapshot_model.commit_cmds:
if 'cmd' in commit_cmd:
cmd_name = commit_cmd['cmd']
yield ('%s-cmd-%s' % (model_type_name, cmd_name), 1)
else:
cmd_name = 'missing_cmd'
yield ('%s-missing-cmd' % model_type_name, 1)
if first_commit_cmd is None:
first_commit_cmd = cmd_name
for field_key in commit_cmd.keys():
if field_key != 'cmd':
yield (
'%s-%s-field-%s' % (
model_type_name, cmd_name, field_key
),
1
)
if first_commit_cmd is not None:
yield (
'%s-%s-length-%s' % (
model_type_name,
first_commit_cmd,
len(snapshot_model.commit_cmds)),
1
)
else:
yield (
'%s-length-%s' % (
model_type_name, len(snapshot_model.commit_cmds)),
1
)
@staticmethod
def reduce(key, values):
"""Implements the reduce function for this job."""
yield (key, len(values))
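# Illustrative examples of keys emitted by the map step above (cmd and field
# names assumed): ('exploration-cmd-edit_exploration_property', 1),
# ('topic-missing-cmd', 1), ('collection-<cmd>-field-<key>', 1). The reduce
# step then reports the number of occurrences per key.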
class ValidateSnapshotMetadataModelsJob(jobs.BaseMapReduceOneOffJobManager):
"""Job that validates whether each SnapshotMetadata model has a
corresponding CommitLog model pair and the corresponding parent model.
"""
FAILURE_PREFIX = 'VALIDATION FAILURE'
SNAPSHOT_METADATA_MODELS = [
collection_models.CollectionSnapshotMetadataModel,
collection_models.CollectionRightsSnapshotMetadataModel,
exp_models.ExplorationSnapshotMetadataModel,
exp_models.ExplorationRightsSnapshotMetadataModel,
question_models.QuestionSnapshotMetadataModel,
skill_models.SkillSnapshotMetadataModel,
story_models.StorySnapshotMetadataModel,
topic_models.TopicSnapshotMetadataModel,
subtopic_models.SubtopicPageSnapshotMetadataModel,
topic_models.TopicRightsSnapshotMetadataModel
]
MODEL_NAMES_TO_PROPERTIES = {
'CollectionSnapshotMetadataModel': {
'parent_model_class': collection_models.CollectionModel,
'commit_log_model_class': (
collection_models.CollectionCommitLogEntryModel),
'id_string_format': 'collection-%s-%s'
},
'ExplorationSnapshotMetadataModel': {
'parent_model_class': exp_models.ExplorationModel,
'commit_log_model_class': exp_models.ExplorationCommitLogEntryModel,
'id_string_format': 'exploration-%s-%s'
},
'QuestionSnapshotMetadataModel': {
'parent_model_class': question_models.QuestionModel,
'commit_log_model_class': (
question_models.QuestionCommitLogEntryModel),
'id_string_format': 'question-%s-%s'
},
'SkillSnapshotMetadataModel': {
'parent_model_class': skill_models.SkillModel,
'commit_log_model_class': skill_models.SkillCommitLogEntryModel,
'id_string_format': 'skill-%s-%s'
},
'StorySnapshotMetadataModel': {
'parent_model_class': story_models.StoryModel,
'commit_log_model_class': story_models.StoryCommitLogEntryModel,
'id_string_format': 'story-%s-%s'
},
'TopicSnapshotMetadataModel': {
'parent_model_class': topic_models.TopicModel,
'commit_log_model_class': topic_models.TopicCommitLogEntryModel,
'id_string_format': 'topic-%s-%s'
},
'SubtopicPageSnapshotMetadataModel': {
'parent_model_class': subtopic_models.SubtopicPageModel,
'commit_log_model_class': (
subtopic_models.SubtopicPageCommitLogEntryModel),
'id_string_format': 'subtopicpage-%s-%s'
},
'TopicRightsSnapshotMetadataModel': {
'parent_model_class': topic_models.TopicRightsModel,
'commit_log_model_class': topic_models.TopicCommitLogEntryModel,
'id_string_format': 'rights-%s-%s'
},
'CollectionRightsSnapshotMetadataModel': {
'parent_model_class': collection_models.CollectionRightsModel,
'commit_log_model_class': (
collection_models.CollectionCommitLogEntryModel),
'id_string_format': 'rights-%s-%s'
},
'ExplorationRightsSnapshotMetadataModel': {
'parent_model_class': exp_models.ExplorationRightsModel,
'commit_log_model_class': exp_models.ExplorationCommitLogEntryModel,
'id_string_format': 'rights-%s-%s'
},
}
# This list consists of the rights snapshot metadata models for which
# the commit log model is not created when the commit cmd is "create"
# or "delete".
MODEL_NAMES_WITH_PARTIAL_COMMIT_LOGS = [
'CollectionRightsSnapshotMetadataModel',
'ExplorationRightsSnapshotMetadataModel'
]
@classmethod
def entity_classes_to_map_over(cls):
"""Return a list of SnapshotMetadata models that is associated
with a CommitLogEntry model.
"""
return ValidateSnapshotMetadataModelsJob.SNAPSHOT_METADATA_MODELS
@staticmethod
def map(snapshot_model):
"""Implements the map function for this job."""
job_class = ValidateSnapshotMetadataModelsJob
class_name = snapshot_model.__class__.__name__
missing_commit_log_msg = (
'%s - MISSING COMMIT LOGS' % job_class.FAILURE_PREFIX)
found_commit_log_msg = 'FOUND COMMIT LOGS'
# Note: The subtopic snapshot ID is in the format
# '<topicId>-<subtopicNum>-<version>'.
model_id, version = snapshot_model.id.rsplit('-', 1)
model_properties = job_class.MODEL_NAMES_TO_PROPERTIES[class_name]
commit_log_id = (
model_properties['id_string_format'] % (model_id, version))
parent_model_class = (
model_properties['parent_model_class'].get_by_id(model_id))
commit_log_model_class = (
model_properties['commit_log_model_class'].get_by_id(
commit_log_id))
if class_name in job_class.MODEL_NAMES_WITH_PARTIAL_COMMIT_LOGS:
if snapshot_model.commit_type in ['create', 'delete']:
missing_commit_log_msg = (
'COMMIT LOGS SHOULD NOT EXIST AND DOES NOT EXIST')
found_commit_log_msg = (
'%s - COMMIT LOGS SHOULD NOT EXIST BUT EXISTS' % (
job_class.FAILURE_PREFIX))
message_prefix = (
missing_commit_log_msg if commit_log_model_class is None
else found_commit_log_msg)
yield ('%s - %s' % (message_prefix, class_name), snapshot_model.id)
if parent_model_class is None:
yield (
'%s - MISSING PARENT MODEL - %s' % (
job_class.FAILURE_PREFIX, class_name),
snapshot_model.id)
else:
yield ('FOUND PARENT MODEL - %s' % class_name, 1)
@staticmethod
def reduce(key, values):
"""Implements the reduce function for this job."""
if key.startswith(ValidateSnapshotMetadataModelsJob.FAILURE_PREFIX):
yield (key, values)
else:
yield (key, len(values))
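# Sketch of the ID pairing validated above (example values assumed): a
# snapshot metadata model with id 'abc123-4' splits into model id 'abc123'
# and version 4, which must pair with a commit log entry id built from
# id_string_format, e.g. 'exploration-abc123-4' for explorations.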
class AddMissingCommitLogsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
"""Job that adds the missing commit log entry model for the corresponding
snapshot models. These commit log entry model were found missing on running
a validation job introduced in the PR #10770. This job needs to be run once
only to fill the missing data.
"""
# This list consists of the snapshot models whose associated commit log
# models are missing. The commit logs were found missing in a validation
# job.
SNAPSHOT_METADATA_MODELS_WITH_MISSING_COMMIT_LOGS = [
exp_models.ExplorationRightsSnapshotMetadataModel,
question_models.QuestionSnapshotMetadataModel,
skill_models.SkillSnapshotMetadataModel
]
MODEL_NAMES_TO_PROPERTIES = {
'QuestionSnapshotMetadataModel': {
| |
import numpy as np
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from ..utils import ConvModule, bias_init_with_prob
from .anchor_head import AnchorHead
import torch
import torch.nn.functional as F
from ..losses import sigmoid_focal_loss, FocalLoss, iou_loss
from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox, force_fp32,
multi_apply, multiclass_nms)
import pdb
@HEADS.register_module
class FSAFHead(AnchorHead):
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
octave_base_scale=4,
scales_per_octave=3,
conv_cfg=None,
norm_cfg=None,
feat_strides=[8, 16, 32, 64, 128],
eps_e=0.2,
eps_i=0.5,
FL_alpha=0.25,
FL_gamma=2.0,
bbox_offset_norm=4,
**kwargs):
self.stacked_convs = stacked_convs
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
anchor_scales = octave_scales * octave_base_scale
self.feat_strides = feat_strides
self.eps_e = eps_e
self.eps_i = eps_i
self.FL_alpha = FL_alpha
self.FL_gamma = FL_gamma
self.bbox_offset_norm = bbox_offset_norm
super(FSAFHead, self).__init__(
num_classes, in_channels, anchor_scales=anchor_scales, **kwargs)
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.fsaf_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fsaf_reg = nn.Conv2d(
self.feat_channels, 4, 3, padding=1)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fsaf_cls, std=0.01, bias=bias_cls)
normal_init(self.fsaf_reg, std=0.1)
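# Note: bias_init_with_prob(0.01) sets the classification bias to
# -log((1 - 0.01) / 0.01), so the initial sigmoid output is roughly 0.01 --
# the focal-loss prior from RetinaNet that keeps easy negatives from
# dominating the loss early in training.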
def forward_single(self, x):
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.fsaf_cls(cls_feat)
bbox_pred = self.relu(self.fsaf_reg(reg_feat))
return cls_score, bbox_pred
# per-level loss operation
def loss_single(self,
cls_scores,
bbox_preds,
cls_targets_list,
reg_targets_list,
level,
img_metas,
cfg,
gt_bboxes_ignore=None):
device = cls_scores[0].device
num_imgs = cls_targets_list.shape[0]
# loss-cls
scores = cls_scores.permute(0,2,3,1).reshape(-1,1)
labels = cls_targets_list.permute(0,2,3,1).reshape(-1)
valid_cls_idx = labels != -1
loss_cls = sigmoid_focal_loss(scores[valid_cls_idx], labels[valid_cls_idx],
gamma=self.FL_gamma, alpha=self.FL_alpha, reduction='sum')
norm_cls = (labels == 1).long().sum()
# loss-reg
offsets = bbox_preds.permute(0,2,3,1).reshape(-1,4)
gtboxes = reg_targets_list.permute(0,2,3,1).reshape(-1,4)
valid_reg_idx = (gtboxes[:,0] != -1)
if valid_reg_idx.long().sum() != 0:
offsets = offsets[valid_reg_idx]
gtboxes = gtboxes[valid_reg_idx]
H,W = bbox_preds.shape[2:]
y,x = torch.meshgrid([torch.arange(0,H), torch.arange(0,W)])
y = (y.float() + 0.5) * self.feat_strides[level]
x = (x.float() + 0.5) * self.feat_strides[level]
xy = torch.cat([x.unsqueeze(2),y.unsqueeze(2)], dim=2).float().to(device)
xy = xy.permute(2,0,1).unsqueeze(0).repeat(num_imgs,1,1,1)
xy = xy.permute(0,2,3,1).reshape(-1,2)
xy = xy[valid_reg_idx]
dist_pred = offsets * self.feat_strides[level]
bboxes = self.dist2bbox(xy, dist_pred, self.bbox_offset_norm)
loss_reg = iou_loss(bboxes, gtboxes, reduction='sum')
norm_reg = valid_reg_idx.long().sum()
else:
loss_reg = torch.tensor(0).float().to(device)
norm_reg = torch.tensor(0).float().to(device)
return loss_cls, loss_reg, norm_cls, norm_reg
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
cls_reg_targets = self.fsaf_target(
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore_list=gt_bboxes_ignore)
(cls_targets_list, reg_targets_list) = cls_reg_targets
level_list = [i for i in range(len(self.feat_strides))]
loss_cls, loss_reg, norm_cls, norm_reg = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
cls_targets_list,
reg_targets_list,
level_list,
img_metas=img_metas,
cfg=cfg,
gt_bboxes_ignore=None)
loss_cls = sum(loss_cls)/sum(norm_cls)
loss_reg = sum(loss_reg)/sum(norm_reg)
return dict(loss_cls=loss_cls, loss_bbox=loss_reg)
def fsaf_target(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore_list=None):
device = cls_scores[0].device
# target placeholder
num_levels = len(cls_scores)
cls_targets_list = []
reg_targets_list = []
for level in range(num_levels):
cls_targets_list.append(torch.zeros_like(cls_scores[level]).long()) # 0 init
reg_targets_list.append(torch.ones_like(bbox_preds[level]) * -1) # -1 init
# detached network prediction for online GT generation
num_imgs = len(gt_bboxes)
cls_scores_list = []
bbox_preds_list = []
for img in range(num_imgs):
# detached prediction for online pyramid level selection
cls_scores_list.append([lvl[img].detach() for lvl in cls_scores])
bbox_preds_list.append([lvl[img].detach() for lvl in bbox_preds])
# generate online GT
num_imgs = len(gt_bboxes)
for img in range(num_imgs):
# sort objects according to their size
gt_bboxes_img_xyxy = gt_bboxes[img]
gt_bboxes_img_xywh = self.xyxy2xywh(gt_bboxes_img_xyxy)
gt_bboxes_img_size = gt_bboxes_img_xywh[:,-2] * gt_bboxes_img_xywh[:,-1]
_, gt_bboxes_img_idx = gt_bboxes_img_size.sort(descending=True)
for obj_idx in gt_bboxes_img_idx:
label = gt_labels[img][obj_idx]-1
gt_bbox_obj_xyxy = gt_bboxes_img_xyxy[obj_idx]
# get optimal online pyramid level for each object
opt_level = self.get_online_pyramid_level(
cls_scores_list[img], bbox_preds_list[img], gt_bbox_obj_xyxy, label)
# get the effective/ignore area
H,W = cls_scores[opt_level].shape[2:]
b_p_xyxy = gt_bbox_obj_xyxy / self.feat_strides[opt_level]
e_spatial_idx, i_spatial_idx = self.get_spatial_idx(b_p_xyxy,W,H,device)
# cls-GT
# fill prob= 1 for the effective area
cls_targets_list[opt_level][img, label, e_spatial_idx] = 1
# fill prob=-1 for the ignoring area
_i_spatial_idx = cls_targets_list[opt_level][img, label] * i_spatial_idx.long()
i_spatial_idx = i_spatial_idx - (_i_spatial_idx == 1)
cls_targets_list[opt_level][img, label, i_spatial_idx] = -1
# fill prob=-1 for the adjacent ignoring area; lower
if opt_level != 0:
H_l,W_l = cls_scores[opt_level-1].shape[2:]
b_p_xyxy_l = gt_bbox_obj_xyxy / self.feat_strides[opt_level-1]
_, i_spatial_idx_l = self.get_spatial_idx(b_p_xyxy_l,W_l,H_l,device)
# preserve cls-gt that is already filled as effective area
_i_spatial_idx_l = cls_targets_list[opt_level-1][img, label] * i_spatial_idx_l.long()
i_spatial_idx_l = i_spatial_idx_l - (_i_spatial_idx_l == 1)
cls_targets_list[opt_level-1][img, label][i_spatial_idx_l] = -1
# fill prob=-1 for the adjacent ignoring area; upper
if opt_level != num_levels-1:
H_u,W_u = cls_scores[opt_level+1].shape[2:]
b_p_xyxy_u = gt_bbox_obj_xyxy / self.feat_strides[opt_level+1]
_, i_spatial_idx_u = self.get_spatial_idx(b_p_xyxy_u,W_u,H_u,device)
# preserve cls-gt that is already filled as effective area
_i_spatial_idx_u = cls_targets_list[opt_level+1][img, label] * i_spatial_idx_u.long()
i_spatial_idx_u = i_spatial_idx_u - (_i_spatial_idx_u == 1)
cls_targets_list[opt_level+1][img, label][i_spatial_idx_u] = -1
# reg-GT
reg_targets_list[opt_level][img, :, e_spatial_idx] = gt_bbox_obj_xyxy.unsqueeze(1)
return cls_targets_list, reg_targets_list
def get_spatial_idx(self, b_p_xyxy, W, H, device):
# zero-tensor w/ (H,W)
e_spatial_idx = torch.zeros((H,W)).byte()
i_spatial_idx = torch.zeros((H,W)).byte()
# effective idx
b_e_xyxy = self.get_prop_xyxy(b_p_xyxy, self.eps_e, W, H)
e_xmin = b_e_xyxy[0]
e_xmax = b_e_xyxy[2]+1
e_ymin = b_e_xyxy[1]
e_ymax = b_e_xyxy[3]+1
e_spatial_idx[e_ymin:e_ymax, e_xmin:e_xmax] = 1
# ignore idx
b_i_xyxy = self.get_prop_xyxy(b_p_xyxy, self.eps_i, W, H)
i_xmin = b_i_xyxy[0]
i_xmax = b_i_xyxy[2]+1
i_ymin = b_i_xyxy[1]
i_ymax = b_i_xyxy[3]+1
i_spatial_idx[i_ymin:i_ymax, i_xmin:i_xmax] = 1
i_spatial_idx[e_ymin:e_ymax, e_xmin:e_xmax] = 0
return e_spatial_idx.to(device), i_spatial_idx.to(device)
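    # Illustrative sketch (not from the original code): with the defaults
    # eps_e=0.2 and eps_i=0.5, a projected GT box covering 10x10 feature-map
    # cells yields an effective (positive) region of roughly 2x2 cells centred
    # on the box, surrounded by an ignore ring of roughly 5x5 cells; every
    # cell outside both regions remains negative in the classification target.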
def get_online_pyramid_level(self, cls_scores_img, bbox_preds_img, gt_bbox_obj_xyxy, gt_label_obj):
device = cls_scores_img[0].device
num_levels = len(cls_scores_img)
level_losses = torch.zeros(num_levels)
for level in range(num_levels):
H,W = cls_scores_img[level].shape[1:]
b_p_xyxy = gt_bbox_obj_xyxy / self.feat_strides[level]
b_e_xyxy = self.get_prop_xyxy(b_p_xyxy, self.eps_e, W, H)
# Eqn-(1)
N = (b_e_xyxy[3]-b_e_xyxy[1]+1) * (b_e_xyxy[2]-b_e_xyxy[0]+1)
# cls loss; FL
score = cls_scores_img[level][gt_label_obj,b_e_xyxy[1]:b_e_xyxy[3]+1,b_e_xyxy[0]:b_e_xyxy[2]+1]
score = score.contiguous().view(-1).unsqueeze(1)
label = torch.ones_like(score).long()
label = label.contiguous().view(-1)
loss_cls = sigmoid_focal_loss(score, label, gamma=self.FL_gamma, alpha=self.FL_alpha, reduction='mean')
#loss_cls /= N
# reg loss; IoU
offsets = bbox_preds_img[level][:,b_e_xyxy[1]:b_e_xyxy[3]+1,b_e_xyxy[0]:b_e_xyxy[2]+1]
offsets = offsets.contiguous().permute(1,2,0) # (b_e_H,b_e_W,4)
offsets = offsets.reshape(-1,4) # (#pix-e,4)
# predicted bbox
y,x = torch.meshgrid([torch.arange(b_e_xyxy[1],b_e_xyxy[3]+1), torch.arange(b_e_xyxy[0],b_e_xyxy[2]+1)])
y = (y.float() + 0.5) * self.feat_strides[level]
x = (x.float() + 0.5) * self.feat_strides[level]
xy = torch.cat([x.unsqueeze(2),y.unsqueeze(2)], dim=2).float().to(device)
xy = xy.reshape(-1,2)
dist_pred = offsets * self.feat_strides[level]
bboxes = self.dist2bbox(xy, dist_pred, self.bbox_offset_norm)
loss_reg = iou_loss(bboxes, gt_bbox_obj_xyxy.unsqueeze(0).repeat(N,1), reduction='mean')
            #loss_reg /= N
loss = loss_cls + loss_reg
level_losses[level] = loss
min_level_idx = torch.argmin(level_losses)
#print(level_losses, min_level_idx)
return min_level_idx
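    # In summary: for every GT box, the combined focal (cls) + IoU (reg) loss
    # is evaluated over the box's effective region at each pyramid level using
    # the detached predictions, and the level with the minimum loss is chosen
    # online -- the feature-selection step of FSAF.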
def get_prop_xyxy(self, xyxy, scale, w, h):
# scale bbox
xywh = self.xyxy2xywh(xyxy)
xywh[2:] *= scale
xyxy = self.xywh2xyxy(xywh)
# clamp bbox
xyxy[0] = xyxy[0].floor().clamp(0, w-2).int() # x1
xyxy[1] = xyxy[1].floor().clamp(0, h-2).int() # y1
xyxy[2] = xyxy[2].ceil().clamp(1, w-1).int() # x2
xyxy[3] = xyxy[3].ceil().clamp(1, h-1).int() # y2
return xyxy.int()
def xyxy2xywh(self, xyxy):
if xyxy.dim() == 1:
return torch.cat((0.5 * (xyxy[0:2] + xyxy[2:4]), xyxy[2:4] - xyxy[0:2]), dim=0)
else:
return torch.cat((0.5 * (xyxy[:, 0:2] + xyxy[:, 2:4]), xyxy[:, 2:4] - xyxy[:, 0:2]), dim=1)
def xywh2xyxy(self, xywh):
if xywh.dim() == 1:
return torch.cat((xywh[0:2] - 0.5 * xywh[2:4], xywh[0:2] + 0.5 * xywh[2:4]), dim=0)
else:
return torch.cat((xywh[:, 0:2] - 0.5 * xywh[:, 2:4], xywh[:, 0:2] + 0.5 * xywh[:, 2:4]), dim=1)
def dist2bbox(self, center, dist_pred, norm_const):
        '''
        args
            center     ; (N,2) centre points on the image plane
            dist_pred  ; (N,4) predicted (top, left, bottom, right) offsets
            norm_const ; normalization constant applied to the offsets
        '''
x1y1 = center - (norm_const * torch.cat([dist_pred[:,1].unsqueeze(1),dist_pred[:,0].unsqueeze(1)],dim=1))
x2y2 = center + (norm_const * torch.cat([dist_pred[:,3].unsqueeze(1),dist_pred[:,2].unsqueeze(1)],dim=1))
bbox = torch.cat([x1y1,x2y2],dim=1)
return bbox
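    # Usage sketch (illustrative values, not from the original code):
    #   center    = torch.tensor([[100., 100.]])          # one centre point
    #   dist_pred = torch.tensor([[0.5, 0.5, 0.5, 0.5]])  # (t, l, b, r)
    #   self.dist2bbox(center, dist_pred, norm_const=4)
    #   # -> tensor([[98., 98., 102., 102.]])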
def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg,
rescale=False):
# note: only single-img evaluation is available now
num_levels = len(cls_scores)
assert len(cls_scores) == len(bbox_preds) == num_levels
device = bbox_preds[0].device
dtype = bbox_preds[0].dtype
scale_factor = img_metas[0]['scale_factor']
# generate center-points
xy_list = []
for level in range(num_levels):
H,W = bbox_preds[level].shape[2:]
y,x = torch.meshgrid([torch.arange(0,H), torch.arange(0,W)])
y = (y.float() + 0.5) * self.feat_strides[level]
x = (x.float() + 0.5) * self.feat_strides[level]
xy = torch.cat([x.unsqueeze(2),y.unsqueeze(2)], dim=2).float().to(device)
xy = xy.permute(2,0,1).unsqueeze(0)
xy_list.append(xy)
mlvl_bboxes = []
mlvl_scores = []
for level, (cls_score, bbox_pred, xy) in enumerate(zip(cls_scores, bbox_preds, xy_list)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score[0].permute(1, 2, 0).reshape(
-1, self.cls_out_channels)
scores = cls_score.sigmoid()
bbox_pred = bbox_pred[0].permute(1, 2, 0).reshape(-1, 4)
xy = xy[0].permute(1, 2, 0).reshape(-1, 2)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
                xy = xy[topk_inds, :]
from __future__ import division
import logging
try:
from Queue import Empty
except ImportError:
from queue import Empty
from redis import StrictRedis
from time import time, sleep
from threading import Thread
from collections import defaultdict
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager, Queue
from multiprocessing import Process, Queue
from msgpack import Unpacker, packb
from os import path, kill, getpid
from math import ceil
import traceback
import operator
import re
import os
import errno
import sys
import os.path
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# literal_eval required to evaluate Redis sets
from ast import literal_eval
import settings
# @modified 20171216 - Task #2236: Change Boundary to only send to Panorama on alert
# Added move_file
from skyline_functions import (
send_graphite_metric, write_data_to_file, move_file,
# @added 20181126 - Task #2742: Update Boundary
# Feature #2034: analyse_derivatives
nonNegativeDerivative, in_list)
from boundary_alerters import trigger_alert
from boundary_algorithms import run_selected_algorithm
from algorithm_exceptions import (TooShort, Stale, Boring)
skyline_app = 'boundary'
skyline_app_logger = skyline_app + 'Log'
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(sys.version_info[0])
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.' + settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
skyline_app_graphite_namespace = 'skyline.' + skyline_app + SERVER_METRIC_PATH
REDIS_SOCKET = settings.REDIS_SOCKET_PATH
BOUNDARY_METRICS = settings.BOUNDARY_METRICS
FULL_NAMESPACE = settings.FULL_NAMESPACE
ENABLE_BOUNDARY_DEBUG = settings.ENABLE_BOUNDARY_DEBUG
try:
BOUNDARY_AUTOAGGRERATION = settings.BOUNDARY_AUTOAGGRERATION
except:
BOUNDARY_AUTOAGGRERATION = False
try:
BOUNDARY_AUTOAGGRERATION_METRICS = settings.BOUNDARY_AUTOAGGRERATION_METRICS
except:
    # Trailing comma so the fallback is a tuple of tuples, matching how
    # BOUNDARY_AUTOAGGRERATION_METRICS is iterated below
    BOUNDARY_AUTOAGGRERATION_METRICS = (
        ("auotaggeration_metrics_not_declared", 60),
    )
class Boundary(Thread):
def __init__(self, parent_pid):
"""
Initialize the Boundary
"""
super(Boundary, self).__init__()
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Task #3032: Debug number of Python processes and memory use
# Branch #3002: docker
# Reduce amount of Manager instances that are used as each requires a
# copy of entire memory to be copied into each subprocess so this
# results in a python process per Manager instance, using as much
# memory as the parent. OK on a server, not so much in a container.
# Disabled all the Manager() lists below and replaced with Redis sets
# self.boundary_metrics = Manager().list()
# self.anomalous_metrics = Manager().list()
self.exceptions_q = Queue()
self.anomaly_breakdown_q = Queue()
# @added 20171214 - Bug #2232: Expiry boundary last_seen keys appropriately
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous_metrics = Manager().list()
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
def unique_noHash(self, seq):
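        # Note: seen.add(str(x)) returns None (falsy), so "not seen.add(...)"
        # is always True and merely records x as seen; the preceding membership
        # test decides whether x is kept. Order-preserving de-duplication that
        # also works for unhashable items, e.g. [[1], [2], [1]] -> [[1], [2]].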
seen = set()
return [x for x in seq if str(x) not in seen and not seen.add(str(x))]
    # This makes a dump directory in /tmp, used for dumping the metric
    # timeseries data for debugging purposes, if ENABLE_BOUNDARY_DEBUG is True
def mkdir_p(self, path):
try:
os.makedirs(path)
return True
except OSError as exc:
# Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
# @modified 20171216 - Task #2236: Change Boundary to only send to Panorama on alert
# Pass added_at as an argument to spin_process so that the panaroma_anomaly_file
# can be moved from SKYLINE_TMP_DIR to the PANORAMA_CHECK_PATH
# def spin_process(self, i, boundary_metrics):
def spin_process(self, i, boundary_metrics, added_at):
"""
Assign a bunch of metrics for a process to analyze.
"""
# Determine assigned metrics
bp = settings.BOUNDARY_PROCESSES
bm_range = len(boundary_metrics)
keys_per_processor = int(ceil(float(bm_range) / float(bp)))
if i == settings.BOUNDARY_PROCESSES:
assigned_max = len(boundary_metrics)
else:
            # This is a skyline bug: the original skyline code uses 1 as the
            # first position of the index, but Python indices begin at 0
            # assigned_max = len(boundary_metrics)
            # This closes the etsy/skyline pull request opened by @languitar on 17 Jun 2014
            # https://github.com/etsy/skyline/pull/94 Fix analyzer worker metric assignment
assigned_max = min(len(boundary_metrics), i * keys_per_processor)
assigned_min = (i - 1) * keys_per_processor
assigned_keys = range(assigned_min, assigned_max)
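        # Example with hypothetical numbers (not from the original code): for
        # 10 boundary metrics and BOUNDARY_PROCESSES = 3, keys_per_processor is
        # ceil(10 / 3) = 4, so process i=1 gets keys 0-3, i=2 gets keys 4-7 and
        # the last process i=3 gets the remaining keys 8-9.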
# Compile assigned metrics
assigned_metrics_and_algos = [boundary_metrics[index] for index in assigned_keys]
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: printing assigned_metrics_and_algos')
for assigned_metric_and_algo in assigned_metrics_and_algos:
logger.info('debug :: assigned_metric_and_algo - %s' % str(assigned_metric_and_algo))
# Compile assigned metrics
assigned_metrics = []
for i in assigned_metrics_and_algos:
assigned_metrics.append(i[0])
# unique unhashed things
def unique_noHash(seq):
seen = set()
return [x for x in seq if str(x) not in seen and not seen.add(str(x))]
unique_assigned_metrics = unique_noHash(assigned_metrics)
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: unique_assigned_metrics - %s' % str(unique_assigned_metrics))
logger.info('debug :: printing unique_assigned_metrics:')
for unique_assigned_metric in unique_assigned_metrics:
logger.info('debug :: unique_assigned_metric - %s' % str(unique_assigned_metric))
# Check if this process is unnecessary
if len(unique_assigned_metrics) == 0:
return
# Multi get series
try:
raw_assigned = self.redis_conn.mget(unique_assigned_metrics)
except:
logger.error('error :: failed to mget assigned_metrics from redis')
return
# Make process-specific dicts
exceptions = defaultdict(int)
anomaly_breakdown = defaultdict(int)
        # Reset boundary_algorithms
all_boundary_algorithms = []
for metric in BOUNDARY_METRICS:
all_boundary_algorithms.append(metric[1])
# The unique algorithms that are being used
boundary_algorithms = unique_noHash(all_boundary_algorithms)
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: boundary_algorithms - %s' % str(boundary_algorithms))
discover_run_metrics = []
# Distill metrics into a run list
        for i, metric_name in enumerate(unique_assigned_metrics):
self.check_if_parent_is_alive()
try:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: unpacking timeseries for %s - %s' % (metric_name, str(i)))
raw_series = raw_assigned[i]
unpacker = Unpacker(use_list=False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
except Exception as e:
exceptions['Other'] += 1
logger.error('error :: redis data error: ' + traceback.format_exc())
                logger.error('error :: %s' % e)
base_name = metric_name.replace(FULL_NAMESPACE, '', 1)
# Determine the metrics BOUNDARY_METRICS metric tuple settings
for metrick in BOUNDARY_METRICS:
CHECK_MATCH_PATTERN = metrick[0]
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(base_name)
metric_pattern_matched = False
if pattern_match:
metric_pattern_matched = True
algo_pattern_matched = False
for algo in boundary_algorithms:
for metric in BOUNDARY_METRICS:
CHECK_MATCH_PATTERN = metric[0]
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(base_name)
if pattern_match:
if ENABLE_BOUNDARY_DEBUG:
logger.info("debug :: metric and algo pattern MATCHED - " + metric[0] + " | " + base_name + " | " + str(metric[1]))
metric_expiration_time = False
metric_min_average = False
metric_min_average_seconds = False
metric_trigger = False
algorithm = False
algo_pattern_matched = True
algorithm = metric[1]
try:
if metric[2]:
metric_expiration_time = metric[2]
except:
metric_expiration_time = False
try:
if metric[3]:
metric_min_average = metric[3]
except:
metric_min_average = False
try:
if metric[4]:
metric_min_average_seconds = metric[4]
except:
metric_min_average_seconds = 1200
try:
if metric[5]:
metric_trigger = metric[5]
except:
metric_trigger = False
try:
if metric[6]:
alert_threshold = metric[6]
except:
alert_threshold = False
try:
if metric[7]:
metric_alerters = metric[7]
except:
metric_alerters = False
if metric_pattern_matched and algo_pattern_matched:
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: added metric - %s, %s, %s, %s, %s, %s, %s, %s, %s' % (str(i), metric_name, str(metric_expiration_time), str(metric_min_average), str(metric_min_average_seconds), str(metric_trigger), str(alert_threshold), metric_alerters, algorithm))
discover_run_metrics.append([i, metric_name, metric_expiration_time, metric_min_average, metric_min_average_seconds, metric_trigger, alert_threshold, metric_alerters, algorithm])
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: printing discover_run_metrics')
for discover_run_metric in discover_run_metrics:
logger.info('debug :: discover_run_metrics - %s' % str(discover_run_metric))
logger.info('debug :: build unique boundary metrics to analyze')
# Determine the unique set of metrics to run
run_metrics = unique_noHash(discover_run_metrics)
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: printing run_metrics')
for run_metric in run_metrics:
logger.info('debug :: run_metrics - %s' % str(run_metric))
# Distill timeseries strings and submit to run_selected_algorithm
for metric_and_algo in run_metrics:
self.check_if_parent_is_alive()
try:
raw_assigned_id = metric_and_algo[0]
metric_name = metric_and_algo[1]
base_name = metric_name.replace(FULL_NAMESPACE, '', 1)
metric_expiration_time = metric_and_algo[2]
metric_min_average = metric_and_algo[3]
metric_min_average_seconds = metric_and_algo[4]
metric_trigger = metric_and_algo[5]
alert_threshold = metric_and_algo[6]
metric_alerters = metric_and_algo[7]
algorithm = metric_and_algo[8]
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: unpacking timeseries for %s - %s' % (metric_name, str(raw_assigned_id)))
raw_series = raw_assigned[metric_and_algo[0]]
unpacker = Unpacker(use_list=False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: unpacked OK - %s - %s' % (metric_name, str(raw_assigned_id)))
autoaggregate = False
autoaggregate_value = 0
# Determine if the namespace is to be aggregated
if BOUNDARY_AUTOAGGRERATION:
for autoaggregate_metric in BOUNDARY_AUTOAGGRERATION_METRICS:
autoaggregate = False
autoaggregate_value = 0
CHECK_MATCH_PATTERN = autoaggregate_metric[0]
base_name = metric_name.replace(FULL_NAMESPACE, '', 1)
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(base_name)
if pattern_match:
autoaggregate = True
autoaggregate_value = autoaggregate_metric[1]
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: BOUNDARY_AUTOAGGRERATION passed - %s - %s' % (metric_name, str(autoaggregate)))
if ENABLE_BOUNDARY_DEBUG:
logger.info(
'debug :: analysing - %s, %s, %s, %s, %s, %s, %s, %s, %s, %s' % (
metric_name, str(metric_expiration_time),
str(metric_min_average),
str(metric_min_average_seconds),
str(metric_trigger), str(alert_threshold),
metric_alerters, autoaggregate,
autoaggregate_value, algorithm)
)
                # Dump the timeseries data to a file
                # @modified 20170913
ticket_dependences=ticket_dependences_code_list)
if form.is_valid():
main_ticket = form.cleaned_data['ticket']
note = form.cleaned_data['note']
if Ticket2Ticket.main_is_already_used(main_ticket):
messages.add_message(request, messages.ERROR,
_("La dipendenza non può essere aggiunta. "
"La richiesta <b>{}</b> è dipendente da "
"altre richieste").format(main_ticket))
return redirect('uni_ticket:add_ticket_dependence_url',
structure_slug=structure_slug,
ticket_id=ticket_id)
if ticket.blocks_some_ticket():
messages.add_message(request, messages.ERROR,
_("La dipendenza non può essere aggiunta. "
"Ci sono richieste che dipendono da "
"quella corrente <b>{}</b>").format(ticket))
return redirect('uni_ticket:add_ticket_dependence_url',
structure_slug=structure_slug,
ticket_id=ticket_id)
t2t = Ticket2Ticket(subordinate_ticket=ticket,
main_ticket=main_ticket,
note=note)
t2t.save()
# log action
logger.info('[{}] {} added new dependence to'
' ticket {} from ticket {}'.format(timezone.localtime(),
request.user,
ticket,
main_ticket))
ticket.update_log(user=request.user,
note=_("Aggiunta dipendenza dalla richiesta:"
" {}".format(main_ticket)))
messages.add_message(request, messages.SUCCESS,
_("Dipendenza dalla richiesta <b>{}</b>"
" aggiunta con successo").format(main_ticket.code))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
                                ticket_id=ticket_id)
else: # pragma: no cover
for k,v in get_labeled_errors(form).items():
messages.add_message(request, messages.ERROR,
"<b>{}</b>: {}".format(k, strip_tags(v)))
d = {'form': form,
'structure': structure,
'sub_title': sub_title,
'ticket': ticket,
'title': title,}
return render(request, template, d)
@login_required
@has_admin_privileges
@ticket_is_taken_for_employee
@ticket_assigned_to_structure
@ticket_is_taken_and_not_closed
def ticket_dependence_remove(request, structure_slug,
ticket_id, main_ticket_id,
structure, can_manage, ticket):
"""
Removes ticket dependence
:type structure_slug: String
:type ticket_id: String
:type main_ticket_id: String
:type structure: OrganizationalStructure (from @has_admin_privileges)
:type can_manage: Dictionary (from @has_admin_privileges)
:type ticket: Ticket (from @ticket_assigned_to_structure)
:param structure_slug: structure slug
:param ticket_id: ticket code
:param main_ticket_id: main ticket code
:param structure: structure object (from @has_admin_privileges)
:param can_manage: if user can manage or can read only (from @has_admin_privileges)
:param ticket: ticket object (from @ticket_assigned_to_structure)
:return: redirect
"""
user_type = get_user_type(request.user, structure)
main_ticket = get_object_or_404(Ticket, code=main_ticket_id)
to_remove = get_object_or_404(Ticket2Ticket,
subordinate_ticket=ticket,
main_ticket=main_ticket)
    # If the main ticket being removed is not assigned to the current structure
if structure not in main_ticket.get_assigned_to_structures():
return custom_message(request,
_("La richiesta <b>{}</b> non è stata assegnata"
" a questa struttura, pertanto"
" non puoi gestirla").format(main_ticket),
structure_slug=structure.slug)
else:
# log action
logger.info('[{}] {} removed dependence to'
' ticket {} from ticket {}'.format(timezone.localtime(),
request.user,
ticket,
main_ticket))
to_remove.delete()
ticket.update_log(user=request.user,
note=_("Rimossa dipendenza dalla richiesta:"
" {}".format(main_ticket)))
messages.add_message(request, messages.SUCCESS,
_("Dipendenza rimossa correttamente"))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
                    ticket_id=ticket_id)
@login_required
def ticket_close_url(request, structure_slug, ticket_id): # pragma: no cover
"""
Makes URL redirect to closing ticket page depending of user role
:type structure_slug: String
:type ticket_id: String
:param structure_slug: structure slug
:param ticket_id: ticket code
:render: redirect
"""
structure = get_object_or_404(OrganizationalStructure,
slug=structure_slug)
user_type = get_user_type(request.user, structure)
return redirect('uni_ticket:{}_close_ticket'.format(user_type),
structure_slug, ticket_id)
@login_required
@has_admin_privileges
@ticket_is_taken_for_employee
@ticket_assigned_to_structure
@ticket_is_taken_and_not_closed
def ticket_close(request, structure_slug, ticket_id,
structure, can_manage, ticket, office_employee=None):
"""
Closes ticket
:type structure_slug: String
:type ticket_id: String
:type structure: OrganizationalStructure (from @has_admin_privileges)
:type can_manage: Dictionary (from @has_admin_privileges)
:type ticket: Ticket (from @ticket_assigned_to_structure)
:type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)
:param structure_slug: structure slug
:param ticket_id: ticket code
:param structure: structure object (from @has_admin_privileges)
:param can_manage: if user can manage or can read only (from @has_admin_privileges)
:param ticket: ticket object (from @ticket_assigned_to_structure)
:param office_employee: operator offices queryset (from @is_operator)
:return: render
"""
    # If the ticket cannot be closed (because of active dependencies)
if not ticket.is_closable():
# log action
logger.error('[{}] {} tried to'
' close not closable ticket {}'.format(timezone.localtime(),
request.user,
ticket))
return custom_message(request,
_("Non è possibile chiudere la richiesta,"
" ci sono dipendenze attive!"),
structure_slug=structure.slug)
title = _('Chiusura della richiesta')
sub_title = ticket
category = ticket.input_module.ticket_category
default_replies = TicketCategoryDefaultReply.objects.filter(ticket_category=category,
is_active=True)
form = TicketCloseForm()
if request.method=='POST':
form = TicketCloseForm(request.POST)
if form.is_valid():
motivazione = form.cleaned_data['note']
closing_status = form.cleaned_data['status']
ticket.is_closed = True
ticket.closed_by = request.user
ticket.closing_reason = motivazione.format(user=ticket.created_by)
ticket.closing_status = closing_status
ticket.closed_date = timezone.localtime()
            ticket.save(update_fields=['is_closed',
'closed_by',
'closing_reason',
'closing_status',
'closed_date'])
# log action
logger.info('[{}] {} closed ticket {}'.format(timezone.localtime(),
request.user,
ticket))
ticket.update_log(user=request.user,
note=_("Chiusura richiesta ({}): {}"
"").format(dict(settings.CLOSING_LEVELS).get(closing_status),
motivazione))
opened_ticket_url = reverse('uni_ticket:manage_opened_ticket_url',
kwargs={'structure_slug': structure.slug})
messages.add_message(request, messages.SUCCESS,
_("Richiesta {} chiusa correttamente"
"<br>"
"<a class='text-success' href='{}'><b>"
"Clicca qui per tornare alle richieste assegnate"
"</b></a>").format(ticket,
opened_ticket_url))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
ticket_id=ticket_id)
else: # pragma: no cover
for k,v in get_labeled_errors(form).items():
messages.add_message(request, messages.ERROR,
"<b>{}</b>: {}".format(k, strip_tags(v)))
user_type = get_user_type(request.user, structure)
template = "{}/ticket_close.html".format(user_type)
d = {'default_replies': default_replies,
'form': form,
'structure': structure,
'sub_title': sub_title,
'ticket': ticket,
'title': title,}
return render(request, template, d)
@login_required
@has_admin_privileges
@ticket_is_taken_for_employee
@ticket_assigned_to_structure
def ticket_reopen(request, structure_slug, ticket_id,
structure, can_manage, ticket):
"""
Reopen ticket
:type structure_slug: String
:type ticket_id: String
:type structure: OrganizationalStructure (from @has_admin_privileges)
:type can_manage: Dictionary (from @has_admin_privileges)
:type ticket: Ticket (from @ticket_assigned_to_structure)
:param structure_slug: structure slug
:param ticket_id: ticket code
:param structure: structure object (from @has_admin_privileges)
:param can_manage: if user can manage or can read only (from @has_admin_privileges)
:param ticket: ticket object (from @ticket_assigned_to_structure)
:return: redirect
"""
if not ticket.is_closed:
# log action
logger.error('[{}] {} tried to reopen'
' not closed ticket {}'.format(timezone.localtime(),
request.user,
ticket))
return custom_message(request,
_("La richiesta {} non è stata chiusa").format(ticket),
structure_slug=structure.slug)
if ticket.is_notification:
# log action
logger.error('[{}] {} tried to reopen'
' a notification ticket {}'.format(timezone.localtime(),
request.user,
ticket))
return custom_message(request,
_("La richiesta {} non può essere riaperta").format(ticket),
structure_slug=structure.slug)
if not ticket.closed_by:
# log action
logger.error('[{}] {} tried to reopen'
' a ticket closed by owner user{}'
''.format(timezone.localtime(),
request.user,
ticket))
return custom_message(request,
_("La richiesta {} è stata chiusa dall'utente, "
" pertanto non può essere riaperta").format(ticket),
structure_slug=structure.slug)
# at least one of ticket offices must be active and must follow
active_assignments = TicketAssignment.objects.filter(ticket=ticket,
office__is_active=True,
follow=True,
readonly=False)
if not active_assignments:
return custom_message(request,
_("Nessuno degli uffici assegnati in precedenza "
"può prendere nuovamente in carico la richiesta {} e "
"pertanto questa non può essere riaperta").format(ticket),
structure_slug=structure.slug)
ticket.is_closed = False
    ticket.save(update_fields=['is_closed'])
ticket.update_log(user=request.user,
note= _("Riapertura richiesta"))
# log action
logger.info('[{}] {} reopened ticket {}'.format(timezone.localtime(),
request.user,
ticket))
messages.add_message(request, messages.SUCCESS,
_("Richiesta {} riaperta correttamente").format(ticket))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
ticket_id=ticket_id)
@login_required
def ticket_competence_add_url(request, structure_slug, ticket_id): # pragma: no cover
"""
Makes URL redirect to adding ticket competence page depending of user role
:type structure_slug: String
:type ticket_id: String
:param structure_slug: structure slug
:param ticket_id: ticket code
:return: redirect
"""
structure = get_object_or_404(OrganizationalStructure,
slug=structure_slug)
user_type = get_user_type(request.user, structure)
return redirect('uni_ticket:{}_add_ticket_competence'.format(user_type),
structure_slug, ticket_id)
@login_required
@has_admin_privileges
@ticket_is_taken_for_employee
@ticket_assigned_to_structure
@ticket_is_taken_and_not_closed
def ticket_competence_add_new(request, structure_slug, ticket_id,
structure, can_manage, ticket,
office_employee=None):
"""
Adds new ticket competence (first step)
:type structure_slug: String
:type ticket_id: String
:type structure: OrganizationalStructure (from @has_admin_privileges)
:type can_manage: Dictionary (from @has_admin_privileges)
:type ticket: Ticket (from @ticket_assigned_to_structure)
:type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)
:param structure_slug: structure slug
:param ticket_id: ticket code
:param structure: structure object (from @has_admin_privileges)
:param can_manage: if user can manage or can read only (from @has_admin_privileges)
:param ticket: ticket object (from @ticket_assigned_to_structure)
:param office_employee: operator offices queryset (from @is_operator)
:return: render
"""
if can_manage['readonly']:
messages.add_message(request, messages.ERROR, settings.READONLY_COMPETENCE_OVER_TICKET)
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
ticket_id=ticket_id)
user_type = get_user_type(request.user, structure)
template = "{}/add_ticket_competence.html".format(user_type)
title = _('Trasferisci competenza richiesta')
sub_title = '{} ({})'.format(ticket.subject, ticket_id)
strutture = OrganizationalStructure.objects.filter(is_active=True)
d = {'structure': structure,
'strutture': strutture,
'sub_title': sub_title,
'ticket': ticket,
'title': title,}
return render(request, template, d)
@login_required
@has_admin_privileges
@ticket_is_taken_for_employee
@ticket_assigned_to_structure
@ticket_is_taken_and_not_closed
def ticket_competence_add_final(request, structure_slug, ticket_id,
new_structure_slug, structure, can_manage, ticket,
office_employee=None):
"""
Adds new ticket competence (second step)
:type structure_slug: String
:type ticket_id: String
:type new_structure_slug: String
:type structure: OrganizationalStructure (from @has_admin_privileges)
:type can_manage: Dictionary (from @has_admin_privileges)
:type ticket: Ticket (from @ticket_assigned_to_structure)
:type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)
:param structure_slug: structure slug
:param ticket_id: ticket code
:param new_structure_slug: selected structure slug
:param structure: structure object (from @has_admin_privileges)
:param can_manage: if user can manage or can read only (from @has_admin_privileges)
:param ticket: ticket object (from @ticket_assigned_to_structure)
:param office_employee: operator offices queryset (from @is_operator)
:return: render
"""
    strutture = OrganizationalStructure.objects.filter(is_active=True)
    # List of the offices the ticket is assigned to
ticket_offices = ticket.get_assigned_to_offices(office_active=False)
operator_offices_list = []
assignments = TicketAssignment.objects.filter(ticket=ticket,
office__organizational_structure=structure,
office__is_active=True,
follow=True,
taken_date__isnull=False)
for assignment in assignments:
if user_manage_office(user=request.user,
office=assignment.office):
operator_offices_list.append(assignment.office)
new_structure = get_object_or_404(OrganizationalStructure,
slug=new_structure_slug,
is_active=True)
offices = OrganizationalStructureOffice.objects.filter(organizational_structure=new_structure,
is_active=True)
# exclude private offices if not in same structure
if new_structure != structure:
offices = offices.exclude(is_private=True)
if request.method == 'POST':
form = TicketCompetenceSchemeForm(data=request.POST)
if form.is_valid():
office_slug = form.cleaned_data['office_slug']
follow = form.cleaned_data['follow']
readonly = form.cleaned_data['readonly']
selected_office_slug = form.cleaned_data['selected_office']
            # Does the office passed in POST exist?
new_office = get_object_or_404(OrganizationalStructureOffice,
slug=office_slug,
organizational_structure=new_structure,
is_active=True)
            # in case the POST is forced with a private office!
if new_structure != structure and new_office.is_private:
return custom_message(request,
_("Impossibile assegnare la richiesta "
"all'ufficio selezionato"),
structure_slug=structure.slug)
selected_office = None
if selected_office_slug:
selected_office = get_object_or_404(OrganizationalStructureOffice,
slug=selected_office_slug,
organizational_structure=structure,
is_active=True)
if new_office in ticket_offices:
messages.add_message(request, messages.ERROR,
_("La richiesta è già di competenza"
" dell'ufficio <b>{}</b>"
"").format(new_office))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
ticket_id=ticket_id)
messages.add_message(request, messages.SUCCESS,
_("Competenza <b>{}</b> aggiunta"
" correttamente".format(new_office)))
# If not follow anymore
if not follow:
abandoned_offices = ticket.block_competence(user=request.user,
structure=structure,
allow_readonly=False,
selected_office=selected_office)
                for off in abandoned_offices:
#!/usr/bin/env python3
""" Helper functions and classes for GUI controls """
import logging
import re
import tkinter as tk
from tkinter import ttk
from itertools import zip_longest
from functools import partial
from .tooltip import Tooltip
from .utils import ContextMenu, FileHandler, get_config, get_images
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# We store Tooltips, ContextMenus and Commands globally when they are created
# Because we need to add them back to newly cloned widgets (they are not easily accessible from
# original config or are prone to getting destroyed when the original widget is destroyed)
_RECREATE_OBJECTS = dict(tooltips=dict(), commands=dict(), contextmenus=dict())
def get_tooltip(widget, text, wraplength=600):
""" Store the tooltip layout and widget id in _TOOLTIPS and return a tooltip """
_RECREATE_OBJECTS["tooltips"][str(widget)] = {"text": text,
"wraplength": wraplength}
logger.debug("Adding to tooltips dict: (widget: %s. text: '%s', wraplength: %s)",
widget, text, wraplength)
return Tooltip(widget, text=text, wraplength=wraplength)
def get_contextmenu(widget):
""" Create a context menu, store its mapping and return """
rc_menu = ContextMenu(widget)
_RECREATE_OBJECTS["contextmenus"][str(widget)] = rc_menu
logger.debug("Adding to Context menu: (widget: %s. rc_menu: %s)",
widget, rc_menu)
return rc_menu
def add_command(name, func):
""" For controls that execute commands, the command must be added to the _COMMAND list so that
it can be added back to the widget during cloning """
logger.debug("Adding to commands: %s - %s", name, func)
_RECREATE_OBJECTS["commands"][str(name)] = func
def set_slider_rounding(value, var, d_type, round_to, min_max):
""" Set the underlying variable to correct number based on slider rounding """
if d_type == float:
var.set(round(float(value), round_to))
else:
steps = range(min_max[0], min_max[1] + round_to, round_to)
value = min(steps, key=lambda x: abs(x - int(float(value))))
var.set(value)
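# Example (hypothetical values): with d_type=int, round_to=5 and
# min_max=(0, 100), a raw slider value of "12.7" is truncated to 12 and
# snapped to the nearest step in range(0, 105, 5), so the variable is set
# to 10; with d_type=float and round_to=2, "12.678" becomes 12.68.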
def adjust_wraplength(event):
""" dynamically adjust the wraplength of a label on event """
label = event.widget
label.configure(wraplength=event.width - 1)
class ControlPanelOption():
"""
A class to hold a control panel option. A list of these is expected
to be passed to the ControlPanel object.
Parameters
----------
title: str
Title of the control. Will be used for label text and control naming
dtype: datatype object
Datatype of the control.
group: str, optional
The group that this control should sit with. If provided, all controls in the same
group will be placed together. Default: None
default: str, optional
Default value for the control. If None is provided, then action will be dictated by
whether "blank_nones" is set in ControlPanel
initial_value: str, optional
Initial value for the control. If None, default will be used
choices: list or tuple, object
Used for combo boxes and radio control option setting
is_radio: bool, optional
Specifies to use a Radio control instead of combobox if choices are passed
rounding: int or float, optional
For slider controls. Sets the stepping
min_max: int or float, optional
For slider controls. Sets the min and max values
sysbrowser: dict, optional
Adds Filesystem browser buttons to ttk.Entry options.
Expects a dict: {sysbrowser: str, filetypes: str}
helptext: str, optional
Sets the tooltip text
"""
def __init__(self, title, dtype, # pylint:disable=too-many-arguments
group=None, default=None, initial_value=None, choices=None, is_radio=False,
rounding=None, min_max=None, sysbrowser=None, helptext=None):
logger.debug("Initializing %s: (title: '%s', dtype: %s, group: %s, default: %s, "
"initial_value: %s, choices: %s, is_radio: %s, rounding: %s, min_max: %s, "
"sysbrowser: %s, helptext: '%s')", self.__class__.__name__, title, dtype,
group, default, initial_value, choices, is_radio, rounding, min_max,
sysbrowser, helptext)
self.dtype = dtype
self.sysbrowser = sysbrowser
self._options = dict(title=title,
group=group,
default=default,
initial_value=initial_value,
choices=choices,
is_radio=is_radio,
rounding=rounding,
min_max=min_max,
helptext=helptext)
self.control = self.get_control()
self.tk_var = self.get_tk_var()
logger.debug("Initialized %s", self.__class__.__name__)
@property
def name(self):
""" Lowered title for naming """
return self._options["title"].lower()
@property
def title(self):
""" Title case title for naming with underscores removed """
return self._options["title"].replace("_", " ").title()
@property
def group(self):
""" Return group or _master if no group set """
group = self._options["group"]
group = "_master" if group is None else group
return group
@property
def default(self):
""" Return either selected value or default """
return self._options["default"]
@property
def value(self):
""" Return either selected value or default """
val = self._options["initial_value"]
val = self.default if val is None else val
return val
@property
def choices(self):
""" Return choices """
return self._options["choices"]
@property
def is_radio(self):
""" Return is_radio """
return self._options["is_radio"]
@property
def rounding(self):
""" Return rounding """
return self._options["rounding"]
@property
def min_max(self):
""" Return min_max """
return self._options["min_max"]
@property
def helptext(self):
""" Format and return help text for tooltips """
helptext = self._options["helptext"]
if helptext is None:
return helptext
logger.debug("Format control help: '%s'", self.name)
if helptext.startswith("R|"):
helptext = helptext[2:].replace("\nL|", "\n - ").replace("\n", "\n\n")
else:
helptext = helptext.replace("\n\t", "\n - ").replace("%%", "%")
helptext = ". ".join(i.capitalize() for i in helptext.split(". "))
helptext = self.title + " - " + helptext
logger.debug("Formatted control help: (name: '%s', help: '%s'", self.name, helptext)
return helptext
def get(self):
""" Return the value from the tk_var """
return self.tk_var.get()
def set(self, value):
""" Set the tk_var to a new value """
self.tk_var.set(value)
def get_control(self):
""" Set the correct control type based on the datatype or for this option """
if self.choices and self.is_radio:
control = ttk.Radiobutton
elif self.choices:
control = ttk.Combobox
elif self.dtype == bool:
control = ttk.Checkbutton
elif self.dtype in (int, float):
control = ttk.Scale
else:
control = ttk.Entry
logger.debug("Setting control '%s' to %s", self.title, control)
return control
def get_tk_var(self):
""" Correct variable type for control """
if self.dtype == bool:
var = tk.BooleanVar()
elif self.dtype == int:
var = tk.IntVar()
elif self.dtype == float:
var = tk.DoubleVar()
else:
var = tk.StringVar()
logger.debug("Setting tk variable: (name: '%s', dtype: %s, tk_var: %s)",
self.name, self.dtype, var)
return var
class ControlPanel(ttk.Frame): # pylint:disable=too-many-ancestors
"""
A Control Panel to hold control panel options.
This class handles all of the formatting, placing and TK_Variables
in a consistent manner.
It can also provide dynamic columns for resizing widgets
Parameters
----------
parent: tk object
Parent widget that should hold this control panel
options: list of ControlPanelOptions objects
The list of controls that are to be built into this control panel
label_width: int, optional
The width that labels for controls should be set to.
Defaults to 20
columns: int, optional
The maximum number of columns that this control panel should be able
to accomodate. Setting to 1 means that there will only be 1 column
regardless of how wide the control panel is. Higher numbers will
dynamically fill extra columns if space permits. Defaults to 1
option_columns: int, optional
For checkbutton and radiobutton containers, how many options should
be displayed on each row. Defaults to 4
header_text: str, optional
If provided, will place an information box at the top of the control
panel with these contents.
blank_nones: bool, optional
How the control panel should handle Nones. If set to True then Nones
will be converted to empty strings. Default: False
"""
def __init__(self, parent, options, # pylint:disable=too-many-arguments
label_width=20, columns=1, option_columns=4, header_text=None, blank_nones=True):
logger.debug("Initializing %s: (parent: '%s', options: %s, label_width: %s, columns: %s, "
"option_columns: %s, header_text: %s, blank_nones: %s)",
self.__class__.__name__, parent, options, label_width, columns,
option_columns, header_text, blank_nones)
super().__init__(parent)
self.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.options = options
self.controls = []
self.label_width = label_width
self.columns = columns
self.option_columns = option_columns
self.header_text = header_text
self.group_frames = dict()
self.canvas = tk.Canvas(self, bd=0, highlightthickness=0)
self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.mainframe, self.optsframe = self.get_opts_frame()
self.optscanvas = self.canvas.create_window((0, 0), window=self.mainframe, anchor=tk.NW)
self.build_panel(blank_nones)
logger.debug("Initialized %s", self.__class__.__name__)
def get_opts_frame(self):
""" Return an autofill container for the options inside a main frame """
mainframe = ttk.Frame(self.canvas)
if self.header_text is not None:
self.add_info(mainframe)
optsframe = ttk.Frame(mainframe, name="opts_frame")
optsframe.pack(expand=True, fill=tk.BOTH)
holder = AutoFillContainer(optsframe, self.columns)
logger.debug("Opts frames: '%s'", holder)
return mainframe, holder
def add_info(self, frame):
""" Plugin information """
gui_style = ttk.Style()
gui_style.configure('White.TFrame', background='#FFFFFF')
gui_style.configure('Header.TLabel',
background='#FFFFFF',
font=get_config().default_font + ("bold", ))
gui_style.configure('Body.TLabel',
background='#FFFFFF')
info_frame = ttk.Frame(frame, style='White.TFrame', relief=tk.SOLID)
info_frame.pack(fill=tk.X, side=tk.TOP, expand=True, padx=10, pady=10)
label_frame = ttk.Frame(info_frame, style='White.TFrame')
label_frame.pack(padx=5, pady=5, fill=tk.X, expand=True)
for idx, line in enumerate(self.header_text.splitlines()):
if not line:
continue
style = "Header.TLabel" if idx == 0 else "Body.TLabel"
info = ttk.Label(label_frame, text=line, style=style, anchor=tk.W)
info.bind("<Configure>", adjust_wraplength)
info.pack(fill=tk.X, padx=0, pady=0, expand=True, side=tk.TOP)
def build_panel(self, blank_nones):
""" Build the options frame for this command """
logger.debug("Add Config Frame")
self.add_scrollbar()
self.canvas.bind("<Configure>", self.resize_frame)
for option in self.options:
group_frame = self.get_group_frame(option.group)
ctl = ControlBuilder(group_frame["frame"],
option,
label_width=self.label_width,
checkbuttons_frame=group_frame["chkbtns"],
option_columns=self.option_columns,
blank_nones=blank_nones)
if group_frame["chkbtns"].items > | |
#!/usr/bin/env python3
# Copyright (c) 2022 <NAME>. All rights reserved
# coding=utf-8
# -*- coding: utf8 -*
# reference [1]: https://zhuanlan.zhihu.com/p/22252270
# reference [2]: https://zh.d2l.ai/chapter_optimization/adadelta.html
import copy
import numpy as np
try:
from OptimizerClass import IterationOptimizer
except:
from .OptimizerClass import IterationOptimizer
def numerical_gradient(fun, x, dx=1e-10, dtype=np.float_, y_out=False):
y = fun( x )
if x.ndim ==0:
x = np.array( [x], dtype=x.dtype )
dy_dx = np.zeros( (len(x),), dtype=dtype)
for ii in range( len(x) ):
# x + dx
x_tmp = copy.deepcopy( x )
x_tmp[ii] += dx
# derivative
dy_dx[ii] = ( fun(x_tmp)-y )/dx
if dy_dx.size==1:
return dy_dx[0]
if y_out:
return y, dy_dx
return dy_dx
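# Usage sketch (illustrative, not part of the original module):
#   f = lambda v: (v ** 2).sum()              # f(v) = v1^2 + v2^2
#   numerical_gradient(f, np.array([1.0, 2.0]))
#   # -> approximately array([2., 4.]) via forward differences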
class GradientDescentOptimizer(IterationOptimizer):
def __init__(self, target_fun=None, fun_gradient=None, name:str='', max_iter:int= 5e4,
print_iter_flag:bool=True, print_every_iter:int=10, save_history_flag:bool=False,
dx:float=1e-10, tol:float=1e-5):
super().__init__( target_fun=target_fun, name=name, max_iter=max_iter,
print_iter_flag=print_iter_flag, print_every_iter=print_every_iter,
save_history_flag=save_history_flag )
self._parameter['dx_num_grad'] = dx
self._parameter['tol'] = tol
self.fun_gradient = fun_gradient
self.state_reset()
""" untility (overload) """
def print_state(self):
""" print parameter's information """
super().print_state()
if self.optimized_flag:
print('dx : ', self._dx)
return self
def reset_history(self):
""" reset history """
super().reset_history()
self.dx_history = []
return self
def state_reset(self):
super().state_reset()
self._dx = np.inf
return self
""" calculation """
def y_and_gradient(self, x:np.ndarray):
""" gradient of target function """
# whether self._fun_gradient is provided or not
if (self._fun_gradient is not None):
# whether self._fun_gradient is a bool or not
if isinstance(self._fun_gradient, bool):
# whether self._fun_gradient is True
if self._fun_gradient:
return self._target_fun(x)
y, dy_dx = self.target_fun(x), self.fun_gradient(x)
return y, dy_dx
""" protected (overload) """
def _pre_iteration(self, x_init:np.ndarray):
""" preparation before iteration """
super()._pre_iteration(x_init)
self._dx = -np.ones( x_init.shape, dtype=x_init.dtype)
return self
def _add_one_to_history(self):
""" add the current state into history list """
super()._add_one_to_history()
self.dx_history.append( copy.deepcopy( self._dx))
return self
def _print_one(self):
""" print the information in one step """
if self._parameter['counter'] == 0:
if len( self._x ) > IterationOptimizer._DATA_LEN_:
print( '{0:>10s} {1:>10s} {2:>10s}'.format('count', 'y', 'max(abs(dx))') )
else:
print( '{0:>10s} {1:>10s} {2:>10s} {3:<10s}'.format('count', 'y', 'max(abs(dx))', 'x') )
print( '-'*47 )
if len( self._x ) > IterationOptimizer._DATA_LEN_:
print( '{0:>10d} {1:>10.5e} {2:>10.5e}'.format(self._parameter['counter'], self._y, np.max( np.abs(self._dx) )) )
else:
str1 = '{0}'.format(self._x)
print( '{0:>10d} {1:>10.5e} {2:>10.5e} {3:<10s}'.format(self._parameter['counter'], self._y, np.max( np.abs(self._dx) ), str1) )
return self
def _termination(self):
return ( np.max( np.abs(self._dx) ) > self._parameter['tol'] ) & (self._parameter['counter']<self._parameter['max_iter'])
""" property (overload) """
""" property """
@property
def target_fun(self):
# whether self._fun_gradient is provided or not
if (self._fun_gradient is not None):
# whether self._fun_gradient is a bool or not
if isinstance(self._fun_gradient, bool):
# whether self._fun_gradient is True
if self._fun_gradient:
return lambda x: self._target_fun(x)[0]
# self._target_fun is a normal single output function
return self._target_fun
@property
def history_dict(self):
"""
return a history dict
Usage
-----
key_tuple, his_dict = optimizer.history_dict
Returns
-------
key_tuple : a tuple with key of his_dict (with correct output order)
/ 'count_list', 'y_history', ''x_history', 'dx_history'
his_dict : a dictory of histroy
"""
if (not self.optimized_flag) or (not self._parameter['save_history_flag']):
return [], {}
key_tuple, tmp_dict = super().history_dict
key_list = list(key_tuple)
# if dx_history is a list or array not a single value
# change dx_history to dx[0]_history, dx[1]_history, ...
if ((not isinstance( self.dx_history[0], list )) and
( (not isinstance( self.dx_history[0], np.ndarray )) or self.dx_history[0].ndim==0 ) ):
tmp_dict['dx_history'] = np.array( self.dx_history )
key_list.append( 'dx_history' )
else:
for ii in range( len(self.dx_history[0]) ):
tmp_key = 'dx_{0}_history'.format(ii)
key_list.append(tmp_key)
tmp_dict[tmp_key]= np.array( [ l1[ii] for l1 in self.dx_history] )
return tuple(key_list), tmp_dict
@property
def fun_gradient(self):
# gradient of the target function is not provided
if self._fun_gradient is None:
return lambda x: numerical_gradient( self.target_fun, x, dx=self._parameter['dx_num_grad'])
# if self._fun_gradient is a bool flag
if isinstance(self._fun_gradient, bool):
# self._target_fun returns y and gradient simultaneously
if self._fun_gradient:
return lambda x: self._target_fun(x)[1]
# self._target_fun only returns y
return lambda x: numerical_gradient( self.target_fun, x, dx=self._parameter['dx_num_grad'])
# self._fun_gradient is a normal function with single output
return self._fun_gradient
@property
def dx_num_grad(self):
return self._parameter['dx_num_grad']
@property
def tol(self):
return self._parameter['tol']
""" setter """
@target_fun.setter
def target_fun(self, target_fun):
""" target function """
self._target_fun = target_fun
self.state_reset()
@fun_gradient.setter
def fun_gradient(self, fun_gradient):
""" gradient of target function """
self._fun_gradient = fun_gradient
self.state_reset()
@dx_num_grad.setter
def dx_num_grad(self, dx_num_grad:float):
self._parameter['dx_num_grad'] = dx_num_grad
@tol.setter
def tol(self, tol:float):
self._parameter['tol'] = tol
class GradientDescent(GradientDescentOptimizer):
def __init__(self, target_fun=None, fun_gradient=None, name:str='GradientDescent', max_iter:int= 5e4,
print_iter_flag:bool=True, print_every_iter:int=10, save_history_flag:bool=False,
dx:float=1e-10, tol:float=1e-5, lr:float=1e-2):
super().__init__( target_fun=target_fun, fun_gradient=fun_gradient,
name=name, max_iter= max_iter,
print_iter_flag=print_iter_flag,print_every_iter=print_every_iter,
save_history_flag=save_history_flag,
dx=dx, tol=tol )
self._parameter['learning_rate'] = lr
""" protected (overload) """
def _updata_one(self):
""" update the parameter in one step"""
self._parameter['counter'] += 1
self._x += self._dx
self._y, dy_dx = self.y_and_gradient( self._x )
self._dx = -self._parameter['learning_rate']*dy_dx
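    # Update rule implemented above: x_{k+1} = x_k - lr * grad f(x_k).
    # Note that self._dx stores the freshly computed step -lr * dy_dx, which
    # is applied to self._x at the start of the next call.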
""" property """
@property
def learning_rate(self):
return self._parameter['learning_rate']
""" setter """
    @learning_rate.setter
    def learning_rate(self, learning_rate:float):
        self._parameter['learning_rate'] = learning_rate
class Momentum(GradientDescentOptimizer):
def __init__(self, target_fun=None, fun_gradient=None, name:str='Momentum', max_iter:int= 5e4,
print_iter_flag:bool=True, print_every_iter:int=10, save_history_flag:bool=False,
dx:float=1e-10, tol:float=1e-5, v_init:float=0.0, lr:float=0.001, beta:float=0.9):
super().__init__( target_fun=target_fun, fun_gradient=fun_gradient,
name=name, max_iter= max_iter,
print_iter_flag=print_iter_flag,print_every_iter=print_every_iter,
save_history_flag=save_history_flag,
dx=dx, tol=tol )
self._parameter['v_init'] = v_init
self._parameter['learning_rate'] = lr
self._parameter['beta'] = beta
def _pre_iteration(self, x_init):
super()._pre_iteration(x_init)
self._dx = np.ones( x_init.shape, dtype=x_init.dtype) * self._parameter['v_init']
return self
def _updata_one(self):
""" update the parameter in one step"""
self._parameter['counter'] += 1
self._x += self._dx
self._y, dy_dx = self.y_and_gradient( self._x )
self._dx = self._parameter['beta'] * self._dx - self._parameter['learning_rate'] * dy_dx
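    # Update rule implemented above (classical momentum):
    #   v_{k+1} = beta * v_k - lr * grad f(x_k),   x_{k+1} = x_k + v_{k+1},
    # where self._dx plays the role of the velocity v.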
""" property """
@property
def v_init(self):
return self._parameter['v_init']
@property
def learning_rate(self):
return self._parameter['learning_rate']
@property
def beta(self):
return self._parameter['beta']
""" setter """
@v_init.setter
def v_init(self, v_init:float):
self._parameter['v_init'] = v_init
@learning_rate.setter
def learning_rate(self, learning_rate:float):
self._parameter['learning_rate'] = learning_rate
@beta.setter
def beta(self, beta:float):
self._parameter['beta'] = beta
class AdaGrad(GradientDescentOptimizer):
    def __init__(self, target_fun=None, fun_gradient=None, name:str='AdaGrad', max_iter:int=50000,
print_iter_flag:bool=True, print_every_iter:int=10, save_history_flag:bool=False,
dx:float=1e-10, tol:float=1e-5, epsilon:float=1e-07, lr:float=0.001):
super().__init__( target_fun=target_fun, fun_gradient=fun_gradient,
name=name, max_iter= max_iter,
print_iter_flag=print_iter_flag,print_every_iter=print_every_iter,
save_history_flag=save_history_flag,
dx=dx, tol=tol )
self._parameter['epsilon'] = epsilon
self._parameter['learning_rate'] = lr
self.state_reset()
def state_reset(self):
super().state_reset()
self.__sigma = None
return self
def _pre_iteration(self, x_init):
super()._pre_iteration(x_init)
self.__sigma = np.zeros( x_init.shape, dtype=np.float64 )
return self
def _updata_one(self):
""" update the parameter in one step"""
self._parameter['counter'] += 1
self._x += self._dx
self._y, dy_dx = self.y_and_gradient( self._x )
self.__sigma += dy_dx**2
self._dx = - self._parameter['learning_rate'] * dy_dx/np.sqrt( self.__sigma+self._parameter['epsilon'] )
return self
""" property """
@property
def epsilon(self):
return self._parameter['epsilon']
@property
def learning_rate(self):
return self._parameter['learning_rate']
""" setter """
@epsilon.setter
def epsilon(self, epsilon):
self._parameter['epsilon'] = epsilon
@learning_rate.setter
def learning_rate(self, learning_rate):
self._parameter['learning_rate'] = learning_rate
class AdaDelta(GradientDescentOptimizer):
    def __init__(self, target_fun=None, fun_gradient=None, name:str='AdaDelta', max_iter:int=50000,
print_iter_flag:bool=True, print_every_iter:int=10, save_history_flag:bool=False,
dx:float=1e-10, tol:float=1e-5, rho:float=0.95, epsilon:float=1e-7, lr=1.0):
super().__init__( target_fun=target_fun, fun_gradient=fun_gradient,
name=name, max_iter= max_iter,
print_iter_flag=print_iter_flag,print_every_iter=print_every_iter,
save_history_flag=save_history_flag,
dx=dx, tol=tol )
self._parameter['rho'] = rho
self._parameter['epsilon'] = epsilon
self._parameter['learning_rate'] = lr
self.state_reset()
def state_reset(self):
super().state_reset()
self.__sigma = None
self.__deltaX = None
return self
def _pre_iteration(self, x_init):
super()._pre_iteration(x_init)
self.__sigma = np.zeros( x_init.shape, dtype=np.float64 )
self.__deltaX = np.zeros( x_init.shape, dtype=np.float64 )
return self
def _updata_one(self):
""" update the parameter in one step"""
self._parameter['counter'] += 1
self._x += self._dx
self._y, dy_dx = self.y_and_gradient( self._x )
self.__sigma = self._parameter['rho']*self.__sigma+(1-self._parameter['rho'])*(dy_dx**2)
self._dx = - self._parameter['learning_rate'] * np.sqrt( self.__deltaX+self._parameter['epsilon'] ) * dy_dx/np.sqrt( self.__sigma+self._parameter['epsilon'] )
self.__deltaX = self._parameter['rho']*self.__deltaX+(1-self._parameter['rho'])*(self._dx**2)
return self
""" property """
@property
def rho(self):
return self._parameter['rho']
@property
def epsilon(self):
return self._parameter['epsilon']
""" setter """
@rho.setter
def rho(self, rho:float):
self._parameter['rho'] = rho
@epsilon.setter
def epsilon(self, epsilon:float):
self._parameter['epsilon'] = epsilon
class RMSprop(GradientDescentOptimizer):
    def __init__(self, target_fun=None, fun_gradient=None, name:str='RMSprop', max_iter:int=50000,
print_iter_flag:bool=True, print_every_iter:int=10, save_history_flag:bool=False,
dx:float=1e-10, tol:float=1e-5, rho:float=0.9, epsilon:float=1e-7, lr:float=0.001):
super().__init__( target_fun=target_fun, fun_gradient=fun_gradient,
name=name, max_iter= max_iter,
print_iter_flag=print_iter_flag,print_every_iter=print_every_iter,
save_history_flag=save_history_flag,
dx=dx, tol=tol )
self._parameter['rho'] = rho
self._parameter['epsilon'] = epsilon
self._parameter['learning_rate'] = lr
self.state_reset()
def state_reset(self):
super().state_reset()
self.__sigma = None
return self
def _pre_iteration(self, x_init):
super()._pre_iteration(x_init)
self.__sigma = np.zeros( x_init.shape, dtype=np.float64 )
return self
def _updata_one(self):
""" update the parameter in one step"""
self._parameter['counter'] += 1
self._x += self._dx
self._y, dy_dx = self.y_and_gradient( self._x )
self.__sigma = self._parameter['rho']*self.__sigma+(1-self._parameter['rho'])* (dy_dx**2)
self._dx = - self._parameter['learning_rate'] * dy_dx/np.sqrt( self.__sigma+self._parameter['epsilon'] )
return self
""" property """
@property
def rho(self):
return self._parameter['rho']
@property
def epsilon(self):
return self._parameter['epsilon']
@property
def learning_rate(self):
return self._parameter['learning_rate']
""" setter """
@rho.setter
def rho(self, rho:float):
self._parameter['rho'] = rho
@epsilon.setter
def epsilon(self, epsilon:float):
self._parameter['epsilon'] = epsilon
@learning_rate.setter
def learning_rate(self, learning_rate:float):
self._parameter['learning_rate'] = learning_rate
class Adam(GradientDescentOptimizer):
    def __init__(self, target_fun=None, fun_gradient=None, name:str='Adam', max_iter:int=50000,
print_iter_flag:bool=True, print_every_iter:int=10, save_history_flag:bool=False,
dx:float=1e-10, tol:float=1e-5,
beta1:float=0.9, beta2:float=0.999, epsilon:float=1e-7, lr:float=0.001):
super().__init__( target_fun=target_fun, fun_gradient=fun_gradient,
                          name=name, max_iter=max_iter,
                          print_iter_flag=print_iter_flag, print_every_iter=print_every_iter,
                          save_history_flag=save_history_flag,
                          dx=dx, tol=tol )
        self._parameter['beta1'] = beta1
        self._parameter['beta2'] = beta2
        self._parameter['epsilon'] = epsilon
        self._parameter['learning_rate'] = lr
        self.state_reset()
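    # Adam combines Momentum's first-moment estimate with RMSprop's
    # second-moment scaling; with step counter t the textbook update is
    #     m <- beta1*m + (1 - beta1)*dy_dx
    #     v <- beta2*v + (1 - beta2)*dy_dx**2
    #     x <- x - lr * (m / (1 - beta1**t)) / (sqrt(v / (1 - beta2**t)) + epsilon)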
import sys
import logging
import time
import argparse
import os
import shutil
import tarfile
import subprocess
sys.path.append(".")
try:
from . import util
from . import configuration
except ImportError:
import util
import configuration
def process_hl7_shell_commands(my_config, target_file):
process_shell_commands(my_config.hl7_operation_shell_commands, target_file)
def process_ack_shell_commands(my_config, target_file):
process_shell_commands(my_config.ack_operation_shell_commands, target_file)
def process_shell_commands(command_list, target_file):
for command in command_list:
command = command.replace('"', '')
command = command.replace(r"&target", '"' + os.path.abspath(target_file) + '"')
try:
return_code = subprocess.call([command], shell=True)
if return_code == 0:
logging.info("Successfully executed command:%s" % command)
else:
logging.info("Returned value is: %d from command:%s" % (return_code, command))
except subprocess.CalledProcessError as e:
print(e.output)
logging.debug("Error happened in command execution:%s" % command)
logging.debug(e.output)
def get_hl7_message_files(my_config):
return get_file_list(my_config.folder_localoutbox)
def is_valid_hl7_message(hl7_fullname):
    try:
        # a context manager guarantees the archive is closed on every path
        with tarfile.open(hl7_fullname, 'r:gz') as tar:
            for tar_info in tar:
                name_info = tar_info.name.upper()
                if "SUBMISSION.XML" in name_info:
                    logging.info("%s is a valid HL7 message tar.gz package!" % hl7_fullname)
                    return True
        return False
    except tarfile.ReadError as re:
        logging.info(hl7_fullname + " " + str(re))
        return False
    except Exception:
        logging.error("Unacceptable HL7 tar.gz package received. It will be ignored!")
        logging.exception("Exception")
        return False
def is_valid_hl7(my_config, hl7_file):
if my_config.hl7_operation_delay > 0:
time.sleep(my_config.hl7_operation_delay)
return is_valid_hl7_message(os.path.join(my_config.folder_localoutbox, os.path.basename(hl7_file)))
def get_file_list(folder):
onlyfiles = [f for f in os.listdir(folder)
if (os.path.isfile(os.path.join(folder, f)) and (not f.startswith('.')))]
return onlyfiles
def create_ack1_flag_from_hl7(my_config, hl7_file):
try:
src_file = os.path.join(my_config.folder_localoutbox, hl7_file)
target_file = os.path.join(my_config.folder_ack1flag, hl7_file)
#if my_config.hl7_operation_delay>0:
# time.sleep(my_config.hl7_operation_delay)
logging.debug("Start to copy %s to %s" % (src_file, target_file))
shutil.copyfile(src_file, target_file)
logging.info("Successfully copied %s to %s!" % (src_file, target_file))
return True
    except IOError as e:
        logging.error("I/O error({0}): {1}".format(e.errno, e.strerror))
        return False
    except Exception:
        logging.error("Unexpected error:{0}".format(sys.exc_info()[0]))
logging.exception("Error happened in copy %s to ack1_flag folder!" % hl7_file)
return False
def copy_or_move_wrong_hl7(my_config, hl7_file):
try:
src_file = os.path.join(my_config.folder_localoutbox, hl7_file)
target_file = os.path.join(my_config.folder_hl7flag, hl7_file)
if my_config.hl7_operation_method_is_copy:
logging.debug("Start to copy wrong hl7 %s to %s" % (src_file, target_file))
shutil.copyfile(src_file, target_file)
logging.info("Successfully copied wrong hl7 %s to %s!" % (src_file, target_file))
os.remove(src_file)
logging.info("Successfully removed wrong hl7 source file after copy:%s" % src_file)
else:
logging.debug("Start to move wrong hl7 %s to %s" % (src_file, target_file))
shutil.move(src_file, target_file)
logging.info("Successfully moved wrong hl7 %s to %s!" % (src_file, target_file))
return True
    except IOError as e:
        logging.error("I/O error({0}) for wrong hl7: {1}".format(e.errno, e.strerror))
return False
except Exception as e:
logging.error("Unexpected error:{0}".format(sys.exc_info()[0]))
logging.exception("Error happened in copy wrong hl7 %s to remote outbox folder!" % hl7_file)
return False
def copy_or_move_hl7_to_remoteoutbox(my_config, hl7_file):
try:
src_file = os.path.join(my_config.folder_localoutbox, hl7_file)
target_file = os.path.join(my_config.folder_remoteoutbox, hl7_file)
if my_config.hl7_operation_method_is_copy:
logging.debug("Start to copy %s to %s" % (src_file, target_file))
shutil.copyfile(src_file, target_file)
logging.info("Successfully copied %s to %s!" % (src_file, target_file))
os.remove(src_file)
logging.info("Successfully removed source file after copy:%s" % src_file)
else:
logging.debug("Start to move %s to %s" % (src_file, target_file))
shutil.move(src_file, target_file)
logging.info("Successfully moved %s to %s!" % (src_file, target_file))
#20160902 added in order to support shell command after copy or move
process_hl7_shell_commands(my_config, target_file)
#20160902 done
return True
    except IOError as e:
        logging.error("I/O error({0}): {1}".format(e.errno, e.strerror))
return False
except Exception as e:
logging.error("Unexpected error:{0}".format(sys.exc_info()[0]))
logging.exception("Error happened in copy %s to remote outbox folder!" % hl7_file)
return False
def process_hl7_message(my_config):
logging.info("Start to process HL7 message in local outbox folder...")
if not os.path.exists(my_config.folder_localoutbox):
logging.error("Local outbox folder doesn't exist! Please check your configuration file!")
return
file_list = get_hl7_message_files(my_config)
for hl7_file in file_list:
if is_valid_hl7(my_config, hl7_file):
ack1_flag_copy_status = create_ack1_flag_from_hl7(my_config, hl7_file)
if ack1_flag_copy_status:
hl7_copy_status = copy_or_move_hl7_to_remoteoutbox(my_config, hl7_file)
else:
logging.warning("Unknown file found in HL7 local outbox folder:%s" % os.path.basename(hl7_file))
copy_or_move_wrong_hl7(my_config, hl7_file)
logging.info("Processing in local outbox folder has been finished!")
def detect_ack_file(my_config, orphan, file_content,
ack1_flag_files, ack2_flag_files, ack3_flag_files):
file_name = os.path.basename(orphan)
logging.info("Start to detect ack types!")
logging.info("Looking for file name:%s in ack1-flag folder" % file_name)
if file_name in ack1_flag_files:
logging.info("Found ack1 flag:%s" % file_name)
return 'ACK1', True
else:
logging.info("This is not a ACK1 for CCM")
logging.info("Looking for message ID from potential ack2 content:%s" % orphan)
message_id = get_messageid_from_ack2_content(file_content)
if message_id:
logging.info("Found message ID from ack2:%s" % message_id)
if message_id in ack2_flag_files:
logging.info("Found this message ID in ack2 flag folder!")
return 'ACK2', True
else:
logging.info("This message ID is not for CCM!")
return 'ACK2', False
else:
logging.info("Couldn't find message ID from this content. it's not a ACK2 file!")
logging.info("Try to look for core ID from ack3 content")
core_id = get_coreid_from_ack3_content(file_content)
if core_id:
logging.info("Found core id:%s" % core_id)
if core_id in ack3_flag_files:
logging.info("Found this core id in ack3 flag folder!")
return 'ACK3', True
else:
logging.info("This cord ID is not for CCM!")
return 'ACK3', False
else:
logging.info("Couldn't find CoreID from this content. This is not a ACK3 file!")
logging.info("Nothing detected!")
return '', False
def get_coreid_from_ack2_content(file_content):
logging.debug(file_content)
CORE_ID = r'CoreId:'
    DATE_RECEIVED = r'DateTime Receipt Generated:'
    int_start = file_content.find(CORE_ID)
    int_end = file_content.find(DATE_RECEIVED, int_start)
if int_start>=0 and int_end>=0:
coreId = file_content[int_start+len(CORE_ID) : int_end]
if coreId:
return coreId.strip()
return None
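# A minimal illustration of the slicing above, using a hypothetical ACK2
# snippet (the real ACK2 layout may differ):
#
#     >>> get_coreid_from_ack2_content(
#     ...     "CoreId: 1234567 DateTime Receipt Generated: 20160902")
#     '1234567'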
def get_coreid_from_ack3_content(file_content):
logging.debug(file_content)
ACK3_XML1 = r'<?xml version="1.0" encoding="UTF-8"?>' #ACK3 file from FDA doesn't have this tag
ACK3_XML2 = r'<submission>'
CORE_ID_START = r'<coreId>'
CORE_ID_END = r'</coreId>'
if file_content.find(ACK3_XML2)>=0:
int_start = file_content.find(CORE_ID_START)
int_end = file_content.find(CORE_ID_END)
if int_start>=0 and int_end>=0:
return file_content[int_start+len(CORE_ID_START):int_end]
return None
def get_messageid_from_ack1_content(file_content):
logging.debug(file_content)
return file_content[file_content.find("<")+1 : file_content.find(">")]
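# The ACK1 content is assumed to wrap the message ID in angle brackets, so
# the slice above takes everything between the first '<' and the first '>',
# e.g. (hypothetical content):
#
#     >>> get_messageid_from_ack1_content("<MSG-0001> received")
#     'MSG-0001'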
def get_messageid_from_ack2_content(file_content):
'''
get message id from ACK2
:param file_content:
:return:
'''
logging.debug(file_content)
if file_content.find('MessageId')>=0:
logging.info("Found MessageID in ack2 content!")
return (file_content[file_content.find("<")+1:file_content.rfind(">")]).strip()
else:
return None
def touch(file_name):
''' create an empty file with specific name
'''
with open(file_name, 'a'):
os.utime(file_name, None)
def read_content_from_orphan(my_config, orphan):
with open(os.path.join(my_config.folder_remoteorphan, orphan), 'r') as content_file:
return content_file.read()
def create_file(my_config, source_file, target_file, file_content, notes):
if my_config.operation_delay>0:
time.sleep(my_config.operation_delay)
try:
if my_config.operation_method_is_move:
shutil.move(source_file, target_file)
logging.info("Successfully moved %s to %s for %s file." %
(source_file, target_file, notes))
elif my_config.operation_method_is_copy:
shutil.copyfile(source_file, target_file)
logging.info("Successfully copied %s to %s for %s file." %
(source_file, target_file, notes))
else:
if my_config.recheck_content:
new_file_content = read_content_from_orphan(my_config, os.path.basename(source_file))
                if file_content != new_file_content:
                    logging.debug("%s file content has been changed since ACK detect, "
                                  "it will be ignored this time." % notes)
                    return False
with open(target_file, "w") as target:
target.write(file_content)
logging.info("Successfully write down %s content into file:%s" % (notes,target_file))
#20160902 added to support shell commands after copy or move
process_ack_shell_commands(my_config, target_file)
#20160902 Done
return True
except Exception as e:
logging.exception("Error happened in %s file operation!" % notes)
return False
def process_ack1_file(my_config, orphan, file_content):
try:
message_id = get_messageid_from_ack1_content(file_content)
source_file = os.path.join(my_config.folder_remoteorphan, orphan)
target_file = os.path.join(my_config.folder_localinbox, orphan)
ack1_flag = os.path.join(my_config.folder_ack1flag, orphan)
ack2_flag = os.path.join(my_config.folder_ack2flag, message_id)
file_tobedelete = os.path.join(my_config.folder_tobedeleted, orphan)
# copy or write, it's a question. let's try from write content!
# shutil.copyfile(source_file, target_file)
logging.debug("Start to write ack1 content into local inbox folder!")
if not create_file(my_config, source_file, target_file, file_content, "ACK1"):
logging.error("Unexpected error happened in ack1 file creation!")
return
touch(ack2_flag)
logging.info("Successfully create an empty ack2 flag file:%s" % ack2_flag)
touch(file_tobedelete)
logging.info("Successfully create an empty file to be deleted:%s" % file_tobedelete)
try:
if not my_config.operation_method_is_move:
os.remove(source_file)
logging.info("Successfully removed original source file from remote orphan folder:%s" % source_file)
os.remove(file_tobedelete)
logging.info("Successfully removed flag of file to be deleted!")
os.remove(ack1_flag)
logging.info("Successfully removed ack1 flag!")
        except IOError as e:
            logging.error("process_ack1_file remove I/O error({0}): {1}".format(e.errno, e.strerror))
except Exception as e:
logging.exception("process_ack1_file remove Unexpected error:{0}".format(sys.exc_info()[0]))
except UnicodeDecodeError as ude:
logging.error(("%s has invalid content as ack1 file, error message:" % orphan)+ str(ude))
logging.exception("process_ack1_file UnicodeDecodeError: %s" % orphan)
    except IOError as e:
        logging.error("process_ack1_file I/O error({0}): {1}".format(e.errno, e.strerror))
except Exception as e:
logging.exception("process_ack1_file Unexpected error:", sys.exc_info()[0])
def process_ack2_file(my_config, orphan, file_content):
try:
message_id = get_messageid_from_ack2_content(file_content)
core_id = get_coreid_from_ack2_content(file_content)
source_file = os.path.join(my_config.folder_remoteorphan, orphan)
target_file = os.path.join(my_config.folder_localinbox, orphan)
ack2_flag = os.path.join(my_config.folder_ack2flag, message_id)
ack3_flag = os.path.join(my_config.folder_ack3flag, core_id)
file_tobedelete = os.path.join(my_config.folder_tobedeleted, orphan)
# shutil.copyfile(source_file, target_file)
logging.debug("Start to write down content to ACK2 file!")
if not create_file(my_config, source_file, target_file, file_content, "ACK2"):
logging.error("Unexpected error happened in ack2 file creation!")
return
touch(ack3_flag)
logging.info("Successfully create an empty ack3 flag:%s" % ack3_flag)
touch(file_tobedelete)
logging.info("Successfully create an empty flag for file to be delete")
try:
if not my_config.operation_method_is_move:
os.remove(source_file)
logging.info("Successfully removed original source file:%s" % source_file)
os.remove(file_tobedelete)
logging.info("Successfully removed flag of file to be delete!")
os.remove(ack2_flag)
logging.info("Successfully removed ack2 flag!")
        except IOError as e:
            logging.error("process_ack2_file remove I/O error({0}): {1}".format(e.errno, e.strerror))
except Exception as e:
logging.exception("process_ack2_file remove Unexpected error:{0}".format(sys.exc_info()[0]))
except UnicodeDecodeError as ude:
logging.error(("%s has invalid content as ack2 file, error message:" % orphan)+ str(ude))
logging.exception("process_ack2_file UnicodeDecodeError: %s" % orphan)
    except IOError as e:
        logging.error("process_ack2_file I/O error({0}): {1}".format(e.errno, e.strerror))
except Exception as e:
logging.exception("process_ack2_file Unexpected error:{0}".format(sys.exc_info()[0]))
def process_ack3_file(my_config, orphan, file_content):
try:
core_id = get_coreid_from_ack3_content(file_content)
source_file = os.path.join(my_config.folder_remoteorphan, orphan)
import streamlit as st
from hydralit import HydraHeadApp
from db import db_conn as oracle_db
import pandas as pd
import numpy as np
import datetime
import altair as alt
import pydeck as pdk
from PIL import Image
import matplotlib.pyplot as plt
plt.style.use('default')
import plotly.graph_objects as go
import plotly.io as pio
import plotly.express as px
import random
class State(HydraHeadApp):
def __init__(self):
self.day = None
self.day2 = None
self.year = None
self.weather = None
self.time = None
self.temp = None
self.location_choice = None
self.latitude = 32.3792
self.longitude = -86.3077
self.date_choice = ""
self.condition = ""
self.temperature = ""
self.state1 = ""
self.state2 = ""
self.city1 = ""
self.city2 = ""
self.sum1 = None
self.sum2 = None
self.add_queries = ""
self.state_selection = False
        self.df_location1 = pd.DataFrame()
        self.df_location2 = pd.DataFrame()
self.time_query = pd.DataFrame()
self.wthr_query = pd.DataFrame()
self.temp_query = pd.DataFrame()
self.formState1 = False
self.formState2 = False
self.form = False
self.cursor = oracle_db.connection.cursor()
self.state_name = ('Alabama', 'Arizona', 'Arkansas', 'California', 'Colorado',
'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Idaho',
'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine',
'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi',
'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey',
'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma',
'Oregon', 'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota',
'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington',
'West Virginia', 'Wisconsin', 'Wyoming')
self.sql_choice = 'Hide'
    # State centroid coordinates; a simple dict lookup replaces the long
    # match/case chain and avoids requiring Python 3.10+.
    STATE_CENTROIDS = {
        'Alabama': (32.318230, -86.902298),
        'Arizona': (34.048927, -111.093735),
        'Arkansas': (34.799999, -92.199997),
        'California': (36.778259, -119.417931),
        'Colorado': (39.113014, -105.358887),
        'Connecticut': (41.599998, -72.699997),
        'Delaware': (39.000000, -75.500000),
        'Florida': (27.994402, -81.760254),
        'Georgia': (33.247875, -83.441162),
        'Idaho': (44.068203, -114.742043),
        'Illinois': (40.000000, -89.000000),
        'Indiana': (40.273502, -86.126976),
        'Iowa': (42.032974, -93.581543),
        'Kansas': (38.500000, -98.000000),
        'Kentucky': (37.839333, -84.270020),
        'Louisiana': (30.391830, -92.329102),
        'Maine': (45.367584, -68.972168),
        'Maryland': (39.045753, -76.641273),
        'Massachusetts': (42.407211, -71.382439),
        'Michigan': (44.182205, -84.506836),
        'Minnesota': (46.392410, -94.636230),
        'Mississippi': (33.000000, -90.000000),
        'Missouri': (38.573936, -92.603760),
        'Montana': (46.965260, -109.533691),
        'Nebraska': (41.500000, -100.000000),
        'Nevada': (39.876019, -117.224121),
        'New Hampshire': (44.000000, -71.500000),
        'New Jersey': (39.833851, -74.871826),
        'New Mexico': (34.307144, -106.018066),
        'New York': (43.000000, -75.000000),
        'North Carolina': (35.782169, -80.793457),
        'North Dakota': (47.650589, -100.437012),
        'Ohio': (40.367474, -82.996216),
        'Oklahoma': (36.084621, -96.921387),
        'Oregon': (44.000000, -120.500000),
        'Pennsylvania': (41.203323, -77.194527),
        'Rhode Island': (41.742325, -71.742332),
        'South Carolina': (33.836082, -81.163727),
        'South Dakota': (44.500000, -100.000000),
        'Tennessee': (35.860119, -86.660156),
        'Texas': (31.000000, -100.000000),
        'Utah': (39.419220, -111.950684),
        'Vermont': (44.000000, -72.699997),
        'Virginia': (37.926868, -78.024902),
        'Washington': (47.751076, -120.740135),
        'West Virginia': (39.000000, -80.500000),
        'Wisconsin': (44.500000, -89.500000),
        'Wyoming': (43.075970, -107.290283),
    }
    def update_state(self, name):
        # leaves the current coordinates untouched for unknown names,
        # matching the behavior of the original match/case chain
        if name in self.STATE_CENTROIDS:
            self.latitude, self.longitude = self.STATE_CENTROIDS[name]
def load_sidebar(self):
with st.sidebar:
st.image(Image.open('images/logo2.png'), width = 250)
self.date_choice = st.radio(
"Query by Year or Date Range",
("Year", "Date")
)
self.location_choice = st.radio(
"Query by State or City",
("State", "City")
)
with st.form(key = 'form1'):
if (self.date_choice == "Date"):
st.header('Accidents by Date', anchor = None)
self.day = st.date_input(
"Date 1:", datetime.datetime(2016, 2, 8),
min_value = datetime.datetime(2016, 2, 8),
max_value = datetime.datetime(2020, 12, 30)
)
self.day2 = st.date_input(
"Date 2:", datetime.datetime(2016, 2, 8),
min_value = datetime.datetime(2016, 2, 8),
max_value = datetime.datetime(2020, 12, 30)
)
else:
st.header('Accidents by Year', anchor = None)
self.year = st.slider(
'Select the range of years',
2016, 2020, (2016, 2017)
)
if (self.location_choice == "City"):
st.header('City', anchor = None)
self.state_selection = True
self.state1 = st.selectbox("Select State 1", self.state_name)
self.city1 = "Montgomery"
self.city1 = st.text_input("Enter city name 1")
self.state2 = st.selectbox("Select State 2", self.state_name)
self.city2 = "Montgomery"
self.city2 = st.text_input("Enter city name 2")
else:
# State selection
self.state_selection = True
st.header('State', anchor = None)
self.state1 = st.selectbox(
"Select State 1", self.state_name
)
self.state2 = st.selectbox(
"Select State 2", self.state_name
)
# multiselect weather. passes the condition to the weather function
st.header('Weather', anchor = None)
self.weather = st.multiselect(
'Select Weather Condition',
['Rain', 'Snow', 'Partly Cloudy', 'Tornado', 'Clear', 'Cloudy', 'Thunderstorm', 'Hail', 'Windy',
'Mostly Cloudy', 'Fair', 'Overcast', 'Scattered Clouds', 'Fog', 'Haze']
)
# multiselect temperature
st.header('Temperature', anchor = None)
self.temp = st.multiselect(
'Select Temperature',
['Temp < 00 °F', '00 - 19 °F', '20 - 39 °F', '40 - 59 °F', '60 - 79 °F', 'Temp > 80 °F']
)
# multiselect time
                st.header('Time', anchor = None)
self.time = st.multiselect(
'Select Time',
['12:00 AM - 02:59 AM', '03:00 AM - 05:59 AM',
'06:00 AM - 08:59 AM', '09:00 AM - 11:59 AM',
'12:00 PM - 02:59 PM', '03:00 PM - 05:59 PM',
'06:00 PM - 08:59 PM', '09:00 PM - 11:59 PM']
)
st.header('State Funding', anchor = None)
self.add_queries = st.selectbox("Select a Query", ["", "State Funding"])
self.form = st.form_submit_button(label = 'Run Query')
self.sql_choice = st.radio(
"Show SQL Code for Queries",
("Hide", "Show")
)
def load_map(self, state, city):
coordinates = pd.DataFrame()
defaultRadius = 0
defaultZoom = 5
if(self.location_choice == "City"):
coordinates = self.coordinates(state, city)
defaultZoom = 8
defaultRadius = 400
else:
coordinates = self.coordinates(state, "")
defaultZoom = 5.5
defaultRadius = 2000
st.pydeck_chart(pdk.Deck(
map_style = 'mapbox://styles/mapbox/light-v9',
initial_view_state = pdk.ViewState(
latitude = self.latitude,
longitude = self.longitude,
height = 530,
width = 450,
zoom = defaultZoom,
pitch = 10,
),
layers = [
                pdk.Layer(
                    'ScatterplotLayer',
                    data = coordinates[:5000],
                    get_position = '[LON, LAT]',
                    pickable = True,
                    get_color = '[200, 30, 0, 160]',
                    get_radius = defaultRadius,
                )
],
))
def map_location(self, state, city):
result = f"""SELECT start_long AS lon, start_lat AS lat\nFROM "J.POULOS".ACCIDENT a\nWHERE """
if self.location_choice == "City":
result += f"a.STATE_NAME = '{state}' AND\n"
result += f"a.CITY_NAME = '{city}' AND\n"
else:
result += f"a.STATE_NAME = '{state}' AND\n"
def city_location(self, state, city):
temp = f"""SELECT c.latitude, c.longitude
FROM "J.POULOS".city c
WHERE c.name = '{city}' AND c.province = '{state}'"""
coord_Query = pd.read_sql(temp, con = oracle_db.connection)
        if not coord_Query.empty:
self.latitude = coord_Query.iloc[0,0]
self.longitude = coord_Query.iloc[0,1]
def coordinates(self, state, city):
result = f"""SELECT start_long AS lon, start_lat AS lat\nFROM "J.POULOS".ACCIDENT a\nWHERE """
if self.location_choice == "City":
result += f"a.STATE_NAME = '{state}' AND\n"
result += f"a.CITY_NAME = '{city}' AND\n"
else:
result += f"a.STATE_NAME = '{state}' AND\n"
# add date conditions based on a range of days selected
if self.date_choice == 'Date':
dates = [self.day, self.day2]
dates.sort()
result += f"trunc(start_time) BETWEEN to_date('{dates[0]}', 'YYYY-MM-DD') AND to_date('{dates[1]}', 'YYYY-MM-DD')\n"
else:
result += f"EXTRACT(year FROM start_time) BETWEEN {self.year[0]} AND {self.year[1]} \n"
# add weather conditions
result += self.generate_weather_list()
# add temperature conditions
result += self.generate_temp_list()
# add time conditions
result += self.generate_time_list()
coord_Query = pd.read_sql(result, con = oracle_db.connection)
return coord_Query
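    # For illustration, with location_choice == "State", state == "Texas" and
    # year == (2016, 2017) (and no weather/temperature/time selections, which
    # are assumed to contribute empty strings), coordinates() builds roughly:
    #
    #     SELECT start_long AS lon, start_lat AS lat
    #     FROM "J.POULOS".ACCIDENT a
    #     WHERE a.STATE_NAME = 'Texas' AND
    #     EXTRACT(year FROM start_time) BETWEEN 2016 AND 2017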
def location_query(self, state, city):
# check if date or year
if self.date_choice == 'Date':
result = f"""SELECT trunc(start_time) AS time,\nCOUNT(*) AS Total_Accidents \nFROM "J.POULOS".ACCIDENT a\nWHERE """
import re
import xml.etree.ElementTree as ET
from os import path
from collections import Counter
import argparse
import csv
import json
import os
import random
# package local imports
import sys
import uuid
import matplotlib.pyplot as plt
import math
from dateutil.parser import parse
from tdigest import TDigest
import numpy as np
import boto3
from tqdm import tqdm
# package local imports
sys.path.append(os.getcwd() + "/..")
field_tokenization = ",.<>{}[]\"':;!@#$%^&*()-+=~"
from common_datagen import (
download_url,
generate_setup_json,
compress_files,
generate_inputs_dict_item,
humanized_bytes,
del_non_use_case_specific_keys,
add_key_metric,
upload_dataset_artifacts_s3,
add_deployment_requirements_redis_server_module,
add_deployment_requirements_benchmark_tool,
add_deployment_requirements_utilities,
init_deployment_requirement,
remove_file_if_exists,
decompress_file,
)
from pathlib import Path
origin = "https://dumps.wikimedia.org/enwiki/20210501/enwiki-20210501-pages-articles1.xml-p1p41242.bz2"
filename = "enwiki-20210501-pages-articles1.xml-p1p41242.bz2"
decompressed_fname = "enwiki-20210501-pages-articles1.xml-p1p41242"
def generate_enwiki_pages_index_type():
types = {}
for f in ["title", "text", "comment"]:
types[f] = "text"
for f in ["username"]:
types[f] = "tag"
for f in ["timestamp"]:
types[f] = "numeric"
return types
def generate_lognormal_dist(n_elements):
mu, sigma = 0.0, 1
s = np.random.lognormal(mu, sigma, n_elements)
min_s = min(s)
max_s = max(s)
diff = max_s - min_s
s = s - min_s
s = s / diff
return s
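# The samples are min-max rescaled into [0.0, 1.0]; for example (exact values
# depend on the global NumPy random state):
#
#     >>> s = generate_lognormal_dist(1000)
#     >>> float(s.min()), float(s.max())
#     (0.0, 1.0)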
def generate_ft_create_row(index, index_types, use_ftadd, no_index_list):
if use_ftadd:
cmd = ['"FT.CREATE"', '"{index}"'.format(index=index), '"SCHEMA"']
else:
cmd = [
'"FT.CREATE"',
'"{index}"'.format(index=index),
'"ON"',
'"HASH"',
'"SCHEMA"',
]
for f, v in index_types.items():
cmd.append('"{}"'.format(f))
cmd.append('"{}"'.format(v))
        if f in no_index_list:
            cmd.append('"NOINDEX"')
        else:
            cmd.append('"SORTABLE"')
return cmd
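# With the index types from generate_enwiki_pages_index_type() and no NOINDEX
# fields, the generated row starts like:
#
#     ['"FT.CREATE"', '"enwiki_pages"', '"ON"', '"HASH"', '"SCHEMA"',
#      '"title"', '"text"', '"SORTABLE"', '"text"', '"text"', '"SORTABLE"', ...]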
def generate_ft_drop_row(index):
cmd = ["FT.DROP", "{index}".format(index=index), "DD"]
return cmd
def EscapeTextFileString(field):
for char_escape in field_tokenization:
field = field.replace(char_escape, "\\{}".format(char_escape))
field = field.replace("\n", " \\n")
return field
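# Example of the escaping performed above (the embedded newline becomes a
# literal " \n" sequence):
#
#     >>> EscapeTextFileString("a,b (c)\nnext")
#     'a\\,b \\(c\\) \\nnext'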
def use_case_to_cmd(use_ftadd, title, text, comment, username, timestamp, total_docs):
escaped_title = EscapeTextFileString(title)
escaped_text = EscapeTextFileString(text)
escaped_comment = EscapeTextFileString(comment)
size = len(escaped_title) + len(escaped_text) + len(escaped_comment) + len(username)
    doc_hash = {
        "title": escaped_title,
        "text": escaped_text,
        "comment": escaped_comment,
        "username": username,
        "timestamp": timestamp,
    }
docid_str = "doc:{hash}:{n}".format(hash=uuid.uuid4().hex, n=total_docs)
fields = []
    for f, v in doc_hash.items():
if v is not None:
fields.append(f)
fields.append(v)
if use_ftadd is False:
cmd = ["WRITE", "W1", 1, "HSET", docid_str]
else:
cmd = ["WRITE", "W1", 2, "FT.ADD", indexname, docid_str, "1.0", "FIELDS"]
for x in fields:
cmd.append(x)
return cmd, size
def getQueryWords(doc, stop_words, size):
words = doc["comment"]
words = re.sub("[^0-9a-zA-Z]+", " ", words)
words = words.split(" ")
queryWords = []
totalQueryWords = 0
for word in words:
word = word.lstrip().rstrip()
if len(word) > 3 and word not in stop_words and word != "Wikipedia":
queryWords.append(word)
totalQueryWords = totalQueryWords + 1
if totalQueryWords > size:
break
return queryWords, totalQueryWords
def generate_benchmark_commands(
total_benchmark_commands,
bench_fname,
all_fname,
indexname,
docs,
stop_words,
use_numeric_range_searchs,
ts_digest,
p_writes,
query_choices,
):
total_benchmark_reads = 0
total_benchmark_writes = 0
all_csvfile = open(all_fname, "a", newline="")
bench_csvfile = open(bench_fname, "w", newline="")
all_csv_writer = csv.writer(all_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
bench_csv_writer = csv.writer(bench_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
progress = tqdm(unit="docs", total=total_benchmark_commands)
total_docs = len(docs)
## timestamp related
timestamps_pdist = generate_lognormal_dist(total_benchmark_commands)
min_ts = ts_digest.percentile(0.0)
max_ts = ts_digest.percentile(100.0)
query_range_digest = TDigest()
generated_commands = 0
while generated_commands < total_benchmark_commands:
query_ts_pdist = timestamps_pdist[generated_commands]
percentile = (1.0 - query_ts_pdist) * 100.0
query_min_ts = ts_digest.percentile(percentile)
random_doc_pos = random.randint(0, total_docs - 1)
doc = docs[random_doc_pos]
# decide read or write
p_cmd = random.random()
if p_cmd < p_writes:
## WRITE
total_benchmark_writes = total_benchmark_writes + 1
generated_row, doc_size = use_case_to_cmd(
use_ftadd,
doc["title"],
doc["text"],
doc["comment"],
doc["username"],
doc["timestamp"],
generated_commands,
)
else:
## READ
total_benchmark_reads = total_benchmark_reads + 1
words, totalW = getQueryWords(doc, stop_words, 2)
choice = random.choices(query_choices)[0]
generated_row = None
numeric_range_str = ""
if use_numeric_range_searchs:
numeric_range_str = "@timestamp:[{} {}] ".format(query_min_ts, max_ts)
query_range_digest.update(int(max_ts - query_min_ts))
if choice == "simple-1word-query" and len(words) >= 1:
generated_row = generate_ft_search_row(
indexname,
"simple-1word-query",
"{}{}".format(numeric_range_str, words[0]),
)
elif choice == "2word-union-query" and len(words) >= 2:
generated_row = generate_ft_search_row(
indexname,
"2word-union-query",
"{}{} {}".format(numeric_range_str, words[0], words[1]),
)
elif choice == "2word-intersection-query" and len(words) >= 2:
generated_row = generate_ft_search_row(
indexname,
"2word-intersection-query",
"{}{}|{}".format(numeric_range_str, words[0], words[1]),
)
        if generated_row is not None:
            all_csv_writer.writerow(generated_row)
            bench_csv_writer.writerow(generated_row)
progress.update()
generated_commands = generated_commands + 1
progress.close()
bench_csvfile.close()
all_csvfile.close()
xx = []
yy = []
p90 = query_range_digest.percentile(90.0)
dataset_percent = ts_digest.cdf(p90)
    print(
        "90% of the read queries target at most {} percent of the keyspace".format(
            dataset_percent
        )
    )
    print(
        "100% of the read queries target at most {} percent of the keyspace".format(
            ts_digest.cdf(max_ts - min_ts)
        )
    )
for centroid in query_range_digest.centroids_to_list():
ts_m = centroid["m"]
xx.append(ts_m)
yy.append(query_range_digest.cdf(ts_m))
plt.scatter(xx, yy)
plt.title("EnWiki pages Query time range")
plt.xlabel("Query time range")
plt.ylabel("cdf")
plt.xscale("log")
plt.show()
return total_benchmark_reads, total_benchmark_writes
def generate_ft_search_row(index, query_name, query):
cmd = [
"READ",
query_name,
1,
"FT.SEARCH",
"{index}".format(index=index),
"{query}".format(query=query),
]
return cmd
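# Example of a produced read row:
#
#     >>> generate_ft_search_row("enwiki_pages", "simple-1word-query", "linux")
#     ['READ', 'simple-1word-query', 1, 'FT.SEARCH', 'enwiki_pages', 'linux']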
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="RediSearch FTSB data generator.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--project", type=str, default="redisearch", help="the project being tested"
)
parser.add_argument(
"--seed",
type=int,
default=12345,
help="the random seed used to generate random deterministic outputs",
)
parser.add_argument(
"--read-ratio", type=int, default=10, help="query time read ratio"
)
parser.add_argument(
"--write-ratio", type=int, default=1, help="query time write ratio"
)
parser.add_argument(
"--min-doc-len",
type=int,
default=1024,
help="Discard any generated document bellow the specified value",
)
parser.add_argument(
"--doc-limit",
type=int,
default=100000,
help="the total documents to generate to be added in the setup stage",
)
parser.add_argument(
"--total-benchmark-commands",
type=int,
default=100000,
help="the total commands to generate to be issued in the benchmark stage",
)
parser.add_argument(
"--stop-words",
type=str,
default="a,is,the,an,and,are,as,at,be,but,by,for,if,in,into,it,no,not,of,on,or,such,that,their,then,there,these,they,this,to,was,will,with",
help="When searching, stop-words are ignored and treated as if they were not sent to the query processor. Therefore, to be 100% correct we need to prevent those words to enter a query",
)
parser.add_argument(
"--index-name",
type=str,
default="enwiki_pages",
help="the name of the RediSearch index to be used",
)
parser.add_argument(
"--test-name",
type=str,
default="100K-enwiki_pages-hashes",
help="the name of the test",
)
parser.add_argument(
"--test-description",
type=str,
default="benchmark focused on full text search queries performance, making usage of English-language Wikipedia:Database page revisions",
help="the full description of the test",
)
parser.add_argument(
"--upload-artifacts-s3",
default=False,
action="store_true",
help="uploads the generated dataset files and configuration file to public benchmarks.redislabs bucket. Proper credentials are required",
)
parser.add_argument(
"--use-ftadd",
default=False,
action="store_true",
help="Use FT.ADD instead of HSET",
)
parser.add_argument(
"--query-use-ts-numeric-range-filter",
default=False,
action="store_true",
help="Use a numeric range filter on queries to simulate searchs that imply a log-normal keyspace access (very hot data and some cold data)",
)
parser.add_argument(
"--big-text-field-noindex",
default=False,
action="store_true",
help="On index creation mark the largest text field as no index. If a field has NOINDEX and doesn't have SORTABLE, it will just be ignored by the index. This is usefull to test RoF for example.",
)
parser.add_argument(
"--temporary-work-dir",
type=str,
default="./tmp",
help="The temporary dir to use as working directory for file download, compression,etc... ",
)
parser.add_argument(
"--query-choices",
type=str,
default="simple-1word-query,2word-union-query,2word-intersection-query",
help="comma separated list of queries to produce. one of: simple-1word-query,2word-union-query,2word-intersection-query",
)
parser.add_argument(
"--upload-artifacts-s3-uncompressed",
action="store_true",
help="uploads the generated dataset files and configuration file to public benchmarks.redislabs bucket. Proper credentials are required",
)
args = parser.parse_args()
query_choices = args.query_choices.split(",")
use_case_specific_arguments = del_non_use_case_specific_keys(dict(args.__dict__))
# generate the temporary working dir if required
working_dir = args.temporary_work_dir
Path(working_dir).mkdir(parents=True, exist_ok=True)
seed = args.seed
project = args.project
doc_limit = args.doc_limit
stop_words = args.stop_words.split(",")
indexname = args.index_name
test_name = args.test_name
use_numeric_range_searchs = args.query_use_ts_numeric_range_filter
no_index_list = []
big_text_field_noindex = args.big_text_field_noindex
if big_text_field_noindex:
test_name += "-big-text-field-noindex"
no_index_list = ["text"]
if use_numeric_range_searchs:
test_name += "-lognormal-numeric-range-searchs"
min_doc_len = args.min_doc_len
description = args.test_description
s3_bucket_name = "benchmarks.redislabs"
s3_bucket_path = "redisearch/datasets/{}/".format(test_name)
s3_uri = "https://s3.amazonaws.com/{bucket_name}/{bucket_path}".format(
bucket_name=s3_bucket_name, bucket_path=s3_bucket_path
)
benchmark_output_file = "{test_name}.{project}.commands".format(
test_name=test_name, project=project
)
benchmark_config_file = "{test_name}.{project}.cfg.json".format(
test_name=test_name, project=project
)
all_fname = "{}.ALL.csv".format(benchmark_output_file)
all_fname_compressed = "{}.ALL.tar.gz".format(benchmark_output_file)
setup_fname = "{}.SETUP.csv".format(benchmark_output_file)
bench_fname = "{}.BENCH.QUERY_{}_write_{}_to_read_{}.csv".format(
benchmark_output_file,
"__".join(query_choices),
args.write_ratio,
args.read_ratio,
)
setup_fname_compressed = "{}.SETUP.tar.gz".format(benchmark_output_file)
bench_fname_compressed = "{}.BENCH.tar.gz".format(benchmark_output_file)
remote_url_all = "{}{}".format(s3_uri, all_fname_compressed)
remote_url_setup = "{}{}".format(s3_uri, setup_fname_compressed)
remote_url_bench = "{}{}".format(s3_uri, bench_fname_compressed)
## remove previous files if they exist
all_artifacts = [
all_fname,
setup_fname,
bench_fname,
all_fname_compressed,
setup_fname_compressed,
bench_fname_compressed,
benchmark_config_file,
]
for artifact in all_artifacts:
remove_file_if_exists(artifact)
use_ftadd = args.use_ftadd
total_benchmark_commands = args.total_benchmark_commands
used_indices = [indexname]
setup_commands = []
teardown_commands = []
key_metrics = []
add_key_metric(
key_metrics,
"setup",
"throughput",
"OverallRates.overallOpsRate",
"Overall writes query rate",
"docs/sec",
"numeric",
"higher-better",
1,
)
add_key_metric(
key_metrics,
"setup",
"latency",
"OverallQuantiles.allCommands.q50",
"Overall writes query q50 latency",
"ms",
"numeric",
"lower-better",
2,
)
add_key_metric(
key_metrics,
"benchmark",
"throughput",
"OverallRates.overallOpsRate",
"Overall writes query rate",
"docs/sec",
"numeric",
"higher-better",
1,
)
add_key_metric(
key_metrics,
"benchmark",
"latency",
"OverallQuantiles.allCommands.q50",
"Overall writes query q50 latency",
"ms",
"numeric",
"lower-better",
2,
)
total_writes = 0
total_reads = 0
total_updates = 0
total_deletes = 0
# 1:10
p_writes = float(args.write_ratio) / (
float(args.read_ratio) + float(args.write_ratio)
)
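    # e.g. with the default write_ratio=1 and read_ratio=10 this yields
    # p_writes = 1 / (10 + 1) ~= 0.0909, i.e. one write for every ten reads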
json_version = "0.1"
benchmark_repetitions_require_teardown_and_resetup = False
print("-- Benchmark: {} -- ".format(test_name))
print("-- Description: {} -- ".format(description))
    total_docs = 0
well_id_to_cell_map_48 = {
1: {
'col_and_row': u'A1',
'row': 1,
'col': 1,
'well_id': 1,
},
2: {
'col_and_row': u'A2',
'row': 1,
'col': 2,
'well_id': 2,
},
3: {
'col_and_row': u'A3',
'row': 1,
'col': 3,
'well_id': 3,
},
4: {
'col_and_row': u'A4',
'row': 1,
'col': 4,
'well_id': 4,
},
5: {
'col_and_row': u'A5',
'row': 1,
'col': 5,
'well_id': 5,
},
6: {
'col_and_row': u'A6',
'row': 1,
'col': 6,
'well_id': 6,
},
    7: {
        'col_and_row': u'B1',
        'row': 2,
        'col': 1,
        'well_id': 7,
    },
    8: {
        'col_and_row': u'B2',
        'row': 2,
        'col': 2,
        'well_id': 8,
    },
    9: {
        'col_and_row': u'B3',
        'row': 2,
        'col': 3,
        'well_id': 9,
    },
    10: {
        'col_and_row': u'B4',
        'row': 2,
        'col': 4,
        'well_id': 10,
    },
    11: {
        'col_and_row': u'B5',
        'row': 2,
        'col': 5,
        'well_id': 11,
    },
    12: {
        'col_and_row': u'B6',
        'row': 2,
        'col': 6,
        'well_id': 12,
    },
    13: {
        'col_and_row': u'C1',
        'row': 3,
        'col': 1,
        'well_id': 13,
    },
    14: {
        'col_and_row': u'C2',
        'row': 3,
        'col': 2,
        'well_id': 14,
    },
    15: {
        'col_and_row': u'C3',
        'row': 3,
        'col': 3,
        'well_id': 15,
    },
    16: {
        'col_and_row': u'C4',
        'row': 3,
        'col': 4,
        'well_id': 16,
    },
    17: {
        'col_and_row': u'C5',
        'row': 3,
        'col': 5,
        'well_id': 17,
    },
    18: {
        'col_and_row': u'C6',
        'row': 3,
        'col': 6,
        'well_id': 18,
    },
    19: {
        'col_and_row': u'D1',
        'row': 4,
        'col': 1,
        'well_id': 19,
    },
    20: {
        'col_and_row': u'D2',
        'row': 4,
        'col': 2,
        'well_id': 20,
    },
    21: {
        'col_and_row': u'D3',
        'row': 4,
        'col': 3,
        'well_id': 21,
    },
    22: {
        'col_and_row': u'D4',
        'row': 4,
        'col': 4,
        'well_id': 22,
    },
    23: {
        'col_and_row': u'D5',
        'row': 4,
        'col': 5,
        'well_id': 23,
    },
    24: {
        'col_and_row': u'D6',
        'row': 4,
        'col': 6,
        'well_id': 24,
    },
    25: {
        'col_and_row': u'E1',
        'row': 5,
        'col': 1,
        'well_id': 25,
    },
    26: {
        'col_and_row': u'E2',
        'row': 5,
        'col': 2,
        'well_id': 26,
    },
    27: {
        'col_and_row': u'E3',
        'row': 5,
        'col': 3,
        'well_id': 27,
    },
    28: {
        'col_and_row': u'E4',
        'row': 5,
        'col': 4,
        'well_id': 28,
    },
    29: {
        'col_and_row': u'E5',
        'row': 5,
        'col': 5,
        'well_id': 29,
    },
    30: {
        'col_and_row': u'E6',
        'row': 5,
        'col': 6,
        'well_id': 30,
    },
    31: {
        'col_and_row': u'F1',
        'row': 6,
        'col': 1,
        'well_id': 31,
    },
    32: {
        'col_and_row': u'F2',
        'row': 6,
        'col': 2,
        'well_id': 32,
    },
    33: {
        'col_and_row': u'F3',
        'row': 6,
        'col': 3,
        'well_id': 33,
    },
    34: {
        'col_and_row': u'F4',
        'row': 6,
        'col': 4,
        'well_id': 34,
    },
    35: {
        'col_and_row': u'F5',
        'row': 6,
        'col': 5,
        'well_id': 35,
    },
    36: {
        'col_and_row': u'F6',
        'row': 6,
        'col': 6,
        'well_id': 36,
    },
    37: {
        'col_and_row': u'G1',
        'row': 7,
        'col': 1,
        'well_id': 37,
    },
    38: {
        'col_and_row': u'G2',
        'row': 7,
        'col': 2,
        'well_id': 38,
    },
    39: {
        'col_and_row': u'G3',
        'row': 7,
        'col': 3,
        'well_id': 39,
    },
    40: {
        'col_and_row': u'G4',
        'row': 7,
        'col': 4,
        'well_id': 40,
    },
    41: {
        'col_and_row': u'G5',
        'row': 7,
        'col': 5,
        'well_id': 41,
    },
    42: {
        'col_and_row': u'G6',
        'row': 7,
        'col': 6,
        'well_id': 42,
    },
    43: {
        'col_and_row': u'H1',
        'row': 8,
        'col': 1,
        'well_id': 43,
    },
    44: {
        'col_and_row': u'H2',
        'row': 8,
        'col': 2,
        'well_id': 44,
    },
    45: {
        'col_and_row': u'H3',
        'row': 8,
        'col': 3,
        'well_id': 45,
    },
    46: {
        'col_and_row': u'H4',
        'row': 8,
        'col': 4,
        'well_id': 46,
    },
    47: {
        'col_and_row': u'H5',
        'row': 8,
        'col': 5,
        'well_id': 47,
    },
    48: {
        'col_and_row': u'H6',
        'row': 8,
        'col': 6,
        'well_id': 48
    }
}
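# In both maps the numbering is row-major, so every entry satisfies
# well_id == (row - 1) * n_cols + col, with n_cols = 6 for the 48-well
# plate and n_cols = 12 for the 96-well plate. An illustrative helper
# (not part of the original module):
#
#     def well_id_for(row, col, n_cols):
#         return (row - 1) * n_cols + col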
well_id_to_cell_map_96 = {
1: {
'col_and_row': u'A1',
'row': 1,
'col': 1,
'well_id': 1,
},
2: {
'col_and_row': u'A2',
'row': 1,
'col': 2,
'well_id': 2,
},
3: {
'col_and_row': u'A3',
'row': 1,
'col': 3,
'well_id': 3,
},
4: {
'col_and_row': u'A4',
'row': 1,
'col': 4,
'well_id': 4,
},
5: {
'col_and_row': u'A5',
'row': 1,
'col': 5,
'well_id': 5,
},
6: {
'col_and_row': u'A6',
'row': 1,
'col': 6,
'well_id': 6,
},
7: {
'col_and_row': u'A7',
'row': 1,
'col': 7,
'well_id': 7,
},
8: {
'col_and_row': u'A8',
'row': 1,
'col': 8,
'well_id': 8,
},
9: {
'col_and_row': u'A9',
'row': 1,
'col': 9,
'well_id': 9,
},
10: {
'col_and_row': u'A10',
'row': 1,
'col': 10,
'well_id': 10,
},
11: {
'col_and_row': u'A11',
'row': 1,
'col': 11,
'well_id': 11,
},
12: {
'col_and_row': u'A12',
'row': 1,
'col': 12,
'well_id': 12,
},
13: {
'col_and_row': u'B1',
'row': 2,
'col': 1,
'well_id': 13,
},
14: {
'col_and_row': u'B2',
'row': 2,
'col': 2,
'well_id': 14,
},
15: {
'col_and_row': u'B3',
'row': 2,
'col': 3,
'well_id': 15,
},
16: {
'col_and_row': u'B4',
'row': 2,
'col': 4,
'well_id': 16,
},
17: {
'col_and_row': u'B5',
'row': 2,
'col': 5,
'well_id': 17,
},
18: {
'col_and_row': u'B6',
'row': 2,
'col': 6,
'well_id': 18,
},
19: {
'col_and_row': u'B7',
'row': 2,
'col': 7,
'well_id': 19,
},
20: {
'col_and_row': u'B8',
'row': 2,
'col': 8,
'well_id': 20,
},
21: {
'col_and_row': u'B9',
'row': 2,
'col': 9,
'well_id': 21,
},
22: {
'col_and_row': u'B10',
'row': 2,
'col': 10,
'well_id': 22,
},
23: {
'col_and_row': u'B11',
'row': 2,
'col': 11,
'well_id': 23,
},
24: {
'col_and_row': u'B12',
'row': 2,
'col': 12,
'well_id': 24,
},
25: {
'col_and_row': u'C1',
'row': 3,
'col': 1,
'well_id': 25,
},
26: {
'col_and_row': u'C2',
'row': 3,
'col': 2,
'well_id': 26,
},
27: {
'col_and_row': u'C3',
'row': 3,
'col': 3,
'well_id': 27,
},
28: {
'col_and_row': u'C4',
'row': 3,
'col': 4,
'well_id': 28,
},
29: {
'col_and_row': u'C5',
'row': 3,
'col': 5,
'well_id': 29,
},
30: {
'col_and_row': u'C6',
'row': 3,
'col': 6,
'well_id': 30,
},
31: {
'col_and_row': u'C7',
'row': 3,
'col': 7,
'well_id': 31,
},
32: {
'col_and_row': u'C8',
'row': 3,
'col': 8,
'well_id': 32,
},
33: {
'col_and_row': u'C9',
'row': 3,
'col': 9,
'well_id': 33,
},
34: {
'col_and_row': u'C10',
'row': 3,
'col': 10,
'well_id': 34,
},
35: {
'col_and_row': u'C11',
'row': 3,
'col': 11,
'well_id': 35,
},
36: {
'col_and_row': u'C12',
'row': 3,
'col': 12,
'well_id': 36,
},
37: {
'col_and_row': u'D1',
'row': 4,
'col': 1,
'well_id': 37,
},
38: {
'col_and_row': u'D2',
'row': 4,
'col': 2,
'well_id': 38,
},
39: {
'col_and_row': u'D3',
'row': 4,
'col': 3,
'well_id': 39,
},
40: {
'col_and_row': u'D4',
'row': 4,
'col': 4,
'well_id': 40,
},
41: {
'col_and_row': u'D5',
'row': 4,
'col': 5,
'well_id': 41,
},
42: {
'col_and_row': u'D6',
'row': 4,
'col': 6,
'well_id': 42,
},
43: {
'col_and_row': u'D7',
'row': 4,
'col': 7,
'well_id': 43,
},
44: {
'col_and_row': u'D8',
'row': 4,
'col': 8,
'well_id': 44,
},
45: {
'col_and_row': u'D9',
'row': 4,
'col': 9,
'well_id': 45,
},
46: {
'col_and_row': u'D10',
'row': 4,
'col': 10,
'well_id': 46,
},
47: {
'col_and_row': u'D11',
'row': 4,
'col': 11,
'well_id': 47,
},
48: {
'col_and_row': u'D12',
'row': 4,
'col': 12,
'well_id': 48,
},
49: {
'col_and_row': u'E1',
'row': 5,
'col': 1,
'well_id': 49,
},
50: {
'col_and_row': u'E2',
'row': 5,
'col': 2,
'well_id': 50,
},
51: {
'col_and_row': u'E3',
'row': 5,
'col': 3,
'well_id': 51,
},
52: {
'col_and_row': u'E4',
'row': 5,
'col': 4,
'well_id': 52,
},
53: {
'col_and_row': u'E5',
'row': 5,
'col': 5,
'well_id': 53,
},
54: {
'col_and_row': u'E6',
'row': 5,
'col': 6,
'well_id': 54,
},
55: {
'col_and_row': u'E7',
'row': 5,
'col': 7,
'well_id': 55,
},
56: {
'col_and_row': u'E8',
'row': 5,
'col': 8,
'well_id': 56,
},
57: {
'col_and_row': u'E9',
'row': 5,
'col': 9,
'well_id': 57,
},
58: {
'col_and_row': u'E10',
'row': 5,
'col': 10,
'well_id': 58,
},
59: {
'col_and_row': u'E11',
'row': 5,
'col': 11,
'well_id': 59,
},
60: {
'col_and_row': u'E12',
'row': 5,
'col': 12,
'well_id': 60,
},
61: {
'col_and_row': u'F1',
'row': 6,
'col': 1,
'well_id': 61,
},
62: {
'col_and_row': u'F2',
'row': 6,
'col': 2,
'well_id': 62,
},
63: {
'col_and_row': u'F3',
'row': 6,
'col': 3,
'well_id': 63,
},
64: {
'col_and_row': u'F4',
'row': 6,
'col': 4,
'well_id': 64,
},
65: {
'col_and_row': u'F5',
'row': 6,
'col': 5,
'well_id': 65,
},
66: {
'col_and_row': u'F6',
'row': 6,
'col': 6,
'well_id': 66,
},
67: {
'col_and_row': u'F7',
'row': 6,
'col': 7,
'well_id': 67,
},
68: {
'col_and_row': u'F8',
'row': 6,
'col': 8,
'well_id': | |
"""
Contains the abstraction for a commit in GitLab.
"""
from typing import Optional
from typing import Set
from typing import Union
from urllib.parse import quote_plus
from IGitt import ElementDoesntExistError
from IGitt.GitHub.GitHubCommit import get_diff_index
from IGitt.GitLab import GitLabMixin
from IGitt.GitLab import GitLabOAuthToken, GitLabPrivateToken
from IGitt.GitLab.GitLabComment import GitLabComment
from IGitt.GitLab.GitLabRepository import GitLabRepository
from IGitt.GitLab.GitLabIssue import GitLabIssue
from IGitt.Interfaces import get, post
from IGitt.Interfaces.Comment import CommentType
from IGitt.Interfaces.Commit import Commit
from IGitt.Interfaces.CommitStatus import Status, CommitStatus
GL_STATE_TRANSLATION = {
Status.RUNNING: 'running',
Status.CANCELED: 'canceled',
Status.ERROR: 'failed',
Status.FAILED: 'failed',
Status.PENDING: 'pending',
Status.SUCCESS: 'success',
Status.MANUAL: 'manual',
Status.CREATED: 'created',
Status.SKIPPED: 'skipped'
}
INV_GL_STATE_TRANSLATION = {val: key for key, val
in GL_STATE_TRANSLATION.items()}
GITLAB_KEYWORD_REGEX = {'fix': r'[Ff]ix(?:e[sd]|ing)?',
'close': r'[Cc]los(?:e[sd]?|ing)',
'resolve': r'[Rr]esolv(?:e[sd]?|ing)'}
class GitLabCommit(GitLabMixin, Commit):
"""
Represents a commit on GitLab.
"""
def __init__(self,
token: Union[GitLabOAuthToken, GitLabPrivateToken],
repository: str,
sha: Optional[str],
branch: Optional[str]=None):
"""
Creates a new GitLabCommit object.
:param token: A Token object to be used for authentication.
:param repository: The full repository name.
:param sha: The full commit SHA, if None given provide a branch.
:param branch: A branch name if SHA is unavailable. Note that lazy
loading won't work in that case.
"""
assert sha or branch, 'Either full SHA or branch name has to be given!'
self._token = token
self._repository = repository
self._sha = sha
self._branch = branch
self._url = '/projects/{id}/repository/commits/{sha}'.format(
id=quote_plus(repository), sha=sha if sha else branch)
@property
def message(self) -> str:
"""
Returns the commit message.
:return: Commit message as string.
"""
return self.data['message']
@property
def sha(self):
"""
Retrieves the SHA of the commit:
>>> from os import environ
>>> commit = GitLabCommit(
... GitLabOAuthToken(environ['GITLAB_TEST_TOKEN']),
... 'gitmate-test-user/test', '674498'
... )
>>> commit.sha
'674498'
:return: A string holding the SHA of the commit.
"""
return self._sha if self._sha else self.data['id']
@property
def repository(self):
"""
Retrieves the repository that holds this commit.
>>> from os import environ
>>> commit = GitLabCommit(
... GitLabOAuthToken(environ['GITLAB_TEST_TOKEN']),
... 'gitmate-test-user/test', '3fc4b86'
... )
>>> commit.repository.full_name
'gitmate-test-user/test'
:return: A usable Repository instance.
"""
return GitLabRepository(self._token, self._repository)
@property
def parent(self):
"""
Retrieves the parent commit. In case of a merge commit the first parent
will be returned.
>>> from os import environ
>>> commit = GitLabCommit(
... GitLabOAuthToken(environ['GITLAB_TEST_TOKEN']),
... 'gitmate-test-user/test', '3fc4b86'
... )
>>> commit.parent.sha
'674498fd415cfadc35c5eb28b8951e800f357c6f'
:return: A Commit object.
"""
return GitLabCommit(self._token, self._repository,
self.data['parent_ids'][0])
def get_statuses(self) -> Set[CommitStatus]:
"""
        Retrieves all commit statuses.
:return: A (frozen)set of CommitStatus objects.
:raises RuntimeError: If something goes wrong (network, auth...).
"""
# rebuild the url with full sha because gitlab doesn't work that way
url = '/projects/{repo}/repository/commits/{sha}/statuses'.format(
repo=quote_plus(self._repository), sha=self.sha)
statuses = get(self._token, self.absolute_url(url))
# Only the first of each context is the one we want
result = set()
contexts = set()
for status in statuses:
if status['name'] not in contexts:
result.add(CommitStatus(
INV_GL_STATE_TRANSLATION[status['status']],
status['description'], status['name'],
status['target_url']))
contexts.add(status['name'])
return result
@property
def combined_status(self) -> Status:
"""
        Retrieves a combined status over all statuses of this commit.
        :return:
            Status.FAILED if any status reports an error, a failure or a
            cancellation
            Status.PENDING if there are no statuses or a status is pending,
            created or still running
            Status.SUCCESS if the remaining statuses are success or manual
:raises AssertionError:
If the status couldn't be matched with any of the possible outcomes
Status.SUCCESS, Status.FAILED and Status.PENDING.
"""
statuses = set(map(lambda status: status.status, self.get_statuses()))
if (
not len(statuses) or
Status.PENDING in statuses or
Status.RUNNING in statuses or
Status.CREATED in statuses):
return Status.PENDING
if (
Status.FAILED in statuses or
Status.ERROR in statuses or
Status.CANCELED in statuses):
return Status.FAILED
assert all(status in {Status.SUCCESS, Status.MANUAL}
for status in statuses)
return Status.SUCCESS
def set_status(self, status: CommitStatus):
"""
Adds the given status to the commit.
>>> from os import environ
>>> commit = GitLabCommit(
... GitLabOAuthToken(environ['GITLAB_TEST_TOKEN']),
        ...     'gitmate-test-user/test', '3fc4b86'
... )
>>> status = CommitStatus(Status.FAILED, 'Theres a problem',
... 'gitmate/test')
>>> commit.set_status(status)
>>> commit.get_statuses().pop().description
'Theres a problem'
If a status with the same context already exists, it will be bluntly
overridden:
>>> status.status = Status.SUCCESS
>>> status.description = "Theres no problem"
>>> commit.set_status(status)
>>> len(commit.get_statuses())
1
>>> commit.get_statuses().pop().description
'Theres no problem'
:param status: The CommitStatus to set to this commit.
:raises RuntimeError: If something goes wrong (network, auth...).
"""
data = {'state': GL_STATE_TRANSLATION[status.status],
'target_url': status.url, 'description': status.description,
'name': status.context}
status_url = '/projects/{repo}/statuses/{sha}'.format(
repo=quote_plus(self._repository), sha=self.sha)
post(self._token, self.absolute_url(status_url), data)
def get_patch_for_file(self, filename: str):
r"""
Retrieves the unified diff for the commit.
>>> from os import environ
>>> commit = GitLabCommit(
... GitLabOAuthToken(environ['GITLAB_TEST_TOKEN']),
... 'gitmate-test-user/test', '3fc4b86'
... )
>>> assert (commit.get_patch_for_file('README.md') ==
... '--- a/README.md\n+++ b/README.md\n@@ -1,2 +1,4 @@\n '
... '# test\n a test repo\n+\n+a tst pr\n')
But only if it exists!
>>> commit.get_patch_for_file('IDONTEXISTFILE')
Traceback (most recent call last):
...
IGitt.ElementDoesntExistError: The file does not exist.
:param filename: The file to retrieve patch for.
:return: A string containing the patch.
:raises ElementDoesntExistError: If the given filename does not exist.
"""
diff = get(self._token, self.url + '/diff')
for patch in diff:
if filename in (patch['new_path'], patch['old_path']):
return patch['diff']
raise ElementDoesntExistError('The file does not exist.')
def comment(self, message: str, file: Optional[str]=None,
line: Optional[int]=None,
mr_number: Optional[int]=None) -> GitLabComment:
"""
Places a comment on the commit.
>>> from os import environ
>>> commit = GitLabCommit(
... GitLabOAuthToken(environ['GITLAB_TEST_TOKEN']),
... 'gitmate-test-user/test', '3fc4b86'
... )
So this line places a comment on the bottom of the commit,
not associated to any particular piece of code:
>>> commit.comment("An issue is here!")
However, we can also comment on a particular file and line, if that is
included in the diff:
>>> commit.comment("Here in line 4, there's a spelling mistake!",
... 'README.md', 4)
        If you supply the ``mr_number`` argument, the comment will appear in
the review UI of that pull request:
>>> commit.comment("Here in line 4, there's a spelling mistake!",
... 'README.md', 4, mr_number=7)
Beat that! Of course, there's a lot of error handling. If you give the
wrong file, the comment will appear below the commit with a note about
the commit, file and line:
>>> commit.comment("Oh, this'll end up below!!", 'READMENOT.md', 4)
Also if the line isn't contained in the diff GitLab won't accept that
and it'll also end up below - sorry!
>>> commit.comment("Oh, this'll too end up below!!", 'README.md', 8)
If you give a pull request, the comment will appear on the PR instead:
>>> commit.comment("Oh, this'll too end up on the PR.",
... 'README.md', 8, mr_number=7)
:param message: The body of the comment.
:param file: The file to place the comment, relative to repo root.
:param line: The line in the file in the comment or None.
:param mr_number: The iid of a merge request if this should end up in
the discussions UI of the merge request.
"""
data = {'note': message, 'line_type': 'new'}
if file is not None and line is not None:
try:
patch = self.get_patch_for_file(file)
index = get_diff_index(patch, line)
if index: # Else, fallback to comment below file
data['line'] = index
data['path'] = file
except ElementDoesntExistError:
pass # Fallback to comment below the file
if 'line' not in data:
file_str = '' if file is None else ', file ' + file
line_str = '' if line is None else ', line ' + str(line)
data['note'] = ('Comment on ' + self.sha + file_str + line_str +
'.\n\n' + data['note'])
        # post a comment on the commit itself
        if ('line' in data and 'path' in data) or mr_number is None:
            url = '/projects/{id}/repository/commits/{sha}/comments'.format(
                id=quote_plus(self._repository), sha=self.sha)
            post(self._token, self.absolute_url(url), data)
            return
# fallback to post the comment on relevant merge request
if mr_number is not None:
            data['body'] = data['note']  # MR notes expect 'body' instead of 'note'
url = '/projects/{id}/merge_requests/{mr_iid}/notes'.format(
id=quote_plus(self._repository), mr_iid=mr_number)
res = post(self._token, self.absolute_url(url), data)
return GitLabComment.from_data(res, self._token, self._repository,
mr_number, CommentType.MERGE_REQUEST,
res['id'])
@property
def unified_diff(self):
"""
Retrieves the unified diff for the commit excluding the diff index.
"""
return '\n'.join(patch['diff']
for patch in get(self._token, self.url + '/diff')
)
@property
def closes_issues(self) -> Set[GitLabIssue]:
"""
Returns a set of GitLabIssue objects which would be closed upon merging
this pull request.
"""
issues = self._get_closes_issues()
return {GitLabIssue(self._token, repo_name, number)
for number, repo_name in issues}
@property
    def mentioned_issues(self) -> Set[GitLabIssue]:
        """
        Returns a set of GitLabIssue objects which are mentioned in the
        commit message.
        """
        issues = self._get_mentioned_issues()
        return {GitLabIssue(self._token, repo_name, number)
                for number, repo_name in issues}
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version 3.10.1-0-g8feb16b3)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class wxRaven_Webservices_SettingsPanel
###########################################################################
class wxRaven_Webservices_SettingsPanel ( wx.Panel ):
def __init__( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 505,435 ), style = wx.TAB_TRAVERSAL, name = wx.EmptyString ):
wx.Panel.__init__ ( self, parent, id = id, pos = pos, size = size, style = style, name = name )
bSizer5 = wx.BoxSizer( wx.VERTICAL )
bSizer6 = wx.BoxSizer( wx.VERTICAL )
bSizer7 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap4 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/connexion_share_3.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer7.Add( self.m_bitmap4, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText12 = wx.StaticText( self, wx.ID_ANY, u"JSON Webservice Daemon", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText12.Wrap( -1 )
self.m_staticText12.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, wx.EmptyString ) )
bSizer7.Add( self.m_staticText12, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText18 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText18.Wrap( -1 )
bSizer7.Add( self.m_staticText18, 1, wx.ALL, 5 )
self.m_serviceStatusText = wx.StaticText( self, wx.ID_ANY, u"[STATUS]", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_serviceStatusText.Wrap( -1 )
bSizer7.Add( self.m_serviceStatusText, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_bpButton1 = wx.BitmapButton( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_bpButton1.SetBitmap( wx.Bitmap( u"res/default_style/normal/process_stop.png", wx.BITMAP_TYPE_ANY ) )
bSizer7.Add( self.m_bpButton1, 0, wx.ALL, 5 )
bSizer6.Add( bSizer7, 0, wx.EXPAND, 5 )
bSizer5.Add( bSizer6, 0, wx.EXPAND, 5 )
bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
self.m_enableService = wx.CheckBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer8.Add( self.m_enableService, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"Enable JSON Webservice", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText4.Wrap( -1 )
bSizer8.Add( self.m_staticText4, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
bSizer9 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText8 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText8.Wrap( -1 )
bSizer9.Add( self.m_staticText8, 0, wx.ALL, 5 )
bSizer8.Add( bSizer9, 1, wx.EXPAND, 5 )
bSizer5.Add( bSizer8, 0, wx.EXPAND, 5 )
self.m_staticline13 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bSizer5.Add( self.m_staticline13, 0, wx.EXPAND|wx.ALL, 5 )
bSizer10 = wx.BoxSizer( wx.VERTICAL )
bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap11 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/computer_icon.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer11.Add( self.m_bitmap11, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText20 = wx.StaticText( self, wx.ID_ANY, u"Server IP :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText20.Wrap( -1 )
bSizer11.Add( self.m_staticText20, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_serverIP = wx.TextCtrl( self, wx.ID_ANY, u"127.0.0.1", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_serverIP.SetMaxLength( 0 )
bSizer11.Add( self.m_serverIP, 1, wx.ALL, 5 )
self.m_staticText201 = wx.StaticText( self, wx.ID_ANY, u"Server Port :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText201.Wrap( -1 )
bSizer11.Add( self.m_staticText201, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_serverPort = wx.TextCtrl( self, wx.ID_ANY, u"9090", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_serverPort.SetMaxLength( 0 )
bSizer11.Add( self.m_serverPort, 0, wx.ALL, 5 )
bSizer10.Add( bSizer11, 0, wx.EXPAND, 5 )
bSizer12 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap1112 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/changelog_obj.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer12.Add( self.m_bitmap1112, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText2022 = wx.StaticText( self, wx.ID_ANY, u"Service Log :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2022.Wrap( -1 )
bSizer12.Add( self.m_staticText2022, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_filePickerService = wx.FilePickerCtrl( self, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.log", wx.DefaultPosition, wx.DefaultSize, wx.FLP_OVERWRITE_PROMPT|wx.FLP_SAVE|wx.FLP_USE_TEXTCTRL )
bSizer12.Add( self.m_filePickerService, 1, wx.ALL, 5 )
bSizer10.Add( bSizer12, 1, wx.EXPAND, 5 )
bSizer13 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap111 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/webservice_admin.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer13.Add( self.m_bitmap111, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText202 = wx.StaticText( self, wx.ID_ANY, u"Webservice Admin Token :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText202.Wrap( -1 )
bSizer13.Add( self.m_staticText202, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_textAdminToken = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer13.Add( self.m_textAdminToken, 1, wx.ALL, 5 )
self.m_bpButtonGenerateAdmin = wx.BitmapButton( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_bpButtonGenerateAdmin.SetBitmap( wx.Bitmap( u"res/default_style/normal/refresh.png", wx.BITMAP_TYPE_ANY ) )
bSizer13.Add( self.m_bpButtonGenerateAdmin, 0, wx.ALL, 5 )
bSizer10.Add( bSizer13, 1, wx.EXPAND, 5 )
bSizer131 = wx.BoxSizer( wx.HORIZONTAL )
self.m_forceNetwork = wx.CheckBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer131.Add( self.m_forceNetwork, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_bitmap1113 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/network.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer131.Add( self.m_bitmap1113, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText2023 = wx.StaticText( self, wx.ID_ANY, u"Force Network :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2023.Wrap( -1 )
bSizer131.Add( self.m_staticText2023, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
m_NetworkChoiceChoices = []
self.m_NetworkChoice = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_NetworkChoiceChoices, 0 )
self.m_NetworkChoice.SetSelection( 0 )
bSizer131.Add( self.m_NetworkChoice, 1, wx.ALL, 5 )
bSizer10.Add( bSizer131, 1, wx.EXPAND, 5 )
bSizer5.Add( bSizer10, 0, wx.EXPAND, 5 )
self.m_staticline131 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bSizer5.Add( self.m_staticline131, 0, wx.EXPAND|wx.ALL, 5 )
bSizer14 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap1111 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/blacklist.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer14.Add( self.m_bitmap1111, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText2021 = wx.StaticText( self, wx.ID_ANY, u"Do Not Start Modules :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2021.Wrap( -1 )
bSizer14.Add( self.m_staticText2021, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
bSizer5.Add( bSizer14, 0, wx.EXPAND, 5 )
bSizer15 = wx.BoxSizer( wx.HORIZONTAL )
bSizer16 = wx.BoxSizer( wx.VERTICAL )
m_checkList1Choices = []
self.m_checkList1 = wx.CheckListBox( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_checkList1Choices, 0 )
bSizer16.Add( self.m_checkList1, 1, wx.ALL|wx.EXPAND, 5 )
bSizer15.Add( bSizer16, 1, wx.EXPAND, 5 )
bSizer5.Add( bSizer15, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer5 )
self.Layout()
self.m_timer2 = wx.Timer()
self.m_timer2.SetOwner( self, wx.ID_ANY )
self.m_timer2.Start( 1000 )
def __del__( self ):
pass
###########################################################################
## Class wxRaven_Webservices_RemoteJobs_Settings
###########################################################################
class wxRaven_Webservices_RemoteJobs_Settings ( wx.Panel ):
def __init__( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 500,385 ), style = wx.TAB_TRAVERSAL, name = wx.EmptyString ):
wx.Panel.__init__ ( self, parent, id = id, pos = pos, size = size, style = style, name = name )
bSizer14 = wx.BoxSizer( wx.VERTICAL )
bSizer15 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap7 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/job_remote_icon.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer15.Add( self.m_bitmap7, 0, wx.ALL, 5 )
self.m_staticText12 = wx.StaticText( self, wx.ID_ANY, u"Remote Jobs Webservices :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText12.Wrap( -1 )
self.m_staticText12.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, wx.EmptyString ) )
bSizer15.Add( self.m_staticText12, 0, wx.ALL, 5 )
bSizer14.Add( bSizer15, 0, wx.EXPAND, 5 )
bSizer24 = wx.BoxSizer( wx.HORIZONTAL )
self.m_MeasureJobQueryResult = wx.CheckBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_MeasureJobQueryResult.SetValue(True)
bSizer24.Add( self.m_MeasureJobQueryResult, 0, wx.ALL, 5 )
self.m_bitmap10 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/speedmeter_icon_2.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer24.Add( self.m_bitmap10, 0, wx.ALL, 5 )
self.m_staticText15 = wx.StaticText( self, wx.ID_ANY, u"Measure and limit remote jobs query result size (see WS Limiter)", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText15.Wrap( -1 )
bSizer24.Add( self.m_staticText15, 0, wx.ALL, 5 )
bSizer14.Add( bSizer24, 0, wx.EXPAND, 5 )
self.m_staticline3 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bSizer14.Add( self.m_staticline3, 0, wx.EXPAND |wx.ALL, 5 )
bSizer141 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap1111 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/blacklist.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer141.Add( self.m_bitmap1111, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText2021 = wx.StaticText( self, wx.ID_ANY, u"Do Not Allow Jobs from remote :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2021.Wrap( -1 )
bSizer141.Add( self.m_staticText2021, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
bSizer14.Add( bSizer141, 0, wx.EXPAND, 5 )
bSizer42 = wx.BoxSizer( wx.HORIZONTAL )
self.m_MeasureJobQueryResult1 = wx.CheckBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_MeasureJobQueryResult1.SetValue(True)
self.m_MeasureJobQueryResult1.Enable( False )
bSizer42.Add( self.m_MeasureJobQueryResult1, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText34 = wx.StaticText( self, wx.ID_ANY, u"Redirect to :", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText34.Wrap( -1 )
bSizer42.Add( self.m_staticText34, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
bSizer14.Add( bSizer42, 0, wx.EXPAND, 5 )
bSizer421 = wx.BoxSizer( wx.HORIZONTAL )
m_choiceJobRedirectionChoices = [ u"plugins.Webservices.jobs.RemoteJobs_NotAllowedJob.Job_NotAllowedRemoteJob", u"plugins.Webservices.jobs.RemoteJobs_NotAllowedJob.Job_NotAllowedRemoteJobAlternate" ]
self.m_choiceJobRedirection = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choiceJobRedirectionChoices, 0 )
self.m_choiceJobRedirection.SetSelection( 0 )
bSizer421.Add( self.m_choiceJobRedirection, 0, wx.ALL, 5 )
bSizer14.Add( bSizer421, 0, wx.EXPAND, 5 )
bSizer151 = wx.BoxSizer( wx.HORIZONTAL )
bSizer16 = wx.BoxSizer( wx.VERTICAL )
m_checkList1Choices = []
self.m_checkList1 = wx.CheckListBox( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_checkList1Choices, 0 )
bSizer16.Add( self.m_checkList1, 1, wx.ALL|wx.EXPAND, 5 )
bSizer151.Add( bSizer16, 1, wx.EXPAND, 5 )
bSizer14.Add( bSizer151, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer14 )
self.Layout()
def __del__( self ):
pass
###########################################################################
## Class wxRaven_Webservices_ServerOptions_Settings
###########################################################################
class wxRaven_Webservices_ServerOptions_Settings ( wx.Panel ):
def __init__( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 500,376 ), style = wx.TAB_TRAVERSAL, name = wx.EmptyString ):
wx.Panel.__init__ ( self, parent, id = id, pos = pos, size = size, style = style, name = name )
bSizer25 = wx.BoxSizer( wx.VERTICAL )
bSizer26 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bitmap11 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/settings_2.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer26.Add( self.m_bitmap11, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText16 = wx.StaticText( self, wx.ID_ANY, u"Webservice : Services options and limiter", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText16.Wrap( -1 )
self.m_staticText16.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, wx.EmptyString ) )
bSizer26.Add( self.m_staticText16, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText17 = wx.StaticText( self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText17.Wrap( -1 )
bSizer26.Add( self.m_staticText17, 1, wx.ALL, 5 )
self.m_bpButton3 = wx.BitmapButton( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_bpButton3.SetBitmap( wx.Bitmap( u"res/default_style/normal/help_contents.png", wx.BITMAP_TYPE_ANY ) )
        self.m_bpButton3.SetToolTip( u"This section represents the server_config.json file at the plugin location.\n\nYou can set up webservice options and limitations.\n\nUserSession (if the API service is enabled) provides a User Session Token mechanism to identify specific wxRaven users and grant them increased limits compared to an anonymous query.\n" )
bSizer26.Add( self.m_bpButton3, 0, wx.ALL, 5 )
bSizer25.Add( bSizer26, 0, wx.EXPAND, 5 )
self.m_staticline4 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
bSizer25.Add( self.m_staticline4, 0, wx.EXPAND |wx.ALL, 5 )
bSizer27 = wx.BoxSizer( wx.HORIZONTAL )
self.m_checkBox4 = wx.CheckBox( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_checkBox4.SetValue(True)
self.m_checkBox4.Enable( False )
bSizer27.Add( self.m_checkBox4, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_bitmap12 = wx.StaticBitmap( self, wx.ID_ANY, wx.Bitmap( u"res/default_style/normal/unknown_user.png", wx.BITMAP_TYPE_ANY ), wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer27.Add( self.m_bitmap12, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.m_staticText18 = wx.StaticText( self, wx.ID_ANY, u"Anonymous Query Result Max Size :", wx.DefaultPosition, | |
#!/usr/bin/env python
# Functions:
# parse_phenotypes
# parse_groups
# parse_ignore_samples
#
# ignore_samples
# calc_association
# center_scores
#
# write_prism_file
# plot_boxplot
# plot_waterfall
def parse_phenotypes(phenotypes):
# list of phenotypes.
# e.g. ["STEM", "EMT"]
# Return (potentially empty) list of phenotypes.
return phenotypes
def parse_groups(center_by_groups):
# Return tuple of batch_header, list of group 1, list of group 2.
# Format: <BATCH_HEADER>;
# <GROUP 1 VALUE>[,<GROUP 1 VALUE>,...];
# <GROUP 2 VALUE>[,<GROUP 2 VALUE>,...]
# If not given, return a tuple of None's.
if not center_by_groups:
return None, None, None
x = center_by_groups.split(";")
assert len(x) == 3
batch_header, x1, x2 = x
group1 = x1.split(",")
group2 = x2.split(",")
return batch_header, group1, group2
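# Example of the expected format (hypothetical values):
# parse_groups("batch;A,B;C") returns ("batch", ["A", "B"], ["C"]).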
def parse_ignore_samples(ignore_samples):
# Return a tuple of <annot>, <value>
# Format: <annot>,<value>
x = ignore_samples.split(",")
assert len(x) == 2
return x
def ignore_samples(M, clinical_annots, ignore):
x = parse_ignore_samples(ignore)
annot, value = x
assert annot in clinical_annots, "Missing annot: %s" % annot
values = clinical_annots[annot]
I = [] # indexes to keep
for i in range(len(values)):
if value != values[i]:
I.append(i)
assert len(I) < len(values), "I could not find any %s=%s" % (annot, value)
M_f = M.matrix(None, I)
annots_f = {}
for name, values in clinical_annots.iteritems():
values_f = [values[i] for i in I]
annots_f[name] = values_f
assert len(values_f) == M_f.ncol()
return M_f, annots_f
def calc_association(phenotypes, scores, ignore_insufficient_groups):
# Return a dictionary with keys:
# n Number of samples.
# m Number of groups.
# scores n-list of <float>
# delta None or <float>
# phenotypes n-list of <string>
# groups n-list of <int> [0, length(group_names)-1]
# group_names m-list of <string> (unique list of pheno)
# num_samples dict of <group (int)> : <int>
# mean_score dict of <group (int)> : <float>
# p_value <float>
# relationship <string>
#
# May return None if there is only 1 group, and
# ignore_insufficient_groups is a true value.
from genomicode import jmath
from genomicode import sortlib
# Select only the samples with phenotype and score information.
I1 = [i for (i, x) in enumerate(phenotypes) if x]
I2 = [i for (i, x) in enumerate(scores) if x != ""]
I = sorted(set.intersection(set(I1), set(I2)))
assert I, "No valid samples."
phenotypes = [phenotypes[i] for i in I]
scores = [float(scores[i]) for i in I]
# Figure out the groupings.
#group_names = sorted({}.fromkeys(phenotypes))
group_names = sortlib.sort_natural({}.fromkeys(phenotypes))
if len(group_names) < 2 and ignore_insufficient_groups:
return None
assert len(group_names) >= 2, "Need at least 2 groups (%s)." % \
str(group_names)
groups = [None] * len(phenotypes)
for i in range(len(phenotypes)):
x = group_names.index(phenotypes[i])
groups[i] = x
# Calculate the association.
group2scores = {} # group -> list of scores
for i in range(len(scores)):
n = groups[i]
if n not in group2scores:
group2scores[n] = []
group2scores[n].append(scores[i])
y = scores
x = [[0]*len(group_names) for i in range(len(y))]
for i in range(len(groups)):
x[i][groups[i]] = 1
jmath.start_R()
jmath.R_equals(x, "x")
jmath.R_equals(y, "y")
jmath.R("m <- aov(y~x)")
p_value = jmath.R('summary(m)[[1]][["Pr(>F)"]][1]')[0]
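    # The R call above fits a one-way ANOVA of score against one-hot group
    # indicators; the extracted p-value is Pr(>F) for the group effect.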
# Count other things.
num_samples = {}
for n in group2scores:
num_samples[n] = len(group2scores[n])
mean_score = {}
for n in group2scores:
mean_score[n] = jmath.mean(group2scores[n])
# If there are exactly 2 groups, then find the difference between
# the two groups.
    delta = None  # difference between the two group means (set only when there are exactly 2 groups)
if len(group_names) == 2:
delta = mean_score[1] - mean_score[0]
# Figure out the relationship.
relationship = ""
assert len(group_names) >= 2
high_score = None
for n, score in mean_score.iteritems():
if high_score is not None and score <= high_score:
continue
high_score = score
x1 = "Higher"
if len(group_names) > 2:
x1 = "Highest"
relationship = "%s in %s" % (x1, group_names[n])
SCORE = {}
SCORE["n"] = len(scores)
SCORE["m"] = len(group_names)
SCORE["scores"] = scores
SCORE["phenotypes"] = phenotypes
SCORE["groups"] = groups
SCORE["group_names"] = group_names
SCORE["num_samples"] = num_samples
SCORE["mean_score"] = mean_score
SCORE["delta"] = delta
SCORE["p_value"] = p_value
SCORE["relationship"] = relationship
return SCORE
def center_scores(scores, batches, phenotypes, group1, group2):
from genomicode import jmath
assert len(scores) == len(phenotypes)
assert len(batches) == len(phenotypes)
batches_all = sorted({}.fromkeys(batches))
scores_c = [None] * len(scores)
for batch in batches_all:
I = [i for i in range(len(batches)) if batches[i] == batch]
scores1, scores2 = [], []
for i in I:
pheno = phenotypes[i]
if pheno in group1:
scores1.append(scores[i])
elif pheno in group2:
scores2.append(scores[i])
else:
                raise AssertionError("%s not in groups" % pheno)
assert scores1, "No samples from group1 in batch %s" % batch
assert scores2, "No samples from group2 in batch %s" % batch
mean1 = jmath.mean(scores1)
mean2 = jmath.mean(scores2)
n = (mean1 + mean2)/2.0
for i in I:
scores_c[i] = scores[i] - n
assert None not in scores_c
return scores_c
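# Illustrative centering (hypothetical numbers): within one batch, if the
# group1 scores average 2.0 and the group2 scores average 4.0, every score
# in that batch is shifted down by (2.0 + 4.0) / 2.0 = 3.0, so the two
# group means end up symmetric around zero.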
def write_prism_file(filename, scores, phenotypes, group_names):
for x in phenotypes:
assert x in group_names
pheno2scores = {}
for pheno, score in zip(phenotypes, scores):
if pheno not in pheno2scores:
pheno2scores[pheno] = []
pheno2scores[pheno].append(score)
matrix = []
matrix.append(group_names)
x = [[""]*len(group_names) for i in range(len(scores))]
matrix.extend(x)
    for j in range(len(group_names)):
        group_scores = pheno2scores.get(group_names[j], [])
        for i in range(len(group_scores)):
            matrix[i+1][j] = group_scores[i]
# Delete all the empty rows in the bottom.
while matrix:
x = matrix[-1]
if x == [""]*len(x):
del matrix[-1]
else:
break
    with open(filename, 'w') as handle:
        for x in matrix:
            print >>handle, "\t".join(map(str, x))
## def plot_boxplot(
## filename, scores, phenotypes, group_names, p_value, gene_id,
## mar_bottom, mar_left, mar_top):
## import os
## from genomicode import jmath
## from genomicode.jmath import R_fn, R_var, R_equals
## from genomicode import config
## xlabel_size = 1.0
## height = 1600
## width = 1600
## pheno2scores = {}
## for pheno, score in zip(phenotypes, scores):
## if pheno not in pheno2scores:
## pheno2scores[pheno] = []
## pheno2scores[pheno].append(score)
## R = jmath.start_R()
## path = config.changlab_Rlib
## plotlib = os.path.join(path, "plotlib.R")
## assert os.path.exists(plotlib), "I cannot find: %s" % plotlib
## R_fn("source", plotlib)
## #main = R_var("NA")
## main = gene_id
## #sub = ""
## sub = "%.2g" % p_value
## xlab = ""
## ylab = "Gene Expression"
## labels = group_names
## col = R_var("NULL")
## lwd = 2
## las = 3 # vertical labels
## at = R_var("NULL")
## if labels:
## at = range(1, len(labels)+1)
## cex_labels = 1.25*xlabel_size
## #cex_legend = 1
## #cex_lab = 1.5
## cex_xlab = 2.0
## cex_ylab = 2.0
## cex_sub = 1.5
## R_equals(labels, "labels")
## R_equals(at, "at")
## R("X <- list()")
## for i, n in enumerate(group_names):
## s = pheno2scores.get(n, [])
## R_equals(s, "s")
## R("X[[%d]] <- s" % (i+1))
## bm_type = "png16m"
## if filename.lower().endswith(".pdf"):
## bm_type = "pdfwrite"
## R_fn(
## "bitmap", filename, type=bm_type,
## height=height, width=width, units="px", res=300)
## # Set the margins.
## # default is 5.1, 4.1, 4.1, 2.1
## x = 10*mar_bottom, 5*mar_left, 4*mar_top, 2
## mar = [x+0.1 for x in x]
## R_fn("par", mar=mar, RETVAL="op")
## R_fn(
## "boxplot", R_var("X"), col=col, main="", xlab="", ylab="",
## axes=R_var("FALSE"), pch=19, cex=1, ylim=R_var("NULL"))
## # Make plot area solid white.
## jmath.R('usr <- par("usr")')
## jmath.R('rect(usr[1], usr[3], usr[2], usr[4], col="#FFFFFF")')
## R_fn(
## "boxplot", R_var("X"), col=col, main="", xlab="", ylab="",
## axes=R_var("FALSE"), pch=19, cex=1, ylim=R_var("NULL"),
## add=R_var("TRUE"))
## R_fn("box", lwd=lwd)
## R_fn(
## "axis", 1, lwd=lwd, labels=R_var("labels"),
## at=R_var("at"), las=las, **{ "cex.axis" : cex_labels })
## R_fn(
## "axis", 2, lwd=lwd, **{ "cex.axis" : 1.5 })
## R_fn(
## "title", main=main, sub=sub, xlab=xlab, ylab=ylab,
## **{ "cex.lab" : cex_xlab, "cex.main" : 2.0, "cex.sub" : cex_sub,
## "col.sub" : "#A60400" })
## R("par(op)")
## R_fn("dev.off")
def plot_waterfall(
filename, scores, phenotypes, group_names, sample_names, p_value, gene_id,
mar_bottom, mar_left, mar_top, xlabel_off):
import os
from genomicode import jmath
from genomicode.jmath import R_fn, R_var, R_equals
from genomicode import config
from genomicode import colorlib
import analyze_clinical_outcome as aco
# Sort by increasing score.
O = jmath.order(scores)
scores = [scores[i] for i in O]
phenotypes = [phenotypes[i] for i in O]
sample_names = [sample_names[i] for i in O]
# Plot the colors.
assert len(group_names) >= 2
colors = ['#1533AD', '#FFB300']
if len(group_names) > 2:
x = colorlib.bild_colors(len(group_names))
x = [aco.colortuple2hex(*x) for x in x]
colors = x
xlabel_size = 1.0
height = 1600
width = 1600
R = jmath.start_R()
path = config.changlab_Rlib
plotlib = os.path.join(path, "plotlib.R")
assert os.path.exists(plotlib), "I cannot find: %s" % plotlib
R_fn("source", plotlib)
#main = R_var("NA")
main = gene_id
sub = ""
#sub = "%.2g" % p_value
xlab = ""
ylab = "Gene Expression"
labels = sample_names
col = [colors[group_names.index(x)] for x in phenotypes]
x = range(1, len(scores)+1)
y = scores
r = (max(y)-min(y))*0.10
mn = min(y)-r
mx = max(y)+r
    ylim = (mn, mx)
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=protected-access
import collections
import datetime
import unittest.mock as mock
from unittest import TestCase
from esrally import exceptions, config
from esrally.mechanic import supplier, team
class RevisionExtractorTests(TestCase):
def test_single_revision(self):
self.assertDictEqual({"elasticsearch": "67c2f42", "all": "67c2f42"}, supplier._extract_revisions("67c2f42"))
self.assertDictEqual({"elasticsearch": "current", "all": "current"}, supplier._extract_revisions("current"))
self.assertDictEqual({"elasticsearch": "@2015-01-01-01:00:00", "all": "@2015-01-01-01:00:00"},
supplier._extract_revisions("@2015-01-01-01:00:00"))
def test_multiple_revisions(self):
self.assertDictEqual({"elasticsearch": "67c2f42", "x-pack": "@2015-01-01-01:00:00", "some-plugin": "current"},
supplier._extract_revisions("elasticsearch:67c2f42,x-pack:@2015-01-01-01:00:00,some-plugin:current"))
def test_invalid_revisions(self):
with self.assertRaises(exceptions.SystemSetupError) as ctx:
supplier._extract_revisions("elasticsearch 67c2f42,x-pack:current")
self.assertEqual("Revision [elasticsearch 67c2f42] does not match expected format [name:revision].", ctx.exception.args[0])
class SourceRepositoryTests(TestCase):
@mock.patch("esrally.utils.git.head_revision", autospec=True)
@mock.patch("esrally.utils.git.pull", autospec=True)
@mock.patch("esrally.utils.git.clone", autospec=True)
@mock.patch("esrally.utils.git.is_working_copy", autospec=True)
    def test_initial_checkout_latest(self, mock_is_working_copy, mock_clone, mock_pull, mock_head_revision):
# before cloning, it is not a working copy, afterwards it is
mock_is_working_copy.side_effect = [False, True]
mock_head_revision.return_value = "HEAD"
s = supplier.SourceRepository(name="Elasticsearch", remote_url="some-github-url", src_dir="/src")
s.fetch("latest")
mock_is_working_copy.assert_called_with("/src")
mock_clone.assert_called_with("/src", "some-github-url")
mock_pull.assert_called_with("/src")
mock_head_revision.assert_called_with("/src")
@mock.patch("esrally.utils.git.head_revision", autospec=True)
@mock.patch("esrally.utils.git.pull")
@mock.patch("esrally.utils.git.clone")
@mock.patch("esrally.utils.git.is_working_copy", autospec=True)
def test_checkout_current(self, mock_is_working_copy, mock_clone, mock_pull, mock_head_revision):
mock_is_working_copy.return_value = True
mock_head_revision.return_value = "HEAD"
s = supplier.SourceRepository(name="Elasticsearch", remote_url="some-github-url", src_dir="/src")
s.fetch("current")
mock_is_working_copy.assert_called_with("/src")
self.assertEqual(0, mock_clone.call_count)
self.assertEqual(0, mock_pull.call_count)
mock_head_revision.assert_called_with("/src")\
@mock.patch("esrally.utils.git.head_revision", autospec=True)
@mock.patch("esrally.utils.git.checkout")
@mock.patch("esrally.utils.git.pull")
@mock.patch("esrally.utils.git.clone")
@mock.patch("esrally.utils.git.is_working_copy", autospec=True)
def test_checkout_revision_for_local_only_repo(self, mock_is_working_copy, mock_clone, mock_pull, mock_checkout, mock_head_revision):
mock_is_working_copy.return_value = True
mock_head_revision.return_value = "HEAD"
        # local only, we don't specify a remote
s = supplier.SourceRepository(name="Elasticsearch", remote_url=None, src_dir="/src")
s.fetch("67c2f42")
mock_is_working_copy.assert_called_with("/src")
self.assertEqual(0, mock_clone.call_count)
self.assertEqual(0, mock_pull.call_count)
mock_checkout.assert_called_with("/src", "67c2f42")
mock_head_revision.assert_called_with("/src")
@mock.patch("esrally.utils.git.head_revision", autospec=True)
@mock.patch("esrally.utils.git.pull_ts", autospec=True)
@mock.patch("esrally.utils.git.is_working_copy", autospec=True)
def test_checkout_ts(self, mock_is_working_copy, mock_pull_ts, mock_head_revision):
mock_is_working_copy.return_value = True
mock_head_revision.return_value = "HEAD"
s = supplier.SourceRepository(name="Elasticsearch", remote_url="some-github-url", src_dir="/src")
s.fetch("@2015-01-01-01:00:00")
mock_is_working_copy.assert_called_with("/src")
mock_pull_ts.assert_called_with("/src", "2015-01-01-01:00:00")
mock_head_revision.assert_called_with("/src")
@mock.patch("esrally.utils.git.head_revision", autospec=True)
@mock.patch("esrally.utils.git.pull_revision", autospec=True)
@mock.patch("esrally.utils.git.is_working_copy", autospec=True)
def test_checkout_revision(self, mock_is_working_copy, mock_pull_revision, mock_head_revision):
mock_is_working_copy.return_value = True
mock_head_revision.return_value = "HEAD"
s = supplier.SourceRepository(name="Elasticsearch", remote_url="some-github-url", src_dir="/src")
s.fetch("67c2f42")
mock_is_working_copy.assert_called_with("/src")
mock_pull_revision.assert_called_with("/src", "67c2f42")
mock_head_revision.assert_called_with("/src")
def test_is_commit_hash(self):
self.assertTrue(supplier.SourceRepository.is_commit_hash("67c2f42"))
def test_is_not_commit_hash(self):
self.assertFalse(supplier.SourceRepository.is_commit_hash("latest"))
self.assertFalse(supplier.SourceRepository.is_commit_hash("current"))
self.assertFalse(supplier.SourceRepository.is_commit_hash("@2015-01-01-01:00:00"))
class BuilderTests(TestCase):
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.jvm.resolve_path")
def test_build_on_jdk_8(self, jvm_resolve_path, mock_run_subprocess):
jvm_resolve_path.return_value = (8, "/opt/jdk8")
mock_run_subprocess.return_value = False
b = supplier.Builder(src_dir="/src", build_jdk=8, log_dir="logs")
b.build(["./gradlew clean", "./gradlew assemble"])
        calls = [
            # one subprocess invocation per build command, in order
            mock.call("export JAVA_HOME=/opt/jdk8; cd /src; ./gradlew clean > logs/build.log 2>&1"),
            mock.call("export JAVA_HOME=/opt/jdk8; cd /src; ./gradlew assemble > logs/build.log 2>&1"),
        ]
mock_run_subprocess.assert_has_calls(calls)
@mock.patch("esrally.utils.process.run_subprocess")
@mock.patch("esrally.utils.jvm.resolve_path")
def test_build_on_jdk_10(self, jvm_resolve_path, mock_run_subprocess):
jvm_resolve_path.return_value = (10, "/opt/jdk10")
mock_run_subprocess.return_value = False
b = supplier.Builder(src_dir="/src", build_jdk=8, log_dir="logs")
b.build(["./gradlew clean", "./gradlew assemble"])
        calls = [
            # one subprocess invocation per build command, in order
            mock.call("export JAVA_HOME=/opt/jdk10; cd /src; ./gradlew clean > logs/build.log 2>&1"),
            mock.call("export JAVA_HOME=/opt/jdk10; cd /src; ./gradlew assemble > logs/build.log 2>&1"),
        ]
mock_run_subprocess.assert_has_calls(calls)
class TemplateRendererTests(TestCase):
def test_uses_provided_values(self):
renderer = supplier.TemplateRenderer(version="1.2.3", os_name="Windows", arch="arm7")
self.assertEqual("This is version 1.2.3 on Windows with a arm7 CPU.",
renderer.render("This is version {{VERSION}} on {{OSNAME}} with a {{ARCH}} CPU."))
@mock.patch("esrally.utils.sysstats.os_name", return_value="Linux")
@mock.patch("esrally.utils.sysstats.cpu_arch", return_value="X86_64")
    def test_uses_derived_values(self, cpu_arch, os_name):
renderer = supplier.TemplateRenderer(version="1.2.3")
self.assertEqual("This is version 1.2.3 on linux with a x86_64 CPU.",
renderer.render("This is version {{VERSION}} on {{OSNAME}} with a {{ARCH}} CPU."))
class CachedElasticsearchSourceSupplierTests(TestCase):
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("shutil.copy")
@mock.patch("esrally.mechanic.supplier.ElasticsearchSourceSupplier")
def test_does_not_cache_when_no_revision(self, es, copy, ensure_dir):
def add_es_artifact(binaries):
binaries["elasticsearch"] = "/path/to/artifact.tar.gz"
es.fetch.return_value = None
es.add.side_effect = add_es_artifact
# no version / revision provided
renderer = supplier.TemplateRenderer(version=None, os_name="linux", arch="x86_64")
dist_cfg = {
"runtime.jdk.bundled": "true",
"jdk.bundled.release_url": "https://elstc.co/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz"
}
file_resolver = supplier.ElasticsearchFileNameResolver(
distribution_config=dist_cfg,
template_renderer=renderer
)
cached_supplier = supplier.CachedSourceSupplier(distributions_root="/tmp",
source_supplier=es,
file_resolver=file_resolver)
cached_supplier.fetch()
cached_supplier.prepare()
binaries = {}
cached_supplier.add(binaries)
self.assertEqual(0, copy.call_count)
self.assertFalse(cached_supplier.cached)
self.assertIn("elasticsearch", binaries)
self.assertEqual("/path/to/artifact.tar.gz", binaries["elasticsearch"])
@mock.patch("os.path.exists")
@mock.patch("esrally.mechanic.supplier.ElasticsearchSourceSupplier")
def test_uses_already_cached_artifact(self, es, path_exists):
# assume that the artifact is already cached
path_exists.return_value = True
renderer = supplier.TemplateRenderer(version="abc123", os_name="linux", arch="x86_64")
dist_cfg = {
"runtime.jdk.bundled": "true",
"jdk.bundled.release_url": "https://elstc.co/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz"
}
file_resolver = supplier.ElasticsearchFileNameResolver(
distribution_config=dist_cfg,
template_renderer=renderer
)
cached_supplier = supplier.CachedSourceSupplier(distributions_root="/tmp",
source_supplier=es,
file_resolver=file_resolver)
cached_supplier.fetch()
cached_supplier.prepare()
binaries = {}
cached_supplier.add(binaries)
self.assertEqual(0, es.fetch.call_count)
self.assertEqual(0, es.prepare.call_count)
self.assertEqual(0, es.add.call_count)
self.assertTrue(cached_supplier.cached)
self.assertIn("elasticsearch", binaries)
self.assertEqual("/tmp/elasticsearch-abc123-linux-x86_64.tar.gz", binaries["elasticsearch"])
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.exists")
@mock.patch("shutil.copy")
@mock.patch("esrally.mechanic.supplier.ElasticsearchSourceSupplier")
def test_caches_artifact(self, es, copy, path_exists, ensure_dir):
def add_es_artifact(binaries):
binaries["elasticsearch"] = "/path/to/artifact.tar.gz"
path_exists.return_value = False
es.fetch.return_value = "abc123"
es.add.side_effect = add_es_artifact
renderer = supplier.TemplateRenderer(version="abc123", os_name="linux", arch="x86_64")
dist_cfg = {
"runtime.jdk.bundled": "true",
"jdk.bundled.release_url": "https://elstc.co/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz"
}
cached_supplier = supplier.CachedSourceSupplier(distributions_root="/tmp",
source_supplier=es,
file_resolver=supplier.ElasticsearchFileNameResolver(
distribution_config=dist_cfg,
template_renderer=renderer
))
cached_supplier.fetch()
cached_supplier.prepare()
binaries = {}
cached_supplier.add(binaries)
# path is cached now
path_exists.return_value = True
self.assertEqual(1, copy.call_count, "artifact has been copied")
self.assertEqual(1, es.add.call_count, "artifact has been added by internal supplier")
self.assertTrue(cached_supplier.cached)
self.assertIn("elasticsearch", binaries)
# simulate a second attempt
cached_supplier.fetch()
cached_supplier.prepare()
binaries = {}
cached_supplier.add(binaries)
self.assertEqual(1, copy.call_count, "artifact has not been copied twice")
# the internal supplier did not get called again as we reuse the cached artifact
self.assertEqual(1, es.add.call_count, "internal supplier is not called again")
self.assertTrue(cached_supplier.cached)
@mock.patch("esrally.utils.io.ensure_dir")
@mock.patch("os.path.exists")
@mock.patch("shutil.copy")
@mock.patch("esrally.mechanic.supplier.ElasticsearchSourceSupplier")
def test_does_not_cache_on_copy_error(self, es, copy, path_exists, ensure_dir):
def add_es_artifact(binaries):
binaries["elasticsearch"] = "/path/to/artifact.tar.gz"
path_exists.return_value = False
es.fetch.return_value = "abc123"
es.add.side_effect = add_es_artifact
copy.side_effect = OSError("no space left on device")
renderer = supplier.TemplateRenderer(version="abc123", os_name="linux", arch="x86_64")
dist_cfg = {
"runtime.jdk.bundled": "true",
"jdk.bundled.release_url": "https://elstc.co/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz"
}
cached_supplier = supplier.CachedSourceSupplier(distributions_root="/tmp",
source_supplier=es,
file_resolver=supplier.ElasticsearchFileNameResolver(
distribution_config=dist_cfg,
template_renderer=renderer
))
cached_supplier.fetch()
cached_supplier.prepare()
binaries = {}
cached_supplier.add(binaries)
self.assertEqual(1, copy.call_count, "artifact has been copied")
self.assertEqual(1, es.add.call_count, "artifact has been added by internal supplier")
self.assertFalse(cached_supplier.cached)
self.assertIn("elasticsearch", binaries)
# still the uncached artifact
self.assertEqual("/path/to/artifact.tar.gz", binaries["elasticsearch"])
class ElasticsearchFileNameResolverTests(TestCase):
def setUp(self):
super().setUp()
renderer = supplier.TemplateRenderer(version="8.0.0-SNAPSHOT", os_name="linux", arch="x86_64")
dist_cfg = {
"runtime.jdk.bundled": "true",
"jdk.bundled.release_url": "https://elstc.co/elasticsearch-{{VERSION}}-{{OSNAME}}-{{ARCH}}.tar.gz"
}
self.resolver = supplier.ElasticsearchFileNameResolver(
distribution_config=dist_cfg,
template_renderer=renderer
)
def test_resolve(self):
self.resolver.revision = "abc123"
self.assertEqual("elasticsearch-abc123-linux-x86_64.tar.gz", self.resolver.file_name)
def test_artifact_key(self):
self.assertEqual("elasticsearch", self.resolver.artifact_key)
def test_to_artifact_path(self):
file_system_path = "/tmp/test"
self.assertEqual(file_system_path, self.resolver.to_artifact_path(file_system_path))
def test_to_file_system_path(self):
artifact_path = "/tmp/test"
self.assertEqual(artifact_path, self.resolver.to_file_system_path(artifact_path))
class PluginFileNameResolverTests(TestCase):
def setUp(self):
super().setUp()
self.resolver = supplier.PluginFileNameResolver("test-plugin")
def test_resolve(self):
self.resolver.revision = "abc123"
self.assertEqual("test-plugin-abc123.zip", self.resolver.file_name)
def test_artifact_key(self):
self.assertEqual("test-plugin", self.resolver.artifact_key)
def test_to_artifact_path(self):
file_system_path = "/tmp/test"
self.assertEqual(f"file://{file_system_path}", self.resolver.to_artifact_path(file_system_path))
def test_to_file_system_path(self):
file_system_path = "/tmp/test"
self.assertEqual(file_system_path, self.resolver.to_file_system_path(f"file://{file_system_path}"))
class PruneTests(TestCase):
LStat = collections.namedtuple("LStat", "st_ctime")
@mock.patch("os.path.exists")
@mock.patch("os.listdir")
@mock.patch("os.path.isfile")
@mock.patch("os.lstat")
@mock.patch("os.remove")
def test_does_not_touch_nonexisting_directory(self, rm, lstat, isfile, listdir, exists):
exists.return_value = False
supplier._prune(root_path="/tmp/test", max_age_days=7)
self.assertEqual(0, listdir.call_count, "attempted to list a non-existing directory")
@mock.patch("os.path.exists")
@mock.patch("os.listdir")
@mock.patch("os.path.isfile")
@mock.patch("os.lstat")
@mock.patch("os.remove")
def test_prunes_old_files(self, rm, lstat, isfile, listdir, exists):
exists.return_value = True
listdir.return_value = ["elasticsearch-6.8.0.tar.gz", "some-subdir", "elasticsearch-7.3.0-darwin-x86_64.tar.gz"]
isfile.side_effect = [True, False, True]
now = datetime.datetime.now(tz=datetime.timezone.utc)
ten_days_ago = now - datetime.timedelta(days=10)
one_day_ago = now - datetime.timedelta(days=1)
lstat.side_effect = [
# elasticsearch-6.8.0.tar.gz
PruneTests.LStat(st_ctime=int(ten_days_ago.timestamp())),
# elasticsearch-7.3.0-darwin-x86_64.tar.gz
PruneTests.LStat(st_ctime=int(one_day_ago.timestamp()))
]
supplier._prune(root_path="/tmp/test", max_age_days=7)
rm.assert_called_with("/tmp/test/elasticsearch-6.8.0.tar.gz")
class ElasticsearchSourceSupplierTests(TestCase):
def test_no_build(self):
car = team.Car("default", root_path=None, config_paths=[], variables={
"clean_command": "./gradlew clean",
"system.build_command": "./gradlew assemble"
})
renderer = supplier.TemplateRenderer(version=None)
es = supplier.ElasticsearchSourceSupplier(revision="abc",
es_src_dir="/src",
remote_url="",
car=car,
builder=None,
template_renderer=renderer)
es.prepare()
# nothing has happened (intentionally) because there is no builder
def test_build(self):
car = team.Car("default", root_path=None, config_paths=[], variables={
"clean_command": "./gradlew clean",
"system.build_command": "./gradlew assemble"
})
builder = mock.create_autospec(supplier.Builder)
renderer = supplier.TemplateRenderer(version="abc")
es = supplier.ElasticsearchSourceSupplier(revision="abc",
es_src_dir="/src",
remote_url="",
car=car,
builder=builder,
template_renderer=renderer)
es.prepare()
builder.build.assert_called_once_with(["./gradlew clean", "./gradlew assemble"])
def test_raises_error_on_missing_car_variable(self):
car = team.Car("default", root_path=None, config_paths=[], variables={
"clean_command": "./gradlew clean",
# system.build_command is not defined
})
renderer = supplier.TemplateRenderer(version="abc")
builder = mock.create_autospec(supplier.Builder)
es = supplier.ElasticsearchSourceSupplier(revision="abc",
es_src_dir="/src",
remote_url="",
car=car,
builder=builder,
template_renderer=renderer)
with self.assertRaisesRegex(exceptions.SystemSetupError,
"Car \"default\" requires config key \"system.build_command\""):
es.prepare()
self.assertEqual(0, builder.build.call_count)
@mock.patch("glob.glob", lambda p: ["elasticsearch.tar.gz"])
def test_add_elasticsearch_binary(self):
car = team.Car("default", root_path=None, config_paths=[], variables={
"clean_command": "./gradlew clean",
"system.build_command": "./gradlew assemble",
"system.artifact_path_pattern": "distribution/archives/tar/build/distributions/*.tar.gz"
})
renderer = supplier.TemplateRenderer(version="abc")
es = supplier.ElasticsearchSourceSupplier(revision="abc",
es_src_dir="/src",
remote_url="",
car=car,
builder=None,
template_renderer=renderer)
binaries = {}
es.add(binaries=binaries)
self.assertEqual(binaries, {"elasticsearch": "elasticsearch.tar.gz"})
class ExternalPluginSourceSupplierTests(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.along_es = None
self.standalone = None
def setUp(self):
self.along_es = supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor("some-plugin", core_plugin=False),
revision="abc",
# built along-side ES
src_dir="/src",
src_config={
"plugin.some-plugin.src.subdir": "elasticsearch-extra/some-plugin",
"plugin.some-plugin.build.artifact.subdir": "plugin/build/distributions"
},
builder=None)
self.standalone = supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor("some-plugin", core_plugin=False),
revision="abc",
# built separately
src_dir=None,
src_config={
"plugin.some-plugin.src.dir": "/Projects/src/some-plugin",
"plugin.some-plugin.build.artifact.subdir": "build/distributions"
},
builder=None)
def test_invalid_config_no_source(self):
with self.assertRaisesRegex(exceptions.SystemSetupError,
"Neither plugin.some-plugin.src.dir nor plugin.some-plugin.src.subdir are set for plugin some-plugin."):
supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor("some-plugin", core_plugin=False),
revision="abc",
# built separately
src_dir=None,
src_config={
# but no source config
# "plugin.some-plugin.src.dir": "/Projects/src/some-plugin",
"plugin.some-plugin.build.artifact.subdir": "build/distributions"
},
builder=None)
def test_invalid_config_duplicate_source(self):
with self.assertRaisesRegex(exceptions.SystemSetupError,
"Can only specify one of plugin.duplicate.src.dir and plugin.duplicate.src.subdir but both are set."):
supplier.ExternalPluginSourceSupplier(plugin=team.PluginDescriptor("duplicate", core_plugin=False),
revision="abc",
src_dir=None,
src_config={
"plugin.duplicate.src.subdir": "elasticsearch-extra/some-plugin",
"plugin.duplicate.src.dir": "/Projects/src/some-plugin",
"plugin.duplicate.build.artifact.subdir": "build/distributions"
},
builder=None)
def test_standalone_plugin_overrides_build_dir(self):
self.assertEqual("/Projects/src/some-plugin", self.standalone.override_build_dir)
def test_along_es_plugin_keeps_build_dir(self):
self.assertIsNone(self.along_es.override_build_dir)
@mock.patch("glob.glob", lambda p: ["/src/elasticsearch-extra/some-plugin/plugin/build/distributions/some-plugin.zip"])
def test_add_binary_built_along_elasticsearch(self):
binaries = {}
self.along_es.add(binaries)
helix: int
"""index of the :any:`Helix` on which this :any:`Domain` resides."""
forward: bool
"""Whether the strand "points" forward (i.e., its 3' end has a larger offset than its 5' end).
If :any:`Domain.forward` is ``True``, then
:any:`Domain.start` is the 5' end of the :any:`Domain` and
:any:`Domain.end` is the 3' end of the :any:`Domain`.
If :any:`Domain.forward` is ``False``, these roles are reversed."""
start: int
"""
The smallest offset position of any base on this Domain
(3' end if :any:`Domain.forward` = ``False``,
5' end if :any:`Domain.forward` = ``True``).
"""
end: int
"""
1 plus the largest offset position of any base on this Domain
(5' end if :any:`Domain.forward` = ``False``,
3' end if :any:`Domain.forward` = ``True``).
Note that the set of base offsets occupied by this Domain is {start, start+1, ..., end-1},
i.e., inclusive for :py:data:`Strand.start` but exclusive for :py:data:`Strand.end`,
the same convention used in Python for slices of lists and strings.
(e.g., :samp:`"abcdef"[1:3] == "bc"`)
Some methods (such as :py:meth:`Domain.dna_sequence_in`) use the convention of being inclusive on
both ends and are marked with the word "INCLUSIVE".
(Such a convention is easier to reason about when there are insertions and deletions.)
"""
deletions: List[int] = field(default_factory=list)
"""List of positions of deletions on this Domain."""
insertions: List[Tuple[int, int]] = field(default_factory=list)
"""List of (position,num_insertions) pairs on this Domain.
This is the number of *extra* bases in addition to the base already at this position.
The total number of bases at this offset is num_insertions+1."""
label: DomainLabel = None
"""Generic "label" object to associate to this :any:`Domain`.
Useful for associating extra information with the :any:`Domain` that will be serialized, for example,
for DNA sequence design. It must be an object (e.g., a dict or primitive type such as str or int)
that is naturally JSON serializable. (Calling ``json.dumps`` on the object should succeed without
having to specify a custom encoder.)
"""
# not serialized; for efficiency
# remove quotes when Python 3.6 support dropped
_parent_strand: 'Strand' = field(init=False, repr=False, compare=False, default=None)
def __post_init__(self):
self._check_start_end()
def __repr__(self):
rep = (f'Domain(helix={self.helix}'
f', forward={self.forward}'
f', start={self.start}'
f', end={self.end}') + \
(f', deletions={self.deletions}' if len(self.deletions) > 0 else '') + \
(f', insertions={self.insertions}' if len(self.insertions) > 0 else '') + \
')'
return rep
def __str__(self):
return repr(self)
def strand(self) -> 'Strand': # remove quotes when Python 3.6 support dropped
return self._parent_strand
    def set_label(self, label: DomainLabel):
"""Sets label of this :any:`Domain`."""
self.label = label
def _check_start_end(self):
if self.start >= self.end:
raise StrandError(self._parent_strand,
f'start = {self.start} must be less than end = {self.end}')
def to_json_serializable(self, suppress_indent: bool = True):
dct = OrderedDict()
dct[helix_idx_key] = self.helix
dct[forward_key] = self.forward
dct[start_key] = self.start
dct[end_key] = self.end
if len(self.deletions) > 0:
dct[deletions_key] = self.deletions
if len(self.insertions) > 0:
dct[insertions_key] = self.insertions
if self.label is not None:
dct[domain_label_key] = self.label
return NoIndent(dct) if suppress_indent else dct
@staticmethod
def is_loopout() -> bool:
"""Indicates if this is a :any:`Loopout` (always false)
Useful when object could be either :any:`Loopout` or :any:`Domain`."""
return False
@staticmethod
def is_domain() -> bool:
"""Indicates if this is a :any:`Domain` (always true)
Useful when object could be either :any:`Loopout` or :any:`Domain`."""
return True
def set_start(self, new_start: int):
self.start = new_start
self._check_start_end()
def set_end(self, new_end: int):
self.end = new_end
self._check_start_end()
def offset_5p(self) -> int:
"""5' offset of this :any:`Domain`, INCLUSIVE."""
if self.forward:
return self.start
else:
return self.end - 1
# return self.start if self.forward else self.end - 1
def offset_3p(self) -> int:
"""3' offset of this :any:`Domain`, INCLUSIVE."""
return self.end - 1 if self.forward else self.start
def _num_insertions(self) -> int:
# total number of insertions in this Domain
return sum(insertion[1] for insertion in self.insertions)
def contains_offset(self, offset: int) -> bool:
"""Indicates if `offset` is the offset of a base on this :any:`Domain`.
Note that offsets refer to visual portions of the displayed grid for the Helix.
If for example, this Domain starts at position 0 and ends at 10, and it has 5 deletions,
then it contains the offset 7 even though there is no base 7 positions from the start."""
return self.start <= offset < self.end
def __len__(self):
"""Same as :meth:`Domain.dna_length`.
See also :meth:`Domain.visual_length`."""
return self.dna_length()
def dna_length(self) -> int:
"""Number of bases in this Domain."""
return self.end - self.start - len(self.deletions) + self._num_insertions()
def dna_length_in(self, left, right) -> int:
"""Number of bases in this Domain between `left` and `right` (INCLUSIVE)."""
if not left <= right + 1:
raise ValueError(f'left = {left} and right = {right} but we should have left <= right + 1')
if not self.start <= left:
raise ValueError(f'left = {left} should be at least self.start = {self.start}')
if not right < self.end:
raise ValueError(f'right = {right} should be at most self.end - 1 = {self.end - 1}')
num_deletions = sum(1 for offset in self.deletions if left <= offset <= right)
num_insertions = sum(length for (offset, length) in self.insertions if left <= offset <= right)
return (right - left + 1) - num_deletions + num_insertions
def visual_length(self) -> int:
"""Distance between :any:`Domain.start` offset and :any:`Domain.end` offset.
This can be more or less than the :meth:`Domain.dna_length` due to insertions and deletions."""
return self.end - self.start
def dna_sequence(self) -> Optional[str]:
"""Return DNA sequence of this Domain, or ``None`` if no DNA sequence has been assigned
to this :any:`Domain`'s :any:`Strand`."""
return self.dna_sequence_in(self.start, self.end - 1)
def dna_sequence_in(self, offset_left: int, offset_right: int) -> Optional[str]:
"""Return DNA sequence of this Domain in the interval of offsets given by
[`offset_left`, `offset_right`], INCLUSIVE, or ``None`` if no DNA sequence has been assigned
to this :any:`Domain`'s :any:`Strand`.
WARNING: This is inclusive on both ends,
unlike other parts of this API where the right endpoint is exclusive.
This is to make the notion well-defined when one of the endpoints is on an offset with a
deletion or insertion."""
strand_seq = self._parent_strand.dna_sequence
if strand_seq is None:
return None
# if on a deletion, move inward until we are off of it
while offset_left in self.deletions:
offset_left += 1
while offset_right in self.deletions:
offset_right -= 1
if offset_left > offset_right:
return ''
if offset_left >= self.end:
return ''
if offset_right < 0:
return ''
str_idx_left = self.domain_offset_to_strand_dna_idx(offset_left, self.forward)
str_idx_right = self.domain_offset_to_strand_dna_idx(offset_right, not self.forward)
if not self.forward: # these will be out of order if the domain is reverse (pointing left)
str_idx_left, str_idx_right = str_idx_right, str_idx_left
subseq = strand_seq[str_idx_left:str_idx_right + 1]
return subseq
def get_seq_start_idx(self) -> int:
"""Starting DNA subsequence index for first base of this :any:`Domain` on its
Parent :any:`Strand`'s DNA sequence."""
domains = self._parent_strand.domains
# index of self in parent strand's list of domains
self_domain_idx = domains.index(self)
# index of self's position within the DNA sequence of parent strand
self_seq_idx_start = sum(prev_domain.dna_length()
for prev_domain in domains[:self_domain_idx])
return self_seq_idx_start
def domain_offset_to_strand_dna_idx(self, offset: int, offset_closer_to_5p: bool) -> int:
""" Convert from offset on this :any:`Domain`'s :any:`Helix`
to string index on the parent :any:`Strand`'s DNA sequence.
If `offset_closer_to_5p` is ``True``, (this only matters if `offset` contains an insertion)
then only the leftmost string index corresponding to this offset is included,
otherwise up to the rightmost string index (including all insertions) is included."""
if offset in self.deletions:
raise ValueError(f'offset {offset} illegally contains a deletion from {self.deletions}')
# length adjustment for insertions depends on whether this is a left or right offset
len_adjust = self._net_ins_del_length_increase_from_5p_to(offset, offset_closer_to_5p)
# get string index assuming this Domain is first on Strand
if self.forward:
offset += len_adjust # account for insertions and deletions
domain_str_idx = offset - self.start
else:
# account for insertions and deletions
offset -= len_adjust # account for insertions and deletions
domain_str_idx = self.end - 1 - offset
# correct for existence of previous Domains on this Strand
return domain_str_idx + self.get_seq_start_idx()
def _net_ins_del_length_increase_from_5p_to(self, offset_edge: int, offset_closer_to_5p: bool) -> int:
"""Net number of insertions from 5'/3' end to offset_edge,
INCLUSIVE on 5'/3' end, EXCLUSIVE on offset_edge.
Set `five_p` ``= False`` to test from 3'
drug scores.
@param method str: normalize by the average or max within the vectors
@return Returns None
"""
# dimensions include drugs or features (e.g. "proteins")
# methods are average ('avg') or max ('max')
dvs = {} # drug vectors
cc = 0
if dimension == 'drugs':
for vec in self.values:
for vi in range(len(vec)):
if cc == 0:
dvs[vi] = []
dvs[vi].append(vec[vi])
cc += 1
new_dvecs = []
for i in range(len(dvs)):
vec = dvs[i]
if method == 'avg':
norm_val = np.average(vec)
elif method == 'max':
norm_val = max(vec)
else:
print('Please enter a proper normalization method: "max" or "avg"')
sys.exit()
def norm(x):
if norm_val == 0:
return 0.0
else:
return x/norm_val
new_dvecs.append(list(map(norm, vec)))
pvs = {}
for dvi in range(len(new_dvecs)):
for p in range(len(self.proteins)):
try:
pvs[p].append(new_dvecs[dvi][p])
except KeyError:
pvs[p] = [new_dvecs[dvi][p]]
with open(outfile, 'w') as fo:
for p in range(len(self.proteins)):
fo.write('{}\t{}\n'.format(self.proteins[p], '\t'.join(list(map(str, pvs[p])))))
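# A minimal vectorized sketch of the dimension='drugs', method='max' branch
# above (illustrative only; the 2x3 matrix is hypothetical, with rows playing
# the role of the vectors in self.values and columns playing the drugs):
#
# import numpy as np
# values = np.array([[1.0, 2.0, 4.0],
# [3.0, 8.0, 2.0]])
# col_max = values.max(axis=0) # per-drug maxima (method='max')
# normed = values / col_max # each drug vector scaled by its own max
# (the loop above additionally maps a whole vector to 0.0 when its max is 0)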
def single_interaction(c_id, p_id, v="v2.2", fp="rd_ecfp4", vect="int",
dist="dice", org="nrpdb", bs="coach",
c_cutoff=0.0, p_cutoff=0.0, percentile_cutoff=0.0,
i_score="P", nr_ligs=True, approved_only=False, lig_name=False,
lib_path='',prot_path=''):
def print_time(s):
if s >= 60:
m, s = divmod(s, 60.0)
if m >= 60.0:
h, m = divmod(m, 60.0)
print("Interaction calculation took {:.0f} hr {:.0f} min {:.0f} s to finish.".format(h, m, s))
else:
print("Interaction calculation took {:.0f} min {:.0f} s to finish.".format(m, s))
else:
print("Interaction calculation took {:.0f} s to finish.".format(s))
print("Calculating BANDOCK interaction...")
start = time.time()
c_id = int(c_id)
pre = os.path.dirname(__file__) + "/data/v2.2+/"
lig_path = "{}/ligs/fps".format(pre)
if not lib_path:
cmpd_path = "{}/cmpds/fps-{}".format(pre,v)
map_path = "{}/mappings".format(pre)
else:
cmpd_path = "{0}/{1}/cmpds/fps-{1}".format(lib_path,v)
map_path = "{0}/{1}/mappings".format(lib_path,v)
# Remove redundant ligands from full list
# Especially important for percentile calculations
if nr_ligs:
if not os.path.exists("{}/mappings/nr_ligs.csv".format(pre)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'
dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))
nr_ligs = pd.read_csv("{}/mappings/nr_ligs.csv".format(pre),header=None)
nr_ligs = nr_ligs[0].values.flatten()
# Download protein matrix if it does not exist
if not prot_path:
if not os.path.exists("{}/prots/{}-{}.tsv".format(pre,org,bs)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)
dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))
p_matrix = pd.read_csv("{}/prots/{}-{}.tsv".format(pre,org,bs),sep='\t',header=None,index_col=0)
else:
p_matrix = pd.read_csv("{}/{}-{}.tsv".format(prot_path,org,bs),sep='\t',header=None,index_col=0)
# Create dictionary of lists
# Keys == proteins
# Values == list of predicted bs + bs scores
p_dict = {}
for p in p_matrix.itertuples():
p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))
try:
p_dict = {p_id: p_dict[p_id]}
except KeyError:
print("{} does not exist in protein library".format(p_id))
sys.exit()
if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:
print("{} is not an applicable interaction score.".format(i_score))
return
if not os.path.exists("{}/{}-{}_vect.pickle".format(cmpd_path,fp,vect)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)
dl_file(url, '{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect))
if not os.path.exists("{}/{}-{}_vect.pickle".format(lig_path,fp,vect)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)
dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))
# Load compound and ligand fingerprint pickles
with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'rb') as f:
c_fps = pickle.load(f)
with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:
l_fps = pickle.load(f)
try:
check = c_fps[c_id]
except KeyError:
print("{} does not exist in compound library".format(c_id))
sys.exit()
print("Interaction between {} and {}.".format(c_id,p_id))
score = calc_scores(c_id,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name)
print("Interaction score between {} and {} = {}".format(c_id,p_id,score[1][0]))
end = time.time()
print_time(end-start)
return score[1][0]
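# Hedged usage sketch: the compound id and protein id below are hypothetical
# placeholders, not values known to exist in the shipped libraries.
#
# score = single_interaction(33, 'P12345', v="v2.2", fp="rd_ecfp4", i_score="P")
#
# On first use this downloads any missing fingerprint and protein files into
# the package data directory, then returns the interaction score as a float.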
def generate_matrix(v="v2.2", fp="rd_ecfp4", vect="int", dist="dice", org="nrpdb", bs="coach", c_cutoff=0.0,
p_cutoff=0.0, percentile_cutoff=0.0, i_score="P", out_file='', out_path=".", nr_ligs=True,
approved_only=False, lig_name=False, lib_path='', prot_path='', ncpus=1):
"""!
Generate a matrix using our in-house protocol BANDOCK.
@param v str: version to use (supports v2.2 - v2.5)
@param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
@param vect str: integer "int" or binary "bit" vector for fingerprint
@param dist str: use Sørensen-Dice "dice" for vect="int" and Tanimoto "tani" for vect="bit" ("cos" for cosine is also accepted)
@param org str: protein library to use ('nrpdb' or 'homo_sapien')
@param bs str: the method to use, just use "coach"
@param c_cutoff float: minimum Cscore (Tanimoto/Dice similarity score) to consider for scoring
@param p_cutoff float: minimum Pscore (binding site score from COACH) to consider for scoring
@param percentile_cutoff float: %ile cutoff for fingerprint similarity scores in 'dC' scoring protocols
@param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', 'dCxP')
@param out_file str: filename of the output matrix
@param out_path str: path to the output matrix
@param nr_ligs bool: use only the non-redundant set of ligands for 'dC' scoring protocols (recommended)
@param approved_only bool: use only approved drugs to create the matrix
@param lig_name bool: output the ligand chosen for the compound-protein interaction score instead of the score
@param lib_path str: specify a local compound fingerprint set for custom analyses
@param prot_path str: specify a local protein library for custom analyses
@param ncpus int: number of cores to run on
@return Returns None
"""
def print_time(s):
if s >= 60:
m, s = divmod(s, 60.0)
if m >= 60.0:
h, m = divmod(m, 60.0)
print("Matrix generation took {:.0f} hr {:.0f} min {:.0f} s to finish.".format(h, m, s))
else:
print("Matrix generation took {:.0f} min {:.0f} s to finish.".format(m, s))
else:
print("Matrix generation took {:.0f} s to finish.".format(s))
print("Generating CANDO matrix...")
start = time.time()
pre = os.path.dirname(__file__) + "/data/v2.2+/"
lig_path = "{}/ligs/fps".format(pre)
if not lib_path:
cmpd_path = "{}/cmpds/fps-{}".format(pre,v)
map_path = "{}/mappings".format(pre)
else:
cmpd_path = "{0}/{1}/cmpds/fps-{1}".format(lib_path,v)
map_path = "{0}/{1}/mappings".format(lib_path,v)
if out_file == '':
if percentile_cutoff != 0.0:
if approved_only:
out_file = "{}-{}-{}-{}-{}-percentile{}-p{}-{}-approved.tsv".format(fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)
else:
out_file = "{}-{}-{}-{}-{}-percentile{}-p{}-{}.tsv".format(fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)
else:
if approved_only:
out_file = "{}-{}-{}-{}-{}-c{}-p{}-{}-approved.tsv".format(fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)
else:
out_file = "{}-{}-{}-{}-{}-c{}-p{}-{}.tsv".format(fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)
if not out_path and not lib_path:
out_path = '{}/matrices/{}'.format(pre,v)
elif not out_path and lib_path:
out_path = '{}/{}/matrices'.format(lib_path,v)
os.makedirs(out_path, exist_ok=True)
# Remove redundant ligands from full list
# Especially important for percentile calculations
if nr_ligs:
if not os.path.exists("{}/mappings/nr_ligs.csv".format(pre)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'
dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))
nr_ligs = pd.read_csv("{}/mappings/nr_ligs.csv".format(pre),header=None)
nr_ligs = nr_ligs[0].values.flatten()
# Download protein matrix if it does not exist
if not prot_path:
if not os.path.exists("{}/prots/{}-{}.tsv".format(pre,org,bs)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)
dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))
p_matrix = pd.read_csv("{}/prots/{}-{}.tsv".format(pre,org,bs),sep='\t',header=None,index_col=0)
else:
p_matrix = pd.read_csv("{}/{}-{}.tsv".format(prot_path,org,bs),sep='\t',header=None,index_col=0)
# Create dictionary of lists
# Keys == proteins
# Values == list of predicted bs + bs scores
p_dict = {}
for p in p_matrix.itertuples():
p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))
if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:
print("{} is not an applicable interaction score.".format(i_score))
return
if not os.path.exists("{}/{}-{}_vect.pickle".format(cmpd_path,fp,vect)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)
dl_file(url, '{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect))
if not os.path.exists("{}/{}-{}_vect.pickle".format(lig_path,fp,vect)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)
dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))
# Load compound and ligand fingerprint pickles
with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'rb') as f:
c_fps = pickle.load(f)
with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:
l_fps = pickle.load(f)
if approved_only:
if not os.path.exists("{}/drugbank-{}-approved.tsv".format(map_path,v)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}-approved.tsv'.format(v)
dl_file(url, '{}/drugbank-{}-approved.tsv'.format(map_path,v))
approved_df = pd.read_csv('{}/drugbank-{}-approved.tsv'.format(map_path,v),sep='\t',index_col=0)
c_list = approved_df.index
else:
c_list = list(c_fps.keys())
if ncpus > 1:
pool = mp.Pool(ncpus)
scores = pool.starmap_async(calc_scores, [(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name) for c in c_list]).get()
pool.close()
pool.join()
else:
scores = [calc_scores(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name) for c in c_list]
scores = {d[0]:d[1] for d in scores}
mat = pd.DataFrame.from_dict(scores)
mat.sort_index(axis=1,inplace=True)
mat.rename(index=dict(zip(range(len(p_matrix.index)), p_matrix.index)), inplace=True)
mat.to_csv("{}/{}".format(out_path,out_file), sep='\t', index=True, header=False, float_format='%.3f')
end = time.time()
print("Matrix written to {}/{}.".format(out_path,out_file))
print_time(end-start)
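# Hedged usage sketch: every keyword shown is a documented default except
# ncpus; when out_file is empty the file name is derived from the parameters.
#
# generate_matrix(v="v2.2", fp="rd_ecfp4", vect="int", dist="dice", i_score="P", ncpus=4)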
def calc_scores(c,c_fps,l_fps,p_dict,dist,pscore_cutoff=0.0,cscore_cutoff=0.0,percentile_cutoff=0.0,i_score='P',nr_ligs=[],lig_name=False):
if i_score in ['dC','dCxP'] or percentile_cutoff != 0.0:
if dist == 'dice':
all_scores = DataStructs.BulkDiceSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())
elif dist == 'tani':
all_scores = DataStructs.BulkTanimotoSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())
elif dist == 'cos':
all_scores = DataStructs.BulkCosineSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())
if percentile_cutoff != 0.0:
cscore_cutoff = np.percentile(all_scores,percentile_cutoff)
scores = []
for p in p_dict.keys():
li = [i[0:2] for i in p_dict[p] if i[0] in l_fps.index and float(i[1]) >= pscore_cutoff]
if li:
li_bs, li_score = zip(*li)
li_bs = list(li_bs)
li_score = list(li_score)
else:
li_bs = li_score = []
x = l_fps.loc[li_bs,0].values.tolist()
y = l_fps.loc[li_bs].index.tolist()
# Pscore
if i_score in ['P','CxP','dCxP','avgP','medP']:
try:
if dist == 'dice':
temp_scores = list(zip(y,DataStructs.BulkDiceSimilarity(c_fps[c],x)))
elif dist == 'tani':
temp_scores = list(zip(y,DataStructs.BulkTanimotoSimilarity(c_fps[c],x)))
elif dist == 'cos':
temp_scores = list(zip(y,DataStructs.BulkCosineSimilarity(c_fps[c],x)))
#Cscore cutoff
temp_scores = [i for i in temp_scores if float(i[1]) >= cscore_cutoff]
if i_score == 'dCxP':
temp_c = max(temp_scores, key = lambda i:i[1])
if not lig_name:
c_score = stats.percentileofscore(all_scores,temp_c[1])/100.0
p_score = li_score[li_bs.index(temp_c[0])]
scores.append(float(c_score) * float(p_score))
else:
scores.append(temp_c[0])
elif i_score == 'CxP':
temp_c = max(temp_scores, key = lambda i:i[1])
if not lig_name:
c_score = temp_c[1]
p_score = li_score[li_bs.index(temp_c[0])]
scores.append(float(c_score) * float(p_score))
continue
else:
scores.append(temp_c[0])
elif i_score == 'P':
temp_c = max(temp_scores, key = lambda i:i[1])
if not lig_name:
p_score = li_score[li_bs.index(temp_c[0])]
scores.append(float(p_score))
else:
scores.append(temp_c[0])
elif i_score == 'avgP':
# np.mean emits a warning and returns nan when li_score is empty,
# so check for nan and append 0.000 in that case.
temp_p = np.mean(li_score)
if not np.isnan(temp_p):
scores.append(temp_p)
else:
scores.append(0.000)
elif i_score == 'medP':
temp_p = np.median(li_score)
if not np.isnan(temp_p):
scores.append(temp_p)
import logging
import re
from parsons.etl import Table
from parsons.utilities import check_env
from parsons.utilities.api_connector import APIConnector
logger = logging.getLogger(__name__)
class Mailchimp():
"""
Instantiate Mailchimp Class
`Args:`
api_key:
The Mailchimp-provided application key. Not required if
``MAILCHIMP_API_KEY`` env variable set.
`Returns:`
Mailchimp Class
"""
def __init__(self, api_key=None):
self.api_key = check_env.check('MAILCHIMP_API_KEY', api_key)
self.domain = re.findall("(?<=-).+$", self.api_key)[0]
self.uri = f'https://{self.domain}.api.mailchimp.com/3.0/'
self.client = APIConnector(self.uri, auth=('x', self.api_key))
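# Hedged usage sketch: the key below is a fake placeholder. Real Mailchimp
# keys end in a data-center suffix such as '-us1', which __init__ extracts
# to build the API base URL.
#
# mc = Mailchimp(api_key='0123456789abcdef-us1')
# lists_tbl = mc.get_lists(count=100)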
def get_lists(self, fields=None, exclude_fields=None,
count=None, offset=None, before_date_created=None,
since_date_created=None, before_campaign_last_sent=None,
since_campaign_last_sent=None, email=None, sort_field=None,
sort_dir=None):
"""
Get a table of lists under the account based on query parameters. Note
that argument descriptions here are sourced from Mailchimp's official
API documentation.
`Args:`
fields: list of strings
A comma-separated list of fields to return. Reference
parameters of sub-objects with dot notation.
exclude_fields: list of strings
A comma-separated list of fields to exclude. Reference
parameters of sub-objects with dot notation.
count: int
The number of records to return. Default value is 10. Maximum
value is 1000.
offset: int
The number of records from a collection to skip. Iterating over
large collections with this parameter can be slow. Default
value is 0.
before_date_created: string
Restrict response to lists created before the set date. We
recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00.
since_date_created: string
Restrict results to lists created after the set date. We
recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00.
before_campaign_last_sent: string
Restrict results to lists created before the last campaign send
date. We recommend ISO 8601 time format:
2015-10-21T15:41:36+00:00.
since_campaign_last_sent: string
Restrict results to lists created after the last campaign send
date. We recommend ISO 8601 time format:
2015-10-21T15:41:36+00:00.
email: string
Restrict results to lists that include a specific subscriber's
email address.
sort_field: string, can only be 'date_created' or None
Returns files sorted by the specified field.
sort_dir: string, can only be 'ASC', 'DESC', or None
Determines the order direction for sorted results.
`Returns:`
Table Class
"""
params = {'fields': fields,
'exclude_fields': exclude_fields,
'count': count,
'offset': offset,
'before_date_created': before_date_created,
'since_date_created': since_date_created,
'before_campaign_last_sent': before_campaign_last_sent,
'since_campaign_last_sent': since_campaign_last_sent,
'email': email,
'sort_field': sort_field,
'sort_dir': sort_dir}
response = self.client.get_request('lists', params=params)
tbl = Table(response['lists'])
logger.info(f'Found {tbl.num_rows} lists.')
if tbl.num_rows > 0:
return tbl
else:
return Table()
def get_campaigns(self, fields=None, exclude_fields=None,
count=None, offset=None, type=None, status=None,
before_send_time=None, since_send_time=None,
before_create_time=None, since_create_time=None,
list_id=None, folder_id=None, member_id=None,
sort_field=None, sort_dir=None):
"""
Get a table of campaigns under the account based on query parameters.
Note that argument descriptions here are sourced from Mailchimp's
official API documentation.
`Args:`
fields: list of strings
A comma-separated list of fields to return. Reference
parameters of sub-objects with dot notation.
exclude_fields: list of strings
A comma-separated list of fields to exclude. Reference
parameters of sub-objects with dot notation.
count: int
The number of records to return. Default value is 10. Maximum
value is 1000.
offset: int
The number of records from a collection to skip. Iterating over
large collections with this parameter can be slow. Default
value is 0.
type: string, can only be 'regular', 'plaintext', 'absplit', 'rss',
'variate', or None
The campaign type.
status: string, can only be 'save', 'paused', 'schedule',
'sending', 'sent', or None
The status of the campaign.
before_send_time: string
Restrict the response to campaigns sent before the set time. We
recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00.
since_send_time: string
Restrict the response to campaigns sent after the set time. We
recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00.
before_create_time: string
Restrict the response to campaigns created before the set time.
We recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00.
since_create_time: string
Restrict the response to campaigns created after the set time.
We recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00.
list_id: string
The unique id for the list.
folder_id: string
The unique folder id.
member_id: string
Retrieve campaigns sent to a particular list member. Member ID
is the MD5 hash of the lowercase version of the list member’s
email address.
sort_field: string, can only be 'create_time', 'send_time', or None
Returns files sorted by the specified field.
sort_dir: string, can only be 'ASC', 'DESC', or None
Determines the order direction for sorted results.
`Returns:`
Table Class
"""
params = {'fields': fields,
'exclude_fields': exclude_fields,
'count': count,
'offset': offset,
'type': type,
'status': status,
'before_send_time': before_send_time,
'since_send_time': since_send_time,
'before_create_time': before_create_time,
'since_create_time': since_create_time,
'list_id': list_id,
'folder_id': folder_id,
'member_id': member_id,
'sort_field': sort_field,
'sort_dir': sort_dir}
response = self.client.get_request('campaigns', params=params)
tbl = Table(response['campaigns'])
logger.info(f'Found {tbl.num_rows} campaigns.')
if tbl.num_rows > 0:
return tbl
else:
return Table()
def get_members(self, list_id, fields=None,
exclude_fields=None, count=None, offset=None,
email_type=None, status=None, since_timestamp_opt=None,
before_timestamp_opt=None, since_last_changed=None,
before_last_changed=None, unique_email_id=None,
vip_only=False, interest_category_id=None,
interest_ids=None, interest_match=None, sort_field=None,
sort_dir=None, since_last_campaign=None,
unsubscribed_since=None):
"""
Get a table of members in a list based on query parameters. Note that
argument descriptions here are sourced from Mailchimp's official API
documentation.
`Args:`
list_id: string
The unique ID of the list to fetch members from.
fields: list of strings
A comma-separated list of fields to return. Reference
parameters of sub-objects with dot notation.
exclude_fields: list of fields as strings
A comma-separated list of fields to exclude. Reference
parameters of sub-objects with dot notation.
count: int
The number of records to return. Default value is 10. Maximum
value is 1000.
offset: int
The number of records from a collection to skip. Iterating over
large collections with this parameter can be slow. Default
value is 0.
email_type: string
The email type.
status: string, can only be 'subscribed', 'unsubscribed',
'cleaned', 'pending', 'transactional', 'archived', or None
The subscriber's status.
since_timestamp_opt: string
Restrict results to subscribers who opted-in after the set
timeframe. We recommend ISO 8601 time format:
2015-10-21T15:41:36+00:00.
before_timestamp_opt: string
Restrict results to subscribers who opted-in before the set
timeframe. We recommend ISO 8601 time format:
2015-10-21T15:41:36+00:00.
since_last_changed: string
Restrict results to subscribers whose information changed after
the set timeframe. We recommend ISO 8601 time format:
2015-10-21T15:41:36+00:00.
before_last_changed: string
Restrict results to subscribers whose information changed
before the set timeframe. We recommend ISO 8601 time format:
2015-10-21T15:41:36+00:00.
unique_email_id: string
A unique identifier for the email address across all Mailchimp
lists. This parameter can be found in any links with Ecommerce
Tracking enabled.
vip_only: boolean
A filter to return only the list's VIP members. Passing true
will restrict results to VIP list members, passing false will
return all list members.
interest_category_id: string
The unique id for the interest category.
interest_ids: list of strings
Used to filter list members by interests. Must be accompanied
by interest_category_id and interest_match. The value must be a
comma separated list of interest ids present for any supplied
interest categories.
interest_match: string, can only be 'any', 'all', 'none', or None
Used to filter list members by interests. Must be accompanied
by interest_category_id and interest_ids. "any" will match a
member with any of the interest supplied, "all" will only match
members with every interest supplied, and "none" will match
members without any of the interest supplied.
sort_field: string, can only be 'timestamp_opt',
'timestamp_signup', 'last_changed', or None
Returns files sorted by the specified field.
sort_dir: string, can only be 'ASC', 'DESC', or None
Determines the order direction for sorted results.
since_last_campaign: string
Filter subscribers by those
subscribed/unsubscribed/pending/cleaned since last email
campaign send. Member status is required to use this filter.
unsubscribed_since: string
Filter subscribers by those unsubscribed since a specific date.
Using any status other than unsubscribed with this filter will
result in an error.
`Returns:`
Table Class
"""
params = {'fields': fields,
'exclude_fields': exclude_fields,
'count': count,
'offset': offset,
'email_type': email_type,
'status': status,
'since_timestamp_opt': since_timestamp_opt,
'before_timestamp_opt': before_timestamp_opt,
'since_last_changed': since_last_changed,
'before_last_changed': before_last_changed,
'unique_email_id': unique_email_id,
'vip_only': vip_only,
'interest_category_id': interest_category_id,
'interest_ids': interest_ids,
'interest_match': interest_match,
'sort_field': sort_field,
'sort_dir': sort_dir,
'since_last_campaign': since_last_campaign,
'unsubscribed_since': unsubscribed_since}
response = self.client.get_request(f'lists/{list_id}/members', params=params)
tbl = Table(response['members'])
logger.info(f'Found {tbl.num_rows} members.')
if tbl.num_rows > 0:
return tbl
else:
return Table()
def get_campaign_emails(self, campaign_id, fields=None,
exclude_fields=None, count=None, offset=None,
since=None):
"""
Get a table of individual emails from a campaign based on query
parameters. Note that argument descriptions here are sourced from
Mailchimp's official API documentation.
`Args:`
campaign_id: string
The unique ID of the campaign to fetch emails from.
fields: list of strings
A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
the URL to check it is indeed on the domain
- Issue the URL to check effective roles on project1 - this
should return 3 roles.
"""
role_list = []
for _ in range(4):
role = unit.new_role_ref()
PROVIDERS.role_api.create_role(role['id'], role)
role_list.append(role)
domain = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain['id'], domain)
user1 = unit.create_user(
PROVIDERS.identity_api, domain_id=domain['id']
)
user2 = unit.create_user(
PROVIDERS.identity_api, domain_id=domain['id']
)
group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = PROVIDERS.identity_api.create_group(group1)
PROVIDERS.identity_api.add_user_to_group(
user1['id'], group1['id']
)
PROVIDERS.identity_api.add_user_to_group(
user2['id'], group1['id']
)
project1 = unit.new_project_ref(domain_id=domain['id'])
PROVIDERS.resource_api.create_project(project1['id'], project1)
project2 = unit.new_project_ref(domain_id=domain['id'])
PROVIDERS.resource_api.create_project(project2['id'], project2)
# Add some roles to the project
PROVIDERS.assignment_api.add_role_to_user_and_project(
user1['id'], project1['id'], role_list[0]['id'])
PROVIDERS.assignment_api.add_role_to_user_and_project(
user1['id'], project1['id'], role_list[1]['id'])
# ..and one on a different project as a spoiler
PROVIDERS.assignment_api.add_role_to_user_and_project(
user1['id'], project2['id'], role_list[2]['id'])
# Now create our inherited role on the domain
base_collection_url = (
'/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
'domain_id': domain['id'],
'group_id': group1['id']})
member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
'collection_url': base_collection_url,
'role_id': role_list[3]['id']}
collection_url = base_collection_url + '/inherited_to_projects'
self.put(member_url)
self.head(member_url)
self.get(member_url, expected_status=http.client.NO_CONTENT)
r = self.get(collection_url)
self.assertValidRoleListResponse(r, ref=role_list[3],
resource_url=collection_url)
# Now use the list domain role assignments api to check if this
# is included
collection_url = (
'/role_assignments?group.id=%(group_id)s'
'&scope.domain.id=%(domain_id)s' % {
'group_id': group1['id'],
'domain_id': domain['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=1,
resource_url=collection_url)
gd_entity = self.build_role_assignment_entity(
domain_id=domain['id'], group_id=group1['id'],
role_id=role_list[3]['id'], inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, gd_entity)
# Now ask for effective list role assignments - the role should
# turn into a user project role, along with the two direct roles
# that are on the project
collection_url = (
'/role_assignments?effective&user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
'user_id': user1['id'],
'project_id': project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=3,
resource_url=collection_url)
# An effective role for an inherited role will be a project
# entity, with a domain link to the inherited assignment
up_entity = self.build_role_assignment_entity(
link=gd_entity['links']['assignment'], project_id=project1['id'],
user_id=user1['id'], role_id=role_list[3]['id'],
inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, up_entity)
def test_filtered_role_assignments_for_inherited_grants(self):
"""Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``.
Test Plan:
- Create 5 roles
- Create a domain with a user, group and two projects
- Assign three direct spoiler roles to projects
- Issue the URL to add an inherited user role to the domain
- Issue the URL to add an inherited group role to the domain
- Issue the URL to filter by inherited roles - this should
return just the 2 inherited roles.
"""
role_list = []
for _ in range(5):
role = unit.new_role_ref()
PROVIDERS.role_api.create_role(role['id'], role)
role_list.append(role)
domain = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain['id'], domain)
user1 = unit.create_user(
PROVIDERS.identity_api, domain_id=domain['id']
)
group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = PROVIDERS.identity_api.create_group(group1)
project1 = unit.new_project_ref(domain_id=domain['id'])
PROVIDERS.resource_api.create_project(project1['id'], project1)
project2 = unit.new_project_ref(domain_id=domain['id'])
PROVIDERS.resource_api.create_project(project2['id'], project2)
# Add some spoiler roles to the projects
PROVIDERS.assignment_api.add_role_to_user_and_project(
user1['id'], project1['id'], role_list[0]['id'])
PROVIDERS.assignment_api.add_role_to_user_and_project(
user1['id'], project2['id'], role_list[1]['id'])
# Create a non-inherited role as a spoiler
PROVIDERS.assignment_api.create_grant(
role_list[2]['id'], user_id=user1['id'], domain_id=domain['id'])
# Now create two inherited roles on the domain, one for a user
# and one for a domain
base_collection_url = (
'/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
'domain_id': domain['id'],
'user_id': user1['id']})
member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
'collection_url': base_collection_url,
'role_id': role_list[3]['id']}
collection_url = base_collection_url + '/inherited_to_projects'
self.put(member_url)
self.head(member_url)
self.get(member_url, expected_status=http.client.NO_CONTENT)
r = self.get(collection_url)
self.assertValidRoleListResponse(r, ref=role_list[3],
resource_url=collection_url)
base_collection_url = (
'/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
'domain_id': domain['id'],
'group_id': group1['id']})
member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
'collection_url': base_collection_url,
'role_id': role_list[4]['id']}
collection_url = base_collection_url + '/inherited_to_projects'
self.put(member_url)
self.head(member_url)
self.get(member_url, expected_status=http.client.NO_CONTENT)
r = self.get(collection_url)
self.assertValidRoleListResponse(r, ref=role_list[4],
resource_url=collection_url)
# Now use the list role assignments api to get a list of inherited
# roles on the domain - should get back the two roles
collection_url = (
'/role_assignments?scope.OS-INHERIT:inherited_to=projects')
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
resource_url=collection_url)
ud_entity = self.build_role_assignment_entity(
domain_id=domain['id'], user_id=user1['id'],
role_id=role_list[3]['id'], inherited_to_projects=True)
gd_entity = self.build_role_assignment_entity(
domain_id=domain['id'], group_id=group1['id'],
role_id=role_list[4]['id'], inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, ud_entity)
self.assertRoleAssignmentInListResponse(r, gd_entity)
def _setup_hierarchical_projects_scenario(self):
"""Create basic hierarchical projects scenario.
This basic scenario contains a root with one leaf project and
two roles with the following names: non-inherited and inherited.
"""
# Create project hierarchy
root = unit.new_project_ref(domain_id=self.domain['id'])
leaf = unit.new_project_ref(domain_id=self.domain['id'],
parent_id=root['id'])
PROVIDERS.resource_api.create_project(root['id'], root)
PROVIDERS.resource_api.create_project(leaf['id'], leaf)
# Create 'non-inherited' and 'inherited' roles
non_inherited_role = unit.new_role_ref(name='non-inherited')
PROVIDERS.role_api.create_role(
non_inherited_role['id'], non_inherited_role
)
inherited_role = unit.new_role_ref(name='inherited')
PROVIDERS.role_api.create_role(inherited_role['id'], inherited_role)
return (root['id'], leaf['id'],
non_inherited_role['id'], inherited_role['id'])
def test_get_token_from_inherited_user_project_role_grants(self):
# Create default scenario
root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
self._setup_hierarchical_projects_scenario())
# Define root and leaf projects authentication data
root_project_auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=root_id)
leaf_project_auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=leaf_id)
# Check the user cannot get a token on root nor leaf project
self.v3_create_token(root_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
self.v3_create_token(leaf_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
# Grant non-inherited role for user on leaf project
non_inher_up_link = self.build_role_assignment_link(
project_id=leaf_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.put(non_inher_up_link)
# Check the user can only get a token on leaf project
self.v3_create_token(root_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
self.v3_create_token(leaf_project_auth_data)
# Grant inherited role for user on root project
inher_up_link = self.build_role_assignment_link(
project_id=root_id, user_id=self.user['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_up_link)
# Check the user still can get a token only on leaf project
self.v3_create_token(root_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
self.v3_create_token(leaf_project_auth_data)
# Delete non-inherited grant
self.delete(non_inher_up_link)
# Check the inherited role still applies for leaf project
self.v3_create_token(root_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
self.v3_create_token(leaf_project_auth_data)
# Delete inherited grant
self.delete(inher_up_link)
# Check the user cannot get a token on leaf project anymore
self.v3_create_token(leaf_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
def test_get_token_from_inherited_group_project_role_grants(self):
# Create default scenario
root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
self._setup_hierarchical_projects_scenario())
# Create group and add user to it
group = unit.new_group_ref(domain_id=self.domain['id'])
group = PROVIDERS.identity_api.create_group(group)
PROVIDERS.identity_api.add_user_to_group(self.user['id'], group['id'])
# Define root and leaf projects authentication data
root_project_auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=root_id)
leaf_project_auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=leaf_id)
# Check the user cannot get a token on root nor leaf project
self.v3_create_token(root_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
self.v3_create_token(leaf_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
# Grant non-inherited role for group on leaf project
non_inher_gp_link = self.build_role_assignment_link(
project_id=leaf_id, group_id=group['id'],
role_id=non_inherited_role_id)
self.put(non_inher_gp_link)
# Check the user can only get a token on leaf project
self.v3_create_token(root_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
self.v3_create_token(leaf_project_auth_data)
# Grant inherited role for group on root project
inher_gp_link = self.build_role_assignment_link(
project_id=root_id, group_id=group['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_gp_link)
# Check the user still can get a token only on leaf project
self.v3_create_token(root_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
self.v3_create_token(leaf_project_auth_data)
# Delete non-inherited grant
self.delete(non_inher_gp_link)
# Check the inherited role still applies for leaf project
self.v3_create_token(leaf_project_auth_data)
# Delete inherited grant
self.delete(inher_gp_link)
# Check the user cannot get a token on leaf project anymore
self.v3_create_token(leaf_project_auth_data,
expected_status=http.client.UNAUTHORIZED)
def test_get_role_assignments_for_project_hierarchy(self):
"""Call ``GET /role_assignments``.
Test Plan:
- Create 2 roles
- Create a hierarchy of projects with one root and one leaf project
- Issue the URL to add a non-inherited user role to the root project
- Issue the URL to add an inherited user role to the root project
- Issue the URL to get all role assignments - this should return just
2 roles (non-inherited and inherited) in the root project.
"""
# Create default scenario
root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
self._setup_hierarchical_projects_scenario())
# Grant non-inherited role
non_inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.put(non_inher_up_entity['links']['assignment'])
# Grant inherited role
inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_up_entity['links']['assignment'])
# Get role assignments
collection_url = '/role_assignments'
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
resource_url=collection_url)
# Assert that the user has non-inherited role on root project
self.assertRoleAssignmentInListResponse(r, non_inher_up_entity)
# Assert that the user has inherited role on root project
self.assertRoleAssignmentInListResponse(r, inher_up_entity)
# Assert that the user does not have non-inherited role on leaf project
non_inher_up_entity = self.build_role_assignment_entity(
project_id=leaf_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
# Assert that the user does not have inherited role on leaf project
inher_up_entity['scope']['project']['id'] = leaf_id
self.assertRoleAssignmentNotInListResponse(r, inher_up_entity)
def test_get_effective_role_assignments_for_project_hierarchy(self):
"""Call ``GET /role_assignments?effective``.
Test Plan:
- Create 2 roles
- Create a hierarchy of projects with one root and one leaf project
- Issue the URL to add a non-inherited user role to the root project
- Issue the URL to add an inherited user role to the root project
- Issue the URL to get effective role assignments - this should return
1 role (non-inherited) on the root project and 1 role (inherited) on
the leaf project.
"""
# Create default scenario
root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
self._setup_hierarchical_projects_scenario())
# Grant non-inherited role
non_inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=non_inherited_role_id)
self.put(non_inher_up_entity['links']['assignment'])
# Grant inherited role
inher_up_entity = self.build_role_assignment_entity(
project_id=root_id, user_id=self.user['id'],
role_id=inherited_role_id, inherited_to_projects=True)
self.put(inher_up_entity['links']['assignment'])
# Get effective role assignments
collection_url = '/role_assignments?effective'
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
resource_url=collection_url)
# Assert that the user has non-inherited role on root project
self.assertRoleAssignmentInListResponse(r, non_inher_up_entity)
# Assert that the user does not have inherited role on root project
f.write(bygene_lorad_conf_contents)
f.close()
bygene_gss_conf_template = open('conf-bygene-gss-template.txt','r').read()
bygene_gss_conf_filename = os.path.join(bygene_gss_dir,'lorad-gss.conf')
bygene_gss_conf_contents = re.sub('__FIRST_SITE_COI__', '1', bygene_gss_conf_template, re.M | re.S)
bygene_gss_conf_contents = re.sub('__LAST_SITE_COI__', str(bygene_boundaries[0]), bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__FIRST_SITE_COII__', str(bygene_boundaries[0] + 1), bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__LAST_SITE_COII__', str(bygene_boundaries[1]), bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE6__', str(bygene_boundaries[1] + 1), bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__LAST_SITE_ATPASE6__', str(bygene_boundaries[2]), bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE8__', str(bygene_boundaries[2] + 1), bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__LAST_SITE_ATPASE8__', str(bygene_boundaries[3]), bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__BURNIN__', gss_burnin, bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__NITER__', gss_niter, bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__SAMPLEFREQ__', gss_samplefreq, bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__PRINTFREQ__', gss_printfreq, bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__NSTONES__', gss_nstones, bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__ALPHA__', gss_alpha, bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__RNSEED__', rnseed, bygene_gss_conf_contents, re.M | re.S)
bygene_gss_conf_contents = re.sub('__TREEFILE__', tree_file_name, bygene_gss_conf_contents, re.M | re.S)
f = open(bygene_gss_conf_filename,'w')
f.write(bygene_gss_conf_contents)
f.close()
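# Caveat: re.sub's fourth positional argument is `count`, not `flags`, so the
# `re.M | re.S` passed throughout this script is silently treated as a
# substitution limit (24) rather than as regex flags. Since every placeholder
# here is a literal string, a small str.replace helper sidesteps the pitfall;
# a hedged sketch (the helper name is illustrative, not part of this script):
#
# def fill_template(template, mapping):
# for key, value in mapping.items():
# template = template.replace('__{}__'.format(key), str(value))
# return template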
if include_rev:
bygene_rev_template = open('rev-bygene-template.txt','r').read()
bygene_rev_filename = os.path.join(bygene_rev_dir,'ss.Rev')
bygene_rev_contents = re.sub('__RNSEED__', rnseed, bygene_rev_template, re.M | re.S)
bygene_rev_contents = re.sub('__BURNIN__', rev_burnin, bygene_rev_contents, re.M | re.S)
bygene_rev_contents = re.sub('__NITER__', rev_niter, bygene_rev_contents, re.M | re.S)
bygene_rev_contents = re.sub('__SAMPLEFREQ__', rev_samplefreq, bygene_rev_contents, re.M | re.S)
bygene_rev_contents = re.sub('__PRINTFREQ__', rev_printfreq, bygene_rev_contents, re.M | re.S)
bygene_rev_contents = re.sub('__TUNINGINTERVAL__', rev_tuning_interval, bygene_rev_contents, re.M | re.S)
bygene_rev_contents = re.sub('__NSTONES__', rev_nstones, bygene_rev_contents, re.M | re.S)
bygene_rev_contents = re.sub('__ALPHA__', rev_alpha, bygene_rev_contents, re.M | re.S)
bygene_rev_contents = re.sub('__TREEFILE__', tree_file_name, bygene_rev_contents, re.M | re.S)
if fan_etal_2011:
bygene_rev_contents = re.sub('__FAN_ETAL_2011__', 'TRUE', bygene_rev_contents, re.M | re.S)
else:
bygene_rev_contents = re.sub('__FAN_ETAL_2011__', 'FALSE', bygene_rev_contents, re.M | re.S)
f = open(bygene_rev_filename,'w')
f.write(bygene_rev_contents)
f.close()
if not fan_etal_2011 and include_lorad:
byboth_lorad_conf_template = open('conf-byboth-lorad-template.txt','r').read()
byboth_lorad_conf_filename = os.path.join(byboth_lorad_dir,'lorad.conf')
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COI1__', '1', byboth_lorad_conf_template, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COI1__', str(byboth_boundaries[0]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COI2__', str(byboth_boundaries[0] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COI2__', str(byboth_boundaries[1]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COI3__', str(byboth_boundaries[1] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COI3__', str(byboth_boundaries[2]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COII1__', str(byboth_boundaries[2] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COII1__', str(byboth_boundaries[3]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COII2__', str(byboth_boundaries[3] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COII2__', str(byboth_boundaries[4]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COII3__', str(byboth_boundaries[4] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COII3__', str(byboth_boundaries[5]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE61__', str(byboth_boundaries[5] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE61__', str(byboth_boundaries[6]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE62__', str(byboth_boundaries[6] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE62__', str(byboth_boundaries[7]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE63__', str(byboth_boundaries[7] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE63__', str(byboth_boundaries[8]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE81__', str(byboth_boundaries[8] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE81__', str(byboth_boundaries[9]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE82__', str(byboth_boundaries[9] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE82__', str(byboth_boundaries[10]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE83__', str(byboth_boundaries[10] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE83__', str(byboth_boundaries[11]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__BURNIN__', lorad_burnin, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__NITER__', lorad_niter, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__SAMPLEFREQ__', lorad_samplefreq, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__PRINTFREQ__', lorad_printfreq, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__COVERAGE1__', lorad_coverage1, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__COVERAGE2__', lorad_coverage2, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__COVERAGE3__', lorad_coverage3, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__RNSEED__', rnseed, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__TREEFILE__', tree_file_name, byboth_lorad_conf_contents, re.M | re.S)
f = open(byboth_lorad_conf_filename,'w')
f.write(byboth_lorad_conf_contents)
f.close()
if not fan_etal_2011 and include_gss:
# GSS requires a reference distribution estimated from an initial MCMC run, so this part is identical to the include_lorad
# part above except that gss_burnin, gss_niter, gss_samplefreq, and gss_printfreq are used instead of lorad_burnin, lorad_niter,
# lorad_samplefreq, and lorad_printfreq (also, the file generated is <byboth_gss_dir>/lorad-mcmc.conf, not <byboth_lorad_dir>/lorad.conf).
byboth_lorad_conf_template = open('conf-byboth-lorad-template.txt','r').read()
byboth_lorad_conf_filename = os.path.join(byboth_gss_dir,'lorad-mcmc.conf')
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COI1__', '1', byboth_lorad_conf_template, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COI1__', str(byboth_boundaries[0]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COI2__', str(byboth_boundaries[0] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COI2__', str(byboth_boundaries[1]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COI3__', str(byboth_boundaries[1] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COI3__', str(byboth_boundaries[2]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COII1__', str(byboth_boundaries[2] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COII1__', str(byboth_boundaries[3]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COII2__', str(byboth_boundaries[3] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COII2__', str(byboth_boundaries[4]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_COII3__', str(byboth_boundaries[4] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_COII3__', str(byboth_boundaries[5]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE61__', str(byboth_boundaries[5] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE61__', str(byboth_boundaries[6]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE62__', str(byboth_boundaries[6] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE62__', str(byboth_boundaries[7]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE63__', str(byboth_boundaries[7] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE63__', str(byboth_boundaries[8]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE81__', str(byboth_boundaries[8] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE81__', str(byboth_boundaries[9]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE82__', str(byboth_boundaries[9] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE82__', str(byboth_boundaries[10]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__FIRST_SITE_ATPASE83__', str(byboth_boundaries[10] + 1), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__LAST_SITE_ATPASE83__', str(byboth_boundaries[11]), byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__BURNIN__', gss_burnin, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__NITER__', gss_niter, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__SAMPLEFREQ__', gss_samplefreq, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__PRINTFREQ__', gss_printfreq, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__COVERAGE1__', lorad_coverage1, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__COVERAGE2__', lorad_coverage2, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__COVERAGE3__', lorad_coverage3, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__RNSEED__', rnseed, byboth_lorad_conf_contents, re.M | re.S)
byboth_lorad_conf_contents = re.sub('__TREEFILE__', tree_file_name, byboth_lorad_conf_contents, re.M | re.S)
f = open(byboth_lorad_conf_filename,'w')
f.write(byboth_lorad_conf_contents)
f.close()
byboth_gss_conf_template = open('conf-byboth-gss-template.txt','r').read()
byboth_gss_conf_filename = os.path.join(byboth_gss_dir,'lorad-gss.conf')
byboth_gss_conf_contents = re.sub('__FIRST_SITE_COI1__', '1', byboth_gss_conf_template, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_COI1__', str(byboth_boundaries[0]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_COI2__', str(byboth_boundaries[0] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_COI2__', str(byboth_boundaries[1]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_COI3__', str(byboth_boundaries[1] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_COI3__', str(byboth_boundaries[2]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_COII1__', str(byboth_boundaries[2] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_COII1__', str(byboth_boundaries[3]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_COII2__', str(byboth_boundaries[3] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_COII2__', str(byboth_boundaries[4]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_COII3__', str(byboth_boundaries[4] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_COII3__', str(byboth_boundaries[5]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE61__', str(byboth_boundaries[5] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_ATPASE61__', str(byboth_boundaries[6]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE62__', str(byboth_boundaries[6] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_ATPASE62__', str(byboth_boundaries[7]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE63__', str(byboth_boundaries[7] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_ATPASE63__', str(byboth_boundaries[8]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE81__', str(byboth_boundaries[8] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_ATPASE81__', str(byboth_boundaries[9]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE82__', str(byboth_boundaries[9] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_ATPASE82__', str(byboth_boundaries[10]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__FIRST_SITE_ATPASE83__', str(byboth_boundaries[10] + 1), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__LAST_SITE_ATPASE83__', str(byboth_boundaries[11]), byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__BURNIN__', gss_burnin, byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__NITER__', gss_niter, byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__SAMPLEFREQ__', gss_samplefreq, byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__PRINTFREQ__', gss_printfreq, byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__NSTONES__', gss_nstones, byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__ALPHA__', gss_alpha, byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__RNSEED__', rnseed, byboth_gss_conf_contents, flags=re.M | re.S)
byboth_gss_conf_contents = re.sub('__TREEFILE__', tree_file_name, byboth_gss_conf_contents, flags=re.M | re.S)
f = open(byboth_gss_conf_filename,'w')
f.write(byboth_gss_conf_contents)
f.close()
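# Optional refactoring sketch (hypothetical helper, not wired into this script):
# the blocks of repeated re.sub calls above could be driven by a single
# placeholder->value mapping.
def fill_template(template, mapping):
    contents = template
    for placeholder, value in mapping.items():
        # flags must be passed by keyword; the fourth positional argument of re.sub is 'count'
        contents = re.sub(placeholder, str(value), contents, flags=re.M | re.S)
    return contents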
if include_rev:
    byboth_rev_template = open('rev-byboth-template.txt','r').read()
    byboth_rev_filename = os.path.join(byboth_rev_dir,'ss.Rev')
    byboth_rev_contents = re.sub('__RNSEED__', rnseed, byboth_rev_template, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__BURNIN__', rev_burnin, byboth_rev_contents, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__NITER__', rev_niter, byboth_rev_contents, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__SAMPLEFREQ__', rev_samplefreq, byboth_rev_contents, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__PRINTFREQ__', rev_printfreq, byboth_rev_contents, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__TUNINGINTERVAL__', rev_tuning_interval, byboth_rev_contents, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__NSTONES__', rev_nstones, byboth_rev_contents, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__ALPHA__', rev_alpha, byboth_rev_contents, flags=re.M | re.S)
    byboth_rev_contents = re.sub('__TREEFILE__', tree_file_name, byboth_rev_contents, flags=re.M | re.S)
    if fan_etal_2011:
        byboth_rev_contents = re.sub('__FAN_ETAL_2011__', 'TRUE', byboth_rev_contents, flags=re.M | re.S)
    else:
        # assumed 'FALSE' counterpart of the branch above
        byboth_rev_contents = re.sub('__FAN_ETAL_2011__', 'FALSE', byboth_rev_contents, flags=re.M | re.S)
"out_right_tag_with-conflict_with-rerun"
target.TaggedOutputLocalTarget._already_changed = False
# Wait for the beginning of the next second
pause.until(int(time.time()) + 1.01)
# The t1 task has no conflict
# Note that we have to specify the prefix in the target to avoid conflict. If we do not,
# the default prefix is updated when the task t2 is defined, so when t1 is built the prefix
# has been changed and thus the assert fails.
t1 = TestTaskRight(result_path=TestTaskRight.root, tag_output=True, with_conflict=True)
assert t1.output().path.endswith("s/file.test")
# Test warning is raised when TagResultOutputMixin is used with RerunMixin
with pytest.warns(
UserWarning,
match=(
"Using 'rerun' with conflicting tag output results in creating a new tag or "
r"removing the untagged result directory \(depending on the inheritance order\)."
),
):
t2 = TestTaskRight(
result_path=TestTaskRight.root, tag_output=True, with_conflict=True, rerun=True
)
assert t2.output().path.endswith("s_1/file.test")
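# From the expected-file patterns asserted below: the tagged first run writes under
# out_right_tag_with-conflict_with-rerun_<YYYYMMDD>-<HH>h<MM>m<SS>s/ and the
# conflicting rerun appends an integer suffix to the timestamp tag (hence the
# "s_1/file.test" suffix checked above).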
# Build without rerun
assert luigi.build([t1], local_scheduler=True)
assert len(executed) == 1
executed.clear()
expected_files.extend(
[
r"out_right_tag_with-conflict_with-rerun",
r"out_right_tag_with-conflict_with-rerun_\d{8}-\d{2}h\d{2}m\d{2}s",
r"out_right_tag_with-conflict_with-rerun_\d{8}-\d{2}h\d{2}m\d{2}s/file.test",
r"out_right_tag_with-conflict_with-rerun_\d{8}-\d{2}h\d{2}m\d{2}s_\d+",
]
)
check_files_exist(
tmpdir,
expected_files,
)
with Path(
[
i
for i in sorted(Path(tmpdir).rglob("*"))
if re.match(
r"out_right_tag_with-conflict_with-rerun_\d{8}-\d{2}h\d{2}m\d{2}s/file.test",
str(i.relative_to(tmpdir)),
)
][0]
).open("r", encoding="utf-8") as f:
data = f.read().split("\n")
assert data[0] == str(tmpdir / "out_right_tag_with-conflict_with-rerun")
assert re.match(
str(tmpdir / r"out_right_tag_with-conflict_with-rerun_\d{8}-\d{2}h\d{2}m\d{2}s"),
data[1],
)
assert data[2:] == ["0", "True", "True", "False"]
# Build with rerun
assert luigi.build([t2], local_scheduler=True)
assert len(executed) == 1
executed.clear()
expected_files.extend(
[
r"out_right_tag_with-conflict_with-rerun_\d{8}-\d{2}h\d{2}m\d{2}s_\d+/file.test",
]
)
check_files_exist(
tmpdir,
expected_files,
)
with Path(
[
i
for i in sorted(Path(tmpdir).rglob("*"))
if re.match(
(
"out_right_tag_with-conflict_with-rerun_"
r"\d{8}-\d{2}h\d{2}m\d{2}s_\d+/file.test"
),
str(i.relative_to(tmpdir)),
)
][0]
).open("r", encoding="utf-8") as f:
data = f.read().split("\n")
assert data[0] == str(tmpdir / "out_right_tag_with-conflict_with-rerun")
assert re.match(
str(tmpdir / r"out_right_tag_with-conflict_with-rerun_\d{8}-\d{2}h\d{2}m\d{2}s"),
data[1],
)
assert data[2:] == ["0", "True", "True", "True"]
def test_propagation(self, tmpdir, dataset_df_path):
"""Test the propagation of result_path."""
class TestTaskA(task.SetValidationTask):
"""A test validation task."""
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
(output_path / "fileA.test").touch()
class TestTaskB(task.SetValidationTask):
"""A test validation task."""
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
(output_path / "fileB.test").touch()
class TestWorkflow(task.ValidationWorkflow):
"""The global validation workflow."""
with_conflict = luigi.BoolParameter(default=False)
def inputs(self):
return {
TestTaskA: {},
TestTaskB: {},
}
def kwargs(self):
return {
"tag_output": self.tag_output,
"with_conflict": self.with_conflict,
}
@staticmethod
def validation_function(df, output_path, *args, tag_output, with_conflict, **kwargs):
res_files = sorted(Path(output_path.parent.parent).rglob("*"))
if not tag_output:
suffix = ""
elif not with_conflict:
suffix = r"_\d{8}-\d{2}h\d{2}m\d{2}s"
else:
suffix = r"_\d{8}-\d{2}h\d{2}m\d{2}s_\d+"
expected_patterns = [
f"{tmpdir}/out" + suffix + "/TestTaskA",
f"{tmpdir}/out" + suffix + "/TestTaskA/data",
f"{tmpdir}/out" + suffix + "/TestTaskA/data/fileA.test",
f"{tmpdir}/out" + suffix + "/TestTaskA/report.csv",
f"{tmpdir}/out" + suffix + "/TestTaskB",
f"{tmpdir}/out" + suffix + "/TestTaskB/data",
f"{tmpdir}/out" + suffix + "/TestTaskB/data/fileB.test",
f"{tmpdir}/out" + suffix + "/TestTaskB/report.csv",
f"{tmpdir}/out" + suffix + "/TestWorkflow",
f"{tmpdir}/out" + suffix + "/TestWorkflow/data",
]
assert len(res_files) == len(expected_patterns)
for path, pattern in zip(res_files, expected_patterns):
assert re.match(str(pattern), str(path))
root = tmpdir / "out"
workflow = TestWorkflow(
dataset_df=dataset_df_path,
result_path=str(root),
tag_output=False,
with_conflict=False,
)
assert luigi.build([workflow], local_scheduler=True)
report_df = pd.read_csv(workflow.result_path / "TestWorkflow" / "report.csv")
assert report_df["is_valid"].all()
# Test tag name conflicts
pause.until(int(time.time()) + 1.01)
workflow_1 = TestWorkflow(
dataset_df=dataset_df_path,
result_path=str(root),
tag_output=True,
with_conflict=False,
)
assert re.match(
r"out_\d{8}-\d{2}h\d{2}m\d{2}s/TestWorkflow/report.csv",
str(workflow_1.output()["report"].pathlib_path.relative_to(tmpdir)),
)
# Create the result directory of workflow_1 so that it appears to have already run for
# workflow_2
workflow_1.output()["data"].pathlib_path.mkdir(parents=True, exist_ok=True)
workflow_2 = TestWorkflow(
dataset_df=dataset_df_path,
result_path=str(root),
tag_output=True,
with_conflict=True,
)
assert re.match(
r"out_\d{8}-\d{2}h\d{2}m\d{2}s_\d+/TestWorkflow/report.csv",
str(workflow_2.output()["report"].pathlib_path.relative_to(tmpdir)),
)
assert luigi.build([workflow_1, workflow_2], local_scheduler=True)
report_df_1 = pd.read_csv(workflow_1.result_path / "TestWorkflow" / "report.csv")
assert report_df_1["is_valid"].all()
report_df_2 = pd.read_csv(workflow_2.result_path / "TestWorkflow" / "report.csv")
assert report_df_2["is_valid"].all()
class TestSetValidationTask:
"""Test the data_validation_framework.task.SetValidationTask class."""
@pytest.fixture
def TestTask(self, tmpdir):
class TestTask(task.SetValidationTask):
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
assert df["is_valid"].all()
assert (df["ret_code"] == 0).all()
assert df["comment"].isnull().all()
assert df["exception"].isnull().all()
assert df["a"].tolist() == [1, 2]
assert df["b"].tolist() == [3, 4]
df["a"] *= 10
df[["a", "b"]].to_csv(output_path / "test.csv")
return TestTask
def test_defaults(self, TestTask, dataset_df_path, tmpdir):
# Test defaults
assert luigi.build(
[TestTask(dataset_df=dataset_df_path, result_path=str(tmpdir / "out_defaults"))],
local_scheduler=True,
)
result = pd.read_csv(tmpdir / "out_defaults" / "TestTask" / "data" / "test.csv")
expected = pd.read_csv(tmpdir / "dataset.csv")
expected["a"] *= 10
assert result.equals(expected)
def test_no_dataset_no_input(self, TestTask, tmpdir):
# Test with no dataset and no input (should fail)
class FailingTestTask(TestTask):
pass
failed_tasks = []
exceptions = []
@FailingTestTask.event_handler(luigi.Event.FAILURE)
def check_exception(failed_task, exception): # pylint: disable=unused-variable
failed_tasks.append(str(failed_task))
exceptions.append(str(exception))
assert not luigi.build(
[FailingTestTask(result_path=str(tmpdir / "out_fail"))], local_scheduler=True
)
assert failed_tasks == [str(FailingTestTask(result_path=str(tmpdir / "out_fail")))]
assert exceptions == [
str(ValueError("Either the 'dataset_df' parameter or a requirement must be provided."))
]
def test_inputs_outputs(self, TestTask, dataset_df_path, tmpdir):
# Test inputs
class TestTaskWithOutputs(task.SetValidationTask):
output_columns = {"a": None}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
assert df["is_valid"].all()
assert (df["ret_code"] == 0).all()
assert df["comment"].isnull().all()
assert df["exception"].isnull().all()
assert df["a"].tolist() == [1, 2]
assert df["b"].tolist() == [3, 4]
df["a"] *= 10
df[["a", "b"]].to_csv(output_path / "test.csv")
class TestTaskWithInputs(task.SetValidationTask):
def inputs(self):
return {
TestTaskWithOutputs: (
{
"dataset_df": dataset_df_path,
"result_path": self.result_path,
},
{"a": "a_input"},
)
}
def kwargs(self):
return {"dataset_df": self.dataset_df}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
assert df["is_valid"].all()
assert (df["ret_code"] == 0).all()
assert df["comment"].isnull().all()
assert df["exception"].isnull().all()
assert df["a_input"].tolist() == [10, 20]
if kwargs["dataset_df"] is not None:
assert df["a"].tolist() == [1, 2]
assert df["b"].tolist() == [3, 4]
df["a_input"] *= 10
df.to_csv(output_path / "test.csv")
# Test with inputs but no dataset
assert luigi.build(
[TestTaskWithInputs(result_path=str(tmpdir / "out_inputs"))], local_scheduler=True
)
result = pd.read_csv(tmpdir / "out_inputs" / "TestTaskWithInputs" / "data" / "test.csv")
expected = pd.read_csv(tmpdir / "dataset.csv")
expected["a_input"] = expected["a"] * 100
assert (result["a_input"] == expected["a_input"]).all()
# Test with inputs and dataset
assert luigi.build(
[
TestTaskWithInputs(
dataset_df=dataset_df_path, result_path=str(tmpdir / "out_inputs_and_outputs")
)
],
local_scheduler=True,
)
result = pd.read_csv(
tmpdir / "out_inputs_and_outputs" / "TestTaskWithInputs" / "data" / "test.csv"
)
expected = pd.read_csv(tmpdir / "dataset.csv")
expected["a_input"] = expected["a"] * 100
assert result[["a", "b", "a_input"]].equals(expected[["a", "b", "a_input"]])
def test_missing_columns(self, TestTask, dataset_df_path, tmpdir):
# Test missing columns in requirements
class TestTaskMissingColumns(task.SetValidationTask):
def inputs(self):
return {TestTask: {"a": "a_input"}}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
assert df["is_valid"].all()
assert (df["ret_code"] == 0).all()
assert df["comment"].isnull().all()
assert df["exception"].isnull().all()
assert df["a_input"].tolist() == [10, 20]
df["a_input"] *= 10
df.to_csv(output_path / "test.csv")
failed_tasks = []
exceptions = []
@TestTaskMissingColumns.event_handler(luigi.Event.FAILURE)
def check_exception(failed_task, exception): # pylint: disable=unused-variable
failed_tasks.append(str(failed_task))
exceptions.append(str(exception))
assert not luigi.build(
[
TestTaskMissingColumns(
dataset_df=dataset_df_path, result_path=str(tmpdir / "out_missing_columns")
)
],
local_scheduler=True,
)
assert failed_tasks == [
str(
TestTaskMissingColumns(
dataset_df=dataset_df_path, result_path=str(tmpdir / "out_missing_columns")
)
)
]
assert exceptions == [
str(
KeyError(
"The columns ['a'] are missing from the output columns of "
"TestTask("
f"tag_output=False, result_path={tmpdir / 'out_missing_columns'}, "
f"dataset_df={dataset_df_path}, input_index_col=, data_dir=data).",
)
)
]
def test_wrong_inputs(self, TestTask, dataset_df_path, tmpdir):
# Test bad format in inputs
class TestTaskMissingColumns(task.SetValidationTask):
def inputs(self):
return {TestTask: "BAD INPUTS"}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
pass
with pytest.raises(
ValueError,
match=(
"The input values should either be a dict containing the column mapping or a tuple "
"with a dict containing the keyword arguments as first element and a dict "
r"containing the column mapping as second element\."
),
):
luigi.build(
[
TestTaskMissingColumns(
dataset_df=dataset_df_path, result_path=str(tmpdir / "out_bad_inputs")
)
],
local_scheduler=True,
)
def test_validation_function_as_method(self, dataset_df_path, tmpdir):
class TestTask(task.SetValidationTask):
# pylint: disable=no-self-argument
def validation_function(df, output_path, *args, **kwargs):
df["a"] *= 10
df[["a", "b"]].to_csv(output_path / "test.csv")
# Test defaults
assert luigi.build(
[TestTask(dataset_df=dataset_df_path, result_path=str(tmpdir / "out_defaults"))],
local_scheduler=True,
)
result = pd.read_csv(tmpdir / "out_defaults" / "TestTask" / "data" / "test.csv")
expected = pd.read_csv(tmpdir / "dataset.csv")
expected["a"] *= 10
assert result.equals(expected)
class TestPropagation:
@pytest.fixture
def BaseTestTask(self, TestTask):
class BaseTestTask(TestTask):
def kwargs(self):
return {
"dataset_df": str(self.dataset_df),
"result_path": str(self.result_path),
}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
with open(output_path / "test.json", "w", encoding="utf-8") as f:
json.dump(kwargs, f)
TestTask.validation_function(df, output_path, *args, **kwargs)
return BaseTestTask
@pytest.fixture
def TestTaskPassDatasetAndResultPath(self, BaseTestTask):
class TestTaskPassDatasetAndResultPath(task.SetValidationTask):
def inputs(self):
return {BaseTestTask: {}}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
assert df["is_valid"].all()
assert (df["ret_code"] == 0).all()
assert df["comment"].isnull().all()
assert df["exception"].isnull().all()
assert df["a"].tolist() == [1, 2]
df["a"] *= 100
df.to_csv(output_path / "test.csv")
return TestTaskPassDatasetAndResultPath
@pytest.fixture
def TestTaskPassDatasetAndResultPathWithKwargs(self, BaseTestTask):
class TestTaskPassDatasetAndResultPath(task.SetValidationTask):
def inputs(self):
return {BaseTestTask: ({"result_path": str(self.result_path / "sub_path")}, {})}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
assert df["is_valid"].all()
assert (df["ret_code"] == 0).all()
assert df["comment"].isnull().all()
assert df["exception"].isnull().all()
assert df["a"].tolist() == [1, 2]
df["a"] *= 100
df.to_csv(output_path / "test.csv")
return TestTaskPassDatasetAndResultPath
@pytest.fixture
def TestTaskPassDatasetAndResultPathWithInputParameters(self, BaseTestTask):
class TestTaskPassDatasetAndResultPath(task.SetValidationTask):
def inputs(self):
return {
BaseTestTask: task.InputParameters(
{}, result_path=str(self.result_path / "sub_path")
)
}
@staticmethod
def validation_function(df, output_path, *args, **kwargs):
assert df["is_valid"].all()
assert (df["ret_code"] == 0).all()
assert df["comment"].isnull().all()
assert df["exception"].isnull().all()
assert df["a"].tolist() == [1, 2]
df["a"] *= 100
df.to_csv(output_path / "test.csv")
return TestTaskPassDatasetAndResultPath
# pirates/economy/SimpleStoreGUI.py
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.ai import HolidayGlobals
from pirates.battle import WeaponGlobals
from pirates.economy import EconomyGlobals
from pirates.economy.EconomyGlobals import ItemType
from pirates.holiday import CatalogHoliday
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui import GuiPanel, RedeemCodeGUI
from pirates.piratesgui import GuiButton, DialogButton
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.pirate import DynamicHuman
from pirates.pirate import Human
from pirates.pirate import HumanDNA
from pirates.piratesgui.TabBar import LeftTab, TabBar
from direct.interval.IntervalGlobal import *
from pirates.makeapirate import ClothingGlobals
from pirates.makeapirate import TattooGlobals
from pirates.piratesgui.BorderFrame import BorderFrame
from pirates.uberdog.UberDogGlobals import InventoryId, InventoryType
from otp.otpbase import OTPGlobals
from otp.otpgui import OTPDialog
from pirates.piratesgui import PDialog
from direct.task import Task
from pirates.piratesbase import Freebooter
from pirates.piratesgui.InventoryItemGui import InventoryItemGui
from pirates.inventory.InventoryGlobals import *
from pirates.uberdog.TradableInventoryBase import InvItem
from pirates.inventory import ItemGlobals, DropGlobals
from pirates.inventory import ItemConstants
from pirates.inventory import InventoryUIStoreContainer
from pirates.pirate import AvatarTypes
from pirates.economy.SimpleStoreItem import *
import random
from math import sin
from math import cos
from math import pi
import time
class SimpleStoreTab(LeftTab):
def __init__(self, tabBar, name, **kw):
optiondefs = (
('modelName', 'general_frame_d', None), ('borderScale', 0.38, None), ('bgBuffer', 0.15, None))
self.defineoptions(kw, optiondefs)
LeftTab.__init__(self, tabBar, name, **kw)
self.initialiseoptions(SimpleStoreTab)
return None
class SimpleStoreTabBar(TabBar):
def hasTabs(self):
return len(self.getOrder()) > 0
def refreshTabs(self):
for x, name in enumerate(self.tabOrder):
tab = self.tabs[name]
tab.reparentTo(self.bParent)
tab.setPos(-0.07, 0, 1.08 - 0.15 * (x + self.offset))
tab.setScale(0.22, 1, 0.235)
self.activeIndex = max(0, min(self.activeIndex, len(self.tabOrder) - 1))
if len(self.tabOrder):
name = self.tabOrder[self.activeIndex]
tab = self.tabs[name]
tab.reparentTo(self.fParent)
tab.setX(-0.08)
tab.setScale(0.23, 1, 0.27)
def makeTab(self, name, **kw):
return SimpleStoreTab(self, name, **kw)
class SimpleStoreColorPicker(BorderFrame):
sizeX = 0.65
sizeZ = 0.25
def __init__(self, store, parentFrame, item, **kw):
optiondefs = (
('state', DGG.DISABLED, None),
('frameSize', (-0.5 * self.sizeX, 0.5 * self.sizeX, -0.5 * self.sizeZ, 0.5 * self.sizeZ), None),
('modelName', 'pir_m_gui_frm_subframe', None),
('imageColorScale', VBase4(0.75, 0.75, 0.9, 0.75), None))
self.defineoptions(kw, optiondefs)
BorderFrame.__init__(self, parent=parentFrame)
self.store = store
self.item = item
self.colorButtons = []
self.selectedColor = None
self._parent = parentFrame
self.setup()
return
def setup(self):
self.setBin('gui-fixed', 1)
offsetx = -0.5 * self.sizeX + 0.12
offsety = 0.5 * self.sizeZ - 0.055
colorSet = colorsNotOwned = range(0, 21)
numcolors = len(colorsNotOwned)
colorsOwned = []
charGui = loader.loadModel('models/gui/char_gui')
for idx in range(0, numcolors):
color = colorSet[idx]
colorButton = GuiButton.GuiButton(parent=self, pos=(offsetx, 0.0, offsety), image=(charGui.find('**/chargui_frame04'), charGui.find('**/chargui_frame04_down'), charGui.find('**/chargui_frame04_over')), image_scale=(0.115, 0.0, 0.088), helpText=PLocalizer.TailorColorStrings.get(color), helpDelay=0, command=self.selectColor)
colorFrame = DirectFrame(parent=colorButton, frameSize=(-0.025, 0.025, -0.025, 0.025))
colorButton['extraArgs'] = [
color]
colorFrame['frameColor'] = ItemConstants.DYE_COLORS[color]
offsetx += 0.07
if idx != 0 and (idx + 1) % 7 == 0:
offsetx = -0.5 * self.sizeX + 0.12
offsety -= 0.07
self.colorButtons.append(colorButton)
if idx in colorsOwned:
colorButton['state'] = DGG.DISABLED
colorButton['image_color'] = Vec4(0.5, 0.5, 0.5, 0.5)
colorFrame.setColorScale(0.5, 0.5, 0.5, 0.5)
def destroy(self):
self._parent = None
for button in self.colorButtons:
button.destroy()
self.colorButtons = []
BorderFrame.destroy(self)
return
def selectColor(self, colorId):
if self.selectedColor:
self.selectedColor.setScale(1.0)
self.selectedColor['image_color'] = VBase4(1, 1, 1, 1)
self.selectedColor['state'] = DGG.NORMAL
self.selectedColor = self.colorButtons[colorId]
self.selectedColor.setScale(1.2)
self.selectedColor['image_color'] = VBase4(1, 1, 0, 1)
self.selectedColor['state'] = DGG.DISABLED
if self.store.previewPirate:
self.item.unapply(self.store.previewPirate, localAvatar.style)
self.item.colorId = colorId
self.item.apply(self.store.previewPirate)
self.store.invContainer.getItem(self.item.uid).setColorId(colorId)
self.store.showClicked()
class SimpleStoreBuyPanelGUI(BorderFrame):
sizeX = 0.65
sizeZ = 0.25
def __init__(self, store, parentFrame, item, purchasable=False, **kw):
optiondefs = (('state', DGG.DISABLED, None), ('frameSize', (-0.5 * self.sizeX, 0.5 * self.sizeX, -0.65 * self.sizeZ, 0.5 * self.sizeZ), None), ('modelName', 'pir_m_gui_frm_subframe', None), ('imageColorScale', VBase4(0.75, 0.75, 0.9, 0.75), None))
self.defineoptions(kw, optiondefs)
BorderFrame.__init__(self, parent=parentFrame)
self.store = store
self.item = item
self.purchasable = purchasable
self.ownedLabel = None
self.qtyLabel = None
self.qtyText = None
self.minusButton = None
self.plusButton = None
self.costLabel = None
self.costText = None
self.buyButton = None
self.setPending(0)
self.setupGui()
return
def setPending(self, pending):
self.pending = pending
taskMgr.remove('simpleStoreBuyPanelPurchasePending')
if pending:
taskMgr.doMethodLater(10.0, self.handlePurchaseTimeout, 'simpleStoreBuyPanelPurchasePending')
def handlePurchaseTimeout(self, task=None):
self.setPending(0)
localAvatar.guiMgr.createWarning(PLocalizer.PurchaseTimeout, PiratesGuiGlobals.TextFG6)
self.updateGui()
def destroy(self):
taskMgr.remove('simpleStoreBuyPanelPurchasePending')
self.store = None
self.item = None
self.purchasable = None
if self.qtyLabel:
self.qtyLabel.destroy()
self.qtyLabel = None
if self.qtyText:
self.qtyText.destroy()
self.qtyText = None
if self.minusButton:
self.minusButton.destroy()
self.minusButton = None
if self.plusButton:
self.plusButton.destroy()
self.plusButton = None
if self.costLabel:
self.costLabel.destroy()
self.costLabel = None
if self.costText:
self.costText.destroy()
self.costText = None
if self.buyButton:
self.buyButton.destroy()
self.buyButton = None
BorderFrame.destroy(self)
return
def setupGui(self):
inventory = base.localAvatar.getInventory()
qtyLimit = ItemGlobals.getStackLimit(self.item.uid)
if not qtyLimit:
qtyLimit = inventory.getStackLimit(self.item.uid)
freeSpace = qtyLimit - self.item.getQuantityInInventory()
stackable = self.item.checkStackable()
if stackable:
qtyMult = max(1, EconomyGlobals.getItemQuantity(self.item.uid))
qtyLimit = ItemGlobals.getStackLimit(self.item.uid)
if not qtyLimit:
qtyLimit = inventory.getStackLimit(self.item.uid)
self.item.quantity = freeSpace
else:
qtyMult = 1
qtyLimit = 10
def plusQuantity():
inventory = base.localAvatar.getInventory()
self.item.quantity += qtyMult
self.updateGui()
def minusQuantity():
inventory = base.localAvatar.getInventory()
self.item.quantity -= qtyMult
self.updateGui()
self.qtyLabel = DirectLabel(parent=self, relief=None, state=DGG.DISABLED, text=PLocalizer.SimpleStoreQty, text_font=PiratesGlobals.getInterfaceFont(), text_scale=PiratesGuiGlobals.TextScaleLarge, text_align=TextNode.ALeft, text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_wordwrap=11, text_pos=(0, 0, 0), pos=(-0.2, 0, 0.015))
self.minusButton = GuiButton.GuiButton(parent=self, text='-', text_fg=PiratesGuiGlobals.TextFG2, text_pos=(0.0, -0.014), text_scale=PiratesGuiGlobals.TextScaleTitleSmall, text_align=TextNode.ACenter, text_shadow=PiratesGuiGlobals.TextShadow, image=GuiButton.GuiButton.blueGenericButton, image_scale=(0.125, 0.36, 0.36), pos=(0.04, 0.0, 0.028), command=minusQuantity)
if self.item.quantity == qtyMult:
self.minusButton['state'] = DGG.DISABLED
self.plusButton = GuiButton.GuiButton(parent=self, text='+', text_fg=PiratesGuiGlobals.TextFG2, text_pos=(0.0, -0.014), text_scale=PiratesGuiGlobals.TextScaleTitleSmall, text_align=TextNode.ACenter, text_shadow=PiratesGuiGlobals.TextShadow, image=GuiButton.GuiButton.blueGenericButton, image_scale=(0.125, 0.36, 0.36), pos=(0.18, 0.0, 0.038), command=plusQuantity)
if self.item.quantity == qtyLimit:
self.plusButton['state'] = DGG.DISABLED
self.qtyText = DirectLabel(parent=self, relief=None, state=DGG.DISABLED, text=str(self.item.quantity), text_font=PiratesGlobals.getInterfaceFont(), text_scale=PiratesGuiGlobals.TextScaleLarge, text_align=TextNode.ARight, text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_wordwrap=11, text_pos=(0, 0, 0), pos=(0.14, 0, 0.015), textMayChange=1)
self.qtyFullText = DirectLabel(parent=self, relief=None, state=DGG.DISABLED, text=PLocalizer.SimpleStoreFull, text_font=PiratesGlobals.getInterfaceFont(), text_scale=PiratesGuiGlobals.TextScaleLarge, text_align=TextNode.ALeft, text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_wordwrap=11, text_pos=(0, 0, 0), pos=(0.17, 0, 0.015), textMayChange=1)
self.qtyFullText.hide()
quantity = self.item.getQuantityInInventory()
if stackable:
freeSpace = qtyLimit - self.item.getQuantityInInventory()
else:
freeSpace = len(localAvatar.getInventory().getFreeLocations(self.item.itemClass, self.item.itemType))
if self.item.quantity > freeSpace:
self.qtyText['text_fg'] = VBase4(1, 0, 0, 1)
self.ownedLabel = DirectLabel(parent=self, relief=None, state=DGG.DISABLED, text=PLocalizer.SimpleStoreOwned + ' %s' % quantity, text_scale=PiratesGuiGlobals.TextScaleLarge, text_align=TextNode.ALeft, text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_wordwrap=11, text_pos=(0, 0, 0.0), pos=(-0.2, 0, 0.08), text_font=PiratesGlobals.getInterfaceFont(), textMayChange=1)
totalCost = int(self.item.cost * qtyMult) * self.item.quantity / qtyMult
costText = PLocalizer.ShopFree if self.item.cost == 0 else str(totalCost)
self.costLabel = DirectLabel(parent=self, relief=None, state=DGG.DISABLED, image=self.store.CoinImage, image_scale=0.15, image_pos=Vec3(0.38, 0, 0.012), text=PLocalizer.SimpleStoreCost, text_scale=PiratesGuiGlobals.TextScaleLarge, text_align=TextNode.ALeft, text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_wordwrap=11, text_pos=(0, 0, 0.0), pos=(-0.2, 0, -0.05), text_font=PiratesGlobals.getInterfaceFont(), textMayChange=1)
self.costText = DirectLabel(parent=self, relief=None, state=DGG.DISABLED, text=str(costText), text_scale=PiratesGuiGlobals.TextScaleLarge, text_align=TextNode.ARight, text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_wordwrap=11, text_pos=(0, 0, 0.0), pos=(0.14, 0, -0.05), text_font=PiratesGlobals.getInterfaceFont())
totalMoney = localAvatar.getInventory().getGoldInPocket()
if totalCost > totalMoney:
self.costText['text_fg'] = VBase4(1, 0, 0, 1)
self.buyButton = GuiButton.GuiButton(parent=self, text=PLocalizer.PurchaseCommit, text_fg=PiratesGuiGlobals.TextFG2, text_pos=(0.0, -0.014), text_scale=PiratesGuiGlobals.TextScaleLarge, text_align=TextNode.ACenter, text_shadow=PiratesGuiGlobals.TextShadow, image=GuiButton.GuiButton.blueGenericButton, image_scale=(0.6, 0.6, 0.6), pos=(0.0, 0.0, -0.115), command=self.store.handleBuyItem)
if not self.purchasable:
self.buyButton.hide()
self.updateGui()
self.store.npc.resumeShopping()
return
def updateGui(self):
if self.item == None:
return
if self.item.quantity < 0:
self.item.quantity = 0
inventory = base.localAvatar.getInventory()
quantity = self.item.getQuantityInInventory()
inventory = base.localAvatar.getInventory()
qtyLimit = ItemGlobals.getStackLimit(self.item.uid)
if not qtyLimit:
qtyLimit = inventory.getStackLimit(self.item.uid)
freeSpace = None
stackable = self.item.checkStackable()
if stackable:
freeSpace = qtyLimit - self.item.getQuantityInInventory()
else:
freeSpace = len(localAvatar.getInventory().getFreeLocations(self.item.itemClass, self.item.itemType))
if self.item.quantity > freeSpace:
self.item.quantity = freeSpace
if stackable:
qtyMult = max(1, EconomyGlobals.getItemQuantity(self.item.uid))
qtyLimit = ItemGlobals.getStackLimit(self.item.uid)
if not qtyLimit:
qtyLimit = inventory.getStackLimit(self.item.uid)
self.ownedLabel['text'] = PLocalizer.SimpleStoreOwned + ' %s / %s' % (quantity, qtyLimit)
self.qtyFullText['text'] = PLocalizer.SimpleStoreFull
else:
qtyMult = 1
qtyLimit = 10
self.ownedLabel['text'] = PLocalizer.SimpleStoreOwned + ' %s' % quantity
self.qtyFullText['text'] = PLocalizer.SimpleStoreFullNonStack
full = 0
self.plusButton['state'] = DGG.NORMAL
self.plusButton.show()
self.buyButton['state'] = DGG.NORMAL
if self.item.quantity >= freeSpace:
full = 1
self.plusButton['state'] = DGG.DISABLED
self.plusButton.hide()
self.minusButton['state'] = DGG.NORMAL
self.minusButton.show()
if self.item.quantity <= 0:
self.minusButton['state'] = DGG.DISABLED
self.minusButton.hide()
if self.item.quantity > freeSpace:
self.qtyText['text_fg'] = VBase4(1, 0, 0, 1)
else:
self.qtyText['text_fg'] = VBase4(1, 1, 1, 1)
if full:
self.qtyFullText.show()
else:
self.qtyFullText.hide()
self.qtyText['text'] = '%s' % self.item.quantity
if self.item.cost:
totalCost = int(self.item.cost * qtyMult) * self.item.quantity / qtyMult
self.costText['text'] = str(totalCost)
totalMoney = localAvatar.getInventory().getGoldInPocket()
if totalCost > totalMoney:
self.costText['text_fg'] = VBase4(1, 0, 0, 1)
else:
self.costText['text_fg'] = VBase4(1, 1, 1, 1)
if self.pending:
self.buyButton['state'] = DGG.DISABLED
self.buyButton['text'] = PLocalizer.PurchasePending
elif self.item.quantity == 0:
self.buyButton['state'] = DGG.DISABLED
self.buyButton['text'] = PLocalizer.PurchaseCommit
else:
self.buyButton['state'] = DGG.NORMAL
self.buyButton['text'] = PLocalizer.PurchaseCommit
return
class SimpleStoreGUI(DirectFrame):
notify = directNotify.newCategory('SimpleStoreGUI')
guiPos = (0.625, 0.0, -0.05)
guiWidth = 1.25
guiHeight = 1.35
buyPanelXFactor = -0.182
buyPanelZFactor = -0.375
storeIconName = None
holidayIdList = []
itemGlobalsClasses = {
InventoryType.ItemTypeClothing: SimpleClothingItem,
InventoryType.ItemTypeJewelry: SimpleJewelryItem,
InventoryType.ItemTypeTattoo: SimpleTattooItem,
InventoryType.ItemTypeWeapon: SimpleWeaponItem,
InventoryType.ItemTypeConsumable: SimpleConsumableItem,
ItemType.DAGGERAMMO: SimpleAmmoItem,
ItemType.PISTOLAMMO: SimpleAmmoItem,
ItemType.CANNONAMMO: SimpleAmmoItem,
ItemType.GRENADEAMMO: SimpleAmmoItem,
ItemType.PISTOL_POUCH: SimplePouchItem,
ItemType.DAGGER_POUCH: SimplePouchItem,
ItemType.GRENADE_POUCH: SimplePouchItem,
ItemType.CANNON_POUCH: SimplePouchItem,
ItemType.FISHING_POUCH: SimplePouchItem,
ItemType.FISHING_LURE: SimpleFishingLureItem}
def __init__(self, storeName, npc=None, shopId=None, **kw):
optiondefs = (
('relief', None, None), ('frameSize', (-self.guiWidth / 2, self.guiWidth / 2, -self.guiHeight / 2, self.guiHeight / 2), None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, None, **kw)
self.confirmDialog = None
self.rollInItem = None
self.rollInItemTask = None
self.clickedItem = None
self.previewItem = None
self.buyPanelItem = None
self.camIval = None
self.initialiseoptions(SimpleStoreGUI)
self.npc = npc
self.storeName = storeName
self.shopId = shopId
self.pvpMode = 0
if shopId == PiratesGlobals.PRIVATEER_HATS:
self.pvpMode = 1
self.paid = Freebooter.getPaidStatus(localAvatar.getDoId())
self.previewPirate = None
if localAvatar.gameFSM.camIval is not None:
if localAvatar.gameFSM.camIval.isPlaying():
localAvatar.gameFSM.camIval.finish()
self.initialCamPos = camera.getPos()
self.initialCamHpr = camera.getHpr()
self.initialPirateH = 0
#Generate a new input parameter vector for the predator prey and grass model. Population max 5000, Energy gains max 200, reproduction chance max 0.25
def generate_parameter_vector(lim):
new = [0]*len(lim)
for i in range(len(lim)):
if type(lim[i][0])==int:
new[i] = str(int(random.uniform(lim[i][0],lim[i][1])))
else:
new[i] = str(round(random.uniform(lim[i][0],lim[i][1]),6))
return new
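#Example (hypothetical bounds for the eight inputs; each entry of lim is a (low, high) pair):
#  lim = [(0, 5000), (0, 5000), (0, 200), (0, 200), (0.0, 0.25), (0.0, 0.25), (0, 50), (0, 50)]
#  vec = generate_parameter_vector(lim)  # e.g. ['1234', '87', '150', '12', '0.113742', ...]
#Integer bounds yield integer-valued strings; float bounds are rounded to 6 decimals.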
#Class for batch simulation of randomly generated inputs
class Batch(threading.Thread):
def __init__(self, d, q, sp, t, st):
threading.Thread.__init__(self)
self.device = d
self.queue = q
self.save = sp
self.start_time = t
self.fitness_type = st
def run(self):
winexe = ""
linexe = ""
if self.fitness_type=="continuous":
winexe = "simulation_executables\\PreyPredator_continuous_fitness.exe "
linexe = "./simulation_executables/PreyPredator_console_continuous_fitness "
else:
winexe = "simulation_executables\\PreyPredator_discrete_fitness.exe "
linexe = "./simulation_executables/PreyPredator_console_discrete_fitness "
run_batch_queue(self.device, self.queue, self.save, self.start_time, winexe, linexe)
#Look through queue of randomly generated inputs and simulate a new input on the device assigned to the current thread
def run_batch_queue(d,q,s,start,winexe,linexe):
global batch_queue, batch_queue_lock, exit_batch_queue, batch_times
#Ensure that all required files exist
if not os.path.exists(s+str(d)):
os.makedirs(s+str(d))
if not os.path.exists(s+str(d)+"/0.xml"):
open(s+str(d)+"/0.xml", "w").close()
#Batch simulate while there still exist unevaluated inputs
while exit_batch_queue==0:
batch_queue_lock.acquire()
if not batch_queue.empty():
x = q.get()
batch_queue_lock.release()
#Generate initial state from initial parameter vector, then simulate
if os.name=='nt':
genComm = "simulation_executables\\xmlGenEx3.exe "+s+str(d)+"/0.xml "+str(x[0])+" "+str(x[1])+" "+str(x[2])+" "+str(x[3])+" "+str(x[4])+" "+str(x[5])+" "+str(x[6])+" "+str(x[7])
command = winexe+s+str(d)+"/0.xml 1000 "+str(d)
else:
genComm = "./simulation_executables/xmlGen "+s+str(d)+"/0.xml "+str(x[0])+" "+str(x[1])+" "+str(x[2])+" "+str(x[3])+" "+str(x[4])+" "+str(x[5])+" "+str(x[6])+" "+str(x[7])
command = linexe+s+str(d)+"/0.xml 1000 "+str(d)
#Assumes model is one which appends results to a file
os.system(genComm)
os.system(command)
else:
batch_queue_lock.release()
#Measure and update the total simulation time taken by the current thread
current_time = datetime.datetime.now()
time_taken = current_time-start
batch_times[d] = float(((time_taken.seconds*1000000)+time_taken.microseconds)/1000000)
return
####################################### TRAIN SURROGATES ###################
def train_regression_surrogate_model(data_sizes,training,start_time,maxtime,bloc,loc,m, sm=-1):
trained_models = 0
timer = 0
index = 0
name = ""
if m==1:
name = "primary"
index = 0
elif m==2:
name = "secondary"
index = 1
elif m==3:
name = "tertiary"
index = 2
else:
print("invalid model, exiting")
return trained_models
print("Training regression networks for",name,"fitness prediction with max time",maxtime,"minutes")
while (not trained_models==sm and timer<maxtime):
for i in data_sizes:
training_datasets = []
bsd = open(bloc+"/batch_simulation_data.csv","r")
data = bsd.readlines()
bsd.close()
generate_training_dataset(data,training,i,training_datasets,loc)
data = pickle.load(open(loc+"data/"+str(i)+"/all_examples.p","rb"))
X,Y = get_xy(data)
train_regressor(X,[[a[index],a[-1]] for a in Y],name,training,False,trained_models,i,loc)
ct = datetime.datetime.now()
ctt = ct-start_time
timer = int(ctt.seconds/60)
trained_models += 1
if timer>=maxtime:
print("Exceeded specified time limit,",maxtime,"minutes. Exiting early")
break
return trained_models, ct
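#Hypothetical call: train regressors for the secondary fitness (m=2) over two
#data sizes with a 60 minute budget, following the signature above:
#  n_trained, finished_at = train_regression_surrogate_model(
#      [1000, 5000], [0.5, 0.6, 0.7], datetime.datetime.now(), 60,
#      "batch_results", "surrogates/", 2)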
def generate_training_dataset(data,test_sizes,max_data,td,loc,c=0):
SP = loc+"/data/"+str(max_data)+"/"
if not os.path.exists(loc+"/data/"):
os.mkdir(loc+"/data/")
if not os.path.exists(SP):
os.mkdir(SP)
random.shuffle(data)
data = data[:max_data]
lcount=0
positive_examples = []
negative_examples = []
positive_response = 0
if c=="continuous":
positive_response = 1050
else:
positive_response = 1
for li in data:
sp = li.rstrip().split(",")
if sp[0]!="Seed" and sp[0]!="time_taken":
params = np.zeros(8)
fitnesses = [0,0,0]
for i in range(len(params)):
params[i] = float(sp[i+1])
fitnesses[0] = float(sp[10])
fitnesses[1] = float(sp[11])
fitnesses[2] = float(sp[12])
X = [params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7]]
if fitnesses[0]==positive_response:
positive_examples.append([X,fitnesses])
else:
negative_examples.append([X,fitnesses])
lcount+=1
else:
lcount+=1
pickle.dump(positive_examples,open(SP+"/positive_examples.p","wb"))
pickle.dump(negative_examples,open(SP+"/negative_examples.p","wb"))
td.append(data)
pickle.dump(data,open(SP+"/all_examples.p","wb"))
return
def get_xy(data):
x = []
y = []
count = 0
for l in data:
sp = l.rstrip().split(",")
x.append([float(sp[1]),float(sp[2]),float(sp[3]),float(sp[4]),float(sp[5]),float(sp[6]),float(sp[7]),float(sp[8]),count])
y.append([float(sp[10]),float(sp[11]),float(sp[12]),count])
count+=1
return x,y
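#Assumed row layout, inferred from the indices used above: sp[0] is the seed (or
#a header tag), sp[1]..sp[8] are the eight model parameters, sp[9] is skipped
#(e.g. time taken), and sp[10]..sp[12] are the three fitness responses.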
def train_regressor(X,Y,name,train_sizes,prin,x,size,loc):
SP = loc+str(size)+"/"
if not os.path.exists(SP):
os.mkdir(SP)
SP = SP+name+"/"
if not os.path.exists(SP):
os.mkdir(SP)
hidden_layers = (100,)*2
scaler = StandardScaler()
ds = []
max_score = [0,-99]
current_score = [0,-99]
for ts in train_sizes:
if prin:
print(name,"training percentage",ts)
print()
print(name,"Random data sampling, training data size:",ts)
testing_size = (1.0-ts)
trainX,testX,trainY,testY = train_test_split(X,Y, test_size=testing_size)
trainX = [m[:-1] for m in trainX]
trainY = [n[:-1] for n in trainY]
testX = [o[:-1] for o in testX]
testY = [q[:-1] for q in testY]
if prin:
print("Training (and validation) data:",len(trainX),"Testing data:",len(testX))
if len(trainX)>0 and len(testX)>0:
if len(trainY[0])==1:
trainY = np.array(trainY).ravel()
scaler = scaler.fit(trainX)
trainX = scaler.transform(trainX)
testX = scaler.transform(testX)
mlp = MLPRegressor(hidden_layer_sizes=hidden_layers, verbose=False, tol=0.000000001, early_stopping=True)
mlp.fit(trainX,trainY)
test_score = mlp.score(testX,testY)
current_score = max_score
if test_score>=max_score[1]:
if prin:
print("New best network for ",name," Fitness discovered by random sampling at:",ts,"scoring:",test_score)
pickle.dump(mlp,open(SP+"/best_multiple_"+name+".p","wb"))
pickle.dump(scaler,open(SP+"/scaler_multiple_"+name+".p","wb"))
max_score = [ts,test_score]
if test_score==1.0:
if prin:
print("New best network for ",name," Fitness with accuracy==1.0 discovered by random sampling at:",ts,"scoring:",test_score)
pickle.dump(mlp,open(SP+"/multiple_"+name+"_1_"+str(ts)+".p","wb"))
pickle.dump(scaler,open(SP+"/scaler_multiple_"+name+"_1_"+str(ts)+".p","wb"))
max_score = current_score
ds.append(["random",ts,len(trainX),test_score,max_score])
if prin:
print()
if not os.path.exists(SP+"surrogate_training_data"+str(x)+".csv"):
surrogate_training_file = open(SP+"surrogate_training_data"+str(x)+".csv","w")
else:
surrogate_training_file = open(SP+"surrogate_training_data"+str(x)+".csv","a")
for a in ds:
surrogate_training_file.write(str(a)+"\n")
surrogate_training_file.close()
return ds
def train_classifier_surrogate(data_sizes,training,start_time,maxtime,bloc,loc, sm=-1):
trained_models = 0
timer = 0
print("Training classifier network for discrete primary fitness prediction with max time",maxtime,"minutes")
while (not trained_models==sm and timer<maxtime):
for i in data_sizes:
training_datasets = []
bsd = open(bloc+"/batch_simulation_data.csv","r")
data = bsd.readlines()
bsd.close()
generate_training_dataset(data,training,i,training_datasets,loc,1)
data = pickle.load(open(loc+"data/"+str(i)+"/all_examples.p","rb"))
X,Y = get_xy(data)
train_classifier(X,[[a[0],a[-1]] for a in Y],"primary","undersample",training,"sampling_before",False,trained_models,loc)
ct = datetime.datetime.now()
ctt = ct-start_time
timer = int(ctt.seconds/60)
trained_models += 1
if timer>=maxtime:
print("Exceeded specified time limit,",maxtime,"minutes. Exiting early")
break
return trained_models, ct
def sample_data(X,Y,samp):
sampled_X = []
sampled_Y = []
if samp=="undersample":
rus = RandomUnderSampler(random_state=0)
modY = [a[0] for a in Y]
if 1.0 in modY and 0.0 in modY:
sx, sy = rus.fit_sample(X,modY)
for i in sx:
for j in Y:
if i[-1]==j[-1]:
sampled_X.append(i)
sampled_Y.append(j)
# else:
# print("Data set does not contain both negative and positive examples.")
# print(modY)
elif samp=="oversample":
ros = RandomOverSampler(random_state=0)
modY = [a[0] for a in Y]
if 1.0 in modY and 0.0 in modY:
sx, sy = ros.fit_sample(X,modY)
for i in sx:
for j in Y:
if i[-1]==j[-1]:
sampled_X.append(i)
sampled_Y.append(j)
# else:
# print("Data set does not contain both negative and positive examples.")
# print(modY)
else:
sampled_X = X
sampled_Y = Y
return sampled_X,sampled_Y
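#Usage sketch: the last element of each X and Y row is a shared row index, which
#is how the resampled inputs are matched back to their multi-part labels:
#  bal_X, bal_Y = sample_data(X, [[a[0], a[-1]] for a in Y], "undersample")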
def train_classifier(X,Y,name,samp,train_sizes,samptype,prin,x,loc):
SP = loc+name+"/"
if not os.path.exists(SP):
os.mkdir(SP)
SP = SP+samptype+"/"
if not os.path.exists(SP):
os.mkdir(SP)
SP = SP+samp+"/"
if not os.path.exists(SP):
os.mkdir(SP)
hidden_layers = (100,)*2
scaler = StandardScaler()
ds = []
max_score = [0,-99]
current_score = [0,-99]
for ts in train_sizes:
if prin:
print()
print(samp," data sampling, training size:",ts)
sampled_X = None
sampled_Y = None
trainX = None
trainY = None
testX = None
testY = None
if samptype=="sampling_before":
if prin:
print(name,"sampling BEFORE splitting data")
sampled_X,sampled_Y = sample_data(X,Y,samp)
testing_size = (1.0-ts)
trainX,testX,trainY,testY = train_test_split(sampled_X,sampled_Y, test_size=testing_size)
trainX = [m[:-1] for m in trainX]
trainY = [n[:-1] for n in trainY]
# elif samptype=="sampling_after":
# if prin:
# print("sampling AFTER splitting data")
# testing_size = (1.0-ts)
# trainX,testX,trainY,testY = train_test_split(X,Y, test_size=testing_size)
# sampled_X,sampled_Y = sample_data(trainX,trainY,samp)
# trainX = [m[:-1] for m in sampled_X]
# trainY = [n[:-1] for n in sampled_Y]
testX = [o[:-1] for o in testX]
testY = [q[:-1] for q in testY]
tpos = 0
tneg = 0
for i in trainY:
if i[0]==1.0:
tpos+=1
else:
tneg+=1
if prin:
print("training positive examples:",tpos,", negative examples:",tneg)
epos = 0
eneg = 0
for j in testY:
if j[0]==1.0:
epos+=1
else:
eneg+=1
if prin:
print("testing positive examples:",epos,", negative examples:",eneg)
print("Training (and validation) data:",len(trainX),"Testing data:",len(testX))
if len(trainX)>1 and len(testX)>0:
trainY = np.array(trainY).ravel()
scaler = scaler.fit(trainX)
trainX = scaler.transform(trainX)
testX = scaler.transform(testX)
mlp = MLPClassifier(hidden_layer_sizes=hidden_layers, verbose=False, tol=0.000000001, early_stopping=True)
mlp.fit(trainX,trainY)
test_score = mlp.score(testX,testY)
current_score = max_score
if test_score>=max_score[1]:
if prin:
print("New best network for ",name," Fitness discovered by",samp,"at:",ts,"scoring:",test_score)
pickle.dump(mlp,open(SP+"/best_multiple_"+name+".p","wb"))
pickle.dump(scaler,open(SP+"/scaler_multiple_"+name+".p","wb"))
max_score = [ts,test_score]
if test_score==1.0:
if prin:
print("New best network for ",name," Fitness with accuracy==1.0 discovered by",samp,"at:",ts,"scoring:",test_score)
pickle.dump(mlp,open(SP+"/multiple_"+name+"_1_"+str(ts)+".p","wb"))
pickle.dump(scaler,open(SP+"/scaler_multiple_"+name+"_1_"+str(ts)+".p","wb"))
max_score = current_score
ds.append([samp,ts,len(trainX),test_score,max_score])
if prin:
print()
if not os.path.exists(SP+"surrogate_training_data"+str(x)+".csv"):
surrogate_training_file = open(SP+"surrogate_training_data"+str(x)+".csv","w")
else:
surrogate_training_file = open(SP+"surrogate_training_data"+str(x)+".csv","a")
for a in ds:
surrogate_training_file.write(str(a)+"\n")
surrogate_training_file.close()
return ds
######################################## SURROGATE GA ##############################################
def surrogate_ga(x,sizes,m,l,g,time,mloc,loc,c,s=1):
primary_loc = ""
scaler_loc = ""
if c=="continuous":
primary_loc = "/primary/best_multiple_primary.p"
scaler_loc = "/primary/scaler_multiple_primary.p"
else:
primary_loc = "/../primary/sampling_before/undersample/best_multiple_primary.p"
scaler_loc = "/../primary/sampling_before/undersample/scaler_multiple_primary.p"
timer = 0
start_time = datetime.datetime.now()
complete = False
while (timer<time and not complete):
for j in sizes:
for i in range(x):
nn0 = pickle.load(open(mloc+str(j)+primary_loc,"rb"))
nn1 = pickle.load(open(mloc+str(j)+"/secondary/best_multiple_secondary.p","rb"))
nn2 = pickle.load(open(mloc+str(j)+"/tertiary/best_multiple_tertiary.p","rb"))
scaler0 = pickle.load(open(mloc+str(j)+scaler_loc,"rb"))
scaler1 = pickle.load(open(mloc+str(j)+"/secondary/scaler_multiple_secondary.p","rb"))
scaler2 = pickle.load(open(mloc+str(j)+"/tertiary/scaler_multiple_tertiary.p","rb"))
run_surrogate_ga(m,l,g,time,str(j)+"/",[nn0,nn1,nn2],[scaler0,scaler1,scaler2],c,loc)
ct = datetime.datetime.now()
ctt = ct-start_time
timer = int(ctt.seconds/60)
if timer>=time:
print("Exceeded specified time limit,",time,"minutes. Exiting early")
break
complete = True
return ct
def run_surrogate_ga(m,l,g,time,name,networks,scalers,c,loc,s=1):
global curr_pop, gen, s1, s2, s3, s4, population, toolbox, logbook
if not os.path.exists(loc+name):
os.mkdir(loc+name)
if not os.path.exists(loc+name+"runs/"):
os.mkdir(loc+name+"runs/")
loc = loc+name
runloc = loc+"runs/"
curr_pop = 0
#Initialise GA tools
creator.create("Fitness", base.Fitness, weights=(1.0,))
if c=="continuous":
creator.create("Fitnesses", base.Fitness, weights=(0.001,0.01,-0.00001))
else:
creator.create("Fitnesses", base.Fitness, weights=(1.0,0.01,-0.00001))
creator.create("Individual", list, fitness=creator.Fitness, fitnesses=creator.Fitnesses)
toolbox = base.Toolbox()
toolbox.register("select_parents", select_parents, toolbox)
toolbox.register("mutate", mutate, toolbox)
toolbox.register("mate", mate, 0.5)
toolbox.register("individual", initialise_individual, creator.Individual)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
#Initialise Logging tools
s1 = tools.Statistics(lambda ind: ind.fitness.values[0])
# panoptic_benchmark/structures/segmentation_mask.py
import cv2
import copy
import torch
import numpy as np
from panoptic_benchmark.layers.misc import interpolate
import pycocotools.mask as mask_utils
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
""" ABSTRACT
Segmentations come in either:
1) Binary masks
2) Polygons
Binary masks can be represented in a contiguous array
and operations can be carried out more efficiently,
therefore BinaryMaskList handles them together.
Polygons are handled separately for each instance,
by PolygonInstance and instances are handled by
PolygonList.
SegmentationList is supposed to represent both,
therefore it wraps the functions of BinaryMaskList
and PolygonList to make it transparent.
"""
class BinaryMaskList(object):
"""
This class handles binary masks for all objects in the image
"""
def __init__(self, masks, size):
"""
Arguments:
masks: Either torch.tensor of [num_instances, H, W]
or list of torch.tensors of [H, W] with num_instances elems,
or RLE (Run Length Encoding) - interpreted as list of dicts,
or BinaryMaskList.
size: absolute image size, width first
After initialization, a hard copy will be made, to leave the
initializing source data intact.
"""
if isinstance(masks, torch.Tensor):
# The raw data representation is passed as argument
masks = masks.clone()
elif isinstance(masks, (list, tuple)):
if isinstance(masks[0], torch.Tensor):
masks = torch.stack(masks, dim=0).clone()  # stack per-instance [H, W] masks into [n, H, W]
elif isinstance(masks[0], dict) and "counts" in masks[0]:
# RLE interpretation
assert all(
[(size[1], size[0]) == tuple(inst["size"]) for inst in masks]
) # in RLE, height comes first in "size"
masks = mask_utils.decode(masks) # [h, w, n]
masks = torch.tensor(masks).permute(2, 0, 1) # [n, h, w]
else:
raise RuntimeError(
"Type of `masks[0]` could not be interpreted: %s" % type(masks)
)
elif isinstance(masks, BinaryMaskList):
# just hard copy the BinaryMaskList instance's underlying data
masks = masks.masks.clone()
else:
raise RuntimeError(
"Type of `masks` argument could not be interpreted:%s" % type(masks)
)
if len(masks.shape) == 2:
# if only a single instance mask is passed
masks = masks[None]
assert len(masks.shape) == 3
assert masks.shape[1] == size[1], "%s != %s" % (masks.shape[1], size[1])
assert masks.shape[2] == size[0], "%s != %s" % (masks.shape[2], size[0])
self.masks = masks
self.size = tuple(size)
def transpose(self, method):
dim = 1 if method == FLIP_TOP_BOTTOM else 2
flipped_masks = self.masks.flip(dim)
return BinaryMaskList(flipped_masks, self.size)
def crop(self, box):
assert isinstance(box, (list, tuple, torch.Tensor)), str(type(box))
# box is assumed to be xyxy
current_width, current_height = self.size
xmin, ymin, xmax, ymax = [round(float(b)) for b in box]
assert xmin <= xmax and ymin <= ymax, str(box)
xmin = min(max(xmin, 0), current_width - 1)
ymin = min(max(ymin, 0), current_height - 1)
xmax = min(max(xmax, 0), current_width)
ymax = min(max(ymax, 0), current_height)
xmax = max(xmax, xmin + 1)
ymax = max(ymax, ymin + 1)
width, height = xmax - xmin, ymax - ymin
cropped_masks = self.masks[:, ymin:ymax, xmin:xmax]
cropped_size = width, height
return BinaryMaskList(cropped_masks, cropped_size)
def resize(self, size):
try:
iter(size)
except TypeError:
assert isinstance(size, (int, float))
size = size, size
width, height = map(int, size)
assert width > 0
assert height > 0
# Height comes first here!
resized_masks = torch.nn.functional.interpolate(
input=self.masks[None].float(),
size=(height, width),
mode="bilinear",
align_corners=False,
)[0].type_as(self.masks)
resized_size = width, height
return BinaryMaskList(resized_masks, resized_size)
def convert_to_polygon(self):
contours = self._findContours()
return PolygonList(contours, self.size)
def to(self, *args, **kwargs):
return self
def _findContours(self):
contours = []
masks = self.masks.detach().numpy()
for mask in masks:
mask = cv2.UMat(mask)
contour, hierarchy = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
)
reshaped_contour = []
for entity in contour:
assert len(entity.shape) == 3
assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
reshaped_contour.append(entity.reshape(-1).tolist())
contours.append(reshaped_contour)
return contours
def __len__(self):
return len(self.masks)
def __getitem__(self, index):
# Probably it can cause some overhead
# but preserves consistency
masks = self.masks[index].clone()
return BinaryMaskList(masks, self.size)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.masks))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
class PolygonInstance(object):
"""
This class holds a set of polygons that represents a single instance
of an object mask. The object can be represented as a set of
polygons
"""
def __init__(self, polygons, size):
"""
Arguments:
a list of lists of numbers.
The first level refers to all the polygons that compose the
object, and the second level to the polygon coordinates.
"""
if isinstance(polygons, (list, tuple)):
valid_polygons = []
for p in polygons:
p = torch.as_tensor(p, dtype=torch.float32)
if len(p) >= 6: # 3 * 2 coordinates
valid_polygons.append(p)
polygons = valid_polygons
elif isinstance(polygons, PolygonInstance):
polygons = copy.copy(polygons.polygons)
else:
raise RuntimeError(
"Type of argument `polygons` is not allowed:%s" % (type(polygons))
)
""" This crashes the training way too many times...
for p in polygons:
assert p[::2].min() >= 0
assert p[::2].max() < size[0]
assert p[1::2].min() >= 0
assert p[1::2].max() < size[1]
"""
self.polygons = polygons
self.size = tuple(size)
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for poly in self.polygons:
p = poly.clone()
TO_REMOVE = 1
p[idx::2] = dim - poly[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return PolygonInstance(flipped_polygons, size=self.size)
def crop(self, box):
assert isinstance(box, (list, tuple, torch.Tensor)), str(type(box))
# box is assumed to be xyxy
current_width, current_height = self.size
xmin, ymin, xmax, ymax = map(float, box)
assert xmin <= xmax and ymin <= ymax, str(box)
xmin = min(max(xmin, 0), current_width - 1)
ymin = min(max(ymin, 0), current_height - 1)
xmax = min(max(xmax, 0), current_width)
ymax = min(max(ymax, 0), current_height)
xmax = max(xmax, xmin + 1)
ymax = max(ymax, ymin + 1)
w, h = xmax - xmin, ymax - ymin
cropped_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] = p[0::2] - xmin # .clamp(min=0, max=w)
p[1::2] = p[1::2] - ymin # .clamp(min=0, max=h)
cropped_polygons.append(p)
return PolygonInstance(cropped_polygons, size=(w, h))
def resize(self, size):
try:
iter(size)
except TypeError:
assert isinstance(size, (int, float))
size = size, size
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.polygons]
return PolygonInstance(scaled_polys, size)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return PolygonInstance(scaled_polygons, size=size)
def convert_to_binarymask(self):
width, height = self.size
# formatting for COCO PythonAPI
polygons = [p.numpy() for p in self.polygons]
rles = mask_utils.frPyObjects(polygons, height, width)
rle = mask_utils.merge(rles)
mask = mask_utils.decode(rle)
mask = torch.from_numpy(mask)
return mask
def __len__(self):
return len(self.polygons)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_groups={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
return s
class PolygonList(object):
"""
This class handles PolygonInstances for all objects in the image
"""
def __init__(self, polygons, size):
"""
Arguments:
polygons:
a list of list of lists of numbers. The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
object, and the third level to the polygon coordinates.
OR
a list of PolygonInstances.
OR
a PolygonList
size: absolute image size
"""
if isinstance(polygons, (list, tuple)):
if len(polygons) == 0:
polygons = [[[]]]
if isinstance(polygons[0], (list, tuple)):
assert isinstance(polygons[0][0], (list, tuple)), str(
type(polygons[0][0])
)
else:
assert isinstance(polygons[0], PolygonInstance), str(type(polygons[0]))
elif isinstance(polygons, PolygonList):
size = polygons.size
polygons = polygons.polygons
else:
raise RuntimeError(
"Type of argument `polygons` is not allowed:%s" % (type(polygons))
)
assert isinstance(size, (list, tuple)), str(type(size))
self.polygons = []
for p in polygons:
p = PolygonInstance(p, size)
if len(p) > 0:
self.polygons.append(p)
self.size = tuple(size)
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
for polygon in self.polygons:
flipped_polygons.append(polygon.transpose(method))
return PolygonList(flipped_polygons, size=self.size)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped_polygons = []
for polygon in self.polygons:
cropped_polygons.append(polygon.crop(box))
cropped_size = w, h
return PolygonList(cropped_polygons, cropped_size)
def resize(self, size):
resized_polygons = []
for polygon in self.polygons:
resized_polygons.append(polygon.resize(size))
resized_size = size
return PolygonList(resized_polygons, resized_size)
def to(self, *args, **kwargs):
return self
def convert_to_binarymask(self):
if len(self) > 0:
masks = torch.stack([p.convert_to_binarymask() for p in self.polygons])
else:
size = self.size
masks = torch.empty([0, size[1], size[0]], dtype=torch.uint8)
return BinaryMaskList(masks, size=self.size)
def __len__(self):
return len(self.polygons)
def __getitem__(self, item):
if isinstance(item, int):
            selected_polygons = [self.polygons[item]]
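# Usage sketch (hedged; assumes torch and pycocotools are importable and that
# PolygonInstance accepts flat [x0, y0, x1, y1, ...] coordinate lists):
#
# >>> square = [[0.0, 0.0, 3.0, 0.0, 3.0, 3.0, 0.0, 3.0]]
# >>> inst = PolygonInstance(square, size=(4, 4))
# >>> inst.convert_to_binarymask().shape  # (height, width) uint8 tensor
# torch.Size([4, 4])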
<filename>BesselHelmert.py
###################
# A "plugin like" implementation of the Bessel Helmert tab for trui
# simlk, March 2013
###################
# Copyright (c) 2013, National Geodata Agency, Denmark
# (Geodatastyrelsen), <EMAIL>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from WidgetBase import WidgetBase
from Tab_bshlm import Ui_tab_bshlm
import TrLib
import Minilabel
from TrLib_constants import *
import WidgetUtils
# Default systems pr. region:
BSHLM_SYS_DK = ["utm32_etrs89", "utm33_etrs89", "geo_etrs89", "geo_ed50", "fcs", "dktm1", "dktm2", "dktm3", "dktm4", "kp2000j",
"kp2000s", "kp2000b", "s34j", "s34s", "s34b"]
BSHLM_SYS_FO = ["fotm", "fk89", "utm28_wgs84", "geo_wgs84"]
BSHLM_SYS_GR = ["geo_gr96", "utm24_gr96", "utm25_gr96", "utm26_gr96"]
BSHLM_SYS_WORLD = ["geo_wgs84", "mrc0_wgs84", "webmrc"]
BSHLM_SYS_DEFAULT = ["utm32_wgs84", "geo_wgs84", "mrc0_wgs84"]
BSHLM_SYSTEMS = {"default": BSHLM_SYS_DEFAULT,
REGION_DK: BSHLM_SYS_DK,
REGION_FO: BSHLM_SYS_FO,
REGION_GR: BSHLM_SYS_GR,
REGION_WORLD: BSHLM_SYS_WORLD
}
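# Lookup sketch: handleRegionChange() below resolves the systems for the
# combo box with a fallback to the "default" entry, roughly:
#
#     systems = BSHLM_SYSTEMS.get(region, BSHLM_SYS_DEFAULT)
#
# so an unrecognised region still yields ["utm32_wgs84", "geo_wgs84", "mrc0_wgs84"].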
class BshlmCache(object):
def __init__(self):
self.is_valid = False # flag which determines whether output fields are valid
self.mlb = None
self.geo_mlb = None
self.ellipsoid = None
self.is_custom = False # flag to turn on when using custom ellipsoid
        self.valid_label = False  # flag to determine if the label is valid
self.axis = -1 # use negative numbers to signal something not valid...
self.flattening = -1
self.a1 = 0
self.a2 = 0
self.d = 0
# flag to use for s34 like systems (8 and 10)
self.proj_weakly_defined = False
        self.mode = 0  # 0 if the output came from mode 1, 1 if it came from mode 2
class BshlmWidget(WidgetBase, Ui_tab_bshlm):
def __init__(self, parent):
WidgetBase.__init__(self, parent)
self.setupUi(self)
#convenient pointers#
self.input_bshlm = [
self.txt_bshlm_x1, self.txt_bshlm_y1, self.txt_bshlm_x2, self.txt_bshlm_y2]
self.output_bshlm_geo = [
self.txt_bshlm_lon1, self.txt_bshlm_lat1, self.txt_bshlm_lon2, self.txt_bshlm_lat2]
# yep - bad naming, first field is the distance output/input
self.output_bshlm_azimuth = [
self.txt_bshlm_distance, self.txt_bshlm_azimuth1, self.txt_bshlm_azimuth2]
self.input_bshlm_azimuth = self.output_bshlm_azimuth[:2]
self.input_labels_bshlm = [self.lbl_bshlm_x, self.lbl_bshlm_y]
self.derived_angular_output = self.output_bshlm_azimuth[1:]
#SETUP BSHLM EVENT HANDLERS#
# flag to set when we want the event handler to just return - e.g. when
# removing a bad item...
self._handle_system_change = True
for field in self.input_bshlm + self.input_bshlm_azimuth:
field.returnPressed.connect(self.doBesselHelmert)
self.cb_bshlm_system.currentIndexChanged.connect(
self.onBshlmSystemChanged)
self.geo_unit = parent.geo_unit
self.geo_unit_derived = parent.geo_unit_derived
self.cache = BshlmCache()
self.ed50_ellipsoid = TrLib.GetEllipsoidParametersFromDatum("ed50")
#TAB BESSEL HELMERT#
@pyqtSignature('') # prevents actions being handled twice
def on_chb_bshlm_custom_ellipsoid_clicked(self):
is_custom = self.chb_bshlm_custom_ellipsoid.isChecked()
if is_custom:
self.cb_bshlm_system.setEnabled(False)
self.txt_bshlm_ellipsoid.setText("custom")
self.txt_bshlm_axis.setEnabled(True)
self.txt_bshlm_flattening.setEnabled(True)
labels = Minilabel.getSystemLabels("geo_wgs84") # a dummy label
self.lbl_bshlm_description.setText(
"Custom ellipsoid - geographical coordinates")
for i in range(2):
self.input_labels_bshlm[i].setText(labels[i])
self.clearOutput()
else:
self.cb_bshlm_system.setEnabled(True)
self.txt_bshlm_axis.setEnabled(False)
self.txt_bshlm_flattening.setEnabled(False)
self.onBshlmSystemChanged(False)
# will be called on every transformation also in order to set correct info
# text and data also when return isn't pressed.
def onBshlmSystemChanged(self, called_by_index_change=True):
if not self._handle_system_change:
return
self.clearOutput()
self.cache.is_valid = False # signal no valid output YET!
# will only be set to true if nothing goes wrong below....
self.cache.valid_label = False
is_custom = self.chb_bshlm_custom_ellipsoid.isChecked()
self.logBshlm("", clear=True)
if (is_custom):
self.cache.is_custom = True
self.cache.mlb = "custom"
ell_data, msg = WidgetUtils.getInput(
[self.txt_bshlm_axis, self.txt_bshlm_flattening], False)
if len(ell_data) != 2 or ell_data[0] < 0 or ell_data[1] < 0:
self.logBshlm("Bad ellipsoid data:\n%s" % msg, "red")
self.cache.valid_label = False
return
self.cache.axis = ell_data[0]
self.cache.flattening = ell_data[1]
self.cache.valid_label = True
else:
self.cache.is_custom = False
mlb = str(self.cb_bshlm_system.currentText())
if len(mlb) == 0:
return
self.cache.mlb = mlb
text = TrLib.DescribeLabel(mlb)
self.lbl_bshlm_description.setText("%s" % text)
labels = Minilabel.getSystemLabels(mlb)
if labels is not None:
for i in range(2):
self.input_labels_bshlm[i].setText(labels[i])
if Minilabel.isProjWeaklyDefined(mlb):
self.cache.proj_weakly_defined = True
self.logBshlm(
"INFO: distance and azimuths will be calculated in ED50 datum", "blue")
name, a, f = self.ed50_ellipsoid
self.cache.geo_mlb = "geo_ed50"
else:
try:
dtm = TrLib.GetDatum(mlb)
name, a, f = TrLib.GetEllipsoidParametersFromDatum(dtm)
self.cache.geo_mlb = TrLib.Convert2Geo(mlb)
except Exception, msg:
self.logBshlm("Invalid label:\n%s" % msg, "red")
if called_by_index_change: # if called by handler which adds label to list
self._handle_system_change = False
self.cb_bshlm_system.removeItem(
self.cb_bshlm_system.currentIndex())
self.cb_bshlm_system.setEditText(mlb)
self._handle_system_change = True
return
if name is not None:
self.txt_bshlm_ellipsoid.setText(name)
self.txt_bshlm_axis.setText("%.4f m" % a)
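                # Ellipsoids are conventionally quoted by inverse flattening
                # 1/f (roughly 298.257 for WGS84); a true flattening lies in
                # (0, 1), so display 1/f in that case, the raw value otherwise.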
if 0 < f < 1:
sf = 1 / f
else:
sf = f
self.txt_bshlm_flattening.setText("%.8f" % sf)
self.cache.flattening = f
self.cache.axis = a
self.cache.ellipsoid = name
self.cache.valid_label = True
else:
self.logBshlm(
"Invalid input label - unable to set ellipsoid data...", "red")
if called_by_index_change: # if called by handler which adds label to list
self._handle_system_change = False
self.cb_bshlm_system.removeItem(
self.cb_bshlm_system.currentIndex())
self.cb_bshlm_system.setEditText(mlb)
self._handle_system_change = True
self.txt_bshlm_flattening.setText("")
self.txt_bshlm_axis.setText("")
self.cache.valid_label = False
def doBesselHelmert(self):
# Check if we need to update data....
is_custom = self.chb_bshlm_custom_ellipsoid.isChecked()
self.cache.is_valid = False
self.logBshlm("", clear=True)
if is_custom or str(self.cb_bshlm_system.currentText()) != self.cache.mlb:
self.onBshlmSystemChanged(False)
if not self.cache.valid_label:
self.logBshlm("Invalid input label...", "red")
self.clearOutput()
return
is_mode1 = self.rdobt_bshlm_mode1.isChecked()
# Get needed input
mlb = self.cache.mlb
geo_mlb = self.cache.geo_mlb
is_geo_in = is_custom or TrLib.IsGeographic(mlb)
if is_mode1:
coords, msg = WidgetUtils.getInput(
self.input_bshlm, is_geo_in, z_fields=[], angular_unit=self.geo_unit)
if len(coords) != 4:
self.logBshlm("Input coordinate %d not OK.\n%s" %
(len(coords) + 1, msg), "red")
self.input_bshlm[len(coords)].setFocus()
self.clearOutput()
return
x1, y1, x2, y2 = coords
else:
coords, msg = WidgetUtils.getInput(
self.input_bshlm[0:2], is_geo_in, z_fields=[], angular_unit=self.geo_unit)
if len(coords) != 2:
self.logBshlm("Station1 coordinates not OK.\n%s" % msg, "red")
self.input_bshlm[len(coords)].setFocus()
self.clearOutput()
return
input_data, msg = WidgetUtils.getInput(self.input_bshlm_azimuth, True, z_fields=[
0], angular_unit=self.geo_unit_derived)
if len(input_data) != 2:
self.logBshlm(
"Input distance and azimuth not OK.\n%s" % msg, "red")
self.input_bshlm_azimuth[len(input_data)].setFocus()
self.clearOutput()
return
x1, y1 = coords
dist, a1 = input_data
a = self.cache.axis
f = self.cache.flattening
#end get needed input#
#transform to geo coords if needed#
if not is_custom:
try:
ct = TrLib.CoordinateTransformation(mlb, geo_mlb)
except:
self.logBshlm("Input label not OK!", "red")
self.clearOutput()
return
try:
x1, y1, z = ct.Transform(x1, y1)
except:
msg = ""
err = TrLib.GetLastError()
if err in ERRORS:
msg = "\n%s" % ERRORS[err]
self.logBshlm(
"Error in transformation of coords for station1" + msg, "red")
self.clearOutput()
return
        # display output of the first transformation; x1, y1 should now always
        # be in geo-coords
WidgetUtils.setOutput([x1, y1], self.output_bshlm_geo[
:2], True, z_fields=[], angular_unit=self.geo_unit)
# Now get the other output from bshlm and transformations....
if is_mode1:
if not is_custom:
try: # transform to geographic
x2, y2, z = ct.Transform(x2, y2)
except:
msg = ""
err = TrLib.GetLastError()
if err in ERRORS:
msg = "\n%s" % ERRORS[err]
self.logBshlm(
"Error in transformation of coords for station2" + msg, "red")
self.clearOutput()
return
data = TrLib.BesselHelmert(a, f, x1, y1, x2, y2)
if data[0] is not None:
a1, a2 = data[1:]
# WidgetUtils.setOutput(data,self.output_bshlm_azimuth,True,z_fields=[0],angular_unit=self.geo_unit_derived)
self.output_bshlm_azimuth[0].setText("%.3f m" % data[0])
self.output_bshlm_azimuth[1].setText(translateFromDegrees(
data[1], self.geo_unit_derived, precision=1))
self.output_bshlm_azimuth[2].setText(translateFromDegrees(
data[2], self.geo_unit_derived, precision=1))
else:
self.message("Error: could not calculate azimuth!")
self.clearOutput()
return
else:
data = TrLib.InverseBesselHelmert(a, f, x1, y1, a1, dist)
if data[0] is not None:
x2, y2, a2 = data
if not is_custom:
try:
x2_out, y2_out, z2 = ct.InverseTransform(x2, y2)
except:
msg = ""
err = TrLib.GetLastError()
if err in ERRORS:
msg = "\n%s" % ERRORS[err]
self.logBshlm(
"Error in transformation of coords for station2" + msg, "red")
self.clearOutput()
return
else:
x2_out = x2
y2_out = y2
# display result...
WidgetUtils.setOutput([x2_out, y2_out], self.input_bshlm[
2:], is_geo_in, z_fields=[], angular_unit=self.geo_unit)
self.txt_bshlm_azimuth2.setText(translateFromDegrees(
a2, self.geo_unit_derived, precision=1))
else:
self.message(
"Error: could not do inverse Bessel Helmert calculation")
self.clearOutput()
return
        # always display output in geo field - even if not transformed
self.cache.a1 = a1
self.cache.a2 = a2
WidgetUtils.setOutput([x2, y2],
self.output_bshlm_geo[2:], True, z_fields=[], angular_unit=self.geo_unit)
self.cache.is_valid = True
self.cache.mode = int(not is_mode1)
# called on error
def clearOutput(self):
is_mode1 = self.rdobt_bshlm_mode1.isChecked()
if (is_mode1):
WidgetUtils.setOutput([], self.output_bshlm_azimuth)
else:
WidgetUtils.setOutput([], self.input_bshlm[2:])
WidgetUtils.setOutput([], self.output_bshlm_azimuth[2:])
WidgetUtils.setOutput([], self.output_bshlm_geo)
def logBshlm(self, text, color="black", clear=False):
self.txt_bshlm_log.setTextColor(QColor(color))
if (not clear):
self.txt_bshlm_log.append(text)
else:
self.txt_bshlm_log.setText(text)
self.txt_bshlm_log.ensureCursorVisible()
#Override methods#
def handleStdOut(self, text):
self.logBshlm(text, color="green")
def handleStdErr(self, text):
self.logBshlm(text, color="red")
def handleCallBack(self, text):
self.logBshlm(text, color="blue")
def handleRegionChange(self, region):
self.cb_bshlm_system.clear()
if region in BSHLM_SYSTEMS:
systems = BSHLM_SYSTEMS[region]
else:
systems = BSHLM_SYSTEMS["default"]
self.cb_bshlm_system.addItems(systems)
def handleGeoUnitChange(self, geo_unit):
self.geo_unit = geo_unit
if TrLib.IsGeographic(str(self.cb_bshlm_system.currentText())):
for field in self.input_bshlm:
WidgetUtils.translateAngularField(field, geo_unit, precision=4)
for field in self.output_bshlm_geo:
WidgetUtils.translateAngularField(field, geo_unit, precision=4)
    def handleAngularUnitChange(self,
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class insert_args:
"""
Attributes:
- key
- column_parent
- column
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'column', (Column, Column.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_parent=None, column=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_parent = column_parent
self.column = column
self.consistency_level = consistency_level
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.column = Column()
self.column.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('insert_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
if self.column_parent != None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.column != None:
oprot.writeFieldBegin('column', TType.STRUCT, 3)
self.column.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level != None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocol.TProtocolException(message='Required field key is unset!')
if self.column_parent is None:
raise TProtocol.TProtocolException(message='Required field column_parent is unset!')
if self.column is None:
raise TProtocol.TProtocolException(message='Required field column is unset!')
if self.consistency_level is None:
raise TProtocol.TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
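# Note (sketch, not part of the generated code): the constructor default
# `consistency_level=thrift_spec[4][4]` reads the field default straight out of
# the spec tuple above -- here 1, which in Cassandra's Thrift IDL is
# ConsistencyLevel.ONE (an assumption about the enum, not shown in this file).
#
# >>> insert_args(key='k').consistency_level
# 1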
class insert_result:
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('insert_result')
if self.ire != None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue != None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te != None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class add_args:
"""
Attributes:
- key
- column_parent
- column
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'column', (CounterColumn, CounterColumn.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_parent=None, column=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_parent = column_parent
self.column = column
self.consistency_level = consistency_level
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.column = CounterColumn()
self.column.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('add_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
if self.column_parent != None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.column != None:
oprot.writeFieldBegin('column', TType.STRUCT, 3)
self.column.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level != None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocol.TProtocolException(message='Required field key is unset!')
if self.column_parent is None:
raise TProtocol.TProtocolException(message='Required field column_parent is unset!')
if self.column is None:
raise TProtocol.TProtocolException(message='Required field column is unset!')
if self.consistency_level is None:
raise TProtocol.TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class add_result:
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('add_result')
if self.ire != None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue != None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te != None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class remove_args:
"""
Attributes:
- key
- column_path
- timestamp
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
(2, TType.STRUCT, 'column_path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_path=None, timestamp=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_path = column_path
self.timestamp = timestamp
self.consistency_level = consistency_level
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_path = ColumnPath()
self.column_path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
          self.consistency_level = iprot.readI32();
self.device1 = Mock(**self.empty_output)
ipv6_vrf_all_interface_obj = ShowIpv6VrfAllInterface(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = ipv6_vrf_all_interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
ipv6_vrf_all_interface_obj = ShowIpv6VrfAllInterface(device=self.device)
parsed_output = ipv6_vrf_all_interface_obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden1(self):
self.device = Mock(**self.golden_output1)
ipv6_vrf_all_interface_obj = ShowIpv6VrfAllInterface(device=self.device)
parsed_output = ipv6_vrf_all_interface_obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output1)
def test_golden2(self):
self.device = Mock(**self.golden_output2)
ipv6_vrf_all_interface_obj = ShowIpv6VrfAllInterface(device=self.device)
parsed_output = ipv6_vrf_all_interface_obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output2)
def test_golden_custom(self):
self.device = Mock(**self.golden_output_custom)
ipv6_vrf_all_interface_obj = ShowIpv6VrfAllInterface(device=self.device)
parsed_output = ipv6_vrf_all_interface_obj.parse(vrf='VRF1', interface='GigabitEthernet0/0/0/1')
self.assertEqual(parsed_output, self.golden_parsed_output_custom)
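# Test-harness sketch (hedged): each golden fixture is a dict of Mock attribute
# specs, so Mock(**{'execute.return_value': '<cli text>'}) yields a fake device
# whose .execute() returns the canned CLI output that the parser consumes:
#
# >>> dev = Mock(**{'execute.return_value': 'some output'})
# >>> dev.execute()
# 'some output'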
#############################################################################
# Unit tests for show ethernet tags
#############################################################################
class test_show_ethernet_tags(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"GigabitEthernet0/0/0/0.511": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:511",
"vlan_id": "511"
},
"GigabitEthernet0/0/0/0.510": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:510",
"vlan_id": "510"
},
"GigabitEthernet0/0/0/0.503": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:503",
"vlan_id": "503"
},
"GigabitEthernet0/0/0/0.501": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:501",
"vlan_id": "501"
},
"GigabitEthernet0/0/0/0.502": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:502",
"vlan_id": "502"
},
"GigabitEthernet0/0/0/0.504": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:504",
"vlan_id": "504"
},
"GigabitEthernet0/0/0/0.505": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:505",
"vlan_id": "505"
},
"GigabitEthernet0/0/0/1.501": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:501",
"vlan_id": "501"
}
}
golden_parsed_interface_output = {
"GigabitEthernet0/0/0/1.501": {
"rewrite_num_of_tags_push": 0,
"status": "up",
"rewrite_num_of_tags_pop": 1,
"mtu": 1518,
"outer_vlan": ".1Q:501",
"vlan_id": "501"
}
}
golden_output = {'execute.return_value': '''
St: AD - Administratively Down, Dn - Down, Up - Up
Ly: L2 - Switched layer 2 service, L3 = Terminated layer 3 service,
Xtra C - Match on Cos, E - Match on Ethertype, M - Match on source MAC
-,+: Ingress rewrite operation; number of tags to pop and push respectively
Interface St MTU Ly Outer Inner Xtra -,+
Gi0/0/0/0.501 Up 1518 L3 .1Q:501 - - 1 0
Gi0/0/0/0.502 Up 1518 L3 .1Q:502 - - 1 0
Gi0/0/0/0.503 Up 1518 L3 .1Q:503 - - 1 0
Gi0/0/0/0.504 Up 1518 L3 .1Q:504 - - 1 0
Gi0/0/0/0.505 Up 1518 L3 .1Q:505 - - 1 0
Gi0/0/0/0.510 Up 1518 L3 .1Q:510 - - 1 0
Gi0/0/0/0.511 Up 1518 L3 .1Q:511 - - 1 0
Gi0/0/0/1.501 Up 1518 L3 .1Q:501 - - 1 0
'''}
golden_interface_output={'execute.return_value': '''
St: AD - Administratively Down, Dn - Down, Up - Up
Ly: L2 - Switched layer 2 service, L3 = Terminated layer 3 service,
Xtra C - Match on Cos, E - Match on Ethertype, M - Match on source MAC
-,+: Ingress rewrite operation; number of tags to pop and push respectively
Interface St MTU Ly Outer Inner Xtra -,+
Gi0/0/0/1.501 Up 1518 L3 .1Q:501 - - 1 0
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowEthernetTags(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowEthernetTags(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
    def test_golden_custom(self):
        self.device = Mock(**self.golden_interface_output)
        obj = ShowEthernetTags(device=self.device)
        parsed_output = obj.parse(interface='Gi0/0/0/1.501')
        self.assertEqual(parsed_output, self.golden_parsed_interface_output)
#############################################################################
# Unit tests for show interfaces <interface> accounting
#############################################################################
class test_show_interfaces_accounting(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = \
{
"GigabitEthernet0/0/0/0": {
"accounting": {
"arp": {
"chars_in": 378,
"chars_out": 378,
"pkts_in": 9,
"pkts_out": 9
},
"ipv4_multicast": {
"chars_in": 0,
"chars_out": 843700,
"pkts_in": 0,
"pkts_out": 10514
},
"ipv4_unicast": {
"chars_in": 1226852,
"chars_out": 887519,
"pkts_in": 19254,
"pkts_out": 13117
}
}
},
"GigabitEthernet0/0/0/1": {
"accounting": {
"arp": {
"chars_in": 378,
"chars_out": 378,
"pkts_in": 9,
"pkts_out": 9
},
"ipv4_multicast": {
"chars_in": 0,
"chars_out": 844816,
"pkts_in": 0,
"pkts_out": 10530
},
"ipv4_unicast": {
"chars_in": 843784,
"chars_out": 1764,
"pkts_in": 10539,
"pkts_out": 26
}
}
}
}
golden_output = {'execute.return_value': '''
Tue Jun 5 20:45:11.544 UTC
No accounting statistics available for Loopback0
No accounting statistics available for Loopback1
No accounting statistics available for Null0
GigabitEthernet0/0/0/0
Protocol Pkts In Chars In Pkts Out Chars Out
IPV4_UNICAST 19254 1226852 13117 887519
IPV4_MULTICAST 0 0 10514 843700
ARP 9 378 9 378
GigabitEthernet0/0/0/1
Protocol Pkts In Chars In Pkts Out Chars Out
IPV4_UNICAST 10539 843784 26 1764
IPV4_MULTICAST 0 0 10530 844816
ARP 9 378 9 378
No accounting statistics available for MgmtEth0/RP0/CPU0/0
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowInterfacesAccounting(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowInterfacesAccounting(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# Unit tests for ip interface brief
#############################################################################
class test_show_ip_interface_brief(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {'interface':
{'GigabitEthernet0/0/0/0': {'interface_status': 'Up',
'ip_address': 'unassigned',
'protocol_status': 'Up',
'vrf_name': 'default'},
'GigabitEthernet0/0/0/0.501': {'interface_status': 'Up',
'ip_address': '192.168.4.1',
'protocol_status': 'Up',
'vrf_name': 'VRF501'},
'GigabitEthernet0/0/0/0.502': {'interface_status': 'Up',
'ip_address': '192.168.154.1',
'protocol_status': 'Up',
'vrf_name': 'VRF502'},
'GigabitEthernet0/0/0/0.503': {'interface_status': 'Up',
'ip_address': '192.168.51.1',
'protocol_status': 'Up',
'vrf_name': 'VRF503'},
'GigabitEthernet0/0/0/0.504': {'interface_status': 'Up',
'ip_address': '192.168.205.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'GigabitEthernet0/0/0/0.505': {'interface_status': 'Up',
'ip_address': '192.168.106.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'GigabitEthernet0/0/0/0.510': {'interface_status': 'Up',
'ip_address': '192.168.151.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'GigabitEthernet0/0/0/0.511': {'interface_status': 'Up',
'ip_address': '192.168.64.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'GigabitEthernet0/0/0/1': {'interface_status': 'Up',
'ip_address': 'unassigned',
'protocol_status': 'Up',
'vrf_name': 'default'},
'GigabitEthernet0/0/0/1.501': {'interface_status': 'Up',
'ip_address': '192.168.51.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'GigabitEthernet0/0/0/2': {'interface_status': 'Up',
'ip_address': 'unassigned',
'protocol_status': 'Up',
'vrf_name': 'default'},
'Loopback500': {'interface_status': 'Up',
'ip_address': '192.168.220.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'Loopback501': {'interface_status': 'Up',
'ip_address': '192.168.111.1',
'protocol_status': 'Up',
'vrf_name': 'VRF501'},
'Loopback502': {'interface_status': 'Up',
'ip_address': '192.168.4.1',
'protocol_status': 'Up',
'vrf_name': 'VRF502'},
'Loopback503': {'interface_status': 'Up',
'ip_address': '192.168.154.1',
'protocol_status': 'Up',
'vrf_name': 'VRF503'},
'Loopback505': {'interface_status': 'Up',
'ip_address': '192.168.205.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'Loopback506': {'interface_status': 'Up',
'ip_address': '192.168.106.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'Loopback510': {'interface_status': 'Up',
'ip_address': '192.168.240.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'Loopback511': {'interface_status': 'Up',
'ip_address': '192.168.151.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'Loopback512': {'interface_status': 'Up',
'ip_address': '192.168.64.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'Loopback513': {'interface_status': 'Up',
'ip_address': '192.168.234.1',
'protocol_status': 'Up',
'vrf_name': 'default'},
'MgmtEth0/RP0/CPU0/0': {'interface_status': 'Up',
'ip_address': '10.1.17.179',
'protocol_status': 'Up',
'vrf_name': 'default'}}}
golden_output = {'execute.return_value': '''
RP/0/RP0/CPU0:PE1#show ip interface brief
Interface IP-Address Status Protocol Vrf-Name
Loopback500 192.168.220.1 Up Up default
Loopback501 192.168.111.1 Up Up VRF501
Loopback502 192.168.4.1 Up Up VRF502
Loopback503 192.168.154.1 Up Up VRF503
Loopback505 192.168.205.1 Up Up default
Loopback506 192.168.106.1 Up Up default
Loopback510 192.168.240.1 Up Up default
Loopback511 192.168.151.1 Up Up default
Loopback512 192.168.64.1 Up Up default
Loopback513 192.168.234.1 Up Up default
MgmtEth0/RP0/CPU0/0 10.1.17.179 Up Up default
GigabitEthernet0/0/0/0 unassigned Up Up default
GigabitEthernet0/0/0/0.501 192.168.4.1 Up Up VRF501
GigabitEthernet0/0/0/0.502 192.168.154.1 Up Up VRF502
GigabitEthernet0/0/0/0.503 192.168.51.1 Up Up VRF503
GigabitEthernet0/0/0/0.504 192.168.205.1 Up Up default
GigabitEthernet0/0/0/0.505 192.168.106.1 Up Up default
GigabitEthernet0/0/0/0.510 192.168.151.1 Up Up default
GigabitEthernet0/0/0/0.511 192.168.64.1 Up Up default
GigabitEthernet0/0/0/1 unassigned Up Up default
GigabitEthernet0/0/0/1.501 192.168.51.1 Up Up default
GigabitEthernet0/0/0/2 unassigned Up Up default
RP/0/RP0/CPU0:PE1#show ip interface brief | i 10.1.17.179
MgmtEth0/RP0/CPU0/0 10.1.17.179 Up Up default
'''}
golden_parsed_output_pipe_ip = {'interface':
{'MgmtEth0/RP0/CPU0/0': {'interface_status': 'Up',
'ip_address': '10.1.17.179',
'protocol_status': 'Up',
'vrf_name': 'default'}}}
golden_output_pipe_ip = {'execute.return_value': '''
RP/0/RP0/CPU0:PE1#show ip interface brief | i 10.1.17.179
MgmtEth0/RP0/CPU0/0 10.1.17.179 Up Up default
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowIpInterfaceBrief(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowIpInterfaceBrief(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden1(self):
self.device = Mock(**self.golden_output_pipe_ip)
obj = ShowIpInterfaceBrief(device=self.device)
parsed_output = obj.parse(ip='10.1.17.179')
self.assertEqual(parsed_output,self.golden_parsed_output_pipe_ip)
def test_empty_ipv4(self):
self.device = Mock(**self.empty_output)
obj = ShowIpv4InterfaceBrief(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_ipv4(self):
self.device = Mock(**self.golden_output)
obj = ShowIpv4InterfaceBrief(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden1_ipv4(self):
self.device = Mock(**self.golden_output_pipe_ip)
obj = ShowIpv4InterfaceBrief(device=self.device)
parsed_output = obj.parse(ip='10.1.17.179')
self.assertEqual(parsed_output,self.golden_parsed_output_pipe_ip)
#############################################################################
# Unit tests for show interfaces
#############################################################################
class test_show_interfaces(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"BVI51": {
"enabled": True,
"line_protocol": "down",
"oper_status": "down",
"interface_state_transitions": 0,
"type": "Bridge-Group Virtual Interface",
"mac_address": "0000.59ff.60b1",
"description": "NPON_Mcast_VLAN",
"ipv4": {
"192.168.166.9/30": {
"ip": "192.168.166.9",
"prefix_length": "30"
}
},
"mtu": 1514,
"bandwidth": 10000000,
"bandwidth_max": 10000000,
"reliability": "255/255",
"txload": "0/255",
"rxload": "0/255",
"encapsulations": {
"encapsulation": "arpa"
},
"loopback": "not set",
"arp_type": "arpa",
"arp_timeout": "04:00:00",
"last_input": "Unknown",
"last_output": "Unknown",
"counters": {
"last_clear": "Unknown",
"rate": {
"load_interval": 300,
"in_rate": 0,
"in_rate_pkts": 0,
"out_rate": 0,
"out_rate_pkts": 0
}
}
},
"BVI100": {
"enabled": True,
"line_protocol": "down",
"oper_status": "down",
"interface_state_transitions": 0,
"type": "Bridge-Group Virtual Interface",
"mac_address": "0059.01ff.0001",
"description": "au-hikari-mansion-100",
"ipv4": {
"192.168.36.254/24": {
"ip": "192.168.36.254",
"prefix_length": "24"
}
},
"mtu": 1514,
"bandwidth": 10000000,
"bandwidth_max": 10000000,
"reliability": "255/255",
"txload": "0/255",
"rxload": "0/255",
"encapsulations": {
"encapsulation": "arpa"
},
"loopback": "not | |
xlabelname=None,
format='png'):
"""Plotting decoding of the response with respect to dose.
Params
------
df : `DataFrame`
Table with columns=[perturbation_key, contvar_key, response_name].
The last column is always "response".
contvar_key : str
Name of the column in df for values to use for x axis.
perturbation_key : str
Name of the column in df for the perturbation or covariate to plot.
response_name: str (default: response)
Name of the column in df for values to use for y axis.
df_ref : `DataFrame` (default: None)
Table with the same columns as in df to plot ground_truth or another
condition for comparison. Could
also be used to just extract reference values for x-axis.
use_ref_response : bool (default: False)
        A flag indicating whether to use y-axis values from df_ref (True) or
        just to extract reference values for the x-axis.
col_dict : dictionary (default: None)
Dictionary with colors for each value in perturbation_key.
bbox : tuple (default: (1.35, 1.))
Coordinates to adjust the legend.
    plot_vertical : boolean (default: False)
        Whether to plot vertical lines at x-axis reference values from df_ref.
    f1 : float (default: 7.0)
        Width in inches for the plot.
    f2 : float (default: 3.0)
        Height in inches for the plot.
fname : str (default: None)
Name of the file to export the plot. The name comes without format
extension.
format : str (default: png)
Format for the file to export the plot.
"""
sns.set_style("white")
if use_ref_response and not (df_ref is None):
df[ref_name] = 'predictions'
df_ref[ref_name] = 'observations'
if interpolate:
df_plt = pd.concat([df, df_ref])
else:
df_plt = df
else:
df_plt = df
atomic_drugs = np.unique(df[perturbation_key].values)
if palette is None:
current_palette = get_palette(len(list(atomic_drugs)))
if col_dict is None:
col_dict = dict(
zip(
list(atomic_drugs),
current_palette
)
)
fig = plt.figure(figsize=(f1, f2))
ax = plt.gca()
if use_ref_response:
sns.lineplot(
x=contvar_key,
y=response_name,
palette=col_dict,
hue=perturbation_key,
style=ref_name,
dashes=[(1, 0), (2, 1)],
legend='full',
style_order=['predictions', 'observations'],
data=df_plt, ax=ax)
df_ref = df_ref.replace('training_treated', 'train')
sns.scatterplot(
x=contvar_key,
y=response_name,
hue='split',
size='num_cells',
sizes=(10, 100),
alpha=1.,
palette={'train': '#000000', 'training': '#000000', 'ood': '#e41a1c'},
data=df_ref, ax=ax)
ax.legend_.remove()
else:
sns.lineplot(x=contvar_key, y=response_name,
palette=col_dict,
hue=perturbation_key,
data=df_plt, ax=ax)
ax.legend(
loc='upper right',
bbox_to_anchor=bbox,
fontsize=fontsize)
if not (title_name is None):
ax.set_title(title_name, fontsize=fontsize, fontweight='bold')
    ax.grid(False)
if xlabelname is None:
ax.set_xlabel(contvar_key, fontsize=fontsize)
else:
ax.set_xlabel(xlabelname, fontsize=fontsize)
ax.set_ylabel(f"{response_name}", fontsize=fontsize)
ax.xaxis.set_tick_params(labelsize=fontsize)
ax.yaxis.set_tick_params(labelsize=fontsize)
if not (logscale is None):
ax.set_xticks(np.log10(logscale))
ax.set_xticklabels(logscale, rotation=90)
if not (df_ref is None):
atomic_drugs=np.unique(df_ref[perturbation_key].values)
for drug in atomic_drugs:
x = df_ref[df_ref[perturbation_key] == drug][contvar_key].values
m1 = np.min(df[df[perturbation_key] == drug][response_name].values)
m2 = np.max(df[df[perturbation_key] == drug][response_name].values)
if plot_vertical:
for x_dot in x:
ax.plot([x_dot, x_dot], [m1, m2], ':', color='black',
linewidth=.5, alpha=0.5)
fig.tight_layout()
if fname:
plt.savefig(f'{fname}.{format}', format=format)
return fig
def plot_uncertainty_comb_dose(
compert_api,
cov,
pert,
N=11,
metric='cosine',
measured_points=None,
cond_key='condition',
vmin=None,
vmax=None,
sizes=(40, 160),
df_ref=None,
xlims=(0, 1.03),
ylims=(0, 1.03),
fixed_drugs='',
fixed_doses='',
title=True,
filename=None
):
"""Plotting uncertainty for a single perturbation at a dose range for a
particular covariate.
Params
------
compert_api
Api object for the model class.
cov : str
Name of covariate.
pert : str
Name of the perturbation.
N : int
Number of dose values.
metric: str (default: 'cosine')
Metric to evaluate uncertainty.
measured_points : dict (default: None)
        A dictionary of dictionaries: for each covariate, a dictionary with
        observed doses per perturbation, e.g. {'covar1': {'pert1':
        [0.1, 0.5, 1.0], 'pert2': [0.3]}}
cond_key : str (default: 'condition')
Name of the variable to use for plotting.
filename : str (default: None)
Full path to the file to export the plot. File extension should be
included.
Returns
-------
pd.DataFrame of uncertainty estimations.
"""
df_list = []
for i in np.round(np.linspace(0, 1, N), decimals=2):
for j in np.round(np.linspace(0, 1, N), decimals=2):
df_list.append(
{
'cell_type' : cov,
'condition' : pert+fixed_drugs,
'dose_val' : str(i) + '+' + str(j)+fixed_doses,
}
)
df_pred = pd.DataFrame(df_list)
uncert_cos = []
uncert_eucl = []
closest_cond_cos = []
closest_cond_eucl = []
for i in range(df_pred.shape[0]):
uncert_cos_, uncert_eucl_, closest_cond_cos_, closest_cond_eucl_ = (
compert_api.compute_uncertainty(
cov=df_pred.iloc[i]['cell_type'],
pert=df_pred.iloc[i]['condition'],
dose=df_pred.iloc[i]['dose_val']
)
)
uncert_cos.append(uncert_cos_)
uncert_eucl.append(uncert_eucl_)
closest_cond_cos.append(closest_cond_cos_)
closest_cond_eucl.append(closest_cond_eucl_)
df_pred['uncertainty_cosine'] = uncert_cos
df_pred['uncertainty_eucl'] = uncert_eucl
df_pred['closest_cond_cos'] = closest_cond_cos
df_pred['closest_cond_eucl'] = closest_cond_eucl
doses = df_pred.dose_val.apply(lambda x: x.split('+'))
X = np.array(
doses
.apply(lambda x: x[0])
.astype(float)
).reshape(N, N)
Y = np.array(
doses
.apply(lambda x: x[1])
.astype(float)
).reshape(N, N)
Z = np.array(
df_pred[f'uncertainty_{metric}']
.values
.astype(float)
).reshape(N, N)
fig, ax = plt.subplots(1, 1)
CS = ax.contourf(X, Y, Z, cmap='coolwarm', levels=20,
alpha=1, vmin=vmin, vmax=vmax)
ax.set_xlabel(pert.split('+')[0], fontweight="bold")
ax.set_ylabel(pert.split('+')[1], fontweight="bold")
if title:
ax.set_title(cov)
if not (df_ref is None):
sns.scatterplot(
x=pert.split('+')[0],
y=pert.split('+')[1],
hue='split',
size='num_cells',
sizes=sizes,
alpha=1.,
palette={'train': '#000000', 'training': '#000000', 'ood': '#e41a1c'},
data=df_ref,
ax=ax)
ax.legend_.remove()
if measured_points:
ticks = measured_points[cov][pert]
xticks = [float(x.split('+')[0]) for x in ticks]
yticks = [float(x.split('+')[1]) for x in ticks]
ax.set_xticks(xticks)
ax.set_xticklabels(xticks, rotation=90)
ax.set_yticks(yticks)
fig.colorbar(CS)
sns.despine()
ax.axis("equal")
ax.axis("square")
ax.set_xlim(xlims)
ax.set_ylim(ylims)
plt.tight_layout()
if filename:
plt.savefig(filename)
return df_pred
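# Usage sketch (hedged): `api` stands in for a fitted model API object exposing
# compute_uncertainty(cov=..., pert=..., dose=...) as assumed above; covariate
# and perturbation names are illustrative.
#
# df_unc = plot_uncertainty_comb_dose(
#     compert_api=api,
#     cov='A549',
#     pert='drugA+drugB',
#     N=11,
#     metric='cosine',
#     filename='uncertainty_grid.png',
# )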
def plot_uncertainty_dose(
compert_api,
cov,
pert,
N=11,
metric='cosine',
measured_points=None,
cond_key='condition',
log=False,
min_dose=None,
filename=None
):
"""Plotting uncertainty for a single perturbation at a dose range for a
particular covariate.
Params
------
compert_api
Api object for the model class.
cov : str
Name of covariate.
pert : str
Name of the perturbation.
N : int
Number of dose values.
metric: str (default: 'cosine')
Metric to evaluate uncertainty.
measured_points : dict (default: None)
        A dictionary of dictionaries: for each covariate, a dictionary with
        observed doses per perturbation, e.g. {'covar1': {'pert1':
        [0.1, 0.5, 1.0], 'pert2': [0.3]}}
cond_key : str (default: 'condition')
Name of the variable to use for plotting.
    log : boolean (default: False)
        Whether to plot the dose axis on a log scale.
min_dose : float (default: None)
Minimum dose for the uncertainty estimate.
filename : str (default: None)
Full path to the file to export the plot. File extension should be included.
Returns
-------
pd.DataFrame of uncertainty estimations.
"""
df_list = []
if log:
if min_dose is None:
min_dose = 1e-3
N_val = np.round(np.logspace(np.log10(min_dose), np.log10(1), N), decimals=10)
else:
if min_dose is None:
min_dose = 0
N_val = np.round(np.linspace(min_dose, 1., N), decimals=3)
for i in N_val:
df_list.append(
{
'cell_type' : cov,
'condition' : pert,
'dose_val' : repr(i),
}
)
df_pred = pd.DataFrame(df_list)
uncert_cos = []
uncert_eucl = []
closest_cond_cos = []
closest_cond_eucl = []
for i in range(df_pred.shape[0]):
uncert_cos_, uncert_eucl_, closest_cond_cos_, closest_cond_eucl_ = (
compert_api.compute_uncertainty(
cov=df_pred.iloc[i]['cell_type'],
pert=df_pred.iloc[i]['condition'],
dose=df_pred.iloc[i]['dose_val']
)
)
uncert_cos.append(uncert_cos_)
uncert_eucl.append(uncert_eucl_)
closest_cond_cos.append(closest_cond_cos_)
closest_cond_eucl.append(closest_cond_eucl_)
df_pred['uncertainty_cosine'] = uncert_cos
df_pred['uncertainty_eucl'] = uncert_eucl
df_pred['closest_cond_cos'] = closest_cond_cos
df_pred['closest_cond_eucl'] = closest_cond_eucl
x = df_pred.dose_val.values.astype(float)
y = df_pred[f'uncertainty_{metric}'].values.astype(float)
fig, ax = plt.subplots(1, 1)
ax.plot(x, y)
ax.set_xlabel(pert)
ax.set_ylabel('Uncertainty')
ax.set_title(cov)
if log:
ax.set_xscale('log')
if measured_points:
ticks = measured_points[cov][pert]
ax.set_xticks(ticks)
ax.set_xticklabels(ticks, rotation=90)
else:
plt.draw()
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
sns.despine()
plt.tight_layout()
if filename:
plt.savefig(filename)
return df_pred
def save_to_file(fig, file_name, file_format=None):
if file_format is None:
if file_name.split(".")[-1] in ['png', 'pdf']:
file_format = file_name.split(".")[-1]
savename = file_name
else:
file_format = 'pdf'
savename = f'{file_name}.{file_format}'
else:
savename = file_name
fig.savefig(savename, format=file_format)
print(f"Saved file to: {savename}")
def plot_embedding(
emb,
labels=None,
col_dict=None,
title=None,
show_lines=False,
show_text=False,
show_legend=True,
axis_equal=True,
circle_size=40,
circe_transparency=1.0,
line_transparency=0.8,
line_width=1.0,
fontsize=9,
fig_width=4,
fig_height=4,
file_name=None,
file_format=None,
labels_name=None,
width_ratios=[7, 1],
bbox=(1.3, 0.7)
):
sns.set_style("white")
# create data structure suitable for embedding
df = pd.DataFrame(emb, columns=['dim1', 'dim2'])
if not (labels is None):
if labels_name is None:
labels_name = 'labels'
df[labels_name] = labels
fig = plt.figure(figsize=(fig_width, fig_height))
ax = plt.gca()
sns.despine(left=False, bottom=False, right=True)
if (col_dict is None) and not (labels is None):
col_dict = get_colors(labels)
sns.scatterplot(
x="dim1",
y="dim2",
hue=labels_name,
palette=col_dict,
alpha=circe_transparency,
edgecolor="none",
s=circle_size,
data=df,
ax=ax)
try:
ax.legend_.remove()
except:
pass
if show_lines:
for i in range(len(emb)):
if col_dict is None:
ax.plot(
[0, emb[i, 0]],
[0, emb[i, 1]],
alpha=line_transparency,
linewidth=line_width,
c=None
)
else:
ax.plot(
[0, emb[i, 0]],
[0, emb[i, 1]],
alpha=line_transparency,
linewidth=line_width,
c=col_dict[labels[i]]
)
if show_text and not (labels is None):
texts = []
labels = np.array(labels)
unique_labels = np.unique(labels)
for label in unique_labels:
idx_label = np.where(labels == label)[0]
texts.append(
ax.text(
np.mean(emb[idx_label, 0]),
np.mean(emb[idx_label, 1]),
label,
fontsize=fontsize
)
)
adjust_text(
texts,
arrowprops=dict(arrowstyle='-', color='black', lw=0.1),
ax=ax
)
if axis_equal:
ax.axis('equal')
ax.axis('square')
if title:
ax.set_title(title, fontsize=fontsize, fontweight="bold")
ax.set_xlabel('dim1', fontsize=fontsize)
ax.set_ylabel('dim2', fontsize=fontsize)
ax.xaxis.set_tick_params(labelsize=fontsize)
ax.yaxis.set_tick_params(labelsize=fontsize)
plt.tight_layout()
if file_name:
save_to_file(fig, file_name, file_format)
return plt
def get_colors(
labels,
palette=None,
palette_name=None
):
n_colors = len(labels)
if palette is None:
palette = get_palette(n_colors, palette_name)
col_dict = dict(zip(labels, palette[:n_colors]))
return col_dict
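# Usage sketch: build a label -> color mapping for the scatter palettes above.
#
# >>> col_dict = get_colors(['ctrl', 'drugA', 'drugB'])
# >>> sorted(col_dict)
# ['ctrl', 'drugA', 'drugB']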
def plot_similarity(
emb,
labels=None,
col_dict=None,
fig_width=4,
fig_height=4,
| |
True to negate the shifted points.
Returns
-------
ndata : ndarray
Array of shifted NMR data.
"""
return roll(data, pts, neg)
def roll(data, pts=0.0, neg=False):
"""
Roll axis
Parameters
----------
data : ndarray
Array of NMR data.
pts : int
Number of points to shift. Positive value will right shift the data,
negative values will left shift the data.
neg : bool
True to negate the shifted points.
Returns
-------
ndata : ndarray
Array of NMR data with last axis rolled.
"""
data = np.roll(data, int(pts), axis=-1)
if neg:
if pts > 0:
data[..., :pts] = -data[..., :pts]
else:
data[..., pts:] = -data[..., pts:]
return data
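# Example (worked by hand): a right shift by 2 with neg=True negates the two
# wrapped-around points.
#
# >>> import numpy as np
# >>> roll(np.arange(6), 2, neg=True)
# array([-4, -5,  0,  1,  2,  3])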
def fsh(data, pts):
"""
Frequency shift by Fourier transform. Negative signed phase correction.
Parameters
----------
data : ndarray
Array of NMR data.
pts : float
Number of points to frequency shift the data. Positive value will
shift the spectrum to the right, negative values to the left.
Returns
-------
ndata : ndarray
Array of NMR data with last axis rolled.
"""
s = float(data.shape[-1])
# inverse fft -> first order phase correction -> fft
# idata = icomplexft(data)
# pdata =np.exp(-2.j*pi*pts*np.arange(s)/s,sig=data.dtype)*icomplexft(data)
# data = complexft(pdata)
# inplace processing
return fft(np.exp(-2.j * pi * pts * np.arange(s) /
s).astype(data.dtype) * ifft(data))
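# Note: unlike roll(), pts may be fractional -- the phase ramp applied between
# the inverse and forward transforms interpolates sub-point shifts, e.g.
# fsh(spec, 0.5) moves the spectrum half a point, and fsh(spec, 0) returns the
# input (up to floating-point error in the ifft/fft round trip).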
def fsh2(data, pts):
"""
    Frequency shift by Fourier transform. Positive signed phase correction.
Parameters
----------
data : ndarray
Array of NMR data.
pts : float
Number of points to frequency shift the data. Positive value will
shift the spectrum to the right, negative values to the left.
Returns
-------
ndata : ndarray
Array of NMR data with last axis rolled.
"""
s = float(data.shape[-1])
return fft_positive(np.exp(2.j * pi * pts * np.arange(s) /
s).astype(data.dtype) * ifft_positive(data))
##############
# Transforms #
##############
def nmr_reorder(data):
"""
Reorder spectrum after FT transform to NMR order (swap halves and reverse).
"""
s = data.shape[-1]
return np.append(data[..., int(s / 2)::-1], data[..., s:int(s / 2):-1],
axis=-1)
def swap_halves(data):
"""
    Swap the halves of a spectrum.
"""
s = data.shape[-1]
return np.append(data[..., int(s / 2):], data[..., :int(s / 2)], axis=-1)
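# Example contrasting the two orderings on a length-6 array:
#
# >>> import numpy as np
# >>> nmr_reorder(np.arange(6))
# array([3, 2, 1, 0, 5, 4])
# >>> swap_halves(np.arange(6))
# array([3, 4, 5, 0, 1, 2])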
# Fourier based Transforms
def rft(x):
"""
Real Fourier transform.
Parameters
----------
x : ndarray
Array of NMR data.
Returns
-------
y : ndarray
Array of NMR data with real Fourier transform applied.
"""
# XXX figure out what exactly this is doing...
s = x.shape[-1]
xp = np.zeros(x.shape, dtype="complex64")
    xp[..., 1:s // 2] = x[..., 1:-1:2] + x[..., 2::2] * 1.j
    xp[..., 0] = x[..., 0] / 2.
    xp[..., s // 2] = x[..., -1] / 2.
return np.array(nmr_reorder(np.fft.fft(2 * xp, axis=-1).real),
dtype="float32")
def irft(xp):
"""
Inverse real fourier transform
Parameters
----------
x : ndarray
Array of NMR data.
Returns
-------
y : ndarray
Array of NMR data with an inverse real Fourier transform applied.
"""
# XXX figure out what exactly this is doing
s = xp.shape[-1]
xp = np.fft.ifft(nmr_reorder(xp)) # re-order, inverse FT
# output results
x = np.zeros(xp.shape, dtype="float32")
# unpack ifft data
    x[..., 1:-1:2] = xp[..., 1:s // 2].real
    x[..., 2::2] = xp[..., 1:s // 2].imag
    x[..., 0] = xp[..., 0].real
    x[..., -1] = xp[..., s // 2].real
return x
# Fourier transforms
def fft(data):
"""
Fourier transform, NMR ordering of results.
There are a number of definitions of the discrete Fourier transform
the version used in this function is as follows.
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}
\\right\\}\\qquad k = 0,\\ldots,n-1.
With the inverse DFT in the :py:func:`ifft` function defined as follows.
.. math::
a_m = \\frac{1}{n} \\sum_{k=0}^{n-1} A_k \\exp \\left\\{2\\pi
i{mk\\over n} \\right\\}\\qquad n = 0,\\ldots,n-1.
Two alternative definitions are also supported by nmrglue. One in
which both the sum in the fft and ifft are multiplied by
:math:`\\frac{1}{\\sqrt{n}}` which results in a pair of transforms in
    which the total power contained in the signals before and after the
    transforms is equal. This is the type of transform used in the
    Rowland NMR Toolkit. This type of transform is performed by the
:py:func:`fft_norm` and :py:func:`ifft_norm` functions.
The second definition changes the sign of the exponent to be positive while
keeping the normalization factors the same. This type of transform is
performed by the NMRPipe processing package and the functions
:py:func:`fft_positive` and :py:func:`ifft_positive`.
    All of the Fourier transforms performed by nmrglue return results in 'NMR
    order', in which the two halves of the spectrum have been swapped and
    reversed.
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
ndata : ndarray
Fourier transform of NMR data in 'NMR order'.
See Also
--------
ifft : Inverse Fourier transform.
fft_norm : Norm (power) conserving Fourier transform.
fft_positive : Fourier transform with a positive exponential.
"""
return np.fft.fftshift(np.fft.fft(data, axis=-1).astype(data.dtype), -1)
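# Round-trip sketch (doctest-style): ifft undoes fft exactly, up to
# floating point error:
#
#     >>> import numpy as np
#     >>> x = (np.arange(16) + 1j * np.arange(16)).astype('complex64')
#     >>> bool(np.allclose(x, ifft(fft(x)), atol=1e-4))
#     True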
def fft_norm(data):
"""
Fourier transform, total power preserved, NMR ordering of results
This function is similar to the transform performed by The Rowland NMR
Toolkit's FFT function.
See :py:func:`fft` for documentation of the transformation applied by this
function.
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
ndata : ndarray
Fourier transform of NMR data in 'NMR order'.
See Also
--------
ifft_norm : Inverse norm (power) conserving Fourier transform.
fft : Non-norm (power) conserving Fourier transform.
fft_positive : Fourier transform with a positive exponential.
"""
return fft(data) / np.sqrt(float(data.shape[-1]))
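# Parseval sketch (doctest-style): fft_norm conserves total power:
#
#     >>> import numpy as np
#     >>> x = (np.arange(16) + 1j * np.arange(16)).astype('complex64')
#     >>> before = float(np.sum(np.abs(x) ** 2))
#     >>> after = float(np.sum(np.abs(fft_norm(x)) ** 2))
#     >>> bool(np.isclose(before, after, rtol=1e-4))
#     True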
def fft_positive(data):
"""
Fourier transform with positive exponential, NMR ordering of results
This is similar to the transform performed by NMRPipe's FFT function.
See :py:func:`fft` for documentation of the transformation applied by this
function.
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
ndata : ndarray
Fourier transform of NMR data in 'NMR order'.
See Also
--------
ifft_positive : Inverse Fourier transform with a positive exponential.
fft_norm : Norm (power) conserving Fourier transform.
fft : Fourier transform with a negative exponential.
"""
# a positive exponential is the same as an IFFT, but we need to undo
# the 1/N scaling
s = float(data.shape[-1])
return (np.fft.fftshift(np.fft.ifft(data, axis=-1).astype(data.dtype), -1)
* s)
def ifft(data):
"""
Inverse Fourier transform, NMR ordering of results.
See :py:func:`fft` for documentation of the transformation applied by this
function.
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
ndata : ndarray
Inverse Fourier transform of NMR data in 'NMR order'.
See Also
--------
fft : Fourier transform.
ifft_norm : Norm (power) conserving inverse Fourier transform.
ifft_positive : Inverse Fourier transform with a positive exponential.
"""
return np.fft.ifft(np.fft.ifftshift(data, -1), axis=-1).astype(data.dtype)
def ifft_norm(data):
"""
Inverse Fourier transform, total power preserved, NMR ordering of results
This is similar to the transform performed by the Rowland NMR Toolkit's
IFFT function.
See :py:func:`fft` for documentation of the transformation applied by this
function.
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
ndata : ndarray
Inverse Fourier transform of NMR data in 'NMR order'.
See Also
--------
fft_norm : Norm (power) conserving Fourier transform.
ifft : Non-norm (power) conserving inverse Fourier transform.
ifft_positive : Inverse Fourier transform with a positive exponential.
"""
return ifft(data) * np.sqrt(float(data.shape[-1]))
def ifft_positive(data):
"""
Inverse Fourier transform with positive exponential, NMR ordered results.
This is similar to the transform performed by NMRPipe's FFT function with
the -inv flag.
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
ndata : ndarray
Inverse Fourier transform of NMR data in 'NMR order'.
See Also
--------
fft_positive : Fourier transform with a positive exponential.
ifft_norm : Norm (power) conserving inverse Fourier transform.
ifft : Inverse Fourier transform with a negative exponential.
"""
# an inverse FFT with a positive exponential in the FFT definition is the
# same as a FFT with negative exponentials, but with a 1/N scaling factor
s = 1.0 / float(data.shape[-1])
return (np.fft.fft(np.fft.ifftshift(data, -1), axis=-1).astype(data.dtype)
* s)
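# Round-trip sketch (doctest-style) for the positive-exponential pair:
#
#     >>> import numpy as np
#     >>> x = (np.arange(16) + 1j * np.arange(16)).astype('complex64')
#     >>> bool(np.allclose(x, ifft_positive(fft_positive(x)), atol=1e-4))
#     True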
# Hadamard Transform functions
def int2bin(n, digits=8):
"""
Integer to binary string
"""
return "".join([str((n >> y) & 1) for y in range(digits - 1, -1, -1)])
def bin2int(s):
"""
binary string to integer
"""
o = 0
k = len(s) - 1
for i, v in enumerate(s):
o = o + int(v) * 2 ** (k - i)
return o
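# Round-trip sketch (doctest-style):
#
#     >>> int2bin(13)
#     '00001101'
#     >>> bin2int('00001101')
#     13
#     >>> bin2int(int2bin(202))
#     202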
'''
Jabber Protocol.
'''
from common import pref, netcall
from digsby.loadbalance import DigsbyLoadBalanceManager
from hashlib import sha256
from jabber.JabberProtocol import JabberProtocol
from jabber.threadstream import ThreadStream
from logging import getLogger
from peak.util.imports import lazyModule
from pyxmpp.all import Iq, Presence
from pyxmpp.jabber.client import JabberClient
from pyxmpp.jid import JID
from traceback import print_exc
from util.callbacks import callsback, CallLater
from util.introspect import callany, funcinfo
from util.primitives.funcs import Delegate, do
from util.primitives.mapping import odict
from util.primitives.structures import enum
from util.threads.threadpool2 import threaded
import blobs
import common
import digsby
import hooks
import jabber
import random
import string
import sys
from common.Protocol import StateMixin
callbacks = lazyModule('util.callbacks')
mapping = lazyModule('util.primitives.mapping')
funcs = lazyModule('util.primitives.funcs')
error_handling = lazyModule('util.primitives.error_handling')
hook_util = lazyModule('util.hook_util')
util_threads = lazyModule('util.threads')
log = getLogger('jabber.protocol')
methods_dict = odict(dict(digsby_login="sasl:DIGSBY-SHA256-RSA-CERT-AES"))
log = getLogger('digsbyImportProtocol')
LOAD_BALANCE_ADDRS = [
('login1.digsby.org', 80),
('login2.digsby.org', 80)
]
JABBER_SRVS_FALLBACK = [
'api1.digsby.org',
'api2.digsby.org',
]
class DigsbyProtocol(JabberClient, StateMixin):
states = enum('Disconnected',
'Authenticating',
'Connected')
#layering violations from threadstream.py
do_ssl = False
connect_killed = False
def __init__(self, username, password, server, resource="Digsby"):
host, port = server
alphanum = string.letters + string.digits
resource = resource + "." + "".join(random.choice(alphanum) for _x in xrange(6))
jid = JID(username, "digsby.org", resource)
if isinstance(username, unicode):
username = username.encode('utf8')
if isinstance(password, unicode):
password = sha256(password.encode('utf-8')).hexdigest()
jkwargs = dict(jid = jid,
password = password,
keepalive = 45)
if host:
jkwargs.update(server = host)
if port:
jkwargs.update(port = port)
jkwargs.update(auth_methods = ("sasl:DIGSBY-SHA256-RSA-CERT-AES",))
JabberClient.__init__(self, **jkwargs)
StateMixin.__init__(self)
self.stream_class = ThreadStream
@callsback
def Connect(self, callback=None):
self.change_state(self.Statuses.CONNECTING)
self.connect_callback = callback
def conn_attempt_failed():
log.debug('conn_attempt_failed')
# nowhere else to go; report the connection failure
self.set_offline(self.Reasons.CONN_FAIL)
callback.error()
self.connect_attempt_failed = conn_attempt_failed
try:
self.connect()
except Exception as _e:
print_exc()
self.connect_attempt_failed()
else:
self.connect_attempt_succeeded()
# Go find the load balancer; if you cannot, call on_fail
#self.get_login_servers(l.username)
def connect_attempt_succeeded(self):
log.debug('connect_attempt_succeeded')
with self.lock:
self.idle_loop = Delegate()
self.idle_loop += self.idle
self.idle_looper = jabber.IdleLoopTimer(1, self.idle_loop)
def connect_attempt_failed(self):
raise AssertionError('connection attempt cannot fail before it is attempted')
def connected(self):
with self.lock:
if self.state == self.Statuses.CONNECTING:
self.change_state(self.Statuses.AUTHENTICATING)
def disconnected(self, want_try_again = False):
log.debug('disconnected 1')
with self.lock:
log.debug('disconnected 2')
if not want_try_again and not self.want_try_again:
log.debug('disconnected 3')
# assert False
self.change_state(self.Statuses.OFFLINE)
JabberClient.disconnected(self)
def _get_stream(self):
return getattr(self, '_stream', None)
def _set_stream(self, stream):
new = stream
if new is None:
old = self.stream
if old is not None:
netcall(old.close)
for attr in ('process_stream_error', 'state_change', 'owner'):
setattr(old, attr, Null)
self._stream = new
stream = property(_get_stream, _set_stream)
# def auth_failed(self, reason=''):
# if reason:
# self._auth_error_msg = reason
# self.setnotifyif('offline_reason', self.Reasons.BAD_PASSWORD)
# self.Disconnect()
def auth_failed(self, reason=''):
if reason in ('bad-auth', 'not-authorized'):
self._auth_error_msg = reason
self.setnotifyif('offline_reason', self.Reasons.BAD_PASSWORD)
elif reason:
self.error_txt = reason
self.fatal_error()
@callsback
def _reconnect(self, invisible = False,
do_conn_fail = True, callback=None):
log.info('jabber _reconnect!')
getattr(getattr(self, 'idle_looper', None), 'stop', lambda: None)()
#grab next set of connection opts
opts = self._get_next_connection_options()
if opts is None:
return self.fatal_error()
self.change_state(self.Statuses.CONNECTING)
self.server, self.port = opts.pop('server')
#collect $200
threaded(lambda: JabberProtocol.Connect(self, invisible = invisible,
do_conn_fail = do_conn_fail, callback=callback))()
def fatal_error(self):
if getattr(self.stream, '_had_host_mismatch', False) and not self.offline_reason:
log.error('there was a host mismatch, interpreting it as auth error...')
self.setnotifyif('offline_reason', self.Reasons.BAD_PASSWORD) # technically it's a bad username, but auth error regardless
reason = self.offline_reason or self.Reasons.CONN_LOST
log.info('Out of connection options. Changing to %r', reason)
self.set_offline(reason)
return False
def connect(self):
"""Connect to the server and set up the stream.
Set `self.stream` and notify `self.state_changed` when connection
succeeds. Additionally, initialize Disco items and info of the client.
"""
JabberClient.connect(self, register=False)
def stop_idle_looper(self):
idle_looper, self.idle_looper = getattr(self, 'idle_looper', None), None
if idle_looper is not None:
idle_looper.stop()
del self.idle_looper
def stop_timer_loops(self):
self.stop_idle_looper()
def Disconnect(self):
netcall(self._Disconnect)
def _Disconnect(self):
log.debug('logging out %r', self.want_try_again)
with self.lock:
pres = Presence(stanza_type="unavailable",
status='Logged Out')
try:
self.stream.send(pres)
except AttributeError:
pass
self.connect_killed = True
self.disconnect()
try:
self.stop_timer_loops()
except AttributeError:
print_exc()
if getattr(self, 'idle_loop', None) is not None:
del self.idle_loop[:]
if self.interface_providers is not None:
del self.interface_providers[:]
self.interface_providers = None
if self.stream is not None:
self.stream = None
log.debug('1logged out %r', self.want_try_again)
self.offline_reason = None
# self.disconnected()
log.debug('1logged out %r', self.want_try_again)
common.protocol.Disconnect(self)
def session_started(self):
'''
Called when the IM session is successfully started (after all the
necessary negotiations, authentication and authorization).
'''
with self.lock:
self.idle_looper.start()
log.info('session started')
s = self.stream
newstate = self.Statuses.AUTHORIZED
#when this is true, don't try to start a new connection on conn_lost
self.have_connected = True
# set up handlers for supported <iq/> queries
s.set_iq_get_handler("query", "jabber:iq:version", self.get_version)
# set up handlers for <presence/> stanzas
# do(s.set_presence_handler(name, func) for name, func in [
# (None, self.buddies.update_presence),
# ('unavailable', self.buddies.update_presence),
# ('available', self.buddies.update_presence),
# ('subscribe', self.subscription_requested),
# ('subscribed', self.presence_control),
# ('unsubscribe', self.presence_control),
# ('unsubscribed', self.presence_control),
# ])
self.session_started_notify(s)
self.connect_callback.success()
self.change_state(newstate)
log.info('session started done')
def session_started_notify(self, s):
'''
Whatever needs to be done after most setup and before the callback/state
change happens. Usually that will be plugins doing their own setup.
Allows notify to be overridden in a subclass.
'''
hooks.notify('digsby.jabber.session_started', self, s)
def authorized(self):
log.info('authorized1')
JabberClient.authorized(self) # required!
log.info('authorized2')
def get_version(self,iq):
"""Handler for jabber:iq:version queries.
jabber:iq:version queries are not supported directly by PyXMPP, so the
XML node is accessed directly through the libxml2 API. This should be
used very carefully!"""
iq = iq.make_result_response()
q = iq.new_query("jabber:iq:version")
q.newTextChild( q.ns(), "name", "Digsby Import Client" )
# q.newTextChild( q.ns(), "version", ('%s %s' % (sys.REVISION, sys.TAG)).strip()) # strip because sometimes TAG is ''
if not self.hide_os:
import platform
platform_string = platform.platform()
# for some reason, on my XP box, platform.platform() contains both
# the platform AND the release. On Ubuntu, OS X,
# and I believe older versions of Windows, this does not happen,
# so we need to add the release in all other cases.
if platform_string.find("XP") == -1:
platform_string += " " + platform.release()
q.newTextChild( q.ns(), "os", platform_string )
self.send(iq)
return True
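# A standalone sketch of the OS-string workaround above (illustrative
# only; mirrors the logic, not Digsby's exact output):
#
#     >>> import platform
#     >>> s = platform.platform()
#     >>> if s.find("XP") == -1:
#     ...     s += " " + platform.release()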
# presence start
def presence_push(self):
pres = Presence()
self.send_presence(pres)
def presence_control(self,*_a, **_k):
'''
Handle subscription control <presence/> stanzas -- acknowledge
them.
'''
return True
# presence end
@callsback
def send_cb(self, query, timeout_duration=None, callback=None):
'''
Given a callback object with callable attributes success, error, and timeout,
sends out a query with response handlers set.
'''
def free_stanza(st):
stream = self.get_stream()
with stream.lock:
if stream.doc_out is not None:
st.free()
else:
if st._error:
st._error.xmlnode = None
st.xmlnode = None
def my_super_callback_success(stanza):
stanza.handler_frees = True
if not isinstance(callback.success, list):
try:
log.info("what is this?, callback was %r", funcinfo(callback.success))
except Exception:
log.error('bad callback.success %r', callback.success)
return free_stanza(stanza)
if len(callback.success) == 0:
return free_stanza(stanza)
try:
f = callback.success[0].cb
_call_free = CallLater(lambda:free_stanza(stanza))
def my_hyper_callback_success(st):
try: f(st)
except Exception:
log.error('error processing %r', f)
print_exc()
finally: _call_free()
callback.success[0].cb = my_hyper_callback_success
callany(callback.success, stanza)
except Exception:
print_exc()
log.error('failed to set up success stanza.free for %r, %r', callback, stanza)
def my_super_callback_error(stanza):
stanza.handler_frees = True
if not isinstance(callback.error, list):
try:
log.info("what is this?, callback was %r", funcinfo(callback.error))
except Exception:
log.error('bad callback.error %r', callback.error)
return free_stanza(stanza)
if len(callback.error) == 0:
return free_stanza(stanza)
try:
f = callback.error[0].cb
_call_free = CallLater(lambda:free_stanza(stanza))
def my_hyper_callback_error(st):
try: f(st)
except Exception:
log.error('error processing %r', f)
print_exc()
finally: _call_free()
callback.error[0].cb = my_hyper_callback_error
callany(callback.error, stanza)
except Exception:
print_exc()
log.error('failed to set up error stanza.free for %r, %r', callback, stanza)
def my_super_callback_timeout(*_a):
'''
consume the arguments; they come from ExpiringDictionary and cause problems
with the number of arguments taken by the timeout function,
which in this case is expected to be 0, since we can store the context in its
closure and hardly anything even uses timeouts.
'''
return callback.timeout()
s = self.get_stream()
if s is not None:
try:
if timeout_duration is not None:
self.stream.set_response_handlers(query, my_super_callback_success,
my_super_callback_error,
my_super_callback_timeout,
timeout = timeout_duration)
else:
self.stream.set_response_handlers(query, my_super_callback_success,
my_super_callback_error,
my_super_callback_timeout)
except Exception as _e:
print_exc()
log.critical("couln't set stream handlers")
return callany(callback.error)
try:
self.stream.send(query)
except Exception as _e:
print_exc()
log.critical("couln't send query")
try:
return callany(callback.error)
except Exception:
log.critical("couln't call callany(callback.error) %r", callback)
def send_presence(self, pres):
assert isinstance(pres, Presence)
self.send(pres)
def send_iq(self, iq):
assert isinstance(iq, Iq)
self.send(iq)
def send(self, stanza):
s = self.get_stream()
if s is not None:
try:
self.stream.send(stanza)
except Exception as _e:
print_exc()
log.critical("couln't send stanza")
def idle(self):
try:
JabberClient.idle(self)
if not self.stream.socket or self.stream.eof:
raise AssertionError("if the stream is dead or gone, we can't really send a keep-alive")
except Exception:
self.stop_timer_loops()
else:
self.cache.tick()
def stream_error(self, err):
if err.get_condition().name == "pwchanged":
self.change_reason(self.Reasons.BAD_PASSWORD)
self.profile.signoff(kicked=True)
elif err.get_condition().name == "conflict":
self.change_reason(self.Reasons.OTHER_USER)
else:
self.change_reason(self.Reasons.CONN_LOST)
JabberClient.stream_error(self, err)
class Statuses(jabber.protocol.Statuses):
SYNC_PREFS = _('Synchronizing Preferences...')
#LOAD_SKIN = _('Loading Skin...')
AUTHORIZED = _('Synchronizing...')
@callsback
def get_blob_raw(self, elem_name, tstamp='0', callback=None):
try:
blob = blobs.name_to_obj[elem_name](tstamp)
blob._data = None
# Source repository: JetBrains-Research/pubtrends
import logging
import numpy as np
import pandas as pd
from networkx.readwrite import json_graph
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from pysrc.papers.analysis.citations import find_top_cited_papers, find_max_gain_papers, \
find_max_relative_gain_papers, build_cit_stats_df, merge_citation_stats, build_cocit_grouped_df
from pysrc.papers.analysis.evolution import topic_evolution_analysis, topic_evolution_descriptions
from pysrc.papers.analysis.graph import build_papers_graph, \
to_weighted_graph, sparse_graph
from pysrc.papers.analysis.metadata import popular_authors, popular_journals
from pysrc.papers.analysis.node2vec import node2vec
from pysrc.papers.analysis.numbers import extract_numbers
from pysrc.papers.analysis.text import texts_embeddings, vectorize_corpus, tokens_embeddings
from pysrc.papers.analysis.topics import get_topics_description, cluster_and_sort
from pysrc.papers.db.loaders import Loaders
from pysrc.papers.db.search_error import SearchError
from pysrc.papers.progress import Progress
from pysrc.papers.utils import SORT_MOST_CITED
logger = logging.getLogger(__name__)
class PapersAnalyzer:
TOP_CITED_PAPERS = 50
# Features are originally taken from paper:
# 1) Which type of citation analysis generates the most accurate taxonomy of
# scientific and technical knowledge? (https://arxiv.org/pdf/1511.05078.pdf)
# ...bibliographic coupling (BC) was the most accurate, followed by co-citation (CC).
# Direct citation (DC) was a distant third among the three...
#
# Coefficients were estimated in the paper: https://dl.acm.org/doi/10.1145/3459930.3469501
# Poster: https://drive.google.com/file/d/1SeqJtJtaHSO6YihG2905boOEYL1NiSP1/view
# See for details: https://github.com/JetBrains-Research/pubtrends-nature-reviews
SIMILARITY_BIBLIOGRAPHIC_COUPLING = 3 # Limited by number of references, applied to log
SIMILARITY_COCITATION = 3 # Limited by number of co-citations, applied to log
SIMILARITY_CITATION = 1 # Limited by 1 citation
# Minimal number of common references, used to reduce the papers graph edge count
# Value > 1 is especially useful when analysing a single paper; it removes meaningless connections by construction
SIMILARITY_BIBLIOGRAPHIC_COUPLING_MIN = 1
# Minimal number of co-citations, used to reduce the papers graph edge count
SIMILARITY_COCITATION_MIN = 1
# Papers embeddings are a concatenation of graph and text embeddings times the corresponding factors
# Graph embeddings produce clearer topic separation, so they get a bigger coefficient
GRAPH_EMBEDDINGS_FACTOR = 5
TEXT_EMBEDDINGS_FACTOR = 1
# Reduce number of edges in papers graph
PAPERS_GRAPH_EDGES_TO_NODES = 5
# Global vectorization max vocabulary size
VECTOR_WORDS = 10000
# Terms with lower document frequency will be ignored (removes rare words)
VECTOR_MIN_DF = 0.001
# Terms with higher document frequency will be ignored (removes abundant words)
VECTOR_MAX_DF = 0.8
PCA_COMPONENTS = 30
# Configure number and size of topics
TOPICS_NUMBER_SMALL = dict(max_number=10, min_size=50)
TOPICS_NUMBER_MEDIUM = dict(max_number=20, min_size=20)
TOPICS_NUMBER_LARGE = dict(max_number=50, min_size=10)
@staticmethod
def get_topics_info(topics):
if topics.lower() == 'small':
info = PapersAnalyzer.TOPICS_NUMBER_SMALL
elif topics.lower() == 'medium':
info = PapersAnalyzer.TOPICS_NUMBER_MEDIUM
elif topics.lower() == 'large':
info = PapersAnalyzer.TOPICS_NUMBER_LARGE
else:
raise Exception(f'Unknown topics size: {topics}')
return info['max_number'], info['min_size']
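# Usage sketch (doctest-style), mirroring the dicts above:
#
#     >>> PapersAnalyzer.get_topics_info('medium')
#     (20, 20)
#     >>> PapersAnalyzer.get_topics_info('large')
#     (50, 10)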
# Number of top cited papers in topic picked for description computation
TOPIC_MOST_CITED_PAPERS = 50
# Number of words for topic description
TOPIC_DESCRIPTION_WORDS = 10
POPULAR_JOURNALS = 50
POPULAR_AUTHORS = 50
# Expand limit by references before filtration by citations and keywords
EXPAND_LIMIT = 5000
# Control citations count
EXPAND_CITATIONS_Q_LOW = 5
EXPAND_CITATIONS_Q_HIGH = 95
EXPAND_CITATIONS_SIGMA = 3
# Take up to fraction of top similarity
EXPAND_SIMILARITY_THRESHOLD = 0.5
# Impact of a single paper when analysing citations and MeSH terms in single-paper analysis
SINGLE_PAPER_IMPACT = 20
EVOLUTION_MIN_PAPERS = 100
EVOLUTION_STEP = 10
def __init__(self, loader, config, test=False):
self.config = config
self.progress = Progress(self.total_steps())
self.loader = loader
self.source = Loaders.source(self.loader, test)
def total_steps(self):
return 14 + 1 # One extra step for visualization
def teardown(self):
self.progress.remove_handler()
def search_terms(self, query, limit=None, sort=None, noreviews=True, task=None):
# Search articles relevant to the terms
if len(query) == 0:
raise SearchError('Empty search string, please use search terms or '
'all the query wrapped in "" for phrasal search')
limit = limit or self.config.max_number_of_articles
sort = sort or SORT_MOST_CITED
noreviews_msg = ", not reviews" if noreviews else ""
self.progress.info(f'Searching {limit} {sort.lower()} publications matching {query}{noreviews_msg}',
current=1, task=task)
ids = self.loader.search(query, limit=limit, sort=sort, noreviews=noreviews)
if len(ids) == 0:
raise SearchError(f"Nothing found for search query: {query}")
else:
self.progress.info(f'Found {len(ids)} publications in the database', current=1, task=task)
return ids
def analyze_papers(self, ids, query, topics='medium', test=False, task=None):
self.progress.info('Loading publication data', current=2, task=task)
self.query = query
self.df = self.loader.load_publications(ids)
if len(self.df) == 0:
raise SearchError(f'Nothing found for ids: {ids}')
else:
self.progress.info(f'Found {len(self.df)} papers in database', current=2, task=task)
ids = list(self.df['id']) # Limit ids to existing papers only!
self.pub_types = list(set(self.df['type']))
self.progress.info('Analyzing title and abstract texts', current=3, task=task)
self.corpus, self.corpus_tokens, self.corpus_counts = vectorize_corpus(
self.df,
max_features=PapersAnalyzer.VECTOR_WORDS,
min_df=PapersAnalyzer.VECTOR_MIN_DF,
max_df=PapersAnalyzer.VECTOR_MAX_DF,
test=test
)
if PapersAnalyzer.TEXT_EMBEDDINGS_FACTOR != 0:
logger.debug('Analyzing tokens embeddings')
self.corpus_tokens_embedding = tokens_embeddings(
self.corpus, self.corpus_tokens, test=test
)
logger.debug('Analyzing texts embeddings')
self.texts_embeddings = texts_embeddings(
self.corpus_counts, self.corpus_tokens_embedding
)
else:
self.texts_embeddings = np.zeros(shape=(len(self.df), 0))
self.progress.info('Loading citations for papers', current=4, task=task)
logger.debug('Loading citations by year statistics')
cits_by_year_df = self.loader.load_citations_by_year(ids)
logger.debug(f'Found {len(cits_by_year_df)} records of citations by year')
self.cit_stats_df = build_cit_stats_df(cits_by_year_df, len(ids))
if len(self.cit_stats_df) == 0:
logger.warning('No citations of papers were found')
self.df, self.citation_years = merge_citation_stats(self.df, self.cit_stats_df)
logger.debug('Loading citations information')
self.cit_df = self.loader.load_citations(ids)
logger.debug(f'Found {len(self.cit_df)} citations between papers')
self.progress.info('Calculating co-citations for selected papers', current=5, task=task)
self.cocit_df = self.loader.load_cocitations(ids)
cocit_grouped_df = build_cocit_grouped_df(self.cocit_df)
logger.debug(f'Found {len(cocit_grouped_df)} co-cited pairs of papers')
self.cocit_grouped_df = cocit_grouped_df[cocit_grouped_df['total'] >= self.SIMILARITY_COCITATION_MIN].copy()
logger.debug(f'Filtered {len(self.cocit_grouped_df)} co-cited pairs of papers, '
f'threshold {self.SIMILARITY_COCITATION_MIN}')
self.progress.info('Processing bibliographic coupling for selected papers', current=6, task=task)
bibliographic_coupling_df = self.loader.load_bibliographic_coupling(ids)
logger.debug(f'Found {len(bibliographic_coupling_df)} bibliographic coupling pairs of papers')
self.bibliographic_coupling_df = bibliographic_coupling_df[
bibliographic_coupling_df['total'] >= self.SIMILARITY_BIBLIOGRAPHIC_COUPLING_MIN].copy()
logger.debug(f'Filtered {len(self.bibliographic_coupling_df)} bibliographic coupling pairs of papers '
f'threshold {self.SIMILARITY_BIBLIOGRAPHIC_COUPLING_MIN}')
self.progress.info('Analyzing papers graph', current=7, task=task)
self.papers_graph = build_papers_graph(
self.df, self.cit_df, self.cocit_grouped_df, self.bibliographic_coupling_df,
)
self.progress.info(f'Built papers graph - {self.papers_graph.number_of_nodes()} nodes and '
f'{self.papers_graph.number_of_edges()} edges', current=7, task=task)
logger.debug('Analyzing papers graph embeddings')
self.weighted_similarity_graph = to_weighted_graph(self.papers_graph, PapersAnalyzer.similarity)
logger.debug('Prepare sparse graph for visualization')
self.sparse_papers_graph = self.prepare_sparse_papers_graph(self.papers_graph, self.weighted_similarity_graph)
if PapersAnalyzer.GRAPH_EMBEDDINGS_FACTOR != 0:
gs = sparse_graph(self.weighted_similarity_graph)
self.graph_embeddings = node2vec(self.df['id'], gs)
else:
self.graph_embeddings = np.zeros(shape=(len(self.df), 0))
logger.debug('Computing aggregated graph and text embeddings for papers')
papers_embeddings = np.concatenate(
(self.graph_embeddings * PapersAnalyzer.GRAPH_EMBEDDINGS_FACTOR,
self.texts_embeddings * PapersAnalyzer.TEXT_EMBEDDINGS_FACTOR), axis=1)
if len(self.df) > 1:
logger.debug('Computing PCA projection')
pca = PCA(n_components=min(len(papers_embeddings), PapersAnalyzer.PCA_COMPONENTS))
t = StandardScaler().fit_transform(papers_embeddings)
self.pca_coords = pca.fit_transform(t)
logger.debug(f'Explained variation {int(np.sum(pca.explained_variance_ratio_) * 100)}%')
logger.debug('Apply TSNE transformation on papers PCA coords')
tsne_embeddings_2d = TSNE(n_components=2, random_state=42).fit_transform(self.pca_coords)
self.df['x'] = tsne_embeddings_2d[:, 0]
self.df['y'] = tsne_embeddings_2d[:, 1]
else:
self.pca_coords = np.zeros(shape=(len(self.df), 128))
self.df['x'] = 0
self.df['y'] = 0
self.progress.info(f'Extracting {topics.lower()} number of topics from papers text and graph similarity',
current=8, task=task)
topics_max_number, topic_min_size = PapersAnalyzer.get_topics_info(topics)
if self.papers_graph.number_of_nodes() <= topic_min_size:
logger.debug('Small graph - single topic')
self.clusters, self.dendrogram = [0] * len(self.df), None
self.df['comp'] = self.clusters
else:
logger.debug('Extracting topics from papers embeddings')
self.clusters, self.dendrogram = cluster_and_sort(self.pca_coords, topics_max_number, topic_min_size)
self.df['comp'] = self.clusters
self.progress.info(f'Analyzing {len(set(self.df["comp"]))} topics descriptions',
current=9, task=task)
comp_pids = self.df[['id', 'comp']].groupby('comp')['id'].apply(list).to_dict()
self.topics_description = get_topics_description(
self.df, comp_pids,
self.corpus, self.corpus_tokens, self.corpus_counts,
n_words=self.TOPIC_DESCRIPTION_WORDS
)
kwds = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs[:self.TOPIC_DESCRIPTION_WORDS]]))
for comp, vs in self.topics_description.items()]
self.kwd_df = pd.DataFrame(kwds, columns=['comp', 'kwd'])
self.progress.info('Identifying top cited papers', current=10, task=task)
logger.debug('Top cited papers')
self.top_cited_papers, self.top_cited_df = find_top_cited_papers(self.df, self.TOP_CITED_PAPERS)
logger.debug('Top cited papers per year')
self.max_gain_papers, self.max_gain_df = find_max_gain_papers(self.df, self.citation_years)
logger.debug('Hot paper per year')
self.max_rel_gain_papers, self.max_rel_gain_df = find_max_relative_gain_papers(
self.df, self.citation_years
)
# Additional analysis steps
if self.config.feature_authors_enabled:
self.progress.info("Analyzing authors and groups", current=11, task=task)
self.author_stats = popular_authors(self.df, n=self.POPULAR_AUTHORS)
if self.config.feature_journals_enabled:
self.progress.info("Analyzing popular journals", current=12, task=task)
self.journal_stats = popular_journals(self.df, n=self.POPULAR_JOURNALS)
if self.config.feature_numbers_enabled:
if len(self.df) > 0:
self.progress.info('Extracting quantitative features from abstracts texts', current=13, task=task)
self.numbers_df = extract_numbers(self.df)
else:
logger.debug('Not enough papers for numbers extraction')
if self.config.feature_evolution_enabled:
if len(self.df) >= PapersAnalyzer.EVOLUTION_MIN_PAPERS:
self.progress.info(f'Analyzing evolution of topics {self.df["year"].min()} - {self.df["year"].max()}',
current=14, task=task)
logger.debug('Perform topic evolution analysis and get topic descriptions')
self.evolution_df, self.evolution_year_range = topic_evolution_analysis(
self.df,
self.cit_df,
self.cocit_grouped_df,
self.bibliographic_coupling_df,
self.SIMILARITY_COCITATION_MIN,
self.similarity,
self.corpus_counts,
self.corpus_tokens_embedding,
PapersAnalyzer.GRAPH_EMBEDDINGS_FACTOR,
PapersAnalyzer.TEXT_EMBEDDINGS_FACTOR,
topics_max_number,
topic_min_size,
self.EVOLUTION_STEP
)
self.evolution_kwds = topic_evolution_descriptions(
self.df, self.evolution_df, self.evolution_year_range,
self.corpus, self.corpus_tokens, self.corpus_counts, self.TOPIC_DESCRIPTION_WORDS,
self.progress, current=14, task=task
)
else:
logger.debug('Not enough papers for topics evolution')
self.evolution_df = None
self.evolution_kwds = None
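# A standalone sketch of the embedding pipeline used in analyze_papers
# (assumed shapes; not the exact pubtrends configuration):
#
#     import numpy as np
#     from sklearn.decomposition import PCA
#     from sklearn.manifold import TSNE
#     from sklearn.preprocessing import StandardScaler
#
#     emb = np.random.rand(200, 133)          # concatenated graph + text embeddings
#     t = StandardScaler().fit_transform(emb)
#     coords = PCA(n_components=30).fit_transform(t)
#     xy = TSNE(n_components=2, random_state=42).fit_transform(coords)
#     # xy[:, 0] and xy[:, 1] are the 2D paper coordinates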
@staticmethod
def prepare_sparse_papers_graph(papers_graph, weighted_similarity_graph):
sparse_papers_graph = sparse_graph(weighted_similarity_graph, PapersAnalyzer.PAPERS_GRAPH_EDGES_TO_NODES)
for i, j in sparse_papers_graph.edges():
d = papers_graph.get_edge_data(i, j)
for k, v in d.items():
sparse_papers_graph[i][j][k] = v
sparse_papers_graph[i][j]['similarity'] = PapersAnalyzer.similarity(d)
return sparse_papers_graph
@staticmethod
def similarity(d):
return \
PapersAnalyzer.SIMILARITY_BIBLIOGRAPHIC_COUPLING * np.log1p(d.get('bibcoupling', 0)) + \
PapersAnalyzer.SIMILARITY_COCITATION * np.log1p(d.get('cocitation', 0)) + \
PapersAnalyzer.SIMILARITY_CITATION * d.get('citation', 0)
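# Example (doctest-style): an edge with 3 common references, 2 co-citations
# and a direct citation scores 3*log1p(3) + 3*log1p(2) + 1:
#
#     >>> float(round(PapersAnalyzer.similarity(
#     ...     dict(bibcoupling=3, cocitation=2, citation=1)), 2))
#     8.45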
def dump(self):
"""
Dump valuable fields to JSON-serializable dict. Use 'load' to restore analyzer.
"""
return dict(
df=self.df.to_json(),
cit_df=self.cit_df.to_json(),
cocit_grouped_df=self.cocit_grouped_df.to_json(),
bibliographic_coupling_df=self.bibliographic_coupling_df.to_json(),
topics_description=self.topics_description,
kwd_df=self.kwd_df.to_json(),
pca_coords=self.pca_coords.tolist(),
sparse_papers_graph=json_graph.node_link_data(self.sparse_papers_graph),
top_cited_papers=self.top_cited_papers,
max_gain_papers=self.max_gain_papers,
max_rel_gain_papers=self.max_rel_gain_papers
)
@staticmethod
def load(fields):
"""
Load valuable fields from JSON-serializable dict. Use 'dump' to dump analyzer.
"""
# Restore main dataframe
df = pd.read_json(fields['df'])
df['id'] = df['id'].apply(str)
mapping = {}
for col in df.columns:
try:
mapping[col] = int(col)
except ValueError:
#!/usr/bin/env python
"""The classes in this module aid the specification of tally `units` in
:py:class:`pyne.simplesim.cards.ICellSurfTally` (see examples below). The class
focuses on the ability to specify cells and surfaces that are nested within
universes or other cells in nearly arbitrarily complex ways. Apart from making
user input clearler, this class has methods that help with creating cell
comments and mcnp strings.
The names of classes here are fairly short, so the user may not want to import
the entire namespace. A suggested way to import the module is::
import pyne.simplesim.nestedgeom as ng
An inheritance diagram of all the classes in this module can be found at
:ref:`pyne_simplesim_inheritance`.
Usage Examples
--------------
The subclasses of :py:class:`ICellSurfTally` can take as input any subclass of
:py:class:`IUnit`. The following shows different ways in which complex units
can be built. Here it is assumed that there is a cell named '1' in the system,
a surface named '1' in the system, a universe named '1' in the system, etc. The
unions represent averaging or totaling the units, depending on the type of
tally.
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|category |description |nestedgeom |mcnp |
+===========+=============================================================+============================================================+=======================+
|basic |surface 1 (s1) |``s1 = Surf('1')`` |1 |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |cell 1 (c1) |``c1 = Cell('1')`` |1 |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |universe 1 (u1) |``u1 = Univ('1')`` |U=1 |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|union |union of s1 and s2 |``Union(s1, s2)`` |(1 2) |
| | +------------------------------------------------------------+ |
| | |``(s1 | s2)`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |union of c1, c2, and c3 |``Union(c1, c2, c3)`` |(1 2 3) |
| | +------------------------------------------------------------+ |
| | |``(c1 | c2 | c3)`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |union of cells in u1 |``Union(u1)`` |(U=1) |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|nesting |s1 in filled cell 2 (fc2) |``fc2 = FCell('2'); s1 < fc2`` |(1 < 2) |
| | +------------------------------------------------------------+ |
| | |``s1.of(fc2)`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |c1 in fc2 in fc3 |``c1 < fc2 < fc3`` |(1 < 2 < 3) |
| | +------------------------------------------------------------+ |
| | |``c1.of( fc2.of(fc3) )`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in union of fc2 and fc3 |``s1 < Union(fc2, fc3)`` |(1 < (2 3)) |
| | +------------------------------------------------------------+ |
| | |``s1.of( Union(fc2, fc3) )`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (union of fc2 and fc3) in fc4 |``s1 < Union(fc2, fc3) < fc4`` |(1 < (2 3) < 4) |
| | +------------------------------------------------------------+ |
| | |``s1.of( Union(fc2, fc3).of(fc4) )`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in u1 in fc4 |``s1 < u1 < fc4`` |(1 < U=1 < 4) |
| | +------------------------------------------------------------+ |
| | |``s1.of( u1.of(fc4) )`` | |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|vectorized |(over s1 and s2) in fc2 |``ng.Vec(s1, s2) < fc2`` |(1 2 < 2) |
| | +------------------------------------------------------------+ |
| | |``ng.Vec(s1, s2).of(fc2)`` | |
| | +------------------------------------------------------------+ |
| | |``(s1 & s2) < fc2`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (over fc2 and fc3) |``s1 < ng.Vec(fc2, fc3)`` |(1 < 2 3) |
| | +------------------------------------------------------------+ |
| | |``s1 < (fc2 & fc3)`` | |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|lattice |s1 in (union of fc2 and (fc3 in lattice elem 5)) in fc4 |``la = Lin(5); s1 < (fc2 | FCell('3', la)) < fc4`` |(1 < (2 3[5]) < 4) |
| | +------------------------------------------------------------+ |
| | |``s1 < (fc2 | fc3.lat(la)) < fc4`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (fc2 in lattice x range 0:1, y range 0:2, z range 0:3) |``s1 < fc2.lat(Rng([0,1], [0,2], [0,3]))`` |(1 < 2[0:1 0:2 0:3]) |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (fc2 in lattice coord (0, 1, 2)) |``s1 < fc2.lat(Cor([0, 1, 2]))`` |(1 < 2[0 1 2]) |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (fc2 in lattice coords (0, 1, 2), (3, 2, 1)) |``s1 < fc2.lat( Cor([ [0, 1, 2], [3, 2, 1]]) )`` |(1 < 2[0 1 2, 3 2 1]) |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
"""
from warnings import warn
from pyne.utils import VnVWarning
warn(__name__ + " is not yet V&V compliant.", VnVWarning)
class IUnit(object):
"""Abstract base class for tally units. The user does not use this class
directly.
"""
# TODO need to define a 'recursive' __repr__.
def __init__(self, up=None, down=None):
"""Currently, the two keyword arguments are not actually used, but are
sometimes set directly, after initialization.
"""
self.up = up
self.down = down
def __lt__(self, next_level_up):
"""The user can use the < operator to perform the operation of
:py:meth:`of`, e.g. (cells/surfs) < (cells/univ)
"""
return self.of(next_level_up)
def of(self, next_level_up):
"""Returns a unit where ``self`` must be in ``next_level_up`` to be
tallied. ``self`` must be a cell or surface, or `vector` or `union`
thereof (cell/surf), and
``next_level_up`` can be a cell or universe, or union thereof
(cells/univ).
"""
self.up = next_level_up
self.up.down = self
if self.down: return self.down
else: return self
def __or__(self, right):
"""A convenient way to call :py:meth:`union`."""
return self.union(right)
def union(self, right):
"""Returns a :py:class:`Union` of ``self`` with ``right``."""
if isinstance(self, Union):
self.brothers += [right]
return self
else:
return Union(self, right)
def __and__(self, right):
"""A convenient way to call :py:meth:`vector`."""
return self.vector(right)
def vector(self, right):
"""Returns a :py:class:`Vec` of ``self`` with ``right``."""
if isinstance(self, Vec):
self.sisters += [right]
return self
else:
return Vec(self, right)
def comment(self, inner):
return "{0}{1}{2}{3}".format(
" (" if self.up and not self.down else "",
inner,
(" in" + self.up.comment()) if self.up else "",
")" if self.down and not self.up else "")
def mcnp(self, float_format, sim, inner):
return "{0}{1}{2}{3}".format(
" (" if self.up and not self.down else "",
inner,
(" <" + self.up.mcnp(float_format, sim)) if self.up else "",
")" if self.down and not self.up else "")
class ICellSurf(IUnit):
"""Abstract base class for surfaces and cells in the lowest level of the
nested geometry. The user directly uses the subclasses of this class. For
cells in higher levels of nesting (e.g. closer to the real world), see
:py:class:`FCell`.
"""
def __init__(self, name):
"""
Parameters
----------
name : str
Name of the surface or cell. Depending on the subclass, the name is
looked up appropriately in the system definition to obtain the
surface or cell number.
"""
super(ICellSurf, self).__init__()
self.name = name
class Surf(ICellSurf):
"""The user uses this class directly to reference a surface in the system
definition for tallying.
.. inheritance-diagram:: pyne.simplesim.nestedgeom.Surf
"""
def comment(self):
return super(Surf, self).comment(" surf {0!r}".format(self.name))
def mcnp(self, float_format, sim):
return super(Surf, self).mcnp(float_format, sim,
" {0}".format(sim.sys.surface_num(self.name)))
class Cell(ICellSurf):
"""The user uses this class directly to reference a cell in the system
definition for tallying.
.. inheritance-diagram:: pyne.simplesim.nestedgeom.Cell
"""
def comment(self, inner=None):
return super(Cell, self).comment(" cell {0!r}{1}".format(self.name,
inner if inner else ""))
def mcnp(self, float_format, sim, inner=None):
return super(Cell, self).mcnp(float_format, sim,
" {0}{1}".format(sim.sys.cell_num(self.name),
inner if inner else ""))
class FCell(Cell):
"""This is subclassed from :py:class:`Cell`. Its name stands for filled
cell. It is to be used for higher-level cells (closer to the real world),
and has an additional attribute to specify specific lattice elements from
this cell if it is a lattice cell.
.. inheritance-diagram:: pyne.simplesim.nestedgeom.FCell
"""
def __init__(self, name, lat_spec=None):
"""
Parameters
----------
name : str
See :py:class:`ICellSurf`.
lat_spec : subclass of :py:class:`LatticeSpec`
"""
super(FCell, self).__init__(name)
self.lat_spec = lat_spec
def lat(self, lat_spec):
self.lat_spec = lat_spec
return self
def comment(self):
return super(FCell, self).comment(
self.lat_spec.comment() if self.lat_spec else "")
def mcnp(self, float_format, sim):
return super(FCell, self).mcnp(float_format, sim,
self.lat_spec.mcnp(float_format, sim) if self.lat_spec else "")
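# A minimal usage sketch of the operator shortcuts (assumes a system with
# surface '1' and cells '2'-'4' defined; see the table in the module
# docstring):
#
#     s1 = Surf('1')
#     fc2, fc3, fc4 = FCell('2'), FCell('3'), FCell('4')
#     unit = s1 < (fc2 | fc3) < fc4    # MCNP: (1 < (2 3) < 4)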
class Univ(IUnit):
"""A universe. It is used in higher levels of nesting (closer to the real
world). The class, given the name of a universe in the system, looks up the
appropriate universe number.
.. inheritance-diagram:: pyne.simplesim.nestedgeom.Univ
"""
def __init__(self, name):
"""
Parameters
----------
name : str
Name of the universe in the system definition.
"""
super(Univ, self).__init__()
self.name = name
def comment(self):
return super(Univ, self).comment(" univ {0!r}".format(self.name))
def mcnp(self, float_format, sim):
return super(Univ, self).mcnp(float_format, sim,
" U={0}".format(sim.sys.universe_num(self.name)))
class Union(IUnit):
"""A union of surfaces, cells, or of a single universe.
.. inheritance-diagram:: pyne.simplesim.nestedgeom.Union
"""
def __init__(self, *args):
"""
Parameters
-----------
*args : list of instances of :py:class:`IUnit` subclasses.
"""
# all args must be an instance of IUnit or its subclasses.
self.brothers = args
super(Union, self).__init__()
def comment(self):
string = ""
counter = 0
for bro in self.brothers:
counter += 1
string += bro.comment()
if counter < len(self.brothers): string += ","
return super(Union, self).comment(" union of ({0})".format(string))
def mcnp(self, float_format, sim):
string = ""
for bro in self.brothers:
string += bro.mcnp(float_format, sim)
return super(Union, self).mcnp(float_format, sim,
" ({0})".format(string.strip()))
# Now we'll figure out the genes for the junctions that aren't in our
# database. We can associate each start and end with a gene and use this.
start_gene = dict(zip(extDF['chrom:start'], extDF.gene))
end_gene = dict(zip(extDF['chrom:end'], extDF.gene))
genes = []
for ind in annotDF[annotDF.ext_annotated == False].index:
cur_start = annotDF.ix[ind,'chrom:start']
cur_end = annotDF.ix[ind,'chrom:end']
gene = start_gene.get(cur_start, '')
if gene == '':
gene = end_gene.get(cur_end, '')
genes.append(gene)
annotDF.ix[annotDF.ext_annotated == False, 'gene_id'] = genes
# We can use the genes to assign strand to the novel splice junctions.
strandSE = pd.Series(dict(zip(extDF.gene,extDF.strand)))
ind = annotDF[annotDF.ext_annotated == False].index
annotDF.ix[ind, 'strand'] = strandSE[annotDF.ix[ind, 'gene_id']].values
# And re-index with the strand info.
annotDF.index = [ x + ':' + annotDF.ix[x,'strand'] for x in annotDF.index ]
# Now we'll add donor and acceptor info.
pos_ind = annotDF[annotDF.strand == '+'].index
neg_ind = annotDF[annotDF.strand == '-'].index
annotDF.ix[pos_ind, 'donor'] = (annotDF.ix[pos_ind, 'chrom'] + ':' +
annotDF.ix[pos_ind, 'start'].astype(str) +
':' + annotDF.ix[pos_ind, 'strand'])
annotDF.ix[pos_ind, 'acceptor'] = (annotDF.ix[pos_ind, 'chrom'] + ':' +
annotDF.ix[pos_ind, 'end'].astype(str)
+ ':' + annotDF.ix[pos_ind, 'strand'])
annotDF.ix[neg_ind, 'acceptor'] = (annotDF.ix[neg_ind, 'chrom'] + ':' +
annotDF.ix[neg_ind, 'start'].astype(str)
+ ':' + annotDF.ix[neg_ind, 'strand'])
annotDF.ix[neg_ind, 'donor'] = (annotDF.ix[neg_ind, 'chrom'] + ':' +
annotDF.ix[neg_ind, 'end'].astype(str) +
':' + annotDF.ix[neg_ind, 'strand'])
# And whether the donor or acceptor is in the external database or not.
ext_donor = pd.DataFrame(False, index=list(set(extDF.donor)),
columns=['novel_donor'])
ext_acceptor = pd.DataFrame(False, index=list(set(extDF.acceptor)),
columns=['novel_acceptor'])
annotDF = annotDF.merge(ext_donor, left_on='donor', right_index=True,
how='left')
annotDF.ix[annotDF.novel_donor.isnull(), 'novel_donor'] = True
annotDF = annotDF.merge(ext_acceptor, left_on='acceptor', right_index=True,
how='left')
annotDF.ix[annotDF.novel_acceptor.isnull(), 'novel_acceptor'] = True
# ext_donorS = frozenset(extDF.donor)
# ext_acceptorS = frozenset(extDF.acceptor)
# annotDF['novel_donor'] = False
# annotDF['novel_acceptor'] = False
# ind = annotDF[annotDF.ext_annotated == False].index
# if len(ind) > 0:
# novel_donor = []
# novel_acceptor = []
# for i in ind:
# novel_donor.append(annotDF.ix[i, 'donor'] not in ext_donorS)
# novel_acceptor.append(annotDF.ix[i, 'acceptor'] not in ext_acceptorS)
# annotDF.ix[ind, 'novel_donor'] = novel_donor
# annotDF.ix[ind, 'novel_acceptor'] = novel_acceptor
# Sort by gene ID and start/end.
annotDF = annotDF.sort_values(by=['gene_id', 'start', 'end'])
# Make file with counts for the junctions we are interested in.
L = [ juncRE.match(x).group().strip(':') for x in annotDF.index ]
countDF = sj_outP.ix[:, L, 'unique_junction_reads']
countDF.index = annotDF.index
stats = []
t = extDF.shape[0]
stats.append('Junction filtering stats')
stats.append(('Number of junctions in external '
'annotation\t{0:,}').format(t))
t = annotDF.annotated.sum()
stats.append(('Number observed junctions in STAR '
'annotation\t{0:,}').format(t))
t = annotDF.ext_annotated.sum()
stats.append(('Number observed junctions in external '
'annotation\t{0:,}').format(t))
t = annotDF.shape[0] - annotDF.ext_annotated.sum()
stats.append(('Number of observed junctions not in external '
'annotation\t{0:,}').format(t))
t = annotDF.ix[annotDF.ext_annotated == False,'annotated'].sum()
stats.append(('Number of observed junctions not in external annotation but '
'in STAR annotation\t{0:,}').format(t))
t = sum(annotDF.ix[annotDF.ext_annotated == False,'annotated'].values == 0)
stats.append(('Number of observed junctions not in external annotation and '
'not in STAR annotation\t{0:,}').format(t))
t = len(set(annotDF.ix[annotDF.novel_donor, 'donor']))
stats.append(('Number of novel donors\t{0:,}').format(t))
t = annotDF.novel_donor.sum()
stats.append(('Number of novel junctions with novel '
'donors\t{0:,}').format(t))
t = len(set(annotDF.ix[annotDF.novel_acceptor, 'acceptor']))
stats.append(('Number of novel acceptors\t{0:,}').format(t))
t = annotDF.novel_acceptor.sum()
stats.append(('Number of novel junctions with novel '
'acceptors\t{0:,}').format(t))
t = (annotDF[annotDF.ext_annotated == False].shape[0] -
sum(annotDF.novel_donor) -
sum(annotDF.novel_acceptor))
stats.append(('Number of novel junctions with new combination of donor and '
'acceptor\t{0:,}').format(t))
return countDF, annotDF, stats
def combine_sj_out(
fns,
external_db,
total_jxn_cov_cutoff=20,
define_sample_name=None,
verbose=False,
):
"""Combine SJ.out.tab files from STAR by filtering based on coverage and
comparing to an external annotation to discover novel junctions.
Parameters
----------
fns : list of strings
Filenames of SJ.out.tab files to combine.
external_db : str
Filename of splice junction information from external database. The file
should have a header and contain the following columns: 'gene',
'chrom', 'start', 'end', 'strand', 'chrom:start', 'chrom:end', 'donor',
'acceptor', 'intron'.
total_jxn_cov_cutoff : int
Discard junctions with fewer than this many reads summed over all
samples.
define_sample_name : function
A function mapping the SJ.out.tab filenames to sample names.
Returns
-------
countDF : pandas.DataFrame
Number of unique junction spanning reads for each junction that passed
filtering criteria.
annotDF : pandas.DataFrame
Annotation information for junctions that passed filtering criteria.
stats : list of strings
Human readable statistics.
"""
if verbose:
import sys
# I'll start by figuring out which junctions we will keep.
counts = _total_jxn_counts(fns)
jxns = set(counts[counts >= total_jxn_cov_cutoff].index)
if verbose:
sys.stderr.write('Counting done\n')
stats = []
sj_outD = _make_sj_out_dict(fns, jxns=jxns,
define_sample_name=define_sample_name)
stats.append('Number of junctions in SJ.out file per sample')
for k in sj_outD.keys():
stats.append('{0}\t{1:,}'.format(k, sj_outD[k].shape[0]))
stats.append('')
if verbose:
sys.stderr.write('Dict done\n')
sj_outP, annotDF = _make_sj_out_panel(sj_outD, total_jxn_cov_cutoff)
stats.append('SJ.out panel size\t{0}'.format(sj_outP.shape))
stats.append('')
if verbose:
sys.stderr.write('Panel done\n')
extDF, ext_stats = read_external_annotation(external_db)
stats += ext_stats
stats.append('')
if verbose:
sys.stderr.write('DB read done\n')
countsDF, annotDF, filter_stats = _filter_jxns_donor_acceptor(sj_outP,
annotDF,
extDF)
if verbose:
sys.stderr.write('Filter done\n')
annotDF = _find_novel_donor_acceptor_dist(annotDF, extDF)
if verbose:
sys.stderr.write('Dist done\n')
stats += filter_stats
return countsDF, annotDF, stats
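# Usage sketch (hypothetical file names):
#
#     fns = ['sampleA.SJ.out.tab', 'sampleB.SJ.out.tab']
#     counts, annot, stats = combine_sj_out(fns, 'external_junctions.tsv',
#                                           total_jxn_cov_cutoff=20)
#     counts.to_csv('sj_counts.tsv', sep='\t')
#     annot.to_csv('sj_annot.tsv', sep='\t')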
def _total_jxn_counts(fns):
"""Count the total unique coverage junction for junctions in a set of
SJ.out.tab files."""
df = pd.read_table(fns[0], header=None, names=COLUMN_NAMES)
df.index = (df.chrom + ':' + df.start.astype(int).astype(str) + '-' +
df.end.astype(int).astype(str))
counts = df.unique_junction_reads
for fn in fns[1:]:
df = pd.read_table(fn, header=None, names=COLUMN_NAMES)
df.index = (df.chrom + ':' + df.start.astype(int).astype(str) + '-' +
df.end.astype(int).astype(str))
counts = counts.add(df.unique_junction_reads, fill_value=0)
return counts
def _make_splice_targets_dict(df, feature, strand):
"""Make dict mapping each donor to the location of all acceptors it splices
to or each acceptor to all donors it splices from.
Parameters
----------
df : pandas.DataFrame
Dataframe with splice junction information from external database
containing columns 'gene', 'chrom', 'start', 'end', 'strand',
'chrom:start', 'chrom:end', 'donor', 'acceptor', 'intron'.
feature : string
Either 'donor' or 'acceptor'.
strand : string
Either '+' or '-'.
Returns
-------
d : dict
If feature='donor', dict whose keys are all distinct donors in df and
whose values are the distinct locations (integers) of the acceptors that
donor splices to in a numpy array. If feature='acceptor', dict whose
keys are all distinct acceptors in df and whose values are the distinct
locations (integers) of the donors that acceptor splices from in a numpy
array.
"""
g = df[df.strand == strand].groupby(feature)
d = dict()
if strand == '+':
if feature == 'donor':
target = 'end'
if feature == 'acceptor':
target = 'start'
if strand == '-':
if feature == 'donor':
target = 'start'
if feature == 'acceptor':
target = 'end'
for k in g.groups.keys():
d[k] = np.array(list(set(df.ix[g.groups[k], target])))
d[k].sort()
return d
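# Toy sketch (minimal columns only; real input comes from the external
# annotation dataframe):
#
#     df = pd.DataFrame({'strand': ['+', '+'],
#                        'donor': ['chr1:100:+', 'chr1:100:+'],
#                        'start': [100, 100], 'end': [200, 300]})
#     _make_splice_targets_dict(df, 'donor', '+')
#     # -> {'chr1:100:+': array([200, 300])}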
def _find_novel_donor_acceptor_dist(annot, ext):
"""Find nearest annotated upstream/downstream donor/acceptor for novel
donor/acceptors.
Parameters
----------
annot : pandas.DataFrame
Dataframe with observed splice junctions (novel and known) with columns
'chrom', 'first_bp_intron', 'last_bp_intron', 'strand', 'intron_motif',
'annotated', 'ext_annotated', 'chrom:start', 'chrom:end', 'gene_id',
'donor', 'acceptor', 'novel_donor', 'novel_acceptor'.
ext : pandas.DataFrame
Dataframe with splice junction information from external database
containing columns 'gene', 'chrom', 'start', 'end', 'strand',
'chrom:start', 'chrom:end', 'donor', 'acceptor', 'intron'.
Returns
-------
annot : pandas.DataFrame
Annotation information with added columns for distance from novel
donor/acceptor to nearest upstream/downstream donor/acceptor.
"""
pos_donor_to_acceptors = _make_splice_targets_dict(ext, 'donor', '+')
pos_acceptor_to_donors = _make_splice_targets_dict(ext, 'acceptor', '+')
neg_donor_to_acceptors = _make_splice_targets_dict(ext, 'donor', '-')
neg_acceptor_to_donors = _make_splice_targets_dict(ext, 'acceptor', '-')
annot = copy.deepcopy(annot)
annot['upstream_donor_dist'] = np.nan
annot['downstream_donor_dist'] = np.nan
annot['upstream_acceptor_dist'] = np.nan
annot['downstream_acceptor_dist'] = np.nan
juncs = annot[annot.novel_donor & (annot.strand == '+')].index
up, down = _dist_to_annot_donor_acceptor(annot.ix[juncs],
pos_acceptor_to_donors,
'+',
'donor')
annot.ix[juncs, 'upstream_donor_dist'] = up
annot.ix[juncs, 'downstream_donor_dist'] = down
juncs = annot[annot.novel_donor & (annot.strand == '-')].index
up, down = _dist_to_annot_donor_acceptor(annot.ix[juncs],
neg_acceptor_to_donors,
'-',
'donor')
annot.ix[juncs, 'upstream_donor_dist'] = up
annot.ix[juncs, 'downstream_donor_dist'] = down
juncs = annot[annot.novel_acceptor & (annot.strand == '+')].index
up, down = _dist_to_annot_donor_acceptor(annot.ix[juncs],
pos_donor_to_acceptors,
'+',
'acceptor')
annot.ix[juncs, 'upstream_acceptor_dist'] = up
annot.ix[juncs, 'downstream_acceptor_dist'] = down
juncs = annot[annot.novel_acceptor & (annot.strand == '-')].index
up, down = _dist_to_annot_donor_acceptor(annot.ix[juncs],
neg_donor_to_acceptors,
'-',
'acceptor')
annot.ix[juncs, 'upstream_acceptor_dist'] = up
annot.ix[juncs, 'downstream_acceptor_dist'] = down
return annot
def _dist_to_annot_donor_acceptor(df, d, strand, novel_feature):
"""Find nearest annotated upstream/downstream donor/acceptor for novel
donor/acceptors.
Parameters
----------
df : pandas.DataFrame
Dataframe with observed splice junctions (novel and known) with columns
'chrom', 'first_bp_intron', 'last_bp_intron', 'strand', 'intron_motif',
'annotated', 'ext_annotated', 'chrom:start', 'chrom:end', 'gene_id',
'donor', 'acceptor', 'novel_donor', 'novel_acceptor'.
d : dict
If df contains novel donors, should be a dict whose keys are acceptors
and whose values