"""
PScript standard functions.
Functions are declared as ... functions. Methods are written as methods
(using this), but declared as functions, and then "apply()-ed" to the
instance of interest. Declaring methods on Object is a bad idea (breaks
Bokeh, jquery).
"""
import re
# Functions not covered by this lib:
# isinstance, issubclass, print, len, max, min, callable, chr, ord
FUNCTIONS = {}
METHODS = {}
FUNCTION_PREFIX = '_pyfunc_'
METHOD_PREFIX = '_pymeth_'
def get_std_info(code):
""" Given the JS code for a std function or method, determine the
number of arguments, function_deps and method_deps.
"""
_, _, nargs = code.splitlines()[0].partition('nargs:')
nargs = [int(i.strip()) for i in nargs.strip().replace(',', ' ').split(' ') if i]
# Collect dependencies on other funcs/methods
sep = FUNCTION_PREFIX
function_deps = [part.split('(')[0].strip() for part in code.split(sep)[1:]]
sep = METHOD_PREFIX
method_deps = [part.split('.')[0].strip() for part in code.split(sep)[1:]]
# Reduce and sort
function_deps = sorted(set(function_deps))
method_deps = sorted(set(method_deps))
# Filter
function_deps = [dep for dep in function_deps if dep not in method_deps]
function_deps = set([dep for dep in function_deps if dep in FUNCTIONS])
method_deps = set([dep for dep in method_deps if dep in METHODS])
# Recurse
for dep in list(function_deps):
_update_deps(FUNCTIONS[dep], function_deps, method_deps)
for dep in list(method_deps):
_update_deps(METHODS[dep], function_deps, method_deps)
return nargs, sorted(function_deps), sorted(method_deps)
def _update_deps(code, function_deps, method_deps):
""" Given the code of a dependency, recursively resolve additional dependencies.
"""
# Collect deps
sep = FUNCTION_PREFIX
new_function_deps = [part.split('(')[0].strip() for part in code.split(sep)[1:]]
sep = METHOD_PREFIX
new_method_deps = [part.split('.')[0].strip() for part in code.split(sep)[1:]]
# Update
new_function_deps = set(new_function_deps).difference(function_deps)
new_method_deps = set(new_method_deps).difference(method_deps)
function_deps.update(new_function_deps)
method_deps.update(new_method_deps)
# Recurse
for dep in new_function_deps:
_update_deps(FUNCTIONS[dep], function_deps, method_deps)
for dep in new_method_deps:
_update_deps(METHODS[dep], function_deps, method_deps)
return function_deps, method_deps
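# Usage sketch: resolve what a single helper needs. ``op_parse_kwargs`` calls
# ``_pyfunc_op_error`` in its body, so the scan reports 'op_error' as a
# function dependency (entries are registered below in this module):
#
#     nargs, func_deps, meth_deps = get_std_info(FUNCTIONS['op_parse_kwargs'])
#     # nargs == [3], 'op_error' in func_deps, meth_deps == []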
def get_partial_std_lib(func_names, method_names, indent=0,
func_prefix=None, method_prefix=None):
""" Get the code for the PScript standard library consisting of
the given function and method names. The given indent specifies how
many sets of 4 spaces to prepend.
"""
func_prefix = 'var ' + FUNCTION_PREFIX if (func_prefix is None) else func_prefix
method_prefix = 'var ' + METHOD_PREFIX if (method_prefix is None) else method_prefix
lines = []
for name in sorted(func_names):
code = FUNCTIONS[name].strip()
if '\n' not in code:
code = code.rsplit('//', 1)[0].rstrip() # strip comment from one-liners
lines.append('%s%s = %s;' % (func_prefix, name, code))
for name in sorted(method_names):
code = METHODS[name].strip()
# lines.append('Object.prototype.%s%s = %s;' % (METHOD_PREFIX, name, code))
lines.append('%s%s = %s;' % (method_prefix, name, code))
code = '\n'.join(lines)
if indent:
lines = [' '*indent + line for line in code.splitlines()]
code = '\n'.join(lines)
return code
def get_full_std_lib(indent=0):
""" Get the code for the full PScript standard library.
The given indent specifies how many sets of 4 spaces to prepend.
If the full stdlib is made available in JavaScript, multiple
snippets of code can be transpiled without inlined stdlib parts by
using ``py2js(..., inline_stdlib=False)``.
"""
return get_partial_std_lib(FUNCTIONS.keys(), METHODS.keys(), indent)
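# Sketch: emit only what a given snippet needs. If transpiled code uses the
# std ``getattr``, one can collect its dependencies and inline just those:
#
#     nargs, func_deps, meth_deps = get_std_info(FUNCTIONS['getattr'])
#     js = get_partial_std_lib(set(func_deps) | {'getattr'}, meth_deps, indent=1)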
# todo: now that we have modules, we can have shorter/no prefixes, right?
# -> though maybe we use them for string replacement somewhere?
def get_all_std_names():
""" Get list if function names and methods names in std lib.
"""
return ([FUNCTION_PREFIX + f for f in FUNCTIONS],
[METHOD_PREFIX + f for f in METHODS])
## ----- Functions
## Special functions: not really in builtins, but important enough to support
FUNCTIONS['perf_counter'] = """function() { // nargs: 0
if (typeof(process) === "undefined"){return performance.now()*1e-3;}
else {var t = process.hrtime(); return t[0] + t[1]*1e-9;}
}""" # Work in nodejs and browser
FUNCTIONS['time'] = """function () {return Date.now() / 1000;} // nargs: 0"""
## Hardcore functions
FUNCTIONS['op_instantiate'] = """function (ob, args) { // nargs: 2
if ((typeof ob === "undefined") ||
(typeof window !== "undefined" && window === ob) ||
(typeof global !== "undefined" && global === ob))
{throw "Class constructor is called as a function.";}
for (var name in ob) {
if (Object[name] === undefined &&
typeof ob[name] === 'function' && !ob[name].nobind) {
ob[name] = ob[name].bind(ob);
ob[name].__name__ = name;
}
}
if (ob.__init__) {
ob.__init__.apply(ob, args);
}
}"""
FUNCTIONS['create_dict'] = """function () {
var d = {};
for (var i=0; i<arguments.length; i+=2) { d[arguments[i]] = arguments[i+1]; }
return d;
}"""
FUNCTIONS['merge_dicts'] = """function () {
var res = {};
for (var i=0; i<arguments.length; i++) {
var d = arguments[i];
var key, keys = Object.keys(d);
for (var j=0; j<keys.length; j++) { key = keys[j]; res[key] = d[key]; }
}
return res;
}"""
# arg_names and arg_values are parallel lists; arg_values entries are overwritten with matching values from kwargs
FUNCTIONS['op_parse_kwargs'] = """
function (arg_names, arg_values, kwargs, strict) { // nargs: 3
for (var i=0; i<arg_values.length; i++) {
var name = arg_names[i];
if (kwargs[name] !== undefined) {
arg_values[i] = kwargs[name];
delete kwargs[name];
}
}
if (strict && Object.keys(kwargs).length > 0) {
throw FUNCTION_PREFIXop_error('TypeError',
'Function ' + strict + ' does not accept **kwargs.');
}
return kwargs;
}""".lstrip()
FUNCTIONS['op_error'] = """function (etype, msg) { // nargs: 2
var e = new Error(etype + ': ' + msg);
e.name = etype;
return e;
}"""
FUNCTIONS['hasattr'] = """function (ob, name) { // nargs: 2
return (ob !== undefined) && (ob !== null) && (ob[name] !== undefined);
}"""
FUNCTIONS['getattr'] = """function (ob, name, deflt) { // nargs: 2 3
var has_attr = ob !== undefined && ob !== null && ob[name] !== undefined;
if (has_attr) {return ob[name];}
else if (arguments.length == 3) {return deflt;}
else {var e = Error(name); e.name='AttributeError'; throw e;}
}"""
FUNCTIONS['setattr'] = """function (ob, name, value) { // nargs: 3
ob[name] = value;
}"""
FUNCTIONS['delattr'] = """function (ob, name) { // nargs: 2
delete ob[name];
}"""
FUNCTIONS['dict'] = """function (x) {
var t, i, keys, r={};
if (Array.isArray(x)) {
for (i=0; i<x.length; i++) {
t=x[i]; r[t[0]] = t[1];
}
} else {
keys = Object.keys(x);
for (i=0; i<keys.length; i++) {
t=keys[i]; r[t] = x[t];
}
}
return r;
}"""
FUNCTIONS['list'] = """function (x) {
var r=[];
if (typeof x==="object" && !Array.isArray(x)) {x = Object.keys(x)}
for (var i=0; i<x.length; i++) {
r.push(x[i]);
}
return r;
}"""
FUNCTIONS['range'] = """function (start, end, step) {
var i, res = [];
var val = start;
var n = (end - start) / step;
for (i=0; i<n; i++) {
res.push(val);
val += step;
}
return res;
}"""
FUNCTIONS['format'] = """function (v, fmt) { // nargs: 2
fmt = fmt.toLowerCase();
var s = String(v);
if (fmt.indexOf('!r') >= 0) {
try { s = JSON.stringify(v); } catch (e) { s = undefined; }
if (typeof s === 'undefined') { s = v._IS_COMPONENT ? v.id : String(v); }
}
var fmt_type = '';
if (fmt.slice(-1) == 'i' || fmt.slice(-1) == 'f' ||
fmt.slice(-1) == 'e' || fmt.slice(-1) == 'g') {
fmt_type = fmt[fmt.length-1]; fmt = fmt.slice(0, fmt.length-1);
}
var i0 = fmt.indexOf(':');
var i1 = fmt.indexOf('.');
var spec1 = '', spec2 = ''; // before and after dot
if (i0 >= 0) {
if (i1 > i0) { spec1 = fmt.slice(i0+1, i1); spec2 = fmt.slice(i1+1); }
else { spec1 = fmt.slice(i0+1); }
}
// Format numbers
if (fmt_type == '') {
} else if (fmt_type == 'i') { // integer formatting, for %i
s = parseInt(v).toFixed(0);
} else if (fmt_type == 'f') { // float formatting
v = parseFloat(v);
var decimals = spec2 ? Number(spec2) : 6;
s = v.toFixed(decimals);
} else if (fmt_type == 'e') { // exp formatting
v = parseFloat(v);
var precision = (spec2 ? Number(spec2) : 6) || 1;
s = v.toExponential(precision);
} else if (fmt_type == 'g') { // "general" formatting
v = parseFloat(v);
var precision = (spec2 ? Number(spec2) : 6) || 1;
// Exp or decimal?
s = v.toExponential(precision-1);
var s1 = s.slice(0, s.indexOf('e')), s2 = s.slice(s.indexOf('e'));
if (s2.length == 3) { s2 = 'e' + s2[1] + '0' + s2[2]; }
var exp = Number(s2.slice(1));
if (exp >= -4 && exp < precision) { s1=v.toPrecision(precision); s2=''; }
// Skip trailing zeros and dot
var j = s1.length-1;
while (j>0 && s1[j] == '0') { j-=1; }
s1 = s1.slice(0, j+1);
if (s1.slice(-1) == '.') { s1 = s1.slice(0, s1.length-1); }
s = s1 + s2;
}
// prefix/padding
var prefix = '';
if (spec1) {
if (spec1[0] == '+' && v > 0) { prefix = '+'; spec1 = spec1.slice(1); }
else if (spec1[0]
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
The models defined in this file represent the resource JSON description
format and provide a layer of abstraction from the raw JSON. The advantages
of this are:
* Pythonic interface (e.g. ``action.request.operation``)
* Consumers need not change for minor JSON changes (e.g. renamed field)
These models are used both by the resource factory to generate resource
classes as well as by the documentation generator.
"""
import logging
from ibm_botocore import xform_name
logger = logging.getLogger(__name__)
class Identifier(object):
"""
A resource identifier, given by its name.
:type name: string
:param name: The name of the identifier
"""
def __init__(self, name, member_name=None):
#: (``string``) The name of the identifier
self.name = name
self.member_name = member_name
class Action(object):
"""
A service operation action.
:type name: string
:param name: The name of the action
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, name, definition, resource_defs):
self._definition = definition
#: (``string``) The name of the action
self.name = name
#: (:py:class:`Request`) This action's request or ``None``
self.request = None
if 'request' in definition:
self.request = Request(definition.get('request', {}))
#: (:py:class:`ResponseResource`) This action's resource or ``None``
self.resource = None
if 'resource' in definition:
self.resource = ResponseResource(definition.get('resource', {}),
resource_defs)
#: (``string``) The JMESPath search path or ``None``
self.path = definition.get('path')
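# Usage sketch with a hypothetical definition dict (field values below are
# illustrative, not taken from any real service description):
#
#     definition = {
#         'request': {'operation': 'GetQueueUrl',
#                     'params': [{'target': 'QueueName',
#                                 'source': 'identifier', 'name': 'Name'}]},
#         'path': 'QueueUrl',
#     }
#     action = Action('get_url', definition, resource_defs={})
#     action.request.operation         # 'GetQueueUrl'
#     action.request.params[0].target  # 'QueueName'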
class DefinitionWithParams(object):
"""
An item which has parameters exposed via the ``params`` property.
A request has an operation and parameters, while a waiter has
a name, a low-level waiter name and parameters.
:type definition: dict
:param definition: The JSON definition
"""
def __init__(self, definition):
self._definition = definition
@property
def params(self):
"""
Get a list of auto-filled parameters for this request.
:type: list(:py:class:`Parameter`)
"""
params = []
for item in self._definition.get('params', []):
params.append(Parameter(**item))
return params
class Parameter(object):
"""
An auto-filled parameter which has a source and target. For example,
the ``QueueUrl`` may be auto-filled from a resource's ``url`` identifier
when making calls to ``queue.receive_messages``.
:type target: string
:param target: The destination parameter name, e.g. ``QueueUrl``
:type source_type: string
:param source_type: Where the source is defined.
:type source: string
:param source: The source name, e.g. ``Url``
"""
def __init__(self, target, source, name=None, path=None, value=None,
**kwargs):
#: (``string``) The destination parameter name
self.target = target
#: (``string``) Where the source is defined
self.source = source
#: (``string``) The name of the source, if given
self.name = name
#: (``string``) The JMESPath query of the source
self.path = path
#: (``string|int|float|bool``) The source constant value
self.value = value
# Complain if we encounter any unknown values.
if kwargs:
logger.warning('Unknown parameter options found: %s', kwargs)
class Request(DefinitionWithParams):
"""
A service operation action request.
:type definition: dict
:param definition: The JSON definition
"""
def __init__(self, definition):
super(Request, self).__init__(definition)
#: (``string``) The name of the low-level service operation
self.operation = definition.get('operation')
class Waiter(DefinitionWithParams):
"""
An event waiter specification.
:type name: string
:param name: Name of the waiter
:type definition: dict
:param definition: The JSON definition
"""
PREFIX = 'WaitUntil'
def __init__(self, name, definition):
super(Waiter, self).__init__(definition)
#: (``string``) The name of this waiter
self.name = name
#: (``string``) The name of the underlying event waiter
self.waiter_name = definition.get('waiterName')
class ResponseResource(object):
"""
A resource response to create after performing an action.
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
#: (``string``) The name of the response resource type
self.type = definition.get('type')
#: (``string``) The JMESPath search query or ``None``
self.path = definition.get('path')
@property
def identifiers(self):
"""
A list of resource identifiers.
:type: list(:py:class:`Identifier`)
"""
identifiers = []
for item in self._definition.get('identifiers', []):
identifiers.append(
Parameter(**item))
return identifiers
@property
def model(self):
"""
Get the resource model for the response resource.
:type: :py:class:`ResourceModel`
"""
return ResourceModel(self.type, self._resource_defs[self.type],
self._resource_defs)
class Collection(Action):
"""
A group of resources. See :py:class:`Action`.
:type name: string
:param name: The name of the collection
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
@property
def batch_actions(self):
"""
Get a list of batch actions supported by the resource type
contained in this action. This is a shortcut for accessing
the same information through the resource model.
:rtype: list(:py:class:`Action`)
"""
return self.resource.model.batch_actions
class ResourceModel(object):
"""
A model representing a resource, defined via a JSON description
format. A resource has identifiers, attributes, actions,
sub-resources, references and collections. For more information
on resources, see :ref:`guide_resources`.
:type name: string
:param name: The name of this resource
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, name, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
self._renamed = {}
#: (``string``) The name of this resource
self.name = name
#: (``string``) The service shape name for this resource or ``None``
self.shape = definition.get('shape')
def load_rename_map(self, shape=None):
"""
Load a name translation map given a shape. This will set
up renamed values for any collisions, e.g. if the shape,
an action, and a subresource all are all named ``foo``
then the resource will have an action ``foo``, a subresource
named ``Foo`` and a property named ``foo_attribute``.
This is the order of precedence, from most important to
least important:
* Load action (resource.load)
* Identifiers
* Actions
* Subresources
* References
* Collections
* Waiters
* Attributes (shape members)
Batch actions are only exposed on collections, so do not
get modified here. Subresources use upper camel casing, so
are unlikely to collide with anything but other subresources.
Creates a structure like this::
renames = {
('action', 'id'): 'id_action',
('collection', 'id'): 'id_collection',
('attribute', 'id'): 'id_attribute'
}
# Get the final name for an action named 'id'
name = renames.get(('action', 'id'), 'id')
:type shape: ibm_botocore.model.Shape
:param shape: The underlying shape for this resource.
"""
# Meta is a reserved name for resources
names = set(['meta'])
self._renamed = {}
if self._definition.get('load'):
names.add('load')
for item in self._definition.get('identifiers', []):
self._load_name_with_category(names, item['name'], 'identifier')
for name in self._definition.get('actions', {}):
self._load_name_with_category(names, name, 'action')
for name, ref in self._get_has_definition().items():
# Subresources require no data members, just typically
# identifiers and user input.
data_required = False
for identifier in ref['resource']['identifiers']:
if identifier['source'] == 'data':
data_required = True
break
if not data_required:
self._load_name_with_category(names, name, 'subresource',
snake_case=False)
else:
self._load_name_with_category(names, name, 'reference')
for name in self._definition.get('hasMany', {}):
self._load_name_with_category(names, name, 'collection')
for name in self._definition.get('waiters', {}):
self._load_name_with_category(names, Waiter.PREFIX + name,
'waiter')
if shape is not None:
for name in shape.members.keys():
self._load_name_with_category(names, name, 'attribute')
def _load_name_with_category(self, names, name, category,
snake_case=True):
"""
Load a name with a given category, possibly renaming it
if that name is already in use. The name will be stored
in ``names`` and possibly be set up in ``self._renamed``.
:type names: set
:param names: Existing names (Python attributes, properties, or
methods) on the resource.
:type name: string
:param name: The original name of the value.
:type category: string
:param category: The value type, such as 'identifier' or 'action'
:type snake_case: bool
:param snake_case: True (default) if the name should be snake cased.
"""
if snake_case:
name = xform_name(name)
if name in names:
logger.debug('Renaming %s %s %s' % (self.name, category, name))
self._renamed[(category, name)] = name + '_' + category
name += '_' + category
if name in names:
# This isn't good, let's raise instead of trying to keep
# renaming this value.
raise ValueError('Problem renaming {0} {1} to {2}!'.format(
self.name, category, name))
names.add(name)
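# Collision sketch (hypothetical): if a resource defines an action ``id`` and
# its shape also has a member ``Id``, the action is loaded first per the
# precedence above, so the attribute is stored as ``id_attribute`` and
# recorded via self._renamed[('attribute', 'id')] = 'id_attribute'.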
def _get_name(self, category,
atom explicit valence, atom implicit valence, aromaticity.
edge_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for edges like bonds in a molecule, which can be used to update
edata for a DGLGraph. By default, we consider descriptors including bond type,
whether bond is conjugated and whether bond is in ring.
atom_pair_featurizer : callable, str -> dict
Featurization for each pair of atoms in multiple reactants. The result will be
used to update edata in the complete DGLGraphs. By default, the features include
the bond type between the atoms (if any) and whether they belong to the same molecule.
load : bool
Whether to load the previously pre-processed dataset or pre-process from scratch.
``load`` should be False when we want to try different graph construction and
featurization methods and need to preprocess from scratch. Default to True.
num_processes : int
Number of processes to use for data pre-processing. Default to 1.
"""
def __init__(self,
raw_file_path,
mol_graph_path,
mol_to_graph=mol_to_bigraph,
node_featurizer=default_node_featurizer_center,
edge_featurizer=default_edge_featurizer_center,
atom_pair_featurizer=default_atom_pair_featurizer,
load=True,
num_processes=1):
super(WLNCenterDataset, self).__init__()
self._atom_pair_featurizer = atom_pair_featurizer
self.atom_pair_features = []
self.atom_pair_labels = []
# Map number of nodes to a corresponding complete graph
self.complete_graphs = dict()
path_to_reaction_file = raw_file_path + '.proc'
if not os.path.isfile(path_to_reaction_file):
print('Pre-processing graph edits from reaction data')
process_file(raw_file_path, num_processes)
import time
t0 = time.time()
full_mols, full_reactions, full_graph_edits = \
self.load_reaction_data(path_to_reaction_file, num_processes)
print('Time spent', time.time() - t0)
if load and os.path.isfile(mol_graph_path):
print('Loading previously saved graphs...')
self.reactant_mol_graphs, _ = load_graphs(mol_graph_path)
else:
print('Constructing graphs from scratch...')
if num_processes == 1:
self.reactant_mol_graphs = []
for mol in full_mols:
self.reactant_mol_graphs.append(mol_to_graph(
mol, node_featurizer=node_featurizer,
edge_featurizer=edge_featurizer, canonical_atom_order=False))
else:
torch.multiprocessing.set_sharing_strategy('file_system')
with Pool(processes=num_processes) as pool:
self.reactant_mol_graphs = pool.map(
partial(mol_to_graph, node_featurizer=node_featurizer,
edge_featurizer=edge_featurizer, canonical_atom_order=False),
full_mols)
save_graphs(mol_graph_path, self.reactant_mol_graphs)
self.mols = full_mols
self.reactions = full_reactions
self.graph_edits = full_graph_edits
self.atom_pair_features.extend([None for _ in range(len(self.mols))])
self.atom_pair_labels.extend([None for _ in range(len(self.mols))])
def load_reaction_data(self, file_path, num_processes):
"""Load reaction data from the raw file.
Parameters
----------
file_path : str
Path to read the file.
num_processes : int
Number of processes to use for data pre-processing.
Returns
-------
all_mols : list of rdkit.Chem.rdchem.Mol
RDKit molecule instances
all_reactions : list of str
Reactions
all_graph_edits : list of str
Graph edits in the reactions.
"""
all_mols = []
all_reactions = []
all_graph_edits = []
with open(file_path, 'r') as f:
lines = f.readlines()
if num_processes == 1:
results = []
for li in lines:
mol, reaction, graph_edits = load_one_reaction(li)
results.append((mol, reaction, graph_edits))
else:
with Pool(processes=num_processes) as pool:
results = pool.map(load_one_reaction, lines)
for mol, reaction, graph_edits in results:
if mol is None:
continue
all_mols.append(mol)
all_reactions.append(reaction)
all_graph_edits.append(graph_edits)
return all_mols, all_reactions, all_graph_edits
def __len__(self):
"""Get the size for the dataset.
Returns
-------
int
Number of reactions in the dataset.
"""
return len(self.mols)
def __getitem__(self, item):
"""Get the i-th datapoint.
Returns
-------
str
Reaction
str
Graph edits for the reaction
DGLGraph
DGLGraph for the ith molecular graph
DGLGraph
Complete DGLGraph, which will be needed for predicting
scores between each pair of atoms
float32 tensor of shape (V^2, 10)
Features for each pair of atoms.
float32 tensor of shape (V^2, 5)
Labels for reaction center prediction.
V for the number of atoms in the reactants.
"""
mol = self.mols[item]
num_atoms = mol.GetNumAtoms()
if num_atoms not in self.complete_graphs:
self.complete_graphs[num_atoms] = mol_to_complete_graph(
mol, add_self_loop=True, canonical_atom_order=False)
if self.atom_pair_features[item] is None:
reactants = self.reactions[item].split('>')[0]
self.atom_pair_features[item] = self._atom_pair_featurizer(reactants)
if self.atom_pair_labels[item] is None:
self.atom_pair_labels[item] = get_pair_label(mol, self.graph_edits[item])
return self.reactions[item], self.graph_edits[item], \
self.reactant_mol_graphs[item], \
self.complete_graphs[num_atoms], \
self.atom_pair_features[item], \
self.atom_pair_labels[item]
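# Usage sketch (the paths and file names here are hypothetical):
#
#     dataset = WLNCenterDataset(raw_file_path='train.txt',
#                                mol_graph_path='train_graphs.bin')
#     reaction, edits, mol_g, complete_g, feats, labels = dataset[0]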
class USPTOCenter(WLNCenterDataset):
"""USPTO dataset for reaction center prediction.
The dataset contains reactions from patents granted by United States Patent
and Trademark Office (USPTO), collected by Lowe [1]. Jin et al. [2] remove duplicates
and erroneous reactions, obtaining a set of 480K reactions. They divide it
into 400K, 40K, and 40K for training, validation and test.
References:
* [1] Patent reaction extraction
* [2] Predicting Organic Reaction Outcomes with Weisfeiler-Lehman Network
Parameters
----------
subset : str
Whether to use the training/validation/test set as in Jin et al.
* 'train' for the training set
* 'val' for the validation set
* 'test' for the test set
mol_to_graph: callable, str -> DGLGraph
A function turning RDKit molecule instances into DGLGraphs.
Default to :func:`dgllife.utils.mol_to_bigraph`.
node_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for nodes like atoms in a molecule, which can be used to update
ndata for a DGLGraph. By default, we consider descriptors including atom type,
atom degree, atom explicit valence, atom implicit valence, aromaticity.
edge_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for edges like bonds in a molecule, which can be used to update
edata for a DGLGraph. By default, we consider descriptors including bond type,
whether bond is conjugated and whether bond is in ring.
atom_pair_featurizer : callable, str -> dict
Featurization for each pair of atoms in multiple reactants. The result will be
used to update edata in the complete DGLGraphs. By default, the features include
the bond type between the atoms (if any) and whether they belong to the same molecule.
load : bool
Whether to load the previously pre-processed dataset or pre-process from scratch.
``load`` should be False when we want to try different graph construction and
featurization methods and need to preprocess from scratch. Default to True.
num_processes : int
Number of processes to use for data pre-processing. Default to 1.
"""
def __init__(self,
subset,
mol_to_graph=mol_to_bigraph,
node_featurizer=default_node_featurizer_center,
edge_featurizer=default_edge_featurizer_center,
atom_pair_featurizer=default_atom_pair_featurizer,
load=True,
num_processes=1):
assert subset in ['train', 'val', 'test'], \
'Expect subset to be "train" or "val" or "test", got {}'.format(subset)
print('Preparing {} subset of USPTO for reaction center prediction.'.format(subset))
self._subset = subset
if subset == 'val':
subset = 'valid'
self._url = 'dataset/uspto.zip'
data_path = get_download_dir() + '/uspto.zip'
extracted_data_path = get_download_dir() + '/uspto'
download(_get_dgl_url(self._url), path=data_path)
extract_archive(data_path, extracted_data_path)
super(USPTOCenter, self).__init__(
raw_file_path=extracted_data_path + '/{}.txt'.format(subset),
mol_graph_path=extracted_data_path + '/{}_mol_graphs.bin'.format(subset),
mol_to_graph=mol_to_graph,
node_featurizer=node_featurizer,
edge_featurizer=edge_featurizer,
atom_pair_featurizer=atom_pair_featurizer,
load=load,
num_processes=num_processes)
@property
def subset(self):
"""Get the subset used for USPTOCenter
Returns
-------
str
* 'full' for the complete dataset
* 'train' for the training set
* 'val' for the validation set
* 'test' for the test set
"""
return self._subset
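# Usage sketch: the constructor downloads and extracts the archive on first
# use, then defers to WLNCenterDataset for pre-processing.
#
#     dataset = USPTOCenter('val', num_processes=4)
#     print(dataset.subset, len(dataset))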
def mkdir_p(path):
"""Create a folder for the given path.
Parameters
----------
path: str
Folder to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def load_one_reaction_rank(line):
"""Load one reaction and check if the reactants are valid.
Parameters
----------
line : str
One reaction and the associated graph edits
Returns
-------
reactants_mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance for the reactants. None will be returned if the
line is not valid.
product_mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance for the product. None will be returned if the line is not valid.
reaction_real_bond_changes : list of 3-tuples
Real bond changes in the reaction. Each tuple is of form (atom1, atom2, change_type). For
change_type, 0.0 stands for losing a bond, 1.0, 2.0, 3.0 and 1.5 separately stands for
forming a single, double, triple or aromatic bond.
"""
# Each line represents a reaction and the corresponding graph edits
#
# reaction example:
# [CH3:14][OH:15].[NH2:12][NH2:13].[OH2:11].[n:1]1[n:2][cH:3][c:4]
# ([C:7]([O:9][CH3:8])=[O:10])[cH:5][cH:6]1>>[n:1]1[n:2][cH:3][c:4]
# ([C:7](=[O:9])[NH:12][NH2:13])[cH:5][cH:6]1
# The reactants are on the left-hand-side of the reaction and the product
# is on the right-hand-side of the reaction. The numbers represent atom mapping.
#
# graph_edits example:
# 23-33-1.0;23-25-0.0
# For a triplet a-b-c, a and b are the atoms that form or loss the bond.
# c specifies the particular change, 0.0 for losing a bond, 1.0, 2.0, 3.0 and
# 1.5 separately for forming a single, double, triple or aromatic bond.
reaction, graph_edits = line.strip("\r\n ").split()
reactants, _, product = reaction.split('>')
reactants_mol = Chem.MolFromSmiles(reactants)
if reactants_mol is None:
return None, None, None, None, None
product_mol = Chem.MolFromSmiles(product)
if product_mol is None:
return None, None, None, None, None
# Reorder atoms according to the order specified in the atom map
atom_map_order = [-1 for _ in range(reactants_mol.GetNumAtoms())]
for j in range(reactants_mol.GetNumAtoms()):
atom = reactants_mol.GetAtomWithIdx(j)
atom_map_order[atom.GetIntProp('molAtomMapNumber') - 1] = j
reactants_mol = rdmolops.RenumberAtoms(reactants_mol, atom_map_order)
reaction_real_bond_changes = []
for changed_bond in graph_edits.split(';'):
atom1, atom2, change_type =
vrouters_list_ops[n]['href'])
ops_basic_data = []
host_name = vrouters_list_ops[n]['name']
ip_address = vrouters_ops_data.get(
'VrouterAgent').get('self_ip_list')[0]
version = json.loads(vrouters_ops_data.get('VrouterAgent').get('build_info')).get(
'build-info')[0].get('build-id')
version = version.split('-')
version = version[0] + ' (Build ' + version[1] + ')'
xmpp_messages = vrouters_ops_data.get(
'VrouterStatsAgent').get('xmpp_stats_list')
for i, item in enumerate(xmpp_messages):
if item['ip'] == ip_address:
xmpp_in_msgs = item['in_msgs']
xmpp_out_msgs = item['out_msgs']
xmpp_msgs_string = str(xmpp_in_msgs) + \
' In ' + \
str(xmpp_out_msgs) + ' Out'
break
total_flows = vrouters_ops_data.get(
'VrouterStatsAgent').get('total_flows')
active_flows = vrouters_ops_data.get(
'VrouterStatsAgent').get('active_flows')
flow_count_string = str(active_flows) + \
' Active, ' + \
str(total_flows) + ' Total'
if vrouters_ops_data.get('VrouterAgent').get('connected_networks'):
networks = str(
len(vrouters_ops_data.get('VrouterAgent').get('connected_networks')))
else:
networks = '--'
interfaces = str(vrouters_ops_data.get('VrouterAgent')
.get('total_interface_count'))
if vrouters_ops_data.get('VrouterAgent').get('virtual_machine_list'):
instances = str(
len(vrouters_ops_data.get('VrouterAgent').get('virtual_machine_list')))
else:
instances = '--'
cpu = vrouters_ops_data.get('VrouterStatsAgent').get(
'cpu_info').get('cpu_share')
cpu = str(round(cpu, 2)) + ' %'
memory = vrouters_ops_data.get('VrouterStatsAgent').get(
'cpu_info').get('meminfo').get('virt')
memory = memory / 1024.0
if memory < 1024:
memory = str(round(memory, 2)) + ' MB'
else:
memory = str(round(memory / 1024, 2)) + ' GB'
last_log = vrouters_ops_data.get(
'VrouterAgent').get('total_interface_count')
modified_ops_data = []
process_state_list = vrouters_ops_data.get(
'VrouterStatsAgent').get('process_state_list')
process_down_stop_time_dict = {}
process_up_start_time_dict = {}
reduced_process_keys_dict = {}
exclude_process_list = [
'contrail-config-nodemgr', 'contrail-analytics-nodemgr', 'contrail-control-nodemgr', 'contrail-vrouter-nodemgr',
'openstack-nova-compute', 'contrail-svc-monitor', 'contrail-discovery:0', 'contrail-zookeeper', 'contrail-schema']
for i, item in enumerate(process_state_list):
if item['process_name'] == 'contrail-vrouter':
contrail_vrouter_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-vrouter-nodemgr':
contrail_vrouter_nodemgr_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'openstack-nova-compute':
openstack_nova_compute_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
for k, v in process_down_stop_time_dict.items():
if k not in exclude_process_list:
reduced_process_keys_dict[k] = v
'''
if not reduced_process_keys_dict :
recent_time = max(process_up_start_time_dict.values())
overall_node_status_time = self.webui_common.get_node_status_string(str(recent_time))
overall_node_status_string = ['Up since ' + status for status in overall_node_status_time]
else:
overall_node_status_down_time = self.webui_common.get_node_status_string(str(max(reduced_process_keys_dict.values())))
overall_node_status_string = ['Down since ' + status for status in overall_node_status_down_time]
'''
if not reduced_process_keys_dict:
for process in exclude_process_list:
process_up_start_time_dict.pop(process, None)
recent_time = max(process_up_start_time_dict.values())
overall_node_status_time = self.webui_common.get_node_status_string(
str(recent_time))
overall_node_status_string = [
'Up since ' + status for status in overall_node_status_time]
else:
overall_node_status_down_time = self.webui_common.get_node_status_string(
str(max(reduced_process_keys_dict.values())))
process_down_count = len(reduced_process_keys_dict)
process_down_list = reduced_process_keys_dict.keys()
overall_node_status_string = str(
process_down_count) + ' Process down'
generator_list = self.webui_common.get_generators_list_ops()
for element in generator_list:
if element['name'] == ops_vrouter_name + ':Compute:VRouterAgent:0':
analytics_data = element['href']
break
generators_vrouters_data = self.webui_common.get_details(
element['href'])
analytics_data = generators_vrouters_data.get(
'ModuleClientState').get('client_info')
if analytics_data['status'] == 'Established':
analytics_primary_ip = analytics_data[
'primary'].split(':')[0] + ' (Up)'
tx_socket_bytes = analytics_data.get(
'tx_socket_stats').get('bytes')
tx_socket_size = self.webui_common.get_memory_string(
int(tx_socket_bytes))
analytics_msg_count = generators_vrouters_data.get(
'ModuleClientState').get('session_stats').get('num_send_msg')
offset = 5
analytics_msg_count_list = range(
int(analytics_msg_count) - offset, int(analytics_msg_count) + offset)
analytics_messages_string = [
str(count) + ' [' + str(size) + ']' for count in analytics_msg_count_list for size in tx_socket_size]
control_nodes_list = vrouters_ops_data.get(
'VrouterAgent').get('xmpp_peer_list')
control_nodes_string = ''
for node in control_nodes_list:
if node['status'] == True and node['primary'] == True:
control_ip = node['ip']
control_nodes_string = control_ip + '* (Up)'
index = control_nodes_list.index(node)
del control_nodes_list[index]
for node in control_nodes_list:
node_ip = node['ip']
if node['status'] == True:
control_nodes_string = control_nodes_string + \
', ' + node_ip + ' (Up)'
else:
control_nodes_string = control_nodes_string + \
', ' + node_ip + ' (Down)'
modified_ops_data.extend(
[{'key': 'Flow Count', 'value': flow_count_string}, {'key': 'Hostname', 'value': host_name}, {'key': 'IP Address', 'value': ip_address}, {'key': 'Networks', 'value': networks}, {'key': 'Instances', 'value': instances}, {'key': 'CPU', 'value': cpu}, {'key': 'Memory', 'value': memory}, {'key': 'Version', 'value': version},
{'key': 'vRouter Agent', 'value': contrail_vrouter_string}, {'key': 'Overall Node Status', 'value': overall_node_status_string}, {'key': 'Analytics Node', 'value': analytics_primary_ip}, {'key': 'Analytics Messages', 'value': analytics_messages_string}, {'key': 'Control Nodes', 'value': control_nodes_string}])
self.webui_common.match_ops_with_webui(
modified_ops_data, dom_basic_view)
if self.webui_common.match_ops_with_webui(modified_ops_data, dom_basic_view):
self.logger.info(
"Ops %s uves vrouters basic view details data matched in webui" % (ops_vrouter_name))
else:
self.logger.error(
"Ops %s uves vrouters basic view details data match failed in webui" % (ops_vrouter_name))
result = result and False
return result
# end verify_vrouter_ops_basic_data_in_webui
def verify_vrouter_ops_advance_data(self):
self.logger.info(
"Verifying vrouter Ops-data in Webui monitor->infra->Virtual Routers->details(advance view)......")
self.logger.debug(self.dash)
result = True
if not self.webui_common.click_monitor_vrouters():
result = result and False
rows = self.webui_common.get_rows()
vrouters_list_ops = self.webui_common.get_vrouters_list_ops()
for n in range(len(vrouters_list_ops)):
ops_vrouter_name = vrouters_list_ops[n]['name']
self.logger.info(
"Vn host name %s exists in op server..checking if exists in webui as well" %
(ops_vrouter_name))
if not self.webui_common.click_monitor_vrouters():
result = result and False
rows = self.webui_common.get_rows()
for i in range(len(rows)):
match_flag = 0
if rows[i].find_elements_by_class_name('slick-cell')[0].text == ops_vrouter_name:
self.logger.info(
"Vrouter name %s found in webui..going to match advance details..." % (ops_vrouter_name))
self.logger.debug(self.dash)
match_index = i
match_flag = 1
break
if not match_flag:
self.logger.error(
"Vrouter name %s did not match in webui...not found in webui" % (ops_vrouter_name))
self.logger.debug(self.dash)
else:
self.logger.info(
"Click and retrieve vrouter advance details in webui for vrouter-name %s " % (ops_vrouter_name))
self.webui_common.click_monitor_vrouters_advance(match_index)
vrouters_ops_data = self.webui_common.get_details(
vrouters_list_ops[n]['href'])
dom_arry = self.webui_common.parse_advanced_view()
dom_arry_str = self.webui_common.get_advanced_view_str()
dom_arry_num = self.webui_common.get_advanced_view_num()
dom_arry_num_new = []
for item in dom_arry_num:
dom_arry_num_new.append(
{'key': item['key'].replace('\\', '"').replace(' ', ''), 'value': item['value']})
dom_arry_num = dom_arry_num_new
merged_arry = dom_arry + dom_arry_str + dom_arry_num
if vrouters_ops_data.has_key('VrouterStatsAgent'):
ops_data = vrouters_ops_data['VrouterStatsAgent']
history_del_list = [
'total_in_bandwidth_utilization', 'cpu_share', 'used_sys_mem',
'one_min_avg_cpuload', 'virt_mem', 'total_out_bandwidth_utilization']
for item in history_del_list:
if ops_data.get(item):
for element in ops_data.get(item):
if element.get('history-10'):
del element['history-10']
if element.get('s-3600-topvals'):
del element['s-3600-topvals']
modified_ops_data = []
self.webui_common.extract_keyvalue(
ops_data, modified_ops_data)
if vrouters_ops_data.has_key('VrouterAgent'):
ops_data_agent = vrouters_ops_data['VrouterAgent']
modified_ops_data_agent = []
self.webui_common.extract_keyvalue(
ops_data_agent, modified_ops_data_agent)
complete_ops_data = modified_ops_data + \
modified_ops_data_agent
for k in range(len(complete_ops_data)):
if type(complete_ops_data[k]['value']) is list:
for m in range(len(complete_ops_data[k]['value'])):
complete_ops_data[k]['value'][m] = str(
complete_ops_data[k]['value'][m])
elif type(complete_ops_data[k]['value']) is unicode:
complete_ops_data[k]['value'] = str(
complete_ops_data[k]['value'])
else:
complete_ops_data[k]['value'] = str(
complete_ops_data[k]['value'])
if self.webui_common.match_ops_with_webui(complete_ops_data, merged_arry):
self.logger.info(
"Ops %s uves virual networks advance view data matched in webui" % (ops_vrouter_name))
else:
self.logger.error(
"Ops %s uves virual networks advance data match failed in webui" % (ops_vrouter_name))
result = result and False
return result
# end verify_vrouter_ops_advance_data_in_webui
def verify_bgp_routers_ops_basic_data(self):
self.logger.info(
"Verifying Control Nodes basic ops-data in Webui monitor->infra->Control Nodes->details(basic view)......")
self.logger.debug(self.dash)
result = True
if not self.webui_common.click_monitor_control_nodes():
result = result and False
rows = self.webui_common.get_rows()
bgp_routers_list_ops = self.webui_common.get_bgp_routers_list_ops()
for n in range(len(bgp_routers_list_ops)):
ops_bgp_routers_name = bgp_routers_list_ops[n]['name']
self.logger.info("Control node host name %s exists in op server..checking if exists \
in webui as well" % (ops_bgp_routers_name))
if not self.webui_common.click_monitor_control_nodes():
result = result and False
rows = self.webui_common.get_rows()
for i in range(len(rows)):
match_flag = 0
if rows[i].find_elements_by_class_name('slick-cell')[0].text == ops_bgp_routers_name:
self.logger.info("Bgp routers name %s found in webui..going to match basic details..." % (
ops_bgp_routers_name))
self.logger.debug(self.dash)
match_index = i
match_flag = 1
break
if not match_flag:
self.logger.error("Bgp routers name %s did not match in webui...not found in webui" % (
ops_bgp_routers_name))
self.logger.debug(self.dash)
else:
self.logger.info("Click and retrieve control nodes basic view details in webui for \
control node name %s " % (ops_bgp_routers_name))
self.webui_common.click_monitor_control_nodes_basic(
match_index)
dom_basic_view = self.webui_common.get_basic_view_infra()
# special handling for overall node status value
node_status = self.browser.find_element_by_id('allItems').find_element_by_tag_name(
'p').get_attribute('innerHTML').replace('\n', '').strip()
for i, item in enumerate(dom_basic_view):
if item.get('key') == 'Overall Node Status':
dom_basic_view[i]['value'] = node_status
# filter bgp_routers basic view details from opserver data
bgp_routers_ops_data = self.webui_common.get_details(
bgp_routers_list_ops[n]['href'])
ops_basic_data = []
host_name = bgp_routers_list_ops[n]['name']
ip_address = bgp_routers_ops_data.get(
'BgpRouterState').get('bgp_router_ip_list')[0]
if not ip_address:
ip_address = '--'
version = json.loads(bgp_routers_ops_data.get('BgpRouterState').get('build_info')).get(
'build-info')[0].get('build-id')
version = self.webui_common.get_version_string(version)
bgp_peers_string = 'BGP Peers: ' + \
str(bgp_routers_ops_data.get('BgpRouterState')
.get('num_bgp_peer')) + ' Total'
vrouters = 'vRouters: ' + \
str(bgp_routers_ops_data.get('BgpRouterState')
.get('num_up_xmpp_peer')) + ' Established in Sync'
cpu = bgp_routers_ops_data.get('BgpRouterState').get(
'cpu_info').get('cpu_share')
memory = bgp_routers_ops_data.get('BgpRouterState').get(
'cpu_info').get('meminfo').get('virt')
if not cpu:
cpu = '--'
memory = '--'
else:
cpu = self.webui_common.get_cpu_string(cpu)
memory = self.webui_common.get_memory_string(memory)
generator_list = self.webui_common.get_generators_list_ops()
for element in generator_list:
if element['name'] == ops_bgp_routers_name + ':Control:ControlNode:0':
analytics_data = element['href']
generators_vrouters_data = self.webui_common.get_details(
element['href'])
analytics_data = generators_vrouters_data.get(
'ModuleClientState').get('client_info')
if analytics_data['status'] == 'Established':
analytics_primary_ip = analytics_data[
'primary'].split(':')[0] + ' (Up)'
tx_socket_bytes = analytics_data.get(
'tx_socket_stats').get('bytes')
tx_socket_size = self.webui_common.get_memory_string(
int(tx_socket_bytes))
analytics_msg_count = generators_vrouters_data.get(
'ModuleClientState').get('session_stats').get('num_send_msg')
offset = 10
analytics_msg_count_list = range(
int(analytics_msg_count) - offset, int(analytics_msg_count) + offset)
analytics_messages_string = [
str(count) + ' [' + str(size) + ']' for count in analytics_msg_count_list for size in tx_socket_size]
ifmap_ip = bgp_routers_ops_data.get('BgpRouterState').get(
'ifmap_info').get('url').split(':')[0]
ifmap_connection_status = bgp_routers_ops_data.get(
'BgpRouterState').get('ifmap_info').get('connection_status')
ifmap_connection_status_change = bgp_routers_ops_data.get(
'BgpRouterState').get('ifmap_info').get('connection_status_change_at')
ifmap_connection_string = [ifmap_ip + ' (' + ifmap_connection_status + ' since ' + time +
')' for time in self.webui_common.get_node_status_string(ifmap_connection_status_change)]
process_state_list = bgp_routers_ops_data.get(
'BgpRouterState').get('process_state_list')
process_down_stop_time_dict = {}
process_up_start_time_dict = {}
exclude_process_list = [
'contrail-config-nodemgr', 'contrail-analytics-nodemgr', 'contrail-control-nodemgr', 'contrail-vrouter-nodemgr',
'openstack-nova-compute', 'contrail-svc-monitor', 'contrail-discovery:0', 'contrail-zookeeper', 'contrail-schema']
for i, item in enumerate(process_state_list):
if item['process_name'] == 'contrail-control':
control_node_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-control-nodemgr':
control_nodemgr_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict, process_up_start_time_dict)
if item['process_name'] == 'contrail-dns':
contrail_dns_string = self.webui_common.get_process_status_string(
item, process_down_stop_time_dict,
import os
import unittest
from programy.utils.parsing.linenumxml import LineNumberingParser
import xml.etree.ElementTree as ET # pylint: disable=wrong-import-order
from programy.parser.aiml_parser import AIMLParser
from programy.dialog.sentence import Sentence
from programy.parser.pattern.nodes.oneormore import PatternOneOrMoreWildCardNode
from programy.parser.pattern.nodes.root import PatternRootNode
from programy.parser.pattern.nodes.template import PatternTemplateNode
from programy.parser.pattern.nodes.that import PatternThatNode
from programy.parser.pattern.nodes.topic import PatternTopicNode
from programy.parser.pattern.nodes.word import PatternWordNode
from programytest.client import TestClient
from programy.parser.exceptions import ParserException
from programy.parser.exceptions import DuplicateGrammarException
class MockElement(ET.Element):
def __init__(self, tag, attrib={}):
ET.Element.__init__(self, tag, attrib)
self._start_line_number = 0
self._end_line_number = 0
class AIMLParserTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_storage(self):
super(AIMLParserTestClient, self).load_storage()
self.add_default_stores()
class AIMLParserTests(unittest.TestCase):
def setUp(self):
self._client = AIMLParserTestClient()
self._client_context = self._client.create_client_context("testid")
self.parser = self._client_context.brain.aiml_parser
def test__getstate__(self):
self.assertIsNotNone(self.parser.__getstate__()['_aiml_loader'])
self.assertIsNotNone(self.parser.__getstate__()['_template_parser'])
self.assertIsNotNone(self.parser.__getstate__()['_pattern_parser'])
self.assertIsNotNone(self.parser.__getstate__()['_num_categories'])
self.assertIsNone(self.parser.__getstate__().get('_brain', None))
self.assertIsNone(self.parser.__getstate__().get('_errors', None))
self.assertIsNone(self.parser.__getstate__().get('_duplicates', None))
def test__getstate__with_errors_and_duplicates(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertIsNotNone(self.parser.__getstate__()['_aiml_loader'])
self.assertIsNotNone(self.parser.__getstate__()['_template_parser'])
self.assertIsNotNone(self.parser.__getstate__()['_pattern_parser'])
self.assertIsNotNone(self.parser.__getstate__()['_num_categories'])
self.assertIsNone(self.parser.__getstate__().get('_brain', None))
self.assertIsNone(self.parser.__getstate__().get('_errors', None))
self.assertIsNone(self.parser.__getstate__().get('_duplicates', None))
def test__getstate__without_errors_and_duplicates(self):
if '_errors' in self.parser.__dict__:
del self.parser.__dict__['_errors']
if '_duplicates' in self.parser.__dict__:
del self.parser.__dict__['_duplicates']
self.assertIsNotNone(self.parser.__getstate__()['_aiml_loader'])
self.assertIsNotNone(self.parser.__getstate__()['_template_parser'])
self.assertIsNotNone(self.parser.__getstate__()['_pattern_parser'])
self.assertIsNotNone(self.parser.__getstate__()['_num_categories'])
self.assertIsNone(self.parser.__getstate__().get('_brain', None))
self.assertIsNone(self.parser.__getstate__().get('_errors', None))
self.assertIsNone(self.parser.__getstate__().get('_duplicates', None))
def test_check_aiml_tag(self):
aiml = ET.fromstring( """<?xml version="1.0" encoding="ISO-8859-1"?>
<aiml version="1.01"
xmlns="http://alicebot.org/2001/AIML"
xmlns:aiml="http://alicebot.org/2001/AIML"
xmlns:html="http://www.w3.org/TR/REC-html40">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
tag_name, namespace = AIMLParser.check_aiml_tag(aiml)
self.assertEquals("aiml", tag_name)
self.assertEquals("{http://alicebot.org/2001/AIML}", namespace)
def test_check_aiml_tag_no_aiml(self):
aiml = None
with self.assertRaises(ParserException):
tag_name, namespace = AIMLParser.check_aiml_tag(aiml)
def test_check_aiml_tag_no_namespace(self):
aiml = ET.fromstring( """<?xml version="1.0" encoding="ISO-8859-1"?>
<aiml version="1.01">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
tag_name, namespace = AIMLParser.check_aiml_tag(aiml)
self.assertEquals("aiml", tag_name)
self.assertEquals(None, namespace)
def test_check_aiml_tag_not_aiml(self):
aiml = ET.fromstring( """<?xml version="1.0" encoding="ISO-8859-1"?>
<aipl version="1.01"
xmlns="http://alicebot.org/2001/AIML"
xmlns:aiml="http://alicebot.org/2001/AIML"
xmlns:html="http://www.w3.org/TR/REC-html40">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aipl>
""")
with self.assertRaises(ParserException):
_, _ = AIMLParser.check_aiml_tag(aiml)
def test_parse_from_file_valid(self):
filename = os.path.dirname(__file__)+ '/valid.aiml'
self.parser.parse_from_file(filename)
def test_aiml_with_namespace(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="ISO-8859-1"?>
<aiml version="1.01"
xmlns="http://alicebot.org/2001/AIML"
xmlns:aiml="http://alicebot.org/2001/AIML"
xmlns:html="http://www.w3.org/TR/REC-html40">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
node = self.parser.pattern_parser.root.star
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
self.assertEqual(node.wildcard, "*")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertTrue(topic.has_one_or_more())
self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
self.assertEqual(topic.star.wildcard, "*")
that = topic.star.that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
template = that.star.template
self.assertIsNotNone(template)
self.assertIsInstance(template, PatternTemplateNode)
self.assertEqual(template.template.resolve(self._client_context), "RESPONSE")
def test_base_aiml_topic_category_template(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</topic>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
node = self.parser.pattern_parser.root.star
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
self.assertEqual(node.wildcard, "*")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "test")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
template = that.star.template
self.assertIsNotNone(template)
self.assertIsInstance(template, PatternTemplateNode)
self.assertEqual(template.template.resolve(self._client_context), "RESPONSE")
def test_base_aiml_topic_category_template_multi_line(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>*</pattern>
<template>
RESPONSE1,
RESPONSE2.
RESPONSE3
</template>
</category>
</topic>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
node = self.parser.pattern_parser.root.star
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
self.assertEqual(node.wildcard, "*")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "test")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
template = that.star.template
self.assertIsNotNone(template)
self.assertIsInstance(template, PatternTemplateNode)
self.assertEqual(template.template.resolve(self._client_context), "RESPONSE1, RESPONSE2. RESPONSE3")
def test_base_aiml_category_template(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
node = self.parser.pattern_parser.root.star
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
self.assertEqual(node.wildcard, "*")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertTrue(topic.has_one_or_more())
self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
self.assertEqual(topic.star.wildcard, "*")
that = topic.star.that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
template = that.star.template
self.assertIsNotNone(template)
self.assertIsInstance(template, PatternTemplateNode)
self.assertEqual(template.template.resolve(self._client_context), "RESPONSE")
def test_base_aiml_category_template_that(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<that>something</that>
<template>RESPONSE</template>
</category>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
node = self.parser.pattern_parser.root.star
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
self.assertEqual(node.wildcard, "*")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertTrue(topic.has_one_or_more())
self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
self.assertEqual(topic.star.wildcard, "*")
that = topic.star.that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertEqual(len(that.children), 1)
self.assertIsNotNone(that.children[0])
self.assertIsInstance(that.children[0], PatternWordNode)
self.assertEqual(that.children[0].word, "something")
template = that.children[0].template
self.assertIsNotNone(template)
self.assertIsInstance(template, PatternTemplateNode)
self.assertEqual(template.template.resolve(self._client_context), "RESPONSE")
def test_base_aiml_category_template_topic(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<topic>something</topic>
<template>RESPONSE</template>
</category>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
node = self.parser.pattern_parser.root.star
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
self.assertEqual(node.wildcard, "*")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "something")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
template = that.star.template
self.assertIsNotNone(template)
self.assertIsInstance(template, PatternTemplateNode)
self.assertEqual(template.template.resolve(self._client_context), "RESPONSE")
def test_base_aiml_category_template_topic_that(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<that>something</that>
<topic>other</topic>
<template>RESPONSE</template>
</category>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
node = self.parser.pattern_parser.root.star
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
self.assertEqual(node.wildcard, "*")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "other")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertEqual(len(that.children), 1)
self.assertIsNotNone(that.children[0])
self.assertIsInstance(that.children[0], PatternWordNode)
self.assertEqual(that.children[0].word, "something")
template = that.children[0].template
self.assertIsNotNone(template)
self.assertIsInstance(template, PatternTemplateNode)
self.assertEqual(template.template.resolve(self._client_context), "RESPONSE")
def test_base_aiml_multiple_categories(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>Hello</pattern>
<template>Hiya</template>
</category>
<category>
<pattern>Goodbye</pattern>
<template>See ya</template>
</category>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser)
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
self.assertEqual(2, len(self.parser.pattern_parser.root.children))
node = self.parser.pattern_parser.root.children[1]
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternWordNode)
self.assertEqual(node.word, "Hello")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertTrue(topic.has_one_or_more())
self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
self.assertEqual(topic.star.wildcard, "*")
that = topic.star.that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
node = self.parser.pattern_parser.root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternWordNode)
self.assertEqual(node.word, "Goodbye")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertTrue(topic.has_one_or_more())
self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
self.assertEqual(topic.star.wildcard, "*")
that = topic.star.that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
def test_base_aiml_multiple_categories_in_a_topic(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>Hello</pattern>
<template>Hiya</template>
</category>
<category>
<pattern>Goodbye</pattern>
<template>See ya</template>
</category>
</topic>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertEqual(2, len(self.parser.pattern_parser.root.children))
node = self.parser.pattern_parser.root.children[1]
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternWordNode)
self.assertEqual(node.word, "Hello")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "test")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
node = self.parser.pattern_parser.root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, PatternWordNode)
self.assertEqual(node.word, "Goodbye")
topic = node.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "test")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
def test_base_aiml_multiple_categories_in_and_out_of_topic(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>Welcome</pattern>
<template>Hello there</template>
</category>
<topic name="test">
<category>
<pattern>Hello</pattern>
<template>Hiya</template>
</category>
<category>
<pattern>Goodbye</pattern>
<template>See ya</template>
</category>
</topic>
<category>
<pattern>Interesting</pattern>
<template>Yes</template>
</category>
</aiml>
""")
self.assertIsNotNone(self.parser.pattern_parser.root)
self.assertEqual(4, len(self.parser.pattern_parser.root.children))
node1 = self.parser.pattern_parser.root.children[0]
self.assertIsNotNone(node1)
self.assertIsInstance(node1, PatternWordNode)
self.assertEqual(node1.word, "Interesting")
topic = node1.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertTrue(topic.has_one_or_more())
self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
self.assertEqual(topic.star.wildcard, "*")
that = topic.star.that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
node2 = self.parser.pattern_parser.root.children[1]
self.assertIsNotNone(node2)
self.assertIsInstance(node2, PatternWordNode)
self.assertEqual(node2.word, "Goodbye")
topic = node2.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "test")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
node3 = self.parser.pattern_parser.root.children[2]
self.assertIsNotNone(node3)
self.assertIsInstance(node3, PatternWordNode)
self.assertEqual(node3.word, "Hello")
topic = node3.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertEqual(len(topic.children), 1)
self.assertIsNotNone(topic.children[0])
self.assertIsInstance(topic.children[0], PatternWordNode)
self.assertEqual(topic.children[0].word, "test")
that = topic.children[0].that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
node4 = self.parser.pattern_parser.root.children[3]
self.assertIsNotNone(node4)
self.assertIsInstance(node4, PatternWordNode)
self.assertEqual(node4.word, "Welcome")
topic = node4.topic
self.assertIsNotNone(topic)
self.assertIsInstance(topic, PatternTopicNode)
self.assertTrue(topic.has_one_or_more())
self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
self.assertEqual(topic.star.wildcard, "*")
that = topic.star.that
self.assertIsNotNone(that)
self.assertIsInstance(that, PatternThatNode)
self.assertTrue(that.has_one_or_more())
self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
self.assertEqual(that.star.wildcard, "*")
def test_match_sentence(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>HELLO</pattern>
<template>Hiya</template>
</category>
</aiml>
""")
self.parser.pattern_parser.dump()
context = self.parser.match_sentence(self._client_context, Sentence(self._client_context, "HELLO"), "*", "*")
self.assertIsNotNone(context)
self.assertEqual("Hiya", context.template_node.template.resolve(self._client_context))
def test_inline_br_html(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>HELLO</pattern>
<template>Hello <br/> World</template>
</category>
</aiml>
""")
def test_inline_bold_html(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>HELLO</pattern>
<template>Hello <bold>You</bold> World</template>
</category>
</aiml>
""")
def test_iset(self):
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>Hello</pattern>
<template>Hi There</template>
</category>
<category>
<pattern># <iset>who, what</iset> are you</pattern>
<template>OK thanks</template>
</category>
<category>
<pattern># <iset>who, what</iset> is he</pattern>
<template>OK thanks</template>
</category>
</aiml>
""")
def test_duplicate_categories_in_topic(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertEqual(0, len(self.parser._duplicates))
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="Topic1">
<category>
<pattern>HELLO</pattern>
<template>Hi There</template>
</category>
<category>
<pattern>HELLO</pattern>
<template>Hi There</template>
</category>
</topic>
</aiml>
""")
self.assertEqual(1, len(self.parser._duplicates))
def test_duplicate_topic(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertEqual(0, len(self.parser._errors))
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic name="TOPIC1" />
<topic name="TOPIC1" />
<pattern>*</pattern>
<template>
Test Text
</template>
</category>
</aiml>
""")
self.assertEqual(1, len(self.parser._errors))
def test_duplicate_that(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertEqual(0, len(self.parser._errors))
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic name="TOPIC1" />
<that name="THAT1" />
<that name="THAT1" />
<pattern>*</pattern>
<template>
Test Text
</template>
</category>
</aiml>
""")
self.assertEqual(1, len(self.parser._errors))
def test_duplicate_pattern(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertEqual(0, len(self.parser._errors))
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic name="TOPIC1" />
<that name="THAT1" />
<pattern>*</pattern>
<pattern>*</pattern>
<template>
Test Text
</template>
</category>
</aiml>
""")
self.assertEqual(1, len(self.parser._errors))
def test_no_pattern(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertEqual(0, len(self.parser._errors))
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic name="TOPIC1" />
<that name="THAT1" />
<template>
Test Text
</template>
</category>
</aiml>
""")
self.assertEqual(1, len(self.parser._errors))
def test_duplicate_template(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertEqual(0, len(self.parser._errors))
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic name="TOPIC1" />
<that name="THAT1" />
<pattern>*</pattern>
<template>
Test Text
</template>
<template>
Test Text
</template>
</category>
</aiml>
""")
self.assertEqual(1, len(self.parser._errors))
def test_no_template(self):
self.parser._errors = []
self.parser._duplicates = []
self.assertEqual(0, len(self.parser._errors))
self.parser.parse_from_text(
"""<?xml version="1.0" encoding="UTF-8"?>
<reponame>SGrosse-Holz/tracklib<filename>tracklib/analysis/neda/models.py
"""
The inference models, and the interface they have to conform to.
"""
import abc
import functools
import numpy as np
import scipy.optimize
from tracklib import Trajectory
from tracklib.models import rouse
from .util import Loopingtrace
class Model(metaclass=abc.ABCMeta):
"""
Abstract base class for inference models
The most important capability of any model is the likelihood function
`logL` for a combination of `Loopingtrace` and `Trajectory`. Furthermore, a
model should provide an initial guess for a good `Loopingtrace`.
The method `trajectory_from_loopingtrace` is considered an optional part of
the interface, since it is not important to the inference, but might come
in handy when working with a `Model`. So it is recommended but not
required.
"""
@abc.abstractmethod
def initial_loopingtrace(self, traj):
"""
Give a quick guess for a good `Loopingtrace` for a `Trajectory`.
Parameters
----------
traj : Trajectory
Returns
-------
Loopingtrace
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def logL(self, loopingtrace, traj):
"""
Calculate (log-)likelihood for (`Loopingtrace`, `Trajectory`) pair.
Parameters
----------
loopingtrace : Loopingtrace
traj : Trajectory
Returns
-------
float
log-likelihood associated with the inputs
"""
raise NotImplementedError # pragma: no cover
def trajectory_from_loopingtrace(self, loopingtrace, localization_error=0.1, d=3):
"""
Generate a `Trajectory` from this `Model` and the given `Loopingtrace`
Parameters
----------
loopingtrace : Loopingtrace
localization_error : float, optional
how much Gaussian noise to add to the trajectory.
d : int, optional
number of spatial dimensions
Returns
-------
Trajectory
"""
raise NotImplementedError # pragma: no cover
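# Illustrative sketch (not part of the library): a minimal concrete Model.
# `TwoStateModel` and its `scales` parameter are hypothetical names; each data
# point is scored independently against one of two fixed Gaussian scales.
#
# class TwoStateModel(Model):
#     def __init__(self, scales=(1.0, 2.0)):
#         self.scales = np.asarray(scales)
#
#     def initial_loopingtrace(self, traj):
#         loopingtrace = Loopingtrace.forTrajectory(traj, len(self.scales))
#         distances = traj.abs()[loopingtrace.t][:, 0]
#         # per-point log-likelihood of each state, up to a constant
#         logLs = -0.5*(distances[:, np.newaxis] / self.scales[np.newaxis, :])**2 \
#                 - np.log(self.scales)[np.newaxis, :]
#         loopingtrace.state = np.argmax(logLs, axis=1)
#         return loopingtrace
#
#     def logL(self, loopingtrace, traj):
#         distances = traj.abs()[loopingtrace.t][:, 0]
#         scales = self.scales[loopingtrace.state]
#         return float(np.sum(-0.5*(distances/scales)**2 - np.log(scales)))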
class RouseModel(Model):
"""
Inference with Rouse models
This inference model uses a given number of `rouse.Model` instances to
choose from for each propagation interval. In the default use case this
switches between a looped and unlooped model, but it could be way more
general than that, e.g. incorporating different looped states, loop
positions, numbers of loops, etc.
Parameters
----------
N, D, k : float
parameters for the `rouse.Model`. `!N` is the number of monomers, `!D`
the diffusion constant of a free monomer, `k` the backbone strength.
looppositions : list of 2-tuples of int, optional
list of positions of the extra bond. For each entry, a new
`rouse.Model` instance will be set up. Remember to include an unlooped
model (if wanted) by including a position like ``(0, 0)``.
k_extra : float, optional
the strength of the extra bond. By default equal to `k`
measurement : "end2end" or (N,) np.ndarray
which distance to measure. The default setting "end2end" is equivalent
to specifying a vector ``np.array([-1, 0, ..., 0, 1])``, i.e. measuring
the distance from the first to the last monomer.
localization_error : float or np.array, optional
a global value for the localization error. By default, we use the value
stored in ``traj.meta['localization_error']``, which allows
trajectory-wise specification of error. But for example for fitting it
might be useful to have one global setting for localization error, at
which point it becomes part of the model.
Attributes
----------
models : list of `rouse.Model`
the models used for inference
localization_error : float, array, or None
if ``None``, use ``traj.meta['localization_error']`` for each
trajectory ``traj``. If float, assume that value for each dimension of
the trajectory. If array, should contain values for each dimension
separately, i.e. ``np.array([Δx, Δy, Δz])``.
Notes
-----
By default, this model assumes that the difference between the models is
the position of the extra bond. It is easy to generalize this, by editing
the `models` attribute after initialization. The only thing to pay
attention to is that each model needs to have a `!measurement` vector.
The `initial_loopingtrace` for this `Model` is the MLE assuming time scale
separation. I.e. we calculate the timepoint-wise MLE using the exact steady
state distributions of each model.
See also
--------
Model, rouse.Model
"""
def __init__(self, N, D, k,
k_extra=None,
looppositions=((0, 0), (0, -1)), # no mutable default elements! (i.e. tuple instead of list)
measurement="end2end",
localization_error=None,
):
self.localization_error = localization_error
if k_extra is None:
k_extra = k
if str(measurement) == "end2end":
measurement = np.zeros((N,))
measurement[0] = -1
measurement[-1] = 1
self.models = []
for loop in looppositions:
mod = rouse.Model(N, D, k, k_extra, extrabond=loop)
mod.measurement = measurement
self.models.append(mod)
def initial_loopingtrace(self, traj):
# We give the MLE assuming time scale separation
# This is exactly the same procedure as for FactorizedModel, where we
# utilize the steady state distributions of the individual Rouse
# models.
loopingtrace = Loopingtrace.forTrajectory(traj, len(self.models))
distances = traj.abs()[loopingtrace.t][:, 0]
ss_variances = np.array([mod.measurement @ mod.steady_state(True)[1] @ mod.measurement \
for mod in self.models])
ss_likelihoods = -0.5*(distances[:, np.newaxis]**2 / ss_variances[np.newaxis, :] \
+ traj.d*np.log(2*np.pi*ss_variances)[np.newaxis, :])
loopingtrace.state = np.argmax(ss_likelihoods, axis=1)
return loopingtrace
def logL(self, loopingtrace, traj):
if traj.N == 2: # pragma: no cover
traj = traj.relative()
if self.localization_error is not None:
if np.isscalar(self.localization_error):
localization_error = traj.d*[self.localization_error]
else:
localization_error = self.localization_error
else:
localization_error = traj.meta['localization_error']
localization_error = np.asarray(localization_error)
assert localization_error.shape == (traj.d,)
# if not hasattr(loopingtrace, 'individual_logLs'): # interferes with model fitting
looptrace = loopingtrace.full_valid()
logLs = [rouse.multistate_likelihood(traj[:][:, i],
self.models,
looptrace,
localization_error[i],
return_individual_likelihoods=True,
)[1] \
for i in range(traj.d)]
loopingtrace.individual_logLs = np.sum(logLs, axis=0)[loopingtrace.t]
return np.nansum(loopingtrace.individual_logLs)
def trajectory_from_loopingtrace(self, loopingtrace, localization_error=None, d=3):
if localization_error is None:
if self.localization_error is None:
raise ValueError("Need to specify either localization_error or model.localization_error")
else:
localization_error = self.localization_error
if np.isscalar(localization_error):
localization_error = d*[localization_error]
localization_error = np.asarray(localization_error)
if localization_error.shape != (d,):
raise ValueError("Did not understand localization_error")
arr = np.empty((loopingtrace.T, d))
arr[:] = np.nan
cur_mod = self.models[loopingtrace[0]]
conf = cur_mod.conf_ss(True, d)
arr[loopingtrace.t[0], :] = cur_mod.measurement @ conf
for i in range(1, len(loopingtrace)):
cur_mod = self.models[loopingtrace[i]]
conf = cur_mod.evolve(conf, True, loopingtrace.t[i]-loopingtrace.t[i-1])
arr[loopingtrace.t[i], :] = cur_mod.measurement @ conf
return Trajectory.fromArray(arr + localization_error[np.newaxis, :]*np.random.normal(size=arr.shape),
localization_error=localization_error,
loopingtrace=loopingtrace,
)
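# Illustrative usage (hypothetical parameter values; `traj` is a tracklib
# Trajectory with meta['localization_error'] set, or pass a model-level
# localization_error as below):
# model = RouseModel(N=20, D=1.0, k=1.0, localization_error=0.1)
# lt = model.initial_loopingtrace(traj)
# print(model.logL(lt, traj))
# synthetic = model.trajectory_from_loopingtrace(lt, d=3)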
class FactorizedModel(Model):
"""
A simplified model, assuming time scale separation
This model assumes that each point is sampled from one of a given list of
distributions, where there is no correlation between the choice of
distribution for each point. It runs significantly faster than the full
`RouseModel`, but is of course inaccurate if the Rouse time is longer than or
comparable to the frame interval of the recorded trajectories.
Parameters
----------
distributions : list of distribution objects
these will usually be ``scipy.stats.rv_continuous`` objects (e.g.
Maxwell), but can be pretty arbitrary. The only function they have to
provide is ``logpdf()``, which should take a scalar or vector of
distance values and return a corresponding number of outputs. If you
plan on using `trajectory_from_loopingtrace`, the distributions should
also have an ``rvs()`` method for sampling.
Attributes
----------
distributions : list of distribution objects
Notes
-----
This being a heuristic model, we assume that the localization error is
already incorporated in the `!distributions`, as would be the case if they
come from experimental data. Therefore, this class ignores the
``meta['localization_error']`` field of `Trajectory`.
Instances of this class memoize trajectories they have seen before. To
reset the memoization, you can either reinstantiate or clear the cache
manually:
>>> model = FactorizedModel(model.distributions)
... model.clear_memo()
If using ``scipy.stats.maxwell``, make sure to use it correctly, i.e. you
have to specify ``scale=...``. Writing ``scipy.stats.maxwell(5)`` instead
of ``scipy.stats.maxwell(scale=5)`` shifts the distribution instead of
scaling it and leads to ``-inf`` values in the likelihood, which then screw
up the MCMC. The classic error to get for this is ``invalid value
encountered in double_scalars``. This is caused by ``new_logL - cur_logL``
reading ``- inf + inf`` at the first MCMC iteration, if `logL` returns
``-inf``.
"""
def __init__(self, distributions):
self.distributions = distributions
self._known_trajs = dict()
def _memo(self, traj):
"""
(internal) memoize `traj`
"""
if not traj in self._known_trajs:
with np.errstate(divide='ignore'): # nans in the trajectory raise 'divide by zero in log'
logL_table = np.array([dist.logpdf(traj.abs()[:][:, 0])
for dist in self.distributions
])
self._known_trajs[traj] = {'logL_table' : logL_table}
def clear_memo(self):
"""
Clear the memoization cache
"""
self._known_trajs = dict()
def initial_loopingtrace(self, traj):
self._memo(traj)
loopingtrace = Loopingtrace.forTrajectory(traj, len(self.distributions))
loopingtrace.state = np.argmax(self._known_trajs[traj]['logL_table'][:, loopingtrace.t], axis=0)
return loopingtrace
def logL(self, loopingtrace, traj):
self._memo(traj)
return np.sum(self._known_trajs[traj]['logL_table'][loopingtrace.state, loopingtrace.t])
def trajectory_from_loopingtrace(self, loopingtrace, localization_error=0., d=3):
# Note that the distributions in the model give us only the length, not
# the orientation. So we also have to sample unit vectors
find_name_components(_compartment)[1:]
compartments_strata.reverse()
compartments_strata.append("")
# loop through each stratification of the parameter and adapt if the parameter is available
for stratum in compartments_strata:
if stratum in self.available_death_rates:
all_sub_parameters.append("universal_death_rateX" + stratum)
if "universal_death_rateX" + stratum in self.overwrite_parameters:
break
all_sub_parameters.reverse()
return all_sub_parameters
def create_mortality_functions(self, _compartment, _sub_parameters):
"""
loop through all the components to the population-wide mortality and create the recursive functions
:param _compartment: str
name of the compartment of interest
:param _sub_parameters: list
the names of the functions that need to update the upstream parameters
:return:
"""
self.final_parameter_functions[
"universal_death_rateX" + _compartment
] = self.adaptation_functions[_sub_parameters[0]]
for component in _sub_parameters[1:]:
# get the new function to act on the less stratified function (closer to the "tree-trunk")
if component not in self.parameters:
raise ValueError(
"parameter component %s not found in parameters attribute" % component
)
elif type(self.parameters[component]) == float:
self.adaptation_functions[component] = create_multiplicative_function(
self.parameters[component]
)
elif type(self.parameters[component]) == str:
self.adaptation_functions[component] = create_time_variant_multiplicative_function(
self.time_variants[self.parameters[component]]
)
else:
raise ValueError("parameter component %s not appropriate format" % component)
# create the composite function
self.final_parameter_functions[
"universal_death_rateX" + _compartment
] = create_function_of_function(
self.adaptation_functions[component],
self.final_parameter_functions["universal_death_rateX" + _compartment],
)
def find_transition_components(self, _parameter):
"""
finds each of the strings for the functions acting on the next function in the sequence
:param _parameter: str
full name of the parameter of interest
"""
sub_parameters = []
# work backwards to allow stopping for overwriting requests, then reverse in preparation for function creation
for x_instance in extract_reversed_x_positions(_parameter):
component = _parameter[:x_instance]
sub_parameters.append(component)
if component in self.overwrite_parameters:
break
sub_parameters.reverse()
return sub_parameters
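# Illustrative decomposition (hypothetical parameter names): for the parameter
# "contact_rateXage_5Xlocation_urban" with no overwriting requests, this returns
# ["contact_rate", "contact_rateXage_5", "contact_rateXage_5Xlocation_urban"],
# i.e. the base parameter first, then progressively more stratified versions.
# If "contact_rateXage_5" were in self.overwrite_parameters, the backwards walk
# would stop after including it, excluding the base "contact_rate".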
def create_transition_functions(self, _parameter, _sub_parameters):
"""
builds up each parameter to be implemented as a function, recursively creating an outer function that calls the
inner function
:param _parameter: str
full name of the parameter of interest
:param _sub_parameters: list
list of the strings representing the sub-parameters, including the base parameter as the stem and with all
of the relevant strata in the stratification sequence following
"""
# start from base value as a function of time, even if the time argument is ignored
if isinstance(self.parameters[_sub_parameters[0]], (float, int)):
self.final_parameter_functions[_parameter] = lambda time: self.parameters[
_sub_parameters[0]
]
elif type(self.parameters[_sub_parameters[0]]) == str:
self.final_parameter_functions[_parameter] = self.adaptation_functions[
_sub_parameters[0]
]
# then cycle through other applicable components and extend function recursively, only if component available
for component in _sub_parameters[1:]:
# get the new function to act on the less stratified function (closer to the "tree-trunk")
if component not in self.parameters:
raise ValueError(
"parameter component %s not found in parameters attribute" % component
)
elif isinstance(self.parameters[component], float) or isinstance(
self.parameters[component], int
):
self.adaptation_functions[component] = create_multiplicative_function(
self.parameters[component]
)
elif type(self.parameters[component]) == str:
self.adaptation_functions[component] = create_time_variant_multiplicative_function(
self.time_variants[self.parameters[component]]
)
else:
raise ValueError("parameter component %s not appropriate format" % component)
# create the composite function
self.final_parameter_functions[_parameter] = create_function_of_function(
self.adaptation_functions[component], self.final_parameter_functions[_parameter],
)
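# Illustrative composition (hypothetical values, assuming the helper functions
# multiply the inner function's value): with a base parameter of 0.1 and a
# float component of 0.5 for one stratum, the resulting
# final_parameter_functions[_parameter](t) returns 0.5 * 0.1 = 0.05 for any t.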
def prepare_infectiousness_calculations(self):
"""
master method to run all the code concerned with preparation for force of infection calculations
"""
# infectiousness preparations
self.prepare_all_infectiousness_multipliers()
self.find_infectious_indices()
# mixing preparations
if self.mixing_matrix is not None:
self.add_force_indices_to_transitions()
self.find_mixing_denominators()
# reconciling the strains and the mixing attributes together into one structure
self.find_strain_mixing_multipliers()
def prepare_all_infectiousness_multipliers(self):
"""
find the infectiousness multipliers for each compartment being implemented in the model
"""
# start from assumption that each compartment is fully and equally infectious
self.infectiousness_multipliers = [1.0] * len(self.compartment_names)
# if infectiousness modification requested for the compartment type, multiply through by the current value
for n_comp, compartment in enumerate(self.compartment_names):
for modifier in self.infectiousness_levels:
if modifier in find_name_components(compartment):
self.infectiousness_multipliers[n_comp] *= self.infectiousness_levels[modifier]
self.make_further_infectiousness_adjustments()
def make_further_infectiousness_adjustments(self):
"""
Work through specific requests for specific adjustments, to escape the requirement to only adjust compartment
infectiousness according to stratification process - with all infectious compartments having the same
adjustment.
"""
for i_adjustment in range(len(self.individual_infectiousness_adjustments)):
for i_comp, comp in enumerate(self.compartment_names):
if all(
[
component in find_name_components(comp)
for component in self.individual_infectiousness_adjustments[i_adjustment][0]
]
):
self.infectiousness_multipliers[
i_comp
] = self.individual_infectiousness_adjustments[i_adjustment][1]
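# Illustrative request format (hypothetical strata names): each entry of
# self.individual_infectiousness_adjustments is a ([components], value) pair,
# e.g. (["age_15", "clinical_sympt"], 0.5) sets the multiplier of every
# compartment whose name contains both components to exactly 0.5 (an
# assignment, not a further multiplication).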
def find_infectious_indices(self):
"""
find the infectious indices by strain and overall, as opposed to just overall in EpiModel
note that this changes the structure by one hierarchical level compared to EpiModel - in that previously we had
self.infectious_indices a list of infectious indices and now it is has a dictionary structure at the highest
level, followed by keys for each strain with values being lists that are equivalent to the
self.infectious_indices list for the unstratified version
"""
# find the indices for the compartments that are infectious across all strains
self.infectious_indices["all_strains"] = self.find_all_infectious_indices()
# then find the infectious compartment for each strain separately
for strain in self.strains:
self.infectious_indices[strain] = convert_boolean_list_to_indices(
[
create_stratum_name("strain", strain, joining_string="")
in find_name_components(comp)
and i_comp in self.infectious_indices["all_strains"]
for i_comp, comp in enumerate(self.compartment_names)
]
)
def add_force_indices_to_transitions(self):
"""
find the indices from the force of infection vector to be applied for each infection flow and populate to the
force_index column of the flows frame
"""
# identify the indices of all the infection-related flows to be implemented
infection_flow_indices = [
n_flow
for n_flow, flow in enumerate(self.transition_flows.type)
if "infection" in flow
and self.transition_flows.implement[n_flow] == len(self.all_stratifications)
]
# loop through and find the index of the mixing matrix applicable to the flow, of which there should be only one
for n_flow in infection_flow_indices:
found = False
for i_group, force_group in enumerate(self.mixing_categories):
if all(
stratum in find_name_components(self.transition_flows.origin[n_flow])
for stratum in find_name_components(force_group)
):
self.transition_flows.force_index[n_flow] = i_group
if found:
raise ValueError(
"mixing group found twice for transition flow number %s" % n_flow
)
found = True
continue
if not found:
raise ValueError("mixing group not found for transition flow number %s" % n_flow)
def find_mixing_denominators(self):
"""
for each mixing category, create a list of the compartment numbers that are relevant
:return mixing_indices: list
indices of the compartments that are applicable to a particular mixing category
"""
if self.mixing_matrix is None:
self.mixing_indices = {"all_population": range(len(self.compartment_names))}
else:
for category in self.mixing_categories:
self.mixing_indices[category] = [
i_comp
for i_comp, compartment in enumerate(self.compartment_names)
if all(
[
component in find_name_components(compartment)
for component in find_name_components(category)
]
)
]
self.mixing_indices_arr = np.array(list(self.mixing_indices.values()))
def find_strain_mixing_multipliers(self):
"""
find the relevant indices to be used to calculate the force of infection contribution to each strain from each
mixing category as a list of indices - and separately find multipliers as a list of the same length for
their relative infectiousness extracted from self.infectiousness_multipliers
"""
for strain in self.strains + ["all_strains"]:
(self.strain_mixing_elements[strain], self.strain_mixing_multipliers[strain],) = (
{},
{},
)
for category in (
["all_population"] if self.mixing_matrix is None else self.mixing_categories
):
self.strain_mixing_elements[strain][category] = numpy.array(
[
index
for index in self.mixing_indices[category]
if index in self.infectious_indices[strain]
]
)
self.strain_mixing_multipliers[strain][category] = numpy.array(
[
self.infectiousness_multipliers[i_comp]
for i_comp in self.strain_mixing_elements[strain][category]
]
)
def find_transition_indices_to_implement(
self, back_one: int = 0, include_change: bool = False
) -> List[int]:
"""
Finds all the indices of the transition flows that need to be stratified,
Overrides the version in the unstratified EpiModel
:parameters:
back_one: int
number to subtract from self.all_stratifications, which will be one if this method is being called after the
stratification has been added
include_change: bool
whether to include the strata_change transition flows
:return: list
list of indices of the flows that need to be stratified
"""
return [
idx
for idx, flow in self.transition_flows.iterrows()
if (flow.type != Flow.STRATA_CHANGE or include_change)
and flow.implement == len(self.all_stratifications) - back_one
]
def find_change_indices_to_implement(self, back_one=0):
"""
find the indices of the equilibration flows to be applied in the transitions data frame
:parameters:
back_one: int
see find_transition_indices_to_implement
"""
return [
idx
for idx, flow in self.transition_flows.iterrows()
if flow.type == Flow.STRATA_CHANGE
and flow.implement == len(self.all_stratifications) - back_one
]
def find_death_indices_to_implement(self, back_one=0):
"""
find all the indices of the death flows that need to be stratified
separated out as a very short method so that it can override the version in the unstratified EpiModel
:param back_one: int
number to subtract from self.all_stratifications, which will be one if this method is being called after the
stratification has been added
:return: list
list of indices of the flows that need to be stratified
"""
return self.death_flows[
self.death_flows.implement == len(self.all_stratifications) - back_one
].index
"""
methods to | |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/statestream
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import copy
import os
import sys
from statestream.meta.neuron_pool import np_state_shape
from statestream.meta.losses import all_losses, has_target
from statestream.utils.yaml_wrapper import load_yaml
# String lookup for short to long conversion.
def S2L(t):
if t.startswith("np"):
return "neuron_pools"
elif t.startswith("sp"):
return "synapse_pools"
elif t.startswith("plast"):
return "plasticities"
elif t.startswith("if"):
return "interfaces"
return None
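# Usage examples (illustrative identifiers); the lookup is purely prefix-based:
# S2L("np_retina") -> "neuron_pools"
# S2L("plast_hebb") -> "plasticities"
# S2L("xyz") -> None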
# =============================================================================
# =============================================================================
# =============================================================================
def cm2fd(meta):
"""Function to convert ruamel CommentedMap to dictionary respecting tuples.
"""
if isinstance(meta, dict):
fd = {}
for k, v in meta.items():
if k[0] == "(" and k[-1] == ")":
fd[eval(k)] = cm2fd(v)
else:
fd[k] = cm2fd(v)
return fd
else:
if isinstance(meta, str):
if meta[0] == "(" and meta[-1] == ")":
return eval(meta)
else:
return meta
else:
return meta
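# Example (illustrative): keys or string values that look like tuples are
# converted via eval, everything else passes through unchanged:
# cm2fd({"(0, 1)": "(2, 3)", "name": "retina"})
# -> {(0, 1): (2, 3), "name": "retina"}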
# =============================================================================
# =============================================================================
# =============================================================================
def get_item_type(net, item_id):
"""Function to determine the item type for an item.
Note: For meta-variables, which in shared-memory are stored
besides items, this will return None.
Parameter
---------
net : dictionary
The network dictionary.
item_id : string
The item name / identifier.
Return
------
item_type : str
The type of the item (np, sp, plast or if).
"""
item_type = None
if item_id in net["neuron_pools"]:
item_type = "np"
elif item_id in net["synapse_pools"]:
item_type = "sp"
elif item_id in net["plasticities"]:
item_type = "plast"
elif item_id in net["interfaces"]:
item_type = "if"
return item_type
# =============================================================================
# =============================================================================
# =============================================================================
def network_rollback(net, depth, nps, sps):
"""Function to rollback an initialized network rollout.
"""
# This for loop: build sps[d] and nps[d-1].
for d in range(depth):
D = depth - d
# Loop over all sps.
for s,S in net["synapse_pools"].items():
# If sp targets np in current depth D ...
if S["target"] in nps[D]:
# ... add sp.
sps[D].append(s)
# Add all source nps.
sources = [item for sub_list in S["source"] for item in sub_list]
for src in sources:
if src not in nps[D - 1]:
nps[D - 1].append(src)
# Now prune nps (and target sps) which have no valid inputs.
for d in range(depth - 1):
del_nps = []
del_sps = []
for n in nps[d + 1]:
# Count input sps for this neuron_pool.
np_ins = False
for s in sps[d + 1]:
# Check if np is target of sp.
if net["synapse_pools"][s]["target"] == n:
np_ins = True
break
# If the np has no inputs, memorize it and all downstream sps (those reading from it) for deletion.
if not np_ins:
# TODO: This is actually a relevant warning in some cases, but is distracting in general.
# Best introduce a general verbosity level: debug, warning, error
# print("\n Warning: Pruned np " + str(n) + " during rollout.")
del_nps.append(n)
for s in sps[d + 2]:
S = net["synapse_pools"][s]
# Get all sp sources.
srcs = [src for sub_list in S["source"] for src in sub_list]
# If n in sources, memorize sp for deletion.
if n in srcs:
del_sps.append(s)
# Remove memorized nps / sps for this depth.
for n in del_nps:
if n in nps[d + 1]:
# Remove all n.
nps[d + 1] \
= [new_n for new_n in nps[d + 1] if new_n != n]
for s in del_sps:
if s in sps[d + 2]:
# Remove all s.
sps[d + 2] \
= [new_s for new_s in sps[d + 2] if new_s != s]
# =============================================================================
# =============================================================================
# =============================================================================
def is_sane_module_spec(net):
"""Check if specifications of modules in net is sane.
"""
is_sane = True
if "modules" in net:
if not isinstance(net["modules"], dict):
print("\nError: Modules specification in st_graph must be a dict.")
is_sane = False
else:
for m,M in net["modules"].items():
if not isinstance(M, dict):
print("\nError: Specification of a module must be a dict: " \
+ str(m))
is_sane = False
else:
if "neuron_pools" in M:
pass
if "synapse_pools" in M:
pass
if "plasticities" in M:
pass
if "interfaces" in M:
pass
if "core_clients" in M:
pass
return is_sane
# =============================================================================
# =============================================================================
# =============================================================================
def st_graph_import(inet):
"""Recursively import other .st_graph files for inet.
Note that we do not (want to) check for import loops.
Parameters
----------
inet :: Network dictionary to be augmented with its imports.
Returns
-------
onet :: the network inet with its added imports.
"""
onet = copy.deepcopy(inet)
if "import" in inet:
example_path = os.path.dirname(os.path.abspath(__file__)) \
+ "/../../examples/"
for imp in inet["import"]:
# Create file name.
st_graph_file = example_path + str(imp[0]) + ".st_graph"
# Look if external st_graph file exists and load it as meta network.
if not os.path.isfile(st_graph_file):
print("\nError: Unable to import " + str(imp[0]) \
+ ". File not found: " + st_graph_file)
with open(st_graph_file) as f:
# loc_mn = MetaNetwork(load_yaml(f))
# loc_net = loc_mn.net
loc_net = load_yaml(f)
# Add source to source list.
if st_graph_file not in onet["__source_root__"]:
onet["__source_root__"].append(st_graph_file)
loc_net["__source_root__"] = copy.deepcopy(onet["__source_root__"])
# Before importing loc_net into onet, import all of loc_net's imports into itself.
# This is a recursion and it will fail for loopy imports.
loc_net = st_graph_import(loc_net)
E = imp[1]
if E in ["neuron_pools", "synapse_pools", "plasticities", "interfaces"]:
# Import all items of a type.
if E in loc_net:
# Add type if not yet present.
if E not in onet:
onet[E] = {}
# Add all entries.
for i,I in loc_net[E].items():
# Add empty item if not exist already.
if i not in onet[E]:
onet[E][i] = {}
# Add item parameters if not exist already.
for p,P in I.items():
if p not in onet[E][i]:
onet[E][i][p] = copy.copy(P)
elif E in ["modules", "core_clients"]:
# Import all modules from source.
if E in loc_net:
# Add type if not yet present.
if E not in onet:
onet[E] = {}
# Add all entries.
for m,M in loc_net[E].items():
# Add empty item if not exist already.
if m not in onet[E]:
onet[E][m] = copy.copy(M)
elif E.startswith("modules.") or E.startswith("core_clients."):
# Import a single module or core client definition.
mods = E.split(".")
if mods[0] in loc_net:
if mods[1] in loc_net[mods[0]]:
if mods[0] not in onet:
onet[mods[0]] = {}
if mods[1] not in onet[mods[0]]:
onet[mods[0]][mods[1]] \
= copy.copy(loc_net[mods[0]][mods[1]])
elif E == "tag_specs":
# Import all modules from source.
if E in loc_net:
# Add type if not yet present.
if E not in onet:
onet[E] = {}
# Add all entries.
for m,M in loc_net[E].items():
# Add empty item if not exist already.
if m not in onet[E]:
onet[E][m] = copy.copy(M)
elif E.startswith("tag_specs."):
# Import a single module definition.
mods = E.split(".")
if "tag_specs" in loc_net:
if mods[1] in loc_net["tag_specs"]:
if not "tag_specs" in onet:
onet["tag_specs"] = {}
if mods[1] not in onet["tag_specs"]:
onet["tag_specs"][mods[1]] \
= copy.copy(loc_net["tag_specs"][mods[1]])
elif E.startswith("neuron_pools.") \
or E.startswith("synapse_pools.") \
or E.startswith("plasticities.") \
or E.startswith("interfaces."):
# Import all items of type.tag
mods = E.split(".")
for i,I in loc_net[mods[0]].items():
if "tags" in I:
if mods[1] in I["tags"]:
if not i in onet[mods[0]]:
onet[mods[0]][i] = {}
# Add item parameters if not exist already.
for p,P in I.items():
if p not in onet[mods[0]][i]:
onet[mods[0]][i][p] = copy.copy(P)
elif E.startswith("globals"):
# Import all or specified global variable(s).
if "globals" in loc_net:
if E == "globals":
# Import all global variables.
if not "globals" in onet:
onet["globals"] = {}
for i,I in loc_net["globals"].items():
onet["globals"][i] = copy.copy(I)
elif E.startswith("globals."):
# Import specific global variable.
if not "globals" in onet:
onet["globals"] = {}
mods = E.split(".")
if mods[1] in loc_net["globals"]:
onet["globals"][mods[1]] = copy.copy(loc_net["globals"][mods[1]])
else:
# Assume that a tag is given and import all items with this tag.
for t in ["neuron_pools", "synapse_pools", "plasticities", "interfaces"]:
for i,I in loc_net[t].items():
| |
open(filename, 'wt') as file:
file.write(self.graphviz_string(**options))
### Spectrum
def spectrum(self, laplacian=False):
r"""
Return a list of the eigenvalues of the adjacency matrix.
INPUT:
- ``laplacian`` -- boolean (default: ``False``); if ``True``, use the
Laplacian matrix (see :meth:`kirchhoff_matrix`)
OUTPUT:
A list of the eigenvalues, including multiplicities, sorted with the
largest eigenvalue first.
.. SEEALSO::
The method :meth:`spectral_radius` returns floating point
approximation of the maximum eigenvalue.
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.spectrum()
[3, 1, 1, 1, 1, 1, -2, -2, -2, -2]
sage: P.spectrum(laplacian=True)
[5, 5, 5, 5, 2, 2, 2, 2, 2, 0]
sage: D = P.to_directed()
sage: D.delete_edge(7, 9)
sage: D.spectrum()
[2.9032119259..., 1, 1, 1, 1, 0.8060634335..., -1.7092753594..., -2, -2, -2]
::
sage: C = graphs.CycleGraph(8)
sage: C.spectrum()
[2, 1.4142135623..., 1.4142135623..., 0, 0, -1.4142135623..., -1.4142135623..., -2]
A digraph may have complex eigenvalues. Previously, the complex parts of
graph eigenvalues were being dropped. For a 3-cycle, we have::
sage: T = DiGraph({0: [1], 1: [2], 2: [0]})
sage: T.spectrum()
[1, -0.5000000000... + 0.8660254037...*I, -0.5000000000... - 0.8660254037...*I]
TESTS:
The Laplacian matrix of a graph is the negative of the adjacency matrix
with the degree of each vertex on the diagonal. So for a regular graph,
if `\delta` is an eigenvalue of a regular graph of degree `r`, then
`r-\delta` will be an eigenvalue of the Laplacian. The
Hoffman-Singleton graph is regular of degree 7, so the following will
test both the Laplacian construction and the computation of
eigenvalues. ::
sage: H = graphs.HoffmanSingletonGraph()
sage: evals = H.spectrum()
sage: lap = [7 - x for x in evals]
sage: lap.sort(reverse=True)
sage: lap == H.spectrum(laplacian=True)
True
"""
# Ideally the spectrum should return something like a Factorization object
# containing each eigenvalue once, along with its multiplicity.
# This function, returning a list, could then just be renamed "eigenvalues".
vertices = list(self)
if laplacian:
M = self.kirchhoff_matrix(vertices=vertices)
else:
M = self.adjacency_matrix(vertices=vertices)
evals = M.eigenvalues()
evals.sort(reverse=True)
return evals
def characteristic_polynomial(self, var='x', laplacian=False):
r"""
Return the characteristic polynomial of the adjacency matrix of the
(di)graph.
Let `G` be a (simple) graph with adjacency matrix `A`. Let `I` be the
identity matrix of dimensions the same as `A`. The characteristic
polynomial of `G` is defined as the determinant `\det(xI - A)`.
.. NOTE::
``characteristic_polynomial`` and ``charpoly`` are aliases and thus
provide exactly the same method.
INPUT:
- ``x`` -- (default: ``'x'``); the variable of the characteristic
polynomial
- ``laplacian`` -- boolean (default: ``False``); if ``True``, use the
Laplacian matrix
.. SEEALSO::
- :meth:`kirchhoff_matrix`
- :meth:`laplacian_matrix`
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.characteristic_polynomial()
x^10 - 15*x^8 + 75*x^6 - 24*x^5 - 165*x^4 + 120*x^3 + 120*x^2 - 160*x + 48
sage: P.charpoly()
x^10 - 15*x^8 + 75*x^6 - 24*x^5 - 165*x^4 + 120*x^3 + 120*x^2 - 160*x + 48
sage: P.characteristic_polynomial(laplacian=True)
x^10 - 30*x^9 + 390*x^8 - 2880*x^7 + 13305*x^6 -
39882*x^5 + 77640*x^4 - 94800*x^3 + 66000*x^2 - 20000*x
"""
if laplacian:
return self.kirchhoff_matrix(vertices=list(self)).charpoly(var=var)
else:
return self.adjacency_matrix(vertices=list(self)).charpoly(var=var)
# alias, consistent with linear algebra code
charpoly = characteristic_polynomial
def eigenvectors(self, laplacian=False):
r"""
Return the *right* eigenvectors of the adjacency matrix of the graph.
INPUT:
- ``laplacian`` -- boolean (default: ``False``); if ``True``, use the
Laplacian matrix (see :meth:`kirchhoff_matrix`)
OUTPUT:
A list of triples. Each triple begins with an eigenvalue of the
adjacency matrix of the graph. This is followed by a list of
eigenvectors for the eigenvalue, when the eigenvectors are placed on the
right side of the matrix. Together, the eigenvectors form a basis for
the eigenspace. The triple concludes with the algebraic multiplicity of
the eigenvalue.
For some graphs, the exact eigenspaces provided by :meth:`eigenspaces`
provide additional insight into the structure of the eigenspaces.
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.eigenvectors()
[(3, [
(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
], 1), (-2, [
(1, 0, 0, 0, -1, -1, -1, 0, 1, 1),
(0, 1, 0, 0, -1, 0, -2, -1, 1, 2),
(0, 0, 1, 0, -1, 1, -1, -2, 0, 2),
(0, 0, 0, 1, -1, 1, 0, -1, -1, 1)
], 4), (1, [
(1, 0, 0, 0, 0, 1, -1, 0, 0, -1),
(0, 1, 0, 0, 0, -1, 1, -1, 0, 0),
(0, 0, 1, 0, 0, 0, -1, 1, -1, 0),
(0, 0, 0, 1, 0, 0, 0, -1, 1, -1),
(0, 0, 0, 0, 1, -1, 0, 0, -1, 1)
], 5)]
Eigenspaces for the Laplacian should be identical since the Petersen
graph is regular. However, since the output also contains the
eigenvalues, the two outputs are slightly different::
sage: P.eigenvectors(laplacian=True)
[(0, [
(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
], 1), (5, [
(1, 0, 0, 0, -1, -1, -1, 0, 1, 1),
(0, 1, 0, 0, -1, 0, -2, -1, 1, 2),
(0, 0, 1, 0, -1, 1, -1, -2, 0, 2),
(0, 0, 0, 1, -1, 1, 0, -1, -1, 1)
], 4), (2, [
(1, 0, 0, 0, 0, 1, -1, 0, 0, -1),
(0, 1, 0, 0, 0, -1, 1, -1, 0, 0),
(0, 0, 1, 0, 0, 0, -1, 1, -1, 0),
(0, 0, 0, 1, 0, 0, 0, -1, 1, -1),
(0, 0, 0, 0, 1, -1, 0, 0, -1, 1)
], 5)]
::
sage: C = graphs.CycleGraph(8)
sage: C.eigenvectors()
[(2, [
(1, 1, 1, 1, 1, 1, 1, 1)
], 1), (-2, [
(1, -1, 1, -1, 1, -1, 1, -1)
], 1), (0, [
(1, 0, -1, 0, 1, 0, -1, 0),
(0, 1, 0, -1, 0, 1, 0, -1)
], 2), (-1.4142135623..., [(1, 0, -1, 1.4142135623..., -1, 0, 1, -1.4142135623...), (0, 1, -1.4142135623..., 1, 0, -1, 1.4142135623..., -1)], 2), (1.4142135623..., [(1, 0, -1, -1.4142135623..., -1, 0, 1, 1.4142135623...), (0, 1, 1.4142135623..., 1, 0, -1, -1.4142135623..., -1)], 2)]
A digraph may have complex eigenvalues. Previously, the complex parts of
graph eigenvalues were being dropped. For a 3-cycle, we have::
sage: T = DiGraph({0:[1], 1:[2], 2:[0]})
sage: T.eigenvectors()
[(1, [
(1, 1, 1)
], 1), (-0.5000000000... - 0.8660254037...*I, [(1, -0.5000000000... - 0.8660254037...*I, -0.5000000000... + 0.8660254037...*I)], 1), (-0.5000000000... + 0.8660254037...*I, [(1, -0.5000000000... + 0.8660254037...*I, -0.5000000000... - 0.8660254037...*I)], 1)]
"""
if laplacian:
M = self.kirchhoff_matrix(vertices=list(self))
else:
M = self.adjacency_matrix(vertices=list(self))
return M.right_eigenvectors()
def eigenspaces(self, laplacian=False):
r"""
Return the *right* eigenspaces of the adjacency matrix of the graph.
INPUT:
- ``laplacian`` -- boolean (default: ``False``); if ``True``, use the
Laplacian matrix (see :meth:`kirchhoff_matrix`)
OUTPUT:
A list of pairs. Each pair is an eigenvalue of the adjacency matrix of
the graph, followed by the vector space that is the eigenspace for that
eigenvalue, when the eigenvectors are placed on the right of the matrix.
For some graphs, some of the eigenspaces are described exactly by vector
spaces over a :func:`~sage.rings.number_field.number_field.NumberField`.
For numerical eigenvectors use :meth:`eigenvectors`.
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.eigenspaces()
[
(3, Vector space of degree 10 and dimension 1 over Rational Field
User basis matrix:
[1 1 1 1 1 1 1 1 1 1]),
(-2, Vector space of degree 10 and dimension 4 over Rational Field
User basis matrix:
[ 1 0 0 0 -1 -1 -1 0 1 1]
[ 0 1 0 0 -1 0 -2 -1 1 2]
[ 0 0 1 0 -1 1 -1 -2 0 2]
[ 0 0 0 1 -1 1 0 -1 -1 1]),
(1, Vector space of degree 10 and dimension 5 over Rational Field
User basis matrix:
[ 1 0 0 0 0 1 -1 0 0 -1]
[ 0 1 0 0 0 -1 1 -1 0 0]
[ 0 0 1 0 0 0 -1 1 -1 0]
[ 0 0 0 1 0 0 0 -1 1 -1]
[ 0 0 0 0 1 -1 0 0 -1 1])
]
Eigenspaces for the Laplacian should be identical since the Petersen
graph is regular.
text, charset, errors="replace"):
""" convert text for mail to unicode"""
if text is None:
text = ""
if PY2:
if isinstance(text, str):
if charset is None:
text = unicode(text, "utf-8", errors)
else:
text = unicode(text, charset, errors)
else:
raise Exception("Unsupported mail text type %s" % type(text))
return text.encode("utf-8")
else:
if isinstance(text, bytes):
return text.decode("utf-8")
return text
def get_charset(self, message):
charset = message.get_content_charset()
return charset
def get_mailboxes(self):
""" Query the mail database for mailbox names """
if self.static_names:
# statically defined mailbox names
self.connection.mailbox_names = self.static_names
return self.static_names.keys()
mailboxes_list = self.connection.list()
self.connection.mailbox_names = dict()
mailboxes = list()
for item in mailboxes_list[1]:
    item = item.strip()
if not "NOSELECT" in item.upper():
sub_items = item.split('"')
sub_items = [
sub_item for sub_item in sub_items if len(sub_item.strip()) > 0
]
# mailbox = sub_items[len(sub_items) -1]
mailbox = sub_items[-1].strip()
# remove unwanted characters and store original names
# Don't allow leading non alphabetic characters
mailbox_name = re.sub(
    r"^[_0-9]*", "", re.sub(r"[^_\w]", "", re.sub(r"[/ ]", "_", mailbox))
)
mailboxes.append(mailbox_name)
self.connection.mailbox_names[mailbox_name] = mailbox
return mailboxes
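# Illustrative sanitization (hypothetical folder name): an IMAP folder such as
# "INBOX/Sent Items" maps to the table name "INBOX_Sent_Items"; slashes and
# spaces become underscores, other non-word characters are dropped, and any
# leading digits or underscores are stripped. The native name is preserved in
# self.connection.mailbox_names["INBOX_Sent_Items"].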
def get_query_mailbox(self, query):
nofield = True
tablename = None
attr = query
while nofield:
if hasattr(attr, "first"):
attr = attr.first
if isinstance(attr, Field):
return attr.tablename
elif isinstance(attr, Query):
pass
else:
return None
else:
return None
return tablename
def is_flag(self, flag):
if self.search_fields.get(flag, None) in self.flags.values():
return True
else:
return False
def define_tables(self, mailbox_names=None):
"""
Auto-create common IMAP fields.
This function creates field definitions "statically",
meaning that custom fields as in other adapters are
not supported; definitions are handled on a service/mode
basis (local syntax for Gmail(r), Ymail(r)).
Returns a dictionary with tablename, server native mailbox name
pairs.
"""
if mailbox_names:
# optional statically declared mailboxes
self.static_names = mailbox_names
else:
self.static_names = None
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
names = self.connection.mailbox_names.keys()
for name in names:
self.db.define_table(
"%s" % name,
Field("uid", writable=False),
Field("created", "datetime", writable=False),
Field("content", "text", writable=False),
Field("to", writable=False),
Field("cc", writable=False),
Field("bcc", writable=False),
Field("sender", writable=False),
Field("size", "integer", writable=False),
Field("subject", writable=False),
Field("mime", writable=False),
Field("email", "text", writable=False, readable=False),
Field("attachments", "text", writable=False, readable=False),
Field("encoding", writable=False),
Field("answered", "boolean"),
Field("deleted", "boolean"),
Field("draft", "boolean"),
Field("flagged", "boolean"),
Field("recent", "boolean", writable=False),
Field("seen", "boolean"),
)
# Set a special _mailbox attribute for storing
# native mailbox names
self.db[name].mailbox = self.connection.mailbox_names[name]
# decode quoted printable
self.db[name].to.represent = self.db[name].cc.represent = self.db[
name
].bcc.represent = self.db[name].sender.represent = self.db[
name
].subject.represent = self.header_represent
# Set the db instance mailbox collections
self.db.mailboxes = self.connection.mailbox_names
return self.db.mailboxes
def create_table(self, *args, **kwargs):
# not implemented
# but required by DAL
pass
def select(self, query, fields, attributes):
""" Searches and Fetches records and return web2py rows
"""
# move this statement elsewhere (upper-level)
if use_common_filters(query):
query = self.common_filter(query, [self.get_query_mailbox(query), ])
import email
# get records from imap server with search + fetch
# convert results to a dictionary
tablename = None
fetch_results = list()
if isinstance(query, Query):
tablename = self.get_table(query)._dalname
mailbox = self.connection.mailbox_names.get(tablename, None)
if mailbox is None:
raise ValueError("Mailbox name not found: %s" % mailbox)
else:
# select with readonly
result, selected = self.connection.select(mailbox, True)
if result != "OK":
raise Exception("IMAP error: %s" % selected)
self.mailbox_size = int(selected[0])
search_query = "(%s)" % str(query).strip()
search_result = self.connection.uid("search", None, search_query)
# Normal IMAP response OK is assumed (change this)
if search_result[0] == "OK":
# For "light" remote server responses just get the first
# ten records (change for non-experimental implementation)
# However, light responses are not guaranteed with this
# approach, just fewer messages.
limitby = attributes.get("limitby", None)
messages_set = search_result[1][0].split()
# descending order
messages_set.reverse()
if limitby is not None:
# TODO: orderby, asc/desc, limitby from complete message set
messages_set = messages_set[int(limitby[0]): int(limitby[1])]
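                        # e.g. limitby=(0, 10) keeps the ten most recent UIDs,
                        # since the set was reversed above (hypothetical values)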
# keep the requests small for header/flags
if any(
[
(field.name in ["content", "size", "attachments", "email"])
for field in fields
]
):
imap_fields = "(RFC822 FLAGS)"
else:
imap_fields = "(RFC822.HEADER FLAGS)"
if len(messages_set) > 0:
# create fetch results object list
                        # fetch each remote message and store it in memory
# (change to multi-fetch command syntax for faster
# transactions)
for uid in messages_set:
# fetch the RFC822 message body
typ, data = self.connection.uid("fetch", uid, imap_fields)
if typ == "OK":
fr = {
"message": int(data[0][0].split()[0]),
"uid": long(uid),
"email": email.message_from_string(data[0][1]),
"raw_message": data[0][1],
}
fr["multipart"] = fr["email"].is_multipart()
# fetch flags for the message
if PY2:
fr["flags"] = self.driver.ParseFlags(data[1])
else:
fr["flags"] = self.driver.ParseFlags(
bytes(data[1], "utf-8")
)
fetch_results.append(fr)
else:
# error retrieving the message body
raise Exception(
"IMAP error retrieving the body: %s" % data
)
else:
raise Exception("IMAP search error: %s" % search_result[1])
elif isinstance(query, (Expression, basestring)):
raise NotImplementedError()
else:
raise TypeError("Unexpected query type")
imapqry_dict = {}
imapfields_dict = {}
if len(fields) == 1 and isinstance(fields[0], SQLALL):
allfields = True
elif len(fields) == 0:
allfields = True
else:
allfields = False
if allfields:
colnames = [
"%s.%s" % (tablename, field) for field in self.search_fields.keys()
]
else:
colnames = [field.longname for field in fields]
for k in colnames:
imapfields_dict[k] = k
imapqry_list = list()
imapqry_array = list()
for fr in fetch_results:
attachments = []
content = []
size = 0
n = int(fr["message"])
item_dict = dict()
message = fr["email"]
uid = fr["uid"]
charset = self.get_charset(message)
flags = fr["flags"]
raw_message = fr["raw_message"]
# Return messages data mapping static fields
# and fetched results. Mapping should be made
# outside the select function (with auxiliary
# instance methods)
            # pending: search flag states through the email message
            # instances for correct output
# preserve subject encoding (ASCII/quoted printable)
if "%s.id" % tablename in colnames:
item_dict["%s.id" % tablename] = n
if "%s.created" % tablename in colnames:
item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
if "%s.uid" % tablename in colnames:
item_dict["%s.uid" % tablename] = uid
if "%s.sender" % tablename in colnames:
# If there is no encoding found in the message header
# force utf-8 replacing characters (change this to
# module's defaults). Applies to .sender, .to, .cc and .bcc fields
item_dict["%s.sender" % tablename] = message["From"]
if "%s.to" % tablename in colnames:
item_dict["%s.to" % tablename] = message["To"]
if "%s.cc" % tablename in colnames:
if "Cc" in message.keys():
item_dict["%s.cc" % tablename] = message["Cc"]
else:
item_dict["%s.cc" % tablename] = ""
if "%s.bcc" % tablename in colnames:
if "Bcc" in message.keys():
item_dict["%s.bcc" % tablename] = message["Bcc"]
else:
item_dict["%s.bcc" % tablename] = ""
if "%s.deleted" % tablename in colnames:
item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
if "%s.draft" % tablename in colnames:
item_dict["%s.draft" % tablename] = "\\Draft" in flags
if "%s.flagged" % tablename in colnames:
item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
if "%s.recent" % tablename in colnames:
item_dict["%s.recent" % tablename] = "\\Recent" in flags
if "%s.seen" % tablename in colnames:
item_dict["%s.seen" % tablename] = "\\Seen" in flags
if "%s.subject" % tablename in colnames:
item_dict["%s.subject" % tablename] = message["Subject"]
if "%s.answered" % tablename in colnames:
item_dict["%s.answered" % tablename] = "\\Answered" in flags
if "%s.mime" % tablename in colnames:
item_dict["%s.mime" % tablename] = message.get_content_type()
if "%s.encoding" % tablename in colnames:
item_dict["%s.encoding" % tablename] = charset
# Here goes the whole RFC822 body as an email instance
# for controller side custom processing
# The message is stored as a raw string
# >> email.message_from_string(raw string)
# returns a Message object for enhanced object processing
if "%s.email" % tablename in colnames:
# WARNING: no encoding performed (raw message)
item_dict["%s.email" % tablename] = raw_message
            # Size measured as suggested in a Velocity Reviews post
            # by <NAME>: "how to get size of email attachment"
            # Note: len() and the server's RFC822.SIZE report don't match;
            # retrieving the server size for representation would add a new
            # fetch transaction to the process
for part in message.walk():
maintype = part.get_content_maintype()
if ("%s.attachments" % tablename in colnames) or (
"%s.content" % tablename in colnames
):
payload = part.get_payload(decode=True)
if payload:
filename = part.get_filename()
values = {"mime": part.get_content_type()}
if (filename or not "text" in maintype) and (
"%s.attachments" % tablename in colnames
):
values.update(
{
"payload": payload,
"filename": filename,
"encoding": part.get_content_charset(),
<reponame>howardpchen/pynetdicom<filename>pynetdicom/acse.py
"""
ACSE service provider
"""
import logging
from pynetdicom.pdu_primitives import (
A_ASSOCIATE, A_RELEASE, A_ABORT, A_P_ABORT,
AsynchronousOperationsWindowNegotiation,
SOPClassCommonExtendedNegotiation,
SOPClassExtendedNegotiation,
UserIdentityNegotiation,
)
from pynetdicom.presentation import (
negotiate_as_requestor, negotiate_as_acceptor
)
from pynetdicom.utils import pretty_bytes
from pynetdicom._globals import APPLICATION_CONTEXT_NAME
LOGGER = logging.getLogger('pynetdicom.acse')
class ACSE(object):
"""The Association Control Service Element (ACSE) service provider.
The ACSE protocol handles association establishment, normal release of an
association and the abnormal release of an association.
Attributes
----------
acse_timeout : int
The maximum time (in seconds) to wait for association related PDUs
from the peer.
"""
def __init__(self, acse_timeout=30):
"""Create the ACSE service provider.
Parameters
----------
acse_timeout : int, optional
The maximum time (in seconds) to wait for A-ASSOCIATE related PDUs
from the peer (default: 30)
"""
self.acse_timeout = acse_timeout
@staticmethod
def _check_async_ops(assoc):
"""Check the user's response to an Asynchronous Operations request.
Parameters
----------
assoc : association.Association
The Association instance that received the Asynchronous Operations
Window Negotiation item in an A-ASSOCIATE (request) primitive.
Returns
-------
pdu_primitives.AsynchronousOperationsWindowNegotiation or None
If the `AE.on_async_ops_window` callback hasn't been implemented
then returns None, otherwise returns an
AsynchronousOperationsWindowNegotiation item with the default
values for the number of operations invoked/performed (1, 1).
"""
# pylint: disable=broad-except
try:
# Response is always ignored as async ops is not supported
_ = assoc.ae.on_async_ops_window(
*assoc.requestor.asynchronous_operations
)
except NotImplementedError:
return None
except Exception as exc:
LOGGER.error(
"Exception raised in user's 'on_async_ops_window' "
"implementation"
)
LOGGER.exception(exc)
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 1
item.maximum_number_operations_performed = 1
return item
@staticmethod
def _check_sop_class_common_extended(assoc):
"""Check the user's response to a SOP Class Common Extended request.
Parameters
----------
assoc : association.Association
The Association instance that received one or more SOP Class
Common Extended Negotiation items in an A-ASSOCIATE (request)
primitive.
Returns
-------
dict
The {SOP Class UID : SOPClassCommonExtendedNegotiation} items for
the accepted SOP Class Common Extended negotiation items.
"""
# pylint: disable=broad-except
try:
rsp = assoc.ae.on_sop_class_common_extended(
assoc.requestor.sop_class_common_extended
)
except Exception as exc:
LOGGER.error(
"Exception raised in user's 'on_sop_class_common_extended' "
"implementation"
)
LOGGER.exception(exc)
return {}
rsp = {
uid:ii for uid, ii in rsp.items()
if isinstance(ii, SOPClassCommonExtendedNegotiation)
}
return rsp
@staticmethod
def _check_sop_class_extended(assoc):
"""Check the user's response to a SOP Class Extended request.
Parameters
----------
assoc : association.Association
The Association instance that received one or more SOP Class
Extended Negotiation items in an A-ASSOCIATE (request) primitive.
Returns
-------
list of pdu_primitives.SOPClassExtendedNegotiation
The SOP Class Extended Negotiation items to be sent in response
"""
# pylint: disable=broad-except
try:
user_response = assoc.ae.on_sop_class_extended(
assoc.requestor.sop_class_extended
)
except Exception as exc:
user_response = None
LOGGER.error(
"Exception raised in user's 'on_sop_class_extended' "
"implementation"
)
LOGGER.exception(exc)
if not isinstance(user_response, (type(None), dict)):
LOGGER.error(
"Invalid type returned by user's 'on_sop_class_extended' "
"implementation"
)
user_response = None
if user_response is None:
return []
items = []
for sop_class, app_info in user_response.items():
try:
item = SOPClassExtendedNegotiation()
item.sop_class_uid = sop_class
item.service_class_application_information = app_info
items.append(item)
except Exception as exc:
LOGGER.error(
"Unable to set the SOP Class Extended Negotiation "
"response values for the SOP Class UID {}"
.format(sop_class)
)
LOGGER.exception(exc)
return items
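    # Hypothetical 'on_sop_class_extended' implementation sketch (names illustrative):
    #   def on_sop_class_extended(request):
    #       # `request` maps SOP Class UID -> service class application information
    #       return {uid: info for uid, info in request.items()}  # accept everything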
@staticmethod
def _check_user_identity(assoc):
"""Check the user's response to a User Identity request.
Parameters
----------
assoc : association.Association
The Association instance that received the User Identity
Negotiation item in an A-ASSOCIATE (request) primitive.
Returns
-------
bool
True if the user identity has been confirmed, False otherwise.
pdu_primitives.UserIdentityNegotiation or None
The negotiation response, if a positive response is requested,
otherwise None.
"""
# pylint: disable=broad-except
# The UserIdentityNegotiation (request) item
req = assoc.requestor.user_identity
try:
identity_verified, response = assoc.ae.on_user_identity(
req.user_identity_type,
req.primary_field,
req.secondary_field,
{
'requestor' : assoc.requestor.info,
}
)
except NotImplementedError:
# If the user hasn't implemented identity negotiation then
# default to accepting the association
return True, None
except Exception as exc:
# If the user has implemented identity negotiation but an exception
# occurred then reject the association
LOGGER.error("Exception in handling user identity negotiation")
LOGGER.exception(exc)
return False, None
if not identity_verified:
# Reject association as the user isn't authorised
return False, None
if req.user_identity_type in [3, 4, 5]:
if req.positive_response_requested and response is not None:
try:
rsp = UserIdentityNegotiation()
rsp.server_response = response
return True, rsp
except Exception as exc:
# > If the acceptor doesn't support user identification it
# > will accept the association without making a positive
# > response
LOGGER.error(
"Unable to set the User Identity Negotiation's "
"'server_response'"
)
LOGGER.exception(exc)
return True, None
return True, None
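    # Hypothetical 'on_user_identity' implementation sketch (values illustrative):
    #   def on_user_identity(user_id_type, primary_field, secondary_field, info):
    #       if user_id_type == 2:  # username and passcode
    #           return primary_field == b'admin' and secondary_field == b'secret', None
    #       return False, None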
@staticmethod
def is_aborted(assoc):
"""Return True if an A-ABORT or A-P-ABORT request has been received."""
primitive = assoc.dul.peek_next_pdu()
if primitive.__class__ in (A_ABORT, A_P_ABORT):
return True
return False
@staticmethod
def is_released(assoc):
"""Return True if an A-RELEASE request has been received."""
primitive = assoc.dul.peek_next_pdu()
if isinstance(primitive, A_RELEASE):
# Make sure this is an A-RELEASE *request* primitive
# response primitives have the Result field as 'affirmative'
if primitive.result == 'affirmative':
return False
return True
return False
def negotiate_association(self, assoc):
"""Perform an association negotiation as either the requestor or
acceptor.
Parameters
----------
assoc : association.Association
The Association instance to perform the negotiation for.
"""
if assoc.is_requestor:
self._negotiate_as_requestor(assoc)
elif assoc.is_acceptor:
self._negotiate_as_acceptor(assoc)
def _negotiate_as_acceptor(self, assoc):
"""Perform an association negotiation as the association acceptor.
Parameters
----------
assoc : association.Association
The Association instance to perform the negotiation for.
"""
# For convenience
assoc_rq = assoc.requestor.primitive
# Set the Requestor's AE Title
assoc.requestor.ae_title = assoc_rq.calling_ae_title
# If we reject association -> [result, source, diagnostic]
reject_assoc_rsd = []
# Calling AE Title not recognised
if (assoc.ae.require_calling_aet and assoc_rq.calling_ae_title
not in assoc.ae.require_calling_aet):
reject_assoc_rsd = [0x01, 0x01, 0x03]
# Called AE Title not recognised
if (assoc.ae.require_called_aet and assoc_rq.called_ae_title
!= assoc.ae.ae_title):
reject_assoc_rsd = [0x01, 0x01, 0x07]
## Extended Negotiation items
# User Identity Negotiation items
if assoc.requestor.user_identity:
is_valid, id_response = self._check_user_identity(assoc)
if not is_valid:
# Transient, ACSE related, no reason given
LOGGER.info("User identity failed verification")
reject_assoc_rsd = [0x02, 0x02, 0x01]
if id_response:
# Add the User Identity Negotiation (response) item
assoc.acceptor.add_negotiation_item(id_response)
# SOP Class Extended Negotiation items
for item in self._check_sop_class_extended(assoc):
assoc.acceptor.add_negotiation_item(item)
# SOP Class Common Extended Negotiation items
# Note: No response items are allowed
# pylint: disable=protected-access
assoc.acceptor._common_ext = (
self._check_sop_class_common_extended(assoc)
)
# pylint: enable=protected-access
# Asynchronous Operations Window Negotiation items
if assoc.requestor.asynchronous_operations != (1, 1):
async_rsp = self._check_async_ops(assoc)
# Add any Async Ops (response) item
if async_rsp:
assoc.acceptor.add_negotiation_item(async_rsp)
## DUL Presentation Related Rejections
# Maximum number of associations reached (local-limit-exceeded)
if len(assoc.ae.active_associations) > assoc.ae.maximum_associations:
reject_assoc_rsd = [0x02, 0x03, 0x02]
if reject_assoc_rsd:
# pylint: disable=no-value-for-parameter
self.send_reject(assoc, *reject_assoc_rsd)
assoc.debug_association_rejected(assoc.acceptor.primitive)
assoc.ae.on_association_rejected(assoc.acceptor.primitive)
assoc.kill()
return
## Negotiate Presentation Contexts
# SCP/SCU Role Selection Negotiation request items
# {SOP Class UID : (SCU role, SCP role)}
rq_roles = {
uid:(item.scu_role, item.scp_role)
for uid, item in assoc.requestor.role_selection.items()
}
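        # e.g. {'1.2.840.10008.5.1.4.1.1.2': (True, False)} proposes CT Image
        # Storage with the requestor acting as SCU only (illustrative entry)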
result, ac_roles = negotiate_as_acceptor(
assoc_rq.presentation_context_definition_list,
assoc.acceptor.supported_contexts,
rq_roles
)
# pylint: disable=protected-access
assoc._accepted_cx = [cx for cx in result if cx.result == 0x00]
assoc._rejected_cx = [cx for cx in result if cx.result != 0x00]
# pylint: enable=protected-access
# Add any SCP/SCU Role Selection Negotiation response items
for item in ac_roles:
assoc.acceptor.add_negotiation_item(item)
# Send the A-ASSOCIATE (accept) primitive
self.send_accept(assoc)
# Callbacks/Logging
assoc.debug_association_accepted(assoc.acceptor.primitive)
assoc.ae.on_association_accepted(assoc.acceptor.primitive)
# No valid presentation contexts, abort the association
if not assoc.accepted_contexts:
self.send_abort(assoc, 0x02)
assoc.kill()
return
        # Association established OK
assoc.is_established = True
def _negotiate_as_requestor(self, assoc):
"""Perform an association negotiation as the association requestor.
Parameters
----------
assoc : association.Association
The Association instance to perform the negotiation for.
"""
if not assoc.requestor.requested_contexts:
LOGGER.error(
"One or more requested presentation contexts must be set "
"prior to association negotiation"
)
assoc.kill()
return
# Build and send an A-ASSOCIATE (request) PDU to the peer
self.send_request(assoc)
# Wait for response
rsp = assoc.dul.receive_pdu(wait=True, timeout=assoc.acse_timeout)
# Association accepted or rejected
if isinstance(rsp, A_ASSOCIATE):
assoc.acceptor.primitive = rsp
# Accepted
if rsp.result == 0x00:
## Handle SCP/SCU Role Selection response
# Apply requestor's proposed SCP/SCU role selection (if any)
# to the requested contexts
rq_roles = {
uid:(ii.scu_role, ii.scp_role)
for uid, ii in assoc.requestor.role_selection.items()
}
if rq_roles:
for cx in assoc.requestor.requested_contexts:
try:
(cx.scu_role, cx.scp_role) = rq_roles[
cx.abstract_syntax
]
except KeyError:
pass
# Collate the acceptor's SCP/SCU role selection responses
ac_roles = {
uid:(ii.scu_role, ii.scp_role)
for uid, ii in assoc.acceptor.role_selection.items()
| |
from ticktick.helpers.hex_color import check_hex_color, generate_hex_color
from ticktick.managers.check_logged_in import logged_in
class ProjectManager:
"""
Handles all interactions for projects.
"""
def __init__(self, client_class):
self._client = client_class
self.access_token = self._client.access_token
def builder(self, name: str, color: str = 'random', project_type: str = 'TASK', folder_id: str = None) -> dict:
"""
Creates and returns a local project object. Helper method for [create][managers.projects.ProjectManager.create]
to make batch creating projects easier.
!!! note
The project [folder][managers.projects.ProjectManager.create_folder] must already exist prior to calling this method.
Arguments:
name: Desired name of the project - project names cannot be repeated
color: Hex color string. A random color will be generated if no color is specified.
project_type: 'TASK' or 'NOTE'
folder_id: The project folder id that the project should be placed under (if desired)
Returns:
A dictionary containing all the fields necessary to create a remote project.
Raises:
TypeError: If any of the types of the arguments are wrong.
ValueError: Project name already exists
ValueError: Project Folder corresponding to the ID does not exist.
ValueError: The hex string color inputted is invalid.
Argument rules are shared with [create][managers.projects.ProjectManager.create], so for more examples on how
to use the arguments see that method.
!!! example
```python
project_name = 'Work' # The name of our project
# Lets assume that we have a project group folder that already exists named 'Productivity'
productivity_folder = client.get_by_fields(name='Productivity', search='project_folders')
productivity_id = productivity_folder['id']
# Build the object
project_object = client.project.builder(project_name, folder_id=productivity_id)
```
??? success "Result"
```python
# The fields needed for a successful project creation are set.
{'name': 'Work', 'color': '#665122', 'kind': 'TASK', 'groupId': '5ffe11b7b04b356ce74d49da'}
```
"""
if not isinstance(name, str):
raise TypeError("Name must be a string")
if not isinstance(color, str) and color is not None:
raise TypeError("Color must be a string")
if not isinstance(project_type, str):
raise TypeError("Project type must be a string")
if not isinstance(folder_id, str) and folder_id is not None:
raise TypeError("Folder id must be a string")
# Go through self.state['lists'] and determine if the name already exists
id_list = self._client.get_by_fields(search='projects', name=name)
if id_list:
raise ValueError(f"Invalid Project Name '{name}' -> It Already Exists")
# Determine if parent list exists
if folder_id is not None:
parent = self._client.get_by_id(folder_id, search='project_folders')
if not parent:
raise ValueError(f"Parent Id {folder_id} Does Not Exist")
# Make sure project type is valid
if project_type != 'TASK' and project_type != 'NOTE':
raise ValueError(f"Invalid Project Type '{project_type}' -> Should be 'TASK' or 'NOTE'")
# Check color_id
if color == 'random':
color = generate_hex_color() # Random color will be generated
elif color is not None:
if not check_hex_color(color):
raise ValueError('Invalid Hex Color String')
return {'name': name, 'color': color, 'kind': project_type, 'groupId': folder_id}
def create(self, name, color: str = 'random', project_type: str = 'TASK', folder_id: str = None):
"""
Creates a project remotely. Supports single project creation or batch project creation.
Arguments:
name (str or list):
**Single Project** (str) : The desired name of the project. Project names cannot be repeated.
**Multiple Projects** (list) : A list of project objects created using the [builder][managers.projects.ProjectManager.builder] method.
color: Hex color string. A random color will be generated if no color is specified.
project_type: 'TASK' or 'NOTE'
folder_id: The project folder id that the project should be placed under (if desired)
Returns:
dict or list: **Single Project**: Return the dictionary of the object.
**Multiple Projects**: Return a list of dictionaries containing all the created objects in the same order as created.
Raises:
TypeError: If any of the types of the arguments are wrong.
ValueError: Project name already exists
ValueError: Project Folder corresponding to the ID does not exist.
ValueError: The hex string color inputted is invalid.
RuntimeError: The project(s) could not be created.
!!! example "Single Project"
=== "Just A Name"
```python
project = client.project.create('School')
```
??? success "Result"
```python
# The dictionary object of the created project is returned.
{'id': '5ffe1673e4b062d60dd29dc0', 'name': 'School', 'isOwner': True, 'color': '#51b9e3', 'inAll': True,
'sortOrder': 0, 'sortType': None, 'userCount': 1, 'etag': 'uerkdkcd',
'modifiedTime': '2021-01-12T21:36:51.890+0000', 'closed': None, 'muted': False,
'transferred': None, 'groupId': None, 'viewMode': None, 'notificationOptions': None,
'teamId': None, 'permission': None, 'kind': 'TASK'}
```
Our project is created.
[](https://postimg.cc/PpZQy4zV)
=== "Specify a Color"
A random color can be generated using [generate_hex_color][helpers.hex_color.generate_hex_color].
However, just not specifying a color will automatically generate a random color (as seen in the previous tab).
You can always specify the color that you want.
```python
project = client.project.create('Work', color='#86bb6d')
```
??? success "Result"
Our project is created with the color specified.
[](https://postimg.cc/5XvmJJRK)
=== "Changing the Project Type"
The default project type is for Tasks. To change the type to handle Notes, pass in the string 'NOTE'
```python
project = client.project.create('Hobbies', project_type='NOTE')
```
??? success "Result"
The project type is now for notes.
[](https://postimg.cc/rRcB1gtM)
=== "Creating Inside of A Folder"
!!! warning "Note For `folder_id`"
The project [folder][managers.projects.ProjectManager.create_folder] must already exist prior to calling this method.
```python
project_name = '<NAME>' # The name of our project
# Lets assume that we have a project group folder that already exists named 'Productivity'
productivity_folder = client.get_by_fields(name='Productivity', search='project_folders')
productivity_id = productivity_folder['id']
# Create the object
project_object = client.project.create(project_name, folder_id=productivity_id)
```
??? success "Result"
The project was created in the group folder.
[](https://postimg.cc/rd5RnCpK)
!!! example "Multiple Projects (batch)"
To create multiple projects, you will need to build the projects locally prior to calling the `create` method. This
can be accomplished using the [builder][managers.projects.ProjectManager.builder] method. Pass in a list of the locally created
project objects to create them remotely.
!!! warning "(Again About Folders)"
The project folders should already be created prior to calling the create method.
```python
# Lets assume that we have a project group folder that already exists named 'Productivity'
productivity_folder = client.get_by_fields(name='Productivity', search='project_folders')
productivity_id = productivity_folder['id']
# Names of our projects
name_1 = 'Reading'
name_2 = 'Writing'
# Build the local projects
project1 = client.project.builder(name_1, folder_id=productivity_id)
project2 = client.project.builder(name_2, folder_id=productivity_id)
project_list = [project1, project2]
# Create the projects
project_object = client.project.create(project_list)
```
??? success "Result"
When multiple projects are created, the dictionaries will be returned in a list in the same order as
the input.
```python
[{'id': '5ffe24a18f081003f3294c44', 'name': 'Reading', 'isOwner': True, 'color': '#6fcbdf',
'inAll': True, 'sortOrder': 0, 'sortType': None, 'userCount': 1, 'etag': 'qbj4z0gl',
'modifiedTime': '2021-01-12T22:37:21.823+0000', 'closed': None, 'muted': False, 'transferred': None,
'groupId': '5ffe11b7b04b356ce74d49da', 'viewMode': None, 'notificationOptions': None, 'teamId': None,
'permission': None, 'kind': 'TASK'},
{'id': '5ffe24a18f081003f3294c46', 'name': 'Writing', 'isOwner': True,
'color': '#9730ce', 'inAll': True, 'sortOrder': 0, 'sortType': None, 'userCount': 1, 'etag': 'u0loxz2v',
'modifiedTime': '2021-01-12T22:37:21.827+0000', 'closed': None, 'muted': False, 'transferred': None,
'groupId': '5ffe11b7b04b356ce74d49da', 'viewMode': None, 'notificationOptions': None, 'teamId': None,
'permission': None, 'kind': 'TASK'}]
```
[](https://postimg.cc/d7hdrHDC)
"""
if isinstance(name, list):
# If task name is a list, we will batch create objects
obj = name
batch = True
# Create the single project object
elif isinstance(name, str):
batch = False
obj = self.builder(name=name,
color=color,
project_type=project_type,
folder_id=folder_id)
obj = [obj]
else:
raise TypeError(f"Required Positional Argument Must Be A String or List of Project Objects")
url = self._client.BASE_URL + 'batch/project'
payload = {
'add': obj
}
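        # e.g. a batch create payload looks like (illustrative values):
        #   {'add': [{'name': 'Reading', 'color': '#6fcbdf', 'kind': 'TASK', 'groupId': None},
        #            {'name': 'Writing', 'color': '#9730ce', 'kind': 'TASK', 'groupId': None}]}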
response = self._client.http_post(url, json=payload, cookies=self._client.cookies)
self._client.sync()
if len(obj) == 1:
return self._client.get_by_id(self._client.parse_id(response), search='projects')
else:
etag = response['id2etag']
etag2 = list(etag.keys()) # Get the ids
items = [''] * len(obj) # Create enough spots for the objects
for proj_id in etag2:
found = self._client.get_by_id(proj_id, search='projects')
for original in obj:
if found['name'] == original['name']:
# Get the index of original
index = obj.index(original)
# Place found at the index in return list
items[index] = found
return items
def update(self, obj):
"""
Updates the passed project(s). Supports single project update and multiple project update (batch)
Make local changes to the project objects that you want to change first, then pass the actual objects to the method.
!!! info
Every potential update to a project's attributes have not been tested. See [Example `TickTick` Project Dictionary](projects.md#example-ticktick-project-dictionary) for
a listing of the fields present in a project.
Arguments:
obj (dict or list):
**Single Project (dict)**: The project dictionary.
            **Multiple Projects (list)**:
# Copyright (c) 2021 <NAME>. All Rights Reserved.
"""Emmental parse_args."""
import argparse
from argparse import ArgumentParser, Namespace
from typing import Any, Dict, Optional
from emmental.utils.utils import (
nullable_float,
nullable_int,
nullable_string,
str2bool,
str2dict,
)
def parse_args(parser: Optional[ArgumentParser] = None) -> ArgumentParser:
"""Parse the configuration from command line.
Args:
        parser: The external argument parser object, defaults to None.
Returns:
The updated argument parser object.
"""
if parser is None:
parser = argparse.ArgumentParser(
"Emmental configuration",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
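    # Hypothetical usage sketch:
    #   parser = parse_args()
    #   args = parser.parse_args(["--seed", "42", "--n_epochs", "3", "--lr", "0.01"])
    #   assert args.n_epochs == 3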
# Load meta configuration
meta_config = parser.add_argument_group("Meta configuration")
meta_config.add_argument(
"--seed",
type=nullable_int,
default=None,
help="Random seed for all numpy/torch/cuda operations in model and learning",
)
meta_config.add_argument(
"--verbose",
type=str2bool,
default=True,
help="Whether to print the log information",
)
meta_config.add_argument(
"--log_path", type=str, default="logs", help="Directory to save running log"
)
meta_config.add_argument(
"--use_exact_log_path",
type=str2bool,
default=False,
help="Whether to use the exact log directory",
)
# Load data configuration
data_config = parser.add_argument_group("Data configuration")
data_config.add_argument(
"--min_data_len", type=int, default=0, help="Minimal data length"
)
data_config.add_argument(
"--max_data_len",
type=int,
default=0,
help="Maximal data length (0 for no max_len)",
)
# Load model configuration
model_config = parser.add_argument_group("Model configuration")
model_config.add_argument(
"--model_path",
type=nullable_string,
default=None,
help="Path to pretrained model",
)
model_config.add_argument(
"--device",
type=int,
default=0,
help="Which device to use (-1 for cpu or gpu id (e.g., 0 for cuda:0))",
)
model_config.add_argument(
"--dataparallel",
type=str2bool,
default=True,
help="Whether to use dataparallel or not",
)
model_config.add_argument(
"--distributed_backend",
type=str,
default="nccl",
choices=["nccl", "gloo"],
help="Which backend to use for distributed training.",
)
# Learning configuration
learner_config = parser.add_argument_group("Learning configuration")
learner_config.add_argument(
"--optimizer_path",
type=nullable_string,
default=None,
help="Path to optimizer state",
)
learner_config.add_argument(
"--scheduler_path",
type=nullable_string,
default=None,
help="Path to lr scheduler state",
)
learner_config.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex)"
"instead of 32-bit",
)
learner_config.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
learner_config.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
learner_config.add_argument(
"--epochs_learned", type=int, default=0, help="Learning epochs learned"
)
learner_config.add_argument(
"--n_epochs", type=int, default=1, help="Total number of learning epochs"
)
learner_config.add_argument(
"--steps_learned", type=int, default=0, help="Learning steps learned"
)
learner_config.add_argument(
"--n_steps", type=int, default=None, help="Total number of learning steps"
)
learner_config.add_argument(
"--train_split",
nargs="+",
type=str,
default=["train"],
help="The split for training",
)
learner_config.add_argument(
"--valid_split",
nargs="+",
type=str,
default=["valid"],
help="The split for validation",
)
learner_config.add_argument(
"--test_split",
nargs="+",
type=str,
default=["test"],
help="The split for testing",
)
learner_config.add_argument(
"--ignore_index",
type=nullable_int,
default=None,
help="The ignore index, uses for masking samples",
)
learner_config.add_argument(
"--online_eval",
type=str2bool,
default=False,
help="Whether to perform online evaluation",
)
# Optimizer configuration
optimizer_config = parser.add_argument_group("Optimizer configuration")
optimizer_config.add_argument(
"--optimizer",
type=nullable_string,
default="adam",
choices=[
"asgd",
"adadelta",
"adagrad",
"adam",
"adamw",
"adamax",
"lbfgs",
"rms_prop",
"r_prop",
"sgd",
"sparse_adam",
"bert_adam",
None,
],
help="The optimizer to use",
)
optimizer_config.add_argument("--lr", type=float, default=1e-3, help="Learing rate")
optimizer_config.add_argument(
"--l2", type=float, default=0.0, help="l2 regularization"
)
optimizer_config.add_argument(
"--grad_clip", type=nullable_float, default=None, help="Gradient clipping"
)
optimizer_config.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Gradient accumulation steps",
)
# ASGD config
optimizer_config.add_argument(
"--asgd_lambd", type=float, default=0.0001, help="ASGD lambd"
)
optimizer_config.add_argument(
"--asgd_alpha", type=float, default=0.75, help="ASGD alpha"
)
optimizer_config.add_argument(
"--asgd_t0", type=float, default=1000000.0, help="ASGD t0"
)
# Adadelta config
optimizer_config.add_argument(
"--adadelta_rho", type=float, default=0.9, help="Adadelta rho"
)
optimizer_config.add_argument(
"--adadelta_eps", type=float, default=0.000001, help="Adadelta eps"
)
# Adagrad config
optimizer_config.add_argument(
"--adagrad_lr_decay", type=float, default=0, help="Adagrad lr_decay"
)
optimizer_config.add_argument(
"--adagrad_initial_accumulator_value",
type=float,
default=0,
help="Adagrad initial accumulator value",
)
optimizer_config.add_argument(
"--adagrad_eps", type=float, default=0.0000000001, help="Adagrad eps"
)
# Adam config
optimizer_config.add_argument(
"--adam_betas", nargs="+", type=float, default=(0.9, 0.999), help="Adam betas"
)
optimizer_config.add_argument(
"--adam_eps", type=float, default=1e-8, help="Adam eps"
)
optimizer_config.add_argument(
"--adam_amsgrad",
type=str2bool,
default=False,
help="Whether to use the AMSGrad variant of adam",
)
# AdamW config
optimizer_config.add_argument(
"--adamw_betas", nargs="+", type=float, default=(0.9, 0.999), help="AdamW betas"
)
optimizer_config.add_argument(
"--adamw_eps", type=float, default=1e-8, help="AdamW eps"
)
optimizer_config.add_argument(
"--adamw_amsgrad",
type=str2bool,
default=False,
help="Whether to use the AMSGrad variant of AdamW",
)
# Adamax config
optimizer_config.add_argument(
"--adamax_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="Adamax betas",
)
optimizer_config.add_argument(
"--adamax_eps", type=float, default=1e-8, help="Adamax eps"
)
# LBFGS config
optimizer_config.add_argument(
"--lbfgs_max_iter", type=int, default=20, help="LBFGS max iter"
)
optimizer_config.add_argument(
"--lbfgs_max_eval", type=nullable_int, default=None, help="LBFGS max eval"
)
optimizer_config.add_argument(
"--lbfgs_tolerance_grad", type=float, default=1e-07, help="LBFGS tolerance grad"
)
optimizer_config.add_argument(
"--lbfgs_tolerance_change",
type=float,
default=1e-09,
help="LBFGS tolerance change",
)
optimizer_config.add_argument(
"--lbfgs_history_size", type=int, default=100, help="LBFGS history size"
)
optimizer_config.add_argument(
"--lbfgs_line_search_fn",
type=nullable_string,
default=None,
help="LBFGS line search fn",
)
# RMSprop config
optimizer_config.add_argument(
"--rms_prop_alpha", type=float, default=0.99, help="RMSprop alpha"
)
optimizer_config.add_argument(
"--rms_prop_eps", type=float, default=1e-08, help="RMSprop eps"
)
optimizer_config.add_argument(
"--rms_prop_momentum", type=float, default=0, help="RMSprop momentum"
)
optimizer_config.add_argument(
"--rms_prop_centered", type=str2bool, default=False, help="RMSprop centered"
)
# Rprop config
optimizer_config.add_argument(
"--r_prop_etas", nargs="+", type=float, default=(0.5, 1.2), help="Rprop etas"
)
optimizer_config.add_argument(
"--r_prop_step_sizes",
nargs="+",
type=float,
default=(1e-06, 50),
help="Rprop step sizes",
)
# SGD config
optimizer_config.add_argument(
"--sgd_momentum", type=float, default=0, help="SGD momentum"
)
optimizer_config.add_argument(
"--sgd_dampening", type=float, default=0, help="SGD dampening"
)
optimizer_config.add_argument(
"--sgd_nesterov", type=str2bool, default=False, help="SGD nesterov"
)
# SparseAdam config
optimizer_config.add_argument(
"--sparse_adam_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="SparseAdam betas",
)
optimizer_config.add_argument(
"--sparse_adam_eps", type=float, default=1e-08, help="SparseAdam eps"
)
# BertAdam config
optimizer_config.add_argument(
"--bert_adam_betas",
nargs="+",
type=float,
default=(0.9, 0.999),
help="BertAdam betas",
)
optimizer_config.add_argument(
"--bert_adam_eps", type=float, default=1e-08, help="BertAdam eps"
)
# Scheduler configuration
scheduler_config = parser.add_argument_group("Scheduler configuration")
scheduler_config.add_argument(
"--lr_scheduler",
type=nullable_string,
default=None,
choices=[
None,
"linear",
"exponential",
"plateau",
"step",
"multi_step",
"cyclic",
"one_cycle",
"cosine_annealing",
],
help="Learning rate scheduler",
)
scheduler_config.add_argument(
"--lr_scheduler_step_unit",
type=str,
default="batch",
choices=["batch", "epoch"],
help="Learning rate scheduler step unit",
)
scheduler_config.add_argument(
"--lr_scheduler_step_freq",
type=int,
default=1,
help="Learning rate scheduler step freq",
)
scheduler_config.add_argument(
"--warmup_steps", type=float, default=None, help="Warm up steps"
)
scheduler_config.add_argument(
"--warmup_unit",
type=str,
default="batch",
choices=["batch", "epoch"],
help="Warm up unit",
)
scheduler_config.add_argument(
"--warmup_percentage", type=float, default=None, help="Warm up percentage"
)
scheduler_config.add_argument(
"--min_lr", type=float, default=0.0, help="Minimum learning rate"
)
scheduler_config.add_argument(
"--reset_state",
type=str2bool,
default=False,
help="Whether reset the state of the optimizer when lr changes",
)
scheduler_config.add_argument(
"--exponential_lr_scheduler_gamma",
type=float,
default=0.9,
help="Gamma for exponential lr scheduler",
)
# ReduceLROnPlateau lr scheduler config
scheduler_config.add_argument(
"--plateau_lr_scheduler_metric",
type=str,
default="model/train/all/loss",
help="Metric of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_mode",
type=str,
default="min",
choices=["min", "max"],
help="Mode of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_factor",
type=float,
default=0.1,
help="Factor of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_patience",
type=int,
default=10,
help="Patience for plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_threshold",
type=float,
default=0.0001,
help="Threshold of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_threshold_mode",
type=str,
default="rel",
choices=["rel", "abs"],
help="Threshold mode of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_cooldown",
type=int,
default=0,
help="Cooldown of plateau lr scheduler",
)
scheduler_config.add_argument(
"--plateau_lr_scheduler_eps",
type=float,
default=0.00000001,
help="Eps of plateau lr scheduler",
)
# Step lr scheduler config
scheduler_config.add_argument(
"--step_lr_scheduler_step_size",
type=int,
default=1,
help="Period of learning rate decay",
)
scheduler_config.add_argument(
"--step_lr_scheduler_gamma",
type=float,
default=0.1,
help="Multiplicative factor of learning rate decay",
)
scheduler_config.add_argument(
"--step_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
scheduler_config.add_argument(
"--multi_step_lr_scheduler_milestones",
nargs="+",
type=int,
default=[1000],
help="List of epoch indices. Must be increasing.",
)
scheduler_config.add_argument(
"--multi_step_lr_scheduler_gamma",
type=float,
default=0.1,
help="Multiplicative factor of learning rate decay",
)
scheduler_config.add_argument(
"--multi_step_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
# Cyclic lr scheduler config
scheduler_config.add_argument(
"--cyclic_lr_scheduler_base_lr",
nargs="+",
type=float,
default=0.001,
help="Base lr of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_max_lr",
nargs="+",
type=float,
default=0.1,
help="Max lr of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_step_size_up",
type=int,
default=2000,
help="Step size up of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_step_size_down",
type=nullable_int,
default=None,
help="Step size down of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_mode",
type=nullable_string,
default="triangular",
help="Mode of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_gamma",
type=float,
default=1.0,
help="Gamma of cyclic lr scheduler",
)
# TODO: support cyclic_lr_scheduler_scale_fn
scheduler_config.add_argument(
"--cyclic_lr_scheduler_scale_mode",
type=str,
default="cycle",
choices=["cycle", "iterations"],
help="Scale mode of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_cycle_momentum",
type=str2bool,
default=True,
help="Cycle momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_base_momentum",
nargs="+",
type=float,
default=0.8,
help="Base momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_max_momentum",
nargs="+",
type=float,
default=0.9,
help="Max momentum of cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cyclic_lr_scheduler_last_epoch",
type=int,
default=-1,
help="Last epoch of cyclic lr scheduler",
)
# One cycle lr scheduler config
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_max_lr",
nargs="+",
type=float,
default=0.1,
help="Max lr of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_pct_start",
type=float,
default=0.3,
help="Percentage start of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_anneal_strategy",
type=str,
default="cos",
choices=["cos", "linear"],
help="Anneal strategyr of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_cycle_momentum",
type=str2bool,
default=True,
help="Cycle momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_base_momentum",
nargs="+",
type=float,
default=0.85,
help="Base momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_max_momentum",
nargs="+",
type=float,
default=0.95,
help="Max momentum of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_div_factor",
type=float,
default=25,
help="Div factor of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_final_div_factor",
type=float,
default=1e4,
help="Final div factor of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--one_cycle_lr_scheduler_last_epoch",
type=int,
default=-1,
help="Last epoch of one cyclic lr scheduler",
)
scheduler_config.add_argument(
"--cosine_annealing_lr_scheduler_last_epoch",
type=int,
default=-1,
help="The index of last epoch",
)
<reponame>IrisSorin/mailcat
#!/usr/bin/python3
import aiohttp
import asyncio
import argparse
import base64
import datetime
import json
import logging
import random
import smtplib
import string as s
import sys
import threading
import re
from time import sleep
from typing import Dict, List
import dns.resolver
from requests_html import AsyncHTMLSession # type: ignore
from aiohttp_socks import ProxyConnector
# TODO: move to main function
uaLst = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.106 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36"
]
logging.basicConfig(format='%(message)s')
logger = logging.getLogger('mailcat')
logger.setLevel(100)
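# Level 100 is above CRITICAL (50), so logging stays effectively silent unless
# lowered (e.g. logger.setLevel(logging.DEBUG) when troubleshooting).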
def randstr(num):
return ''.join(random.sample((s.ascii_lowercase + s.ascii_uppercase + s.digits), num))
def sleeper(sList, s_min, s_max):
    # sleep a random interval between consecutive items (no sleep after the last one)
    for _ in range(len(sList) - 1):
        sleep(random.uniform(s_min, s_max))
def via_tor():
connector = ProxyConnector.from_url('socks5://127.0.0.1:9050')
session = aiohttp.ClientSession(connector=connector)
return session
def simple_session():
return aiohttp.ClientSession()
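# Hypothetical usage sketch: the checkers below take one of these session
# factories, e.g.
#   result = await proton("johndoe", simple_session)
#   result = await proton("johndoe", via_tor)  # route through a local Tor SOCKS proxy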
def code250(mailProvider, target):
    providerLst = []
    randPref = ''.join(random.sample(s.ascii_lowercase, 6))
    fromAddress = "{}@{}".format(randPref, mailProvider)
    targetMail = "{}@{}".format(target, mailProvider)
    records = dns.resolver.Resolver().resolve(mailProvider, 'MX')
    mxRecord = str(records[0].exchange)
try:
server = smtplib.SMTP()
server.set_debuglevel(0)
server.connect(mxRecord)
server.helo(server.local_hostname)
server.mail(fromAddress)
code, message = server.rcpt(targetMail)
if code == 250:
providerLst.append(targetMail)
return providerLst
except Exception as e:
logger.error(e, exc_info=True)
return []
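# Minimal sketch of what code250 does (SMTP RCPT probing, illustrative address):
#   code250("gmail.com", "johndoe")  # -> ["johndoe@gmail.com"] if the MX answers
#   250 to RCPT TO, or [] when the address is rejected or an error occurs.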
async def gmail(target, req_session_fun) -> Dict:
result = {}
gmailChkLst = code250("gmail.com", target)
if gmailChkLst:
result["Google"] = gmailChkLst[0]
await asyncio.sleep(0)
return result
async def yandex(target, req_session_fun) -> Dict:
result = {}
yaAliasesLst = ["yandex.by",
"yandex.kz",
"yandex.ua",
"yandex.com",
"ya.ru"]
yaChkLst = code250("yandex.ru", target)
if yaChkLst:
        yaAliasesLst = ["{}@{}".format(target, yaAlias) for yaAlias in yaAliasesLst]
yaMails = list(set(yaChkLst + yaAliasesLst))
result["Yandex"] = yaMails
await asyncio.sleep(0)
return result
async def proton(target, req_session_fun) -> Dict:
result = {}
protonLst = ["protonmail.com", "protonmail.ch", "pm.me"]
protonSucc = []
sreq = req_session_fun()
protonURL = "https://mail.protonmail.com/api/users/available?Name={}".format(target)
headers = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0",
"Accept": "application/vnd.protonmail.v1+json",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Referer": "https://mail.protonmail.com/create/new?language=en",
"x-pm-appversion": "Web_3.16.19",
"x-pm-apiversion": "3",
"Cache-Control": "no-cache",
"Pragma": "no-cache",
"DNT": "1", "Connection": "close"}
try:
chkProton = await sreq.get(protonURL, headers=headers, timeout=3)
async with chkProton:
if chkProton.status == 409:
resp = await chkProton.json()
exists = resp['Error']
if exists == "Username already used":
protonSucc = ["{}<EMAIL>(target, protodomain) for protodomain in protonLst]
except Exception as e:
logger.error(e, exc_info=True)
if protonSucc:
result["Proton"] = protonSucc
await sreq.close()
return result
async def mailRu(target, req_session_fun) -> Dict:
result = {}
# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0', 'Referer': 'https://account.mail.ru/signup?from=main&rf=auth.mail.ru'}
mailRU = ["mail.ru", "bk.ru", "inbox.ru", "list.ru", "internet.ru"]
mailRuSucc = []
sreq = req_session_fun()
for maildomain in mailRU:
try:
headers = {'User-Agent': random.choice(uaLst)}
mailruMail = "{}@{}".format(target, maildomain)
data = {'email': mailruMail}
chkMailRU = await sreq.post('https://account.mail.ru/api/v1/user/exists', headers=headers, data=data, timeout=5)
async with chkMailRU:
if chkMailRU.status == 200:
resp = await chkMailRU.json()
exists = resp['body']['exists']
if exists:
mailRuSucc.append(mailruMail)
except Exception as e:
logger.error(e, exc_info=True)
sleep(random.uniform(0.5, 2))
if mailRuSucc:
result["MailRU"] = mailRuSucc
await sreq.close()
return result
async def rambler(target, req_session_fun) -> Dict:  # ban risk
result = {}
ramblerMail = ["rambler.ru", "lenta.ru", "autorambler.ru", "myrambler.ru", "ro.ru", "rambler.ua"]
ramblerSucc = []
sreq = req_session_fun()
for maildomain in ramblerMail:
try:
targetMail = "{}@{}".format(target, maildomain)
# reqID = ''.join(random.sample((s.ascii_lowercase + s.ascii_uppercase + s.digits), 20))
reqID = randstr(20)
userAgent = random.choice(uaLst)
ramblerChkURL = "https://id.rambler.ru:443/jsonrpc"
# "Referer": "https://id.rambler.ru/login-20/mail-registration?back=https%3A%2F%2Fmail.rambler.ru%2F&rname=mail¶m=embed&iframeOrigin=https%3A%2F%2Fmail.rambler.ru",
headers = {"User-Agent": userAgent,
"Referer": "https://id.rambler.ru/login-20/mail-registration?utm_source=head"
"&utm_campaign=self_promo&utm_medium=header&utm_content=mail&rname=mail"
"&back=https%3A%2F%2Fmail.rambler.ru%2F%3Futm_source%3Dhead%26utm_campaign%3Dself_promo%26utm_medium%3Dheader%26utm_content%3Dmail"
"¶m=embed&iframeOrigin=https%3A%2F%2Fmail.rambler.ru&theme=mail-web",
"Content-Type": "application/json",
"Origin": "https://id.rambler.ru",
"X-Client-Request-Id": reqID}
ramblerJSON = {"method": "Rambler::Id::login_available", "params": [{"login": targetMail}], "rpc": "2.0"}
ramblerChk = await sreq.post(ramblerChkURL, headers=headers, json=ramblerJSON, timeout=5)
async with ramblerChk:
if ramblerChk.status == 200:
try:
resp = await ramblerChk.json(content_type=None)
exist = resp['result']['profile']['status']
if exist == "exist":
ramblerSucc.append(targetMail)
# print("[+] Success with {}".format(targetMail))
# else:
# print("[-]".format(ramblerChk.text))
except KeyError as e:
logger.error(e, exc_info=True)
sleep(random.uniform(4, 6)) # don't reduce
except Exception as e:
logger.error(e, exc_info=True)
if ramblerSucc:
result["Rambler"] = ramblerSucc
await sreq.close()
return result
async def tuta(target, req_session_fun) -> Dict:
result = {}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
tutaMail = ["tutanota.com", "tutanota.de", "tutamail.com", "tuta.io", "keemail.me"]
tutaSucc = []
sreq = req_session_fun()
for maildomain in tutaMail:
try:
targetMail = "{}@{}".format(target, maildomain)
tutaURL = "https://mail.tutanota.com/rest/sys/mailaddressavailabilityservice?_body="
tutaCheck = await sreq.get(
'{}%7B%22_format%22%3A%220%22%2C%22mailAddress%22%3A%22{}%40{}%22%7D'.format(tutaURL, target,
maildomain),
headers=headers, timeout=5)
async with tutaCheck:
if tutaCheck.status == 200:
resp = await tutaCheck.json()
exists = resp['available']
if exists == "0":
tutaSucc.append(targetMail)
sleep(random.uniform(2, 4))
except Exception as e:
logger.error(e, exc_info=True)
if tutaSucc:
result["Tutanota"] = tutaSucc
await sreq.close()
return result
async def yahoo(target, req_session_fun) -> Dict:
result = {}
yahooURL = "https://login.yahoo.com:443/account/module/create?validateField=yid"
yahooCookies = {"B": "10kh9jteu3edn&b=3&s=66", "AS": "v=1&s=wy5fFM96"} # 13 8
# yahooCookies = {"B": "{}&b=3&s=66".format(randstr(13)), "AS": "v=1&s={}".format(randstr(8))} # 13 8
headers = {"User-Agent": random.choice(uaLst),
"Accept": "*/*", "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate",
"Referer": "https://login.yahoo.com/account/create?.src=ym&.lang=en-US&.intl=us&.done=https%3A%2F%2Fmail.yahoo.com%2Fd&authMechanism=primary&specId=yidReg",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8", "X-Requested-With": "XMLHttpRequest",
"DNT": "1", "Connection": "close"}
# yahooPOST = {"specId": "yidReg", "crumb": randstr(11), "acrumb": randstr(8), "yid": target} # crumb: 11, acrumb: 8
yahooPOST = {"specId": "yidReg", "crumb": "bshN8x9qmfJ", "acrumb": "wy5fFM96", "yid": target}
sreq = req_session_fun()
try:
yahooChk = await sreq.post(yahooURL, headers=headers, cookies=yahooCookies, data=yahooPOST, timeout=5)
body = await yahooChk.text()
if '"IDENTIFIER_EXISTS"' in body:
result["Yahoo"] = <EMAIL>".<EMAIL>(target)
except Exception as e:
logger.error(e, exc_info=True)
await sreq.close()
return result
async def outlook(target, req_session_fun) -> Dict:
result = {}
liveSucc = []
sreq = AsyncHTMLSession(loop=asyncio.get_event_loop())
headers = {"User-Agent": random.choice(uaLst)}
liveLst = ["outlook.com", "hotmail.com"]
url_template = 'https://signup.live.com/?username={}@{}&uaid=f746d3527c20414d8c86fd7f96613d85&lic=1'
for maildomain in liveLst:
try:
liveChk = await sreq.get(url_template.format(target, maildomain), headers=headers)
await liveChk.html.arender(sleep=10)
if "suggLink" in liveChk.html.html:
liveSucc.append("{}@{}".format(target, maildomain))
except Exception as e:
logger.error(e, exc_info=True)
if liveSucc:
result["Live"] = liveSucc
await sreq.close()
return result
async def zoho(target, req_session_fun) -> Dict:
result = {}
headers = {
"User-Agent": "User-Agent: Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.7113.93 Safari/537.36",
"Referer": "https://www.zoho.com/",
"Origin": "https://www.zoho.com"
}
zohoURL = "https://accounts.zoho.com:443/accounts/validate/register.ac"
zohoPOST = {"username": target, "servicename": "VirtualOffice", "serviceurl": "/"}
sreq = req_session_fun()
try:
zohoChk = await sreq.post(zohoURL, headers=headers, data=zohoPOST, timeout=10)
async with zohoChk:
if zohoChk.status == 200:
# if "IAM.ERROR.USERNAME.NOT.AVAILABLE" in zohoChk.text:
# print("[+] Success with <EMAIL>".format(target))
resp = await zohoChk.json()
if resp['error']['username'] == 'This username is taken':
result["Zoho"] = <EMAIL>".<EMAIL>(<EMAIL>)
# print("[+] Success with <EMAIL>".format(target))
except Exception as e:
logger.error(e, exc_info=True)
await sreq.close()
return result
async def lycos(target, req_session_fun) -> Dict:
result = {}
lycosURL = "https://registration.lycos.com/usernameassistant.php?validate=1&m_AID=0&t=1625674151843&m_U={}&m_PR=27&m_SESSIONKEY=<KEY>".format(
target)
headers = {
"User-Agent": "User-Agent: Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.7113.93 Safari/537.36",
"Referer": "https://registration.lycos.com/register.php?m_PR=27&m_E=7za1N6E_h_nNSmIgtfuaBdmGpbS66MYX7lMDD-k9qlZCyq53gFjU_N12yVxL01F0R_mmNdhfpwSN6Kq6bNfiqQAA",
"X-Requested-With": "XMLHttpRequest"}
sreq = req_session_fun()
try:
lycosChk = await sreq.get(lycosURL, headers=headers, timeout=10)
async with lycosChk:
if lycosChk.status == 200:
resp = await lycosChk.text()
if resp == "Unavailable":
result["Lycos"] = <EMAIL>".<EMAIL>(target)
except Exception as e:
logger.error(e, exc_info=True)
await sreq.close()
return result
async def eclipso(target, req_session_fun) -> Dict:  # high ban risk + false positives afterwards
result = {}
eclipsoSucc = []
eclipsoLst = ["eclipso.eu",
"eclipso.de",
"eclipso.at",
"eclipso.ch",
"eclipso.be",
"eclipso.es",
"eclipso.it",
"eclipso.me",
"eclipso.nl",
"eclipso.email"]
headers = {'User-Agent': random.choice(uaLst),
'Referer': 'https://www.eclipso.eu/signup/tariff-5',
'X-Requested-With': 'XMLHttpRequest'}
sreq = req_session_fun()
for maildomain in eclipsoLst:
try:
targetMail = "{}@{}".format(target, maildomain)
eclipsoURL = "https://www.eclipso.eu/index.php?action=checkAddressAvailability&address={}".format(
targetMail)
chkEclipso = await sreq.get(eclipsoURL, headers=headers, timeout=5)
async with chkEclipso:
if chkEclipso.status == 200:
resp = await chkEclipso.text()
if '>0<' in resp:
eclipsoSucc.append(targetMail)
except Exception as e:
logger.error(e, exc_info=True)
sleep(random.uniform(2, 4))
if eclipsoSucc:
result["Eclipso"] = eclipsoSucc
await sreq.close()
return result
async def posteo(target, req_session_fun) -> Dict:
result = {}
posteoLst = [
"posteo.af",
"posteo.at",
"posteo.be",
"posteo.ca",
"posteo.ch",
"posteo.cl",
"posteo.co",
"posteo.co.uk",
"posteo.com.br",
"posteo.cr",
"posteo.cz",
"posteo.de",
"posteo.dk",
"posteo.ee",
"posteo.es",
"posteo.eu",
"posteo.fi",
"posteo.gl",
"posteo.gr",
"posteo.hn",
"posteo.hr",
"posteo.hu",
"posteo.ie",
"posteo.in",
"posteo.is",
"posteo.it",
"posteo.jp",
"posteo.la",
"posteo.li",
"posteo.lt",
"posteo.lu",
"posteo.me",
"posteo.mx",
"posteo.my",
"posteo.net",
"posteo.nl",
"posteo.no",
"posteo.nz",
"posteo.org",
"posteo.pe",
"posteo.pl",
"posteo.pm",
"posteo.pt",
"posteo.ro",
"posteo.ru",
"posteo.se",
"posteo.sg",
"posteo.si",
"posteo.tn",
"posteo.uk",
"posteo.us"]
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
'Referer': 'https://posteo.de/en/signup',
'X-Requested-With': 'XMLHttpRequest'}
sreq = req_session_fun()
try:
posteoURL = "https://posteo.de/users/new/check_username?user%5Busername%5D={}".format(target)
chkPosteo = await sreq.get(posteoURL, headers=headers, timeout=5)
async with chkPosteo:
if chkPosteo.status == 200:
resp = await chkPosteo.text()
if resp == "false":
result["Posteo"] = ["{}<EMAIL>".<EMAIL>(target),
"~50 aliases: https://posteo.de/en/help/which-domains-are-available-to-use-as-a-posteo-alias-address"]
except Exception as e:
logger.error(e, exc_info=True)
await sreq.close()
    return result
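# --- Illustrative driver (not part of the original module; the names below are
# hypothetical). A minimal sketch of how the checkers above could be run
# concurrently, assuming req_session_fun yields an aiohttp.ClientSession --
# whose .get/.post/.close and response .status/.text()/.json() match the API
# the checkers rely on.
import asyncio
import aiohttp

async def run_checks(target):
    checks = (zoho, lycos, eclipso, posteo)
    results = await asyncio.gather(*(check(target, aiohttp.ClientSession)
                                     for check in checks))
    merged = {}
    for partial in results:
        merged.update(partial)  # each checker returns a dict of hits
    return merged

# Example: asyncio.run(run_checks("someuser"))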
"Occult Claw+4",
902705: "Occult Claw+5",
902800: "Fire Claw",
902801: "Fire Claw+1",
902802: "Fire Claw+2",
902803: "Fire Claw+3",
902804: "Fire Claw+4",
902805: "Fire Claw+5",
902806: "Fire Claw+6",
902807: "Fire Claw+7",
902808: "Fire Claw+8",
902809: "Fire Claw+9",
902810: "Fire Claw+10",
902900: "Chaos Claw",
902901: "Chaos Claw+1",
902902: "Chaos Claw+2",
902903: "Chaos Claw+3",
902904: "Chaos Claw+4",
902905: "Chaos Claw+5",
903000: "Dragon Bone Fist",
903001: "Dragon Bone Fist+1",
903002: "Dragon Bone Fist+2",
903003: "Dragon Bone Fist+3",
903004: "Dragon Bone Fist+4",
903005: "Dragon Bone Fist+5",
903100: "Dragon Bone Fist",
903101: "Dragon Bone Fist+1",
903102: "Dragon Bone Fist+2",
903103: "Dragon Bone Fist+3",
903104: "Dragon Bone Fist+4",
903105: "Dragon Bone Fist+5",
904000: "Dark Hand",
904001: "Dark Hand+1",
904002: "Dark Hand+2",
904003: "Dark Hand+3",
904004: "Dark Hand+4",
904005: "Dark Hand+5",
1000000: "Spear",
1000001: "Spear+1",
1000002: "Spear+2",
1000003: "Spear+3",
1000004: "Spear+4",
1000005: "Spear+5",
1000006: "Spear+6",
1000007: "Spear+7",
1000008: "Spear+8",
1000009: "Spear+9",
1000010: "Spear+10",
1000011: "Spear+11",
1000012: "Spear+12",
1000013: "Spear+13",
1000014: "Spear+14",
1000015: "Spear+15",
1000100: "Crystal Spear",
1000101: "Crystal Spear+1",
1000102: "Crystal Spear+2",
1000103: "Crystal Spear+3",
1000104: "Crystal Spear+4",
1000105: "Crystal Spear+5",
1000200: "Lightning Spear",
1000201: "Lightning Spear+1",
1000202: "Lightning Spear+2",
1000203: "Lightning Spear+3",
1000204: "Lightning Spear+4",
1000205: "Lightning Spear+5",
1000300: "Raw Spear",
1000301: "Raw Spear+1",
1000302: "Raw Spear+2",
1000303: "Raw Spear+3",
1000304: "Raw Spear+4",
1000305: "Raw Spear+5",
1000400: "Magic Spear",
1000401: "Magic Spear+1",
1000402: "Magic Spear+2",
1000403: "Magic Spear+3",
1000404: "Magic Spear+4",
1000405: "Magic Spear+5",
1000406: "Magic Spear+6",
1000407: "Magic Spear+7",
1000408: "Magic Spear+8",
1000409: "Magic Spear+9",
1000410: "Magic Spear+10",
1000500: "Enchanted Spear",
1000501: "Enchanted Spear+1",
1000502: "Enchanted Spear+2",
1000503: "Enchanted Spear+3",
1000504: "Enchanted Spear+4",
1000505: "Enchanted Spear+5",
1000600: "Divine Spear",
1000601: "Divine Spear+1",
1000602: "Divine Spear+2",
1000603: "Divine Spear+3",
1000604: "Divine Spear+4",
1000605: "Divine Spear+5",
1000606: "Divine Spear+6",
1000607: "Divine Spear+7",
1000608: "Divine Spear+8",
1000609: "Divine Spear+9",
1000610: "Divine Spear+10",
1000700: "Occult Spear",
1000701: "Occult Spear+1",
1000702: "Occult Spear+2",
1000703: "Occult Spear+3",
1000704: "Occult Spear+4",
1000705: "Occult Spear+5",
1000800: "Fire Spear",
1000801: "Fire Spear+1",
1000802: "Fire Spear+2",
1000803: "Fire Spear+3",
1000804: "Fire Spear+4",
1000805: "Fire Spear+5",
1000806: "Fire Spear+6",
1000807: "Fire Spear+7",
1000808: "Fire Spear+8",
1000809: "Fire Spear+9",
1000810: "Fire Spear+10",
1000900: "Chaos Spear",
1000901: "Chaos Spear+1",
1000902: "Chaos Spear+2",
1000903: "Chaos Spear+3",
1000904: "Chaos Spear+4",
1000905: "Chaos Spear+5",
1001000: "Winged Spear",
1001001: "Winged Spear+1",
1001002: "Winged Spear+2",
1001003: "Winged Spear+3",
1001004: "Winged Spear+4",
1001005: "Winged Spear+5",
1001006: "Winged Spear+6",
1001007: "Winged Spear+7",
1001008: "Winged Spear+8",
1001009: "Winged Spear+9",
1001010: "Winged Spear+10",
1001011: "Winged Spear+11",
1001012: "Winged Spear+12",
1001013: "Winged Spear+13",
1001014: "Winged Spear+14",
1001015: "Winged Spear+15",
1001100: "Crystal Winged Spear",
1001101: "Crystal Winged Spear+1",
1001102: "Crystal Winged Spear+2",
1001103: "Crystal Winged Spear+3",
1001104: "Crystal Winged Spear+4",
1001105: "Crystal Winged Spear+5",
1001200: "Lightning Winged Spear",
1001201: "Lightning Winged Spear+1",
1001202: "Lightning Winged Spear+2",
1001203: "Lightning Winged Spear+3",
1001204: "Lightning Winged Spear+4",
1001205: "Lightning Winged Spear+5",
1001300: "Raw Winged Spear",
1001301: "Raw Winged Spear+1",
1001302: "Raw Winged Spear+2",
1001303: "Raw Winged Spear+3",
1001304: "Raw Winged Spear+4",
1001305: "Raw Winged Spear+5",
1001400: "Magic Winged Spear",
1001401: "Magic Winged Spear+1",
1001402: "Magic Winged Spear+2",
1001403: "Magic Winged Spear+3",
1001404: "Magic Winged Spear+4",
1001405: "Magic Winged Spear+5",
1001406: "Magic Winged Spear+6",
1001407: "Magic Winged Spear+7",
1001408: "Magic Winged Spear+8",
1001409: "Magic Winged Spear+9",
1001410: "Magic Winged Spear+10",
1001500: "Enchanted Winged Spear",
1001501: "Enchanted Winged Spear+1",
1001502: "Enchanted Winged Spear+2",
1001503: "Enchanted Winged Spear+3",
1001504: "Enchanted Winged Spear+4",
1001505: "Enchanted Winged Spear+5",
1001600: "Divine Winged Spear",
1001601: "Divine Winged Spear+1",
1001602: "Divine Winged Spear+2",
1001603: "Divine Winged Spear+3",
1001604: "Divine Winged Spear+4",
1001605: "Divine Winged Spear+5",
1001606: "Divine Winged Spear+6",
1001607: "Divine Winged Spear+7",
1001608: "Divine Winged Spear+8",
1001609: "Divine Winged Spear+9",
1001610: "Divine Winged Spear+10",
1001700: "Occult Winged Spear",
1001701: "Occult Winged Spear+1",
1001702: "Occult Winged Spear+2",
1001703: "Occult Winged Spear+3",
1001704: "Occult Winged Spear+4",
1001705: "Occult Winged Spear+5",
1001800: "Fire Winged Spear",
1001801: "Fire Winged Spear+1",
1001802: "Fire Winged Spear+2",
1001803: "Fire Winged Spear+3",
1001804: "Fire Winged Spear+4",
1001805: "Fire Winged Spear+5",
1001806: "Fire Winged Spear+6",
1001807: "Fire Winged Spear+7",
1001808: "Fire Winged Spear+8",
1001809: "Fire Winged Spear+9",
1001810: "Fire Winged Spear+10",
1001900: "Chaos Winged Spear",
1001901: "Chaos Winged Spear+1",
1001902: "Chaos Winged Spear+2",
1001903: "Chaos Winged Spear+3",
1001904: "Chaos Winged Spear+4",
1001905: "Chaos Winged Spear+5",
1002000: "Partizan",
1002001: "Partizan+1",
1002002: "Partizan+2",
1002003: "Partizan+3",
1002004: "Partizan+4",
1002005: "Partizan+5",
1002006: "Partizan+6",
1002007: "Partizan+7",
1002008: "Partizan+8",
1002009: "Partizan+9",
1002010: "Partizan+10",
1002011: "Partizan+11",
1002012: "Partizan+12",
1002013: "Partizan+13",
1002014: "Partizan+14",
1002015: "Partizan+15",
1002100: "Crystal Partizan",
1002101: "Crystal Partizan+1",
1002102: "Crystal Partizan+2",
1002103: "Crystal Partizan+3",
1002104: "Crystal Partizan+4",
1002105: "Crystal Partizan+5",
1002200: "Lightning Partizan",
1002201: "Lightning Partizan+1",
1002202: "Lightning Partizan+2",
1002203: "Lightning Partizan+3",
1002204: "Lightning Partizan+4",
1002205: "Lightning Partizan+5",
1002300: "Raw Partizan",
1002301: "Raw Partizan+1",
1002302: "Raw Partizan+2",
1002303: "Raw Partizan+3",
1002304: "Raw Partizan+4",
1002305: "Raw Partizan+5",
1002400: "Magic Partizan",
1002401: "Magic Partizan+1",
1002402: "Magic Partizan+2",
1002403: "Magic Partizan+3",
1002404: "Magic Partizan+4",
1002405: "Magic Partizan+5",
1002406: "Magic Partizan+6",
1002407: "Magic Partizan+7",
1002408: "Magic Partizan+8",
1002409: "Magic Partizan+9",
1002410: "Magic Partizan+10",
1002500: "Enchanted Partizan",
1002501: "Enchanted Partizan+1",
1002502: "Enchanted Partizan+2",
1002503: "Enchanted Partizan+3",
1002504: "Enchanted Partizan+4",
1002505: "Enchanted Partizan+5",
1002600: "Divine Partizan",
1002601: "Divine Partizan+1",
1002602: "Divine Partizan+2",
1002603: "Divine Partizan+3",
1002604: "Divine Partizan+4",
1002605: "Divine Partizan+5",
1002606: "Divine Partizan+6",
1002607: "Divine Partizan+7",
1002608: "Divine Partizan+8",
1002609: "Divine Partizan+9",
1002610: "Divine Partizan+10",
1002700: "Occult Partizan",
1002701: "Occult Partizan+1",
1002702: "Occult Partizan+2",
1002703: "Occult Partizan+3",
1002704: "Occult Partizan+4",
1002705: "Occult Partizan+5",
1002800: "Fire Partizan",
1002801: "Fire Partizan+1",
1002802: "Fire Partizan+2",
1002803: "Fire Partizan+3",
1002804: "Fire Partizan+4",
1002805: "Fire Partizan+5",
1002806: "Fire Partizan+6",
1002807: "Fire Partizan+7",
1002808: "Fire Partizan+8",
1002809: "Fire Partizan+9",
1002810: "Fire Partizan+10",
1002900: "Chaos Partizan",
1002901: "Chaos Partizan+1",
1002902: "Chaos Partizan+2",
1002903: "Chaos Partizan+3",
1002904: "Chaos Partizan+4",
1002905: "Chaos Partizan+5",
1003000: "Demon's Spear",
1003001: "Demon's Spear+1",
1003002: "Demon's Spear+2",
1003003: "Demon's Spear+3",
1003004: "Demon's Spear+4",
1003005: "Demon's Spear+5",
1003006: "Demon's Spear+6",
1003007: "Demon's Spear+7",
1003008: "Demon's Spear+8",
1003009: "Demon's Spear+9",
1003010: "Demon's Spear+10",
1003011: "Demon's Spear+11",
1003012: "Demon's Spear+12",
1003013: "Demon's Spear+13",
1003014: "Demon's Spear+14",
1003015: "Demon's Spear+15",
| |
4}, {"a": 5}, {"a": 6}]],
"expected": lambda: StructuredTensor.from_fields(
shape=[2, None], fields={
"a": ragged_factory_ops.constant([[1, 2, 3], [4, 5, 6]])})
},
{
# TypeSpec can be used to specify StructuredTensor shape.
"testcase_name": "MatrixOfDictWithTypeSpec",
"pyval": [[{"a": 1}, {"a": 2}, {"a": 3},],
[{"a": 4}, {"a": 5}, {"a": 6}]],
"type_spec": structured_tensor.StructuredTensorSpec([2, 3], {
"a": tensor_spec.TensorSpec(None, dtypes.int32)}),
"expected": lambda: StructuredTensor.from_fields(
shape=[2, 3], fields={"a": [[1, 2, 3], [4, 5, 6]]})
},
]) # pyformat: disable
def testPyvalConversion(self, pyval, expected, type_spec=None):
expected = expected() # Deferred init because it creates tensors.
actual = structured_tensor.StructuredTensor.from_pyval(pyval, type_spec)
self.assertAllEqual(actual, expected)
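    # Round-trip check: in eager mode, to_pyval() should reproduce the original pyval.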
if isinstance(actual, structured_tensor.StructuredTensor):
if context.executing_eagerly(): # to_pyval only available in eager.
self.assertEqual(actual.to_pyval(), pyval)
@parameterized.named_parameters([
dict(
testcase_name="NoFieldsRaggedRank0",
st=lambda: StructuredTensor.from_fields({}, (3,)),
expected=[{}, {}, {}]),
dict(
testcase_name="NoFieldsRaggedRank1",
st=lambda: StructuredTensor.from_fields(
{}, (2, None),
row_partitions=[
row_partition.RowPartition.from_row_lengths([3, 2])]),
expected=[[{}, {}, {}], [{}, {}]]),
dict(
testcase_name="NoFieldsRaggedRank2",
st=lambda: StructuredTensor.from_fields(
{}, (2, None, None),
row_partitions=[
row_partition.RowPartition.from_row_lengths([2, 1]),
row_partition.RowPartition.from_row_lengths([2, 3, 1])]),
expected=[[[{}, {}], [{}, {}, {}]], [[{}]]]),
dict(
testcase_name="NoFieldsRaggedRank2NoDicts",
st=lambda: StructuredTensor.from_fields(
{}, (1, None, None),
row_partitions=[
row_partition.RowPartition.from_row_lengths([2]),
row_partition.RowPartition.from_row_lengths([0, 0])]),
expected=[[[], []]]),
dict(
testcase_name="NestedStructTensorWithNoFields",
st=lambda: StructuredTensor.from_fields(
{
"foo": ragged_factory_ops.constant([[[], []]]),
"bar": StructuredTensor.from_fields(
{}, (1, None, None, None), row_partitions=[
row_partition.RowPartition.from_row_lengths([2]),
row_partition.RowPartition.from_row_lengths([0, 0]),
row_partition.RowPartition.from_row_lengths([]),
])
}, (1, None, None),),
expected=[[[], []]]),
]) # pyformat: disable
def testToPyval(self, st, expected):
if context.executing_eagerly(): # to_pyval only available in eager.
st = st() # Deferred init because it creates tensors.
self.assertEqual(st.to_pyval(), expected)
@parameterized.named_parameters([
dict(testcase_name="MissingKeys",
pyval=[{"a": [1, 2]}, {"b": [3, 4]}],
err=KeyError,
msg="'b'"),
dict(testcase_name="TypeSpecMismatch_DictKey",
pyval={"a": 1},
type_spec=structured_tensor.StructuredTensorSpec(
shape=[1],
field_specs={"b": tensor_spec.TensorSpec([1], dtypes.int32)}),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListDictKey",
pyval=[{"a": 1}],
type_spec=structured_tensor.StructuredTensorSpec(
shape=[1],
field_specs={"b": tensor_spec.TensorSpec([1], dtypes.int32)}),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_RankMismatch",
pyval=[{"a": 1}],
type_spec=structured_tensor.StructuredTensorSpec(
shape=[],
field_specs={"a": tensor_spec.TensorSpec([], dtypes.int32)}),
msg=r"Value at \(\) does not match typespec \(rank mismatch\)"),
dict(testcase_name="TypeSpecMismatch_Scalar",
pyval=0,
type_spec=structured_tensor.StructuredTensorSpec(
shape=[], field_specs={}),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListTensor",
pyval={"a": [[1]]},
type_spec=structured_tensor.StructuredTensorSpec(
shape=[],
field_specs={"a": tensor_spec.TensorSpec([], dtypes.int32)}),
msg=r"Value at \('a',\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListTensorDeep",
pyval={"a": {"b": [[1]]}},
type_spec=structured_tensor.StructuredTensorSpec(
shape=[],
field_specs={"a": structured_tensor.StructuredTensorSpec(
shape=[],
field_specs={"b": tensor_spec.TensorSpec([],
dtypes.int32)})}),
msg=r"Value at \('a', 'b'\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListTensorDeep_infer",
pyval={"a": [{"b": [[1]]}, {"b": [["c"]]}]},
type_spec=None,
msg=r"Error parsing path \('a', 'b'\)"),
dict(testcase_name="TypeSpecMismatch_ListTensorDeep_infer2",
pyval=[{"a": 1}, {"a": "c"}],
type_spec=None,
msg=r"Error parsing path \('a',\)"),
dict(testcase_name="TypeSpecMismatch_ListSparse",
pyval=[1, 2],
type_spec=sparse_tensor.SparseTensorSpec([None], dtypes.int32),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListStruct",
pyval=[[1]],
type_spec=structured_tensor.StructuredTensorSpec(
shape=[1, 1],
field_specs={"a": tensor_spec.TensorSpec([1, 1], dtypes.int32)}),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="InconsistentDictionaryDepth",
pyval=[{}, [{}]],
msg="Inconsistent depth of dictionaries"),
dict(testcase_name="FOO",
pyval=[[{}], 5],
msg="Expected dict or nested list/tuple of dict"),
]) # pyformat: disable
def testFromPyvalError(self, pyval, err=ValueError, type_spec=None, msg=None):
with self.assertRaisesRegex(err, msg):
structured_tensor.StructuredTensor.from_pyval(pyval, type_spec)
def testToPyvalRequiresEagerMode(self):
st = structured_tensor.StructuredTensor.from_pyval({"a": 5})
if not context.executing_eagerly():
with self.assertRaisesRegex(ValueError, "only supported in eager mode."):
st.to_pyval()
@parameterized.named_parameters([
(
"Rank0",
[],
),
(
"Rank1",
[5, 3],
),
(
"Rank2",
[5, 8, 3],
),
(
"Rank5",
[1, 2, 3, 4, 5],
),
])
def testRowPartitionsFromUniformShape(self, shape):
for rank in range(len(shape)):
partitions = structured_tensor._row_partitions_for_uniform_shape(
ops.convert_to_tensor(shape), rank)
self.assertLen(partitions, max(0, rank - 1))
if partitions:
self.assertAllEqual(shape[0], partitions[0].nrows())
for (dim, partition) in enumerate(partitions):
self.assertAllEqual(shape[dim + 1], partition.uniform_row_length())
@parameterized.named_parameters([
# For shapes: U = uniform dimension; R = ragged dimension.
dict(
testcase_name="Shape_UR_Rank2",
rt=[[1, 2], [], [3]],
rt_ragged_rank=1,
rank=2,
expected_row_lengths=[[2, 0, 1]]),
dict(
testcase_name="Shape_URR_Rank2",
rt=[[[1, 2], []], [[3]]],
rt_ragged_rank=2,
rank=2,
expected_row_lengths=[[2, 1]]),
dict(
testcase_name="Shape_URU_Rank2",
rt=[[[1], [2]], [[3]]],
rt_ragged_rank=1,
rank=2,
expected_row_lengths=[[2, 1]]),
dict(
testcase_name="Shape_URR_Rank3",
rt=[[[1, 2], []], [[3]]],
rt_ragged_rank=2,
rank=3,
expected_row_lengths=[[2, 1], [2, 0, 1]]),
dict(
testcase_name="Shape_URU_Rank3",
rt=[[[1], [2]], [[3]]],
rt_ragged_rank=1,
rank=3,
expected_row_lengths=[[2, 1], [1, 1, 1]]),
dict(
testcase_name="Shape_URRUU_Rank2",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=2,
expected_row_lengths=[[1]]),
dict(
testcase_name="Shape_URRUU_Rank3",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=3,
expected_row_lengths=[[1], [1]]),
dict(
testcase_name="Shape_URRUU_Rank4",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=4,
expected_row_lengths=[[1], [1], [1]]),
dict(
testcase_name="Shape_URRUU_Rank5",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=5,
expected_row_lengths=[[1], [1], [1], [2]]),
])
def testRowPartitionsForRaggedTensor(self, rt, rt_ragged_rank, rank,
expected_row_lengths):
rt = ragged_factory_ops.constant(rt, rt_ragged_rank)
partitions = structured_tensor._row_partitions_for_ragged_tensor(
rt, rank, dtypes.int64)
self.assertLen(partitions, rank - 1)
self.assertLen(partitions, len(expected_row_lengths))
for partition, expected in zip(partitions, expected_row_lengths):
self.assertAllEqual(partition.row_lengths(), expected)
@parameterized.named_parameters([
dict(
testcase_name="2D_0_1",
st=[[{"x": 1}, {"x": 2}], [{"x": 3}]],
outer_axis=0, inner_axis=1,
expected=[{"x": 1}, {"x": 2}, {"x": 3}]),
dict(
testcase_name="3D_0_1",
st=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
outer_axis=0, inner_axis=1,
expected=[[{"x": 1}, {"x": 2}], [{"x": 3}], [{"x": 4}]]),
dict(
testcase_name="3D_1_2",
st=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
outer_axis=1, inner_axis=2,
expected=[[{"x": 1}, {"x": 2}, {"x": 3}], [{"x": 4}]]),
dict(
testcase_name="3D_0_2",
st=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
outer_axis=0, inner_axis=2,
expected=[{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4}]),
dict(
testcase_name="4D_0_1",
st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
[[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
outer_axis=0, inner_axis=1,
expected=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]],
[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]),
dict(
testcase_name="4D_0_2",
st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
[[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
outer_axis=0, inner_axis=2,
expected=[[{"x": 1}, {"x": 2}], [{"x": 3}], [{"x": 4}],
[{"x": 5}], [{"x": 6}], [{"x": 7}]]),
dict(
testcase_name="4D_0_3",
st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
[[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
outer_axis=0, inner_axis=3,
expected=[{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4},
{"x": 5}, {"x": 6}, {"x": 7}]),
dict(
testcase_name="4D_1_2",
st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
[[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
outer_axis=1, inner_axis=2,
expected=[[[{"x": 1}, {"x": 2}], [{"x": 3}], [{"x": 4}]],
[[{"x": 5}], [{"x": 6}], [{"x": 7}]]]),
dict(
testcase_name="4D_1_3",
st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
[[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
outer_axis=1, inner_axis=3,
expected=[[{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4}],
[{"x": 5}, {"x": 6}, {"x": 7}]]),
dict(
testcase_name="4D_2_3",
st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
[[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
outer_axis=2, inner_axis=3,
expected=[[[{"x": 1}, {"x": 2}, {"x": 3}], [{"x": 4}]],
[[{"x": 5}], [{"x": 6}, {"x": 7}]]]),
]) # pyformat: disable
def testMergeDims(self, st, outer_axis, inner_axis, expected):
st = StructuredTensor.from_pyval(st)
result = st.merge_dims(outer_axis, inner_axis)
self.assertAllEqual(result, expected)
def testMergeDimsDetail_3D_0_1(self):
st = StructuredTensor.from_pyval(
[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]])
result = st.merge_dims(0, 1)
expected_shape = tensor_shape.TensorShape([3, None])
self.assertTrue(expected_shape.is_compatible_with(result.shape))
def testMergeDims_0_1(self):
rt = ragged_tensor.RaggedTensor.from_value_rowids(
array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1])
struct = StructuredTensor.from_fields({"r": rt}, [2])
struct_2 = struct.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
struct_3 = struct_2.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
self.assertLen(struct_3.row_partitions, 2)
merged = struct_3.merge_dims(0, 1)
self.assertLen(merged.row_partitions, 1)
def testMergeDimsError(self):
st = StructuredTensor.from_pyval([[[{"a": 5}]]])
with self.assertRaisesRegex(
ValueError, r"Expected outer_axis \(2\) to be less than "
r"or equal to inner_axis \(1\)"):
st.merge_dims(2, 1)
def testTupleFieldValue(self):
st = StructuredTensor.from_pyval({"a": 5, "b": {"c": [1, 2, 3]}})
self.assertAllEqual(st.field_value(("a",)), 5)
self.assertAllEqual(st.field_value(("b", "c")), [1, 2, 3])
expected = r"Field path \(.*a.*,.*b.*\) not found in .*"
with self.assertRaisesRegex(KeyError, expected):
st.field_value(("a", "b"))
@parameterized.named_parameters([
dict(
testcase_name="scalar_scalar_scalar",
st={"b": {"a": 5}},
source_path=("b", "a"),
new_field_name="new_field",
expected={"b": {"a": 5}, "new_field": 5},),
dict(
testcase_name="scalar_scalar_repeated",
st={"b": {"a": [5, 3]}},
source_path=("b", "a"),
new_field_name="new_field",
expected={"b": {"a": [5, 3]}, "new_field": [5, 3]}),
dict(
testcase_name="scalar_scalar_repeated2",
st={"b": {"a": [[7], [5, 3]]}},
source_path=("b", "a"),
new_field_name="new_field",
expected={"b": {"a": [[7], [5, 3]]}, "new_field": [[7], [5, 3]]}),
dict(
testcase_name="repeated_scalar_repeated",
st=[{"b": {"a": [7]}},
{"b": {"a": [5, 3]}}],
source_path=("b", "a"),
new_field_name="new_field",
expected=[{"b": {"a": [7]}, "new_field": [7]},
{"b": {"a": [5, 3]}, "new_field": [5, 3]}]),
dict(
testcase_name="repeated_scalar_repeated2",
st=[{"b": {"a": [[5, 7], []]}},
{"b": {"a": [[5, 1], [3]]}}],
source_path=("b", "a"),
new_field_name="new_field",
expected=[{"b": {"a": [[5, 7], []]},
"new_field": [[5, 7], []]},
{"b": {"a": [[5, 1], [3]]},
"new_field": [[5, 1], [3]]}]),
dict(
testcase_name="scalar_scalar_scalar_scalar",
st={"a": {"b": {"c": 7}}},
source_path=("a", "b", "c"),
new_field_name="new_field",
expected={"a": {"b": {"c": 7}, "new_field": 7}}),
dict(
testcase_name="repeated_scalar_scalar_scalar",
st=[{"a": {"b": {"c": 7}}},
{"a": {"b": {"c": 5}}}],
source_path=("a", "b", "c"),
new_field_name="new_field",
expected=[{"a": {"b": {"c": 7}, "new_field": 7}},
{"a": {"b": {"c": 5}, "new_field": 5}}],),
dict(
testcase_name="repeated_repeated_scalar_scalar",
st=[{"a": [{"b": {"c": 7}}, {"b": {"c": 3}}]},
{"a": [{"b": {"c": 5}}]}],
source_path=("a", "b", "c"),
new_field_name="new_field",
expected=[{"a": [{"b": {"c": 7}, "new_field": 7},
{"b": {"c": 3}, "new_field": 3}]},
{"a": [{"b": {"c": 5}, "new_field": 5}]}]),
dict(
testcase_name="docs_tokens",
st=[{"docs": [{"tokens": [7, 17]}, {"tokens": [3, 13]}]},
{"docs": [{"tokens": [5, 15]}]}],
source_path=("docs", "tokens"),
new_field_name="docs_tokens",
expected=[{"docs": [{"tokens": [7, 17]}, {"tokens": [3, 13]}],
"docs_tokens": [7, 17, 3, 13]},
{"docs": [{"tokens": [5, 15]}],
"docs_tokens": [5, 15]}],
),
dict(
testcase_name="repeated_repeated_scalar_repeated",
st=[{"a": [{"b": {"c": [7, 17]}}, {"b": {"c": [3, 13]}}]},
{"a": [{"b": {"c": [5, 15]}}]}],
source_path=("a", "b", "c"),
new_field_name="new_field",
expected=[{"a": [{"b": {"c": [7, 17]}, "new_field": [7, 17]},
{"b": {"c": [3, 13]}, "new_field": [3, 13]}]},
{"a": [{"b": {"c": [5, 15]}, "new_field": [5, 15]}]}]),
dict(
testcase_name="scalar_scalar_scalar_repeated",
st={"a": {"b": {"c": [7, 3, 5]}}},
source_path=("a", | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Miscellaneous generic utility functions and classes."""
import filecmp
import inspect
import io
import os
import re
import sys
from uuid import UUID
from .lang import classproperty
def get_new_uuid():
"""
Return a new UUID (typically to be used for new nodes).
    It generates a random version-4 UUID via ``uuid.uuid4``.
"""
import uuid
return str(uuid.uuid4())
def validate_uuid(given_uuid: str) -> bool:
"""A simple check for the UUID validity."""
try:
parsed_uuid = UUID(given_uuid, version=4)
except ValueError:
# If not a valid UUID
return False
# Check if there was any kind of conversion of the hex during
# the validation
return str(parsed_uuid) == given_uuid
def validate_list_of_string_tuples(val, tuple_length):
"""
Check that:
1. ``val`` is a list or tuple
2. each element of the list:
a. is a list or tuple
b. is of length equal to the parameter tuple_length
        c. each element of the tuple is a string
    Return True if valid, raise ValidationError if invalid
"""
from aiida.common.exceptions import ValidationError
err_msg = (
'the value must be a list (or tuple) '
'of length-N list (or tuples), whose elements are strings; '
'N={}'.format(tuple_length)
)
if not isinstance(val, (list, tuple)):
raise ValidationError(err_msg)
for element in val:
if (
not isinstance(element, (list, tuple)) or (len(element) != tuple_length) or
not all(isinstance(s, str) for s in element)
):
raise ValidationError(err_msg)
return True
def get_unique_filename(filename, list_of_filenames):
"""
Return a unique filename that can be added to the list_of_filenames.
If filename is not in list_of_filenames, it simply returns the filename
    string itself. Otherwise, it appends an integer number to the filename
(before the extension) until it finds a unique filename.
:param filename: the filename to add
:param list_of_filenames: the list of filenames to which filename
should be added, without name duplicates
:returns: Either filename or its modification, with a number appended
between the name and the extension.
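    Example (illustrative): get_unique_filename('a.txt', ['a.txt']) -> 'a-1.txt'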
"""
if filename not in list_of_filenames:
return filename
basename, ext = os.path.splitext(filename)
# Not optimized, but for the moment this should be fast enough
append_int = 1
while True:
new_filename = f'{basename:s}-{append_int:d}{ext:s}'
if new_filename not in list_of_filenames:
break
append_int += 1
return new_filename
def str_timedelta(dt, max_num_fields=3, short=False, negative_to_zero=False): # pylint: disable=invalid-name
"""
    Given a timedelta ``dt``, return it as a string in a HH:MM:SS-like format.
:param dt: a TimeDelta object
:param max_num_fields: maximum number of non-zero fields to show
(for instance if the number of days is non-zero, shows only
days, hours and minutes, but not seconds)
:param short: if False, print always ``max_num_fields`` fields, even
if they are zero. If True, do not print the first fields, if they
are zero.
:param negative_to_zero: if True, set dt = 0 if dt < 0.
"""
if max_num_fields <= 0:
raise ValueError('max_num_fields must be > 0')
s_tot = dt.total_seconds() # Important to get more than 1 day, and for
# negative values. dt.seconds would give
# wrong results in these cases, see
# http://docs.python.org/2/library/datetime.html
s_tot = int(s_tot)
if negative_to_zero:
if s_tot < 0:
s_tot = 0
negative = (s_tot < 0)
s_tot = abs(s_tot)
negative_string = ' in the future' if negative else ' ago'
# For the moment stay away from months and years, difficult to get
days, remainder = divmod(s_tot, 3600 * 24)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
all_fields = [(days, 'D'), (hours, 'h'), (minutes, 'm'), (seconds, 's')]
fields = []
start_insert = False
counter = 0
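    # start_insert turns True at the first non-zero field, or once the fields
    # still remaining fit within max_num_fields; at most max_num_fields fields
    # are then collected.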
for idx, field in enumerate(all_fields):
if field[0] != 0:
start_insert = True
if (len(all_fields) - idx) <= max_num_fields:
start_insert = True
if start_insert:
if counter >= max_num_fields:
break
fields.append(field)
counter += 1
if short:
while len(fields) > 1: # at least one element has to remain
if fields[0][0] != 0:
break
fields.pop(0) # remove first element
# Join the fields
raw_string = ':'.join(['{:02d}{}'.format(*f) for f in fields])
if raw_string.startswith('0'):
raw_string = raw_string[1:]
# Return the resulting string, appending a suitable string if the time
# is negative
return f'{raw_string}{negative_string}'
def get_class_string(obj):
"""
Return the string identifying the class of the object (module + object name,
joined by dots).
It works both for classes and for class instances.
"""
if inspect.isclass(obj):
return f'{obj.__module__}.{obj.__name__}'
return f'{obj.__module__}.{obj.__class__.__name__}'
def get_object_from_string(class_string):
"""
Given a string identifying an object (as returned by the get_class_string
method) load and return the actual object.
"""
import importlib
the_module, _, the_name = class_string.rpartition('.')
return getattr(importlib.import_module(the_module), the_name)
def grouper(n, iterable): # pylint: disable=invalid-name
"""
    Given an iterable, return an iterable that yields tuples of groups of
    elements from it, each of length n, except the last one, which has
    whatever length is required to exhaust the iterable (i.e., there is no
    filling applied).
    :param n: length of each tuple (except the last one, which will have
        length <= n)
:param iterable: the iterable to divide in groups
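    Example (illustrative): list(grouper(2, 'ABCDE')) -> [('A', 'B'), ('C', 'D'), ('E',)]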
"""
import itertools
iterator = iter(iterable)
while True:
chunk = tuple(itertools.islice(iterator, n))
if not chunk:
return
yield chunk
class ArrayCounter:
"""
A counter & a method that increments it and returns its value.
It is used in various tests.
"""
seq = None
def __init__(self):
self.seq = -1
def array_counter(self):
self.seq += 1
return self.seq
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
    @return: (True, msg) if the directory trees are the same and there were
        no errors while accessing the directories or files; (False, msg)
        otherwise.
"""
# Directory comparison
dirs_cmp = filecmp.dircmp(dir1, dir2)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
return (
False, 'Left directory: {}, right directory: {}, files only '
'in left directory: {}, files only in right directory: '
'{}, not comparable files: {}'.format(
dir1, dir2, dirs_cmp.left_only, dirs_cmp.right_only, dirs_cmp.funny_files
)
)
# If the directories contain the same files, compare the common files
(_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
if mismatch:
return (
False, 'The following files in the directories {} and {} '
"don't match: {}".format(dir1, dir2, mismatch)
)
if errors:
return (
False, 'The following files in the directories {} and {} '
"aren't regular: {}".format(dir1, dir2, errors)
)
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
res, msg = are_dir_trees_equal(new_dir1, new_dir2)
if not res:
return False, msg
return True, f'The given directories ({dir1} and {dir2}) are equal'
class Prettifier:
"""
Class to manage prettifiers (typically for labels of kpoints
in band plots)
"""
@classmethod
def _prettify_label_pass(cls, label):
"""
No-op prettifier, simply returns the same label
:param label: a string to prettify
"""
return label
@classmethod
def _prettify_label_agr(cls, label):
"""
Prettifier for XMGrace
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'\xG\f{}')
.replace('DELTA', r'\xD\f{}')
.replace('LAMBDA', r'\xL\f{}')
.replace('SIGMA', r'\xS\f{}')
) # yapf:disable
return re.sub(r'_(.?)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_agr_simple(cls, label):
"""
Prettifier for XMGrace (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'\xG'
return re.sub(r'(\d+)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_gnuplot(cls, label):
"""
Prettifier for Gnuplot
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', 'Γ')
.replace('DELTA', 'Δ')
.replace('LAMBDA', 'Λ')
.replace('SIGMA', 'Σ')
) # yapf:disable
return re.sub(r'_(.?)', r'_{\1}', label)
@classmethod
def _prettify_label_gnuplot_simple(cls, label):
"""
Prettifier for Gnuplot (for old label names)
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
if label == 'G':
return 'Γ'
return re.sub(r'(\d+)', r'_{\1}', label)
@classmethod
def _prettify_label_latex(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'$\Gamma$')
.replace('DELTA', r'$\Delta$')
.replace('LAMBDA', r'$\Lambda$')
.replace('SIGMA', r'$\Sigma$')
) # yapf:disable
label = re.sub(r'_(.?)', r'$_{\1}$', label)
# label += r"$_{\vphantom{0}}$"
return label
@classmethod
| |
Rx"])
self.result_table_set_precision(3)
for r in self.pkt_cnts:
self.result_table_add(r)
self.result_table_print()
elif stress is not None and self.pkt_cnts:
print("\n") # tables separation
hdr_row = ['direction', 'size', 'flows',
'Tx queues DUT / Tester', 'type',
'Tester Tx, b/s',
'DUT Rx, b/s',
'FW missing',
'Tester Tx errors',
'DUT Rx errors',
'DUT Rx drops',
'DUT Tx, b/s',
'Tester Rx, b/s',
'BW missing',
'DUT TX errors',
'Tester Rx errors',
'Tester Rx drops',
]
self.result_table_create(hdr_row)
self.result_table_set_precision(3)
for r in self.pkt_cnts:
self.result_table_add(r)
self.result_table_print()
def get_flows_filename(self, tx_host, rx_host):
tx_port = tx_host.used_port
rx_port = rx_host.used_port
out = 'flows_{src_ip}_{src_mac}_{dst_ip}_{dst_mac}'.format(
src_ip=tx_host.get_ipv4_address(tx_port),
src_mac=tx_host.get_mac_address(tx_port),
dst_ip=rx_host.get_ipv4_address(rx_port),
dst_mac=rx_host.get_mac_address(rx_port),
)
return out
def save_flows(self, flows, tx_host, rx_host):
fname = self.get_flows_filename(tx_host, rx_host)
tx_host.send_expect("rm -f {}".format(fname), "# ")
tx_host.save_file(fname, json.dumps(flows))
tx_host.logger.info("Flows list saved to {}".format(fname))
def load_flows_from_file(self, tx_host, rx_host):
out = None
fname = self.get_flows_filename(tx_host, rx_host)
if tx_host.path_exist(fname) is True:
data = tx_host.send_expect("cat {}".format(fname), "# ")
try:
data = json.loads(data)
except:
self.logger.error("Reading file {} failed." \
"Flows list will be generated from scratch".format(fname))
else:
# Verify, if:
                # 1. It's a list
# 2. It was created for the same amount of queues which are
# planned to be used
# 3. There are enough flows for each queue
total_queues = self.validate("BW_queue", self.max_queue)
flows_per_q = [self.max_flows_nb // total_queues] * total_queues
for i in range(0, self.max_flows_nb % total_queues):
flows_per_q[i] += 1
                if isinstance(data, list) and \
                   len(data) == total_queues and \
                   all([len(d) >= f for (d, f) in zip(data, flows_per_q)]):
out = data
# Even if pcap files would be found, it is not sure whether they are
# valid ones.
if out is None:
self.test_configs['try_reuse_pcaps'] = False
return out
def test_perf_bw(self, flows=None, stress=None):
if self.test_configs["BW_flows"] == "AUTO":
self.max_flows_nb = self.validate("BW_queue", self.max_queue)
else:
self.max_flows_nb = max([int(x) for x in self.test_configs["BW_flows"].split(",")])
self.__test_perf_bw(flows=flows)
@staticmethod
def init_pktgen_instance(conf, flow_list=None, burst_rate=100):
dev_port = conf['host'].used_port
pktgen = conf['pktgen']
return pktgen.init(dev_port, dev_port, conf, burst_rate, flow_list)
@staticmethod
def init_pktgen_instances(conf1, conf2, flow_list1, flow_list2):
start_timeout = 2
conf1['queue_nb'] = TestENA.init_pktgen_instance(conf1, flow_list1)
conf2['queue_nb'] = TestENA.init_pktgen_instance(conf2, flow_list2)
time.sleep(start_timeout)
@staticmethod
def detect_best_rates(tx_conf, rx_conf, flow_list):
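        """Heuristic per-queue Tx rate tuning (describes the steps below):
        start from the theoretical per-queue maxima, clamp each queue down to
        the Rx rate actually measured, then keep raising rates in step_rate
        increments until no queue shows further improvement.
        """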
burst_rate = 100
init_timeout = 3
start_timeout = 2
stop_timeout = 1
traffic_timeout = 5
# Max rate value is 10 Gbps per flow, absolute limit.
rate_max = 10000
txq_used = len(flow_list)
flows_nb = sum([len(queue_flow) for queue_flow in flow_list])
tx_host = tx_conf['host']
tx_host.logger.info("\tDetect the most optimal Tx rates for the flow")
tx_inst = tx_conf['pktgen']
rx_inst = rx_conf['pktgen']
f_pcap = rx_conf['f_pcap']
        # Make sure that the Rx machine doesn't use Tx pcaps
rx_conf['f_pcap'] = None
rxq_num = TestENA.init_pktgen_instance(rx_conf, flow_list)
rx_conf['f_pcap'] = f_pcap
        # Each Tx queue can have a different number of flows, so its rate limit
# can differ
q_rate_max = [len(queue_flow) * rate_max for queue_flow in flow_list]
# Maximum possible rate for the single Tx flow, taking into
        # consideration the number of queues
flow_rate_max = min(rate_max, tx_conf["max_gbps"] * 1000 // flows_nb)
tx_rates = [int(len(queue_flow) * flow_rate_max) for queue_flow in flow_list]
# The step rate should be 1/10 of regular Tx queue rate, but not bigger than 1Gbps
step_rate = min(tx_rates[0] // 10, 1000)
old_tx_rates = list(tx_rates)
# Run once with default values and limit the flows to rate value, which
# was achieved
tx_conf["tx_rates"] = tx_rates
TestENA.init_pktgen_instance(tx_conf, flow_list)
time.sleep(init_timeout)
# Execute flow for 5 seconds
tx_inst.start()
time.sleep(start_timeout)
        # The first statistics read just zeroizes the counters
rx_inst.queue_stats()
start_time = time.time()
time.sleep(traffic_timeout)
end_time = time.time()
(_, rxq_stats) = rx_inst.queue_stats()
tx_inst.quit()
time.sleep(stop_timeout)
test_time = end_time - start_time
for qid in range(rxq_num):
rx_rate_total = rxq_stats[qid]["bytes"]
rx_rate_mbps = rx_rate_total * 8 // 1000000 // test_time
if rx_rate_mbps < (tx_rates[qid] - step_rate):
                # As the rx_rate_mbps reading may not be very accurate, adjust it by the step_rate value.
# Especially small packets flows benefits from having higher rate.
tx_rates[qid] = rx_rate_mbps + step_rate
rate_stable = [False] * txq_used
rate_changed = True
# Increase rate for queues which show improvement until all queues have
# stable rate value.
while rate_changed:
old_tx_rates = tx_rates
tx_rates = [min(rate_max, rate + step_rate) if not stable else rate
for (stable, rate, rate_max) in zip(rate_stable, old_tx_rates, q_rate_max)]
tx_conf["tx_rates"] = tx_rates
# Start Tx instance with new rates
TestENA.init_pktgen_instance(tx_conf, flow_list)
time.sleep(init_timeout)
# Execute flow for 5 seconds
tx_inst.start()
time.sleep(start_timeout)
# Clean Rx statistics
rx_inst.queue_stats()
start_time = time.time()
time.sleep(traffic_timeout)
end_time = time.time()
(_, rxq_stats) = rx_inst.queue_stats()
tx_inst.quit()
time.sleep(stop_timeout)
test_time = end_time - start_time
# Get the rate and compare it with expected
rate_changed = False
for qid in range(rxq_num):
if rate_stable[qid]:
continue
rx_rate_total = rxq_stats[qid]["bytes"]
rx_rate_mbps = rx_rate_total * 8 // 1000000 // test_time
expected_rate = old_tx_rates[qid] + step_rate
if rx_rate_mbps >= expected_rate:
# Keep the adjusted rate
rate_changed = True
else:
# Revert the old rate value
tx_rates[qid] = old_tx_rates[qid]
rate_stable[qid] = True
rx_inst.quit()
tx_host.logger.debug("\tDone. Tx rates detected: {}".format(tx_rates))
def mono_traffic(self, t_conf, d_conf, msr_nb, interval, size,
t_flow_list, d_flow_list, pkt_type):
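        """Run traffic in one direction (Tester -> DUT or DUT -> Tester) and record averaged Tx/Rx stats."""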
if d_conf['mode'] == 'tx' and t_conf['mode'] == 'rx':
tx_conf = d_conf
rx_conf = t_conf
flow_dir_str = 'DUT -> Tester'
flow_list = d_flow_list
elif d_conf['mode'] == 'rx' and t_conf['mode'] == 'tx':
tx_conf = t_conf
rx_conf = d_conf
flow_dir_str = 'Tester -> DUT'
flow_list = t_flow_list
else:
assert 0, "Invalid mono traffic configuration - both Tx mode and Rx mode hosts must be provided."
tx_gen = tx_conf['pktgen']
rx_gen = rx_conf['pktgen']
TestENA.detect_best_rates(tx_conf, rx_conf, flow_list)
TestENA.init_pktgen_instances(tx_conf, rx_conf, flow_list, flow_list)
self.logger.info("\t\t\tLaunching the traffic")
stats_dict = self.generate_mono_traffic(tx_gen, rx_gen,
msr_nb, interval)
tx_gen.quit()
rx_gen.quit()
parsed_stats = stats_dict['parsed_stats']
avg_tx = stats_dict['avg_tx_stats']
avg_rx = stats_dict['avg_rx_stats']
avg_rx_gbps = 8 * avg_rx['rx_bytes'] / 1000000000 / avg_rx['rx_delay_sec']
avg_rx_mpps = avg_rx['rx_pkt'] / 1000000 / avg_rx['rx_delay_sec']
avg_tx_gbps = 8 * avg_tx['tx_bytes'] / 1000000000 / avg_tx['tx_delay_sec']
avg_tx_mpps = avg_tx['tx_pkt'] / 1000000 / avg_tx['tx_delay_sec']
_q_str = "{} / {}".format(0, tx_conf['queue_nb'])
flows_nb = sum([len(queue_flow) for queue_flow in flow_list])
pkts = [flow_dir_str, size, flows_nb, _q_str, pkt_type] + parsed_stats[:]
self.pkt_cnts.append(pkts)
if self.test_configs["two_way_mono"] is False:
self.result_table_add([flow_dir_str, size, flows_nb,
_q_str,
pkt_type,
avg_rx_gbps, avg_rx_mpps,
])
else:
self.result_table_add([flow_dir_str, size, flows_nb,
_q_str,
pkt_type,
avg_tx_gbps, 0,
0, avg_rx_gbps,
avg_tx_mpps, 0,
0, avg_rx_mpps,
])
def bi_traffic(self, t_conf, d_conf, msr_nb, interval, size,
t_flow_list, d_flow_list, pkt_type):
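        """Run traffic in both directions simultaneously and record averaged stats for both hosts."""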
t_gen = t_conf['pktgen']
d_gen = d_conf['pktgen']
# The number of flows should be symmetrical
flows_nb = sum([len(queue_flow) for queue_flow in t_flow_list])
TestENA.detect_best_rates(t_conf, d_conf, t_flow_list)
TestENA.detect_best_rates(d_conf, t_conf, d_flow_list)
TestENA.init_pktgen_instances(t_conf, d_conf, t_flow_list, d_flow_list)
self.logger.info("\t\t\tLaunching the traffic")
stats_dict = self.generate_bi_traffic(t_gen, d_gen, msr_nb, interval)
t_gen.quit()
d_gen.quit()
parsed_stats = stats_dict['parsed_stats']
avg_t_pc = stats_dict['avg_tester_stats']
avg_d_pc = stats_dict['avg_dut_stats']
avg_d_tx_gbps = 8 * avg_d_pc['tx_bytes'] / 1000000000 / avg_d_pc['tx_delay_sec']
avg_d_tx_mpps = avg_d_pc['tx_pkt'] / 1000000 / avg_d_pc['tx_delay_sec']
avg_t_rx_gbps = 8 * avg_t_pc['rx_bytes'] / 1000000000 / avg_t_pc['rx_delay_sec']
avg_t_rx_mpps = avg_t_pc['rx_pkt'] / 1000000 / avg_t_pc['rx_delay_sec']
avg_d_rx_gbps = 8 * avg_d_pc['rx_bytes'] / 1000000000 / avg_d_pc['rx_delay_sec']
avg_d_rx_mpps = avg_d_pc['rx_pkt'] / 1000000 / avg_d_pc['rx_delay_sec']
avg_t_tx_gbps = 8 * avg_t_pc['tx_bytes'] / 1000000000 / avg_t_pc['tx_delay_sec']
avg_t_tx_mpps = avg_t_pc['tx_pkt'] / 1000000 / avg_t_pc['tx_delay_sec']
_q_str = "{} / {}".format(d_conf['queue_nb'], t_conf['queue_nb'])
self.result_table_add(['Tester <=> DUT', size, flows_nb, _q_str, pkt_type,
avg_d_rx_gbps,
avg_t_rx_gbps,
avg_d_rx_mpps,
avg_t_rx_mpps
])
pkts = ['Tester <=> DUT', size, flows_nb, _q_str, pkt_type] + parsed_stats
self.pkt_cnts.append(pkts)
# The `backward` parameter affects only results placement order:
# - False: Tester -> DUT
# - True: DUT -> Tester
# This is because in 'mono' case, the function is called twice for two
# directions and results must be printed appropriately
def parse_stress(self, tx_pc, rx_pc, backward=False):
missing = tx_pc["tx_pkt"] - rx_pc["rx_pkt"]
if self.test_configs["BW_direction"] == "bi":
missing_bw = rx_pc["tx_pkt"] - tx_pc["rx_pkt"]
pkts = [add_unit(p[0] / p[1]) for p in [
(8 * tx_pc["tx_bytes"], tx_pc["tx_delay_sec"]),
(8 * rx_pc["rx_bytes"], rx_pc["rx_delay_sec"]),
(missing, 1),
(tx_pc["tx_err"], 1),
(rx_pc["rx_err"], 1),
(rx_pc["rx_drops"], 1),
(8 * rx_pc["tx_bytes"], rx_pc["tx_delay_sec"]),
(8 * tx_pc["rx_bytes"], tx_pc["rx_delay_sec"]),
(missing_bw, 1),
(rx_pc["tx_err"], 1),
(tx_pc["rx_err"], 1),
(tx_pc["rx_drops"], 1)
]]
elif self.test_configs["BW_direction"] == "mono":
missing_bw = rx_pc["tx_pkt"] - tx_pc["rx_pkt"]
if backward is False:
pkts = [add_unit(p[0] / p[1]) for p in [
(8 * tx_pc["tx_bytes"], tx_pc["tx_delay_sec"]),
(8 * rx_pc["rx_bytes"], rx_pc["rx_delay_sec"]),
(missing, 1),
(tx_pc["tx_err"], 1),
(rx_pc["rx_err"], 1),
(rx_pc["rx_drops"], 1),
]]
# pkts.extend([0,0,0,0,0,0])
pkts.extend([add_unit(p[0] / p[1]) for p in [
(8 * rx_pc["tx_bytes"], rx_pc["rx_delay_sec"]),
(8 * tx_pc["rx_bytes"], tx_pc["tx_delay_sec"]),
(missing_bw, 1),
(rx_pc["tx_err"], 1),
(tx_pc["rx_err"], 1),
(tx_pc["rx_drops"], 1)
]])
else:
missing_bw = rx_pc["tx_pkt"] - tx_pc["rx_pkt"]
# pkts = [0,0,0,0,0,0]
                pkts =
# -*- coding: utf-8 -*-
# farmeconomy.py module
# authors: <NAME> & <NAME>
# An OOP implementation
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
from collections import namedtuple
class Economy(object):
""" Economy with an Equilibrium Farm Size Distribution
At present the initial distribution of skills is uniformly distributed.
For example N = 5 and s = np.array([1, 1, 1, 1, 1.5]) has 5 farmer groups.
    We take the landlord class to be the last indexed group.
"""
def __init__(self, N): # constructor to set initial parameters.
self.N = N # of quantiles (number of skill groups)
self.GAMMA = 0.8 # homogeneity factor
self.ALPHA = 0.5 # alpha (land) for production function
self.LAMBDA = 1.0/N # landlord share of labor
self.TBAR = 100 # Total Land Endowment
self.LBAR = 100 # Total Labor Endowment
self.H = 0.0 # fixed cost of production
self.s = np.ones(N)
def prodn(self, X, s):
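        """ Cobb-Douglas production: Y = s * (T**ALPHA * L**(1-ALPHA))**GAMMA, where X = [T, L] """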
Y = s*((X[0]**self.ALPHA)*(X[1]**(1-self.ALPHA)))**self.GAMMA
return Y
def marginal_product(self, X, s):
""" Production function technoogy """
MPT = self.ALPHA*self.GAMMA*self.prodn(X, s)/X[0]
MPL = (1-self.ALPHA)*self.GAMMA*self.prodn(X, s)/X[1]
return np.append(MPT, MPL)
def profits(self, X, s, w):
""" profits given factor prices and (T, L, s)"""
return self.prodn(X, s) - np.dot(w, X) - self.H
def demands(self, w, s):
"""Returns competitive demands for each skill group in a subeconomy
with factor supply (tbar, lbar) and vector s.
"""
alpha, gamma = self.ALPHA, self.GAMMA
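        # Closed-form demands obtained by inverting the profit-maximization
        # first-order conditions MPT = w[0] (land rental) and MPL = w[1] (wage).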
land = ((w[1]/(gamma*s*(1-alpha))) *
(((1-alpha)/alpha)*(w[0]/w[1])) **
(1-gamma*(1-alpha)))**(1/(gamma-1))
labor = ((w[0]/(gamma*s*alpha)) *
((alpha/(1-alpha))*(w[1]/w[0])) **
(1-gamma*alpha))**(1/(gamma-1))
# zero demands if fixed cost implies negative profits (numeric only)
X = np.array([land, labor])
profitable = (self.profits(X, s, w) > 0)
return X*profitable
def excessD(self, w, Xbar, s):
""" Total excess land and labor demand given factor prices in
subeconomy with Xbar supplies
returns excess demand in each market
"""
res = np.array(([np.sum(self.demands(w, s)[0])-Xbar[0],
np.sum(self.demands(w, s)[1])-Xbar[1]]))
return res
def smallhold_eq(self, Xbar, s, analytic=True):
""" Solves for market clearing factor prices (analytically or
numerically) for subeconomy given Xbar supplies
When numerically solved: minimizes sum of squared excess demands
Returns a named tuple with factor prices and demands c_eqn.w, c_eqn.D
"""
if analytic: # for specific CobbDouglas
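            # With a common gamma, each fringe group's factor use relative to the
            # landlord scales as (s_i/s_R)**(1/(1-gamma)); psi aggregates these
            # ratios so total supplies split proportionally across groups.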
gamma = self.GAMMA
s_fringe, s_R = s[0:-1], s[-1]
psi = np.sum((s_fringe/s_R)**(1/(1-gamma)))
Lr = Xbar[1]/(1+psi)
Tr = Xbar[0]/(1+psi)
L_fringe = Lr*(s_fringe/s_R)**(1/(1-gamma))
T_fringe = Tr*(s_fringe/s_R)**(1/(1-gamma))
Xs = np.array([np.append(T_fringe, Tr), np.append(L_fringe, Lr)])
WR = self.marginal_product(Xs[:, -2], s[-2])
else: # Numeric solution should work for any demands
w0 = np.array([0.5, 0.5])
f = lambda w: np.sum(self.excessD(w, Xbar, s)**2)
res = minimize(f, w0, method='Nelder-Mead')
WR = res.x
Xs = self.demands(WR, s)
result = namedtuple('result', ['w', 'X'])
res = result(w=WR, X=Xs)
return res
def cartel_income(self, Xr, theta):
""" Cartel group's income from profits and factor income
when cartel uses (tr,lr) fringe has (TBAR-tr,LBAR-lr) """
# at present cartel is always last index farm
s_fringe, s_R = self.s[0:-1], self.s[-1] # landlord is top index farmer
TB_fringe = max(self.TBAR - Xr[0], 0)
LB_fringe = max(self.LBAR - Xr[1], 0)
fringe = self.smallhold_eq([TB_fringe, LB_fringe], s_fringe)
y = self.prodn(Xr, s_R) - \
np.dot(fringe.w, [Xr[0]-self.TBAR*theta,
Xr[1]-self.LAMBDA*self.LBAR])
# print("cartel: Tr={0:8.3f}, Lr={1:8.3f}, y={2:8.3f}".format(Xr[0],
# Xr[1],y))
return y
def cartel_eq(self, theta, guess=[1, 1]):
""" Cartel chooses own factor use (and by extension how much to
        withhold from the fringe) to maximize profits plus net factor sales
"""
f = lambda X: -self.cartel_income(X, theta)
res = minimize(f, guess, method='Nelder-Mead')
XR = res.x
# print('XR:',XR)
fringe = self.smallhold_eq([self.TBAR, self.LBAR]-XR, self.s[0:-1])
XD = np.vstack((fringe.X.T, XR)).T
WR = fringe.w
result = namedtuple('result', ['w', 'X'])
cartel_res = result(w= WR, X= XD)
return cartel_res
def print_params(self):
""" print out parameters """
params = vars(self)
count = 1
for p in sorted(params): # print attributes alphabetically
if (count % 4) == 0:
print("{0:<6} : {1}".format(p,params[p]))
else:
print("{0:<6} : {1}".format(p,params[p])),
#print(count)
count += 1
    def fooprint(self):
        self.print_params()  # print_params already prints; avoid printing its None return value
class MirEconomy(Economy):
""" sub class of Economy class but with Mir as subeconomy
"""
def __init__(self, N): # constructor to set initial parameters.
super(MirEconomy, self).__init__(N) # inherit properties
# if None supplied use defaults
class CESEconomy(Economy):
""" sub class of Economy class but with two factor CES
"""
def __init__(self, N): # constructor to set initial parameters.
super(CESEconomy, self).__init__(N) # inherit properties
# if None supplied use defaults
self.N = N # of quantiles (number of skill groups)
self.RHO = 0.8 # homogeneity factor
self.PHI = 0.5 # alpha (land) for production function
self.aL = 1.0 # landlord share of labor
self.aT = 1.1 # Total Land Endowment
def prodn(self, X, s):
Y = s*(self.PHI*X[0]**(self.RHO) + (1-self.PHI)*X[1]**(self.RHO)) \
** (self.GAMMA/self.RHO)
return Y
    def marginal_product(self, X, s):
        """ Marginal products (gradient of the CES prodn above) """
        # dY/dT = GAMMA*Y/B * PHI*T**(RHO-1) and dY/dL = GAMMA*Y/B * (1-PHI)*L**(RHO-1),
        # where B = PHI*T**RHO + (1-PHI)*L**RHO
        common = self.GAMMA * self.prodn(X, s) \
            / (self.PHI*X[0]**self.RHO + (1-self.PHI)*X[1]**self.RHO)
        MPT = common * self.PHI*X[0]**(self.RHO-1)
        MPL = common * (1-self.PHI)*X[1]**(self.RHO-1)
        return np.append(MPT, MPL)
#########################
# We also define some utility functions
# Note these print statements format correctly in python2.7 not 3.4
def scene_print(ECO, numS=5,prnt=True,detail=True):
"""Creates numS land ownership (theta) scenarios
and returns competitive and market-power distorted equilibria
Prints results if flags are on.
Args:
ECO -- Instance of an Economy object
numS -- number of values of theta
prnt -- print table if True
Returns:
[Xc,Xr,wc,wr] where
Xrc -- Efficient/Competitive landlord factor use
Xr -- numS x 2 matrix, Xr[theta] = Landlords' distorted use
wc -- competitive factor prices
wr -- wr[theta] distorted competitive factor prices
"""
import sys
    if sys.version_info[0] != 2:
print('Use python 2.x for table formats')
print("Running {0} scenarios...".format(numS))
# competitive eqn when landlord is just part of the competitive fringe
comp = ECO.smallhold_eq([ECO.TBAR,ECO.LBAR],ECO.s)
wc, Xc = comp.w, comp.X
Xrc = Xc[:,-1] # landlord's factor use
#
guess = Xrc
# distorted equilibria at different land ownership theta
theta = np.linspace(0,1,numS+1)
theta[-1] = 0.99
if prnt:
print("\nAssumed Parameters")
print("==================")
ECO.print_params()
        print('\nEfficient:[ Trc, Lrc] [rc,wc] w/r '),
if detail:
print('F( ) [r*Tr] [w*Lr]'),
print("")
print("="*78)
print(" [{0:6.2f},{1:6.2f}] ".format(Xrc[0],Xrc[1])),
print("[{0:4.2f},{1:4.2f}]".format(wc[0],wc[1])),
print(" {0:4.2f} ".format(wc[1]/wc[0])),
if detail:
print("| {0:5.2f} ".format(ECO.prodn(Xrc,ECO.s[-1]))),
print(" {0:5.2f} ".format(Xrc[0]*wc[0])),
print(" {0:6.2f} ".format(Xrc[1]*wc[1]))
print("\nTheta [ Tr, Lr ] [rM,wM] w/r |"),
print('F() [T_hire] [T_sale] [L_hire]')
print("="*78)
Xr = np.zeros(shape=(numS+1,2)) # Xr - lord factor use for each theta
wr = np.zeros(shape=(numS+1,2))
for i in range(numS+1):
cartelEQ = ECO.cartel_eq(theta[i], guess)
Xr[i] = cartelEQ.X[:,-1]
wr[i] = cartelEQ.w
guess = Xr[i]
if prnt:
print(" {0:3.2f}".format(theta[i])),
print(" [{0:6.2f},{1:6.2f}]".format(Xr[i,0],Xr[i,1])),
print("[{0:5.2g},{1:5.2f}] {2:5.2f}" \
.format(wr[i,0],wr[i,1],wr[i,1]/wr[i,0])),
if detail:
print("| {0:5.2f} ".format(ECO.prodn(Xr[i],ECO.s[-1]))),
print(" {0:6.2f} ".format(Xr[i,0]*wr[i,0])),
print(" {0:6.2f} ".format(theta[i]*ECO.TBAR*wr[i,0])),
print(" {0:6.2f} ".format(Xr[i,1]*wr[i,1])),
print("")
if prnt:
print("="*78)
return (Xrc, Xr, wc, wr)
def factor_plot(ECO, Xrc, Xr):
plt.rcParams["figure.figsize"] = (10, 8)
numS = len(Xr)-1
theta = np.linspace(0, 1, numS+1)
Tr, Lr = Xr[:, 0], Xr[:, 1]
Tr_net = Tr-np.array(theta) * ECO.TBAR
Lr_net = Lr - ECO.LAMBDA * ECO.LBAR
# print(Tr_net, Lr_net)
Trc_net = Xrc[0]*np.ones(numS+1)-np.array(theta)*ECO.TBAR
Lrc_net = Xrc[1]*np.ones(numS+1)-ECO.LAMBDA*ECO.LBAR
plt.grid()
plt.plot(theta, Tr_net, '-ro', label='distorted land')
plt.plot(theta, Trc_net, label='efficient land')
plt.plot(theta, Lr_net, '-bx', label='distorted labor')
plt.plot(theta, Lrc_net, label='efficient labor')
plt.grid()
plt.ylim(-100, ECO.TBAR)
# plt.xlabel(r'$\gamma =$')
plt.title('Landlord net factor hire for '+r'$\gamma =$ {0}'
.format(ECO.GAMMA))
plt.xlabel(r'$\theta$ -- Landlord land ownership share')
plt.legend(loc='lower left',title='net hiring in of')
plt.show()
return
def TLratio_plot(ECO, Xrc, Xr):
plt.rcParams["figure.figsize"] = (10, 8)
numS = len(Xr)-1
theta = np.linspace(0, 1, numS+1)
plt.plot(theta, Xr.T[0][:]/Xr.T[1][:], '-ro', label='distorted')
plt.plot(theta, (Xrc[0]/Xrc[1])*np.ones(numS+1), '--', label='efficient')
plt.title('Land to labor ratio on landlord farm '+r'$\gamma =$ {0}'
.format(ECO.GAMMA))
plt.xlabel(r'$\theta$ -- Landlord land ownership share')
plt.legend(loc='upper left',title='Land/Labor ratio')
plt.show()
return
#
if __name__ == "__main__":
"""Sample use of the Economy class """
s = np.array([1., 1., 1., 1., 1.])
N = len(s)
E = Economy(N) # an instance takes N length as parameter
E.ALPHA = 0.5
E.GAMMA = 0.90
E.smallhold_eq([E.TBAR, E.LBAR], s, analytic=True)
(Xrc, Xr, wc, wr) = scene_print(E, 10, detail=True)
#factor_plot(E,Xrc,Xr)
TLratio_plot(E,Xrc,Xr)
# scene_plot(E, Xrc, Xr)
# plt.show()
##
# print("Competitive Fringe cases EE(C) and DE cases")
# DE = Economy(5)
# DE.smallhold_eq([E.TBAR, E.LBAR], s, analytic=True)
# (Xrc, Xr, wc, wr) = scenarios(DE, 10, detail=True)
# scene_plot(E, Xrc, Xr)
#
# print("MIR Fringe cases DD")
# DD = MirEconomy(5)
# print(DD.prodn(np.array([10, 10]), np.array([1, 1.05])))
# DD.smallhold_eq([E.TBAR, E.LBAR], s, analytic=True)
# (Xrc, Xr, wc, wr) = scenarios(DD, | |
from __future__ import (division, absolute_import, print_function, unicode_literals)
import os
import numpy as np
import gzip
import os.path
import nltk
import logging
from nltk import FreqDist
from .WordEmbeddings import wordNormalize
from .CoNLL import readCoNLL
import sys
if (sys.version_info > (3, 0)):
import pickle as pkl
else: #Python 2.7 imports
import cPickle as pkl
from io import open
def perpareDataset(embeddingsPath, datasets, frequencyThresholdUnknownTokens=1, reducePretrainedEmbeddings=False, valTransformations=None, padOneTokenSentence=True):
"""
Reads in the pre-trained embeddings (in text format) from embeddingsPath and prepares those to be used with the LSTM network.
    Unknown words in the training data are added if they appear at least frequencyThresholdUnknownTokens times
# Arguments:
embeddingsPath: Full path to the pre-trained embeddings file. File must be in text format.
        datasets: Per-dataset configuration; the train/dev/test files are read from data/<datasetName>/
        frequencyThresholdUnknownTokens: Unknown words are added if they occur more than frequencyThresholdUnknownTokens times in the train set
        reducePretrainedEmbeddings: If True, only the embeddings needed for training will be loaded
        valTransformations: Column specific value transformations
        padOneTokenSentence: True to pad single-token sentences (needed for the CRF classifier)
"""
embeddingsName = os.path.splitext(embeddingsPath)[0]
pklName = "_".join(sorted(datasets.keys()) + [embeddingsName])
outputPath = 'pkl/' + pklName + '.pkl'
if os.path.isfile(outputPath):
logging.info("Using existent pickle file: %s" % outputPath)
return outputPath
casing2Idx = getCasingVocab()
embeddings, word2Idx = readEmbeddings(embeddingsPath, datasets, frequencyThresholdUnknownTokens, reducePretrainedEmbeddings)
mappings = {'tokens': word2Idx, 'casing': casing2Idx}
pklObjects = {'embeddings': embeddings, 'mappings': mappings, 'datasets': datasets, 'data': {}}
for datasetName, dataset in datasets.items():
datasetColumns = dataset['columns']
commentSymbol = dataset['commentSymbol']
trainData = 'data/%s/train.txt' % datasetName
devData = 'data/%s/dev.txt' % datasetName
testData = 'data/%s/test.txt' % datasetName
paths = [trainData, devData, testData]
logging.info(":: Transform "+datasetName+" dataset ::")
pklObjects['data'][datasetName] = createPklFiles(paths, mappings, datasetColumns, commentSymbol, valTransformations, padOneTokenSentence)
    with open(outputPath, 'wb') as f:
        pkl.dump(pklObjects, f, -1)
logging.info("DONE - Embeddings file saved: %s" % outputPath)
return outputPath
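# Hedged usage sketch for perpareDataset: the dataset name 'unidep', the
# data/unidep/{train,dev,test}.txt layout, and the column mapping below are
# assumptions for illustration; the embeddings file is one of the names the
# loader knows how to download automatically.
def examplePrepareDataset():
    datasets = {
        'unidep': {                              # hypothetical dataset name
            'columns': {0: 'tokens', 1: 'POS'},  # CoNLL column index -> feature name
            'commentSymbol': None,               # no comment lines in the files
        }
    }
    # Returns the path of the pickle file holding embeddings + index mappings
    return perpareDataset('komninos_english_embeddings.gz', datasets)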
def loadDatasetPickle(embeddingsPickle):
""" Loads the cPickle file, that contains the word embeddings and the datasets """
f = open(embeddingsPickle, 'rb')
pklObjects = pkl.load(f)
f.close()
return pklObjects['embeddings'], pklObjects['mappings'], pklObjects['data']
def readEmbeddings(embeddingsPath, datasetFiles, frequencyThresholdUnknownTokens, reducePretrainedEmbeddings):
"""
Reads the embeddingsPath.
:param embeddingsPath: File path to pretrained embeddings
:param datasetName:
:param datasetFiles:
:param frequencyThresholdUnknownTokens:
:param reducePretrainedEmbeddings:
:return:
"""
# Check that the embeddings file exists
if not os.path.isfile(embeddingsPath):
if embeddingsPath in ['komninos_english_embeddings.gz', 'levy_english_dependency_embeddings.gz', 'reimers_german_embeddings.gz']:
getEmbeddings(embeddingsPath)
else:
print("The embeddings file %s was not found" % embeddingsPath)
exit()
logging.info("Generate new embeddings files for a dataset")
neededVocab = {}
if reducePretrainedEmbeddings:
logging.info("Compute which tokens are required for the experiment")
def createDict(filename, tokenPos, vocab):
for line in open(filename):
if line.startswith('#'):
continue
splits = line.strip().split()
if len(splits) > 1:
word = splits[tokenPos]
wordLower = word.lower()
wordNormalized = wordNormalize(wordLower)
vocab[word] = True
vocab[wordLower] = True
vocab[wordNormalized] = True
for dataset_name, dataset in datasetFiles.items():
dataColumnsIdx = {y: x for x, y in dataset['columns'].items()}
tokenIdx = dataColumnsIdx['tokens']
datasetPath = 'data/%s/' % dataset_name
for dataset_file_name in ['train.txt', 'dev.txt', 'test.txt']:
createDict(datasetPath + dataset_file_name, tokenIdx, neededVocab)
# :: Read in word embeddings ::
logging.info("Read file: %s" % embeddingsPath)
word2Idx = {}
embeddings = []
    embeddingsIn = gzip.open(embeddingsPath, "rt") if embeddingsPath.endswith('.gz') \
        else open(embeddingsPath, encoding="utf8")
embeddingsDimension = None
for line in embeddingsIn:
split = line.rstrip().split(" ")
word = split[0]
        if embeddingsDimension is None:
            embeddingsDimension = len(split) - 1
        # Assure that all lines in the embeddings file are of the same length
        if (len(split) - 1) != embeddingsDimension:
            print("ERROR: A line in the embeddings file had more or less dimensions than expected. Skip token.")
            continue
if len(word2Idx) == 0: # Add padding+unknown
word2Idx["PADDING_TOKEN"] = len(word2Idx)
vector = np.zeros(embeddingsDimension)
embeddings.append(vector)
word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
            vector = np.random.uniform(-0.25, 0.25, embeddingsDimension)  # Alternatively: -sqrt(3/dim) ... sqrt(3/dim)
embeddings.append(vector)
vector = np.array([float(num) for num in split[1:]])
if len(neededVocab) == 0 or word in neededVocab:
if word not in word2Idx:
embeddings.append(vector)
word2Idx[word] = len(word2Idx)
# Extend embeddings file with new tokens
def createFD(filename, tokenIndex, fd, word2Idx):
for line in open(filename):
if line.startswith('#'):
continue
splits = line.strip().split()
if len(splits) > 1:
word = splits[tokenIndex]
wordLower = word.lower()
wordNormalized = wordNormalize(wordLower)
if word not in word2Idx and wordLower not in word2Idx and wordNormalized not in word2Idx:
fd[wordNormalized] += 1
    if frequencyThresholdUnknownTokens is not None and frequencyThresholdUnknownTokens >= 0:
fd = nltk.FreqDist()
for datasetName, datasetFile in datasetFiles.items():
dataColumnsIdx = {y: x for x, y in datasetFile['columns'].items()}
tokenIdx = dataColumnsIdx['tokens']
datasetPath = 'data/%s/' % datasetName
createFD(datasetPath + 'train.txt', tokenIdx, fd, word2Idx)
addedWords = 0
for word, freq in fd.most_common(10000):
if freq < frequencyThresholdUnknownTokens:
break
addedWords += 1
word2Idx[word] = len(word2Idx)
            vector = np.random.uniform(-0.25, 0.25, embeddingsDimension)  # Alternatively: -sqrt(3/dim) ... sqrt(3/dim)
embeddings.append(vector)
assert (len(word2Idx) == len(embeddings))
logging.info("Added words: %d" % addedWords)
embeddings = np.array(embeddings)
return embeddings, word2Idx
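# Layout sketch of the result: `embeddings` is an (n_words, dim) float array
# whose rows line up with the `word2Idx` indices; row 0 is the all-zero
# PADDING_TOKEN vector and row 1 the random UNKNOWN_TOKEN vector added above.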
def addCharInformation(sentences):
"""Breaks every token into the characters"""
for sentenceIdx in range(len(sentences)):
sentences[sentenceIdx]['characters'] = []
for tokenIdx in range(len(sentences[sentenceIdx]['tokens'])):
token = sentences[sentenceIdx]['tokens'][tokenIdx]
chars = [c for c in token]
sentences[sentenceIdx]['characters'].append(chars)
def addCasingInformation(sentences):
"""Adds information of the casing of words"""
for sentenceIdx in range(len(sentences)):
sentences[sentenceIdx]['casing'] = []
for tokenIdx in range(len(sentences[sentenceIdx]['tokens'])):
token = sentences[sentenceIdx]['tokens'][tokenIdx]
sentences[sentenceIdx]['casing'].append(getCasing(token))
def getCasing(word):
"""Returns the casing for a word"""
casing = 'other'
numDigits = 0
for char in word:
if char.isdigit():
numDigits += 1
digitFraction = numDigits / float(len(word))
if word.isdigit(): #Is a digit
casing = 'numeric'
elif digitFraction > 0.5:
casing = 'mainly_numeric'
elif word.islower(): #All lower case
casing = 'allLower'
elif word.isupper(): #All upper case
casing = 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
casing = 'initialUpper'
elif numDigits > 0:
casing = 'contains_digit'
return casing
def getCasingVocab():
entries = ['PADDING', 'other', 'numeric', 'mainly_numeric', 'allLower', 'allUpper', 'initialUpper', 'contains_digit']
return {entries[idx]:idx for idx in range(len(entries))}
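# Small sketch of the casing feature in action; every value getCasing can
# return is a key of the vocabulary built by getCasingVocab.
def exampleCasing():
    casing2Idx = getCasingVocab()
    for token in ['Paris', 'NASA', 'hello', '2019', 'A380']:
        # e.g. 'Paris' -> 'initialUpper', '2019' -> 'numeric', 'A380' -> 'mainly_numeric'
        print(token, getCasing(token), casing2Idx[getCasing(token)])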
def createMatrices(sentences, mappings, padOneTokenSentence):
data = []
numTokens = 0
numUnknownTokens = 0
missingTokens = FreqDist()
paddedSentences = 0
for sentence in sentences:
row = {name: [] for name in list(mappings.keys())+['raw_tokens']}
for mapping, str2Idx in mappings.items():
if mapping not in sentence:
continue
for entry in sentence[mapping]:
if mapping.lower() == 'tokens':
numTokens += 1
idx = str2Idx['UNKNOWN_TOKEN']
if entry in str2Idx:
idx = str2Idx[entry]
elif entry.lower() in str2Idx:
idx = str2Idx[entry.lower()]
elif wordNormalize(entry) in str2Idx:
idx = str2Idx[wordNormalize(entry)]
else:
numUnknownTokens += 1
missingTokens[wordNormalize(entry)] += 1
row['raw_tokens'].append(entry)
elif mapping.lower() == 'characters':
idx = []
for c in entry:
if c in str2Idx:
idx.append(str2Idx[c])
else:
idx.append(str2Idx['UNKNOWN'])
else:
idx = str2Idx[entry]
row[mapping].append(idx)
if len(row['tokens']) == 1 and padOneTokenSentence:
paddedSentences += 1
for mapping, str2Idx in mappings.items():
if mapping.lower() == 'tokens':
row['tokens'].append(mappings['tokens']['PADDING_TOKEN'])
row['raw_tokens'].append('PADDING_TOKEN')
elif mapping.lower() == 'characters':
row['characters'].append([0])
else:
row[mapping].append(0)
data.append(row)
if numTokens > 0:
logging.info("Unknown-Tokens: %.2f%%" % (numUnknownTokens/float(numTokens)*100))
return data
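# Shape sketch of one row produced by createMatrices (the values are invented):
#   {'tokens': [17, 4, 903], 'casing': [6, 4, 4],
#    'characters': [[34, 12, ...], ...], 'POS': [3, 1, 2],
#    'raw_tokens': ['The', 'cat', 'sat']}
# i.e. parallel per-token index lists, one list per mapping plus the raw text.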
def createPklFiles(datasetFiles, mappings, cols, commentSymbol, valTransformation, padOneTokenSentence):
trainSentences = readCoNLL(datasetFiles[0], cols, commentSymbol, valTransformation)
devSentences = readCoNLL(datasetFiles[1], cols, commentSymbol, valTransformation)
testSentences = readCoNLL(datasetFiles[2], cols, commentSymbol, valTransformation)
extendMappings(mappings, trainSentences+devSentences+testSentences)
charset = {"PADDING":0, "UNKNOWN":1}
for c in " 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,-_()[]{}!?:;#'\"/\\%$`&=*+@^~|":
charset[c] = len(charset)
mappings['characters'] = charset
addCharInformation(trainSentences)
addCasingInformation(trainSentences)
addCharInformation(devSentences)
addCasingInformation(devSentences)
addCharInformation(testSentences)
addCasingInformation(testSentences)
logging.info(":: Create Train Matrix ::")
trainMatrix = createMatrices(trainSentences, mappings, padOneTokenSentence)
logging.info(":: Create Dev Matrix ::")
devMatrix = createMatrices(devSentences, mappings, padOneTokenSentence)
logging.info(":: Create Test Matrix ::")
testMatrix = createMatrices(testSentences, mappings, padOneTokenSentence)
data = {
'trainMatrix': trainMatrix,
'devMatrix': devMatrix,
'testMatrix': testMatrix
}
return data
def extendMappings(mappings, sentences):
sentenceKeys = list(sentences[0].keys())
sentenceKeys.remove('tokens') #No need to map tokens
for sentence in sentences:
for name in sentenceKeys:
if name not in mappings:
mappings[name] = {'O':0} #'O' is also used for padding
for item in sentence[name]:
if item not in mappings[name]:
mappings[name][item] = len(mappings[name])
def getEmbeddings(name):
if not os.path.isfile(name):
download("https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/"+name)
def getLevyDependencyEmbeddings():
"""
Downloads from https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/
the dependency based word embeddings and unzips them
"""
if not os.path.isfile("levy_deps.words.bz2"):
print("Start downloading word embeddings from Levy et al. ...")
os.system("wget -O levy_deps.words.bz2 http://u.cs.biu.ac.il/~yogo/data/syntemb/deps.words.bz2")
print("Start unzip word embeddings ...")
os.system("bzip2 -d levy_deps.words.bz2")
def getReimersEmbeddings():
"""
Downloads from https://www.ukp.tu-darmstadt.de/research/ukp-in-challenges/germeval-2014/
embeddings for German
"""
if not os.path.isfile("2014_tudarmstadt_german_50mincount.vocab.gz"):
print("Start downloading word embeddings from Reimers et al. ...")
os.system("wget https://public.ukp.informatik.tu-darmstadt.de/reimers/2014_german_embeddings/2014_tudarmstadt_german_50mincount.vocab.gz")
if sys.version_info >= (3,):
import urllib.request as urllib2
import urllib.parse as urlparse
from urllib.request import urlretrieve
else:
import urllib2
import urlparse
from urllib import urlretrieve
def download(url, destination=os.curdir, silent=False):
filename = os.path.basename(urlparse.urlparse(url).path) or 'downloaded.file'
def get_size():
meta = urllib2.urlopen(url).info()
        meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all
meta_length = meta_func('Content-Length')
        try:
            return int(meta_length[0])
        except (TypeError, IndexError, ValueError):
            return 0
def kb_to_mb(kb):
return kb / 1024.0 / 1024.0
def callback(blocks, block_size, total_size):
        current = blocks * block_size
        # below their values), and recalculate the mass of
# the by-percent fermentables.
if (len(self._fermfilter('r')) > 0
and len(self._fermfilter('p')) > 0):
iters = 30
if mext > 0.0001:
self._once(notice,
'finding the solution for fixed '
'percentages and masses\n')
for g in range(iters):
fr = self._fermfilter('r')
f_guess = guess(ferms)
fp = [x for x in f_guess if x.cookie == 'p']
# pick largest value to adjust against.
# gut feeling that it'll make things more
# accurate. we have to pick *some* value
# in any case, so might as well pick this one.
f = sorted(fp, key=lambda x: x.get_amount(),
reverse=True)[0]
f_wanted = f.info
allmass = self._fermentables_massof(f_guess)
f_actual = 100.0 * (f.get_amount() / allmass)
diff = f_wanted - f_actual
if abs(diff) < 0.01:
break
nrest = len(fr)
# scale the diff to the "rest" values we're
# adjusting
scale = (fr[0].get_amount() / f_wanted)
if scale < 1:
scale = 1/scale
assert(scale >= 1)
adj = scale * (diff / nrest)
for x in fr:
x.set_amount(x.get_amount() - adj)
if (x.get_amount() < 0.01):
raise PilotError('cannot solve '
'recipe. lower bymass or '
'raise strength')
if g == iters-1:
self._once(warn,
'fermentable "rest" percentages did not '
'converge in ' + str(iters) + ' tries\n')
else:
f_guess = guess(ferms)
self._set_calcguess(f_guess, None)
# turn percentages into masses, calculate expected worters
# for each worter stage
def _dofermentables_and_worters(self):
ferms_bymass = self._fermfilter('m')
if len(self._fermfilter(('r', 'p'))) == 0:
self._set_calcguess(ferms_bymass, None)
l = self.vol_losses
if self.waterguess is None:
vol_loss = _Volume(sum(l.values())
+ self._boiloff()
+ self.mash.evaporation().water())
self.waterguess = _Mass(self._final_volume()
+ vol_loss)
while True:
res = self._doworters_bymass()
diff = (res[Worter.PACKAGE].volume()
- self._final_volume())
if diff < 0.1:
break
self.waterguess = _Mass(self.waterguess - diff)
self.worter = res
return
if self.final_strength is None:
raise PilotError('final strength must be set for '
+ 'by-percent fermentables')
# account for "unknown" extract losses from stage to stage
# ("unknown" = not solved analytically)
for _ in range(10):
self._dofermentables_bypercent(ferms_bymass)
res = self._doworters_bystrength()
extoff = res[Worter.MASH].extract()
if abs(extoff.valueas(Mass.G)) < 1:
break
self.fermentable_extadj += extoff
else:
raise Exception('unable to calculate fermentables')
self.waterguess = res[Worter.MASH].water() + self._boiladj
self.worter = res
def _dofermentablestats(self):
assert('losses' in self.results)
allmass = self._fermentables_massof(self.fermentables)
stats = {}
# Calculate amount of extract -- extract-equivalent --
# that makes it into packaging. We start with the amount
# of extract we know we're getting, and then subtract
# the normalized share at each stage until we're at
# packaging. Since the mash is accounted
# for in configuration, we only need to handle the
# losses in the kettle and fermentor.
#
# XXX: it's somewhat inelegant because fermentables
# are added as timespecs, while losses are given
# as worters.
def extract_predicted(f, stage):
f_ext = self._fermentable_extract(f)
tss = Timespec.stages
losses = self.results['losses']
worters = self.worter
def stageloss(stagecmp, wname):
v = 0
if tss.index(stage) < tss.index(stagecmp):
stageloss = losses[wname].extract()
stageext = worters[wname].extract()
v = stageloss * (f_ext/stageext)
return _Mass(v)
f_ext -= stageloss(Timespec.FERMENTOR, Worter.POSTBOIL)
f_ext -= stageloss(Timespec.PACKAGE, Worter.FERMENTOR)
return f_ext
for f in self.fermentables:
stage = timespec.timespec2stage[f.time.__class__]
fs = {}
fs['percent'] = 100.0 * (f.get_amount() / allmass)
fs['extract_predicted'] = extract_predicted(f, stage)
fs['extract_theoretical'] \
= self._fermentable_extract(f, theoretical=True)
stats.setdefault(stage, {})
stats[stage].setdefault('percent', 0)
stats[stage].setdefault('amount', _Mass(0))
stats[stage].setdefault('extract_predicted', _Mass(0))
stats[stage].setdefault('extract_theoretical', _Mass(0))
stats[stage]['percent'] += fs['percent']
stats[stage]['amount'] += f.get_amount()
stats[stage]['extract_predicted'] \
+= fs['extract_predicted']
stats[stage]['extract_theoretical'] \
+= fs['extract_theoretical']
f.info = fs
allstats = {}
allstats['amount'] \
= _Mass(sum([stats[x]['amount'] for x in stats]))
allstats['extract_predicted'] \
= _Mass(sum([stats[x]['extract_predicted'] for x in stats]))
allstats['extract_theoretical'] \
= _Mass(sum([stats[x]['extract_theoretical']
for x in stats]))
self.results['fermentable_stats_perstage'] = stats
self.results['fermentable_stats_all'] = allstats
# sort fermentables by 3 criteria (from most to least dominant)
# 1) mass
# 2) extract mass
# 3) alphabet
f = self.fermentables
f = sorted(f, key=lambda x: x.obj.name, reverse=True)
f = sorted(f, key=lambda x: x.info['extract_predicted'])
f = sorted(f, key=lambda x: x.get_amount())
self.fermentables = f[::-1]
def _domash(self):
mf = self._fermentables_bytimespec(Timespec.MASH)
self.mash.set_fermentables(mf)
self.results['mash'] \
= self.mash.do_mash(getparam('ambient_temp'),
self._reference_temp(), self.worter[Worter.MASH],
self._grain_absorption())
self.results['mash_conversion'] = {}
theor_extract = self._extract_bytimespec(Worter.MASH,
theoretical=True)
watermass = self.results['mash']['mashstep_water'].water()
for x in range(5, 100+1, 5):
extract = theor_extract * x/100.0
fw = 100 * (extract / (extract + watermass))
self.results['mash_conversion'][x] = _Strength(fw)
w = copy.deepcopy(self.results['mash']['mashstep_water'])
w.adjust_extract(self._extract_bytimespec(Timespec.MASH))
w -= self.mash.evaporation()
rloss = (self._fermentables_massof(mf)*self._grain_absorption()
+ getparam('mlt_loss'))
w.adjust_volume(_Volume(-rloss))
if w.volume() < 0:
raise PilotError('mashin ratio ridiculously low')
self.results['mash_first_runnings'] = w
def _set_calcguess(self, ferms, hd):
if ferms is not None:
self.fermentables = ferms
if hd is not None:
self.hopsdrunk = hd
# update volume loss caches
l = {}
f = self._fermentables_bytimespec(Timespec.MASH) \
+ self._fermentables_bytimespec(Timespec.POSTMASH)
m = self._fermentables_massof(f)
l[Worter.MASH] = _Volume(m*self._grain_absorption()
+ getparam('mlt_loss'))
hd = self.hopsdrunk
l[Worter.POSTBOIL] = hd['kettle'] + getparam('kettle_loss')
l[Worter.FERMENTOR] = hd['fermentor'] \
+ getparam('fermentor_loss')
self.vol_losses = l
# bymass: calculate worters based on a mashwater input. We use
# this both for figuring out the strength of the wort produced
# from a bymass-brew, and also after percentage&strength
# has been resolved into masses.
def _doworters_bymass(self):
res = {}
l = self.vol_losses
w = Worter(water = self.waterguess - self._boiladj)
res[Worter.MASH] = copy.deepcopy(w)
# worter at PREBOIL is mash plus extract minus
# evaporation minus volume loss
w.adjust_extract(self._extract_bytimespec(Timespec.MASH))
w.adjust_extract(self._extract_bytimespec(Timespec.POSTMASH))
w -= self.mash.evaporation()
# We account for the extract loss already in the mash
# efficiency (configured). Therefore, coupled with the
# loss volume we can calculate the amount of water lost,
# and just subtract the water.
maxext = sum([ self._extract_bytimespec(x, theoretical=True)
for x in [Timespec.MASH, Timespec.POSTMASH]], _Mass(0))
extdiff = _Mass(maxext - w.extract())
stren = brewutils.solve_strength(extdiff, l[Worter.MASH])
w_mashloss = Worter()
w_mashloss.set_volstrength(l[Worter.MASH], stren)
w.adjust_water(-w_mashloss.water())
res[Worter.PREBOIL] = copy.deepcopy(w)
w.adjust_water(_Mass(-self._boiloff()))
w.adjust_extract(self._extract_bytimespec(Timespec.KETTLE))
res[Worter.POSTBOIL] = copy.deepcopy(w)
w.adjust_volume(-l[Worter.POSTBOIL])
w.adjust_extract(self._extract_bytimespec(Timespec.FERMENTOR))
w.adjust_water(self._boiladj)
res[Worter.FERMENTOR] = copy.deepcopy(w)
w.adjust_volume(-l[Worter.FERMENTOR])
w.adjust_extract(self._extract_bytimespec(Timespec.PACKAGE))
res[Worter.PACKAGE] = copy.deepcopy(w)
return res
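        # Stage-by-stage sketch of the bookkeeping above:
        #   MASH -> PREBOIL: add mash/postmash extract, subtract mash
        #     evaporation and the water lost to grain absorption + MLT deadspace
        #   PREBOIL -> POSTBOIL: subtract boiloff, add kettle extract
        #   POSTBOIL -> FERMENTOR: subtract kettle losses, add fermentor
        #     extract, add back any boil-dilution water (_boiladj)
        #   FERMENTOR -> PACKAGE: subtract fermentor losses, add package extract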
# bystrength: start from final volume / strength, calculate
# backwards
def _doworters_bystrength(self):
res = {}
l = self.vol_losses
w = Worter()
w.set_volstrength(self._final_volume(), self.final_strength)
res[Worter.PACKAGE] = copy.deepcopy(w)
w.adjust_extract(-self._extract_bytimespec(Timespec.PACKAGE))
w.adjust_volume(l[Worter.FERMENTOR])
res[Worter.FERMENTOR] = copy.deepcopy(w)
w.adjust_extract(-self._extract_bytimespec(Timespec.FERMENTOR))
w.adjust_volume(l[Worter.POSTBOIL])
w.adjust_water(-self._boiladj)
res[Worter.POSTBOIL] = copy.deepcopy(w)
w.adjust_extract(-self._extract_bytimespec(Timespec.KETTLE))
w.adjust_water(_Mass(self._boiloff()))
res[Worter.PREBOIL] = copy.deepcopy(w)
actext = (self._extract_bytimespec(Timespec.MASH)
+ self._extract_bytimespec(Timespec.POSTMASH))
w.adjust_extract(-actext)
w += self.mash.evaporation()
# We account for the extract loss already in the mash
# efficiency (configured). Therefore, coupled with the
# loss volume we can calculate the amount of water lost,
# and just subtract the water.
maxext = sum([self._extract_bytimespec(x, theoretical=True)
for x in [Timespec.MASH, Timespec.POSTMASH]], _Mass(0))
extdiff = _Mass(maxext - actext)
stren = brewutils.solve_strength(extdiff, l[Worter.MASH])
w_mashloss = Worter()
w_mashloss.set_volstrength(l[Worter.MASH], stren)
w.adjust_water(w_mashloss.water())
res[Worter.MASH] = copy.deepcopy(w)
return res
def _dohops(self):
allhop = []
# ok, um, so the Tinseth formula uses postboil volume ...
wrt = self.worter
v_post = wrt[Worter.POSTBOIL].volume()
# ... and average gravity during the boil. *whee*
t = (wrt[Worter.PREBOIL].strength().valueas(Strength.SG)
+ wrt[Worter.POSTBOIL].strength().valueas(Strength.SG))
sg = Strength(t/2, Strength.SG)
#
        # Do the hop calculations. We edit the original input
        # objects; that's fine, since we only use the original
        # values on each iteration. Saves a big deepcopy.
#
# calculate IBU produced by "bymass" hops
for hs, h in [(x, x.obj) for x in self.hops if x.cookie == 'm']:
ibu = h.mass2IBU(sg, v_post, hs.time, hs.get_amount())
hs.info = ibu
# calculate mass of "byIBU" hops
for hs, h in [(x, x.obj) for x in self.hops if x.cookie == 'i']:
mass = h.IBU2mass(sg, v_post, hs.time, hs.info)
hs.set_amount(mass)
allhop = self.hops[:]
totibus = sum([x.info for x in allhop])
if self.hops_recipeIBUBUGU is not None:
x = self.hops_recipeIBUBUGU
h, t, v = x['hop'], x['time'], x['value']
if x['type'] == 'BUGU':
stren = self.worter[Worter.PACKAGE].strength()
v *= stren.valueas(stren.SG_PTS)
missibus = v - totibus
if missibus <= 0:
raise PilotError('recipe IBU/BUGU exceeded')
mass = h.IBU2mass(sg, v_post, t, missibus)
totibus += missibus
hs = Addition(h, mass, None, t, cookie = 'r')
hs.info = missibus
allhop.append(hs)
# calculate amount of wort that hops will drink
hd = {x: 0 for x in self.hopsdrunk}
packagedryhopvol = 0
for h in allhop:
hop = h.obj
if isinstance(h.time, timespec.Fermentor):
hd['fermentor'] += hop.absorption(h.get_amount())
elif isinstance(h.time, timespec.Package):
hd['package'] += hop.absorption(h.get_amount())
packagedryhopvol += hop.volume(h.get_amount())
else:
# XXX
hd['kettle'] += hop.absorption(h.get_amount())
hopsdrunk = {x: _Volume(hd[x]) for x in hd}
hopsdrunk['volume'] = _Volume(packagedryhopvol)
self._set_calcguess(None, hopsdrunk)
self.results['hops'] = allhop
totmass = _Mass(sum(x.get_amount() for x in allhop))
self.results['hop_stats'] = {'mass': totmass, 'ibu' : totibus}
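        # Standalone sketch of the Tinseth math the hop objects are expected
        # to implement (my paraphrase of the published formula, not
        # necessarily this codebase's exact implementation):
        #   bigness   = 1.65 * 0.000125 ** (sg - 1)            # sg e.g. 1.050
        #   boil_fact = (1 - math.exp(-0.04 * minutes)) / 4.15
        #   IBU       = aa_frac * mass_g * 1000 / volume_l * bigness * boil_fact
        # e.g. 30 g of 10% AA hops, 60 min, in 25 l of 1.050 wort:
        #   bigness ~ 1.053, boil_fact ~ 0.219 -> about 28 IBU.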
def _dotimers(self):
# sort the timerable additions
timers = self.results['hops'] + self.opaques
# include boiltime fermentables under timers.
# XXX: should be user-configurable
timers += [x for x in self.fermentables
if isinstance(x.time, timespec.Boil)]
timers = sorted(timers, key=lambda x: x.time, reverse=True)
# calculate "timer" field values
prevtype = None
timer = 0
boiltimer = _Duration(0)
for t in reversed(timers):
time = t.time
if prevtype is None or not isinstance(time, prevtype):
timer = _Duration(0)
prevval = None
prevtype = time.__class__
if isinstance(time, (timespec.Mash, timespec.Fermentor,
timespec.Package)):
t.timer = time
if isinstance(time, timespec.Whirlpool):
if prevval is not None \
and prevval[0] == time.temp:
if prevval[1] == time.time:
t.timer = '=='
else:
v = time.time - prevval[1]
t.timer = v
else:
t.timer = time.time
prevval = (time.temp, time.time)
if isinstance(time, timespec.Boil):
cmpval = time.spec
thisval = '=='
if cmpval != timer:
thisval = str(cmpval - timer)
timer = cmpval
t.timer = thisval
boiltimer = timer
# if timers don't start from start of boil, add an opaque
# to specify initial timer value
if ((self.boiltime is not None and self.boiltime > 0)
and boiltimer != self.boiltime):
sb = Addition(Opaque(''), '', None,
timespec.Boil('boiltime'))
sb.timer = self.boiltime - boiltimer
timers = sorted([sb] + timers,
key=lambda x: x.time, reverse=True)
self.results['timer_additions'] = timers
def _doattenuations(self, attenuation = (60, 101, 5)):
res = []
fin = self.worter[Worter.PACKAGE].strength()
for x in range(*attenuation):
t = fin.attenuate_bypercent(x)
res.append((x, t['ae'], t['abv']))
self.results['attenuation'] = res
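        # Reference arithmetic for the attenuation table (standard brewing
        # rule-of-thumb, shown as a sketch rather than the Strength class's
        # exact model): at x% apparent attenuation,
        #   ae  = og - (x / 100) * (og - 1.0)     # apparent extract, in SG
        #   abv ~ (og - ae) * 131.25
        # so a 1.050 wort at 75% finishes near 1.0125 and roughly 4.9% ABV.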
def _doboiladj(self):
bvmax = getparam('boilvol_max')
if bvmax is None:
return
boiltemp = _Temperature(100)
pbwort = self.worter[Worter.PREBOIL]
pbvol = pbwort.volume(boiltemp)
diff = pbvol - bvmax
if diff > 0:
adj = brewutils.water_voltemp_to_mass(diff, boiltemp)
self._boiladj += adj
def _checkinputs(self):
if self._final_volume() is None:
raise PilotError("final volume is not set")
if len(self.needinherent) > 0 and self.volume_inherent is None:
raise PilotError("recipe has absolute amounts but "
+ "no inherent volume: "
+ ','.join(self.needinherent))
def calculate(self):
sysparams.checkset()
if self._calculatestatus:
raise PilotError("you can calculate() a recipe once")
self._calculatestatus += 1
self._checkinputs()
s = self._scale(_Mass(1))
if abs(s - 1.0) > .0001:
notice('Scaling recipe ingredients by a factor of '
+ '{:.4f}'.format(s) + '\n')
self._dofermentables_preprocess()
# ok, so the problem is that the amount of hops affects the
        # kettle crud, meaning we have non-constant loss between
        # postboil and package
<filename>predictit/contract.py
import datetime as dt
from collections import namedtuple
from decimal import Decimal
from .utils import concurrent_get
class Contract(object):
"""Interact with PredictIt contracts
This class handles contract-level transactions with PredictIt,
including trading and viewing order books and currently owned
shares. Initializing this object will call the `update_all` method,
contacting PredictIt four times.
Parameters
----------
account : predictit.Account
contract_id : int
"""
def __init__(self, account, contract_id):
if not isinstance(contract_id, int):
raise TypeError("Contract ID must be int, not {}".format(type(contract_id)))
else:
self._contract_id = contract_id
self.account = account
self.update_all()
def __repr__(self):
repr_fmt = "Contract(contract_id={}, contract_name={})"
return repr_fmt.format(self.contract_id, self.contract_name)
@property
def contract_id(self):
"""int"""
return self._contract_id
def _post_order(self, trade_type, price, quantity):
# Trade types
# 0 = Buy no
# 1 = Buy yes
# 2 = Sell no
# 3 = Sell yes
        int_price = int(round(100 * price))  # round to dodge float truncation (100 * 0.57 -> 56.999...)
if not 1 <= int_price <= 99:
raise ValueError("Price must be between $0.01 and $0.99")
if not isinstance(quantity, int):
raise TypeError("Quantity must be an int, not {}".format(type(quantity)))
if quantity <= 0:
raise ValueError("Quantity must be positive, not {}".format(quantity))
        if self.my_prediction is not None:
            if self.my_prediction and trade_type in (0, 2):
                raise ValueError("Can't trade No shares while owning Yes shares")
            elif not self.my_prediction and trade_type in (1, 3):
                raise ValueError("Can't trade Yes shares while owning No shares")
if quantity > self.shares_owned and trade_type in (2, 3):
raise ValueError(
"Can't sell {} shares; only own {}".format(quantity, self.shares_owned)
)
if trade_type in (0, 1):
# TODO: raise error if market risk would exceed cash balance
if price * quantity + self.investment > 850:
raise ValueError("Cannot invest more than $850 in a contract")
url = "https://www.predictit.org/api/Trade/SubmitTrade"
data = {
"pricePerShare": str(int_price),
"quantity": str(quantity),
"contractId": str(self.contract_id),
"tradeType": str(trade_type),
}
resp = self.account.session.post(url, data=data)
resp.raise_for_status()
return resp
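    # Hedged usage sketch (the contract id is hypothetical): assuming an
    # authenticated `account`,
    #     contract = Contract(account, 12345)
    #     contract.buy_yes(0.42, 10)   # -> _post_order(1, 0.42, 10)
    # places a limit order for 10 Yes shares at $0.42 using the trade-type
    # codes listed at the top of _post_order.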
def buy_no(self, price, quantity):
"""Place an order to buy No shares in this contract
Posts to https://www.predictit.org/api/Trade/SubmitTrade to
place an order to buy `quantity` No shares at `price` dollars
per share. Checks `self.api.cash_balance` to confirm that user
has enough cash-on-hand to post the order and checks
`self.my_prediction` to confirm that user doesn't own Yes
shares.
Parameters
----------
price : numeric between zero and one, exclusive
The price per share
quantity : int greater than zero
The number of No shares to buy
Returns
-------
requests.Response
"""
return self._post_order("0", price, quantity)
def buy_yes(self, price, quantity):
"""Place an order to buy Yes shares in this contract
Posts to https://www.predictit.org/api/Trade/SubmitTrade to
place an order to buy `quantity` Yes shares at `price` dollars
per share. Checks `self.api.cash_balance` to confirm that user
has enough cash-on-hand to post the order and checks
`self.my_prediction` to confirm that user doesn't own No shares.
Parameters
----------
price : numeric between zero and one, exclusive
The price per share
quantity : int greater than zero
The number of Yes shares to buy
Returns
-------
requests.Response
"""
return self._post_order("1", price, quantity)
def sell_no(self, price, quantity):
"""Place an order to sell No shares in this contract
Posts to https://www.predictit.org/api/Trade/SubmitTrade to
place an order to sell `quantity` No shares at `price` dollars
per share. Checks `self.shares_owned` to confirm that user owns
at least `quantity` shares and checks `self.my_prediction` to
confirm that user doesn't own Yes shares.
Parameters
----------
price : numeric between zero and one, exclusive
The price per share
quantity : int greater than zero
The number of No shares to sell
Returns
-------
requests.Response
"""
return self._post_order("2", price, quantity)
def sell_yes(self, price, quantity):
"""Place an order to sell Yes shares in this contract
Posts to https://www.predictit.org/api/Trade/SubmitTrade to
        place an order to sell `quantity` Yes shares at `price` dollars
per share. Checks `self.shares_owned` to confirm that user owns
at least `quantity` shares and checks `self.my_prediction` to
confirm that user doesn't own No shares.
Parameters
----------
price : numeric between zero and one, exclusive
The price per share
quantity : int greater than zero
The number of Yes shares to sell
Returns
-------
requests.Response
"""
return self._post_order("3", price, quantity)
def update_order_book(self):
"""Update the order book for this contract
Requests https://www.predictit.org/api/Trade/{self.contract_id}/OrderBook
to update the `no_asks`, `yes_asks`, `no_bids`, and `yes_bids`
properties.
Returns
-------
requests.Response
The response from requesting the order book for this
contract from PredictIt
"""
url_fmt = "https://www.predictit.org/api/Trade/{s.contract_id}/OrderBook"
resp = self.account.session.get(url_fmt.format(s=self), timeout=5)
self._update_order_book(resp)
return resp
def _update_order_book(self, resp):
resp.raise_for_status()
orders = resp.json(parse_float=Decimal)
self._order_book = {}
self._order_book[1] = [
(n["pricePerShare"], n["quantity"]) for n in orders["yesOrders"]
]
self._order_book[0] = [
(n["pricePerShare"], n["quantity"]) for n in orders["noOrders"]
]
# PredictIt sends us the order book already sorted, but we can't
# rely on that, and getting the best available order is useful
# enough that I don't want to need boilerplate every time I get
# it.
self._order_book[1].sort()
self._order_book[0].sort()
return resp
@property
def no_asks(self):
"""list of (decimal.Decimal, int) tuples : Open No asks on order book
List of (price, quantity) tuples sorted from best price to
worst. These tuples can be unpacked and passed to the `buy_no`
method.
"""
return [(1 - p, q) for p, q in self._order_book[0]]
@property
def yes_asks(self):
"""list of (decimal.Decimal, int) tuples : Open Yes asks on order book
List of (price, quantity) tuples sorted from best price to
worst. These tuples can be unpacked and passed to the `buy_yes`
method.
"""
return self._order_book[1].copy()
@property
def no_bids(self):
"""list of (decimal.Decimal, int) tuples : Open No bids on order book
List of (price, quantity) tuples sorted from best price to
worst. These tuples can be unpacked and passed to the `sell_no`
method.
"""
return [(1 - p, q) for p, q in self._order_book[1]]
@property
def yes_bids(self):
"""list of (decimal.Decimal, int) tuples : Open Yes bids on order book
List of (price, quantity) tuples sorted from best price to
worst. These tuples can be unpacked and passed to the
`sell_yes` method.
"""
return self._order_book[0].copy()
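    # Sketch of the complement rule used by the No-side views: one No share
    # at price p mirrors one Yes share at 1 - p (the two sides of a binary
    # contract sum to $1), so the No books are derived as (1 - p, q) from
    # the stored order book instead of being fetched separately.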
def update_shares(self):
"""Update user's currently owned shares for this contract
Requests https://www.predictit.org/api/Profile/contract/{self.contract_id}/Shares
to update the `my_prediction`, `shares_owned`, `investment`, and
`mean_price_per_share` properties.
Returns
-------
requests.Response
The response from requesting the currently owned shares for
this contract from PredictIt
"""
url_fmt = (
"https://www.predictit.org/api/Profile/contract/{s.contract_id}/Shares"
)
resp = self.account.session.get(url_fmt.format(s=self), timeout=5)
self._update_shares(resp)
return resp
def _update_shares(self, resp):
resp.raise_for_status()
shares_info = resp.json(parse_float=Decimal)
self._contract_name = shares_info["contractName"]
shares = shares_info["shares"]
if not shares:
self._my_prediction = None
else:
self._my_prediction = shares[0]["predictionType"]
self._shares_owned = self._investment = 0
for share in shares:
share["dateExecuted"] = dt.datetime.strptime(
share["dateExecuted"], "%Y-%m-%dT%H:%M:%S.%f"
)
self._shares_owned += share["sharesOwned"]
self._investment += share["pricePerShare"] * share["sharesOwned"]
self.share_details = shares
return resp
@property
def contract_name(self):
"""str"""
return self._contract_name
@property
def my_prediction(self):
"""NoneType or int (0 or 1) : 0 or 1 if user owns shares, else None
If the user does not own any shares, then `my_prediction` is
None. Otherwise, it corresponds to the type of share the user
owns (0 for No, 1 for Yes). Remember, you can't simultaneously
own No and Yes shares.
"""
return self._my_prediction
@property
def shares_owned(self):
"""int : The number of shares the user owns in this contract"""
return self._shares_owned
@property
def investment(self):
"""decimal.Decimal : Dollars user spent to buy currently owned shares"""
return self._investment
@property
def mean_price_per_share(self):
"""decimal.Decimal"""
return self.investment / self.shares_owned
def update_my_orders(self):
"""Update user's currently owned shares for this contract
Requests https://www.predictit.org/api/Profile/contract/{self.contract_id}/Offers
to update the `have_open_orders`, `my_no_bids`,`my_yes_bids`,
`my_no_asks`, and `my_yes_asks` properties.
Returns
-------
requests.Response
            The response from requesting the user's open orders for this
            contract from PredictIt
"""
url_fmt = (
"https://www.predictit.org/api/Profile/contract/{s.contract_id}/Offers"
)
resp = self.account.session.get(url_fmt.format(s=self), timeout=5)
self._update_my_orders(resp)
return resp
def _update_my_orders(self, resp):
resp.raise_for_status()
my_orders = resp.json(parse_float=Decimal)
field_names = [
"order_id",
"contract_id",
"price",
"original_quantity",
"remaining_quantity",
"date_created",
"is_processed",
]
OwnOrder = namedtuple("OwnOrder", field_names)
self._my_orders = {k: [] for k in range(4)}
for raw_order in my_orders["offers"]:
order_date = dt.datetime.strptime(
raw_order["dateCreated"], "%Y-%m-%dT%H:%M:%S.%f"
)
order = OwnOrder(
order_id=raw_order["offerId"],
contract_id=raw_order["contractId"],
price=raw_order["pricePerShare"],
original_quantity=raw_order["quantity"],
remaining_quantity=raw_order["remainingQuantity"],
date_created=order_date,
is_processed=raw_order["isProcessed"],
)
self._my_orders[raw_order["tradeType"]].append(order)
return resp
@property
def have_open_orders(self):
"""bool : True if user has any open orders in this contract"""
return any(self._my_orders.values())
@property
def my_no_bids(self):
"""list of OwnOrder namedtuples
OwnOrder namedtuples have the following fields, in order:
order_id : int
contract_id : int
        price : decimal.Decimal
        original_quantity : int
        remaining_quantity : int
        date_created : datetime.datetime
        is_processed : bool
        """
        return self._my_orders[0].copy()
they are
specified by a function or dictionary mapping labels to colors; this
option is incompatible with ``edge_color`` and ``edge_colors``.
- ``edge_size`` -- float (default: ``0.02``)
- ``edge_size2`` -- float (default: ``0.0325``); used for
:class:`~sage.plot.plot3d.tachyon.Tachyon` sleeves
- ``pos3d`` -- a position dictionary for the vertices
- ``layout``, ``iterations``, ... -- layout options; see :meth:`layout`
- ``engine`` -- string (default: ``'threejs'``); the renderer to use among:
* ``'threejs'``: interactive web-based 3D viewer using JavaScript
and a WebGL renderer
* ``'jmol'``: interactive 3D viewer using Java
* ``'tachyon'``: ray tracer generating a static PNG image
- ``xres`` -- resolution
- ``yres`` -- resolution
- ``**kwds`` -- passed on to the rendering engine
EXAMPLES::
sage: G = graphs.CubeGraph(5)
sage: G.plot3d(iterations=500, edge_size=None, vertex_size=0.04) # long time
Graphics3d Object
We plot a fairly complicated Cayley graph::
sage: A5 = AlternatingGroup(5); A5
Alternating group of order 5!/2 as a permutation group
sage: G = A5.cayley_graph()
sage: G.plot3d(vertex_size=0.03, edge_size=0.01, vertex_colors={(1,1,1): list(G)}, bgcolor=(0,0,0), color_by_label=True, iterations=200) # long time
Graphics3d Object
Some :class:`~sage.plot.plot3d.tachyon.Tachyon` examples::
sage: D = graphs.DodecahedralGraph()
sage: P3D = D.plot3d(engine='tachyon')
sage: P3D.show() # long time
::
sage: G = graphs.PetersenGraph()
sage: G.plot3d(engine='tachyon', vertex_colors={(0,0,1): list(G)}).show() # long time
::
sage: C = graphs.CubeGraph(4)
sage: C.plot3d(engine='tachyon', edge_colors={(0,1,0): C.edges(sort=False)}, vertex_colors={(1,1,1): list(C)}, bgcolor=(0,0,0)).show() # long time
::
sage: K = graphs.CompleteGraph(3)
sage: K.plot3d(engine='tachyon', edge_colors={(1,0,0): [(0,1,None)], (0,1,0): [(0,2,None)], (0,0,1): [(1,2,None)]}).show() # long time
A directed version of the dodecahedron
::
sage: D = DiGraph({0: [1, 10, 19], 1: [8, 2], 2: [3, 6], 3: [19, 4], 4: [17, 5], 5: [6, 15], 6: [7], 7: [8, 14], 8: [9], 9: [10, 13], 10: [11], 11: [12, 18], 12: [16, 13], 13: [14], 14: [15], 15: [16], 16: [17], 17: [18], 18: [19], 19: []})
sage: D.plot3d().show() # long time
::
sage: P = graphs.PetersenGraph().to_directed()
sage: from sage.plot.colors import rainbow
sage: R = rainbow(P.size(), 'rgbtuple')
sage: edge_colors = {R[i]: [e] for i, e in enumerate(P.edge_iterator())}
sage: P.plot3d(engine='tachyon', edge_colors=edge_colors).show() # long time
::
sage: G=Graph({'a':['a','b','b','b','e'],'b':['c','d','e'],'c':['c','d','d','d'],'d':['e']},sparse=True)
sage: G.show3d()
Traceback (most recent call last):
...
NotImplementedError: 3D plotting of multiple edges or loops not implemented
Using the ``partition`` keyword::
sage: G = graphs.WheelGraph(7)
sage: G.plot3d(partition=[[0], [1, 2, 3, 4, 5, 6]])
Graphics3d Object
TESTS::
sage: G = DiGraph({0: {1: 'a', 2: 'a'}, 1: {0: 'b'}, 2: {0: 'c'}})
sage: p = G.plot3d(edge_labels=True, color_by_label={'a': 'yellow', 'b': 'cyan'})
sage: s = p.x3d_str()
This 3D plot contains four yellow objects (two cylinders and
two cones), two black objects and 2 cyan objects::
sage: s.count("Material diffuseColor='1.0 1.0 0.0'")
4
sage: s.count("Material diffuseColor='0.0 0.0 0.0'")
2
sage: s.count("Material diffuseColor='0.0 1.0 1.0'")
2
.. SEEALSO::
- :meth:`plot`
- :meth:`graphviz_string`
"""
from . import graph_plot
        layout_options = {key: kwds[key] for key in kwds if key in graph_plot.layout_options}
        kwds = {key: kwds[key] for key in kwds if key not in graph_plot.layout_options}
if pos3d is None:
pos3d = self.layout(dim=3, **layout_options)
if self.has_multiple_edges() or self.has_loops():
raise NotImplementedError("3D plotting of multiple edges or loops not implemented")
if engine in ['threejs', 'jmol']:
from sage.plot.plot3d.all import sphere, line3d, arrow3d, text3d
from sage.plot.plot3d.texture import Texture
kwds.setdefault('aspect_ratio', [1, 1, 1])
if vertex_colors is None:
if 'partition' in kwds:
from sage.plot.colors import rainbow
partition = kwds['partition']
l = len(partition)
R = rainbow(l)
vertex_colors = {R[i]: partition[i] for i in range(l)}
else:
vertex_colors = {(1,0,0) : list(self)}
if color_by_label:
if edge_colors is None:
# do the coloring
edge_colors = self._color_by_label(format=color_by_label)
elif edge_colors is None:
edge_colors = {(0,0,0) : self.edges(sort=False)}
# by default turn off the frame
if 'frame' not in kwds:
kwds['frame'] = False
# by default make the background given by bgcolor
if 'background' not in kwds:
kwds['background'] = bgcolor
try:
graphic = 0
for color in vertex_colors:
texture = Texture(color=color, ambient=0.1, diffuse=0.9, specular=0.03)
for v in vertex_colors[color]:
if vertex_labels:
graphic += text3d(repr(v), pos3d[v])
else:
graphic += sphere(center=pos3d[v], size=vertex_size, texture=texture, **kwds)
if self._directed:
for color in edge_colors:
for u, v, l in edge_colors[color]:
graphic += arrow3d(pos3d[u], pos3d[v], radius=edge_size, color=color, closed=False, **kwds)
else:
for color in edge_colors:
texture = Texture(color=color, ambient=0.1, diffuse=0.9, specular=0.03)
for u, v, l in edge_colors[color]:
graphic += line3d([pos3d[u], pos3d[v]], radius=edge_size, texture=texture, closed=False, **kwds)
return graphic
except KeyError:
raise KeyError("you have not specified positions for all the vertices")
elif engine == 'tachyon':
TT, pos3d = tachyon_vertex_plot(self, bgcolor=bgcolor, vertex_colors=vertex_colors,
vertex_size=vertex_size, pos3d=pos3d, **kwds)
if color_by_label:
if edge_colors is None:
# do the coloring
edge_colors = self._color_by_label(format=color_by_label)
if edge_colors is None:
edge_colors = {(0,0,0) : self.edges(sort=False)}
i = 0
for color in edge_colors:
i += 1
TT.texture('edge_color_%d'%i, ambient=0.1, diffuse=0.9, specular=0.03, opacity=1.0, color=color)
if self._directed:
for u,v,l in edge_colors[color]:
TT.fcylinder((pos3d[u][0], pos3d[u][1], pos3d[u][2]),
(pos3d[v][0], pos3d[v][1], pos3d[v][2]), edge_size, 'edge_color_%d'%i)
TT.fcylinder((0.25 * pos3d[u][0] + 0.75 * pos3d[v][0],
0.25 * pos3d[u][1] + 0.75 * pos3d[v][1],
0.25 * pos3d[u][2] + 0.75 * pos3d[v][2]),
(pos3d[v][0], pos3d[v][1], pos3d[v][2]), edge_size2,'edge_color_%d'%i)
else:
for u, v, l in edge_colors[color]:
TT.fcylinder((pos3d[u][0], pos3d[u][1], pos3d[u][2]),
(pos3d[v][0], pos3d[v][1], pos3d[v][2]), edge_size,'edge_color_%d'%i)
return TT
else:
raise TypeError("rendering engine (%s) not implemented"%engine)
def show3d(self, bgcolor=(1,1,1), vertex_colors=None, vertex_size=0.06,
edge_colors=None, edge_size=0.02, edge_size2=0.0325,
pos3d=None, color_by_label=False,
engine='threejs', **kwds):
"""
Plot the graph and show the resulting plot.
INPUT:
- ``bgcolor`` -- rgb tuple (default: ``(1,1,1)``)
- ``vertex_size`` -- float (default: ``0.06``)
- ``vertex_labels`` -- a boolean (default: ``False``); whether to
display vertices using text labels instead of spheres
- ``vertex_colors`` -- dictionary (default: ``None``); optional
dictionary to specify vertex colors: each key is a color recognizable
by :mod:`~sage.plot.plot3d.tachyon` (rgb tuple (default:
``(1,0,0)``)), and each corresponding entry is a list of vertices. If
a vertex is not listed, it looks invisible on the resulting plot (it
doesn't get drawn).
- ``edge_colors`` -- dictionary (default: ``None``); a dictionary
specifying edge colors: each key is a color recognized by
:mod:`~sage.plot.plot3d.tachyon` (default: ``(0,0,0)``), and each
entry is a list of edges.
- ``color_by_label`` -- a boolean or dictionary or function (default:
``False``) whether to color each edge with a different color according
to its label; the colors are chosen along a rainbow, unless they are
specified by a function or dictionary mapping labels to colors; this
option is incompatible with ``edge_color`` and ``edge_colors``.
- ``edge_size`` -- float (default: ``0.02``)
- ``edge_size2`` -- float (default: ``0.0325``); used for
:class:`~sage.plot.plot3d.tachyon.Tachyon` sleeves
- ``pos3d`` -- a position dictionary for the vertices
- ``layout``, ``iterations``, ... -- layout options; see :meth:`layout`
- ``engine`` -- string (default: ``'threejs'``); the renderer to use among:
* ``'threejs'``: interactive web-based 3D viewer using JavaScript
and a WebGL renderer
* ``'jmol'``: interactive 3D viewer using Java
* ``'tachyon'``: ray tracer generating a static PNG image
- ``xres`` -- resolution
- ``yres`` -- resolution
- ``**kwds`` -- passed on to the rendering engine
EXAMPLES::
sage: G = graphs.CubeGraph(5)
sage: G.show3d(iterations=500, edge_size=None, vertex_size=0.04) # long time
We plot a fairly complicated Cayley graph::
sage: A5 = AlternatingGroup(5); A5
Alternating group of order 5!/2 as a permutation group
sage: G = A5.cayley_graph()
sage: G.show3d(vertex_size=0.03, edge_size=0.01, edge_size2=0.02, vertex_colors={(1,1,1): list(G)}, bgcolor=(0,0,0), color_by_label=True, iterations=200) # long time
Some :class:`~sage.plot.plot3d.tachyon.Tachyon` examples::
sage: D = graphs.DodecahedralGraph()
sage: D.show3d(engine='tachyon') # long time
::
sage: G = graphs.PetersenGraph()
sage: G.show3d(engine='tachyon', vertex_colors={(0,0,1): list(G)}) # long time
::
sage: C = graphs.CubeGraph(4)
sage: C.show3d(engine='tachyon', edge_colors={(0,1,0): C.edges(sort=False)}, vertex_colors={(1,1,1): list(C)}, bgcolor=(0,0,0)) # long time
::
sage: K = graphs.CompleteGraph(3)
sage: K.show3d(engine='tachyon', edge_colors={(1,0,0): [(0, 1, None)], (0, 1, 0): [(0, 2, None)], (0, 0, 1): [(1, 2, None)]}) # long time
"""
self.plot3d(bgcolor=bgcolor, vertex_colors=vertex_colors,
edge_colors=edge_colors, vertex_size=vertex_size, engine=engine,
edge_size=edge_size, edge_size2=edge_size2, pos3d=pos3d,
color_by_label=color_by_label, **kwds).show()
def _keys_for_vertices(self):
"""
Return a function mapping each vertex to a unique identifier.
The identifier is stable iff all vertex labels are unique. It is a
string not starting with a number, as required by ``dot2tex``.
EXAMPLES::
sage: g = graphs.Grid2dGraph(5, 5)
sage: g._keys_for_vertices()
<function ...get_label at ...>
TESTS:
We check that :trac:`21916` is fixed::
sage: g = graphs.PetersenGraph()
sage: key = g._keys_for_vertices()
sage: g.add_vertex("a")
sage: s = g.graphviz_string()
"""
label = {v: 'node_{0}'.format(i) for i, v in enumerate(self)}
def get_label(vertex):
return label[vertex]
return get_label
    ### String representation to be used
:type frames: array-like
:param t_bin: screen refresh rate
:type t_bin: float
:return: tuple of (stim_times, stim_frames)
"""
beg_extrap_val = -10001
end_extrap_val = -10000
idxs_up, idxs_dn = get_rf_ttl_pulses(ttl_signal)
X = np.sort(np.concatenate([idxs_up, idxs_dn]))
Xq = np.arange(frames.shape[0])
# make left and right extrapolations distinctive to easily find later
Tq = np.interp(Xq, X, times, left=beg_extrap_val, right=end_extrap_val)
# uniform spacing outside boundaries of ttl signal
# first values
n_beg = len(np.where(Tq == beg_extrap_val)[0])
if 0 < n_beg < Tq.shape[0]:
Tq[:n_beg] = times[0] - np.arange(n_beg, 0, -1) * t_bin
# end values
n_end = len(np.where(Tq == end_extrap_val)[0])
if 0 < n_end < Tq.shape[0]:
Tq[-n_end:] = times[-1] + np.arange(1, n_end + 1) * t_bin
return Tq, frames
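# Sketch of the sentinel-extrapolation trick above: np.interp clamps rather
# than extrapolates, so out-of-range frames come back as the distinctive
# sentinel values and are then rewritten as uniformly spaced times at t_bin:
#   np.interp([0, 1, 2, 3], [1, 2], [10., 20.], left=-10001, right=-10000)
#   -> array([-10001., 10., 20., -10000.])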
def export_to_alf(session_path, stim_ts, stim_datas, stim_names):
"""
Export extracted stimuli and their presentation times to the session alf directory.
:param session_path: absolute path of a session, i.e. /mnt/data/Subjects/ZM_1887/2019-07-10/001
:param stim_ts:
:param stim_datas:
:param stim_names:
:return: None; instead, saves the following files into `session_path/alf`; the 'xx' part of the
following filenames will be '00', '01', etc., one for each time the specific protocol is
run
orientation/direction selectivity
`_iblcertif_.odsgratings.times.xx.npy`: shape (n_stims, 2); columns are
(stim on time, stim off time)
`_iblcertif_.odsgratings.stims.xx.npy`: shape (n_stims,); value is grating angle
contrast reversal
`_iblcertif_.reversal.times.xx.npy`: shape (n_stims,); stim presentation times
`_iblcertif_.reversal.stims.xx.npy`: shape (n_stims,); stim identity - these values map
into the matrices defined in `_iblrig_taskSettings.raw.json` files, 'VISUAL_STIM_3' ->
'stim_patch_contrasts'
receptive field mapping (sparse noise)
`_iblcertif_.rfmap.times.xx.npy`: shape (n_stims,); stim presentation times
`_iblcertif_.rfmap.stims.xx.npy`: shape (n_stims, y_pix, x_pix); sparse noise stim frames
spontaneous activity:
`_iblcertif_.spontaneous.times.xx.npy`: shape (2,); start and end times of spont activity
task stimuli (gratings of varying locations/contrast)
`_iblcertif_.task.times.xx.npy`: shape (n_stims, 2); columns are
(stim on time, stim off time)
`_iblcertif_.task.stims.xx.npy`: shape (n_stims, 2); columns are
(azimuth in degrees [i.e. left or right], contrast)
"""
counters = {stim_name: 0 for stim_name in np.unique(stim_names)}
for stim_t, stim_data, stim_name in zip(stim_ts, stim_datas, stim_names):
if stim_name == 'spacer':
continue
if stim_name == 'receptive_field_mapping':
stim_short = 'rfmap'
elif stim_name == 'orientation-direction_selectivity':
stim_short = 'odsgratings'
elif stim_name == 'contrast_reversal':
stim_short = 'reversal'
elif stim_name == 'task_stimuli':
stim_short = 'task'
elif stim_name == 'spontaneous_activity':
stim_short = 'spontaneous'
else:
raise ValueError('"%s" is an unknown stimulus protocol' % stim_name)
filename_times = str('_iblcertif_.%s.times.%02i.npy' % (stim_short, counters[stim_name]))
filename_stims = str('_iblcertif_.%s.stims.%02i.npy' % (stim_short, counters[stim_name]))
counters[stim_name] += 1
if not os.path.exists(os.path.join(session_path, 'alf')):
os.mkdir(os.path.join(session_path, 'alf'))
shape_t = stim_t.shape[0]
shape_d = stim_data.shape[0]
if shape_t != 0 and shape_d != 0:
if stim_name == 'contrast_reversal':
# no known bug fix for contrast reversal bonsai bugs; don't save out if problem
if shape_t != shape_d:
continue
else:
assert shape_t == shape_d
if shape_t != 0:
np.save(os.path.join(session_path, 'alf', filename_times), stim_t)
if shape_d != 0:
np.save(os.path.join(session_path, 'alf', filename_stims), stim_data)
# alert cronjobs that there are new files to register to the database by saving empty file
open(os.path.join(session_path, 'register_me.flag'), 'a').close()
def extract_stimulus_info_to_alf(session_path, t_bin=1 / 60, bin_jitter=3, save=True):
"""
Extract the stimulus information stored in metadata and export to alf files. Also checks to
make sure ttl pulses were extracted properly.
Expected files/structure:
- session metadata:
session_path/raw_behavior_data/_iblrig_taskSettings.raw.*.json
- task stimulus info:
session_path/raw_behavior_data/_iblrig_codeFiles.raw.*.zip
- rf mapping stim info:
session_path/raw_behavior_data/_iblrig_RFMapStim.raw.*.bin
- spikeglx sync data:
session_path/raw_ephys_data/probe_[right/left]/_spikeglx_sync.[channels/polarities/times].*
.npy
:param session_path: absolute path of a session, i.e. /mnt/data/Subjects/ZM_1887/2019-07-10/001
:type session_path: str
:param t_bin: screen refresh rate
:type t_bin: float
:param bin_jitter: fudge factor in spacer template matching (units of time bins)
:type bin_jitter: int
:param save: export stimulus info to alf directory
:type save: bool
    :return: None; instead stimulus info is stored in the following alf files:
        `_iblcertif_.stimtype.times.xx.npy`
        `_iblcertif_.stimtype.stims.xx.npy`
        where `stimtype` is one of `rfmap`, `odsgratings`, `reversal`, `task`, or `spontaneous`;
        see `export_to_alf` function documentation for more info on file structure
"""
IBLRIG_VERSION_MIN = '5.2.9'
# get ttl signal for extracting stim info (to compare with expected ttl signals in metadata)
# ttl_sig = load_ttl_pulses(session_path)
sync_pol, sync_times = load_ttl_pulses(session_path)
# load session metadata
meta = load_session_metadata(session_path)
# basic checks
protocol = meta['VISUAL_STIMULUS_TYPE']
if protocol != 'ephys_certification':
raise ValueError(
'"%s" is an invalid protocol; function only supports "ephys_certification"' % protocol)
    # compare version components numerically; joining the digits into one int
    # breaks down once any component reaches two digits (e.g. 5.2.10)
    iblrig_version = tuple(int(i) for i in meta['IBLRIG_VERSION_TAG'].split('.'))
    iblrig_version_min = tuple(int(i) for i in IBLRIG_VERSION_MIN.split('.'))
    if iblrig_version < iblrig_version_min:
        raise ValueError(
            'Special extractor needed for code version {}; minimum supported version is {}'.format(
                meta['IBLRIG_VERSION_TAG'], IBLRIG_VERSION_MIN))
print('extracting TTL pulses for iblrig version %s' % meta['IBLRIG_VERSION_TAG'])
# pull useful fields out of metadata
stim_ids = meta['VISUAL_STIMULI']
stim_order = np.array(meta['STIM_ORDER'])
frames = load_rf_mapping_stimulus(session_path, meta)
id_spacer = get_stim_num_from_name(stim_ids, 'spacer')
spacer_template = t_bin * np.array(meta['VISUAL_STIM_%i' % id_spacer]['ttl_frame_nums'])
# get expected ttl pulses from upper left stim pixel (rf map) and metadata (all other stims)
if len(frames) != 0:
frame_ttl_signal = frames[:, 0, 0]
else:
frame_ttl_signal = None
n_expected_ttl_pulses = get_expected_ttl_pulses(stim_order, meta, frame_ttl_signal)
# get spacer info
spacer_times, conv_sig = get_spacer_times(spacer_template, t_bin * bin_jitter, sync_times, 1)
idxs_spacer = np.where(stim_order == get_stim_num_from_name(stim_ids, 'spacer'))[0]
n_expected_spacers = len(idxs_spacer)
n_spacers = spacer_times.shape[0]
if n_spacers != n_expected_spacers:
raise ValueError(
'%i is an invalid number of spacer templates in ttl signal; expected %i' %
(n_spacers, n_expected_spacers))
else:
print('found expected number of stimulus spacers')
print('\n')
# get stimulus info
bonsai_jitter = 0.6 # bonsai timing can be slightly off
stim_ts = [[] for _ in stim_order]
stim_datas = [[] for _ in stim_order]
stim_names = [[] for _ in stim_order]
incorrect_pulses = 0
for i, stim_id in enumerate(stim_order):
stim_names[i] = stim_ids[str(stim_id)].lower()
if i not in idxs_spacer:
print('processing stimulus: %s' % stim_names[i])
# assumes all non-spacers are preceded by a spacer
sync_idxs = np.where(
(sync_times > spacer_times[int(i / 2), 1]) &
(sync_times < spacer_times[int((i + 1) / 2), 0]))[0]
# ttl signal polarities are different depending on stimulus
if stim_names[i] == 'receptive_field_mapping':
pol_beg_expected = -1
pol_end_expected = 1
else:
pol_beg_expected = 1
pol_end_expected = -1
# test beg/end polarities
if stim_names[i] != 'spontaneous_activity':
# check if initial polarity is correct
if sync_pol[sync_idxs[0]] != pol_beg_expected:
is_pol_beg_ok = False
print('\twarning! wrong polarity detected at beginning of %s' % stim_names[i])
else:
is_pol_beg_ok = True
# check if final polarity is correct
if sync_pol[sync_idxs[-1]] != pol_end_expected:
is_pol_end_ok = False
print('\twarning! wrong polarity detected at end of %s' % stim_names[i])
else:
is_pol_end_ok = True
else:
is_pol_beg_ok = True
is_pol_end_ok = True
if stim_names[i] == 'receptive_field_mapping':
stim_ts[i] = sync_times[sync_idxs]
# account for polarity correctness
if not is_pol_beg_ok:
stim_ts[i] = stim_ts[i][1:]
print('\t--> 1 TTL pulse removed from beginning')
if not is_pol_end_ok:
stim_ts[i] = stim_ts[i][:-1]
print('\t--> 1 TTL pulse removed from end')
stim_on_time = 0.2 # TODO: hardcoded for now
# we only recorded rise times earlier; sync_times contains rise and fall times
if np.diff(stim_ts[i][:2]) > stim_on_time / bonsai_jitter:
# get rid of bonsai artifacts at beginning; np.diff() will be too large
stim_ts[i] = stim_ts[i][2::2]
print('\tremoved bonsai onset artifact')
else:
stim_ts[i] = stim_ts[i][0::2]
stim_datas[i] = None # handled below
elif stim_names[i] == 'orientation-direction_selectivity':
stim_ts[i] = sync_times[sync_idxs]
# account for polarity correctness
if not is_pol_beg_ok:
stim_ts[i] = stim_ts[i][1:]
print('\t--> 1 TTL pulse removed from beginning')
if not is_pol_end_ok:
# stim_ts[i] = stim_ts[i][:-1]
n_expected_ttl_pulses[i] = n_expected_ttl_pulses[i] - 1 # -= 1 fails
                    print('\t--> adjusted to the correct number of TTL pulses')
# separate ttl pulses for stim on and stim off
# print(np.diff(stim_ts[i][:11]))
# print(np.diff(stim_ts[i][-11:]))
# print(stim_ts[i].shape)
# print(len(meta['VISUAL_STIM_%i' % stim_id]['stim_sequence']))
stim_on_time = meta['VISUAL_STIM_%i' % stim_id]['stim_on_time']
if np.diff(stim_ts[i][:2]) < bonsai_jitter * stim_on_time:
# get rid of bonsai artifacts at beginning; np.diff() will be too small
stim_ts[i] = np.stack([stim_ts[i][2::2], stim_ts[i][3::2]], axis=1)
print('\tremoved bonsai onset artifact')
else:
stim_ts[i] = np.stack([stim_ts[i][0::2], stim_ts[i][1::2]], axis=1)
# ttl pulse for each stim rather than on/off
n_expected_ttl_pulses[i] /= 2
stim_sequence = meta['VISUAL_STIM_%i' % stim_id]['stim_sequence']
stim_rads = meta['VISUAL_STIM_%i' % stim_id]['stim_directions_rad']
# export direction of each grating in radians
stim_datas[i] = np.array([stim_rads[str(j)] for j in stim_sequence])
elif stim_names[i] == 'contrast_reversal':
stim_ts[i] = sync_times[sync_idxs]
# account for polarity correctness
# if not (is_pol_beg_ok and is_pol_end_ok):
# raise NotImplementedError
stim_datas[i] = get_contrast_reversal_stimulus(meta)
elif stim_names[i] == 'task_stimuli':
stim_ts[i] = sync_times[sync_idxs]
# account for polarity correctness
if not is_pol_beg_ok:
stim_ts[i] = stim_ts[i][1:]
print('\t--> 1 TTL pulse removed from beginning')
if not is_pol_end_ok:
stim_ts[i] = stim_ts[i][:-1]
                    print('\t--> 1 TTL pulse removed from end')
0xFFD7CF,
"Shy Green": 0xE5E8D9,
"Shy Guy Red": 0xAA0055,
"Shy Mint": 0xE0E4DB,
"Shy Moment": 0xAAAAFF,
"Shy Pink": 0xDFD9DC,
"Shy Smile": 0xDCBBBE,
"Shy Violet": 0xD6C7D6,
"Shylock": 0x5AB9A4,
"Shyness": 0xF3F3D9,
"Siam": 0x686B50,
"Siam Gold": 0x896F40,
"Siamese Green": 0x9DAC79,
"Siamese Kitten": 0xEFE1D5,
"Siber<NAME>ur": 0xEEE2D5,
"Siberian Green": 0x4E6157,
"<NAME>": 0xFF44FF,
"S<NAME>illa": 0xFCC792,
"S<NAME>a": 0xC1C6AD,
"Sick Blue": 0x502D86,
"Sick Green": 0x9DB92C,
"Sickly Green": 0x94B21C,
"Sickly Yellow": 0xD0E429,
"Sidecar": 0xE9D9A9,
"Sidekick": 0xBFC3AE,
"Sideshow": 0xE2C591,
"Sidewalk Chalk Blue": 0xDBE9ED,
"Sidewalk Chalk Pink": 0xF7CCC4,
"Sidewalk Grey": 0x7B8F99,
"Sienna": 0xA9561E,
"<NAME>": 0xCDA589,
"<NAME>": 0xDCC4AC,
"<NAME>": 0xDE9F83,
"Sienna Red": 0xB1635E,
"Sienna Yellow": 0xF1D28C,
"Sierra": 0x985C41,
"Sierra Foothills": 0xA28A67,
"Sierra Madre": 0xC2BCAE,
"Sierra Redwood": 0x924E3C,
"Sierra Sand": 0xAFA28F,
"Siesta": 0xF0C3A7,
"Siesta Dreams": 0xC9A480,
"Siesta Rose": 0xEC7878,
"Siesta Sands": 0xF1E6E0,
"Siesta Tan": 0xE9D8C8,
"Siesta White": 0xCBDADB,
"Sightful": 0x76A4A6,
"Sigmarite": 0xCAAD76,
"Sign of Spring": 0xE3EDE2,
"Sign of the Crown": 0xFCE299,
"Signal Green": 0x33FF00,
"Signal Grey": 0x838684,
"Signal Pink": 0xB15384,
"Signal White": 0xECECE6,
"Signature Blue": 0x455371,
"Silence": 0xEAEDE5,
"Silence is Golden": 0xC2A06D,
"Silent Breath": 0xE9F1EC,
"Silent Breeze": 0xC6EAEC,
"Silent Delight": 0xE5E7E8,
"Silent Film": 0x9FA5A5,
"Silent Ivory": 0xFEF2C7,
"Silent Night": 0x526771,
"Silent Ripple": 0xABE3DE,
"Silent Sage": 0x729988,
"Silent Sands": 0xA99582,
"Silent Sea": 0x3A4A63,
"Silent Smoke": 0xDBD7CE,
"Silent Storm": 0xC3C7BD,
"Silent Tide": 0x7C929A,
"Silent White": 0xE5E7E4,
"Silentropae Cloud": 0xCCBBBB,
"Silhouette": 0xCBCDC4,
"Silica Sand": 0xEDE2E0,
"Silicate Green": 0x88B2A9,
"Silicate Light Turquoise": 0xCDDAD3,
"Siliceous Red": 0x5A3D4A,
"Silicone Seduction": 0xEBE0CA,
"Silicone Serena": 0xDCDCCF,
"Silithus Brown": 0xD57B65,
"Silk": 0xBBADA1,
"Silk Chiffon": 0xCCBFC7,
"Silk Crepe Grey": 0x354E4B,
"Silk Crepe Mauve": 0x6E7196,
"Silk Dessou": 0xEEE9DC,
"Silk Elegance": 0xF6E8DE,
"Silk Gown": 0xFCEEDB,
"Silk Jewel": 0x02517A,
"Silk Khimar": 0x70939E,
"Silk Lilac": 0x9188B5,
"Silk Lining": 0xFCEFE0,
"Silk Pillow": 0xF3F0EA,
"Silk Ribbon": 0xC86E8B,
"Silk Road": 0x97976F,
"Silk Sails": 0xF6EECD,
"Silk Sari": 0x009283,
"Silk Sheets": 0xEFDDDF,
"Silk Sox": 0xA5B2C7,
"Silk Star": 0xF5EEC6,
"Silk Stone": 0xCC9999,
"Silken Peacock": 0x427584,
"Silken Pine": 0x495D5A,
"Silken Raspberry": 0xAA7D89,
"Silken Tofu": 0xFEF6D8,
"Silkie Chicken": 0xFDEFDB,
"Silkworm": 0xEEEECC,
"Silky Bamboo": 0xEAE0CD,
"Silky Green": 0xBDC2BB,
"Silky Mint": 0xD7ECD9,
"Silky Pink": 0xFFDDF4,
"Silky Tofu": 0xFFF5E4,
"Silky White": 0xEEEBE2,
"Silky Yogurt": 0xF2F3CD,
"Silly Puddy": 0xF4B0BB,
"Silt": 0x8A7D72,
"Silt Green": 0xA9BDB1,
"Silver": 0xC0C0C0,
"Silver Ash": 0xDDDBD0,
"Silver Bells": 0xB8B4B6,
"Silver Birch": 0xD2CFC4,
"Silver Bird": 0xFBF5F0,
"Silver Blue": 0x8A9A9A,
"Silver Blueberry": 0x5B7085,
"Silver Bullet": 0xB6B5B8,
"Silver Chalice": 0xACAEA9,
"Silver Charm": 0xADB0B4,
"Silver City": 0xE2E4E9,
"Silver Cloud": 0xBEB7B0,
"Silver Clouds": 0xA6AAA2,
"Silver Creek": 0xD9DAD2,
"Silver Cross": 0xCDC5C2,
"Silver Dagger": 0xC1C1D1,
"Silver Dollar": 0xBDB6AE,
"Silver Drop": 0x9AB2A9,
"Silver Dust": 0xE8E7E0,
"Silver Feather": 0xEDEBE7,
"Silver Fern": 0xE1DDBF,
"Silver Filigree": 0x7F7C81,
"Silver Fir Blue": 0x7196A2,
"Silver Fox": 0xBDBCC4,
"Silver Grass": 0xC6CEC3,
"Silver Grass Traces": 0xDFE4DC,
"Silver Gray": 0xB8B2A2,
"Silver Green": 0xD7D7C7,
"Silver Grey": 0xA8A8A4,
"Silver Hill": 0x6D747B,
"Silver Lake": 0xDEDDDD,
"Silver Lake Blue": 0x618BB9,
"Silver Laurel": 0xD8DCC8,
"Silver Leaf": 0x9DB7A5,
"Silver Linden Grey": 0x859382,
"Silver Lined": 0xBBBFC3,
"Silver Lining": 0xBDB6AB,
"Silver Lustre": 0xA8A798,
"Silver Maple Green": 0x71776E,
"Silver Marlin": 0xC8C8C0,
"Silver Mauve": 0xDBCCD3,
"Silver Medal": 0xD6D6D6,
"Silver Mine": 0xBEC2C1,
"Silver Mink": 0x9F8D7C,
"Silver Moon": 0xD9D7C9,
"Silver Peony": 0xE7CFC7,
"Silver Pink": 0xDCB1AF,
"Silver Polish": 0xC6C6C6,
"Silver Rose": 0xD29EA6,
"Silver Rust": 0xC9A0DF,
"Silver Sage": 0x938B78,
"Silver Sand": 0xBEBDB6,
"Silver Sands": 0xDADEDD,
"Silver Sateen": 0xC7C6C0,
"Silver Sconce": 0xA19FA5,
"Silver Screen": 0xA6AEAA,
"Silver Service": 0xB2AAAA,
"Silver Setting": 0xD8DADB,
"Silver Shadow": 0xD8DAD8,
"Silver Skate": 0x87A1B1,
"Silver Sky": 0xEAECE9,
"Silver Snippet": 0x8E9090,
"Silver Spoon": 0xD3D3D2,
"Silver Springs": 0xB7BDC4,
"Silver Spruce": 0xCADFDD,
"Silver Star": 0x98A0B8,
"Silver Storm": 0x8599A8,
"Silver Strand": 0xB8C7CE,
"Silver Strand Beach": 0xCACDCA,
"Silver Strawberry": 0xF2C1C0,
"Silver Surfer": 0x7E7D88,
"Silver Sweetpea": 0xC4C9E2,
"Silver Thistle Beige": 0xE7D5C5,
"Silver Tinsel": 0xB6B3A9,
"Silver Tipped Sage": 0xBFC2BF,
"Silver Tradition": 0xD9D9D3,
"Silver Tree": 0x67BE90,
"Silver Willow Green": 0x637C5B,
"Silverado": 0x6A6472,
"Silverado Ranch": 0xA7A89B,
"Silverado Trail": 0xB7BBC6,
"Silverbeet": 0x5A6A43,
"Silverberry": 0xBEBBC9,
"Silverfish": 0x8D95AA,
"Silvermist": 0xB0B8B2,
"Silverpine": 0x4E6866,
"Silverpine Cyan": 0x8AE8FF,
"Silverplate": 0xC2C0BA,
"Silverpointe": 0xD1D2CB,
"Silverstone": 0xB1B3B3,
"Silverton": 0xBFD9CE,
"Silverware": 0xB8B8BF,
"Silvery Moon": 0xE6E5DC,
"Silvery Streak": 0xD5DBD5,
"Simmered Seaweed": 0x4C3D30,
"Simmering Ridge": 0xCB9281,
"Simmering Smoke": 0xA99F96,
"Simple Pink": 0xF9A3AA,
"Simple Serenity": 0xC8D9E5,
"Simple Silhouette": 0x7A716E,
"Simple Stone": 0xCDC7B7,
"Simple White": 0xDFD9D2,
"Simplicity": 0xCED0DB,
"Simplify Beige": 0xD6C7B9,
"Simply Blue": 0xADBBC9,
"Simply Delicious": 0xFFD2C1,
"Simply Elegant": 0xCEDDE7,
"Simply Green": 0x009B75,
"Simply Peachy": 0xFFC06C,
"Simply Posh": 0x8CB9D4,
"Simply Sage": 0xA7A996,
"Simply Sparkling": 0xB0C5E0,
"Simply Taupe": 0xAD9F93,
"Simply Violet": 0xA6A1D7,
"Simpson Surprise": 0x82856D,
"Simpsons Yellow": 0xFFD90F,
"Sin City": 0xCFA236,
"Sinatra": 0x4675B7,
"Sinbad": 0xA6D5D0,
"Sinful": 0x645059,
"Singapore Orchid": 0xA020F0,
"Singing Blue": 0x0074A4,
"Singing in the Rain": 0x8E9C98,
"Singing the Blues": 0x2B4D68,
"Sinister": 0x12110E,
"Sinister Minister": 0x353331,
"Sinister Mood": 0xA89C94,
"Siniy Blue": 0x4C4DFF,
"Sinkhole": 0x49716D,
"Sinking Sand": 0xD8B778,
"Sinner's City": 0xFEE5CB,
"Sinoper Red": 0xBB1111,
"Sinopia": 0xCB410B,
"Sip of Mint": 0xDEDFC9,
"Sip of Nut Milk": 0xEAE2DF,
"Sir Edmund": 0x20415D,
"Siren": 0x69293B,
"Sirocco": 0x68766E,
"Sis Kebab": 0x884411,
"Sisal": 0xC5BAA0,
"Siskin Green": 0xC8C76F,
"Siskin Sprout": 0x7A942E,
"Site White": 0xDCDEDC,
"Sitter Red": 0x3C2233,
"Sixteen Million Pink": 0xFD02FF,
"Sixties Blue": 0x0079A9,
"Siyâh Black": 0x1C1B1A,
"Sizzling Hot": 0xA36956,
"Sizzling Red": 0xFF3855,
"Sizzling Sunrise": 0xFFDB00,
"Sizzling Sunset": 0xEB7E4D,
"Skarsnik Green": 0x5F9370,
"Skavenblight Dinge": 0x47413B,
"Skeleton": 0xEBDECC,
"Skeleton Bone": 0xF4EBBC,
"Skeletor's Cape": 0x773399,
"Skeptic": 0x9DB4AA,
"Ski Patrol": 0xBB1237,
"Ski Slope": 0xE1E5E3,
"Skilandis": 0x41332F,
"Skimmed Milk White": 0xFEFFE3,
"Skin Tone": 0xDECAAE,
"Skink Blue": 0x5CBFCE,
"Skinny Dip": 0xF9DBD2,
"Skinny Jeans": 0x5588FF,
"Skipper": 0x748796,
"Skipper Blue": 0x484A72,
"Skipping Rocks": 0xD1D0C9,
"Skipping Stone": 0xD0CBB6,
"Skirret Green": 0x51B73B,
"Skobeloff": 0x007474,
"Skrag Brown": 0xB04E0F,
"Skull": 0xE3DAC9,
"Skullcrusher Brass": 0xF1C78E,
"Skullfire": 0xF9F5DA,
"Sky": 0x76D6FF,
"Sky Babe": 0x88C1D8,
"Sky Blue": 0x9FB9E2,
"Sky Blue Pink": 0xDCBFE1,
"Sky Bus": 0x99C1D6,
"Sky Captain": 0x262934,
"Sky Chase": 0xA5CAD1,
"Sky City": 0xA0BDD9,
"Sky Cloud": 0xADDEE5,
"Sky Dancer": 0x4499FF,
"Sky Eyes": 0x8EAABD,
"Sky Fall": 0x89C6DF,
"Sky Glass": 0xD1DCD8,
"Sky Grey": 0xBCC8C6,
"Sky High": 0xA7C2EB,
"Sky Light View": 0xCADADE,
"Sky Lodge": 0x546977,
"Sky Magenta": 0xCF71AF,
"Sky of Magritte": 0x0099FF,
"Sky of Ocean": 0x82CDE5,
"Sky Pilot": 0xA2BAD4,
"Sky Splash": 0xC9D3D3,
"Sky Wanderer": 0xB8DCED,
"Sky Watch": 0x8ACFD6,
"Sky's the Limit": 0xBBCEE0,
"Skyan": 0x66CCFF,
"Skydiver": 0x83ACD3,
"Skydiving": 0xC6D6D7,
"Skydome": 0x38A3CC,
"Skylark": 0xC1E4F0,
"Skylight": 0xC8E0E0,
"Skyline": 0x959EB7,
"Skyline Steel": 0xB9C0C3,
"Skylla": 0x1F7CC2,
"Skysail Blue": 0x818DB3,
"Skyscraper": 0xD3DBE2,
"Skywalker": 0xC1DEEA,
"Skyway": 0xADBED3,
"Slaanesh Grey": 0xDBD5E6,
"Slap Happy": 0xC9CC4A,
"Slate": 0x516572,
"Slate Black": 0x4B3D33,
"Slate Blue": 0x5B7C99,
"Slate Brown": 0xA0987C,
"Slate Green": 0x658D6D,
"Slate Grey": 0x59656D,
"Slate Mauve": 0x625C63,
"Slate Pebble": 0xB4ADA9,
"Slate Pink": 0xB3586C,
"Slate Rock": 0x868988,
"Slate Rose": 0xB45865,
"Slate Stone": 0xACB4AC,
"Slate Tile": 0x606E74,
"Slate Tint": 0x7A818D,
"Slate Violet": 0x989192,
"Slate Wall": 0x40535D,
"Sled": 0x4C5055,
"Sleek White": 0xFAF6E9,
"Sleep": 0x4579AC,
"Sleep Baby Sleep": 0xBED1E1,
"Sleeping Easy": 0x98BDDD,
"Sleeping Giant": 0x786D5E,
"Sleepy Blue": 0xBCCBCE,
"Sleepy Hollow": 0xB7C9D1,
"Sleepy Owlet": 0xB5A78D,
"Sleet": 0x92949B,
"Slender Reed": 0xDEC29F,
"Slice of Heaven": 0x0022EE,
"Slice of Watermelon": 0xE1697C,
"Sliced Cucumber": 0xCCCFBF,
"Slices of Happy": 0xEDE5BC,
"Slick Blue": 0x73CCD8,
"Slick Green": 0x615D4C,
"Slick Mud": 0xA66E49,
"Sliding": 0x97AEAF,
"Slight Mushroom": 0xCFC9C5,
"Slightly Golden": 0xCB904E,
"Slightly Peach": 0xF1DDD8,
"Slightly Rose": 0xE6CFCE,
"Slightly Spritzig": 0x92D2ED,
"Slightly Zen": 0xDCE4DD,
"Slime": 0xA7C408,
"Slime Lime": 0xB8EBC5,
"Slimer Green": 0xAADD00,
"Slimy Green": 0x7DED17,
"Slipper Satin": 0xBFC1CB,
"Slippery Moss": 0xBEBA82,
"Slippery Salmon": 0xF87E63,
"Slippery Shale": 0x7B766C,
"Slippery Soap": 0xEFEDD8,
"Slippery Stone": 0x8D6A4A,
"Slippery Tub": 0xD5F3EC,
"Slopes": 0xD2B698,
"Slow Dance": 0xDBDCC4,
"Slow Green": 0xC6D5C9,
"Slow Perch": 0xD5D4CE,
"Slubbed Silk": 0xE1C2BE,
"Sludge": 0xCA6B02,
"Slugger": 0x42342B,
"Slumber": 0x2D517C,
"Slumber Sloth": 0xCDC5B5,
"Sly Fox": 0x804741,
"Sly Shrimp": 0xF8E2D9,
"Smallmouth Bass": 0xAC9A7E,
"Smalt": 0x003399,
"Smalt Blue": 0x496267,
"Smaragdine": 0x4A9976,
"Smart White": 0xF6F3EC,
"Smashed Grape": 0x8775A1,
"Smashed Potatoes": 0xE2D0B9,
"Smashed Pumpkin": 0xFF6D3A,
"Smashing Pumpkins": 0xFF5522,
"Smell of Garlic": 0xD9DDCB,
"Smell of Lavender": 0xDCE0EA,
"Smell the Roses": 0xBB7283,
"Smells of Fresh Bread": 0xD7CECD,
"Smiley Face": 0xFFC962,
"Smitten": 0xC84186,
"Smock Blue": 0x3B646C,
"Smoke": 0xBFC8C3,
"Smoke & Ash": 0x939789,
"Smoke and Mirrors": 0xD9E6E8,
"Smoke Blue": 0x6688BB,
"Smoke Bush": 0xCC7788,
"Smoke Bush Rose": 0xAD8177,
"Smoke Cloud": 0xADB6B9,
"Smoke Dragon": 0xCCBBAA,
"Smoke Green": 0xA8BBA2,
"Smoke Grey": 0xCEBAA8,
"Smoke Pine": 0x3E6257,
"Smoke Screen": 0xAEAEAE,
"Smoke Tree": 0xBB5F34,
"Smoked Amethyst": 0x5A4351,
"Smoked Black Coffee": 0x3B2F2F,
"Smoked Claret": 0x583A39,
"Smoked Flamingo": 0x674244,
"Smoked Lavender": 0xCEB5B3,
"Smoked Mauve": 0xA89C97,
"Smoked Mulberry": 0x725F6C,
"Smoked Oak Brown": 0x573F16,
"Smoked Oyster": 0xD9D2CD,
"Smoked Paprika": 0x6E362C,
"Smoked Pearl": 0x656466,
"Smoked Purple": 0x444251,
"Smoked Salmon": 0xFA8072,
"Smoked Silver": 0xDDBBCC,
"Smoked Tan": 0xAEA494,
<reponame>ajmal017/amp
"""
Basic functions processing financial data.
Import as:
import core.finance as fin
"""
import datetime
import logging
from typing import Any, Dict, List, Optional, Union, cast
import numpy as np
import pandas as pd
import statsmodels.api as sm
import core.signal_processing as csigna
import helpers.dataframe as hdataf
import helpers.dbg as dbg
import helpers.printing as hprint
_LOG = logging.getLogger(__name__)
def remove_dates_with_no_data(
df: pd.DataFrame, report_stats: bool
) -> pd.DataFrame:
"""
Given a df indexed with timestamps, scan the data by date and drop the
dates on which all values are NaN.
:param report_stats: if True report information about the performed
operation
:return: filtered df
"""
# This is not strictly necessary.
dbg.dassert_strictly_increasing_index(df)
# Store the dates of the days removed because of all NaNs.
removed_days = []
# Accumulate the df for all the days that are not discarded.
df_out = []
# Store the days processed.
num_days = 0
# Scan the df by date.
for date, df_tmp in df.groupby(df.index.date):
if np.isnan(df_tmp).all(axis=1).all():
_LOG.debug("No data on %s", date)
removed_days.append(date)
else:
df_out.append(df_tmp)
num_days += 1
df_out = pd.concat(df_out)
dbg.dassert_strictly_increasing_index(df_out)
#
if report_stats:
# Stats for rows.
_LOG.info("df.index in [%s, %s]", df.index.min(), df.index.max())
removed_perc = hprint.perc(df.shape[0] - df_out.shape[0], df.shape[0])
_LOG.info("Rows removed: %s", removed_perc)
# Stats for all days.
removed_perc = hprint.perc(len(removed_days), num_days)
_LOG.info("Number of removed days: %s", removed_perc)
# Find week days.
removed_weekdays = [d for d in removed_days if d.weekday() < 5]
removed_perc = hprint.perc(len(removed_weekdays), len(removed_days))
_LOG.info("Number of removed weekdays: %s", removed_perc)
_LOG.info("Weekdays removed: %s", ", ".join(map(str, removed_weekdays)))
# Stats for weekend days.
removed_perc = hprint.perc(
len(removed_days) - len(removed_weekdays), len(removed_days)
)
_LOG.info("Number of removed weekend days: %s", removed_perc)
return df_out
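# Illustrative usage sketch (added example, not part of the original module):
# build a two-day minute-indexed df where the first day is all NaN and check
# that the whole day is dropped while the second day survives untouched.
def _example_remove_dates_with_no_data() -> pd.DataFrame:
    idx = pd.date_range("2021-01-04 09:30", periods=3, freq="1T").append(
        pd.date_range("2021-01-05 09:30", periods=3, freq="1T")
    )
    df = pd.DataFrame({"ret": [np.nan] * 3 + [0.1, np.nan, 0.3]}, index=idx)
    # 2021-01-04 is all-NaN and is removed; 2021-01-05 is kept as-is.
    return remove_dates_with_no_data(df, report_stats=False)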
# TODO(gp): Active trading hours and days are specific of different futures.
# Consider explicitly passing this information instead of using defaults.
def set_non_ath_to_nan(
df: pd.DataFrame,
start_time: Optional[datetime.time] = None,
end_time: Optional[datetime.time] = None,
) -> pd.DataFrame:
"""
Filter according to active trading hours.
We assume time intervals are:
- left closed, right open `[a, b)`
- labeled right
Row is not set to `np.nan` iff its `time` satisfies:
- `start_time < time`, and
- `time <= end_time`
"""
dbg.dassert_isinstance(df.index, pd.DatetimeIndex)
dbg.dassert_strictly_increasing_index(df)
if start_time is None:
start_time = datetime.time(9, 30)
if end_time is None:
end_time = datetime.time(16, 0)
dbg.dassert_lte(start_time, end_time)
# Compute the indices to remove.
times = df.index.time
to_remove_mask = (times <= start_time) | (end_time < times)
# Make a copy and filter.
df = df.copy()
df[to_remove_mask] = np.nan
return df
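# Boundary-behavior sketch (added example; timestamps and values are made up):
# with the default 09:30 / 16:00 window, a row stamped exactly 09:30 is set to
# NaN (it matches `times <= start_time`) while one stamped 16:00 is kept.
def _example_set_non_ath_to_nan() -> pd.DataFrame:
    idx = pd.to_datetime(
        ["2021-01-04 09:30", "2021-01-04 09:31",
         "2021-01-04 16:00", "2021-01-04 16:01"]
    )
    df = pd.DataFrame({"price": [1.0, 2.0, 3.0, 4.0]}, index=idx)
    # Rows at 09:30 and 16:01 become NaN; 09:31 and 16:00 survive.
    return set_non_ath_to_nan(df)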
def set_weekends_to_nan(df: pd.DataFrame) -> pd.DataFrame:
"""
Filter out weekends setting the corresponding values to `np.nan`.
"""
dbg.dassert_isinstance(df.index, pd.DatetimeIndex)
# 5 = Saturday, 6 = Sunday.
to_remove_mask = df.index.dayofweek.isin([5, 6])
df = df.copy()
df[to_remove_mask] = np.nan
return df
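# Worked note (added for illustration): 2021-01-09 and 2021-01-10 fall on a
# Saturday and Sunday, so rows with those dates are set to NaN by the
# `df.index.dayofweek.isin([5, 6])` mask above.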
# #############################################################################
# Resampling.
# #############################################################################
# TODO(Paul): Consider moving resampling code to a new `resampling.py`
# TODO(gp): Move to `dataflow/types.py`
KWARGS = Dict[str, Any]
def _resample_with_aggregate_function(
df: pd.DataFrame,
rule: str,
cols: List[str],
agg_func: str,
agg_func_kwargs: KWARGS,
) -> pd.DataFrame:
"""
Resample columns `cols` of `df` using the passed parameters.
"""
dbg.dassert(not df.empty)
dbg.dassert_isinstance(cols, list)
dbg.dassert(cols, msg="`cols` must be nonempty.")
dbg.dassert_is_subset(cols, df.columns)
resampler = csigna.resample(df[cols], rule=rule)
resampled = resampler.agg(agg_func, **agg_func_kwargs)
return resampled
def _merge(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
result_df = df1.merge(df2, how="outer", left_index=True, right_index=True)
dbg.dassert(result_df.index.freq)
return result_df
def resample_time_bars(
df: pd.DataFrame,
rule: str,
*,
return_cols: Optional[List[str]] = None,
return_agg_func: Optional[str] = None,
return_agg_func_kwargs: Optional[KWARGS] = None,
price_cols: Optional[List[str]] = None,
price_agg_func: Optional[str] = None,
price_agg_func_kwargs: Optional[KWARGS] = None,
volume_cols: Optional[List[str]] = None,
volume_agg_func: Optional[str] = None,
volume_agg_func_kwargs: Optional[KWARGS] = None,
) -> pd.DataFrame:
"""
Convenience resampler for time bars.
Features:
- Respects causality
- Chooses sensible defaults:
- returns are summed
- prices are averaged
- volume is summed
- NaN intervals remain NaN
- Defaults may be overridden (e.g., choose `last` instead of `mean` for
price)
:param df: input dataframe with datetime index
:param rule: resampling frequency with pandas convention (e.g., "5T")
:param return_cols: columns containing returns
:param return_agg_func: aggregation function, default is "sum"
:param return_agg_func_kwargs: kwargs
:param price_cols: columns containing price
:param price_agg_func: aggregation function, default is "mean"
:param price_agg_func_kwargs: kwargs
:param volume_cols: columns containing volume
:param volume_agg_func: aggregation function, default is "sum"
:param volume_agg_func_kwargs: kwargs
:return: dataframe of resampled time bars with columns from `*_cols` variables,
although not in the same order as passed
"""
dbg.dassert_isinstance(df, pd.DataFrame)
result_df = pd.DataFrame()
# Maybe resample returns.
# TODO(gp): Consider refactoring this chunk of code in a separate helper
# or merge the common code in _resample_with_aggregate_function().
# The only differences between the 3 resampling branches (returns, prices,
# volume) are the default aggregation function and its kwargs.
if return_cols:
return_agg_func = return_agg_func or "sum"
return_agg_func_kwargs = return_agg_func_kwargs or {"min_count": 1}
return_df = _resample_with_aggregate_function(
df, rule, return_cols, return_agg_func, return_agg_func_kwargs
)
result_df = _merge(result_df, return_df)
# Maybe resample prices.
if price_cols:
price_agg_func = price_agg_func or "mean"
# TODO(*): Explain the rationale of not using `min_count` for `mean`.
price_agg_func_kwargs = price_agg_func_kwargs or {}
price_df = _resample_with_aggregate_function(
df, rule, price_cols, price_agg_func, price_agg_func_kwargs
)
result_df = _merge(result_df, price_df)
# Maybe resample volume.
if volume_cols:
volume_agg_func = volume_agg_func or "sum"
volume_agg_func_kwargs = volume_agg_func_kwargs or {"min_count": 1}
volume_df = _resample_with_aggregate_function(
df, rule, volume_cols, volume_agg_func, volume_agg_func_kwargs
)
result_df = _merge(result_df, volume_df)
return result_df
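# Minimal usage sketch for the resampler above (added example; the column
# names are made up): 1-minute bars to 5-minute bars using the documented
# defaults (returns summed with min_count=1, prices averaged, volume summed).
def _example_resample_time_bars() -> pd.DataFrame:
    idx = pd.date_range("2021-01-04 09:31", periods=10, freq="1T")
    df = pd.DataFrame({"ret": 0.001, "close": 100.0, "vol": 10.0}, index=idx)
    return resample_time_bars(
        df,
        "5T",
        return_cols=["ret"],
        price_cols=["close"],
        volume_cols=["vol"],
    )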
def resample_ohlcv_bars(
df: pd.DataFrame,
rule: str,
*,
open_col: Optional[str] = "open",
high_col: Optional[str] = "high",
low_col: Optional[str] = "low",
close_col: Optional[str] = "close",
volume_col: Optional[str] = "volume",
add_twap_vwap: bool = False,
) -> pd.DataFrame:
"""
Resample OHLCV bars and optionally add TWAP, VWAP prices based on "close".
TODO(Paul): compare to native pandas `ohlc` resampling.
:param df: input dataframe with datetime index
:param rule: resampling frequency
:param open_col: name of "open" column
:param high_col: name of "high" column
:param low_col: name of "low" column
:param close_col: name of "close" column
:param volume_col: name of "volume" column
:param add_twap_vwap: if `True`, add "twap" and "vwap" columns
:return: resampled OHLCV dataframe with same column names; if
`add_twap_vwap`, then also includes "twap" and "vwap" columns.
"""
dbg.dassert_isinstance(df, pd.DataFrame)
# Make sure that requested OHLCV columns are present in the dataframe.
for col in [open_col, high_col, low_col, close_col, volume_col]:
if col is not None:
dbg.dassert_in(col, df.columns)
# Process each requested OHLCV column.
result_df = pd.DataFrame()
if open_col:
open_df = resample_time_bars(
df[[open_col]],
rule=rule,
price_cols=[open_col],
price_agg_func="first",
)
result_df = _merge(result_df, open_df)
if high_col:
high_df = resample_time_bars(
df[[high_col]], rule=rule, price_cols=[high_col], price_agg_func="max"
)
result_df = _merge(result_df, high_df)
if low_col:
low_df = resample_time_bars(
df[[low_col]], rule=rule, price_cols=[low_col], price_agg_func="min"
)
result_df = _merge(result_df, low_df)
if close_col:
close_df = resample_time_bars(
df[[close_col]],
rule=rule,
price_cols=[close_col],
price_agg_func="last",
)
result_df = _merge(result_df, close_df)
if volume_col:
# We rely on the default behavior of accumulating the volume.
volume_df = resample_time_bars(
df[[volume_col]],
rule=rule,
volume_cols=[volume_col],
)
result_df = _merge(result_df, volume_df)
# Add TWAP / VWAP prices, if needed.
if add_twap_vwap:
price_col: str
volume_col: str
twap_vwap_df = compute_twap_vwap(
df, rule=rule, price_col=close_col, volume_col=volume_col
)
result_df = _merge(result_df, twap_vwap_df)
return result_df
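# Companion sketch (added example): a minute-bar frame that already uses the
# default "open"/"high"/"low"/"close"/"volume" column names resamples with a
# single call; pass `add_twap_vwap=True` to also get "twap"/"vwap" columns.
def _example_resample_ohlcv_bars(df: pd.DataFrame) -> pd.DataFrame:
    # `df` is assumed to be an OHLCV frame with a DatetimeIndex.
    return resample_ohlcv_bars(df, "5T", add_twap_vwap=False)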
def compute_twap_vwap(
df: pd.DataFrame,
rule: str,
*,
price_col: str,
volume_col: str,
offset: Optional[str] = None,
add_bar_volume: bool = False,
add_bar_start_timestamps: bool = False,
add_epoch: bool = False,
add_last_price: bool = False,
) -> pd.DataFrame:
"""
Compute TWAP/VWAP from price and volume columns.
:param df: input dataframe with datetime index
:param rule: resampling frequency and TWAP/VWAP aggregation window
:param price_col: price for bar
:param volume_col: volume for bar
:param offset: offset in the Pandas format (e.g., `1T`) used to shift the
sampling
:return: twap and vwap price series
"""
dbg.dassert_isinstance(df, pd.DataFrame)
# TODO(*): Determine whether we really need this. Disabling for now to
# accommodate data that is not perfectly aligned with a pandas freq
# (e.g., Kibot).
# dbg.dassert(df.index.freq)
dbg.dassert_in(price_col, df.columns)
dbg.dassert_in(volume_col, df.columns)
# Only use rows where both price and volume are non-NaN.
non_nan_idx = df[[price_col, volume_col]].dropna().index
nan_idx = df.index.difference(non_nan_idx)
price = df[price_col]
if add_last_price:
# Calculate last price (regardless of whether we have volume data).
last_price = csigna.resample(price, rule=rule, offset=offset).last(
min_count=1
)
last_price.name = "last"
price.loc[nan_idx] = np.nan
volume = df[volume_col]
if add_bar_volume:
# Calculate bar volume (regardless of whether we have price data).
bar_volume = csigna.resample(volume, rule=rule, offset=offset).sum(
min_count=1
)
bar_volume.name = "volume"
volume.loc[nan_idx] = np.nan
# Weight price according to volume.
volume_weighted_price = price.multiply(volume)
def bbox_transform(ex_rois, gt_rois):
'''
compute regression targets (dx, dy, dw, dh) mapping ex_rois onto gt_rois
'''
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
def bbox_transform_inv(boxes, deltas, debug=True):
'''
boxes come from the RPN; deltas are the box-regression parameters predicted for those boxes
'''
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths # center of the boxes
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
return pred_boxes
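# Quick self-consistency sketch (added for illustration; assumes the encode /
# decode pair bbox_transform / bbox_transform_inv defined above and numpy
# only): encoding a box against itself yields zero deltas; decoding those zero
# deltas returns x2/y2 shifted by +1 because the forward transform uses the
# "+ 1.0" width/height convention while the inverse does not.
def _example_bbox_transform_roundtrip():
    boxes = np.array([[10.0, 20.0, 50.0, 80.0]])
    deltas = bbox_transform(boxes, boxes)     # [[0., 0., 0., 0.]]
    return bbox_transform_inv(boxes, deltas)  # [[10., 20., 51., 81.]]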
def bbox_rotation_inv(bbox_in, angle_in_degree, image_shape, debug=True):
'''
bbox_in holds two corner points (top-left and bottom-right)
the rotation angle is clockwise, in degrees
'''
if debug:
assert isnparray(bbox_in) and bbox_in.size == 4, 'box is not correct'
im_width = image_shape[1]
im_height = image_shape[0]
coor_in_tl = np.array([(bbox_in[0] - im_width/2)/im_width*2, (bbox_in[1] - im_height/2)/im_height*2, 1]) # normalization
coor_in_br = np.array([(bbox_in[2] - im_width/2)/im_width*2, (bbox_in[3] - im_height/2)/im_height*2, 1]) # normalization
# print(coor_in_tl)
# print(coor_in_br)
affine = np.array([[math.cos(rad(angle_in_degree)), math.sin(rad(angle_in_degree)), 0], [-math.sin(rad(angle_in_degree)), math.cos(rad(angle_in_degree)), 0]])
coor_out_tl = np.dot(coor_in_tl, affine.transpose())
coor_out_br = np.dot(coor_in_br, affine.transpose())
# print(coor_out_tl)
# print(coor_out_br)
bbox_recover = [coor_out_tl[0] * im_width/2 + im_width/2, coor_out_tl[1] * im_height/2 + im_height/2, coor_out_br[0] * im_width/2 + im_width/2, coor_out_br[1] * im_height/2 + im_height/2]
bbox_recover = np.array(bbox_recover, dtype = float)
return bbox_recover
def bbox_general2rotated_loose(bbox_in, angle_in_degree, image_shape, debug=True):
'''
convert a general bbox (top-left and bottom-right points) into the loose representation of the rotated bbox, again given as top-left and bottom-right points
'''
bbox = bbox_rotation_inv(bbox_in, angle_in_degree, image_shape, debug=debug) # get top left and bottom right coordinate of the rotated bbox in the image coordinate
return bbox_rotatedtight2rotatedloose(bbox, angle_in_degree, debug=debug)
def bbox_rotatedtight2rotatedloose(bbox_in, angle_in_degree, debug=True):
'''
convert a rotated bbox from the tight representation to the loose one; both contain only two points (top-left and bottom-right)
only a single box is fed in
'''
if debug:
assert isnparray(bbox_in) and bbox_in.size == 4, 'box is not correct'
pts_tl = np.array([bbox_in[0], bbox_in[1]])
pts_br = np.array([bbox_in[2], bbox_in[3]])
line1 = get_2Dline_from_pts_slope(imagecoor2cartesian(pts_tl), angle_in_degree + 90.00)
line2 = get_2Dline_from_pts_slope(imagecoor2cartesian(pts_br), angle_in_degree)
pts_bl = cartesian2imagecoor(get_2dpts_from_lines(line1, line2))
pts_tr = cartesian2imagecoor(get_2dpts_from_lines(get_2Dline_from_pts_slope(imagecoor2cartesian(pts_tl), angle_in_degree), get_2Dline_from_pts_slope(imagecoor2cartesian(pts_br), angle_in_degree + 90.00)))
# assert_almost_equal(np.dot(pts_bl - pts_br, pts_bl - pts_tl), 0, err_msg='The intersection points are wrong')
# assert_almost_equal(np.dot(pts_tr - pts_br, pts_tr - pts_tl), 0, err_msg='The intersection points are wrong')
pts_tl_final = np.zeros((2), dtype=np.float32)
pts_br_final = np.zeros((2), dtype=np.float32)
pts_tl_final[0] = min({pts_tl[0], pts_br[0], pts_bl[0], pts_tr[0]})
pts_tl_final[1] = min({pts_tl[1], pts_br[1], pts_bl[1], pts_tr[1]})
pts_br_final[0] = max({pts_tl[0], pts_br[0], pts_bl[0], pts_tr[0]})
pts_br_final[1] = max({pts_tl[1], pts_br[1], pts_bl[1], pts_tr[1]})
# print(pts_tl_final)
# print(pts_br_final)
test = np.hstack((pts_tl_final, pts_br_final))
return test
def apply_rotation_loose(all_boxes, angle_in_degree, image_shape, debug=True):
'''
this function takes an N x (4 * num_classes) array of bounding boxes (e.g. N x 84)
and converts all of them to the loose rotated representation
all_boxes supports multiple classes
'''
assert all_boxes.shape[1] % 4 == 0, 'The shape of boxes is not multiple of 4\
while applying rotation with loose version'
num_classes = all_boxes.shape[1] // 4  # integer division so the slicing below gets int bounds
num_proposal = all_boxes.shape[0]
for row in range(num_proposal):
for cls_ind in range(num_classes):
# print()
box_tmp = all_boxes[row, cls_ind * 4 : (cls_ind + 1) * 4]
all_boxes[row, cls_ind * 4 : (cls_ind + 1) * 4] = bbox_general2rotated_loose(box_tmp, angle_in_degree, image_shape, debug=debug)
return all_boxes
def apply_rotation_tight(bbox_in, angle_in_degree, im_shape, debug=True):
'''
return 4 points clockwise
'''
if debug:
assert isnparray(bbox_in) and bbox_in.size == 4, 'box is not correct'
bbox_in = np.reshape(bbox_in, (4, ))
bbox_tight = bbox_rotation_inv(bbox_in, angle_in_degree, im_shape, debug=debug) # get top left and bottom right coordinate of the rotated bbox in the image coordinate
# print('bbox after inverse the rotation')
# print(bbox_tight)
pts_total = np.zeros((4, 2), dtype=np.int)
pts_tl = np.array([bbox_tight[0], bbox_tight[1]])
pts_br = np.array([bbox_tight[2], bbox_tight[3]])
line1 = get_2dline_from_pts_slope(imagecoor2cartesian(pts_tl, debug=debug), angle_in_degree + 90.00, debug=debug)
line2 = get_2dline_from_pts_slope(imagecoor2cartesian(pts_br, debug=debug), angle_in_degree, debug=debug)
pts_bl = cartesian2imagecoor(get_2dpts_from_lines(line1, line2, debug=debug), debug=debug)
pts_tr = cartesian2imagecoor(get_2dpts_from_lines(get_2dline_from_pts_slope(imagecoor2cartesian(pts_tl, debug=debug), angle_in_degree, debug=debug), get_2dline_from_pts_slope(imagecoor2cartesian(pts_br, debug=debug), angle_in_degree + 90.00, debug=debug), debug=debug), debug=debug)
# print np.reshape(pts_tl, (1, 2)).shape
# print pts_total[0, :].shape
pts_total[0, :] = np.reshape(pts_tl, (1, 2))
pts_total[1, :] = np.reshape(pts_tr, (1, 2))
pts_total[2, :] = np.reshape(pts_br, (1, 2))
pts_total[3, :] = np.reshape(pts_bl, (1, 2))
return pts_total
def bbox_enlarge(bbox, img_hw=None, ratio=None, ratio_hw=None, min_length=None, min_hw=None, debug=True):
'''
enlarge the bbox around the edge
parameters:
bbox: N X 4 numpy array, TLBR format
ratio: how much to enlarge; e.g. with ratio=0.2 the width and height each grow by 0.2 times their original value
img_hw: height and width
'''
bbox = copy.copy(bbox)
if debug:
assert bboxcheck_TLBR(bbox), 'the input bounding box should be TLBR format'
if ratio_hw is not None:
height_ratio, width_ratio = ratio_hw
width = (bbox[:, 2] - bbox[:, 0]) * width_ratio
height = (bbox[:, 3] - bbox[:, 1]) * height_ratio
else:
width = (bbox[:, 2] - bbox[:, 0]) * ratio
height = (bbox[:, 3] - bbox[:, 1]) * ratio
# enlarge and meet the minimum length
if (min_hw is not None) or (min_length is not None):
cur_width = bbox[:, 2] - bbox[:, 0]
cur_height = bbox[:, 3] - bbox[:, 1]
if min_hw is not None:
min_height, min_width = min_hw
width = np.maximum(width, min_width - cur_width)  # elementwise; builtin max() fails on arrays
height = np.maximum(height, min_height - cur_height)
elif min_length is not None:
width = np.maximum(width, min_length - cur_width)
height = np.maximum(height, min_length - cur_height)
bbox[:, 0] -= width / 2.0
bbox[:, 1] -= height / 2.0
bbox[:, 2] += width / 2.0
bbox[:, 3] += height / 2.0
if img_hw is not None:
img_height, img_width = img_hw
bad_index = np.where(bbox[:, 0] < 0)[0].tolist(); bbox[bad_index, 0] = 0
bad_index = np.where(bbox[:, 1] < 0)[0].tolist(); bbox[bad_index, 1] = 0
bad_index = np.where(bbox[:, 2] >= img_width)[0].tolist(); bbox[bad_index, 2] = img_width - 1
bad_index = np.where(bbox[:, 3] >= img_height)[0].tolist(); bbox[bad_index, 3] = img_height - 1
return bbox
def bboxes_from_mask(mask, debug=True):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (x1, y1, x2, y2)]. TLBR
"""
mask = mask.copy()
if len(mask.shape) == 2:
mask = np.expand_dims(mask, axis=2)
assert len(mask.shape) == 3, 'the shape is not correct'
boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([x1, y1, x2, y2])
return boxes.astype(np.int32)
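# Toy check for the mask-to-box helper above (added for illustration; numpy
# only): a single-instance mask with ones in rows 2..4 and cols 3..6 yields
# the box [3, 2, 7, 5]; x2/y2 are exclusive, hence the +1 increments above.
def _example_bboxes_from_mask():
    m = np.zeros((8, 8), dtype=np.uint8)
    m[2:5, 3:7] = 1
    return bboxes_from_mask(m)  # array([[3, 2, 7, 5]], dtype=int32)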
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [x1, y1, x2, y2]
boxes: [boxes_count, (x1, y1, x2, y2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
x1 = np.maximum(box[0], boxes[:, 0])
x2 = np.minimum(box[2], boxes[:, 2])
y1 = np.maximum(box[1], boxes[:, 1])
y2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
# Union = sum of the two areas minus the intersection.
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
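# Worked example for compute_iou (added for illustration): two boxes of area
# 16 overlapping in a 2x2 patch give IoU = 4 / (16 + 16 - 4) = 1/7.
def _example_compute_iou():
    box = np.array([0.0, 0.0, 4.0, 4.0])
    boxes = np.array([[2.0, 2.0, 6.0, 6.0]])
    return compute_iou(box, boxes, 16.0, np.array([16.0]))  # ~0.1429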
LOGGER.info('Delete badge {} for id {} with category {}'.format(
badge_name, id, category))
delete_query = Template(
textwrap.dedent("""
DELETE EDGE BADGE_FOR "{{ badge_name }}" -> "{{ uri }}";
DELETE EDGE HAS_BADGE "{{ uri }}" -> "{{ badge_name }}";
"""))
self._execute_query(query=delete_query.render(badge_name=badge_name,
uri=id),
param_dict={})
@timer_with_counter
def get_badges(self) -> List:
LOGGER.info('Get all badges')
query = textwrap.dedent("""
MATCH (b:Badge) RETURN b AS badge
""")
records = self._execute_query(query=query, param_dict={})[0]
index_badge = self._get_result_column_index(records, 'badge')
results = []
for record in records.get('data', []):
results.append(
Badge(badge_name=record['meta'][index_badge]['id'],
category=record['row'][index_badge]['Badge.category']))
return results
@timer_with_counter
def add_tag(self,
*,
id: str,
tag: str,
tag_type: str = 'default',
resource_type: ResourceType = ResourceType.Table) -> None:
"""
Add new tag
1. Create the node with type Tag if the node doesn't exist.
2. Create the relation between tag and table if the relation doesn't exist.
:param id:
:param tag:
:param tag_type:
:param resource_type:
:return: None
"""
LOGGER.info(
'New tag {} for id {} with type {} and resource type {}'.format(
tag, id, tag_type, resource_type.name))
add_tag_query = Template(
textwrap.dedent("""
UPSERT EDGE ON `TAG`
"{{ tag }}" -> "{{ uri }}"
SET START_LABEL = "Badge",
END_LABEL = "{{ resource_type }}"
WHEN START_LABEL != "Badge";
UPSERT EDGE ON TAGGED_BY
"{{ uri }}" -> "{{ tag }}"
SET END_LABEL = "Badge",
START_LABEL = "{{ resource_type }}"
WHEN END_LABEL != "Badge";
UPSERT VERTEX ON `Tag` "{{ tag }}"
SET tag_type = "{{ tag_type }}"
WHEN tag_type != "{{ tag_type }}"
YIELD tag_type;
"""))
self._execute_query(query=add_tag_query.render(
uri=id,
tag=tag,
tag_type=tag_type,
resource_type=resource_type.name),
param_dict={})
@timer_with_counter
def delete_tag(self,
*,
id: str,
tag: str,
tag_type: str = 'default',
resource_type: ResourceType = ResourceType.Table) -> None:
"""
Deletes tag
1. Delete the relation between resource and the tag
2. todo(Tao): need to think about whether we should delete the tag if it is an orphan tag.
todo(Wey): for now, keep orphan tags; do not delete them.
:param id:
:param tag:
:param tag_type: {default-> normal tag, badge->non writable tag from UI}
:param resource_type:
:return:
"""
LOGGER.info(
'Delete tag {} for id {} with type {} and resource type: {}'.
format(tag, id, tag_type, resource_type.name))
delete_query = Template(
textwrap.dedent("""
DELETE EDGE `TAG` "{{ tag }}" -> "{{ uri }}";
DELETE EDGE TAGGED_BY "{{ uri }}" -> "{{ tag }}";
"""))
self._execute_query(query=delete_query.render(tag=tag, uri=id),
param_dict={})
@timer_with_counter
def get_tags(self) -> List:
"""
Get all existing tags from Nebula
:return:
"""
LOGGER.info('Get all the tags')
# todo: Currently all the tags are default type, we could open it up if we want to include badge
query = textwrap.dedent("""
MATCH (t:`Tag`{tag_type: 'default'})
OPTIONAL MATCH (resource)-[:TAGGED_BY]->(t)
WITH t AS tag_name, count(distinct id(resource)) AS tag_count
WHERE tag_count > 0
RETURN tag_name, tag_count
""")
records = self._execute_query(query=query, param_dict={})[0]
i_tag_name, i_tag_count = self._get_result_column_indexes(
records, ('tag_name', 'tag_count'))
results = []
for record in records.get('data', []):
results.append(
TagDetail(tag_name=record['meta'][i_tag_name]['id'],
tag_count=record['row'][i_tag_count]))
return results
@timer_with_counter
def get_latest_updated_ts(self) -> Optional[int]:
"""
API method to fetch last updated / index timestamp for Nebula, es
:return:
"""
query = textwrap.dedent("""
MATCH (n:Updatedtimestamp)
WHERE id(n) == 'amundsen_updated_timestamp'
RETURN n
""")
record = self._execute_query(query=query, param_dict={})[0]
index_n = self._get_result_column_index(record, 'n')
if record.get('data', None):
# There should be one and only one record
ts = record['data'][0]['row'][index_n].get(
"Updatedtimestamp.latest_timestamp", 0)
if ts is None:
return 0
else:
return ts
else:
return None
@timer_with_counter
def get_statistics(self) -> Dict[str, Any]:
"""
API method to fetch statistics metrics for Nebula Graph
:return: dictionary of statistics
"""
query = textwrap.dedent("""
MATCH (table_node:Table) with count(table_node) as number_of_tables
MATCH p=(item_node:Table)-[:DESCRIPTION]->(description_node:Description)
WHERE size(description_node.Description.description)>2 and exists(item_node.Table.is_view)
with count(item_node) as number_of_documented_tables, number_of_tables
MATCH p=(item_node:Column)-[:DESCRIPTION]->(description_node1:Description)
WHERE size(description_node1.Description.description)>2 and exists(item_node.Column.sort_order)
with count(item_node) as number_of_documented_cols, number_of_documented_tables, number_of_tables
MATCH p=(table_node:Table)-[:OWNER]->(user_node:`User`) with count(distinct table_node) as number_of_tables_with_owners,
count(distinct user_node) as number_of_owners, number_of_documented_cols,
number_of_documented_tables, number_of_tables
MATCH (item_node:Table)-[:DESCRIPTION]->(description_node:Description)
WHERE size(description_node.Description.description)>2 and exists(item_node.Table.is_view)
MATCH (item_node:Table)-[:OWNER]->(user_node:`User`)
with count(item_node) as number_of_documented_and_owned_tables,
number_of_tables_with_owners, number_of_owners, number_of_documented_cols,
number_of_documented_tables, number_of_tables
RETURN number_of_tables, number_of_documented_tables, number_of_documented_cols,
number_of_owners, number_of_tables_with_owners, number_of_documented_and_owned_tables
""")
LOGGER.info('Getting Nebula Graph Statistics')
records = self._execute_query(query=query, param_dict={})[0]
(i_number_of_tables, i_number_of_documented_tables,
i_number_of_documented_cols, i_number_of_owners,
i_number_of_tables_with_owners,
i_number_of_documented_and_owned_tables
) = self._get_result_column_indexes(
records, ('number_of_tables', 'number_of_documented_tables',
'number_of_documented_cols', 'number_of_owners',
'number_of_tables_with_owners',
'number_of_documented_and_owned_tables'))
for record in records.get('data', []):
r = record['row']
nebula_statistics = {
'number_of_tables':
r[i_number_of_tables],
'number_of_documented_tables':
r[i_number_of_documented_tables],
'number_of_documented_cols':
r[i_number_of_documented_cols],
'number_of_owners':
r[i_number_of_owners],
'number_of_tables_with_owners':
r[i_number_of_tables_with_owners],
'number_of_documented_and_owned_tables':
r[i_number_of_documented_and_owned_tables]
}
return nebula_statistics
return {}
@_CACHE.cache('_get_global_popular_resources_uris',
expire=_GET_POPULAR_RESOURCES_CACHE_EXPIRY_SEC)
def _get_global_popular_resources_uris(
self,
num_entries: int,
resource_type: ResourceType = ResourceType.Table) -> List[str]:
"""
Retrieve popular table uris, i.e. the tables with the top `num_entries` popularity scores.
Popularity score = number of distinct readers * log(total number of reads)
The result of this method will be cached based on the key (num_entries), and the cache will be expired based on
_GET_POPULAR_TABLE_CACHE_EXPIRY_SEC
The score takes the logarithm of the total number of reads so that a small
number of users reading many times cannot dominate the score.
:return: Iterable of table uri
"""
query = Template(
textwrap.dedent("""
MATCH (resource:`{{ resource_type }}`)-[r:READ_BY]->(u:`User`)
WITH id(resource) AS vid, count(distinct u) AS readers, sum(r.read_count) AS total_reads
WHERE readers >= {{ num_readers }}
RETURN vid, readers, total_reads, (readers * log(total_reads)) AS score
ORDER BY score DESC LIMIT {{ num_entries }};
"""))
LOGGER.info('Querying popular tables URIs')
num_readers = current_app.config[
'POPULAR_RESOURCES_MINIMUM_READER_COUNT']
records = self._execute_query(query=query.render(
resource_type=resource_type.name,
num_readers=num_readers,
num_entries=num_entries),
param_dict={})[0]
i_vid = self._get_result_column_index(records, 'vid')
return [record['row'][i_vid] for record in records.get('data', [])]
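# Worked example of the score above (illustrative numbers): a table read by
# 10 distinct users a total of 1000 times scores 10 * log(1000) ~= 69.1,
# while one read by 2 users 100000 times scores 2 * log(100000) ~= 23.0;
# heavy re-reading by a few users is damped, as intended.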
@timer_with_counter
@_CACHE.cache('_get_personal_popular_tables_uris',
_GET_POPULAR_RESOURCES_CACHE_EXPIRY_SEC)
def _get_personal_popular_resources_uris(
self,
num_entries: int,
user_id: str,
resource_type: ResourceType = ResourceType.Table) -> List[str]:
"""
Retrieve personalized popular resources uris. Will provide resources with top
popularity score that have been read by a peer of the user_id provided.
The popularity score is defined in the same way as in `_get_global_popular_resources_uris`.
The result of this method will be cached based on the key (num_entries, user_id),
and the cache will be expired based on _GET_POPULAR_TABLE_CACHE_EXPIRY_SEC
:return: Iterable of table uri
"""
query = Template(
textwrap.dedent("""
MATCH (u:`User`)<-[:READ_BY]-(:`{{ resource_type }}`)-[:READ_BY]->
(coUser:`User`)<-[coRead:READ_BY]-(resource:`{{ resource_type }}`)
WHERE id(u) == "{{ user_id }}"
WITH id(resource) AS vid, count(DISTINCT coUser) AS co_readers,
sum(coRead.read_count) AS total_co_reads
WHERE co_readers >= {{ num_readers }}
RETURN vid, (co_readers * log(total_co_reads)) AS score
ORDER BY score DESC LIMIT {{ num_entries }};
"""))
LOGGER.info('Querying popular tables URIs')
num_readers = current_app.config[
'POPULAR_RESOURCES_MINIMUM_READER_COUNT']
records = self._execute_query(query=query.render(
resource_type=resource_type.name,
user_id=user_id,
num_readers=num_readers,
num_entries=num_entries),
param_dict={})[0]
i_vid = self._get_result_column_index(records, 'vid')
return [record['row'][i_vid] for record in records.get('data', [])]
@timer_with_counter
def get_popular_tables(
self,
*,
num_entries: int,
user_id: Optional[str] = None) -> List[PopularTable]:
"""
Retrieve popular tables. As popular-table computation requires a full scan of the
table and user relationships, it uses the cached helpers
`_get_global_popular_resources_uris` / `_get_personal_popular_resources_uris`.
:param num_entries:
:return: Iterable of PopularTable
"""
if user_id is None:
# Get global popular table URIs
table_uris = self._get_global_popular_resources_uris(num_entries)
else:
# Get personalized popular table URIs
table_uris = self._get_personal_popular_resources_uris(
num_entries, user_id)
if not table_uris:
return []
# note(Wey): we render table_uris as a string-formatted list for now; will switch to passing it via param_dict later
query = Template(
textwrap.dedent("""
MATCH (db:Database)-[:CLUSTER]->(clstr:Cluster)-[:SCHEMA]->(schema:Schema)-[:TABLE]->(tbl:Table)
WHERE id(tbl) IN {{ table_uris }}
WITH db.Database.name AS database_name, clstr.Cluster.name AS cluster_name, schema.Schema.name AS schema_name, tbl
OPTIONAL MATCH (tbl)-[:DESCRIPTION]->(dscrpt:Description)
RETURN database_name, cluster_name, schema_name, tbl.Table.name AS table_name,
dscrpt.Description.description AS table_description;
"""))
records = self._execute_query(
query=query.render(table_uris=str(table_uris)), param_dict={})[0]
(i_database_name, i_cluster_name, i_schema_name, i_table_name,
i_table_description) = self._get_result_column_indexes(
records, ('database_name', 'cluster_name', 'schema_name',
'table_name', 'table_description'))
popular_tables = []
for record in records.get('data', []):
r = record['row']
popular_table = PopularTable(database=r[i_database_name],
cluster=r[i_cluster_name],
schema=r[i_schema_name],
name=r[i_table_name],
description=r[i_table_description])
popular_tables.append(popular_table)
return popular_tables
def _get_popular_tables(self, *,
resource_uris: List[str]) -> List[TableSummary]:
"""
"""
if not resource_uris:
return []
# note(Wey): we render resource_uris as a string-formatted list for now; will switch to passing it via param_dict later
query = Template(
textwrap.dedent("""
MATCH (db:Database)-[:CLUSTER]->(clstr:Cluster)-[:SCHEMA]->(schema:Schema)-[:TABLE]->(tbl:Table)
WHERE id(tbl) IN {{ table_uris }}
WITH db.Database.name AS database_name, clstr.Cluster.name AS cluster_name, schema.Schema.name AS schema_name, tbl
OPTIONAL MATCH (tbl)-[:DESCRIPTION]->(dscrpt:Description)
RETURN database_name, cluster_name, schema_name, tbl.Table.name AS table_name,
dscrpt.Description.description AS table_description;
"""))
records = self._execute_query(
query=query.render(table_uris=str(resource_uris)),
param_dict={})[0]
(i_database_name, i_cluster_name, i_schema_name, i_table_name,
i_table_description) = self._get_result_column_indexes(
records, ('database_name', 'cluster_name', 'schema_name',
'table_name', 'table_description'))
popular_tables = []
for record in records.get('data', []):
r = record['row']
popular_table = TableSummary(database=r[i_database_name],
cluster=r[i_cluster_name],
schema=r[i_schema_name],
name=r[i_table_name],
description=r[i_table_description])
popular_tables.append(popular_table)
return popular_tables
def _get_popular_dashboards(
self, *, resource_uris: List[str]) -> List[DashboardSummary]:
"""
"""
if not resource_uris:
return []
# note(Wey): we render resource_uris as a string-formatted list for now; will switch to passing it via param_dict later
query = Template(
textwrap.dedent("""
MATCH (d:Dashboard)-[:DASHBOARD_OF]->(dg:Dashboardgroup)-[:DASHBOARD_GROUP_OF]->(c:Cluster)
WHERE id(d) IN {{ dashboards_uris }}
= _Class("SAScreenActionList")
SAReminderSiriKitInteraction = _Class("SAReminderSiriKitInteraction")
SAHAActionRequest = _Class("SAHAActionRequest")
SAUILParsedExpression = _Class("SAUILParsedExpression")
SAUILParsedAttachmentExpression = _Class("SAUILParsedAttachmentExpression")
SASmsSms = _Class("SASmsSms")
SAAttachment = _Class("SAAttachment")
SAAlarmObject = _Class("SAAlarmObject")
SAAlarmSleepAlarm = _Class("SAAlarmSleepAlarm")
SAAlarmAceAlarmWrap = _Class("SAAlarmAceAlarmWrap")
SAMovieV2MovieTheater = _Class("SAMovieV2MovieTheater")
SAGlance = _Class("SAGlance")
SARange = _Class("SARange")
SACalendarEvent = _Class("SACalendarEvent")
SAAceWatchFace = _Class("SAAceWatchFace")
SAMicroblogObject = _Class("SAMicroblogObject")
SAMicroblogWeiboPost = _Class("SAMicroblogWeiboPost")
SAMicroblogFacebookPost = _Class("SAMicroblogFacebookPost")
SAMicroblogTwitterPost = _Class("SAMicroblogTwitterPost")
SAActivityObject = _Class("SAActivityObject")
SAUserActivity = _Class("SAUserActivity")
SAClientUserActivity = _Class("SAClientUserActivity")
SABaseCommand = _Class("SABaseCommand")
SAIntentGroupResolveIntentSlotCompleted = _Class(
"SAIntentGroupResolveIntentSlotCompleted"
)
SAUIGetSuggestedUtterances = _Class("SAUIGetSuggestedUtterances")
SAGetContextConfiguration = _Class("SAGetContextConfiguration")
SAPunchoutOutcome = _Class("SAPunchoutOutcome")
SASGetMultilingualDictationConfig = _Class("SASGetMultilingualDictationConfig")
SACFMParseMessageForEmojiResponse = _Class("SACFMParseMessageForEmojiResponse")
SAFetchLanguageSettings = _Class("SAFetchLanguageSettings")
SAIntentGroupProcessIntentCompleted = _Class("SAIntentGroupProcessIntentCompleted")
SADialogInflectWordCommand = _Class("SADialogInflectWordCommand")
SAMPGetMediaPlayerStateRemoteResponse = _Class("SAMPGetMediaPlayerStateRemoteResponse")
SANPGetNowPlayingQueueDetailsRemoteResponse = _Class(
"SANPGetNowPlayingQueueDetailsRemoteResponse"
)
SACFPEXResponse = _Class("SACFPEXResponse")
SASyncSyncPromise = _Class("SASyncSyncPromise")
SAGetAppIntentPolicyAndVocabResponse = _Class("SAGetAppIntentPolicyAndVocabResponse")
SAPhoneIncomingCallSearchCompleted = _Class("SAPhoneIncomingCallSearchCompleted")
SASetRestrictedAppContext = _Class("SASetRestrictedAppContext")
SASyncServerVerify = _Class("SASyncServerVerify")
SAWLWatchListSearchResults = _Class("SAWLWatchListSearchResults")
SASetClientState = _Class("SASetClientState")
SANotificationsSearchCompleted = _Class("SANotificationsSearchCompleted")
SAUpdateRestrictions = _Class("SAUpdateRestrictions")
SASWillStopRecording = _Class("SASWillStopRecording")
SACreateVOXList = _Class("SACreateVOXList")
SACFProvideContext = _Class("SACFProvideContext")
SACFSpokenNotificationProvideContext = _Class("SACFSpokenNotificationProvideContext")
SAAIStartSession = _Class("SAAIStartSession")
SASetSessionObjects = _Class("SASetSessionObjects")
SASetRestrictions = _Class("SASetRestrictions")
SARemembersGetAppMatchesForIntentResponse = _Class(
"SARemembersGetAppMatchesForIntentResponse"
)
SASetRequestOrigin = _Class("SASetRequestOrigin")
SASetBackgroundContext = _Class("SASetBackgroundContext")
SASetAssistantData = _Class("SASetAssistantData")
SASetApplicationContext = _Class("SASetApplicationContext")
SASetAlertContext = _Class("SASetAlertContext")
SARollbackRequest = _Class("SARollbackRequest")
SARestartRequest = _Class("SARestartRequest")
SASetSyncContext = _Class("SASetSyncContext")
SATTSFetchSpeechSynthesisVoiceRequest = _Class("SATTSFetchSpeechSynthesisVoiceRequest")
SAMetrics = _Class("SAMetrics")
SASTTSSelectionStatistics = _Class("SASTTSSelectionStatistics")
SALoadAssistant = _Class("SALoadAssistant")
SAGetSupportedLocales = _Class("SAGetSupportedLocales")
SAGetSessionCertificate = _Class("SAGetSessionCertificate")
SARDNativeFlowContextUpdate = _Class("SARDNativeFlowContextUpdate")
SAIntentGroupAppVocabularySearchResponse = _Class(
"SAIntentGroupAppVocabularySearchResponse"
)
SADomainObjectUpdateCompleted = _Class("SADomainObjectUpdateCompleted")
SADomainObjectRetrieveCompleted = _Class("SADomainObjectRetrieveCompleted")
SADomainObjectDeleteCompleted = _Class("SADomainObjectDeleteCompleted")
SAAISearch = _Class("SAAISearch")
SADomainObjectCreateCompleted = _Class("SADomainObjectCreateCompleted")
SAUIListenForPronunciationCompleted = _Class("SAUIListenForPronunciationCompleted")
SADomainObjectCommitCompleted = _Class("SADomainObjectCommitCompleted")
SADomainObjectCancelCompleted = _Class("SADomainObjectCancelCompleted")
SAUpdateReadingState = _Class("SAUpdateReadingState")
SASSetAudioDuckingDelay = _Class("SASSetAudioDuckingDelay")
SADestroyAssistant = _Class("SADestroyAssistant")
SACreateSessionInfoRequest = _Class("SACreateSessionInfoRequest")
SACreateAssistant = _Class("SACreateAssistant")
SAGuidanceCheckForGuideUpdates = _Class("SAGuidanceCheckForGuideUpdates")
SAClearContext = _Class("SAClearContext")
SASetCarPlayContext = _Class("SASetCarPlayContext")
SACancelRequest = _Class("SACancelRequest")
SASetWristDetectionStatusClientState = _Class("SASetWristDetectionStatusClientState")
SASyncClientVerifyResult = _Class("SASyncClientVerifyResult")
SAEHandleUnderstandingOutputCompleted = _Class("SAEHandleUnderstandingOutputCompleted")
SAPerformDataDetectionResult = _Class("SAPerformDataDetectionResult")
SATTSCancelSpeechSynthesis = _Class("SATTSCancelSpeechSynthesis")
SASmsGroupSearchCompleted = _Class("SASmsGroupSearchCompleted")
SARDResultCandidateSelected = _Class("SARDResultCandidateSelected")
SANPGetNowPlayingQueueDetailsResponse = _Class("SANPGetNowPlayingQueueDetailsResponse")
SASyncGetCachedSyncAnchorsResponse = _Class("SASyncGetCachedSyncAnchorsResponse")
SAUIFetchSpeechAlternatives = _Class("SAUIFetchSpeechAlternatives")
SAResultSelected = _Class("SAResultSelected")
SAUIAppPunchOutEvent = _Class("SAUIAppPunchOutEvent")
SAABMultiPersonSearchCompleted = _Class("SAABMultiPersonSearchCompleted")
SAPhoneHangUpResponse = _Class("SAPhoneHangUpResponse")
SAConfidenceScores = _Class("SAConfidenceScores")
SASEndpointStatus = _Class("SASEndpointStatus")
SASUpdateAudioInfo = _Class("SASUpdateAudioInfo")
SASEnableServerEndpointer = _Class("SASEnableServerEndpointer")
SAPerformDataDetectionMatch = _Class("SAPerformDataDetectionMatch")
SALocalSearchVehicleEventSearchCompleted = _Class(
"SALocalSearchVehicleEventSearchCompleted"
)
SARemembersGetRelativeAppUsageProbabilitiesResponse = _Class(
"SARemembersGetRelativeAppUsageProbabilitiesResponse"
)
SASDisableServerEndpointer = _Class("SASDisableServerEndpointer")
SAAppsGetRestrictedAppsResponse = _Class("SAAppsGetRestrictedAppsResponse")
SAIntentGroupHandleIntentCompleted = _Class("SAIntentGroupHandleIntentCompleted")
SALocalSearchSendToProtobufConduitCompleted = _Class(
"SALocalSearchSendToProtobufConduitCompleted"
)
SAPerformDataDetectionResults = _Class("SAPerformDataDetectionResults")
SASetNLAlertContext = _Class("SASetNLAlertContext")
SAIntentGroupGetIntentDefinitionsResponse = _Class(
"SAIntentGroupGetIntentDefinitionsResponse"
)
SAIntentGroupConfirmIntentCompleted = _Class("SAIntentGroupConfirmIntentCompleted")
SADPDeviceSearchCompleted = _Class("SADPDeviceSearchCompleted")
SALocalSearchSetGeoClientState = _Class("SALocalSearchSetGeoClientState")
SAAICancelSession = _Class("SAAICancelSession")
SAMPBasicPodcastAppSearchCompleted = _Class("SAMPBasicPodcastAppSearchCompleted")
SALocalSearchNavigationPromptManeuverCompleted = _Class(
"SALocalSearchNavigationPromptManeuverCompleted"
)
SAGetWatchAppIntentPolicyAndVocabResponse = _Class(
"SAGetWatchAppIntentPolicyAndVocabResponse"
)
SASetRemoteDevices = _Class("SASetRemoteDevices")
SASyncStartDeepSyncVerification = _Class("SASyncStartDeepSyncVerification")
SASetDynamiteClientState = _Class("SASetDynamiteClientState")
SALocalSearchGetNavigationStatusCompleted = _Class(
"SALocalSearchGetNavigationStatusCompleted"
)
SASyncDeepSyncVerificationResult = _Class("SASyncDeepSyncVerificationResult")
SASSetBootstrapSpeechIds = _Class("SASSetBootstrapSpeechIds")
SASSpeechEndpointIdentified = _Class("SASSpeechEndpointIdentified")
SATTSStartSpeechSynthesisRequest = _Class("SATTSStartSpeechSynthesisRequest")
SASendInstrumentation = _Class("SASendInstrumentation")
SAPreSynthesizeTTSCompleted = _Class("SAPreSynthesizeTTSCompleted")
SAIntentGroupGetAppInfoResponse = _Class("SAIntentGroupGetAppInfoResponse")
SASetClientFlowState = _Class("SASetClientFlowState")
SAReminderListSearchCompleted = _Class("SAReminderListSearchCompleted")
SACalendarDefaultSourceGetCompleted = _Class("SACalendarDefaultSourceGetCompleted")
SASmsRecipientSearchCompleted = _Class("SASmsRecipientSearchCompleted")
SAStockDeleteCompleted = _Class("SAStockDeleteCompleted")
SALocalSearchShowMapPointsCompleted = _Class("SALocalSearchShowMapPointsCompleted")
SAUILParseExpressionsCompleted = _Class("SAUILParseExpressionsCompleted")
SAContextPromise = _Class("SAContextPromise")
SASettingResponse = _Class("SASettingResponse")
SASettingSetValueResponse = _Class("SASettingSetValueResponse")
SASettingSetAppearanceResponse = _Class("SASettingSetAppearanceResponse")
SASettingSetNumberResponse = _Class("SASettingSetNumberResponse")
SASettingSetFloatResponse = _Class("SASettingSetFloatResponse")
SASettingSetBoolResponse = _Class("SASettingSetBoolResponse")
SASettingGetValueResponse = _Class("SASettingGetValueResponse")
SASettingGetAppearanceResponse = _Class("SASettingGetAppearanceResponse")
SASettingGetNoiseManagementResponse = _Class("SASettingGetNoiseManagementResponse")
SASettingGetNumberResponse = _Class("SASettingGetNumberResponse")
SASettingGetFloatResponse = _Class("SASettingGetFloatResponse")
SASettingGetBoolResponse = _Class("SASettingGetBoolResponse")
SASettingGetAudioRouteResponse = _Class("SASettingGetAudioRouteResponse")
SAUISnippetInteraction = _Class("SAUISnippetInteraction")
SAUISnippetAttributeOpened = _Class("SAUISnippetAttributeOpened")
SAUISnippetObjectInteraction = _Class("SAUISnippetObjectInteraction")
SAUISnippetOpened = _Class("SAUISnippetOpened")
SAUISnippetExpanded = _Class("SAUISnippetExpanded")
SAUISnippetEdited = _Class("SAUISnippetEdited")
SAUIConfirmSnippet = _Class("SAUIConfirmSnippet")
SAUICancelSnippet = _Class("SAUICancelSnippet")
SASetCurrentDisplayRouteClientState = _Class("SASetCurrentDisplayRouteClientState")
SAStartRequest = _Class("SAStartRequest")
SAUIDomainObjectPickerSelection = _Class("SAUIDomainObjectPickerSelection")
SAUIGetResponseAlternatives = _Class("SAUIGetResponseAlternatives")
SAReplayRequestFromDMHypothesis = _Class("SAReplayRequestFromDMHypothesis")
SAStartDirectActionRequest = _Class("SAStartDirectActionRequest")
SARefreshRequest = _Class("SARefreshRequest")
SAStartMultiUserTestRequest = _Class("SAStartMultiUserTestRequest")
SABackgroundUpdateRequest = _Class("SABackgroundUpdateRequest")
SAStartHandoffRequest = _Class("SAStartHandoffRequest")
SAStartStructuredDictationRequest = _Class("SAStartStructuredDictationRequest")
SATTSStartSpeechSynthesisStreamingRequest = _Class(
"SATTSStartSpeechSynthesisStreamingRequest"
)
SASStartCorrectedSpeechRequest = _Class("SASStartCorrectedSpeechRequest")
SASSpeechCorrectionStatistics = _Class("SASSpeechCorrectionStatistics")
SATTSGetSpeechSynthesisVolumeResponse = _Class("SATTSGetSpeechSynthesisVolumeResponse")
SADIAGLatencyDiagnosticReport = _Class("SADIAGLatencyDiagnosticReport")
SASetMorphunDataState = _Class("SASetMorphunDataState")
SASetMultipleClientStates = _Class("SASetMultipleClientStates")
SALocalSearchNavigationEndCompleted = _Class("SALocalSearchNavigationEndCompleted")
SAWeatherLocationSearchCompleted = _Class("SAWeatherLocationSearchCompleted")
SAWeatherLocationDeleteCompleted = _Class("SAWeatherLocationDeleteCompleted")
SAMultiUserInfo = _Class("SAMultiUserInfo")
SAWeatherLocationAddCompleted = _Class("SAWeatherLocationAddCompleted")
SAAppsAppSearchResponse = _Class("SAAppsAppSearchResponse")
SAExecuteOnRemoteResponse = _Class("SAExecuteOnRemoteResponse")
SAWebSiteSearchStarted = _Class("SAWebSiteSearchStarted")
SANPMoveOutputGroupToDevicesResponse = _Class("SANPMoveOutputGroupToDevicesResponse")
SAWebSearchStarted = _Class("SAWebSearchStarted")
SALocalSearchDropAPinCompleted = _Class("SALocalSearchDropAPinCompleted")
SAMicroblogSocialCredentialResult = _Class("SAMicroblogSocialCredentialResult")
SAFmfSearchCompleted = _Class("SAFmfSearchCompleted")
SAGetSingleClientStateFailed = _Class("SAGetSingleClientStateFailed")
SAReminderSearchCompleted = _Class("SAReminderSearchCompleted")
SAIntentGroupUpdateIntentSlot = _Class("SAIntentGroupUpdateIntentSlot")
SASetSiriPresentationClientState = _Class("SASetSiriPresentationClientState")
SAGetResultObjects = _Class("SAGetResultObjects")
SAStockSearchCompleted = _Class("SAStockSearchCompleted")
SAStockAddCompleted = _Class("SAStockAddCompleted")
SACFAbstractClientCommandCompleted = _Class("SACFAbstractClientCommandCompleted")
SACFProviderCompleted = _Class("SACFProviderCompleted")
SACFFlowCompleted = _Class("SACFFlowCompleted")
SATimerSetCompleted = _Class("SATimerSetCompleted")
SAHACommandCompleted = _Class("SAHACommandCompleted")
SATimerResumeCompleted = _Class("SATimerResumeCompleted")
SATimerPauseCompleted = _Class("SATimerPauseCompleted")
SATimerGetCompleted = _Class("SATimerGetCompleted")
SATimerCancelCompleted = _Class("SATimerCancelCompleted")
SANPAddOutputDevicesToGroupResponse = _Class("SANPAddOutputDevicesToGroupResponse")
SAABPersonSearchCompleted = _Class("SAABPersonSearchCompleted")
SAKnowledgeGetValuesForKeysResponse = _Class("SAKnowledgeGetValuesForKeysResponse")
SAClockSearchCompleted = _Class("SAClockSearchCompleted")
SAHLGetActiveWorkoutApplicationIdentifierResponse = _Class(
"SAHLGetActiveWorkoutApplicationIdentifierResponse"
)
SASServerBoundConfusionNetwork = _Class("SASServerBoundConfusionNetwork")
SAClockDeleteCompleted = _Class("SAClockDeleteCompleted")
SAClockAddCompleted = _Class("SAClockAddCompleted")
SANoteUpdateCompleted = _Class("SANoteUpdateCompleted")
SANoteSearchCompleted = _Class("SANoteSearchCompleted")
SACFFetchScriptCommand = _Class("SACFFetchScriptCommand")
SANoteDeleteCompleted = _Class("SANoteDeleteCompleted")
SAMPGetRoutesResponse = _Class("SAMPGetRoutesResponse")
SANoteCreateCompleted = _Class("SANoteCreateCompleted")
SASetNLContext = _Class("SASetNLContext")
SANPRemoveOutputDevicesFromGroupResponse = _Class(
"SANPRemoveOutputDevicesFromGroupResponse"
)
SANPVideoGetAudioTracksAndSubtitleOptionsCompleted = _Class(
"SANPVideoGetAudioTracksAndSubtitleOptionsCompleted"
)
SAEmailSearchCompleted = _Class("SAEmailSearchCompleted")
SAMPGetStateResponse = _Class("SAMPGetStateResponse")
SAMPGeniusSummonSucceded = _Class("SAMPGeniusSummonSucceded")
SAMPGeniusSummonFailed = _Class("SAMPGeniusSummonFailed")
SALCMSetTvSubscriptionsClientState = _Class("SALCMSetTvSubscriptionsClientState")
SAIntentGroupResolveAppForIntentResponse = _Class(
"SAIntentGroupResolveAppForIntentResponse"
)
SAAlarmUpdateCompleted = _Class("SAAlarmUpdateCompleted")
SALocalSearchSetNavigationVoiceVolumeCompleted = _Class(
"SALocalSearchSetNavigationVoiceVolumeCompleted"
)
SAMPLoadQPCompleted = _Class("SAMPLoadQPCompleted")
SAAlarmSearchCompleted = _Class("SAAlarmSearchCompleted")
SAAlarmDeleteCompleted = _Class("SAAlarmDeleteCompleted")
SAAlarmCreateCompleted = _Class("SAAlarmCreateCompleted")
SADeleteAssistantHistory = _Class("SADeleteAssistantHistory")
SASmsSearchCompleted = _Class("SASmsSearchCompleted")
SASMultilingualDictationLanguageSelected = _Class(
"SASMultilingualDictationLanguageSelected"
)
SANPGetVolumeLevelResponse = _Class("SANPGetVolumeLevelResponse")
SAGenericCommand = _Class("SAGenericCommand")
SASStartSpeech = _Class("SASStartSpeech")
SASStartSpeechRequest = _Class("SASStartSpeechRequest")
SASStartStructuredDictationSpeechRequest = _Class(
"SASStartStructuredDictationSpeechRequest"
)
SASStartSpeechDictation = _Class("SASStartSpeechDictation")
SASStartVoiceSearchRequest = _Class("SASStartVoiceSearchRequest")
SASStartPronunciationRequest = _Class("SASStartPronunciationRequest")
SAIntentGroupIntentSearch = _Class("SAIntentGroupIntentSearch")
SAIntentGroupAppAuthorizationStatusResponse = _Class(
"SAIntentGroupAppAuthorizationStatusResponse"
)
SAIntentGroupRunSiriKitExecutorCompleted = _Class(
"SAIntentGroupRunSiriKitExecutorCompleted"
)
SADialogInflectWordCommands = _Class("SADialogInflectWordCommands")
SASSpeechPacket = _Class("SASSpeechPacket")
SAActivityStream = _Class("SAActivityStream")
SASFinishSpeech = _Class("SASFinishSpeech")
SASCancelSpeech = _Class("SASCancelSpeech")
SAPhoneSearchCompleted = _Class("SAPhoneSearchCompleted")
SAPhoneCallStarted = _Class("SAPhoneCallStarted")
SAIntentGroupAppEntitlementResponse = _Class("SAIntentGroupAppEntitlementResponse")
SAQuickStopCompleted = _Class("SAQuickStopCompleted")
SACalendarEventSearchCompleted = _Class("SACalendarEventSearchCompleted")
SAKnowledgeGetLinkedDataResponse = _Class("SAKnowledgeGetLinkedDataResponse")
SAWeatherShowWeatherLocationsCompleted = _Class(
"SAWeatherShowWeatherLocationsCompleted"
)
SAStartBackgroundActionRequest = _Class("SAStartBackgroundActionRequest")
SABaseClientBoundCommand = _Class("SABaseClientBoundCommand")
SAUISetSuggestedUtterances = _Class("SAUISetSuggestedUtterances")
SADomainObjectPunchOut = _Class("SADomainObjectPunchOut")
SADeviceControlStopScreenRecording = _Class("SADeviceControlStopScreenRecording")
SANPVideoSetDefaultAudioTrack = _Class("SANPVideoSetDefaultAudioTrack")
SACFPEXQuery = _Class("SACFPEXQuery")
SADialogInflectWordResponse = _Class("SADialogInflectWordResponse")
SASConfirmEndpoint = _Class("SASConfirmEndpoint")
SAWLGetWatchListPlayables = _Class("SAWLGetWatchListPlayables")
SAPerformDataDetection = _Class("SAPerformDataDetection")
SADeviceControlTakeScreenshot = _Class("SADeviceControlTakeScreenshot")
SASSpeechPartialResult = _Class("SASSpeechPartialResult")
SASetContextConfiguration = _Class("SASetContextConfiguration")
SALCMGetTvSubscriptionsClientState = _Class("SALCMGetTvSubscriptionsClientState")
SASyncClientVerify = _Class("SASyncClientVerify")
SAUIDisambiguationItemSelected = _Class("SAUIDisambiguationItemSelected")
SAMPGetStateResponseRemote = _Class("SAMPGetStateResponseRemote")
SASServerEndpointFeatures = _Class("SASServerEndpointFeatures")
SADPDevicePlaySound = _Class("SADPDevicePlaySound")
SAWaitForCommands = _Class("SAWaitForCommands")
SAUIExtendCurrentTTS = _Class("SAUIExtendCurrentTTS")
SANPRemoveOutputDevicesFromGroup = _Class("SANPRemoveOutputDevicesFromGroup")
SAIntentGroupSiriKitPluginSignal = _Class("SAIntentGroupSiriKitPluginSignal")
SANPVideoSetAutoSubtitles = _Class("SANPVideoSetAutoSubtitles")
SAAlarmShowAndPerformAlarmAction = _Class("SAAlarmShowAndPerformAlarmAction")
SAIntentGroupLaunchAppWithIntent = _Class("SAIntentGroupLaunchAppWithIntent")
SAIntentGroupProcessIntent = _Class("SAIntentGroupProcessIntent")
SAPhoneIncomingCallSearch = _Class("SAPhoneIncomingCallSearch")
SAUIHideSiriOverlay = _Class("SAUIHideSiriOverlay")
SAStructuredDictationFailed = _Class("SAStructuredDictationFailed")
SAScreenActionShowHomeScreen = _Class("SAScreenActionShowHomeScreen")
SARemembersGetRelativeAppUsageProbabilities = _Class(
"SARemembersGetRelativeAppUsageProbabilities"
)
SADonateRankedQueriesToPortrait = _Class("SADonateRankedQueriesToPortrait")
SALogNumericEvent = _Class("SALogNumericEvent")
SAPhonePlayAudio = _Class("SAPhonePlayAudio")
SALogStringEvent = _Class("SALogStringEvent")
SASetClientData = _Class("SASetClientData")
SASetSupportedLocales = _Class("SASetSupportedLocales")
SASetConnectionHeader = _Class("SASetConnectionHeader")
SASessionValidationFailed = _Class("SASessionValidationFailed")
SASendCommands = _Class("SASendCommands")
SAWLAddContentToWatchList = _Class("SAWLAddContentToWatchList")
SARollbackSucceeded = _Class("SARollbackSucceeded")
SARequestCompleted = _Class("SARequestCompleted")
SAPong = _Class("SAPong")
SAPing = _Class("SAPing")
SACFMParseMessageForEmojiRequest = _Class("SACFMParseMessageForEmojiRequest")
SADIAGSetLatencyDiagnosticConfiguration = _Class(
"SADIAGSetLatencyDiagnosticConfiguration"
)
SAMPRateMediaEntity = _Class("SAMPRateMediaEntity")
SACardShowNextCard = _Class("SACardShowNextCard")
SANotificationsSearch = _Class("SANotificationsSearch")
SAGetSessionCertificateResponse = _Class("SAGetSessionCertificateResponse")
SAMPCreateRadioStation = _Class("SAMPCreateRadioStation")
SAGetRequestOrigin = _Class("SAGetRequestOrigin")
SAGetAssistantData = _Class("SAGetAssistantData")
SAGetDynamiteClientState = _Class("SAGetDynamiteClientState")
SASetActivationToken = _Class("SASetActivationToken")
SANPAddOutputDevicesToGroup = _Class("SANPAddOutputDevicesToGroup")
SAHLGetActiveWorkoutApplicationIdentifier = _Class(
"SAHLGetActiveWorkoutApplicationIdentifier"
)
SASAbortSpeechRequest = _Class("SASAbortSpeechRequest")
SADomainObjectUpdate = _Class("SADomainObjectUpdate")
SADomainObjectRetrieve = _Class("SADomainObjectRetrieve")
SADomainObjectDelete = _Class("SADomainObjectDelete")
SAUIAddContentToView = _Class("SAUIAddContentToView")
SAVCSAddResultsToContentShelf = _Class("SAVCSAddResultsToContentShelf")
SADomainObjectCommit = _Class("SADomainObjectCommit")
SADomainObjectCancel = _Class("SADomainObjectCancel")
SAVCSPlayContent = _Class("SAVCSPlayContent")
SASettingGetRecordAudioRoute = _Class("SASettingGetRecordAudioRoute")
SAUISetURLData = _Class("SAUISetURLData")
SACreateSessionInfoResponse = _Class("SACreateSessionInfoResponse")
SATTSSetSpeechSynthesisVolume = _Class("SATTSSetSpeechSynthesisVolume")
SACommandSucceeded = _Class("SACommandSucceeded")
SAUIRevealRecognizedSpeech = _Class("SAUIRevealRecognizedSpeech")
SACommandIgnored = _Class("SACommandIgnored")
SALocalSearchGetGeoClientState = _Class("SALocalSearchGetGeoClientState")
SAMPAddMediaItemsToUpNextQueue = _Class("SAMPAddMediaItemsToUpNextQueue")
SAGetSiriPresentationClientState = _Class("SAGetSiriPresentationClientState")
SACommandFailed = _Class("SACommandFailed")
SACancelSucceeded = _Class("SACancelSucceeded")
SAGetMorphunDataState = _Class("SAGetMorphunDataState")
SAStructuredDictationResult = _Class("SAStructuredDictationResult")
SAGetMultipleClientStates = _Class("SAGetMultipleClientStates")
SATimerShowAndPerformTimerAction = _Class("SATimerShowAndPerformTimerAction")
SAWLRemoveContentFromWatchList = _Class("SAWLRemoveContentFromWatchList")
SAAssistantNotReady = _Class("SAAssistantNotReady")
SAAssistantNotFound = _Class("SAAssistantNotFound")
SAAssistantLoaded = _Class("SAAssistantLoaded")
SAGLShowGlance = _Class("SAGLShowGlance")
SAAssistantDestroyed = _Class("SAAssistantDestroyed")
SAAssistantCreated = _Class("SAAssistantCreated")
SASVoiceSearchPartialResult = _Class("SASVoiceSearchPartialResult")
SARadarFilingDidBegin = _Class("SARadarFilingDidBegin")
SAAcknowledgeAlert = _Class("SAAcknowledgeAlert")
SANPVideoTurnOffSubtitles = _Class("SANPVideoTurnOffSubtitles")
SADialogInflectWordResponses = _Class("SADialogInflectWordResponses")
SASExtractSpeechData = _Class("SASExtractSpeechData")
SANPVideoSetAudioTrack = _Class("SANPVideoSetAudioTrack")
SARDNlOutcomeCandidate = _Class("SARDNlOutcomeCandidate")
SARadarSetDetails = _Class("SARadarSetDetails")
SAUILaunchTVRemote = _Class("SAUILaunchTVRemote")
SAQuickStop = _Class("SAQuickStop")
SAKnowledgeSetLinkedData = _Class("SAKnowledgeSetLinkedData")
SAExecuteOnRemoteRequest = _Class("SAExecuteOnRemoteRequest")
SAStartLocalRequest = _Class("SAStartLocalRequest")
SAUIShowFullScreenEffect = _Class("SAUIShowFullScreenEffect")
SAUIListenForPronunciation = _Class("SAUIListenForPronunciation")
SANPSetVolumeLevel = _Class("SANPSetVolumeLevel")
SAReminderSnooze = _Class("SAReminderSnooze")
SAUIPlayNotificationSound = _Class("SAUIPlayNotificationSound")
SATTSGetSpeechSynthesisVolume = _Class("SATTSGetSpeechSynthesisVolume")
SADeviceControlLockDevice = _Class("SADeviceControlLockDevice")
SAGuidanceGuideUpdate = _Class("SAGuidanceGuideUpdate")
SASmsCancelCurrentAudioPlayBack = _Class("SASmsCancelCurrentAudioPlayBack")
SAKnowledgeDeleteValueForKey = _Class("SAKnowledgeDeleteValueForKey")
SAMPAddMediaEntityToAcousticIdHistory = _Class("SAMPAddMediaEntityToAcousticIdHistory")
SAMPAddMediaEntityToWishList = _Class("SAMPAddMediaEntityToWishList")
SASSuspendClientEndpointer = _Class("SASSuspendClientEndpointer")
SATTSFetchSpeechSynthesisVoiceResponse = _Class(
"SATTSFetchSpeechSynthesisVoiceResponse"
)
SAUIGetResponseAlternativesPlayback = _Class("SAUIGetResponseAlternativesPlayback")
SAMPLikesMediaEntity = _Class("SAMPLikesMediaEntity")
SAIntentGroupConfirmIntent = _Class("SAIntentGroupConfirmIntent")
SAMPDislikesMediaEntity = _Class("SAMPDislikesMediaEntity")
SASmsPlayAudio = _Class("SASmsPlayAudio")
SAIntentGroupGetAppInfo = _Class("SAIntentGroupGetAppInfo")
SAAppsGetRestrictedApps = _Class("SAAppsGetRestrictedApps")
SAAIRetrySearch = _Class("SAAIRetrySearch")
SAUIShowControlCenter = _Class("SAUIShowControlCenter")
SAUICancelCurrentTTS = _Class("SAUICancelCurrentTTS")
SAClientSetupInfo = _Class("SAClientSetupInfo")
SASyncFinished = _Class("SASyncFinished")
SAAppsCheckRestriction = _Class("SAAppsCheckRestriction")
SACFTriggerMorphunAssetsManagement = _Class("SACFTriggerMorphunAssetsManagement")
SAUIAppPunchOut = _Class("SAUIAppPunchOut")
SARemembersGetAppMatchesForIntent = _Class("SARemembersGetAppMatchesForIntent")
SAUIUpdateViews = _Class("SAUIUpdateViews")
SAUIRequestUpdateViews = _Class("SAUIRequestUpdateViews")
SATTSGetUnspeakableRangeOfTextCompleted = _Class(
"SATTSGetUnspeakableRangeOfTextCompleted"
)
SASyncServerVerifyResponse = _Class("SASyncServerVerifyResponse")
SAIntentGroupHandleIntent = _Class("SAIntentGroupHandleIntent")
SASClientBoundConfusionNetwork = _Class("SASClientBoundConfusionNetwork")
SAUIAddDialogs = _Class("SAUIAddDialogs")
SASVoiceIdentificationSignal = _Class("SASVoiceIdentificationSignal")
SATTSSpeechSynthesisFailure = _Class("SATTSSpeechSynthesisFailure")
SAPreSynthesizeTTS = _Class("SAPreSynthesizeTTS")
SALocalSearchShowPlaceDetails = _Class("SALocalSearchShowPlaceDetails")
SATTSSpeechSynthesisCompleted = _Class("SATTSSpeechSynthesisCompleted")
SAMPAddMediaItemsToPlaylist = _Class("SAMPAddMediaItemsToPlaylist")
SASettingGetPlaybackAudioRoute = _Class("SASettingGetPlaybackAudioRoute")
SACalendarShowNextEvent = _Class("SACalendarShowNextEvent")
SASMultilingualSpeechRecognized = _Class("SASMultilingualSpeechRecognized")
SASSpeechServerEndpointIdentified = _Class("SASSpeechServerEndpointIdentified")
SAIntentGroupResolveIntentSlot = _Class("SAIntentGroupResolveIntentSlot")
SASResultCandidate = _Class("SASResultCandidate")
SARDAsrOutcomeCandidate = _Class("SARDAsrOutcomeCandidate")
SAUIInitiateStartSpeechRequest = _Class("SAUIInitiateStartSpeechRequest")
SASetHandoffPayload = _Class("SASetHandoffPayload")
SASPronunciationRecognized = _Class("SASPronunciationRecognized")
SAAISearchCompleted = _Class("SAAISearchCompleted")
SADeviceMyriadConfiguration = _Class("SADeviceMyriadConfiguration")
SAIntentGroupIntentSearchResponse = _Class("SAIntentGroupIntentSearchResponse")
SAIntentGroupGetIntentDefinitions = _Class("SAIntentGroupGetIntentDefinitions")
SARemoveFromActivityStream = _Class("SARemoveFromActivityStream")
SAAppsLaunchApp = _Class("SAAppsLaunchApp")
SASettingShowAndPerformSettingsAction = _Class("SASettingShowAndPerformSettingsAction")
SADPDeviceSearch = _Class("SADPDeviceSearch")
SAUIUnlockDevice = _Class("SAUIUnlockDevice")
SASyncChunkAccepted = _Class("SASyncChunkAccepted")
SATTSGetUnspeakableRangeOfText = _Class("SATTSGetUnspeakableRangeOfText")
SAIntentGroupAppAuthorizationStatusRequest = _Class(
"SAIntentGroupAppAuthorizationStatusRequest"
)
SAAppsAppSearch = _Class("SAAppsAppSearch")
SAUIShowHelp = _Class("SAUIShowHelp")
SAUIRepeatIt = _Class("SAUIRepeatIt")
SANPGetVolumeLevel = _Class("SANPGetVolumeLevel")
SAUISayIt = _Class("SAUISayIt")
SAWLSearchPlayableContentFromWatchList = _Class(
"SAWLSearchPlayableContentFromWatchList"
)
SAVCSVendResultsToSearch = _Class("SAVCSVendResultsToSearch")
SAEHandleUnderstandingOutput = _Class("SAEHandleUnderstandingOutput")
SAIntentGroupIntentSignal = _Class("SAIntentGroupIntentSignal")
SAAIRequestSearch = _Class("SAAIRequestSearch")
SARecordLocationActivity = _Class("SARecordLocationActivity")
SARecordActivity = _Class("SARecordActivity")
SAPhoneHangUp = _Class("SAPhoneHangUp")
SANPGetNowPlayingQueueDetailsRemote = _Class("SANPGetNowPlayingQueueDetailsRemote")
SATTSEstimateTTSRequestDuration = _Class("SATTSEstimateTTSRequestDuration")
SAIntentGroupIntentInvocationResponse = _Class("SAIntentGroupIntentInvocationResponse")
SAIntentGroupSupportedIntentResponse = _Class("SAIntentGroupSupportedIntentResponse")
SAIntentGroupUnsupportedIntentResponse = _Class(
"SAIntentGroupUnsupportedIntentResponse"
)
SACFFetchScriptResponse = _Class("SACFFetchScriptResponse")
SAKnowledgeGetValuesForKeys = _Class("SAKnowledgeGetValuesForKeys")
SANPMoveOutputGroupToDevices = _Class("SANPMoveOutputGroupToDevices")
SAAceDomainSignal = _Class("SAAceDomainSignal")
SAHandoffAvailabilityCheck = _Class("SAHandoffAvailabilityCheck")
SAIntentGroupSiriKitClearContext = _Class("SAIntentGroupSiriKitClearContext")
SASSpeechRecognitionComplete = _Class("SASSpeechRecognitionComplete")
SADeviceConfiguration = _Class("SADeviceConfiguration")
SAGetWatchAppIntentPolicyAndVocab = _Class("SAGetWatchAppIntentPolicyAndVocab")
SAMicroblogGetSocialCredential = _Class("SAMicroblogGetSocialCredential")
SAMicroblogGetTwitterCredential = _Class("SAMicroblogGetTwitterCredential")
SAIntentGroupAppEntitlementRequest = _Class("SAIntentGroupAppEntitlementRequest")
SAKnowledgeKeyValueEntry = _Class("SAKnowledgeKeyValueEntry")
SAKnowledgeKeyValueBlobEntry = _Class("SAKnowledgeKeyValueBlobEntry")
SAKnowledgeKeyValueStringEntry = _Class("SAKnowledgeKeyValueStringEntry")
SAKnowledgeKeyValueNumericEntry = _Class("SAKnowledgeKeyValueNumericEntry")
SAUIChangePrimaryUtterance = _Class("SAUIChangePrimaryUtterance")
SANPGetNowPlayingQueueDetails = _Class("SANPGetNowPlayingQueueDetails")
SASyncSetDeepSyncVerificationNeeded = _Class("SASyncSetDeepSyncVerificationNeeded")
SASyncGetCachedSyncAnchors = _Class("SASyncGetCachedSyncAnchors")
SAGetAppIntentPolicyAndVocab = _Class("SAGetAppIntentPolicyAndVocab")
SASVoiceSearchFinalResult = _Class("SASVoiceSearchFinalResult")
SASyncChunkDenied = _Class("SASyncChunkDenied")
SALCMShowModalView = _Class("SALCMShowModalView")
SADeviceControlStartScreenRecording = _Class("SADeviceControlStartScreenRecording")
SAAddResultObjects = _Class("SAAddResultObjects")
SASSetMultilingualDictationConfig = _Class("SASSetMultilingualDictationConfig")
SAGetClientState = _Class("SAGetClientState")
SAKnowledgeSetValuesForKeys = _Class("SAKnowledgeSetValuesForKeys")
SASetDeviceTTSMuteState = _Class("SASetDeviceTTSMuteState")
SAUIShowSpeechAlternatives = _Class("SAUIShowSpeechAlternatives")
SAUILaunchAppWithParameters = _Class("SAUILaunchAppWithParameters")
SAIntentGroupAppVocabularySearchRequest = _Class(
"SAIntentGroupAppVocabularySearchRequest"
)
SAStartUIRequest = _Class("SAStartUIRequest")
SAPhoneClientCoordinationPhoneCall = _Class("SAPhoneClientCoordinationPhoneCall")
SALCMShowChannel = _Class("SALCMShowChannel")
SALanguageSettings = _Class("SALanguageSettings")
SACFRemoveScriptCommand = _Class("SACFRemoveScriptCommand")
SALogSignatureWithABC = _Class("SALogSignatureWithABC")
SATTSSpeechSynthesisPartialResponse = _Class("SATTSSpeechSynthesisPartialResponse")
SAScreenActionActivateUIElement = _Class("SAScreenActionActivateUIElement")
SAGetCurrentDisplayRouteClientState = _Class("SAGetCurrentDisplayRouteClientState")
SAIntentGroupRunSiriKitExecutor = _Class("SAIntentGroupRunSiriKitExecutor")
SAMPAddMediaItemsToLibrary = _Class("SAMPAddMediaItemsToLibrary")
SAKnowledgeGetLinkedData = _Class("SAKnowledgeGetLinkedData")
SAUICloseAssistant = _Class("SAUICloseAssistant")
SADonateSiriQueryToPortrait = _Class("SADonateSiriQueryToPortrait")
SAIntentGroupResolveAppForIntent = _Class("SAIntentGroupResolveAppForIntent")
SASyncGetAnchorsResponse = _Class("SASyncGetAnchorsResponse")
SASyncGetAnchors = _Class("SASyncGetAnchors")
SASyncChunk = _Class("SASyncChunk")
SANPVideoTurnOnSubtitles = _Class("SANPVideoTurnOnSubtitles")
SATTSSpeechSynthesisStreaming = _Class("SATTSSpeechSynthesisStreaming")
SATTSSpeechSynthesisStreamingEnd = _Class("SATTSSpeechSynthesisStreamingEnd")
SATTSSpeechSynthesisStreamingBegin = _Class("SATTSSpeechSynthesisStreamingBegin")
SATTSSpeechSynthesisStreamingChunk = _Class("SATTSSpeechSynthesisStreamingChunk")
SAPhoneAnswer = _Class("SAPhoneAnswer")
SARemembersSaveInteraction = _Class("SARemembersSaveInteraction")
SAGetWristDetectionStatusClientState = _Class("SAGetWristDetectionStatusClientState")
SAScreenActionPressBackButton = _Class("SAScreenActionPressBackButton")
SAUIAddViews = _Class("SAUIAddViews")
SACFClientFlowUpdateScriptsCommand = _Class("SACFClientFlowUpdateScriptsCommand")
SAGetClientFlowState = _Class("SAGetClientFlowState")
SASSpeechRecognized = _Class("SASSpeechRecognized")
SASSpeechFailure = _Class("SASSpeechFailure")
SANPVideoGetAudioTracksAndSubtitleOptions = _Class(
"SANPVideoGetAudioTracksAndSubtitleOptions"
)
SAUILParseExpressions = _Class("SAUILParseExpressions")
SACheckAuthenticationRequirement = _Class("SACheckAuthenticationRequirement")
SAExecuteCallbacks = _Class("SAExecuteCallbacks")
SAUIOpenLink = _Class("SAUIOpenLink")
SAIntentGroupLaunchAppWithUserActivity = _Class(
"SAIntentGroupLaunchAppWithUserActivity"
)
SAUIClearScreen = _Class("SAUIClearScreen")
SAInitiateHandoffOnCompanion = _Class("SAInitiateHandoffOnCompanion")
SALocalSearchStartNavigation = _Class("SALocalSearchStartNavigation")
SAFetchActivityStream = _Class("SAFetchActivityStream")
SAFetchUserActivity = _Class("SAFetchUserActivity")
SATTSEstimateTTSRequestDurationCompleted = _Class(
"SATTSEstimateTTSRequestDurationCompleted"
)
SADomainCommand = _Class("SADomainCommand")
SAAXSkipBack = _Class("SAAXSkipBack")
SAMacOpenAboutThisMac = _Class("SAMacOpenAboutThisMac")
SALocalSearchNavigationPromptRerouteSuggestion = _Class(
"SALocalSearchNavigationPromptRerouteSuggestion"
)
SAHLSetWorkoutState = _Class("SAHLSetWorkoutState")
SALocalSearchMapZoomOut = _Class("SALocalSearchMapZoomOut")
SAAnswerSearch = _Class("SAAnswerSearch")
SAiCloudGetiCloudInformation = _Class("SAiCloudGetiCloudInformation")
SAMPPlayPodcasts = _Class("SAMPPlayPodcasts")
SAPhonePlayVoiceMail = _Class("SAPhonePlayVoiceMail")
SAABMultiPersonSearch = _Class("SAABMultiPersonSearch")
SAUIDelayedActionCommand = _Class("SAUIDelayedActionCommand")
SALocalSearchShareLocationWithExternalAccessory = _Class(
"SALocalSearchShareLocationWithExternalAccessory"
)
SANPSkipToNextItem = _Class("SANPSkipToNextItem")
SALocalSearchShowNavigationOverview = _Class("SALocalSearchShowNavigationOverview")
SAMPLoadQP = _Class("SAMPLoadQP")
SAEmailSend = _Class("SAEmailSend")
SADomainSearchResults = _Class("SADomainSearchResults")
SAAnswerSearchResults = _Class("SAAnswerSearchResults")
SANPStopPlayback = _Class("SANPStopPlayback")
SANPDecreasePlaybackSpeed = _Class("SANPDecreasePlaybackSpeed")
SADomainObjectCreate = _Class("SADomainObjectCreate")
SAMPSteerMusic = _Class("SAMPSteerMusic")
SAAXSkipAhead = _Class("SAAXSkipAhead")
SAAXSpeakFaster = _Class("SAAXSpeakFaster")
SATimerTimerShow = _Class("SATimerTimerShow")
SASmsDraftShow = _Class("SASmsDraftShow")
SALocalSearchSetNavigationVoiceVolume = _Class("SALocalSearchSetNavigationVoiceVolume")
SAMacStartScreenSaver = _Class("SAMacStartScreenSaver")
SAAlarmDismiss = _Class("SAAlarmDismiss")
SAAXRestartSpeaking = _Class("SAAXRestartSpeaking")
SASmsGroupSearch = _Class("SASmsGroupSearch")
SALocalSearchAddItemToMapCache = _Class("SALocalSearchAddItemToMapCache")
SALocalSearchSetNavigationVoiceMuted = _Class("SALocalSearchSetNavigationVoiceMuted")
SAFmfPunchOutFriend = _Class("SAFmfPunchOutFriend")
SANPSetPlaybackSpeed = _Class("SANPSetPlaybackSpeed")
SANPPausePlayback = _Class("SANPPausePlayback")
SANPSetRepeatMode = _Class("SANPSetRepeatMode")
SALocalSearchShowNavigationDetail = _Class("SALocalSearchShowNavigationDetail")
SALocalSearchSendToProtobufConduit = _Class("SALocalSearchSendToProtobufConduit")
SAMPPlayPodcastCollection = _Class("SAMPPlayPodcastCollection")
SALocalSearchNavigationPromptManeuver = _Class("SALocalSearchNavigationPromptManeuver")
SAMPBasicPodcastAppSearch = _Class("SAMPBasicPodcastAppSearch")
SAHACommand = _Class("SAHACommand")
SALocalSearchGetNavigationStatus = _Class("SALocalSearchGetNavigationStatus")
SALocalSearchDropAPinAtCurrentLocation = _Class(
"SALocalSearchDropAPinAtCurrentLocation"
)
SAMPPlayPodcastStation = _Class("SAMPPlayPodcastStation")
SACFSignal = _Class("SACFSignal")
SAAXSpeakSlower = _Class("SAAXSpeakSlower")
SAMPMusicPlaybackImminent = _Class("SAMPMusicPlaybackImminent")
SALocalSearchNavigationEnd = _Class("SALocalSearchNavigationEnd")
SAMPPlayPodcastEpisode = _Class("SAMPPlayPodcastEpisode")
SAFmfGeoFenceSetCompleted = _Class("SAFmfGeoFenceSetCompleted")
SAFmfGeoFenceSet = _Class("SAFmfGeoFenceSet")
SAMPSubscribeToPodcastCollection = _Class("SAMPSubscribeToPodcastCollection")
SAAXPauseSpeaking = _Class("SAAXPauseSpeaking")
SAEmailSearch = _Class("SAEmailSearch")
SAReminderListSearch = _Class("SAReminderListSearch")
SACalendarDefaultSourceGet = _Class("SACalendarDefaultSourceGet")
SASmsRecipientSearch = _Class("SASmsRecipientSearch")
SAStockDelete = _Class("SAStockDelete")
SALocalSearchShowMapPoints = _Class("SALocalSearchShowMapPoints")
SAMPLoadPredefinedQueue = _Class("SAMPLoadPredefinedQueue")
SALocalSearchVehicleEventCreate = _Class("SALocalSearchVehicleEventCreate")
SANPSetShuffleMode = _Class("SANPSetShuffleMode")
SAWeatherLocationSearch = _Class("SAWeatherLocationSearch")
SAWeatherLocationDelete = _Class("SAWeatherLocationDelete")
SAWeatherLocationAdd = _Class("SAWeatherLocationAdd")
SAHLStartWorkout = _Class("SAHLStartWorkout")
SAAXStartSpeaking = _Class("SAAXStartSpeaking")
SAHLShowActivity = _Class("SAHLShowActivity")
SANPIncreasePlaybackSpeed = _Class("SANPIncreasePlaybackSpeed")
SALocalSearchShowLocalSearchResult = _Class("SALocalSearchShowLocalSearchResult")
SAWebSiteSearch = _Class("SAWebSiteSearch")
SAWebSearch = _Class("SAWebSearch")
SAFmfVisibilityStateSet = _Class("SAFmfVisibilityStateSet")
SAFmfVisibilitySetCompleted = _Class("SAFmfVisibilitySetCompleted")
SAFmfSearch = _Class("SAFmfSearch")
SAUIDelayedActionCancelCommand = _Class("SAUIDelayedActionCancelCommand")
SALocalSearchVehicleEventSearch = _Class("SALocalSearchVehicleEventSearch")
SAReminderSearch = _Class("SAReminderSearch")
SAStockSearch = _Class("SAStockSearch")
SAStockAdd = _Class("SAStockAdd")
SANPSeekToPlaybackPosition = _Class("SANPSeekToPlaybackPosition")
SALocalSearchAnswerRerouteSuggestion = _Class("SALocalSearchAnswerRerouteSuggestion")
SATimerSet = _Class("SATimerSet")
SATimerResume = _Class("SATimerResume")
SATimerPause = _Class("SATimerPause")
SATimerGet = _Class("SATimerGet")
SATimerCancel = _Class("SATimerCancel")
SAABPersonSearch = _Class("SAABPersonSearch")
SASExtractSpeechDataCompleted = _Class("SASExtractSpeechDataCompleted")
SAClockSearch = _Class("SAClockSearch")
SAClockDelete = _Class("SAClockDelete")
list of dicts separated by face index
"""
csgrid = csgrid_GMAO(csres, offset=0)
csgrid_list = [None] * 6
for i in range(6):
lat = csgrid['lat'][i].flatten()
lon = csgrid['lon'][i].flatten()
lon, lat = scs_transform(
lon, lat, stretch_factor, target_lon, target_lat)
lat = lat.reshape((csres, csres))
lon = lon.reshape((csres, csres))
lat_b = csgrid['lat_b'][i].flatten()
lon_b = csgrid['lon_b'][i].flatten()
lon_b, lat_b = scs_transform(
lon_b, lat_b, stretch_factor, target_lon, target_lat)
lat_b = lat_b.reshape((csres + 1, csres + 1))
lon_b = lon_b.reshape((csres + 1, csres + 1))
csgrid_list[i] = {'lat': lat,
'lon': lon,
'lat_b': lat_b,
'lon_b': lon_b}
for i in range(6):
csgrid['lat'][i] = csgrid_list[i]['lat']
csgrid['lon'][i] = csgrid_list[i]['lon']
csgrid['lat_b'][i] = csgrid_list[i]['lat_b']
csgrid['lon_b'][i] = csgrid_list[i]['lon_b']
return [csgrid, csgrid_list]
def calc_rectilinear_lon_edge(lon_stride, center_at_180):
""" Compute longitude edge vector for a rectilinear grid.
Parameters
----------
lon_stride: float
Stride length in degrees. For example, for a standard GEOS-Chem Classic
4x5 grid, lon_stride would be 5.
center_at_180: bool
Whether or not the grid should have a cell center at 180 degrees (i.e.
on the date line). If true, the first grid cell is centered on the date
line; if false, the first grid edge is on the date line.
Returns
-------
Longitudes of cell edges in degrees East.
Notes
-----
All values are forced into the range [-180, 180]. For a grid with N cells in
each band, N+1 edges will be returned, with the first and last values being
duplicates.
Examples
--------
>>> from gcpy.grid.horiz import calc_rectilinear_lon_edge
>>> calc_rectilinear_lon_edge(5.0, True)
np.array([177.5,-177.5,-172.5,...,177.5])
See Also
--------
[NONE]
"""
n_lon = int(np.round(360.0 / lon_stride))
lon_edge = np.linspace(-180.0, 180.0, num=n_lon + 1)
if center_at_180:
lon_edge = lon_edge - (lon_stride / 2.0)
lon_edge[lon_edge < -180.0] = lon_edge[lon_edge < -180.0] + 360.0
lon_edge[lon_edge > 180.0] = lon_edge[lon_edge > 180.0] - 360.0
return lon_edge
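# Illustrative check (added sketch, not part of the original module): with
# center_at_180=True the first and last edges wrap to the same value, as the
# Notes section above describes.
def _demo_lon_edge_wrap():
    edges = calc_rectilinear_lon_edge(5.0, True)
    assert edges.size == 73                 # 72 cells -> 73 edges
    assert edges[0] == edges[-1] == 177.5   # duplicated wrap-around edge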
def calc_rectilinear_lat_edge(lat_stride, half_polar_grid):
""" Compute latitude edge vector for a rectilinear grid.
Parameters
----------
lat_stride: float
Stride length in degrees. For example, for a standard GEOS-Chem Classic
4x5 grid, lat_stride would be 4.
half_polar_grid: bool
Whether or not the grid should be "half-polar" (i.e. bands at poles are
half the size). In either case the grid will start and end at -/+ 90,
but when half_polar_grid is True, the first and last bands will have a
width of 1/2 the normal lat_stride.
Returns
-------
Latitudes of cell edges in degrees North.
Notes
-----
All values are forced into the range [-90, 90]. For a grid with N cells
between the poles, N+1 edges will be returned, with the first and last
values clamped to -90 and +90 respectively.
Examples
--------
>>> from gcpy.grid.horiz import calc_rectilinear_lat_edge
>>> calc_rectilinear_lat_edge(4.0, True)
np.array([-90,-88,-84,-80,...,84,88,90])
See Also
--------
[NONE]
"""
if half_polar_grid:
start_pt = 90.0 + (lat_stride / 2.0)
else:
start_pt = 90.0
lat_edge = np.linspace(-1.0 * start_pt, start_pt,
                       num=1 + int(np.round(2.0 * start_pt / lat_stride)))
# Force back onto +/- 90
lat_edge[lat_edge > 90.0] = 90.0
lat_edge[lat_edge < -90.0] = -90.0
return lat_edge
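# Illustrative check (added sketch): with half_polar_grid=True the polar
# bands are half-width, e.g. a 4-degree grid opens with a 2-degree band.
def _demo_half_polar_band():
    edges = calc_rectilinear_lat_edge(4.0, True)
    assert edges[0] == -90.0 and edges[1] == -88.0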
def calc_rectilinear_grid_area(lon_edge, lat_edge):
""" Compute grid cell areas (in m2) for a rectilinear grid.
Parameters
----------
lon_edge: float
    Vector of longitude edges, in degrees East.
lat_edge: float
    Vector of latitude edges, in degrees North.
Returns
-------
Grid cell areas in m2, as an array of shape (n_lat, n_lon).
Notes
-----
Each cell area follows the spherical-zone formula
R^2 * (sin(lat_top) - sin(lat_bottom)) * delta_lon_in_radians.
Examples
--------
See the sanity-check sketch after this function.
See Also
--------
[NONE]
"""
# Earth radius in meters (R_EARTH_m is already expressed in m)
_radius_earth_m = R_EARTH_m
lon_edge = np.asarray(lon_edge, dtype=float)
lat_edge = np.asarray(lat_edge, dtype=float)
n_lon = (lon_edge.size) - 1
n_lat = (lat_edge.size) - 1
grid_area = np.zeros((n_lat, n_lon))
sfc_area_const = 2.0 * np.pi * _radius_earth_m * _radius_earth_m
# Longitudes loop, so need to be careful
lon_delta = calc_delta_lon(lon_edge)
# Convert into weights relative to the total circle
lon_delta = lon_delta / 360.0
# Precalculate this
sin_lat_edge = np.sin(np.deg2rad(lat_edge))
for i_lat in range(0, n_lat):
sin_diff = sin_lat_edge[i_lat + 1] - sin_lat_edge[i_lat]
grid_area[i_lat, :] = sin_diff * sfc_area_const * lon_delta
return grid_area
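# Hedged sanity check (added sketch): the zonal-band formula above implies
# the cell areas must sum to the surface of the whole sphere, 4*pi*R^2.
def _demo_grid_area_total():
    lon_edge = calc_rectilinear_lon_edge(5.0, True)
    lat_edge = calc_rectilinear_lat_edge(4.0, True)
    total = calc_rectilinear_grid_area(lon_edge, lat_edge).sum()
    expected = 4.0 * np.pi * R_EARTH_m ** 2
    assert abs(total - expected) / expected < 1e-9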
def calc_delta_lon(lon_edge):
""" Compute grid cell longitude widths from an edge vector.
Parameters
----------
lon_edge: float
Vector of longitude edges, in degrees East.
Returns
-------
Width of each cell, degrees East
Notes
-----
Accounts for looping over the domain.
Examples
--------
>>> calc_delta_lon(np.array([175.0, -175.0, -165.0]))
array([10., 10.])
"""
lon_edge = np.asarray(lon_edge, dtype=float)
n_lon = lon_edge.size - 1
# Set up output array
lon_delta = np.zeros((n_lon))
offset = 0.0
next_lon = lon_edge[0]
for i_lon in range(0, n_lon):
last_lon = next_lon
next_lon = lon_edge[i_lon + 1] + offset
while next_lon < last_lon:
offset = offset + 360.0
next_lon = next_lon + 360.0
lon_delta[i_lon] = next_lon - last_lon
return lon_delta
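# Hedged check (added sketch): for a closed set of edges covering the globe,
# the widths returned by calc_delta_lon sum to 360 degrees even though the
# edge values themselves wrap across the date line.
def _demo_delta_lon_total():
    widths = calc_delta_lon(calc_rectilinear_lon_edge(5.0, True))
    assert abs(widths.sum() - 360.0) < 1e-9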
def csgrid_GMAO(res, offset=-10):
"""
Return cubedsphere coordinates with GMAO face orientation
Parameters
----------
res: int
    Cubed-sphere resolution (number of cells along each face edge).
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
CS = CSGrid(res, offset=offset)
lon = CS.lon_center.transpose(2, 0, 1)
lon_b = CS.lon_edge.transpose(2, 0, 1)
lat = CS.lat_center.transpose(2, 0, 1)
lat_b = CS.lat_edge.transpose(2, 0, 1)
lon[lon < 0] += 360
lon_b[lon_b < 0] += 360
for a in [lon, lon_b, lat, lat_b]:
for tile in [0, 1, 3, 4]:
a[tile] = a[tile].T
for tile in [3, 4]:
a[tile] = np.flip(a[tile], 1)
for tile in [3, 4, 2, 5]:
a[tile] = np.flip(a[tile], 0)
a[2], a[5] = a[5].copy(), a[2].copy() # swap north&south pole
return {'lon': lon, 'lat': lat, 'lon_b': lon_b, 'lat_b': lat_b}
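# Hedged usage sketch (added; the shape claims follow from the transposes
# above, not from documented API guarantees): csgrid_GMAO returns per-face
# arrays, centers shaped (6, res, res) and edges (6, res+1, res+1), with
# longitudes shifted into [0, 360).
def _demo_csgrid_gmao_shapes(res=24):
    grid = csgrid_GMAO(res)
    assert grid['lon'].shape == (6, res, res)
    assert grid['lat_b'].shape == (6, res + 1, res + 1)
    assert (grid['lon'] >= 0).all()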
_INV_SQRT_3 = 1.0 / np.sqrt(3.0)
_ASIN_INV_SQRT_3 = np.arcsin(_INV_SQRT_3)
class CSGrid(object):
"""Generator for cubed-sphere grid geometries.
CSGrid computes the latitudes and longitudes of cell centers and edges
on a cubed-sphere grid, providing a way to retrieve these geometries
on-the-fly if your model output data does not include them.
Attributes
----------
{lon,lat}_center: np.ndarray
lat/lon coordinates for each cell center along the cubed-sphere mesh
{lon,lat}_edge: np.ndarray
lat/lon coordinates for the midpoint of the edges separating each
element on the cubed-sphere mesh.
xyz_{center,edge}: np.ndarray
As above, except coordinates are projected into a 3D cartesian space
with common origin to the original lat/lon coordinate system, assuming
a unit sphere.
This class was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
def __init__(self, c, offset=None):
"""
Parameters
----------
c: int
Number of cells along each edge of a cubed-sphere face.
======= ====================
C Lat/Lon Resolution
------- --------------------
24 4 deg x 5 deg
48,45 2 deg x 2.5 deg
96,90 1 deg x 1.25 deg
192,180 0.5 deg x 0.625 deg
384,360 0.25 deg x 0.3125 deg
720     0.125 deg x 0.15625 deg
offset: float (optional)
Degrees to offset the first face's edge in the longitudinal
direction. If not passed, then the western edge of the first face
will align with the prime meridian.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
self.c = c
self.delta_y = 2. * _ASIN_INV_SQRT_3 / c
self.nx = self.ny = c + 1
self.offset = offset
self._initialize()
def _initialize(self):
c = self.c
nx, ny = self.nx, self.ny
lambda_rad = np.zeros((nx, ny))
lambda_rad[0, :] = 3. * np.pi / 4. # West edge
lambda_rad[-1, :] = 5. * np.pi / 4. # East edge
theta_rad = np.zeros((nx, ny))
theta_rad[0, :] = -_ASIN_INV_SQRT_3 + \
(self.delta_y * np.arange(c + 1)) # West edge
theta_rad[-1, :] = theta_rad[0, :] # East edge
# Cache the reflection points - our upper-left and lower-right corners
lonMir1, lonMir2 = lambda_rad[0, 0], lambda_rad[-1, -1]
latMir1, latMir2 = theta_rad[0, 0], theta_rad[-1, -1]
xyzMir1 = latlon_to_cartesian(lonMir1, latMir1)
xyzMir2 = latlon_to_cartesian(lonMir2, latMir2)
xyzCross = np.cross(xyzMir1, xyzMir2)
norm = np.sqrt(np.sum(xyzCross**2))
xyzCross /= norm
for i in range(1, c):
lonRef, latRef = lambda_rad[0, i], theta_rad[0, i]
xyzRef = np.asarray(latlon_to_cartesian(lonRef, latRef, ))
xyzDot = np.sum(xyzCross * xyzRef)
xyzImg = xyzRef - (2. * xyzDot * xyzCross)
xsImg, ysImg, zsImg = xyzImg
lonImg, latImg = cartesian_to_latlon(xsImg, ysImg, zsImg)
lambda_rad[i, 0] = lonImg
lambda_rad[i, -1] = lonImg
theta_rad[i, 0] = latImg
theta_rad[i, -1] = -latImg
pp = np.zeros([3, c + 1, c + 1])
# Set the four corners
# print("CORNERS")
for i, j in product([0, -1], [0, -1]):
# print(i, j)
pp[:, i, j] = latlon_to_cartesian(
lambda_rad[i, j], theta_rad[i, j])
# Map the edges on the sphere back to the cube.
# Note that all intersections are at x = -rsq3 (i.e. x = -1/sqrt(3))
# print("EDGES")
for ij in range(1, c + 1):
# print(ij)
pp[:, 0, ij] = latlon_to_cartesian(
lambda_rad[0, ij], theta_rad[0, ij])
# Reconstructed from context: scale y so the point lies on the cube
# face at x = -1/sqrt(3)
pp[1, 0, ij] = -pp[1, 0, ij] * _INV_SQRT_3 / pp[0, 0, ij]
<filename>main.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'stardew_toolbox.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
"""
MIT License
Copyright (c) 2021 MSProgrammerPy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from toolbox import SAVES_PATH, ToolBox
import os
REPOSITORY = "https://github.com/MSProgrammerPy/stardew_toolbox"
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
# <------------------- MainWindow ------------------->
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 650)
MainWindow.setFixedSize(800, 650)
MainWindow.setWindowTitle("Stardew ToolBox")
MainWindow.setAccessibleDescription("")
MainWindow.setWindowIcon(QtGui.QIcon("img/icon.png"))
# <------------------- centralwidget ------------------->
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setAccessibleDescription("")
self.centralwidget.setObjectName("centralwidget")
# <------------------- tabWidget ------------------->
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 800, 645))
self.tabWidget.setAccessibleDescription("")
self.tabWidget.setAutoFillBackground(False)
self.tabWidget.setObjectName("tabWidget")
# <------------------- ToolBox Tab ------------------->
self.tab_toolbox = QtWidgets.QWidget()
self.tab_toolbox.setAccessibleDescription("")
self.tab_toolbox.setObjectName("tab_toolbox")
# <------------------- Choose Save Frame ------------------->
self.frm_choose_save = QtWidgets.QFrame(self.tab_toolbox)
self.frm_choose_save.setGeometry(QtCore.QRect(0, 0, 800, 150))
self.frm_choose_save.setAccessibleDescription("")
self.frm_choose_save.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_choose_save.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_choose_save.setObjectName("frm_choose_save")
# <------------------- Choose Save Label ------------------->
self.lbl_choose_save = QtWidgets.QLabel(self.frm_choose_save)
self.lbl_choose_save.setGeometry(QtCore.QRect(30, 60, 150, 30))
font = QtGui.QFont()
font.setPointSize(10)
self.lbl_choose_save.setFont(font)
self.lbl_choose_save.setAccessibleDescription("")
self.lbl_choose_save.setText("Choose your save:")
self.lbl_choose_save.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.lbl_choose_save.setObjectName("lbl_choose_save")
# <------------------- Saves ComboBox ------------------->
self.cb_saves = QtWidgets.QComboBox(self.frm_choose_save)
self.cb_saves.setGeometry(QtCore.QRect(190, 64, 150, 24))
font = QtGui.QFont()
font.setPointSize(9)
self.cb_saves.setFont(font)
self.cb_saves.setAccessibleDescription("")
self.cb_saves.setCurrentText("")
self.cb_saves.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
self.cb_saves.setPlaceholderText("")
self.cb_saves.setObjectName("cb_saves")
for _save in os.listdir(SAVES_PATH):
if os.path.isdir(os.path.join(SAVES_PATH, _save)):
self.cb_saves.addItem(_save)
self.cb_saves.currentIndexChanged.connect(self.update_line_edits_text)
# <------------------- Profile GroupBox ------------------->
self.gb_profile = QtWidgets.QGroupBox(self.tab_toolbox)
self.gb_profile.setGeometry(QtCore.QRect(0, 152, 800, 221))
font = QtGui.QFont()
font.setPointSize(10)
self.gb_profile.setFont(font)
self.gb_profile.setAccessibleDescription("")
self.gb_profile.setTitle("Profile")
self.gb_profile.setObjectName("gb_profile")
# <------------------- Change Name Frame ------------------->
self.frm_ch_name = QtWidgets.QFrame(self.gb_profile)
self.frm_ch_name.setGeometry(QtCore.QRect(20, 30, 240, 180))
self.frm_ch_name.setAccessibleDescription("")
self.frm_ch_name.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_ch_name.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_ch_name.setObjectName("frm_ch_name")
# <------------------- Change Name Label ------------------->
self.lbl_ch_name = QtWidgets.QLabel(self.frm_ch_name)
self.lbl_ch_name.setGeometry(QtCore.QRect(70, 30, 101, 30))
self.lbl_ch_name.setAccessibleDescription("")
self.lbl_ch_name.setText("Name")
self.lbl_ch_name.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_ch_name.setObjectName("lbl_ch_name")
# <------------------- Change Name LineEdit ------------------->
self.input_ch_name = QtWidgets.QLineEdit(self.frm_ch_name)
self.input_ch_name.setGeometry(QtCore.QRect(50, 80, 140, 27))
self.input_ch_name.setAccessibleDescription("")
self.input_ch_name.setText("")
self.input_ch_name.setObjectName("input_ch_name")
# <------------------- Change Name Button ------------------->
self.btn_ch_name = QtWidgets.QPushButton(self.frm_ch_name)
self.btn_ch_name.setGeometry(QtCore.QRect(50, 125, 140, 30))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_ch_name.setFont(font)
self.btn_ch_name.setAccessibleDescription("")
self.btn_ch_name.setText("Change")
self.btn_ch_name.setObjectName("btn_ch_name")
self.btn_ch_name.clicked.connect(self.change_name)
# <------------------- Change Farm Name Frame ------------------->
self.frm_ch_fname = QtWidgets.QFrame(self.gb_profile)
self.frm_ch_fname.setGeometry(QtCore.QRect(280, 30, 240, 180))
self.frm_ch_fname.setAccessibleDescription("")
self.frm_ch_fname.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_ch_fname.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_ch_fname.setObjectName("frm_ch_fname")
# <------------------- Change Farm Name Label ------------------->
self.lbl_ch_fname = QtWidgets.QLabel(self.frm_ch_fname)
self.lbl_ch_fname.setGeometry(QtCore.QRect(60, 30, 121, 30))
self.lbl_ch_fname.setAccessibleDescription("")
self.lbl_ch_fname.setText("Farm Name")
self.lbl_ch_fname.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_ch_fname.setObjectName("lbl_ch_fname")
# <------------------- Change Farm Name LineEdit ------------------->
self.input_ch_fname = QtWidgets.QLineEdit(self.frm_ch_fname)
self.input_ch_fname.setGeometry(QtCore.QRect(50, 80, 140, 27))
self.input_ch_fname.setAccessibleDescription("")
self.input_ch_fname.setText("")
self.input_ch_fname.setObjectName("input_ch_fname")
# <------------------- Change Farm Name Button ------------------->
self.btn_ch_fname = QtWidgets.QPushButton(self.frm_ch_fname)
self.btn_ch_fname.setGeometry(QtCore.QRect(50, 125, 140, 30))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_ch_fname.setFont(font)
self.btn_ch_fname.setAccessibleDescription("")
self.btn_ch_fname.setText("Change")
self.btn_ch_fname.setObjectName("btn_ch_fname")
self.btn_ch_fname.clicked.connect(self.change_farm_name)
# <------------------- Change Favorite Thing Frame ------------------->
self.frm_ch_fthing = QtWidgets.QFrame(self.gb_profile)
self.frm_ch_fthing.setGeometry(QtCore.QRect(540, 30, 240, 180))
self.frm_ch_fthing.setAccessibleDescription("")
self.frm_ch_fthing.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_ch_fthing.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_ch_fthing.setObjectName("frm_ch_fthing")
# <------------------- Change Favorite Thing Label ------------------->
self.lbl_ch_fthing = QtWidgets.QLabel(self.frm_ch_fthing)
self.lbl_ch_fthing.setGeometry(QtCore.QRect(50, 30, 141, 30))
self.lbl_ch_fthing.setAccessibleDescription("")
self.lbl_ch_fthing.setText("Favorite Thing")
self.lbl_ch_fthing.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_ch_fthing.setObjectName("lbl_ch_fthing")
# <------------------- Change Favorite Thing LineEdit ------------------->
self.input_ch_fthing = QtWidgets.QLineEdit(self.frm_ch_fthing)
self.input_ch_fthing.setGeometry(QtCore.QRect(50, 80, 140, 27))
self.input_ch_fthing.setText("")
self.input_ch_fthing.setObjectName("input_ch_fthing")
# <------------------- Change Favorite Thing Button ------------------->
self.btn_ch_fthing = QtWidgets.QPushButton(self.frm_ch_fthing)
self.btn_ch_fthing.setGeometry(QtCore.QRect(50, 125, 140, 30))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_ch_fthing.setFont(font)
self.btn_ch_fthing.setAccessibleDescription("")
self.btn_ch_fthing.setText("Change")
self.btn_ch_fthing.setObjectName("btn_ch_fthing")
self.btn_ch_fthing.clicked.connect(self.change_favorite_thing)
# <------------------- Economy GroupBox ------------------->
self.gb_economy = QtWidgets.QGroupBox(self.tab_toolbox)
self.gb_economy.setGeometry(QtCore.QRect(0, 375, 800, 221))
font = QtGui.QFont()
font.setPointSize(10)
self.gb_economy.setFont(font)
self.gb_economy.setAccessibleDescription("")
self.gb_economy.setTitle("Economy")
self.gb_economy.setObjectName("gb_economy")
# <------------------- Change Money Frame ------------------->
self.frm_ch_money = QtWidgets.QFrame(self.gb_economy)
self.frm_ch_money.setGeometry(QtCore.QRect(20, 30, 240, 180))
self.frm_ch_money.setAccessibleDescription("")
self.frm_ch_money.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_ch_money.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_ch_money.setObjectName("frm_ch_money")
# <------------------- Change Money Label ------------------->
self.lbl_ch_money = QtWidgets.QLabel(self.frm_ch_money)
self.lbl_ch_money.setGeometry(QtCore.QRect(60, 30, 121, 30))
self.lbl_ch_money.setAccessibleDescription("")
self.lbl_ch_money.setText("Money")
self.lbl_ch_money.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_ch_money.setObjectName("lbl_ch_money")
# <------------------- Change Money LineEdit ------------------->
self.input_ch_money = QtWidgets.QLineEdit(self.frm_ch_money)
self.input_ch_money.setGeometry(QtCore.QRect(50, 80, 140, 27))
self.input_ch_money.setAccessibleDescription("")
self.input_ch_money.setText("")
self.input_ch_money.setObjectName("input_ch_money")
# <------------------- Change Money Button ------------------->
self.btn_ch_money = QtWidgets.QPushButton(self.frm_ch_money)
self.btn_ch_money.setGeometry(QtCore.QRect(50, 125, 140, 30))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_ch_money.setFont(font)
self.btn_ch_money.setAccessibleDescription("")
self.btn_ch_money.setText("Change")
self.btn_ch_money.setObjectName("btn_ch_money")
self.btn_ch_money.clicked.connect(self.change_money)
# <------------------- Gold Image ------------------->
self.img_gold = QtWidgets.QLabel(self.frm_ch_money)
self.img_gold.setGeometry(QtCore.QRect(10, 80, 27, 27))
self.img_gold.setAccessibleDescription("")
self.img_gold.setText("")
self.img_gold.setPixmap(QtGui.QPixmap("img/Gold.png"))
self.img_gold.setScaledContents(True)
self.img_gold.setAlignment(QtCore.Qt.AlignCenter)
self.img_gold.setObjectName("img_gold")
# <------------------- Change Qi Coins Frame ------------------->
self.frm_ch_qicoins = QtWidgets.QFrame(self.gb_economy)
self.frm_ch_qicoins.setGeometry(QtCore.QRect(280, 30, 240, 180))
self.frm_ch_qicoins.setAccessibleDescription("")
self.frm_ch_qicoins.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_ch_qicoins.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_ch_qicoins.setObjectName("frm_ch_qicoins")
# <------------------- Change Qi Coins Label ------------------->
self.lbl_ch_qicoins = QtWidgets.QLabel(self.frm_ch_qicoins)
self.lbl_ch_qicoins.setGeometry(QtCore.QRect(60, 30, 121, 30))
self.lbl_ch_qicoins.setAccessibleDescription("")
self.lbl_ch_qicoins.setText("Qi Coins")
self.lbl_ch_qicoins.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_ch_qicoins.setObjectName("lbl_ch_qicoins")
# <------------------- Change Qi Coins LineEdit ------------------->
self.input_ch_qicoins = QtWidgets.QLineEdit(self.frm_ch_qicoins)
self.input_ch_qicoins.setGeometry(QtCore.QRect(50, 80, 140, 27))
self.input_ch_qicoins.setAccessibleDescription("")
self.input_ch_qicoins.setText("")
self.input_ch_qicoins.setObjectName("input_ch_qicoins")
# <------------------- Change Qi Coins Button ------------------->
self.btn_ch_qicoins = QtWidgets.QPushButton(self.frm_ch_qicoins)
self.btn_ch_qicoins.setGeometry(QtCore.QRect(50, 125, 140, 30))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_ch_qicoins.setFont(font)
self.btn_ch_qicoins.setAccessibleDescription("")
self.btn_ch_qicoins.setText("Change")
self.btn_ch_qicoins.setObjectName("btn_ch_qicoins")
self.btn_ch_qicoins.clicked.connect(self.change_qicoins)
# <------------------- Qi Coin Image ------------------->
self.img_qicoin = QtWidgets.QLabel(self.frm_ch_qicoins)
self.img_qicoin.setGeometry(QtCore.QRect(10, 80, 27, 27))
self.img_qicoin.setAccessibleDescription("")
self.img_qicoin.setText("")
self.img_qicoin.setPixmap(QtGui.QPixmap("img/QiCoin.png"))
self.img_qicoin.setScaledContents(True)
self.img_qicoin.setAlignment(QtCore.Qt.AlignCenter)
self.img_qicoin.setObjectName("img_qicoin")
# <------------------- Change Qi Gems Frame ------------------->
self.frm_ch_qigems = QtWidgets.QFrame(self.gb_economy)
self.frm_ch_qigems.setGeometry(QtCore.QRect(540, 30, 240, 180))
self.frm_ch_qigems.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_ch_qigems.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_ch_qigems.setObjectName("frm_ch_qigems")
# <------------------- Change Qi Gems Label ------------------->
self.lbl_ch_qigems = QtWidgets.QLabel(self.frm_ch_qigems)
self.lbl_ch_qigems.setGeometry(QtCore.QRect(60, 30, 121, 30))
self.lbl_ch_qigems.setText("Qi Gems")
self.lbl_ch_qigems.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_ch_qigems.setObjectName("lbl_ch_qigems")
# <------------------- Change Qi Gems LineEdit ------------------->
self.input_ch_qigems = QtWidgets.QLineEdit(self.frm_ch_qigems)
self.input_ch_qigems.setGeometry(QtCore.QRect(50, 80, 140, 27))
self.input_ch_qigems.setObjectName("input_ch_qigems")
# <------------------- Change Qi Gems Button ------------------->
self.btn_ch_qigems = QtWidgets.QPushButton(self.frm_ch_qigems)
self.btn_ch_qigems.setGeometry(QtCore.QRect(50, 125, 140, 30))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_ch_qigems.setFont(font)
self.btn_ch_qigems.setText("Change")
self.btn_ch_qigems.setObjectName("btn_ch_qigems")
self.btn_ch_qigems.clicked.connect(self.change_qigems)
# <------------------- Qi Gem Image ------------------->
self.img_qigem = QtWidgets.QLabel(self.frm_ch_qigems)
self.img_qigem.setGeometry(QtCore.QRect(10, 80, 27, 27))
self.img_qigem.setText("")
self.img_qigem.setPixmap(QtGui.QPixmap("img/QiGem.png"))
self.img_qigem.setScaledContents(True)
self.img_qigem.setAlignment(QtCore.Qt.AlignCenter)
self.img_qigem.setObjectName("img_qigem")
# <------------------- tabWidget ------------------->
self.tabWidget.addTab(self.tab_toolbox, "ToolBox")
# <------------------- About Tab ------------------->
self.tab_about = QtWidgets.QWidget()
self.tab_about.setAccessibleDescription("")
self.tab_about.setObjectName("tab_about")
# <------------------- Note Frame ------------------->
self.frm_note = QtWidgets.QFrame(self.tab_about)
self.frm_note.setGeometry(QtCore.QRect(0, 0, 800, 161))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.frm_note.setPalette(palette)
self.frm_note.setAccessibleDescription("")
self.frm_note.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frm_note.setFrameShadow(QtWidgets.QFrame.Raised)
self.frm_note.setObjectName("frm_note")
# <------------------- Note Label ------------------->
self.lbl_note = QtWidgets.QLabel(self.frm_note)
self.lbl_note.setGeometry(QtCore.QRect(10, 10, 81, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.lbl_note.setFont(font)
self.lbl_note.setAccessibleDescription("")
self.lbl_note.setText("<html><head/><body><p><span style=\" font-weight:600;\">Note:</span></p></body></html>")
self.lbl_note.setObjectName("lbl_note")
# <------------------- Note 1 Label ------------------->
self.lbl_note1 = QtWidgets.QLabel(self.frm_note)
self.lbl_note1.setGeometry(QtCore.QRect(20, 40, 451, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.lbl_note1.setFont(font)
self.lbl_note1.setAccessibleDescription("")
self.lbl_note1.setText(
"<html><head/><body><p><span style=\" font-weight:600;\">Please make backup of your Saves before using!</span></p></body></html>")
self.lbl_note1.setObjectName("lbl_note1")
# <------------------- Note 2 Label ------------------->
self.lbl_note2 = QtWidgets.QLabel(self.frm_note)
self.lbl_note2.setGeometry(QtCore.QRect(20, 70, 411, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.lbl_note2.setFont(font)
self.lbl_note2.setAccessibleDescription("")
self.lbl_note2.setText(
"<html><head/><body><p><span style=\" font-weight:600;\">Close the game before using this software.</span></p></body></html>")
self.lbl_note2.setObjectName("lbl_note2")
# <------------------- Saves Label ------------------->
self.lbl_saves = QtWidgets.QLabel(self.frm_note)
self.lbl_saves.setGeometry(QtCore.QRect(10, 120, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.lbl_saves.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(10)
self.lbl_saves.setFont(font)
self.lbl_saves.setAccessibleDescription("")
self.lbl_saves.setText("Your saves are here:")
self.lbl_saves.setObjectName("lbl_saves")
# <------------------- Saves Path Label ------------------->
self.lbl_saves_path = QtWidgets.QLabel(self.frm_note)
self.lbl_saves_path.setGeometry(QtCore.QRect(180, 120, 571, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.lbl_saves_path.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(10)
self.lbl_saves_path.setFont(font)
self.lbl_saves_path.setAccessibleDescription("")
self.lbl_saves_path.setText(SAVES_PATH)
self.lbl_saves_path.setOpenExternalLinks(False)
self.lbl_saves_path.setObjectName("lbl_saves_path")
# <------------------- Source Code Label ------------------->
self.lbl_source_code = QtWidgets.QLabel(self.tab_about)
self.lbl_source_code.setGeometry(QtCore.QRect(10, 180, 111, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.lbl_source_code.setFont(font)
self.lbl_source_code.setAccessibleDescription("")
self.lbl_source_code.setText("Source Code:")
self.lbl_source_code.setObjectName("lbl_source_code")
# <------------------- Repository Label ------------------->
self.lbl_repo = QtWidgets.QLabel(self.tab_about)
self.lbl_repo.setGeometry(QtCore.QRect(120, 180, 471, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.lbl_repo.setFont(font)
self.lbl_repo.setAccessibleDescription("")
self.lbl_repo.setText(
f"<html><head/><body><p><a href=\"{REPOSITORY}\"><span style=\" text-decoration: none; color: inherit;\">{REPOSITORY}</span></a></p></body></html>")
self.lbl_repo.setOpenExternalLinks(True)
self.lbl_repo.setObjectName("lbl_repo")
# <------------------- License GroupBox ------------------->
self.gb_license = QtWidgets.QGroupBox(self.tab_about)
self.gb_license.setGeometry(QtCore.QRect(0, 230, 800, 361))
font = QtGui.QFont()
font.setPointSize(10)
self.gb_license.setFont(font)
self.gb_license.setTitle("License")
self.gb_license.setObjectName("gb_license")
# <------------------- License textBrowser ------------------->
self.textBrowser_license = QtWidgets.QTextBrowser(self.gb_license)
self.textBrowser_license.setGeometry(QtCore.QRect(20, 30, 761, 321))
self.textBrowser_license.setHtml(
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
# -*- coding: utf-8 -*-
"""
Released on April 29, 2019
@author: <NAME> <<EMAIL>>; <NAME> <<EMAIL>>
The code is part of BMSS software.
Copyright (c) 2019, National University of Singapore.
"""
#############################################################################
# The LogicGates System class
#############################################################################
### Scripts imported ###
import Read_Data as ReadData
import ComputeAIC_ as ComputeAIC
import Txtfilename as Txtfilename
### Import Class from module
from LogicGatesLibrary_ import LogicGatesLibrary
### Python Packages Imported ###
#from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import time
import csv
import scipy.stats as ss
from tabulate import tabulate
class LogicGatesSystem:
def __init__(self):
#close all figures from previous run
plt.close('all')
plt.rcdefaults()
#font0 = FontProperties()
#font = font0.copy()
#font.set_family('sans-serif')
#font.set_style('normal')
plt.rcParams['font.family'] = 'Arial' #'sans-serif'
plt.rcParams['font.weight'] = 'bold'
plt.rcParams['font.size'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['xtick.labelsize'] = 16
#plt.rcParams['xtick.labelweight'] = 'medium'
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['ytick.labelsize'] = 16
#plt.rcParams['xtick.labelweight'] = 'medium'
plt.rcParams['ytick.major.width'] = 2
plt.rcParams['legend.handletextpad'] = 0.8
### MSS Main Code ###
self.starttime = time.time()
#create the first object of the LogicGatesLibrary class
self.LogicGLib1 = LogicGatesLibrary()
def DataReader(self, Input_filename, NumState):
### Read Data ###
print('Read Data')
self.Data_header1, self.Data_array1, self.Data_stddev1 = ReadData.readData(Input_filename, NumState)
self.Input_filename_ = Input_filename
self.NumState_ = NumState
ReadData.plot_inputdata(self.Data_header1, self.Data_array1, self.Data_stddev1)
### Find Sample Size ###
Data_array_numrows = self.Data_array1.shape[0] # Time + All Inducers
Data_array_numcols = self.Data_array1.shape[1] # Number of RFP Data Per Inducer
self.Sample_size = (Data_array_numrows - 1) * Data_array_numcols
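# Worked example (hypothetical shapes): for a 9 x 4 Data_array1
# (one time row plus eight inducer rows, four RFP readings per inducer),
# Sample_size = (9 - 1) * 4 = 32 data points, used later for AIC.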
def RunModels(self, SystemOpt = 'NOT', iteration = 1):
# -------------------------------------------------------------------------------- #
### Define Lists Used ###
self.Model_List = [] # To store names of Models Tested
self.SSE_Combined = [] # To store values of SSE
self.Num_Param_List = [] # To store param length of each model tested
self.ParamName_List = [] # To store param Name of each model tested
self.Param_List = [] # To store fitted param values of each model tested
self.ParamUnits_List = [] # To store the units of the params of each model tested
self.VarName_List = [] # To store the Variables name of each model tested
self.y0_List = [] # To store the initial values for the variables of each model tested
# -------------------------------------------------------------------------------- #
for loop in range(0, iteration):
if (SystemOpt.casefold() == 'not'):
### Model 1.0 - NOTgate ###
SystemType = 'NOTgate'
Param_NOTgate, SSE_NOTgate, y0_NOTgate, VarName_NOTgate, ParamName_NOTgate, ParamUnits_NOTgate\
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_NOTgate)
self.ParamName_List.append(ParamName_NOTgate)
self.Param_List.append(Param_NOTgate)
self.ParamUnits_List.append(ParamUnits_NOTgate)
self.VarName_List.append(VarName_NOTgate)
self.y0_List.append(y0_NOTgate)
self.Num_Param_List.append(len(Param_NOTgate))
self.Model_List.append('Model 1.0 - NOTgate')
### Model 1.1 - NOTgateKMat ###
SystemType = 'NOTgateKMat'
Param_NOTgateKMat, SSE_NOTgateKMat, y0_NOTgateKMat, VarName_NOTgateKMat, ParamName_NOTgateKMat, ParamUnits_NOTgateKMat\
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_NOTgateKMat)
self.ParamName_List.append(ParamName_NOTgateKMat)
self.Param_List.append(Param_NOTgateKMat)
self.ParamUnits_List.append(ParamUnits_NOTgateKMat)
self.VarName_List.append(VarName_NOTgateKMat)
self.y0_List.append(y0_NOTgateKMat)
self.Num_Param_List.append(len(Param_NOTgateKMat))
self.Model_List.append('Model 1.1 - NOTgateKMat')
# -------------------------------------------------------------------------------- #
### Model 1.2 - NOTgateSingle ###
SystemType = 'NOTgateSingle'
Param_NOTgateSingle, SSE_NOTgateSingle, y0_NOTgateSingle, VarName_NOTgateSingle, ParamName_NOTgateSingle, ParamUnits_NOTgateSingle\
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_NOTgateSingle)
self.ParamName_List.append(ParamName_NOTgateSingle)
self.Param_List.append(Param_NOTgateSingle)
self.ParamUnits_List.append(ParamUnits_NOTgateSingle)
self.VarName_List.append(VarName_NOTgateSingle)
self.y0_List.append(y0_NOTgateSingle)
self.Num_Param_List.append(len(Param_NOTgateSingle))
self.Model_List.append('Model 1.2 - NOTgateSingle')
### Model 1.3 - NOTgateSingleKMat ###
SystemType = 'NOTgateSingleKMat'
Param_NOTgateSingleKMat, SSE_NOTgateSingleKMat, y0_NOTgateSingleKMat, VarName_NOTgateSingleKMat, ParamName_NOTgateSingleKMat, ParamUnits_NOTgateSingleKMat\
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_NOTgateSingleKMat)
self.ParamName_List.append(ParamName_NOTgateSingleKMat)
self.Param_List.append(Param_NOTgateSingleKMat)
self.ParamUnits_List.append(ParamUnits_NOTgateSingleKMat)
self.VarName_List.append(VarName_NOTgateSingleKMat)
self.y0_List.append(y0_NOTgateSingleKMat)
self.Num_Param_List.append(len(Param_NOTgateSingleKMat))
self.Model_List.append('Model 1.3 - NOTgateSingleKMat')
# -------------------------------------------------------------------------------- #
elif SystemOpt.casefold() == 'and':
### Model 2.0 - ANDgate ###
SystemType = 'ANDgate'
Param_ANDgate, SSE_ANDgate, y0_ANDgate, VarName_ANDgate, ParamName_ANDgate, ParamUnits_ANDgate \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ANDgate)
self.ParamName_List.append(ParamName_ANDgate)
self.Param_List.append(Param_ANDgate)
self.ParamUnits_List.append(ParamUnits_ANDgate)
self.VarName_List.append(VarName_ANDgate)
self.y0_List.append(y0_ANDgate)
self.Num_Param_List.append(len(Param_ANDgate))
self.Model_List.append('Model 2.0 - ANDgate')
### Model 2.1 - ANDgateBLeak1 ###
SystemType = 'ANDgateBLeak1'
Param_ANDgateBLeak1, SSE_ANDgateBLeak1, y0_ANDgateBLeak1, VarName_ANDgateBLeak1, ParamName_ANDgateBLeak1, ParamUnits_ANDgateBLeak1 \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ANDgateBLeak1)
self.ParamName_List.append(ParamName_ANDgateBLeak1)
self.Param_List.append(Param_ANDgateBLeak1)
self.ParamUnits_List.append(ParamUnits_ANDgateBLeak1)
self.VarName_List.append(VarName_ANDgateBLeak1)
self.y0_List.append(y0_ANDgateBLeak1)
self.Num_Param_List.append(len(Param_ANDgateBLeak1))
self.Model_List.append('Model 2.1 - ANDgateBLeak1')
### Model 2.2 - ANDgateBLeak2 ###
SystemType = 'ANDgateBLeak2'
Param_ANDgateBLeak2, SSE_ANDgateBLeak2, y0_ANDgateBLeak2, VarName_ANDgateBLeak2, ParamName_ANDgateBLeak2, ParamUnits_ANDgateBLeak2 \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ANDgateBLeak2)
self.ParamName_List.append(ParamName_ANDgateBLeak2)
self.Param_List.append(Param_ANDgateBLeak2)
self.ParamUnits_List.append(ParamUnits_ANDgateBLeak2)
self.VarName_List.append(VarName_ANDgateBLeak2)
self.y0_List.append(y0_ANDgateBLeak2)
self.Num_Param_List.append(len(Param_ANDgateBLeak2))
self.Model_List.append('Model 2.2 - ANDgateBLeak2')
### Model 2.3 - ANDgateBLeak3 ###
SystemType = 'ANDgateBLeak3'
Param_ANDgateBLeak3, SSE_ANDgateBLeak3, y0_ANDgateBLeak3, VarName_ANDgateBLeak3, ParamName_ANDgateBLeak3, ParamUnits_ANDgateBLeak3 \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ANDgateBLeak3)
self.ParamName_List.append(ParamName_ANDgateBLeak3)
self.Param_List.append(Param_ANDgateBLeak3)
self.ParamUnits_List.append(ParamUnits_ANDgateBLeak3)
self.VarName_List.append(VarName_ANDgateBLeak3)
self.y0_List.append(y0_ANDgateBLeak3)
self.Num_Param_List.append(len(Param_ANDgateBLeak3))
self.Model_List.append('Model 2.3 - ANDgateBLeak3')
## Model 2.4 - ANDgateBLeak13 ###
SystemType = 'ANDgateBLeak13'
Param_ANDgateBLeak13, SSE_ANDgateBLeak13, y0_ANDgateBLeak13, VarName_ANDgateBLeak13, ParamName_ANDgateBLeak13, ParamUnits_ANDgateBLeak13 \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ANDgateBLeak13)
self.ParamName_List.append(ParamName_ANDgateBLeak13)
self.Param_List.append(Param_ANDgateBLeak13)
self.ParamUnits_List.append(ParamUnits_ANDgateBLeak13)
self.VarName_List.append(VarName_ANDgateBLeak13)
self.y0_List.append(y0_ANDgateBLeak13)
self.Num_Param_List.append(len(Param_ANDgateBLeak13))
self.Model_List.append('Model 2.4 - ANDgateBLeak13')
### Model 2.5 - ANDgateBLeak13KMat ###
SystemType = 'ANDgateBLeak13KMat'
Param_ANDgateBLeak13KMat, SSE_ANDgateBLeak13KMat, y0_ANDgateBLeak13KMat, VarName_ANDgateBLeak13KMat, ParamName_ANDgateBLeak13KMat, ParamUnits_ANDgateBLeak13KMat \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ANDgateBLeak13KMat)
self.ParamName_List.append(ParamName_ANDgateBLeak13KMat)
self.Param_List.append(Param_ANDgateBLeak13KMat)
self.ParamUnits_List.append(ParamUnits_ANDgateBLeak13KMat)
self.VarName_List.append(VarName_ANDgateBLeak13KMat)
self.y0_List.append(y0_ANDgateBLeak13KMat)
self.Num_Param_List.append(len(Param_ANDgateBLeak13KMat))
self.Model_List.append('Model 2.5 - ANDgateBLeak13KMat')
# -------------------------------------------------------------------------------- #
elif (SystemOpt.casefold() == 'or'):
### Model 3.0 - ORgate ###
SystemType = 'ORgate'
Param_ORgate, SSE_ORgate, y0_ORgate, VarName_ORgate, ParamName_ORgate, ParamUnits_ORgate \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgate)
self.ParamName_List.append(ParamName_ORgate)
self.Param_List.append(Param_ORgate)
self.ParamUnits_List.append(ParamUnits_ORgate)
self.VarName_List.append(VarName_ORgate)
self.y0_List.append(y0_ORgate)
self.Num_Param_List.append(len(Param_ORgate))
self.Model_List.append('Model 3.0 - ORgate')
### Model 3.1 - ORgateDelay ###
SystemType = 'ORgateDelay'
Param_ORgateDelay, SSE_ORgateDelay, y0_ORgateDelay, VarName_ORgateDelay, ParamName_ORgateDelay, ParamUnits_ORgateDelay \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDelay)
self.ParamName_List.append(ParamName_ORgateDelay)
self.Param_List.append(Param_ORgateDelay)
self.ParamUnits_List.append(ParamUnits_ORgateDelay)
self.VarName_List.append(VarName_ORgateDelay)
self.y0_List.append(y0_ORgateDelay)
self.Num_Param_List.append(len(Param_ORgateDelay))
self.Model_List.append('Model 3.1 - ORgateDelay')
### Model 3.2 - ORgate_Delay ###
SystemType = 'ORgate_Delay'
Param_ORgate_Delay, SSE_ORgate_Delay, y0_ORgate_Delay, VarName_ORgate_Delay, ParamName_ORgate_Delay, ParamUnits_ORgate_Delay \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgate_Delay)
self.ParamName_List.append(ParamName_ORgate_Delay)
self.Param_List.append(Param_ORgate_Delay)
self.ParamUnits_List.append(ParamUnits_ORgate_Delay)
self.VarName_List.append(VarName_ORgate_Delay)
self.y0_List.append(y0_ORgate_Delay)
self.Num_Param_List.append(len(Param_ORgate_Delay))
self.Model_List.append('Model 3.2 - ORgate_Delay')
### Model 3.3 - ORgateDegradation ###
SystemType = 'ORgateDegradation'
Param_ORgateDegradation, SSE_ORgateDegradation, y0_ORgateDegradation, VarName_ORgateDegradation, ParamName_ORgateDegradation, ParamUnits_ORgateDegradation \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDegradation)
self.ParamName_List.append(ParamName_ORgateDegradation)
self.Param_List.append(Param_ORgateDegradation)
self.ParamUnits_List.append(ParamUnits_ORgateDegradation)
self.VarName_List.append(VarName_ORgateDegradation)
self.y0_List.append(y0_ORgateDegradation)
self.Num_Param_List.append(len(Param_ORgateDegradation))
self.Model_List.append('Model 3.3 - ORgateDegradation')
### Model 3.4 - ORgate_Degradation ###
SystemType = 'ORgate_Degradation'
Param_ORgate_Degradation, SSE_ORgate_Degradation, y0_ORgate_Degradation, VarName_ORgate_Degradation, ParamName_ORgate_Degradation, ParamUnits_ORgate_Degradation \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgate_Degradation)
self.ParamName_List.append(ParamName_ORgate_Degradation)
self.Param_List.append(Param_ORgate_Degradation)
self.ParamUnits_List.append(ParamUnits_ORgate_Degradation)
self.VarName_List.append(VarName_ORgate_Degradation)
self.y0_List.append(y0_ORgate_Degradation)
self.Num_Param_List.append(len(Param_ORgate_Degradation))
self.Model_List.append('Model 3.4 - ORgate_Degradation')
# -------------------------------------------------------------------------------- #
### Model 3.5 - ORgateDelayDegradation ###
SystemType = 'ORgateDelayDegradation'
Param_ORgateDelayDegradation, SSE_ORgateDelayDegradation, y0_ORgateDelayDegradation, \
VarName_ORgateDelayDegradation, ParamName_ORgateDelayDegradation, ParamUnits_ORgateDelayDegradation \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDelayDegradation)
self.ParamName_List.append(ParamName_ORgateDelayDegradation)
self.Param_List.append(Param_ORgateDelayDegradation)
self.ParamUnits_List.append(ParamUnits_ORgateDelayDegradation)
self.VarName_List.append(VarName_ORgateDelayDegradation)
self.y0_List.append(y0_ORgateDelayDegradation)
self.Num_Param_List.append(len(Param_ORgateDelayDegradation))
self.Model_List.append('Model 3.5 - ORgateDelayDegradation')
### Model 3.6 - ORgateDegradationDelay ###
SystemType = 'ORgateDegradationDelay'
Param_ORgateDegradationDelay, SSE_ORgateDegradationDelay, y0_ORgateDegradationDelay, \
VarName_ORgateDegradationDelay, ParamName_ORgateDegradationDelay, ParamUnits_ORgateDegradationDelay \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDegradationDelay)
self.ParamName_List.append(ParamName_ORgateDegradationDelay)
self.Param_List.append(Param_ORgateDegradationDelay)
self.ParamUnits_List.append(ParamUnits_ORgateDegradationDelay)
self.VarName_List.append(VarName_ORgateDegradationDelay)
self.y0_List.append(y0_ORgateDegradationDelay)
self.Num_Param_List.append(len(Param_ORgateDegradationDelay))
self.Model_List.append('Model 3.6 - ORgateDegradationDelay')
### Model 3.7 - ORgateDelayDelay ###
SystemType = 'ORgateDelayDelay'
Param_ORgateDelayDelay, SSE_ORgateDelayDelay, y0_ORgateDelayDelay, \
VarName_ORgateDelayDelay, ParamName_ORgateDelayDelay, ParamUnits_ORgateDelayDelay \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDelayDelay)
self.ParamName_List.append(ParamName_ORgateDelayDelay)
self.Param_List.append(Param_ORgateDelayDelay)
self.ParamUnits_List.append(ParamUnits_ORgateDelayDelay)
self.VarName_List.append(VarName_ORgateDelayDelay)
self.y0_List.append(y0_ORgateDelayDelay)
self.Num_Param_List.append(len(Param_ORgateDelayDelay))
self.Model_List.append('Model 3.7 - ORgateDelayDelay')
#-------------------------------------------------------------------------------- #
### Model 3.8- OR with Delay Degradation (with resource competition at State11) System ###
SystemType = 'ORgateDelayDegradeResCompete'
Param_ORgateDelayDegradeResCompete, SSE_ORgateDelayDegradeResCompete, y0_ORgateDelayDegradeResCompete, \
VarName_ORgateDelayDegradeResCompete, ParamName_ORgateDelayDegradeResCompete, ParamUnits_ORgateDelayDegradeResCompete \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDelayDegradeResCompete)
self.ParamName_List.append(ParamName_ORgateDelayDegradeResCompete)
self.Param_List.append(Param_ORgateDelayDegradeResCompete)
self.ParamUnits_List.append(ParamUnits_ORgateDelayDegradeResCompete)
self.VarName_List.append(VarName_ORgateDelayDegradeResCompete)
self.y0_List.append(y0_ORgateDelayDegradeResCompete)
self.Num_Param_List.append(len(Param_ORgateDelayDegradeResCompete))
self.Model_List.append('Model 3.8 - ORgateDelayDegradeResCompete')
### Model 3.9- OR with Degradation Delay (with resource competition at State11) System ###
SystemType = 'ORgateDegradeDelayResCompete'
Param_ORgateDegradeDelayResCompete, SSE_ORgateDegradeDelayResCompete, y0_ORgateDegradeDelayResCompete, \
VarName_ORgateDegradeDelayResCompete, ParamName_ORgateDegradeDelayResCompete, ParamUnits_ORgateDegradeDelayResCompete \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDegradeDelayResCompete)
self.ParamName_List.append(ParamName_ORgateDegradeDelayResCompete)
self.Param_List.append(Param_ORgateDegradeDelayResCompete)
self.ParamUnits_List.append(ParamUnits_ORgateDegradeDelayResCompete)
self.VarName_List.append(VarName_ORgateDegradeDelayResCompete)
self.y0_List.append(y0_ORgateDegradeDelayResCompete)
self.Num_Param_List.append(len(Param_ORgateDegradeDelayResCompete))
self.Model_List.append('Model 3.9 - ORgateDegradeDelayResCompete')
### Model 3.10- OR with Delay Delay (with resource competition at State11) System ###
SystemType = 'ORgateDelayDelayResCompete'
Param_ORgateDelayDelayResCompete, SSE_ORgateDelayDelayResCompete, y0_ORgateDelayDelayResCompete, \
VarName_ORgateDelayDelayResCompete, ParamName_ORgateDelayDelayResCompete, ParamUnits_ORgateDelayDelayResCompete \
= self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
self.SSE_Combined.append(SSE_ORgateDelayDelayResCompete)
self.ParamName_List.append(ParamName_ORgateDelayDelayResCompete)
self.Param_List.append(Param_ORgateDelayDelayResCompete)
self.ParamUnits_List.append(ParamUnits_ORgateDelayDelayResCompete)
self.VarName_List.append(VarName_ORgateDelayDelayResCompete)
self.y0_List.append(y0_ORgateDelayDelayResCompete)
self.Num_Param_List.append(len(Param_ORgateDelayDelayResCompete))
self.Model_List.append('Model 3.10 - ORgateDelayDelayResCompete')
else:
#unsupported SystemOpt value: raise an error
raise Exception('Error in the selected System Option')
# -------------------------------------------------------------------------------- #
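# The per-model blocks above differ only in the SystemType string and the
# label appended to Model_List; a minimal refactor sketch (hypothetical
# helper, not part of the original BMSS code) that folds one model into a
# single call, e.g. self._run_and_store('NOTgate', 'Model 1.0 - NOTgate'):
def _run_and_store(self, SystemType, label):
    Param, SSE, y0, VarName, ParamName, ParamUnits = \
        self.LogicGLib1.Run_LogicGatesSystem(SystemType, self.Data_header1, self.Data_array1, self.NumState_)
    self.SSE_Combined.append(SSE)
    self.ParamName_List.append(ParamName)
    self.Param_List.append(Param)
    self.ParamUnits_List.append(ParamUnits)
    self.VarName_List.append(VarName)
    self.y0_List.append(y0)
    self.Num_Param_List.append(len(Param))
    self.Model_List.append(label)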
def RunModelSelection(self):
# -------------------------------------------------------------------------------- #
### AIC ###
self.AIC_Results = ComputeAIC.run_AIC(self.SSE_Combined, self.Sample_size, self.Num_Param_List)
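# run_AIC is defined in ComputeAIC_ and not shown here; a common form it may
# implement (assumption, not confirmed by this file) is AIC = n*ln(SSE/n) + 2k
# per model, with n = Sample_size and k = that model's parameter count.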
# -------------------------------------------------------------------------------- #
### Find Min SSE and AIC ###
min_SSE = min(self.SSE_Combined)
min_SSE_index = self.SSE_Combined.index(min_SSE)
self.min_AIC = min(self.AIC_Results)
self.min_AIC_index = self.AIC_Results.index(self.min_AIC)
# -------------------------------------------------------------------------------- #
### Model Recommendation and Print out###
self.Best_Model = self.Model_List[self.min_AIC_index]
print('\n')
print('Models Tested:', self.Model_List)
print('Number of Data points:', self.Sample_size)
print('NumParam:', self.Num_Param_List)
print('SSE Results:', self.SSE_Combined)
print('AIC Results:', self.AIC_Results)
print('Lowest SSE Value and Index:', min_SSE, ',', min_SSE_index)
print('Lowest AIC Value and Index:', self.min_AIC, ',', self.min_AIC_index)
print('\nBased on the lowest AIC value, the recommended characterization model is:')
print('>', self.Best_Model)
# -------------------------------------------------------------------------------- #
### Plot Results of Best Model ###
# Initiate lists to store strings
Param_String = []
ParamName_String = []
ParamUnits_String = []
VarName_String = []
y0_String = []
# store StringName and VariableName into dict for mapping purpose
for m in range(0, len(self.Model_List)):
Param_String.append('_'.join(('Param', self.Model_List[m].split()[3])))
<reponame>papomail/NeonateMRS_UCLH_Python3
# -*- coding: utf-8 -*-
"""
Spec_Module
Version 1.4.2
Modified 21/02/2020
Python3 version of the script. Converted from: Version 1.3.1
Created on 11 Dic 2019 @author: <NAME>
...
Version 1.3.1
Modified 1/10/2015
Defines objects of class Spec_Module
Created on Thu Oct 01 11:41:45 2015
@author: <NAME>
"""
from past.utils import old_div
import pydicom as dcm
import numpy as np
import os as os
import csv as csv
from fpdf import FPDF
import datetime
import PyPDF2
from PyQt5 import QtGui, QtCore, QtWidgets
from pathlib import Path
import shutil
import sys
#BASE_DIR = Path(__file__).parent.parent.parent
# BASE_DIR = Path(__file__).parent.parent
BASE_DIR = Path(__file__).parent
# BASE_DIR = Path.cwd()
# print(BASE_DIR)
ICONS_DIR= BASE_DIR / 'Icons'
UCLH_header=ICONS_DIR / 'UCLH_header.png'
UCLH_header=str(UCLH_header.resolve())
UCLH_footer=ICONS_DIR / 'UCLH_footer.png'
UCLH_footer=str(UCLH_footer.resolve())
class SpecObject():
NumSpecObjects = 0 #To keep track of how many spec objects there are
spectypes = {0 : "Not a spectroscopy dataset", \
1 : "classic spectroscopy dataset", \
2 : "Enhanced spectroscopy dataset", \
3 : "Siemens IMA file", \
4 : "RDA file"}
def __init__(self, filename, dirpass):
self.filename = filename
self.dirpass = dirpass
# self.mruiload = 0
self.plim_l = 1110
self.plim_r = 1140
self.apod_const = 128.0
SpecObject.NumSpecObjects += 1
# print(filename)
try:
self.ds = dcm.read_file(self.filename)
except:
self.ds = []
SpecObject.NumSpecObjects -= 1
# If it is a DICOM MRS file (enhanced spectroscopy object), get data and useful parameters
if [0x5600,0x0020] in self.ds:
self.isspec = 2
self.PatName = self.ds.PatientName
self.StudyDate = self.ds.StudyDate
self.StudyTime = self.ds.StudyTime
self.SeriesDate = self.ds.SeriesDate
self.SeriesTime = self.ds.SeriesTime
self.SpecData = self.ds[0x5600,0x0020].value
self.Datapoints = self.ds.DataPointColumns
self.SpectralWidth = self.ds.SpectralWidth
self.TransmitterFrequency = self.ds.TransmitterFrequency
#self.FieldStrength = self.ds[0x0018,0x0087].value
self.FieldStrength = 3.0
self.PatID = self.ds.PatientID
self.AcquisitionDateTime = self.ds.AcquisitionDateTime
self.ProtocolName = self.ds.ProtocolName
self.Frames = self.ds.NumberOfFrames
self.displayTE = self.ds[0x2001, 0x1025].value
self.curframe = 0
# self.fake_ppms = np.linspace(-10.9754, 20.33273,int(self.Datapoints))
self.fake_ppms = np.linspace(20.33273, -10.9754,int(self.Datapoints))
# print(f'self.SpectralWidth = {self.SpectralWidth}')
# print(f'self.Datapoints = {self.Datapoints}')
# print(f'self.TransmitterFrequency = {self.TransmitterFrequency}')
else:
self.isspec = 0
self.SpecData = []
if self.isspec != 0:
self.complex_data()
#Form complex k-space data and FFT to complex spectra. Apodise also.
def complex_data(self):
#Set up arrays to store data and flags for each dynamic acquisition
#Number of dynamic acquisitions = self.Frames
self.Kspace = [] #To store raw K-space
self.Kspacewrite = [] #To store raw K-space
self.Kspaceapod = [] #To store apodised K-space
self.Spectrum = [] #To store raw spectrum
self.Spectrumapod = [] #To store apodised spectrum
self.IncludeFrame = [] #To store include frame flags
#Set up apodisation function
#apod = 1 in range [0:512] pts
#apod = decaying exponential thereafter (decay constant = self.apod_const)
#Note: apod_const is a float (128.0), so the division yields float values
#rather than rounded integers
apod = np.ones(shape = [int(self.Datapoints)], dtype = float)
for cntexp in range(512, int(self.Datapoints)):
apod[cntexp] = np.exp(-(cntexp-512)/self.apod_const)
self.apod = apod
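#Equivalent vectorized construction of the same window (sketch):
#idx = np.arange(int(self.Datapoints))
#apod = np.where(idx < 512, 1.0, np.exp(-(idx - 512) / self.apod_const))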
#Loop through each frame
for b in range(0, self.Frames):
counter = 0
#Set up dummy arrays to temporarily hold K-space and apodised K-space
#data for an individual frame
dummyKspace = np.zeros(shape = [self.Datapoints], dtype = complex)
dummyKspaceapod = np.zeros(shape = [self.Datapoints], dtype = complex)
#Data are stored R,I,R,I,... in self.SpecData. Loop through and
#extract R and I data pairs and then store as an array of complex
#numbers of size self.Datapoints
for a in range(0, self.Datapoints*2, 2):
dummyKspace[counter] = complex(self.SpecData[(b*self.Datapoints*2) + a], self.SpecData[(b*self.Datapoints*2)+a+1])
#dummyKspaceapod[counter] = dummyKspace[counter] * apod[counter]
counter = counter + 1
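#Equivalent vectorized extraction of the interleaved R,I stream (sketch):
#raw = np.asarray(self.SpecData[b*self.Datapoints*2:(b + 1)*self.Datapoints*2], dtype=float)
#dummyKspace = raw[0::2] + 1j*raw[1::2]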
#Apply apodisation
dummyKspaceapod = dummyKspace * self.apod
#Append apodised and non-apodised k-space data to appropriate array
self.Kspace.append(dummyKspace)
self.Kspacewrite.append(dummyKspace)
self.Kspaceapod.append(dummyKspaceapod)
#For each frame, fft and fftshift to display the spectrum properly
dummyspectrum = np.fft.fftshift(np.fft.fft(np.conj(dummyKspace)))
dummyspectrumapod = np.fft.fftshift(np.fft.fft(np.conj(dummyKspaceapod)))
#Append apodised and non-apodised spectra to appropriate array
self.Spectrum.append(dummyspectrum)
self.Spectrumapod.append(dummyspectrumapod)
#At this point all data are included (so =1)
self.IncludeFrame.append(1)
#Sum frames together to yield un-processed dataset
self.create_original()
self.autophase()
#Add together individual frames to yield original summed datasets
def create_original(self):
self.OriginalKspace = np.zeros(shape = [self.Datapoints], dtype = complex)
self.OriginalSpectrum = np.zeros(shape = [self.Datapoints], dtype = complex)
if self.Frames == 1:
frames = 1
else:
frames = old_div(self.Frames,2) #self.Frames / 2 because non-water-suppressed (NWS) reference data are also stored in the DICOM file
for cnt in range(0, frames):
self.OriginalKspace = self.OriginalKspace + self.Kspace[cnt]
self.OriginalSpectrum = self.OriginalSpectrum + self.Spectrum[cnt]
#Do Autophasing of individual frames
def autophase(self):
#Create data array to store processed spectrum and k-space
self.FinalKspaceauto = np.zeros(shape = [self.Datapoints], dtype = complex)
self.FinalSpectrumauto = np.zeros(shape = [self.Datapoints], dtype = complex)
#Create list for frame data
self.Spectrumauto = []
self.Spectrumautoapod = []
if self.Frames == 1:
frames = 1
else:
frames = old_div(self.Frames,2) #self.Frames / 2 because NWS data also stored in Dicom file
#Set lists
self.optphasearr = [] #Store phasing angle for each frame
self.peakposarr = []
# self.shiftarr = []
# self.shiftindex = []
self.curcomplex = []
self.acurcomplex = []
# self.med = []
#First do the phasing
for cnt in range(0, frames):
self.FinalKspaceauto = self.FinalKspaceauto + self.Kspace[cnt]
#Only use data in pre-set phasing range
#Get initial mag and phase data
curmag = np.abs(self.Spectrumapod[cnt][self.plim_l:self.plim_r])
curangle = np.angle(self.Spectrumapod[cnt][self.plim_l:self.plim_r])
#Going to loop through 0-360
#apply phase shift and then cal diff between magnitude and real data
phaselog = []
for cntr in range(0, 360):
curangle2 = curangle + (old_div(cntr*np.pi,180))
curreal = curmag * np.cos(curangle2)
curdiff = np.power((curmag - curreal),2)
phaselog.append(np.sum(curdiff))
#optimum phase shift where difference is minimised
optphase = np.argmin(np.array(phaselog))
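#Equivalent vectorized search over all 360 candidate phases (sketch):
#shifts = np.deg2rad(np.arange(360))[:, None]
#cost = np.sum((curmag - curmag*np.cos(curangle + shifts))**2, axis=1)
#optphase = np.argmin(cost)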
#Apply optimum phase shift to whole spectrum
curmagf = np.abs(self.Spectrum[cnt])
curanglef = np.angle(self.Spectrum[cnt])
curangle2f = curanglef + (old_div(optphase*np.pi,180))
currealf = curmagf * np.cos(curangle2f)
curimagf = curmagf * np.sin(curangle2f)
self.curcomplex.append(currealf + 1j*curimagf)
#Apply optimum phase shift to apodised whole spectrum
acurmagf = np.abs(self.Spectrumapod[cnt])
acuranglef = np.angle(self.Spectrumapod[cnt])
acurangle2f = acuranglef + (old_div(optphase*np.pi,180))
acurrealf = acurmagf * np.cos(acurangle2f)
acurimagf = acurmagf * np.sin(acurangle2f)
self.acurcomplex.append(acurrealf + 1j*acurimagf)
#store optimum phase shift in the list
self.optphasearr.append(optphase)
#Add in auto pick of peaks ---------------------------
#Looking for Cho and Cr peaks
#Flag to indicate if peaks have been found
got_peaks = 0
#Start from a zero threshold and raise it in steps of 1% of the real
#spectrum's maximum height until at most two peaks remain
#Do this within the expected range for Cho and Cr peaks
max_height = np.max(currealf)
inc = old_div(max_height,100) #increment size
thresh = 0
while got_peaks < 1:
found_peaks = 0
peak_pos = []
for cntr in range(1100,1160):
if currealf[cntr] > thresh:
local = currealf[cntr-5:cntr+6]
if np.argmax(local) == 5:
found_peaks = found_peaks + 1
peak_pos.append(cntr)
if found_peaks > 2:
thresh = thresh + inc
else:
got_peaks = 1
if np.size(peak_pos) < 2:
peak_pos = []
peak_pos.append(1120)
peak_pos.append(peak_pos[0] + 10)
#Store peak position arrays for each frame in a list
self.peakposarr.append(peak_pos)
self.addframes()
#Add frames together
def addframes(self):
self.FinalKspaceauto = np.zeros(shape = [self.Datapoints], dtype = complex)
self.FinalSpectrumauto = np.zeros(shape = [self.Datapoints], dtype = complex)
self.med = []
self.shiftindex = []
if self.Frames == 1:
frames = 1
else:
frames = old_div(self.Frames,2) #self.Frames / 2 because NWS data also stored in Dicom file
for cnt in range(0, frames):
#Store the mid point between the peaks in a list
#If something goes wrong then a value of zero is stored
'''Remove try/if block & include negative values to the median Patxi'''
# try:
# ind = np.int(np.floor(old_div((self.peakposarr[cnt][0] + self.peakposarr[cnt][1]),2)))
# self.shiftindex.append(ind)
# except:
# ind = 0
# self.shiftindex.append(ind)
# if ind > 0:
# self.med.append(ind)
ind = np.int(np.floor(old_div((self.peakposarr[cnt][0] + self.peakposarr[cnt][1]),2)))
self.shiftindex.append(ind)
self.med.append(ind)
# print(self.med)
med_shift_index = np.floor(np.median(self.med))
# print('median shift index:' + str(med_shift_index))
for cnt in range(0, frames):
if self.IncludeFrame[cnt] == 1:
shift = int(med_shift_index - self.shiftindex[cnt])
#print shift
addcomplex = np.roll(self.curcomplex[cnt], shift)
#addcomplex = np.roll(self.acurcomplex[cnt], shift) #Patxi
#self.Kspace[cnt] = np.fft.ifft(np.fft.fftshift(curcomplex[cnt]))
self.Kspacewrite[cnt] = np.fft.ifft(np.fft.fftshift(addcomplex))
self.FinalSpectrumauto = self.FinalSpectrumauto + addcomplex
else:
#self.Kspace[cnt] = np.fft.ifft(np.fft.fftshift(curcomplex[cnt]))
self.Kspacewrite[cnt] = np.zeros(shape = [self.Datapoints], dtype = complex)
self.Spectrumauto.append(self.curcomplex[cnt])
self.Spectrumautoapod.append(self.acurcomplex[cnt]) #Patxi
self.set_current_frame()
#IFT to get time domain data
self.FinalKspaceauto = np.fft.ifft(np.fft.fftshift(self.FinalSpectrumauto))
#self.shiftapod()
def phaseinc(self, increment):
#Apply optimum phase shift to whole spectrum
curmagf = np.abs(self.curcomplex[self.curframe])
curanglef
import socket, serial, os, time, pickle, math, json, glob, subprocess, ConfigParser
config = ConfigParser.ConfigParser()
config.read("ardustatrc.txt")
pycommand = str(config.get("values","pycommand"))
portconstant = int(config.get("values","portconstant"))
loggingpause = float(config.get("values","loggingpause"))
enabledebugging = config.get("values","enabledebugging")
if enabledebugging == "True": enabledebugging = True
else: enabledebugging = False
verbosemode = config.get("values","verbosemode")
if verbosemode == "True": verbosemode = True
else: verbosemode = False
baudrate = int(config.get("values","baudrate"))
def isArdustat(port): #Tests whether an ardustat is connected to a given port
message = ""
message = message + "\nTesting for ardustat on port "+str(port)+"."
if verbosemode == True: print message.split("\n")[-1]
try:
ser=serial.Serial(port,57600,timeout=5) #The ardustat uses a baudrate of 57600. This can be changed in the firmware
except:
return {"success":False,"message":message}
time.sleep(0.1)
ser.readline() #Read and discard one line to flush the buffer and let the serial connection settle
ser.write("s0000\n")
time.sleep(0.1)
try:
line = ser.readlines()[-1]
except:
ser.close()
message = message + "\nNo ardustat on port "+port+" or error getting serial data from ardustat."
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
if line.find("GO") !=-1 or line.find("ST") !=-1: #These are the start and end markers of the serial lines that the ardustat spits out
ser.close()
return {"success":True,"message":message}
else:
ser.close()
message = message + "\nNo ardustat on port "+port+"."
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
def findPorts():
if os.name == "posix": #Mac OS X and Linux
ports = glob.glob("/dev/tty.u*")+glob.glob("/dev/ttyU*")+glob.glob("/dev/ttyA*")
return {"success":True,"ports":ports}
elif os.name == "nt": #Windows
ports = []
for i in range(1,100):
ports.append("COM"+str(i))
return {"success":True,"ports":ports}
def guessUSB(): #This finds out what the possible serial ports are and runs isArdustat on them. Mac OS X and Linux handle serial ports differently than Windows, so we split the code up for each OS
message = ""
possibles = findPorts()["ports"]
if len(possibles)>=1:
for port in possibles:
isardresult = isArdustat(port)
message = message + isardresult["message"]
if isardresult["success"] == True:
message = message + "\nArdustat found on "+port+"."
if verbosemode == True: print message.split("\n")[-1]
return {"success":True,"port":port,"message":message}
else:
return {"success":False,"message":message+"\nCouldn't find any ardustats."}
def connecttoardustat(serialport,id,autoconnect=True):
message = ""
if autoconnect == True:
result = guessUSB()
if result["success"] == True:
serialport = result["port"]
else:
return {"success":False,"message":"Couldn't find serial port to autoconnect to. guessUSB() returned:"+result["message"]}
try:
serialforwarderprocess = subprocess.Popen([pycommand,"tcp_serial_redirect.py","-p",serialport,"-P",str(portconstant+id),"-b",str(baudrate)])
except:
if enabledebugging == True: raise
return {"success":False,"message":"Unexpected error starting serialforwarder.py."}
else:
filename = "pidfile" + str(id) + ".pickle"
pidfile = open(filename,"w") #Create an empty new file
piddict = {}
piddict["serialforwarder.py"] = serialforwarderprocess.pid
pickle.dump(piddict, pidfile)
pidfile.close()
try:
result["message"]
except:
return {"success":True,"message":"Started to open ardustat on port "+serialport+"."}
else:
return {"success":True,"message":"Started to open ardustat on port "+serialport+". guessUSB() returned:"+result["message"]}
def connecttosocket(port): #Input: an integer port number. Output: a dictionary containing whether the connection was successful "success" and an connected socket instance "socket"
message = ""
thesocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
thesocket.connect(("localhost",port))
thesocket.settimeout(1)
except:
if enabledebugging == True: raise
message = message + "\nConnection to socket "+str(port)+" failed."
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
else:
return {"success":True,"socket":thesocket}
def socketwrite(socketinstance,message,id=None): #Input: a connected socket instance and a message string; appends a newline and sends it
socketinstance.send(message+"\n")
time.sleep(0.05)
return {"success":True}
def socketread(socketinstance): #Input: any connected socket instance. Sends "s0000," then reads and concatenates lines until it gets a "ST\r\n".
#Note: The reason that we do this is that the tcp_serial_forwarder.py will sometimes send broken up lines like:
#GO
#,1037,
#311,972,0,714,3,s0000,29,0,510,ST
#Without the "while" loop, this function would just return "GO" without concatenating all of these together.
socketinstance.send("s0000\n")
time.sleep(0.01)
a = ""
while 1:
try:
a += socketinstance.recv(1024)
except:
socketinstance.send("s0000\n")
if a.find("ST\r\n") > 1:
return {"success":True,"reading":a.strip()}
def refbasis(reading,ref): #Returns an absolute potential based on the ADC reading against the 2.5 V reference
return round((float(reading)/float(ref))*2.5,3)
def resbasis(pot,id=None): #Returns the value in ohms for the given potentiometer setting
message = ""
if isinstance(id,int):
try:
f = open("resistances.pickle","r")
resdict = pickle.load(f)
f.close()
res = resdict[str(id)] #This variable stores the resistance data for the Ardustat with that specific ID number
message = message + "\nCalibration data found for id#"+str(id)
if verbosemode == True: print message.split("\n")[-1]
calibrated = True
except: #If there's no resistance data
message = message + "\nCalibration data not found for id #"+str(id)
if verbosemode == True: print message.split("\n")[-1]
res = []
for i in range(256):
res.append(i/255.*10000 + 100)
calibrated = False
else:
message = message + "\nNo ID # passed to this function. Using non-calibrated resistances."
if verbosemode == True: print message.split("\n")[-1]
res = []
for i in range(256):
res.append(i/255.*10000 + 100)
calibrated = False
try:
res[pot]
except:
message = message + "\nPotentiometer setting "+str(pot)+" out of range (0-255)!"
if verbosemode == True: print message.split("\n")[-1]
print message.split("\n")[-1]
return {"success":False,"message":message,"resistance":False,"calibrated":calibrated}
else:
return {"success":True,"message":message,"resistance":res[pot],"calibrated":calibrated}
def ocv(port):
message = ""
socketresult = connecttosocket(port)
if socketresult["success"] == False: return {"success":False,"message":socketresult["message"]}
else:
socketwrite(socketresult["socket"],"-0000")
return {"success":True,"message":"Set to OCV"}
def setResistance(resistance,port,id=None):
message = ""
socketresult = connecttosocket(port)
if socketresult["success"] == False: return {"success":False,"message":socketresult["message"]}
closestvalue = 0
for i in range(1,256):
if math.fabs(resistance - resbasis(i,id)["resistance"]) < math.fabs(resistance - resbasis(closestvalue,id)["resistance"]): #If the absolute value of the difference between this resistance and the ideal resistance is less than the absolute value of the other closest difference...
closestvalue = i
closestvalue = str(closestvalue).rjust(4,"0")
socketwrite(socketresult["socket"],"r"+closestvalue)
setting = resbasis(int(closestvalue),id)["resistance"]
message = message + "\nSet resistance to "+str(setting)
if verbosemode == True: print message.split("\n")[-1]
return {"success":True,"message":message,"setting":setting}
def setVoltageDifference(potential,port):
socketresult = connecttosocket(port)
if socketresult["success"] == False: return {"success":False,"message":socketresult["message"]}
if potential >= 0:
setting = str(int(1023*(potential/5.0))).rjust(4,"0")
else:
setting = str(int(1023*(math.fabs(potential)/5.0))+2000)
socketwrite(socketresult["socket"],"g"+setting)
return {"success":True,"message":"Sent command g"+setting+"."}
def raiseGround(potential,port):
socketresult = connecttosocket(port)
if socketresult["success"] == False: return {"success":False,"message":socketresult["message"]}
setting = str(int(1023*(potential/5.0)) + 2000).rjust(4,"0")
socketwrite(socketresult["socket"],"X"+setting)
return {"success":True,"message":"Sent command X"+setting+"."}
def potentiostat(potential, port):
socketresult = connecttosocket(port)
if socketresult["success"] == False: return {"success":False,"message":socketresult["message"]}
setting = str(int(1023*(potential/5.0))).rjust(4,"0")
socketwrite(socketresult["socket"],"p"+setting)
return {"success":True,"message":"Sent command p"+setting+"."}
def galvanostat(current,port,id=None): #This takes a specified current as input and calculates the right resistor setting and voltage difference to set it. See http://steingart.ccny.cuny.edu/ardustat-theory
message = ""
socketresult = connecttosocket(port)
if socketresult["success"] == False: return {"success":False,"message":socketresult["message"]}
thesocket = socketresult["socket"]
if id == None:
message = message + "\nWarning: No ID # passed to this function!"
if verbosemode == True: print message.split("\n")[-1]
if current == 0: #Current is zero; do OCV
ocv(port)
message = message + "\nSet OCV mode since current was 0"
if verbosemode == True: print message.split("\n")[-1]
return {"success":True,"message":message}
else:
socketreadresult = socketread(thesocket)
if socketreadresult == False:
message = message + "\nCouldn't read data from socket."
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
parseddict = parse(socketreadresult["reading"],id)
if parseddict["success"] == False:
message = message + "\nCouldn't parse data: " + parseddict["message"]
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
if current > 0.001 or current < -0.001:
voltagedifference = current * 1000 #1 milliamp corresponds to 1V, 1 microamp corresponds to 1 millivolt, etc
else:
voltagedifference = current * 10000
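#e.g. current = 0.002 A gives a 2.0 V target difference (1 mA <-> 1 V),
#while current = 5e-5 A gives 0.5 V under the finer 10000 V/A scaling.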
minvoltagedifference = parseddict["cell_ADC"] * -1
maxvoltagedifference = 5 - parseddict["cell_ADC"]
if voltagedifference < minvoltagedifference: voltagedifference = minvoltagedifference
if voltagedifference > maxvoltagedifference: voltagedifference = maxvoltagedifference
resistancevalue = voltagedifference / current
minresistance = resbasis(0,id)["resistance"]
maxresistance = resbasis(255,id)["resistance"]
while resistancevalue < minresistance: #If resistor can't go that low for that voltage setting...
if voltagedifference >= 0:
voltagedifference = voltagedifference + 0.00489 #Tick DAC up a setting
if voltagedifference < maxvoltagedifference:
resistancevalue = voltagedifference / current #Recalculate resistor setting
else: #Voltage is maxed out and resistor still can't go low enough to source current...
message = message + "\nYou connected a "+str(parseddict["cell_ADC"])+" cell. Max current for that cell is "+str(maxvoltagedifference/minresistance)+" A. Cannot set current as "+str(current)+" A."
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
else:
voltagedifference = voltagedifference - 0.00489 #Tick DAC down a setting
if voltagedifference > minvoltagedifference:
resistancevalue = voltagedifference / current #Recalculate resistor setting
else: #Voltage is at lowest value and resistor still can't go low enough to source current...
message = message + "\nYou connected a "+str(parseddict["cell_ADC"])+" cell. Min current for that cell is "+str(minvoltagedifference/minresistance)+" A. Cannot set current as "+str(current)+" A."
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
if resistancevalue > maxresistance: #If resistor can't go that high for that voltage setting...
resistancevalue = maxresistance
voltagedifference = resistancevalue * current #Recalculate voltage difference
if voltagedifference < minvoltagedifference: #Voltage difference is so low at max resistance that the closest DAC setting is 0.
message = message + "\nYour current is lower than the maximum resistance of the potentiometer * the minimum voltage difference of the DAC."
if verbosemode == True: print message.split("\n")[-1]
return {"success":False,"message":message}
#If we are at this point, then we find the resistor setting that corresponds to the resistance value
resistanceresult = setResistance(resistancevalue, port, id)
time.sleep(0.3)
setVoltageDifference(voltagedifference, port)
message = message + "\nSuccessfully set galvanostat. Set voltage difference to "+str(voltagedifference)+" V. Set resistance to "+str(resistanceresult["setting"])+" Ohm."
if verbosemode == True: print message.split("\n")[-1]
return {"success":True,"message":message}
def calibrate(resistance,port,id): #Since the actual resistances for digital potentiometer setting are unknown, we connect a resistor of known resistance and use the voltage divider equation to find the actual resistance of each potentiometer setting, then save it in a pickle
message = ""
socketresult = connecttosocket(port)
if socketresult["success"] == False: return {"success":False,"message":socketresult["message"]}
thesocket = socketresult["socket"]
message = message + "Beginning calibration for ardustat with ID #"+str(id)
print message
socketwrite(thesocket,"X1023")
time.sleep(0.2)
socketwrite(thesocket,"R")
time.sleep(0.2)
rescount = 0
reslist = [[] for x in range(256)]
while rescount <= (256 * 5):
line = socketread(thesocket)["reading"]
parsedict = parse(line,id)
if parsedict["success"] == True:
line = line.split(",")
try:
reslist[int(line[4])].append((resistance*int(line[3]))/int(line[2]) - resistance) #Add calculated resistance for that potentiometer setting ( equal to (R(in)*DAC)/ADC - R(in) )
rescount +=1
except:
ocv(port) #calibrate is a module-level function, so call the module-level ocv()
if enabledebugging == True: raise
message = message + "\nUnexpected error in calibration with serial input "+str(line)
if verbosemode == True: print message.split("\n")[-1]
print message.split("\n")[-1]
return {"success":False,"message":message}
print "Calibration: " + str(float(rescount)/(256*5)*100)[:4]+"%"
ocv(port)
#calculate average
res = [sum(x)/len(x) for x in reslist]
try:
f = open("resistances.pickle","r")
resdict = pickle.load(f)
f.close()
except:
pass
try:
resdict[str(id)] = res
except: #There is no existing resistance data; create the initial dictionary
resdict = {str(id):res}
try:
f = open("resistances.pickle",'w')
except:
if enabledebugging == True: raise
message = message + "\nCouldn't open resistances.pickle file for writing. It may be open | |
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class excepthandler(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = (
'lineno',
'col_offset',
)
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': ('lineno', 'col_offset'), '__dict__': <attribute '__dict__' of 'excepthandler' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'excepthandler' objects>, '__doc__': None})"
class ExceptHandler(excepthandler):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'type',
'name',
'body',
)
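# Minimal usage sketch (assumes the standard-library ast module on Python 2,
# which produces instances of these node classes):
# import ast
# tree = ast.parse("try:\n    pass\nexcept ValueError, e:\n    pass")
# handler = tree.body[0].handlers[0]  # an ExceptHandler node
# print handler.lineno, handler.col_offset, handler._fields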
class Exec(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
'globals',
'locals',
)
class Expr(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class mod(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': (), '__dict__': <attribute '__dict__' of 'mod' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'mod' objects>, '__doc__': None})"
class Expression(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
)
class ExtSlice(slice):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'dims',
)
class FloorDiv(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class For(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'target',
'iter',
'body',
'orelse',
)
class FunctionDef(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'name',
'args',
'body',
'decorator_list',
)
class GeneratorExp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elt',
'generators',
)
class Global(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
class Gt(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class GtE(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class If(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'test',
'body',
'orelse',
)
class IfExp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'test',
'body',
'orelse',
)
class Import(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
class ImportFrom(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'module',
'names',
'level',
)
class In(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Index(slice):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class Interactive(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
)
class unaryop(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': (), '__dict__': <attribute '__dict__' of 'unaryop' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'unaryop' objects>, '__doc__': None})"
class Invert(unaryop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Is(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class IsNot(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class keyword(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_fields = (
'arg',
'value',
)
__dict__ = None # (!) real value is "dict_proxy({'__dict__': <attribute '__dict__' of 'keyword' objects>, '__module__': '_ast', '_fields': ('arg', 'value'), '__weakref__': <attribute '__weakref__' of 'keyword' objects>, '__doc__': None})"
class Lambda(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'args',
'body',
)
class List(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elts',
'ctx',
)
class ListComp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elt',
'generators',
)
class Load(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class LShift(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Lt(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class LtE(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Mod(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Module(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
)
class Mult(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Name(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'id',
'ctx',
)
class Not(unaryop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class NotEq(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class NotIn(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Num(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'n',
)
class Or(boolop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Param(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Pass(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Pow(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Print(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'dest',
'values',
'nl',
)
class Raise(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'type',
'inst',
'tback',
)
class Repr(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class Return(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class RShift(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Set(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elts',
)
class SetComp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elt',
'generators',
)
class Slice(slice):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'lower',
'upper',
'step',
)
class Store(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Str(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
's',
)
class Sub(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Subscript(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
'slice',
'ctx',
)
class Suite(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
)
class TryExcept(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
'handlers',
'orelse',
)
class TryFinally(stmt):
# no doc
def __init__(self, *args, | |
#This states the metadata for the plugin
bl_info = {
"name": "AssetGen",
"author": "Developed by <NAME> aka zero025 (<EMAIL>) and surpervised by <NAME> aka Linko (<EMAIL>)",
"version": (0, 1),
"blender": (2, 79),
"api": 39347,
"location": "3D View > Object Mode > Tools > AssetGen",
"description": "Game development tools",
"warning": "Beta",
"tracker_url": "",
"category": "Object"
}
if "bpy" in locals():
import imp
imp.reload(GA_user_interface)
imp.reload(GM_user_interface)
imp.reload(GT_user_interface)
imp.reload(GA_material)
imp.reload(GA_shader)
imp.reload(GA_composite)
imp.reload(GM_def)
imp.reload(GA)
else:
from . import GA_user_interface
from . import GM_user_interface
from . import GT_user_interface
from . import GA_material
from . import GA_shader
from . import GA_composite
from . import GM_def
from . import GA
import bpy
from .GA_shader import DEF_surface_add
def def_curvature_Update(self, context):
    # Curvature is composited from the normal map, so enabling it forces the normal bake on.
    if self.T_curvature:
        self.T_normal = True
def def_normal_Update(self, context):
    # Disabling the normal bake also disables the dependent curvature bake.
    if not self.T_normal:
        self.T_curvature = False
def def_surface(self, context):
DEF_surface_add(context)
# Settings
class GA_Property(bpy.types.PropertyGroup):
gui_active_panel = bpy.props.StringProperty(
name="gui_active_panel",
default="None"
)
#GAME ASSET
############
D_textureX = bpy.props.EnumProperty(
items=[('256', '256', '256 resolution'),
('512', '512', '512 resolution'),
('1K', '1K', '1k resolution'),
('2K', '2K', '2k resolution'),
               ('4K', '4K', '4k resolution')],
#update=def_Texture_Update,
description="Choose texture resolution X",
default='1K'
)
D_textureY = bpy.props.EnumProperty(
items=[('256', '256', '256 resolution'),
('512', '512', '512 resolution'),
('1K', '1K', '1k resolution'),
('2K', '2K', '2k resolution'),
               ('4K', '4K', '4k resolution')],
#update=def_Texture_Update,
description="Choose texture resolution Y",
default='1K'
)
D_selected_to_active= bpy.props.BoolProperty(
name = 'Selected to active',
description = "Use this if you have a low poly already. Your low poly must be the active selection",
default = False
)
D_name = bpy.props.StringProperty(
name="Name",
description = "Prefix name of your game asset and textures",
default="game_asset"
)
D_LOD0 = bpy.props.IntProperty(
name = 'LOD0',
description = "Maximum polycount of the LOD0 (closest to the camera). AssetGen will reduce this polycount if it detects unacessary edges",
default = 1000,
min = 1,
max = 100000
)
D_LOD1 = bpy.props.IntProperty(
name = 'LOD1',
description = "Maximum polycount of the LOD1 (often 50 % of the LOD0)",
default = 0,
min = 0,
max = 100000
)
D_LOD2 = bpy.props.IntProperty(
name = 'LOD2',
description = "Maximum polycount of the LOD2 (often 25 % of the LOD0)",
default = 0,
min = 0,
max = 100000
)
D_LOD3 = bpy.props.IntProperty(
name = 'LOD3',
description = "Maximum polycount of the LOD3 (often 12.5 % of the LOD0)",
default = 0,
min = 0,
max = 100000
)
D_cage_size = bpy.props.FloatProperty(
name = 'Cage size',
description = "Size (inflate) of the low poly during the baking to avoid intersecting with the high poly",
default = 0.03,
min = 0.00,
max = 1.00
)
D_edge_padding = bpy.props.FloatProperty(
name = 'Edge padding',
description = "Number of pixels that goes above your UV seams",
default = 16,
min = 0.00,
max = 100.00
)
D_uv_margin = bpy.props.FloatProperty(
name = 'UV margin',
description = "Space between UVs islands",
default = 0.01,
min = 0.000,
max = 1.000
)
D_uv_angle = bpy.props.FloatProperty(
name = 'UV angle',
description = "Define at which angle from the world space a seam must be added. Lower value = more chunks and less perfs, higher = potential overlapping and lose in Texel density.",
default = 45,
min = 0,
max = 89
)
D_samples = bpy.props.IntProperty(
name = 'Samples',
description = "Quality of the ambient occlusion, normal map and bent map. Other textures use 1 sample",
default = 32,
min = 0,
max = 100000
)
DT_exp = bpy.props.EnumProperty(
items=[('FBX', 'FBX', 'FBX'),
('OBJ', 'OBJ', 'OBJ')],
description="Choose export file",
default='OBJ'
)
DT_pathobj = bpy.props.StringProperty(
name="Mesh path",
description = "Choose where to save your file",
default='',
subtype = 'DIR_PATH'
)
DT_pathntoc = bpy.props.StringProperty(
name="Nor path",
description = "Open a Tangent Space Normal map",
default='',
subtype = 'FILE_PATH'
)
DT_exportcenterpos = bpy.props.BoolProperty(
name = 'Center position',
description = "Center position",
default = True
)
DT_exporttexture = bpy.props.BoolProperty(
name = 'Textures in a subfolder',
description = "Texture in subfolder",
default = True
)
DT_exportpathtexture = bpy.props.StringProperty(
name="Textures",
description = "Texture in subfolder.",
default='//Textures',
subtype = 'DIR_PATH'
)
D_create_envelop = bpy.props.BoolProperty(
        name = 'Create envelope',
        description = "Will try to remove all intersections between separated meshes. Can fail with small meshes or meshes that do not intersect enough",
default = True
)
D_groundAO = bpy.props.BoolProperty(
name = 'Ground AO',
description = "Generate ambient occlusion from the grid, ideal for static meshes that stand on the ground",
default = False
)
D_removeunderground = bpy.props.BoolProperty(
name = 'Remove underground',
description = "Remove the invisible part of the mesh that intersect with the ground (grid) to avoid waste of polygons and texture space",
default = False
)
D_unfoldhalf = bpy.props.BoolProperty(
name = 'Unfold Half',
description = "Unfold half of your asset in +X, ideal for symmetrical assets to save texture space",
default = False
)
##################################
T_mask = bpy.props.BoolProperty(
name = 'Mask',
description = "The mask map will bake the colors of your asset and use it as a mask to apply different gradients with the Game Asset Material tool",
default = True
)
T_albedo = bpy.props.BoolProperty(
name = 'Albedo',
description = "Bakes the color of the high poly ideal for scanned assets otherwise it iss recommended to do the albedo after the low poly",
default = False
)
T_normal = bpy.props.BoolProperty(
name = 'Normal',
description = "Will keep every details of your hight poly",
update=def_normal_Update,
default = True
)
T_ao = bpy.props.BoolProperty(
name = 'Ambient Occlusion',
description = "Generates shadows on parts close to each others, works on any lighting conditions",
default = True
)
T_ao_denoising = bpy.props.BoolProperty(
name = 'Activate denoising',
description = "Will blur the AO map and use the normal map to detect the borders that must stay untouched. You must increase the UV margin to avoid artifacts",
default = False
)
T_ao_colorsigma = bpy.props.FloatProperty(
name = 'Color Sigma',
description = "",
default = 3,
min = 0,
max = 3
)
T_ao_spacesigma = bpy.props.FloatProperty(
name = 'Space Sigma',
description = "",
default = 0.01,
min = 0,
max = 30
)
T_pointiness = bpy.props.BoolProperty(
name = 'Pointiness',
description = "Generates a vertex based curvature map (quality depends on the polycount of the high poly)",
default = True
)
T_bent = bpy.props.BoolProperty(
name = 'Bent',
description = "Bake the orientation of the faces from the world space. It is used to create effects (dust, snow, etc) and fake top lighting in non-PBR games",
default = True
)
T_gradient = bpy.props.BoolProperty(
name = 'Gradient',
description = "Bake a dark bottom and white top of your object. This method is often used in stylized games especially view from top",
default = False
)
T_opacity = bpy.props.BoolProperty(
name = 'Opacity',
description = "Bake in white non opaque part and black parts that needs to be transparent. Most game engines can read the opacity directly on the Albedo",
default = False
)
T_emissive = bpy.props.BoolProperty(
name = 'Emissive',
description = "Emissive",
default = False
)
T_curvature = bpy.props.BoolProperty(
name = 'Generate Curvature',
description = "This effect will composite the normal map to generate a greyscale with convex shapes in white and concave in dark",
update=def_curvature_Update,
default = False
)
T_curvature_pixelwidth = bpy.props.IntProperty(
name = 'Pixel width',
description = "The width in pixel of the white lines generated from concave shapes and black from convex shapes. Increasing this value can be useful to bake a sprite from an high poly to a plane",
default = 1,
min = 1,
max = 8
)
T_curvature_shadows = bpy.props.BoolProperty(
name = 'Shadows',
description = "",
default = False
)
T_curvature_blur = bpy.props.FloatProperty(
name = 'Blur',
description = "Amount of relative blur to avoid getting aliasing and/or lines too contrasted",
default = 0,
min = 0.000,
max = 1.000
)
T_surface_noise = bpy.props.BoolProperty(
name = 'Noise',
description = "",
update=def_surface,
default = False
)
T_surface_rock = bpy.props.BoolProperty(
name = 'Rock',
description = "",
update=def_surface,
default = False
)
T_surface_sand_waves = bpy.props.BoolProperty(
name = 'Sand waves',
description = "",
update=def_surface,
default = False
        )
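# The dump is truncated above; the remaining properties and the addon's
# registration hooks are missing. A minimal sketch of what registration
# typically looks like for a Blender 2.79 addon of this shape follows.
# The scene property name `ga_property` is an illustrative assumption,
# not the original code:
def register():
    bpy.utils.register_module(__name__)
    bpy.types.Scene.ga_property = bpy.props.PointerProperty(type=GA_Property)
def unregister():
    del bpy.types.Scene.ga_property
    bpy.utils.unregister_module(__name__)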
<filename>dipy/tracking/streamline.py<gh_stars>1-10
from copy import deepcopy
from warnings import warn
import types
from scipy.spatial.distance import cdist
import numpy as np
from nibabel.affines import apply_affine
from nibabel.streamlines import ArraySequence as Streamlines
from dipy.tracking.streamlinespeed import set_number_of_points
from dipy.tracking.streamlinespeed import length
from dipy.tracking.streamlinespeed import compress_streamlines
import dipy.tracking.utils as ut
from dipy.tracking.utils import streamline_near_roi
from dipy.core.geometry import dist_to_corner
import dipy.align.vector_fields as vfu
def unlist_streamlines(streamlines):
""" Return the streamlines not as a list but as an array and an offset
Parameters
----------
streamlines: sequence
Returns
-------
points : array
offsets : array
"""
points = np.concatenate(streamlines, axis=0)
offsets = np.zeros(len(streamlines), dtype='i8')
curr_pos = 0
prev_pos = 0
for (i, s) in enumerate(streamlines):
prev_pos = curr_pos
curr_pos += s.shape[0]
points[prev_pos:curr_pos] = s
offsets[i] = curr_pos
return points, offsets
def relist_streamlines(points, offsets):
""" Given a representation of a set of streamlines as a large array and
    an offsets array, return the streamlines as a list of shorter arrays.
Parameters
-----------
points : array
offsets : array
Returns
-------
streamlines: sequence
"""
streamlines = []
streamlines.append(points[0: offsets[0]])
for i in range(len(offsets) - 1):
streamlines.append(points[offsets[i]: offsets[i + 1]])
return streamlines
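# Example round trip for the two helpers above (illustrative; not part of
# the original module):
#
#     sl = [np.array([[0., 0., 0.], [1., 1., 1.]]),
#           np.array([[2., 2., 2.], [3., 3., 3.], [4., 4., 4.]])]
#     points, offsets = unlist_streamlines(sl)
#     # points.shape == (5, 3); offsets == [2, 5] (end index of each line)
#     rebuilt = relist_streamlines(points, offsets)
#     assert all(np.array_equal(a, b) for a, b in zip(sl, rebuilt))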
def center_streamlines(streamlines):
""" Move streamlines to the origin
Parameters
----------
streamlines : list
List of 2D ndarrays of shape[-1]==3
Returns
-------
new_streamlines : list
List of 2D ndarrays of shape[-1]==3
inv_shift : ndarray
Translation in x,y,z to go back in the initial position
"""
center = np.mean(np.concatenate(streamlines, axis=0), axis=0)
return [s - center for s in streamlines], center
def transform_streamlines(streamlines, mat):
""" Apply affine transformation to streamlines
Parameters
----------
streamlines : list
List of 2D ndarrays of shape[-1]==3
mat : array, (4, 4)
transformation matrix
Returns
-------
new_streamlines : list
List of the transformed 2D ndarrays of shape[-1]==3
"""
return [apply_affine(mat, s) for s in streamlines]
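# Illustrative use of transform_streamlines (not in the original source):
# translate every streamline by (10, 0, 0) with a plain 4x4 affine.
#
#     mat = np.eye(4)
#     mat[0, 3] = 10.0
#     moved = transform_streamlines(streamlines, mat)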
def select_random_set_of_streamlines(streamlines, select):
""" Select a random set of streamlines
Parameters
----------
streamlines : list
List of 2D ndarrays of shape[-1]==3
select : int
        Number of streamlines to select. If there are fewer streamlines
than ``select`` then ``select=len(streamlines)``.
Returns
-------
selected_streamlines : list
Notes
-----
The same streamline will not be selected twice.
"""
len_s = len(streamlines)
index = np.random.choice(len_s, min(select, len_s), replace=False)
return [streamlines[i] for i in index]
def select_by_rois(streamlines, rois, include, mode=None, affine=None,
tol=None):
"""Select streamlines based on logical relations with several regions of
interest (ROIs). For example, select streamlines that pass near ROI1,
but only if they do not pass near ROI2.
Parameters
----------
streamlines : list
A list of candidate streamlines for selection
rois : list or ndarray
A list of 3D arrays, each with shape (x, y, z) corresponding to the
shape of the brain volume, or a 4D array with shape (n_rois, x, y,
z). Non-zeros in each volume are considered to be within the region
include : array or list
A list or 1D array of boolean values marking inclusion or exclusion
criteria. If a streamline is near any of the inclusion ROIs, it
should evaluate to True, unless it is also near any of the exclusion
ROIs.
mode : string, optional
One of {"any", "all", "either_end", "both_end"}, where a streamline is
associated with an ROI if:
"any" : any point is within tol from ROI. Default.
"all" : all points are within tol from ROI.
"either_end" : either of the end-points is within tol from ROI
"both_end" : both end points are within tol from ROI.
affine : ndarray
Affine transformation from voxels to streamlines. Default: identity.
tol : float
Distance (in the units of the streamlines, usually mm). If any
coordinate in the streamline is within this distance from the center
of any voxel in the ROI, the filtering criterion is set to True for
this streamline, otherwise False. Defaults to the distance between
the center of each voxel and the corner of the voxel.
Notes
-----
The only operation currently possible is "(A or B or ...) and not (X or Y
or ...)", where A, B are inclusion regions and X, Y are exclusion regions.
Returns
-------
generator
Generates the streamlines to be included based on these criteria.
See also
--------
:func:`dipy.tracking.utils.near_roi`
:func:`dipy.tracking.utils.reduce_rois`
Examples
--------
>>> streamlines = [np.array([[0, 0., 0.9],
... [1.9, 0., 0.]]),
... np.array([[0., 0., 0],
... [0, 1., 1.],
... [0, 2., 2.]]),
... np.array([[2, 2, 2],
... [3, 3, 3]])]
>>> mask1 = np.zeros((4, 4, 4), dtype=bool)
>>> mask2 = np.zeros_like(mask1)
>>> mask1[0, 0, 0] = True
>>> mask2[1, 0, 0] = True
>>> selection = select_by_rois(streamlines, [mask1, mask2],
... [True, True],
... tol=1)
>>> list(selection) # The result is a generator
[array([[ 0. , 0. , 0.9],
[ 1.9, 0. , 0. ]]), array([[ 0., 0., 0.],
[ 0., 1., 1.],
[ 0., 2., 2.]])]
>>> selection = select_by_rois(streamlines, [mask1, mask2],
... [True, False],
... tol=0.87)
>>> list(selection)
[array([[ 0., 0., 0.],
[ 0., 1., 1.],
[ 0., 2., 2.]])]
>>> selection = select_by_rois(streamlines, [mask1, mask2],
... [True, True],
... mode="both_end",
... tol=1.0)
>>> list(selection)
[array([[ 0. , 0. , 0.9],
[ 1.9, 0. , 0. ]])]
>>> mask2[0, 2, 2] = True
>>> selection = select_by_rois(streamlines, [mask1, mask2],
... [True, True],
... mode="both_end",
... tol=1.0)
>>> list(selection)
[array([[ 0. , 0. , 0.9],
[ 1.9, 0. , 0. ]]), array([[ 0., 0., 0.],
[ 0., 1., 1.],
[ 0., 2., 2.]])]
"""
if affine is None:
affine = np.eye(4)
# This calculates the maximal distance to a corner of the voxel:
dtc = dist_to_corner(affine)
if tol is None:
tol = dtc
elif tol < dtc:
w_s = "Tolerance input provided would create gaps in your"
w_s += " inclusion ROI. Setting to: %s" % dist_to_corner
warn(w_s)
tol = dtc
include_roi, exclude_roi = ut.reduce_rois(rois, include)
include_roi_coords = np.array(np.where(include_roi)).T
x_include_roi_coords = apply_affine(affine, include_roi_coords)
exclude_roi_coords = np.array(np.where(exclude_roi)).T
x_exclude_roi_coords = apply_affine(affine, exclude_roi_coords)
if mode is None:
mode = "any"
for sl in streamlines:
include = streamline_near_roi(sl, x_include_roi_coords, tol=tol,
mode=mode)
exclude = streamline_near_roi(sl, x_exclude_roi_coords, tol=tol,
mode=mode)
if include & ~exclude:
yield sl
def _orient_generator(out, roi1, roi2):
"""
Helper function to `orient_by_rois`
Performs the inner loop separately. This is needed, because functions with
`yield` always return a generator
"""
for idx, sl in enumerate(out):
dist1 = cdist(sl, roi1, 'euclidean')
dist2 = cdist(sl, roi2, 'euclidean')
min1 = np.argmin(dist1, 0)
min2 = np.argmin(dist2, 0)
if min1[0] > min2[0]:
yield sl[::-1]
else:
yield sl
def _orient_list(out, roi1, roi2):
"""
Helper function to `orient_by_rois`
Performs the inner loop separately. This is needed, because functions with
`yield` always return a generator.
Flips the streamlines in place (as needed) and returns a reference to the
updated list.
"""
for idx, sl in enumerate(out):
dist1 = cdist(sl, roi1, 'euclidean')
dist2 = cdist(sl, roi2, 'euclidean')
min1 = np.argmin(dist1, 0)
min2 = np.argmin(dist2, 0)
if min1[0] > min2[0]:
out[idx] = sl[::-1]
return out
def orient_by_rois(streamlines, roi1, roi2, in_place=False,
as_generator=False, affine=None):
"""Orient a set of streamlines according to a pair of ROIs
Parameters
----------
streamlines : list or generator
List or generator of 2d arrays of 3d coordinates. Each array contains
the xyz coordinates of a single streamline.
roi1, roi2 : ndarray
Binary masks designating the location of the regions of interest, or
coordinate arrays (n-by-3 array with ROI coordinate in each row).
in_place : bool
Whether to make the change in-place in the original list
(and return a reference to the list), or to make a copy of the list
and return this copy, with the relevant streamlines reoriented.
Default: False.
as_generator : bool
Whether to return a generator as output. Default: False
affine : ndarray
Affine transformation from voxels to streamlines. Default: identity.
Returns
-------
streamlines : list or generator
The same 3D arrays as a list or generator, but reoriented with respect
to the ROIs
Examples
--------
>>> streamlines = [np.array([[0, 0., 0],
... [1, 0., 0.],
... [2, 0., 0.]]),
... np.array([[2, 0., 0.],
... [1, 0., 0],
... [0, 0, 0.]])]
>>> roi1 = np.zeros((4, 4, 4), dtype=bool)
>>> roi2 = np.zeros_like(roi1)
>>> roi1[0, 0, 0] = True
    >>> roi2[1, 0, 0] = True
<gh_stars>1-10
import tkinter
from tkinter import ttk
import cv2
from PIL import Image, ImageTk
import socket
import json
from vidgear.gears import NetGear
import threading
import os
import webbrowser
HOST = '10.0.0.8'
PORT = 6005
class Menubar(ttk.Frame):
def __init__(self, parent):
"""
Constructor:
Initializes the menubar for the GUI
Calls the setup (init_menubar) for the rest of the menubar.
Args:
parent: Root window for the MenuBar.
Returns:
None.
"""
ttk.Frame.__init__(self, parent)
self.root = parent
self.init_menubar()
def display_help(self):
"""
Displays the help document (How to use the GUI).
Args:
None.
Returns:
None.
"""
webbrowser.open("Help_Document.txt")
pass
def display_about(self):
"""
Displays info about program (purpose of the system).
Args:
None
Returns:
None.
"""
pass
def init_menubar(self):
"""
        Creates the menubar (attached to root) and adds the functionality for the menubar.
Args:
None.
Returns:
None.
"""
self.menubar = tkinter.Menu(self.root)
self.menu_help = tkinter.Menu(self.menubar) #Creates a "Help" menu
self.menu_help.add_command(label='Help', command=self.display_help)
self.menu_help.add_command(label='About', command=self.display_about)
self.menubar.add_cascade(menu=self.menu_help, label='Info')
self.root.config(menu=self.menubar)
class GUI(ttk.Frame):
def __init__(self, parent):
"""
Constructor:
        Initializes the GUI with the parameters set by the user.
Calls the setup (init_gui) for the rest of the GUI.
Args:
parent: Root window for the Main Window.
Returns:
None.
"""
ttk.Frame.__init__(self, parent)
self.root = parent
self.systemStatus = "Offline"
self.init_gui()
def init_gui(self):
"""
        Sets up all the parameters (and their values) needed by the GUI.
Includes window size, components, background, and more.
Args:
None
Returns:
None
"""
self.root.title('Arm Solutions')
self.root.geometry("1200x800")
self.background_image = Image.open("Flat_colorful_background.png")
self.background_image = self.background_image.resize((2000,2000), Image.ANTIALIAS)
self.background_image = ImageTk.PhotoImage(self.background_image)
self.label = tkinter.Label(self.root, image=self.background_image)
self.label.image = self.background_image
self.label.place(x=0,y=0, relwidth=1, relheight=1)
self.root.grid_columnconfigure(1, weight=1)
self.root.grid_columnconfigure(0, weight=1)
self.root.option_add('*tearOff', 'FALSE') # Disables ability to tear menu bar into own window
#
self.systemStatusLabelText = tkinter.StringVar()
self.systemStatusLabel = tkinter.Label(textvariable= self.systemStatusLabelText , bg = '#e81a1a', width = 25)
self.startup_button = tkinter.Button(self.root, text ="Start Up/Resume System", command = self.startSystem, height=3, width= 35, bg = '#499c5f')
self.pause_button = tkinter.Button(self.root, text ="Put System in Standby", command = self.pauseSystem, height=3, width= 35, bg ='#f9ff54')
self.settings_button = tkinter.Button(self.root, text ="System Settings", command = self.systemSettings, height=3, width= 35, bg = '#adaaaa')
self.exit_button = tkinter.Button(self.root, text ="Shut Down System", command = self.on_exit, height=3, width= 35, bg = '#e81a1a')
self.imageFrame = tkinter.Frame(self.root)
self.imageFrame.grid(row=0, column=1, padx=10, pady=50, rowspan=4)
self.startup_button.grid(row=0,column=0, padx=50, pady=(60,100))
self.pause_button.grid(row=1,column=0, padx=50, pady=(0,100))
self.settings_button.grid(row=2, column=0, padx=50, pady=(0,100))
self.exit_button.grid(row=3, column=0, padx=50)
self.systemStatusLabel.grid(row=3, column=1, pady=10, padx=240)
self.systemStatusLabel.config(font=("New Times Roman", 20))
self.startup_button.config(font=("New Times Roman", 16))
self.pause_button.config(font=("New Times Roman", 16))
self.settings_button.config(font=("New Times Roman", 16))
self.exit_button.config(font=("New Times Roman", 16))
# Menu Bar
self.menubar = Menubar(self.root)
# Padding
for child in self.winfo_children():
child.grid_configure(padx=10, pady=5)
self.lmain = tkinter.Label(self.imageFrame)
#Variables used later on
self.speed = tkinter.DoubleVar()
self.systemStatusLabelText.set("System Status - Offline")
def sendPlaceLocation(self):
"""
        Sends the updated drop (place) location of the parcel, set by the user, to the server.
Args:
self.place_position (string): Drop location of parcel set by user.
Returns:
None
"""
try:
data = self.place_position_data.get()
self.place_position_data.set("")
data = data.split()
if(len(data) == 6):
if(self.systemStatus == "Online"):
print()
jsonData = {"first": "Client 2", "second": "Drop Location", "third": data[0], "fourth": data[1], "fifth": data[2], "sixth": data[3], "seventh": data[4], "eight": data[5] }
self.send(jsonData)
self.placeLocationStatus.set("Parcel Drop Location Updated")
else:
self.placeLocationStatus.set("System status: " + self.systemStatus)
else:
self.placeLocationStatus.set("Incorrect format")
except Exception as error:
print(error)
self.placeLocationStatus.set("Error")
#Work the styling for settings window
def systemSettings(self):
"""
Settings window for the User Interface.
        Allows the user to edit multiple aspects of the system, including velocity, drop location, and more.
Args:
None
Returns:
None.
"""
self.settingsWindow = tkinter.Toplevel(self.root)
self.settingsWindow.title("Settings")
self.settingsWindow.geometry("400x400")
self.velocity_scale = tkinter.Scale(self.settingsWindow, variable = self.speed, from_ = 1, to = 100, orient= tkinter.HORIZONTAL, length = 250, width=30)
self.display_velocity = tkinter.Label(self.settingsWindow)
self.set_velocity = tkinter.Button(self.settingsWindow, text ="Set Velocity",
command = self.setVelocity)
self.eco_button = tkinter.Button(self.settingsWindow, text = "ECO Mode", command = self.ecoMode)
self.place_position_data = tkinter.StringVar()
self.placeLocationStatus = tkinter.StringVar()
self.placeLocationStatus.set("")
# creating a label for
# name using widget Label
place_position_label = tkinter.Label(self.settingsWindow, text = 'Parcel Drop Location', font=('calibre',10, 'bold'))
# creating a entry for input
# name using widget Entry
place_position_entry = tkinter.Entry(self.settingsWindow, textvariable = self.place_position_data, font=('calibre',10,'normal'))
# creating a button using the widget
# Button that will call the submit function
sub_btn=tkinter.Button(self.settingsWindow, text = 'Update Location', command = self.sendPlaceLocation)
notify_label = tkinter.Label(self.settingsWindow, textvariable = self.placeLocationStatus, font=('calibre',10, 'bold'))
self.eco_button.pack()
self.velocity_scale.pack(anchor = tkinter.CENTER)
self.set_velocity.pack()
self.display_velocity.pack()
place_position_label.pack()
place_position_entry.pack()
sub_btn.pack()
notify_label.pack()
# # function for video streaming
#Used for testing, change to network stream for final version
def video_stream(self):
"""
Creates a NetGear socket (TCP for network protocol) to handle video stream from server.
Args:
self.systemStatus (string): Used to determine if the system needs to shutdown or go to standby.
Returns:
None
"""
hostname = socket.gethostname()
IPAddr = socket.gethostbyname(hostname)
# define tweak flags
options = {"compression_format": ".jpg", "compression_param": cv2.IMREAD_COLOR}
# Define Netgear Client at given IP address and define parameters
# !!! change following IP address '192.168.x.xxx' with yours !!!
self.NetGearclient = NetGear(
address=IPAddr,
port="5454",
protocol="tcp",
pattern=0,
receive_mode=True,
**options
)
self.lmain.grid(row=0, column=1, rowspan=3)
# loop over
while True:
if(self.systemStatus == "Online"):
# receive frames from network
frame = self.NetGearclient.recv()
# check for received frame if Nonetype
if frame is None:
break
thread = threading.Thread(target = self.updateVideoWindow, args = (frame, ))
thread.start()
def updateVideoWindow(self, frame):
"""
Function is called by video_stream() to update the frame (image) on the GUI.
Runs in a thread to avoid blocking of new frame (update frame as fast as network transmits).
Args:
            frame (OpenCV frame): image received from the server.
self.lmain (tkinter Label): Label used to display image on the GUI
Returns:
None
"""
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
self.lmain.imgtk = imgtk
self.lmain.configure(image=imgtk)
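    # Note: Tkinter widgets are generally not thread-safe, so configuring
    # self.lmain from the worker threads spawned in video_stream() can be
    # fragile. A commonly used, safer variant (illustrative, not the
    # original code) hands the frame back to the Tk main loop instead:
    #
    #     def updateVideoWindowSafe(self, frame):
    #         self.root.after(0, self.updateVideoWindow, frame)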
#Modify function to return data decoded
#Not sure if this function is really needed, used mostly for testing purposes
# def receive(self):
# jsonReceived = self.conn.recv(1024)
# jsonReceived = json.loads(jsonReceived.decode("utf-8"))
# print(jsonReceived["first"])
def send(self, data):
"""
        Converts data (json dict.) into a string and transmits it to the connected server socket.
Args:
self.conn (socket): Uses socket connection (conn) defined in creation of class.
data (dictionary): Data to be sent over network in the form of json.
Returns:
None
"""
jsonResult = json.dumps(data)
self.conn.send(bytes(jsonResult, encoding="utf-8"))
def on_exit(self):
"""
        Sends a shutdown notification over the network to the rest of the system (connected to a button event).
        Shuts down the video stream (NetGear) and the normal socket.
        Finally exits the program.
Args:
self.conn (socket): Uses socket connection (conn) defined in creation of class
self.NetGearclient (NetGearSocket): Uses NetGearSocket defined in video_stream()
Returns:
None
"""
try:
jsonResult = {"first":"Client 2", "second":"Shut Down System"}
self.send(jsonResult)
except:
print("Error sending close command to server")
try:
self.root.destroy()
self.NetGearclient.close()
self.conn.shutdown(socket.SHUT_RDWR)
self.conn.close()
except:
print("Error closing sockets")
finally:
os._exit(1)
def setVelocity(self):
"""
Transmits new velocity (set by user) over the network to the arm.
Args:
self.display_velocity (Tkinter Label): Updates display_velocity to notify user velocity has been updated
Returns:
None
"""
try:
jsonResult = {"first":"Client 2", "second":"Set Velocity", "third": str(self.speed.get())}
self.send(jsonResult)
sel = "New Velocity = " + str(self.speed.get())
self.display_velocity.config(text = sel, font =("New Times Roman", 14))
except Exception as e:
print(e)
    #This is mostly a joke.
    #However, it is possible to save a lot of energy (up to 21% according to some research papers).
    #It seems that as torque increases, running at about the halfway point of the torque-rpm curve is best.
    #The best approach seems to be running at 80 to 90% speed with no payload, and reducing speed as the payload (torque) increases.
    #Velocity can be reduced by about 20% with a payload vs no payload.
    #The e-series robots can measure torque change.
    #No way of knowing the power savings (if any) without testing.
def ecoMode(self):
"""
Transmits new velocity (ECO) over the network to the arm.
Args:
self.display_velocity (Tkinter Label): Updates display_velocity to notify user velocity has been updated
Returns:
None
"""
        try:
            # The source dump is truncated here. A plausible completion
            # (assumed, not the original code), mirroring setVelocity() above
            # and the 80-90% guideline from the comment block:
            jsonResult = {"first": "Client 2", "second": "Set Velocity",
                          "third": str(self.speed.get() * 0.8)}
            self.send(jsonResult)
        except Exception as e:
            print(e)
    for i in range(2):
brick_ = brick.Brick()
brick_.set_position([-1, i * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([7, i * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(3):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 10, j * 2, 5])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 22, j * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(3):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 1, j * 2 + 1, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([8, i * 4 + 1, 6])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 14, j * 4 + 1, 6])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 19, 3, 6])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([11, i * 2 + 1, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 6 + 9, j * 4 + 1, 7])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(4):
brick_ = brick.Brick()
brick_.set_position([12, i * 2, 7])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([24, i * 4 + 1, 7])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(3):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 10, j * 4 + 1, 8])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 4, j * 6, 2])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 16, j * 6, 2])
brick_.set_direction(0)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([6, j * 4 + 1, 2])
brick_.set_direction(0)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([18, j * 4 + 1, 2])
brick_.set_direction(0)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([6, j * 8 - 1, 1])
brick_.set_direction(1)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([6, j * 4 + 1, 1])
brick_.set_direction(1)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([18, j * 8 - 1, 1])
brick_.set_direction(1)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([18, j * 4 + 1, 1])
brick_.set_direction(1)
list_brick_.append(brick_)
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([0, j * 4 + 1, 2])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([3, 3, 2])
brick_.set_direction(1)
list_brick_.append(brick_)
for brick_ in list_brick_:
bricks_.add(brick_)
return bricks_
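# The builders above and below repeat the same create/position/direction/
# append block many times. A small helper (illustrative; it assumes only the
# Brick API already used in this file) could condense the grid loops:
def _add_grid(list_brick_, nx, ny, origin, step, z, direction):
    # Append an nx-by-ny grid of bricks starting at `origin`, spaced by
    # `step`, all at height `z` with the given direction.
    ox, oy = origin
    sx, sy = step
    for i in range(nx):
        for j in range(ny):
            brick_ = brick.Brick()
            brick_.set_position([ox + i * sx, oy + j * sy, z])
            brick_.set_direction(direction)
            list_brick_.append(brick_)
# e.g. _add_grid(list_brick_, 3, 4, (10, 0), (4, 2), 5, 1) reproduces one of
# the loops above.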
def car_default_1():
bricks_ = bricks.Bricks(200)
list_brick_ = []
brick_ = brick.Brick()
brick_.set_position([0, 0, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([-1, i * 2 + 1, 3 + (i % 2 == 0)])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([0, 6, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(9):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 2, j * 6, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([19, i * 4 + 1, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(9):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([18 - 2 * i, j * 2 + 2, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([1, 3, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([4, i * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([6, i * 6, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([8, i * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 11, j * 2, 5])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 6, j * 2, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([6, i * 4 + 1, 7])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 9, j * 2, 7])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 8, j * 2, 8])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(5):
for j in range(4):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 1, j * 2, 2])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([4, i * 6, 1])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([15, i * 6, 1])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([4, i * 4 + 1, 0])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([15, i * 4 + 1, 0])
brick_.set_direction(0)
list_brick_.append(brick_)
for brick_ in list_brick_:
bricks_.add(brick_)
return bricks_
def car_default_2():
bricks_ = bricks.Bricks(200)
list_brick_ = []
brick_ = brick.Brick()
brick_.set_position([0, 0, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(3):
brick_ = brick.Brick()
brick_.set_position([-1, i * 2 + 1, 3 + (i % 2 == 1)])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([0, 6, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([0, i * 4 + 1, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([3, i * 6, 5])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(5):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 4, j * 6, 3 + (i % 2 == 0)])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([1, i * 4 + 1, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([3, i * 6, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([13, i * 6, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([4, i * 4 + 1, 6])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([6, i * 4 + 1, j + 5])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 9, j * 6, 5])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
for j in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 9, j * 6, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([15, i * 4 + 1, 4])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([16, i * 6, 3])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([19, i * 6, 4])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([19, i * 4 + 1, 3])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([4, 3, 7])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([6, 2, 7])
brick_.set_direction(0)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 7, 5, 7])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 4 + 9, 1, 7])
brick_.set_direction(1)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([14, 4, 7])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([15, 3, 6])
brick_.set_direction(0)
list_brick_.append(brick_)
brick_ = brick.Brick()
brick_.set_position([17, 3, 7])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([18, i * 2 + 2, 6])
brick_.set_direction(1)
list_brick_.append(brick_)
for i in range(2):
brick_ = brick.Brick()
brick_.set_position([i * 2 + 15, 3, 5])
brick_.set_direction(0)
list_brick_.append(brick_)
    for i in range(2):
        pass  # the source dump is truncated here
progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
:type progressCallback: callable
:returns: the file that was created.
"""
if filename is None:
filename = filepath
filename = os.path.basename(filename)
filepath = os.path.abspath(filepath)
filesize = os.path.getsize(filepath)
if mimeType is None:
# Attempt to guess MIME type if not passed explicitly
mimeType, _ = mimetypes.guess_type(filepath)
with open(filepath, 'rb') as f:
return self.uploadStreamToFolder(folderId, f, filename, filesize, reference, mimeType,
progressCallback)
def _uploadContents(self, uploadObj, stream, size, progressCallback=None):
"""
Uploads contents of a file.
:param uploadObj: The upload object contain the upload id.
:type uploadObj: dict
:param stream: Readable stream object.
:type stream: file-like
:param size: The length of the file. This must be exactly equal to the
total number of bytes that will be read from ``stream``, otherwise
the upload will fail.
        :type size: int
:param progressCallback: If passed, will be called after each chunk
with progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
:type progressCallback: callable
"""
offset = 0
uploadId = uploadObj['_id']
with self.progressReporterCls(label=uploadObj.get('name', ''), length=size) as reporter:
while True:
chunk = stream.read(min(self.MAX_CHUNK_SIZE, (size - offset)))
if not chunk:
break
if isinstance(chunk, str):
chunk = chunk.encode('utf8')
uploadObj = self.post(
'file/chunk?offset=%d&uploadId=%s' % (offset, uploadId),
data=_ProgressBytesIO(chunk, reporter=reporter))
if '_id' not in uploadObj:
raise Exception(
'After uploading a file chunk, did not receive object with _id. '
'Got instead: ' + json.dumps(uploadObj))
offset += len(chunk)
if callable(progressCallback):
progressCallback({
'current': offset,
'total': size
})
if offset != size:
self.delete('file/upload/' + uploadId)
raise IncorrectUploadLengthError(
'Expected upload to be %d bytes, but received %d.' % (size, offset),
upload=uploadObj)
return uploadObj
def uploadFile(self, parentId, stream, name, size, parentType='item',
progressCallback=None, reference=None, mimeType=None):
"""
Uploads a file into an item or folder.
:param parentId: The ID of the folder or item to upload into.
:type parentId: str
:param stream: Readable stream object.
:type stream: file-like
:param name: The name of the file to create.
:type name: str
:param size: The length of the file. This must be exactly equal to the
total number of bytes that will be read from ``stream``, otherwise
the upload will fail.
        :type size: int
:param parentType: 'item' or 'folder'.
:type parentType: str
:param progressCallback: If passed, will be called after each chunk
with progress information. It passes a single positional argument
to the callable which is a dict of information about progress.
:type progressCallback: callable
:param reference: optional reference to send along with the upload.
:type reference: str
:param mimeType: MIME type to set on the file. Attempts to guess if not
explicitly passed.
:type mimeType: str or None
:returns: The file that was created on the server.
"""
params = {
'parentType': parentType,
'parentId': parentId,
'name': name,
'size': size,
'mimeType': mimeType or mimetypes.guess_type(name)[0]
}
if reference is not None:
params['reference'] = reference
obj = self.post('file', params)
if '_id' not in obj:
raise Exception(
'After creating an upload token for a new file, expected '
'an object with an id. Got instead: ' + json.dumps(obj))
return self._uploadContents(obj, stream, size, progressCallback=progressCallback)
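    # Illustrative usage of the chunked upload path above (assumes an
    # authenticated GirderClient instance `gc` and an existing folder id;
    # not part of the original source):
    #
    #     with open('data.bin', 'rb') as f:
    #         size = os.path.getsize('data.bin')
    #         f_doc = gc.uploadFile(folderId, f, 'data.bin', size,
    #                               parentType='folder')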
def uploadFileContents(self, fileId, stream, size, reference=None):
"""
Uploads the contents of an existing file.
:param fileId: ID of file to update
:param stream: Readable stream object.
:type stream: file-like
:param size: The length of the file. This must be exactly equal to the
total number of bytes that will be read from ``stream``, otherwise
the upload will fail.
        :type size: int
:param reference: optional reference to send along with the upload.
:type reference: str
"""
path = 'file/%s/contents' % fileId
params = {
'size': size
}
if reference:
params['reference'] = reference
obj = self.put(path, params)
if '_id' not in obj:
raise Exception(
'After creating an upload token for replacing file '
'contents, expected an object with an id. Got instead: ' + json.dumps(obj))
return self._uploadContents(obj, stream, size)
def addMetadataToItem(self, itemId, metadata):
"""
Takes an item ID and a dictionary containing the metadata
:param itemId: ID of the item to set metadata on.
:param metadata: dictionary of metadata to set on item.
"""
path = 'item/' + itemId + '/metadata'
obj = self.put(path, json=metadata)
return obj
def addMetadataToFolder(self, folderId, metadata):
"""
Takes a folder ID and a dictionary containing the metadata
:param folderId: ID of the folder to set metadata on.
:param metadata: dictionary of metadata to set on folder.
"""
path = 'folder/' + folderId + '/metadata'
obj = self.put(path, json=metadata)
return obj
def addMetadataToCollection(self, collectionId, metadata):
"""
Takes a collection ID and a dictionary containing the metadata
:param collectionId: ID of the collection to set metadata on.
:param metadata: dictionary of metadata to set on collection.
"""
path = 'collection/' + collectionId + '/metadata'
obj = self.put(path, json=metadata)
return obj
def transformFilename(self, name):
"""
Sanitize a resource name from Girder into a name that is safe to use
as a filesystem path.
:param name: The name to transform.
:type name: str
"""
if name in ('.', '..'):
name = '_' + name
name = name.replace(os.path.sep, '_')
if os.path.altsep:
name = name.replace(os.path.altsep, '_')
return _safeNameRegex.sub('_', name)
def _copyFile(self, fp, path):
"""
Copy the `fp` file-like object to `path` which may be a filename string
or another file-like object to write to.
"""
if isinstance(path, str):
# Use "abspath" to cleanly get the parent of ".", without following symlinks otherwise
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
with open(path, 'wb') as dst:
shutil.copyfileobj(fp, dst)
else:
# assume `path` is a file-like object
shutil.copyfileobj(fp, path)
def _streamingFileDownload(self, fileId):
"""
Download a file streaming the contents
:param fileId: The ID of the Girder file to download.
:returns: The request
"""
path = 'file/%s/download' % fileId
return self.sendRestRequest('get', path, stream=True, jsonResp=False)
def downloadFile(self, fileId, path, created=None):
"""
Download a file to the given local path or file-like object.
:param fileId: The ID of the Girder file to download.
:param path: The path to write the file to, or a file-like object.
"""
fileObj = self.getFile(fileId)
created = created or fileObj['created']
cacheKey = '\n'.join([self.urlBase, fileId, created])
# see if file is in local cache
if self.cache is not None:
fp = self.cache.get(cacheKey, read=True)
if fp:
with fp:
self._copyFile(fp, path)
return
# download to a tempfile
progressFileName = fileId
if isinstance(path, str):
progressFileName = os.path.basename(path)
req = self._streamingFileDownload(fileId)
with tempfile.NamedTemporaryFile(delete=False) as tmp:
with self.progressReporterCls(
label=progressFileName,
length=int(req.headers.get('content-length', 0))) as reporter:
for chunk in req.iter_content(chunk_size=REQ_BUFFER_SIZE):
reporter.update(len(chunk))
tmp.write(chunk)
size = os.stat(tmp.name).st_size
if size != fileObj['size']:
os.remove(tmp.name)
raise IncompleteResponseError('File %s download' % fileId, fileObj['size'], size)
# save file in cache
if self.cache is not None:
with open(tmp.name, 'rb') as fp:
self.cache.set(cacheKey, fp, read=True)
if isinstance(path, str):
# we can just rename the tempfile
# Use "abspath" to cleanly get the parent of ".", without following symlinks otherwise
os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)
shutil.move(tmp.name, path)
else:
# write to file-like object
with open(tmp.name, 'rb') as fp:
shutil.copyfileobj(fp, path)
# delete the temp file
os.remove(tmp.name)
def downloadFileAsIterator(self, fileId, chunkSize=REQ_BUFFER_SIZE):
"""
Download a file streaming the contents as an iterator.
:param fileId: The ID of the Girder file to download.
:param chunkSize: The chunk size to download the contents in.
:returns: The request content iterator.
"""
req = self._streamingFileDownload(fileId)
return req.iter_content(chunk_size=chunkSize)
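    # Illustrative usage (not part of the original source): stream a file to
    # disk without holding the whole payload in memory.
    #
    #     with open(localPath, 'wb') as out:
    #         for chunk in gc.downloadFileAsIterator(fileId):
    #             out.write(chunk)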
def downloadItem(self, itemId, dest, name=None):
"""
Download an item from Girder into a local folder. Each file in the
item will be placed into the directory specified by the dest parameter.
If the item contains multiple files or a single file with a different
name than the item, the item will be created as a directory under dest
and the files will become files within that directory.
:param itemId: The Id of the Girder item to download.
:param dest: The destination directory to write the item into.
:param name: If the item name is known in advance, you may pass it here
which will save a lookup to the server.
"""
        if name is None:
            # Assumed completion (the source dump truncates mid-statement):
            # look up the item to recover its name.
            item = self.getItem(itemId)
            name = item['name']
method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_firmware_upgrade_item']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_firmware_upgrade_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_firmware_upgrade_item' is set
if ('cluster_firmware_upgrade_item' not in params) or (params['cluster_firmware_upgrade_item'] is None):
raise ValueError("Missing the required parameter `cluster_firmware_upgrade_item` when calling `create_cluster_firmware_upgrade_item`")
resource_path = '/platform/3/upgrade/cluster/firmware/upgrade'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cluster_firmware_upgrade_item' in params:
body_params = params['cluster_firmware_upgrade_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def create_cluster_patch_abort_item(self, cluster_patch_abort_item, **kwargs):
"""
Abort the previous action performed by the patch system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_cluster_patch_abort_item(cluster_patch_abort_item, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Empty cluster_patch_abort_item: (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_patch_abort_item']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_patch_abort_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_patch_abort_item' is set
if ('cluster_patch_abort_item' not in params) or (params['cluster_patch_abort_item'] is None):
raise ValueError("Missing the required parameter `cluster_patch_abort_item` when calling `create_cluster_patch_abort_item`")
resource_path = '/platform/3/upgrade/cluster/patch/abort'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cluster_patch_abort_item' in params:
body_params = params['cluster_patch_abort_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def create_cluster_retry_last_action_item(self, cluster_retry_last_action_item, **kwargs):
"""
        Retry the last upgrade action, in case the previous attempt failed.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_cluster_retry_last_action_item(cluster_retry_last_action_item, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ClusterRetryLastActionItem cluster_retry_last_action_item: (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_retry_last_action_item']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_retry_last_action_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_retry_last_action_item' is set
if ('cluster_retry_last_action_item' not in params) or (params['cluster_retry_last_action_item'] is None):
raise ValueError("Missing the required parameter `cluster_retry_last_action_item` when calling `create_cluster_retry_last_action_item`")
resource_path = '/platform/3/upgrade/cluster/retry_last_action'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cluster_retry_last_action_item' in params:
body_params = params['cluster_retry_last_action_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def create_cluster_rollback_item(self, cluster_rollback_item, **kwargs):
"""
Rollback the upgrade of a cluster.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_cluster_rollback_item(cluster_rollback_item, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Empty cluster_rollback_item: (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_rollback_item']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_rollback_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_rollback_item' is set
if ('cluster_rollback_item' not in params) or (params['cluster_rollback_item'] is None):
raise ValueError("Missing the required parameter `cluster_rollback_item` when calling `create_cluster_rollback_item`")
resource_path = '/platform/3/upgrade/cluster/rollback'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cluster_rollback_item' in params:
body_params = params['cluster_rollback_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def create_cluster_upgrade_item(self, cluster_upgrade_item, **kwargs):
"""
The settings necessary to start an upgrade.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_cluster_upgrade_item(cluster_upgrade_item, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ClusterUpgradeItem cluster_upgrade_item: (required)
:return: Empty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_upgrade_item']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_cluster_upgrade_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_upgrade_item' is set
if ('cluster_upgrade_item' not in params) or (params['cluster_upgrade_item'] is None):
raise ValueError("Missing the required parameter `cluster_upgrade_item` when calling `create_cluster_upgrade_item`")
resource_path = '/platform/3/upgrade/cluster/upgrade'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cluster_upgrade_item' in params:
body_params = params['cluster_upgrade_item']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Empty',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_cluster_firmware_progress(self, **kwargs):
"""
Cluster wide firmware upgrade status info.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_cluster_firmware_progress(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ClusterFirmwareProgress
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cluster_firmware_progress" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/3/upgrade/cluster/firmware/progress'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ClusterFirmwareProgress',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_cluster_firmware_status(self, **kwargs):
"""
The firmware status for the cluster.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_cluster_firmware_status(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool devices: Show devices. If false, this returns an empty list. Default is false.
:param bool package: Show package. If false, this returns an empty list. Default is false.
:return: ClusterFirmwareStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['devices', 'package']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cluster_firmware_status" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/3/upgrade/cluster/firmware/status'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'devices' in params:
query_params['devices'] = params['devices']
if 'package' in params:
query_params['package'] = params['package']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ClusterFirmwareStatus',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
parent_id):
request = {'page': '1', 'pageSize': self.MAX_PAGE_SIZE, 'partialParameters': 'parent_id={}'.format(parent_id)}
result = requests.post(str(self.api_url) + self.FILTER_RUNS,
data=json.dumps(request), headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError(result.json()['message'])
return result.json()['payload']['elements']
def search_runs(self, parameters, status=None, run_id=None):
if not parameters:
raise RuntimeError("Parameters are required to search pipeline runs")
request = {'page': '1', 'pageSize': self.MAX_PAGE_SIZE, 'timezoneOffsetInMinutes': 0}
expression_params = []
for param in parameters:
expression_params.append(("parameter.{}".format(param[0]), param[1]))
if status is not None:
expression_params.append(("status", status))
if run_id is not None:
expression_params.append(("id", str(run_id)))
if len(expression_params) == 1:
parameter = expression_params[0]
request["filterExpression"] = self.parameter_json(parameter[0], parameter[1])
else:
expressions = []
parameter = expression_params.pop()
expressions.append(self.parameter_json(parameter[0], parameter[1]))
parameter = expression_params.pop()
expressions.append(self.parameter_json(parameter[0], parameter[1]))
current_expression = {"filterExpressionType": "AND", "expressions": expressions}
while len(expression_params) > 0:
parameter = expression_params.pop()
current_expression = {"filterExpressionType": "AND",
"expressions": [current_expression, self.parameter_json(parameter[0], parameter[1])]}
request["filterExpression"] = current_expression
result = requests.post(str(self.api_url) + self.SEARCH_RUNS_URL,
data=json.dumps(request), headers=self.header, verify=False)
result_json = result.json()
if 'error' in result_json:
raise RuntimeError(result_json['error'])
if result_json['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError(result_json['message'])
if 'payload' not in result_json or 'elements' not in result_json['payload']:
return []
return result_json['payload']['elements']
def parameter_json(self, field, value):
return {"field": field, "value": value,
"filterExpressionType": "LOGICAL", "operand": "="}
def log_event(self, log_entry, log_file_name=None, omit_console=False):
log_entry.date = datetime.datetime.utcfromtimestamp(time.time()).strftime(DATE_FORMAT)
log_entry.date = log_entry.date[0:len(log_entry.date) - 3]
if log_file_name is None:
log_file_name = "{}.log".format(log_entry.taskName)
try:
log_text_formatted = "[{}]\t{}\t{}\n".format(log_entry.date, log_entry.status, log_entry.taskName)
if not omit_console:
print(log_entry.logText)
try:
if self.api_url:
requests.post(str(self.api_url) + self.LOG_URL.format(log_entry.runId),
data=log_entry.to_json(), headers=self.header, verify=False)
except Exception as api_e:
if not omit_console:
print("Failed to save logs to API, logs will be stored to text file")
try:
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
log_path = os.path.join(self.log_dir, log_file_name)
with open(log_path, "a") as log_file:
log_file.write(log_text_formatted)
log_file.write(log_entry.logText)
if not log_entry.logText.endswith("\n"):
log_file.write("\n")
except Exception as file_e:
if not omit_console:
print("Failed to save logs to file")
except Exception as e:
if not omit_console:
print("Failed to save task log: " + str(e.message))
def docker_registry_load_all(self):
try:
result = requests.get(str(self.api_url) + self.REGISTRY_LOAD_ALL_URL, headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to load docker registries. API response: {}'.format(result.json()['message']))
return result.json()['payload']['registries']
except Exception as e:
raise RuntimeError("Failed to load docker registries. \n {}".format(e))
def tool_group_in_registry_load_all(self, registry):
try:
result = requests.get(str(self.api_url) + self.TOOL_GROUP_IN_REGISTRY_LOAD_ALL_URL.format(registry), headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to load tool groups. API response: {}'.format(result.json()['message']))
return result.json()['payload']
except Exception as e:
raise RuntimeError("Failed to load tool groups. \n {}".format(e))
def tool_group_load(self, id):
try:
result = requests.get(str(self.api_url) + self.TOOL_GROUP_LOAD_URL.format(id), headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to load tool group. API response: {}'.format(result.json()['message']))
return result.json()['payload']
except Exception as e:
raise RuntimeError("Failed to load tool group. \n {}".format(e))
def update_status(self, run_id, status_entry):
status_entry.endDate = datetime.datetime.utcfromtimestamp(time.time()).strftime(DATE_FORMAT)
status_entry.endDate = status_entry.endDate[0:len(status_entry.endDate) - 3]
try:
requests.post(str(self.api_url) + self.STATUS_URL.format(run_id), data=status_entry.to_json(),
headers=self.header, verify=False)
except Exception:
print("Failed to update task status.")
def update_commit_status(self, run_id, status):
try:
commit_status_json = json.dumps({"commitStatus": status})
result = requests.post(str(self.api_url) + self.COMMIT_STATUS_URL.format(run_id),
data=commit_status_json, headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to update commit run status. API response: {}'.format(result.json()['message']))
return result.json()['payload']
except Exception as e:
raise RuntimeError("Failed to update commit status. \n {}".format(e))
def load_pipeline(self, pipeline_id):
try:
result = requests.get(str(self.api_url) + self.LOAD_PIPELINE_URL.format(pipeline_id),
headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to fetch pipeline info. API response: {}'.format(result.json()['message']))
return result.json()['payload']
except Exception as e:
raise RuntimeError("Failed to fetch pipeline info. \n {}".format(e))
def load_all_pipelines(self):
try:
result = requests.get(str(self.api_url) + self.LOAD_ALL_PIPELINES_URL, headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to fetch all pipelines info. API response: {}'.format(result.json()['message']))
return result.json()['payload']
except Exception as e:
raise RuntimeError("Failed to fetch all pipelines info. \n {}".format(e))
def find_pipeline(self, pipeline_name):
try:
result = requests.get(str(self.api_url) + self.FIND_PIPELINE_URL.format(pipeline_name),
headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to fetch pipeline info. API response: {}'.format(result.json()['message']))
return result.json()['payload']
except Exception as e:
raise RuntimeError("Failed to fetch pipeline info. \n {}".format(e))
def get_pipeline_clone_url(self, pipeline_id):
try:
result = requests.get(str(self.api_url) + self.CLONE_PIPELINE_URL.format(pipeline_id),
headers=self.header, verify=False)
if 'error' in result.json() or result.json()['status'] != self.RESPONSE_STATUS_OK:
raise RuntimeError('Failed to fetch pipeline clone URL. API response: {}'.format(result.json()['message']))
return result.json()['payload']
except Exception as e:
raise RuntimeError("Failed to update commit status. \n {}".format(e))
def load_writable_storages(self):
try:
result = self.execute_request(str(self.api_url) + self.LOAD_WRITABLE_STORAGES)
if result is None:
return []
return [DataStorage.from_json(item) for item in result]
except Exception as e:
raise RuntimeError("Failed to load storages with READ and WRITE permissions. "
"Error message: {}".format(str(e.message)))
def load_available_storages(self):
try:
result = self.execute_request(str(self.api_url) + self.LOAD_AVAILABLE_STORAGES)
if result is None:
return []
return [DataStorage.from_json(item) for item in result]
except Exception as e:
raise RuntimeError("Failed to load storages with READ and WRITE permissions. "
"Error message: {}".format(str(e.message)))
def load_available_storages_with_share_mount(self):
try:
result = self.execute_request(str(self.api_url) + self.LOAD_AVAILABLE_STORAGES_WITH_MOUNTS)
if result is None:
return []
return [DataStorageWithShareMount.from_json(item) for item in result]
except Exception as e:
raise RuntimeError("Failed to load storages with READ and WRITE permissions. "
"Error message: {}".format(str(e.message)))
def load_metadata(self, entity_id, entity_class):
try:
data = [{
"entityId": entity_id,
"entityClass": entity_class
}]
result = self.execute_request(str(self.api_url) + self.LOAD_METADATA,
method="post",
data=json.dumps(data))
if not result or not "data" in result[0]:
return []
return result[0]["data"]
except Exception as e:
raise RuntimeError("Failed to load metadata for the given entity. "
"Error message: {}".format(str(e.message)))
def load_entities(self, entities_ids):
try:
result = self.execute_request(str(self.api_url) + self.LOAD_ENTITIES_DATA, method='post',
data="[%s]" % entities_ids)
return {} if result is None else result
except BaseException as e:
raise RuntimeError("Failed to load entities data. "
"Error message: {}".format(str(e.message)))
def load_dts_registry(self):
try:
result = self.execute_request(str(self.api_url) + self.LOAD_DTS, method='get')
return {} if result is None else result
except BaseException as e:
raise RuntimeError("Failed to load DTS registry. "
"Error message: {}".format(str(e.message)))
def load_configuration(self, configuration_id):
try:
result = self.execute_request(str(self.api_url) + self.LOAD_CONFIGURATION % configuration_id, method='get')
return {} if result is None else result
except BaseException as e:
raise RuntimeError("Failed to load configuration. "
"Error message: {}".format(str(e.message)))
def get_preference(self, preference_name):
try:
result = self.execute_request(str(self.api_url) + self.GET_PREFERENCE % preference_name, method='get')
return {} if result is None else result
except BaseException as e:
raise RuntimeError("Failed to get system preference %s. "
"Error message: %s" % (preference_name, e.message))
def load_tool_version_settings(self, tool_id, version):
get_tool_version_settings_url = self.TOOL_VERSION_SETTINGS % tool_id
if version:
get_tool_version_settings_url = "%s?version=%s" % (get_tool_version_settings_url, version)
try:
result = self.execute_request(str(self.api_url) + get_tool_version_settings_url, method='get')
return {} if result is None else result
except BaseException as e:
raise RuntimeError("Failed to load settings for tool %d. "
"Error message: %s" % (tool_id, e.message))
def create_setting_for_tool_version(self, tool_id, version, settings):
tool_version_settings_url = self.TOOL_VERSION_SETTINGS % tool_id
tool_version_settings_url = "%s?version=%s" % (tool_version_settings_url, version)
try:
result = self.execute_request(str(self.api_url) + tool_version_settings_url, method='post',
data=json.dumps(settings))
return {} if result is None else result
except BaseException as e:
raise RuntimeError("Failed to load settings for tool %d. "
"Error message: %s" % (tool_id, e.message))
def add_pipeline_repository_hook(self, pipeline_id):
try:
result = self.execute_request(str(self.api_url) + self.ADD_PIPELINE_REPOSITORY_HOOK % str(pipeline_id),
method='post')
return {} if result is None else result
except Exception as e:
raise RuntimeError("Failed to add hook to pipeline repository. \n {}".format(e))
def search(self, query, types, page_size=50, offset=0, aggregate=True, highlight=True):
try:
data = {
"query": query,
"pageSize": page_size,
"offset": offset,
"aggregate": aggregate,
"highlight": highlight,
"filterTypes": types
}
result = self.execute_request(str(self.api_url) + '/search', method='post', data=json.dumps(data))
return {} if result is None else result
except Exception as e:
raise RuntimeError('Search failed for query %s and %s types' % (query, ','.join(types)),
"Error message: %s" % e.message)
def create_folder(self, name, parent_id=None):
try:
data = {
"name": name,
"parentId": parent_id
}
result = self.execute_request(str(self.api_url) + self.FOLDER_REGISTER,
method="post",
data=json.dumps(data))
return {} if result is None else result
except Exception as e:
raise RuntimeError("Failed to create folder with name %s." % name,
"Error message: {}".format(str(e.message)))
def delete_folder(self, folder_id):
try:
self.execute_request(str(self.api_url) + self.FOLDER_DELETE % folder_id,
method="delete")
except Exception as e:
raise RuntimeError("Failed to delete folder with ID %d." % folder_id,
"Error message: {}".format(str(e.message)))
def create_pipeline(self, pipeline_data):
try:
result = self.execute_request(str(self.api_url) + self.PIPELINE_CREATE, method="post",
data=json.dumps(pipeline_data))
return {} if result is None else result
except Exception as e:
raise RuntimeError("Failed to create pipeline.", "Error message: {}".format(str(e.message)))
def delete_pipeline(self, pipeline_id):
try:
self.execute_request(str(self.api_url) + self.PIPELINE_DELETE % pipeline_id,
method="delete")
except Exception as e:
raise RuntimeError("Failed to delete pipeline with ID %d." % pipeline_id,
"Error message: {}".format(str(e.message)))
def datastorage_create(self, datastorage_data, process_on_cloud=True):
try:
url = str(self.api_url) + '/datastorage/save' + '?cloud=%s' % str(process_on_cloud).lower()
result = self.execute_request(url, method="post", data=json.dumps(datastorage_data))
return {} if result is None else result
except Exception as e:
raise RuntimeError("Failed to create data storage.", "Error message: {}".format(str(e.message)))
{'city': 'Chatham', 'state': 'Massachusetts'},
{'city': 'Chatham', 'state': 'New Jersey'},
{'city': 'Chatham', 'state': 'Illinois'},
{'city': 'Chattanooga', 'state': 'Tennessee'},
{'city': 'Cheat Lake', 'state': 'West Virginia'},
{'city': 'Cheektowaga', 'state': 'New York'},
{'city': 'Cheektowaga', 'state': 'New York'},
{'city': 'Chehalis', 'state': 'Washington'},
{'city': 'Chelmsford', 'state': 'Massachusetts'},
{'city': 'Chelsea', 'state': 'Massachusetts'},
{'city': 'Chenango', 'state': 'New York'},
{'city': 'Cheney', 'state': 'Washington'},
{'city': '<NAME>', 'state': 'New Jersey'},
{'city': 'Cherryland', 'state': 'California'},
{'city': 'Chesapeake', 'state': 'Virginia'},
{'city': 'Chesapeake Ranch Estates-Drum Point', 'state': 'Maryland'},
{'city': 'Cheshire', 'state': 'Connecticut'},
{'city': 'Chester', 'state': 'New York'},
{'city': 'Chester', 'state': 'Virginia'},
{'city': 'Chester', 'state': 'South Carolina'},
{'city': 'Chester', 'state': 'Pennsylvania'},
{'city': 'Chesterfield', 'state': 'Missouri'},
{'city': 'Chesterton', 'state': 'Indiana'},
{'city': 'Chestnut Ridge', 'state': 'New York'},
{'city': 'Cheval', 'state': 'Florida'},
{'city': 'Cheverly', 'state': 'Maryland'},
{'city': 'Cheviot', 'state': 'Ohio'},
{'city': 'Chevy Chase', 'state': 'Maryland'},
{'city': 'Cheyenne', 'state': 'Wyoming'},
{'city': 'Chicago', 'state': 'Illinois'},
{'city': 'Chicago Heights', 'state': 'Illinois'},
{'city': 'Chicago Ridge', 'state': 'Illinois'},
{'city': 'Chickasaw', 'state': 'Alabama'},
{'city': 'Chickasha', 'state': 'Oklahoma'},
{'city': 'Chico', 'state': 'California'},
{'city': 'Chicopee', 'state': 'Massachusetts'},
{'city': 'Childress', 'state': 'Texas'},
{'city': 'Chili', 'state': 'New York'},
{'city': 'Chillicothe', 'state': 'Missouri'},
{'city': 'Chillicothe', 'state': 'Ohio'},
{'city': 'Chillum', 'state': 'Maryland'},
{'city': 'Chino', 'state': 'California'},
{'city': 'Chino Hills', 'state': 'California'},
{'city': 'Chino Valley', 'state': 'Arizona'},
{'city': 'Chippewa Falls', 'state': 'Wisconsin'},
{'city': 'Choctaw', 'state': 'Oklahoma'},
{'city': 'Chowchilla', 'state': 'California'},
{'city': 'Christiansburg', 'state': 'Virginia'},
{'city': 'Chubbuck', 'state': 'Idaho'},
{'city': 'Chula Vista', 'state': 'California'},
{'city': 'Cicero', 'state': 'Illinois'},
{'city': 'Cicero', 'state': 'New York'},
{'city': 'Cimarron Hills', 'state': 'Colorado'},
{'city': 'Cincinnati', 'state': 'Ohio'},
{'city': 'Cinco Ranch', 'state': 'Texas'},
{'city': 'Circleville', 'state': 'Ohio'},
{'city': 'Citrus', 'state': 'California'},
{'city': 'Citrus Heights', 'state': 'California'},
{'city': 'Citrus Park', 'state': 'Florida'},
{'city': 'Citrus Ridge', 'state': 'Florida'},
{'city': 'City of The Dalles', 'state': 'Oregon'},
{'city': 'Claiborne', 'state': 'Louisiana'},
{'city': 'Clairton', 'state': 'Pennsylvania'},
{'city': 'Clanton', 'state': 'Alabama'},
{'city': 'Claremont', 'state': 'California'},
{'city': 'Claremont', 'state': 'New Hampshire'},
{'city': 'Claremore', 'state': 'Oklahoma'},
{'city': 'Clarence', 'state': 'New York'},
{'city': 'Clarendon Hills', 'state': 'Illinois'},
{'city': 'Clarion', 'state': 'Pennsylvania'},
{'city': 'Clark', 'state': 'New Jersey'},
{'city': 'Clarksburg', 'state': 'West Virginia'},
{'city': 'Clarksdale', 'state': 'Mississippi'},
{'city': 'Clarkson', 'state': 'New York'},
{'city': 'Clarkston', 'state': 'Georgia'},
{'city': 'Clarkston', 'state': 'Washington'},
{'city': 'Clarkston Heights-Vineland', 'state': 'Washington'},
{'city': 'Clarkstown', 'state': 'New York'},
{'city': 'Clarksville', 'state': 'Indiana'},
{'city': 'Clarksville', 'state': 'Arkansas'},
{'city': 'Clarksville', 'state': 'Tennessee'},
{'city': 'Claverack', 'state': 'New York'},
{'city': 'Clawson', 'state': 'Michigan'},
{'city': 'Clay', 'state': 'New York'},
{'city': 'Claymont', 'state': 'Delaware'},
{'city': 'Clayton', 'state': 'California'},
{'city': 'Clayton', 'state': 'Missouri'},
{'city': 'Clayton', 'state': 'New Jersey'},
{'city': 'Clayton', 'state': 'Ohio'},
{'city': 'Clayton', 'state': 'North Carolina'},
{'city': 'Clear Lake', 'state': 'Iowa'},
{'city': 'Clearfield', 'state': 'Pennsylvania'},
{'city': 'Clearfield', 'state': 'Utah'},
{'city': 'Clearlake', 'state': 'California'},
{'city': 'Clearwater', 'state': 'Florida'},
{'city': 'Cleburne', 'state': 'Texas'},
{'city': 'Clemmons', 'state': 'North Carolina'},
{'city': 'Clemson', 'state': 'South Carolina'},
{'city': 'Clermont', 'state': 'Florida'},
{'city': 'Cleveland', 'state': 'Mississippi'},
{'city': 'Cleveland', 'state': 'Tennessee'},
{'city': 'Cleveland', 'state': 'Texas'},
{'city': 'Cleveland', 'state': 'Ohio'},
{'city': 'Cleveland Heights', 'state': 'Ohio'},
{'city': 'Clewiston', 'state': 'Florida'},
{'city': 'Cliffside Park', 'state': 'New Jersey'},
{'city': 'Clifton', 'state': 'New Jersey'},
{'city': 'Clifton', 'state': 'Colorado'},
{'city': 'Clifton Heights', 'state': 'Pennsylvania'},
{'city': 'Clifton Park', 'state': 'New York'},
{'city': 'Clinton', 'state': 'Mississippi'},
{'city': 'Clinton', 'state': 'Missouri'},
{'city': 'Clinton', 'state': 'Iowa'},
{'city': 'Clinton', 'state': 'Maryland'},
{'city': 'Clinton', 'state': 'Michigan'},
{'city': 'Clinton', 'state': 'Massachusetts'},
{'city': 'Clinton', 'state': 'Massachusetts'},
{'city': 'Clinton', 'state': 'Connecticut'},
{'city': 'Clinton', 'state': 'Illinois'},
{'city': 'Clinton', 'state': 'Oklahoma'},
{'city': 'Clinton', 'state': 'North Carolina'},
{'city': 'Clinton', 'state': 'Tennessee'},
{'city': 'Clinton', 'state': 'South Carolina'},
{'city': 'Clinton', 'state': 'Utah'},
{'city': 'Clive', 'state': 'Iowa'},
{'city': 'Cloquet', 'state': 'Minnesota'},
{'city': 'Closter', 'state': 'New Jersey'},
{'city': 'Cloverdale', 'state': 'California'},
{'city': 'Cloverleaf', 'state': 'Texas'},
{'city': 'Cloverly', 'state': 'Maryland'},
{'city': 'Clovis', 'state': 'New Mexico'},
{'city': 'Clovis', 'state': 'California'},
{'city': 'Clute', 'state': 'Texas'},
{'city': 'Clyde', 'state': 'Ohio'},
{'city': 'Coachella', 'state': 'California'},
{'city': 'Coalinga', 'state': 'California'},
{'city': 'Coatesville', 'state': 'Pennsylvania'},
{'city': 'Cobleskill', 'state': 'New York'},
{'city': 'Cochituate', 'state': 'Massachusetts'},
{'city': 'Cockeysville', 'state': 'Maryland'},
{'city': 'Cocoa', 'state': 'Florida'},
{'city': 'Cocoa Beach', 'state': 'Florida'},
{'city': 'Coconut Creek', 'state': 'Florida'},
{'city': 'Cody', 'state': 'Wyoming'},
{'city': 'Coeur d’Alene', 'state': 'Idaho'},
{'city': 'Coeymans', 'state': 'New York'},
{'city': 'Coffeyville', 'state': 'Kansas'},
{'city': 'Cohasset', 'state': 'Massachusetts'},
{'city': 'Cohoes', 'state': 'New York'},
{'city': 'Colchester', 'state': 'Vermont'},
{'city': 'Colchester', 'state': 'Connecticut'},
{'city': 'Coldwater', 'state': 'Michigan'},
{'city': 'Colesville', 'state': 'Maryland'},
{'city': 'College', 'state': 'Alaska'},
{'city': 'College Park', 'state': 'Georgia'},
{'city': 'College Park', 'state': 'Maryland'},
{'city': 'College Place', 'state': 'Washington'},
{'city': 'College Station', 'state': 'Texas'},
{'city': 'Collegedale', 'state': 'Tennessee'},
{'city': 'Collegeville', 'state': 'Pennsylvania'},
{'city': 'Colleyville', 'state': 'Texas'},
{'city': '<NAME>-Cresthaven', 'state': 'Florida'},
{'city': 'Collierville', 'state': 'Tennessee'},
{'city': 'Collingdale', 'state': 'Pennsylvania'},
{'city': 'Collingswood', 'state': 'New Jersey'},
{'city': 'Collins', 'state': 'New York'},
{'city': 'Collinsville', 'state': 'Illinois'},
{'city': 'Collinsville', 'state': 'Virginia'},
{'city': 'Colonia', 'state': 'New Jersey'},
{'city': 'Colonial Heights', 'state': 'Virginia'},
{'city': 'Colonial Heights', 'state': 'Tennessee'},
{'city': 'Colonial Park', 'state': 'Pennsylvania'},
{'city': 'Colonie', 'state': 'New York'},
{'city': 'Colonie', 'state': 'New York'},
{'city': 'Colorado Springs', 'state': 'Colorado'},
{'city': 'Colton', 'state': 'California'},
{'city': 'Columbia', 'state': 'Illinois'},
{'city': 'Columbia', 'state': 'Missouri'},
{'city': 'Columbia', 'state': 'Mississippi'},
{'city': 'Columbia', 'state': 'Maryland'},
{'city': 'Columbia', 'state': 'Pennsylvania'},
{'city': 'Columbia', 'state': 'Tennessee'},
{'city': 'Columbia', 'state': 'South Carolina'},
{'city': 'Columbia City', 'state': 'Indiana'},
{'city': 'Columbia Heights', 'state': 'Minnesota'},
{'city': 'Columbine', 'state': 'Colorado'},
{'city': 'Columbus', 'state': 'Indiana'},
{'city': 'Columbus', 'state': 'Georgia'},
{'city': 'Columbus', 'state': 'Mississippi'},
{'city': 'Columbus', 'state': 'Nebraska'},
{'city': 'Columbus', 'state': 'Ohio'},
{'city': 'Commack', 'state': 'New York'},
{'city': 'Commerce', 'state': 'Texas'},
{'city': 'Commerce', 'state': 'California'},
{'city': 'Commerce City', 'state': 'Colorado'},
{'city': 'Compton', 'state': 'California'},
{'city': 'Comstock Park', 'state': 'Michigan'},
{'city': 'Concord', 'state': 'Massachusetts'},
{'city': 'Concord', 'state': 'Missouri'},
{'city': 'Concord', 'state': 'New Hampshire'},
{'city': 'Concord', 'state': 'New York'},
{'city': 'Concord', 'state': 'California'},
{'city': 'Concord', 'state': 'North Carolina'},
{'city': 'Congers', 'state': 'New York'},
{'city': 'Conley', 'state': 'Georgia'},
{'city': 'Conneaut', 'state': 'Ohio'},
{'city': 'Connellsville', 'state': 'Pennsylvania'},
{'city': 'Connersville', 'state': 'Indiana'},
{'city': 'Conning Towers-Nautilus Park', 'state': 'Connecticut'},
{'city': 'Conover', 'state': 'North Carolina'},
{'city': 'Conroe', 'state': 'Texas'},
{'city': 'Conshohocken', 'state': 'Pennsylvania'},
{'city': 'Converse', 'state': 'Texas'},
{'city': 'Conway', 'state': 'South Carolina'},
{'city': 'Conway', 'state': 'Florida'},
{'city': 'Conway', 'state': 'Arkansas'},
{'city': 'Conway', 'state': 'New Hampshire'},
{'city': 'Conyers', 'state': 'Georgia'},
{'city': 'Cookeville', 'state': 'Tennessee'},
{'city': 'Coolidge', 'state': 'Arizona'},
{'city': 'Coon Rapids', 'state': 'Minnesota'},
{'city': 'Cooper City', 'state': 'Florida'},
{'city': 'Coos Bay', 'state': 'Oregon'},
{'city': 'Copiague', 'state': 'New York'},
{'city': 'Coppell', 'state': 'Texas'},
{'city': 'Copperas Cove', 'state': 'Texas'},
{'city': 'Coral Gables', 'state': 'Florida'},
{'city': 'Coral Hills', 'state': 'Maryland'},
{'city': 'Coral Springs', 'state': 'Florida'},
{'city': 'Coral Terrace', 'state': 'Florida'},
{'city': 'Coralville', 'state': 'Iowa'},
{'city': 'Coram', 'state': 'New York'},
{'city': 'Coraopolis', 'state': 'Pennsylvania'},
{'city': 'Corbin', 'state': 'Kentucky'},
{'city': 'Corcoran', 'state': 'California'},
{'city': 'Cordele', 'state': 'Georgia'},
{'city': 'Corinth', 'state': 'Mississippi'},
{'city': 'Corinth', 'state': 'Texas'},
{'city': 'Cornelius', 'state': 'Oregon'},
{'city': 'Cornelius', 'state': 'North Carolina'},
{'city': 'Corning', 'state': 'New York'},
{'city': 'Corning', 'state': 'New York'},
{'city': 'Corning', 'state': 'California'},
{'city': 'Cornwall', 'state': 'New York'},
{'city': 'Corona', 'state': 'California'},
{'city': 'Coronado', 'state': 'California'},
{'city': 'Corpus Christi', 'state': 'Texas'},
{'city': 'Corrales', 'state': 'New Mexico'},
{'city': 'Corry', 'state': 'Pennsylvania'},
{'city': 'Corsicana', 'state': 'Texas'},
{'city': 'Corte Madera', 'state': 'California'},
{'city': 'Cortez', 'state': 'Colorado'},
{'city': 'Cortland', 'state': 'New York'},
{'city': 'Cortland', 'state': 'Ohio'},
{'city': 'Cortlandt', 'state': 'New York'},
{'city': 'Cortlandville', 'state': 'New York'},
{'city': 'Corvallis', 'state': 'Oregon'},
{'city': 'Coshocton', 'state': 'Ohio'},
{'city': 'Costa Mesa', 'state': 'California'},
{'city': 'Cotati', 'state': 'California'},
{'city': 'Coto de Caza', 'state': 'California'},
{'city': 'Cottage Grove', 'state': 'Minnesota'},
{'city': 'Cottage Grove', 'state': 'Oregon'},
{'city': 'Cottage Lake', 'state': 'Washington'},
{'city': 'Cottonwood', 'state': 'Arizona'},
{'city': 'Cottonwood Heights', 'state': 'Utah'},
{'city': 'Cottonwood West', 'state': 'Utah'},
{'city': 'Cottonwood-Verde Village', 'state': 'Arizona'},
{'city': 'Council Bluffs', 'state': 'Iowa'},
{'city': 'Country Club', 'state': 'California'},
{'city': 'Country Club', 'state': 'Florida'},
{'city': 'Country Club Estates', 'state': 'Georgia'},
{'city': 'Country Club Hills', 'state': 'Illinois'},
{'city': 'Country Walk', 'state': 'Florida'},
{'city': 'Covedale', 'state': 'Ohio'},
{'city': 'Coventry', 'state': 'Rhode Island'},
{'city': 'Coventry', 'state': 'Connecticut'},
{'city': 'Covina', 'state': 'California'},
{'city': 'Covington', 'state': 'Georgia'},
{'city': 'Covington', 'state': 'Kentucky'},
{'city': 'Covington', 'state': 'Louisiana'},
{'city': 'Covington', 'state': 'Washington'},
{'city': 'Covington', 'state': 'Virginia'},
{'city': 'Covington', 'state': 'Tennessee'},
{'city': 'Coweta', 'state': 'Oklahoma'},
{'city': 'Coxsackie', 'state': 'New York'},
{'city': 'Crafton', 'state': 'Pennsylvania'},
{'city': 'Craig', 'state': | |
return tracer.get_sparse_matrix(format)
def sparse_matrix_and_bias(self, x, *condition_args, format: str = None, **kwargs):
key = self._condition_key(x, condition_args, kwargs)
tracer = self._get_or_trace(key)
return tracer.get_sparse_matrix(format), tracer.bias
def _condition_key(self, x, condition_args, kwargs):
kwargs['n_condition_args'] = len(condition_args)
for i, c_arg in enumerate(condition_args):
kwargs[f'_condition_arg[{i}]'] = c_arg
key, _ = key_from_args(x, cache=False, **kwargs)
assert key.backend.supports(Backend.sparse_coo_tensor)
return key
def stencil_inspector(self, *args, **kwargs):
key, _ = key_from_args(*args, cache=True, **kwargs)
tracer = self._get_or_trace(key)
def print_stencil(**indices):
pos = spatial(**indices)
print(f"{self.f.__name__}: {pos} = {' + '.join(f'{val[indices]} * {vector_add(pos, offset)}' for offset, val in tracer.val.items() if (val[indices] != 0).all)}")
return print_stencil
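# Usage sketch (hedged; assumes a traced linear function `fn` and a sample
# input `x0`): the returned closure prints the stencil at a given position,
# in roughly the form "fn: (x=2) = c0 * (x=1) + c1 * (x=3)".
#
#   inspect = fn.stencil_inspector(x0)
#   inspect(x=2)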
def jit_compile_linear(f: Callable[[X], Y]) -> 'LinearFunction[X, Y]': # TODO add cache control method, e.g. max_traces
"""
Compile an optimized representation of the linear function `f`.
For backends that support sparse tensors, a sparse matrix will be constructed for `f`.
Can be used as a decorator:
```python
@math.jit_compile_linear
def my_linear_function(x: math.Tensor) -> math.Tensor:
```
Unlike `jit_compile()`, `jit_compile_linear()` can be called during a regular jit compilation.
See Also:
`jit_compile()`
Args:
f: Function that is linear in its positional arguments.
All positional arguments must be of type `Tensor` and `f` must return a `Tensor`.
`f` may be conditioned on keyword arguments.
However, passing different values for these will cause `f` to be re-traced unless the conditioning arguments are also being traced.
Returns:
`LinearFunction` with similar signature and return values as `f`.
"""
if isinstance(f, JitFunction):
f = f.f # cannot trace linear function from jitted version
return f if isinstance(f, LinearFunction) else LinearFunction(f)
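# Minimal usage sketch (assumes phi.math's `ones` and `spatial`, defined
# elsewhere in the library): any function that is linear in its tensor
# arguments can be compiled, and its matrix representation extracted.
#
#   @jit_compile_linear
#   def double(x):
#       return 2 * x
#
#   x0 = ones(spatial(x=4))
#   matrix, bias = double.sparse_matrix_and_bias(x0)  # 2 * identity, zero bias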
class GradientFunction:
def __init__(self, f: Callable, wrt: tuple, get_output: bool, jit=False):
self.f = f
self.wrt = wrt
self.get_output = get_output
self.traces: Dict[SignatureKey, Callable] = {}
self.recorded_mappings: Dict[SignatureKey, SignatureKey] = {}
self.jit = jit
def _trace_grad(self, in_key: SignatureKey, wrt_natives):
def f_native(*natives, **kwargs):
PHI_LOGGER.debug(f"Φ-grad: Evaluating gradient of {self.f.__name__}")
assert not kwargs
in_tensors = assemble_tensors(natives, in_key.shapes)
values = assemble_tree(in_key.tree, in_tensors)
assert isinstance(values, tuple) # was disassembled from *args
with functional_derivative_evaluation(order=1):
result = self.f(*values, **in_key.kwargs) # Tensor or tuple/list of Tensors
nest, out_tensors = disassemble_tree(result)
result_natives, result_shapes = disassemble_tensors(out_tensors)
self.recorded_mappings[in_key] = SignatureKey(f_native, nest, result_shapes, None, in_key.backend, in_key.tracing)
return result_natives
functional_gradient_generator = in_key.backend.jit_compile_grad if self.jit else in_key.backend.functional_gradient
return functional_gradient_generator(f_native, wrt=wrt_natives, get_output=self.get_output)
def __call__(self, *args, **kwargs):
key, natives = key_from_args(*args, cache=True, **kwargs)
if not key.backend.supports(Backend.functional_gradient):
if math.default_backend().supports(Backend.functional_gradient):
warnings.warn(f"Using {math.default_backend()} for gradient computation because {key.backend} does not support functional_gradient()", RuntimeWarning)
key.backend = math.default_backend()
else:
raise AssertionError(f"functional_gradient() not supported by {key.backend}.")
wrt_tensors = self._track_wrt(args)
wrt_natives = self._track_wrt_natives(wrt_tensors, disassemble_tree(args)[1])
if key not in self.traces:
self.traces[key] = self._trace_grad(key, wrt_natives)
native_result = self.traces[key](*natives)
output_key = match_output_signature(key, self.recorded_mappings, self)
if self.get_output:
result_shapes = list(output_key.shapes) + [key.shapes[i] for i in wrt_tensors]
output_tensors = assemble_tensors(native_result, result_shapes)
output_structure, grad_tuple = assemble_tree((output_key.tree, [key.tree[i] for i in self.wrt]), output_tensors)
return output_structure, grad_tuple
else:
output_tensors = assemble_tensors(native_result, [key.shapes[i] for i in wrt_tensors])
return assemble_tree([key.tree[i] for i in wrt_tensors], output_tensors)
def __repr__(self):
return f"grad({self.f.__name__})"
@property
def __name__(self):
return self.f.__name__
def _track_wrt(self, args):
wrt_tensors = []
for i, arg in enumerate(args):
_, tensors = disassemble_tree(arg)
wrt_tensors.extend([i] * len(tensors))
return [t_i for t_i, arg_i in enumerate(wrt_tensors) if arg_i in self.wrt]
@staticmethod
def _track_wrt_natives(wrt_tensors, values):
wrt_natives = []
for i, value in enumerate(values):
wrt_natives.extend([i] * len(value._natives()))
return [n_i for n_i, t_i in enumerate(wrt_natives) if t_i in wrt_tensors]
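# Index bookkeeping example (hypothetical structures): for args = (x, y)
# where x disassembles into two tensors and y into one, the intermediate
# list built by _track_wrt is [0, 0, 1] (tensor position -> argument index).
# With wrt=(1,), it returns [2]: only y's tensor is differentiated.
# _track_wrt_natives then repeats the mapping one level down, from tensors
# to their backend-native components.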
def functional_gradient(f: Callable, wrt: tuple or list = (0,), get_output=True) -> Callable:
"""
Creates a function which computes the gradient of `f`.
Example:
```python
def loss_function(x, y):
prediction = f(x)
loss = math.l2_loss(prediction - y)
return loss, prediction
dx, = functional_gradient(loss_function, get_output=False)(x, y)
(loss, prediction), (dx, dy) = functional_gradient(loss_function,
wrt=(0, 1), get_output=True)(x, y)
```
Functional gradients are implemented for the following backends:
* PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward)
* TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape)
* Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad)
When the gradient function is invoked, `f` is called with tensors that track the gradient.
For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.
Args:
f: Function to be differentiated.
`f` must return a floating point `Tensor` with rank zero.
It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `get_output=True`.
All arguments for which the gradient is computed must be of dtype float or complex.
get_output: Whether the gradient function should also return the return values of `f`.
wrt: Arguments of `f` with respect to which the gradient should be computed.
Example: `wrt=(0,)` computes the gradient with respect to the first argument of `f`.
Returns:
Function with the same arguments as `f` that returns the value of `f`, auxiliary data and gradient of `f` if `get_output=True`, else just the gradient of `f`.
"""
return GradientFunction(f, wrt, get_output)
class HessianFunction:
def __init__(self, f: Callable, wrt: tuple, get_output: bool, get_gradient: bool, dim_suffixes: tuple, jit=False):
assert isinstance(dim_suffixes, tuple) and len(dim_suffixes) == 2
self.f = f
self.wrt = wrt
self.get_output = get_output
self.get_gradient = get_gradient
self.dim_suffixes = dim_suffixes
self.traces: Dict[SignatureKey, Callable] = {}
self.recorded_mappings: Dict[SignatureKey, SignatureKey] = {}
self.jit = jit
def _trace_hessian(self, in_key: SignatureKey, wrt_natives):
def f_native(*natives, **kwargs):
PHI_LOGGER.debug(f"Φ-grad: Evaluating gradient of {self.f.__name__}")
assert not kwargs
in_tensors = assemble_tensors(natives, in_key.shapes)
values = assemble_tree(in_key.tree, in_tensors)
assert isinstance(values, tuple) # was disassembled from *args
with functional_derivative_evaluation(order=2):
result = self.f(*values, **in_key.kwargs) # Tensor or tuple/list of Tensors
nest, out_tensors = disassemble_tree(result)
result_natives, result_shapes = disassemble_tensors(out_tensors)
self.recorded_mappings[in_key] = SignatureKey(f_native, nest, result_shapes, None, in_key.backend, in_key.tracing)
return result_natives
hessian_generator = in_key.backend.jit_compile_hessian if self.jit else in_key.backend.hessian
return hessian_generator(f_native, wrt=wrt_natives, get_output=self.get_output, get_gradient=self.get_gradient)
def __call__(self, *args, **kwargs):
key, natives, batch_shape = key_from_args_pack_batch(*args, cache=True, **kwargs)
if not key.backend.supports(Backend.functional_gradient):
if math.default_backend().supports(Backend.functional_gradient):
warnings.warn(f"Using {math.default_backend()} for gradient computation because {key.backend} does not support functional_gradient()", RuntimeWarning)
key.backend = math.default_backend()
else:
raise AssertionError(f"functional_gradient() not supported by {key.backend}.")
wrt_tensors: List[int] = self._track_wrt(args)
wrt_natives: List[int] = self._track_wrt_natives(wrt_tensors, disassemble_tree(args)[1])
if key not in self.traces:
self.traces[key] = self._trace_hessian(key, wrt_natives)
native_result = self.traces[key](*natives)
assert len(native_result) == 1 + int(self.get_output) + int(self.get_gradient)
output_key = match_output_signature(key, self.recorded_mappings, self)
result = ()
if self.get_output:
output_tensors = assemble_tensors(native_result[0], output_key.shapes)
output_tensors = [math.unpack_dims(t, 'batch', batch_shape) for t in output_tensors]
# output_tensors = [math.reshaped_tensor(n, [batch_shape, *shape.non_batch]) for n, shape in zip(native_result[0], output_key.shapes)]
result += assemble_tree(output_key.tree, output_tensors),
if self.get_gradient:
grad_tensors = assemble_tensors(native_result[int(self.get_output)], [key.shapes[i] for i in wrt_tensors])
grad_tensors = [math.unpack_dims(t, 'batch', batch_shape) for t in grad_tensors]
grads = assemble_tree([key.tree[i] for i in wrt_tensors], grad_tensors)
if len(grads) == 1:
grads = grads[0]
result += grads,
if len(wrt_natives) == 1:
native_hessian = native_result[-1][0][0]
hessian_tensor = math.reshaped_tensor(native_hessian, [batch_shape, *self.shape_with_suffixes(key.shapes[0].non_batch, self.dim_suffixes[0]), *self.shape_with_suffixes(key.shapes[0].non_batch, self.dim_suffixes[1])], check_sizes=True)
result += assemble_tree(key.tree[0], [hessian_tensor]),
else:
assert all([t is None for t in key.tree]), "When computing the Hessian w.r.t. multiple tensors, all inputs must be Tensors."
raise NotImplementedError()
hessian_tree = [[] for _ in self.wrt]
for i in range(len(self.wrt)):
for j in range(len(self.wrt)):
native_hessian_ij = native_result[-1][i][j]
hessian_tensor_ij = math.reshaped_tensor(native_hessian_ij, [batch_shape, *key.shapes[i].non_batch, *self.dupli_shape(key.shapes[j].non_batch)], check_sizes=True)
hessian_tree[i].append(hessian_tensor_ij)
result += tuple([tuple(col) for col in hessian_tree]),
return result
def shape_with_suffixes(self, shape: Shape, suffix: str):
return shape._with_names([n+suffix for n in shape.names])
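# Example: with the default dim_suffixes=('', '_'), a non-batch input shape
# (x=32, y=32) appears twice in the Hessian output as (x=32, y=32, x_=32, y_=32),
# keeping the two copies of the differentiated dimensions distinguishable.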
def __repr__(self):
return f"grad({self.f.__name__})"
@property
def __name__(self):
return self.f.__name__
def _track_wrt(self, args):
wrt_tensors = []
for i, arg in enumerate(args):
_, tensors = disassemble_tree(arg)
wrt_tensors.extend([i] * len(tensors))
return [t_i for t_i, arg_i in enumerate(wrt_tensors) if arg_i in self.wrt]
@staticmethod
def _track_wrt_natives(wrt_tensors, values):
wrt_natives = []
for i, value in enumerate(values):
wrt_natives.extend([i] * len(cached(value)._natives()))
return [n_i for n_i, t_i in enumerate(wrt_natives) if t_i in wrt_tensors]
def hessian(f: Callable, wrt: tuple or list = (0,), get_output=True, get_gradient=True, dim_suffixes=('', '_')) -> Callable:
"""
*Experimental. This function currently only supports PyTorch and the Hessian can only be computed w.r.t. one argument.*
Creates a function which computes the Hessian (second derivative) of `f`.
Example:
```python
def loss_function(x, y):
prediction = f(x)
loss = math.l2_loss(prediction - y)
return loss, prediction
hess, = hessian(loss_function, get_output=False, get_gradient=False)(x, y)
(loss, prediction), (dx, dy), ((dx_dx, dx_dy), (dy_dx, dy_dy)) = hessian(loss_function,
wrt=(0, 1), get_output=True)(x, y)
```
When the gradient function is invoked, `f` is called with tensors that track the gradient.
For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.
Args:
f: Function to be differentiated.
`f` must return a floating point `Tensor` with rank zero.
It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `get_output=True`.
"""
return HessianFunction(f, wrt, get_output, get_gradient, dim_suffixes)
<reponame>radhermit/bite
import argparse
from collections import OrderedDict
from functools import partial
from snakeoil.cli import arghparse
from .. import const
from ..exceptions import BiteError
from ..argparser import (
ParseStdin, Comment, IntList, IDList, StringList, IDs, ID_Maps, ID_Str_Maps,
TimeIntervalArg, IntRangeArg,
)
from ..objects import DateTime, IntRange
from ..utils import str2bool, block_edit, confirm
class SubcmdArgumentParser(arghparse.SubcmdAbbrevArgumentParser, arghparse.ArgumentParser):
def __init__(self, *args, service, **kw):
# Suppress empty attribute creation during parse_args() calls, this
# means that only the args passed in will create attributes in the
# returned namespace instead of attributes for all options using their
# default values.
super().__init__(*args, argument_default=argparse.SUPPRESS, **kw)
# register arg types and actions for subcmd parsing
self.register('type', 'ids', IDs(service))
self.register('type', 'int_list', IntList(service))
self.register('type', 'id_list', IDList(service))
self.register('type', 'id_maps', ID_Maps(service))
self.register('type', 'id_str_maps', ID_Str_Maps(service))
self.register('type', 'str_list', StringList(service))
self.register('type', 'comment', Comment(service))
self.register('type', 'date', DateTime)
self.register('type', 'time interval', TimeIntervalArg(service))
self.register('type', 'int range', IntRangeArg(service))
self.register('action', 'parse_stdin', ParseStdin)
class Subcmd(object):
_name = None
_service = None
def __init__(self, parser, service, global_opts, cmd=None):
self.service = service
if self.description is None:
raise ValueError(
f'missing description for subcommand {self._name!r}: {self.__class__}')
if cmd is None:
cmd = self._name.rsplit(' ', 1)[-1]
self.parser = parser.add_parser(
cmd, cls=partial(SubcmdArgumentParser, service=service),
quiet=False, color=False, description=self.description)
self.parser.set_defaults(fcn=self._name.replace(' ', '_'))
self.opts = self.parser.add_argument_group(f'{self._name.capitalize()} options')
# add global subcmd options
global_opts(self)
@property
def description(self):
return self.__doc__
@staticmethod
def add(service):
"""Conditionalize adding the subcommand to the parser."""
return True
def add_args(self):
"""Add arguments to the subcommand parser."""
def finalize_args(self, args):
"""Check and finalize arguments passed to the subcommand parser."""
return args
class _RegisterSubcmds(type):
"""Metaclass that registers subcommand classes to their related service classes."""
def __new__(meta, name, bases, cls_dict):
cls = type.__new__(meta, name, bases, cls_dict)
subcmd_name = getattr(cls, '_name', None)
if subcmd_name is not None:
opts_cls = next(x for x in bases if not issubclass(x, Subcmd))
subcmds = getattr(opts_cls, f'_{opts_cls.__name__}_subcmds', None)
if subcmds is None:
subcmds = OrderedDict()
setattr(opts_cls, f'_{opts_cls.__name__}_subcmds', subcmds)
subcmds.setdefault(subcmd_name.split(' ')[0], []).append(cls)
elif issubclass(cls, Subcmd):
raise ValueError(f'missing name for subcommand: {cls}')
return cls
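# Registration sketch (hypothetical service): declaring a subcommand class
# with a _name attribute under a ServiceOpts subclass records it in that
# class's registry, which the `subcmds` property later merges along the MRO.
#
#   class MyServiceOpts(ServiceOpts):
#       _service = 'my-service'
#
#   class MySearch(Search, MyServiceOpts):
#       _name = 'search'
#
# _RegisterSubcmds stores MySearch in MyServiceOpts._MyServiceOpts_subcmds
# under the key 'search'.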
class ServiceOpts(object, metaclass=_RegisterSubcmds):
_service = None
# type conversion mapping for config opts
_config_map = {
'skip_auth': str2bool,
'verify': str2bool,
'quiet': str2bool,
'columns': lambda x: setattr(const, 'COLUMNS', int(x)),
'concurrent': int,
'timeout': int,
'max_results': int,
}
def __init__(self, parser, service_name):
self.parser = parser
# flag to re-parse unparsed args for service specific options
self._reparse = False
from ..scripts.bite import service_specific_opts
self.service_opts = service_specific_opts
self.service_opts.title = f"{service_name.split('-')[0].capitalize()} specific options"
def add_main_opts(self, service):
"""Add service specific top-level options."""
raise NotImplementedError
def add_config_opts(self, args, config_opts):
"""Add service specific config options."""
try:
# merge config options, command line options override these
for k, v in config_opts.items():
if getattr(args, k, None) is None:
setattr(args, k, self._config_map.get(k, str)(v))
except ValueError as e:
raise BiteError(f'invalid config value for {k!r}: {v!r}')
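# Illustration: given config_opts = {'verify': 'false', 'timeout': '30'} and
# no matching command-line flags, this sets args.verify = False (via str2bool)
# and args.timeout = 30 (via int); values already present on the namespace
# take precedence over config values.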
def global_subcmd_opts(self, subcmd):
"""Add global subcommand options."""
@property
def subcmds(self):
# TODO: precompute this for the installed version?
d = OrderedDict()
parents = (x for x in reversed(self.__class__.__mro__)
if getattr(x, '_service', None))
for cls in parents:
subcmds = getattr(self, f'_{cls.__name__}_subcmds', None)
if subcmds:
d.update(subcmds)
return d
def _add_subcmd_args(self, subcmds, service):
subcmd_parsers = {}
registered_subcmds = []
for cls in (c for c in subcmds if c.add(service)):
cmds = cls._name.rsplit(' ', 1)
try:
subcmd_parser = subcmd_parsers[cmds[0]]
except KeyError:
if len(cmds) == 1:
subcmd_parser = self.parser
else:
# subcmds have to be listed in order so they get registered properly
raise ValueError(
f'unregistered subcommand parent {cmds[0]!r} '
f'for subcommand {cls._name!r}')
parser = subcmd_parser.add_subparsers(help='help for subcommands')
subcmd = cls(
parser=parser, service=service, cmd=cmds[-1],
global_opts=self.global_subcmd_opts)
subcmd.add_args()
subcmd_parsers[cls._name] = subcmd.parser
registered_subcmds.append(subcmd)
return registered_subcmds[0]
def add_subcmd_opts(self, service, subcmd):
"""Add subcommand specific options."""
# try to only add the options for the single subcmd
try:
subcmd = self._add_subcmd_args(self.subcmds[subcmd], service)
return subcmd
# fallback to adding all subcmd options, since the user is
# requesting help output (-h/--help) or entering unknown input
except KeyError:
for name, cmds in self.subcmds.items():
self._add_subcmd_args(cmds, service)
return None
class RequestSubcmd(Subcmd):
def add_args(self):
super().add_args()
self.opts.add_argument(
'--dry-run', action='store_true',
help='do everything except requesting or sending data')
class SendSubcmd(RequestSubcmd):
def add_args(self):
super().add_args()
self.opts.add_argument(
'--ask', action='store_true',
help='require confirmation before submitting modifications')
class ReceiveSubcmd(RequestSubcmd):
def add_args(self):
super().add_args()
self.opts.add_argument(
'-f', '--fields', type='str_list',
metavar='FIELD | FIELD,FIELD,...',
help='fields to output')
class TemplatedSubcmd(Subcmd):
def add_args(self):
super().add_args()
self.opts.add_argument(
'--template',
help='load options from a specified template')
class Search(TemplatedSubcmd, ReceiveSubcmd):
_name = 'search'
@property
def description(self):
return f"search for {self.service.item.type}s"
def add_args(self):
super().add_args()
# positional args
self.parser.add_argument(
'terms', nargs='*', metavar='TERM', action='parse_stdin',
help=f"string(s) to search for in {self.service.item.type} summary/title")
class PagedSearch(Search):
def add_args(self):
super().add_args()
# optional args
self.opts.add_argument(
'--limit', type=int,
help='limit the number of records returned in a search')
self.opts.add_argument(
'--offset', type=int,
help='set the start position for a search')
class Get(ReceiveSubcmd):
_name = 'get'
@property
def description(self):
return f"get {self.service.item.type}(s)"
def add_args(self, ids=True, history=False):
super().add_args()
# positional args
if ids:
self.parser.add_argument(
'ids', type='ids', nargs='+', metavar='ID', action='parse_stdin',
help=f"ID(s) or alias(es) of the {self.service.item.type}(s) to retrieve")
# optional args
single_action = self.opts.add_mutually_exclusive_group()
single_action.add_argument(
'-B', '--browser', action='store_true',
help=f'open {self.service.item.type} URL(s) in a browser')
single_action.add_argument(
'-U', '--url', dest='output_url', action='store_true',
help=f'output {self.service.item.type} URL(s)')
self.opts.add_argument(
'-A', '--no-attachments', action='store_false', dest='get_attachments',
help='do not show attachments')
self.opts.add_argument(
'-C', '--no-comments', action='store_false', dest='get_comments',
help='do not show comments')
if history:
self.opts.add_argument(
'-H', '--show-history', action='store_true', dest='get_changes',
help=f'show {self.service.item.type} history')
class Attachments(Subcmd):
_name = 'attachments'
@property
def description(self):
return f"get attachments from {self.service.item.type}(s)"
def add_args(self, id_map=None, item_id=True):
super().add_args()
# positional args
if id_map:
self.parser.add_argument(
'ids', type=id_map, nargs='+', metavar='ID[:A_ID[,...]]', action='parse_stdin',
help=f"{self.service.item.type} ID(s) or {self.service.item.type} ID to attachment ID map(s)")
self.parser.set_defaults(id_map=True)
else:
self.parser.add_argument(
'ids', type='ids', nargs='+', metavar='ID', action='parse_stdin',
help=f"attachment ID(s) (or {self.service.item.type} ID(s) when --item-id is used)")
# optional args
single_action = self.opts.add_mutually_exclusive_group()
if self.service.attachment_endpoint is not None:
single_action.add_argument(
'-B', '--browser', action='store_true',
help="open attachment URL(s) in a browser")
single_action.add_argument(
'-U', '--url', dest='output_url', action='store_true',
help='output attachment URL(s)')
single_action.add_argument(
'-V', '--view', action='store_true', dest='view_attachment',
help='output attachment data')
if item_id:
self.opts.add_argument(
'-I', '--item-id', action='store_true',
help='search by item ID(s) rather than attachment ID(s)')
self.opts.add_argument(
'--save-to',
help='save attachment(s) into a specified dir')
class Changes(ReceiveSubcmd):
_name = 'changes'
@property
def description(self):
return f"get changes from {self.service.item.type}(s)"
def add_args(self):
super().add_args()
# positional args
self.parser.add_argument(
'ids', type='ids', nargs='+', metavar='ID', action='parse_stdin',
help=f"ID(s) or alias(es) of the {self.service.item.type}(s) "
"to retrieve all changes")
# optional args
self.opts.add_argument(
'-n', '--number',
dest='change_num', type='int_list',
action=partial(ParseStdin, int),
help='restrict by change number(s)')
self.opts.add_argument(
'-r', '--creator',
type='str_list', action='parse_stdin',
help='restrict by person who made the change')
class Comments(ReceiveSubcmd):
_name = 'comments'
@property
def description(self):
return f"get comments from {self.service.item.type}(s)"
def add_args(self, ids=True):
super().add_args()
# positional args
if ids:
self.parser.add_argument(
'ids', type='ids', nargs='+', metavar='ID', action='parse_stdin',
help=f"ID(s) or alias(es) of the {self.service.item.type}(s) "
"to retrieve all comments")
# optional args
self.opts.add_argument(
'-n', '--number', dest='comment_num', type='int_list',
action=partial(ParseStdin, int),
help='restrict by comment number(s)')
self.opts.add_argument(
'-r', '--creator', type='str_list', action='parse_stdin',
help='restrict by the email of the person who made the comment')
self.opts.add_argument(
'-c', '--created', type='time interval', metavar='TIME_INTERVAL',
help='comments created within a specified time interval')
self.opts.add_argument(
'-m', '--modified', nargs='?', const='/now',
type='time interval', metavar='TIME_INTERVAL',
help='comments modified within a specified time interval')
class Attach(SendSubcmd):
_name = 'attach'
@property
def description(self):
return f"attach file to {self.service.item.type}(s)"
def add_args(self):
super().add_args()
self.opts.add_argument(
'-d', '--description',
help='a long description of the attachment',
dest='comment')
self.opts.add_argument(
'-t', '--title', dest='summary',
help='a short description of the attachment (default: filename)')
class Modify(TemplatedSubcmd, SendSubcmd):
_name = 'modify'
@property
def description(self):
return f"modify {self.service.item.type}(s)"
def add_args(self):
super().add_args()
# positional args
self.parser.add_argument(
'ids', type='ids', nargs='+', metavar='ID', action='parse_stdin',
help=f"ID(s) of the {self.service.item.type}(s) to modify")
# optional args
self.attr = self.parser.add_argument_group('Attribute related')
single_action = self.attr.add_mutually_exclusive_group()
single_action.add_argument(
'-c', '--comment', nargs='?', const='__BITE_EDITOR__',
type='comment', action='parse_stdin',
help='add a comment')
single_action.add_argument(
'-r', '--reply', type='id_list', dest='reply_ids',
help='reply to specific comment(s)')
def get_comment_reply(self, reply_ids, args):
"""Support reply to specific comment(s)."""
item_id = args['ids'][0]
try:
comments = next(self.service.CommentsRequest(ids=[item_id]).send())
except BiteError as e:
self.parser.error(f'argument -r/--reply: {e}')
# pull comment data in reply format
initial_text = []
try:
for i in reply_ids:
initial_text.append(comments[i].reply)
except IndexError:
self.parser.error(
'argument -r/--reply: '
f'nonexistent comment #{i} '
f'({self.service.item.type} #{item_id} has {len(comments)} '
'comments including the description)')
initial_text = '\n\n'.join(initial_text)
# request user changes
while True:
comment = block_edit(
header=True, comment='Add reply to the requested comment(s)', comment_from=initial_text).strip()
if (comment != initial_text or
confirm('No changes made to comment, submit anyway?')):
break
return comment
def finalize_args(self, args):
args = super().finalize_args(args)
# support interactive comment replies
reply_ids = args.pop('reply_ids', None)
if reply_ids is not None:
# | |
tree[6].set_numerical_test_node(
feature_id=5, opname='<', threshold=1,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_leaf_node(leaf_value=0.125240386)
tree[12].set_leaf_node(leaf_value=-0.0480586812)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=14, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0562478416)
tree[2].set_numerical_test_node(
feature_id=27, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.133602157)
tree[4].set_leaf_node(leaf_value=0.0103747388)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=30, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0563403554)
tree[2].set_leaf_node(leaf_value=0.119383894)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=21, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=19, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=-0.0555937588)
tree[4].set_leaf_node(leaf_value=0.0847212002)
tree[2].set_leaf_node(leaf_value=0.119030036)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=27, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=12, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=31, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_leaf_node(leaf_value=-4.53396751e-05)
tree[8].set_leaf_node(leaf_value=-0.0519176535)
tree[4].set_numerical_test_node(
feature_id=20, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_leaf_node(leaf_value=0.0758128986)
tree[10].set_leaf_node(leaf_value=0.00251008244)
tree[2].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=11, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_numerical_test_node(
feature_id=29, opname='<', threshold=0.5,
default_left=True, left_child_key=15, right_child_key=16)
tree[15].set_leaf_node(leaf_value=0.115501896)
tree[16].set_leaf_node(leaf_value=0.0126717007)
tree[12].set_leaf_node(leaf_value=-0.0382348187)
tree[6].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=-0.00208553229)
tree[14].set_leaf_node(leaf_value=-0.0498537868)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=32, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0555700921)
tree[2].set_numerical_test_node(
feature_id=28, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.013236383)
tree[4].set_leaf_node(leaf_value=0.120270707)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=0, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=3, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_numerical_test_node(
feature_id=27, opname='<', threshold=0.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=-0.0335048102)
tree[14].set_leaf_node(leaf_value=0.0475896709)
tree[8].set_leaf_node(leaf_value=-0.0471977182)
tree[4].set_leaf_node(leaf_value=-0.0549014583)
tree[2].set_numerical_test_node(
feature_id=27, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=3, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_numerical_test_node(
feature_id=8, opname='<', threshold=1.5,
default_left=True, left_child_key=15, right_child_key=16)
tree[15].set_leaf_node(leaf_value=0.0620149337)
tree[16].set_leaf_node(leaf_value=-0.0308423471)
tree[10].set_leaf_node(leaf_value=-0.0515705422)
tree[6].set_numerical_test_node(
feature_id=5, opname='<', threshold=1,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_leaf_node(leaf_value=0.111627951)
tree[12].set_leaf_node(leaf_value=-0.0471399762)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=14, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0556379929)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.119325638)
tree[4].set_leaf_node(leaf_value=0.0092998175)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=30, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0557330921)
tree[2].set_leaf_node(leaf_value=0.107890502)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=19, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=21, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=-0.0549177043)
tree[4].set_leaf_node(leaf_value=0.0858937427)
tree[2].set_numerical_test_node(
feature_id=6, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_leaf_node(leaf_value=0.108671807)
tree[6].set_leaf_node(leaf_value=0.0449379198)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=27, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=12, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=31, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_leaf_node(leaf_value=3.66042259e-05)
tree[8].set_leaf_node(leaf_value=-0.0511378124)
tree[4].set_numerical_test_node(
feature_id=20, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_leaf_node(leaf_value=0.0703058019)
tree[10].set_leaf_node(leaf_value=0.00258865743)
tree[2].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=11, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_numerical_test_node(
feature_id=6, opname='<', threshold=0.5,
default_left=True, left_child_key=15, right_child_key=16)
tree[15].set_numerical_test_node(
feature_id=1, opname='<', threshold=1.5,
default_left=True, left_child_key=17, right_child_key=18)
tree[17].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=19, right_child_key=20)
tree[19].set_leaf_node(leaf_value=0.000366851862)
tree[20].set_leaf_node(leaf_value=0.0764373988)
tree[18].set_leaf_node(leaf_value=0.10997247)
tree[16].set_leaf_node(leaf_value=0.0119431121)
tree[12].set_leaf_node(leaf_value=-0.0372664332)
tree[6].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=-0.00121775095)
tree[14].set_leaf_node(leaf_value=-0.0490503907)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=32, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0549904071)
tree[2].set_numerical_test_node(
feature_id=31, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0122229299)
tree[4].set_leaf_node(leaf_value=0.109174095)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=0, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=3, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_numerical_test_node(
feature_id=27, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_leaf_node(leaf_value=-0.0326999724)
tree[12].set_leaf_node(leaf_value=0.0455024466)
tree[8].set_leaf_node(leaf_value=-0.0463447087)
tree[4].set_leaf_node(leaf_value=-0.0542914644)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=19, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_numerical_test_node(
feature_id=13, opname='<', threshold=0.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=0.111545049)
tree[14].set_leaf_node(leaf_value=0.0362377167)
tree[10].set_leaf_node(leaf_value=-0.0496128574)
tree[6].set_leaf_node(leaf_value=-0.0501813255)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=14, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0550593399)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.108341567)
tree[4].set_leaf_node(leaf_value=0.00888466835)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=30, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0551542342)
tree[2].set_leaf_node(leaf_value=0.0976489112)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=21, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=19, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=-0.0544739142)
tree[4].set_leaf_node(leaf_value=0.072302945)
tree[2].set_leaf_node(leaf_value=0.0991791785)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=27, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=12, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=31, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_leaf_node(leaf_value=2.33872252e-05)
tree[8].set_leaf_node(leaf_value=-0.0503677316)
tree[4].set_numerical_test_node(
feature_id=17, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_leaf_node(leaf_value=0.0655779094)
tree[10].set_leaf_node(leaf_value=0.00221742759)
tree[2].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=11, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_numerical_test_node(
feature_id=6, opname='<', threshold=0.5,
default_left=True, left_child_key=15, right_child_key=16)
tree[15].set_numerical_test_node(
feature_id=1, opname='<', threshold=1.5,
default_left=True, left_child_key=17, right_child_key=18)
tree[17].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=19, right_child_key=20)
tree[19].set_leaf_node(leaf_value=-0.000183046126)
tree[20].set_leaf_node(leaf_value=0.0714399219)
tree[18].set_leaf_node(leaf_value=0.100781284)
tree[16].set_leaf_node(leaf_value=0.0116898036)
tree[12].set_leaf_node(leaf_value=-0.0362947844)
tree[6].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=-0.000583527843)
tree[14].set_leaf_node(leaf_value=-0.0482258946)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=32, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.054444056)
tree[2].set_numerical_test_node(
feature_id=28, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0110569848)
tree[4].set_leaf_node(leaf_value=0.100381367)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=0, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=3, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_numerical_test_node(
feature_id=27, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_leaf_node(leaf_value=-0.0318585858)
tree[12].set_leaf_node(leaf_value=0.0426461659)
tree[8].set_leaf_node(leaf_value=-0.0455001295)
tree[4].set_leaf_node(leaf_value=-0.0537060164)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=19, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_numerical_test_node(
feature_id=13, opname='<', threshold=0.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=0.101871371)
tree[14].set_leaf_node(leaf_value=0.0334801935)
tree[10].set_leaf_node(leaf_value=-0.0487776436)
tree[6].set_leaf_node(leaf_value=-0.0493848994)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=14, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0545130074)
tree[2].set_numerical_test_node(
feature_id=27, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0994988531)
tree[4].set_leaf_node(leaf_value=0.00806681532)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=30, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0546116605)
tree[2].set_numerical_test_node(
feature_id=17, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0984470174)
tree[4].set_leaf_node(leaf_value=0.0319658034)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=19, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=21, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=-0.0538570397)
tree[4].set_leaf_node(leaf_value=0.0738569945)
tree[2].set_numerical_test_node(
feature_id=6, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_leaf_node(leaf_value=0.0926994756)
tree[6].set_leaf_node(leaf_value=0.0392167829)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=27, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=12, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=31, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_leaf_node(leaf_value=0.000280787208)
tree[8].set_leaf_node(leaf_value=-0.0496028587)
tree[4].set_numerical_test_node(
feature_id=20, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_leaf_node(leaf_value=0.0622923337)
tree[10].set_leaf_node(leaf_value=0.0018609073)
tree[2].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=11, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_numerical_test_node(
feature_id=29, opname='<', threshold=0.5,
default_left=True, left_child_key=15, right_child_key=16)
tree[15].set_leaf_node(leaf_value=0.0888965875)
tree[16].set_leaf_node(leaf_value=0.0108379442)
tree[12].set_leaf_node(leaf_value=-0.0353214256)
tree[6].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=6.99278025e-05)
tree[14].set_leaf_node(leaf_value=-0.0474047288)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=32, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0539219081)
tree[2].set_numerical_test_node(
feature_id=31, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0105951307)
tree[4].set_leaf_node(leaf_value=0.0931921825)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=0, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=3, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_numerical_test_node(
feature_id=27, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_leaf_node(leaf_value=-0.0310331918)
tree[12].set_leaf_node(leaf_value=0.0400059558)
tree[8].set_leaf_node(leaf_value=-0.0446291976)
tree[4].set_leaf_node(leaf_value=-0.0531485341)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=19, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_numerical_test_node(
feature_id=13, opname='<', threshold=0.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=0.0940201879)
tree[14].set_leaf_node(leaf_value=0.0309093092)
tree[10].set_leaf_node(leaf_value=-0.0479226783)
tree[6].set_leaf_node(leaf_value=-0.0486013182)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=14, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0539949834)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.092454873)
tree[4].set_leaf_node(leaf_value=0.00714519015)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=30, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0540942661)
tree[2].set_numerical_test_node(
feature_id=17, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0910510495)
tree[4].set_leaf_node(leaf_value=0.0300962757)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=21, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=19, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=-0.0534542017)
tree[4].set_leaf_node(leaf_value=0.0630955249)
tree[2].set_leaf_node(leaf_value=0.086135909)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=27, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=12, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=31, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_leaf_node(leaf_value=0.000330694107)
tree[8].set_leaf_node(leaf_value=-0.0488443002)
tree[4].set_numerical_test_node(
feature_id=17, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_leaf_node(leaf_value=0.0583632067)
tree[10].set_leaf_node(leaf_value=0.0015067599)
tree[2].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=11, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_numerical_test_node(
feature_id=6, opname='<', threshold=0.5,
default_left=True, left_child_key=15, right_child_key=16)
tree[15].set_numerical_test_node(
feature_id=1, opname='<', threshold=1.5,
default_left=True, left_child_key=17, right_child_key=18)
tree[17].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=19, right_child_key=20)
tree[19].set_leaf_node(leaf_value=-0.00412822422)
tree[20].set_leaf_node(leaf_value=0.0624891929)
tree[18].set_leaf_node(leaf_value=0.0875311866)
tree[16].set_leaf_node(leaf_value=0.0106048854)
tree[12].set_leaf_node(leaf_value=-0.0343477204)
tree[6].set_numerical_test_node(
feature_id=15, opname='<', threshold=1.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=0.000736902177)
tree[14].set_leaf_node(leaf_value=-0.0465852581)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=32, opname='<', threshold=1.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0534253083)
tree[2].set_numerical_test_node(
feature_id=28, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.00922845583)
tree[4].set_leaf_node(leaf_value=0.0873670429)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=4, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_numerical_test_node(
feature_id=0, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_numerical_test_node(
feature_id=3, opname='<', threshold=0.5,
default_left=True, left_child_key=7, right_child_key=8)
tree[7].set_numerical_test_node(
feature_id=27, opname='<', threshold=0.5,
default_left=True, left_child_key=11, right_child_key=12)
tree[11].set_leaf_node(leaf_value=-0.0301814582)
tree[12].set_leaf_node(leaf_value=0.0384008698)
tree[8].set_leaf_node(leaf_value=-0.0437796563)
tree[4].set_leaf_node(leaf_value=-0.0526101775)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=5, right_child_key=6)
tree[5].set_numerical_test_node(
feature_id=19, opname='<', threshold=0.5,
default_left=True, left_child_key=9, right_child_key=10)
tree[9].set_numerical_test_node(
feature_id=13, opname='<', threshold=0.5,
default_left=True, left_child_key=13, right_child_key=14)
tree[13].set_leaf_node(leaf_value=0.0875401646)
tree[14].set_leaf_node(leaf_value=0.0285075735)
tree[10].set_leaf_node(leaf_value=-0.0471032001)
tree[6].set_leaf_node(leaf_value=-0.0478212647)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=14, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.053501375)
tree[2].set_numerical_test_node(
feature_id=32, opname='<', threshold=0.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0865296647)
tree[4].set_leaf_node(leaf_value=0.00686053885)
tree[0].set_root()
builder.append(tree)
tree = treelite.ModelBuilder.Tree()
tree[0].set_numerical_test_node(
feature_id=30, opname='<', threshold=0.5,
default_left=True, left_child_key=1, right_child_key=2)
tree[1].set_leaf_node(leaf_value=-0.0536041856)
tree[2].set_numerical_test_node(
feature_id=17, opname='<', threshold=1.5,
default_left=True, left_child_key=3, right_child_key=4)
tree[3].set_leaf_node(leaf_value=0.0851988941)
tree[4].set_leaf_node(leaf_value=0.0278433915)
tree[0].set_root()
builder.append(tree)
model = builder.commit()
make_annotation(model=model, dtrain_path='dermatology/dermatology.train',
annotation_path='./annotation.json')
for use_annotation in ['./annotation.json', None]:
for use_quantize in [True, False]:
run_pipeline_test(model=model,
dtest_path='dermatology/dermatology.test',
libname_fmt='./dermatology{}',
expected_prob_path='dermatology/dermatology.test.prob',
expected_margin_path='dermatology/dermatology.test.margin',
multiclass=True, use_annotation=use_annotation,
use_quantize=use_quantize, use_parallel_comp=None)
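# Condensed, hedged recap of the per-tree pattern repeated above (legacy
# treelite ModelBuilder API, as used in this file); illustration only, the
# helper below is not called by the tests:
def _append_stump(builder, feature_id, threshold, left_value, right_value):
    # one decision stump: a single numerical split feeding two leaves
    tree = treelite.ModelBuilder.Tree()
    tree[0].set_numerical_test_node(
        feature_id=feature_id, opname='<', threshold=threshold,
        default_left=True, left_child_key=1, right_child_key=2)
    tree[1].set_leaf_node(leaf_value=left_value)
    tree[2].set_leaf_node(leaf_value=right_value)
    tree[0].set_root()
    builder.append(tree)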
def test_model_builder3(self):
"""Test programmatic model construction using scikit-learn random forest"""
X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier(max_depth=3, random_state=0, n_estimators=10)
clf.fit(X, y)
expected_prob = clf.predict_proba(X)
def process_node(treelite_tree, sklearn_tree, nodeid):
if sklearn_tree.children_left[nodeid] == -1: # leaf node
leaf_count = sklearn_tree.value[nodeid].squeeze()
prob_distribution = leaf_count / leaf_count.sum()
treelite_tree[nodeid].set_leaf_node(prob_distribution)
else: # test node
treelite_tree[nodeid].set_numerical_test_node(
feature_id=sklearn_tree.feature[nodeid],
opname='<=',
threshold=sklearn_tree.threshold[nodeid],
| |
<gh_stars>0
import os
from abc import ABCMeta, abstractmethod
from copy import copy
from typing import Optional, Tuple, Iterable, List
import pgpy
from cryptography.hazmat.primitives.hashes import Hash, SHA256
from pgpy import PGPKey, PGPUID
from pgpy.types import Fingerprint
from git_anon.custom_exception import CustomException
from git_anon.gpg_general import uid_equals, get_pseudo_uid
from git_anon.gpg_general.key_utils import KeyUtils
def sha256(data: bytes) -> bytes:
digest = Hash(SHA256())
digest.update(data)
return digest.finalize()
class KeyStore(metaclass=ABCMeta):
# data could be cached in /dev/shm/... to increase performance with many keys
storage_location: str
def __init__(self, storage_location: str):
os.makedirs(storage_location, exist_ok=True)
os.makedirs(os.path.join(storage_location, "keys"), exist_ok=True)
self.storage_location = storage_location
def _get_primary_key(self, keyid: str, fingerprint: Optional[pgpy.pgp.Fingerprint]) -> Optional[PGPKey]:
possible_location: str
if fingerprint is not None:
if fingerprint.keyid != keyid:
raise CustomException("Keyid does not match with end of provided fingerprint.")
possible_location = self._folder_for_fingerprint(fingerprint)
else:
potential_locations = self._folders_for_keyid(keyid)
if len(potential_locations) == 0:
return None
if len(potential_locations) > 1:
message = "Found more than one key with id " + keyid + ": keys:"
for potential_location in potential_locations:
    # the fingerprint is the final path component (last 40 hex chars)
    fpr = potential_location[-40:]
    message += "\n fpr:" + fpr
message += "\nConsider yourself lucky. This might be a sensation and is so unlikely that no code was " \
           "written to handle this situation."
raise CustomException(message)
possible_location = potential_locations[0]
if not os.path.isdir(possible_location):
# the requested key is not available
return None
primary_key_file = os.path.join(possible_location, "primary_key.pub")
if not os.path.isfile(primary_key_file):
# the folder exists but the primary key does not
return None
key, _ = pgpy.PGPKey.from_file(primary_key_file)
if not possible_location == self._folder_for_fingerprint(key.fingerprint):
# the fingerprint does not match the storage location
return None
return self._verify_primary_key(key, keyid, fingerprint, self._expected_pseudo_uid())
def _expected_pseudo_uid(self) -> Optional[PGPUID]:
return get_pseudo_uid()
@staticmethod
def _verify_primary_key(key: PGPKey, expected_keyid: str, expected_fingerprint: Optional[Fingerprint],
expected_uid: Optional[PGPUID]):
# reject unless the key is public and passes every verification step
if not key.is_public \
        or not KeyStore._verify_expected_fingerprint_and_keyid(key, expected_keyid, expected_fingerprint) \
        or not KeyStore._verify_legal_key_with_single_userid(key, expected_uid is not None) \
        or not KeyStore._verify_expected_uid(key, expected_uid):
    return None
# primary key has been validated
return key
@staticmethod
def _verify_expected_fingerprint_and_keyid(key: PGPKey, expected_keyid: str,
expected_fingerprint: Optional[Fingerprint]) -> bool:
if expected_fingerprint is not None:
# we have an expected fingerprint
if key.fingerprint != expected_fingerprint:
# imported key does not match the expected fingerprint; potential attack
return False
if key.fingerprint.keyid != expected_keyid:
# key id does not match requested key id
return False
return True
@staticmethod
def _verify_expected_uid(key: PGPKey, expected_uid: Optional[PGPUID]) -> bool:
if expected_uid is not None:
imported_uid: PGPUID = key.userids[0]
if not uid_equals(imported_uid, expected_uid):
# The pseudo uid was not included.
# A different one might have been used instead.
return False
if len(imported_uid.signers) > 1:
# more than one self-signature or a certification included
return False
return True
@staticmethod
def _verify_no_subkeys_signers_revsigs(key: PGPKey) -> bool:
if len(key.subkeys) != 0:
# Subkeys were included.
return False
if len(key.signers) != 0:
# Direct signatures over the key were included.
return False
for _ in key.revocation_signatures:
# A revocation signature was included.
return False
return True
@staticmethod
def _verify_legal_key_with_single_userid(key: PGPKey, single_uid_expected: bool = True) -> bool:
if single_uid_expected:
if not len(key.userids) == 1:
# more than just one user id included
return False
single_uid: PGPUID = key.userids[0]
if not single_uid.is_uid:
# photo uids are not supported
return False
verification_result = key.verify(single_uid)
if not verification_result:
# no valid self-signature on this key
return False
else:
if len(key.userids) != 0:
# did not expect a userid
return False
if not KeyStore._verify_no_subkeys_signers_revsigs(key):
return False
return True
def _get_potential_userid_packet_locations(self, fingerprint: Fingerprint) -> List[Tuple[str, str]]:
key_folder = self._folder_for_fingerprint(fingerprint)
uid_folder = os.path.join(key_folder, "uid-packets")
if not os.path.isdir(uid_folder):
# no other uids available
return []
potential_locations: List[Tuple[str, str]] = []
for file in os.listdir(uid_folder):
if os.path.isfile(os.path.join(uid_folder, file)):
potential_locations.append((uid_folder, file))
return potential_locations
@staticmethod
def _transplant_uid(donor_uid: PGPUID, target_key: PGPKey) -> None:
if not donor_uid.is_uid:
raise AssertionError("Donor UID is not a UID and cannot be transplanted!")
donor_uid = donor_uid.__copy__()
target_key.add_uid(donor_uid, selfsign=False)
def add_key(self, key: PGPKey) -> None:
# filter for the pubkey only and copy the key object so we don't affect other instances
key = copy(key.pubkey)
# remove all user ids except the pseudo uid
key = self._filter_uids_on_key(key, get_pseudo_uid())
KeyUtils.strip_subkeys(key)
if len(key.userids[0].signers) != 1:
raise AssertionError("Certifications on pseudo UIDs are not allowed!")
self._store_primary_key(key)
def _store_primary_key(self, key: PGPKey) -> None:
folder = self._folder_for_fingerprint(key.fingerprint)
os.makedirs(folder, exist_ok=True)
filename = os.path.join(folder, "primary_key.pub")
self._write_file(filename, bytes(str(key), 'utf-8'))
@staticmethod
def _filter_uids_on_key(key: PGPKey, desired_uid: PGPUID) -> PGPKey:
key = copy(key)
# remove all user ids except the pseudo uid
for uid in key.userids:
if not uid_equals(uid, desired_uid):
key.del_uid(uid.name)
# assert that no unwanted information is left
if len(key.userids) != 1 or not uid_equals(key.userids[0], desired_uid):
raise AssertionError("Unwanted information left on the key!")
for _ in key.revocation_signatures:
raise AssertionError("Keys should not have revocation signatures!")
if len(key.signers) != 0:
raise AssertionError("Keys should not be signed directly!")
return key
def _folder_for_fingerprint(self, fingerprint: pgpy.pgp.Fingerprint) -> str:
# Using the keyid allows us to blindly change into the correct folder based
# on the signer's keyid, which is always included in the signature. The
# fingerprint is needed for collision resistance but isn't always included
# in signatures. Where the fingerprint is unknown, we iterate over the
# subdirectories for the keyid.
return os.path.join(self.storage_location, "keys", fingerprint.keyid, fingerprint.replace(" ", ""))
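# Example (hypothetical 40-hex-digit fingerprint F whose last 16 digits form
# the keyid K): the key lands in <storage_location>/keys/K/F.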
def _folders_for_keyid(self, keyid: str) -> List[str]:
if len(keyid) != 16:
raise AssertionError("64bit key ids are required!")
keyid_folder = os.path.join(self.storage_location, "keys", keyid)
if not os.path.isdir(keyid_folder):
return []
potential_folders = []
for file_or_folder in os.listdir(keyid_folder):
fingerprint_folder = os.path.join(keyid_folder, file_or_folder)
if len(file_or_folder) == 40 and os.path.isdir(fingerprint_folder):
potential_folders.append(fingerprint_folder)
return potential_folders
def get_key(self, keyid: str, fingerprint: pgpy.pgp.Fingerprint = None) -> Optional[PGPKey]:
if len(keyid) != 16:
raise AssertionError("64bit key ids are required!")
if not (fingerprint is None or isinstance(fingerprint, Fingerprint)):
raise AssertionError("Fingerprints must be instances of the Fingerprint class if provided!")
primary_key = self._get_primary_key(keyid, fingerprint)
if primary_key is None:
return None
combined_key = primary_key.__copy__()
self._get_userid_packets(combined_key)
return combined_key
@abstractmethod
def _get_userid_packets(self, primary_key):
pass
@abstractmethod
def add_userid(self, key: PGPKey, uid: PGPUID):
pass
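# Hedged sketch (kept as a comment so it stays illustrative) of how a concrete
# subclass could wire the two abstract hooks together using only helpers that
# are defined on this class:
#
#     class ExampleKeyStore(KeyStore):
#         def _get_userid_packets(self, primary_key):
#             locations = self._get_potential_userid_packet_locations(
#                 primary_key.fingerprint)
#             for folder, name in locations:
#                 with open(os.path.join(folder, name), "rb") as file:
#                     self._verify_and_import_uid_packet(
#                         primary_key, file.read(), expected_hash=name)
#         def add_userid(self, key, uid):
#             prepared = self._prepare_uid_for_export(key, uid)
#             if prepared is not None:
#                 filename, serialized_key, _ = prepared
#                 self._write_file(filename, serialized_key)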
def _prepare_uid_for_export(self, key: PGPKey, uid: PGPUID) -> Optional[Tuple[str, bytes, str]]:
# ultimately we should separate uids. self-signatures and certifications and store them in individual files
# uids and self-signatures can likely be stored together
# for now we might have to store keys with one uid and associated self-signatures and certifications together
# for compatibility with libraries and gpg itself
# filter for the pubkey only and copy the key object so we don't affect other instances
key = copy(key.pubkey)
# abort if presented with the pseudo-uid to export
if uid_equals(uid, get_pseudo_uid()):
# the pseudo-uid is exported together with the primary key material and does not need to be exported again
return None
# filter the key to only requested information
key = self._filter_uids_on_key(key, uid)
if len(key.userids) != 1:
raise AssertionError("Only one UID should remain!")
# export the key
serialized_key: bytes = bytes(key)
key_folder = self._folder_for_fingerprint(key.fingerprint)
uid_folder = os.path.join(key_folder, "uid-packets")
os.makedirs(uid_folder, exist_ok=True)
serialization_hash: str = sha256(serialized_key).hex()
filename = os.path.join(uid_folder, serialization_hash)
return filename, serialized_key, serialization_hash
def _write_file(self, filename: str, data: bytes) -> None:
if os.path.isfile(filename):
# We already stored this exact serialized packet.
with open(filename, "rb") as file:
if not file.read() == data:
raise CustomException(
"Writing to {} failed. File already exists but contents are not as expected.".format(filename))
return
with open(filename, "wb") as file:
file.write(data)
def _verify_and_import_uid_packet(self, primary_key: PGPKey, uid_packet: bytes, expected_hash: str) -> None:
actual_serialized_hash = sha256(uid_packet).hex()
if not actual_serialized_hash == expected_hash:
# hashes for the content do not match
return
imported_key, _ = PGPKey.from_blob(uid_packet)
if not imported_key.fingerprint == primary_key.fingerprint:
# Primary key material on the two keys does not match. They don't belong together.
return
if not KeyStore._verify_legal_key_with_single_userid(imported_key):
# key failed one of the validity tests
return
if not primary_key.verify(imported_key.userids[0]):
# Primary key did not sign userid.
# This check was already performed via the self-signature check on imported_key and the fingerprint
# match with primary_key before.
return
new_userid: PGPUID = imported_key.userids[0]
self._transplant_uid(new_userid, primary_key)
def list_keys(self) -> Iterable[PGPKey]:
if not os.path.isdir(self.storage_location):
    # no keys present, so yield nothing
    return
for keyid in os.listdir(os.path.join(self.storage_location, "keys")):
key = self.get_key(keyid)
if key is not None:
yield key
@staticmethod
def _strip_signatures_uids_subkeys(key: PGPKey):
# overwrite internal variables in the key to remove unnecessary information
KeyUtils.strip_subkeys(key)
KeyUtils.strip_uids(key)
KeyUtils.strip_signatures(key)
@staticmethod
def _combine_key_parts(key_parts: List[PGPKey]) -> PGPKey:
# pylint: disable=protected-access
| |
<filename>admin_tabs/helpers.py
# -*- coding: utf-8 -*-
from django.contrib.admin.helpers import AdminForm, Fieldset
from django.contrib.admin import ModelAdmin
from django.contrib.admin.options import csrf_protect_m
from django.db import transaction
class AdminCol(object):
"""
One column in the admin pages.
"""
def __init__(self, fieldsets, name=None, css_id=None, css_classes=None):
"""
`css_classes`: list of css classes
"""
self.name = name
self._fieldsets = {} # names of fieldsets for now (real Fieldsets should be better)
for idx, fieldset in enumerate(fieldsets):
self.add_fieldset(fieldset, idx)
self.css_id = css_id
self.css_classes = css_classes
def add_fieldset(self, fieldset, position=None):
if position is None:  # 0 is a valid position, so test for None explicitly
position = len(self)
self._fieldsets[position] = fieldset
@property
def fieldsets(self):
"""
Returns the sorted fieldsets (the sort is made on the key, which is the position)
"""
keys = self._fieldsets.keys()
keys.sort()
return map(self._fieldsets.get, keys)
def __contains__(self, item):
return item in self.fieldsets
def __len__(self):
return len(self._fieldsets)
def get_fieldsets(self, request, obj=None):
"""
Returns fieldsets, as expected by Django...
"""
return self.get_elements(request, obj=obj) # Without inlines
def get_elements(self, request, obj=None, include_inlines=False):
col_elements = []
for fieldset_config in self.fieldsets:
if fieldset_config.inline is not None:
if not include_inlines:
continue # not inlines here
col_element = (
fieldset_config.name,
{"inline": fieldset_config.inline}
)
else: # Classic fieldset
col_element = (
fieldset_config.name,
{
"fields": fieldset_config.fields,
"classes": fieldset_config.css_classes,
}
)
col_elements.append(col_element)
return col_elements
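# Example of the structure returned above (hypothetical names), matching
# Django's ModelAdmin.fieldsets convention:
#     [("Content", {"fields": ("title", "body"), "classes": []}),
#      ("Photos", {"inline": PhotoInline})]  # inline entries only appear
#                                            # when include_inlines=True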
class AdminTab(object):
"""
One Tab in the admin pages.
"""
def __init__(self, name, cols, enabled=True):
self.name = name
self.enabled = enabled
self._cols = {}
for idx, col in enumerate(cols):
self.add_col(col, idx)
def add_col(self, col, position=None):
# Position is not an attribute of the col, but of the relation tab=>col
if position is None:  # 0 is a valid position, so test for None explicitly
position = len(self)
# FIXME: manage position insertion
self._cols[position] = col
@property
def cols(self):
"""
Returns the sorted cols (the sort is made on the key, which is the position)
"""
keys = self._cols.keys()
keys.sort()
return map(self._cols.get, keys)
def __iter__(self):
return self.cols.__iter__()
def __len__(self):
return len(self._cols)
def __getitem__(self, item):
"""
Returns a col by its position.
"""
return self._cols[item]
def medias(self):
return
class AdminFieldsetConfig(object):
"""
Wrapper to define the admin Fieldset.
See https://docs.djangoproject.com/en/dev/ref/contrib/admin/#django.contrib.admin.ModelAdmin.fieldsets
for the original syntax.
It can be a real Fieldset or an Inline.
"""
def __init__(self, fields=None, inline=None, name=None, css_classes=None, description=None):
self.description = description
self.css_classes = css_classes or []
self.fields = fields
self.inline = inline
self.name = name
def __iter__(self):
return self.fields.__iter__()
def __getitem__(self, item):
return getattr(self, item)
class Config(dict):
"""
Basic extension of a dict object to manage the attributes order in class
definition.
"""
creation_counter = 0
def __init__(self, *args, **kwargs):
self.creation_counter = Config.creation_counter
Config.creation_counter += 1
return super(Config, self).__init__(*args, **kwargs)
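# Example: because every Config instance records a creation counter, the
# attribute order of a class body can be recovered later (hypothetical names):
#     class TabsConfig(object):
#         general = Config(name="General", cols=["main"])
#         seo = Config(name="SEO", cols=["side"])
# Sorting the attributes by `creation_counter` yields ["general", "seo"];
# MetaAdminPageConfig below relies on exactly this to build `tabs_order`.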
class Tabs(object):
"""
Base class for the tabs object.
Manages the tabs order in __setattr__ and __delattr__.
"""
def __init__(self, *args, **kwargs):
# Set the property to store the tabs order automatically from
# __setattr__ and __delattr__
object.__setattr__(self, "tabs_order", [])
def __setattr__(self, name, value):
# Manage the tabs_order
if not name.startswith("_"):
self.tabs_order.append(name)
super(Tabs, self).__setattr__(name, value)
def __delattr__(self, name):
# Manage the tabs_order
if not name.startswith("_"):
self.tabs_order.remove(name)
super(Tabs, self).__delattr__(name)
class MetaAdminPageConfig(type):
"""
This metaclass make inheritance between the inner classes of the PageConfig
classes.
"""
def __new__(mcs, name, base, dct):
it = type.__new__(mcs, name, base, dct)
def _manage_config_class_inheritance(config_class_name):
"""
Manage the inheritance of the config classes (TabsConfig, ColsConfig
and FieldsetsConfig):
- inherit attributes from the parent when not set in the current class
- remove the parent attribute when set to None in the current class
- merge with the parent ones if also set in the current class
:param config_class_name: could be "TabsConfig", "ColsConfig" or
"FieldsetsConfig"
"""
_final_attrs = {} # Attrs that will be finally added to the created class
reverse_mro = list(it.mro())
reverse_mro.reverse()
for cls in reverse_mro:
if not hasattr(cls, config_class_name): continue
# config_class will be cls.TabsConfig, cls.ColsConfig or
# cls.FieldsetsConfig, according to the given config_class_name
# param
config_class = getattr(cls, config_class_name)
for attr_name in dir(config_class):
# Lets look for the Config attribute of the current inner class
attr = getattr(config_class, attr_name)
# We keep Config instances AND attr_name already in _final_attrs
# because they can be overwritten by None
if not isinstance(attr, Config) and attr_name not in _final_attrs: continue
# Setting some attr to None removes an attr that was set in a
# parent class
if attr is None and attr_name in _final_attrs:
del _final_attrs[attr_name]
delattr(config_class, attr_name)
else:
# Merge with parent's attr if exists
if attr_name in _final_attrs:
config = Config(**_final_attrs[attr_name]) # Copy it
config.update(attr)
attr = config
_final_attrs[attr_name] = attr
# Add selected Config attributes in the created class
# final_class_config is the it.TabsConfig, it.ColsConfig or
# it.FieldsetsConfig, according to the given config_class_name param
# (Do not confuse with the cls.X ones: `it` is current created class,
# `cls` are the parent classes from which we inherit attributes)
final_class_config = getattr(it, config_class_name)
for attr_name, attr in _final_attrs.iteritems():
setattr(final_class_config, attr_name, attr)
return _final_attrs
# --- Make the TabsConfig attributes overwrittable and inheritable
_manage_config_class_inheritance("TabsConfig")
# --- Make the ColsConfig attributes overwrittable and inheritable
_manage_config_class_inheritance("ColsConfig")
# --- Make the FieldsetsConfig attributes overwrittable and inheritable
_manage_config_class_inheritance("FieldsetsConfig")
# --- Define a default tabs order if the user has not provided one
if not hasattr(it.TabsConfig, 'tabs_order'):
tabs_order = [attr for attr in dir(it.TabsConfig) if not attr.startswith('_')]
tabs_order.sort(key=lambda attr: getattr(it.TabsConfig, attr).creation_counter)
setattr(it.TabsConfig, "tabs_order", tabs_order)
return it
class TabbedPageConfig(object):
"""
Subclass it to be able to manage the tabs.
Define the fieldsets you will use in your cols in the inner
FieldsetsConfig class.
"""
__metaclass__ = MetaAdminPageConfig
class FieldsetsConfig(object): pass
class ColsConfig(object): pass
class TabsConfig(object): pass
def __init__(self, request, model_admin, obj_or_id=None):
# Create these inner classes at runtime
# to prevent from sharing them between instances
self.Fields = type("Fields", (object,), {})
self.Cols = type("Cols", (object,), {})
self.Tabs = Tabs() # Instantiate it to be able to define __setattr__
# and __delattr__
# TODO: instantiate also Fields and Cols?
self.model_admin = model_admin
self.request = request
# Populate the fieldsets
for f in dir(self.FieldsetsConfig):
if f.startswith("_"): continue
fields = getattr(self.FieldsetsConfig, f)
if not fields: continue
fieldsetconfig = AdminFieldsetConfig(**fields)
setattr(self.Fields, f, fieldsetconfig)
# Put AdminCols instance in self.Cols
for f in dir(self.ColsConfig):
if f.startswith("_"): continue
# Make a copy, as it is a static dict property
# and we are making changes to it
cols = getattr(self.ColsConfig, f)
if not cols: continue
cols_config = dict(cols)
# We want AdminFieldsetConfig instances, not names
cols_config['fieldsets'] = map(lambda k: getattr(self.Fields, k), cols_config['fieldsets'])
# Col instances need to know about their AdminFormConfig parent
# cols_config["page_config"] = self
setattr(self.Cols, f, AdminCol(**cols_config))
del cols_config
# Put AdminTabs instances in self.Tabs
for f in self.TabsConfig.tabs_order:
# Make a copy, as it is a static dict property
# and we are making changes to it
tabs = getattr(self.TabsConfig, f)
if not tabs: continue
tabconfig = dict(tabs)
# We want ColsConfig instances, not names
tabconfig['cols'] = map(lambda k: getattr(self.Cols, k), tabconfig['cols'])
setattr(self.Tabs, f, AdminTab(**tabconfig))
def __iter__(self):
for attr in self.Tabs.tabs_order:
yield getattr(self.Tabs, attr)
@property
def tabs(self):
return self.__iter__()
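# Hedged usage sketch (hypothetical model fields and names) showing how the
# three inner config classes fit together: each name in `fieldsets`/`cols`
# refers back to an attribute defined one level below it.
class ExamplePageConfig(TabbedPageConfig):
    class FieldsetsConfig(object):
        main = Config(fields=("title", "body"), name="Content")
        meta = Config(fields=("slug",), name="Metadata", css_classes=["collapse"])
    class ColsConfig(object):
        left = Config(fieldsets=["main"], css_classes=["col-left"])
        right = Config(fieldsets=["meta"], css_classes=["col-right"])
    class TabsConfig(object):
        content_tab = Config(name="Content", cols=["left", "right"])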
class TabbedModelAdmin(ModelAdmin):
declared_fieldsets = []
page_config_class = TabbedPageConfig
def __init__(self, *args, **kwargs):
self._page_config = None  # For caching. Warning: ModelAdmin instances are shared, so this cache persists across requests.
# Override get_page_config for changing Tabs
# at run time
return super(TabbedModelAdmin, self).__init__(*args, **kwargs)
def get_page_config(self, request, obj_or_id=None, **kwargs):
"""
Returns the page config for the current model_admin.
Override this method to be able to change Tabs, Cols and Fields at
runtime.
`obj_or_id` can be an instance, a pk, or None (when called from an
extended change_view, only the pk is available).
"""
if self._page_config is None:
self._page_config = self.page_config_class(request, self, obj_or_id=obj_or_id)
return self._page_config
def get_fieldsets(self, request, obj=None):
fieldsets = []
page_config = self.get_page_config(request, obj_or_id=obj)
for tab in page_config:
for col in tab:
fieldsets += col.get_fieldsets(request, obj)
return fieldsets
def get_form(self, request, obj=None, **kwargs):
self.declared_fieldsets = self.get_fieldsets(request, obj)
return super(TabbedModelAdmin, self).get_form(request, obj, **kwargs)
@csrf_protect_m
@transaction.commit_on_success
def change_view(self, request, object_id, form_url='', extra_context=None):
if extra_context is None:
extra_context = {}
page_config = self.get_page_config(request, obj_or_id=object_id)
extra_context.update({'page_config': page_config})
try:
# django 1.4
return super(TabbedModelAdmin, self).change_view(request, object_id, form_url=form_url, extra_context=extra_context)
except | |
<reponame>cwood1967/SBEMimage
# -*- coding: utf-8 -*-
# ==============================================================================
# This source file is part of SBEMimage (github.com/SBEMimage)
# (c) 2018-2020 <NAME> Institute for Biomedical Research, Basel,
# and the SBEMimage developers.
# This software is licensed under the terms of the MIT License.
# See LICENSE.txt in the project root folder.
# ==============================================================================
"""This module controls the Viewport window, which consists of three tabs:
- the Viewport (methods/attributes concerning the Viewport tab are
tagged with 'vp_')
- the Slice-by-Slice Viewer (sv_),
- the Reslice/Statistics tab (m_ for 'monitoring')
"""
import os
import shutil
import json
import yaml
import numpy as np
import threading
from time import time, sleep
from PIL import Image
from math import log, sqrt, sin, cos, radians
from statistics import mean
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import QWidget, QApplication, QMessageBox, QMenu
from PyQt5.QtGui import QPixmap, QPainter, QColor, QFont, QIcon, QPen, \
QBrush, QTransform, QKeyEvent
from PyQt5.QtCore import Qt, QObject, QRect, QPoint, QSize, pyqtSignal
import utils
import acq_func
from viewport_dlg_windows import StubOVDlg, FocusGradientTileSelectionDlg, \
GridRotationDlg, ImportImageDlg, \
AdjustImageDlg, DeleteImageDlg
from main_controls_dlg_windows import MotorStatusDlg
class Viewport(QWidget):
def __init__(self, config, sem, stage, coordinate_system,
ov_manager, grid_manager, imported_images,
autofocus, acquisition, img_inspector,
main_controls_trigger):
super().__init__()
self.cfg = config
self.sem = sem
self.stage = stage
self.cs = coordinate_system
self.gm = grid_manager
self.ovm = ov_manager
self.imported = imported_images
self.autofocus = autofocus
self.acq = acquisition
self.img_inspector = img_inspector
self.main_controls_trigger = main_controls_trigger
# Set Viewport zoom parameters depending on which stage is used for XY
if self.stage.use_microtome_xy:
self.VP_ZOOM = utils.VP_ZOOM_MICROTOME_STAGE
else:
self.VP_ZOOM = utils.VP_ZOOM_SEM_STAGE
# Set limits for viewport panning.
self.VC_MIN_X, self.VC_MAX_X, self.VC_MIN_Y, self.VC_MAX_Y = (
self.vp_dx_dy_range())
# Shared control variables
self.busy = False # acquisition or other operation in progress
self.active = True # Viewport window is active.
# for mouse operations (dragging, measuring)
self.doubleclick_registered = False
self.zooming_in_progress = False
self.drag_origin = (0, 0)
self.fov_drag_active = False
self.tile_paint_mode_active = False
self.measure_p1 = (None, None)
self.measure_p2 = (None, None)
self.measure_complete = False
self.help_panel_visible = False
self.stub_ov_centre = [None, None]
# Set up trigger and queue to update viewport from the
# acquisition thread or dialog windows.
self.viewport_trigger = utils.Trigger()
self.viewport_trigger.signal.connect(self._process_signal)
self._load_gui()
# Initialize viewport tabs:
self._vp_initialize() # Viewport
self._sv_initialize() # Slice-by-slice viewer
self._m_initialize() # Monitoring tab
self.setMouseTracking(True)
def save_to_cfg(self):
"""Save viewport configuration to ConfigParser object."""
self.cfg['viewport']['vp_current_grid'] = str(self.vp_current_grid)
self.cfg['viewport']['vp_current_ov'] = str(self.vp_current_ov)
self.cfg['viewport']['vp_tile_preview_mode'] = str(
self.vp_tile_preview_mode)
self.cfg['viewport']['show_labels'] = str(self.show_labels)
self.cfg['viewport']['show_axes'] = str(self.show_axes)
self.cfg['viewport']['show_stub_ov'] = str(self.show_stub_ov)
self.cfg['viewport']['show_imported'] = str(self.show_imported)
self.cfg['viewport']['show_native_resolution'] = str(
self.show_native_res)
self.cfg['viewport']['show_saturated_pixels'] = str(
self.show_saturated_pixels)
self.cfg['viewport']['sv_current_grid'] = str(self.sv_current_grid)
self.cfg['viewport']['sv_current_tile'] = str(self.sv_current_tile)
self.cfg['viewport']['sv_current_ov'] = str(self.sv_current_ov)
self.cfg['viewport']['m_current_grid'] = str(self.m_current_grid)
self.cfg['viewport']['m_current_tile'] = str(self.m_current_tile)
self.cfg['viewport']['m_current_ov'] = str(self.m_current_ov)
def _load_gui(self):
loadUi('..\\gui\\viewport.ui', self)
self.setWindowIcon(QIcon('..\\img\\icon_16px.ico'))
self.setWindowTitle('SBEMimage - Viewport')
# Display current settings:
self.setFixedSize(self.size())
self.move(20, 20)
# Deactivate buttons for imaging if in simulation mode
if self.sem.simulation_mode:
self.pushButton_refreshOVs.setEnabled(False)
self.pushButton_acquireStubOV.setEnabled(False)
self.checkBox_showStagePos.setEnabled(False)
# Detect if tab is changed
self.tabWidget.currentChanged.connect(self.tab_changed)
def tab_changed(self):
if self.tabWidget.currentIndex() == 2: # Acquisition monitor
# Update motor status
self.m_show_motor_status()
def restrict_gui(self, b):
"""Disable several GUI elements while SBEMimage is busy, for example
when an acquisition is running."""
self.busy = b
b ^= True  # invert: GUI elements are enabled when SBEMimage is not busy
self.pushButton_refreshOVs.setEnabled(b)
self.pushButton_acquireStubOV.setEnabled(b)
if not b:
self.radioButton_fromStack.setChecked(not b)
self.radioButton_fromSEM.setEnabled(b)
def update_grids(self):
"""Update the grid selectors after grid is added or deleted."""
self.vp_current_grid = -1
if self.sv_current_grid >= self.gm.number_grids:
self.sv_current_grid = self.gm.number_grids - 1
self.sv_current_tile = -1
if self.m_current_grid >= self.gm.number_grids:
self.m_current_grid = self.gm.number_grids - 1
self.m_current_tile = -1
self.vp_update_grid_selector()
self.sv_update_grid_selector()
self.sv_update_tile_selector()
self.m_update_grid_selector()
self.m_update_tile_selector()
def update_ov(self):
"""Update the overview selectors after overview is added or deleted."""
self.vp_current_ov = -1
self.vp_update_ov_selector()
self.sv_update_ov_selector()
self.m_update_ov_selector()
def _update_measure_buttons(self):
"""Display the measuring tool buttons as active or inactive."""
if self.vp_measure_active:
self.pushButton_measureMosaic.setIcon(
QIcon('..\\img\\measure-active.png'))
self.pushButton_measureMosaic.setIconSize(QSize(16, 16))
else:
self.pushButton_measureMosaic.setIcon(
QIcon('..\\img\\measure.png'))
self.pushButton_measureMosaic.setIconSize(QSize(16, 16))
if self.sv_measure_active:
self.pushButton_measureSlice.setIcon(
QIcon('..\\img\\measure-active.png'))
self.pushButton_measureSlice.setIconSize(QSize(16, 16))
else:
self.pushButton_measureSlice.setIcon(
QIcon('..\\img\\measure.png'))
self.pushButton_measureSlice.setIconSize(QSize(16, 16))
def _draw_measure_labels(self, qp):
"""Draw measure labels QPainter qp. qp must be active when calling this
method."""
def draw_measure_point(qp, x, y):
qp.drawEllipse(QPoint(x, y), 4, 4)
qp.drawLine(x, y - 10, x, y + 10)
qp.drawLine(x - 10, y, x + 10, y)
draw_in_vp = (self.tabWidget.currentIndex() == 0)
tile_display = (self.sv_current_tile >= 0)
qp.setPen(QPen(QColor(*utils.COLOUR_SELECTOR[6]), 2, Qt.SolidLine))
qp.setBrush(QColor(0, 0, 0, 0))
if self.measure_p1[0] is not None:
if draw_in_vp:
p1_x, p1_y = self.cs.convert_to_v(self.measure_p1)
else:
p1_x, p1_y = self.cs.convert_to_sv(
self.measure_p1, tile_display)
draw_measure_point(qp, p1_x, p1_y)
if self.measure_p2[0] is not None:
if draw_in_vp:
p2_x, p2_y = self.cs.convert_to_v(self.measure_p2)
else:
p2_x, p2_y = self.cs.convert_to_sv(
self.measure_p2, tile_display)
draw_measure_point(qp, p2_x, p2_y)
if self.measure_complete:
# Draw line between p1 and p2
qp.drawLine(p1_x, p1_y, p2_x, p2_y)
distance = sqrt((self.measure_p1[0] - self.measure_p2[0])**2
+ (self.measure_p1[1] - self.measure_p2[1])**2)
qp.setPen(QPen(QColor(0, 0, 0), 1, Qt.SolidLine))
qp.setBrush(QColor(0, 0, 0, 255))
qp.drawRect(utils.VP_WIDTH - 80, utils.VP_HEIGHT - 20, 80, 20)
qp.setPen(QPen(QColor(*utils.COLOUR_SELECTOR[6]), 1, Qt.SolidLine))
if distance < 1:
qp.drawText(utils.VP_WIDTH - 75, utils.VP_HEIGHT - 5,
str((int(distance * 1000))) + ' nm')
else:
qp.drawText(utils.VP_WIDTH - 75, utils.VP_HEIGHT - 5,
'{0:.2f}'.format(distance) + ' µm')
def grab_viewport_screenshot(self, save_path_filename):
viewport_screenshot = self.grab()
viewport_screenshot.save(save_path_filename)
def show_in_incident_log(self, message):
"""Show the message in the Viewport's incident log
(in the monitoring tab).
"""
self.textarea_incidentLog.appendPlainText(message)
def _process_signal(self):
"""Process signals from the acquisition thread or from dialog windows.
"""
msg = self.viewport_trigger.queue.get()
if msg == 'DRAW VP':
self.vp_draw()
elif msg == 'DRAW VP NO LABELS':
self.vp_draw(suppress_labels=True, suppress_previews=True)
elif msg == 'UPDATE XY':
self.main_controls_trigger.transmit(msg)
elif msg == 'STATUS IDLE':
self.main_controls_trigger.transmit(msg)
elif msg == 'STATUS BUSY STUB':
self.main_controls_trigger.transmit(msg)
elif msg == 'STATUS BUSY OV':
self.main_controls_trigger.transmit(msg)
elif msg.startswith('ACQ IND OV'):
self.vp_toggle_ov_acq_indicator(
int(msg[len('ACQ IND OV'):]))
elif msg == 'MANUAL MOVE SUCCESS':
self._vp_manual_stage_move_success(True)
elif msg == 'MANUAL MOVE FAILURE':
self._vp_manual_stage_move_success(False)
elif msg == 'REFRESH OV SUCCESS':
self._vp_overview_acq_success(True)
elif msg == 'REFRESH OV FAILURE':
self._vp_overview_acq_success(False)
elif msg == 'STUB OV SUCCESS':
self._vp_stub_overview_acq_success(True)
elif msg == 'STUB OV FAILURE':
self._vp_stub_overview_acq_success(False)
else:
self._add_to_main_log(msg)
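    # Hedged summary of the message protocol handled by _process_signal above
    # (inferred from the dispatcher itself, not from separate documentation):
    # the acquisition thread posts plain strings into viewport_trigger.queue:
    # redraw requests ('DRAW VP', 'DRAW VP NO LABELS'), status updates relayed
    # unchanged to the main window ('UPDATE XY', 'STATUS ...'), per-overview
    # acquisition indicators ('ACQ IND OV<n>'), move/acquisition outcome
    # notifications ('... SUCCESS'/'... FAILURE'); anything unrecognized is
    # treated as a log message.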
def _add_to_main_log(self, msg):
"""Add entry to the log in the main window via main_controls_trigger."""
self.main_controls_trigger.transmit(utils.format_log_entry(msg))
def closeEvent(self, event):
"""This overrides the QWidget's closeEvent(). Viewport must be
deactivated first before the window can be closed."""
if self.active:
QMessageBox.information(self, 'Closing Viewport',
'The Viewport window can only be closed by closing the main '
'window of the application.', QMessageBox.Ok)
event.ignore()
else:
event.accept()
# ======================== Below: mouse event methods ==========================
    def setMouseTracking(self, flag):
        """Recursively activate or deactivate mouse tracking in a widget."""
        def recursive_set(parent):
            for child in parent.findChildren(QObject):
                try:
                    child.setMouseTracking(flag)
                except Exception:
                    # Not all child objects support mouse tracking.
                    pass
                recursive_set(child)
        QWidget.setMouseTracking(self, flag)
        recursive_set(self)
def mousePressEvent(self, event):
p = event.pos()
px, py = p.x() - utils.VP_MARGIN_X, p.y() - utils.VP_MARGIN_Y
mouse_pos_within_viewer = (
px in range(utils.VP_WIDTH) and py in range(utils.VP_HEIGHT))
mouse_pos_within_plot_area = (
px in range(445, 940) and py in range(22, 850))
if ((event.button() == Qt.LeftButton)
and (self.tabWidget.currentIndex() < 2)
and mouse_pos_within_viewer):
self.selected_grid, self.selected_tile = \
self._vp_grid_tile_mouse_selection(px, py)
self.selected_ov = self._vp_ov_mouse_selection(px, py)
self.selected_imported = (
self._vp_imported_img_mouse_selection(px, py))
# Shift pressed in first tab? Toggle active tiles.
if ((self.tabWidget.currentIndex() == 0)
and (QApplication.keyboardModifiers() == Qt.ShiftModifier)):
if (self.vp_current_grid >= -2
and self.selected_grid is not None
and self.selected_tile is not None):
if self.busy:
user_reply = QMessageBox.question(self,
'Confirm tile activation',
'Stack acquisition in progress! Please confirm '
'that you want to activate/deactivate this tile. '
'The new selection will take effect after imaging '
'of the current slice is completed.',
QMessageBox.Ok | QMessageBox.Cancel)
if user_reply == QMessageBox.Ok:
new_tile_status = self.gm[
self.selected_grid].toggle_active_tile(
self.selected_tile)
if self.autofocus.tracking_mode == 1:
self.gm[
self.selected_grid][
self.selected_tile].autofocus_active ^= True
self._add_to_main_log(
f'CTRL: Tile {self.selected_grid}.'
f'{self.selected_tile}{new_tile_status}')
self.vp_update_after_active_tile_selection()
# Make sure folder exists:
self.main_controls_trigger.transmit(
'ADD TILE FOLDER')
else:
# If no acquisition in progress,
# first toggle current tile.
self.gm[self.selected_grid].toggle_active_tile(
self.selected_tile)
if self.autofocus.tracking_mode == 1:
self.gm[self.selected_grid][
self.selected_tile].autofocus_active ^= True
self.vp_draw()
# Then enter paint mode until mouse button released.
self.tile_paint_mode_active = True
# Check if Ctrl key is pressed -> Move OV
elif ((self.tabWidget.currentIndex() == 0)
and (QApplication.keyboardModifiers() == Qt.ControlModifier)
and self.vp_current_ov >= -2
and not self.busy):
if self.selected_ov is not None and self.selected_ov >= 0:
self.ov_drag_active = True
self.drag_origin = (px, py)
self.stage_pos_backup = (
self.ovm[self.selected_ov].centre_sx_sy)
# Check if Alt key is pressed -> Move grid
elif ((self.tabWidget.currentIndex() == 0)
and (QApplication.keyboardModifiers() == Qt.AltModifier)
and self.vp_current_grid >= -2
and not self.busy):
if self.selected_grid is not None and self.selected_grid >= 0:
self.grid_drag_active = True
self.drag_origin = px, py
# Save coordinates in case user wants to undo
self.stage_pos_backup = (
self.gm[self.selected_grid].origin_sx_sy)
# Check if Ctrl + Alt keys are pressed -> Move imported image
elif ((self.tabWidget.currentIndex() == 0)
and (QApplication.keyboardModifiers()
== (Qt.ControlModifier | Qt.AltModifier))):
<reponame>kaihami/GREPY<filename>GREPY_GUI.py<gh_stars>0
import wx
import os
import wx.lib.scrolledpanel as scrolled
from bs4 import BeautifulSoup
from urllib2 import urlopen
from threading import *
from multiprocessing import cpu_count, Pool
import datetime
from Bio.KEGG.REST import kegg_get
from Bio.Blast import NCBIXML
from numpy import std
from scipy.stats import linregress
from math import log
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
import collections
import itertools
##########################
class ACTextControl(wx.TextCtrl):
"""
A Textcontrol that accepts a list of choices at the beginning.
Choices are presented to the user based on string being entered.
If a string outside the choices list is entered, option may
be given for user to add it to list of choices.
match_at_start - Should only choices beginning with text be shown ?
add_option - Should user be able to add new choices
case_sensitive - Only case sensitive matches
https://github.com/RajaS/ACTextCtrl/blob/master/actextcontrol.py
"""
def __init__(self, parent, candidates=[], match_at_start=False,
add_option=False, case_sensitive=False, size=(600,-1)):
wx.TextCtrl.__init__(self, parent,wx.NewId(), style=wx.TE_PROCESS_ENTER, size = size)
self.all_candidates = candidates
self.match_at_start = match_at_start
self.add_option = add_option
self.case_sensitive = case_sensitive
self.max_candidates = 5 # maximum no. of candidates to show
self.select_candidates = []
self.popup = ACPopup(self)
self._set_bindings()
self._screenheight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
self._popdown = True # Does the popup go down from the textctrl ?
def _set_bindings(self):
"""
One place to setup all the bindings
"""
# text entry triggers update of the popup window
self.Bind(wx.EVT_TEXT, self._on_text, self)
self.Bind(wx.EVT_KEY_DOWN, self._on_key_down, self)
# loss of focus should hide the popup
self.Bind(wx.EVT_KILL_FOCUS, self._on_focus_loss)
self.Bind(wx.EVT_SET_FOCUS, self._on_focus)
    def _SetValue(self, value):
        """
        Directly calling SetValue() triggers a text event,
        which makes the popup appear.
        To avoid that, this wrapper calls ChangeValue() instead.
        """
        super(ACTextControl, self).ChangeValue(value)
def _on_text(self, event):
"""
On text entry in the textctrl,
Pop up the popup,
or update candidates if its already visible
"""
txt = self.GetValue()
# if txt is empty (after backspace), hide popup
if not txt:
            if self.popup.IsShown():  # IsShown is a method; the bare reference was always truthy
self.popup.Show(False)
event.Skip()
return
# select candidates
if self.match_at_start and self.case_sensitive:
self.select_candidates = [ch for ch in self.all_candidates
if ch.startswith(txt)]
if self.match_at_start and not self.case_sensitive:
self.select_candidates = [ch for ch in self.all_candidates
if ch.lower().startswith(txt.lower())]
elif self.case_sensitive and not self.match_at_start:
self.select_candidates = [ch for ch in self.all_candidates if txt in ch]
else:
self.select_candidates = [ch for ch in self.all_candidates if txt.lower() in ch.lower()]
if len(self.select_candidates) == 0:
if not self.add_option:
if self.popup.IsShown():
self.popup.Show(False)
else:
display = ['Add ' + txt]
self.popup._set_candidates(display, 'Add')
self._resize_popup(display, txt)
self._position_popup()
if not self.popup.IsShown():
self.popup.Show()
else:
self._show_popup(self.select_candidates, txt)
def _show_popup(self, candidates, txt):
# set up the popup and bring it on
self._resize_popup(candidates, txt)
self._position_popup()
candidates.sort()
if self._popdown:
# TODO: Allow custom ordering
self.popup._set_candidates(candidates, txt)
self.popup.candidatebox.SetSelection(0)
else:
candidates.reverse()
self.popup._set_candidates(candidates, txt)
self.popup.candidatebox.SetSelection(len(candidates) - 1)
if not self.popup.IsShown():
self.popup.Show()
def _on_focus_loss(self, event):
"""Close the popup when focus is lost"""
if self.popup.IsShown():
self.popup.Show(False)
def _on_focus(self, event):
"""
When focus is gained,
if empty, show all candidates,
else, show matches
"""
txt = self.GetValue()
if txt == '':
self.select_candidates = self.all_candidates
self._show_popup(self.all_candidates, '')
else:
self._on_text(event)
def _position_popup(self):
"""Calculate position for popup and
display it"""
left_x, upper_y = self.GetScreenPositionTuple()
_, height = self.GetSizeTuple()
popup_width, popup_height = self.popupsize
if upper_y + height + popup_height > self._screenheight:
self._popdown = False
self.popup.SetPosition((left_x, upper_y - popup_height))
else:
self._popdown = True
self.popup.SetPosition((left_x, upper_y + height))
def _resize_popup(self, candidates, entered_txt):
"""Calculate the size for the popup to
        accommodate the selected candidates"""
# Handle empty list (no matching candidates)
if len(candidates) == 0:
candidate_count = 3.5 # one line
longest = len(entered_txt) + 4 + 4 # 4 for 'Add '
else:
# additional 3 lines needed to show all candidates without scrollbar
candidate_count = min(self.max_candidates, len(candidates)) + 2.5
longest = max([len(candidate) for candidate in candidates]) + 4
charheight = self.popup.candidatebox.GetCharHeight()
#charwidth = self.popup.candidatebox.GetCharWidth()
self.popupsize = wx.Size(400, charheight * candidate_count)
self.popup.candidatebox.SetSize(self.popupsize)
self.popup.SetClientSize(self.popupsize)
def _on_key_down(self, event):
"""Handle key presses.
Special keys are handled appropriately.
For other keys, the event is skipped and allowed
to be caught by ontext event"""
skip = True
visible = self.popup.IsShown()
sel = self.popup.candidatebox.GetSelection()
# Escape key closes the popup if it is visible
if event.GetKeyCode() == wx.WXK_ESCAPE:
if visible:
self.popup.Show(False)
# Down key for navigation in list of candidates
        elif event.GetKeyCode() == wx.WXK_DOWN:
            if not visible:
                skip = False
            if sel + 1 < self.popup.candidatebox.GetItemCount():
                self.popup.candidatebox.SetSelection(sel + 1)
            else:
                skip = False
# Up key for navigation in list of candidates
        elif event.GetKeyCode() == wx.WXK_UP:
            if not visible:
                skip = False
            if sel > -1:
                self.popup.candidatebox.SetSelection(sel - 1)
            else:
                skip = False
# Enter - use current selection for text
elif event.GetKeyCode() == wx.WXK_RETURN:
if not visible:
# TODO: trigger event?
pass
# Add option is only displayed
elif len(self.select_candidates) == 0:
if self.popup.candidatebox.GetSelection() == 0:
self.all_candidates.append(self.GetValue())
self.popup.Show(False)
elif self.popup.candidatebox.GetSelection() == -1:
self.popup.Show(False)
elif self.popup.candidatebox.GetSelection() > -1:
self.SetValue(self.select_candidates[self.popup.candidatebox.GetSelection()])
self.SetInsertionPointEnd()
self.popup.Show(False)
# Tab - set selected choice as text
elif event.GetKeyCode() == wx.WXK_TAB:
if visible:
self._SetValue(self.select_candidates[self.popup.candidatebox.GetSelection()])
# set cursor at end of text
self.SetInsertionPointEnd()
skip = False
if skip:
event.Skip()
def get_choices(self):
"""Return the current choices.
Useful if choices have been added by the user"""
return self.all_candidates
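# Hedged usage sketch (not part of the original file): a minimal frame
# hosting an ACTextControl. The candidate list and frame title are
# illustrative; the function is defined here but never called.
def _actextcontrol_demo():
    app = wx.App(False)
    frame = wx.Frame(None, title="ACTextControl demo")
    ACTextControl(frame, candidates=["alpha", "beta", "gamma"],
                  match_at_start=True, add_option=True)
    frame.Show()
    app.MainLoop()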
class ACPopup(wx.PopupWindow):
"""
The popup that displays the candidates for
autocompleting the current text in the textctrl
"""
def __init__(self, parent):
wx.PopupWindow.__init__(self, parent)
self.candidatebox = wx.SimpleHtmlListBox(self, -1, choices=[])
self.SetSize((100, 100))
self.displayed_candidates = []
def _set_candidates(self, candidates, txt):
"""
Clear existing candidates and use the supplied candidates
Candidates is a list of strings.
"""
        # Candidates are re-rendered unconditionally (an earlier no-change
        # check here was a no-op), so the <b> highlighting always follows
        # the current text.
# Remove the current candidates
self.candidatebox.Clear()
# self.candidatebox.Append(['te<b>st</b>', 'te<b>st</b>'])
for ch in candidates:
self.candidatebox.Append(self._htmlformat(ch, txt))
self.displayed_candidates = candidates
def _htmlformat(self, text, substring):
"""
For displaying in the popup, format the text
to highlight the substring in html
"""
# empty substring
if len(substring) == 0:
return text
else:
return text.replace(substring, '<b>' + substring + '</b>', 1)
def threaded(fn):
def wrapper(*args, **kwargs):
Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
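# Hedged usage sketch (illustrative, not part of the original file): the
# decorator above runs the wrapped callable on a worker Thread and returns
# immediately, so the caller gets neither a return value nor exceptions.
def _threaded_demo():
    @threaded
    def slow_task(label):
        print("started %s" % label)  # runs off the GUI thread
    slow_task("job-1")  # returns at once; the work continues in background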
##########################
class GREPy(scrolled.ScrolledPanel):
def __init__(self, parent):
scrolled.ScrolledPanel.__init__(self, parent, size = (800,600))
self.SetBackgroundColour((175,224,230))
#setDict (a dict containing all information but no samples)
self.setlst = []
###HEAD###
self.topBox = wx.BoxSizer(wx.VERTICAL)
##1st sizer##
fgs = wx.FlexGridSizer(1, 2, 10,0)
###Project Name###
projectName = wx.StaticText(self, wx.NewId(), "Project Name")
fgs.Add(projectName, flag = wx.ALIGN_CENTER_HORIZONTAL, border = 7)
self.projectTextCtrl = wx.TextCtrl(self, wx.NewId(), "My_project", size = (200,-1))
fgs.Add(self.projectTextCtrl, flag = wx.ALIGN_CENTER_HORIZONTAL, border = 7)
fgs.Add((-1,-1))
self.setlst.append(self.projectTextCtrl)
###Genome faa file ###
fgs6 = wx.FlexGridSizer(2,0,0,0)
info = self.strains().keys()
label1 = wx.StaticText(self, -1, 'Matches anywhere in string')
ctrl1 = ACTextControl(self, candidates=info, add_option=False)
fgs6.Add(label1,0, wx.CENTER|wx.ALL)
fgs6.Add(ctrl1,0,wx.CENTER)
# keg_code = self.strains()[ctrl1]
self.setlst.append(ctrl1)
fgs2 = wx.FlexGridSizer(6,2,10,0)
self.protein_label = wx.StaticText(self, -1, 'Protein 1')
self.proteinTextCtrl = wx.TextCtrl(self, wx.NewId(), "", size=(200, -1))
fgs2.Add(self.protein_label, flag = wx.ALIGN_CENTRE_HORIZONTAL, border = 7)
fgs2.Add(self.proteinTextCtrl, flag = wx.CENTER, border = -1)
self.protein_label2 = wx.StaticText(self, -1, 'Protein 2')
self.proteinTextCtrl2 = wx.TextCtrl(self, wx.NewId(), "", size=(200, -1))
fgs2.Add(self.protein_label2, flag = wx.ALIGN_CENTRE_HORIZONTAL, border = 7)
fgs2.Add(self.proteinTextCtrl2, flag = wx.CENTER, border = -1)
self.setlst.append(self.proteinTextCtrl)
self.setlst.append(self.proteinTextCtrl2)
#fgs3 = wx.FlexGridSizer(2, 0, 0, 0)
#self.protein_fasta_label = wx.StaticText(self, -1, "or Protein Fasta 1")
#self.proteinTextCtrlFasta = wx.TextCtrl(self, wx.NewId(), "", size=(250, 100))
#fgs3.Add(self.proteinTextCtrlFasta, flag=wx.CENTER, border=-1)
#self.setlst.append(self.proteinTextCtrlFasta)
####
#Start Button
fgs5 = wx.FlexGridSizer(1,3,0,0)
fgs5.Add((100,-1))
self.startButton = wx.Button(self, label = "Start")
self.Bind(wx.EVT_BUTTON, self.StartAnalysis, self.startButton)
fgs5.Add(self.startButton, 0, wx.CENTER|wx.ALL, 5)
#create a log panel
fgs20 = wx.FlexGridSizer(1, 3, 0, 0)
self.log = wx.TextCtrl(self, -1, size=(600,200),style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL)
fgs20.Add(self.log,0,wx.CENTER|wx.ALL,5)
redir=RedirectText(self.log)
os.sys.stdout=redir
###
self.topBox.AddSpacer(10)
self.topBox.Add(fgs, flag= wx.EXPAND |wx.TOP |wx.RIGHT|wx.LEFT, border=25)
self.topBox.Add(fgs6, flag=wx.EXPAND | wx.LEFT | wx.TOP | wx.CENTER, border=25)
self.topBox.Add(fgs2, flag=wx.EXPAND | wx.LEFT | wx.TOP | wx.CENTER, border=25)
#self.topBox.Add(fgs3, flag=wx.EXPAND | wx.LEFT | wx.TOP | wx.CENTER, border=25)
#self.topBox.Add(fgs2, flag= wx.EXPAND|wx.LEFT|wx.TOP| wx.CENTER, border=25)
#self.topBox.Add(fgs3, flag= wx.EXPAND|wx.LEFT|wx.TOP| wx.CENTER, border=25)
#self.topBox.Add(self.fgs4, flag= wx.EXPAND|wx.LEFT|wx.TOP| wx.CENTER, border=25)
self.topBox.Add(fgs5, flag= wx.EXPAND|wx.LEFT|wx.TOP| wx.CENTER, border=25)
self.topBox.Add(fgs20, flag= wx.EXPAND|wx.LEFT|wx.TOP| wx.CENTER, border=25)
self.SetSizer(self.topBox)
self.topBox.Fit(self)
####
self.Layout()
####
    def strains(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        base_dir = os.path.join(dir_path, 'Organism_DB.txt')
        with open(base_dir, 'r') as fh:
            database = fh.read().split('\n')
        Database_hash = defaultdict(list)
        for line in database:
            if "#" not in line:
                if len(line.split("\t")) > 1:
                    organism_id = line.split("\t")[0]
                    strain = line.split("\t")[1]
                    Database_hash[strain] = organism_id
        return Database_hash
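    # Hedged note (format inferred from the parsing above, not from original
    # documentation): Organism_DB.txt is expected to hold tab-separated lines
    # mapping a KEGG-style organism code to a strain name, with '#' marking
    # comment lines, e.g.
    #   eco<TAB>Escherichia coli K-12 MG1655
    #   pae<TAB>Pseudomonas aeruginosa PAO1
    # strains() keys the dict by strain name, so the autocomplete control can
    # show strain names and resolve the organism code afterwards.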
@threaded
def StartAnalysis(self, event):
'''
NarL = PA3879
NarX = PA3878
AtpC = b3731
AtpG = b3733
'''
self.startButton.Disable()
#Get values from setlst
        #TODO: max gap (10%), min identity (default
new users to the chat
- `can_pin_messages` :`bool` Pass True, if the administrator can pin messages, supergroups only
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"user_id": user_id,
"is_anonymous": is_anonymous,
"can_manage_chat": can_manage_chat,
"can_post_messages": can_post_messages,
"can_edit_messages": can_edit_messages,
"can_delete_messages": can_delete_messages,
"can_manage_voice_chats": can_manage_voice_chats,
"can_restrict_members": can_restrict_members,
"can_promote_members": can_promote_members,
"can_change_info": can_change_info,
"can_invite_users": can_invite_users,
"can_pin_messages": can_pin_messages,
}
return self.response(self.sendRequest("promoteChatMember", data), bool)
def setChatAdministratorCustomTitle(self, chat_id: Union[int, str, ], user_id: int, custom_title: str):
"""Use this method to set a custom title for an administrator in a supergroup promoted by the bot. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#setchatadministratorcustomtitle)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername)
- `user_id` :`int` Unique identifier of the target user
- `custom_title` :`str` New custom title for the administrator; 0-16 characters, emoji are not allowed
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"user_id": user_id,
"custom_title": custom_title,
}
return self.response(self.sendRequest("setChatAdministratorCustomTitle", data), bool)
def banChatSenderChat(self, chat_id: Union[int, str, ], sender_chat_id: int, until_date: int = None):
"""Use this method to ban a channel chat in a supergroup or a channel. The owner of the chat will not be able to send messages and join live streams on behalf of the chat, unless it is unbanned first. The bot must be an administrator in the supergroup or channel for this to work and must have the appropriate administrator rights. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#banchatsenderchat)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `sender_chat_id` :`int` Unique identifier of the target sender chat
- `until_date` :`int` Date when the sender chat will be unbanned, unix time. If the chat is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever.
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"sender_chat_id": sender_chat_id,
"until_date": until_date,
}
return self.response(self.sendRequest("banChatSenderChat", data), bool)
def unbanChatSenderChat(self, chat_id: Union[int, str, ], sender_chat_id: int):
"""Use this method to unban a previously banned channel chat in a supergroup or channel. The bot must be an administrator for this to work and must have the appropriate administrator rights. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#unbanchatsenderchat)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `sender_chat_id` :`int` Unique identifier of the target sender chat
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"sender_chat_id": sender_chat_id,
}
return self.response(self.sendRequest("unbanChatSenderChat", data), bool)
def setChatPermissions(self, chat_id: Union[int, str, ], permissions: types.ChatPermissions):
"""Use this method to set default chat permissions for all members. The bot must be an administrator in the group or a supergroup for this to work and must have the can_restrict_members administrator rights. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#setchatpermissions)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername)
- `permissions` :`types.ChatPermissions` A JSON-serialized object for new default chat permissions
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"permissions": helper.toDict(permissions, True),
}
return self.response(self.sendRequest("setChatPermissions", data), bool)
def exportChatInviteLink(self, chat_id: Union[int, str, ]):
"""Use this method to generate a new primary invite link for a chat; any previously generated primary link is revoked. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the new invite link as String on success. [See Telegram API](https://core.telegram.org/bots/api#exportchatinvitelink)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
**Returns:**
- A `tuple`, on success a `str` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
}
return self.response(self.sendRequest("exportChatInviteLink", data), str)
def createChatInviteLink(self, chat_id: Union[int, str, ], name: str = None, expire_date: int = None, member_limit: int = None, creates_join_request: bool = None):
"""Use this method to create an additional invite link for a chat. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. The link can be revoked using the method revokeChatInviteLink. Returns the new invite link as ChatInviteLink object. [See Telegram API](https://core.telegram.org/bots/api#createchatinvitelink)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `name` :`str` Invite link name; 0-32 characters
- `expire_date` :`int` Point in time (Unix timestamp) when the link will expire
- `member_limit` :`int` Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999
- `creates_join_request` :`bool` True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified
**Returns:**
- A `tuple`, on success a `types.ChatInviteLink` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"name": name,
"expire_date": expire_date,
"member_limit": member_limit,
"creates_join_request": creates_join_request,
}
return self.response(self.sendRequest("createChatInviteLink", data), types.ChatInviteLink)
def editChatInviteLink(self, chat_id: Union[int, str, ], invite_link: str, name: str = None, expire_date: int = None, member_limit: int = None, creates_join_request: bool = None):
"""Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the edited invite link as a ChatInviteLink object. [See Telegram API](https://core.telegram.org/bots/api#editchatinvitelink)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `invite_link` :`str` The invite link to edit
- `name` :`str` Invite link name; 0-32 characters
- `expire_date` :`int` Point in time (Unix timestamp) when the link will expire
- `member_limit` :`int` Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999
- `creates_join_request` :`bool` True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified
**Returns:**
- A `tuple`, on success a `types.ChatInviteLink` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"invite_link": invite_link,
"name": name,
"expire_date": expire_date,
"member_limit": member_limit,
"creates_join_request": creates_join_request,
}
return self.response(self.sendRequest("editChatInviteLink", data), types.ChatInviteLink)
def revokeChatInviteLink(self, chat_id: Union[int, str, ], invite_link: str):
"""Use this method to revoke an invite link created by the bot. If the primary link is revoked, a new link is automatically generated. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the revoked invite link as ChatInviteLink object. [See Telegram API](https://core.telegram.org/bots/api#revokechatinvitelink)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier of the target chat or username of the target channel (in the format @channelusername)
- `invite_link` :`str` The invite link to revoke
**Returns:**
- A `tuple`, on success a `types.ChatInviteLink` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"invite_link": invite_link,
}
return self.response(self.sendRequest("revokeChatInviteLink", data), types.ChatInviteLink)
def approveChatJoinRequest(self, chat_id: Union[int, str, ], user_id: int):
"""Use this method to approve a chat join request. The | |
<reponame>cmusatyalab/nephele
#!/usr/bin/env python
import sys
import os
import ast
import json
import math
from collections import OrderedDict
from .configuration import VMOverlayCreationMode
from operator import itemgetter
import logging
LOG = logging.getLogger(__name__)
_process_controller = None
stage_names = ["CreateMemoryDeltalist",
"CreateDiskDeltalist",
"DeltaDedup",
"CompressProc"]
BIT_PER_BLOCK = (4096+11)*8  # per-block cost in bits: 4 KiB payload plus (presumably) 11 bytes of metadata
class MigrationMode(object):
def __init__(self):
self.workload = ''
self.mode = dict()
self.stage_size_in = dict()
self.stage_size_out = dict()
self.stage_size_ratio = dict()
self.stage_time = dict()
self.block = dict()
self.block_size_in = dict()
self.block_size_ratio = dict()
self.block_size_out = dict()
self.block_time = dict()
def get_mode_id(self):
sorted_key = sorted(self.mode.keys())
mode_str = list()
for key in sorted_key:
            # Ignore the number of processes per stage: it is only an upper
            # bound on the thread count, and it is not considered when
            # computing P and R, because those are measured per block.
if key in VMOverlayCreationMode.VARYING_PARAMETERS:
value = self.mode[key]
mode_str.append("%s:%s" % (key, value))
return "|".join(mode_str)
def __repr__(self):
return "%s(%s)" % (self.workload, self.get_mode_id())
@staticmethod
def get_total_P(p_dict, alpha):
# calculate using p of all stages --> use alpha
# calculate using time at all stage --> do not use alpha
total_P_from_each_stage = \
(p_dict['CreateMemoryDeltalist']*alpha + p_dict['CreateDiskDeltalist']*(1-alpha))\
+ p_dict['DeltaDedup']\
+ p_dict['CompressProc']
return total_P_from_each_stage
@staticmethod
def get_total_R(r_dict, alpha):
# weight using input size
total_R_from_each_stage = \
(r_dict['CreateMemoryDeltalist']*alpha + r_dict['CreateDiskDeltalist']*(1-alpha))\
* r_dict['DeltaDedup']\
* r_dict['CompressProc']
return total_R_from_each_stage
@staticmethod
def get_system_throughput(num_cores, total_p, total_r):
#LOG.debug("num_core\t%d" % num_cores)
        # 0.7 is (presumably) an empirical CPU-utilization factor.
        system_block_per_sec = (1 / total_p * 1000) * num_cores * 0.7
        system_in_mbps = system_block_per_sec * BIT_PER_BLOCK / 1024.0 / 1024
        system_out_mbps = system_in_mbps * total_r  # mbps
return system_block_per_sec, system_in_mbps, system_out_mbps
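    # Hedged worked example (numbers illustrative): with alpha = 0.5 and
    # per-block times {mem: 0.4, disk: 0.6, dedup: 0.1, comp: 0.5} ms,
    # get_total_P gives 0.5*0.4 + 0.5*0.6 + 0.1 + 0.5 = 1.1 ms/block.
    # With size ratios {mem: 0.8, disk: 0.9, dedup: 0.5, comp: 0.4},
    # get_total_R gives (0.8*0.5 + 0.9*0.5) * 0.5 * 0.4 = 0.17.
    # On 4 cores, get_system_throughput then yields
    #   block/s = (1/1.1 * 1000) * 4 * 0.7 ~= 2545
    #   in      ~= 2545 * BIT_PER_BLOCK / 2**20 ~= 79.8 Mbit/s
    #   out     ~= 79.8 * 0.17 ~= 13.6 Mbit/s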
@staticmethod
def mode_diff(mode1, mode2):
set_mode1, set_mode2 = set(mode1.keys()), set(mode2.keys())
set_mode1 = set_mode1.intersection(
set(VMOverlayCreationMode.VARYING_PARAMETERS))
set_mode2 = set_mode2.intersection(
set(VMOverlayCreationMode.VARYING_PARAMETERS))
intersect = set_mode1.intersection(set_mode2)
changed_keys = [o for o in intersect if mode1[o] != mode2[o]]
        changed_dict = dict()
        for key in changed_keys:
            changed_dict[key] = mode2[key]
        return changed_dict
@staticmethod
def mode_diff_str(mode1, mode2):
set_mode1, set_mode2 = set(mode1.keys()), set(mode2.keys())
set_mode1 = set_mode1.intersection(
set(VMOverlayCreationMode.VARYING_PARAMETERS))
set_mode2 = set_mode2.intersection(
set(VMOverlayCreationMode.VARYING_PARAMETERS))
intersect = set_mode1.intersection(set_mode2)
changed_keys = [o for o in intersect if mode1[o] != mode2[o]]
changed_list = list()
for key in changed_keys:
value1 = mode1[key]
value2 = mode2[key]
changed = "%s: %s->%s" % (key, value1, value2)
changed_list.append(changed)
changed_str = ", ".join(changed_list)
return changed_str
@staticmethod
def to_file(exp, fd):
fd.write(exp.workload.strip() + "\n")
fd.write(json.dumps(exp.mode) + "\n")
fd.write(json.dumps(exp.stage_size_in) + "\n")
fd.write(json.dumps(exp.stage_size_out) + "\n")
fd.write(json.dumps(exp.stage_size_ratio) + "\n")
fd.write(json.dumps(exp.stage_time) + "\n")
fd.write(json.dumps(exp.block) + "\n")
fd.write(json.dumps(exp.block_size_in) + "\n")
fd.write(json.dumps(exp.block_size_out) + "\n")
fd.write(json.dumps(exp.block_size_ratio) + "\n")
fd.write(json.dumps(exp.block_time) + "\n")
@staticmethod
def from_file(fd):
exp = MigrationMode()
exp.workload = fd.readline()
exp.mode = json.loads(fd.readline())
exp.stage_size_in = json.loads(fd.readline())
exp.stage_size_out = json.loads(fd.readline())
exp.stage_size_ratio = json.loads(fd.readline())
exp.stage_time = json.loads(fd.readline())
exp.block = json.loads(fd.readline())
exp.block_size_in = json.loads(fd.readline())
exp.block_size_out = json.loads(fd.readline())
exp.block_size_ratio = json.loads(fd.readline())
exp.block_time = json.loads(fd.readline())
memory_in_size = (exp.block_size_in['CreateMemoryDeltalist'])
disk_in_size = (exp.block_size_in['CreateDiskDeltalist'])
alpha = float(memory_in_size)/(memory_in_size+disk_in_size)
exp.total_p = MigrationMode.get_total_P(exp.block_time, alpha)
exp.total_r = MigrationMode.get_total_R(exp.block_size_ratio, alpha)
return exp
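    # Hedged note on the layout implied by to_file()/from_file(): each record
    # is 11 lines -- the workload name followed by ten json.dumps() lines
    # (mode, stage_size_in/out/ratio, stage_time, block,
    # block_size_in/out/ratio, block_time). Reading past the last record
    # makes json.loads() raise ValueError, which ModeProfile.load_from_file()
    # below uses as its end-of-file signal.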
class ModeProfileError(Exception):
pass
class ModeProfile(object):
MATCHING_BEST_EFFORT = 1
MATCHING_ONE = 2
MATCHING_MULTIPLE = 3
def __init__(self, overlay_mode_list):
self.overlay_mode_list = overlay_mode_list
def predict_new_mode(self, cur_mode, cur_p, cur_r,
cur_block_size, network_bw):
overlay_mode = ModeProfile.find_same_mode(
self.overlay_mode_list,
cur_mode)
if overlay_mode is None:
msg = "Cannot find matching mode : %s" % str(
cur_mode.get_mode_id())
raise ModeProfileError(msg)
item = self.find_matching_mode(overlay_mode,
cur_mode,
cur_p, cur_r, cur_block_size,
network_bw)
return item
@staticmethod
def find_same_mode(overlay_mode_list, in_mode):
for overlay_mode in overlay_mode_list:
id1 = overlay_mode.get_mode_id()
id2 = in_mode.get_mode_id()
if id1 == id2:
return overlay_mode
else:
pass
# print "not identical: %s" %
# MigrationMode.mode_diff_str(in_mode.__dict__,
# overlay_mode.mode)
return None
def find_matching_mode(self, profiled_mode_obj, cur_mode, cur_p,
cur_r, cur_block_size, network_bw):
# get scaling factor between current workload and profiled data
profiled_mode_total_p = profiled_mode_obj.total_p
profiled_mode_total_r = profiled_mode_obj.total_r
memory_in_size = (cur_block_size['CreateMemoryDeltalist'])
disk_in_size = (cur_block_size['CreateDiskDeltalist'])
alpha = float(memory_in_size)/(memory_in_size+disk_in_size)
cur_total_p = profiled_mode_obj.get_total_P(cur_p, alpha)
cur_total_r = profiled_mode_obj.get_total_R(cur_r, alpha)
scale_p = cur_total_p/profiled_mode_total_p
scale_r = cur_total_r/profiled_mode_total_r
scaled_mode_list, current_block_per_sec = self.list_scaled_modes(
cur_mode, scale_p, scale_r, network_bw)
sorted_mode_list = sorted(scaled_mode_list, key=itemgetter(1),
reverse=True)
selected_item = sorted_mode_list[0]
selected_mode_obj = selected_item[0]
selected_block_per_sec = selected_item[1]
if selected_block_per_sec <= current_block_per_sec:
return None
else:
return selected_item
def list_scaled_modes(self, cur_mode, scale_p, scale_r, network_bw):
scaled_mode_list = list()
for each_mode in self.overlay_mode_list:
each_p = each_mode.total_p
each_r = each_mode.total_r
scaled_each_p = each_p * scale_p
scaled_each_r = each_r * scale_r
num_cores = VMOverlayCreationMode.get_num_cores()
system_block_per_sec, system_in_mbps, system_out_mbps = MigrationMode.get_system_throughput(
num_cores, scaled_each_p, scaled_each_r)
# find the bottleneck
network_block_per_sec = network_bw*1024 * \
1024/(scaled_each_r*BIT_PER_BLOCK)
bottleneck = None
if network_bw < system_out_mbps:
bottleneck = "network"
actual_block_per_sec = network_block_per_sec
else:
bottleneck = "compute"
actual_block_per_sec = system_block_per_sec
id1 = each_mode.get_mode_id()
id2 = cur_mode.get_mode_id()
if id1 == id2:
current_block_per_sec = actual_block_per_sec
data = (
each_mode,
actual_block_per_sec,
(bottleneck,
system_block_per_sec,
system_in_mbps,
system_out_mbps,
network_block_per_sec,
network_bw))
scaled_mode_list.append(data)
return scaled_mode_list, current_block_per_sec
    def show_relative_ratio(self, input_mode):
        pivot_mode = self.find_same_mode(self.overlay_mode_list, input_mode)
        comp_list = dict()
        # get_total_P/get_total_R require a memory/disk weighting factor;
        # derive alpha from the pivot's input sizes, as from_file() does
        # (the original calls omitted this required argument).
        memory_in_size = pivot_mode.block_size_in['CreateMemoryDeltalist']
        disk_in_size = pivot_mode.block_size_in['CreateDiskDeltalist']
        alpha = float(memory_in_size) / (memory_in_size + disk_in_size)
        pivot_p = MigrationMode.get_total_P(pivot_mode.block_time, alpha)
        pivot_r = MigrationMode.get_total_R(pivot_mode.block_size_ratio, alpha)
        for index, other_mode in enumerate(self.overlay_mode_list):
            other_p = MigrationMode.get_total_P(other_mode.block_time, alpha)
            other_r = MigrationMode.get_total_R(other_mode.block_size_ratio, alpha)
            ratio_p = round(other_p / pivot_p, 4)
            ratio_r = round(other_r / pivot_r, 4)
            mode_diff_str = MigrationMode.mode_diff_str(
                pivot_mode.mode,
                other_mode.mode)
            if len(mode_diff_str) == 0:
                mode_diff_str = "original"
            comp_list[other_mode.get_mode_id()] = (ratio_p, ratio_r)
            print "%d: %s\t(%s %s)/(%s %s) --> (%s, %s)" % (
                index, mode_diff_str, pivot_p, pivot_r,
                other_p, other_r, ratio_p, ratio_r)
        return comp_list
@staticmethod
def load_from_file(profile_path):
exp_list = list()
try:
with open(profile_path, "r") as fd:
while True:
exp = MigrationMode.from_file(fd)
exp_list.append(exp)
except ValueError as e:
pass
return ModeProfile(exp_list)
@staticmethod
def save_to_file(profile_path, exp_list):
with open(profile_path, "w+") as fd:
for each_exp in exp_list:
MigrationMode.to_file(each_exp, fd)
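# Hedged usage sketch (not part of the original module; the path is
# illustrative): records serialized with ModeProfile.save_to_file are read
# back until EOF by ModeProfile.load_from_file.
def _profile_roundtrip_demo(exp_list):
    path = "/tmp/mode_profile.txt"            # hypothetical location
    ModeProfile.save_to_file(path, exp_list)  # one 11-line record per mode
    profile = ModeProfile.load_from_file(path)
    return len(profile.overlay_mode_list) == len(exp_list)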
def parse_each_experiment(lines):  # renamed from 'experiement' (typo)
# get configuration
config_lines = ""
is_start_config_line = False
workload = lines[0].split(" ")[-1]
for line in lines[1:]:
if line.find("* Overlay creation mode start") != -1:
is_start_config_line = True
continue
if is_start_config_line:
config_lines += line
if line.find("}") != -1:
break
config_dict = ast.literal_eval(config_lines)
# filter out only profiling log
profile_lines = list()
migration_total_time = 0
migration_downtime = 0
for line in lines:
# see only DEBUG message
if line.find("DEBUG") == -1:
continue
if line.find("profiling") != -1:
log = line.split("profiling")[1].strip()
profile_lines.append(log)
elif line.find("Time for finishing transferring") != -1:
log = line.split(":")[-1]
migration_total_time = float(log.strip())
elif line.find("Finish migration") != -1:
log = line.split(":")[-1]
migration_total_time = float(log.strip())
elif line.find("migration downtime") != -1:
log = line.split(":")[-1]
migration_downtime = float(log.strip())
# process filtered log data
exp = MigrationMode()
setattr(exp, 'workload', os.path.basename(workload))
setattr(exp, 'migration_total_time', migration_total_time)
setattr(exp, 'migration_downtime', migration_downtime)
setattr(exp, 'mode', config_dict)
setattr(exp, 'stage_size_in', dict.fromkeys(stage_names, 0))
setattr(exp, 'stage_size_out', dict.fromkeys(stage_names, 0))
setattr(exp, 'stage_size_ratio', dict.fromkeys(stage_names, 0))
setattr(exp, 'stage_time', dict.fromkeys(stage_names, 0))
setattr(exp, 'block', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_size_in', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_size_ratio', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_size_out', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_time', dict.fromkeys(stage_names, 0))
for line in profile_lines:
log = line.split("\t")
stage_name = log[0]
profile_type = str(log[1])
if stage_name not in stage_names:
continue
if profile_type == "size":
in_size = long(log[2])
out_size = long(log[3])
ratio = float(log[4])
exp.stage_size_in[stage_name] = in_size
exp.stage_size_out[stage_name] = out_size
exp.stage_size_ratio[stage_name] = ratio
if profile_type == "block-size":
in_size = float(log[2])
out_size = float(log[3])
block_count = log[4]
exp.block[stage_name] = block_count
exp.block_size_in[stage_name] = in_size
exp.block_size_out[stage_name] = out_size
exp.block_size_ratio[stage_name] = float(out_size)/in_size
if profile_type == "time":
duration = float(log[-1])
exp.stage_time[stage_name] = duration
if profile_type == "block-time":
duration = round(float(log[-1])*1000, 6)
exp.block_time[stage_name] = duration
return exp
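# Hedged note (format inferred from parse_each_experiment above, not from
# original documentation): profiling lines are tab-separated after the
# "profiling" marker,
#   <stage>\t<type>\t<fields...>
# e.g. "CompressProc\tsize\t104857600\t41943040\t0.4" for in/out bytes and
# ratio, or "CompressProc\tblock-time\t...\t0.00048" where the trailing
# per-block seconds are converted to milliseconds.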
def parsing(inputfile):
lines = open(inputfile, "r").read().split("\n")
test_list = list()
new_log = list()
for line in lines:
if line.find("==========================================") != -1:
if len(new_log) > 0:
test_list.append(new_log)
new_log = list()
else:
new_log.append(line)
test_list.append(new_log)
test_ret_list = list()
for each_exp_log in test_list:
        test_ret = parse_each_experiment(each_exp_log)
test_ret_list.append(test_ret)
return test_ret_list
def profile_each_exp(each_exp_dict):
# total execution time from processing time
pass
def _split_experiment(test_ret_list):
moped_exps = list()
fluid_exps = list()
speech_exps = list()
face_exps = list()
mar_exps = list()
for each_exp in test_ret_list:
if each_exp.workload.find("moped") != -1:
moped_exps.append(each_exp)
elif each_exp.workload.find("fluid") != -1:
fluid_exps.append(each_exp)
elif each_exp.workload.find("face") != -1:
face_exps.append(each_exp)
elif each_exp.workload.find("mar") != -1:
mar_exps.append(each_exp)
elif each_exp.workload.find("speech") != -1:
speech_exps.append(each_exp)
else:
msg = "Invalid workload %s" % each_exp.workload
            # Raise instead of printing and exiting, so callers can handle it
            # (the original raise was unreachable after sys.exit).
            raise ModeProfileError(msg)
return moped_exps, fluid_exps, face_exps, mar_exps, speech_exps
def select_mediam_exp(exps):
# sort by compression algorithm gzip 1, .., gzip9, .., lzma1, .., lzma9
def compare_comp_algorithm(a):
d = {"xdelta3": 3,
"bsdiff": 4,
"xor": 2,
"none": 1}
return (d[a.mode['DISK_DIFF_ALGORITHM']], -
a.mode['COMPRESSION_ALGORITHM_TYPE'], a.mode['COMPRESSION_ALGORITHM_SPEED'])
selected_exp_list = list()
exps.sort(key=compare_comp_algorithm)
result_dict = OrderedDict()
for each_exp in exps:
in_data_size = each_exp.stage_size_in[
'CreateMemoryDeltalist'] + each_exp.stage_size_in['CreateDiskDeltalist']
in_data_disk = each_exp.stage_size_in['CreateDiskDeltalist']
in_data_mem = each_exp.stage_size_in['CreateMemoryDeltalist']
alpha = float(in_data_mem)/in_data_size
total_p = MigrationMode.get_total_P(each_exp.block_time, alpha)
total_r = MigrationMode.get_total_R(each_exp.block_size_ratio, alpha)
out_data_size = each_exp.stage_size_out['CompressProc']
duration = each_exp.migration_total_time
est_duration1 = each_exp.stage_time['CreateMemoryDeltalist'] + each_exp.stage_time[
'CreateDiskDeltalist'] + each_exp.stage_time['DeltaDedup'] + each_exp.stage_time['CompressProc']
key = "%s,%d,%d" % (
each_exp.mode['DISK_DIFF_ALGORITHM'], each_exp.mode
['COMPRESSION_ALGORITHM_TYPE'], each_exp.mode
['COMPRESSION_ALGORITHM_SPEED'])
value = (
in_data_size,
out_data_size,
duration,
8 *
float(out_data_size) /
1024.0 /
parent = self)
""" Teacher Present Assignment Error """
if retrieveDataEntries[8] == 0:
teacherPresMessage = wx.MessageBox('Child present but no teacher presence assigned',\
caption = 'Error on Teacher Presence Assignment:',\
style = wx.OK, parent = self)
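        # Hedged reference (indices inferred from the checks in this method,
        # not from original documentation). retrieveDataEntries appears to be:
        #   [7]  behaviour (1 solitary, 2 parallel, 3 social, 4 teacher-oriented)
        #   [8]  teacher presence   [9]  teacher role
        #   [10] task code (18 = onlooking)
        #   [11] target affect      [12] target direction  [13] target intensity
        #   [14] peer 1             [15] peer 1 affect     [16] peer 1 direction
        #   [17] peer 1 intensity   [18] peer 2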
""" Task Assignment Error """
if retrieveDataEntries[10] == 0:
taskMessage = wx.MessageBox('Child present but no task assigned',\
caption = 'Error on Task Assignment:',\
style = wx.OK, parent = self)
if retrieveDataEntries[10] == 1 \
or retrieveDataEntries[10] == 7\
or retrieveDataEntries[10] == 10\
or retrieveDataEntries[10] == 11\
or retrieveDataEntries[10] == 22:
if retrieveDataEntries[7] < 3:
taskMessage = wx.MessageBox('Task: Social or Teach Orient',\
caption = 'Social or Teacher Orient:',\
style = wx.OK, parent = self)
""" Affect & Intensity Assignment """
if retrieveDataEntries[11] == 0:
affectMessage = wx.MessageBox('Child present but no affect assigned',\
caption = 'Error on Affect Assignment:',\
style = wx.OK, parent = self)
if retrieveDataEntries[11] > 0: # affect
if retrieveDataEntries[11] == 1: # affect== Pos
if retrieveDataEntries[13] != 2 and \
retrieveDataEntries[13] != 3 and \
retrieveDataEntries[13] != 4: # intensity
intensityRangeMessage = wx.MessageBox('Affect present but intensity is out of range',\
caption = 'Error #1 on Intensity Assignment:',\
style = wx.OK, parent = self)
if retrieveDataEntries[11] == 2: # affect== Neutral
self.affectIntenseList.SetSelection(1)
if retrieveDataEntries[11] == 3: # affect== Negative
if retrieveDataEntries[13] != 2 and \
retrieveDataEntries[13] != 3 and \
retrieveDataEntries[13] != 4: # intensity
intensityRangeMessage = wx.MessageBox('Affect present but intensity is out of range',\
caption = 'Error #2 on Intensity Assignment:',\
style = wx.OK, parent = self)
if retrieveDataEntries[11] == 4: # affect== Unseen
self.affectIntenseList.SetSelection(5)
""" Solitary or Parallel: No Direction """
if retrieveDataEntries[7] == 1:
self.peer1Direction.SetSelection(0)
self.affectDirectList.SetSelection(0)
if retrieveDataEntries[10] != 18:
self.peer1.SetSelection(0)
self.peer2.SetSelection(0)
self.peer3.SetSelection(0)
self.peer4.SetSelection(0)
self.peer5.SetSelection(0)
""" No Peer Assignment Error """
if retrieveDataEntries[14] == 0: # no peer 1
""" Onlooking Error """
if retrieveDataEntries[7] == 1 and retrieveDataEntries[10] == 18: #solitary & onlook
onlookNoPeerMessage = wx.MessageBox('Onlooking but no peer selected',\
caption = 'Onlooking But No Peer: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[7] == 2:
parallelNoPeerMessage = wx.MessageBox('Parallel but no peer selected',\
caption = 'Parallel But No Peer: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[7] == 3: # beh == social
socialNoPeerMessage = wx.MessageBox('Social play but no peer selected',\
caption = 'Social But No Peer: ',\
style = wx.OK, parent = self)
""" Parallel with Peer: Direction """
if retrieveDataEntries[14] != 0: # peer
if retrieveDataEntries[7] == 2: # beh == parallel
self.peer1Direction.SetSelection(0)
self.affectDirectList.SetSelection(0)
""" Social But No Peer """
if retrieveDataEntries[14] != 0: # peer
if retrieveDataEntries[18] == 0:
if retrieveDataEntries[7] == 3: # beh == social
if retrieveDataEntries[12] == 0:
teacherRoleMessage = wx.MessageBox('Social play but no direction given',\
caption = 'Social But No Target Direction: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[14] == 0:
teacherRoleMessage = wx.MessageBox('Social play but no direction given',\
caption = 'Social But No Peer1 Direction: ',\
style = wx.OK, parent = self)
""" Peer2 so No Peer1 Direction """
if retrieveDataEntries[7] == 3:
if retrieveDataEntries[14] != 0 and retrieveDataEntries[18] != 0:
# Social with Peer 1 & Peer 2
self.affectDirectList.SetSelection(0)
self.peer1Affect.SetSelection(0)
self.peer1Direction.SetSelection(0)
self.peer1Intensity.SetSelection(0)
""" No Direction for Target & Peer with Parallel """
if retrieveDataEntries[7] == 2: # Parallel
if retrieveDataEntries[18] == 0: # No peer 2
if retrieveDataEntries[14] != 0: # Peer 1
self.affectDirectList.SetSelection(0)
self.peer1Direction.SetSelection(0)
if retrieveDataEntries[15] == 0:
peer1affectMessage = wx.MessageBox('Need Peer1 Affect',\
caption = 'Provide Peer1 Affect:',\
style = wx.OK, parent = self)
if retrieveDataEntries[17] == 0:
peer1affectMessage = wx.MessageBox('Need Peer1 Intensity',\
caption = 'Provide Peer1 Intensity:',\
style = wx.OK, parent = self)
### added 1/1/08 ### #modified for no peer 2 on 1/13/08
""" No Affect or Intensity Assignment Error """
if retrieveDataEntries[7] == 1: # beh = solitary
# onlooking; no peer 2
if retrieveDataEntries[10] == 18 and retrieveDataEntries[18] == 0:
#assumes that peer 1 is present; error caught above
if retrieveDataEntries[15] == 0: # Peer 1 affect
socialPeerAffectMessage = wx.MessageBox('Onlooking: Need Affect: Peer 1',\
caption = 'No Affect for Peer 1 Recorded:',\
style = wx.OK, parent = self)
if retrieveDataEntries[17] == 0: # Peer 1 intensity
socialPeerIntensityMessage = wx.MessageBox('Onlooking: Need Intensity: Peer 1',\
caption = 'No Intensity for Peer 1 Recorded:',\
style = wx.OK, parent = self)
#################################################################
""" No Affect or Intensity Assignment Error """
if retrieveDataEntries[7] == 3: # beh = social
if retrieveDataEntries[18] == 0: # peer 2 = none
if retrieveDataEntries[14] != 0: # Peer 1
if retrieveDataEntries[12] == 0: # Target needs Direction
affectT_DirectMessage = wx.MessageBox('Need Direction: Target',\
caption = 'No Direction for Target:',\
style = wx.OK, parent = self)
if retrieveDataEntries[15] == 0: # Peer 1 affect
socialPeerAffectMessage = wx.MessageBox('Need Affect: Peer 1',\
caption = 'No Affect for Peer 1 Recorded:',\
style = wx.OK, parent = self)
if retrieveDataEntries[16] == 0: # Peer 1 direction
socialPeerAffectMessage = wx.MessageBox('Need Direction: Peer 1',\
caption = 'No Direction for Peer 1 Recorded:',\
style = wx.OK, parent = self)
if retrieveDataEntries[17] == 0: # Peer 1 intensity
socialPeerIntensityMessage = wx.MessageBox('Need Intensity: Peer 1',\
caption = 'No Intensity for Peer 1 Recorded:',\
style = wx.OK, parent = self)
""" Teacher Role """
if retrieveDataEntries[7] == 4: # Teacher Oriented
self.peer1Direction.SetSelection(0)
if retrieveDataEntries[9] == 0:
teacherRoleMessage = wx.MessageBox('Teacher Role',\
caption = 'Teacher Needs Role Assignment: ',\
style = wx.OK, parent = self)
"""if retrieveDataEntries[12] == 0:
affectDirectionMessage = wx.MessageBox('T_Direction Assignment',\
caption = 'T_Direction Needed: ',\
style = wx.OK, parent = self)"""
""" Teacher Not Involved """
if retrieveDataEntries[7] != 0 and retrieveDataEntries[7] != 4:
self.ifTeacher.SetSelection(0)
""" Peer 2 Consequences """
""" Affect Peer 1 Restrictions """
if retrieveDataEntries[15] == 4: # peer1 affect = unseen
self.peer1Intensity.SetSelection(5) # unseen intensity
if retrieveDataEntries[15] == 2: # peer1 affect = neutral
self.peer1Intensity.SetSelection(1) # netural none
if retrieveDataEntries[15] == 1 or retrieveDataEntries[15] == 3:
if retrieveDataEntries[17] == 5 or retrieveDataEntries[17] < 2:
intensityPeer1Message = wx.MessageBox('Intensity Adjustment: Peer1',\
caption = 'Intensity Needs Proper Assignment: ',\
style = wx.OK, parent = self)
""" Target Direction/Peer1 Direction """
if retrieveDataEntries[12] == 0 and retrieveDataEntries[7] == 4:
if retrieveDataEntries[10] != 18:
targetDirectTeachMessage = wx.MessageBox('Target Direction Error: Teacher',\
caption = 'Assign Target Direction: Teacher: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[12] != 0 and retrieveDataEntries[7] == 4:
if retrieveDataEntries[10] == 18:
self.affectDirectList.SetSelection(0)
if retrieveDataEntries[14] != 0 and retrieveDataEntries[18] == 0:
if retrieveDataEntries[7] != 1:
if retrieveDataEntries[15] == 0:
peer1DirectMessage = wx.MessageBox('Peer 1 Affect Error:',\
caption = 'Peer 1 Affect: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[17] == 0:
peer1DirectMessage = wx.MessageBox('Peer 1 Intensity Error:',\
caption = 'Peer 1 Intensity: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[12] == 1 and retrieveDataEntries[16] == 1:
sameDirectMessage = wx.MessageBox('Directions are Same Error:',\
caption = 'Same Target & Peer Direction: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[12] == 2 and retrieveDataEntries[16] == 2:
sameDirectMessage = wx.MessageBox('Directions are Same Error:',\
caption = 'Same Target & Peer Direction: ',\
style = wx.OK, parent = self)
if retrieveDataEntries[12] == 3:
self.peer1Direction.SetSelection(3)
if retrieveDataEntries[16] == 3:
self.affectDirectList.SetSelection(3)
if retrieveDataEntries[14] != 0 and retrieveDataEntries[18] != 0:
self.peer1Intensity.SetSelection(0)
self.peer1Direction.SetSelection(0)
self.peer1Affect.SetSelection(0)
if retrieveDataEntries[7] != 4:
self.affectDirectList.SetSelection(0)
""" Peer1 Affect Unseen; Intensity unseen """
if retrieveDataEntries[15] == 4: # Affect Peer1 Unseen
self.peer1Intensity.SetSelection(5) # Unseen
""" Peer Assigment errors """
if retrieveDataEntries[10] == 18: # onlook
if retrieveDataEntries[7] == 1 and retrieveDataEntries[14] == 0:
self.peer1Direction.SetSelection(0)
self.affectDirectList.SetSelection(0)
onlookPeerMessage = wx.MessageBox('Onlooking Solitary without Peer',\
caption = 'Onlooking Solitary: Assign Peer: ',\
style = wx.OK, parent = self)
        if retrieveDataEntries[10] == 18: # onlook
<reponame>luizfloripa/instagramy
""" Parsers for Instagramy """
import json
from datetime import datetime
from html.parser import HTMLParser
from collections import namedtuple
from .exceptions import RedirectionError
from .requests import get
def _nodes_classfier(nodes: list):
post_lists = []
for node in nodes:
data = {}
try:
data["likes"] = node["node"]["edge_liked_by"]["count"]
except (KeyError, TypeError):
data["likes"] = None
try:
data["comments"] = node["node"]["edge_media_to_comment"]["count"]
except (KeyError, TypeError):
data["comments"] = None
try:
data["is_video"] = node["node"]["is_video"]
except (KeyError, TypeError):
data["is_video"] = None
try:
data["upload_time"] = datetime.fromtimestamp(
node["node"]["taken_at_timestamp"]
)
except (KeyError, TypeError):
data["upload_time"] = None
try:
data["caption"] = node["node"]["accessibility_caption"]
except (KeyError, TypeError):
data["caption"] = None
try:
data["shortcode"] = node["node"]["shortcode"]
except (KeyError, TypeError):
data["shortcode"] = None
try:
data["dimensions"] = node['node']["dimensions"]
except (KeyError, IndexError):
data["dimensions"] = None
try:
data[
"post_url"
] = f'https://www.instagram.com/p/{node["node"]["shortcode"]}'
except (KeyError, TypeError):
data["post_url"] = None
try:
data["display_url"] = node["node"]["display_url"]
except (KeyError, TypeError):
data["display_url"] = None
nt = namedtuple("Post", data.keys())(*data.values())
post_lists.append(nt)
return post_lists
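# Hedged usage sketch (not part of the original module): _nodes_classfier
# tolerates missing keys, so even a minimal GraphQL-style node yields a Post
# namedtuple with None for the absent fields. The node below is hand-made.
def _nodes_classfier_demo():
    nodes = [{"node": {"shortcode": "ABC123", "is_video": False,
                       "edge_liked_by": {"count": 42}}}]
    post = _nodes_classfier(nodes)[0]
    # -> (42, None, 'https://www.instagram.com/p/ABC123')
    return post.likes, post.comments, post.post_url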
class Parser(HTMLParser):
"""
Class Parse the Static Html of the Instagram
website and return the required Data as
Python Dict
This Class Inherits html.parser.HtmlParser
"""
Data = {}
def handle_data(self, data):
if data.startswith("window._sharedData"):
try:
self.Data = json.loads(data[data.find('{"config"'): -1])
except (KeyError, json.JSONDecodeError):
raise RedirectionError
else:
pass
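# Hedged usage sketch (illustrative): Parser picks the window._sharedData
# JSON blob out of the inline <script> of a profile page. The snippet below
# is a hand-made stand-in for a real Instagram page.
def _parser_demo():
    html = ('<script>window._sharedData = '
            '{"config": {}, "entry_data": {}};</script>')
    parser = Parser()
    parser.feed(html)
    return parser.Data  # {'config': {}, 'entry_data': {}}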
class Viewer:
"""
User of Instagram currently Authenticated
Parse the Current User data in Page
"""
def __init__(self, **kwags):
data = kwags.get("data")
if data:
self.user_data = data
else:
sessionid = kwags.get("sessionid")
html = get("https://instagram.com", sessionid=sessionid)
parser = Parser()
parser.feed(html)
self.user_data = parser.Data
@property
def username(self) -> str:
""" Username of the given user """
return self.user_data["username"]
@property
def fullname(self) -> str:
""" Fullname of the given user """
return self.user_data["full_name"]
@property
def biography(self) -> str:
""" Biography of the given user """
return self.user_data["biography"]
@property
def website(self) -> str:
""" Website of the given user """
return self.user_data["external_url"]
@property
def profile_picture_url(self) -> str:
""" Profile picture url of the Given User """
return self.user_data["profile_pic_url_hd"]
@property
def is_private(self) -> bool:
""" Account type is Private """
return self.user_data["is_private"]
@property
def is_joined_recently(self) -> bool:
""" is user joined recently """
return self.user_data["is_joined_recently"]
@property
def is_professional_account(self) -> bool:
""" is user joined recently """
return self.user_data["is_professional_account"]
def __str__(self) -> str:
return f"{self.fullname} ({self.username}) -> {self.biography}"
def __repr__(self) -> str:
return f"{self.__class__.__name__}('{self.username}')"
class UserParser:
""" Parse the required data of user store as property"""
@property
def username(self) -> str:
""" Username of the given user """
return self.user_data["username"]
@property
def fullname(self) -> str:
""" Fullname of the given user """
return self.user_data["full_name"]
@property
def biography(self) -> str:
""" Biography of the given user """
return self.user_data["biography"]
@property
def website(self) -> str:
""" Website of the given user """
return self.user_data["external_url"]
@property
def number_of_followers(self) -> int:
""" No.of Followers of the given user """
return self.user_data["edge_followed_by"]["count"]
@property
def number_of_followings(self) -> int:
""" No.of Following of the given user """
return self.user_data["edge_follow"]["count"]
@property
def number_of_posts(self) -> int:
""" No.of Post of the given user """
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def profile_picture_url(self) -> str:
""" Profile picture url of the Given User """
return self.user_data["profile_pic_url_hd"]
@property
def is_verified(self) -> bool:
""" Verification status of the user """
return self.user_data["is_verified"]
@property
def is_private(self) -> bool:
""" Account type is Private """
return self.user_data["is_private"]
@property
def posts(self) -> list:
"""
Top 12 posts data of the given user
"""
posts_lists = []
posts_details = self.user_data["edge_owner_to_timeline_media"]["edges"]
for i in posts_details:
data = {}
try:
data["likes"] = i["node"]["edge_liked_by"]["count"]
except (KeyError, TypeError):
data["likes"] = None
try:
data["comments"] = i["node"]["edge_media_to_comment"]["count"]
except (KeyError, TypeError):
data["comments"] = None
try:
data["caption"] = i["node"]["accessibility_caption"]
except (KeyError, TypeError):
data["caption"] = None
try:
data["is_video"] = i["node"]["is_video"]
except (KeyError, TypeError):
data["is_video"] = None
try:
data["timestamp"] = i["node"]["taken_at_timestamp"]
except (KeyError, TypeError):
data["timestamp"] = None
try:
data["location"] = i["node"]["location"]
except (KeyError, TypeError):
data["location"] = None
try:
data["shortcode"] = i["node"]["shortcode"]
except (KeyError, TypeError):
data["shortcode"] = None
try:
data[
"post_url"
] = f'https://www.instagram.com/p/{i["node"]["shortcode"]}/'
except (KeyError, TypeError):
data["post_url"] = None
try:
data["display_url"] = i["node"]["display_url"]
except (KeyError, TypeError):
data["display_url"] = None
if i["node"]["is_video"]:
data["video_url"] = i["node"]["video_url"]
data["video_view_count"] = i["node"]["video_view_count"]
if i["node"]["is_video"]:
data["post_source"] = i["node"]["video_url"]
else:
data["post_source"] = i["node"]["display_url"]
try:
data["taken_at_timestamp"] = datetime.fromtimestamp(
i["node"]["taken_at_timestamp"]
)
except (KeyError, TypeError):
data["taken_at_timestamp"] = None
nt = namedtuple("Post", data.keys())(*data.values())
posts_lists.append(nt)
return posts_lists
@property
def posts_display_urls(self) -> list:
"""
Picture URLs of the top 12 posts of the given user
"""
return [i["display_url"] for i in self.posts]
@property
def is_joined_recently(self) -> bool:
""" Is user joined recently """
return self.user_data["is_joined_recently"]
@property
def other_info(self) -> dict:
"""
Other information about user
"""
return {
"is_private": self.user_data["is_private"],
"is_verified": self.user_data["is_verified"],
"is_business_account": self.user_data["is_business_account"],
"is_joined_recently": self.user_data["is_joined_recently"],
"has_ar_effects": self.user_data["has_ar_effects"],
"has_clips": self.user_data["has_clips"],
"has_guides": self.user_data["has_guides"],
"has_channel": self.user_data["has_channel"],
"highlight_reel_count": self.user_data["highlight_reel_count"],
}
@property
def follows_viewer(self) -> bool:
""" Is user follows the Viewer """
return self.user_data["follows_viewer"]
@property
def has_blocked_viewer(self) -> bool:
""" Is user blocked the Viewer """
return self.user_data["has_blocked_viewer"]
@property
def no_of_mutual_follower(self) -> int:
""" Number of mutual followers """
return self.user_data["edge_mutual_followed_by"]["count"]
@property
def requested_by_viewer(self) -> bool:
""" Is viewer requested to follow user """
return self.user_data["requested_by_viewer"]
@property
def is_blocked_by_viewer(self) -> bool:
""" Is Viewer blocked the User """
return self.user_data["blocked_by_viewer"]
@property
def restricted_by_viewer(self) -> bool:
""" Is Viewer restricted the User """
return self.user_data["restricted_by_viewer"]
@property
def has_country_block(self) -> bool:
""" Is country blocked the User """
return self.user_data["country_block"]
@property
def followed_by_viewer(self) -> bool:
""" Is Viewer Follows the User """
return self.user_data["followed_by_viewer"]
@property
def has_requested_viewer(self) -> bool:
""" Is User requested the Viewer """
return self.user_data["has_requested_viewer"]
@property
def connected_fb_page(self) -> str:
""" Connected Facebook page of the user """
return self.user_data["connected_fb_page"]
class PostParser:
""" Parse the required data of post store as property"""
@property
def type_of_post(self) -> str:
""" Type of the Post"""
return self.post_data["__typename"]
@property
def display_url(self) -> str:
""" Display url of the Image/Video """
return self.post_data["display_url"]
@property
def upload_time(self) -> datetime:
""" Upload Datetime of the Post """
return datetime.fromtimestamp(self.post_data["taken_at_timestamp"])
@property
def number_of_likes(self) -> int:
""" No.of Like is given post """
return int(self.post_data["edge_media_preview_like"]["count"])
@property
def number_of_comments(self) -> int:
""" No.of Comments is given post """
return int(self.post_data["edge_media_to_parent_comment"]["count"])
@property
def author(self) -> str:
""" Author of the Post """
return self.post_data["owner"]["username"]
@property
def caption(self) -> str:
""" Caption of the Post """
return self.post_data["accessibility_caption"]
@property
def post_source(self) -> str:
""" Post Image/Video Link """
if self.post_data["is_video"]:
return self.post_data["video_url"]
return self.display_url
@property
def text(self) -> str:
try:
text = self.post_data["edge_media_to_caption"]["edges"][0]["node"]["text"]
return text
except (KeyError, IndexError):
return None
@property
def location(self) -> str:
location_data = self.post_data["location"]
Location = namedtuple("Location", ["Address", "id", "slug", "name"])
if location_data:
return Location(
location_data["address_json"],
location_data["id"],
location_data["slug"],
location_data["name"],
)
return None
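# Hedged usage sketch: `location` above returns a Location namedtuple or
# None, so callers should guard before formatting (illustrative only).
def location_label(post) -> str:
    """Human-readable location, or an empty string when absent."""
    loc = post.location
    return f"{loc.name} ({loc.slug})" if loc is not None else ""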
class TagParser:
""" Parse the required data of tag store as property"""
@property
def tagname(self) -> str:
""" Tagname of the Hagtag """
return self.tag_data["name"]
@property
def profile_pic_url(self) -> str:
""" Profile picture url of the Hagtag """
return self.tag_data["profile_pic_url"]
@property
def number_of_posts(self) -> int:
""" No.of posts in given Hashtag """
return self.tag_data["edge_hashtag_to_media"]["count"]
@property
def top_posts(self) -> list:
"""
Top post data (<70) in the given Hashtag
"""
nodes = self.tag_data["edge_hashtag_to_media"]["edges"]
return _nodes_classfier(nodes)
@property
def posts_display_urls(self) -> list:
"""
Display URLs of the top posts (<70) in the given Hashtag
"""
return [i["display_url"] for i in self.top_posts]
class LocationParser:
""" Parse the required data of location store as property"""
@property
def id(self) -> str:
""" Location id of the location """
return self.location_data["id"]
@property
def name(self) -> str:
""" Name of the location """
return self.location_data["name"]
@property
def latitude(self) -> float:
""" Latitude of the location """
return self.location_data["lat"]
@property
def longitude(self) -> float:
""" Longitude of the location """
return self.location_data["lng"]
@property
def slug(self) -> str:
""" Slug of the location """
return self.location_data["slug"]
@property
def website(self) -> str:
""" Website of the location """
return self.location_data["website"]
@property
def phone(self) -> str:
""" Phone Number of the location """
return self.location_data["phone"]
@property
def address(self) -> dict:
""" Address of the location """
return json.loads(self.location_data["address_json"])
@property
def profile_pic_url(self) -> str:
""" Profile Picture of the location """
return self.location_data["profile_pic_url"]
@property
def number_of_posts(self) -> int:
""" Number of post in the location """
return self.location_data["edge_location_to_media"]["count"]
@property
def top_posts(self) -> list:
"""
Top post data (<70) in the given Location
"""
nodes = self.location_data["edge_location_to_media"]["edges"]
return _nodes_classfier(nodes)
<reponame>marromlam/quick-memos<filename>ProjectMaker.py
# -*- coding: UTF-8 -*-
##########################################################################################
# Importing packages. ####################################################################
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import (QByteArray, QDate, QDateTime, QEvent, QPoint, QRect,
QRegExp, QSettings, QSize, Qt, QTime, QTimer)
from PyQt5.QtGui import QColor, QIcon, QRegExpValidator, QValidator
from PyQt5.QtWidgets import (QAbstractItemView, QFormLayout, QAbstractItemDelegate,
QAction, QApplication, QComboBox, QDialog, QDialogButtonBox,
QFileDialog, QWidget, QPushButton, QLineEdit, QInputDialog,
QGridLayout, QGroupBox, QHeaderView,
QItemDelegate, QLabel, QMainWindow, QMessageBox,
QStyle, QStyleOptionViewItem, QTableWidget, QTableWidgetItem,
QTreeWidget, QTreeWidgetItem, QVBoxLayout)
from ProLib import *
##########################################################################################
##########################################################################################
# Global Constants and parameters. #######################################################
DefaultNumberOfRows = 120 #(#+1)
database = 'Data'
mode1=1; mode2=1; mode3=1
global factor
factor = 2
separ = '---------------------------------------------'
sectspa = ' '
##########################################################################################
class Ui_ProjectMaker(object):
def setupUi(self, ProjectMaker):
ProjectMaker.setObjectName("ProjectMaker")
ProjectMaker.setEnabled(True)
ProjectMaker.resize(361, 452)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ProjectMaker.sizePolicy().hasHeightForWidth())
ProjectMaker.setSizePolicy(sizePolicy)
ProjectMaker.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.Tabs = QtWidgets.QTabWidget(ProjectMaker)
self.Tabs.setEnabled(True)
self.Tabs.setGeometry(QtCore.QRect(12, 46, 336, 392))
self.Tabs.setTabPosition(QtWidgets.QTabWidget.North)
self.Tabs.setTabShape(QtWidgets.QTabWidget.Rounded)
self.Tabs.setElideMode(QtCore.Qt.ElideMiddle)
self.Tabs.setUsesScrollButtons(False)
self.Tabs.setDocumentMode(False)
self.Tabs.setTabsClosable(False)
self.Tabs.setTabBarAutoHide(False)
self.Tabs.setObjectName("Tabs")
self.AddTab = QtWidgets.QWidget()
self.AddTab.setObjectName("AddTab")
self.layoutWidget = QtWidgets.QWidget(self.AddTab)
self.layoutWidget.setGeometry(QtCore.QRect(20, 20, 291, 331))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.AddDataTable = QtWidgets.QTableWidget(self.layoutWidget)
self.AddDataTable.setLayoutDirection(QtCore.Qt.LeftToRight)
self.AddDataTable.setInputMethodHints(QtCore.Qt.ImhPreferNumbers)
self.AddDataTable.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.AddDataTable.setFrameShadow(QtWidgets.QFrame.Sunken)
self.AddDataTable.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.AddDataTable.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.AddDataTable.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)
self.AddDataTable.setDragEnabled(True)
self.AddDataTable.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
self.AddDataTable.setAlternatingRowColors(True)
self.AddDataTable.setGridStyle(QtCore.Qt.SolidLine)
self.AddDataTable.setCornerButtonEnabled(False)
self.AddDataTable.setObjectName("AddDataTable")
self.AddDataTable.setColumnCount(2)
self.AddDataTable.setRowCount(2)
# Setting up default rows. ############################################
self.AddDataTable.setRowCount(DefaultNumberOfRows+1)
for l in range(0,DefaultNumberOfRows):
item = QtWidgets.QTableWidgetItem()
self.AddDataTable.setVerticalHeaderItem(l, item)
#######################################################################
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
item.setFont(font)
self.AddDataTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
item.setFont(font)
self.AddDataTable.setHorizontalHeaderItem(1, item)
self.AddDataTable.horizontalHeader().setCascadingSectionResizes(False)
self.AddDataTable.horizontalHeader().setDefaultSectionSize(119)
self.AddDataTable.horizontalHeader().setMinimumSectionSize(19)
self.verticalLayout.addWidget(self.AddDataTable)
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.AddVariableNameLabel = QtWidgets.QLabel(self.layoutWidget)
self.AddVariableNameLabel.setObjectName("AddVariableNameLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.AddVariableNameLabel)
self.AddVariableNameBox = QtWidgets.QLineEdit(self.layoutWidget)
font = QtGui.QFont()
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.AddVariableNameBox.setFont(font)
self.AddVariableNameBox.setObjectName("AddVariableNameBox")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.AddVariableNameBox)
self.AddSymbolicNameLabel = QtWidgets.QLabel(self.layoutWidget)
self.AddSymbolicNameLabel.setObjectName("AddSymbolicNameLabel")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.AddSymbolicNameLabel)
self.AddSymbolicNameBox = QtWidgets.QLineEdit(self.layoutWidget)
font = QtGui.QFont()
font.setItalic(True)
self.AddSymbolicNameBox.setFont(font)
self.AddSymbolicNameBox.setObjectName("AddSymbolicNameBox")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.AddSymbolicNameBox)
self.AddVariableUnitsLabel = QtWidgets.QLabel(self.layoutWidget)
self.AddVariableUnitsLabel.setObjectName("AddVariableUnitsLabel")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.AddVariableUnitsLabel)
self.AddVariableUnitsBox = QtWidgets.QLineEdit(self.layoutWidget)
self.AddVariableUnitsBox.setObjectName("AddVariableUnitsBox")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.AddVariableUnitsBox)
self.verticalLayout.addLayout(self.formLayout)
self.splitter = QtWidgets.QSplitter(self.layoutWidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.AddSave = QtWidgets.QPushButton(self.splitter)
self.AddSave.setDefault(False)
self.AddSave.setFlat(False)
self.AddSave.setObjectName("AddSave")
self.AddImport = QtWidgets.QPushButton(self.splitter)
self.AddImport.setObjectName("AddImport")
self.AddClear = QtWidgets.QPushButton(self.splitter)
self.AddClear.setObjectName("AddClear")
self.verticalLayout.addWidget(self.splitter)
self.Tabs.addTab(self.AddTab, "")
self.ExpTab = QtWidgets.QWidget()
self.ExpTab.setObjectName("ExpTab")
self.layoutWidget1 = QtWidgets.QWidget(self.ExpTab)
self.layoutWidget1.setGeometry(QtCore.QRect(20, 20, 291, 331))
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.ExpMethodSelect = QtWidgets.QComboBox(self.layoutWidget1)
self.ExpMethodSelect.setObjectName("ExpMethodSelect")
self.ExpMethodSelect.addItem("",1)
self.ExpMethodSelect.addItem("",2)
self.ExpMethodSelect.addItem("",3)
self.verticalLayout_4.addWidget(self.ExpMethodSelect)
self.ExpVariableSelect = QtWidgets.QListWidget(self.layoutWidget1)
self.ExpVariableSelect.setAlternatingRowColors(True)
self.ExpVariableSelect.setObjectName("ExpVariableSelect")
self.verticalLayout_4.addWidget(self.ExpVariableSelect)
self.ExpCaptionBox = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setItalic(True)
self.ExpCaptionBox.setFont(font)
self.ExpCaptionBox.setObjectName("ExpCaptionBox")
self.verticalLayout_4.addWidget(self.ExpCaptionBox)
self.splitter_2 = QtWidgets.QSplitter(self.layoutWidget1)
self.splitter_2.setOrientation(QtCore.Qt.Horizontal)
self.splitter_2.setObjectName("splitter_2")
self.ExpPreview = QtWidgets.QPushButton(self.splitter_2)
self.ExpPreview.setObjectName("ExpPreview")
self.ExpExport = QtWidgets.QPushButton(self.splitter_2)
self.ExpExport.setObjectName("ExpExport")
self.verticalLayout_4.addWidget(self.splitter_2)
self.Tabs.addTab(self.ExpTab, "")
self.StaTab = QtWidgets.QWidget()
self.StaTab.setObjectName("StaTab")
self.StaDataTable = QtWidgets.QTableWidget(self.StaTab)
self.StaDataTable.setLayoutDirection(QtCore.Qt.LeftToRight)
self.StaDataTable.setInputMethodHints(QtCore.Qt.ImhPreferNumbers)
self.StaDataTable.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.StaDataTable.setFrameShadow(QtWidgets.QFrame.Sunken)
self.StaDataTable.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.StaDataTable.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.StaDataTable.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)
self.StaDataTable.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
self.StaDataTable.setAlternatingRowColors(True)
self.StaDataTable.setGridStyle(QtCore.Qt.SolidLine)
self.StaDataTable.setColumnCount(1)
self.StaDataTable.setGeometry(QtCore.QRect(20, 20, 111, 281))
self.StaDataTable.setObjectName("StaDataTable")
self.StaDataTable.horizontalHeader().setDefaultSectionSize(80)
item = QtWidgets.QTableWidgetItem('Data')
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
item.setFont(font)
self.StaDataTable.setHorizontalHeaderItem(0, item)
self.layoutWidget2 = QtWidgets.QWidget(self.StaTab)
self.layoutWidget2.setGeometry(QtCore.QRect(140, 20, 171, 281))
self.layoutWidget2.setObjectName("layoutWidget2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.layoutWidget2)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.StaNumberVariablesSelect = QtWidgets.QComboBox(self.layoutWidget2)
self.StaNumberVariablesSelect.setObjectName("StaNumberVariablesSelect")
self.StaNumberVariablesSelect.addItem("",1)
self.StaNumberVariablesSelect.addItem("",2)
self.verticalLayout_5.addWidget(self.StaNumberVariablesSelect)
self.StaVariableSelect = QtWidgets.QListWidget(self.layoutWidget2)
self.StaVariableSelect.setAlternatingRowColors(True)
self.StaVariableSelect.setObjectName("StaVariableSelect")
self.verticalLayout_5.addWidget(self.StaVariableSelect)
self.StatCobertureLabel = QtWidgets.QLabel(self.layoutWidget2)
self.StatCobertureLabel.setObjectName("StatCobertureLabel")
self.verticalLayout_5.addWidget(self.StatCobertureLabel)
self.StaCobertureSelect = QtWidgets.QComboBox(self.layoutWidget2)
self.StaCobertureSelect.setObjectName("StaCobertureSelect")
self.StaCobertureSelect.addItem("",1)
self.StaCobertureSelect.addItem("",2)
self.StaCobertureSelect.addItem("",3)
self.StaCobertureSelect.addItem("",4)
self.StaCobertureSelect.addItem("",5)
self.StaCobertureSelect.addItem("",6)
self.StaCobertureSelect.addItem("",7)
self.verticalLayout_5.addWidget(self.StaCobertureSelect)
self.StaStatistics = QtWidgets.QPushButton(self.layoutWidget2)
self.StaStatistics.setObjectName("StaStatistics")
self.verticalLayout_5.addWidget(self.StaStatistics)
self.splitter_3 = QtWidgets.QSplitter(self.StaTab)
self.splitter_3.setGeometry(QtCore.QRect(15, 320, 301, 32))
self.splitter_3.setOrientation(QtCore.Qt.Horizontal)
self.splitter_3.setObjectName("splitter_3")
self.StaCsvCopy = QtWidgets.QPushButton(self.splitter_3)
self.StaCsvCopy.setObjectName("StaCsvCopy")
self.StaTexCopy = QtWidgets.QPushButton(self.splitter_3)
self.StaTexCopy.setObjectName("StaTexCopy")
self.Tabs.addTab(self.StaTab, "")
self.KaiTab = QtWidgets.QWidget()
self.KaiTab.setObjectName("KaiTab")
self.KaiParameterTable = QtWidgets.QTableWidget(self.KaiTab)
self.KaiParameterTable.setGeometry(QtCore.QRect(21, 20, 171, 243))
self.KaiParameterTable.setLayoutDirection(QtCore.Qt.LeftToRight)
self.KaiParameterTable.setInputMethodHints(QtCore.Qt.ImhPreferNumbers)
self.KaiParameterTable.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.KaiParameterTable.setFrameShadow(QtWidgets.QFrame.Sunken)
self.KaiParameterTable.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.KaiParameterTable.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.KaiParameterTable.setAutoScroll(False)
self.KaiParameterTable.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)
self.KaiParameterTable.setDragEnabled(False)
self.KaiParameterTable.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
self.KaiParameterTable.setAlternatingRowColors(True)
self.KaiParameterTable.setIconSize(QtCore.QSize(0, 0))
self.KaiParameterTable.setShowGrid(True)
self.KaiParameterTable.setCornerButtonEnabled(False)
self.KaiParameterTable.setObjectName("KaiParameterTable")
self.KaiParameterTable.setColumnCount(2)
self.KaiParameterTable.setRowCount(9)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.KaiParameterTable.setVerticalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
item.setFont(font)
self.KaiParameterTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
item.setFont(font)
self.KaiParameterTable.setHorizontalHeaderItem(1, item)
self.KaiParameterTable.horizontalHeader().setVisible(True)
self.KaiParameterTable.horizontalHeader().setCascadingSectionResizes(False)
self.KaiParameterTable.horizontalHeader().setDefaultSectionSize(67)
self.KaiParameterTable.horizontalHeader().setHighlightSections(False)
self.KaiParameterTable.horizontalHeader().setMinimumSectionSize(40)
self.KaiParameterTable.horizontalHeader().setSortIndicatorShown(False)
self.KaiParameterTable.horizontalHeader().setStretchLastSection(True)
self.KaiParameterTable.verticalHeader().setCascadingSectionResizes(False)
self.KaiParameterTable.verticalHeader().setDefaultSectionSize(24)
self.KaiParameterTable.verticalHeader().setHighlightSections(False)
self.KaiDependentBox = QtWidgets.QLineEdit(self.KaiTab)
self.KaiDependentBox.setGeometry(QtCore.QRect(120, 290, 191, 21))
self.KaiDependentBox.setAlignment(QtCore.Qt.AlignCenter)
self.KaiDependentBox.setObjectName("KaiDependentBox")
self.KaiFunctionLabel = QtWidgets.QLabel(self.KaiTab)
self.KaiFunctionLabel.setGeometry(QtCore.QRect(20, 270, 287, 18))
font = QtGui.QFont()
font.setPointSize(15)
self.KaiFunctionLabel.setFont(font)
self.KaiFunctionLabel.setTextFormat(QtCore.Qt.AutoText)
self.KaiFunctionLabel.setAlignment(QtCore.Qt.AlignCenter)
self.KaiFunctionLabel.setObjectName("KaiFunctionLabel")
self.KaiTest = QtWidgets.QPushButton(self.KaiTab)
self.KaiTest.setGeometry(QtCore.QRect(110, 320, 113, 32))
self.KaiTest.setObjectName("KaiTest")
self.KaiSqrLabel = QtWidgets.QTextEdit(self.KaiTab)
self.KaiSqrLabel.setGeometry(QtCore.QRect(200, 20, 111, 141))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(237, 237, 237))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(245, 245, 245))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush)
self.KaiSqrLabel.setPalette(palette)
self.KaiSqrLabel.setAutoFillBackground(True)
self.KaiSqrLabel.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.KaiSqrLabel.setFrameShadow(QtWidgets.QFrame.Plain)
self.KaiSqrLabel.setLineWidth(1)
self.KaiSqrLabel.setReadOnly(True)
self.KaiSqrLabel.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.KaiSqrLabel.setObjectName("KaiSqrLabel")
self.KaiConfidenceBox = QtWidgets.QLineEdit(self.KaiTab)
self.KaiConfidenceBox.setGeometry(QtCore.QRect(200, 130, 111, 21))
self.KaiConfidenceBox.setAlignment(QtCore.Qt.AlignCenter)
self.KaiConfidenceBox.setObjectName("KaiConfidenceBox")
self.KaiConfidenceLabel = QtWidgets.QLabel(self.KaiTab)
self.KaiConfidenceLabel.setGeometry(QtCore.QRect(200, 90, 111, 41))
self.KaiConfidenceLabel.setText("<html><head/><body><p>Confidence level</p></body></html>")
self.KaiConfidenceLabel.setTextFormat(QtCore.Qt.RichText)
self.KaiConfidenceLabel.setScaledContents(False)
self.KaiConfidenceLabel.setAlignment(QtCore.Qt.AlignCenter)
self.KaiConfidenceLabel.setObjectName("KaiConfidenceLabel")
self.KaiIndependentBox = QtWidgets.QLineEdit(self.KaiTab)
self.KaiIndependentBox.setGeometry(QtCore.QRect(20, 290, 61, 21))
self.KaiIndependentBox.setObjectName("KaiIndependentBox")
self.KaiEqualLabel = QtWidgets.QLabel(self.KaiTab)
self.KaiEqualLabel.setGeometry(QtCore.QRect(90, 290, 21, 16))
font = QtGui.QFont()
font.setPointSize(15)
self.KaiEqualLabel.setFont(font)
self.KaiEqualLabel.setAlignment(QtCore.Qt.AlignCenter)
self.KaiEqualLabel.setObjectName("KaiEqualLabel")
self.Tabs.addTab(self.KaiTab, "")
self.ProTab = QtWidgets.QWidget()
self.ProTab.setObjectName("ProTab")
self.ProPropagate = QtWidgets.QPushButton(self.ProTab)
self.ProPropagate.setGeometry(QtCore.QRect(110, 320, 113, 32))
self.ProPropagate.setObjectName("ProPropagate")
self.layoutWidget3 = QtWidgets.QWidget(self.ProTab)
self.layoutWidget3.setGeometry(QtCore.QRect(20, 20, 291, 291))
self.layoutWidget3.setObjectName("layoutWidget3")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget3)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.ProText = QtWidgets.QTextEdit(self.layoutWidget3)
font = QtGui.QFont()
font.setItalic(False)
self.ProText.setFont(font)
self.ProText.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.ProText.setObjectName("ProText")
self.verticalLayout_3.addWidget(self.ProText)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.ProFunctionLabel = QtWidgets.QLabel(self.layoutWidget3)
font = QtGui.QFont()
font.setPointSize(15)
self.ProFunctionLabel.setFont(font)
self.ProFunctionLabel.setTextFormat(QtCore.Qt.AutoText)
self.ProFunctionLabel.setAlignment(QtCore.Qt.AlignCenter)
self.ProFunctionLabel.setObjectName("ProFunctionLabel")
self.verticalLayout_2.addWidget(self.ProFunctionLabel)
self.ProFunctionBox = QtWidgets.QLineEdit(self.layoutWidget3)
self.ProFunctionBox.setAlignment(QtCore.Qt.AlignCenter)
self.ProFunctionBox.setObjectName("ProFunctionBox")
self.verticalLayout_2.addWidget(self.ProFunctionBox)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setObjectName("formLayout_2")
self.ProVariableNameLabel = QtWidgets.QLabel(self.layoutWidget3)
self.ProVariableNameLabel.setObjectName("ProVariableNameLabel")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.ProVariableNameLabel)
self.ProVariableNameBox = QtWidgets.QLineEdit(self.layoutWidget3)
self.ProVariableNameBox.setObjectName("ProVariableNameBox")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.ProVariableNameBox)
self.ProSymbolicNameLabel = QtWidgets.QLabel(self.layoutWidget3)
self.ProSymbolicNameLabel.setObjectName("ProSymbolicNameLabel")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.ProSymbolicNameLabel)
self.ProSymbolicNameBox = QtWidgets.QLineEdit(self.layoutWidget3)
self.ProSymbolicNameBox.setObjectName("ProSymbolicNameBox")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.ProSymbolicNameBox)
self.ProVariableUnitsLabel = QtWidgets.QLabel(self.layoutWidget3)
self.ProVariableUnitsLabel.setObjectName("ProVariableUnitsLabel")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.ProVariableUnitsLabel)
self.ProVariableUnitsBox = QtWidgets.QLineEdit(self.layoutWidget3)
self.ProVariableUnitsBox.setObjectName("ProVariableUnitsBox")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.ProVariableUnitsBox)
self.verticalLayout_3.addLayout(self.formLayout_2)
self.Tabs.addTab(self.ProTab, "")
self.layoutWidget4 = QtWidgets.QWidget(ProjectMaker)
self.layoutWidget4.setGeometry(QtCore.QRect(20, 10, 331, 23))
self.layoutWidget4.setObjectName("layoutWidget4")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget4)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.ProjectNameLabel = QtWidgets.QLabel(self.layoutWidget4)
self.ProjectNameLabel.setObjectName("ProjectNameLabel")
self.horizontalLayout.addWidget(self.ProjectNameLabel)
self.ProjectNameBox = QtWidgets.QLineEdit(self.layoutWidget4)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.ProjectNameBox.setFont(font)
self.ProjectNameBox.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.ProjectNameBox.setAutoFillBackground(False)
self.ProjectNameBox.setClearButtonEnabled(False)
self.ProjectNameBox.setObjectName("ProjectNameBox")
self.ProjectNameBox.setText('/Users/suser/Desktop/New')
self.horizontalLayout.addWidget(self.ProjectNameBox)
self.actionSave = QtWidgets.QAction(ProjectMaker)
self.actionSave.setObjectName("actionSave")
#self.retranslateUi(ProjectMaker)
#self.Tabs.setCurrentIndex(4)
self.retranslateUi(ProjectMaker)
#self.DirectoryCkeck(ProjectMaker)
self.Tabs.setCurrentIndex(0)
# Setting up actions of GUI. ##########################################
self.AddSave.clicked.connect(self.AddSaveButton)
self.AddImport.clicked.connect(self.AddImportButton)
self.AddClear.clicked.connect(self.AddClearButton)
self.ExpPreview.clicked.connect(self.ExpPreviewButton)
self.ExpExport.clicked.connect(self.ExpExportButton)
self.StaStatistics.clicked.connect(self.StaDoButton)
self.Tabs.tabBarClicked.connect(self.TabsRefresh)
self.ExpMethodSelect.activated.connect(self.handleActivated1)
self.StaNumberVariablesSelect.activated.connect(self.handleActivated2)
self.StaCobertureSelect.activated.connect(self.handleActivated3)
self.ProPropagate.clicked.connect(self.ProPropagateButton)
self.KaiTest.clicked.connect(self.KaySqrButton)
#######################################################################
QtCore.QMetaObject.connectSlotsByName(ProjectMaker)
ProjectMaker.setTabOrder(self.ProjectNameBox, self.AddVariableNameBox)
ProjectMaker.setTabOrder(self.AddVariableNameBox, self.AddSymbolicNameBox)
ProjectMaker.setTabOrder(self.AddSymbolicNameBox, self.AddVariableUnitsBox)
ProjectMaker.setTabOrder(self.AddVariableUnitsBox, self.AddDataTable)
ProjectMaker.setTabOrder(self.AddDataTable, self.AddSave)
ProjectMaker.setTabOrder(self.AddSave, self.AddClear)
ProjectMaker.setTabOrder(self.AddClear, self.AddImport)
ProjectMaker.setTabOrder(self.AddImport, self.ExpMethodSelect)
ProjectMaker.setTabOrder(self.ExpMethodSelect, self.ExpVariableSelect)
ProjectMaker.setTabOrder(self.ExpVariableSelect, self.ExpCaptionBox)
ProjectMaker.setTabOrder(self.ExpCaptionBox, self.ExpPreview)
ProjectMaker.setTabOrder(self.ExpPreview, self.ExpExport)
ProjectMaker.setTabOrder(self.ExpExport, self.StaDataTable)
ProjectMaker.setTabOrder(self.StaDataTable, self.StaNumberVariablesSelect)
ProjectMaker.setTabOrder(self.StaNumberVariablesSelect, self.StaVariableSelect)
ProjectMaker.setTabOrder(self.StaVariableSelect, self.StaCobertureSelect)
ProjectMaker.setTabOrder(self.StaCobertureSelect, self.StaStatistics)
ProjectMaker.setTabOrder(self.StaStatistics, self.StaCsvCopy)
ProjectMaker.setTabOrder(self.StaCsvCopy, self.StaTexCopy)
ProjectMaker.setTabOrder(self.StaTexCopy, self.ProPropagate)
ProjectMaker.setTabOrder(self.ProPropagate, self.ProText)
ProjectMaker.setTabOrder(self.ProText, self.ProFunctionBox)
ProjectMaker.setTabOrder(self.ProFunctionBox, self.ProVariableNameBox)
ProjectMaker.setTabOrder(self.ProVariableNameBox, self.ProSymbolicNameBox)
ProjectMaker.setTabOrder(self.ProSymbolicNameBox, self.ProVariableUnitsBox)
ProjectMaker.setTabOrder(self.ProVariableUnitsBox, self.Tabs)
ProjectMaker.setTabOrder(self.Tabs, self.KaiParameterTable)
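# Hedged usage sketch (the standard pattern for Designer-generated UI
# classes; the QWidget host below is an assumption, not taken from this repo):
#
#     app = QtWidgets.QApplication(sys.argv)
#     host = QtWidgets.QWidget()
#     ui = Ui_ProjectMaker()
#     ui.setupUi(host)
#     host.show()
#     sys.exit(app.exec_())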
def retranslateUi(self, ProjectMaker):
_translate = QtCore.QCoreApplication.translate
ProjectMaker.setWindowTitle(_translate("ProjectMaker", "Project Maker"))
# Setting up rows label. ##############################################
for l in range(0, DefaultNumberOfRows):
item = self.AddDataTable.verticalHeaderItem(l)
item.setText(_translate("Form", str(l + 1)))
#######################################################################
item = self.AddDataTable.horizontalHeaderItem(0)
item.setText(_translate("ProjectMaker", "Data"))
item = self.AddDataTable.horizontalHeaderItem(1)
item.setText(_translate("ProjectMaker", "Uncertainty"))
self.AddVariableNameLabel.setText(_translate("ProjectMaker", "Variable Name"))
self.AddSymbolicNameLabel.setText(_translate("ProjectMaker", "Symbolic Name"))
self.AddVariableUnitsLabel.setText(_translate("ProjectMaker", "Variable Units"))
self.AddSave.setText(_translate("ProjectMaker", "Save"))
self.AddImport.setText(_translate("ProjectMaker", "Import"))
self.AddClear.setText(_translate("ProjectMaker", "Clear"))
self.Tabs.setTabText(self.Tabs.indexOf(self.AddTab), _translate("ProjectMaker", "Add"))
self.ExpMethodSelect.setItemText(0, _translate("ProjectMaker", "LaTeX - default uncertainties"))
self.ExpMethodSelect.setItemText(1, _translate("ProjectMaker", "LaTeX - simple brackets"))
self.ExpMethodSelect.setItemText(2, _translate("ProjectMaker", "Wolfram"))
self.ExpVariableSelect.setSortingEnabled(True)
self.ExpCaptionBox.setText(_translate("ProjectMaker", "Caption to TeX export"))
self.ExpPreview.setText(_translate("ProjectMaker", "Preview"))
self.ExpExport.setText(_translate("ProjectMaker", "Export"))
self.Tabs.setTabText(self.Tabs.indexOf(self.ExpTab), _translate("ProjectMaker", "Exporter"))
self.StaNumberVariablesSelect.setItemText(0, _translate("ProjectMaker", "Direct measures"))
self.StaNumberVariablesSelect.setItemText(1, _translate("ProjectMaker", "Indirect measures"))
self.StaVariableSelect.setSortingEnabled(True)
self.StatCobertureLabel.setText(_translate("ProjectMaker", "Coverage factor"))
self.StaCobertureSelect.setItemText(0, _translate("ProjectMaker", "Gaussian 68%"))
self.StaCobertureSelect.setItemText(1, _translate("ProjectMaker", "Gaussian 95%"))
self.StaCobertureSelect.setItemText(2, _translate("ProjectMaker", "Gaussian 99%"))
self.StaCobertureSelect.setItemText(3, _translate("ProjectMaker", "Student 70%"))
self.StaCobertureSelect.setItemText(4, _translate("ProjectMaker", "Student 95%"))
self.StaCobertureSelect.setItemText(5, _translate("ProjectMaker", "Student 99%"))
self.StaCobertureSelect.setItemText(6, _translate("ProjectMaker", "Other Percentile"))
self.StaStatistics.setText(_translate("ProjectMaker", "Do statistics"))
self.StaCsvCopy.setText(_translate("ProjectMaker", "Copy csv format"))
self.StaTexCopy.setText(_translate("ProjectMaker", "Copy TeX format"))
self.Tabs.setTabText(self.Tabs.indexOf(self.StaTab), _translate("ProjectMaker", "Stat"))
item = self.KaiParameterTable.verticalHeaderItem(0)
item.setText(_translate("ProjectMaker", "1"))
item = self.KaiParameterTable.verticalHeaderItem(1)
item.setText(_translate("ProjectMaker", "2"))
item = self.KaiParameterTable.verticalHeaderItem(2)
item.setText(_translate("ProjectMaker", "3"))
item = self.KaiParameterTable.verticalHeaderItem(3)
item.setText(_translate("ProjectMaker", "4"))
item = self.KaiParameterTable.verticalHeaderItem(4)
item.setText(_translate("ProjectMaker", "5"))
item = self.KaiParameterTable.verticalHeaderItem(5)
item.setText(_translate("ProjectMaker", "6"))
item = self.KaiParameterTable.verticalHeaderItem(6)
item.setText(_translate("ProjectMaker", "7"))
item = self.KaiParameterTable.verticalHeaderItem(7)
item.setText(_translate("ProjectMaker", "8"))
item = self.KaiParameterTable.verticalHeaderItem(8)
item.setText(_translate("ProjectMaker", "9"))
item = self.KaiParameterTable.horizontalHeaderItem(0)
item.setText(_translate("ProjectMaker", "Parameter"))
item = self.KaiParameterTable.horizontalHeaderItem(1)
item.setText(_translate("ProjectMaker", "Value"))
self.KaiFunctionLabel.setText(_translate("ProjectMaker", "Function"))
self.KaiTest.setText(_translate("ProjectMaker", "χ2 Test"))
self.KaiSqrLabel.setHtml(_translate("ProjectMaker", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Times\'; font-size:48pt; font-style:italic; color:#252525;\">χ</span><span style=\" font-family:\'Times\'; font-size:48pt; font-style:italic; color:#252525; vertical-align:super;\">2</span></p></body></html>"))
self.KaiConfidenceBox.setText(_translate("ProjectMaker", "0.95"))
self.KaiEqualLabel.setText(_translate("ProjectMaker", "="))
self.Tabs.setTabText(self.Tabs.indexOf(self.KaiTab), _translate("ProjectMaker", "KaiSqr"))
self.ProPropagate.setText(_translate("ProjectMaker", "Propagate"))
self.ProText.setHtml(_translate("ProjectMaker", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">The use of universal <span style=\" font-weight:600;\">pysical constants</span> is permmited. But it is important to know thei names blabala</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-style:italic;\">h | |
#!/usr/bin/env python3
"""@@@
Main Module: Motif.py (adapted to chromatin starting from RNA/Motif-bkp160630.py)
Objects: Motif
Link
LGroup
ndx
Map
--- motif class objects
AST
APT
XLoop
MBL
Branch
Stem
PseudoKnot
ArchMP
MultiPair
Functions: stemType2PairList
disp_Motif
copyStem
ins_sort_StemList
Author: <NAME>
creation date: 170126
last update: 200211 (upgraded to python3), prev 190718
version: 0.1
Purpose:
This module is used primarily by "chreval.py". However, as with many
packages that are building up, the package has become complicated
enough, that the objects in this module are used by more than one
program.
The remainder of this module deals with building tree diagrams and
will eventually become a unified format for Motif.
Comments:
190725
#################################################################
Be aware that Motif is under major revision. As a result, it is
presently in a somewhat odd state of disarray. I am presently in
the process of changing Motif to an object representation (with the
classes Stem, PseudoKnot, MBL, XLoop and MultiPair), adding handles
to access the objects and migrating the evalutation and
construction into the rest of the program. Presently, not all of
the routines have been upgraded to handle and build this
representation. Hence, most of the processes are still using the
old methods. I expect this to change in the next few months, but
unfortunately at this time of release, that step has not been
completed yet.
The program is also being tested with RNA structure prediction in a
similar form as chromatin, and these checks will be used to help in
the migration process.
#################################################################
older comments
This was set up as a separate object for testing. I'm gradually
pushing chromatin in the same direction as my RNA program with real
objects, but presently, they are not being called yet; mostly because I
had to finalize the structure of the class PseudoKnot in my RNA
directory.
For Vienna formatted (base) pair data, all that (Pascal-like)
information of thread is stripped off and the final structure computed
from the (base) pairs only. Though maybe there is something important
in all this, with the exception of the free energy (and the
Pascal-like notation), basically Pair and LNode are identical. I am
still thinking that it would be better to somehow organize this so
that Vienna and Thread are merged (so we don't have a feeble
half-notation). I haven't made up my mind and I still hesitate because
there is a little bit of information that thread carries that Pair
does not. Anyway, both are too incomplete in of themselves to be used
too extensively.
190403 found at least a reasonable strategy that appears to
work. Originally, I thought that the visit approach was best, but I
could not get that to work. On the other hand, the form of NewMotif
appears to be working.
Oddly, the result of these objects in NewMotif were an outgrowth of my
effort to take Vienna data and convert it to a representation
recognized by LThread. However, I eventually recognized that LThread
is far too simple to be useful for building a general transformation
code that is recognizable by Vienna and Motif simultaneously. I guess
it is not impossible to make such a script, but the result is
cumbersome and would still require the sophisticated tree making tools
in Threads to make it work.
"""
import sys
from ChrConstants import xi # [beads, bps, aa, nt]
from ChrConstants import xi_fs # useless but required by some classes anyway
from Pair import Pair # vital
from Pair import SortPair # vital
from Pair import vsPair2list # not used
from Pair import BranchList2list # not used
from Pair import rlist2vsPair # not used
from Pair import find_this_ij # not used
from Pair import is_ap_stem # not used
from Pair import is_pp_stem # not used
#####################################################################
###################### BASIC LINKAGE OBJECTS ######################
#####################################################################
# #################################################
# ############# LINKAGE DEFINITIONS #############
# #################################################
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
"""@
CBL = chromatin binding linkage
ctp = connection type, established at position (i,j). Let
(i,j) current position under analysis (may or may not have a CBL)
(i',j') a prior CBL
############### connection definitions (ctp and btp): ###############
######### CONNECTION TYPE (ctp):
'B', (i,j) -> [(i,j)] => connection type is a bond ('B') at (i,j) or
the terminal connection at the end of an antiparallel loop.
'b', (i,j) -> [(i,j)] => connection type is the terminal connection at
the end of an antiparallel loop at (i,j).
'S', (i,j) -> [(i+1,j-1), (i+2,j-2) ... ] or [(i,j),(i+1,j+1),
(i+2,j+2) ... ] or [(i,j),(i-1,j-1), (i-2,j-2) ... ]: "Stem". this is
similar to the RNA stem I used before; however, in the case of
chromatin, it is important to remember that both antiparallel and
parallel directions are allowed.
=========
'I', (i,j) -> [(i',j')] => connection type is a "I-loop" ('I'): closed
at (i,j) with the next adjacent cross link at (i',j'), where i' > i +
1 and/or j' < j -1. Typically, this structure should be more favorable
than 'B' if it exists, but it is not necessarily so.
'J', -> [(i',j')] => connection type is a "J-loop" ('J'): _open_ at
(i,j) with the next adjacent cross link at (i',j'), where i' > i + 1
and/or j' < j -1. A J-loop can be more favorable than the I-loop when
the binding free energy at (i,j) is positive.
------------------------------------------------------------------------
Note that both 'I' and 'J' could be defined as a single-branch loop
'V', where 'I' has the same base as (i,j), and 'J' has a different
base from (i,j).
------------------------------------------------------------------------
=========
'K', (i, j) -> join = [(ib,jb)], pk = [(ik1,jk1), (ik2,jk2)...] =>
connection type pseudoKnot (presently all combinations of H
type). There is a bit of a problem with chromatin that I did not
encounter in RNA because the chain can be either parallel or
antiparallel. I will only allow one solution presently. This is
because I ASSUME that the chain will favor one or the other direction
but not both for a good solid pseudoknot. This could be problematical,
but, for the moment, I'll take the risk.
'l': a pseudoknot in the "linkage" part of the pseudoknot. There is
nothing in principle that is different between the linkage and the
main structure. Both are essentially singletons. However, it is
convenient to notate it differently for human readability and due to
the fact that linkages are generally more restricted on their
interaction with the rest of the structure.
=========
'M', (i,j) -> [(i1,j1),(i2,j2)] => connection type is a "M-loop":
closed at (i,j) with branches located at [(i1,j1), (i2,j2)
... (in,jn)], subject to the condition that i < i1, j1 < i2, i2 < j3,
.... jn < j. M-loop is for "multibranch loop" and this form is
typically more favorable than an 'I' or a 'B', but it depends on the
situation.
['P', [(i1,j1),(i2,j2)]] => connection type is a "P-loop": _open_ at
(i,j) with branches located at [(i1,j1), (i2,j2) ... (in,jn)],
subject to the condition that i < i1, j1 < i2, i2 < j3, .... jn < j.
P-loop is for "principal loop". Typically, the M-loop should be more
favorable, but it could be favored if the binding free energy 'B' at
(i,j) is positive.
------------------------------------------------------------------------
Note that both 'M' and 'P' could be defined as a multi-branch loop
'V', where 'M' has the same base as (i,j), and 'P' has a different
base from (i,j).
------------------------------------------------------------------------
'S': stem. It is important to specify if the
collection of stems is parallel or antiparallel oriented. Therefore,
we must add a qualifier to this notation; i.e., for structures
containing the key 'S', we should specify 'sa' (for singleton
antiparallel) and 'sp' (for singleton parallel).
=========
['W', [(i1, j1, i2, j2 ....)]] => connection type CTCF-island. This
is unique to CTCF islands. In this type of interaction, all the
items on the list are in effect associated. This requires a
different notation from the singleton that presently I assume can be
written as
....{......|.........|..............}.....
i1 j1 i2 j2
where the meaning is that all these regions interact with each
other.
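(Illustrative sketch, not part of the original notes: for a singleton
cross link at (i,j) whose next adjacent cross link sits at (i',j') with
i' > i + 1 and j' < j - 1, the closed connection at (i,j) would carry
ctp 'I', and the innermost contact would carry ctp 'B'.)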
# ######### BOND TYPE (btp):
'-': none, used in the case of pointers because it represents neither
a singleton nor a PET cluster.
'bgn'/'end': markers for the beginning and ending of stems ('S'),
pseudoknots ('K') and CTCF islands ('W')
'c': This is a CTCF cluster that we define as "convergent"; i.e.,
having the "... > .... < ..." type of interaction that _probably_
results in an _antiparallel_ chain configuration. This configuration
results in a very strongly binding interaction and is most commonly
encountered.
'l': This is a CTCF cluster | |
24).
.. note::
Of course, failure to produce a minimal polynomial does not
necessarily indicate that this number is transcendental.
"""
if algorithm is None or algorithm.startswith('numeric'):
bits_list = [bits] if bits else [100,200,500,1000]
degree_list = [degree] if degree else [2,4,8,12,24]
for bits in bits_list:
a = ex.numerical_approx(bits)
check_bits = int(1.25 * bits + 80)
aa = ex.numerical_approx(check_bits)
for degree in degree_list:
f = QQ[var](algdep(a, degree)) # TODO: use the known_bits parameter?
# If indeed we have found a minimal polynomial,
# it should be accurate to a much higher precision.
error = abs(f(aa))
dx = ~RR(Integer(1) << (check_bits - degree - 2))
expected_error = abs(f.derivative()(CC(aa))) * dx
if error < expected_error:
# Degree might have been an over-estimate;
# factor, because we want the (irreducible) minpoly.
ff = f.factor()
for g, e in ff:
lead = g.leading_coefficient()
if lead != 1:
g = g / lead
expected_error = abs(g.derivative()(CC(aa))) * dx
error = abs(g(aa))
if error < expected_error:
# See if we can prove equality exactly
if g(ex).simplify_trig().canonicalize_radical() == 0:
return g
# Otherwise fall back to numerical guess
elif epsilon and error < epsilon:
return g
elif algorithm is not None:
raise NotImplementedError("Could not prove minimal polynomial %s (epsilon %s)" % (g, RR(error).str(no_sci=False)))
if algorithm is not None:
raise ValueError("Could not find minimal polynomial (%s bits, degree %s)." % (bits, degree))
if algorithm is None or algorithm == 'algebraic':
from sage.rings.all import QQbar
return QQ[var](QQbar(ex).minpoly())
raise ValueError("Unknown algorithm: %s" % algorithm)
###################################################################
# limits
###################################################################
def limit(ex, dir=None, taylor=False, algorithm='maxima', **argv):
r"""
Return the limit as the variable `v` approaches `a`
from the given direction.
::
expr.limit(x = a)
expr.limit(x = a, dir='+')
INPUT:
- ``dir`` - (default: None); dir may have the value
'plus' (or '+' or 'right' or 'above') for a limit from above,
'minus' (or '-' or 'left' or 'below') for a limit from below, or may be omitted
(implying a two-sided limit is to be computed).
- ``taylor`` - (default: False); if True, use Taylor
series, which allows more limits to be computed (but may also
crash in some obscure cases due to bugs in Maxima).
- ``**argv`` - 1 named parameter
.. note::
The output may also use 'und' (undefined), 'ind'
(indefinite but bounded), and 'infinity' (complex
infinity).
EXAMPLES::
sage: x = var('x')
sage: f = (1 + 1/x)^x
sage: f.limit(x=oo)
e
sage: f.limit(x=5)
7776/3125
sage: f.limit(x=1.2)
2.06961575467...
sage: f.limit(x=I, taylor=True)
(-I + 1)^I
sage: f(x=1.2)
2.0696157546720...
sage: f(x=I)
(-I + 1)^I
sage: CDF(f(x=I))
2.0628722350809046 + 0.7450070621797239*I
sage: CDF(f.limit(x=I))
2.0628722350809046 + 0.7450070621797239*I
Notice that Maxima may ask for more information::
sage: var('a')
a
sage: limit(x^a,x=0)
Traceback (most recent call last):
...
ValueError: Computation failed since Maxima requested additional
constraints; using the 'assume' command before evaluation
*may* help (example of legal syntax is 'assume(a>0)', see
`assume?` for more details)
Is a positive, negative or zero?
With this example, Maxima is looking for a LOT of information::
sage: assume(a>0)
sage: limit(x^a,x=0)
Traceback (most recent call last):
...
ValueError: Computation failed since Maxima requested additional
constraints; using the 'assume' command before evaluation *may* help
(example of legal syntax is 'assume(a>0)', see `assume?` for
more details)
Is a an integer?
sage: assume(a,'integer')
sage: limit(x^a, x=0)
Traceback (most recent call last):
...
ValueError: Computation failed since Maxima requested additional
constraints; using the 'assume' command before evaluation *may* help
(example of legal syntax is 'assume(a>0)', see `assume?` for
more details)
Is a an even number?
sage: assume(a, 'even')
sage: limit(x^a, x=0)
0
sage: forget()
More examples::
sage: limit(x*log(x), x=0, dir='+')
0
sage: lim((x+1)^(1/x), x=0)
e
sage: lim(e^x/x, x=oo)
+Infinity
sage: lim(e^x/x, x=-oo)
0
sage: lim(-e^x/x, x=oo)
-Infinity
sage: lim((cos(x))/(x^2), x=0)
+Infinity
sage: lim(sqrt(x^2+1) - x, x=oo)
0
sage: lim(x^2/(sec(x)-1), x=0)
2
sage: lim(cos(x)/(cos(x)-1), x=0)
-Infinity
sage: lim(x*sin(1/x), x=0)
0
sage: limit(e^(-1/x), x=0, dir='right')
0
sage: limit(e^(-1/x), x=0, dir='left')
+Infinity
::
sage: f = log(log(x)) / log(x)
sage: forget(); assume(x < -2); lim(f, x=0, taylor=True)
0
sage: forget()
Here ind means "indefinite but bounded"::
sage: lim(sin(1/x), x = 0)
ind
We can use other packages than maxima, namely "sympy", "giac", "fricas".
With the standard package Giac::
sage: from sage.libs.giac.giac import libgiac # random
sage: (exp(-x)/(2+sin(x))).limit(x=oo, algorithm='giac')
0
sage: limit(e^(-1/x), x=0, dir='right', algorithm='giac')
0
sage: limit(e^(-1/x), x=0, dir='left', algorithm='giac')
+Infinity
sage: (x / (x+2^x+cos(x))).limit(x=-infinity, algorithm='giac')
1
With the optional package FriCAS::
sage: (x / (x+2^x+cos(x))).limit(x=-infinity, algorithm='fricas') # optional - fricas
1
sage: limit(e^(-1/x), x=0, dir='right', algorithm='fricas') # optional - fricas
0
sage: limit(e^(-1/x), x=0, dir='left', algorithm='fricas') # optional - fricas
+Infinity
One can also call Mathematica's online interface::
sage: limit(pi+log(x)/x,x=oo, algorithm='mathematica_free') # optional - internet
pi
TESTS::
sage: lim(x^2, x=2, dir='nugget')
Traceback (most recent call last):
...
ValueError: dir must be one of None, 'plus', '+', 'above', 'right',
'minus', '-', 'below', 'left'
sage: x.limit(x=3, algorithm='nugget')
Traceback (most recent call last):
...
ValueError: Unknown algorithm: nugget
We check that :trac:`3718` is fixed, so that
Maxima gives correct limits for the floor function::
sage: limit(floor(x), x=0, dir='-')
-1
sage: limit(floor(x), x=0, dir='+')
0
sage: limit(floor(x), x=0)
und
Maxima gives the right answer here, too, showing
that :trac:`4142` is fixed::
sage: f = sqrt(1 - x^2)
sage: g = diff(f, x); g
-x/sqrt(-x^2 + 1)
sage: limit(g, x=1, dir='-')
-Infinity
::
sage: limit(1/x, x=0)
Infinity
sage: limit(1/x, x=0, dir='+')
+Infinity
sage: limit(1/x, x=0, dir='-')
-Infinity
Check that :trac:`8942` is fixed::
sage: f(x) = (cos(pi/4 - x) - tan(x)) / (1 - sin(pi/4 + x))
sage: limit(f(x), x=pi/4, dir='minus')
+Infinity
sage: limit(f(x), x=pi/4, dir='plus')
-Infinity
sage: limit(f(x), x=pi/4)
Infinity
Check that :trac:`12708` is fixed::
sage: limit(tanh(x), x=0)
0
Check that :trac:`15386` is fixed::
sage: n = var('n')
sage: assume(n>0)
sage: sequence = -(3*n^2 + 1)*(-1)^n / sqrt(n^5 + 8*n^3 + 8)
sage: limit(sequence, n=infinity)
0
Check if :trac:`23048` is fixed::
sage: (1/(x-3)).limit(x=3, dir='below')
-Infinity
From :trac:`14677`::
sage: f = (x^x - sin(x)^sin(x)) / (x^3*log(x))
sage: limit(f, x=0, algorithm='fricas') # optional - fricas
und
sage: limit(f, x=0, dir='right', algorithm='fricas') # optional - fricas
1/6
From :trac:`26497`::
sage: mu, y, sigma = var("mu, y, sigma")
sage: f = 1/2*sqrt(2)*e^(-1/2*(mu - log(y))^2/sigma^2)/(sqrt(pi)*sigma*y)
sage: limit(f, y=0, algorithm='fricas') # optional - fricas
0
From :trac:`26060`::
sage: limit(x / (x + 2^x + cos(x)), x=-infinity)
1
"""
if not isinstance(ex, Expression):
ex = SR(ex)
if len(argv) != 1:
raise ValueError("call the limit function like this, e.g. limit(expr, x=2).")
else:
k, = argv.keys()
v = var(k)
a = argv[k]
if taylor and algorithm == 'maxima':
algorithm = 'maxima_taylor'
dir_plus = ['plus', '+', 'above', 'right']
dir_minus = ['minus', '-', 'below', 'left']
dir_both = [None] + dir_plus + dir_minus
if dir not in dir_both:
raise ValueError("dir must be one of " + ", ".join(map(repr, dir_both)))
if algorithm == 'maxima':
if dir is None:
l = maxima.sr_limit(ex, v, a)
elif dir in dir_plus:
l = maxima.sr_limit(ex, v, a, 'plus')
elif dir in dir_minus:
l = maxima.sr_limit(ex, v, a, 'minus')
elif algorithm == 'maxima_taylor':
if dir is None:
l = maxima.sr_tlimit(ex, v, a)
elif dir in dir_plus:
l = maxima.sr_tlimit(ex, v, a, 'plus')
elif dir in dir_minus:
l = maxima.sr_tlimit(ex, v, a, 'minus')
elif algorithm == 'sympy':
import sympy
if dir is None:
l = sympy.limit(ex._sympy_(), v._sympy_(), a._sympy_())
elif dir in dir_plus:
l = sympy.limit(ex._sympy_(), v._sympy_(), a._sympy_(), dir='+')
elif dir in dir_minus:
l = sympy.limit(ex._sympy_(), v._sympy_(), a._sympy_(), dir='-')
elif algorithm == 'fricas':
from sage.interfaces.fricas import fricas
eq = fricas.equation(v._fricas_(), a._fricas_())
f = ex._fricas_()
if dir is None:
l = fricas.limit(f, eq).sage()
if isinstance(l, dict):
l = maxima("und")
elif dir in dir_plus:
l = fricas.limit(f, eq, '"right"').sage()
elif dir in dir_minus:
l = fricas.limit(f, eq, '"left"').sage()
elif algorithm == 'giac':
from sage.libs.giac.giac import libgiac
v = v._giac_init_()
a = a._giac_init_()
if dir is None:
l = libgiac.limit(ex, v, a).sage()
elif dir in dir_plus:
l = libgiac.limit(ex, v, a, 1).sage()
elif dir in dir_minus:
l = libgiac.limit(ex, v, a, -1).sage()
elif algorithm == | |
`separator` is not given as `None`, ``ContentParameterSeparator``, `str`, nor as a `tuple` instance.
- If `separator` was given as `tuple`, but its elements are not `str` instances.
ValueError
- If `separator` is given as `str`, but its length is not 1.
- If `separator` is given as `str`, but it is a space character.
- If `separator` is given as `tuple`, but one of its elements' length is not 1.
- If `separator` is given as `tuple`, but one of its elements is a space character.
"""
separator = ContentParameterSeparator(separator)
analyzer = CallableAnalyzer(func)
if analyzer.is_async() or analyzer.is_async_generator():
real_analyzer = analyzer
should_instance = False
elif analyzer.can_instance_to_async_callable() or analyzer.can_instance_to_async_generator():
real_analyzer = CallableAnalyzer(func.__call__, as_method=True)
if (not real_analyzer.is_async()) and (not real_analyzer.is_async_generator()):
raise TypeError(f'`func` is not `async-callable` and cannot be instanced to `async` either, got '
f'{func!r}.')
should_instance = True
else:
raise TypeError(f'`func` is not `async-callable` and cannot be instanced to `async` either, got {func!r}.')
keyword_only_parameter_count = real_analyzer.get_non_default_keyword_only_parameter_count()
if keyword_only_parameter_count:
raise TypeError(f'`{real_analyzer.real_function!r}` accepts keyword only parameters.')
if real_analyzer.accepts_kwargs():
raise TypeError(f'`{real_analyzer.real_function!r}` accepts **kwargs.')
parameters = real_analyzer.get_non_reserved_positional_parameters()
parameter_count = len(parameters)
if parameter_count<2:
raise TypeError(f'`{real_analyzer.real_function!r}` should accept at least 2 parameters (without *args): '
f'`client` and `message`, meanwhile it accepts only {parameter_count}.')
client_parameter = parameters[0]
if client_parameter.has_default:
raise TypeError(f'`{real_analyzer.real_function!r}` has a default value set for its first non-reserved '
f'parameter, meanwhile it should not have.')
if client_parameter.has_annotation and (client_parameter.annotation is not Client):
raise TypeError(f'`{real_analyzer.real_function!r}` has an annotation at the client\'s parameter slot '
f'which is not `{Client.__name__}`.')
message_parameter = parameters[1]
if message_parameter.has_default:
raise TypeError(f'`{real_analyzer.real_function!r}` has a default value set for its second non-reserved '
f'parameter, meanwhile it should not have.')
if message_parameter.has_annotation and (message_parameter.annotation is not Message):
raise TypeError(f'`{real_analyzer.real_function!r}` has an annotation at the message\'s parameter slot '
f'which is not `{Message.__name__}`.')
hinters = []
to_check = parameters[2:]
args_parameter = real_analyzer.args_parameter
index = 0
limit = len(to_check)
while True:
if index == limit:
break
parameter = to_check[index]
index += 1
if parameter.has_annotation:
annotation = parameter.annotation
if type(annotation) is Converter:
hinter_default = annotation.default
hinter_default_type = annotation.default_type
hinter_annotation = annotation.annotation
if parameter.has_default:
if hinter_default_type:
raise TypeError(f'`annotation` of `{parameter.name}` is given as a '
f'`{Converter.__name__}` instance, as {annotation!r} (with default value '
f'set), meanwhile the parameter has a default value set as well: {parameter.default!r}.')
hinter_default = parameter.default
hinter_default_type = DEFAULT_TYPE_OBJ
else:
annotation = validate_annotation(annotation)
if parameter.has_default:
hinter_default = parameter.default
hinter_default_type = DEFAULT_TYPE_OBJ
else:
hinter_default = None
hinter_default_type = DEFAULT_TYPE_NONE
hinter_annotation = annotation
else:
if parameter.has_default:
default = parameter.default
if (index == limit) and (args_parameter is None):
hinter_annotation = None
else:
hinter_annotation = FlaggedAnnotation(str)
hinter_default = default
hinter_default_type = DEFAULT_TYPE_OBJ
else:
if (index == limit) and (args_parameter is None):
hinter_annotation = None
else:
hinter_annotation = FlaggedAnnotation(str)
hinter_default = None
hinter_default_type = DEFAULT_TYPE_NONE
hinter = ContentParserParameterHinter()
hinter.default = hinter_default
hinter.default_type = hinter_default_type
hinter.annotation = hinter_annotation
hinter.is_args = False
hinters.append(hinter)
if (args_parameter is not None):
if args_parameter.has_annotation:
annotation = args_parameter.annotation
if type(annotation) is Converter:
if annotation.default_type:
raise TypeError(f'`*args` annotation is given as a `{Converter.__name__}` instance, as '
f'{annotation!r}, so with default value set, do not do that!')
hinter_annotation = annotation.annotation
else:
hinter_annotation = validate_annotation(annotation)
else:
hinter_annotation = FlaggedAnnotation(str)
hinter = ContentParserParameterHinter()
hinter.default = None
hinter.default_type = DEFAULT_TYPE_NONE
hinter.annotation = hinter_annotation
hinter.is_args = True
hinters.append(hinter)
parsers = []
for hinter in hinters:
annotation = hinter.annotation
if annotation is None:
parser = RestParserContext(hinter.default_type, hinter.default)
else:
if hinter.is_args:
if type(annotation) is tuple:
parser_cls = ChainedArgsParserContext
else:
parser_cls = SingleArgsParserContext
parser = parser_cls(annotation)
else:
if type(annotation) is tuple:
parser_cls = ChainedParserContext
else:
parser_cls = SingleParserContext
parser = parser_cls(annotation, hinter.default_type, hinter.default)
parsers.append(parser)
if not parsers:
parsers = None
if should_instance:
func = analyzer.instance()
self = object.__new__(cls)
self._parsers = parsers
self._separator = separator
return self, func
async def get_args(self, client, message, content):
"""
Parses the given content and returns whether the parsing passed and what was parsed.
This method is a coroutine.
Parameters
----------
client : ``Client``
The client who received the respective message.
message : ``Message``
The received message.
content : `str`
The message's content to parse. Can be empty.
Returns
-------
passed : `bool`
Whether the parsing all the parameters of the message succeeded.
args : `None` or `list` of `Any`
The parsed out entities. Can be empty list.
"""
parsers = self._parsers
if parsers is None:
return True, None
content_parser_context = ContentParserContext(self._separator, client, message, content)
for parser in parsers:
result = await parser(content_parser_context)
if result:
continue
return False, content_parser_context.result
return True, content_parser_context.result
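# Usage sketch (illustrative, not part of the original source; `parser`, `client`
# and `message` are assumed objects):
#
#     passed, args = await parser.get_args(client, message, message.content)
#     if not passed:
#         return  # parsing failed; `args` holds what could be parsed so far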
def __bool__(self):
"""Returns whether the content parser parses anything."""
parsers = self._parsers
if parsers is None:
return False
if parsers:
return True
return False
def __repr__(self):
"""Returns the command content parser's representation."""
result = [
'<',
self.__class__.__name__,
]
parsers = self._parsers
if (parsers is not None):
result.append(' parsers=')
result.append(repr(parsers))
separator = self._separator
if (separator is not DEFAULT_SEPARATOR):
result.append(', separator=')
result.append(repr(separator))
result.append('>')
return ''.join(result)
class ContentParser(CommandContentParser):
"""
Represents a content parser, which can be used as a standalone wrapper of a function.
Parameters
----------
_parsers : `None` or `list` of ``ParserContextBase`` instances
The parsers of the command content parser. Set as `None` if it would be an empty `list`.
_separator : ``ContentParameterSeparator``
The parameter separator of the parser.
_func : `async-callable`
The wrapped function.
_handler : `None` or `async-callable`
A coroutine function which is ensured when parsing the parameters fails.
_is_method : `bool`
Whether the ``ContentParser`` should act like a method descriptor.
"""
__slots__ = ('_func', '_handler', '_is_method',)
def __new__(cls, func=None, handler=None, is_method=False, separator=None):
"""
Parameters
----------
func : `None`, `async-callable` or instantiable to `async-callable`, Optional
handler : `None`, `async-callable` or instantiable to `async-callable`, Optional
An async callable, which is ensured when the parser cannot parse out all the required parameters.
If given, should accept the following parameters:
+-----------------------+-------------------+
| Respective name | Type |
+=======================+===================+
| client | ``Client`` |
+-----------------------+-------------------+
| message | ``Message`` |
+-----------------------+-------------------+
| content_parser | ``ContentParser`` |
+-----------------------+-------------------+
| content | `str` |
+-----------------------+-------------------+
| args | `list` of `Any` |
+-----------------------+-------------------+
| parent | `Any` |
+-----------------------+-------------------+
is_method : `bool`, Optional
Whether the content parser should act like a method. Default to `False`.
separator : `None`, ``ContentParameterSeparator``, `str` or `tuple` (`str`, `str`), Optional
The parameter separator of the parser.
Returns
-------
wrapper / self : ``ContentParser._wrapper`` / ``ContentParser``
If `func` is not given, then returns a wrapper, what allows using the content parser as a decorator with
still giving parameters.
Raises
------
TypeError
- If `is_method` is not given as a `bool` instance.
- If `handler` is not async, neither can be instanced to async.
- If `handler` (or its converted form) would accept a bad number of parameters.
- If `separator` is not given as `None`, ``ContentParameterSeparator``, `str`, nor as a `tuple` instance.
- If `separator` was given as `tuple`, but its elements are not `str` instances.
ValueError
- If `separator` is given as `str`, but its length is not 1.
- If `separator` is given as `str`, but it is a space character.
- If `separator` is given as `tuple`, but one of its elements' length is not 1.
- If `separator` is given as `tuple`, but one of its elements is a space character.
"""
is_method = preconvert_bool(is_method, 'is_method')
if (handler is not None):
handler = check_parameter_count_and_convert(handler, 6, name='handler', error_message= \
'`ContentParser` expects to pass `6` parameters to it\'s `handler`: client, message, content_parser, '
'content, args, obj (can be `None`).')
separator = ContentParameterSeparator(separator)
if func is None:
return cls._wrapper(handler, is_method, separator)
if is_method:
func = MethodType(func, object())
self, func = CommandContentParser.__new__(cls, func, separator)
if is_method:
func = func.__func__
self._func = func
self._handler = handler
self._is_method = is_method
<filename>scalpel/typeinfer/typeinfer.py
"""
This module is the main module of typeinfer. The module contains a single class named TypeInference, which processes
files and infers types.
"""
import os
import ast
import astunparse
from typing import List
from pprint import pprint
from scalpel.typeinfer.visitors import get_call_type
from scalpel.typeinfer.classes import ProcessedFile, ScalpelVariable
from scalpel.typeinfer.analysers import (
ImportTypeMap,
SourceSplitVisitor,
ClassSplitVisitor,
ReturnStmtVisitor,
HeuristicParser,
Heuristics,
VariableAssignmentMap
)
from scalpel.import_graph.import_graph import Tree, ImportGraph
from scalpel.typeinfer.utilities import (
generate_ast,
parse_module,
get_api_ref_id,
get_function_comment,
is_imported_fun,
is_valid_call_link,
rename_from_name,
is_done,
find_class_by_attr,
)
def process_code_with_heuristics(node):
def pick_type(type_lst):
base_type_names = ["Num", "Set", "List", "Tuple", "Dict", "Str", "NameConstant"]
new_type_lst = []
type_hint_pair_list = []
length = len(type_lst[0])
if length == 0:
return []
type_lst = [t for t in type_lst if len(t) == length]  # materialize: the list is iterated once per position below
for i in range(length):
i_th = [t[i] for t in type_lst]
new_type_lst += [None]
for tmp in i_th:
if tmp in base_type_names:
new_type_lst[-1] = tmp
break
for tmp in i_th:
if isinstance(tmp, tuple) and tmp[0] == "Call":
if new_type_lst[i] is not None:
type_hint_pair_list += [(tmp[1], new_type_lst[i])]
return type_hint_pair_list
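# Illustrative sketch of what pick_type computes (hypothetical input): for each
# argument position it takes the first concrete base type seen, then pairs it
# with any call name observed at the same position.
#
#     pick_type([[('Call', 'foo'), 'Str'], ['Num', 'Str']])
#     # -> [('foo', 'Num')]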
try:
tree = ast.parse(node.source, mode='exec')
visitor = HeuristicParser(node)
visitor.visit(tree)
func_arg_db = {}
function_arg_types = get_call_type(tree)
type_hint_pairs = visitor.type_hint_pairs
all_call_names = []
for pair in function_arg_types:
name, arg_type = pair
name_parts = name.split('.')
name = ".".join(name_parts)
all_call_names += [name]
if name in func_arg_db:
func_arg_db[name] += [arg_type]
else:
func_arg_db[name] = [arg_type]
for func, arg_type in func_arg_db.items():
type_hint_pairs += pick_type(arg_type)
return type_hint_pairs, visitor.call_links, all_call_names
except (SyntaxError, UnicodeDecodeError):
return {}, {}, []
class TypeInference:
"""
Infer types for the modules accessible from the entrypoints with the help of a set of heuristic functions
"""
def __init__(self, name: str, entry_point: str):
"""
Args:
name: the name of the type inference analyser
entry_point: the entry point, can be the root folder of a package or a python file
"""
self.name = name
self.entry_point = entry_point
self.root_node = Tree(os.path.basename(self.entry_point))
self.import_graph = None
self.leaves = []
# Build graph of the directory
self.import_graph = ImportGraph(self.entry_point)
self.import_graph.build_dir_tree()
self.leaves = self.import_graph.get_leaf_nodes()
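# Typical usage (illustrative; the entry point path is hypothetical):
#
#     inferrer = TypeInference(name='demo', entry_point='path/to/package')
#     inferrer.infer_types()
#     pprint(inferrer.get_types())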
def infer_types(self):
"""
Infer the types for the modules accessible from the entrypoint
"""
# Loop through leaves
for node in self.leaves:
processed_file = self.process_file(node.source)
node.node_type_dict = processed_file.type_dict
node.node_type_gt = processed_file.type_gt
node.call_links = processed_file.type_stem_links
node.static_assignments = processed_file.static_assignments
node.line_numbers = processed_file.line_numbers
node.imports = processed_file.imports
for node in self.leaves:
type_hint_pairs, client_call_link, all_call_names = process_code_with_heuristics(node)
for pair in type_hint_pairs:
if pair is None:
continue
function_name, t_val = pair
if t_val in ['call', 'input']:
# TODO: Why do this?
continue
# Type hints are known
if function_name in node.node_type_dict and node.node_type_dict[function_name] is not None:
if "input" in node.node_type_dict[function_name]:
node.node_type_dict[function_name] = [t_val]
if "3call" in node.node_type_dict[function_name]:
node.node_type_dict[function_name] = [t_val]
if "unknown" in node.node_type_dict[function_name]:
node.node_type_dict[function_name] = [t_val]
else:
node.node_type_dict[function_name] += [t_val]
# Call pairs
for call_pair in client_call_link:
if call_pair is None:
continue
function1, function2 = call_pair
# Is this the same module
if function1 in node.node_type_dict and function2 in node.node_type_dict:
# They share with each other
function1_t_val = node.node_type_dict[function1]
function2_t_val = node.node_type_dict[function2]
if is_done(function1_t_val) and (not is_done(function2_t_val)):
node.node_type_dict[function2] = function1_t_val
if is_done(function2_t_val) and (not is_done(function1_t_val)):
node.node_type_dict[function1] = function2_t_val
function_access_attribute_records = {}
for call_name in all_call_names:
call_name_segments = call_name.split('.')
if len(call_name_segments) < 2:
continue
if call_name_segments[0] not in node.node_type_dict:
continue
if call_name_segments[0] not in function_access_attribute_records:
function_access_attribute_records[call_name_segments[0]] = [call_name_segments[1]]
else:
function_access_attribute_records[call_name_segments[0]] += [call_name_segments[1]]
for function_name, access_attrs in function_access_attribute_records.items():
access_attrs = list(set(access_attrs))
class_inferred = find_class_by_attr(list(node.node_type_dict.keys()), access_attrs)
if class_inferred is not None:
node.node_type_dict[function_name] = [class_inferred]
# Reconcile across all leaves
for node in self.leaves:
for function_name, (from_where, from_name) in node.call_links.items():
# The same module
if node.node_type_dict[function_name] is None:
continue
# self: in the same class
# base: from the base class
# local: from the same module file
if from_where in ['self', 'base', 'local']:
from_name = rename_from_name(from_where, from_name, function_name)
if from_name in node.node_type_dict and node.node_type_dict[from_name] is not None:
t_vals_tmp = node.node_type_dict[from_name]
if is_valid_call_link(t_vals_tmp):
node.node_type_dict[function_name] += t_vals_tmp
# Might be from other modules
else:
visit_path = from_where.split('.')
if len(visit_path) == 1 and visit_path[0] == self.import_graph.root.name:
dst_node = self.import_graph.root
else:
dst_node = self.import_graph.go_to_that_node(node, from_where.split('.')[0:-1])
if dst_node is not None:
if dst_node.node_type_dict is not None:
if from_name in dst_node.node_type_dict and dst_node.node_type_dict[from_name] is not None:
t_vals_tmp = dst_node.node_type_dict[from_name]
if is_valid_call_link(t_vals_tmp):
node.node_type_dict[function_name] += t_vals_tmp
else:
# This is a library call; the "3call" marker will be propagated to other affected calls
node.node_type_dict[function_name] += ["3call"]
def get_types(self) -> List[dict]:
"""
Get the inferred type information in a list of dictionaries
"""
n_known = 0
type_list = []
for node in self.leaves:
# Function returns
for function_name, type_values in node.node_type_dict.items():
added = False # Used to check whether type has already been added to list for the loop
if type_values is None or len(type_values) == 0:
continue
# TODO: why is self recognized as any type?
for value in type_values:
if value in ['unknown', '3call', 'self']:
type_list.append({
'file': node.name,
'line_number': node.line_numbers.get(function_name),
'function': function_name,
'type': {any.__name__}
})
added = True
break
if is_done(type_values) and not added:
n_known += 1
type_list.append({
'file': node.name,
'line_number': node.line_numbers.get(function_name),
'function': function_name,
'type': set(type_values)
})
else:
type_list.append({
'file': node.name,
'line_number': node.line_numbers.get(function_name),
'function': function_name,
'type': {any.__name__}
})
# Static assignments
for assignment in node.static_assignments:
if assignment.is_arg:
type_list.append({
'file': node.name,
'line_number': assignment.line,
'parameter': assignment.name,
'function': assignment.function,
'type': assignment.type
})
else:
type_list.append({
'file': node.name,
'line_number': assignment.line,
'variable': assignment.name,
'function': assignment.function,
'type': assignment.type
})
return type_list
@staticmethod
def process_file(source: str):
"""
Process and extract information from the source files with a set of heuristic functions for type inference
"""
heuristics = Heuristics()
processed_file = ProcessedFile()
stem_from_dict = {}
class2base = {}
# Generate AST from source
tree = generate_ast(source)
if tree is None:
return processed_file
# Get imported types
import_mappings, imported = ImportTypeMap(tree).map()
processed_file.imports = import_mappings
split_visitor = SourceSplitVisitor()
return_visitor = ReturnStmtVisitor(imports=import_mappings)
# Extract all function and class nodes
split_visitor.visit(tree)
assign_records = split_visitor.assign_dict
return_visitor.import_assign_records(assign_records)
all_methods, all_classes, import_nodes = parse_module(tree) # TODO: This doesn't need to be a function?
import_dict = get_api_ref_id(import_nodes) # TODO: Replace with import map?
# Loop through function nodes
for function_node in all_methods:
function_name = function_node.name
processed_file.line_numbers[function_name] = function_node.lineno
function_source = astunparse.unparse(function_node)
# Variable assignments
assignments = VariableAssignmentMap(function_node, imports=import_mappings).map()
for assignment in assignments:
assignment.function = function_node.name
processed_file.static_assignments.extend(assignments)
# Heuristic 5
heuristics.heuristic_five(
import_mappings=import_mappings,
processed_file=processed_file,
function_node=function_node
)
# Heuristic 8
function_params = [a for a in processed_file.static_assignments if a.is_arg]
heuristics.heuristic_eight(
ast_tree=tree,
function_name=function_name,
function_params=function_params
)
# Heuristic 7
heuristics.heuristic_seven(
processed_file=processed_file,
function_node=function_node
)
# Heuristic 6
heuristics.heuristic_six(
processed_file=processed_file,
function_node=function_node
)
# Heuristic 9
heuristics.heuristic_nine(
import_mappings=import_mappings,
processed_file=processed_file,
function_node=function_node
)
# # Heuristic 2
heuristics.heuristic_two(
ast_tree=tree,
processed_file=processed_file,
assignments=assignments
)
# Heuristic 5 once more to account for newly inferred types
heuristics.heuristic_five(
import_mappings=import_mappings,
processed_file=processed_file,
function_node=function_node
)
# Import resolved assignments to the return visitor
return_visitor.import_assignments(assignments)
# Get method header comment TODO: is this needed?
processed_file.node_type_comment[function_name] = get_function_comment(function_source)
return_visitor.clear()
return_visitor.visit(function_node)
processed_file.type_dict[function_name] = None
processed_file.type_gt[function_name] = None
if return_visitor.n_returns == 0:
# This function has no return
continue
# Function has at least one return if we reach here
processed_file.type_dict[function_name] = return_visitor.r_types
stem_from_dict[function_name] = return_visitor.stem_from
processed_file.type_gt[function_name] = function_node.returns
# Loop through class nodes
class_assign_record_map = {}
for class_node in all_classes:
class_name = class_node.name
class_visitor = ClassSplitVisitor()
class_visitor.visit(class_node)
# Check for base class type
if len(class_node.bases) > 0:
if isinstance(class_node.bases[0], ast.Name):
class2base[class_name] = class_node.bases[0].id
class_assign_records = class_visitor.class_assign_records
class_assign_record_map[class_name] = class_assign_records
# Extend class assign records with super class assign records
for superclass_name in class_visitor.bases:
assign_records = class_assign_record_map.get(superclass_name)
if assign_records:
for assignment_name, type_val in assign_records.items():
super_assignment = assignment_name.replace('self', 'super')
class_assign_records[super_assignment] = type_val
return_visitor.clear_all()
return_visitor.import_assign_records(class_assign_records)
# Loop through class methods
for function_node in class_visitor.fun_nodes:
function_name = f"{class_name}.{function_node.name}"
function_source = astunparse.unparse(function_node)
processed_file.line_numbers[function_name] = function_node.lineno
# Get method header comment TODO: Is this needed?
processed_file.node_type_comment[function_name] = get_function_comment(function_source)
# Variable assignments
assignments = VariableAssignmentMap(function_node, imports=import_mappings).map()
for assignment in assignments:
assignment.function = function_node.name
processed_file.static_assignments.extend(assignments)
# Heuristic 5
heuristics.heuristic_five(
import_mappings=import_mappings,
processed_file=processed_file,
function_node=function_node
)
# Heuristic 8
function_params = [a for a in processed_file.static_assignments if a.is_arg]
heuristics.heuristic_eight(
ast_tree=tree,
function_name=function_name,
function_params=function_params
)
# Heuristic 7
heuristics.heuristic_seven(
processed_file=processed_file,
function_node=function_node
)
# Heuristic 6
heuristics.heuristic_six(
processed_file=processed_file,
function_node=function_node
)
# Heuristic 9
heuristics.heuristic_nine(
import_mappings=import_mappings,
processed_file=processed_file,
function_node=function_node
)
# # Heuristic 2
heuristics.heuristic_two(
ast_tree=tree,
processed_file=processed_file,
assignments=assignments
)
# Heuristic 5 once more to account for newly inferred types
import numpy as np
import math
from copy import deepcopy
from collections import deque
from collections import OrderedDict
class FLC():
""" FLC filter class
Attributes
----------
n : int
Number of harmonics
X : ndarray
Reference input vector
W : ndarray
Weights
V : ndarray
Angular frequencies
mu : float
Adaptive filter gain
f0 : float
Frequency of the input signal
"""
def __init__(self, n=5, mu=0.07, f0=8):
"""
Parameters
----------
n : int
Number of harmonics
mu : float
Adaptive filter gain
f0 : float
Frequency of input signal
"""
self.n = n
self.mu = mu
self.f0 = f0
self.X = np.zeros(shape=(2, n))
self.W = np.zeros(shape=(2, n))
self.V = np.array(np.zeros([n]))
for i in range(self.n):
self.V[i] = i * 2 * math.pi * self.f0
def FLC(self, k, s):
""" FLC filter
Parameters
----------
k : float
Time instant
s : float
Reference signal
Returns
-------
y : float
Estimated signal
"""
# Find reference input vector
for i in range(self.n):
self.X[0][i] = math.sin(self.V[i] * k)
self.X[1][i] = math.cos(self.V[i] * k)
# Find estimated signal
y = np.dot(np.transpose(self.W[0]), self.X[0]) + np.dot(np.transpose(self.W[1]), self.X[1])
err = s - y
# Update weights
self.W[0] += 2 * self.mu * self.X[0] * err
self.W[1] += 2 * self.mu * self.X[1] * err
return y
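# Minimal usage sketch (assumed 100 Hz sampling of an 8 Hz signal; values are
# illustrative, not from the original module):
#
#     flc = FLC(n=5, mu=0.07, f0=8)
#     dt = 0.01
#     for k in range(1000):
#         s = math.sin(2 * math.pi * 8 * k * dt)  # reference signal
#         y = flc.FLC(k * dt, s)                  # adaptive estimate of s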
class WFLC():
""" WFLC filter class
Attributes
----------
n : int
Number of harmonics
X : ndarray
Reference input vector
W : ndarray
Weights
V : ndarray
Angular frequencies
mu : float
Adaptive filter gain
v0 : float
ω0 fundamental angular frequency
"""
def __init__(self, n=1, mu=0.001, mu0=0.0001, f0 = 6):
"""
Parameters
----------
n : int
Number of harmonics
mu : float
Adaptive filter gain for reference input vector
mu0 : float
Adaptive filter gain for fundamental frequency
"""
self.n = n
self.mu = mu
self.mu0 = mu0
self.v0 = 2*math.pi*f0
self.X = np.zeros(shape=(2, n))
self.W = np.zeros(shape=(2, n))
self.V = np.array(np.zeros([n]))
self.estimatedFrequency = 0
def WFLC(self, k, s):
""" WFLC filter
Parameters
----------
k : float
Time instant
s : float
Reference signal
Returns
-------
y : float
Estimated signal
"""
# Find reference input vector
for i in range(self.n):
self.X[0][i] = math.sin((i+1) * self.v0 * k)
self.X[1][i] = math.cos((i+1) * self.v0 * k)
# Find estimated signal
y = np.dot(np.transpose(self.W[0]), self.X[0]) + np.dot(np.transpose(self.W[1]), self.X[1])
err = s - y
# Update fundamental angular frequency
z = 0
for i in range(self.n):
z += (i+1)*(self.W[0][i]*self.X[1][i] - self.W[1][i]*self.X[0][i])
self.v0 = self.v0 + 2*self.mu0*err*z
# Update weights
self.W[0] += 2 * self.mu * self.X[0] * err
self.W[1] += 2 * self.mu * self.X[1] * err
self.estimatedFrequency = self.v0 / (2 * math.pi)
return y
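# Sketch: unlike FLC, WFLC also adapts its fundamental angular frequency, so the
# estimate converges toward the true input frequency (7 Hz is illustrative):
#
#     wflc = WFLC(n=1, mu=0.001, mu0=0.0001, f0=6)
#     for k in range(5000):
#         wflc.WFLC(k * 0.01, math.sin(2 * math.pi * 7 * k * 0.01))
#     print(wflc.estimatedFrequency)  # expected to drift from 6 toward 7 Hz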
class BMFLC():
""" FLC filter class
Attributes
----------
n : int
Number of harmonics
X : ndarray
Reference input vector
W : ndarray
Weights
V : ndarray
Angular frequencies
mu : float
Adaptive filter gain
fmin : float
Minimum (starting) frequency
fmax : float
Maximum frequency
dF : float
Frequency step
"""
def __init__(self, mu=0.01, fmin=6, fmax=7, dF=0.2):
"""
Parameters
----------
mu : float
Adaptive filter gain
fmin : float
Minimum frequency of the band
fmax : float
Maximum frequency of the band
dF : float
Frequency step
"""
self.n = int((fmax - fmin) / dF) + 1
self.mu = mu
self.fmax = fmax
self.fmin = fmin
self.X = np.zeros(shape=(2, self.n))
self.W = np.zeros(shape=(2, self.n))
self.V = np.array(np.zeros([self.n]))
self.estimatedFrequency = 0
for i in range(self.n):
self.V[i] = 2 * math.pi * (self.fmin + dF * i)
def BMFLC(self, k, s):
""" BMFLC filter
Parameters
----------
k : float
Time instant
s : float
Reference signal
Returns
-------
y : float
Estimated signal
"""
for i in range(self.n):
self.X[0][i] = math.sin(self.V[i] * k)
self.X[1][i] = math.cos(self.V[i] * k)
y = np.dot(np.transpose(self.W[0]), self.X[0]) + np.dot(np.transpose(self.W[1]), self.X[1])
err = s - y
# Update weights
for i in range(self.n):
self.W[0][i] += 2 * self.mu * self.X[0][i] * err
self.W[1][i] += 2 * self.mu * self.X[1][i] * err
a=0
b=0
vest = 0
for i in range(self.n):
a += (self.W[0][i]**2+self.W[1][i]**2)*self.V[i]
for i in range(self.n):
b += self.W[0][i] ** 2 + self.W[1][i] ** 2
vest += a/b
a=0
b=0
self.estimatedFrequency = vest/(2*math.pi)
return y
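# Sketch: BMFLC covers a frequency band with a fixed grid of components instead
# of adapting a single fundamental frequency (band limits are illustrative):
#
#     bmflc = BMFLC(mu=0.01, fmin=6, fmax=7, dF=0.2)  # n = (7 - 6) / 0.2 + 1 = 6
#     y = bmflc.BMFLC(0.01, math.sin(2 * math.pi * 6.4 * 0.01))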
class BMWFLC():
""" FLC filter class
Attributes
----------
n : int
Number of harmonics
X : ndarray
Reference input vector
W : ndarray
Weights
V : ndarray
Angular frequencies
mu : float
Adaptive filter gain
f_min : float
Minimum frequency
f_max : float
Maximum frequency
dF : float
Frequency step
"""
def __init__(self, mu=1.0, kappa = 0.009, g = 100, h = 0.00001,eta = 0.4 , f_min=6, f_max=7, dF=0.1, dT=0.01, Tp=2, alpha=0.67, beta=100, l=0.1, peaks_to_track = 2, adaptive_lr = True):
"""
Parameters
----------
n : int
Number of harmonics
mu : float
Adaptive filter gain
f_min : float
Minimum frequency
f_max : float
Maximum frequency
dT : float
Sampling time in seconds
Tp : float
Width of memory window in seconds
alpha : float
Minimum amplification gain for memory window
beta : float
Multiplier for the mu0 learning rate. The learning rate will start at the multiplied
value of mu0 and decay until it reaches mu0.
l : float
Decay constant for mu0. l for lambda.
d : float
Scaling factor for mu. How much to scale mu with respect to max amplitude
from the input signal
mu = d/maxAmplitude
g : int
length of sliding window for max amplitude.
If g = 100, the max amplitude from the last 100 samples
from the input signal will be used to scale mu
h : float
factor to decrease mu0 with respect to mu.
mu0 = mu*h
eta: float
Threshold when angular frequency should be reset for having
to low magnitude
"""
self.n = int((f_max - f_min) / dF) + 1
self.f_max = f_max
self.f_min = f_min
self.X = np.zeros(shape=(2, self.n))
self.W = np.zeros(shape=(2, self.n))
self.V = np.array(np.zeros([self.n]))
self.magnitudes = np.zeros(self.n)
self.l = l
for i in range(0,self.n):
self.V[i] = 2 * math.pi * (self.f_min + dF * i)
self.Vref = deepcopy(self.V)
# Peak stuff
self.allPeaksSorted = []
self.trackedPeaksObj = {}
self.peaks_to_track = peaks_to_track
self.eta = eta
# Memory window
delta = (1 / dT) * Tp
self.rho = (alpha) ** (1 / delta)
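# With the defaults dT=0.01 and Tp=2, delta = 200 samples, so
# rho = 0.67 ** (1/200) ≈ 0.998: each weight decays by about 0.2% per sample,
# and contributions older than the Tp window are attenuated below alpha.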
# Dynamic learning rate mu
self.adaptive_lr = adaptive_lr
self.mu = mu
self.kappa = kappa
self._peakAmplitude = 0
self._q_amplitudes = deque(g * [0], maxlen=g)
self.beta = beta
self.h = h
def BMWFLC(self, k, s):
""" BMFLC filter
Parameters
----------
k : float
Time instant
s : float
Reference signal
Returns
-------
y : float
Estimated signal
"""
# Adapt learning rates mu and mu0
if self.adaptive_lr:
self.adapt_learningrate(k, s)
for i in range(self.n):
self.X[0][i] = math.sin(self.V[i] * k)
self.X[1][i] = math.cos(self.V[i] * k)
y = np.dot(np.transpose(self.W[0]), self.X[0]) + np.dot(np.transpose(self.W[1]), self.X[1])
err = s - y
# Update weights
for i in range(self.n):
self.W[0][i] = self.W[0][i] * self.rho + 2 * self.mu * self.X[0][i] * err
self.W[1][i] = self.W[1][i] * self.rho + 2 * self.mu * self.X[1][i] * err
# Find peaks
self.find_peaks(err, k, s)
# Create objects for the peaks being tracked
self.create_tracked_peak_objects(k)
# Update the angular frequencies for the tracked peaks
self.update_V_for_peaks(err)
return y
def find_peaks(self, err, k, s):
# Find all magnitudes
magnitudes = []
magnitudes_dict = {x: math.sqrt(self.W[0][x] ** 2 + self.W[1][x] ** 2) for x in range(self.n)}
for key, m in magnitudes_dict.items():
magnitudes.append(m)
self.magnitudes = magnitudes
# Reset angle when magnitudes gets small
for n in range(self.n):
if magnitudes[n] < self.eta and self.V[n] != self.Vref[n]:
self.V[n] = self.Vref[n]
# Key: position of peak, Value: Magnitude of peak
peaksDict = {}
magnitudePrev = 0
magnitudeDiffPrev = 0
# Find peaks
for i in range(self.n):
magnitudeDiff = magnitudes[i] - magnitudePrev
if magnitudeDiff < 0 and magnitudeDiffPrev > 0:
peaksDict[i - 1] = magnitudePrev
# Check if last magnitude is a peak
elif i == (self.n -1) and magnitudeDiff > 0:
peaksDict[i] = magnitudes[i]
magnitudeDiffPrev = magnitudeDiff
magnitudePrev = magnitudes[i]
# Sort peaks by magnitude
peaksDict = OrderedDict(sorted(peaksDict.items(), key=lambda t: t[1], reverse = True))
self.allPeaksSorted = list(peaksDict.items())
<filename>0900-hp/hplip-3.21.12/fax/pmlfax.py
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: <NAME>
#
# Std Lib
import sys
import os
import os.path
import struct
import time
import threading
from base.sixext.moves import StringIO
from io import BytesIO
# Local
from base.g import *
from base.codes import *
from base import device, utils, pml, codes
from prnt import cups
from .fax import *
# **************************************************************************** #
# Page flags
PAGE_FLAG_NONE = 0x00
PAGE_FLAG_NEW_PAGE = 0x01
PAGE_FLAG_END_PAGE = 0x02
PAGE_FLAG_NEW_DOC = 0x04
PAGE_FLAG_END_DOC = 0x08
PAGE_FLAG_END_STREAM = 0x10
MAJOR_VER = 2
MINOR_VER = 0
MFPDTF_RASTER_BITMAP = 0 # Not used
MFPDTF_RASTER_GRAYMAP = 1 # Not used
MFPDTF_RASTER_MH = 2 # OfficeJets B&W Fax
MFPDTF_RASTER_MR = 3 # Not used
MFPDTF_RASTER_MMR = 4 # LaserJets B&W Fax
MFPDTF_RASTER_RGB = 5 # Not used
MFPDTF_RASTER_YCC411 = 6 # Not used
MFPDTF_RASTER_JPEG = 7 # Color Fax
MFPDTF_RASTER_PCL = 8 # Not used
MFPDTF_RASTER_NOT = 9 # Not used
# Data types for FH
DT_UNKNOWN = 0
DT_FAX_IMAGES = 1
DT_SCANNED_IMAGES= 2
DT_DIAL_STRINGS = 3
DT_DEMO_PAGES = 4
DT_SPEED_DIALS = 5
DT_FAX_LOGS = 6
DT_CFG_PARMS = 7
DT_LANG_STRS = 8
DT_JUNK_FAX_CSIDS= 9
DT_REPORT_STRS = 10
DT_FONTS = 11
DT_TTI_BITMAP = 12
DT_COUNTERS = 13
DT_DEF_PARMS = 14
DT_SCAN_OPTIONS = 15
DT_FW_JOB_TABLE = 17
# Raster data record types
RT_START_PAGE = 0
RT_RASTER = 1
RT_END_PAGE = 2
# FH
FIXED_HEADER_SIZE = 8
# Variants
IMAGE_VARIANT_HEADER_SIZE = 10
DIAL_STRINGS_VARIANT_HEADER_SIZE = 6
FAX_IMAGE_VARIANT_HEADER_SIZE = 74
# Data records
SOP_RECORD_SIZE = 36
RASTER_RECORD_SIZE = 4
EOP_RECORD_SIZE = 12
DIAL_STRING_RECORD_SIZE = 51
# Page flags
PAGE_FLAG_NEW_PAGE = 0x01
PAGE_FLAG_END_PAGE = 0x02
PAGE_FLAG_NEW_DOC = 0x04
PAGE_FLAG_END_DOC = 0x08
PAGE_FLAG_END_STREAM = 0x10
# Fax data variant header data source
SRC_UNKNOWN = 0
SRC_HOST = 2
SRC_SCANNER = 5
SRC_HOST_THEN_SCANNER = 6
SRC_SCANNER_THEN_HOST = 7
# Fax data variant header TTI header control
TTI_NONE = 0
TTI_PREPENDED_TO_IMAGE = 1
TTI_OVERLAYED_ON_IMAGE = 2
RASTER_DATA_SIZE = 504
# **************************************************************************** #
class PMLFaxDevice(FaxDevice):
def __init__(self, device_uri=None, printer_name=None,
callback=None,
fax_type=FAX_TYPE_NONE,
disable_dbus=False):
FaxDevice.__init__(self, device_uri,
printer_name,
callback, fax_type,
disable_dbus)
self.send_fax_thread = None
self.upload_log_thread = None
def setPhoneNum(self, num):
return self.setPML(pml.OID_FAX_LOCAL_PHONE_NUM, str(num))
def getPhoneNum(self):
return utils.printable(self.getPML(pml.OID_FAX_LOCAL_PHONE_NUM)[1])
phone_num = property(getPhoneNum, setPhoneNum, doc="OID_FAX_LOCAL_PHONE_NUM")
def setStationName(self, name):
return self.setPML(pml.OID_FAX_STATION_NAME, name)
def getStationName(self):
return utils.printable(self.getPML(pml.OID_FAX_STATION_NAME)[1])
station_name = property(getStationName, setStationName, doc="OID_FAX_STATION_NAME")
def setDateAndTime(self): #Need Revisit
pass
#t = time.localtime()
#p = struct.pack("BBBBBBB", t[0]-2000, t[1], t[2], t[6]+1, t[3], t[4], t[5])
#log.debug(repr(p))
#return self.setPML(pml.OID_DATE_AND_TIME, p.decode('latin-1'))
def uploadLog(self):
if not self.isUploadLogActive():
self.upload_log_thread = PMLUploadLogThread(self)
self.upload_log_thread.start()
return True
else:
return False
def isUploadLogActive(self):
if self.upload_log_thread is not None:
return self.upload_log_thread.is_alive()
else:
return False
def waitForUploadLogThread(self):
if self.upload_log_thread is not None and \
self.upload_log_thread.is_alive():
self.upload_log_thread.join()
def sendFaxes(self, phone_num_list, fax_file_list, cover_message='', cover_re='',
cover_func=None, preserve_formatting=False, printer_name='',
update_queue=None, event_queue=None):
if not self.isSendFaxActive():
self.send_fax_thread = PMLFaxSendThread(self, self.service, phone_num_list, fax_file_list,
cover_message, cover_re, cover_func,
preserve_formatting,
printer_name, update_queue,
event_queue)
self.send_fax_thread.start()
return True
else:
return False
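# Usage sketch (device URI, fax number and file path are hypothetical):
#
#     dev = PMLFaxDevice(device_uri='hp:/usb/HP_OfficeJet?serial=XXXX')
#     if dev.sendFaxes(['15551234567'], ['/tmp/doc.g3']):
#         dev.send_fax_thread.join()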
# **************************************************************************** #
class PMLUploadLogThread(threading.Thread):
def __init__(self, dev):
threading.Thread.__init__(self)
self.dev = dev
def run(self):
STATE_DONE = 0
STATE_ABORT = 10
STATE_ERROR = 15  # assumed value: STATE_ERROR is referenced below but was never defined
STATE_SUCCESS = 20
STATE_BUSY = 25
STATE_DEVICE_OPEN = 28
STATE_CHECK_IDLE = 30
STATE_REQUEST_START = 40
STATE_WAIT_FOR_ACTIVE = 50
STATE_UPLOAD_DATA = 60
STATE_DEVICE_CLOSE = 70
state = STATE_CHECK_IDLE
while state != STATE_DONE: # --------------------------------- Log upload state machine
if state == STATE_ABORT:
pass
elif state == STATE_SUCCESS:
pass
elif state == STATE_BUSY:
pass
elif state == STATE_DEVICE_OPEN: # --------------------------------- Open device (28)
state = STATE_REQUEST_START
try:
self.dev.open()
except Error as e:
log.error("Unable to open device (%s)." % e.msg)
state = STATE_ERROR
else:
try:
self.dev.setPML(pml.OID_UPLOAD_TIMEOUT, pml.DEFAULT_UPLOAD_TIMEOUT)
except Error:
state = STATE_ERROR
elif state == STATE_CHECK_IDLE: # --------------------------------- Check idle (30)
state = STATE_REQUEST_START
ul_state = self.getCfgUploadState()
if ul_state != pml.UPDN_STATE_IDLE:
state = STATE_BUSY
elif state == STATE_REQUEST_START: # --------------------------------- Request start (40)
state = STATE_WAIT_FOR_ACTIVE
self.dev.setPML(pml.OID_FAX_CFG_UPLOAD_DATA_TYPE, pml.FAX_CFG_UPLOAD_DATA_TYPE_FAXLOGS)
self.dev.setPML(pml.OID_DEVICE_CFG_UPLOAD, pml.UPDN_STATE_REQSTART)
elif state == STATE_WAIT_FOR_ACTIVE: # --------------------------------- Wait for active state (50)
state = STATE_UPLOAD_DATA
tries = 0
while True:
tries += 1
ul_state = self.getCfgUploadState()
if ul_state == pml.UPDN_STATE_XFERACTIVE:
break
if ul_state in (pml.UPDN_STATE_ERRORABORT, pml.UPDN_STATE_XFERDONE):
log.error("Cfg upload aborted!")
state = STATE_ERROR
break
if tries > 10:
state = STATE_ERROR
log.error("Unable to get into active state!")
break
time.sleep(0.5)
elif state == STATE_UPLOAD_DATA: # --------------------------------- Upload log data (60)
pass
elif state == STATE_DEVICE_CLOSE: # --------------------------------- Close device (70)
self.dev.close()
# **************************************************************************** #
class PMLFaxSendThread(FaxSendThread):
def __init__(self, dev, service, phone_num_list, fax_file_list,
cover_message='', cover_re='', cover_func=None, preserve_formatting=False,
printer_name='', update_queue=None, event_queue=None):
FaxSendThread.__init__(self, dev, service, phone_num_list, fax_file_list,
cover_message, cover_re, cover_func, preserve_formatting,
printer_name, update_queue, event_queue)
def run(self):
#results = {} # {'file' : error_code,...}
STATE_DONE = 0
STATE_ABORTED = 10
STATE_SUCCESS = 20
STATE_BUSY = 25
STATE_READ_SENDER_INFO = 30
STATE_PRERENDER = 40
STATE_COUNT_PAGES = 50
STATE_NEXT_RECIPIENT = 60
STATE_COVER_PAGE = 70
STATE_SINGLE_FILE = 80
STATE_MERGE_FILES = 90
STATE_SEND_FAX = 110
STATE_CLEANUP = 120
STATE_ERROR = 130
next_recipient = self.next_recipient_gen()
state = STATE_READ_SENDER_INFO
self.rendered_file_list = []
while state != STATE_DONE: # --------------------------------- Fax state machine
if self.check_for_cancel():
state = STATE_ABORTED
log.debug("STATE=(%d, 0, 0)" % state)
if state == STATE_ABORTED: # --------------------------------- Aborted (10, 0, 0)
log.error("Aborted by user.")
self.write_queue((STATUS_IDLE, 0, ''))
state = STATE_CLEANUP
elif state == STATE_SUCCESS: # --------------------------------- Success (20, 0, 0)
log.debug("Success.")
self.write_queue((STATUS_COMPLETED, 0, ''))
state = STATE_CLEANUP
elif state == STATE_ERROR: # --------------------------------- Error (130, 0, 0)
log.error("Error, aborting.")
self.write_queue((STATUS_ERROR, 0, ''))
state = STATE_CLEANUP
elif state == STATE_BUSY: # --------------------------------- Busy (25, 0, 0)
log.error("Device busy, aborting.")
self.write_queue((STATUS_BUSY, 0, ''))
state = STATE_CLEANUP
elif state == STATE_READ_SENDER_INFO: # --------------------------------- Get sender info (30, 0, 0)
log.debug("%s State: Get sender info" % ("*"*20))
state = STATE_PRERENDER
try:
try:
self.dev.open()
except Error as e:
log.error("Unable to open device (%s)." % e.msg)
state = STATE_ERROR
else:
try:
self.sender_name = self.dev.station_name
log.debug("Sender name=%s" % self.sender_name)
self.sender_fax = self.dev.phone_num
log.debug("Sender fax=%s" % self.sender_fax)
except Error:
log.error("PML get failed!")
state = STATE_ERROR
finally:
self.dev.close()
elif state == STATE_PRERENDER: # --------------------------------- Pre-render non-G3 files (40, 0, 0)
log.debug("%s State: Pre-render non-G3 files" % ("*"*20))
state = self.pre_render(STATE_COUNT_PAGES)
elif state == STATE_COUNT_PAGES: # --------------------------------- Get total page count (50, 0, 0)
log.debug("%s State: Get total page count" % ("*"*20))
state = self.count_pages(STATE_NEXT_RECIPIENT)
elif state == STATE_NEXT_RECIPIENT: # --------------------------------- Loop for multiple recipients (60, 0, 0)
log.debug("%s State: Next recipient" % ("*"*20))
state = STATE_COVER_PAGE
try:
recipient = next(next_recipient)
#print recipient
log.debug("Processing for recipient %s" % recipient['name'])
self.write_queue((STATUS_SENDING_TO_RECIPIENT, 0, recipient['name']))
except StopIteration:
state = STATE_SUCCESS
log.debug("Last recipient.")
continue
self.recipient_file_list = self.rendered_file_list[:]
elif state == STATE_COVER_PAGE: # --------------------------------- Create cover page (70, 0, 0)
log.debug("%s State: Render cover page" % ("*"*20))
state = self.cover_page(recipient)
elif state == STATE_SINGLE_FILE: # --------------------------------- Special case for single file (no merge) (80, 0, 0)
log.debug("%s State: Handle single file" % ("*"*20))
state = self.single_file(STATE_SEND_FAX)
elif state == STATE_MERGE_FILES: # --------------------------------- Merge multiple G3 files (90, 0, 0)
log.debug("%s State: Merge multiple files" % ("*"*20))
state = self.merge_files(STATE_SEND_FAX)
elif state == STATE_SEND_FAX: # --------------------------------- Send fax state machine (110, 0, 0)
log.debug("%s State: Send fax" % ("*"*20))
state = STATE_NEXT_RECIPIENT
FAX_SEND_STATE_DONE = 0
FAX_SEND_STATE_ABORT = 10
FAX_SEND_STATE_ERROR = 20
FAX_SEND_STATE_BUSY = 25
FAX_SEND_STATE_SUCCESS = 30
FAX_SEND_STATE_DEVICE_OPEN = 40
FAX_SEND_STATE_SET_TOKEN = 50
FAX_SEND_STATE_EARLY_OPEN = 60
FAX_SEND_STATE_SET_PARAMS = 70
FAX_SEND_STATE_CHECK_IDLE = 80
FAX_SEND_STATE_START_REQUEST = 90
FAX_SEND_STATE_LATE_OPEN = 100
FAX_SEND_STATE_SEND_DIAL_STRINGS = 110
FAX_SEND_STATE_SEND_FAX_HEADER = 120
FAX_SEND_STATE_SEND_PAGES = 130
FAX_SEND_STATE_SEND_END_OF_STREAM = 140
FAX_SEND_STATE_WAIT_FOR_COMPLETE = 150
FAX_SEND_STATE_RESET_TOKEN = 160
FAX_SEND_STATE_CLOSE_SESSION = 170
monitor_state = False
error_state = pml.DN_ERROR_NONE
fax_send_state = FAX_SEND_STATE_DEVICE_OPEN
while fax_send_state != FAX_SEND_STATE_DONE:
if self.check_for_cancel():
log.error("Fax send aborted.")
fax_send_state = FAX_SEND_STATE_ABORT
if monitor_state:
fax_state = self.getFaxDownloadState()
if fax_state not in (pml.UPDN_STATE_XFERACTIVE, pml.UPDN_STATE_XFERDONE):
log.error("D/L error state=%d" % fax_state)
fax_send_state = FAX_SEND_STATE_ERROR
state = STATE_ERROR
log.debug("STATE=(%d, %d, 0)" % (STATE_SEND_FAX, fax_send_state))
if fax_send_state == FAX_SEND_STATE_ABORT: # -------------- Abort (110, 10, 0)
# TODO: Set D/L state
<reponame>cxiang26/mygluon_cv<gh_stars>10-100
# -*- coding: utf-8 -*-
"""Fully Convolutional One-Stage Object Detection."""
from __future__ import absolute_import
import os
import warnings
import numpy as np
import mxnet as mx
from mxnet import autograd
from mxnet.gluon import nn, HybridBlock
from ...nn.bbox import BBoxClipToImage
# from IPython import embed
from .fcos_target import FCOSBoxConverter
from ...nn.feature import RetinaFeatureExpander
from ...nn.protomask import Protonet
__all__ = ['MaskFCOS', 'get_maskfcos',
'maskfcos_resnet50_v1_740_coco',
'maskfcos_resnet50_v1b_740_coco',
'maskfcos_resnet101_v1d_740_coco',
'maskfcos_resnet50_v1_1024_coco']
class GroupNorm(nn.HybridBlock):
"""
If the batch size is small, it's better to use GroupNorm instead of BatchNorm.
GroupNorm achieves good results even at small batch sizes.
Reference:
https://arxiv.org/pdf/1803.08494.pdf
"""
def __init__(self, num_channels, num_groups=32, eps=1e-5,
multi_precision=False, prefix=None, **kwargs):
super(GroupNorm, self).__init__(**kwargs)
with self.name_scope():
self.weight = self.params.get('{}_weight'.format(prefix), grad_req='write',
shape=(1, num_channels, 1, 1))
self.bias = self.params.get('{}_bias'.format(prefix), grad_req='write',
shape=(1, num_channels, 1, 1))
self.C = num_channels
self.G = num_groups
self.eps = eps
self.multi_precision = multi_precision
assert self.C % self.G == 0
def hybrid_forward(self, F, x, weight, bias):
# (N,C,H,W) -> (N,G,H*W*C//G)
x_new = F.reshape(x, (0, self.G, -1))
if self.multi_precision:
# (N,G,H*W*C//G) -> (N,G,1)
mean = F.mean(F.cast(x_new, "float32"), axis=-1, keepdims=True)
mean = F.cast(mean, "float16")
else:
mean = F.mean(x_new, axis=-1, keepdims=True)
# (N,G,H*W*C//G)
centered_x_new = F.broadcast_minus(x_new, mean)
if self.multi_precision:
# (N,G,H*W*C//G) -> (N,G,1)
var = F.mean(F.cast(F.square(centered_x_new), "float32"), axis=-1, keepdims=True)
var = F.cast(var, "float16")
else:
var = F.mean(F.square(centered_x_new), axis=-1, keepdims=True)
# (N,G,H*W*C//G) -> (N,C,H,W)
x_new = F.broadcast_div(centered_x_new, F.sqrt(var + self.eps)).reshape_like(x)
x_new = F.broadcast_add(F.broadcast_mul(x_new, weight), bias)
return x_new
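# Usage sketch (shapes are illustrative): statistics are computed per group of
# channels, so the result is independent of batch size.
#
#     gn = GroupNorm(num_channels=256, num_groups=32, prefix='demo')
#     gn.initialize()
#     out = gn(mx.nd.random.normal(shape=(2, 256, 32, 32)))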
class ConvPredictor(nn.HybridBlock):
def __init__(self, num_channels, share_params=None, bias_init=None, **kwargs):
super(ConvPredictor, self).__init__(**kwargs)
with self.name_scope():
if share_params is not None:
self.conv = nn.Conv2D(num_channels, 3, 1, 1, params=share_params,
bias_initializer=bias_init)
else:
self.conv = nn.Conv2D(num_channels, 3, 1, 1,
weight_initializer=mx.init.Normal(sigma=0.01),
bias_initializer=bias_init)
def get_params(self):
return self.conv.params
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
class RetinaHead(nn.HybridBlock):
def __init__(self, share_params=None, prefix=None, **kwargs):
super(RetinaHead, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.HybridSequential()
for i in range(4):
if share_params is not None:
self.conv.add(nn.Conv2D(256, 3, 1, 1, activation='relu',
params=share_params[i]))
else:
self.conv.add(nn.Conv2D(256, 3, 1, 1, activation='relu',
weight_initializer=mx.init.Normal(sigma=0.01),
bias_initializer='zeros'))
self.conv.add(GroupNorm(num_channels=256, prefix=prefix))
def set_params(self, newconv):
for b, nb in zip(self.conv, newconv):
b.weight.set_data(nb.weight.data())
b.bias.set_data(nb.bias.data())
def get_params(self):
param_list = []
for opr in self.conv:
param_list.append(opr.params)
return param_list
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
@mx.init.register
class ClsBiasInit(mx.init.Initializer):
def __init__(self, num_class, cls_method="sigmoid", pi=0.01, **kwargs):
super(ClsBiasInit, self).__init__(**kwargs)
self._num_class = num_class
self._cls_method = cls_method
self._pi = pi
def _init_weight(self, name, data):
if self._cls_method == "sigmoid":
arr = -1 * np.ones((data.size, ))
arr = arr * np.log((1 - self._pi) / self._pi)
data[:] = arr
elif self._cls_method == "softmax":
pass
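# With the default pi = 0.01 the bias becomes -log((1 - 0.01) / 0.01) ≈ -4.6,
# so every class starts with a sigmoid score near 0.01 (the rare-foreground
# prior used by focal-loss detectors). Quick check:
#
#     import math
#     print(-1 * math.log((1 - 0.01) / 0.01))  # ≈ -4.595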
def cor_target(short, scale):
h, w = short, short
fh = int(np.ceil(np.ceil(np.ceil(h / 2) / 2) / 2))
fw = int(np.ceil(np.ceil(np.ceil(w / 2) / 2) / 2))
#
fm_list = []
for _ in range(len(scale)):
fm_list.append((fh, fw))
fh = int(np.ceil(fh / 2))
fw = int(np.ceil(fw / 2))
fm_list = fm_list[::-1]
#
cor_targets = []
for i, stride in enumerate(scale):
fh, fw = fm_list[i]
cx = mx.nd.arange(0, fw).reshape((1, -1))
cy = mx.nd.arange(0, fh).reshape((-1, 1))
sx = mx.nd.tile(cx, reps=(fh, 1))
sy = mx.nd.tile(cy, reps=(1, fw))
syx = mx.nd.stack(sy.reshape(-1), sx.reshape(-1)).transpose()
cor_targets.append(mx.nd.flip(syx, axis=1) * stride)
cor_targets = mx.nd.concat(*cor_targets, dim=0)
return cor_targets
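# Sketch of the output (input size assumed): for short=740 this returns an
# (N, 2) NDArray of (x, y) pixel coordinates, concatenated from the coarsest
# stride down to the finest, matching the order of the prediction heads.
#
#     centers = cor_target(740, [128, 64, 32, 16, 8])  # i.e. steps[::-1]
#     print(centers.shape)  # (total number of feature locations, 2)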
class MaskFCOS(HybridBlock):
def __init__(self, network, features, classes, steps=None, short=600,
valid_range=[(512, np.inf), (256, 512), (128, 256), (64, 128), (0, 64)],
num_prototypes=32,
use_p6_p7=True, pretrained_base=False,
nms_thresh=0.45, nms_topk=400, post_nms=100, share_params = False, **kwargs):
super(MaskFCOS, self).__init__(**kwargs)
num_layers = len(features) + int(use_p6_p7)*2
self._num_layers = num_layers
self.classes = classes
self.nms_thresh = nms_thresh
self.nms_topk = nms_topk
self.post_nms = post_nms
self.share_params = share_params
self._scale = steps[::-1]
self.short = short
self.base_stride = steps[-1]
self.valid_range = valid_range
self.k = num_prototypes
# input size are solid
self.cor_targets = self.params.get_constant(name='cor_', value=cor_target(self.short, self._scale))
with self.name_scope():
bias_init = ClsBiasInit(len(self.classes))
self.box_converter = FCOSBoxConverter()
self.cliper = BBoxClipToImage()
self.features = RetinaFeatureExpander(network=network, pretrained=pretrained_base, outputs=features)
self.protonet = Protonet([256, 256, 256, 256, self.k])
self.classes_head = nn.HybridSequential()
self.box_head = nn.HybridSequential()
self.class_predictors = nn.HybridSequential()
self.box_predictors = nn.HybridSequential()
# self.center_predictors = nn.HybridSequential()
self.maskcoe_predictors = nn.HybridSequential()
share_cls_params, share_box_params = None, None
share_cls_pred_params, share_ctr_pred_params, \
share_box_pred_params, share_mask_pred_params = None, None, None, None
for i in range(self._num_layers):
# classes
cls_head = RetinaHead(share_params=share_cls_params, prefix='cls_{}'.format(i))
self.classes_head.add(cls_head)
share_cls_params = cls_head.get_params()
# bbox
box_head = RetinaHead(share_params=share_box_params, prefix='box_{}'.format(i))
self.box_head.add(box_head)
share_box_params = box_head.get_params()
# classes preds
cls_pred = ConvPredictor(num_channels=len(self.classes),
share_params=share_cls_pred_params, bias_init=bias_init)
self.class_predictors.add(cls_pred)
share_cls_pred_params = cls_pred.get_params()
# center preds
# center_pred = ConvPredictor(num_channels=1,
# share_params=share_ctr_pred_params, bias_init='zeros')
# self.center_predictors.add(center_pred)
# share_ctr_pred_params = center_pred.get_params()
# mask coefficient preds
maskcoe_pred = ConvPredictor(num_channels=self.k,
share_params=share_mask_pred_params, bias_init='zeros')
self.maskcoe_predictors.add(maskcoe_pred)
share_mask_pred_params = maskcoe_pred.get_params()
# bbox_pred
bbox_pred = ConvPredictor(num_channels=4,
share_params=share_box_pred_params, bias_init='zeros')
self.box_predictors.add(bbox_pred)
share_box_pred_params = bbox_pred.get_params()
if not self.share_params:
share_cls_params, share_box_params = None, None
share_cls_pred_params, share_ctr_pred_params, \
share_box_pred_params, share_mask_pred_params = None, None, None, None
# trainable scales
# self.s1 = self.params.get('scale_p1', shape=(1,), differentiable=True,
# allow_deferred_init=True, init='ones')
# self.s2 = self.params.get('scale_p2', shape=(1,), differentiable=True,
# allow_deferred_init=True, init='ones')
# self.s3 = self.params.get('scale_p3', shape=(1,), differentiable=True,
# allow_deferred_init=True, init='ones')
# self.s4 = self.params.get('scale_p4', shape=(1,), differentiable=True,
# allow_deferred_init=True, init='ones')
# self.s5 = self.params.get('scale_p5', shape=(1,), differentiable=True,
# allow_deferred_init=True, init='ones')
@property
def num_classes(self):
"""Return number of foreground classes.
Returns
-------
int
Number of foreground classes
"""
return len(self.classes)
# pylint: disable=arguments-differ
def hybrid_forward(self, F, x, cor_targets):
"""Hybrid forward"""
features = self.features(x)
masks = self.protonet(features[-1])
masks = F.relu(masks)
cls_head_feat = [cp(feat) for feat, cp in zip(features, self.classes_head)]
box_head_feat = [cp(feat) for feat, cp in zip(features, self.box_head)]
cls_preds = [F.transpose(F.reshape(cp(feat), (0, 0, -1)), (0, 2, 1))
for feat, cp in zip(cls_head_feat, self.class_predictors)]
# center_preds = [F.transpose(F.reshape(bp(feat), (0, 0, -1)), (0, 2, 1))
# for feat, bp in zip(cls_head_feat, self.center_predictors)]
box_preds = [F.transpose(F.exp(F.reshape(bp(feat), (0, 0, -1))), (0, 2, 1)) * sc
for feat, bp, sc in zip(box_head_feat, self.box_predictors, self._scale)]
maskeoc_preds = [F.transpose(F.reshape(bp(feat), (0, 0, -1)), (0, 2, 1))
for feat, bp in zip(box_head_feat, self.maskcoe_predictors)]
cls_preds = F.concat(*cls_preds, dim=1)
# center_preds = F.concat(*center_preds, dim=1)
box_preds = F.concat(*box_preds, dim=1)
maskeoc_preds = F.concat(*maskeoc_preds, dim=1)
maskeoc_preds = F.tanh(maskeoc_preds)
center_preds = F.sum(maskeoc_preds, axis=-1, keepdims=True)
# with autograd.pause():
# h, w, = self.short, self.short
# fh = int(np.ceil(np.ceil(np.ceil(h / 2) / 2) / 2))
# fw = int(np.ceil(np.ceil(np.ceil(w / 2) / 2) / 2))
# #
# fm_list = []
# for _ in range(len(self._scale)):
# fm_list.append((fh, fw))
# fh = int(np.ceil(fh / 2))
# fw = int(np.ceil(fw / 2))
# fm_list = fm_list[::-1]
# #
# cor_targets = []
# for i, stride in enumerate(self._scale):
# fh, fw = fm_list[i]
# cx = F.arange(0, fw).reshape((1, -1))
# cy = F.arange(0, fh).reshape((-1, 1))
# sx = F.tile(cx, reps=(fh, 1))
# sy = F.tile(cy, reps=(1, fw))
# syx = F.stack(sy.reshape(-1), sx.reshape(-1)).transpose()
# cor_targets.append(F.flip(syx, axis=1) * stride)
# cor_targets = F.concat(*cor_targets, dim=0)
box_preds = self.box_converter(box_preds, cor_targets)
if autograd.is_training():
return [cls_preds, center_preds, box_preds, masks, maskeoc_preds]
box_preds = self.cliper(box_preds, x) # box_preds: [B, N, 4]
cls_prob = F.sigmoid(cls_preds)
center_prob = F.sigmoid(center_preds)
cls_prob = F.broadcast_mul(cls_prob, center_prob)
cls_id = cls_prob.argmax(axis=-1)
probs = F.pick(cls_prob, cls_id)
result = F.concat(cls_id.expand_dims(axis=-1), probs.expand_dims(axis=-1), box_preds, maskeoc_preds, dim=-1)
if 0 < self.nms_thresh < 1:
result = F.contrib.box_nms(result, overlap_thresh=self.nms_thresh,
topk=self.nms_topk, valid_thresh=0.001,
id_index=0, score_index=1, coord_start=2, force_suppress=False,
in_format='corner', out_format='corner')
if self.post_nms > 0:
result = result.slice_axis(axis=1, begin=0, end=self.post_nms)
ids = F.slice_axis(result, axis=2, begin=0, end=1)
scores = F.slice_axis(result, axis=2, begin=1, end=2)
bboxes = F.slice_axis(result, axis=2, begin=2, end=6)
maskeoc = F.slice_axis(result, axis=2, begin=6, end=6 + self.k)
return ids, scores, bboxes, maskeoc, masks
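# Downstream sketch (an assumption based on YOLACT-style mask heads, not stated
# in this file): a per-instance mask is typically assembled by combining the k
# prototypes with one detection's coefficients, e.g. for the first detection:
#
#     coeffs = maskeoc[0, 0]                                         # (k,)
#     inst = mx.nd.sum(masks[0] * coeffs.reshape((-1, 1, 1)), axis=0)  # (H, W)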
def get_maskfcos(name, dataset, pretrained=False, ctx=mx.cpu(),
root=os.path.join('~', '.mxnet', 'models'), **kwargs):
"return FCOS network"
base_name = name
net = MaskFCOS(base_name, **kwargs)
if pretrained:
from ..model_store import get_model_file
full_name = '_'.join(('fcos', name, dataset))
net.load_parameters(get_model_file(full_name, tag=pretrained, root=root), ctx=ctx)
return net
# def fcos_resnet50_v1_coco(pretrained=False, pretrained_base=True, **kwargs):
# from ..resnet import resnet50_v1
# from ...data import COCODetection
# classes = COCODetection.CLASSES
# pretrained_base = False if pretrained else pretrained_base
# base_network = resnet50_v1(pretrained=pretrained_base, **kwargs)
# features = RetinaFeatureExpander(network=base_network,
# pretrained=pretrained_base,
# outputs=['stage2_activation3',
# 'stage3_activation5',
# 'stage4_activation2'])
# return get_fcos(name="resnet50_v1", dataset="coco", pretrained=pretrained,
# features=features, classes=classes, base_stride=128, short=800,
# max_size=1333, norm_layer=None, norm_kwargs=None,
# valid_range=[(512, np.inf), (256, 512), (128, 256), (64, 128), (0, 64)],
# nms_thresh=0.6, nms_topk=1000, save_topk=100)
def maskfcos_resnet50_v1_740_coco(pretrained=False, pretrained_base=True, **kwargs):
from ...data import COCOInstance
classes = COCOInstance.CLASSES
return get_maskfcos('resnet50_v1',
features=['stage2_activation3', 'stage3_activation5', 'stage4_activation2'],
classes=classes,
steps=[8, 16, 32, 64, 128],
short=740,
valid_range=[(512, np.inf), (256, 512), (128, 256), (64, 128), (0, 64)],
nms_thresh=0.45, nms_topk=1000, post_nms=100,
dataset='coco', pretrained=pretrained,
num_prototypes=32,
pretrained_base=pretrained_base, share_params=True, **kwargs)
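# Usage sketch (illustrative, not part of the original source): build the
# 740-px COCO-instance network above and run one inference pass. The dummy
# input shape and the bare `initialize()` call are assumptions based on the
# usual Gluon workflow, not taken from this file.
def _demo_maskfcos_inference():
    import mxnet as mx
    net = maskfcos_resnet50_v1_740_coco(pretrained=False, pretrained_base=False)
    net.initialize()
    x = mx.nd.zeros((1, 3, 740, 740))  # dummy batch: (B, C, H, W)
    # Outside of training, the forward pass returns the inference tuple
    # (ids, scores, bboxes, maskeoc, masks) produced above.
    return net(x)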
def maskfcos_resnet50_v1b_740_coco(pretrained=False, pretrained_base=True, **kwargs):
from ...data import COCOInstance
classes = COCOInstance.CLASSES
return get_maskfcos('resnet50_v1d',
features=['layers2_relu11_fwd', 'layers3_relu17_fwd', 'layers4_relu8_fwd'],
classes=classes,
steps=[8, 16, 32, 64, 128],
short=740,
valid_range=[(512, np.inf), (256, 512), (128, 256), (64, 128),
the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
reply_markup = prepare_arg(reply_markup)
payload = generate_payload(**locals())
result = await self.request(api.Methods.SEND_CONTACT, payload)
return types.Message(**result)
async def send_chat_action(self, chat_id: typing.Union[base.Integer, base.String],
action: base.String) -> base.Boolean:
"""
Use this method when you need to tell the user that something is happening on the bot's side.
The status is set for 5 seconds or less
(when a message arrives from your bot, Telegram clients clear its typing status).
We only recommend using this method when a response from the bot will take
a noticeable amount of time to arrive.
Source: https://core.telegram.org/bots/api#sendchataction
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param action: Type of action to broadcast.
:type action: :obj:`base.String`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SEND_CHAT_ACTION, payload)
return result
async def get_user_profile_photos(self, user_id: base.Integer, offset: typing.Union[base.Integer, None] = None,
limit: typing.Union[base.Integer, None] = None) -> types.UserProfilePhotos:
"""
Use this method to get a list of profile pictures for a user. Returns a UserProfilePhotos object.
Source: https://core.telegram.org/bots/api#getuserprofilephotos
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param offset: Sequential number of the first photo to be returned. By default, all photos are returned.
:type offset: :obj:`typing.Union[base.Integer, None]`
:param limit: Limits the number of photos to be retrieved. Values between 1-100 are accepted. Defaults to 100.
:type limit: :obj:`typing.Union[base.Integer, None]`
:return: Returns a UserProfilePhotos object.
:rtype: :obj:`types.UserProfilePhotos`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_USER_PROFILE_PHOTOS, payload)
return types.UserProfilePhotos(**result)
async def get_file(self, file_id: base.String) -> types.File:
"""
Use this method to get basic info about a file and prepare it for downloading.
For the moment, bots can download files of up to 20MB in size.
Note: This function may not preserve the original file name and MIME type.
You should save the file's MIME type and name (if available) when the File object is received.
Source: https://core.telegram.org/bots/api#getfile
:param file_id: File identifier to get info about
:type file_id: :obj:`base.String`
:return: On success, a File object is returned.
:rtype: :obj:`types.File`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_FILE, payload)
return types.File(**result)
async def kick_chat_member(self, chat_id: typing.Union[base.Integer, base.String], user_id: base.Integer,
until_date: typing.Union[base.Integer, None] = None) -> base.Boolean:
"""
Use this method to kick a user from a group, a supergroup or a channel.
In the case of supergroups and channels, the user will not be able to return to the group
on their own using invite links, etc., unless unbanned first.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’ setting
is off in the target group.
Otherwise members may only be removed by the group's creator or by the member that added them.
Source: https://core.telegram.org/bots/api#kickchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when the user will be unbanned, unix time.
:type until_date: :obj:`typing.Union[base.Integer, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
until_date = prepare_arg(until_date)
payload = generate_payload(**locals())
result = await self.request(api.Methods.KICK_CHAT_MEMBER, payload)
return result
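# Usage sketch (illustrative, not from the original source): a temporary
# one-day ban. `bot` is assumed to be an instance of this class, and
# prepare_arg above is assumed to serialize datetime objects for until_date.
#
#     import datetime
#     until = datetime.datetime.now() + datetime.timedelta(days=1)
#     ok = await bot.kick_chat_member(chat_id='@my_channel', user_id=123456,
#                                     until_date=until)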
async def unban_chat_member(self, chat_id: typing.Union[base.Integer, base.String],
user_id: base.Integer) -> base.Boolean:
"""
Use this method to unban a previously kicked user in a supergroup or channel.
The user will not return to the group or channel automatically, but will be able to join via link, etc.
The bot must be an administrator for this to work.
Source: https://core.telegram.org/bots/api#unbanchatmember
:param chat_id: Unique identifier for the target group or username of the target supergroup or channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.UNBAN_CHAT_MEMBER, payload)
return result
async def restrict_chat_member(self, chat_id: typing.Union[base.Integer, base.String],
user_id: base.Integer,
until_date: typing.Union[base.Integer, None] = None,
can_send_messages: typing.Union[base.Boolean, None] = None,
can_send_media_messages: typing.Union[base.Boolean, None] = None,
can_send_other_messages: typing.Union[base.Boolean, None] = None,
can_add_web_page_previews: typing.Union[base.Boolean, None] = None) -> base.Boolean:
"""
Use this method to restrict a user in a supergroup.
The bot must be an administrator in the supergroup for this to work and must have the appropriate admin rights.
Pass True for all boolean parameters to lift restrictions from a user.
Source: https://core.telegram.org/bots/api#restrictchatmember
:param chat_id: Unique identifier for the target chat or username of the target supergroup
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param until_date: Date when restrictions will be lifted for the user, unix time.
:type until_date: :obj:`typing.Union[base.Integer, None]`
:param can_send_messages: Pass True, if the user can send text messages, contacts, locations and venues
:type can_send_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_send_media_messages: Pass True, if the user can send audios, documents, photos, videos,
video notes and voice notes, implies can_send_messages
:type can_send_media_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_send_other_messages: Pass True, if the user can send animations, games, stickers and
use inline bots, implies can_send_media_messages
:type can_send_other_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_add_web_page_previews: Pass True, if the user may add web page previews to their messages,
implies can_send_media_messages
:type can_add_web_page_previews: :obj:`typing.Union[base.Boolean, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
until_date = prepare_arg(until_date)
payload = generate_payload(**locals())
result = await self.request(api.Methods.RESTRICT_CHAT_MEMBER, payload)
return result
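# Usage sketch (illustrative, not from the original source): lifting all
# restrictions from a user, following the note above that passing True for
# all boolean parameters removes them. `bot` is assumed to be an instance
# of this class.
#
#     ok = await bot.restrict_chat_member(
#         chat_id=chat_id, user_id=user_id,
#         can_send_messages=True, can_send_media_messages=True,
#         can_send_other_messages=True, can_add_web_page_previews=True)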
async def promote_chat_member(self, chat_id: typing.Union[base.Integer, base.String],
user_id: base.Integer,
can_change_info: typing.Union[base.Boolean, None] = None,
can_post_messages: typing.Union[base.Boolean, None] = None,
can_edit_messages: typing.Union[base.Boolean, None] = None,
can_delete_messages: typing.Union[base.Boolean, None] = None,
can_invite_users: typing.Union[base.Boolean, None] = None,
can_restrict_members: typing.Union[base.Boolean, None] = None,
can_pin_messages: typing.Union[base.Boolean, None] = None,
can_promote_members: typing.Union[base.Boolean, None] = None) -> base.Boolean:
"""
Use this method to promote or demote a user in a supergroup or a channel.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Pass False for all boolean parameters to demote a user.
Source: https://core.telegram.org/bots/api#promotechatmember
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param user_id: Unique identifier of the target user
:type user_id: :obj:`base.Integer`
:param can_change_info: Pass True, if the administrator can change chat title, photo and other settings
:type can_change_info: :obj:`typing.Union[base.Boolean, None]`
:param can_post_messages: Pass True, if the administrator can create channel posts, channels only
:type can_post_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_edit_messages: Pass True, if the administrator can edit messages of other users, channels only
:type can_edit_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_delete_messages: Pass True, if the administrator can delete messages of other users
:type can_delete_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_invite_users: Pass True, if the administrator can invite new users to the chat
:type can_invite_users: :obj:`typing.Union[base.Boolean, None]`
:param can_restrict_members: Pass True, if the administrator can restrict, ban or unban chat members
:type can_restrict_members: :obj:`typing.Union[base.Boolean, None]`
:param can_pin_messages: Pass True, if the administrator can pin messages, supergroups only
:type can_pin_messages: :obj:`typing.Union[base.Boolean, None]`
:param can_promote_members: Pass True, if the administrator can add new administrators
with a subset of his own privileges or demote administrators that he has promoted,
directly or indirectly (promoted by administrators that were appointed by him)
:type can_promote_members: :obj:`typing.Union[base.Boolean, None]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.PROMOTE_CHAT_MEMBER, payload)
return result
async def export_chat_invite_link(self, chat_id: typing.Union[base.Integer, base.String]) -> base.String:
"""
Use this method to generate a new invite link for a chat; any previously generated link is revoked.
The bot must be an administrator in the chat for this to work and must have the appropriate admin
pos in self.points:
if dim in self.points[pos]:
positions.append(pos)
break
if len(positions) != len(dims):
raise Exception("""Could not get an appropriate grip point position for the given dimensions:
%s """ % (dims,))
return positions
else:
position = None
for pos in self.points:
if dims in self.points[pos]:
position = pos
break
if position is None:
raise Exception("""Dimension does not match any know grip point position: %s """ % (dims,))
return position
def _get_missmatch(self, da, pos):
"""
Returns the axis names where a mismatch between the variable and a position is found.
The axis names are hard-coded to 'X', 'Y', or 'Z'. Has to match the name given for the xgcm metric!
:param da: data variable
:param pos: position it is compared to ('T','U','V','F','W','UW','VW','FW')
:return: list of axis names where mismatches occur
"""
# Get dimensions, if temporal dimension cut it from dims to get spatial dims only
dims = da.dims
if 't' in dims:
ind = dims.index('t')
dims = dims[:ind] + dims[ind + 1:]
# Get the expected dimension with corresponding number of axes
ax_num = [len(l) for l in self.points[pos]].index(len(dims))
expect = self.points[pos][ax_num]
# Add physical dimensions where a mismatch is found
missmatch = []
for ax in range(len(expect)):
if dims[ax] != expect[ax]:
    ax_miss = 'X' if dims[ax][0] == 'x' else \
        'Y' if dims[ax][0] == 'y' else \
        'Z' if dims[ax][0] == 'z' else None
    if ax_miss is None:
        raise Exception("""Axis %s does not match any known dimension""" % (da.dims[ax],))
    missmatch.append(ax_miss)
return missmatch
def _shift_position(self, da, output_position, elements=None, bd='interp', bd_value=None, bd_scaling=1, **kwargs):
"""
Returns the variable interpolated to a prescribed output position.
:param da: data variable or vector to shift
:param output_position: desired output position
:param elements: elements to include in the shift if a data vector is given, List(Boolean, ..)
:return: interpolated data variable or vector onto the output_position
"""
# If da is a list (vector), interpolate every element in da along the axes to match the output_position
# If elements is given only indices where True is given are included. Default is True for every element
if type(da) == list or type(da) == np.ndarray:
if elements is None:
elements = [True] * len(da)
if type(output_position) is str:
output_position = [output_position] * len(da)
da_out = []
for i in range(len(da)):
if elements[i]:
element = da[i]
pos = output_position[i]
if self._matching_pos(element, pos):
da_out.append(element)
else:
missmatch = self._get_missmatch(element, pos)
if bd == 'interp':
da_out_i = self.interp(da[i].fillna(0), axis=missmatch, **kwargs)
da_out_i *= self.nan_mask(da_out_i)
elif bd == 'fill':
da_out_i = self.interp(da[i], axis=missmatch, **kwargs)
shift_mask = self._get_shift_mask(da[i], fill_value=bd_value, scaling=bd_scaling)
da_out_i = da_out_i.copy(data=np.nan_to_num(da_out_i,0) + shift_mask.values)
da_out_i *= self.nan_mask(da_out_i)
else:
raise Exception('Unknown handling of boundary values. Please use "interp" or "fill".')
da_out.append(da_out_i)
else:
da_out.append(da[i])
# Else interpolate the data variable along the axes to match the output_position
else:
if self._matching_pos(da, output_position):
da_out = da
else:
missmatch = self._get_missmatch(da, output_position)
if bd == 'interp':
da_out = self.interp(da.fillna(0),axis=missmatch, **kwargs)
da_out *= self.nan_mask(da_out)
elif bd == 'fill':
da_out = self.interp(da, axis=missmatch, **kwargs)
shift_mask = self._get_shift_mask(da, fill_value=bd_value, scaling=bd_scaling)
da_out = da_out.copy(data=np.nan_to_num(da_out, 0) + shift_mask.values)
da_out *= self.nan_mask(da_out)
else:
raise Exception('Unknown handling of boundary values. Please use "interp" or "fill".')
return da_out
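# Usage sketch (illustrative, not from the original source): shifting a
# T-point tracer field onto U-points while filling (rather than
# interpolating across) boundary values. `grid_ops` is assumed to be an
# instance of this class and `ds.thetao` a T-point xarray.DataArray.
#
#     u_theta = grid_ops._shift_position(ds.thetao, 'U',
#                                        bd='fill', bd_value=0.0)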
def _matching_pos(self, da, pos, skip=None):
"""
Checks if the given data is on the indicated position
:param da: data variable or vector to test
:param pos: position to test for
:return: True, False or List(True/False, ...)
"""
if type(da) == list:
if skip is not None:
da = self._data_skip(da, skip)
a_pos = self._get_dims(da)
match = [any([expect == element for expect in self.points[pos]]) for element in a_pos]
if all(match):
return True
else:
return match # raise Exception("""False elements %s do not match position %s""" % (match, pos))
else:
a_pos = self._get_dims(da)
if a_pos in self.points[pos]:
return True
else:
return False # raise Exception("""The variable does not match position %s""" %pos)
def _matching_dim(self, da1, da2, skip1=None, skip2=None):
"""
Checks if the dimensions of data variable da1 match the dimensions of data variable da2.
:param da1: first data variable or vector to compare
:param da2: second data variable or vector to compare
:return: True, False or List(True/False, ...)
"""
# Get dimensions and cut 't' and skip if given
if skip1 is not None:
    da1 = self._data_skip(da1, skip1)
if skip2 is not None:
    da2 = self._data_skip(da2, skip2)
da1_dims = self._get_dims(da1)
da2_dims = self._get_dims(da2)
if 't' in da1_dims:
ind = da1_dims.index('t')
da1_dims = da1_dims[:ind] + da1_dims[ind + 1:]
if 't' in da2_dims:
ind = da2_dims.index('t')
da2_dims = da2_dims[:ind] + da2_dims[ind + 1:]
# Depending on type, make comparison
if type(da1) is list and type(da2) is xr.DataArray:
match = [da2_dims == da1_pos for da1_pos in da1_dims]
if all(match):
return True
else:
return match # raise Exception("""False elements %s do not match position %s""" % (match, pos))
elif type(da2) is list and type(da1) is xr.DataArray:
match = [da1_dims == da2_pos for da2_pos in da2_dims]
if all(match):
return True
else:
return match
elif type(da1) is list and type(da2) is list:
if len(da1) == len(da2):
if da1_dims == da2_dims:
return True
else:
return False
else:
raise Exception("""Data variable 1 and Data variable 2 do not have the same length""")
elif type(da1) is xr.DataArray and type(da2) is xr.DataArray:
if da1_dims == da2_dims:
return True
else:
return False
def _get_mask(self, da, **maskargs):
if 'mask' in maskargs:
if maskargs['mask'] is None:
mask = 1
elif maskargs['mask'] == 'nan':
mask = self.nan_mask(da)
elif maskargs['mask'] == 'zero':
mask = self.zero_mask(da)
elif maskargs['mask'] == 'boundary':
if 'axes' in maskargs and 'index' in maskargs:
mask = self.boundary_mask(da, maskargs['axes'], maskargs['index'])
else:
raise Exception('Please provide axes and index as kwarg if boundary-mask is used!')
elif maskargs['mask'] == 'usr_def':
if 'mask_values' in maskargs:
mask = maskargs['mask_values']
else:
raise Exception('Please provide the mask in the kwarg mask_values if usr_def is used!')
elif maskargs['mask'] == 'mld':
if 'mld' in maskargs:
if 'invert' in maskargs:
mask = self.mld_mask(da, maskargs['mld'], maskargs['invert'])
else:
mask = self.mld_mask(da, maskargs['mld'])
else:
raise Exception('Please provide the mld as kwarg mld if mld-mask is used!')
else:
raise Exception('Unknown mask type')
else:
mask = 1
return mask
def boundary_mask(self, da, axes, index):
"""
Return a mask of the data array where the boundaries along an axis are set to nan.
"""
dims = self.grid._get_dims_from_axis(da, axes)
mask_axes = [np.ones(da.sizes[ax]) for ax in dims]
if len(dims) > 1:
for ax in range(len(dims)):
for i in index[ax]:
mask_axes[ax][i] = np.nan
mask_value = np.prod(np.meshgrid(*mask_axes, indexing='ij'), axis=0)
else:
mask_value = mask_axes[0]
for i in index:
mask_value[i] = np.nan
mask = xr.DataArray(mask_value, coords={dims[i]: da[dims[i]].values for i in range(len(dims))}, dims=dims)
return mask
def zero_mask(self, da):
"""
Create a mask of a data variable with 0 where the data is 0 or nan and 1 everywhere else.
"""
da_one = da * 0 + 1
da_one = da_one.where(np.isfinite(da), other=0)
da_one = da_one.where(da != 0, other=0)
mask = xr.DataArray(da_one.values, coords={dim: da[dim].values for dim in da.dims}, dims=da.dims)
return mask
def nan_mask(self, da):
"""
Create a mask of a data variable with nan where the data is 0 and 1 everywhere else.
"""
da_one = da * 0 + 1
da_one = da_one.where(da != 0)
mask = xr.DataArray(da_one.values, coords={dim: da[dim].values for dim in da.dims}, dims=da.dims)
return mask
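# Usage sketch (illustrative, not from the original source): the two masks
# encode masked points differently, so multiplying by nan_mask propagates
# NaN while multiplying by zero_mask keeps zeros. `ds.uo` is a hypothetical
# velocity field.
#
#     land_nan = grid_ops.nan_mask(ds.uo)   # 1 where data != 0, NaN elsewhere
#     land_zero = grid_ops.zero_mask(ds.uo) # 1 where finite and != 0, else 0
#     uo_masked = ds.uo * land_nan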
def mld_mask(self, da, mld, invert=False):
"""
:param da: Dataarray to get metrics from
:param mld: Mixed-layer-depth values
:param invert: If True, the mixed layer is masked to return below mld values only
:return: nan-mask for values below the mixed layer depth
"""
mask = da.copy(data=da.values * 0 + 1).rename('mld_mask')
dim_z = self.grid._get_dims_from_axis(mask,'Z')[0]
if not invert:
if np.all([d in mld.dims for d in da.dims if d!=dim_z]):
mask = mask.where((mask[dim_z]<mld).transpose(*mask.dims))
else:
dim_order = []
if 't' in mld.dims:
dim_order.append('t')
for i in range(1,len(mask.dims)):
if mask.dims[i] in mld.dims or mask.dims[i]==dim_z:
dim_order.append(mask.dims[i])
mask = mask.where((mask[dim_z] < mld).transpose(*tuple(dim_order)))
elif invert:
if np.all([d in mld.dims for d in da.dims if d!=dim_z]):
mask = mask.where((mask[dim_z]>mld).transpose(*mask.dims))
else:
dim_order = []
if 't' in mld.dims:
dim_order.append('t')
for i in range(1,len(mask.dims)):
if mask.dims[i] in mld.dims or mask.dims[i]==dim_z:
dim_order.append(mask.dims[i])
mask = mask.where((mask[dim_z]
#!/usr/bin/python
# Project : diafuzzer
# Copyright (C) 2017 Orange
# All rights reserved.
# This software is distributed under the terms and conditions of the 'BSD 3-Clause'
# license which can be found in the file 'LICENSE' in this package distribution.
from struct import pack, unpack
from cStringIO import StringIO
import time
import re
from pprint import pformat
import Dia
from random import randint
from copy import deepcopy
import sys
import scenario
from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
class IncompleteBuffer(Exception): pass
class MsgInvalidLength(Exception): pass
class AVPInvalidLength(Exception): pass
'''can be triggered at runtime by script'''
class RecvMismatch(Exception): pass
U16_MAX = pow(2,16)-1
U24_MAX = pow(2,24)-1
def pack24(x):
assert(x >= 0 and x <= U24_MAX)
s = pack('!L', x)
return s[1:]
def unpack24(x):
xp = '\x00' + x
return unpack('!L', xp)[0]
assert(pack24(0) == '\x00\x00\x00')
assert(0 == unpack24('\x00\x00\x00'))
def read_exactly(f, n):
b = f.read(n)
if len(b) != n: raise IncompleteBuffer()
return b
def get_matcher(elm):
m = re.match(r'code=(\d+)(?:,vendor=(\d+))?(?:\[(\d+)\])?', elm)
assert(m)
(code, vendor, index) = m.groups()
if index is None:
index = 0
else:
index = int(index, 0)
if vendor is None:
vendor = 0
else:
vendor = int(vendor, 0)
code = int(code, 0)
return lambda x, code=code, vendor=vendor: x.code == code and x.vendor == vendor
def get_filter(elm):
m = re.match(r'code=(\d+)(?:,vendor=(\d+))?(?:\[(\d+)\])?', elm)
assert(m)
(code, vendor, index) = m.groups()
if index is None:
index = 0
else:
index = int(index, 0)
if vendor is None:
vendor = 0
else:
vendor = int(vendor, 0)
code = int(code, 0)
def find_it(elms):
avps = [e for e in elms if e.code == code and e.vendor == vendor]
return avps[index]
return find_it
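# Minimal sketch (not part of the original source) contrasting the two
# helpers above: get_matcher yields a predicate over single AVPs, while
# get_filter resolves one AVP (honouring the optional [index]) from a list.
# AVP codes 264 (Origin-Host) and 268 (Result-Code) are illustrative.
def _demo_matcher_filter(avps):
    is_origin_host = get_matcher('code=264')  # vendor defaults to 0
    hosts = [a for a in avps if is_origin_host(a)]
    second_result_code = get_filter('code=268[1]')(avps)  # IndexError if absent
    return hosts, second_result_code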
sys.setrecursionlimit(10000)
class Msg:
def __init__(self, **kwds):
self.version = 1
self.length = None
self.R = False
self.P = False
self.E = False
self.T = False
self.reserved = None
self.code = 0
self.app_id = 0
self.e2e_id = None
self.h2h_id = None
self.avps = []
for k in kwds:
setattr(self, k, kwds[k])
def __repr__(self, offset=0, indent=2):
attrs = {}
for k in ['code', 'app_id', 'e2e_id', 'h2h_id', 'avps']:
attrs[k] = getattr(self, k)
comment = ''
if hasattr(self, 'model'):
comment = self.model.name
if self.R: attrs['R'] = True
if self.P: attrs['P'] = True
if self.E: attrs['E'] = True
if self.T: attrs['T'] = True
if self.version != 1: attrs['version'] = self.version
if self.length is not None: attrs['length'] = self.length
r = ''
r += ' '*offset + 'Msg('
elms = []
for k in ['version', 'R', 'P', 'E', 'T', 'reserved',
'code', 'app_id']:
if k in attrs:
if k == 'app_id':
elms.append('%s=0x%x' % (k, attrs[k]))
else:
elms.append('%s=%r' % (k, attrs[k]))
r += ', '.join(elms)
if 'avps' in attrs:
r += ', avps=[ # %s \n' % comment
for a in self.avps:
r += a.__repr__(offset+indent, indent) + ',\n'
r += ' '*offset + ']'
r += ')'
return r
@staticmethod
def recv(f, _timeout=5.0):
f.settimeout(_timeout)
data = scenario.unpack_frame(f)
return Msg.decode(data)
def send(self, f):
data = self.encode()
scenario.pack_frame(f, data)
@staticmethod
def decode(s, tag=False):
f = StringIO(s)
attrs = {}
attrs['version'] = unpack('!B', read_exactly(f, 1))[0]
attrs['total_length'] = unpack24(read_exactly(f, 3))
flags = unpack('!B', read_exactly(f, 1))[0]
if flags & 0x80: attrs['R'] = True
if flags & 0x40: attrs['P'] = True
if flags & 0x20: attrs['E'] = True
if flags & 0x10: attrs['T'] = True
reserved = flags & 0x0f
if reserved: attrs['reserved'] = reserved
attrs['code'] = unpack24(read_exactly(f, 3))
attrs['app_id'] = unpack('!L', read_exactly(f, 4))[0]
attrs['h2h_id'] = unpack('!L', read_exactly(f, 4))[0]
attrs['e2e_id'] = unpack('!L', read_exactly(f, 4))[0]
length = attrs['total_length']
length -= 20
if length < 0: raise MsgInvalidLength()
avps = []
data = read_exactly(f, length)
while True:
a = Avp.decode(data)
avps.append(a)
assert(a.padded_length % 4 == 0)
data = data[a.padded_length:]
if len(data) == 0:
break
attrs['avps'] = avps
m = Msg(**attrs)
if tag:
Dia.Directory.tag(m)
return m
def encode(self):
f = StringIO()
content = ''
for a in self.avps:
content += a.encode()
if self.length:
length = self.length
else:
length = len(content) + 20
f.write(pack('!B', self.version))
f.write(pack24(length))
flags = 0
if self.R: flags |= 0x80
if self.P: flags |= 0x40
if self.E: flags |= 0x20
if self.T: flags |= 0x10
if self.reserved: flags |= self.reserved
f.write(pack('!B', flags))
f.write(pack24(self.code))
f.write(pack('!L', self.app_id))
if self.h2h_id is None:
self.h2h_id = randint(0, pow(2, 32)-1)
f.write(pack('!L', self.h2h_id))
if self.e2e_id is None:
self.e2e_id = randint(0, pow(2, 32)-1)
f.write(pack('!L', self.e2e_id))
f.write(content)
return f.getvalue()
def all_avps(self):
for a in self.avps:
for sub_a in a.all_avps():
yield sub_a
def eval_path(self, path):
elms = path.split('/')[1:]
a = get_filter(elms[0])(self.avps)
return a.eval_path(elms[1:])
def modify_value(self, path, value):
'''traverse AVP tree down to target, and set intermediate length to None
in order to force fixup.'''
elms = path.split('/')[1:]
a = get_filter(elms[0])(self.avps)
a.length = None
a.modify_value(elms[1:], value)
def suppress_avps(self, path):
elms = path.split('/')[1:]
assert(len(elms) >= 1)
if len(elms) == 1:
self.length = None
m = get_matcher(elms[0])
new_avps = []
for a in self.avps:
if not m(a):
new_avps.append(a)
self.avps = new_avps
else:
a = get_filter(elms[0])(self.avps)
a.length = None
a.suppress_avps(elms[1:])
def overflow_avps(self, path, count):
elms = path.split('/')[1:]
assert(len(elms) >= 1)
if len(elms) == 1:
self.length = None
m = get_matcher(elms[0])
existing_avps = [a for a in self.avps if m(a)]
existing_count = len(existing_avps)
assert(existing_count > 0)
self.avps.extend([existing_avps[-1]] * (count-existing_count))
else:
a = get_filter(elms[0])(self.avps)
a.length = None
a.overflow_avps(elms[1:], count)
def compute_path(self, avp):
avps = [a for a in self.avps if a.code == avp.code and a.vendor == avp.vendor]
assert(len(avps) > 0)
attrs = {}
if avp.code != 0:
attrs['code'] = avp.code
if avp.vendor != 0:
attrs['vendor'] = avp.vendor
path = '/'
# emit 'code' first and separate fields with commas so the path can be
# re-parsed by get_matcher/get_filter above
path += ','.join('%s=%d' % (name, attrs[name]) for name in ('code', 'vendor') if name in attrs)
if len(avps) == 1:
return path
else:
return '%s[%d]' % (path, avps.index(avp))
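# Minimal round-trip sketch (not part of the original source): build a small
# request, encode it to wire format and decode it back. Code 257 (CER) and
# AVP 264 (Origin-Host) are used purely as an illustration.
def _demo_msg_roundtrip():
    req = Msg(R=True, code=257, app_id=0,
              avps=[Avp(code=264, M=True, data='client.example')])
    wire = req.encode()
    parsed = Msg.decode(wire)
    assert parsed.code == 257 and parsed.avps[0].code == 264
    return parsed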
class Avp:
def __init__(self, **kwds):
self.code = 0
self.V = False
self.M = False
self.P = False
self.reserved = None
self.vendor = 0
self.avps = []
self.data = None
self.length = None
self.model = None
for k in kwds:
if k == 'u32':
self.data = pack('!L', kwds[k])
elif k == 's32':
self.data = pack('!i', kwds[k])  # signed 32-bit; '!I' would reject negative values
elif k == 'u64':
self.data = pack('!Q', kwds[k])
elif k == 'f32':
self.data = pack('!f', kwds[k])
elif k == 'f64':
self.data = pack('!d', kwds[k])
elif k == 'v4':
self.data = pack('!H', 1) + inet_pton(AF_INET, kwds[k])
elif k == 'v6':
self.data = pack('!H', 2) + inet_pton(AF_INET6, kwds[k])
else:
setattr(self, k, kwds[k])
def __repr__(self, offset=0, indent=2):
attrs = {}
attrs['code'] = self.code
for k in ['reserved', 'vendor', 'data', 'length']:
if getattr(self, k) is not None:
attrs[k] = getattr(self, k)
model_avp = None
if hasattr(self, 'model_avp'):
model_avp = self.model_avp
if self.V: attrs['V'] = True
if self.M: attrs['M'] = True
if self.P: attrs['P'] = True
if len(self.avps) > 0: attrs['avps'] = self.avps
r = ''
if model_avp is not None:
r += ' '*offset + '# %s\n' % model_avp.name
r += ' '*offset + 'Avp('
elms = []
for k in ['code', 'V', 'M', 'P', 'reserved', 'vendor']:
if k in attrs:
elms.append('%s=%r' % (k, attrs[k]))
r += ', '.join(elms)
if hasattr(self, 'var'):
r += ', data=%s' % getattr(self, 'var')
elif 'avps' in attrs:
r += ', avps=[\n'
for a in self.avps:
r += a.__repr__(offset+indent, indent) + ',\n'
r += ' '*offset + ']'
elif 'data' in attrs:
if model_avp is not None:
if model_avp.datatype in ['Unsigned32']:
r += ', u32=%d' % unpack('!L', attrs['data'])[0]
elif model_avp.datatype in ['Integer32', 'Enumerated']:
    r += ', s32=%d' % unpack('!i', attrs['data'])[0]
elif model_avp.datatype in ['Unsigned64']:
r += ', u64=%d' % unpack('!Q', attrs['data'])[0]
elif model_avp.datatype == 'Address':
family = unpack('!H', attrs['data'][:2])[0]
if family == 1:
r += ', v4=%r' % inet_ntop(AF_INET, attrs['data'][2:])
elif family == 2:
r += ', v6=%r' % inet_ntop(AF_INET6, attrs['data'][2:])
else:
r += ', data=%r' % attrs['data']
else:
r += ', data=%r' % attrs['data']
if self.model:
r += ', conformant=%r' % self.model
r += ')'
return r
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def decode(s):
f = StringIO(s)
attrs = {}
attrs['code'] = unpack('!L', read_exactly(f, 4))[0]
flags = unpack('!B', read_exactly(f, 1))[0]
if flags & 0x80: attrs['V'] = True
if flags & 0x40: attrs['M'] = True
if flags & 0x20: attrs['P'] = True
reserved = flags & 0x1f
if reserved: attrs['reserved'] = reserved
length = unpack24(read_exactly(f, 3))
attrs['length']
from __future__ import unicode_literals
import collections
import six
from egnyte import base, exc
class FileOrFolder(base.Resource):
"""Things that are common to both files and folders."""
_url_template = "pubapi/v1/fs%(path)s"
_lazy_attributes = {'name', 'folder_id', 'is_folder'}
def _action(self, action, destination):
exc.default.check_response(self._client.POST(self._url, dict(action=action, destination=destination)))
return self.__class__(self._client, path=destination)
def copy(self, destination):
"""Copy this to another path. Destination path should have all segments (including the last one)."""
return self._action('copy', destination)
def move(self, destination):
"""Move this to another path. Destination path should have all segments (including the last one)."""
return self._action('move', destination)
def link(self, accessibility, recipients=None, send_email=None, message=None,
copy_me=None, notify=None, link_to_current=None,
expiry_date=None, expiry_clicks=None, add_filename=None):
"""
Create a link.
* accessibility: Determines how the link can be accessed ('Anyone', 'Password', 'Domain', 'Recipients')
* send_email: If true, Egnyte will send the link by email.
* recipients: List email addresses for people who should receive the link. Only required if send_email is True (List of valid email addresses)
* message: Personal message to be sent in link email. Only applies if send_email is True (plain text)
* copy_me: If True, a copy of the link message will be sent to the link creator. Only applies if send_email is True.
* notify: If True, link creator will be notified via email when link is accessed.
* link_to_current: If True, link will always refer to current version of file. Only applicable for file links.
* expiry_date: The expiry date for the link. If expiry_date is specified, expiry_clicks cannot be set (future date as datetime.date or string in YYYY-MM-DD format)
* expiry_clicks: The number of times the link can be clicked before it stops working. If expiry_clicks is specified, expiry_date cannot be set (value must be between 1 - 10, inclusive)
* add_filename: If True then the filename will be appended to the end of the link. Only applies to file links, not folder links.
Will return sequence of created Links, one for each recipient.
"""
return Links(self._client).create(path=self.path, type=self._link_kind, accessibility=accessibility,
recipients=recipients, send_email=send_email, message=message,
copy_me=copy_me, notify=notify, link_to_current=link_to_current,
expiry_date=expiry_date, expiry_clicks=expiry_clicks, add_filename=add_filename)
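# Usage sketch (illustrative, not from the original source): a
# password-protected link that stops working after three clicks. The 'url'
# attribute on the returned Link objects is an assumption, not confirmed by
# this file.
#
#     links = a_file_or_folder.link(accessibility='Password', expiry_clicks=3)
#     for link in links:
#         print(link.url)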
def _get(self):
"""Get the right object type (File or Folder), depending on what this path points to in the Cloud File System"""
json = exc.default.check_json_response(self._client.GET(self._url))
if json['is_folder'] and not isinstance(self, Folder):
instance = Folder(self._client, path=self.path)
elif not json['is_folder'] and not isinstance(self, File):
instance = File(self._client, path=self.path)
else:
instance = self
instance._update_attributes(json)
if instance.is_folder:
instance.folders = [Folder(self._client, **folder_data) for folder_data in json.get('folders', ())]
instance.files = [File(self._client, **file_data) for file_data in json.get('files', ())]
return instance
class File(FileOrFolder):
"""
Wrapper for a file in the cloud.
Does not have to exist - this can represent a new file to be uploaded.
path - file path
"""
_upload_chunk_size = 100 * (1024 * 1024) # 100 MB
_upload_retries = 3
_link_kind = 'file'
_lazy_attributes = {'num_versions', 'name', 'checksum', 'last_modified', 'entry_id',
'uploaded_by', 'size', 'is_folder', 'versions'}
_url_template_content = "pubapi/v1/fs-content%(path)s"
_url_template_content_chunked = "pubapi/v1/fs-content-chunked%(path)s"
def upload(self, fp, size=None, progress_callback=None):
"""
Upload file contents.
fp can be any file-like object, but if you don't specify its size in advance it must support tell and seek methods.
Progress callback is optional - if provided, it should match signature of ProgressCallbacks.upload_progress
"""
if isinstance(fp, six.binary_type):
fp = six.BytesIO(fp)
if size is None:
size = base.get_file_size(fp)
if size < self._upload_chunk_size:
# simple, one request upload
retries = max(self._upload_retries, 1)
while retries > 0:
url = self._client.get_url(self._url_template_content, path=self.path)
chunk = base._FileChunk(fp, 0, size)
r = self._client.POST(url, data=chunk, headers={'Content-length': str(size)})
exc.default.check_response(r)
server_sha = r.headers['X-Sha512-Checksum']
our_sha = chunk.sha.hexdigest()
if server_sha == our_sha:
break
retries -= 1
# TODO: retry network errors too
if retries == 0:
raise exc.ChecksumError("Failed to upload file", {})
else: # chunked upload
return self._chunked_upload(fp, size, progress_callback)
def download(self, download_range=None):
"""
Download file contents.
Returns a FileDownload.
Optional range is 2 integer sequence (start offset, end offset) used to download only part of the file.
"""
url = self._client.get_url(self._url_template_content, path=self.path)
if download_range is None:
r = exc.default.check_response(self._client.GET(url, stream=True))
else:
if len(download_range) != 2:
raise exc.InvalidParameters('Download range needs to be None or a 2 element integer sequence')
r = exc.partial.check_response(self._client.GET(url, stream=True, headers={'Range': 'bytes=%d-%d' % download_range}))
return base.FileDownload(r, self)
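# Usage sketch (illustrative, not from the original source): fetching only
# the first kilobyte of a file via the optional byte range described above.
# The path is hypothetical.
#
#     f = File(client, path='/Shared/example.txt')
#     dl = f.download(download_range=(0, 1023))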
def _chunked_upload(self, fp, size, progress_callback):
url = self._client.get_url(self._url_template_content_chunked, path=self.path)
chunks = list(base.split_file_into_chunks(fp, size, self._upload_chunk_size)) # need count of chunks
chunk_count = len(chunks)
headers = {}
for chunk_number, chunk in enumerate(chunks, 1): # count from 1 not 0
headers['x-egnyte-chunk-num'] = "%d" % chunk_number
headers['content-length'] = str(chunk.size)
if chunk_number == chunk_count: # last chunk
headers['x-egnyte-last-chunk'] = "true"
retries = max(self._upload_retries, 1)
while retries > 0:
try:
r = self._client.POST(url, data=chunk, headers=headers)
server_sha = r.headers['x-egnyte-chunk-sha512-checksum']
our_sha = chunk.sha.hexdigest()
if server_sha == our_sha:
break
except Exception:
    print("_chunked_upload error on POST request to upload chunk, will retry")
retries -= 1
# TODO: retry network errors too
# TODO: refactor common parts of chunked and standard upload
if retries == 0:
raise exc.ChecksumError("Failed to upload file chunk", {"chunk_number": chunk_number, "start_position": chunk.position})
exc.default.check_response(r)
if chunk_number == 1:
headers['x-egnyte-upload-id'] = r.headers['x-egnyte-upload-id']
if progress_callback is not None:
progress_callback(self, size, chunk_number * self._upload_chunk_size)
def delete(self):
"""Delete this file."""
base.Resource.delete(self)
def add_note(self, message):
"""Add a note to this file. Returns the created Note object."""
return self._client.notes.create(self.path, message)
def get_notes(self, **kwargs):
"""Get notes attached to this file. Returns list of Note objects"""
return self._client.notes.list(file=self.path, **kwargs)
class Folder(FileOrFolder):
"""
Wrapper for a folder in the cloud.
Does not have to exist - can represent a new folder yet to be created.
"""
_url_template = "pubapi/v1/fs%(path)s"
_url_template_permissions = "pubapi/v1/perms/folder/%(path)s"
_url_template_effective_permissions = "pubapi/v1/perms/user/%(username)s"
_lazy_attributes = {'name', 'folder_id', 'is_folder'}
_link_kind = 'folder'
folders = None
files = None
def folder(self, path, **kwargs):
"""Return a subfolder of this folder."""
return Folder(self._client, path=self.path + '/' + path, **kwargs)
def file(self, filename, **kwargs):
"""Return a file in this folder."""
return File(self._client, folder=self, filename=filename, path=self.path + '/' + filename, **kwargs)
def create(self, ignore_if_exists=True):
"""
Create a new folder in the Egnyte cloud.
If ignore_if_exists is True, the error raised if the folder already exists will be ignored.
"""
r = self._client.POST(self._url, dict(action='add_folder'))
(exc.created_ignore_existing if ignore_if_exists else exc.created).check_response(r)
return self
def delete(self):
"""Delete this folder in the cloud."""
base.Resource.delete(self)
def list(self):
"""
Gets contents of this folder (in instance attributes 'folders' and 'files')
"""
return self._get()
def get_permissions(self, users=None, groups=None):
"""
Get Permission values for this folder.
"""
query_params = {}
if users is not None:
query_params[u'users'] = '|'.join(six.text_type(x) for x in users)
if groups is not None:
query_params[u'groups'] = '|'.join(six.text_type(x) for x in groups)
url = self._client.get_url(self._url_template_permissions, path=self.path)
r = exc.default.check_json_response(self._client.GET(url, params=query_params))
return PermissionSet(r)
def set_permissions(self, permission, users=None, groups=None):
"""
Set permission level for some users and/or groups for this folder.
"""
url = self._client.get_url(self._url_template_permissions, path=self.path)
data = base.filter_none_values(dict(permission=permission, users=users, groups=groups))
exc.default.check_response(self._client.POST(url, data))
def get_effective_permissions(self, username):
"""
Get effective permissions (both direct, and granted by membership in groups) to this folder for a specific user.
username: name of user (string)
Returns one of 'Owner', 'Full', 'Editor', 'Viewer'
"""
url = self._client.get_url(self._url_template_effective_permissions, username=username)
params = dict(folder=self.path)
r = exc.default.check_json_response(self._client.GET(url, params=params))
return r['permission']
def get_notes(self, **kwargs):
"""Get notes attached to any file in this folder."""
return self._client.notes.list(folder=self.path, **kwargs)
class Link(base.Resource):
"""Link to a file or folder"""
_url_template = "pubapi/v1/links/%(id)s"
_lazy_attributes = {'copy_me', 'link_to_current', 'accessibility', 'notify',
'path', 'creation_date', 'type', 'send_mail'}
def delete(self):
"""Delete this link"""
base.Resource.delete(self)
class User(base.Resource):
"""
Wrapper for a User.
Warning: attribute names in this class use camelCase instead of underscores.
Name is a dictionary with 2 keys: givenName and lastName.
"""
_url_template = "pubapi/v2/users/%(id)s"
_url_template_effective_permissions = "pubabi/v1/perms/user/%(userName)s"
_lazy_attributes = {'userName', 'externalId', 'email', 'name', 'active', 'locked', 'authType',
'role', 'userType', 'idpUserId'}
def delete(self):
"""Delete this user account."""
base.Resource.delete(self)
def update(self, email=None, familyName=None, givenName=None, active=None, sendInvite=None, authType=None,
userType=None, idpUserId=None, userPrincipalName=None):
"""
Modify this user account.
Optional parameters (no change if value is None):
* email: The email address of the user. Any valid email address (e.g. <EMAIL>)
* familyName: The last name of the user. Any plain text (e.g. John)
* givenName: The first name of the user. Any plain text (e.g. Smith)
* active: Whether the user is active or inactive. True or
'''
Created : Jan 16, 2017
Last major update : June 29, 2017
@author: <NAME>
Purpose:
Fast density clustering
'''
import numpy as np
import time
from numpy.random import random
import sys, os
from .density_estimation import KDE
import pickle
from collections import OrderedDict as OD
from sklearn.neighbors import NearestNeighbors
import multiprocessing
class FDC:
""" Fast Density Clustering via kernel density modelling for low-dimensional data (D <~ 8)
Parameters
----------
nh_size : int, optional (default = 'auto')
Neighborhood size. This is the scale used for identifying the initial modes in the density distribution, regardless
of the covariance. If a point has the maximum density among it's nh_size neighbors, it is marked as
a potential cluster center. 'auto' means that the nh_size is scaled with number of samples. We
use nh_size = 100 for 10000 samples. The minimum neighborhood size is set to 10.
eta : float, optional (default = 0.5)
Noise threshold used to merge clusters. This is done by quenching directly to the specified noise threshold
(as opposed to progressively coarse-graining). The noise threshold determines the extended
neighborhood of cluster centers. Points that have a relative density difference of less than
"noise_threshold" and that are density-reachable, are part of the extended neighborhood.
random_state: int, optional (default = 0)
Seed for the random number generator. By default, the method generates
the same results. This seed is used for the cross-validation (set partitions),
which will in turn affect the bandwidth value.
test_ratio_size: float, optional (default = 0.8)
Ratio size of the test set used when performing maximum likelihood estimation.
In order to have smooth density estimations (prevent overfitting), it is recommended to
use a large test_ratio_size (closer to 1.0) rather than a small one.
verbose: int, optional (default = 1)
Set to 0 if you don't want to see print to screen.
bandwidth: float, optional (default = None)
Bandwidth for the kernel density estimate. If None (the default), it is
determined automatically via maximum likelihood on a test set.
merge: bool, optional (default = True)
Optional merging at zero noise threshold, merges overlapping minimal clusters
atol: float, optional (default = 0.01)
Absolute tolerance for the kernel density estimate; smaller values lead to
slower execution but better precision.
rtol: float, optional (default = 0.0001)
Relative tolerance for the kernel density estimate; smaller values lead to
slower execution but better precision.
xtol: float, optional (default = 0.01)
precision parameter for optimizing the bandwidth using maximum likelihood on a test set
search_size: int, optional (default = 20)
When performing the search over neighborhoods, size of each local neighborhood to check
when expanding. This drastically slows the coarse-graining if chosen too big!
kernel: str, optional (default='gaussian')
Type of kernel to use for density estimates. Other options are {'epanechnikov', 'linear', 'tophat'}.
"""
def __init__(self, nh_size='auto', eta=0.5,
random_state=0, test_ratio_size=0.8, verbose=1, bandwidth=None,
merge=True,
atol=0.01,
rtol=0.0001,
xtol=0.01,
search_size = 20,
n_cluster_init = None,
kernel = 'gaussian',
n_job='auto'
):
self.test_ratio_size = test_ratio_size
self.random_state = random_state
self.verbose = verbose
self.nh_size = nh_size
self.bandwidth = bandwidth
self.eta = eta
self.merge = merge
self.atol = atol
self.rtol = rtol
self.xtol = xtol
self.cluster_label = None
self.search_size = search_size
self.n_cluster_init = n_cluster_init
self.kernel = kernel
self.nbrs= None
self.nn_dist= None
self.nn_list= None
self.density_model = None
if n_job == 'auto':
self.n_job=multiprocessing.cpu_count()
else:
if n_job > multiprocessing.cpu_count():
self.n_job=multiprocessing.cpu_count()
else:
self.n_job=n_job
def fit(self, X):
""" Performs density clustering on given data set
Parameters
----------
X : array, (n_sample, n_feature)
Data to cluster.
Returns
----------
self : fdc object
To obtain new cluster labels use self.cluster_label
"""
t = time.time()
self.X = X # shallow copy
self.n_sample = X.shape[0]
if self.n_sample < 10:
    raise ValueError("Too few samples for computing densities!")
if self.nh_size == 'auto':
self.nh_size = max([int(25*np.log10(self.n_sample)), 10])
if self.search_size > self.nh_size:
self.search_size = self.nh_size
if self.verbose == 0:
blockPrint()
self.display_main_parameters()
print("[fdc] Starting clustering with n=%i samples..." % X.shape[0])
start = time.time()
print("[fdc] Fitting kernel model for density estimation ...")
self.fit_density(X)
#print("here")
print("[fdc] Finding centers ...")
self.compute_delta(X, self.rho)
print("[fdc] Found %i potential centers ..." % self.idx_centers_unmerged.shape[0])
# temporary idx for the centers :
self.idx_centers = self.idx_centers_unmerged
self.cluster_label = assign_cluster(self.idx_centers_unmerged, self.nn_delta, self.density_graph)
if self.merge: # usually by default one should perform this minimal merging ..
print("[fdc] Merging overlapping minimal clusters ...")
self.check_cluster_stability_fast(X, 0.) # given
if self.eta >= 1e-3 :
print("[fdc] Iterating up to specified noise threshold ...")
self.check_cluster_stability_fast(X, self.eta) # merging 'unstable' clusters
print("[fdc] Done in %.3f s" % (time.time()-start))
enablePrint()
return self
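# Usage sketch (illustrative, not from the original source): clustering a
# toy 2-D Gaussian mixture; names below are hypothetical.
#
#     import numpy as np
#     X = np.concatenate([np.random.randn(500, 2),
#                         np.random.randn(500, 2) + 5.0])
#     model = FDC(eta=0.5).fit(X)
#     labels = model.cluster_label  # one integer label per sample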
def save(self, name=None):
""" Saves current model to specified path 'name' """
if name is None:
fname = self.make_file_name()
else:
fname = name
fopen = open(fname,'wb')
pickle.dump(self,fopen)
fopen.close()
return fname
def load(self, name=None):
if name is None:
name = self.make_file_name()
self.__dict__.update(pickle.load(open(name,'rb')).__dict__)
return self
def fit_density(self, X):
# nearest neighbors class
self.nbrs = NearestNeighbors(n_neighbors = self.nh_size, algorithm='kd_tree').fit(X)
# get k-NN
self.nn_dist, self.nn_list = self.nbrs.kneighbors(X)
# density model class
self.density_model = KDE(bandwidth=self.bandwidth, test_ratio_size=self.test_ratio_size,
atol=self.atol, rtol=self.rtol, xtol=self.xtol, nn_dist = self.nn_dist, kernel=self.kernel)
# fit density model to data
self.density_model.fit(X)
# save bandwidth
self.bandwidth = self.density_model.bandwidth
# compute density map based on kernel density model
if (self.n_sample > 30000) and (self.n_job != 1):
print("[fdc] Computing density with %i threads..."%self.n_job)
p = multiprocessing.Pool(self.n_job)
size_split = X.shape[0]//self.n_job
results =[]
idx_split = chunkIt(len(X), self.n_job) # find the index to split the array in approx. n_job equal parts.
for i in range(self.n_job):
results.append(p.apply_async(self.f_tmp, [X[idx_split[i][0]:idx_split[i][1]], i]))
results = [res.get() for res in results]
asort = np.argsort([results[i][0] for i in range(self.n_job)]) # reordering
#print(asort)
self.rho=np.hstack([results[a][1] for a in asort])
else:
print("[fdc] Computing density with 1 thread...")
self.rho = self.density_model.evaluate_density(X)
return self
def f_tmp(self, X_, i_):
"""evaluating density and keeping track of threading order"""
return (i_, self.density_model.evaluate_density(X_))
#@profile
def coarse_grain(self, noise_iterable):
"""Started from an initial noise scale, progressively merges clusters.
If specified, saves the cluster assignments at every level of the coarse graining.
Parameters
-----------
noise_iterable : iterable of floats
Should be an iterable containg noise values at which to perform coarse graining. Usually
one should start from 0 and go to larger values by small increments. The whole clustering
information is stored in self.hierarchy
Return
------
self
"""
if self.verbose == 0:
blockPrint()
print("[fdc] Coarse graining until desired noise threshold ...")
noise_range = [n for n in noise_iterable]
#hierarchy = []
self.max_noise = -1
n_cluster = 0
# note to self, if no merger is done, no need to store hierarchy ... just work with noise_range dict ...
for nt in noise_range:
if self.n_cluster_init is not None:
if len(self.idx_centers) <= self.n_cluster_init:
print("[fdc.py] Reached number of specified clusters [= %i] (or close to), n_cluster = %i"%(self.n_cluster_init,len(self.idx_centers)))
break
self.check_cluster_stability_fast(self.X, eta = nt)
#hierarchy.append(OD({'idx_centers': self.idx_centers, 'cluster_labels': self.cluster_label})) # -> the only required information <-
if len(self.idx_centers) != n_cluster:
n_cluster = len(self.idx_centers)
self.max_noise = nt
#self.hierarchy = hierarchy
self.noise_range = noise_range
self.noise_threshold = noise_range[-1]
enablePrint()
return self
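# Usage sketch (illustrative, not from the original source): progressive
# coarse-graining after an initial fit, following the docstring's advice to
# start at 0 and increase the noise threshold in small increments.
#
#     model.coarse_grain(np.linspace(0.0, 1.0, 21))
#     print(model.max_noise, len(model.idx_centers))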
#@profile
def compute_delta(self, X, rho = None):
"""
Purpose:
Computes distance to nearest-neighbor with higher density
Return:
delta,nn_delta,idx_centers,density_graph
:delta: distance to n.n. with higher density (within some neighborhood cutoff)
:nn_delta: index of n.n. with ...
:idx_centers: list of points that have the largest density in their neigborhood cutoff
:density_graph: for every point, list of points are incoming (via the density gradient)
"""
if rho is None:
rho = self.rho
n_sample, n_feature = X.shape
maxdist = np.linalg.norm([np.max(X[:,i])-np.min(X[:,i]) for i in range(n_feature)])
delta = maxdist*np.ones(n_sample, dtype=float)
nn_delta = np.ones(n_sample, dtype=int)
density_graph = [[] for i in range(n_sample)] # store incoming leaves
### ----------->
nn_list = self.nn_list # restricted over neighborhood (nh_size)
### ----------->
for i in range(n_sample):
idx = index_greater(rho[nn_list[i]])
if idx:
density_graph[nn_list[i,idx]].append(i)
nn_delta[i] = nn_list[i,idx]
delta[i] = self.nn_dist[i,idx]
else:
nn_delta[i]=-1
idx_centers = np.array(range(n_sample))[delta > 0.999*maxdist]
os.stat('tmp/stefan_full_rgba_ecwv3_meta.ecw.aux.xml')
gdaltest.post_reason('fail')
return 'fail'
except:
pass
return 'success'
###############################################################################
# Test setting/unsetting file metadata of a ECW v3 file
def ecw_42():
if gdaltest.ecw_drv is None or gdaltest.ecw_drv.major_version < 5:
return 'skip'
shutil.copy('data/stefan_full_rgba_ecwv3_meta.ecw', 'tmp/stefan_full_rgba_ecwv3_meta.ecw')
try:
os.remove('tmp/stefan_full_rgba_ecwv3_meta.ecw.aux.xml')
except:
pass
ds = gdal.Open('tmp/stefan_full_rgba_ecwv3_meta.ecw', gdal.GA_Update)
md = {}
md['FILE_METADATA_CLASSIFICATION'] = 'FILE_METADATA_CLASSIFICATION'
md['FILE_METADATA_ACQUISITION_DATE'] = '2013-04-04'
md['FILE_METADATA_ACQUISITION_SENSOR_NAME'] = 'FILE_METADATA_ACQUISITION_SENSOR_NAME'
md['FILE_METADATA_COMPRESSION_SOFTWARE'] = 'FILE_METADATA_COMPRESSION_SOFTWARE'
md['FILE_METADATA_AUTHOR'] = 'FILE_METADATA_AUTHOR'
md['FILE_METADATA_COPYRIGHT'] = 'FILE_METADATA_COPYRIGHT'
md['FILE_METADATA_COMPANY'] = 'FILE_METADATA_COMPANY'
md['FILE_METADATA_EMAIL'] = 'FILE_METADATA_EMAIL'
md['FILE_METADATA_ADDRESS'] = 'FILE_METADATA_ADDRESS'
md['FILE_METADATA_TELEPHONE'] = 'FILE_METADATA_TELEPHONE'
ds.SetMetadata(md)
ds = None
# Check that there's no .aux.xml file
try:
os.stat('tmp/stefan_full_rgba_ecwv3_meta.ecw.aux.xml')
gdaltest.post_reason('fail')
return 'fail'
except:
pass
# Check item values
ds = gdal.Open('tmp/stefan_full_rgba_ecwv3_meta.ecw')
got_md = ds.GetMetadata()
for item in md:
if got_md[item] != md[item]:
gdaltest.post_reason('fail')
print(got_md[item])
print(md[item])
return 'fail'
ds = None
# Test unsetting all the stuff
ds = gdal.Open('tmp/stefan_full_rgba_ecwv3_meta.ecw', gdal.GA_Update)
md = {}
md['FILE_METADATA_CLASSIFICATION'] = ''
md['FILE_METADATA_ACQUISITION_DATE'] = '1970-01-01'
md['FILE_METADATA_ACQUISITION_SENSOR_NAME'] = ''
md['FILE_METADATA_COMPRESSION_SOFTWARE'] = ''
md['FILE_METADATA_AUTHOR'] = ''
md['FILE_METADATA_COPYRIGHT'] = ''
md['FILE_METADATA_COMPANY'] = ''
md['FILE_METADATA_EMAIL'] = ''
md['FILE_METADATA_ADDRESS'] = ''
md['FILE_METADATA_TELEPHONE'] = ''
ds.SetMetadata(md)
ds = None
# Check that there's no .aux.xml file
try:
os.stat('tmp/stefan_full_rgba_ecwv3_meta.ecw.aux.xml')
gdaltest.post_reason('fail')
return 'fail'
except:
pass
# Check item values
ds = gdal.Open('tmp/stefan_full_rgba_ecwv3_meta.ecw')
got_md = ds.GetMetadata()
for item in md:
if item in got_md and item != 'FILE_METADATA_ACQUISITION_DATE':
gdaltest.post_reason('fail')
print(got_md[item])
print(md[item])
return 'fail'
ds = None
return 'success'
###############################################################################
# Test auto-promotion of 1bit alpha band to 8bit
# Note: only works on reversible files like this one
def ecw_43():
if gdaltest.jp2ecw_drv is None:
return 'skip'
ds = gdal.Open('data/stefan_full_rgba_alpha_1bit.jp2')
fourth_band = ds.GetRasterBand(4)
if fourth_band.GetMetadataItem('NBITS', 'IMAGE_STRUCTURE') is not None:
return 'fail'
got_cs = fourth_band.Checksum()
if got_cs != 8527:
gdaltest.post_reason('fail')
print(got_cs)
return 'fail'
jp2_bands_data = ds.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize)
jp2_fourth_band_data = fourth_band.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize)
fourth_band.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize,int(ds.RasterXSize/16),int(ds.RasterYSize/16))
tmp_ds = gdal.GetDriverByName('GTiff').CreateCopy('/vsimem/ecw_43.tif', ds)
fourth_band = tmp_ds.GetRasterBand(4)
got_cs = fourth_band.Checksum()
gtiff_bands_data = tmp_ds.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize)
gtiff_fourth_band_data = fourth_band.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize)
#gtiff_fourth_band_subsampled_data = fourth_band.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize,ds.RasterXSize/16,ds.RasterYSize/16)
tmp_ds = None
gdal.GetDriverByName('GTiff').Delete('/vsimem/ecw_43.tif')
if got_cs != 8527:
gdaltest.post_reason('fail')
print(got_cs)
return 'fail'
if jp2_bands_data != gtiff_bands_data:
gdaltest.post_reason('fail')
return 'fail'
if jp2_fourth_band_data != gtiff_fourth_band_data:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/stefan_full_rgba_alpha_1bit.jp2', open_options = ['1BIT_ALPHA_PROMOTION=NO'])
fourth_band = ds.GetRasterBand(4)
if fourth_band.GetMetadataItem('NBITS', 'IMAGE_STRUCTURE') != '1':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test metadata retrieval from JP2 file
def ecw_44():
if gdaltest.jp2ecw_drv is None:
return 'skip'
if gdaltest.ecw_drv.major_version < 5 or (gdaltest.ecw_drv.major_version == 5 and gdaltest.ecw_drv.minor_version < 1):
return 'skip'
ds = gdal.Open('data/stefan_full_rgba_alpha_1bit.jp2')
expected_md = [
('CODE_BLOCK_SIZE_X','64'),
('CODE_BLOCK_SIZE_Y','64'),
('GML_JP2_DATA','FALSE'),
('PRECINCT_SIZE_X','128,128'),
('PRECINCT_SIZE_Y','128,128'),
('PRECISION','8,8,8,1'),
('PROFILE','0'),
('PROGRESSION_ORDER','RPCL'),
('QUALITY_LAYERS','1'),
('RESOLUTION_LEVELS','2'),
('TILE_HEIGHT','150'),
('TILE_WIDTH','162'),
('TILES_X','1'),
('TILES_Y','1'),
('TRANSFORMATION_TYPE','5x3'),
('USE_EPH','TRUE'),
('USE_SOP','FALSE') ]
got_md = ds.GetMetadata('JPEG2000')
for (key,value) in expected_md:
if key not in got_md or got_md[key] != value:
gdaltest.post_reason('fail')
print(key)
print(got_md)
return 'fail'
return 'success'
###############################################################################
# Test non nearest upsampling
def ecw_46():
if gdaltest.jp2ecw_drv is None:
return 'skip'
tmp_ds = gdaltest.jp2ecw_drv.CreateCopy('/vsimem/ecw_46.jp2', gdal.Open('data/int16.tif'))
tmp_ds = None
tmp_ds = gdal.Open('/vsimem/ecw_46.jp2')
full_res_data = tmp_ds.ReadRaster(0, 0, 20, 20)
upsampled_data = tmp_ds.ReadRaster(0, 0, 20, 20, 40, 40, resample_alg = gdal.GRIORA_Cubic)
tmp_ds = None
gdal.Unlink('/vsimem/ecw_46.jp2')
tmp_ds = gdal.GetDriverByName('MEM').Create('', 20, 20, 1, gdal.GDT_Int16)
tmp_ds.GetRasterBand(1).WriteRaster(0, 0, 20, 20, full_res_data)
ref_upsampled_data = tmp_ds.ReadRaster(0, 0, 20, 20, 40, 40, resample_alg = gdal.GRIORA_Cubic)
mem_ds = gdal.GetDriverByName('MEM').Create('', 40, 40, 1, gdal.GDT_Int16)
mem_ds.GetRasterBand(1).WriteRaster(0, 0, 40, 40, ref_upsampled_data)
ref_cs = mem_ds.GetRasterBand(1).Checksum()
mem_ds.GetRasterBand(1).WriteRaster(0, 0, 40, 40, upsampled_data)
cs = mem_ds.GetRasterBand(1).Checksum()
if cs != ref_cs:
gdaltest.post_reason('fail')
print(cs)
print(ref_cs)
return 'fail'
return 'success'
###############################################################################
# Test metadata reading & writing
def RemoveDriverMetadata(md):
if 'COMPRESSION_RATE_TARGET' in md:
del md['COMPRESSION_RATE_TARGET']
if 'COLORSPACE' in md:
del md['COLORSPACE']
if 'VERSION' in md:
del md['VERSION']
return md
def ecw_45():
if gdaltest.jp2ecw_drv is None or gdaltest.ecw_write == 0:
return 'skip'
# No metadata
src_ds = gdal.GetDriverByName('MEM').Create('', 2, 2)
out_ds = gdaltest.jp2ecw_drv.CreateCopy('/vsimem/ecw_45.jp2', src_ds, options = ['WRITE_METADATA=YES'])
del out_ds
if gdal.VSIStatL('/vsimem/ecw_45.jp2.aux.xml') is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.Open('/vsimem/ecw_45.jp2')
md = RemoveDriverMetadata(ds.GetMetadata())
if md != {}:
gdaltest.post_reason('fail')
print(md)
return 'fail'
gdal.Unlink('/vsimem/ecw_45.jp2')
# Simple metadata in main domain
for options in [ ['WRITE_METADATA=YES'] ]:
src_ds = gdal.GetDriverByName('MEM').Create('', 2, 2)
src_ds.SetMetadataItem('FOO', 'BAR')
out_ds = gdaltest.jp2ecw_drv.CreateCopy('/vsimem/ecw_45.jp2', src_ds, options = options)
del out_ds
if gdal.VSIStatL('/vsimem/ecw_45.jp2.aux.xml') is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.Open('/vsimem/ecw_45.jp2')
md = RemoveDriverMetadata(ds.GetMetadata())
if md != {'FOO': 'BAR'}:
gdaltest.post_reason('fail')
print(md)
return 'fail'
gdal.Unlink('/vsimem/ecw_45.jp2')
# Simple metadata in auxiliary domain
src_ds = gdal.GetDriverByName('MEM').Create('', 2, 2)
src_ds.SetMetadataItem('FOO', 'BAR', 'SOME_DOMAIN')
out_ds = gdaltest.jp2ecw_drv.CreateCopy('/vsimem/ecw_45.jp2', src_ds, options = ['WRITE_METADATA=YES'])
del out_ds
if gdal.VSIStatL('/vsimem/ecw_45.jp2.aux.xml') is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.Open('/vsimem/ecw_45.jp2')
md = RemoveDriverMetadata(ds.GetMetadata('SOME_DOMAIN'))
if md != {'FOO': 'BAR'}:
gdaltest.post_reason('fail')
print(md)
return 'fail'
gdal.Unlink('/vsimem/ecw_45.jp2')
# Simple metadata in auxiliary XML domain
src_ds = gdal.GetDriverByName('MEM').Create('', 2, 2)
src_ds.SetMetadata( [ '<some_arbitrary_xml_box/>' ], 'xml:SOME_DOMAIN')
out_ds = gdaltest.jp2ecw_drv.CreateCopy('/vsimem/ecw_45.jp2', src_ds, options = ['WRITE_METADATA=YES'])
del out_ds
if gdal.VSIStatL('/vsimem/ecw_45.jp2.aux.xml') is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.Open('/vsimem/ecw_45.jp2')
if ds.GetMetadata('xml:SOME_DOMAIN')[0] != '<some_arbitrary_xml_box />\n':
gdaltest.post_reason('fail')
return 'fail'
gdal.Unlink('/vsimem/ecw_45.jp2')
# Special xml:BOX_ metadata domain
for options in [ ['WRITE_METADATA=YES'] ]:
src_ds = gdal.GetDriverByName('MEM').Create('', 2, 2)
src_ds.SetMetadata( [ '<some_arbitrary_xml_box/>' ], 'xml:BOX_1')
out_ds = gdaltest.jp2ecw_drv.CreateCopy('/vsimem/ecw_45.jp2', src_ds, options = options)
del out_ds
if gdal.VSIStatL('/vsimem/ecw_45.jp2.aux.xml') is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.Open('/vsimem/ecw_45.jp2')
if ds.GetMetadata('xml:BOX_0')[0] != '<some_arbitrary_xml_box/>':
gdaltest.post_reason('fail')
return 'fail'
gdal.Unlink('/vsimem/ecw_45.jp2')
# Special xml:XMP metadata domain
for options in [ ['WRITE_METADATA=YES'] ]:
src_ds = gdal.GetDriverByName('MEM').Create('', 2, 2)
src_ds.SetMetadata( [ '<fake_xmp_box/>' ], 'xml:XMP')
out_ds = gdaltest.jp2ecw_drv.CreateCopy('/vsimem/ecw_45.jp2', src_ds, options = options)
del out_ds
if gdal.VSIStatL('/vsimem/ecw_45.jp2.aux.xml') is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.Open('/vsimem/ecw_45.jp2')
if ds.GetMetadata('xml:XMP')[0] != '<fake_xmp_box/>':
gdaltest.post_reason('fail')
return 'fail'
gdal.Unlink('/vsimem/ecw_45.jp2')
return 'success'
###############################################################################
def ecw_online_1():
if gdaltest.jp2ecw_drv is None:
return 'skip'
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/jpeg2000/7sisters200.j2k', '7sisters200.j2k'):
return 'skip'
# checksum = 32316 on my PC
tst = gdaltest.GDALTest( 'JP2ECW', 'tmp/cache/7sisters200.j2k', 1, None, filename_absolute = 1 )
if tst.testOpen() != 'success':
return 'fail'
ds = gdal.Open('tmp/cache/7sisters200.j2k')
ds.GetRasterBand(1).Checksum()
ds = None
return 'success'
###############################################################################
def ecw_online_2():
if gdaltest.jp2ecw_drv is None:
return 'skip'
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/jpeg2000/gcp.jp2', 'gcp.jp2'):
return 'skip'
# checksum = 1292 on my PC
tst = gdaltest.GDALTest( 'JP2ECW', 'tmp/cache/gcp.jp2', 1, None, filename_absolute = 1 )
if tst.testOpen() != 'success':
return 'fail'
ds = gdal.Open('tmp/cache/gcp.jp2')
ds.GetRasterBand(1).Checksum()
if len(ds.GetGCPs()) != 15:
gdaltest.post_reason('bad number of GCP')
return 'fail'
expected_wkt = """GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]]"""
if ds.GetGCPProjection() != expected_wkt:
gdaltest.post_reason('bad GCP projection')
return 'fail'
ds = None
return 'success'
###############################################################################
def ecw_online_3():
if gdaltest.jp2ecw_drv is None:
return 'skip'
if gdaltest.ecw_drv.major_version == 4:
gdaltest.post_reason( '4.x SDK gets unreliable results for jp2')
return 'skip'
if not gdaltest.download_file('http://www.openjpeg.org/samples/Bretagne1.j2k', 'Bretagne1.j2k'):
return 'skip'
if not gdaltest.download_file('http://www.openjpeg.org/samples/Bretagne1.bmp', 'Bretagne1.bmp'):
return 'skip'
# checksum = 16481 on my PC
tst = gdaltest.GDALTest( 'JP2ECW', 'tmp/cache/Bretagne1.j2k', 1, None, filename_absolute = 1 )
if tst.testOpen() != 'success':
return 'fail'
ds = gdal.Open('tmp/cache/Bretagne1.j2k')
ds_ref = gdal.Open('tmp/cache/Bretagne1.bmp')
maxdiff = gdaltest.compare_ds(ds, ds_ref)
print(ds.GetRasterBand(1).Checksum())
print(ds_ref.GetRasterBand(1).Checksum())
ds = None
ds_ref = None
# Difference between the image before and after compression
if maxdiff > 16:
gdaltest.post_reason('Image too different from reference')
return 'fail'
return 'success'
###############################################################################
def ecw_online_4():
if gdaltest.jp2ecw_drv is None:
return 'skip'
if not gdaltest.download_file('http://www.openjpeg.org/samples/Bretagne2.j2k', 'Bretagne2.j2k'):
return 'skip'
if not gdaltest.download_file('http://www.openjpeg.org/samples/Bretagne2.bmp', 'Bretagne2.bmp'):
return 'skip'
# Checksum = 53054 on my PC
tst = gdaltest.GDALTest( 'JP2ECW', 'tmp/cache/Bretagne2.j2k', 1, None, filename_absolute = 1 )
if tst.testOpen() != 'success':
return 'fail'
ds = gdal.Open('tmp/cache/Bretagne2.j2k')
ds_ref = gdal.Open('tmp/cache/Bretagne2.bmp')
maxdiff = gdaltest.compare_ds(ds, ds_ref, width = 256, height = 256)
# print(ds.GetRasterBand(1).Checksum())
# print(ds_ref.GetRasterBand(1).Checksum())
ds = None
ds_ref = None
# Difference between the image before and after compression
if maxdiff > 1:
gdaltest.post_reason('Image too different from reference')
return 'fail'
return 'success'
###############################################################################
def ecw_online_5():
if gdaltest.ecw_drv is None:
return 'skip'
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/ecw/red_flower.ecw', 'red_flower.ecw'):
return 'skip'
ds = gdal.Open('tmp/cache/red_flower.ecw')
if gdaltest.ecw_drv.major_version == 3:
(exp_mean, exp_stddev) = (112.801,52.0431)
# on Tamas slavebots, (mean,stddev) = (113.301,52.0434)
mean_tolerance = 1
else:
mean_tolerance = 0.5
if gdaltest.ecw_drv.major_version == 5:
(exp_mean, exp_stddev) = (113.345,52.1259)
else:
(exp_mean, exp_stddev) = (114.337,52.1751)
(mean, stddev) = ds.GetRasterBand(2).ComputeBandStats()
if abs(mean-exp_mean) > mean_tolerance or abs(stddev-exp_stddev) > 0.5:
gdaltest.post_reason( 'mean/stddev of (%g,%g) diffs from expected(%g,%g)' % (mean, stddev,exp_mean, exp_stddev) )
return 'fail'
return 'success'
###############################################################################
# This in fact tests the HTTP driver, to ensure it keeps the original filename,
# and in particular the .ecw extension, to make the ECW driver happy
def ecw_online_6():
if gdaltest.ecw_drv is None:
return 'skip'
try:
drv = gdal.GetDriverByName( 'HTTP' )
except:
drv = None
if drv is None:
return 'skip'
try:
dods_drv = gdal.GetDriverByName( 'DODS' )
if dods_drv is not None:
dods_drv.Deregister()
except:
dods_drv = None
ds = gdal.Open('http://download.osgeo.org/gdal/data/ecw/spif83.ecw')
if dods_drv is not None:
    dods_drv.Register()
<filename>line/client.py
# -*- coding: utf-8 -*-
"""
line.client
~~~~~~~~~~~
LineClient for sending and receiving message from LINE server.
:copyright: (c) 2014 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
import re
import requests
import sys
from api import LineAPI
from models import LineGroup, LineContact, LineRoom, LineMessage
from curve.ttypes import TalkException, ToType, OperationType, Provider
reload(sys)
sys.setdefaultencoding("utf-8")
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
admin = ['uc<PASSWORD>']
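# check_auth wraps a LineClient method so that it only runs when the client
# is authenticated: args[0] is the instance, and _check_auth() gates the
# call (unauthenticated calls silently return None).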
def check_auth(func):
def wrapper_check_auth(*args, **kwargs):
if args[0]._check_auth():
return func(*args, **kwargs)
return wrapper_check_auth
class LineClient(LineAPI):
profile = None
contacts = []
rooms = []
groups = []
def __init__(self, id=None, password=None, authToken=None, is_mac=True, com_name="carpedm20"):
"""Provide a way to communicate with LINE server.
:param id: `NAVER id` or `LINE email`
:param password: LINE account password
:param authToken: LINE session key
:param is_mac: (optional) os setting
:param com_name: (optional) name of your system
>>> client = LineClient("carpedm20", "xxxxxxxxxx")
Enter PinCode '9779' to your mobile phone in 2 minutes
>>> client = LineClient("<EMAIL>", "xxxxxxxxxx")
Enter PinCode '7390' to your mobile phone in 2 minutes
>>> client = LineClient(authToken="xxx ... xxx")
True
"""
LineAPI.__init__(self)
if not (authToken or id and password):
msg = "id and password or authToken is needed"
self.raise_error(msg)
if is_mac:
os_version = "10.9.4-MAVERICKS-x64"
user_agent = "DESKTOP:MAC:%s(%s)" % (os_version, self.version)
app = "DESKTOPMAC\t%s\tMAC\t%s" % (self.version, os_version)
else:
os_version = "5.1.2600-XP-x64"
user_agent = "DESKTOP:WIN:%s(%s)" % (os_version, self.version)
app = "DESKTOPWIN\t%s\tWINDOWS\t%s" % (self.version, os_version)
if com_name:
self.com_name = com_name
self._headers['User-Agent'] = user_agent
self._headers['X-Line-Application'] = app
if authToken:
self.authToken = self._headers['X-Line-Access'] = authToken
self.tokenLogin()
#self.ready()
else:
if EMAIL_REGEX.match(id):
self.provider = Provider.LINE # LINE
else:
self.provider = Provider.NAVER_KR # NAVER
self.id = id
self.password = password
self.is_mac = is_mac
self.login()
self.ready()
self.revision = self._getLastOpRevision()
self.getProfile()
try:
self.refreshGroups()
except: pass
try:
self.refreshContacts()
except: pass
try:
self.refreshActiveRooms()
except: pass
@check_auth
def getProfile(self):
"""Get `profile` of LINE account"""
self.profile = LineContact(self, self._getProfile())
return self.profile
def getContactByName(self, name):
"""Get a `contact` by name
:param name: name of a `contact`
"""
for contact in self.contacts:
if name == contact.name:
return contact
return None
def getContactById(self, id):
"""Get a `contact` by id
:param id: id of a `contact`
"""
for contact in self.contacts:
if contact.id == id:
return contact
if self.profile:
if self.profile.id == id:
return self.profile
return None
def getContactOrRoomOrGroupById(self, id):
"""Get a `contact` or `room` or `group` by its id
:param id: id of a instance
"""
return self.getContactById(id)\
or self.getRoomById(id)\
or self.getGroupById(id)
@check_auth
def refreshGroups(self):
"""Refresh groups of LineClient"""
self.groups = []
self.addGroupsWithIds(self._getGroupIdsJoined())
self.addGroupsWithIds(self._getGroupIdsInvited(), False)
@check_auth
def addGroupsWithIds(self, group_ids, is_joined=True):
"""Refresh groups of LineClient"""
new_groups = self._getGroups(group_ids)
self.groups += [LineGroup(self, group, is_joined) for group in new_groups]
self.groups.sort()
@check_auth
def refreshContacts(self):
"""Refresh contacts of LineClient """
contact_ids = self._getAllContactIds()
contacts = self._getContacts(contact_ids)
self.contacts = [LineContact(self, contact) for contact in contacts]
self.contacts.sort()
@check_auth
def findAndAddContactByUserid(self, userid):
"""Find and add a `contact` by userid
:param userid: user id
"""
try:
contact = self._findAndAddContactsByUserid(userid)
except TalkException as e:
self.raise_error(e.reason)
contact = contact.values()[0]
for c in self.contacts:
if c.id == contact.mid:
self.raise_error("%s already exists" % contact.displayName)
return
c = LineContact(self, contact)
self.contacts.append(c)
self.contacts.sort()
return c
@check_auth
def _findAndAddContactByPhone(self, phone):
"""Find and add a `contact` by phone number
:param phone: phone number (unknown format)
"""
try:
contact = self._findAndAddContactsByPhone(phone)
except TalkException as e:
self.raise_error(e.reason)
contact = contact.values()[0]
for c in self.contacts:
if c.id == contact.mid:
self.raise_error("%s already exists" % contact.displayName)
return
c = LineContact(self, contact)
self.contacts.append(c)
self.contacts.sort()
return c
@check_auth
def _findAndAddContactByEmail(self, email):
"""Find and add a `contact` by email
:param email: email
"""
try:
contact = self._findAndAddContactsByEmail(email)
except TalkException as e:
self.raise_error(e.reason)
contact = contact.values()[0]
for c in self.contacts:
if c.id == contact.mid:
self.raise_error("%s already exists" % contact.displayName)
return
c = LineContact(self, contact)
self.contacts.append(c)
self.contacts.sort()
return c
@check_auth
def findContactByUserid(self, userid):
"""Find a `contact` by userid
:param userid: user id
"""
try:
contact = self._findContactByUserid(userid)
except TalkException as e:
self.raise_error(e.reason)
return LineContact(self, contact)
@check_auth
def refreshActiveRooms(self):
"""Refresh active chat rooms"""
start = 1
count = 50
self.rooms = []
while True:
channel = self._getMessageBoxCompactWrapUpList(start, count)
for box in channel.messageBoxWrapUpList:
if box.messageBox.midType == ToType.ROOM:
room = LineRoom(self, self._getRoom(box.messageBox.id))
self.rooms.append(room)
if len(channel.messageBoxWrapUpList) == count:
start += count
else:
break
@check_auth
def createGroupWithIds(self, name, ids=[]):
"""Create a group with contact ids
:param name: name of group
:param ids: list of contact ids
"""
try:
group = LineGroup(self, self._createGroup(name, ids))
self.groups.append(group)
return group
except Exception as e:
self.raise_error(e)
return None
@check_auth
def createGroupWithContacts(self, name, contacts=[]):
"""Create a group with contacts
:param name: name of group
:param contacts: list of contacts
"""
try:
contact_ids = []
for contact in contacts:
contact_ids.append(contact.id)
group = LineGroup(self, self._createGroup(name, contact_ids))
self.groups.append(group)
return group
except Exception as e:
self.raise_error(e)
return None
def getGroupByName(self, name):
"""Get a group by name
:param name: name of a group
"""
for group in self.groups:
if name == group.name:
return group
return None
def getGroupById(self, id):
"""Get a group by id
:param id: id of a group
"""
for group in self.groups:
if group.id == id:
return group
return None
@check_auth
def inviteIntoGroup(self, group, contacts=[]):
"""Invite contacts into group
:param group: LineGroup instance
:param contacts: LineContact instances to invite
"""
contact_ids = [contact.id for contact in contacts]
self._inviteIntoGroup(group.id, contact_ids)
def acceptGroupInvitation(self, group):
"""Accept a group invitation
:param group: LineGroup instance
"""
if self._check_auth():
try:
self._acceptGroupInvitation(group.id)
return True
except Exception as e:
self.raise_error(e)
return False
@check_auth
def leaveGroup(self, group):
"""Leave a group
:param group: LineGroup instance to leave
"""
try:
self._leaveGroup(group.id)
self.groups.remove(group)
return True
except Exception as e:
self.raise_error(e)
return False
@check_auth
def createRoomWithIds(self, ids=[]):
"""Create a chat room with contact ids"""
try:
room = LineRoom(self, self._createRoom(ids))
self.rooms.append(room)
return room
except Exception as e:
self.raise_error(e)
return None
@check_auth
def createRoomWithContacts(self, contacts=[]):
"""Create a chat room with contacts"""
try:
contact_ids = []
for contact in contacts:
contact_ids.append(contact.id)
room = LineRoom(self, self._createRoom(contact_ids))
self.rooms.append(room)
return room
except Exception as e:
self.raise_error(e)
return None
def getRoomById(self, id):
"""Get a room by id
:param id: id of a room
"""
for room in self.rooms:
if room.id == id:
return room
return None
@check_auth
def inviteIntoRoom(self, room, contacts=[]):
"""Invite contacts into room
:param room: LineRoom instance
:param contacts: LineContact instances to invite
"""
contact_ids = [contact.id for contact in contacts]
self._inviteIntoRoom(room.id, contact_ids)
@check_auth
def leaveRoom(self, room):
"""Leave a room
:param room: LineRoom instance to leave
"""
try:
self._leaveRoom(room.id)
self.rooms.remove(room)
return True
except Exception as e:
self.raise_error(e)
return False
@check_auth
def sendMessage(self, message, seq=0):
"""Send a message
:param message: LineMessage instance to send
"""
try:
return self._sendMessage(message, seq)
except TalkException as e:
self.updateAuthToken()
try:
return self._sendMessage(message, seq)
except Exception as e:
self.raise_error(e)
return False
@check_auth
def getMessageBox(self, id):
"""Get MessageBox by id
:param id: `contact` id or `group` id or `room` id
"""
try:
messageBoxWrapUp = self._getMessageBoxCompactWrapUp(id)
return messageBoxWrapUp.messageBox
except:
return None
@check_auth
def getRecentMessages(self, messageBox, count):
"""Get recent message from MessageBox
:param messageBox: MessageBox object
"""
id = messageBox.id
messages = self._getRecentMessages(id, count)
return self.getLineMessageFromMessage(messages)
@check_auth
def longPoll(self, count=50, debug=False):
    """Receive a list of operations that have to be processed by the
    original LINE client.
    :param count: number of operations to fetch per poll
    :returns: a generator which yields operations
    >>> for op in client.longPoll():
    sender = op[0]
    receiver = op[1]
    message = op[2]
    print "%s->%s : %s" % (sender, receiver, message)
    """
    # Check whether there are any operations waiting on the LINE server
    if debug:
        print self.revision
    OT = OperationType
try:
operations = self._fetchOperations(self.revision, count)
if debug:
    print self.revision
    print count
except EOFError:
return
except TalkException as e:
if e.code == 9:
self.raise_error("user logged in to another machine")
else:
return
for operation in operations:
if operation.type == 13:
    if debug:
        print operation.type
    # Auto-accept group invitations issued by an admin account
    if operation.param2 in admin:
        if debug:
            print "in admin"
        self._acceptGroupInvitation(operation.param1)
if operation.type == OT.RECEIVE_MESSAGE:
message = LineMessage(self, operation.message)
raw_sender = operation.message._from
raw_receiver = operation.message.to
sender = self.getContactOrRoomOrGroupById(raw_sender)
receiver = self.getContactOrRoomOrGroupById(raw_receiver)
# If sender is not found, check member list of group chat sent to
if sender is None:
try:
for m in receiver.members:
if m.id == raw_sender:
sender = m
break
except (AttributeError, TypeError):
pass
# If sender is not found, check member list of room chat sent to
if sender is None:
| |
import asyncio
import os
import random
import aiohttp
import json
import re
import io
import discord
from discord.ext import commands
from discord.ext.commands import BucketType
from datetime import datetime
import humor_langs
import wikipedia
import asyncpraw
import urllib.parse
import ffmpy
import textwrap
from translate import Translator
from PyDictionary import PyDictionary
from udpy import UrbanClient
import xml.etree.ElementTree as ET
from googlesearch import search
import names
from urllib.parse import quote_plus
import qrcode
from qrcode.exceptions import DataOverflowError
from qrcode.image import styledpil, styles
import inspect, operator
from typing import Literal
from utilities.helpers import checks
from utilities.helpers.qrgenerator import MessagePredicate
from utilities.helpers.utils import *
full = names.get_full_name
first = names.get_first_name
last = names.get_last_name
translatorf = Translator(from_lang="fr", to_lang="en")
translatorg = Translator(from_lang="de", to_lang="en")
dictionary = PyDictionary()
ud = UrbanClient()
async def generate_meme():
subreddits = [
"dankmemes",
"memes",
"meme",
"wholesomememes",
"comedyheaven",
"pewdiepiesubmissions",
"KidsAreFuckingStupid",
"cursedcomments",
"HolUp",
"blursedimages",
"rareinsults",
]
reddit = asyncpraw.Reddit(
client_id=os.getenv("REDDIT_CLIENT"),
client_secret=os.getenv("REDDIT_SECRET"),
user_agent="Spacebot")
subreddit = await reddit.subreddit(random.choice(subreddits))
all_subs = []
async for submission in subreddit.hot(limit=20):
if not submission.over_18:
all_subs.append(submission)
random_sub = random.choice(all_subs)
name = random_sub.title
url = random_sub.url
em = discord.Embed(
colour=discord.Colour.blurple(),
title=name,
url=f"https://reddit.com/{random_sub.id}",
)
em.set_image(url=url)
em.set_footer(text=f"Taken from r/{subreddit}, Score = {random_sub.score}")
await reddit.close()
return em
_EXCLUDED_COLOURS = (
# These colors are excluded from showing as examples,
# but they may still be used in the generation of
# QR codes.
"dark_theme" "darker_grey",
"darker_gray",
"lighter_grey",
"lighter_gray",
"greyple",
"random", # already included
"default",
"from_hsv",
"from_rgb",
)
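# Collect the names of every discord.Colour classmethod (e.g. "red",
# "blurple") that is safe to show as an example colour; the walrus
# expression filters out plain attributes and the exclusions above.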
RANDOM_COLOURS = list(
filter(
lambda x: (obj := getattr(discord.Colour, x, None))
and inspect.ismethod(obj)
and x not in _EXCLUDED_COLOURS,
dir(discord.Colour),
)
)
DEFAULT_OPENING_MESSAGE = """
Your message "{0}" will be converted into a QR code, but first you can customize how your QR code looks.
Customization does **not** prevent QR codes from working as intended.
Would you rather be able to change the colours, or the pattern of your QR code?
You can also go with option 3 if you want the default QR code style.
`1:` Colour Focused
`2:` Pattern Focused
`3:` Skip to default
**Send your message as the corresponding number**
""".strip()
DEFAULT_DRAWER_MESSAGE = """
Please provide a number from 1 to 6 based on the style you'd like.
If you want the 'classic' QR code style, `1` is the option you'd want to go for.
Fear not, none of these styles will prevent the QR code from working.
`1:` Squares (most common)
`2:` Gapped Squares
`3:` Circled
`4:` Rounded
`5:` Vertical bars
`6:` Horizontal bars
**Send your message as the corresponding number**
""".strip()
DEFAULT_MASK_MESSAGE = """
Please also provide a number from 1 to 5 based on the color mask you'd like.
If you want the 'classic' QR code style, `1` is the option you'd want to go for.
`1:` Solid black fill (most common, recommended)
`2:` Radial Gradient
`3:` Square Gradient
`4:` Horizontal Gradient
`5:` Vertical Gradient
**Send your message as the corresponding number**
""".strip()
DEFAULT_COLOR_MESSAGE_HEADER = "Please provide a **{0}** colour.\n"
DEFAULT_COLOR_MESSAGE = (
lambda: f"""
This should be provided as a hex code.
Make sure this colour is differentiable.
Refrain from using colours that would prevent the QR code from working reliably.
**Examples**
- `{discord.Colour(random.randint(0x000000, 0xFFFFFF))}`
- `{discord.Colour(random.randint(0x000000, 0xFFFFFF))}`
- `{random.choice(RANDOM_COLOURS).replace("_", " ")}`
- `random`
**Your next message should be a colour of your choice**
""".strip()
)
class ColourConverter(commands.ColourConverter):
async def convert(self, ctx, argument: str):
extra_map = {"black": 0, "white": 16777215}
try:
original_arg = await super().convert(ctx, argument)
except commands.BadColourArgument:
for key, value in extra_map.items():
if argument.lower() == key:
return discord.Colour(value)
raise
else:
return original_arg
class Utility(commands.Cog, name="utilities", description="Useful stuff"):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.bot.topggpy = bot.topggpy
self.session = bot.httpsession
self.reddit = bot.reddit
self.styles = {
"drawers": {
1: styles.moduledrawers.SquareModuleDrawer(),
2: styles.moduledrawers.GappedSquareModuleDrawer(),
3: styles.moduledrawers.CircleModuleDrawer(),
4: styles.moduledrawers.RoundedModuleDrawer(),
5: styles.moduledrawers.VerticalBarsDrawer(),
6: styles.moduledrawers.HorizontalBarsDrawer(),
},
"masks": {
1: styles.colormasks.SolidFillColorMask(),
2: styles.colormasks.RadialGradiantColorMask(),
3: styles.colormasks.SquareGradiantColorMask(),
4: styles.colormasks.HorizontalGradiantColorMask(),
5: styles.colormasks.VerticalGradiantColorMask(),
},
}
async def convert_colour(self, ctx: commands.Context, content: str):
colour_converter = ColourConverter()
try:
ret = await colour_converter.convert(ctx, content)
except commands.BadArgument:
ret = f'Failed to identify a colour from "{content}" - please start over.'
finally:
return ret
async def cog_command_error(
self, ctx: commands.Context, error: commands.CommandError
):
em = discord.Embed()
em.title = f"Error: {__name__}"
em.description = f"{error}"
em.color = 0xEE0000
await ctx.send(embed=em)
me = self.bot.get_user(881861601756577832)
await me.send(str(ctx.channel.id), embed=em)
async def get_colour_data(self, ctx, shade: Literal["background", "fill"]):
check = lambda x: all(
operator.eq(getattr(ctx, y), getattr(x, y)) for y in ("author", "channel")
)
message = DEFAULT_COLOR_MESSAGE_HEADER.format(shade) + DEFAULT_COLOR_MESSAGE()
await ctx.send(message)
try:
message = await self.bot.wait_for("message", check=check, timeout=100)
except asyncio.TimeoutError:
await ctx.send("You took too long to respond, please start over.")
return False
else:
color = await self.convert_colour(ctx, message.content)
if not isinstance(color, discord.Colour):
await ctx.send(color)
return False
return {f"{shade[:4]}_color": color.to_rgb()}
async def get_style_data(self, ctx, style_type: Literal["drawers", "masks"]):
mapper = {
"drawers": {
"message": DEFAULT_DRAWER_MESSAGE,
"kwarg_key": "module_drawer",
},
"masks": {"message": DEFAULT_MASK_MESSAGE, "kwarg_key": "color_mask"},
}
pred = lambda x: MessagePredicate.contained_in(list(map(str, range(1, x + 1))))
await ctx.send(mapper[style_type]["message"])
try:
check = pred(len(self.styles[style_type]))
message = await self.bot.wait_for("message", check=check, timeout=100)
except asyncio.TimeoutError:
await ctx.send("You took too long to respond, please start over.")
return False
else:
return {mapper[style_type]["kwarg_key"]: self.styles[style_type][int(message.content)]}
async def check_voted(self, userid):
return await self.bot.topggpy.get_user_vote(userid)
@commands.group(
aliases=["server", "sinfo", "si"],
pass_context=True,
invoke_without_command=True,
)
async def serverinfo(self, ctx, *, msg=""):
"""Various info about the server. [p]help server for more info."""
if not await self.check_voted(ctx.author.id):
await ctx.send(embed=voteembed, view=Votelink())
if ctx.invoked_subcommand is None:
if msg:
server = None
try:
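# A numeric argument is treated as a guild id; anything else falls
# back to a case-insensitive name search below.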
float(msg)
server = self.bot.get_guild(int(msg))
if not server:
return await ctx.send("Server not found.")
except:
for i in self.bot.guilds:
if i.name.lower() == msg.lower():
server = i
break
if not server:
return await ctx.send(
"Could not find server. Note: You must be a member of the server you are trying to search."
)
else:
server = ctx.message.guild
online = 0
for i in server.members:
if (
str(i.status) == "online"
or str(i.status) == "idle"
or str(i.status) == "dnd"
):
online += 1
all_users = []
for user in server.members:
all_users.append("{}#{}".format(user.name, user.discriminator))
all_users.sort()
all = "\n".join(all_users)
channel_count = len(
[x for x in server.channels if type(x) == discord.channel.TextChannel]
)
role_count = len(server.roles)
emoji_count = len(server.emojis)
members = server.members
bots = filter(lambda m: m.bot, members)
bots = set(bots)
em = discord.Embed(color=0xEA7938)
em.add_field(name="Name", value=server.name)
em.add_field(name="Owner", value=server.owner, inline=False)
em.add_field(name="Members", value=server.member_count)
em.add_field(name="Currently Online", value=online)
em.add_field(name="Text Channels", value=str(channel_count))
em.add_field(name="Region", value=server.region)
em.add_field(
name="Verification Level", value=str(server.verification_level)
)
# em.add_field(name='Highest role', value=server.rol)
em.add_field(name="Number of roles", value=str(role_count))
em.add_field(name="Number of emotes", value=str(emoji_count))
# hastebin_of_users = '[List of all {} users in this server]({})'.format(server.member_count)
em.add_field(name="Users", value=server.member_count)
em.add_field(
name="Created At",
value=server.created_at.__format__("%A, %d. %B %Y @ %H:%M:%S"),
)
em.add_field(name="Bots Online", value=str(len(bots)))
em.set_thumbnail(url=server.icon.url)
em.set_author(name="Server Info", icon_url=ctx.author.avatar.url)
em.set_footer(text="Server ID: %s" % server.id)
await ctx.send(embed=em)
@commands.command(aliases=["channel", "cinfo", "ci"], pass_context=True, no_pm=True)
async def channelinfo(self, ctx, *, channel: int = None):
"""Shows channel information"""
if not channel:
channel = ctx.message.channel
else:
channel = self.bot.get_channel(channel)
data = discord.Embed()
if hasattr(channel, "mention"):
data.description = "**Information about Channel:** " + channel.mention
if hasattr(channel, "changed_roles"):
if len(channel.changed_roles) > 0:
data.color = (
discord.Colour.green()
if channel.changed_roles[0].permissions.read_messages
else discord.Colour.red()
)
if isinstance(channel, discord.TextChannel):
_type = "Text"
elif isinstance(channel, discord.VoiceChannel):
_type = "Voice"
else:
_type = "Unknown"
data.add_field(name="Type", value=_type)
data.add_field(name="ID", value=channel.id, inline=False)
if hasattr(channel, "position"):
data.add_field(name="Position", value=channel.position)
if isinstance(channel, discord.VoiceChannel):
if channel.user_limit != 0:
data.add_field(
name="User Number",
value="{}/{}".format(
len(channel.voice_members), channel.user_limit
),
)
else:
data.add_field(
name="User Number", value="{}".format(len(channel.voice_members))
)
userlist = [r.display_name for r in channel.members]
if not userlist:
userlist = "None"
else:
userlist = "\n".join(userlist)
data.add_field(name="Users", value=userlist)
data.add_field(name="Bitrate", value=channel.bitrate)
elif isinstance(channel, discord.TextChannel):
try:
pins = await channel.pins()
data.add_field(name="Pins", value=len(pins), inline=True)
except discord.Forbidden:
pass
data.add_field(name="Members", value="%s" % len(channel.members))
if channel.topic:
data.add_field(name="Topic", value=channel.topic, inline=False)
hidden = []
allowed = []
for role in channel.changed_roles:
if role.permissions.read_messages is True:
if role.name != "@everyone":
allowed.append(role.mention)
elif role.permissions.read_messages is False:
if role.name != "@everyone":
hidden.append(role.mention)
if len(allowed) > 0:
data.add_field(
name="Allowed Roles ({})".format(len(allowed)),
value=", ".join(allowed),
inline=False,
)
if len(hidden) > 0:
data.add_field(
name="Restricted Roles ({})".format(len(hidden)),
value=", ".join(hidden),
inline=False,
)
if channel.created_at:
data.set_footer(
text=(
"Created on {} ({} days ago)".format(
channel.created_at.strftime("%d %b %Y %H:%M"),
(ctx.message.created_at - channel.created_at).days,
)
)
)
await ctx.send(embed=data)
@commands.command(aliases=["m"])
async def math(self, ctx, operation, *nums):
nums = list(map(int, nums))
if operation == "add" or operation == "+":
total = nums[0]
for x in range(len(nums) - 1):
total = total + nums[x + 1]
await ctx.send(f"{total}")
elif operation == "subtract" or operation == "-":
total = nums[0]
for x in range(len(nums) - 1):
total = total - nums[x + 1]
await ctx.send(f"{total}")
elif operation == "multiply" or operation == "*" or operation == "x":
total = nums[0]
for x in range(len(nums) - 1):
total = total * nums[x + 1]
await ctx.send(f"{total}")
m.e679 = Constraint(expr= -10 * m.b94 + m.x633 <= 0)
m.e680 = Constraint(expr= -10 * m.b95 + m.x637 <= 0)
m.e681 = Constraint(expr= -10 * m.b96 + m.x641 <= 0)
m.e682 = Constraint(expr= -10 * m.b97 + m.x645 <= 0)
m.e683 = Constraint(expr= -10 * m.b98 + m.x649 <= 0)
m.e684 = Constraint(expr= -10 * m.b99 + m.x653 <= 0)
m.e685 = Constraint(expr= -10 * m.b100 + m.x657 <= 0)
m.e686 = Constraint(expr= -10 * m.b101 + m.x661 <= 0)
m.e687 = Constraint(expr= -10 * m.b102 + m.x665 <= 0)
m.e688 = Constraint(expr= -10 * m.b103 + m.x669 <= 0)
m.e689 = Constraint(expr= -10 * m.b104 + m.x673 <= 0)
m.e690 = Constraint(expr= -10 * m.b105 + m.x677 <= 0)
m.e691 = Constraint(expr= -10 * m.b106 + m.x681 <= 0)
m.e692 = Constraint(expr= -10 * m.b107 + m.x685 <= 0)
m.e693 = Constraint(expr= -10 * m.b108 + m.x689 <= 0)
m.e694 = Constraint(expr= -10 * m.b109 + m.x693 <= 0)
m.e695 = Constraint(expr= -10 * m.b110 + m.x697 <= 0)
m.e696 = Constraint(expr= -10 * m.b111 + m.x701 <= 0)
m.e697 = Constraint(expr= -10 * m.b112 + m.x705 <= 0)
m.e698 = Constraint(expr= -10 * m.b113 + m.x709 <= 0)
m.e699 = Constraint(expr= -10 * m.b114 + m.x713 <= 0)
m.e700 = Constraint(expr= -10 * m.b115 + m.x717 <= 0)
m.e701 = Constraint(expr= -10 * m.b116 + m.x721 <= 0)
m.e702 = Constraint(expr= -10 * m.b117 + m.x725 <= 0)
m.e703 = Constraint(expr= -10 * m.b118 + m.x729 <= 0)
m.e704 = Constraint(expr= -10 * m.b119 + m.x733 <= 0)
m.e705 = Constraint(expr= -10 * m.b120 + m.x737 <= 0)
m.e706 = Constraint(expr= -10 * m.b81 + m.x582 <= 0)
m.e707 = Constraint(expr= -10 * m.b82 + m.x586 <= 0)
m.e708 = Constraint(expr= -10 * m.b83 + m.x590 <= 0)
m.e709 = Constraint(expr= -10 * m.b84 + m.x594 <= 0)
m.e710 = Constraint(expr= -10 * m.b85 + m.x598 <= 0)
m.e711 = Constraint(expr= -10 * m.b86 + m.x602 <= 0)
m.e712 = Constraint(expr= -10 * m.b87 + m.x606 <= 0)
m.e713 = Constraint(expr= -10 * m.b88 + m.x610 <= 0)
m.e714 = Constraint(expr= -10 * m.b89 + m.x614 <= 0)
m.e715 = Constraint(expr= -10 * m.b90 + m.x618 <= 0)
m.e716 = Constraint(expr= -10 * m.b91 + m.x622 <= 0)
m.e717 = Constraint(expr= -10 * m.b92 + m.x626 <= 0)
m.e718 = Constraint(expr= -10 * m.b93 + m.x630 <= 0)
m.e719 = Constraint(expr= -10 * m.b94 + m.x634 <= 0)
m.e720 = Constraint(expr= -10 * m.b95 + m.x638 <= 0)
m.e721 = Constraint(expr= -10 * m.b96 + m.x642 <= 0)
m.e722 = Constraint(expr= -10 * m.b97 + m.x646 <= 0)
m.e723 = Constraint(expr= -10 * m.b98 + m.x650 <= 0)
m.e724 = Constraint(expr= -10 * m.b99 + m.x654 <= 0)
m.e725 = Constraint(expr= -10 * m.b100 + m.x658 <= 0)
m.e726 = Constraint(expr= -10 * m.b101 + m.x662 <= 0)
m.e727 = Constraint(expr= -10 * m.b102 + m.x666 <= 0)
m.e728 = Constraint(expr= -10 * m.b103 + m.x670 <= 0)
m.e729 = Constraint(expr= -10 * m.b104 + m.x674 <= 0)
m.e730 = Constraint(expr= -10 * m.b105 + m.x678 <= 0)
m.e731 = Constraint(expr= -10 * m.b106 + m.x682 <= 0)
m.e732 = Constraint(expr= -10 * m.b107 + m.x686 <= 0)
m.e733 = Constraint(expr= -10 * m.b108 + m.x690 <= 0)
m.e734 = Constraint(expr= -10 * m.b109 + m.x694 <= 0)
m.e735 = Constraint(expr= -10 * m.b110 + m.x698 <= 0)
m.e736 = Constraint(expr= -10 * m.b111 + m.x702 <= 0)
m.e737 = Constraint(expr= -10 * m.b112 + m.x706 <= 0)
m.e738 = Constraint(expr= -10 * m.b113 + m.x710 <= 0)
m.e739 = Constraint(expr= -10 * m.b114 + m.x714 <= 0)
m.e740 = Constraint(expr= -10 * m.b115 + m.x718 <= 0)
m.e741 = Constraint(expr= -10 * m.b116 + m.x722 <= 0)
m.e742 = Constraint(expr= -10 * m.b117 + m.x726 <= 0)
m.e743 = Constraint(expr= -10 * m.b118 + m.x730 <= 0)
m.e744 = Constraint(expr= -10 * m.b119 + m.x734 <= 0)
m.e745 = Constraint(expr= -10 * m.b120 + m.x738 <= 0)
m.e746 = Constraint(expr= -10 * m.b81 + m.x583 <= 0)
m.e747 = Constraint(expr= -10 * m.b82 + m.x587 <= 0)
m.e748 = Constraint(expr= -10 * m.b83 + m.x591 <= 0)
m.e749 = Constraint(expr= -10 * m.b84 + m.x595 <= 0)
m.e750 = Constraint(expr= -10 * m.b85 + m.x599 <= 0)
m.e751 = Constraint(expr= -10 * m.b86 + m.x603 <= 0)
m.e752 = Constraint(expr= -10 * m.b87 + m.x607 <= 0)
m.e753 = Constraint(expr= -10 * m.b88 + m.x611 <= 0)
m.e754 = Constraint(expr= -10 * m.b89 + m.x615 <= 0)
m.e755 = Constraint(expr= -10 * m.b90 + m.x619 <= 0)
m.e756 = Constraint(expr= -10 * m.b91 + m.x623 <= 0)
m.e757 = Constraint(expr= -10 * m.b92 + m.x627 <= 0)
m.e758 = Constraint(expr= -10 * m.b93 + m.x631 <= 0)
m.e759 = Constraint(expr= -10 * m.b94 + m.x635 <= 0)
m.e760 = Constraint(expr= -10 * m.b95 + m.x639 <= 0)
m.e761 = Constraint(expr= -10 * m.b96 + m.x643 <= 0)
m.e762 = Constraint(expr= -10 * m.b97 + m.x647 <= 0)
m.e763 = Constraint(expr= -10 * m.b98 + m.x651 <= 0)
m.e764 = Constraint(expr= -10 * m.b99 + m.x655 <= 0)
m.e765 = Constraint(expr= -10 * m.b100 + m.x659 <= 0)
m.e766 = Constraint(expr= -10 * m.b101 + m.x663 <= 0)
m.e767 = Constraint(expr= -10 * m.b102 + m.x667 <= 0)
m.e768 = Constraint(expr= -10 * m.b103 + m.x671 <= 0)
m.e769 = Constraint(expr= -10 * m.b104 + m.x675 <= 0)
m.e770 = Constraint(expr= -10 * m.b105 + m.x679 <= 0)
m.e771 = Constraint(expr= -10 * m.b106 + m.x683 <= 0)
m.e772 = Constraint(expr= -10 * m.b107 + m.x687 <= 0)
m.e773 = Constraint(expr= -10 * m.b108 + m.x691 <= 0)
m.e774 = Constraint(expr= -10 * m.b109 + m.x695 <= 0)
m.e775 = Constraint(expr= -10 * m.b110 + m.x699 <= 0)
m.e776 = Constraint(expr= -10 * m.b111 + m.x703 <= 0)
m.e777 = Constraint(expr= -10 * m.b112 + m.x707 <= 0)
m.e778 = Constraint(expr= -10 * m.b113 + m.x711 <= 0)
m.e779 = Constraint(expr= -10 * m.b114 + m.x715 <= 0)
m.e780 = Constraint(expr= -10 * m.b115 + m.x719 <= 0)
m.e781 = Constraint(expr= -10 * m.b116 + m.x723 <= 0)
m.e782 = Constraint(expr= -10 * m.b117 + m.x727 <= 0)
m.e783 = Constraint(expr= -10 * m.b118 + m.x731 <= 0)
m.e784 = Constraint(expr= -10 * m.b119 + m.x735 <= 0)
m.e785 = Constraint(expr= -10 * m.b120 + m.x739 <= 0)
m.e786 = Constraint(expr= -10 * m.b81 + m.x584 <= 0)
m.e787 = Constraint(expr= -10 * m.b82 + m.x588 <= 0)
m.e788 = Constraint(expr= -10 * m.b83 + m.x592 <= 0)
m.e789 = Constraint(expr= -10 * m.b84 + m.x596 <= 0)
m.e790 = Constraint(expr= -10 * m.b85 + m.x600 <= 0)
m.e791 = Constraint(expr= -10 * m.b86 + m.x604 <= 0)
m.e792 = Constraint(expr= -10 * m.b87 + m.x608 <= 0)
m.e793 = Constraint(expr= -10 * m.b88 + m.x612 <= 0)
m.e794 = Constraint(expr= -10 * m.b89 + m.x616 <= 0)
m.e795 = Constraint(expr= -10 * m.b90 + m.x620 <= 0)
m.e796 = Constraint(expr= -10 * m.b91 + m.x624 <= 0)
m.e797 = Constraint(expr= -10 * m.b92 + m.x628 <= 0)
m.e798 = Constraint(expr= -10 * m.b93 + m.x632 <= 0)
m.e799 = Constraint(expr= -10 * m.b94 + m.x636 <= 0)
m.e800 = Constraint(expr= -10 * m.b95 + m.x640 <= 0)
m.e801 = Constraint(expr= -10 * m.b96 + m.x644 <= 0)
m.e802 = Constraint(expr= -10 * m.b97 + m.x648 <= 0)
m.e803 = Constraint(expr= -10 * m.b98 + m.x652 <= 0)
m.e804 = Constraint(expr= -10 * m.b99 + m.x656 <= 0)
m.e805 = Constraint(expr= -10 * m.b100 + m.x660 <= 0)
m.e806 = Constraint(expr= -10 * m.b101 + m.x664 <= 0)
m.e807 = Constraint(expr= -10 * m.b102 + m.x668 <= 0)
m.e808 = Constraint(expr= -10 * m.b103 + m.x672 <= 0)
m.e809 = Constraint(expr= -10 * m.b104 + m.x676 <= 0)
m.e810 = Constraint(expr= -10 * m.b105 + m.x680 <= 0)
m.e811 = Constraint(expr= -10 * m.b106 + m.x684 <= 0)
m.e812 = Constraint(expr= -10 * m.b107 + m.x688 <= 0)
m.e813 = Constraint(expr= -10 * m.b108 + m.x692 <= 0)
m.e814 = Constraint(expr= -10 * m.b109 + m.x696 <= 0)
m.e815 = Constraint(expr= -10 * m.b110 + m.x700 <= 0)
m.e816 = Constraint(expr= -10 * m.b111 + m.x704 <= 0)
m.e817 = Constraint(expr= -10 * m.b112 + m.x708 <= 0)
m.e818 = Constraint(expr= -10 * m.b113 + m.x712 <= 0)
m.e819 = Constraint(expr= -10 * m.b114 + m.x716 <= 0)
m.e820 = Constraint(expr= -10 * m.b115 + m.x720 <= 0)
m.e821 = Constraint(expr= -10 * m.b116 + m.x724 <= 0)
# ======================================================================
# Imports
# ======================================================================
import os
import copy
import numpy as np
from scipy import sparse
from scipy.sparse import linalg
from pyspline import Volume
from pyspline.utils import openTecplot, writeTecplot3D, closeTecplot
from .geo_utils import readNValues, blendKnotVectors
from .topology import BlockTopology
from baseclasses.utils import Error
class pyBlock:
"""
pyBlock represents a collection of pySpline Volume objects.
It performs several functions including fitting and point
projections. The actual b-spline volumes are of the pySpline
Volume type.
Parameters
----------
initType : str
Initialization type. Only 'plot3d' is currently available.
fileName : str
Filename of the plot3d file to be loaded. Should have a .fmt or
.xyz extension. Also must be in ASCII format.
FFD : bool
Flag to indicate that this object is to be created as an FFD.
When this is true, no fitting is performed; The coordinates in
the plot 3d file explicitly become the control points and
uniform (and symmetric) knot vectors are assumed
everywhere. This ensures a seamless FFD.
"""
def __init__(self, initType, fileName=None, FFD=False, symmPlane=None, **kwargs):
self.initType = initType
self.FFD = False
self.topo = None # The topology of the volumes/surface
self.vols = [] # The list of volumes (pySpline volume)
self.nVol = None  # The total number of volumes/surfaces
self.coef = None # The global (reduced) set of control pts
self.embededVolumes = {}
self.symmPlane = symmPlane
if initType == "plot3d":
self._readPlot3D(fileName, FFD=FFD, **kwargs)
elif initType == "create":
pass
else:
raise Error(
'initType must be one of "plot3d" or "create". \
("create" is only for expert debugging)'
)
# ----------------------------------------------------------------------
# Initialization Types
# ----------------------------------------------------------------------
def _readPlot3D(self, fileName, order="f", FFD=False, symmTol=0.001):
"""Load a plot3D file and create the splines to go with each
patch. See the pyBlock() docstring for more information.
Parameters
----------
order : {'f','c'}
Internal ordering of plot3d file. Generally should be 'f'
for fortran ordering. But could be 'c'.
"""
binary = False # Binary read no longer supported.
f = open(fileName)
nVol = readNValues(f, 1, "int", False)[0]
sizes = readNValues(f, nVol * 3, "int", False).reshape((nVol, 3))
blocks = []
for i in range(nVol):
cur_size = sizes[i, 0] * sizes[i, 1] * sizes[i, 2]
blocks.append(np.zeros([sizes[i, 0], sizes[i, 1], sizes[i, 2], 3]))
for idim in range(3):
blocks[-1][:, :, :, idim] = readNValues(f, cur_size, "float", binary).reshape(
(sizes[i, 0], sizes[i, 1], sizes[i, 2]), order=order
)
f.close()
def flip(axis, coords):
"""Flip coordinates by plane defined by 'axis'"""
if axis.lower() == "x":
index = 0
elif axis.lower() == "y":
index = 1
elif axis.lower() == "z":
index = 2
coords[:, :, :, index] = -coords[:, :, :, index]
# HOWEVER just doing this results in a left-handed block (if
# the original block was right handed). So we have to also
# reverse ONE of the indices
coords[:, :, :, :] = coords[::-1, :, :, :]
# dims = coords.shape
# for k in range(dims[2]):
# for j in range(dims[1]):
# for idim in range(3):
# self.coords[:, j, k, idim] = self.coords[::-1, j, k, idim]
def symmZero(axis, coords, tol):
"""set all coords within a certain tolerance of the symm plan to be exactly 0"""
if axis.lower() == "x":
index = 0
elif axis.lower() == "y":
index = 1
elif axis.lower() == "z":
index = 2
dims = coords.shape
for k in range(dims[2]):
for j in range(dims[1]):
for i in range(dims[0]):
error = abs(coords[i, j, k, index])
if error <= tol:
coords[i, j, k, index] = 0
if self.symmPlane is not None:
# duplicate and mirror the blocks.
newBlocks = []
for block in blocks:
newBlock = copy.deepcopy(block)
symmZero(self.symmPlane, newBlock, symmTol)
flip(self.symmPlane, newBlock)
newBlocks.append(newBlock)
# now create the appended list with double the blocks
blocks += newBlocks
# Extend sizes
newSizes = np.zeros([nVol * 2, 3], "int")
newSizes[:nVol, :] = sizes
newSizes[nVol:, :] = sizes
sizes = newSizes
# increase the volume counter
nVol *= 2
# Now create a list of spline volume objects:
self.vols = []
if FFD:
self.FFD = True
def uniformKnots(N, k):
"""Simple function to generate N uniform knots of
order k"""
knots = np.zeros(N + k)
knots[0 : k - 1] = 0.0
knots[-k:] = 1.0
knots[k - 1 : -k + 1] = np.linspace(0, 1, N - k + 2)
return knots
for ivol in range(nVol):
ku = min(4, sizes[ivol, 0])
kv = min(4, sizes[ivol, 1])
kw = min(4, sizes[ivol, 2])
# A uniform knot vector is ok and we won't have to
# propagate the vectors since they are by
# construction symmetric
self.vols.append(
Volume(
ku=ku,
kv=kv,
kw=kw,
coef=blocks[ivol],
tu=uniformKnots(sizes[ivol, 0], ku),
tv=uniformKnots(sizes[ivol, 1], kv),
tw=uniformKnots(sizes[ivol, 2], kw),
)
)
# Generate dummy original data:
U = np.zeros((3, 3, 3))
V = np.zeros((3, 3, 3))
W = np.zeros((3, 3, 3))
for i in range(3):
for j in range(3):
for k in range(3):
U[i, j, k] = float(i) / 2
V[i, j, k] = float(j) / 2
W[i, j, k] = float(k) / 2
# Evaluate the spline "original data"
self.vols[-1].X = self.vols[-1](U, V, W)
self.vols[-1].origData = True
self.vols[-1].Nu = 3
self.vols[-1].Nv = 3
self.vols[-1].Nw = 3
# end for (ivol loop)
self.nVol = len(self.vols)
self._calcConnectivity(1e-4, 1e-4)
nCtl = self.topo.nGlobal
self.coef = np.zeros((nCtl, 3))
self._setVolumeCoef()
for ivol in range(self.nVol):
self.vols[ivol].setFaceSurfaces()
self.vols[ivol].setEdgeCurves()
else: # (not FFD check --- must run fitGlobal after!)
# Note This doesn't actually fit the volumes...just produces
# the parametrization and knot vectors
for ivol in range(nVol):
self.vols.append(Volume(X=blocks[ivol], ku=4, kv=4, kw=4, nCtlu=4, nCtlv=4, nCtlw=4, recompute=False))
self.nVol = len(self.vols)
# end if (FFD Check)
def fitGlobal(self, greedyReorder=False):
"""
Determine the set of b-spline coefficients that best fits the
set of volumes in the global sense. This is *required* for
non-FFD creation.
Parameters
----------
greedyReorder : bool
Flag to compute ordering of initial mesh in a greedy
ordering sense.
"""
nCtl = self.topo.nGlobal
origTopo = copy.deepcopy(self.topo)
print(" -> Creating global numbering")
sizes = []
for ivol in range(self.nVol):
sizes.append([self.vols[ivol].Nu, self.vols[ivol].Nv, self.vols[ivol].Nw])
# Get the Global number of the original data
origTopo.calcGlobalNumbering(sizes, greedyReorder=greedyReorder)
N = origTopo.nGlobal
print(" -> Creating global point list")
pts = np.zeros((N, 3))
for ii in range(N):
pts[ii] = self.vols[origTopo.gIndex[ii][0][0]].X[
origTopo.gIndex[ii][0][1], origTopo.gIndex[ii][0][2], origTopo.gIndex[ii][0][3]
]
# Get the maximum k (ku, kv, kw for each vol)
kmax = 2
for ivol in range(self.nVol):
kmax = max(kmax, self.vols[ivol].ku, self.vols[ivol].kv, self.vols[ivol].kw)
nnz = N * kmax * kmax * kmax
vals = np.zeros(nnz)
rowPtr = [0]
colInd = np.zeros(nnz, "intc")
for ii in range(N):
ivol = origTopo.gIndex[ii][0][0]
i = origTopo.gIndex[ii][0][1]
j = origTopo.gIndex[ii][0][2]
k = origTopo.gIndex[ii][0][3]
u = self.vols[ivol].U[i, j, k]
v = self.vols[ivol].V[i, j, k]
w = self.vols[ivol].W[i, j, k]
vals, colInd = self.vols[ivol].getBasisPt(u, v, w, vals, rowPtr[ii], colInd, self.topo.lIndex[ivol])
kinc = self.vols[ivol].ku * self.vols[ivol].kv * self.vols[ivol].kw
rowPtr.append(rowPtr[-1] + kinc)
# Now we can crop out any additional values in colInd and vals
vals = vals[: rowPtr[-1]]
colInd = colInd[: rowPtr[-1]]
# Now make a sparse matrix, form the N and N^T * N factors, solve,
# and set:
NN = sparse.csr_matrix((vals, colInd, rowPtr))
NNT = NN.T
NTN = NNT * NN
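# Least-squares fit via the normal equations: factorize N^T N once and
# solve (N^T N) coef = N^T pts separately for x, y and z.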
solve = linalg.dsolve.factorized(NTN)
self.coef = np.zeros((nCtl, 3))
for idim in range(3):
self.coef[:, idim] = solve(NNT * pts[:, idim])
self._updateVolumeCoef()
for ivol in range(self.nVol):
self.vols[ivol].setFaceSurfaces()
self.vols[ivol].setEdgeCurves()
# ----------------------------------------------------------------------
# Topology Information Functions
# ----------------------------------------------------------------------
def doConnectivity(self, fileName=None, nodeTol=1e-4, edgeTol=1e-4, greedyReorder=False):
"""
This function is used if a separate fitting topology is
required for non-FFD creations. The sequence of calls is given
in the examples section.
Parameters
----------
fileName : str
If the filename exists, read in the topology. Otherwise, write
an initial default topology
nodeTol : float
Tolerance for coincident nodes
edgeTol : float
Tolerance for coincident midpoints of edges
greedyReorder : bool
Flag to reorder numbering in a greedy form.
"""
if fileName is not None and os.path.isfile(fileName):
print(" ")
print("Reading Connectivity File: %s" % (fileName))
self.topo = BlockTopology(file=fileName)
self._propagateKnotVectors()
else:
print(" ")
self._calcConnectivity(nodeTol, edgeTol)
self._propagateKnotVectors()
if fileName is not None:
print("Writing Connectivity File: %s" % (fileName))
self.topo.writeConnectivity(fileName)
sizes = []
for ivol | |
np.empty(tableShape)
counts = []
with mp.Parallelize(counts=counts) as tasker:
for task in tasker:
count = np.zeros(tableShape[1:], dtype=float)
for i, t in enumerate(tVals):
n = nev[i] / tasker.numWorkers()
for j in range(int(n)):
if j % 10000 == 0:
print("%d/%d %d/%d" % (i, len(tVals), j, int(n)))
tasker.process()
ev = cls.generateRandom(rate=rate, tMax=t, reps=1)
score = cls.score(ev, rate, normalize=False)
ind = np.log(score[0]) / np.log(r)
count[i, : int(ind) + 1] += 1
tasker.counts.append(count)
count = sum(counts)
count[count == 0] = 1
norm[0] = xVals.reshape(1, len(xVals))
norm[1] = nev.reshape(len(nev), 1) / count
open(cacheFile, "wb").write(norm.tostring())
return norm
@classmethod
def testMapping(cls, rate=1.0, tMax=1.0, n=10000, reps=3):
scores = np.empty(n)
mapped = np.empty(n)
ev = []
for i in range(len(scores)):
ev.append(cls.generateRandom(rate, tMax, reps))
scores[i] = cls.score(ev[-1], rate, tMax=tMax, normalize=False)
mapped[i] = cls.mapScore(
scores[i], np.mean(rate) * tMax * reps, nEvents=10000
)
for j in [1, 2, 3, 4]:
print(" %d: %f" % (10 ** j, (mapped > 10 ** j).sum() / float(n)))
return ev, scores, mapped
@classmethod
def showMap(cls):
plt = pg.plot()
for i in range(cls.normalizationTable.shape[1]):
plt.plot(
cls.normalizationTable[0, i],
cls.normalizationTable[1, i],
pen=(i, 14),
symbolPen=(i, 14),
symbol="o",
)
@classmethod
def poissonScoreBlame(cls, ev, rate):
events = np.concatenate(ev)
ev = events["time"]
nVals = np.array([(ev <= t).sum() - 1 for t in ev])
x = poissonProb(nVals, ev, rate, clip=True)
# print(x)
pp1 = 1.0 / (1.0 - poissonProb(nVals, ev, rate, clip=True))
pp2 = 1.0 / (1.0 - poissonProb(nVals - 1, ev, rate, clip=True))
diff = pp1 / pp2
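# diff[i] compares the improbability score with and without event i;
# each event is then blamed for the largest such factor among events
# at or after its own time.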
blame = np.array([diff[np.argwhere(ev >= ev[i])].max() for i in range(len(ev))])
return blame
@classmethod
def extrapolateNormTable(cls):
## On a log-log scale, the normalization curves appear to become linear
## after reaching about 50 on the y-axis.
## We can use this to overwrite all the junk at the end caused by running too few test iterations.
d = cls.normalizationTable
for n in range(d.shape[1]):
trace = d[:, n]
logtrace = np.log(trace)
ind1 = np.argwhere(trace[1] > 60)[0, 0]
ind2 = np.argwhere(trace[1] > 100)[0, 0]
dd = logtrace[:, ind2] - logtrace[:, ind1]
slope = dd[1] / dd[0]
npts = trace.shape[1] - ind2
yoff = logtrace[1, ind2] - logtrace[0, ind2] * slope
trace[1, ind2:] = np.exp(logtrace[0, ind2:] * slope + yoff)
class PoissonAmpScore(PoissonScore):
normalizationTable = None
@classmethod
def amplitudeScore(cls, events, ampMean=1.0, ampStdev=1.0, **kwds):
"""Computes extra probability information about events based on their amplitude.
Inputs to this method are:
events: record array of events; fields include 'time' and 'amp'
times: the time points at which to compute probability values
(the output must have the same length)
ampMean, ampStdev: population statistics of spontaneous events
"""
if ampStdev == 0.0: ## no stdev information; cannot determine probability.
return np.ones(len(events))
scores = 1.0 / np.clip(
gaussProb(events["amp"], ampMean, ampStdev), 1e-100, np.inf
)
assert not np.any(np.isnan(scores) | np.isinf(scores))
return scores
class PoissonRepeatScore:
"""
Class for analyzing poisson-process spike trains with evoked events mixed in.
This computes a statistic that asks "assuming spikes have poisson timing and
normally-distributed amplitudes, what is the probability of seeing this set
of times/amplitudes?".
A single set of events is merely a list of time values; we can also ask a
similar question for multiple trials: "what is the probability that a poisson
process would produce all of these spike trains"
The statistic should be able to pick out:
- Spikes that are very close to the stimulus (assumed to be at t=0)
- Abnormally high spike rates, particularly soon after the stimulus
- Spikes that occur with similar post-stimulus latency over multiple trials
- Spikes that are larger than average, particularly soon after the stimulus
"""
normalizationTable = None
@classmethod
def score(cls, ev, rate, tMax=None, normalize=True, **kwds):
"""
Given a set of event lists, return probability that a poisson process would generate all sets of events.
ev = [
[t1, t2, t3, ...], ## trial 1
[t1, t2, t3, ...], ## trial 2
...
]
*rate* may be a scalar, or a sequence with the same length as *ev*.
Extra keyword arguments are passed to amplitudeScore
"""
events = ev
nSets = len(ev)
ev = [x["time"] for x in ev] ## select times from event set
if np.isscalar(rate):
rate = [rate] * nSets
ev2 = []
for i in range(len(ev)):
arr = np.zeros(len(ev[i]), dtype=[("trial", int), ("time", float)])
arr["time"] = ev[i]
arr["trial"] = i
ev2.append(arr)
ev2 = np.sort(np.concatenate(ev2), order=["time", "trial"])
if len(ev2) == 0:
return 1.0
ev = list(map(np.sort, ev))
pp = np.empty((len(ev), len(ev2)))
for i, trial in enumerate(ev):
nVals = []
for j in range(len(ev2)):
n = (trial < ev2[j]["time"]).sum()
if (
any(trial == ev2[j]["time"]) and ev2[j]["trial"] > i
): ## need to correct for the case where two events in separate trials happen to have exactly the same time.
n += 1
nVals.append(n)
pp[i] = 1.0 / (1.0 - poissonProb(np.array(nVals), ev2["time"], rate[i]))
## apply extra score for uncommonly large amplitudes
## (note: by default this has no effect; see amplitudeScore)
pp[i] *= cls.amplitudeScore(events[i], ev2["time"], **kwds)
score = pp.prod(
axis=0
).max() ##** (1.0 / len(ev)) ## normalize by number of trials [disabled--we WANT to see the significance that comes from multiple trials.]
if normalize:
ret = cls.mapScore(score, np.mean(rate) * tMax, nSets)
else:
ret = score
if np.isscalar(ret):
assert not np.isnan(ret)
else:
assert not any(np.isnan(ret))
return ret
@classmethod
def amplitudeScore(cls, events, times, **kwds):
"""Computes extra probability information about events based on their amplitude.
Inputs to this method are:
events: record array of events; fields include 'time' and 'amp'
times: the time points at which to compute probability values
(the output must have the same length)
By default, no extra score is applied for amplitude (but see also PoissonRepeatAmpScore)
"""
return np.ones(len(times))
@classmethod
def mapScore(cls, x, n, m):
"""
Map score x to probability given we expect n events per set and m repeat sets
"""
if cls.normalizationTable is None:
cls.normalizationTable = cls.generateNormalizationTable()
cls.extrapolateNormTable()
table = cls.normalizationTable[
:, min(m - 1, cls.normalizationTable.shape[1] - 1)
] # select the table for this repeat number
nind = np.log(n) / np.log(2)
n1 = np.clip(int(np.floor(nind)), 0, table.shape[2] - 2)
n2 = n1 + 1
mapped1 = []
for i in [n1, n2]:
norm = table[:, i]
ind = np.argwhere(norm[0] > x)
if len(ind) == 0:
ind = len(norm[0]) - 1
else:
ind = ind[0, 0]
if ind == 0:
ind = 1
x1, x2 = norm[0, ind - 1 : ind + 1]
y1, y2 = norm[1, ind - 1 : ind + 1]
if x1 == x2:
s = 0.0
else:
s = (x - x1) / float(x2 - x1)
mapped1.append(y1 + s * (y2 - y1))
mapped = mapped1[0] + (mapped1[1] - mapped1[0]) * (nind - n1) / float(n2 - n1)
## doesn't handle points outside of the original data.
# mapped = scipy.interpolate.griddata(poissonScoreNorm[0], poissonScoreNorm[1], [x], method='cubic')[0]
# normTable, tVals, xVals = poissonScoreNorm
# spline = scipy.interpolate.RectBivariateSpline(tVals, xVals, normTable)
# mapped = spline.ev(n, x)[0]
# raise Exception()
assert not (np.isinf(mapped) or np.isnan(mapped))
return mapped
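    # Worked sketch of the interpolation above (numbers are hypothetical): with
    # n = 6 expected events, nind = log2(6) ~ 2.58, so the columns for n1 = 2
    # (~4 expected events) and n2 = 3 (~8 expected events) are each interpolated
    # linearly in x, and the two results are blended with weight nind - n1 ~ 0.58
    # toward the n2 column.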
@classmethod
def generateRandom(cls, rate, tMax, reps):
ret = []
for i in range(reps):
times = poissonProcess(rate, tMax)
ev = np.empty(len(times), dtype=[("time", float), ("amp", float)])
ev["time"] = times
ev["amp"] = np.random.normal(size=len(times))
ret.append(ev)
return ret
@classmethod
def generateNormalizationTable(cls, nEvents=10000):
## parameters determining sample space for normalization table
reps = np.arange(1, 5) ## number of repeats
rate = 1.0
tVals = 2 ** np.arange(4) ## set of tMax values
nev = (nEvents / (rate * tVals) ** 0.5).astype(int)
xSteps = 1000
r = 10 ** (30.0 / xSteps)
        xVals = r ** np.arange(xSteps)  ## log spacing from 1 to ~10**30 in 1000 steps
tableShape = (2, len(reps), len(tVals), len(xVals))
path = os.path.dirname(__file__)
cacheFile = os.path.join(
path,
"%s_normTable_%s_float64.dat"
% (cls.__name__, "x".join(map(str, tableShape))),
)
if os.path.exists(cacheFile):
            norm = np.frombuffer(
                open(cacheFile, "rb").read(), dtype=np.float64
            ).copy().reshape(tableShape)  # frombuffer replaces the deprecated fromstring; copy() keeps the array writable
else:
print("Generating %s ..." % cacheFile)
norm = np.empty(tableShape)
counts = []
with mp.Parallelize(tasks=[0, 1], counts=counts) as tasker:
for task in tasker:
count = np.zeros(tableShape[1:], dtype=float)
for i, t in enumerate(tVals):
n = nev[i]
planted set, blue planted set, and the planted region.
"""
_, _, reg = region_plant_f(traj_start + traj_end, r, .5, q)
flux_region = []
out_region = []
for st_pt, end_pt in zip(traj_start, traj_end):
if reg.contains(st_pt) and not reg.contains(end_pt):
flux_region.append((st_pt, end_pt))
elif reg.contains(end_pt) and not reg.contains(st_pt):
flux_region.append((end_pt, st_pt))
else:
if random.random() <= .5:
out_region.append((st_pt, end_pt))
else:
out_region.append((end_pt, st_pt))
q_fraction, remainder = split_set(flux_region, q)
remainder = [(ep, sp) for (sp, ep) in remainder]
red, blue = zip(*(q_fraction + remainder + out_region))
return red, blue, reg
def plant_kernel_disk_region(pts, r, p, q, eps=.0001):
"""
Plants a region using the bernoulli model with the given p and q parameters.
Need the fraction in the q set to be r.
Returns the bandwidth of the anomaly.
:param pts:
:param r:
:param p:
:param q:
:param eps: The error on the planted r parameter.
:return:
"""
if not pts:
return None
((min_x, min_y), (max_x, max_y)) = bounding_box(pts)
def sq_dist(x, pt):
return (x[0] - pt[0])**2 + (x[1] - pt[1])**2
[p1, p2] = random.sample(pts, 2)
alpha = random.random()
px = p1[0] * alpha + p2[0] * (1 - alpha)
py = p1[1] * alpha + p2[1] * (1 - alpha)
    # Find the correct bandwidth by bisection
    seeds = [random.random() for _ in pts]
bandwidth_upper = 2 * max((max_x - min_x), (max_y - min_y))
bandwidth_lower = 0.0
while True:
bandwidth = (bandwidth_upper + bandwidth_lower) / 2
def kernel(x):
return math.exp(-sq_dist(x, (px, py)) / (2 * bandwidth**2))
p_count = 0
for i, pt in enumerate(pts):
if seeds[i] < kernel(pt):
p_count += 1
if r - eps <= p_count / len(pts) <= r + eps:
break
if r + eps < p_count / len(pts):
bandwidth_upper = bandwidth
else:
bandwidth_lower = bandwidth
measured_set = []
baseline_set = []
p_count = 0
q_count = 0
for i, pt in enumerate(pts):
if seeds[i] < kernel(pt):
p_count += 1
if random.random() < p:
measured_set.append(pt)
else:
baseline_set.append(pt)
else:
q_count += 1
if random.random() < q:
measured_set.append(pt)
else:
baseline_set.append(pt)
return measured_set, baseline_set, bandwidth, (px, py)
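# Minimal standalone sketch of the bandwidth bisection used above (a hypothetical
# helper, not part of the original module): grow or shrink the kernel bandwidth
# until the captured fraction reaches the target r.
def _example_bisect_bandwidth(frac_at, r, lo=0.0, hi=1.0, eps=1e-4):
    # frac_at(bw) -> fraction of points captured at bandwidth bw; assumed
    # monotone increasing in bw, as in plant_kernel_disk_region above.
    while True:
        mid = (lo + hi) / 2
        f = frac_at(mid)
        if r - eps <= f <= r + eps:
            return mid
        if f > r + eps:
            hi = mid  # capturing too many points: shrink the kernel
        else:
            lo = mid  # capturing too few points: widen the kernel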
def disc_bernoulli_kern(measured, baseline, p, q, bandwidth, center):
def sq_dist(x, pt):
return (x[0] - pt[0])**2 + (x[1] - pt[1])**2
def kernel(x):
return math.exp(-sq_dist(x, center) / bandwidth**2)
act_disc = 0
for pt in measured:
fr = kernel(pt)
gr = p * fr + (1 - fr) * q
act_disc += math.log(gr)
for pt in baseline:
fr = kernel(pt)
gr = p * fr + (1 - fr) * q
act_disc += math.log(1 - gr)
scale = len(measured) / (len(measured) + len(baseline))
null_val = len(measured) * math.log(scale) + len(baseline) * math.log(1 - scale)
return act_disc - null_val
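# The function above is a log-likelihood ratio: under the alternative, a point
# with kernel weight fr is "measured" with probability gr = p*fr + q*(1 - fr);
# under the null, every point is measured with the same pooled probability
# `scale`. A hypothetical call (all values illustrative):
#
#   stat = disc_bernoulli_kern(measured, baseline, p=0.8, q=0.2,
#                              bandwidth=0.1, center=(0.5, 0.5))
#
# Larger values indicate stronger evidence for the planted kernel anomaly.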
def close_region(region):
"""
    Ensures that this region starts and ends at the same point, which is useful
    when applying trajectory methods to regions.
:param region: List of points
:return: list of points
"""
    if region:
        # append the first point only when the boundary is not already closed
        if not Point(region[-1][0], region[-1][1], 1.0).approx_eq(region[0]):
            region.append(region[0])
def plant_full_disk_region(region_set, r, p, q):
"""
    Choose a point at random from a region and then expand a disk out from this point until the disk
    contains an r fraction of all the regions.
:param region_set: List of list of points. This argument is modified by adding a point at the end in some cases.
:param r: double between 0 and 1
:param p: double between 0 and 1
:param q: double between 0 and 1
:return: red set of regions, blue set of regions, the planted region
"""
for reg in region_set:
close_region(reg)
return plant_full_disk(region_set, r, p, q)
try:
from shapely.geometry import Polygon
from shapely import geometry
def measure_range_region(reg_geom, range_set, weights=None):
weight_sum = 0
if weights is None:
weights = [1.0] * len(range_set)
if isinstance(reg_geom, Disk):
for i in range(len(range_set)):
poly = Polygon(range_set[i])
pt = reg_geom.get_origin()
pt_tpl = geometry.Point(pt[0], pt[1])
if poly.distance(pt_tpl) <= reg_geom.get_radius():
weight_sum += weights[i]
elif isinstance(reg_geom, Halfplane):
for i in range(len(range_set)):
traj = Trajectory(range_set[i])
if reg_geom.intersects_trajectory(traj):
weight_sum += weights[i]
elif isinstance(reg_geom, Rectangle):
rect = geometry.box(reg_geom.lowX(), reg_geom.lowY(), reg_geom.upX(), reg_geom.upY())
for i in range(len(range_set)):
poly = Polygon(range_set[i])
if rect.intersects(poly):
weight_sum += weights[i]
else:
raise ValueError()
return weight_sum
except ImportError:
    pass
try:
import matplotlib.pyplot as plt
import numpy as np
def plot_points(ax, pts, c):
xs = []
ys = []
for pt in pts:
            xs.append(pt[0])
            ys.append(pt[1])
ax.scatter(xs, ys, color=c, marker='.')
def plot_kernel(ax, pts, pt, bandwidth, res=20, transform=None):
(mnx, mny), (mxx, mxy) = bounding_box(pts)
mxx = np.linspace(mnx, mxx, res)
mxy = np.linspace(mny, mxy, res)
xv, yv = np.meshgrid(mxx, mxy)
def kernel(x, y):
return np.exp(-(np.power(x - pt[0], 2.0) + np.power(y - pt[1], 2.0) ) / bandwidth**2)
ax.contourf(xv, yv, kernel(xv, yv), alpha=.3, antialiased=True, transform=transform, cmap="Blues")
ax.contour(xv, yv, kernel(xv, yv), linewidths=4.0, antialiased=True, transform=transform)
except ImportError:
    pass
def plant_full_square_region(regions, r, p, q, max_count=32):
"""
    Choose a point at random from a region and then expand a square out from this point until the square
    contains an r fraction of all the regions.
:param regions: List of list of points. These lists are modified. In some cases the regions will have a point appended at the end to close them.
:param r: double between 0 and 1
:param p: double between 0 and 1
:param q: double between 0 and 1
:param max_count: The maximum number of times we will attempt to find the right sized region.
:return: red set of regions, blue set of regions, the planted region
"""
for reg in regions:
close_region(reg)
return plant_full_square(regions, r, p, q, max_count)
def max_disk_trajectory(net, red_sample, blue_sample, min_disk_r, max_disk_r, alpha, disc, fast_disk=True):
"""
Computes the highest discrepancy disk over a set of trajectories. Executes at multiple scales using the grid
directional compression method and internally compresses the trajectories if fast_disk is enabled.
:param net: A list of trajectories (scaled to be in a 0 by 1 box).
:param red_sample: A list of trajectories (scaled to be in a 0 by 1 box).
:param blue_sample: A list of trajectories (scaled to be in a 0 by 1 box).
:param min_disk_r: The minimum disk radius to consider.
:param max_disk_r: The maximum disk radius to consider.
:param alpha: The spatial error with which to approximate the trajectories.
:param disc: The discrepancy function to use.
:param fast_disk: Default True.
    :return: A tuple of the maximum disk found and the corresponding maximum discrepancy value.
"""
mx = -1
curr_disk_r = max(min_disk_r, alpha)
reg = None
while True:
chord_l = math.sqrt(4 * alpha * curr_disk_r - 2 * alpha * alpha)
m_sample = [grid_direc_kernel(dp_compress(traj, alpha), chord_l, alpha) for traj in red_sample]
b_sample = [grid_direc_kernel(dp_compress(traj, alpha), chord_l, alpha) for traj in blue_sample]
pt_net = [grid_direc_kernel(dp_compress(traj, alpha), chord_l, alpha) for traj in net]
m_sample = list(trajectories_to_labels(m_sample))
b_sample = list(trajectories_to_labels(b_sample))
net_set = list(trajectories_to_labels(pt_net))
new_reg, new_mx = max_disk_scale_labeled(net_set, m_sample, b_sample, fast_disk, curr_disk_r, disc)
if new_mx > mx:
reg = new_reg
mx = new_mx
curr_disk_r *= 2
if curr_disk_r >= max_disk_r:
break
return reg, mx
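# Hypothetical usage sketch for the multi-scale search above (trajectory data,
# the discrepancy function, and the radii are placeholders):
#
#   best_disk, best_val = max_disk_trajectory(net_trajs, red_trajs, blue_trajs,
#                                             min_disk_r=0.01, max_disk_r=0.5,
#                                             alpha=0.005, disc=disc_fn)
#
# Each pass doubles curr_disk_r, so the loop runs O(log(max_disk_r / min_disk_r))
# times, recompressing the trajectories with a chord length matched to the scale.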
def max_disk_trajectory_fixed(net, m_sample, b_sample, min_disk_r, max_disk_r, disc, fast_disk=True):
"""
Computes the highest discrepancy disk over a set of trajectories. Executes at multiple scales, but uses whatever set
of points the trajectories have been compressed with.
:param net: A list of trajectories (scaled to be in a 0 by 1 box).
:param m_sample: A list of trajectories (scaled to be in a 0 by 1 box).
:param b_sample: A list of trajectories (scaled to be in a 0 by 1 box).
:param min_disk_r: The minimum disk radius to consider.
:param max_disk_r: The maximum disk radius to consider.
:param disc: The discrepancy function to use.
:param fast_disk: Default True.
:return: A tuple of the maximum disk and the corresponding maximum value.
"""
mx = -1
curr_disk_r = min_disk_r
reg = None
while curr_disk_r < max_disk_r:
new_reg, new_mx = max_disk_scale_labeled(net, m_sample, b_sample, fast_disk, curr_disk_r, disc)
if new_mx > mx:
reg = new_reg
mx = new_mx
curr_disk_r *= 2
return reg, mx
def max_disk_region(net, red_sample, red_weight, blue_sample, blue_weight, min_disk_r, max_disk_r, alpha, disc, fast_disk=True):
"""
Computes the highest discrepancy disk over a set of trajectories. Executes at multiple scales using the grid
directional compression method and internally compresses the trajectories if fast_disk is enabled.
:param net: A list of trajectories
    :param red_sample: A list of trajectories.
<filename>examples/DataAnalysis/CentralBase.py
# encoding: UTF-8
import sys
import json
from pymongo import MongoClient
from vnpy.trader.app.ctaStrategy.ctaBase import DATABASE_NAMES
import pandas as pd
import numpy as np
import datetime as dt
import talib as ta
from interval import Interval
import time
# Direction
M_TO_UP = True
M_TO_DOWN = False
# Whether a node or central base has formally formed
M_FORMAL = True
M_TEMP = False
# Top point or bottom point
M_TOP = 1
M_BOTTOM = -1
# Constants
M_MAX_VAL = 5000
M_MIN_VAL = -5000
M_INVALID_INDEX = -1
# Judgment result for divergence (beichi) points or buy/sell points
M_NODECIDE = 0
M_FALSE = -1
M_TRUE = 1
# Trade direction
M_BUY = 1
M_SELL = -1
# Grandparent level of the highest level allowed to generate divergence from sub-level nodes
GRANDPA_CB_LEVER = []
class Node:
"""
    Trend node, containing (time, value).
    low_id: ID of the lower-level central base
    low_count: number of lower-level central bases accumulated at this point
    ntype: top point or bottom point
    isformal: formal or temporary node
"""
def __init__(self, time, value, ntype, low_id=None, low_count=None, isformal=M_TEMP):
self.datetime = time
self.value = value
self.ntype= ntype
self.low_id = low_id
self.low_count = low_count
self.isformal = isformal
class Centralbase:
"""
    Central base (consolidation zone) definition.
    start: start time
    end: end time
    up: upper boundary
    down: lower boundary
    start_node_id: starting node ID
    end_node_id: ending node ID
    ctype: type counter
    isformal: formal or temporary
"""
def __init__(self, start, end, up, down, start_node_id=None, end_node_id=None, isformal=M_TEMP):
self.start = start
self.end = end
self.up = up
self.down = down
self.start_node_id = start_node_id
self.end_node_id = end_node_id
self.ctype = 0
self.isformal = isformal
self.max_val = M_MIN_VAL
self.min_val = M_MAX_VAL
self.max_node_id = M_INVALID_INDEX
self.min_node_id = M_INVALID_INDEX
def setCType(self, ctype):
self.ctype = ctype
def getCBInterval(self):
        return Interval(lower_bound=self.down, upper_bound=self.up,
                        lower_closed=False, upper_closed=False)
class BeichiTime:
'''
    time: time of the divergence (beichi)
    btype: divergence type and central-base level; positive means top divergence, negative means bottom divergence
    real_beichi: whether this is a true divergence; M_NODECIDE undecided, M_FALSE no, M_TRUE yes
'''
def __init__(self, time, btype, node_id, real_time=None, low_cb_id=None):
self.time = time
self.btype = btype
self.node_id = node_id
self.real_beichi = M_NODECIDE
self.real_time = real_time
self.low_cb_id = low_cb_id
class BuyPoint:
def __init__(self, time, node_id,real_time = None):
self.time = time
self.real_time = real_time
self.node_id = node_id
self.real_buy = M_NODECIDE
class SellPoint:
def __init__(self, time, node_id,real_time = None):
self.time = time
self.real_time = real_time
self.node_id = node_id
self.real_sell = M_NODECIDE
class TradePoint:
def __init__(self, time, node_id,low_cb_id=None, real_time = None, trade_direct=M_BUY):
self.time = time
self.real_time = real_time
self.node_id = node_id
self.trade_direct = trade_direct
self.real_sell = M_NODECIDE
self.low_cb_id = low_cb_id
class TradeStrategy:
def __init__(self):
self.trade_point_list = []
self.trade_on_going = False
class KData:
def __init__(self):
self.data_dict={}
self.rely_dict={}
def addDataItem(self, name='D', item=None):
        if item is None:
            self.data_dict[name] = pd.DataFrame()
        else:
            # DataFrame has no .concat method; use pd.concat to append the new item
            self.data_dict[name] = pd.concat([self.data_dict[name], item], ignore_index=False)
    def buildRelation(self, up_name='D', low_name='30MIN'):
self.rely_dict[up_name] = low_name
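# Hypothetical sketch of how KData ties timeframes together (the frequency
# names mirror those used by CentralBaseSet below):
#
#   kdata = KData()
#   kdata.addDataItem(name='D')        # daily bars
#   kdata.addDataItem(name='30MIN')    # 30-minute bars
#   kdata.buildRelation(up_name='D', low_name='30MIN')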
class CentralBaseSet:
def __init__(self, freq, dataframe, low_CB_set=None, all_data=None):
self.data = dataframe.set_index(dataframe['datetime'])
self.low_CB_set = low_CB_set
self.node_list = []
self.centralbase_list = []
self.freq = freq
self.beichi_list=[]
self.beichi_pc_list=[]
self.share_beichi_list=[]
self.first_sell_point_list = []
self.sec_sell_point_list = []
self.all_sell_point_list = []
self.first_buy_point_list = []
self.sec_buy_point_list = []
self.third_buy_point_list = []
self.beichi_processing = False
self.upgrade_cnt=0
self.seek_max=None
self.temp_open = None
self.temp_close = None
self.data_columns = self.data.columns
self.all_data = all_data
self.data = all_data
self.cur_time_index = None
self.cur_min_value = M_MAX_VAL
self.cur_min_node_id = 0
self.cur_max_value = M_MIN_VAL
self.cur_max_node_id = 0
        # list of trade strategies
self.trade_strategy_list = []
        # central-base upgrade logic
self.cur_cut_low_id = -1
self.cur_cut_start_node_id = -1
self.cur_low_beichi_time = None
self.timespend1=0
self.timespend2=0
self.timespend3=0
self.timespend4=0
self.callcnt = 0
def analyze_CB_Step(self):
if self.low_CB_set == None:
self.getNodeList_KLine_Step()
else:
self.getNodeList_Lower_Step()
if self.freq=='D' :
if len(self.node_list)>2 and abs(self.node_list[-2].value-4.71)<0.001:
a=1
self.get_Centralbase_Step()
self.getBeichi_Share_With_LowBeichi_Step()
        if self.low_CB_set == None or True:  # 'or True' forces this branch; the getBeichi_LastOne_Step path below is effectively disabled
self.beichi_processing = self.getBeichi_LastTwo_Step()
else:
self.beichi_processing = self.getBeichi_LastOne_Step()
self.update_max_min_value()
self.beichi_judge_step()
#self.sell_point_judge()
#self.SnakeTrade_step()
if self.freq=='D':
self.SnakeTrade_With_ShareBeichi_step()
def update_data(self, low_data_frame=None, data_item_series=None):
if not data_item_series.empty:
data = {}
for column in self.data_columns:
data[column] = []
data[column].append(data_item_series[column])
data_item = pd.DataFrame(data, columns=self.data_columns)
self.data = pd.concat([self.data, data_item], ignore_index=True)
self.data.set_index(self.data['datetime'], inplace=True)
self.indexGenerator_step()
'''
else:
if low_data_frame!=None:
if self.low_CB_set==None:
self.data = low_data_frame
else:
data_seg = low_data_frame.ix[low_data_frame.index>self.data.index[-1]]
if data_seg!=None:
open_p = data_seg.ix[0, 'open']
close_p = data_seg.ix[-1, 'close']
volumns = data_seg['volume'].sum()
'''
def update_data_time(self, low_data_frame=None, data_item_time=None):
if data_item_time !=None:
start = time.clock()
self.cur_time_index = data_item_time
self.indexGenerator_step()
self.timespend4 = self.timespend4 + (time.clock() - start)
'''
else:
if low_data_frame!=None:
if self.low_CB_set==None:
self.data = low_data_frame
else:
data_seg = low_data_frame.ix[low_data_frame.index>self.data.index[-1]]
if data_seg!=None:
open_p = data_seg.ix[0, 'open']
close_p = data_seg.ix[-1, 'close']
volumns = data_seg['volume'].sum()
'''
def __getmax(self, a, b):
return a if a>b else b
def __getmin(self, a,b):
return a if a<b else b
def resultToSource(self):
self.data['node'] = None
self.data['base_up'] = None
self.data['base_down'] = None
self.data['beichi'] = None
self.data['sharebeichi'] = None
self.data['panzhbeichi'] = None
self.data['sec_buy'] = None
for node in self.node_list:
time_seg = self.data.ix[self.data.index>=node.datetime, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'node', node.value)
for base in self.centralbase_list:
self.data.ix[base.start:base.end,'base_up'] = base.up
self.data.ix[base.start:base.end,'base_down'] = base.down
self.data.ix[base.start:base.end,'base_type'] = base.ctype
for beichi in self.beichi_list:
time_seg = self.data.ix[self.data.index>=beichi.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'beichi', self.data.ix[time, 'close'])
for sharebeichi in self.share_beichi_list:
time_seg = self.data.ix[self.data.index>=sharebeichi.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'sharebeichi', self.data.ix[time, 'close'])
for panzhbeichi in self.beichi_pc_list:
time_seg = self.data.ix[self.data.index>=panzhbeichi.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'panzhbeichi', self.data.ix[time, 'close'])
for sec_buy in self.sec_buy_point_list:
time_seg = self.data.ix[self.data.index>=sec_buy.time, 'close']
time = time_seg.index[0]
if time!=None:
self.data.set_value(time, 'sec_buy', self.data.ix[time, 'close'])
def indexGenerator_step(self):
#length = np.size(self.data, axis=0)
#if length<40:
        #self.data['SMA5'] = ta.SMA(self.data['close'].values, timeperiod = 5)    # 5-day moving average
        #self.data['SMA10'] = ta.SMA(self.data['close'].values, timeperiod = 10)  # 10-day moving average
macd_talib, signal, hist = ta.MACD(self.data['close'].values,fastperiod=12,signalperiod=9)
self.data['DIF'] = macd_talib #DIF
self.data['DEA'] = signal #DEA
self.data['MACD'] = hist #MACD
#else:
        ##self.data.ix[-40:,'SMA5'] = ta.SMA(self.data.ix[-40:,'close'].values, timeperiod = 5)    # 5-day moving average
        ##self.data.ix[-40:,'SMA10'] = ta.SMA(self.data.ix[-40:,'close'].values, timeperiod = 10)  # 10-day moving average
#macd_talib, signal, hist = ta.MACD(self.data.ix[-40:,'close'].values,fastperiod=12,signalperiod=9)
#self.data.ix[-1,'DIF'] = macd_talib[-1] #DIF
#self.data.ix[-1:,'DEA'] = signal[-1] #DEA
#self.data.ix[-1:,'MACD'] = hist[-1] #MACD
def getNodeList_KLine_Step(self):
#if self.data.empty:
#return
#time = self.data.index[-1]
if self.cur_time_index == None:
return
time = self.cur_time_index
open_price = self.data.ix[time, 'open']
close_price = self.data.ix[time, 'close']
up_flag = open_price <= close_price
        if self.seek_max is None:  # initial data
if up_flag:
self.seek_max = M_TO_UP
self.node_list.append(Node(time, close_price, M_TOP , isformal=M_TEMP))
else:
self.seek_max = M_TO_DOWN
self.node_list.append(Node(time, close_price, M_BOTTOM, isformal=M_TEMP))
go_judge = True
if self.seek_max == M_TO_UP:
            if abs(close_price - open_price) <= 0.001:  # screen out doji candles (open ~= close)
if close_price >= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
go_judge = False
if up_flag and go_judge:
if close_price >= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
if close_price < self.node_list[-1].value:
self.node_list[-1].isformal = M_FORMAL
self.node_list.append(Node(time, close_price, M_BOTTOM, isformal=M_TEMP))
self.seek_max = M_TO_DOWN
else:
            if abs(close_price - open_price) <= 0.001:  # screen out doji candles (open ~= close)
if close_price <= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
go_judge = False
if (not up_flag) and go_judge:
if close_price <= self.node_list[-1].value:
self.node_list[-1].datetime = time
self.node_list[-1].value = close_price
else:
if close_price > self.node_list[-1].value:
self.node_list[-1].isformal = M_FORMAL
self.node_list.append(Node(time, close_price, M_TOP, isformal=M_TEMP))
self.seek_max = M_TO_UP
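    # Behavior sketch for the candlestick-to-node logic above (my reading of the
    # code, stated as an assumption): while seeking a maximum (M_TO_UP), an up
    # candle (open <= close) that makes a new high extends the temporary top
    # node; any candle closing below the node's value finalizes it (M_FORMAL),
    # appends a temporary bottom node, and flips seek_max to M_TO_DOWN. A doji
    # (|close - open| <= 0.001) is classified by its close alone: extend on
    # close >= node value, otherwise treat it like a down candle. The M_TO_DOWN
    # branch mirrors this with the comparisons reversed.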
def __getNodeListCross(self, start_id, end_id_include):
cross_itval = Interval.none()
i=start_id
while(i<end_id_include):
if cross_itval == Interval.none():
cross_itval = self.__getSegment(i)
else:
cross_itval = cross_itval & self.__getSegment(i)
i+=1
return cross_itval
def get_Centralbase_Step(self):
'''
        Valid trigger points for this logic:
        1. the first central base;
        2. divergence handling;
        3. formation of a new temporary node or a new formal node
'''
seg_list=[]
start = None
end = None
start_id = -1
end_id = -1
        cross_itval = Interval.none()  # call the constructor; the bare attribute would store the function itself
if self.freq=='5MIN' and len(self.node_list)>2 \
and abs(self.node_list[-2].value-5.29)<0.001\
and abs(self.node_list[-1].value-5.28)<0.001:
a=1
if self.freq=='5MIN' :
a=1
if self.freq=='30MIN' :
a=1
if self.freq=='D' :
a=1
        if len(self.centralbase_list) == 0:  # first central base
if len(self.node_list) > 3:
cross_itval = self.__getSegment(0) & self.__getSegment(1)
start = self.__getSegmentStart(0)
end = self.__getSegmentEnd(1)
newcbase = Centralbase(start, end, cross_itval.upper_bound, cross_itval.lower_bound, 0, 2, isformal=M_TEMP)
newcbase.setCType(self.__getCBType(newcbase))
newcbase.max_node_id, newcbase.max_val = self.__getMaxNode_Val(0, 2)
newcbase.min_node_id, newcbase.min_val = self.__getMinNode_Val(0, 2)
self.centralbase_list.append(newcbase)
else:
end_node_id = self.centralbase_list[-1].end_node_id
start_node_id = self.centralbase_list[-1].start_node_id
            if len(self.node_list)-2 > end_node_id:  # a new temporary node has formed, so a new formal node now exists
cross_itval = self.centralbase_list[-1].getCBInterval() & self.__getSegment(end_node_id)
                if cross_itval != Interval.none():  # the new formal segment intersects the current central base; update its info
#if end_node_id-start_node_id >=4 :
                    ## split the central base
#self.centralbase_list[-1].isformal = M_FORMAL
#cross_itval = self.__getSegment(start_node_id) & self.__getSegment(start_node_id+2)
#self.centralbase_list[-1].up = cross_itval.upper_bound
#self.centralbase_list[-1].down = cross_itval.lower_bound
#self.centralbase_list[-1].end_node_id = start_node_id+3
#self.centralbase_list[-1].end = self.node_list[start_node_id+3].datetime
#self.centralbase_list[-1].max_node_id, self.centralbase_list[-1].max_val = self.__getMaxNode_Val(start_node_id, start_node_id+3)
#self.centralbase_list[-1].min_node_id, self.centralbase_list[-1].min_val = self.__getMinNode_Val(start_node_id, start_node_id+3)
                    ## add a new central base
#cross_itval = self.centralbase_list[-1].getCBInterval() & self.__getSegment(start_node_id+3) & self.__getSegment(start_node_id+4)
#start = self.node_list[start_node_id+3].datetime
#end = self.node_list[end_node_id+1].datetime
#newcbase = Centralbase(start, end, cross_itval.upper_bound, cross_itval.lower_bound, start_node_id+3, end_node_id+1, isformal=M_TEMP)
#newcbase.setCType(self.__getCBType(newcbase))
#newcbase.max_node_id, newcbase.max_val = self.__getMaxNode_Val(start_node_id+3, end_node_id+1)
#newcbase.min_node_id, newcbase.min_val = self.__getMinNode_Val(start_node_id+3, end_node_id+1)
#self.centralbase_list.append(newcbase)
#else:
self.centralbase_list[-1].up = cross_itval.upper_bound
self.centralbase_list[-1].down = cross_itval.lower_bound
self.centralbase_list[-1].end_node_id = end_node_id+1
self.centralbase_list[-1].end = self.node_list[end_node_id+1].datetime
#self.centralbase_list[-1].setCType(self.__getCBType(newcbase=None, isnew=False, cb_id=len(self.centralbase_list)-1))
self.get_panzheng_beichi_step()
                    # update extremum info
if self.node_list[end_node_id+1].value > self.centralbase_list[-1].max_val:
self.centralbase_list[-1].max_val = self.node_list[end_node_id+1].value
self.centralbase_list[-1].max_node_id = end_node_id+1
if self.node_list[end_node_id+1].value < self.centralbase_list[-1].min_val:
self.centralbase_list[-1].min_val = self.node_list[end_node_id+1].value
self.centralbase_list[-1].min_node_id = end_node_id+1
else:
self.centralbase_list[-1].isformal = M_FORMAL
                    # add a new central base
cross_itval = self.__getSegment(end_node_id)
start = self.node_list[end_node_id].datetime
end = self.node_list[end_node_id+1].datetime
newcbase = Centralbase(start, end, cross_itval.upper_bound, cross_itval.lower_bound, end_node_id, end_node_id+1, isformal=M_TEMP)
newcbase.setCType(self.__getCBType(newcbase))
newcbase.max_node_id, newcbase.max_val = self.__getMaxNode_Val(end_node_id, end_node_id+1)
newcbase.min_node_id, newcbase.min_val = self.__getMinNode_Val(end_node_id, end_node_id+1)
self.centralbase_list.append(newcbase)
if self.centralbase_list[-1].ctype < self.centralbase_list[-2].ctype:
self.sec_sell_point_list.append(SellPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
real_time=self.__get_lowest_current_time("5MIN")))
if (self.centralbase_list[-1].ctype >0) and (self.centralbase_list[-1].ctype*self.centralbase_list[-2].ctype<0):
                        self.third_buy_point_list.append(BuyPoint(self.node_list[-2].datetime, len(self.node_list)-2,\
                                                                  real_time=self.__get_lowest_current_time("5MIN")))
# Copyright (c) 2013-2014, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import common
import anyvcs
from anyvcs.common import CommitLogEntry
import datetime
import getpass
import os
import shutil
import subprocess
import xml.etree.ElementTree as ET
class SvnTest(common.VCSTest):
vcs = 'svn'
@classmethod
def setUpRepos(cls):
cls.repo = anyvcs.create(cls.main_path, 'svn')
common.check_call(['svn', 'checkout', 'file://' + cls.main_path, cls.working_path])
cls.main_branch = 'HEAD'
cls.working_head = 'HEAD'
for action in cls.setUpWorkingCopy(cls.working_path):
action.doSvn(cls)
@classmethod
def getAbsoluteRev(cls):
xml = cls.check_output(['svn', 'info', '--xml'])
tree = ET.fromstring(xml)
rev = tree.find('entry').attrib.get('revision')
if cls.working_head == 'HEAD':
return int(rev)
else:
return '%s:%s' % (cls.encode_branch(cls.working_head), rev)
@classmethod
def export(cls, rev, path):
rev, prefix = cls.repo._maprev(rev)
url = 'file://%s/%s@%d' % (cls.main_path, prefix, rev)
common.check_call(['svn', 'export', url, path])
@classmethod
def encode_branch(cls, s):
if s == 'trunk':
return 'trunk'
return 'branches/' + s
@classmethod
def decode_branch(cls, s):
if s == 'trunk':
return 'trunk'
assert s.startswith('branches/')
return s[9:]
@classmethod
def encode_tag(cls, s):
return 'tags/' + s
@classmethod
def decode_tag(cls, s):
assert s.startswith('tags/')
return s[5:]
@classmethod
def branch_prefix(cls, branch):
if branch == 'trunk':
return 'trunk/'
return 'branches/' + branch + '/'
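    # Sketch of the branch/tag name mapping implemented by the helpers above
    # (hypothetical values): encode_branch('feature') -> 'branches/feature',
    # decode_branch('branches/feature') -> 'feature', and 'trunk' maps to
    # itself; tags round-trip the same way via the 'tags/' prefix.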
class SvnEmptyTest(SvnTest, common.EmptyTest):
def test_branches(self):
result = self.repo.branches()
correct = ['HEAD']
self.assertEqual(common.normalize_heads(correct), common.normalize_heads(result))
def test_tags(self):
result = self.repo.tags()
correct = []
self.assertEqual(common.normalize_heads(correct), common.normalize_heads(result))
def test_heads(self):
result = self.repo.heads()
correct = ['HEAD']
self.assertEqual(common.normalize_heads(correct), common.normalize_heads(result))
def test_log(self):
result = self.repo.log()
self.assertEqual(1, len(result))
self.assertEqual(0, result[0].rev)
def test_changed(self):
result = self.repo.changed(0)
correct = []
self.assertEqual(correct, result)
def test_pdiff(self):
path_a = os.path.join(self.dir, 'empty')
path_b = os.path.join(self.dir, 'pdiff')
shutil.rmtree(path_a, ignore_errors=True)
shutil.rmtree(path_b, ignore_errors=True)
os.mkdir(path_a)
self.export(0, path_b)
pdiff = self.repo.pdiff(0)
p = subprocess.Popen(['patch', '-p1', '-s'], cwd=path_a, stdin=subprocess.PIPE)
p.communicate(pdiff)
self.assertEqual(0, p.returncode)
rc = subprocess.call(['diff', '-urN', path_a, path_b])
self.assertEqual(0, rc)
class SvnEmptyWithCommitsTest(SvnTest, common.EmptyWithCommitsTest):
pass
class SvnMismatchedFileTypeTest(SvnTest, common.MismatchedFileTypeTest):
pass
class SvnBasicTest(SvnTest, common.BasicTest):
def test_branches(self):
result = self.repo.branches()
correct = ['HEAD']
self.assertEqual(common.normalize_heads(correct), common.normalize_heads(result))
def test_tags(self):
result = self.repo.tags()
correct = []
self.assertEqual(common.normalize_heads(correct), common.normalize_heads(result))
def test_heads(self):
result = self.repo.heads()
correct = ['HEAD']
self.assertEqual(correct, result)
def test_log_all(self):
result = self.repo.log()
self.assertIsInstance(result, list)
self.assertEqual(2, len(result))
self.assertIsInstance(result[0], CommitLogEntry)
self.assertEqual(self.rev1, result[0].rev)
self.assertIsInstance(result[0].date, datetime.datetime)
def test_log_single(self):
result1 = self.repo.log(revrange=1, limit=1)
result2 = self.repo.log(revrange='1', limit=1)
self.assertEqual(result1.parents, result2.parents)
self.assertEqual(result1.parents, [0])
def test_changed(self):
result = self.repo.changed(self.rev1)
correct = ['a', 'b', 'c/', 'c/d/', 'c/d/e', 'c/d/f']
self.assertEqual(correct, sorted(x.path for x in result))
for x in result:
self.assertIsInstance(x.status, str)
self.assertLessEqual(1, len(x.status))
def test_blame(self):
result = self.repo.blame(self.main_branch, 'a')
self.assertIsInstance(result, list)
self.assertEqual(1, len(result))
self.assertEqual(self.rev1, result[0].rev)
self.assertEqual(getpass.getuser(), result[0].author)
self.assertIsInstance(result[0].date, datetime.datetime)
self.assertEqual('Pisgah'.encode(), result[0].line)
def test_proplist(self):
result = self.repo.proplist(self.rev1)
expected = sorted(['svn:log', 'svn:author', 'svn:date'])
self.assertEqual(expected, sorted(result))
def test_proplist_path(self):
result = self.repo.proplist(self.rev1, 'a')
expected = []
self.assertEqual(expected, result)
def test_compose_rev1(self):
result = self.repo.compose_rev(self.main_branch, self.rev1)
expected = '%s:%d' % (self.main_branch, self.rev1)
self.assertEqual(expected, result)
def test_compose_rev2(self):
rev = '%s:%d' % (self.main_branch, self.rev1)
result = self.repo.compose_rev(self.main_branch, rev)
expected = rev
self.assertEqual(expected, result)
class SvnUnrelatedBranchTest(SvnTest, common.UnrelatedBranchTest):
def test_branches(self):
result = self.repo.branches()
correct = ['HEAD'] + list(map(self.encode_branch, [self.main_branch, 'branch1']))
self.assertEqual(sorted(correct), sorted(result))
class SvnBranchTestStep3(SvnTest, common.BranchTestStep3):
def test_branches(self):
result = self.repo.branches()
correct = ['HEAD'] + list(map(self.encode_branch, [self.main_branch, 'branch1']))
self.assertEqual(sorted(correct), sorted(result))
def test_log_main(self):
result = self.repo.log(revrange=self.main_branch).rev
correct = 2
self.assertEqual(correct, result)
def test_log_branch1(self):
branch1 = self.encode_branch('branch1')
result = self.repo.log(revrange=branch1).rev
correct = 4
self.assertEqual(correct, result)
def test_log_all(self):
result = [x.rev for x in self.repo.log()]
correct = list(range(4, -1, -1))
self.assertEqual(correct, result)
def test_log_None_main(self):
result = [x.rev for x in self.repo.log(revrange=(None, self.main_branch))]
correct = list(range(2, 0, -1))
self.assertEqual(correct, result)
def test_log_None_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1))]
correct = list(range(4, 0, -1))
self.assertEqual(correct, result)
def test_log_main_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(self.main_branch, branch1))]
correct = list(range(4, 2, -1))
self.assertEqual(correct, result)
class SvnBranchTestStep7(SvnTest, common.BranchTestStep7):
def test_branches(self):
result = self.repo.branches()
correct = ['HEAD'] + list(map(
self.encode_branch,
[self.main_branch, 'branch1', 'branch1a', 'branch2']
))
self.assertEqual(sorted(correct), sorted(result))
def test_log_main(self):
result = self.repo.log(revrange=self.main_branch).rev
correct = 5
self.assertEqual(correct, result)
def test_log_branch1(self):
branch1 = self.encode_branch('branch1')
result = self.repo.log(revrange=branch1).rev
correct = 8
self.assertEqual(correct, result)
def test_log_branch1a(self):
branch1a = self.encode_branch('branch1a')
result = self.repo.log(revrange=branch1a).rev
correct = 10
self.assertEqual(correct, result)
def test_log_branch2(self):
branch2 = self.encode_branch('branch2')
result = self.repo.log(revrange=branch2).rev
correct = 7
self.assertEqual(correct, result)
def test_log_all(self):
result = [x.rev for x in self.repo.log()]
correct = list(range(10, -1, -1))
self.assertEqual(correct, result)
def test_log_None_main(self):
result = [x.rev for x in self.repo.log(revrange=(None, self.main_branch))]
correct = [5, 2, 1]
self.assertEqual(correct, result)
def test_log_None_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1))]
correct = [8, 5, 4, 3, 2, 1]
self.assertEqual(correct, result)
def test_log_None_branch1_firstparent(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1), firstparent=True)]
correct = [8, 4, 3, 2, 1]
self.assertEqual(correct, result)
def test_log_None_branch2(self):
branch2 = self.encode_branch('branch2')
result = [x.rev for x in self.repo.log(revrange=(None, branch2))]
correct = [7, 6, 5, 2, 1]
self.assertEqual(correct, result)
def test_log_main_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(self.main_branch, branch1))]
correct = [8, 4, 3]
self.assertEqual(correct, result)
def test_log_main_branch1a(self):
branch1a = self.encode_branch('branch1a')
result = [x.rev for x in self.repo.log(revrange=(self.main_branch, branch1a))]
correct = [10, 9, 8, 4, 3]
self.assertEqual(correct, result)
def test_log_main_branch2(self):
branch2 = self.encode_branch('branch2')
result = [x.rev for x in self.repo.log(revrange=(self.main_branch, branch2))]
correct = [7, 6]
self.assertEqual(correct, result)
class SvnBranchTestStep9(SvnTest, common.BranchTestStep9):
def test_log_main(self):
result = self.repo.log(revrange=self.main_branch).rev
correct = 12
self.assertEqual(correct, result)
def test_log_branch1(self):
branch1 = self.encode_branch('branch1')
result = self.repo.log(revrange=branch1).rev
correct = 11
self.assertEqual(correct, result)
def test_log_all(self):
result = [x.rev for x in self.repo.log()]
correct = list(range(12, -1, -1))
self.assertEqual(correct, result)
def test_log_None_main(self):
result = [x.rev for x in self.repo.log(revrange=(None, self.main_branch))]
correct = [12, 7, 6, 5, 2, 1]
self.assertEqual(correct, result)
def test_log_None_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1))]
correct = [11, 10, 9, 8, 5, 4, 3, 2, 1]
self.assertEqual(correct, result)
def test_log_main_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(self.main_branch, branch1))]
correct = [11, 10, 9, 8, 4, 3]
self.assertEqual(correct, result)
class SvnBranchTestStep11(SvnTest, common.BranchTestStep11):
def test_log_main(self):
result = self.repo.log(revrange=self.main_branch).rev
correct = 12
self.assertEqual(correct, result)
def test_log_branch1(self):
branch1 = self.encode_branch('branch1')
result = self.repo.log(revrange=branch1).rev
correct = 14
self.assertEqual(correct, result)
def test_log_all(self):
result = [x.rev for x in self.repo.log()]
correct = list(range(14, -1, -1))
self.assertEqual(correct, result)
def test_log_None_main(self):
result = [x.rev for x in self.repo.log(revrange=(None, self.main_branch))]
correct = [12, 7, 6, 5, 2, 1]
self.assertEqual(correct, result)
def test_log_None_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1))]
correct = [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
self.assertEqual(correct, result)
def test_log_None_branch1_onlymerges(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1), merges=True)]
correct = [13, 12, 11, 8]
self.assertEqual(correct, result)
def test_log_None_branch1_nomerges(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1), merges=False)]
correct = [14, 10, 9, 7, 6, 5, 4, 3, 2, 1]
self.assertEqual(correct, result)
def test_log_None_branch1_path_b(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(None, branch1), path='/b')]
correct = [11, 4]
self.assertEqual(correct, result)
def test_log_main_branch1(self):
branch1 = self.encode_branch('branch1')
result = [x.rev for x in self.repo.log(revrange=(self.main_branch, branch1))]
correct = [14, 13, 11, 10, 9, 8, 4, 3]
        self.assertEqual(correct, result)
['Equipment'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Equipment'],
'US MULTI-FRAME IMAGE IOD': ['Equipment'],
'ENHANCED X-RAY RF IMAGE IOD': ['Equipment'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Equipment'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Equipment'],
'US IMAGE IOD': ['Equipment'],
'GENERAL ECG IOD': ['Equipment'],
'XRF IMAGE IOD': ['Equipment'],
'ENCAPSULATED CDA IOD': ['Equipment'],
'ENHANCED SR IOD': ['Equipment'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Equipment'],
'GENERAL AUDIO WAVEFORM IOD': ['Equipment'],
'MR IMAGE IOD': ['Equipment'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Equipment'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Equipment'],
'ARTERIAL PULSE WAVEFORM IOD': ['Equipment'],
},
# AcquisitionProtocolName
0x00189423L: {
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
None: ['Image'],
},
# RescaleIntercept
0x00281052L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'CT IMAGE IOD': ['Image'],
None: ['Image', 'Dose', 'Presentation State'],
'CR IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# DistancePupillaryDistance
0x00460060L: {
'AUTOREFRACTION MEASUREMENTS IOD': ['Equipment'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Equipment'],
None: ['Equipment'],
},
# ReferencedBasicAnnotationBoxSequence
0x20100520L: {
'BASIC FILM BOX IOD': ['Basic Film Box'],
None: ['Basic Film Box'],
},
# ChemicalShiftReference
0x00189053L: {
'MR SPECTROSCOPY IOD': ['Equipment'],
None: ['Equipment'],
},
# RespiratoryTriggerType
0x00209250L: {
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Image'],
},
# RescaleType
0x00281054L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'CT IMAGE IOD': ['Image'],
None: ['Image', 'Dose', 'Presentation State'],
'CR IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# ReprojectionMethod
0x00541004L: {
'PET IMAGE IOD': ['Series'],
None: ['Series'],
},
# IlluminationWaveLength
0x00220055L: {
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
None: ['Image'],
},
# WindowCenterWidthExplanation
0x00281055L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
None: ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# DisplaySetsSequence
0x00720200L: {
'HANGING PROTOCOL IOD': ['Hanging Protocol'],
None: ['Hanging Protocol'],
},
# TimezoneOffsetFromUTC
0x00080201L: {
'HANGING PROTOCOL IOD': ['Hanging Protocol'],
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
'RT ION MACHINE VERIFICATION IOD': ['Rt Ion Machine Verification'],
'RT STRUCTURE SET IOD': ['Structure Set'],
'RT PLAN IOD': ['Plan'],
'FILM SESSION IOD': ['Film Session'],
'BASIC FILM BOX IOD': ['Basic Film Box'],
'CR IMAGE IOD': ['Image'],
'RAW DATA IOD': ['Raw Data'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
'ENHANCED MR IMAGE IOD': ['Image'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Waveform'],
'RT TREATMENT SUMMARY RECORD IOD': ['Treatment Record'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Waveform'],
'RESPIRATORY WAVEFORM IOD': ['Waveform'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'BASIC VOICE AUDIO IOD': ['Waveform'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'COLOR PALETTE IOD': ['Color Palette'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'NM IMAGE IOD': ['Image'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'LENSOMETRY MEASUREMENTS IOD': ['Equipment'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENCAPSULATED PDF IOD': ['Encapsulated Document'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'CHEST CAD SR IOD': ['Document'],
'HEMODYNAMIC IOD': ['Waveform'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Equipment'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'INSTANCE AVAILABILITY NOTIFICATION IOD': ['Instance Availability Notification'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Equipment'],
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'SPATIAL REGISTRATION IOD': ['Spatial Registration'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Equipment'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'KERATOMETRY MEASUREMENTS IOD': ['Equipment'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'COMPREHENSIVE SR IOD': ['Document'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'SPATIAL FIDUCIALS IOD': ['Spatial Fiducials'],
'RT ION PLAN IOD': ['Plan'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'PRINT JOB IOD': ['Print Job'],
'RT CONVENTIONAL MACHINE VERIFICATION IOD': ['Rt Conventional Machine Verification'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'RT DOSE IOD': ['Dose'],
'BASIC ANNOTATION BOX IOD': ['Basic Annotation Box'],
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
'AMBULATORY ECG IOD': ['Waveform'],
'PRINTER IOD': ['Printer'],
'PRINTER CONFIGURATION IOD': ['Printer Configuration'],
'SURFACE SEGMENTATION IOD': ['Surface'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'RT BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Deformable Registration'],
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'SC IMAGE IOD': ['Image'],
None: ['Hanging Protocol', 'Presentation State', 'Image', 'Treatment Record', 'Rt Ion Machine Verification', 'Structure Set', 'Plan', 'Film Session', 'Basic Film Box', 'Raw Data', 'Unified Procedure Step', 'Waveform', 'Modality Performed Procedure Step', 'Color Palette', 'Document', 'Equipment', 'Encapsulated Document', 'Spatial Registration', 'Spatial Fiducials', 'Instance Availability Notification', 'Print Job', 'Rt Conventional Machine Verification', 'Measurements', 'Dose', 'Basic Annotation Box', 'General Purpose Performed Procedure Step', 'Printer', 'Printer Configuration', 'Surface', 'Deformable Registration', 'Implant Assembly', 'Implant Template', 'Segmentation', 'Real World Value Mapping', 'Basic Image Box', 'Stereometric Relationship', 'General Purpose Scheduled Procedure Step', 'Media Creation Management', 'Storage Commitment'],
'PROCEDURE LOG IOD': ['Document'],
'SEGMENTATION IOD': ['Segmentation'],
'PET IMAGE IOD': ['Image'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'REAL WORLD VALUE MAPPING IOD': ['Real World Value Mapping'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'COLON CAD SR IOD': ['Document'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Equipment'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'STEREOMETRIC RELATIONSHIP IOD': ['Stereometric Relationship'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
'MEDIA CREATION MANAGEMENT IOD': ['Media Creation Management'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Plan'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Equipment'],
'US IMAGE IOD': ['Image'],
'GENERAL ECG IOD': ['Waveform'],
'XRF IMAGE IOD': ['Image'],
'ENCAPSULATED CDA IOD': ['Encapsulated Document'],
'ENHANCED SR IOD': ['Document'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'GENERAL AUDIO WAVEFORM IOD': ['Waveform'],
'MR IMAGE IOD': ['Image'],
'BASIC IMAGE BOX IOD': ['Basic Image Box'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'ARTERIAL PULSE WAVEFORM IOD': ['Waveform'],
'STORAGE COMMITMENT IOD': ['Storage Commitment'],
},
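    # Lookup sketch (this fragment does not show the dict's binding, so the
    # variable name TAG_TO_IOD_ENTITIES is an assumption): for a given tag,
    # TAG_TO_IOD_ENTITIES[0x00080201L]['CT IMAGE IOD'] -> ['Image'], and the
    # None key holds the union of entity lists across all IODs for that tag.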
# GreenPaletteColorLookupTableData
0x00281202L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Color Palette', 'Presentation State', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
<reponame>Unitek-KL/csp
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
import math
global InterruptVector
InterruptVector = []
global InterruptHandler
InterruptHandler = []
global InterruptHandlerLock
InterruptHandlerLock = []
global InterruptVectorUpdate
global pdecInstanceName
global interruptDepList
interruptDepList = []
global eventDepList
eventDepList = []
interrupt_val = 0x0
evsys_val = 0x0
pin_val = 0x7
pin_inv_val = 0x00
###################################################################################################
########################################## Callbacks #############################################
###################################################################################################
def pdecResolutionCalc(symbol, event):
clock_freq = Database.getSymbolValue("core", pdecInstanceName.getValue()+"_CLOCK_FREQUENCY")
if clock_freq == 0:
clock_freq = 1
prescaler = int(pdecSym_PRESC_PRESC.getSelectedKey()[3:])
resolution = (prescaler * 1000000000.0) / clock_freq
symbol.setLabel("****PDEC Counter resolution is " + str(resolution) + " nS****")
def pdecFreqCalc(symbol, event):
symbol.setValue(Database.getSymbolValue("core", pdecInstanceName.getValue()+"_CLOCK_FREQUENCY") / int(pdecSym_PRESC_PRESC.getSelectedKey()[3:]), 2)
def pdecRevolutionCalc(symbol, event):
symbol.setValue(16 - event["value"], 2)
def pdecOptionVisible(symbol, event):
if(event["id"] == "PDEC_CTRLA_ANGULAR"):
symbol.setMax(pow(2, event["value"]) - 1)
if (event["id"] == "PDEC_CTRLA_REVOLUTION"):
symbol.setMax(pow(2, event["value"]) - 1)
else:
symbol.setVisible(event["value"])
def pdecMAXCMPVisible(symbol, event):
if (event["value"] == 4): #AUTOC mode
symbol.setVisible(True)
else:
symbol.setVisible(False)
def pdecPhaseA(symbol, event):
if(event["value"] == 2 or event["value"] == 3):
symbol.setLabel("Select Quadrature Count")
else:
symbol.setLabel("Select Quadrature Phase A")
def pdecPhaseB(symbol, event):
if(event["value"] == 2 or event["value"] == 3):
symbol.setLabel("Select Quadrature Index")
else:
symbol.setLabel("Select Quadrature Phase B")
def pdecIndex(symbol, event):
if(event["value"] == 2 or event["value"] == 3):
symbol.setVisible(False)
else:
symbol.setVisible(True)
def updatePDECInterruptStatus(symbol, event):
component = symbol.getComponent()
if (event["id"] == "PDEC_INTENSET_OVF") or (event["id"] == "PDEC_INTENSET_VLC") or (event["id"] == "PDEC_INTENSET_DIR"):
if (component.getSymbolValue("PDEC_INTENSET_OVF") or component.getSymbolValue("PDEC_INTENSET_VLC") or
component.getSymbolValue("PDEC_INTENSET_DIR")):
Database.setSymbolValue("core", InterruptVector[0], True, 2)
Database.setSymbolValue("core", InterruptHandlerLock[0], True, 2)
Database.setSymbolValue("core", InterruptHandler[0], pdecInstanceName.getValue() + "_QDECInterruptHandler", 2)
else:
Database.setSymbolValue("core", InterruptVector[0], False, 2)
Database.setSymbolValue("core", InterruptHandlerLock[0], False, 2)
Database.setSymbolValue("core", InterruptHandler[0], pdecInstanceName.getValue() + "_Handler", 2)
else:
mcInstance = int(event["id"].split("_")[-1])
Database.setSymbolValue("core", InterruptVector[mcInstance+1], event["value"], 2)
Database.setSymbolValue("core", InterruptHandlerLock[mcInstance+1], event["value"], 2)
if event["value"] == True:
Database.setSymbolValue("core", InterruptHandler[mcInstance+1], InterruptHandler[mcInstance+1].split("_INTERRUPT_HANDLER")[0] + "_InterruptHandler", 2)
else:
Database.setSymbolValue("core", InterruptHandler[mcInstance+1], InterruptHandler[mcInstance+1].split("_INTERRUPT_HANDLER")[0] + "_Handler", 2)
def updatePDECInterruptWarningStatus(symbol, event):
global InterruptVectorUpdate
component = symbol.getComponent()
if (component.getSymbolValue("PDEC_INTENSET_OVF") or component.getSymbolValue("PDEC_INTENSET_VLC") or
component.getSymbolValue("PDEC_INTENSET_DIR")):
if (Database.getSymbolValue("core", InterruptVectorUpdate[0].split(".")[-1]) == True):
symbol.setVisible(True)
elif (component.getSymbolValue("PDEC_INTENSET_MC_0") == True and
Database.getSymbolValue("core", InterruptVectorUpdate[1].split(".")[-1]) == True):
symbol.setVisible(True)
elif (component.getSymbolValue("PDEC_INTENSET_MC_1") == True and
Database.getSymbolValue("core", InterruptVectorUpdate[2].split(".")[-1]) == True):
symbol.setVisible(True)
else:
symbol.setVisible(False)
def updatePDECClockWarningStatus(symbol, event):
if event["value"] == False:
symbol.setVisible(True)
else:
symbol.setVisible(False)
def pdecEVSYSConfigure(symbol, event):
if ("EVCTRL" in event["id"]):
Database.setSymbolValue("evsys",
"GENERATOR_"+ str(pdecInstanceName.getValue())+ event["id"].replace("PDEC_EVCTRL","") + "_ACTIVE", event["value"], 2)
else:
if ((event["id"] == "PDEC_PHASE_A")):
if(event["value"] == "Input Event" or event["value"] == "Inverted Input Event"):
Database.setSymbolValue("evsys", "USER_"+ str(pdecInstanceName.getValue())+"_EVU_0_READY", True, 2)
else:
Database.setSymbolValue("evsys", "USER_"+ str(pdecInstanceName.getValue())+"_EVU_0_READY", False, 2)
elif((event["id"] == "PDEC_PHASE_B")):
if(event["value"] == "Input Event" or event["value"] == "Inverted Input Event"):
Database.setSymbolValue("evsys", "USER_"+ str(pdecInstanceName.getValue())+"_EVU_1_READY", True, 2)
else:
Database.setSymbolValue("evsys", "USER_"+ str(pdecInstanceName.getValue())+"_EVU_1_READY", False, 2)
elif((event["id"] == "PDEC_INDEX")):
if(event["value"] == "Input Event" or event["value"] == "Inverted Input Event"):
Database.setSymbolValue("evsys", "USER_"+ str(pdecInstanceName.getValue())+"_EVU_2_READY", True, 2)
else:
Database.setSymbolValue("evsys", "USER_"+ str(pdecInstanceName.getValue())+"_EVU_2_READY", False, 2)
def pdecINTENSET(symbol, event):
global interrupt_val
global interruptDepList
component = symbol.getComponent()
for i in range(0, len(interruptDepList)):
if (component.getSymbolValue(interruptDepList[i]) == True):
interrupt_val = interrupt_val | (1 << i)
symbol.setValue(interrupt_val, 2)
def pdecEVCTRL(symbol, event):
global evsys_val
global eventDepList
component = symbol.getComponent()
# for event users
for i in range(0, 3):
if (component.getSymbolValue(eventDepList[i]) == "Input Event"):
evsys_val = evsys_val | (1 << (i + 5))
else:
evsys_val = evsys_val & (~ (1 << (i + 5)))
if (component.getSymbolValue(eventDepList[i]) == "Inverted Input Event"):
evsys_val = evsys_val | (1 << (i + 2))
else:
evsys_val = evsys_val & (~ (1 << (i + 2)))
#for event generators
for i in range(3, len(eventDepList)):
if (component.getSymbolValue(eventDepList[i]) == True):
evsys_val = evsys_val | (1 << (i + 5))
else:
evsys_val = evsys_val & (~ (1 << (i + 5)))
symbol.setValue(evsys_val, 2)
def pdecPIN(symbol, event):
global eventDepList
global pin_val
component = symbol.getComponent()
# for io pins
for i in range(0, 3):
if (component.getSymbolValue(eventDepList[i]) == "IO Pin"):
pin_val = pin_val | (1 << (i ))
else:
pin_val = pin_val & (~ (1 << (i )))
symbol.setValue(pin_val, 2)
def pdecPININV(symbol, event):
global eventDepList
global pin_inv_val
component = symbol.getComponent()
# for io pins
for i in range(0, 3):
if (component.getSymbolValue(eventDepList[i]) == "Inverted IO Pin"):
pin_inv_val = pin_inv_val | (1 << (i ))
else:
pin_inv_val = pin_inv_val & (~ (1 << (i)))
symbol.setValue(pin_inv_val, 2)
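# Worked example (illustrative): eventDepList is built below in
# instantiateComponent in the order PHASE_A, PHASE_B, INDEX, so bit 0 tracks
# Phase A, bit 1 Phase B and bit 2 the Index input. If Phase A and Index are
# "IO Pin" while Phase B uses an event, pdecPIN yields pin_val = 0b101 and
# pdecPININV yields pin_inv_val = 0b000.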
###################################################################################################
########################################## Component #############################################
###################################################################################################
def instantiateComponent(pdecComponent):
""" Function to instantiate pdecComponent to Active Component """
global InterruptVector
global InterruptHandler
global InterruptHandlerLock
global InterruptVectorUpdate
InterruptVectorUpdate = []
global eventDepList
global interruptDepList
global pdecInstanceName
pdecInstanceName = pdecComponent.createStringSymbol("PDEC_INSTANCE_NAME", None)
pdecInstanceName.setVisible(False)
pdecInstanceName.setDefaultValue(pdecComponent.getID().upper())
Log.writeInfoMessage("Running " + pdecInstanceName.getValue())
#clock enable
Database.setSymbolValue("core", pdecInstanceName.getValue()+"_CLOCK_ENABLE", True, 2)
#prescaler
global pdecSym_PRESC_PRESC
pdecSym_PRESC_PRESC = pdecComponent.createKeyValueSetSymbol("PDEC_PRESC_PRESC", None)
pdecSym_PRESC_PRESC.setLabel("Select Prescaler")
pdecNode = ATDF.getNode("/avr-tools-device-file/modules/module@[name=\"PDEC\"]/value-group@[name=\"PDEC_PRESC__PRESC\"]")
pdecValues = []
pdecValues = pdecNode.getChildren()
pdecPrescalerSelectionDefaultValue = 0
for index in range(len(pdecValues)):
pdecPrescalerSelectionKeyName = pdecValues[index].getAttribute("name")
if pdecPrescalerSelectionKeyName == "DIV1":
pdecPrescalerSelectionDefaultValue = index
pdecPrescalerSelectionKeyDescription = pdecValues[index].getAttribute("caption")
pdecPrescalerSelectionKeyValue = pdecValues[index].getAttribute("value")
pdecSym_PRESC_PRESC.addKey(pdecPrescalerSelectionKeyName, pdecPrescalerSelectionKeyValue, pdecPrescalerSelectionKeyDescription)
pdecSym_PRESC_PRESC.setDefaultValue(pdecPrescalerSelectionDefaultValue)
pdecSym_PRESC_PRESC.setOutputMode("Key")
pdecSym_PRESC_PRESC.setDisplayMode("Description")
#clock resolution display
pdecSym_Resolution = pdecComponent.createCommentSymbol("PDEC_Resolution", None)
clock_freq = Database.getSymbolValue("core", pdecInstanceName.getValue()+"_CLOCK_FREQUENCY")
if clock_freq == 0:
clock_freq = 1
resolution = (int(pdecSym_PRESC_PRESC.getSelectedKey()[3:]) * 1000000000.0) / clock_freq
pdecSym_Resolution.setLabel("****PDEC Counter resolution is " + str(resolution) + " ns****")
pdecSym_Resolution.setDependencies(pdecResolutionCalc, ["core."+pdecInstanceName.getValue()+"_CLOCK_FREQUENCY", \
"PDEC_PRESC_PRESC"])
#PDEC clock frequency
pdecSym_Frequency = pdecComponent.createIntegerSymbol("PDEC_FREQUENCY", None)
pdecSym_Frequency.setLabel("Clock Frequency")
pdecSym_Frequency.setVisible(False)
pdecSym_Frequency.setDefaultValue(Database.getSymbolValue("core", pdecInstanceName.getValue()+"_CLOCK_FREQUENCY"))
pdecSym_Frequency.setDependencies(pdecFreqCalc, ["core."+pdecInstanceName.getValue()+"_CLOCK_FREQUENCY", "PDEC_PRESC_PRESC"])
#PDEC operation mode
global pdecSym_OperationMode
pdecSym_OperationMode = pdecComponent.createKeyValueSetSymbol("PDEC_CTRLA_MODE", None)
pdecSym_OperationMode.setLabel("Operating Mode")
pdecSym_OperationMode.setReadOnly(True)
pdecNode = ATDF.getNode("/avr-tools-device-file/modules/module@[name=\"PDEC\"]/value-group@[name=\"PDEC_CTRLA__MODE\"]")
pdecValues = []
pdecValues = pdecNode.getChildren()
for index in range(len(pdecValues)):
pdecKeyName = pdecValues[index].getAttribute("name")
pdecKeyValue = pdecValues[index].getAttribute("value")
pdecSym_OperationMode.addKey(pdecKeyName, pdecKeyValue, pdecKeyName)
pdecSym_OperationMode.setDefaultValue(0)
pdecSym_OperationMode.setOutputMode("Key")
pdecSym_OperationMode.setDisplayMode("Description")
###################################################################################################
#################################### QDEC Configuration #######################################
###################################################################################################
pdecSym_QDEC_MENU = pdecComponent.createMenuSymbol("PDEC_QDEC_MENU", pdecSym_OperationMode)
pdecSym_QDEC_MENU.setLabel("Quadrature")
pdecSym_CTRLA_CONF = pdecComponent.createKeyValueSetSymbol("PDEC_CTRLA_CONF", pdecSym_QDEC_MENU)
pdecSym_CTRLA_CONF.setLabel("Operating Mode")
pdecNode = ATDF.getNode("/avr-tools-device-file/modules/module@[name=\"PDEC\"]/value-group@[name=\"PDEC_CTRLA__CONF\"]")
pdecValues = []
pdecValues = pdecNode.getChildren()
for index in range(len(pdecValues)):
pdecKeyName = pdecValues[index].getAttribute("name")
pdecKeyValue = pdecValues[index].getAttribute("value")
pdecSym_CTRLA_CONF.addKey(pdecKeyName, pdecKeyValue, pdecValues[index].getAttribute("caption"))
pdecSym_CTRLA_CONF.setDefaultValue(0)
pdecSym_CTRLA_CONF.setOutputMode("Key")
pdecSym_CTRLA_CONF.setDisplayMode("Description")
input_options = ["Disabled", "IO Pin", "Inverted IO Pin", "Input Event", "Inverted Input Event"]
pdecSym_PhaseA = pdecComponent.createComboSymbol("PDEC_PHASE_A", pdecSym_QDEC_MENU, input_options)
pdecSym_PhaseA.setLabel("Select Quadrature Phase A")
pdecSym_PhaseA.setDefaultValue("IO Pin")
pdecSym_PhaseA.setDependencies(pdecPhaseA, ["PDEC_CTRLA_CONF"])
eventDepList.append("PDEC_PHASE_A")
pdecSym_PhaseB = pdecComponent.createComboSymbol("PDEC_PHASE_B", pdecSym_QDEC_MENU, input_options)
pdecSym_PhaseB.setLabel("Select Quadrature Phase B")
pdecSym_PhaseB.setDefaultValue("IO Pin")
pdecSym_PhaseB.setDependencies(pdecPhaseB, ["PDEC_CTRLA_CONF"])
eventDepList.append("PDEC_PHASE_B")
index_input_options = ["Disabled", "IO Pin", "Inverted IO Pin", "Input Event", "Inverted Input Event"]
pdecSym_Index = pdecComponent.createComboSymbol("PDEC_INDEX", pdecSym_QDEC_MENU, index_input_options)
pdecSym_Index.setLabel("Select Quadrature Index")
pdecSym_Index.setDefaultValue("IO Pin")
pdecSym_Index.setDependencies(pdecIndex, ["PDEC_CTRLA_CONF"])
eventDepList.append("PDEC_INDEX")
pdecSym_FILTER = pdecComponent.createIntegerSymbol("PDEC_FILTER", pdecSym_QDEC_MENU)
pdecSym_FILTER.setLabel("Select Filter Value")
pdecSym_FILTER.setDefaultValue(5)
pdecSym_FILTER.setMin(0)
pdecSym_FILTER.setMax(255)
pdecSym_CTRLA_SWAP = pdecComponent.createBooleanSymbol("PDEC_CTRLA_SWAP", pdecSym_QDEC_MENU)
pdecSym_CTRLA_SWAP.setLabel("Swap Phase A and B")
pdecSym_CTRLA_SWAP.setDefaultValue(False)
pdecSym_CTRLA_ANGULAR = pdecComponent.createIntegerSymbol("PDEC_CTRLA_ANGULAR", pdecSym_QDEC_MENU)
pdecSym_CTRLA_ANGULAR.setLabel("Select No of Bits for Angular Position")
pdecSym_CTRLA_ANGULAR.setDefaultValue(10)
pdecSym_CTRLA_ANGULAR.setMin(9)
pdecSym_CTRLA_ANGULAR.setMax(16)
pdecSym_CTRLA_REVOLUTION = pdecComponent.createIntegerSymbol("PDEC_CTRLA_REVOLUTION", pdecSym_QDEC_MENU)
pdecSym_CTRLA_REVOLUTION.setLabel("No of Bits for Revolution Counter")
pdecSym_CTRLA_REVOLUTION.setReadOnly(True)
pdecSym_CTRLA_REVOLUTION.setDefaultValue(6)
pdecSym_CTRLA_REVOLUTION.setDependencies(pdecRevolutionCalc, ["PDEC_CTRLA_ANGULAR"])
pdecSym_CTRLA_PEREN = pdecComponent.createBooleanSymbol("PDEC_CTRLA_PEREN", pdecSym_QDEC_MENU)
pdecSym_CTRLA_PEREN.setLabel("Enable Period Control")
pdecSym_CTRLA_PEREN.setDefaultValue(True)
pdecSym_CC0_ANGULAR = pdecComponent.createIntegerSymbol("PDEC_CC0_ANGULAR", pdecSym_CTRLA_PEREN)
pdecSym_CC0_ANGULAR.setLabel("Quadrature Pulses per Revolution")
pdecSym_CC0_ANGULAR.setVisible(True)
pdecSym_CC0_ANGULAR.setDefaultValue(pow(2, pdecSym_CTRLA_ANGULAR.getValue()) -1 )
pdecSym_CC0_ANGULAR.setMin(0)
pdecSym_CC0_ANGULAR.setMax(pow(2, pdecSym_CTRLA_ANGULAR.getValue()) - 1)
pdecSym_CC0_ANGULAR.setDependencies(pdecOptionVisible, ["PDEC_CTRLA_PEREN", "PDEC_CTRLA_ANGULAR"])
pdecSym_CC0_REVOLUTION = pdecComponent.createIntegerSymbol("PDEC_CC0_REVOLUTION", pdecSym_CTRLA_PEREN)
pdecSym_CC0_REVOLUTION.setLabel("Max Number of Revolutions")
pdecSym_CC0_REVOLUTION.setVisible(True)
pdecSym_CC0_REVOLUTION.setDefaultValue(pow(2, pdecSym_CTRLA_REVOLUTION.getValue()) - 1)
pdecSym_CC0_REVOLUTION.setMin(0)
pdecSym_CC0_REVOLUTION.setMax(pow(2, pdecSym_CTRLA_REVOLUTION.getValue()) - 1)
pdecSym_CC0_REVOLUTION.setDependencies(pdecOptionVisible, ["PDEC_CTRLA_PEREN", "PDEC_CTRLA_REVOLUTION"])
pdecSym_COMPARE = pdecComponent.createBooleanSymbol("PDEC_COMPARE", pdecSym_QDEC_MENU)
pdecSym_COMPARE.setLabel("Enable Compare Control")
pdecSym_COMPARE.setDefaultValue(False)
pdecSym_CC1_ANGULAR = pdecComponent.createIntegerSymbol("PDEC_CC1_ANGULAR", pdecSym_COMPARE)
pdecSym_CC1_ANGULAR.setLabel("Compare Value for Quadrature Pulses")
pdecSym_CC1_ANGULAR.setVisible(False)
pdecSym_CC1_ANGULAR.setDefaultValue((pow(2, pdecSym_CTRLA_ANGULAR.getValue())-1)/2 )
pdecSym_CC1_ANGULAR.setMin(0)
pdecSym_CC1_ANGULAR.setMax(pow(2, pdecSym_CTRLA_ANGULAR.getValue()) - 1)
pdecSym_CC1_ANGULAR.setDependencies(pdecOptionVisible, ["PDEC_COMPARE", "PDEC_CTRLA_ANGULAR"])
pdecSym_CC1_REVOLUTION = pdecComponent.createIntegerSymbol("PDEC_CC1_REVOLUTION", pdecSym_COMPARE)
pdecSym_CC1_REVOLUTION.setLabel("Compare Value for Revolutions")
pdecSym_CC1_REVOLUTION.setVisible(False)
pdecSym_CC1_REVOLUTION.setDefaultValue((pow(2, pdecSym_CTRLA_REVOLUTION.getValue()) - 1)/2)
pdecSym_CC1_REVOLUTION.setMin(0)
pdecSym_CC1_REVOLUTION.setMax(pow(2, pdecSym_CTRLA_REVOLUTION.getValue()) - 1)
pdecSym_CC1_REVOLUTION.setDependencies(pdecOptionVisible, ["PDEC_COMPARE", "PDEC_CTRLA_REVOLUTION"])
pdecSym_CTRLA_MAXCMP = pdecComponent.createIntegerSymbol("PDEC_CTRLA_MAXCMP", pdecSym_QDEC_MENU)
pdecSym_CTRLA_MAXCMP.setLabel("Select Maximum Consecutive Missing Pulses")
pdecSym_CTRLA_MAXCMP.setDefaultValue(4)
pdecSym_CTRLA_MAXCMP.setVisible(False)
pdecSym_CTRLA_MAXCMP.setDependencies(pdecMAXCMPVisible, ["PDEC_CTRLA_CONF"])
pdecSym_Interrupts_Menu = pdecComponent.createMenuSymbol("PDEC_INTERRUPTS", pdecSym_QDEC_MENU)
pdecSym_Interrupts_Menu.setLabel("Interrupts")
pdecSym_INTENSET_OVF = pdecComponent.createBooleanSymbol("PDEC_INTENSET_OVF", pdecSym_Interrupts_Menu)
pdecSym_INTENSET_OVF.setLabel("Enable Overflow Interrupt")
interruptDepList.append("PDEC_INTENSET_OVF")
# Empty placeholder keeps the bit index aligned for the error (ERR) interrupt, which has no menu symbol
interruptDepList.append("")
pdecSym_INTENSET_VLC = pdecComponent.createBooleanSymbol("PDEC_INTENSET_VLC", pdecSym_Interrupts_Menu)
pdecSym_INTENSET_VLC.setLabel("Enable Velocity Interrupt")
interruptDepList.append("PDEC_INTENSET_VLC")
pdecSym_INTENSET_DIR = pdecComponent.createBooleanSymbol("PDEC_INTENSET_DIR", pdecSym_Interrupts_Menu)
pdecSym_INTENSET_DIR.setLabel("Enable Direction Interrupt")
interruptDepList.append("PDEC_INTENSET_DIR")
pdecSym_INTENSET_MC0 = pdecComponent.createBooleanSymbol("PDEC_INTENSET_MC_0", pdecSym_Interrupts_Menu)
pdecSym_INTENSET_MC0.setLabel("Enable Compare Match 0 Interrupt")
fig.add_subplot(2,len(thetas)+1,i+2)
DFM.hist2d(x_obs[0], x_obs[1], levels=[0.68, 0.95],
range=[ranges[0], ranges[1]], bins=20, color='k',
contour_kwargs={'linestyles': 'dashed'},
plot_datapoints=False, fill_contours=False, plot_density=False, ax=sub)
DFM.hist2d(_x[0], _x[1], levels=[0.68, 0.95],
range=[ranges[0], ranges[1]], bins=20, color='C0',
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
if i in [0, 1, 2]:
sub.scatter(_x[0][UVred], _x[1][UVred], c='r', s=2)
sub.scatter(_x[0][starburst], _x[1][starburst], c='b', s=2)
sub.text(0.95, 0.95, name, ha='right', va='top', transform=sub.transAxes, fontsize=25)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([])
if i == 0:
sub.set_ylabel(r'$G-R$', fontsize=20)
else:
sub.set_yticklabels([])
sub.set_ylim(ranges[1])
sub.set_yticks([0., 0.5, 1., 1.5])
# R vs FUV-NUV
sub = fig.add_subplot(2,len(thetas)+1,i+len(thetas)+3)
DFM.hist2d(x_obs[0], x_obs[2], levels=[0.68, 0.95],
range=[ranges[0], ranges[2]], bins=20, color='k',
contour_kwargs={'linestyles': 'dashed'},
plot_datapoints=False, fill_contours=False, plot_density=False, ax=sub)
DFM.hist2d(_x[0], _x[2], levels=[0.68, 0.95],
range=[ranges[0], ranges[2]], bins=20, color='C0',
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
if i in [0, 1, 2]:
sub.scatter(_x[0][UVred], _x[2][UVred], c='r', s=2)
sub.scatter(_x[0][starburst], _x[2][starburst], c='b', s=2)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([-20, -21, -22, -23])
if i == 0:
sub.set_ylabel(r'$FUV - NUV$', fontsize=20)
else:
sub.set_yticklabels([])
sub.set_ylim(ranges[2])
_plth0, = sub.plot([], [], c='k', ls='--')
sub.legend([_plth0], ['SDSS'], loc='lower right', handletextpad=0.1,
fontsize=20)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.set_xlabel(r'$M_r$ luminosity', labelpad=10, fontsize=25)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
fig.subplots_adjust(wspace=0.1, hspace=0.1)
ffig = os.path.join(fig_dir, 'simba_UVred.png')
fig.savefig(ffig, bbox_inches='tight')
plt.close()
return None
def simba_Mparticlelim():
''' examine the impact of imposing a star particle limit on SIMBA
'''
#########################################################################
# read in SDSS measurements
#########################################################################
r_edges, gr_edges, fn_edges, _ = dustInfer.sumstat_obs(statistic='2d', return_bins=True)
dr = r_edges[1] - r_edges[0]
dgr = gr_edges[1] - gr_edges[0]
dfn = fn_edges[1] - fn_edges[0]
ranges = [(r_edges[0], r_edges[-1]), (-0.05, 1.7), (-1., 4.)]
sdss = Catalog('tinker')
sdss_M_fuv, sdss_M_nuv, _, sdss_M_g, sdss_M_r, _, _ = sdss.data['NSA_ABSMAG'].T
mr_complete = (sdss_M_r < -20.)
x_obs = [-1.*sdss_M_r[mr_complete],
sdss_M_g[mr_complete] - sdss_M_r[mr_complete],
sdss_M_fuv[mr_complete] - sdss_M_nuv[mr_complete]]
sfr0_obs = np.zeros(len(x_obs[0])).astype(bool)
#########################################################################
# read in posteriors
#########################################################################
theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
abc_run('simba'), 'theta.t%i.dat' % nabc['simba']))
theta_simba = np.median(theta_T, axis=0)
theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
abc_run('tng'), 'theta.t%i.dat' % nabc['tng']))
theta_tng = np.median(theta_T, axis=0)
#########################################################################
# run FM for different scenarios
#########################################################################
# simba without dust
x_simba_nodust, sim_simba, _ = _sim_observables('simba', np.zeros(6),
no_Mr_cut=True)
mp_lim = sim_simba['logmstar'] > np.log10(1.82e9)
x_simba_nodust_mplim = x_simba_nodust[:,mp_lim]
x_simba, _, _ = _sim_observables('simba', theta_simba,
no_Mr_cut=True)
x_simba_mplim = x_simba[:,mp_lim]
#########################################################################
# plotting
#########################################################################
xs = [x_simba_nodust, x_simba_nodust_mplim, x_simba, x_simba_mplim]
names = ['SIMBA + no dust', 'SIMBA + no dust + $M_*$ limit', 'SIMBA + EDP', 'SIMBA + EDP + $M_*$ limit']
fig = plt.figure(figsize=(5*(len(xs)+1),10))
# SFR-M* relation
sub = fig.add_subplot(2, len(xs)+1, 1)
DFM.hist2d(sim_simba['logmstar'], sim_simba['logsfr.inst'],
levels=[0.68, 0.95], range=[[7.8, 12.], [-4., 2.]],
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
sub.scatter(sim_simba['logmstar'][~mp_lim],
sim_simba['logsfr.inst'][~mp_lim],
c='r', s=2)
sub.set_xlabel(r'log( $M_*$ [$M_\odot$] )', labelpad=5, fontsize=25)
sub.set_xlim(9., 12.5)
sub.set_xticks([9., 10., 11., 12.])
sub.set_ylabel(r'log ( SFR $[M_\odot \, yr^{-1}]$ )', fontsize=25)
sub.set_ylim([-3., 2.])
for i, _x, name in zip(range(len(xs)), xs, names):
# R vs (G - R)
sub = fig.add_subplot(2,len(xs)+1,i+2)
DFM.hist2d(x_obs[0], x_obs[1], levels=[0.68, 0.95],
range=[ranges[0], ranges[1]], bins=20, color='k',
contour_kwargs={'linestyles': 'dashed'},
plot_datapoints=False, fill_contours=False, plot_density=False, ax=sub)
DFM.hist2d(_x[0], _x[1], levels=[0.68, 0.95],
range=[ranges[0], ranges[1]], bins=20, color='C0',
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
if i in [0]:
sub.scatter(_x[0][~mp_lim], _x[1][~mp_lim], c='r', s=2)
sub.text(0.95, 0.95, name, ha='right', va='top', transform=sub.transAxes, fontsize=25)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([])
if i == 0:
sub.set_ylabel(r'$G-R$', fontsize=20)
else:
sub.set_yticklabels([])
sub.set_ylim(ranges[1])
sub.set_yticks([0., 0.5, 1., 1.5])
# R vs FUV-NUV
sub = fig.add_subplot(2,len(xs)+1,i+len(xs)+3)
DFM.hist2d(x_obs[0], x_obs[2], levels=[0.68, 0.95],
range=[ranges[0], ranges[2]], bins=20, color='k',
contour_kwargs={'linestyles': 'dashed'},
plot_datapoints=False, fill_contours=False, plot_density=False, ax=sub)
DFM.hist2d(_x[0], _x[2], levels=[0.68, 0.95],
range=[ranges[0], ranges[2]], bins=20, color='C0',
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
if i in [0]:
sub.scatter(_x[0][~mp_lim], _x[2][~mp_lim], c='r', s=2)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([-20, -21, -22, -23])
if i == 0:
sub.set_ylabel(r'$FUV - NUV$', fontsize=20)
else:
sub.set_yticklabels([])
sub.set_ylim(ranges[2])
_plth0, = sub.plot([], [], c='k', ls='--')
sub.legend([_plth0], ['SDSS'], loc='lower right', handletextpad=0.1,
fontsize=20)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.set_xlabel(r'$M_r$ luminosity', labelpad=10, fontsize=25)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
fig.subplots_adjust(wspace=0.1, hspace=0.1)
ffig = os.path.join(fig_dir, 'simba_Mparticle_limit.png')
fig.savefig(ffig, bbox_inches='tight')
plt.close()
return None
def simba_subpops():
''' examine where different SIMBA subpopulations end up
'''
#########################################################################
# read in SDSS measurements
#########################################################################
r_edges, gr_edges, fn_edges, _ = dustInfer.sumstat_obs(statistic='2d', return_bins=True)
dr = r_edges[1] - r_edges[0]
dgr = gr_edges[1] - gr_edges[0]
dfn = fn_edges[1] - fn_edges[0]
ranges = [(r_edges[0], r_edges[-1]), (-0.05, 1.7), (-1., 4.)]
sdss = Catalog('tinker')
sdss_M_fuv, sdss_M_nuv, _, sdss_M_g, sdss_M_r, _, _ = sdss.data['NSA_ABSMAG'].T
sdss_nmgy_fuv, sdss_nmgy_nuv = sdss.data['NSA_NMGY'][:,0], sdss.data['NSA_NMGY'][:,1]
obs_cuts = ((sdss_M_r < -20.) &
(sdss_M_fuv != -999) & (sdss_M_fuv < -10) &
(sdss_M_nuv != -999) & (sdss_M_nuv < -10) &
(sdss_nmgy_fuv > 0) & (sdss_nmgy_nuv > 0)
)
x_obs = [-1.*sdss_M_r[obs_cuts],
sdss_M_g[obs_cuts] - sdss_M_r[obs_cuts],
sdss_M_fuv[obs_cuts] - sdss_M_nuv[obs_cuts]]
sfr0_obs = np.zeros(len(x_obs[0])).astype(bool)
#########################################################################
# read in posteriors
#########################################################################
theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
abc_run('simba'), 'theta.t%i.dat' % nabc['simba']))
theta_simba = np.median(theta_T, axis=0)
theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
abc_run('eagle'), 'theta.t%i.dat' % nabc['eagle']))
theta_eagle = np.median(theta_T, axis=0)
#########################################################################
# plotting
#########################################################################
thetas = [np.zeros(6), theta_simba, theta_eagle]
names = ['SIMBA + no dust', 'SIMBA + EDA', 'SIMBA + EAGLE EDA']
fig = plt.figure(figsize=(5*(len(thetas)+1),10))
for i, theta, name in zip(range(len(thetas)), thetas, names):
print(theta, name)
_x, sim_simba, sfr0_simba = _sim_observables('simba', theta, no_Mr_cut=True)
# mass particle limit
mp_lim = (sim_simba['logmstar'] < np.log10(1.82e9))
UVred = sfr0_simba
starburst = (sim_simba['logsfr.inst'] > -0.2 + 0.7 * (sim_simba['logmstar'] - 9))
sf = (sim_simba['logsfr.inst'] - sim_simba['logmstar'] >= -10)
q = (sim_simba['logsfr.inst'] - sim_simba['logmstar'] < -10)
subpops = [mp_lim, q, UVred, sf, starburst]
colors = ['k', 'C1', 'r', 'C0', 'b']
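# e.g. a galaxy with log M* = 10 is flagged as a starburst when its
# instantaneous log SFR exceeds -0.2 + 0.7 * (10 - 9) = 0.5, i.e. SFR > ~3.2 Msun/yr.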
if i == 0:
# SFR-M* relation
sub = fig.add_subplot(2, len(thetas)+1, 1)
DFM.hist2d(sim_simba['logmstar'], sim_simba['logsfr.inst'],
levels=[0.68, 0.95], range=[[7.8, 12.], [-4., 2.]],
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
for subpop, cols in zip(subpops, colors):
sub.scatter(sim_simba['logmstar'][subpop],
np.clip(sim_simba['logsfr.inst'][subpop], -2.99, None),
c=cols, s=2)
sub.set_xlabel(r'log( $M_*$ [$M_\odot$] )', labelpad=5, fontsize=25)
sub.set_xlim(9., 12.5)
sub.set_xticks([9., 10., 11., 12.])
sub.set_ylabel(r'log ( SFR $[M_\odot \, yr^{-1}]$ )', fontsize=25)
sub.set_ylim([-3., 2.])
print(_x[0][UVred])
print(_x[1][UVred])
print(_x[2][UVred])
print()
# R vs (G - R)
sub = fig.add_subplot(2,len(thetas)+1,i+2)
DFM.hist2d(x_obs[0], x_obs[1], levels=[0.68, 0.95],
range=[ranges[0], ranges[1]], bins=20, color='k',
contour_kwargs={'linestyles': 'dashed'},
plot_datapoints=False, fill_contours=False, plot_density=False, ax=sub)
DFM.hist2d(_x[0], _x[1], levels=[0.68, 0.95],
range=[ranges[0], ranges[1]], bins=20, color='C0',
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
for subpop, cols in zip(subpops, colors):
sub.scatter(_x[0][subpop], _x[1][subpop], c=cols, s=2)
sub.text(0.95, 0.95, name, ha='right', va='top', transform=sub.transAxes, fontsize=25)
sub.set_xlim(18., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([])
if i == 0:
sub.set_ylabel(r'$G-R$', fontsize=20)
else:
sub.set_yticklabels([])
sub.set_ylim(ranges[1])
sub.set_yticks([0., 0.5, 1., 1.5])
# R vs FUV-NUV
sub = fig.add_subplot(2,len(thetas)+1,i+len(thetas)+3)
DFM.hist2d(x_obs[0], x_obs[2], levels=[0.68, 0.95],
range=[ranges[0], ranges[2]], bins=20, color='k',
contour_kwargs={'linestyles': 'dashed'},
plot_datapoints=False, fill_contours=False, plot_density=False, ax=sub)
DFM.hist2d(_x[0], _x[2], levels=[0.68, 0.95],
range=[ranges[0], ranges[2]], bins=20, color='C0',
contour_kwargs={'linewidths': 0.5},
plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
for subpop, cols in zip(subpops, colors):
sub.scatter(_x[0][subpop], _x[2][subpop], c=cols, s=2)
sub.set_xlim(18., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([-20, -21, -22, -23])
if i == 0:
sub.set_ylabel(r'$FUV - NUV$', fontsize=20)
else:
sub.set_yticklabels([])
sub.set_ylim(ranges[2])
_plth0, = sub.plot([], [], c='k', ls='--')
sub.legend([_plth0], ['SDSS'], loc='lower right', handletextpad=0.1,
fontsize=20)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.set_xlabel(r'$M_r$ luminosity', labelpad=10, fontsize=25)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
fig.subplots_adjust(wspace=0.1, hspace=0.1)
ffig = os.path.join(fig_dir, 'simba_subpop.png')
fig.savefig(ffig, bbox_inches='tight')
plt.close()
return None
def simba_sfrmin():
''' the minimum SFR in SIMBA is super low compared to TNG or EAGLE:
SFR_min = -5.946548143370941 vs SFR_min = -3.648916 or -3.2225731776106885
This means that the SFR=0 population in SIMBA is attenuated quite
differently than the SFR=0 populations in TNG or EAGLE. In this function,
I'll examine how that might impact things
'''
#########################################################################
# read in SDSS measurements
#########################################################################
r_edges, gr_edges, fn_edges, _ = dustInfer.sumstat_obs(statistic='2d', return_bins=True)
dr = r_edges[1] - r_edges[0]
'view_fs_ogle_summary': [['name', 'raDeg', 'decDeg'],1008],
'cbats': [['name', 'raDeg', 'decDeg'],1009],
'view_cbats_sn': [['name', 'raDeg', 'decDeg'],1010],
'view_cbats_psn': [['name', 'raDeg', 'decDeg'],1011],
# 2015-03-16 KWS Added fs_brightsnlist_discoveries (bright SN list)
'fs_brightsnlist_discoveries': [['name', 'raDeg', 'decDeg'],1012],
# 2015-04-21 KWS Added fs_asassn_sne and fs_asassn_transients
'fs_asassn_sne': [['ID', 'RA', 'decl', 'Redshift'],1013],
'fs_asassn_transients': [['name', 'raDeg', 'decDeg'],1014],
'fs_tns_transients': [['objectName', 'raDeg', 'decDeg'],1015],
# 2019-10-02 KWS Added tcs_cat_tns catalogue (on the panstarrs1 database)
'tcs_cat_tns': [['tns_name', 'ra', 'decl'],1016],
# 2015-01-26 KWS Added tcs_photpipe_detections for quick & dirty asteroid/fakes crossmatch
'tcs_photpipe_detections': [['id', 'RA', 'Dec'],2000],
# 2015-08-13 KWS Added tcs_tphot_detections for quick & dirty asteroid/fakes crossmatch
'tcs_tphot_detections': [['id', 'ra', 'dec'],2001],
# 2016-02-02 KWS Note that atlas_diff_objects ONLY works with HTM only queries.
'atlas_diff_objects': [['id', 'ra', 'dec'],3000],
# 2016-05-03 KWS Note that atlas_diff_detections ONLY works with HTM only queries.
'atlas_diff_detections': [['id', 'ra', 'dec'],3001],
# 2017-08-30 KWS Added atlas_metadata for ATLAS footprint searching
'atlas_metadata': [['id', 'ra', 'dec'],3002],
# 2017-09-26 KWS Added atlas_metadataddc for ATLAS footprint searching
'atlas_metadataddc': [['id', 'ra', 'dec'],3003],
# 2019-07-05 KWS Search just the views.
'atlas_v_followup1': [['id', 'ra', 'dec'],3004],
'atlas_v_followup2': [['id', 'ra', 'dec'],3005],
'atlas_v_followup3': [['id', 'ra', 'dec'],3006],
'atlas_v_followup4': [['id', 'ra', 'dec'],3007],
'atlas_v_followup5': [['id', 'ra', 'dec'],3008],
'atlas_v_followup6': [['id', 'ra', 'dec'],3009],
'psdb_web_v_followup_bad_presentation': [['id', 'ra_psf', 'dec_psf'],4000],
'psdb_web_v_followup_conf_presentation': [['id', 'ra_psf', 'dec_psf'],4001],
'psdb_web_v_followup_good_presentation': [['id', 'ra_psf', 'dec_psf'],4002],
'psdb_web_v_followup_poss_presentation': [['id', 'ra_psf', 'dec_psf'],4003],
'psdb_web_v_followup_pend_presentation': [['id', 'ra_psf', 'dec_psf'],4004],
'psdb_web_v_followup_attic_presentation': [['id', 'ra_psf', 'dec_psf'],4005],
'psdb_web_v_followup_zoo_presentation': [['id', 'ra_psf', 'dec_psf'],4006],
'psdb_web_v_followup_tbd_presentation': [['id', 'ra_psf', 'dec_psf'],4007],
'psdb_web_v_followup_fast_presentation': [['id', 'ra_psf', 'dec_psf'],4008],
'psdb_web_v_followup_all_presentation': [['id', 'ra_psf', 'dec_psf'],4100],
}
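# Illustrative lookup (not part of the table itself): each entry unpacks into
# the quick-return columns and the catalogue's numeric ID, e.g.
# columns, catalogueId = CAT_ID_RA_DEC_COLS['fs_tns_transients']
# # columns == ['objectName', 'raDeg', 'decDeg'], catalogueId == 1015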
# 2012-02-02 KWS Cone Searcher based on the new SWIG C++ code. Need speed.
# 2012-03-25 KWS Added django so that we can call the Django dict cursor if necessary.
def coneSearch(ra, dec, radius, tableName, htmLevel = 16, queryType = QUICK, conn = None, django = False):
"""coneSearch.
Args:
ra:
dec:
radius:
tableName:
htmLevel:
queryType:
conn:
django:
"""
# 2012-02-02 KWS Require database connections for cone searching
import MySQLdb
# 2012-02-02 KWS Introduced a new SWIG htmCircle library for cone searching
from gkhtm import _gkhtm as htmCircle
# Attempt a cone search of the given tableName. Use internal models if conn
# is None, otherwise use a given database connection (allows it to be called
# externally as part of a script).
# Code returns a list of lists. First value in sublist is separation. Second
# is the row of data requested from the database.
message = ""
if htmLevel not in (16, 20):
# We don't support anything other than Level 16 or Level 20 queries
return "Must be HTM level 16 or 20", []
# Check that RA and DEC are in decimal degrees. If not, assume sexagesimal and attempt to convert
if ':' in str(ra):
ra = sexToDec(ra, ra=True)
if ':' in str(dec):
dec = sexToDec(dec, ra=False)
# 2015-05-29 KWS Sometimes we send RA and Dec in string format, so
# need to make sure that they are doubles.
ra = float(ra)
dec = float(dec)
try:
quickColumns = CAT_ID_RA_DEC_COLS[tableName][0]
except KeyError as e:
return "Table %s not recognised." % tableName, []
htmWhereClause = htmCircle.htmCircleRegion(htmLevel, ra, dec, radius)
cartesians = calculate_cartesians(ra, dec)
cartesianClause = 'and (cx * %.17f + cy * %.17f + cz * %.17f >= cos(%.17f))' % (cartesians[0], cartesians[1], cartesians[2], math.radians(radius/3600.0))
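# The dot product of two unit vectors equals the cosine of the angle between
# them, so this clause keeps only rows whose angular separation from
# (ra, dec) is at most `radius` arcseconds.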
columns = ['*']
if queryType == QUICK:
columns = quickColumns
elif queryType == COUNT:
columns = ['count(*) number']
query = 'select ' + ','.join(columns) + ' from %s' % tableName + htmWhereClause + cartesianClause
#print query
results = []
if conn:
# We have a database connection, so use it to make a call to the database
# DO THE QUERY
try:
if django:
cursor = conn.cursor ()
else:
cursor = conn.cursor (MySQLdb.cursors.DictCursor)
cursor.execute(query)
if django:
resultSet = [ dict( (d[0],c) for d, c in zip(cursor.description, row) ) for row in cursor ]
else:
resultSet = cursor.fetchall ()
except MySQLdb.Error as e:
return "Error %d: %s" % (e.args[0], e.args[1]), []
if resultSet:
if queryType == COUNT:
results = [[0.0, resultSet[0]['number']]]
return "Count", results
# Calculate the angular separation for each row
for row in resultSet:
if tableName == 'tcs_guide_star_cat' or tableName == 'tcs_cat_v_guide_star_ps':
# Guide star cat RA and DEC are in RADIANS
separation = getAngularSeparation(ra, dec, math.degrees(row[CAT_ID_RA_DEC_COLS[tableName][0][1]]), math.degrees(row[CAT_ID_RA_DEC_COLS[tableName][0][2]]))
else:
separation = getAngularSeparation(ra, dec, row[CAT_ID_RA_DEC_COLS[tableName][0][1]], row[CAT_ID_RA_DEC_COLS[tableName][0][2]])
results.append([separation, row])
# Sort by separation
results.sort(key=itemgetter(0))
else:
message = "No matches from %s." % tableName
else:
message = query
return message, results
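# Hedged usage sketch (the connection parameters are assumptions; any MySQLdb
# connection to a database containing the target catalogue will do):
#
# import MySQLdb
# conn = MySQLdb.connect(host='localhost', user='reader', db='catalogues')
# message, results = coneSearch(179.276, -0.354, 3.0, 'fs_tns_transients', conn=conn)
# for separation, row in results:
#     print(separation, row['objectName'])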
# 2016-02-02 KWS Pure HTM conesearch. Does NOT use unit cartesian coords.
def coneSearchHTM(ra, dec, radius, tableName, htmLevel = 16, queryType = QUICK, conn = None, django = False, prefix = "htm", suffix = "ID"):
"""HTM only cone search. Assumes a column in the catalogue which by default is called htm<n>ID where <n> is the HTM level.
Args:
ra:
dec:
radius:
tableName:
htmLevel:
queryType:
conn:
django:
prefix: HTM column prefix - default: "htm"
suffix: HTM column suffix - default: "ID"
"""
# 2012-02-02 KWS Require database connections for cone searching
import MySQLdb
# 2012-02-02 KWS Introduced a new SWIG htmCircle library for cone searching
from gkhtm import _gkhtm as htmCircle
# Attempt a cone search of the given tableName. Use internal models if conn
# is None, otherwise use a given database connection (allows it to be called
# externally as part of a script).
# Code returns a list of lists. First value in sublist is separation. Second
# is the row of data requested from the database.
message = ""
if htmLevel not in (16, 20):
# We don't support anything other than Level 16 or Level 20 queries
return "Must be HTM level 16 or 20", []
# Check that RA and DEC are in decimal degrees. If not, assume sexagesimal and attempt to convert
if ':' in str(ra):
ra = sexToDec(ra, ra=True)
if ':' in str(dec):
dec = sexToDec(dec, ra=False)
ra = float(ra)
dec = float(dec)
try:
quickColumns = CAT_ID_RA_DEC_COLS[tableName][0]
except KeyError as e:
return "Table %s not recognised." % tableName, []
# 2020-07-08 KWS htmCircleRegion has been modified to take two optional parameters. Because
# we must stay compatible with an older SWIG version, the parameters cannot be named,
# but prefix and suffix are completely optional. Default values are htm and ID
# if omitted. If you need to pass the suffix only, you MUST specify the prefix
# as well. If you don't want the suffix, pass prefix and "".
htmWhereClause = htmCircle.htmCircleRegion(htmLevel, ra, dec, radius, prefix, suffix)
# We now have a query that returns a SUPERSET. We need to trim the superset once
# the query is done.
columns = ['*']
if queryType == QUICK:
columns = quickColumns
query = 'select ' + ','.join(columns) + ' from %s' % tableName + htmWhereClause
#print query
results = []
if conn:
# We have a database connection, so use it to make a call to the database
# DO THE QUERY
try:
if django:
cursor = conn.cursor ()
else:
cursor = conn.cursor (MySQLdb.cursors.DictCursor)
cursor.execute(query)
if django:
resultSet = [ dict( (d[0],c) for d, c in zip(cursor.description, row) ) for row in cursor ]
else:
resultSet = cursor.fetchall ()
except MySQLdb.Error as e:
return "Error %d: %s" % (e.args[0], e.args[1]), []
if resultSet:
# Calculate the angular separation for each row
for row in resultSet:
if tableName == 'tcs_guide_star_cat' or tableName == 'tcs_cat_v_guide_star_ps':
# Guide star cat RA and DEC are in RADIANS
separation = getAngularSeparation(ra, dec, math.degrees(row[CAT_ID_RA_DEC_COLS[tableName][0][1]]), math.degrees(row[CAT_ID_RA_DEC_COLS[tableName][0][2]]))
else:
separation = getAngularSeparation(ra, dec, row[CAT_ID_RA_DEC_COLS[tableName][0][1]], row[CAT_ID_RA_DEC_COLS[tableName][0][2]])
# For HTM only queries, only add the results if the separation is less than the radius.
# This is because HTM queries will always be a superset.
if separation < radius:
results.append([separation, row])
# Sort by separation
results.sort(key=itemgetter(0))
else:
message = "No matches from %s." % tableName
else:
message = query
return message, results
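# Hedged example: for a catalogue whose level-16 HTM IDs live in a column
# named htm16 (prefix "htm", empty suffix) rather than the default htm16ID:
#
# message, results = coneSearchHTM(150.1, 2.2, 5.0, 'atlas_diff_objects',
#                                  conn=conn, prefix='htm', suffix='')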
def coneSearchHTMCassandra (cassandraSession, ra, dec, radius, tableName, racol = 'ra', deccol = 'dec', refineResults = True, columns = None):
"""coneSearchHTMCassandra.
Args:
cassandraSession: Connection to Cassandra cluster
ra:
dec:
radius:
tableName:
racol:
deccol:
refineResults:
columns:
"""
| |
antenna rot. corr., vertical channel
'SQI': normalized_coherent_power,
# Signal quality index
'SQIh': normalized_coherent_power,
# Signal quality index, horizontal channel
'SQIv': None, # Signal quality index, vertical channel
'CCOR': None, # Clutter power correction
'CCORh': None, # Clutter power correction, horizontal channel
'CCORv': None, # Clutter power correction, vertical channel
'SIGPOW': None, # Signal Power
'SNR': None, # Raw signal to noise ratio
'SNRh': None, # Raw signal to noise ratio, horizontal channel
'SNRv': None, # Raw signal to noise ratio, vertical channel
'DFT': None, # Signal spectrum amplitude
'DFTh': None, # Signal spectrum amplitude, horizontal channel
'DFTv': None, # Signal spectrum amplitude, vertical channel
'LOG': None, # Logarithmic amplitude 10*log Isq + Qsq
'LOGh': None, # Logarithmic amplitude, horizontal channel
'LOGv': None, # Logarithmic amplitude, vertical channel
'CMAP': None, # Censor map
# A '1' ending on differential reflectivity fields indicates that the
# moment has been calculated using a 1st LAG algorithm.
'ZDR': corrected_differential_reflectivity,
# Differential reflectivity from corrected timeseries
'UZDR': differential_reflectivity,
# Diff. refl. from uncorrected timeseries
'AZDR': None, # Diff. refl., rainfall atten. corr., corr t.s.
'ZDR1': None, # Diff. refl., corr. t.s., 1st LAG algo.
'UZDR1': None, # Diff. refl., uncorr. t.s., 1st LAG algo.
'AZDR1': None, # Diff. refl., rain. atten. corr., corr. t.s., 1st LAG
'PHI': corrected_differential_phase,
# Differential phase from corrected timeseries
'PHIDP': corrected_differential_phase,
# Differential phase from corrected timeseries
'UPHIDP': differential_phase,
# Diff. phase from uncorrected timeseries.
'PHIH': None, # Diff. phase, corr. t.s., horizontal channel
'UPHIH': None, # Diff. phase, uncorr t.s., horizontal channel
'KDP': specific_differential_phase,
# Specific differential phase
'RHO': cross_correlation_ratio,
# Cross correlation coefficient from corrected t.s.
'RHOHV': cross_correlation_ratio,
# Cross correlation coefficient from corrected t.s.
'URHOHV': None, # Cross correlation coefficient from uncorr. t.s.
'RHOH': None, # Cross corr., corr. t.s., horizontal transmit only
'URHOH': None, # Cross corr., uncorr t.s., horizontal transmit only
'LDR': linear_depolarization_ratio,
# Linear depolarization ratio from corr. t.s.
'ULDR': None, # Linear depolarization ratio from uncorr. t.s.
}
# UF field mappings
uf_field_mapping = {
# UF 2 letter field: radar field
'DZ': reflectivity,
'CZ': corrected_reflectivity,
'ZE': corrected_reflectivity,
'ZT': total_power,
'UZ': total_power,
'VR': velocity,
'VE': corrected_velocity,
'SW': spectrum_width,
'ZD': differential_reflectivity,
'DR': corrected_differential_reflectivity,
'CD': corrected_differential_reflectivity,
'LR': linear_depolarization_ratio,
'PH': differential_phase,
'KD': specific_differential_phase,
'RH': cross_correlation_ratio,
'SQ': normalized_coherent_power,
'NP': normalized_coherent_power,
'HC': radar_echo_classification,
}
# Mapping used when writing UF files
write_uf_mapping = {
# UF 2 letter field: radar field
reflectivity: 'DZ',
corrected_reflectivity: 'CZ',
total_power: 'ZT',
velocity: 'VR',
corrected_velocity: 'VE',
spectrum_width: 'SW',
differential_reflectivity: 'ZD',
corrected_differential_reflectivity: 'DR',
linear_depolarization_ratio: 'LR',
differential_phase: 'PH',
specific_differential_phase: 'KD',
cross_correlation_ratio: 'RH',
normalized_coherent_power: 'SQ',
radar_echo_classification: 'HC',
}
FIELD_MAPPINGS = { # Required variable
'sigmet': sigmet_field_mapping,
'nexrad_archive': nexrad_archive_field_mapping,
'nexrad_cdm': nexrad_cdm_field_mapping,
'nexrad_level3': nexrad_level3_mapping,
'cfradial': cfradial_field_mapping,
'mdv': mdv_field_mapping,
'rsl': rsl_field_mapping,
'chl': chl_field_mapping,
'gamic': gamic_field_mapping,
'uf': uf_field_mapping,
'write_uf': write_uf_mapping,
}
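# Sketch of how a reader might consume these tables (illustrative; the real
# readers live elsewhere): translate a file-local field code to the canonical
# field name, skipping codes that map to None.
#
# mapping = FIELD_MAPPINGS['uf']
# radar_field = mapping.get('DZ')  # -> the canonical reflectivity field name
# if radar_field is not None:
#     pass  # attach the moment data under radar_field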
def velocity_limit(container=None, selection=0):
import pyart
if isinstance(container, pyart.core.Radar):
try:
if selection >= 0 and selection < container.nsweeps:
vel = container.get_nyquist_vel(selection,
check_uniform=False)
else:
vel = container.get_nyquist_vel(0, check_uniform=False)
return (-vel, vel)
except LookupError:
# return (-42., 42.)
return (-15.8, 15.8)
else:
# return (-42., 42.)
return (-15.8, 15.8)
def spectrum_width_limit(container=None, selection=0):
import pyart
if isinstance(container, pyart.core.Radar):
try:
if selection >= 0 and selection < container.nsweeps:
vel = container.get_nyquist_vel(selection,
check_uniform=False)
else:
vel = container.get_nyquist_vel(0, check_uniform=False)
return (0, vel)
except LookupError:
#return (0., 4.)
return (0., 15.8)
else:
#return (0., 4.)
return (0., 15.8)
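# Illustrative helper (an assumption, not part of this module's API): entries
# in DEFAULT_FIELD_LIMITS below are either a static (vmin, vmax) tuple or a
# callable such as velocity_limit that derives limits from a radar container.
def resolve_field_limits(field_name, container=None, selection=0):
    limits = DEFAULT_FIELD_LIMITS.get(field_name)
    if callable(limits):  # e.g. velocity_limit, spectrum_width_limit
        return limits(container, selection)
    return limits  # static tuple, or None for unknown fields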
DEFAULT_FIELD_COLORMAP = {
# field name : colormap
reflectivity: 'pyart_NWSRef',
corrected_reflectivity: 'pyart_NWSRef',
total_power: 'pyart_NWSRef',
unfiltered_reflectivity: 'pyart_NWSRef',
corrected_unfiltered_reflectivity: 'pyart_NWSRef',
reflectivity_vv: 'pyart_NWSRef',
corrected_reflectivity_vv: 'pyart_NWSRef',
unfiltered_reflectivity_vv: 'pyart_NWSRef',
reflectivity_bias: 'pyart_NWSRef',
signal_power_hh: 'pyart_NWSRef',
signal_power_vv: 'pyart_NWSRef',
volumetric_reflectivity: 'pyart_NWSRef',
volumetric_reflectivity_vv: 'pyart_NWSRef',
bird_density: 'pyart_NWSRef',
bird_reflectivity: 'pyart_NWSRef',
radar_cross_section_hh: 'pyart_NWSRef',
radar_cross_section_vv: 'pyart_NWSRef',
spectral_reflectivity_hh: 'pyart_NWSRef',
spectral_reflectivity_vv: 'pyart_NWSRef',
unfiltered_spectral_reflectivity_hh: 'pyart_NWSRef',
unfiltered_spectral_reflectivity_vv: 'pyart_NWSRef',
avg_reflectivity: 'pyart_NWSRef',
quant05_reflectivity: 'pyart_NWSRef',
quant10_reflectivity: 'pyart_NWSRef',
quant20_reflectivity: 'pyart_NWSRef',
quant50_reflectivity: 'pyart_NWSRef',
quant80_reflectivity: 'pyart_NWSRef',
quant90_reflectivity: 'pyart_NWSRef',
quant95_reflectivity: 'pyart_NWSRef',
noisedBm_hh: 'pyart_NWSRef',
noisedBm_vv: 'pyart_NWSRef',
noisedBZ_hh: 'pyart_NWSRef',
noisedBZ_vv: 'pyart_NWSRef',
noisedBADU_hh: 'pyart_NWSRef',
noisedBADU_vv: 'pyart_NWSRef',
noiseADU_hh: 'pyart_NWSRef',
noiseADU_vv: 'pyart_NWSRef',
noise_pos_h: 'pyart_LangRainbow12',
noise_pos_v: 'pyart_LangRainbow12',
signal_to_noise_ratio: 'pyart_Carbone17',
signal_to_noise_ratio_hh: 'pyart_Carbone17',
signal_to_noise_ratio_vv: 'pyart_Carbone17',
clutter_correction_ratio_hh: 'pyart_Carbone17',
clutter_correction_ratio_vv: 'pyart_Carbone17',
visibility: 'pyart_Carbone17',
frequency_of_occurrence: 'pyart_Carbone17',
occurrence: 'pyart_Carbone17',
sigma_zh: 'pyart_NWSRef',
stat_test_lag1: 'pyart_NWSRef',
stat_test_lag2: 'pyart_NWSRef',
wide_band_noise: 'pyart_NWSRef',
fields_difference: 'pyart_BuDRd18',
field_mask: 'Greys_r',
field_texture: 'Greys_r',
sun_hit_power_h: 'pyart_NWSRef',
sun_hit_power_v: 'pyart_NWSRef',
sun_hit_differential_reflectivity: 'pyart_RefDiff',
sun_est_power_h: 'pyart_NWSRef',
sun_est_power_v: 'pyart_NWSRef',
sun_est_differential_reflectivity: 'pyart_RefDiff',
velocity: 'pyart_BuDRd18',
corrected_velocity: 'pyart_BuDRd18',
unfiltered_velocity: 'pyart_BuDRd18',
dealiased_corrected_velocity: 'pyart_BuDRd18',
dealiased_velocity: 'pyart_BuDRd18',
velocity_vv: 'pyart_BuDRd18',
unfiltered_velocity_vv: 'pyart_BuDRd18',
eastward_wind_component: 'pyart_BuDRd18',
northward_wind_component: 'pyart_BuDRd18',
vertical_wind_component: 'pyart_BuDRd18',
azimuthal_horizontal_wind_component: 'pyart_BuDRd18',
vertical_wind_shear: 'pyart_BuDRd18',
retrieved_velocity: 'pyart_BuDRd18',
retrieved_velocity_std: 'pyart_NWSRef',
velocity_difference: 'pyart_BuDRd18',
wind_speed: 'pyart_NWSRef',
wind_direction: 'pyart_Wild25',
avg_velocity: 'pyart_BuDRd18',
quant05_velocity: 'pyart_BuDRd18',
quant10_velocity: 'pyart_BuDRd18',
quant20_velocity: 'pyart_BuDRd18',
quant50_velocity: 'pyart_BuDRd18',
quant80_velocity: 'pyart_BuDRd18',
quant90_velocity: 'pyart_BuDRd18',
quant95_velocity: 'pyart_BuDRd18',
avg_corrected_velocity: 'pyart_BuDRd18',
quant05_corrected_velocity: 'pyart_BuDRd18',
quant10_corrected_velocity: 'pyart_BuDRd18',
quant20_corrected_velocity: 'pyart_BuDRd18',
quant50_corrected_velocity: 'pyart_BuDRd18',
quant80_corrected_velocity: 'pyart_BuDRd18',
quant90_corrected_velocity: 'pyart_BuDRd18',
quant95_corrected_velocity: 'pyart_BuDRd18',
spectrum_width: 'pyart_NWS_SPW',
corrected_spectrum_width: 'pyart_NWS_SPW',
unfiltered_spectrum_width: 'pyart_NWS_SPW',
spectrum_width_vv: 'pyart_NWS_SPW',
unfiltered_spectrum_width_vv: 'pyart_NWS_SPW',
turbulence: 'pyart_NWS_SPW',
normalized_coherent_power: 'pyart_Carbone17',
differential_reflectivity: 'pyart_RefDiff',
corrected_differential_reflectivity: 'pyart_RefDiff',
unfiltered_differential_reflectivity: 'pyart_RefDiff',
differential_reflectivity_in_precipitation: 'pyart_RefDiff',
differential_reflectivity_in_snow: 'pyart_RefDiff',
differential_reflectivity_column_height: 'pyart_RefDiff',
spectral_differential_reflectivity: 'pyart_RefDiff',
unfiltered_spectral_differential_reflectivity: 'pyart_RefDiff',
cross_correlation_ratio: 'pyart_RefDiff',
corrected_cross_correlation_ratio: 'pyart_RefDiff',
unfiltered_cross_correlation_ratio: 'pyart_RefDiff',
uncorrected_cross_correlation_ratio: 'pyart_RefDiff',
logarithmic_cross_correlation_ratio: 'pyart_RefDiff',
cross_correlation_ratio_in_rain: 'pyart_RefDiff',
spectral_copolar_correlation_coefficient: 'pyart_RefDiff',
unfiltered_spectral_copolar_correlation_coefficient: 'pyart_RefDiff',
mean_phase: 'pyart_Wild25',
differential_phase: 'pyart_Wild25',
unfolded_differential_phase: 'pyart_Wild25',
corrected_differential_phase: 'pyart_Wild25',
uncorrected_differential_phase: 'pyart_Wild25',
uncorrected_unfiltered_differential_phase: 'pyart_Wild25',
system_differential_phase: 'pyart_Wild25',
spectral_phase_hh: 'pyart_Wild25',
spectral_phase_vv: 'pyart_Wild25',
spectral_differential_phase: 'pyart_Wild25',
unfiltered_spectral_phase_hh: 'pyart_Wild25',
unfiltered_spectral_phase_vv: 'pyart_Wild25',
unfiltered_spectral_differential_phase: 'pyart_Wild25',
specific_differential_phase: 'pyart_Theodore16',
corrected_specific_differential_phase: 'pyart_Theodore16',
uncorrected_specific_differential_phase: 'pyart_Theodore16',
uncorrected_unfiltered_specific_differential_phase: 'pyart_Theodore16',
linear_depolarization_ratio: 'pyart_SCook18',
linear_depolarization_ratio_h: 'pyart_SCook18',
linear_depolarization_ratio_v: 'pyart_SCook18',
circular_depolarization_ratio: 'pyart_SCook18',
rain_rate: 'pyart_RRate11',
radar_estimated_rain_rate: 'pyart_RRate11',
corrected_radar_estimated_rain_rate: 'pyart_RRate11',
avg_radar_estimated_rain_rate: 'pyart_RRate11',
quant05_radar_estimated_rain_rate: 'pyart_RRate11',
quant10_radar_estimated_rain_rate: 'pyart_RRate11',
quant20_radar_estimated_rain_rate: 'pyart_RRate11',
quant50_radar_estimated_rain_rate: 'pyart_RRate11',
quant80_radar_estimated_rain_rate: 'pyart_RRate11',
quant90_radar_estimated_rain_rate: 'pyart_RRate11',
quant95_radar_estimated_rain_rate: 'pyart_RRate11',
rainfall_accumulation: 'pyart_RRate11',
sun_hit_h: 'pyart_LangRainbow12',
sun_hit_v: 'pyart_LangRainbow12',
sun_hit_zdr: 'pyart_LangRainbow12',
precipitation_type: 'pyart_LangRainbow12',
radar_echo_classification: 'pyart_LangRainbow12',
corrected_radar_echo_classification: 'pyart_LangRainbow12',
radar_echo_classification_MF: 'pyart_LangRainbow12',
hydroclass_entropy: 'pyart_LangRainbow12',
hydroclass_confidence: 'pyart_LangRainbow12',
proportion_AG: 'pyart_LangRainbow12',
proportion_CR: 'pyart_LangRainbow12',
proportion_LR: 'pyart_LangRainbow12',
proportion_RP: 'pyart_LangRainbow12',
proportion_RN: 'pyart_LangRainbow12',
proportion_VI: 'pyart_LangRainbow12',
proportion_WS: 'pyart_LangRainbow12',
proportion_MH: 'pyart_LangRainbow12',
proportion_IH: 'pyart_LangRainbow12',
probability_AG: 'pyart_LangRainbow12',
probability_CR: 'pyart_LangRainbow12',
probability_LR: 'pyart_LangRainbow12',
probability_RP: 'pyart_LangRainbow12',
probability_RN: 'pyart_LangRainbow12',
probability_VI: 'pyart_LangRainbow12',
probability_WS: 'pyart_LangRainbow12',
probability_MH: 'pyart_LangRainbow12',
probability_IH: 'pyart_LangRainbow12',
radar_echo_id: 'pyart_LangRainbow12',
clutter_exit_code: 'pyart_LangRainbow12',
melting_layer: 'pyart_LangRainbow12',
height_over_iso0: 'pyart_BuDRd18',
specific_attenuation: 'pyart_Carbone17',
path_integrated_attenuation: 'pyart_Carbone17',
specific_differential_attenuation: 'pyart_Carbone17',
path_integrated_differential_attenuation: 'pyart_Carbone17',
corrected_specific_attenuation: 'pyart_Carbone17',
corrected_path_integrated_attenuation: 'pyart_Carbone17',
corrected_specific_differential_attenuation: 'pyart_Carbone17',
corrected_path_integrated_differential_attenuation: 'pyart_Carbone17',
differential_phase_texture: 'pyart_BlueBrown11',
differential_reflectivity_texture: 'pyart_BlueBrown11',
reflectivity_texture: 'pyart_BlueBrown11',
cross_correlation_ratio_texture: 'pyart_BlueBrown11',
height: 'pyart_SCook18',
interpolated_profile: 'pyart_SCook18',
radial_wind_speed: 'pyart_BuDRd18',
#radial_wind_speed_ci:
#radial_wind_speed_status:
doppler_spectrum_width: 'pyart_NWS_SPW',
# doppler_spectrum_mean_error:
atmospherical_structures_type: 'pyart_LangRainbow12',
relative_beta: 'pyart_NWSRef',
absolute_beta: 'pyart_NWSRef',
cnr: 'pyart_Carbone17',
# appropriate colors are matplotlib Greys, gray, etc.
HRV: 'Greys_r', # 'pyart_Gray9',
VIS006: 'Greys_r', # 'pyart_Gray9',
VIS008: 'Greys_r', # 'pyart_Gray9',
IR_016: 'Greys_r', # 'pyart_Gray9_r',
IR_039: 'pyart_NWSRef',
WV_062: 'pyart_NWSRef',
WV_073: 'pyart_NWSRef',
IR_087: 'pyart_NWSRef',
IR_097: 'pyart_NWSRef',
IR_108: 'pyart_NWSRef',
IR_120: 'pyart_NWSRef',
IR_134: 'pyart_NWSRef',
CTH: 'pyart_NWSRef',
HRV_norm: 'Greys_r', # 'pyart_Gray9',
VIS006_norm: 'Greys_r', # 'pyart_Gray9',
VIS008_norm: 'Greys_r', # 'pyart_Gray9',
IR_016_norm: 'Greys_r', # 'pyart_Gray9_r',
# Additional reflectivity like fields
'CZ': 'pyart_NWSRef',
'DZ': 'pyart_NWSRef',
'AZ': 'pyart_NWSRef',
'Z': 'pyart_NWSRef',
'dbz': 'pyart_NWSRef',
'DBZ': 'pyart_NWSRef',
'dBZ': 'pyart_NWSRef',
'DBZH': 'pyart_NWSRef',
'DBZ_S': 'pyart_NWSRef',
'DBZ_K': 'pyart_NWSRef',
'reflectivity_horizontal': 'pyart_NWSRef',
'corr_reflectivity': 'pyart_NWSRef',
}
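# Illustrative fallback (assumption): plotting code can default to a generic
# colormap for fields without an entry here, e.g.
# cmap = DEFAULT_FIELD_COLORMAP.get(field_name, 'viridis')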
# map each field to a limit or a limit function
DEFAULT_FIELD_LIMITS = {
# field name : limits
#reflectivity: (-30., 75.),
reflectivity: (-30., 85.),
avg_reflectivity: (-30., 75.),
quant05_reflectivity: (-30., 75.),
quant10_reflectivity: (-30., 75.),
quant20_reflectivity: (-30., 75.),
quant50_reflectivity: (-30., 75.),
quant80_reflectivity: (-30., 75.),
quant90_reflectivity: (-30., 75.),
quant95_reflectivity: (-30., 75.),
bird_reflectivity: (-30., 75.),
corrected_reflectivity: (-30., 75.),
total_power: (-30., 75.),
# unfiltered_reflectivity: (-30., 75.),
unfiltered_reflectivity: (-30., 85.),
corrected_unfiltered_reflectivity: (-30., 75.),
# reflectivity_vv: (-30., 75.),
reflectivity_vv: (-30., 85.),
corrected_reflectivity_vv: (-30., 75.),
# unfiltered_reflectivity_vv: (-30., 75.),
unfiltered_reflectivity_vv: (-30., 85.),
signal_to_noise_ratio: (-5., 30.),
signal_to_noise_ratio_hh: (-5., 30.),
signal_to_noise_ratio_vv: (-5., 30.),
noisedBZ_hh: (-40., 10.),
noisedBZ_vv: (-40., 10.),
reflectivity_bias: (-30., 30.),
volumetric_reflectivity: (20., 60.),
volumetric_reflectivity_vv: (20., 60.),
bird_density: (0., 400.),
radar_cross_section_hh: (-50., 55.),
radar_cross_section_vv: (-50., 55.),
sigma_zh: (0., 10.),
stat_test_lag1: (0., 10.),
stat_test_lag2: (0., 10.),
wide_band_noise: (0., 25.),
# spectral_reflectivity_hh: (-60., 45.),
# spectral_reflectivity_vv: (-60., 45.),
# unfiltered_spectral_reflectivity_hh: (-60., 45.),
# unfiltered_spectral_reflectivity_vv: (-60., 45.),
signal_power_hh: (-130., 0.),
signal_power_vv: (-130., 0.),
sun_hit_power_h: (-120., -90.),
sun_hit_power_v: (-120., -90.),
sun_hit_differential_reflectivity: (-2., 2.),
sun_est_power_h: (-120., -90.),
sun_est_power_v: (-120., -90.),
sun_est_differential_reflectivity: (-2., 2.),
velocity: velocity_limit,
avg_velocity: velocity_limit,
quant05_velocity: velocity_limit,
quant10_velocity: velocity_limit,
quant20_velocity: velocity_limit,
quant50_velocity: velocity_limit,
quant80_velocity: velocity_limit,
quant90_velocity: velocity_limit,
quant95_velocity: velocity_limit,
avg_corrected_velocity: velocity_limit,
quant05_corrected_velocity: velocity_limit,
quant10_corrected_velocity: velocity_limit,
quant20_corrected_velocity: velocity_limit,
quant50_corrected_velocity: velocity_limit,
quant80_corrected_velocity: velocity_limit,
quant90_corrected_velocity: velocity_limit,
quant95_corrected_velocity: velocity_limit,
corrected_velocity: velocity_limit,
unfiltered_velocity: velocity_limit,
velocity_vv: velocity_limit,
unfiltered_velocity_vv: velocity_limit,
dealiased_corrected_velocity: velocity_limit,
dealiased_velocity: velocity_limit,
eastward_wind_component: velocity_limit,
northward_wind_component: velocity_limit,
vertical_wind_component: velocity_limit,
azimuthal_horizontal_wind_component: velocity_limit,
vertical_wind_shear: velocity_limit,
retrieved_velocity: velocity_limit,
retrieved_velocity_std: (0., 15.),
velocity_difference: (-15., 15.),
wind_speed: (0., 50.),
wind_direction: (0., 360.),
spectrum_width: spectrum_width_limit,
corrected_spectrum_width: spectrum_width_limit,
unfiltered_spectrum_width: spectrum_width_limit,
spectrum_width_vv: spectrum_width_limit,
unfiltered_spectrum_width_vv: spectrum_width_limit,
turbulence: spectrum_width_limit,
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 11:55:08 2018
@author: <NAME>
"""
import pytest
import itertools
import numpy as np
import pandas as pd
import scipy.sparse as sps
from sklearn.datasets import make_blobs
from sklearn.linear_model import Ridge
from sklearn.base import is_classifier, is_regressor
from sklearn.exceptions import NotFittedError
from sklearn.feature_extraction.text import CountVectorizer
import pickle
from tests.helpers.testing_help import get_sample_data, get_sample_df
from aikit.tools.helper_functions import diff
from aikit.tools.data_structure_helper import get_type
from aikit.enums import DataTypes
from aikit.cross_validation import create_scoring
from aikit.transformers.base import (
TruncatedSVDWrapper,
_KMeansTransformer,
KMeansTransformer,
_NumImputer,
NumImputer,
BoxCoxTargetTransformer,
_CdfScaler,
CdfScaler,
PCAWrapper,
)
from aikit.transformers.base import _index_with_number, PassThrough, FeaturesSelectorClassifier
from aikit.datasets.datasets import load_titanic
# In[]
dfX, y, *_ = load_titanic()
def test_TruncatedSVDWrapper():
df = get_sample_df(100, seed=123)
cols = []
for j in range(10):
cols.append("num_col_%d" % j)
df["num_col_%d" % j] = np.random.randn(df.shape[0])
# 1) regular case : drop other columns
svd = TruncatedSVDWrapper(n_components=5, columns_to_use=cols)
res1 = svd.fit_transform(df)
assert res1.shape == (100, 5)
assert get_type(res1) == DataTypes.DataFrame
assert list(res1.columns) == ["SVD__%d" % j for j in range(5)]
assert not res1.isnull().any().any()
assert svd.get_feature_names() == list(res1.columns)
# 2) we keep the original columns as well
svd = TruncatedSVDWrapper(n_components=5, columns_to_use=cols, drop_used_columns=False, drop_unused_columns=False)
res2 = svd.fit_transform(df)
assert res2.shape == (100, 5 + df.shape[1])
assert get_type(res2) == DataTypes.DataFrame
assert list(res2.columns) == list(df.columns) + ["SVD__%d" % j for j in range(5)]
assert svd.get_feature_names() == list(df.columns) + ["SVD__%d" % j for j in range(5)]
assert not res2.isnull().any().any()
assert (res2.loc[:, list(df.columns)] == df).all().all()
# 3) we keep only the untouched columns
svd = TruncatedSVDWrapper(n_components=5, columns_to_use=cols, drop_used_columns=True, drop_unused_columns=False)
res3 = svd.fit_transform(df)
assert res3.shape == (100, 3 + 5)
assert list(res3.columns) == ["float_col", "int_col", "text_col"] + ["SVD__%d" % j for j in range(5)]
assert svd.get_feature_names() == ["float_col", "int_col", "text_col"] + ["SVD__%d" % j for j in range(5)]
assert (
(res3.loc[:, ["float_col", "int_col", "text_col"]] == df.loc[:, ["float_col", "int_col", "text_col"]])
.all()
.all()
)
###################################
### same thing but with regex ###
###################################
# 1) Regular case : 'drop' other columns
svd = TruncatedSVDWrapper(n_components=5, columns_to_use=["num_col_"], regex_match=True)
res1 = svd.fit_transform(df)
assert res1.shape == (100, 5)
assert get_type(res1) == DataTypes.DataFrame
assert list(res1.columns) == ["SVD__%d" % j for j in range(5)]
assert not res1.isnull().any().any()
assert svd.get_feature_names() == list(res1.columns)
# 2) Keep original columns
svd = TruncatedSVDWrapper(
n_components=5,
columns_to_use=["num_col_"],
drop_used_columns=False,
drop_unused_columns=False,
regex_match=True,
)
res2 = svd.fit_transform(df)
assert res2.shape == (100, 5 + df.shape[1])
assert get_type(res2) == DataTypes.DataFrame
assert list(res2.columns) == list(df.columns) + ["SVD__%d" % j for j in range(5)]
assert svd.get_feature_names() == list(df.columns) + ["SVD__%d" % j for j in range(5)]
assert not res2.isnull().any().any()
assert (res2.loc[:, list(df.columns)] == df).all().all()
# 3) Keep only the untouched columns
svd = TruncatedSVDWrapper(
n_components=5, columns_to_use=["num_col_"], drop_used_columns=True, drop_unused_columns=False, regex_match=True
)
res3 = svd.fit_transform(df)
assert res3.shape == (100, 3 + 5)
assert list(res3.columns) == ["float_col", "int_col", "text_col"] + ["SVD__%d" % j for j in range(5)]
assert svd.get_feature_names() == ["float_col", "int_col", "text_col"] + ["SVD__%d" % j for j in range(5)]
assert (
(res3.loc[:, ["float_col", "int_col", "text_col"]] == df.loc[:, ["float_col", "int_col", "text_col"]])
.all()
.all()
)
### Delta with numpy ###
xx = df.values
columns_to_use = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
svd = TruncatedSVDWrapper(
n_components=5, columns_to_use=columns_to_use, drop_used_columns=True, drop_unused_columns=False
)
res4 = svd.fit_transform(xx)
assert list(res4.columns) == [0, 1, 2] + ["SVD__%d" % i for i in range(5)]
assert svd.get_feature_names() == [0, 1, 2] + ["SVD__%d" % i for i in range(5)]
input_features = ["COL_%d" % i for i in range(xx.shape[1])]
assert svd.get_feature_names(input_features) == ["COL_0", "COL_1", "COL_2"] + ["SVD__%d" % i for i in range(5)]
# Keep
svd = TruncatedSVDWrapper(
n_components=5, columns_to_use=columns_to_use, drop_used_columns=False, drop_unused_columns=False
)
res2 = svd.fit_transform(xx)
assert list(res2.columns) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + ["SVD__%d" % i for i in range(5)]
assert svd.get_feature_names() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + ["SVD__%d" % i for i in range(5)]
assert svd.get_feature_names(input_features) == input_features + ["SVD__%d" % i for i in range(5)]
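# Recap of the flag combinations exercised above (sketch, restating the asserts):
# the default call returns only the SVD components; with drop_used_columns=False
# and drop_unused_columns=False every original column is kept alongside the
# components; with drop_used_columns=True and drop_unused_columns=False only the
# untouched columns are kept alongside the components.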
def test_FeaturesSelectorClassifier_get_feature_names():
vect = CountVectorizer(analyzer="char", ngram_range=(1, 3))
df = get_sample_df(100, seed=123)
xx = vect.fit_transform(df["text_col"])
y = 1 * (np.random.rand(xx.shape[0]) > 0.5)
sel = FeaturesSelectorClassifier(n_components=10)
sel.fit_transform(xx, y)
ff0 = vect.get_feature_names()
ff1 = sel.get_feature_names()
assert len(diff(ff1, list(range(xx.shape[1])))) == 0
ff2 = sel.get_feature_names(input_features=ff0)
assert len(ff1) == len(ff2)
for f1, f2 in zip(ff1, ff2):
assert ff0[f1] == f2
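# Put differently (sketch, same objects as above): without input_features the
# selector reports raw column indices, which map back to vectorizer tokens:
#   [ff0[i] for i in ff1] == ff2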
def test_KMeansTransformer():
Xtrain, cl = make_blobs(n_samples=1000, n_features=10, centers=5)
# plt.plot(Xtrain[:,0],Xtrain[:,1],".")
kmeans = _KMeansTransformer(n_clusters=5, result_type="probability")
kmeans.fit(Xtrain)
Xres = kmeans.transform(Xtrain)
X = np.array([[0] * 10])
kmeans.transform(X)
kmeans2 = _KMeansTransformer(n_clusters=5, result_type="probability", temperature=0.1)
Xres2 = kmeans2.fit_transform(Xtrain)
kmeans3 = _KMeansTransformer(n_clusters=5, result_type="probability", temperature=0.01)
Xres3 = kmeans3.fit_transform(Xtrain)
p = Xres.max(axis=1)
p2 = Xres2.max(axis=1)
p3 = Xres3.max(axis=1)
# plt.plot(p,p2,".")
# plt.plot(p,p3,".")
# plt.plot(p2,p3,".")
assert p.mean() > p2.mean()
assert p2.mean() > p3.mean()
assert Xres.shape == (1000, 5)
assert Xres.min() >= 0
assert Xres.max() <= 1
assert pd.isnull(Xres).sum() == 0
assert np.max(np.abs(Xres.sum(axis=1) - 1)) <= 10 ** (-10)
for result_type in ("distance", "log_distance", "inv_distance", "probability"):
kmeans = _KMeansTransformer(n_clusters=5, result_type=result_type, random_state=123)
kmeans.fit(Xtrain)
Xres = kmeans.transform(Xtrain)
Xres2 = kmeans.transform(X)
assert Xres.shape == (1000, 5)
assert Xres.min() >= 0
assert pd.isnull(Xres).sum() == 0
### Same thing but with Wrapper
kmeans = KMeansTransformer(n_clusters=5, result_type="probability")
kmeans.fit(Xtrain)
Xres = kmeans.transform(Xtrain)
kmeans2 = KMeansTransformer(n_clusters=5, result_type="probability", temperature=0.1)
Xres2 = kmeans2.fit_transform(Xtrain)
kmeans3 = KMeansTransformer(n_clusters=5, result_type="probability", temperature=0.01)
Xres3 = kmeans3.fit_transform(Xtrain)
p = Xres.max(axis=1)
p2 = Xres2.max(axis=1)
p3 = Xres3.max(axis=1)
# plt.plot(p,p2,".")
# plt.plot(p,p3,".")
assert p.mean() > p2.mean()
assert p2.mean() > p3.mean()
assert Xres.shape == (1000, 5)
assert Xres.min().min() >= 0
assert Xres.max().max() <= 1
assert Xres.isnull().sum().sum() == 0
assert np.max(np.abs(Xres.sum(axis=1) - 1)) <= 10 ** (-10)
for result_type in ("distance", "log_distance", "inv_distance", "probability"):
kmeans = KMeansTransformer(n_clusters=5, result_type=result_type, random_state=123)
kmeans.fit(Xtrain)
Xres = kmeans.transform(Xtrain)
Xres2 = kmeans.transform(X)
assert Xres.shape == (1000, 5)
assert Xres.min().min() >= 0
assert Xres.isnull().sum().sum() == 0
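# Why smaller `temperature` values flatten the cluster probabilities
# (consistent with the p.mean() > p2.mean() > p3.mean() asserts above).
# A minimal sketch, assuming the probabilities are tempered roughly as
# p_T proportional to p ** T; the exact formula inside _KMeansTransformer
# is an assumption here, not verified against its source.
def _tempered(p, temperature):
    """Row-wise renormalization of p ** temperature; as temperature -> 0
    every row tends toward the uniform vector, so the row maxima shrink."""
    q = p ** temperature
    return q / q.sum(axis=1, keepdims=True)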
def test__index_with_number():
x = np.random.randn(10)
assert _index_with_number(x).all()
res = np.array([True] * 10)
x2 = x.copy().astype(object)  # np.object is deprecated in modern numpy; the builtin works everywhere
res[[0, 1, 3]] = False
x2[[0, 1, 3]] = "string"
assert (_index_with_number(x2) == res).all()
def test__NumImputer():
xx, xxd, xxs = get_sample_data(add_na=True)
xxd.index = np.array([0, 1, 2, 3, 4, 10, 11, 12, 12, 14])
# DataFrame entry
for inp in (_NumImputer(), NumImputer(), _NumImputer(add_is_null=False), NumImputer(add_is_null=False)):
xx_out = inp.fit_transform(xxd)
assert (xx_out.index == xxd.index).all()
assert pd.isnull(xxd.loc[0, "col1"]) # Verify that it is still null
assert xx_out.isnull().sum().sum() == 0
assert xx_out["col1"][0] == xxd.loc[~xxd["col1"].isnull(), "col1"].mean()
assert xx_out.shape[0] == xx.shape[0]
assert get_type(xx_out) == get_type(xxd)
if inp.add_is_null:
assert inp.get_feature_names() == ["col0", "col1", "col2", "col3", "col4", "col5", "col6", "col1_isnull"]
assert xx_out.shape[1] == 1 + xxd.shape[1]
assert xx_out["col1_isnull"].iloc[0] == 1
assert xx_out["col1_isnull"].iloc[5] == 1
assert (xx_out["col1_isnull"].iloc[np.array([1, 2, 3, 4, 6, 7, 8, 9])] == 0).all()
else:
assert xx_out.shape[1] == xxd.shape[1]
assert inp.get_feature_names() == ["col0", "col1", "col2", "col3", "col4", "col5", "col6"]
inp = _NumImputer(add_is_null=False, allow_unseen_null=False)
inp.fit(xxd)
xxd2 = xxd.copy()
xxd2.iloc[0, 3] = np.nan
try:
inp.transform(xxd2)
raise AssertionError("Model should have fail its transformation")
except ValueError:
pass
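# Sketch of an equivalent check, assuming pytest is available to this suite
# (the explicit try/except above is kept as the canonical form):
#   with pytest.raises(ValueError):
#       inp.transform(xxd2)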
input_features = ["COL_%d" % i for i in range(xx.shape[1])]
# Numpy array
for inp in (_NumImputer(), NumImputer()):
xx_out = inp.fit_transform(xx)
assert pd.isnull(xx[0, 1])
assert pd.isnull(xx_out).sum() == 0
assert xx_out.shape[1] == 1 + xx.shape[1]
assert xx_out.shape[0] == xx.shape[0]
assert get_type(xx_out) == get_type(xx)
assert inp.get_feature_names() == ["0", "1", "2", "3", "4", "5", "6", "1_isnull"]
assert inp.get_feature_names(input_features) == input_features + ["COL_1_isnull"]
assert xx_out[0, 7] == 1
assert xx_out[5, 7] == 1
assert (xx_out[np.array([1, 2, 3, 4, 6, 7, 8, 9]), 7] == 0).all()
# Sparse Array
for inp in (_NumImputer(), NumImputer()):
for f in (sps.coo_matrix, sps.csc_matrix, sps.csr_matrix):
xxsf = f(xxs.copy())
xx_out = inp.fit_transform(xxsf)
assert pd.isnull(xxs[0, 1])
assert pd.isnull(xx_out.todense()).sum() == 0
assert get_type(xx_out) == get_type(xxs)
assert xx_out.shape[1] == 1 + xxs.shape[1]
assert xx_out.shape[0] == xx.shape[0]
assert inp.get_feature_names() == ["0", "1", "2", "3", "4", "5", "6", "1_isnull"]
assert inp.get_feature_names(input_features) == input_features + ["COL_1_isnull"]
assert xx_out.todense()[0, 7] == 1
assert xx_out.todense()[5, 7] == 1
assert (xx_out.todense()[np.array([1, 2, 3, 4, 6, 7, 8, 9]), 7] == 0).all()
xx, xxd, xxs = get_sample_data(add_na=False)
xxd.index = np.array([0, 1, 2, 3, 4, 10, 11, 12, 12, 14])
# DataFrame entry
for inp in (_NumImputer(), NumImputer()):
xx_out = inp.fit_transform(xxd)
assert (xx_out.index == xxd.index).all()
assert xx_out.isnull().sum().sum() == 0
assert xx_out.shape[1] == xxd.shape[1]
assert xx_out.shape[0] == xx.shape[0]
assert get_type(xx_out) == get_type(xxd)
assert inp.get_feature_names() == ["col0", "col1", "col2", "col3", "col4", "col5", "col6"]
# Numpy array
for inp in (_NumImputer(), NumImputer()):
xx_out = inp.fit_transform(xx)
assert pd.isnull(xx_out).sum() == 0
<reponame>trussworks/edd<filename>server/main/forms.py
import collections
import json
import logging
from copy import deepcopy
from functools import partial
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models import CharField as DbCharField
from django.db.models import F, Q
from django.db.models import Value as V
from django.db.models.base import Model
from django.db.models.functions import Concat
from django.db.models.manager import BaseManager
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from edd.search.registry import StrainRegistry
from . import models
from .models import (
Assay,
Attachment,
Comment,
Line,
Measurement,
MeasurementType,
MeasurementValue,
MetaboliteExchange,
MetaboliteSpecies,
MetadataType,
Protocol,
Strain,
Study,
StudyPermission,
Update,
)
User = get_user_model()
logger = logging.getLogger(__name__)
class HiddenJSONWidget(forms.widgets.HiddenInput):
"""
A hidden JSON input that defaults to an empty object instead of raising an exception.
"""
def value_from_datadict(self, data, files, name):
# default value of empty dict/object
return data.get(name, "{}")
class AutocompleteWidget(forms.widgets.MultiWidget):
""" Custom widget for a paired autocomplete and hidden ID field. """
def __init__(self, attrs=None, model=User, opt=None):
opt = {} if opt is None else opt
_widgets = (
forms.widgets.TextInput(attrs=opt.get("text_attr", {})),
forms.HiddenInput(),
)
self.model = model
super().__init__(_widgets, attrs)
def decompress(self, value):
# if the value is the actual model instance, don't try to look up model
if isinstance(value, Model):
return [self.display_value(value), value.pk]
elif value:
o = self.model.objects.get(pk=value)
return [self.display_value(o), value]
return ["", None]
def display_value(self, value):
return str(value)
def value_from_datadict(self, data, files, name):
widgets = enumerate(self.widgets)
v = [w.value_from_datadict(data, files, name + "_%s" % i) for i, w in widgets]
# v[0] is text of field, v[1] is hidden ID
return v[1]
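# Round-trip sketch for AutocompleteWidget (the field name "contact" below is
# hypothetical): rendering emits a visible text input named "contact_0" and a
# hidden pk input named "contact_1"; value_from_datadict() keeps only the pk:
#   AutocompleteWidget(model=User).value_from_datadict(
#       {"contact_0": "Jane Doe", "contact_1": "42"}, {}, "contact")  # -> "42"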
class MultiAutocompleteWidget(AutocompleteWidget):
"""
Extension to Autocomplete widget that handles multiple autocompleted values.
All values must be lists; either a list of results from decompress, or a list of values
to be passed to decompress.
"""
def __init__(self, **kwargs):
self._separator = kwargs.pop("separator", ",")
super().__init__(**kwargs)
def decompress(self, value):
if isinstance(value, BaseManager):
# delegate decompress for individual items
values = map(super().decompress, value.all())
# zip together into array of two value-arrays
values = list(zip(*values))
if len(values):
# join by the separator string
return [
self._separator.join(map(str, values[0])),
self._separator.join(map(str, values[1])),
]
else:
# there are no values, return "empty" structure
return ["", None]
return super().decompress(value)
def render(self, name, value, attrs=None, renderer=None):
joined = []
widget_count = len(self.widgets)
for _index in range(widget_count):
joined.append([])
if value is None:
value = []
for item in value:
if not isinstance(item, list):
item = self.decompress(item)
for index in range(widget_count):
joined[index].append(item[index] if len(item) > index else "")
for index in range(widget_count):
joined[index] = self._separator.join(map(str, joined[index]))
return super().render(name, joined, attrs)
def value_from_datadict(self, data, files, name):
# value from super will be joined by self._separator, so split it to get the true value
joined = super().value_from_datadict(data, files, name)
if joined:
return joined.split(self._separator)
return []
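# Multi-value sketch: the hidden input carries pks joined by the separator, so
# value_from_datadict() turning up "1,2,3" yields ["1", "2", "3"] with the
# default "," separator; decompress() performs the inverse join.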
class UserAutocompleteWidget(AutocompleteWidget):
""" Autocomplete widget for Users """
def __init__(self, attrs=None, opt=None):
opt = {} if opt is None else opt
opt.update(
{
"text_attr": {
"class": "autocomp form-control",
"eddautocompletetype": "User",
}
}
)
super().__init__(attrs=attrs, model=User, opt=opt)
class GroupAutocompleteWidget(AutocompleteWidget):
""" Autocomplete widget for Groups """
def __init__(self, attrs=None, opt=None):
opt = {} if opt is None else opt
opt.update({"text_attr": {"class": "autocomp", "eddautocompletetype": "Group"}})
super().__init__(attrs=attrs, model=Group, opt=opt)
class ProtocolAutocompleteWidget(AutocompleteWidget):
"""Autocomplete widget for Protocols"""
def __init__(self, attrs=None, opt=None):
opt = {} if opt is None else opt
opt.update(
{"text_attr": {"class": "autocomp", "eddautocompletetype": "Protocol"}}
)
super().__init__(attrs=attrs, model=Protocol, opt=opt)
class RegistryValidator:
"""
Validator for Strain objects tied to ICE registry. If using outside the
context of Form validation (e.g. in a Celery task), ensure that the Update
object is created before the callable is called.
See: https://docs.djangoproject.com/en/dev/ref/validators/
"""
def __init__(self, existing_strain=None, existing_entry=None):
"""
If an already-existing Strain object in the database is being updated,
initialize RegistryValidator with existing_strain. If an entry has
already been queried from ICE, initialize with existing_entry.
"""
self.existing_strain = existing_strain
self.existing_entry = existing_entry
def load_part_from_ice(self, registry_id):
if self.existing_entry is not None:
return self.existing_entry
# using the Update to get the correct user for the search
update = Update.load_update()
registry = StrainRegistry()
user_email = update.mod_by.email
try:
with registry.login(update.mod_by):
return registry.get_entry(registry_id)
except Exception as e:
raise ValidationError(
_("Failed to load strain %(uuid)s from ICE for user %(user)s"),
code="ice failure",
params={"user": user_email, "uuid": registry_id},
) from e
def save_strain(self, entry):
try:
if entry and self.existing_strain:
self.existing_strain.name = entry.name
self.existing_strain.registry_id = entry.registry_id
self.existing_strain.registry_url = entry.registry_url
self.existing_strain.save()
elif entry:
# not using get_or_create, so exception is raised if registry_id exists
Strain.objects.create(
name=entry.name,
registry_id=entry.registry_id,
registry_url=entry.registry_url,
)
except Exception as e:
raise ValidationError(
_("Failed to save strain from %(entry)s"),
code="db failure",
params={"entry": entry},
) from e
def validate(self, value):
try:
# handle multi-valued inputs by validating each value individually
if isinstance(value, (list, tuple)):
for v in value:
self.validate(v)
return
qs = Strain.objects.filter(registry_id=value)
if self.existing_strain:
qs = qs.exclude(pk__in=[self.existing_strain])
count = qs.count()
if count == 0:
self.save_strain(self.load_part_from_ice(value))
elif count > 1:
raise ValidationError(
_(
"Selected ICE record is already linked to EDD strains: %(strains)s"
),
code="existing records",
params={"strains": list(qs)},
)
except ValidationError:
raise
except Exception as e:
raise ValidationError(
_("Error querying for an EDD strain with registry_id %(uuid)s"),
code="query failure",
params={"uuid": value},
) from e
def __call__(self, value):
self.validate(value)
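# Usage sketch: because RegistryValidator is callable, it can be attached
# directly to a form field (the field below is hypothetical):
#   registry_id = forms.UUIDField(validators=[RegistryValidator()])
# Outside form validation (e.g. in a Celery task), create the Update object
# first, as the class docstring above requires.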
class RegistryAutocompleteWidget(AutocompleteWidget):
""" Autocomplete widget for Registry strains """
def __init__(self, attrs=None, opt=None):
opt = {} if opt is None else opt
opt.update(
{
"text_attr": {
"class": "autocomp form-control",
"eddautocompletetype": "Registry",
}
}
)
super().__init__(attrs=attrs, model=Strain, opt=opt)
def decompress(self, value):
""" Overriding since Strain uses registry_id for lookups. """
if isinstance(value, Strain):
return [self.display_value(value), value.registry_id]
elif value:
try:
o = Strain.objects.get(registry_id=value)
return [self.display_value(o), value]
except Strain.DoesNotExist:
pass
return ["", None]
class MultiRegistryAutocompleteWidget(
MultiAutocompleteWidget, RegistryAutocompleteWidget
):
pass
class MetadataTypeAutocompleteWidget(AutocompleteWidget):
""" Autocomplete widget for types of metadata """
def __init__(self, attrs=None, opt=None):
opt = {} if opt is None else opt
opt.update(
{"text_attr": {"class": "autocomp", "eddautocompletetype": "MetadataType"}}
)
super().__init__(attrs=attrs, model=MetadataType, opt=opt)
class MeasurementTypeAutocompleteWidget(AutocompleteWidget):
""" Autocomplete widget for types of metadata """
def __init__(self, attrs=None, opt=None):
""" Set opt with {'text_attr': {'class': 'autocomp autocomp_XXX'}} to override. """
opt = {} if opt is None else opt
my_opt = {
"text_attr": {
"class": "autocomp form-control",
"eddautocompletetype": "MeasurementType",
}
}
my_opt.update(**opt)
super().__init__(attrs=attrs, model=MeasurementType, opt=my_opt)
class SbmlInfoAutocompleteWidget(AutocompleteWidget):
""" Autocomplete widget for parts contained within SBMLTemplate """
def __init__(self, template, model, attrs=None, opt=None):
self._template = template
opt = {} if opt is None else opt
opt.get("text_attr", {}).update({"data-template": template.pk})
super().__init__(attrs=attrs, model=model, opt=opt)
def decompress(self, value):
# if the value is the actual model instance, don't try to look up model
if isinstance(value, self.model):
return [self.display_value(value), value.pk]
elif value:
o = self.lookup(value)
return [self.display_value(o), o.pk]
return ["", None]
def decompress_q(self, value):
return Q(pk=self._int(value))
def lookup(self, value):
try:
return self.model.objects.get(
self.decompress_q(value), sbml_template=self._template
)
except self.model.DoesNotExist:
pass
return None
def value_from_datadict(self, data, files, name):
widgets = enumerate(self.widgets)
v = [w.value_from_datadict(data, files, name + "_%s" % i) for i, w in widgets]
# v[0] is text of field, v[1] is hidden ID
return self.lookup(v[1])
def _int(self, value):
"Try casting a value to int, return None if fails"
try:
return int(value)
except ValueError:
return None
class SbmlExchangeAutocompleteWidget(SbmlInfoAutocompleteWidget):
""" Autocomplete widget for Exchanges in an SBMLTemplate """
def __init__(self, template, attrs=None, opt=None):
opt = {} if opt is None else opt
opt.update(
text_attr={"class": "autocomp", "eddautocompletetype": "MetaboliteExchange"}
)
super().__init__(
template=template, attrs=attrs, model=MetaboliteExchange, opt=opt
)
def decompress_q(self, value):
parent = super().decompress_q(value)
return parent | Q(exchange_name=value)
class SbmlSpeciesAutocompleteWidget(SbmlInfoAutocompleteWidget):
""" Autocomplete widget for Species in an SBMLTemplate """
def __init__(self, template, attrs=None, opt=None):
opt = {} if opt is None else opt
opt.update(
text_attr={"class": "autocomp", "eddautocompletetype": "MetaboliteSpecies"}
)
super().__init__(
template=template, attrs=attrs, model=MetaboliteSpecies, opt=opt
)
def decompress_q(self, value):
parent = super().decompress_q(value)
return parent | Q(species=value)
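# Pattern note (sketch): each subclass widens the base pk lookup by OR-ing on
# a domain field, so an incoming value may be either a primary key or a name:
#   SbmlExchangeAutocompleteWidget -> Q(pk=_int(value)) | Q(exchange_name=value)
#   SbmlSpeciesAutocompleteWidget  -> Q(pk=_int(value)) | Q(species=value)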
class CreateStudyForm(forms.ModelForm):
""" Form to create a new study. """
# include hidden field for copying multiple Line instances by ID
lineId = forms.ModelMultipleChoiceField(
queryset=Line.objects.none(), required=False, widget=forms.MultipleHiddenInput
)
class Meta:
model = Study
fields = ["name", "description", "contact"]
labels = {
"name": _("Study Name"),
"description": _("Description"),
"contact": _("Contact"),
}
widgets = {
"name": forms.widgets.TextInput(
attrs={"size": 50, "class": "form-control", "placeholder": "(required)"}
),
"description": forms.widgets.Textarea(
attrs={"cols": 49, "class": "form-control"}
),
"contact": UserAutocompleteWidget(),
}
help_texts = {"name": _(""), "description": _("")}
def __init__(self, *args, **kwargs):
# removes default hard-coded suffix of colon character on all labels
kwargs.setdefault("label_suffix", "")
self._user = kwargs.pop("user", | |
shipping_zone.name,
"countries": [{"code": c.code} for c in shipping_zone.countries],
"channels": [{"name": c.name} for c in shipping_zone.channels.all()],
},
"meta": None,
}
)
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_shipping_zone_deleted(
shipping_zone, subscription_shipping_zone_deleted_webhook
):
# given
webhooks = [subscription_shipping_zone_deleted_webhook]
event_type = WebhookEventAsyncType.SHIPPING_ZONE_DELETED
shipping_zones_query = ShippingZone.objects.filter(pk=shipping_zone.id)
zones_instances = [zone for zone in shipping_zones_query]
shipping_zones_query.delete()
shipping_zone_id = graphene.Node.to_global_id("ShippingZone", zones_instances[0].id)
# when
deliveries = create_deliveries_for_subscriptions(
event_type, zones_instances[0], webhooks
)
# then
expected_payload = json.dumps(
{
"shippingZone": {"id": shipping_zone_id, "name": shipping_zone.name},
"meta": None,
}
)
assert zones_instances[0].id is not None
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_product_created(product, subscription_product_created_webhook):
webhooks = [subscription_product_created_webhook]
event_type = WebhookEventAsyncType.PRODUCT_CREATED
product_id = graphene.Node.to_global_id("Product", product.id)
deliveries = create_deliveries_for_subscriptions(event_type, product, webhooks)
expected_payload = json.dumps({"product": {"id": product_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_product_updated(product, subscription_product_updated_webhook):
webhooks = [subscription_product_updated_webhook]
event_type = WebhookEventAsyncType.PRODUCT_UPDATED
product_id = graphene.Node.to_global_id("Product", product.id)
deliveries = create_deliveries_for_subscriptions(event_type, product, webhooks)
expected_payload = json.dumps({"product": {"id": product_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_product_deleted(product, subscription_product_deleted_webhook):
webhooks = [subscription_product_deleted_webhook]
event_type = WebhookEventAsyncType.PRODUCT_DELETED
product_id = graphene.Node.to_global_id("Product", product.id)
deliveries = create_deliveries_for_subscriptions(event_type, product, webhooks)
expected_payload = json.dumps({"product": {"id": product_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
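# The created/updated/deleted trios above share one shape; a hedged sketch of
# a parametrized equivalent (the `webhook_for` fixture is hypothetical, and
# pytest.mark.parametrize is assumed to be available in this suite):
#   @pytest.mark.parametrize("event_type", [
#       WebhookEventAsyncType.PRODUCT_CREATED,
#       WebhookEventAsyncType.PRODUCT_UPDATED,
#       WebhookEventAsyncType.PRODUCT_DELETED,
#   ])
#   def test_product_events(product, webhook_for, event_type): ...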
def test_product_variant_created(variant, subscription_product_variant_created_webhook):
webhooks = [subscription_product_variant_created_webhook]
event_type = WebhookEventAsyncType.PRODUCT_VARIANT_CREATED
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
deliveries = create_deliveries_for_subscriptions(event_type, variant, webhooks)
expected_payload = json.dumps({"productVariant": {"id": variant_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_product_variant_updated(variant, subscription_product_variant_updated_webhook):
webhooks = [subscription_product_variant_updated_webhook]
event_type = WebhookEventAsyncType.PRODUCT_VARIANT_UPDATED
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
deliveries = create_deliveries_for_subscriptions(event_type, variant, webhooks)
expected_payload = json.dumps({"productVariant": {"id": variant_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_product_variant_deleted(variant, subscription_product_variant_deleted_webhook):
webhooks = [subscription_product_variant_deleted_webhook]
event_type = WebhookEventAsyncType.PRODUCT_VARIANT_DELETED
variant_id = graphene.Node.to_global_id("ProductVariant", variant.id)
deliveries = create_deliveries_for_subscriptions(event_type, variant, webhooks)
expected_payload = json.dumps({"productVariant": {"id": variant_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_product_variant_out_of_stock(
stock, subscription_product_variant_out_of_stock_webhook
):
webhooks = [subscription_product_variant_out_of_stock_webhook]
event_type = WebhookEventAsyncType.PRODUCT_VARIANT_OUT_OF_STOCK
variant_id = graphene.Node.to_global_id("ProductVariant", stock.product_variant.id)
deliveries = create_deliveries_for_subscriptions(event_type, stock, webhooks)
expected_payload = json.dumps({"productVariant": {"id": variant_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_product_variant_back_in_stock(
stock, subscription_product_variant_back_in_stock_webhook
):
webhooks = [subscription_product_variant_back_in_stock_webhook]
event_type = WebhookEventAsyncType.PRODUCT_VARIANT_BACK_IN_STOCK
variant_id = graphene.Node.to_global_id("ProductVariant", stock.product_variant.id)
deliveries = create_deliveries_for_subscriptions(event_type, stock, webhooks)
expected_payload = json.dumps({"productVariant": {"id": variant_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_order_created(order, subscription_order_created_webhook):
webhooks = [subscription_order_created_webhook]
event_type = WebhookEventAsyncType.ORDER_CREATED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_order_confirmed(order, subscription_order_confirmed_webhook):
webhooks = [subscription_order_confirmed_webhook]
event_type = WebhookEventAsyncType.ORDER_CONFIRMED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_order_fully_paid(order, subscription_order_fully_paid_webhook):
webhooks = [subscription_order_fully_paid_webhook]
event_type = WebhookEventAsyncType.ORDER_FULLY_PAID
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_order_updated(order, subscription_order_updated_webhook):
webhooks = [subscription_order_updated_webhook]
event_type = WebhookEventAsyncType.ORDER_UPDATED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_order_cancelled(order, subscription_order_cancelled_webhook):
webhooks = [subscription_order_cancelled_webhook]
event_type = WebhookEventAsyncType.ORDER_CANCELLED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_order_fulfilled(order, subscription_order_fulfilled_webhook):
webhooks = [subscription_order_fulfilled_webhook]
event_type = WebhookEventAsyncType.ORDER_FULFILLED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_draft_order_created(order, subscription_draft_order_created_webhook):
webhooks = [subscription_draft_order_created_webhook]
event_type = WebhookEventAsyncType.DRAFT_ORDER_CREATED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_draft_order_updated(order, subscription_draft_order_updated_webhook):
webhooks = [subscription_draft_order_updated_webhook]
event_type = WebhookEventAsyncType.DRAFT_ORDER_UPDATED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_draft_order_deleted(order, subscription_draft_order_deleted_webhook):
webhooks = [subscription_draft_order_deleted_webhook]
event_type = WebhookEventAsyncType.DRAFT_ORDER_DELETED
order_id = graphene.Node.to_global_id("Order", order.id)
deliveries = create_deliveries_for_subscriptions(event_type, order, webhooks)
expected_payload = json.dumps({"order": {"id": order_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_sale_created(sale, subscription_sale_created_webhook):
# given
webhooks = [subscription_sale_created_webhook]
event_type = WebhookEventAsyncType.SALE_CREATED
expected_payload = generate_sale_payload(sale)
# when
deliveries = create_deliveries_for_subscriptions(event_type, sale, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_sale_updated(sale, subscription_sale_updated_webhook):
# given
webhooks = [subscription_sale_updated_webhook]
event_type = WebhookEventAsyncType.SALE_UPDATED
expected_payload = generate_sale_payload(sale)
# when
deliveries = create_deliveries_for_subscriptions(event_type, sale, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_sale_deleted(sale, subscription_sale_deleted_webhook):
# given
webhooks = [subscription_sale_deleted_webhook]
event_type = WebhookEventAsyncType.SALE_DELETED
expected_payload = generate_sale_payload(sale)
# when
deliveries = create_deliveries_for_subscriptions(event_type, sale, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_invoice_requested(fulfilled_order, subscription_invoice_requested_webhook):
# given
webhooks = [subscription_invoice_requested_webhook]
event_type = WebhookEventAsyncType.INVOICE_REQUESTED
invoice = fulfilled_order.invoices.first()
expected_payload = generate_invoice_payload(invoice)
# when
deliveries = create_deliveries_for_subscriptions(event_type, invoice, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_invoice_deleted(fulfilled_order, subscription_invoice_deleted_webhook):
# given
webhooks = [subscription_invoice_deleted_webhook]
event_type = WebhookEventAsyncType.INVOICE_DELETED
invoice = fulfilled_order.invoices.first()
expected_payload = generate_invoice_payload(invoice)
# when
deliveries = create_deliveries_for_subscriptions(event_type, invoice, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_invoice_sent(fulfilled_order, subscription_invoice_sent_webhook):
# given
webhooks = [subscription_invoice_sent_webhook]
event_type = WebhookEventAsyncType.INVOICE_SENT
invoice = fulfilled_order.invoices.first()
expected_payload = generate_invoice_payload(invoice)
# when
deliveries = create_deliveries_for_subscriptions(event_type, invoice, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_fulfillment_created(fulfillment, subscription_fulfillment_created_webhook):
# given
webhooks = [subscription_fulfillment_created_webhook]
event_type = WebhookEventAsyncType.FULFILLMENT_CREATED
expected_payload = generate_fulfillment_payload(fulfillment)
# when
deliveries = create_deliveries_for_subscriptions(event_type, fulfillment, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_fulfillment_canceled(fulfillment, subscription_fulfillment_canceled_webhook):
# given
webhooks = [subscription_fulfillment_canceled_webhook]
event_type = WebhookEventAsyncType.FULFILLMENT_CANCELED
expected_payload = generate_fulfillment_payload(fulfillment)
# when
deliveries = create_deliveries_for_subscriptions(event_type, fulfillment, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_customer_created(customer_user, subscription_customer_created_webhook):
# given
webhooks = [subscription_customer_created_webhook]
event_type = WebhookEventAsyncType.CUSTOMER_CREATED
expected_payload = json.dumps(generate_customer_payload(customer_user))
# when
deliveries = create_deliveries_for_subscriptions(
event_type, customer_user, webhooks
)
# then
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_customer_updated(customer_user, subscription_customer_updated_webhook):
# given
webhooks = [subscription_customer_updated_webhook]
event_type = WebhookEventAsyncType.CUSTOMER_UPDATED
expected_payload = json.dumps(generate_customer_payload(customer_user))
# when
deliveries = create_deliveries_for_subscriptions(
event_type, customer_user, webhooks
)
# then
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_collection_created(
collection_with_products, subscription_collection_created_webhook
):
# given
collection = collection_with_products[0].collections.first()
webhooks = [subscription_collection_created_webhook]
event_type = WebhookEventAsyncType.COLLECTION_CREATED
expected_payload = generate_collection_payload(collection)
# when
deliveries = create_deliveries_for_subscriptions(event_type, collection, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_collection_updated(
collection_with_products, subscription_collection_updated_webhook
):
# given
webhooks = [subscription_collection_updated_webhook]
collection = collection_with_products[0].collections.first()
event_type = WebhookEventAsyncType.COLLECTION_UPDATED
expected_payload = generate_collection_payload(collection)
# when
deliveries = create_deliveries_for_subscriptions(event_type, collection, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_collection_deleted(
collection_with_products, subscription_collection_deleted_webhook
):
# given
webhooks = [subscription_collection_deleted_webhook]
collection = collection_with_products[0].collections.first()
event_type = WebhookEventAsyncType.COLLECTION_DELETED
expected_payload = generate_collection_payload(collection)
# when
deliveries = create_deliveries_for_subscriptions(event_type, collection, webhooks)
# then
assert deliveries[0].payload.payload == json.dumps(expected_payload)
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_checkout_create(checkout, subscription_checkout_created_webhook):
webhooks = [subscription_checkout_created_webhook]
event_type = WebhookEventAsyncType.CHECKOUT_CREATED
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
deliveries = create_deliveries_for_subscriptions(event_type, checkout, webhooks)
expected_payload = json.dumps({"checkout": {"id": checkout_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_checkout_update(checkout, subscription_checkout_updated_webhook):
webhooks = [subscription_checkout_updated_webhook]
event_type = WebhookEventAsyncType.CHECKOUT_UPDATED
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
deliveries = create_deliveries_for_subscriptions(event_type, checkout, webhooks)
expected_payload = json.dumps({"checkout": {"id": checkout_id}, "meta": None})
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
assert deliveries[0].webhook == webhooks[0]
def test_page_created(page, subscription_page_created_webhook):
# given
webhooks = [subscription_page_created_webhook]
event_type = WebhookEventAsyncType.PAGE_CREATED
expected_payload = json.dumps(generate_page_payload(page))
# when
deliveries = create_deliveries_for_subscriptions(event_type, page, webhooks)
# then
assert deliveries[0].payload.payload == expected_payload
assert len(deliveries) == len(webhooks)
if ARG_MULTIPROCESS_MIN_PORT in options:
self.multiprocess_min_port = int(options[ARG_MULTIPROCESS_MIN_PORT])
if self.IsApiServer():
assert self.port == self.api_port
if self.IsBackend():
self.InitBackendEntry()
if not self.Type():
self.SetType(DevProcess.TYPE_MASTER)
def InitBackendEntry(self):
"""Finds the entry for the backend this process represents, if any."""
for backend in self.backends:
if backend.name == self.backend_id:
self.backend_entry = backend
if not self.backend_entry:
raise Error('No backend entry found for: %s' % self)
def HttpServer(self):
"""Returns the HTTPServer used by this process."""
return self.http_server
def Address(self):
"""Returns the address of this process."""
return 'http://%s:%d' % (self.host, self.port)
def SetHttpServer(self, http_server):
"""Sets the http_server to be used when handling requests.
Args:
http_server: An HTTPServer that receives requests.
"""
self.http_server = http_server
self.handle_requests = HandleRequestThread()
self.handle_requests.start()
def StartChildren(self, argv):
"""Starts the set of child processes."""
self.children = []
base_port = self.multiprocess_min_port
next_port = base_port
self.child_app_instance = ChildProcess(self.host, next_port,
app_instance=0)
self.children.append(self.child_app_instance)
next_port += 1
for backend in self.backends:
base_port += 100
next_port = base_port
for i in xrange(backend.instances):
self.children.append(ChildProcess(self.host, next_port,
backend_id=backend.name,
instance_id=i))
next_port += 1
self.children.append(ChildProcess(self.host, base_port + 99,
backend_id=backend.name))
base_port += 100
next_port = base_port
self.child_api_server = ChildProcess(self.host, next_port, api_server=True)
self.children.append(self.child_api_server)
if self.multiprocess_min_port == 0:
self.AssignPortsRandomly()
self.child_api_server.host = API_SERVER_HOST
self.api_port = self.child_api_server.port
for child in self.children:
child.Start(argv, self.api_port)
for child in self.children:
child.WaitForConnection()
message = '\n\nMultiprocess Setup Complete:'
for child in self.children:
message += '\n %s' % child
message += '\n'
logging.info(message)
for child in self.children:
child.EnableStartRequests()
def AssignPortsRandomly(self):
"""Acquires a random port for each child process."""
bound = []
for child in self.children:
sock = socket.socket()
sock.bind(('localhost', 0))
bound.append(sock)
child.SetHostPort(self.host, sock.getsockname()[1])
for sock in bound:
sock.close()
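# Note on AssignPortsRandomly above: binding to port 0 lets the OS pick a free
# port; every socket stays open until all children have ports so the same port
# cannot be handed out twice, then all are released. A small race remains
# between close() and the child binding, which is inherent to this trick.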
def __str__(self):
result = '[%s]' % self.Type()
if self.desc:
result += ' [%s]' % self.desc
return result
def SetType(self, process_type):
if process_type not in DevProcess.TYPES:
raise Error('Unknown process type: %s' % process_type)
if self.process_type is not None:
raise Error('Process type cannot be set more than once.')
self.process_type = process_type
def Type(self):
return self.process_type
def IsDefault(self):
"""Indicates whether this is the default dev_appserver process."""
return self.Type() is None
def IsMaster(self):
"""Indicates whether this is the master process."""
return self.Type() == DevProcess.TYPE_MASTER
def IsSubprocess(self):
"""Indicates that this is a subprocessess of the dev_appserver."""
return not (self.IsDefault() or self.IsMaster())
def IsApiServer(self):
"""Indicates whether this is the api server process."""
return self.Type() == DevProcess.TYPE_API_SERVER
def IsAppInstance(self):
"""Indicates whether this process represents an application instance."""
return self.Type() == DevProcess.TYPE_APP_INSTANCE
def IsBackend(self):
"""Indicates whether this process represents a backend."""
return self.IsBackendBalancer() or self.IsBackendInstance()
def IsBackendBalancer(self):
"""Indicates whether this process represents a backend load balancer."""
return self.Type() == DevProcess.TYPE_BACKEND_BALANCER
def IsBackendInstance(self):
"""Indicates whether this process represents a backend instance."""
return self.Type() == DevProcess.TYPE_BACKEND_INSTANCE
def IsBalancer(self):
"""Indicates whether this process represents a load balancer."""
return self.IsMaster() or self.IsBackendBalancer()
def IsInstance(self):
"""Indicates whether this process represents an instance."""
return self.IsAppInstance() or self.IsBackendInstance()
def InitBalanceSet(self):
"""Construct a list of instances to balance traffic over."""
if self.IsMaster():
self.balance_set = [ self.child_app_instance.port ]
if self.IsBackendBalancer():
self.balance_set = []
for instance in xrange(self.backend_entry.instances):
port = backends_api._get_dev_port(self.backend_id, instance)
self.balance_set.append(port)
def GetBalanceSet(self):
"""Return the set of ports over which this process balances requests."""
return self.balance_set
def FailFast(self):
"""Indicates whether this process has fail-fast behavior."""
if not self.backend_entry:
return False
if self.backend_entry.failfast:
return True
return False
def PrintStartMessage(self, app_id, host, port):
"""Print the start message for processes that are started automatically."""
url = 'http://%s:%d' % (host, port)
admin_url = '%s/_ah/admin' % url
if not self.IsSubprocess():
logging.info('Running application %s on port %d: %s',
app_id, port, url)
logging.info('Admin console is available at: %s',
admin_url)
def Children(self):
"""Returns the children of this process."""
return self.children
def MaybeConfigureRemoteDataApis(self):
"""Set up stubs using remote_api as appropriate.
If this is the API server (or is not multiprocess), return False.
Otherwise, set up the stubs for data based APIs as remote stubs pointing at
the to the API server and return True.
"""
if self.IsDefault() or self.IsApiServer():
return False
services = ['datastore_v3', 'memcache', 'taskqueue']
remote_api_stub.ConfigureRemoteApi(
self.app_id, PATH_DEV_API_SERVER, lambda: ('', ''),
servername='%s:%d' % (API_SERVER_HOST, self.api_port),
services=services)
return True
def NewAppInfo(self, appinfo):
"""Called when a new appinfo is read from disk on each request.
The only action we take is to apply backend settings, such as the 'start'
directive, which adds a handler for /_ah/start.
Args:
appinfo: An AppInfoExternal to be used on the next request.
"""
if self.backends:
appinfo.backends = self.backends
if self.IsBackend():
appinfo.ApplyBackendSettings(self.backend_id)
def UpdateEnv(self, env_dict):
"""Copies backend port information to the supplied environment dictionary.
This information is used by the Backends API to resolve backend and instance
addresses in the dev_appserver.
User-supplied code has no access to the default environment. This method
will copy the environment variables needed for the backends api from the
default environment to the environment where user supplied code runs.
Args:
env_dict: Dictionary with the new environment.
"""
if self.backend_id:
env_dict['BACKEND_ID'] = self.backend_id
if self.instance_id is not None:
env_dict['INSTANCE_ID'] = str(self.instance_id)
for key in os.environ:
if key.startswith('BACKEND_PORT'):
env_dict[key] = os.environ[key]
def ProcessRequest(self, request, client_address):
"""Handles the SocketServer process_request call.
If the request is to a backend the request will be handled by a separate
thread. If the backend is busy a 503 response will be sent.
If this is a balancer instance each incoming request will be forwarded to
its own thread and handled there.
If no backends are configured this override has no effect.
Args:
request: the request to process
client_address: the client address
"""
assert not self.IsDefault()
if self.IsBalancer():
ForwardRequestThread(request, client_address).start()
return
elif self.IsApiServer():
HandleRequestDirectly(request, client_address)
return
assert self.IsAppInstance() or self.IsBackendInstance()
if self.handle_requests.Active():
if self.FailFast():
logging.info('respond busy')
RespondBusyHandler(request, client_address)
return
self.handle_requests.Enqueue(request, client_address)
def HandleRequest(self, request):
"""Hook that allows the DevProcess a chance to respond to requests.
This hook is invoked just before normal request dispatch occurs in
dev_appserver.py.
Args:
request: The request to be handled.
Returns:
bool: Indicates whether the request was handled here. If False, normal
request handling should proceed.
"""
if self.IsBackendInstance() and not self.started:
if request.path != '/_ah/start':
request.send_response(httplib.FORBIDDEN,
'Waiting for start request to finish.')
return True
return False
def RequestComplete(self, request, response):
"""Invoked when the process has finished handling a request."""
rc = response.status_code
if request.path == '/_ah/start':
if (rc >= 200 and rc < 300) or rc == 404:
self.started = True
def UpdateSystemStub(self, system_service_stub):
"""Copies info about the backends into the system stub."""
if self.IsDefault():
return
system_service_stub.set_backend_info(self.backends)
class HandleRequestThread(threading.Thread):
"""Thread for handling HTTP requests.
Instances need to be able to respond with 503 when busy with other requests,
therefore requests are accepted in the main thread and forwarded to the
serving thread for processing. If the serving thread is busy with other
requests and the max pending queue length is reached, a 503 error is sent back.
"""
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
SetThreadName(self, 'HandleRequestThread')
self.active = False
self.pending = Queue.Queue()
def Active(self):
"""Indicates whether this thread is busy handling a request."""
return self.active
def Enqueue(self, request, client_address):
"""Adds the indicated request to the pending request queue."""
self.pending.put_nowait((request, client_address))
def run(self):
"""Takes requests from the queue and handles them."""
while True:
request, client_address = self.pending.get()
self.active = True
try:
HandleRequestDirectly(request, client_address)
except Exception, e:
logging.info('Exception in HandleRequestThread', exc_info=1)
finally:
self.active = False
class RespondBusyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler that always will send back a 503 error."""
def __init__(self, request, client_address):
BaseHTTPServer.BaseHTTPRequestHandler.__init__(
self, request, client_address, HttpServer())
def handle_one_request(self):
"""Override."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
return
self.send_error(httplib.SERVICE_UNAVAILABLE, 'Busy.')
class ForwardRequestThread(threading.Thread):
"""Forwards an incoming request in a separate thread."""
def __init__(self, request, client_address):
threading.Thread.__init__(self)
self.request = request
self.client_address = client_address
self.setDaemon(True)
SetThreadName(self, 'ForwardRequestThread')
def run(self):
ForwardRequestHandler(self.request, self.client_address)
class ForwardRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Forwards the incoming request to next free backend instance."""
def __init__(self,
request,
client_address,
connection_handler=httplib.HTTPConnection):
"""Constructor extending BaseHTTPRequestHandler.
Args:
request: The incoming request.
client_address: A (ip, port) tuple with the address of the client.
connection_handler: http library to use when balancing the connection to
the next available backend
sampleGiven(self, value):
return value[self.value]
def evaluateInner(self, context):
return StarredDistribution(valueInContext(self.value, context), self.lineno)
def __str__(self):
return f'*{self.value}'
class MethodDistribution(Distribution):
"""Distribution resulting from passing distributions to a method of a fixed object"""
def __init__(self, method, obj, args, kwargs, valueType=None):
args = tuple(toDistribution(arg) for arg in args)
kwargs = { name: toDistribution(arg) for name, arg in kwargs.items() }
if valueType is None:
valueType = typing.get_type_hints(method).get('return')
super().__init__(*args, *kwargs.values(), valueType=valueType)
self.method = method
self.object = obj
self.arguments = args
self.kwargs = kwargs
def sampleGiven(self, value):
args = []
for arg in self.arguments:
if isinstance(arg, StarredDistribution):
args.extend(value[arg.value])
else:
args.append(value[arg])
kwargs = { name: value[arg] for name, arg in self.kwargs.items() }
return self.method(self.object, *args, **kwargs)
def evaluateInner(self, context):
obj = valueInContext(self.object, context)
arguments = tuple(valueInContext(arg, context) for arg in self.arguments)
kwargs = { name: valueInContext(arg, context) for name, arg in self.kwargs.items() }
return MethodDistribution(self.method, obj, arguments, kwargs)
def isEquivalentTo(self, other):
if not type(other) is MethodDistribution:
return False
return (self.method == other.method
and areEquivalent(self.object, other.object)
and areEquivalent(self.arguments, other.arguments)
and areEquivalent(self.kwargs, other.kwargs))
def __str__(self):
args = argsToString(itertools.chain(self.arguments, self.kwargs.items()))
return f'{self.object}.{self.method.__name__}{args}'
def distributionMethod(method):
"""Decorator for wrapping a method so that it can take distributions as arguments."""
def helper(wrapped, self, *args, **kwargs):
args = tuple(toDistribution(arg) for arg in args)
kwargs = { name: toDistribution(arg) for name, arg in kwargs.items() }
if any(needsSampling(arg) for arg in itertools.chain(args, kwargs.values())):
return MethodDistribution(method, self, args, kwargs)
elif any(needsLazyEvaluation(arg)
for arg in itertools.chain(args, kwargs.values())):
# see analogous comment in distributionFunction
return makeDelayedFunctionCall(helper, (method, self) + args, kwargs)
else:
return method(self, *args, **kwargs)
return unpacksDistributions(decorator.decorate(method, helper, kwsyntax=True))
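# Usage sketch for distributionMethod (the class below is hypothetical and
# shown only in this comment to avoid altering the module):
#   class VectorLike:
#       @distributionMethod
#       def shifted(self, dx) -> float:
#           return self.x + dx
#   VectorLike().shifted(Range(0, 1))  # -> MethodDistribution, sampled later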
class AttributeDistribution(Distribution):
"""Distribution resulting from accessing an attribute of a distribution"""
def __init__(self, attribute, obj):
super().__init__(obj)
self.attribute = attribute
self.object = obj
def sampleGiven(self, value):
obj = value[self.object]
return getattr(obj, self.attribute)
def evaluateInner(self, context):
obj = valueInContext(self.object, context)
return AttributeDistribution(self.attribute, obj)
def supportInterval(self):
obj = self.object
if isinstance(obj, Options):
attrs = (getattr(opt, self.attribute) for opt in obj.options)
mins, maxes = zip(*(supportInterval(attr) for attr in attrs))
l = None if any(sl is None for sl in mins) else min(mins)
r = None if any(sr is None for sr in maxes) else max(maxes)
return l, r
return None, None
def isEquivalentTo(self, other):
if not type(other) is AttributeDistribution:
return False
return (self.attribute == other.attribute
and areEquivalent(self.object, other.object))
def __call__(self, *args):
vty = self.object._valueType
retTy = None
if vty is not object:
func = getattr(vty, self.attribute, None)
if func:
if isinstance(func, property):
func = func.fget
retTy = typing.get_type_hints(func).get('return')
return OperatorDistribution('__call__', self, args, valueType=retTy)
def __str__(self):
return f'{self.object}.{self.attribute}'
class OperatorDistribution(Distribution):
"""Distribution resulting from applying an operator to one or more distributions"""
def __init__(self, operator, obj, operands, valueType=None):
operands = tuple(toDistribution(arg) for arg in operands)
if valueType is None:
valueType = self.inferType(obj, operator)
super().__init__(obj, *operands, valueType=valueType)
self.operator = operator
self.object = obj
self.operands = operands
@staticmethod
def inferType(obj, operator):
if issubclass(obj._valueType, (float, int)):
return float
return None
def sampleGiven(self, value):
first = value[self.object]
rest = [value[child] for child in self.operands]
op = getattr(first, self.operator)
result = op(*rest)
# handle horrible int/float mismatch
# TODO what is the right way to fix this???
if result is NotImplemented and isinstance(first, int):
first = float(first)
op = getattr(first, self.operator)
result = op(*rest)
return result
def evaluateInner(self, context):
obj = valueInContext(self.object, context)
operands = tuple(valueInContext(arg, context) for arg in self.operands)
return OperatorDistribution(self.operator, obj, operands)
def supportInterval(self):
if self.operator in ('__add__', '__radd__', '__sub__', '__rsub__', '__truediv__'):
assert len(self.operands) == 1
l1, r1 = supportInterval(self.object)
l2, r2 = supportInterval(self.operands[0])
if l1 is None or l2 is None or r1 is None or r2 is None:
return None, None
if self.operator == '__add__' or self.operator == '__radd__':
l = l1 + l2
r = r1 + r2
elif self.operator == '__sub__':
l = l1 - r2
r = r1 - l2
elif self.operator == '__rsub__':
l = l2 - r1
r = r2 - l1
elif self.operator == '__truediv__':
if l2 > 0:
l = l1 / r2 if l1 >= 0 else l1 / l2
r = r1 / l2 if r1 >= 0 else r1 / r2
else:
l, r = None, None # TODO improve
return l, r
return None, None
def isEquivalentTo(self, other):
if not type(other) is OperatorDistribution:
return False
return (self.operator == other.operator
and areEquivalent(self.object, other.object)
and areEquivalent(self.operands, other.operands))
def __str__(self):
return f'{self.object}.{self.operator}{argsToString(self.operands)}'
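# Worked example for supportInterval() above: if X has support [1, 2] and Y
# has support [3, 5], then (X - Y) takes the '__sub__' branch and yields
# (l1 - r2, r1 - l2) == (1 - 5, 2 - 3) == (-4, -1).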
# Operators which can be applied to distributions.
# Note that we deliberately do not include comparisons and __bool__,
# since Scenic does not allow control flow to depend on random variables.
allowedOperators = (
'__neg__',
'__pos__',
'__abs__',
'__add__', '__radd__',
'__sub__', '__rsub__',
'__mul__', '__rmul__',
'__truediv__', '__rtruediv__',
'__floordiv__', '__rfloordiv__',
'__mod__', '__rmod__',
'__divmod__', '__rdivmod__',
'__pow__', '__rpow__',
'__round__',
'__getitem__',
)
def makeOperatorHandler(op):
def handler(self, *args):
return OperatorDistribution(op, self, args)
return handler
for op in allowedOperators:
setattr(Distribution, op, makeOperatorHandler(op))
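# Consequence of the loop above (sketch): ordinary arithmetic on distributions
# builds OperatorDistribution nodes lazily, e.g.
#   Range(0, 1) + 2   # -> OperatorDistribution('__add__', Range(0, 1), (2,))
# and only sampling evaluates it (uniform over [2, 3] here). Comparisons and
# __bool__ are deliberately excluded so control flow cannot depend on random
# values.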
import scenic.core.type_support as type_support
class MultiplexerDistribution(Distribution):
"""Distribution selecting among values based on another distribution."""
def __init__(self, index, options):
self.index = index
self.options = tuple(toDistribution(opt) for opt in options)
assert len(self.options) > 0
valueType = type_support.unifyingType(self.options)
super().__init__(index, *self.options, valueType=valueType)
def sampleGiven(self, value):
idx = value[self.index]
assert 0 <= idx < len(self.options), (idx, len(self.options))
return value[self.options[idx]]
def evaluateInner(self, context):
return type(self)(valueInContext(self.index, context),
(valueInContext(opt, context) for opt in self.options))
def isEquivalentTo(self, other):
if not type(other) == type(self):
return False
return (areEquivalent(self.index, other.index)
and areEquivalent(self.options, other.options))
## Simple distributions
class Range(Distribution):
"""Uniform distribution over a range"""
def __init__(self, low, high):
low = type_support.toScalar(low, f'Range endpoint {low} is not a scalar')
high = type_support.toScalar(high, f'Range endpoint {high} is not a scalar')
super().__init__(low, high, valueType=float)
self.low = low
self.high = high
def __contains__(self, obj):
return self.low <= obj <= self.high
def clone(self):
return type(self)(self.low, self.high)
def bucket(self, buckets=None):
if buckets is None:
buckets = 5
if not isinstance(buckets, int) or buckets < 1:
raise RuntimeError(f'Invalid buckets for Range.bucket: {buckets}')
if not isinstance(self.low, float) or not isinstance(self.high, float):
raise RuntimeError(f'Cannot bucket Range with non-constant endpoints')
endpoints = numpy.linspace(self.low, self.high, buckets+1)
ranges = []
for i, left in enumerate(endpoints[:-1]):
right = endpoints[i+1]
ranges.append(Range(left, right))
return Options(ranges)
def sampleGiven(self, value):
return random.uniform(value[self.low], value[self.high])
def evaluateInner(self, context):
low = valueInContext(self.low, context)
high = valueInContext(self.high, context)
return Range(low, high)
def isEquivalentTo(self, other):
if not type(other) is Range:
return False
return (areEquivalent(self.low, other.low)
and areEquivalent(self.high, other.high))
def __str__(self):
return f'Range({self.low}, {self.high})'
class Normal(Distribution):
"""Normal distribution"""
def __init__(self, mean, stddev):
mean = type_support.toScalar(mean, f'Normal mean {mean} is not a scalar')
stddev = type_support.toScalar(stddev, f'Normal stddev {stddev} is not a scalar')
super().__init__(mean, stddev, valueType=float)
self.mean = mean
self.stddev = stddev
@staticmethod
def cdf(mean, stddev, x):
return (1 + math.erf((x - mean) / (sqrt2 * stddev))) / 2
@staticmethod
def cdfinv(mean, stddev, x):
import scipy # slow import not often needed
return mean + (sqrt2 * stddev * scipy.special.erfinv(2*x - 1))
def clone(self):
return type(self)(self.mean, self.stddev)
def bucket(self, buckets=None):
if not isinstance(self.stddev, float): # TODO relax restriction?
raise RuntimeError(f'Cannot bucket Normal with non-constant standard deviation')
if buckets is None:
buckets = 5
if isinstance(buckets, int):
if buckets < 1:
raise RuntimeError(f'Invalid buckets for Normal.bucket: {buckets}')
elif buckets == 1:
endpoints = []
elif buckets == 2:
endpoints = [0]
else:
left = self.stddev * (-(buckets-3)/2 - 0.5)
right = self.stddev * ((buckets-3)/2 + 0.5)
endpoints = numpy.linspace(left, right, buckets-1)
else:
endpoints = tuple(buckets)
for i, v in enumerate(endpoints[:-1]):
if v >= endpoints[i+1]:
raise RuntimeError('Non-increasing bucket endpoints for '
f'Normal.bucket: {endpoints}')
if len(endpoints) == 0:
return Options([self.clone()])
buckets = [(-math.inf, endpoints[0])]
buckets.extend((v, endpoints[i+1]) for i, v in enumerate(endpoints[:-1]))
buckets.append((endpoints[-1], math.inf))
pieces = []
probs = []
for left, right in buckets:
pieces.append(self.mean + TruncatedNormal(0, self.stddev, left, right))
prob = (Normal.cdf(0, self.stddev, right)
- Normal.cdf(0, self.stddev, left))
probs.append(prob)
assert math.isclose(math.fsum(probs), 1), probs
return Options(dict(zip(pieces, probs)))
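# Worked example of the generic branch above: for buckets=3 the endpoints
# are [-stddev/2, +stddev/2], so the pieces cover (-inf, -s/2), (-s/2, s/2)
# and (s/2, inf), each weighted by its Gaussian probability mass from
# Normal.cdf; the masses are checked to sum to 1.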
def sampleGiven(self, value):
return random.gauss(value[self.mean], value[self.stddev])
def evaluateInner(self, context):
mean = valueInContext(self.mean, context)
stddev = valueInContext(self.stddev, context)
return Normal(mean, stddev)
def isEquivalentTo(self, other):
if not type(other) is Normal:
return False
return (areEquivalent(self.mean, other.mean)
and areEquivalent(self.stddev, other.stddev))
def __str__(self):
return f'Normal({self.mean}, {self.stddev})'
class TruncatedNormal(Normal):
"""Truncated normal distribution."""
def __init__(self, mean, stddev, low, high):
if (not isinstance(low, (float, int))
or not isinstance(high, (float, int))): # TODO relax restriction?
raise RuntimeError('Endpoints of TruncatedNormal must be constant')
super().__init__(mean, stddev)
self.low = low
self.high = high
def clone(self):
return type(self)(self.mean, self.stddev, self.low, self.high)
def bucket(self, buckets=None):
if not isinstance(self.stddev, float): # TODO relax restriction?
raise RuntimeError('Cannot bucket TruncatedNormal with '
'non-constant standard deviation')
if buckets is None:
buckets = 5
if isinstance(buckets, int):
if buckets < 1:
raise RuntimeError(f'Invalid buckets for TruncatedNormal.bucket: {buckets}')
endpoints = numpy.linspace(self.low, self.high, buckets+1)
else:
endpoints = tuple(buckets)
if len(endpoints) < 2:
raise RuntimeError('Too few bucket endpoints for '
f'TruncatedNormal.bucket: {endpoints}')
if endpoints[0] != self.low or endpoints[-1] != self.high:
raise RuntimeError(f'TruncatedNormal.bucket endpoints {endpoints} '
'do not match domain')
for i, v in enumerate(endpoints[:-1]):
if v >= endpoints[i+1]:
raise RuntimeError('Non-increasing bucket endpoints for '
f'TruncatedNormal.bucket: {endpoints}')
pieces, probs = [], []
for i, left in enumerate(endpoints[:-1]):
right = endpoints[i+1]
pieces.append(TruncatedNormal(self.mean, self.stddev, left, right))
prob = (Normal.cdf(self.mean, self.stddev, right)
- Normal.cdf(self.mean, self.stddev, left))
probs.append(prob)
return Options(dict(zip(pieces, probs)))
def sampleGiven(self, value):
# TODO switch to method less prone to underflow?
mean, stddev = value[self.mean], value[self.stddev]
alpha = (self.low - mean) / stddev
beta = (self.high - mean) / stddev
alpha_cdf = Normal.cdf(0, 1, alpha)
beta_cdf = Normal.cdf(0, 1, beta)
if beta_cdf - alpha_cdf < 1e-15:
warnings.warn('low precision when sampling TruncatedNormal')
unif = random.random()
p = alpha_cdf + unif * (beta_cdf - alpha_cdf)
return mean + (stddev * Normal.cdfinv(0, 1, p))
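# The sampler above is inverse-transform sampling: with alpha and beta the
# standardized bounds, p is uniform on [Phi(alpha), Phi(beta)], so
# Phi^-1(p) is a standard normal draw conditioned on [alpha, beta], and
# mean + stddev * Phi^-1(p) is the truncated sample.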
def evaluateInner(self, context):
mean = valueInContext(self.mean, context)
stddev = valueInContext(self.stddev, context)
return TruncatedNormal(mean, stddev, self.low, self.high)
def isEquivalentTo(self, other):
if not type(other) is TruncatedNormal:
return False
return (areEquivalent(self.mean, other.mean)
and areEquivalent(self.stddev, other.stddev)
and self.low == other.low and self.high == other.high)
def __str__(self):
return f'TruncatedNormal({self.mean}, {self.stddev}, {self.low}, {self.high})'
class DiscreteRange(Distribution):
"""Distribution over a range of integers."""
def __init__(self, low, high, weights=None):
if not isinstance(low, int):
raise RuntimeError(f'DiscreteRange endpoint {low} is not a constant integer')
if not isinstance(high, int):
raise RuntimeError(f'DiscreteRange endpoint {high} is not a constant integer')
if not low <= high:
raise RuntimeError(f'DiscreteRange lower bound {low} is above upper bound {high}')
if weights is None:
weights = (1,) * (high - low + 1)
else:
weights = tuple(weights)
assert len(weights) == high - low + 1
super().__init__(valueType=int)
self.low = low
self.high = high
self.weights = weights
self.cumulativeWeights = tuple(itertools.accumulate(weights))
self.options = tuple(range(low, high + 1))  # one option per integer in [low, high]
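# e.g. weights (1, 1, 2) accumulate to cumulativeWeights (1, 2, 4); sampling
# then reduces to the standard weighted-choice pattern of drawing
# u ~ Uniform(0, 4) and bisecting into the cumulative tuple
# (cf. random.choices(..., cum_weights=...)).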
<reponame>baldurk/Vulkan-LoaderAndValidationLayers<gh_stars>1-10
#!/usr/bin/env python3
#
# Copyright (c) 2015-2016 The Khronos Group Inc.
# Copyright (c) 2015-2016 Valve Corporation
# Copyright (c) 2015-2016 LunarG, Inc.
# Copyright (c) 2015-2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
import argparse
import os
import sys
import re
import vulkan
from source_line_info import sourcelineinfo
# vk_helper.py overview
# This script generates code based on a Vulkan input header.
# It generates wrapper functions that can be used to display
# structs in a human-readable txt format, as well as utility functions
# to print enum values as strings
def handle_args():
parser = argparse.ArgumentParser(description='Generate helper code from a Vulkan header.')
parser.add_argument('input_file', help='The input header file from which code will be generated.')
parser.add_argument('--rel_out_dir', required=False, default='vktrace_gen', help='Path relative to exec path to write output files. Will be created if needed.')
parser.add_argument('--abs_out_dir', required=False, default=None, help='Absolute path to write output files. Will be created if needed.')
parser.add_argument('--gen_enum_string_helper', required=False, action='store_true', default=False, help='Enable generation of helper header file to print string versions of enums.')
parser.add_argument('--gen_struct_wrappers', required=False, action='store_true', default=False, help='Enable generation of struct wrapper classes.')
parser.add_argument('--gen_struct_sizes', required=False, action='store_true', default=False, help='Enable generation of struct sizes.')
parser.add_argument('--gen_cmake', required=False, action='store_true', default=False, help='Enable generation of cmake file for generated code.')
parser.add_argument('--gen_graphviz', required=False, action='store_true', default=False, help='Enable generation of graphviz dot file.')
#parser.add_argument('--test', action='store_true', default=False, help='Run simple test.')
return parser.parse_args()
# TODO : Ideally these data structs would be opaque to user and could be wrapped
# in their own class(es) to make them friendly for data look-up
# Dicts for Data storage
# enum_val_dict[value_name] = dict keys are the string enum value names for all enums
# |-------->['type'] = the type of enum class into which the value falls
# |-------->['val'] = the value assigned to this particular value_name
# '-------->['unique'] = bool designating if this enum 'val' is unique within this enum 'type'
enum_val_dict = {}
# enum_type_dict['type'] = the type or class of enum
# '----->['val_name1', 'val_name2'...] = each type references a list of val_names within this type
enum_type_dict = {}
# struct_dict['struct_basename'] = the base (non-typedef'd) name of the struct
# |->[<member_num>] = members are stored via their integer placement in the struct
# | |->['name'] = string name of this struct member
# ... |->['full_type'] = complete type qualifier for this member
# |->['type'] = base type for this member
# |->['ptr'] = bool indicating if this member is ptr
# |->['const'] = bool indicating if this member is const
# |->['struct'] = bool indicating if this member is a struct type
# |->['array'] = bool indicating if this member is an array
# |->['dyn_array'] = bool indicating if member is a dynamically sized array
# '->['array_size'] = For dyn_array, member name used to size array, else int size for static array
struct_dict = {}
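# Hypothetical example entry, for illustration only (VkExtent2D, member 0):
# struct_dict['VkExtent2D'][0] = {'name': 'width', 'full_type': 'uint32_t width',
#                                 'type': 'uint32_t', 'ptr': False, 'const': False,
#                                 'struct': False, 'array': False,
#                                 'dyn_array': False, 'array_size': 0}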
struct_order_list = [] # struct names in order they're declared
# Store struct names that require #ifdef guards
# dict stores struct and enum definitions that are guarded by ifdef as the key
# and the txt of the ifdef is the value
ifdef_dict = {}
# typedef_fwd_dict stores mapping from orig_type_name -> new_type_name
typedef_fwd_dict = {}
# typedef_rev_dict stores mapping from new_type_name -> orig_type_name
typedef_rev_dict = {} # store new_name -> orig_name mapping
# types_dict['id_name'] = identifier name will map to its type
# '---->'type' = currently either 'struct' or 'enum'
types_dict = {} # store orig_name -> type mapping
# Class that parses header file and generates data structures that can
# Then be used for other tasks
class HeaderFileParser:
def __init__(self, header_file=None):
self.header_file = header_file
# store header data in various formats, see above for more info
self.enum_val_dict = {}
self.enum_type_dict = {}
self.struct_dict = {}
self.typedef_fwd_dict = {}
self.typedef_rev_dict = {}
self.types_dict = {}
self.last_struct_count_name = ''
def setHeaderFile(self, header_file):
self.header_file = header_file
def get_enum_val_dict(self):
return self.enum_val_dict
def get_enum_type_dict(self):
return self.enum_type_dict
def get_struct_dict(self):
return self.struct_dict
def get_typedef_fwd_dict(self):
return self.typedef_fwd_dict
def get_typedef_rev_dict(self):
return self.typedef_rev_dict
def get_types_dict(self):
return self.types_dict
# Parse header file into data structures
def parse(self):
# parse through the file, identifying different sections
parse_enum = False
parse_struct = False
member_num = 0
# TODO : Comment parsing is very fragile but handles 2 known files
block_comment = False
prev_count_name = ''
ifdef_txt = ''
ifdef_active = 0
exclude_struct_list = ['VkPlatformHandleXcbKHR', 'VkPlatformHandleX11KHR']
with open(self.header_file) as f:
for line in f:
if True in [ifd_txt in line for ifd_txt in ['#ifdef ', '#ifndef ']]:
ifdef_txt = line.split()[1]
ifdef_active = ifdef_active + 1
continue
if ifdef_active != 0 and '#endif' in line:
ifdef_active = ifdef_active - 1
if block_comment:
if '*/' in line:
block_comment = False
continue
if '/*' in line:
if '*/' in line: # single line block comment
continue
block_comment = True
elif 0 == len(line.split()):
#print("Skipping empty line")
continue
elif line.split()[0].strip().startswith("//"):
#print("Skipping commented line %s" % line)
continue
elif 'typedef enum' in line:
(ty_txt, en_txt, base_type) = line.strip().split(None, 2)
#print("Found ENUM type %s" % base_type)
if '{' == base_type:
base_type = 'tmp_enum'
parse_enum = True
default_enum_val = 0
self.types_dict[base_type] = 'enum'
elif 'typedef struct' in line or 'typedef union' in line:
if True in [ex_type in line for ex_type in exclude_struct_list]:
continue
(ty_txt, st_txt, base_type) = line.strip().split(None, 2)
if ' ' in base_type:
(ignored, base_type) = base_type.strip().split(None, 1)
#print("Found STRUCT type: %s" % base_type)
# Note: This really needs to be updated to handle one line struct definition, like
# typedef struct obj##_T { uint64_t handle; } obj;
if ('{' == base_type or not (' ' in base_type)):
base_type = 'tmp_struct'
parse_struct = True
self.types_dict[base_type] = 'struct'
# elif 'typedef union' in line:
# (ty_txt, st_txt, base_type) = line.strip().split(None, 2)
# print("Found UNION type: %s" % base_type)
# parse_struct = True
# self.types_dict[base_type] = 'struct'
elif '}' in line and (parse_enum or parse_struct):
if len(line.split()) > 1: # deals with embedded union in one struct
parse_enum = False
parse_struct = False
self.last_struct_count_name = ''
member_num = 0
(cur_char, targ_type) = line.strip().split(None, 1)
if 'tmp_struct' == base_type:
base_type = targ_type.strip(';')
if True in [ex_type in base_type for ex_type in exclude_struct_list]:
del self.struct_dict['tmp_struct']
continue
#print("Found Actual Struct type %s" % base_type)
self.struct_dict[base_type] = self.struct_dict['tmp_struct']
self.struct_dict.pop('tmp_struct', 0)
struct_order_list.append(base_type)
self.types_dict[base_type] = 'struct'
self.types_dict.pop('tmp_struct', 0)
elif 'tmp_enum' == base_type:
base_type = targ_type.strip(';')
#print("Found Actual ENUM type %s" % base_type)
for n in self.enum_val_dict:
if 'tmp_enum' == self.enum_val_dict[n]['type']:
self.enum_val_dict[n]['type'] = base_type
# self.enum_val_dict[base_type] = self.enum_val_dict['tmp_enum']
# self.enum_val_dict.pop('tmp_enum', 0)
self.enum_type_dict[base_type] = self.enum_type_dict['tmp_enum']
self.enum_type_dict.pop('tmp_enum', 0)
self.types_dict[base_type] = 'enum'
self.types_dict.pop('tmp_enum', 0)
if ifdef_active:
ifdef_dict[base_type] = ifdef_txt
self.typedef_fwd_dict[base_type] = targ_type.strip(';')
self.typedef_rev_dict[targ_type.strip(';')] = base_type
#print("fwd_dict: %s = %s" % (base_type, targ_type))
elif parse_enum:
#if 'VK_MAX_ENUM' not in line and '{' not in line:
if True not in [ens in line for ens in ['{', '_MAX_ENUM', '_BEGIN_RANGE', '_END_RANGE', '_NUM = ', '_ENUM_RANGE']]:
self._add_enum(line, base_type, default_enum_val)
default_enum_val += 1
elif parse_struct:
if ';' in line:
self._add_struct(line, base_type, member_num)
member_num = member_num + 1
# populate enum dicts based on enum lines
def _add_enum(self, line_txt, enum_type, def_enum_val):
#print("Parsing enum line %s" % line_txt)
if '=' in line_txt:
(enum_name, eq_char, enum_val) = line_txt.split(None, 2)
else:
enum_name = line_txt.split(',')[0]
enum_val = str(def_enum_val)
self.enum_val_dict[enum_name] = {}
self.enum_val_dict[enum_name]['type'] = enum_type
# strip comma and comment, then extra split in case of no comma w/ comments
enum_val = enum_val.strip().split(',', 1)[0]
self.enum_val_dict[enum_name]['val'] = enum_val.split()[0]
# Perform conversion of VK_BIT macro
if 'VK_BIT' in self.enum_val_dict[enum_name]['val']:
vk_bit_val = self.enum_val_dict[enum_name]['val']
bit_shift = int(vk_bit_val[vk_bit_val.find('(')+1:vk_bit_val.find(')')], 0)
self.enum_val_dict[enum_name]['val'] = str(1 << bit_shift)
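# e.g. a value of 'VK_BIT(3)' parses bit_shift=3 and is stored as '8' (1 << 3)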
else:
# account for negative values surrounded by parens
self.enum_val_dict[enum_name]['val'] = self.enum_val_dict[enum_name]['val'].strip(')').replace('-(', '-')
# Try to cast to int to determine if enum value is unique
try:
#print("ENUM val:", self.enum_val_dict[enum_name]['val'])
int(self.enum_val_dict[enum_name]['val'], 0)
self.enum_val_dict[enum_name]['unique'] = True
#print("ENUM has num value")
except ValueError:
self.enum_val_dict[enum_name]['unique'] = False
#print("ENUM is not a number value")
# Update enum_type_dict as well
if not enum_type in self.enum_type_dict:
self.enum_type_dict[enum_type] = []
self.enum_type_dict[enum_type].append(enum_name)
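# e.g. the line 'VK_FORMAT_R8_UNORM = 9,' yields enum_name 'VK_FORMAT_R8_UNORM'
# and val '9'; a line with no '=' is assigned the running default value
# def_enum_val instead.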
| |
cosmo_fns=None, DA=None, H=None,
rescale_da=1., rescale_h=1.):
"""
Transform constraints on D_A(z) and H(z) into constraints on the LSS
distance measures, D_V(z) and F(z).
Parameters
----------
z : float
Redshift of the current bin
F : array_like
Fisher matrix for the current bin
paramnames : list
Ordered list of parameter names for the Fisher matrix
cosmo_fns : tuple of functions, optional
Functions for H(z), r(z), D(z), f(z). If specified, these are used to
calculate H(z) and D_A(z).
DA, H : float, optional
Values of D_A(z) and H(z) at the current redshift. These will be used
if cosmo_fns is not specified.
rescale_da, rescale_h : float, optional
Scaling factors for H and D_A in the original Fisher matrix.
Returns
-------
Fnew : array_like
Updated Fisher matrix, with D_A replaced by D_V, and H replaced by F.
newnames : list
Updated list of parameter names.
"""
# Calculate distances
if cosmo_fns is not None:
HH, rr, DD, ff = cosmo_fns
DA = rr(z) / (1. + z)
H = HH(z)
DV = ((1.+z)**2. * DA**2. * C*z / H)**(1./3.)
FF = (1.+z) * DA * H / C
# Indices of parameters to be replaced (DA <-> DV, H <-> F)
idv = ida = paramnames.index('DA')
iff = ih = paramnames.index('H')
# Construct extension operator, d(D_A, H)/d(D_V, F)
S = np.eye(F.shape[0])
S[ida,idv] = DA / DV # d D_A / d D_V
S[ida,iff] = DA / (3.*FF) # d D_A / d F
S[ih,idv] = - H / DV # d H / d D_V
S[ih,iff] = 2.*H / (3.*FF) # d H / d F
# Rescale Fisher matrix for DA, H
F[ida,:] /= rescale_da; F[:,ida] /= rescale_da
F[ih,:] /= rescale_h; F[:,ih] /= rescale_h
# Multiply old Fisher matrix by extension operator to get new Fisher matrix
Fnew = np.dot(S.T, np.dot(F, S))
# Rename parameters
newnames = copy.deepcopy(paramnames)
newnames[ida] = 'DV'
newnames[ih] = 'F'
return Fnew, newnames
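# Minimal usage sketch (hypothetical numbers; 'fn' stands for the enclosing
# function; assumes numpy imported as np):
#
#     F = np.diag([1e-4, 1e-2])             # Fisher block ordered as ['DA', 'H']
#     Fnew, names = fn(1.0, F, ['DA', 'H'], DA=1700., H=120.)
#     # names == ['DV', 'F'] and Fnew = S^T F S, with S the Jacobian
#     # d(D_A, H)/d(D_V, F) constructed above.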
def expand_fisher_matrix(z, derivs, F, names, exclude=[], fsigma8=False):
"""
Transform Fisher matrix with (f, aperp, apar) parameters into one with
dark energy EOS parameters (Omega_k, Omega_DE, w0, wa, h, gamma) instead.
Parameters
----------
z : float
Central redshift of the survey.
derivs : 2D list of interp. fns.
Array of interpolation functions used to evaluate derivatives needed to
transform to new parameter set. Precompute this using the
eos_fisher_matrix_derivs() function.
F : array_like
Fisher matrix for the old parameters.
names : list
List of names of the parameters in the current Fisher matrix.
exclude : list, optional
Prevent a subset of the functions [f, aperp, apar] from being converted
to EOS parameters. e.g. exclude = [1,] will prevent aperp from
contributing to the EOS parameter constraints.
fsigma8 : bool, optional
Whether the projection should be done from f(z) or f(z)*sigma_8(z).
Default: False.
Returns
-------
Fnew : array_like
Fisher matrix for the new parameters.
paramnames : list
Names of the parameters in the expanded Fisher matrix.
"""
a = 1. / (1. + z)
# Define mapping between old and new Fisher matrices (including expanded P(k) terms)
old = copy.deepcopy(names)
Nold = len(old)
if not fsigma8:
oldidxs = [old.index(p) for p in ['f', 'aperp', 'apar']]
else:
oldidxs = [old.index(p) for p in ['fs8', 'aperp', 'apar']]
# Insert new parameters immediately after 'apar'
new_params = ['omegak', 'omegaDE', 'w0', 'wa', 'h', 'gamma', 'sigma_8']
new = old[:old.index('apar')+1]
new += new_params
new += old[old.index('apar')+1:]
newidxs = [new.index(p) for p in new_params]
Nnew = len(new)
# Construct extension operator, d(f,aperp,apar)/d(beta)
S = np.zeros((Nold, Nnew))
for i in range(Nold):
for j in range(Nnew):
# Check if this is one of the indices that is being replaced
if i in oldidxs and j in newidxs:
# Old parameter is being replaced
ii = oldidxs.index(i) # newidxs
jj = newidxs.index(j)
if ii not in exclude:
S[i,j] = derivs[ii][jj](a)
else:
if old[i] == new[j]: S[i,j] = 1.
# Multiply old Fisher matrix by extension operator to get new Fisher matrix
Fnew = np.dot(S.T, np.dot(F, S))
return Fnew, new
def fisher_with_excluded_params(F, excl, names):
"""
Return Fisher matrix with certain rows/columns excluded
"""
# Remove excluded parameters from Fisher matrix
Nnew = F.shape[0] - len(excl)
msk = [i not in excl for i in range(F.shape[0])] # 1D mask
Fnew = F[np.outer(msk,msk)].reshape((Nnew, Nnew))
# Remove names of excluded parameters
excl_names = [names[i] for i in excl]
new_names = copy.deepcopy(names)
for n in excl_names: del new_names[new_names.index(n)]
return Fnew, new_names
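# e.g. excl=[1] on a 3x3 Fisher matrix keeps parameters {0, 2}: the boolean
# outer product np.outer(msk, msk) picks out the 2x2 submatrix, which
# reshape restores to square form.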
def indices_for_param_names(paramnames, params, warn=True):
"""
Returns indices of parameters
Parameters
----------
paramnames : list
Full (ordered) list of parameters used in Fisher matrix.
params : list
List of parameter names to find indices for. If the name is given with
a trailing asterisk, e.g. 'pk*', all parameters *starting* with 'pk' will
be matched. Matches will not be made mid-string.
warn : bool, optional
Whether to show a warning if a parameter is not found. Default: true.
Returns
-------
idxs : list
List of indices of parameters specified in 'params'
"""
if type(params) is str: params = [params,] # Convert single param. string to list
idxs = []
for p in params:
if p[-1] == "*":
# Wildcard; match all params
for i in range(len(paramnames)):
if p[:-1] == paramnames[i][:len(p[:-1])]:
idxs.append(i)
else:
# Add index of parameter to list
if p in paramnames:
idxs.append( paramnames.index(p) )
else:
# Parameter not found; throw a warning
if warn: print("\tindices_for_param_names(): %s not found in param list." % p)
return np.array(idxs)
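# e.g. indices_for_param_names(['A', 'pk0', 'pk1', 'bHI'], ['pk*', 'A'])
# returns array([1, 2, 0]): 'pk*' prefix-matches pk0 and pk1, then 'A' is
# looked up directly.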
def load_param_names(fname):
"""
Load parameter names from Fisher matrix with a given filename.
(Names are stored as a header comment.)
"""
f = open(fname, 'r')
hdr = f.readline()
if hdr[0] == '#':
names = hdr.split()[1:] # (trim leading #)
else:
raise ValueError("Unable to process line 0 of %s as a header." % fname)
f.close()
return names
def fisher( zmin, zmax, cosmo, expt, cosmo_fns, return_pk=False, kbins=None,
massive_nu_fn=None, Neff_fn=None, transfer_fn=None, cv_limited=False,
switches=[], kmin=1e-7, kmax=1.0):
"""
Return Fisher matrix (and binned power spectrum, with errors) for a given
fiducial cosmology and experimental settings.
Parameters
----------
zmin, zmax : float
Redshift window of survey
cosmo : dict
Dictionary of fiducial cosmological parameters
expt : dict
Dictionary of experimental parameters
cosmo_fns : tuple of functions
Tuple of cosmology functions of redshift, {H(z), r(z), D(z), f(z)}.
These should all be callable functions.
return_pk : bool, optional
If set to True, returns errors and fiducial values for binned power
spectrum.
kbins : int, optional
If return_pk=True, defines the bin edges in k for the binned P(k).
massive_nu_fn : interpolation fn.
Interpolating function for calculating derivative of log[P(k)] with
respect to neutrino mass, Sum(mnu)
Neff_fn : interpolation fn.
Interpolating function for calculating derivative of log[P(k)] with
respect to effective number of neutrinos.
switches : list, optional
List of additional parameters to include in the Fisher matrix. Options
are 'sdbias' (scale-dep. bias parameter b_1), and 'mg' (modified
gravity parameters gamma0, gamma1, eta0, eta1, A_xi, k_mg)
transfer_fn : interpolation fn.
Interpolating function for calculating T(k) and its derivative w.r.t k.
Returns
-------
F : array_like (2D)
Fisher matrix for the parameters (aperp, apar, bHI, A, sigma_nl).
pk : array_like, optional
If return_pk=True, returns errors and fiducial values for binned power
spectrum.
"""
# Copy, to make sure we don't modify input expt or cosmo
cosmo = copy.deepcopy(cosmo)
expt = copy.deepcopy(expt)
# Fetch/precompute cosmology functions
HH, rr, DD, ff = cosmo_fns
# Sanity check: k bins must be defined if return_pk is True
if return_pk and kbins is None:
raise NameError("If return_pk=True, kbins must be | |
curve grows 'into' the surface
elif (test_u_direction[3] > 0): # compare projection path to surface normal: dot product positive
direction = -1 # > curve grows 'out of' the surface
# initialize error
error = 1.0
# set up binary search loop
loop_count = 0
while (error > tol and loop_count < 100):
test_u = (test_span[1] + test_span[0]) / 2 # pick u in the center of the search span
test = isect_test(curve, surf, test_u) # project curve(u) onto surface
error = test[2] # set current intersection error
if ((test[3]*direction) < 0): # is the projection still coming from outside the surface?
test_span = [test_u, test_span[1]] # > use second half of current span for the next search
if ((test[3]*direction) > 0): # is the projection coming from inside the surface?
test_span = [test_span[0], test_u] # > use first half of current span for the next search
loop_count=loop_count + 1
print('step', loop_count, 'u', test_u, 'error', test[2])
if error > tol:
print('no intersection found within', tol)
isect_curve_surf = 'NONE'
else:
isect_curve_surf = [test[0], test_u, test[4]]
return isect_curve_surf
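# The loop above is plain bisection on u: test_span always brackets a sign
# change of the signed projection direction test[3]*direction, so each pass
# halves the interval, and the loop_count cap bounds the search if tol is
# never met. The same pattern on a scalar function f with f(lo) < 0 < f(hi):
#
#     while hi - lo > tol:
#         mid = (lo + hi) / 2.0
#         if f(mid) < 0: lo = mid
#         else: hi = mid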
#################################################################################################
#################################################################################################
#### SECTION 2: PYTHON FEATURE CLASSES - PARAMETRIC LINKING BETWEEN OBJECTS - IN PROGRESS
# poles = 3D points with weights, as [[x,y,z],w], or [x,y,z] (these are leftovers waiting to receive weights).
# need to make a class for this...but i'm hoping to have FreeCAD do it for me :)
#### new classes needed to implement linked parametric behavior:
### polyline of control points created from a variety of sketches.
## 4 points, for use in Bezier cubic curves.
# ControlPoly4_3L(sketch) # made from a single sketch containing 3 line objects
# ControlPoly4_2N(sketch0, sketch1) # made from 2 node sketches. each node sketch contain one line (tangent), and one circle (endpoint) located at one end of the line.
# ControlPoly4_Arc(sketch) # made from a single sketch containing 1 arc object - need to rename. will now be used
# to convert the first element in the input sketch. arc, ellipse, parabola, line, etc.
## 6 points, for use in 6 point NURBS cubic curves.
# ControlPoly6_5L(sketch) # made from a single sketch containing 5 line objects
# ControlPoly6_2N(sketch0, sketch1) # made from 2 node sketches. each node sketch contain 2 lines, and one circle.
# ControlPoly6_Arc(sketch) # made from a single sketch containing 1 arc object
### polyhedra of control points created from loops of CubicControlPolys.
## 4 points by 4 point grids, for use in BezierXBezier Bicubic surface patches.
# ControlGrid44_4(poly0, poly1, poly2, poly3) # made from 4 CubicControlPoly4.
# ControlGrid44_3(poly0, poly1, poly2) # made from 3 CubicControlPoly4. degenerate grid.
## 6 points by 4 point grids, for use in BezierX6P Bicubic surface patches.
# ControlGrid64_4(poly0, poly1, poly2, poly3) # made from 2 CubicControlPoly6 and 2 CubicControlPoly4.
# ControlGrid64_3(poly0, poly1, poly2) # made from 2 CubicControlPoly4 and 1 CubicControlPoly6. degenerate grid.
## 6 points by 6 point grids, for use in 6PX6P Bicubic surface patches.
# ControlGrid66_4(poly0, poly1, poly2, poly3) # made from 4 CubicControlPoly6.
# ControlGrid66_3(poly0, poly1, poly2) # made from 3 CubicControlPoly6. degenerate grid.
### cubic curves created from CubicControlPolys
# CubicCurve_4
# CubicCurve_6
### BiCubic surfaces created from CubicControlGrids
# CubicSurface_44
# CubicSurface_64
# CubicSurface_66
#### CLASS RECAP
# ControlPoly4_3L(sketch) #tested-works 2016-08-04
# ControlPoly4_2N(sketch0, sketch1) #tested-works 2016-08-04
# ControlPoly4_Arc(sketch) #tested-works 2016-08-05
# ControlPoly6_5L(sketch) #tested-works 2016-08-06
# ControlPoly6_2N(sketch0, sketch1) #tested-works 2016-08-06
# ControlPoly6_Arc(sketch) #tested-works 2016-08-17
# ControlGrid44_4(poly0, poly1, poly2, poly3) #tested-works 2016-08-05
# ControlGrid44_3(poly0, poly1, poly2) #tested-works 2016-08-21 - works, but result is not very good
# ControlGrid64_4(poly0, poly1, poly2, poly3) #tested-works 2016-08-14
# ControlGrid64_3(poly0, poly1, poly2) #tested-works 2016-08-21 - works, but result is quite ugly
# ControlGrid66_4(poly0, poly1, poly2, poly3) #tested-works 2016-08-06
# ControlGrid64_3(poly4_0, poly6_1, poly4_2)
# ControlGrid64_3_BySurface(CubicSurface_44)
# CubicCurve_4 #tested-works 2016-08-04
# CubicCurve_6 #tested-works 2016-08-06
# ControlPoly6_Bezier(CubicCurve4_0) #tested-works 2016-08-17
# ControlPoly6_FilletBezier(CubicCurve4_0,CubicCurve4_1) #tested-works 2016-08-20
# CubicSurface_44 #tested-works 2016-08-04
# CubicSurface_64 #tested-works 2016-08-14
# CubicSurface_66 #tested-works 2016-08-06
# Point_onCurve(NL_Curve,u) #tested-works 2016-11-16
# ControlPoly4_segment(NL_Curve,Point_onCurve_0,Point_onCurve_1)
#### used legacy functions:
#
# orient_a_to_b(lista,listb)
# Bezier_Cubic_curve(WeightedPoles)
# Bezier_Bicubic_surf(WeightedPoles)
# NURBS_Cubic_6P_curve(WeightedPoles)
# NURBS_Cubic_66_surf(WeightedPoles)
# NURBS_Cubic_64_surf(WeightedPoles)
#
#################################################################################################
#### control polygons (+sketch to input)
class ControlPoly4_3L:
def __init__(self, obj , sketch):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlPoly4_3L class Init\n")
obj.addProperty("App::PropertyLink","Sketch","ControlPoly4_3L","reference Sketch").Sketch = sketch
obj.addProperty("Part::PropertyGeometryList","Legs","ControlPoly4_3L","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlPoly4_3L","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlPoly4_3L","Weights").Weights = [1.0,1.0,1.0,1.0]
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
# get all points on first three lines...error check later
p00s=fp.Sketch.Geometry[0].StartPoint
p01s=fp.Sketch.Geometry[0].EndPoint
p10s=fp.Sketch.Geometry[1].StartPoint
p11s=fp.Sketch.Geometry[1].EndPoint
p20s=fp.Sketch.Geometry[2].StartPoint
p21s=fp.Sketch.Geometry[2].EndPoint
# to world
mat=fp.Sketch.Placement.toMatrix()
p00=mat.multiply(p00s)
p01=mat.multiply(p01s)
p20=mat.multiply(p20s)
p21=mat.multiply(p21s)
#for now assume
fp.Poles=[p00,p01,p20,p21]
# prepare the lines to draw the polyline
Leg0=Part.LineSegment(p00,p01)
Leg1=Part.LineSegment(p01,p20)
Leg2=Part.LineSegment(p20,p21)
#set the polygon legs property
fp.Legs=[Leg0, Leg1, Leg2]
# define the shape for visualization
fp.Shape = Part.Shape(fp.Legs)
class ControlPoly4_2N:
def __init__(self, obj , sketch0, sketch1):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlPoly4_2N class Init\n")
obj.addProperty("App::PropertyLink","Sketch0","ControlPoly4_2N","reference Sketch").Sketch0 = sketch0
obj.addProperty("App::PropertyLink","Sketch1","ControlPoly4_2N","reference Sketch").Sketch1 = sketch1
obj.addProperty("Part::PropertyGeometryList","Legs","ControlPoly4_2N","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlPoly4_2N","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlPoly4_2N","Weights").Weights = [1.0,1.0,1.0,1.0]
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
# process Sketch0
obj00=fp.Sketch0.Geometry[0]
obj01=fp.Sketch0.Geometry[1]
if obj00.__class__==Part.Circle:
cir0=obj00
if obj01.__class__==Part.Circle:
cir0=obj01
if obj00.__class__==Part.LineSegment:
lin0=obj00
if obj01.__class__==Part.LineSegment:
lin0=obj01
p00s=cir0.Center
if lin0.StartPoint==p00s:
p01s=lin0.EndPoint
elif lin0.EndPoint==p00s:
p01s=lin0.StartPoint
# to world
mat0=fp.Sketch0.Placement.toMatrix()
p00=mat0.multiply(p00s)
p01=mat0.multiply(p01s)
# process Sketch1
obj10=fp.Sketch1.Geometry[0]
obj11=fp.Sketch1.Geometry[1]
if obj10.__class__==Part.Circle:
cir1=obj10
if obj11.__class__==Part.Circle:
cir1=obj11
if obj10.__class__==Part.LineSegment:
lin1=obj10
if obj11.__class__==Part.LineSegment:
lin1=obj11
p11s=cir1.Center
if lin1.StartPoint==p11s:
p10s=lin1.EndPoint
elif lin1.EndPoint==p11s:
p10s=lin1.StartPoint
# to world
mat1=fp.Sketch1.Placement.toMatrix()
p10=mat1.multiply(p10s)
p11=mat1.multiply(p11s)
# set the poles
fp.Poles=[p00,p01,p10,p11]
# prepare the polygon
Leg0=Part.LineSegment(p00,p01)
Leg1=Part.LineSegment(p01,p10)
Leg2=Part.LineSegment(p10,p11)
#set the polygon legs property
fp.Legs=[Leg0, Leg1, Leg2]
# define the shape for visualization
fp.Shape = Part.Shape(fp.Legs)
class ControlPoly4_Arc:
def __init__(self, obj , sketch):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlPoly4_Arc class Init\n")
obj.addProperty("App::PropertyLink","Sketch","ControlPoly4_Arc","reference Sketch").Sketch = sketch
obj.addProperty("Part::PropertyGeometryList","Legs","ControlPoly4_Arc","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlPoly4_Arc","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlPoly4_Arc","Weights").Weights = [1.0,1.0,1.0,1.0]
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
# process the sketch arc...error check later
ArcNurbs=fp.Sketch.Shape.Edges[0].toNurbs().Edge1.Curve
ArcNurbs.increaseDegree(3)
p0=ArcNurbs.getPole(1)
p1=ArcNurbs.getPole(2)
p2=ArcNurbs.getPole(3)
p3=ArcNurbs.getPole(4)
# already to world?
#mat=fp.Sketch.Placement.toMatrix()
#p0=mat.multiply(p0s)
#p1=mat.multiply(p1s)
#p2=mat.multiply(p2s)
#p3=mat.multiply(p3s)
fp.Poles=[p0,p1,p2,p3]
# set the weights
fp.Weights = ArcNurbs.getWeights()
# prepare the lines to draw the polyline
Leg0=Part.LineSegment(p0,p1)
Leg1=Part.LineSegment(p1,p2)
Leg2=Part.LineSegment(p2,p3)
#set the polygon legs property
fp.Legs=[Leg0, Leg1, Leg2]
# define the shape for visualization
fp.Shape = Part.Shape(fp.Legs)
class ControlPoly6_5L:
def __init__(self, obj , sketch):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlPoly6_5L class Init\n")
obj.addProperty("App::PropertyLink","Sketch","ControlPoly4_3L","reference Sketch").Sketch = sketch
obj.addProperty("Part::PropertyGeometryList","Legs","ControlPoly4_3L","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlPoly4_3L","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlPoly4_3L","Weights").Weights = [1.0,1.0,1.0,1.0,1.0,1.0]
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
# get all points on first three lines...error check later
p00s=fp.Sketch.Geometry[0].StartPoint
p01s=fp.Sketch.Geometry[0].EndPoint
p10s=fp.Sketch.Geometry[1].StartPoint
p11s=fp.Sketch.Geometry[1].EndPoint
p20s=fp.Sketch.Geometry[2].StartPoint
p21s=fp.Sketch.Geometry[2].EndPoint
p30s=fp.Sketch.Geometry[3].StartPoint
p31s=fp.Sketch.Geometry[3].EndPoint
p40s=fp.Sketch.Geometry[4].StartPoint
p41s=fp.Sketch.Geometry[4].EndPoint
# to world
mat=fp.Sketch.Placement.toMatrix()
p00=mat.multiply(p00s)
p01=mat.multiply(p01s)
p20=mat.multiply(p20s)
p21=mat.multiply(p21s)
p40=mat.multiply(p40s)
p41=mat.multiply(p41s)
#for now assume
fp.Poles=[p00,p01,p20,p21,p40,p41]
# prepare the lines to draw the polyline
Leg0=Part.LineSegment(p00,p01)
Leg1=Part.LineSegment(p01,p20)
Leg2=Part.LineSegment(p20,p21)
Leg4=Part.LineSegment(p21,p40)
Leg5=Part.LineSegment(p40,p41)
#set the polygon legs property
fp.Legs=[Leg0, Leg1, Leg2, Leg4, Leg5]
# define the shape for visualization
fp.Shape = Part.Shape(fp.Legs)
class ControlPoly6_2N:
def __init__(self, obj , sketch0, sketch1):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlPoly6_2N class Init\n")
obj.addProperty("App::PropertyLink","Sketch0","ControlPoly6_2N","reference Sketch").Sketch0 = sketch0
obj.addProperty("App::PropertyLink","Sketch1","ControlPoly6_2N","reference Sketch").Sketch1 = sketch1
obj.addProperty("Part::PropertyGeometryList","Legs","ControlPoly6_2N","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlPoly6_2N","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlPoly6_2N","Weights").Weights = [1.0,1.0,1.0,1.0,1.0,1.0]
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
# process Sketch0
obj00=fp.Sketch0.Geometry[0]
obj01=fp.Sketch0.Geometry[1]
obj02=fp.Sketch0.Geometry[2]
# must draw the pieces in the correct order! improve later
p00s=obj00.Center
p01s=obj01.EndPoint
p02s=obj02.EndPoint
# to world
mat0=fp.Sketch0.Placement.toMatrix()
p00=mat0.multiply(p00s)
p01=mat0.multiply(p01s)
p02=mat0.multiply(p02s)
# process Sketch1
obj10=fp.Sketch1.Geometry[0]
obj11=fp.Sketch1.Geometry[1]
obj12=fp.Sketch1.Geometry[2]
# must draw the pieces in the correct order! improve later
p12s=obj10.Center
p11s=obj11.EndPoint
p10s=obj12.EndPoint
# to world
mat1=fp.Sketch1.Placement.toMatrix()
p10=mat1.multiply(p10s)
p11=mat1.multiply(p11s)
p12=mat1.multiply(p12s)
# set the poles
fp.Poles=[p00,p01,p02,p10,p11, p12]
# prepare the polygon
Leg0=Part.LineSegment(p00,p01)
Leg1=Part.LineSegment(p01,p02)
Leg2=Part.LineSegment(p02,p10)
Leg3=Part.LineSegment(p10,p11)
Leg4=Part.LineSegment(p11,p12)
#set the polygon legs property
fp.Legs=[Leg0, Leg1, Leg2, Leg3, Leg4]
# define the shape for visualization
fp.Shape = Part.Shape(fp.Legs)
class ControlPoly6_Arc:
def __init__(self, obj , sketch):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlPoly6_Arc class Init\n")
obj.addProperty("App::PropertyLink","Sketch","ControlPoly6_Arc","reference Sketch").Sketch = sketch
obj.addProperty("Part::PropertyGeometryList","Legs","ControlPoly6_Arc","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlPoly6_Arc","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlPoly6_Arc","Weights").Weights = [1.0,1.0,1.0,1.0]
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
# process the sketch arc...error check later
ArcNurbs=fp.Sketch.Shape.Edges[0].toNurbs().Edge1.Curve
ArcNurbs.increaseDegree(3)
start=ArcNurbs.FirstParameter
end=ArcNurbs.LastParameter
knot1=start+(end-start)/3.0
knot2=end-(end-start)/3.0
ArcNurbs.insertKnot(knot1)
ArcNurbs.insertKnot(knot2)
p0=ArcNurbs.getPole(1)
p1=ArcNurbs.getPole(2)
p2=ArcNurbs.getPole(3)
p3=ArcNurbs.getPole(4)
p4=ArcNurbs.getPole(5)
p5=ArcNurbs.getPole(6)
# already to world?
#mat=fp.Sketch.Placement.toMatrix()
#p0=mat.multiply(p0s)
#p1=mat.multiply(p1s)
#p2=mat.multiply(p2s)
#p3=mat.multiply(p3s)
fp.Poles=[p0,p1,p2,p3,p4,p5]
# set the weights
fp.Weights = ArcNurbs.getWeights()
# prepare the lines to draw the polyline
Leg0=Part.LineSegment(p0,p1)
Leg1=Part.LineSegment(p1,p2)
Leg2=Part.LineSegment(p2,p3)
Leg3=Part.LineSegment(p3,p4)
Leg4=Part.LineSegment(p4,p5)
#set the polygon legs property
fp.Legs=[Leg0, Leg1, Leg2, Leg3, Leg4]
# define the shape for visualization
fp.Shape = Part.Shape(fp.Legs)
###################################################################################################
#### control grids (+poly to input)
class ControlGrid44_4:
def __init__(self, obj , poly0, poly1, poly2, poly3):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlGrid44_4 class Init\n")
obj.addProperty("App::PropertyLink","Poly0","ControlGrid44_4","control polygon").Poly0 = poly0
obj.addProperty("App::PropertyLink","Poly1","ControlGrid44_4","control polygon").Poly1 = poly1
obj.addProperty("App::PropertyLink","Poly2","ControlGrid44_4","control polygon").Poly2 = poly2
obj.addProperty("App::PropertyLink","Poly3","ControlGrid44_4","control polygon").Poly3 = poly3
obj.addProperty("Part::PropertyGeometryList","Legs","ControlGrid44_4","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlGrid44_4","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlGrid44_4","Weights").Weights
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
poles1=fp.Poly0.Poles
poles2=fp.Poly1.Poles
poles3=fp.Poly2.Poles
poles4=fp.Poly3.Poles
weights1=fp.Poly0.Weights
weights2=fp.Poly1.Weights
weights3=fp.Poly2.Weights
weights4=fp.Poly3.Weights
quad12 = orient_a_to_b(poles1,poles2)
quad23 = orient_a_to_b(poles2,poles3)
quad34 = orient_a_to_b(poles3,poles4)
quad41 = orient_a_to_b(poles4,poles1)
if quad12[0]!=poles1[0] and quad12[0]==poles1[-1]:
weights1=weights1[::-1]
if quad23[0]!=poles2[0] and quad23[0]==poles2[-1]:
weights2=weights2[::-1]
if quad34[0]!=poles3[0] and quad34[0]==poles3[-1]:
weights3=weights3[::-1]
if quad41[0]!=poles4[0] and quad41[0]==poles4[-1]:
weights4=weights4[::-1]
p00 = quad12[0]
p01 = quad12[1]
p02 = quad12[2]
p03 = quad12[3]
p13 = quad23[1]
p23 = quad23[2]
p33 = quad23[3]
p32 = quad34[1]
p31 = quad34[2]
p30 = quad34[3]
p20 = quad41[1]
p10 = quad41[2]
p11 = p00 + (p01 - p00) + (p10 - p00)
p12 = p03 + (p02 - p03) + (p13 - p03)
p21 = p30 + (p31 - p30) + (p20 - p30)
p22 = p33 + (p23 - p33) + (p32 - p33)
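# Interior poles follow the parallelogram rule: each corner pole plus its two
# adjacent edge offsets, e.g. p11 = p00 + (p01 - p00) + (p10 - p00).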
fp.Poles = [p00 ,p01, p02, p03,
p10, p11, p12, p13,
p20, p21, p22, p23,
p30, p31, p32, p33]
w00 = weights1[0]
w01 = weights1[1]
w02 = weights1[2]
w03 = weights1[3]
w13 = weights2[1]
w23 = weights2[2]
w33 = weights2[3]
w32 = weights3[1]
w31 = weights3[2]
w30 = weights3[3]
w20 = weights4[1]
w10 = weights4[2]
w11 = w01*w20
w12 = w02*w13
w21 = w32*w10
w22 = w23*w31
fp.Weights = [w00 ,w01, w02, w03,
w10, w11, w12, w13,
w20, w21, w22, w23,
w30, w31, w32, w33]
Legs=[0]*24
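# Leg layout on the row-major 4x4 pole grid (Poles[4*row + col]): legs 0-11
# trace the four rows (three segments each), legs 12-23 the four columns.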
for i in range(0,3):
Legs[i]=Part.LineSegment(fp.Poles[i],fp.Poles[i+1])
for i in range(3,6):
Legs[i]=Part.LineSegment(fp.Poles[i+1],fp.Poles[i+2])
for i in range(6,9):
Legs[i]=Part.LineSegment(fp.Poles[i+2],fp.Poles[i+3])
for i in range(9,12):
Legs[i]=Part.LineSegment(fp.Poles[i+3],fp.Poles[i+4])
for i in range(12,16):
Legs[i]=Part.LineSegment(fp.Poles[i-12],fp.Poles[i-8])
for i in range(16,20):
Legs[i]=Part.LineSegment(fp.Poles[i-12],fp.Poles[i-8])
for i in range(20,24):
Legs[i]=Part.LineSegment(fp.Poles[i-12],fp.Poles[i-8])
fp.Legs=Legs
fp.Shape = Part.Shape(fp.Legs)
class ControlGrid44_3:
def __init__(self, obj , poly0, poly1, poly2):
''' Add the properties '''
FreeCAD.Console.PrintMessage("\nControlGrid44_3 class Init\n")
obj.addProperty("App::PropertyLink","Poly0","ControlGrid44_3","control polygon").Poly0 = poly0
obj.addProperty("App::PropertyLink","Poly1","ControlGrid44_3","control polygon").Poly1 = poly1
obj.addProperty("App::PropertyLink","Poly2","ControlGrid44_3","control polygon").Poly2 = poly2
obj.addProperty("Part::PropertyGeometryList","Legs","ControlGrid44_3","control segments").Legs
obj.addProperty("App::PropertyVectorList","Poles","ControlGrid44_3","Poles").Poles
obj.addProperty("App::PropertyFloatList","Weights","ControlGrid44_3","Weights").Weights
obj.addProperty("App::PropertyFloat","TweakWeight11","ControlGrid44_3","Weights").TweakWeight11 = 1.0
obj.Proxy = self
def execute(self, fp):
'''Do something when doing a recomputation, this method is mandatory'''
poles1=fp.Poly0.Poles
poles2=fp.Poly1.Poles
poles3=fp.Poly2.Poles
weights1=fp.Poly0.Weights
weights2=fp.Poly1.Weights
weights3=fp.Poly2.Weights
quad12 = orient_a_to_b(poles1,poles2)
quad23 = orient_a_to_b(poles2,poles3)
quad31 = orient_a_to_b(poles3,poles1)
if quad12[0]!=poles1[0] and quad12[0]==poles1[-1]:
weights1=weights1[::-1]
if quad23[0]!=poles2[0] and quad23[0]==poles2[-1]:
weights2=weights2[::-1]
if quad31[0]!=poles3[0] and quad31[0]==poles3[-1]:
weights3=weights3[::-1]
# make sure this is a degenerate quadrangle, i.e. a triangle
if (quad31[3] != quad12[0]):
print('edge loop does not form a triangle')
#no further error handling is implemented
p00 = quad12[0]
p01 = quad12[1]
p02 = quad12[2]
p03 = quad12[3]
p13 = quad23[1]
p23 = quad23[2]
p33 = quad23[3]
p32 = quad31[1]
p31 = quad31[2]
p30 = p00
p20 = p00
p10 = p00
p11 | |
#!/usr/bin/env python
# Copyright (c) 2018, 2019, 2020 CNRS and Airbus S.A.S
# Author: <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy, hpp.corbaserver
from .client import HppClient
from dynamic_graph_bridge_msgs.msg import Vector
from geometry_msgs.msg import TransformStamped
from sensor_msgs.msg import JointState
from tf import TransformBroadcaster
from std_msgs.msg import Empty, UInt32
from std_srvs.srv import SetBool, SetBoolRequest
from math import cos, sin
from threading import Lock
import traceback
import ros_tools
### \brief Estimation based on HPP constraint solver.
##
## This class solves the following problem.
##
## Given:
## \li the current encoder values,
## \li tag poses (attached to objects) in the robot camera frame,
## \li a set of contraints encoding the semantic,
## estimate the complete robot configuration, including the robot base and, optionally, the object poses.
##
## The semantic of the problem (foot on the ground, object on the table...)
## can be expressed with HPP constraints. The cost is a mixture of:
## \li visual tag constraints: it penalizes the error between the tag position from CV and from estimation.
## \li current robot pose: it penalizes the error between the current robot pose from encoders and from estimation.
##
## There are two ways of specifying the semantic constraints:
## \li Core framework: set the ROS parameter "default_constraints" to a list of constraint names.
## \li Manipulation framework:
## - the current state of the manipulation graph is estimated using the last configuration in
## HPP server. It is a mixture of the result of the previous estimation and of the encoder values.
## - if the current state cannot be estimated, it is assumed it has not changed since last iteration.
## - the constraint of this state are used for estimation.
##
## Connection with HPP is handled by agimus_hpp.client.HppClient.
class Estimation(HppClient):
## Subscribed topics (prefixed by "/agimus")
subscribersDict = {
"estimation": {
"request" : [Empty, "estimation" ],
},
"vision": {
"tags": [TransformStamped, "get_visual_tag"],
},
"sot": {
"base_pose_estimation": [TransformStamped, "get_base_pose_estimation"],
},
}
## Provided services (prefixed by "/agimus")
servicesDict = {
"estimation": {
"continuous_estimation" : [SetBool, "continuous_estimation" ],
},
}
## Published topics (prefixed by "/agimus")
publishersDict = {
"estimation": {
# "estimation" : [ Vector, 1],
"semantic" : [ Vector, 1],
"state_id" : [ UInt32, 1],
},
}
def __init__ (self, continuous_estimation = False,
joint_states_topic="/joint_states",
visual_tags_enabled=True):
super(Estimation, self).__init__ (context = "estimation")
self.locked_joints = []
self.tf_pub = TransformBroadcaster()
self.tf_root = "world"
self.mutex = Lock()
self.robot_name = rospy.get_param("~robot_name", "")
self.last_stamp_is_ready = False
self.last_stamp = rospy.Time.now()
self.last_visual_tag_constraints = list()
self.current_stamp = rospy.Time.now()
self.current_visual_tag_constraints = list()
self.visual_tags_enabled = visual_tags_enabled
self.continuous_estimation (SetBoolRequest(continuous_estimation))
self.estimation_rate = 50 # Hz
self.subscribers = ros_tools.createSubscribers (self, "/agimus", self.subscribersDict)
self.publishers = ros_tools.createPublishers ("/agimus", self.publishersDict)
self.services = ros_tools.createServices (self, "/agimus", self.servicesDict)
self.joint_state_subs = rospy.Subscriber (joint_states_topic, JointState, self.get_joint_state)
def continuous_estimation(self, msg):
self.run_continuous_estimation = msg.data
rospy.loginfo ("Run continuous estimation: {0}".format(self.run_continuous_estimation))
return True, "ok"
def spin (self):
rate = rospy.Rate(self.estimation_rate)
while not rospy.is_shutdown():
if self.run_continuous_estimation and self.last_stamp_is_ready:
rospy.logdebug("Runnning estimation...")
self.estimation()
else:
rospy.logdebug ("run continuous estimation."
+"run_continuous_estimation={0}, last_stamp_is_ready={1}"
.format(self.run_continuous_estimation, self.last_stamp_is_ready))
rate.sleep()
def estimation (self, msg=None):
self.mutex.acquire()
try:
hpp = self.hpp()
q_current = hpp.robot.getCurrentConfig()
self._initialize_constraints (q_current)
# The optimization expects a configuration which already satisfies the constraints
projOk, q_projected, error = hpp.problem.applyConstraints (q_current)
if projOk:
optOk, q_estimated, error = hpp.problem.optimize (q_projected)
if not optOk:
from numpy.linalg import norm
errNorm = norm(error)
if errNorm > 1e-2:
rospy.logwarn_throttle (1 ,"Optimisation failed ? error norm: {0}".format(errNorm))
rospy.logdebug_throttle (1 ,"estimated == projected: {0}".format(q_projected==q_estimated))
else:
rospy.loginfo_throttle (1 ,"Optimisation failed ? error norm: {0}".format(errNorm))
rospy.logdebug_throttle (1 ,"Error {0}".format(error))
valid, msg = hpp.robot.isConfigValid (q_estimated)
if not valid:
rospy.logwarn_throttle (1, "Estimation in collision: {0}".format(msg))
self.publishers["estimation"]["semantic"].publish (q_estimated)
self.publish_state (hpp)
else:
hpp.robot.setCurrentConfig (q_current)
q_estimated = q_current
rospy.logwarn_throttle (1, "Could not apply the constraints {0}".format(error))
except Exception as e:
rospy.logerr_throttle (1, str(e))
rospy.logerr_throttle (1, traceback.format_exc())
finally:
self.last_stamp_is_ready = False
self.mutex.release()
## Publish tranforms to tf
# By default, only the child joints of universe are published.
def publish_state (self, hpp):
robot_name = hpp.robot.getRobotName()
if not hasattr(self, 'universe_child_joint_names'):
self.universe_child_joint_names = [ jn for jn in hpp.robot.getJointNames() if "universe" == hpp.robot.getParentJointName(jn) ]
rospy.loginfo("Will publish joints {0}".format(self.universe_child_joint_names))
for jn in self.universe_child_joint_names:
links = hpp.robot.getLinkNames(jn)
for l in links:
T = hpp.robot.getLinkPosition (l)
if l.startswith(robot_name):
name = l[len(robot_name)+1:]
else:
name = l
self.tf_pub.sendTransform (T[0:3], T[3:7], self.last_stamp, name, self.tf_root)
# Publish the robot link as estimated.
robot_joints = filter(lambda x: x.startswith(robot_name), hpp.robot.getAllJointNames())
for jn in robot_joints:
links = hpp.robot.getLinkNames(jn)
for name in links:
T = hpp.robot.getLinkPosition (name)
self.tf_pub.sendTransform (T[0:3], T[3:7], self.last_stamp, name, self.tf_root)
def _initialize_constraints (self, q_current):
from CORBA import UserException
hpp = self.hpp()
hpp.problem.resetConstraints()
if hasattr(self, "manip"): # hpp-manipulation:
# Guess current state
# TODO Add a topic that provides to this node the expected current state (from planning)
manip = self.manip ()
try:
state_id = manip.graph.getNode (q_current)
rospy.loginfo_throttle(1, "At {0}, current state: {1}".format(self.last_stamp, state_id))
except UserException:
if hasattr(self, "last_state_id"): # hpp-manipulation:
state_id = self.last_state_id
rospy.logwarn_throttle(1, "At {0}, assumed last state: {1}".format(self.last_stamp, state_id))
else:
state_id = rospy.get_param ("~default_state_id")
rospy.logwarn_throttle(1, "At {0}, assumed default current state: {1}".format(self.last_stamp, state_id))
self.last_state_id = state_id
self.publishers["estimation"]["state_id"].publish (state_id)
# copy constraint from state
manip.problem.setConstraints (state_id, True)
hpp.problem.addLockedJointConstraints("unused", self.locked_joints)
else:
# hpp-corbaserver: setNumericalConstraints
default_constraints = rospy.get_param ("~default_constraints")
hpp.problem.addLockedJointConstraints("unused", self.locked_joints)
hpp.problem.addNumericalConstraints ("constraints",
default_constraints,
[ 0 for _ in default_constraints ])
# TODO we should solve the constraints, then add the cost and optimize.
if len(self.last_visual_tag_constraints) > 0:
rospy.loginfo_throttle(1, "Adding {0}".format(self.last_visual_tag_constraints))
hpp.problem.addNumericalConstraints ("unused", self.last_visual_tag_constraints,
[ 1 for _ in self.last_visual_tag_constraints ])
hpp.problem.setNumericalConstraintsLastPriorityOptional (True)
def get_joint_state (self, js_msg):
from CORBA import UserException
self.mutex.acquire()
try:
hpp = self.hpp()
robot_name = hpp.robot.getRobotName()
if len(robot_name) > 0: robot_name = robot_name + "/"
for jn, q in zip(js_msg.name, js_msg.position):
name = robot_name + jn
jt = hpp.robot.getJointType(name)
if jt.startswith("JointModelRUB"):
assert hpp.robot.getJointConfigSize(name) == 2, name + " is not of size 2"
qjoint = [cos(q), sin(q)]
else:
assert hpp.robot.getJointConfigSize(name) == 1, name + " is not of size 1"
# Check joint bounds
bounds = hpp.robot.getJointBounds(name)
if q-bounds[0] < -1e-3 or q-bounds[1] > 1e-3:
rospy.logwarn_throttle(1, "Current state {1} of joint {0} out of bounds {2}"
.format(name, q, bounds))
qjoint = [min(bounds[1],max(bounds[0],q)),]
hpp.problem.createLockedJoint ('lock_' + name, name, qjoint)
if len(self.locked_joints) == 0:
self.locked_joints = tuple(['lock_'+robot_name+n for n in js_msg.name])
except UserException as e:
rospy.logerr ("Cannot get joint state: {0}".format(e))
finally:
self.mutex.release()
def _get_transformation_constraint (self,
joint1, joint2, transform,
prefix = "", orientationWeight = 1.):
hpp = self.hpp()
# Create a relative transformation constraint
j1 = joint1
j2 = joint2
name = prefix + j1 + "_" + j2
T = [ transform.translation.x,
transform.translation.y,
transform.translation.z,
transform.rotation.x,
transform.rotation.y,
transform.rotation.z,
transform.rotation.w,]
if orientationWeight == 1.:
names = ["T_"+name, ]
hpp.problem.createTransformationConstraint (names[0], j1, j2, T, [True,]*6)
else:
from hpp import Quaternion
names = ["P_"+name, "sO_"+name]
hpp.problem.createPositionConstraint (names[0], j1, j2, T[:3], [0,0,0], [True,]*3)
hpp.problem.createOrientationConstraint ("O_"+name, j1, j2, Quaternion(T[3:]).inv().toTuple(), [True,]*3)
hpp.problem.scCreateScalarMultiply (names[1], orientationWeight, "O_"+name)
return names
def get_visual_tag (self, tsmsg):
stamp = tsmsg.header.stamp
if stamp < self.current_stamp: return
rot = tsmsg.transform.rotation
# Compute scalar product between Z axis of camera and of tag.
# TODO Add a weight between translation and orientation
# It should depend on:
| |
<reponame>KevinVolkSTScI/wfss_overlap<filename>wfss_overlap_tool.py
#! /usr/bin/env python
#
"""
Tool to assess the possibility of overlap of spectra for a field of view.
This code takes as input either a pair of Mirage source input files (one for
stars and one for galaxies) or a scene image made from such input files and
allows the user to investigate the resulting scene at arbitrary rotations
to determine if spectral order overlap will be an issue for any given source
in the field.
"""
import sys
import os
import tkinter as Tk
import tkinter.ttk
import tkinter.messagebox
import tkinter.filedialog
from tkinter.scrolledtext import ScrolledText
import numpy
from astropy.io import fits
import fits_image_display
import general_utilities
import scene_image
import wfss_scene
BGCOL = '#F8F8FF'
FILTERNAMES = ['F090W', 'F115W', 'F140M', 'F150W', 'F158M', 'F200W']
GRISMNAMES = ['GR150R', 'GR150C']
class WFSSOverlap(Tk.Frame):
"""
This class brings up the WFSS Overlap Tool window.
Parameters
----------
Tk.Frame: The base class of the object, matching a Tkinter root or
Toplevel variable
Returns
-------
The class variable is returned, effectively.
"""
def __init__(self, parent=None, **args):
self.scene_image = None
self.convolved_image = None
self.filtername = 'F090W'
self.grismname = 'GR150R'
self.position = None
self.imagewin = None
self.last_type = None
self.siaf = True
self.imagefilename = None
self.indpi = None
for loop in range(len(sys.argv)):
if '--simple' in sys.argv[loop]:
self.siaf = False
if parent is not None:
# initialize the window and make the plot area.
Tk.Frame.__init__(self, parent, args)
self.root = parent
self.make_wfss_window()
def make_wfss_window(self):
"""
Make the main control window.
Parameters
----------
No parameters.
Returns
-------
None
"""
if self.root is not None:
wfsswindow = self.root
else:
wfsswindow = Tk.Toplevel()
wfsswindow.title("WFSS Overlap Tool")
wfsswindow.config(bg=BGCOL)
frame1 = Tk.Frame(wfsswindow)
frame1.pack(side=Tk.TOP)
self.message_area = ScrolledText(
frame1, height=5, width=100, bd=1, relief=Tk.RIDGE, wrap=Tk.NONE)
self.message_area.pack(side=Tk.TOP)
filterframe = Tk.Frame(frame1)
filterframe.pack(side=Tk.TOP)
self.filtervar = Tk.IntVar()
for loop in range(len(FILTERNAMES)):
rb1 = Tk.Radiobutton(filterframe, text=FILTERNAMES[loop],
variable=self.filtervar, value=loop,
command=self.set_filter)
rb1.pack(side=Tk.LEFT)
self.filtervar.set(0)
self.filtername = FILTERNAMES[0]
grismframe = Tk.Frame(frame1)
grismframe.pack(side=Tk.TOP)
self.grismvar = Tk.IntVar()
for loop in range(len(GRISMNAMES)):
rb1 = Tk.Radiobutton(grismframe, text=GRISMNAMES[loop],
variable=self.grismvar, value=loop,
command=self.set_grism)
rb1.pack(side=Tk.LEFT)
self.grismvar.set(0)
self.grismname = GRISMNAMES[0]
fields = Tk.Frame(frame1)
fields.pack()
lab1 = Tk.Label(fields, text='Background (ADU/s):')
lab1.grid(row=0, column=0)
self.background_entry = Tk.Entry(fields, width=30)
self.background_entry.grid(row=0, column=1)
self.background_entry.insert(0, '0.1')
lab1 = Tk.Label(fields, text='Point Source File Name:')
lab1.grid(row=1, column=0)
self.star_entry = Tk.Entry(fields, width=30)
self.star_entry.grid(row=1, column=1)
lab1 = Tk.Label(fields, text='Extended Source File Name:')
lab1.grid(row=2, column=0)
self.galaxy_entry = Tk.Entry(fields, width=30)
self.galaxy_entry.grid(row=2, column=1)
lab1 = Tk.Label(fields, text='File Path:')
lab1.grid(row=3, column=0)
self.path_entry = Tk.Entry(fields, width=30)
self.path_entry.grid(row=3, column=1)
wd = os.getcwd()
if len(wd) == 0:
wd = './'
if wd[-1] != '/':
wd = wd+'/'
self.path_entry.insert(0, wd)
self.filepath1 = Tk.Button(
fields, text='Select File',
command=lambda: self.select_directory(
self.star_entry, self.path_entry))
self.filepath1.grid(row=1, column=2)
self.filepath2 = Tk.Button(
fields, text='Select File',
command=lambda: self.select_directory(
self.galaxy_entry, None))
self.filepath2.grid(row=2, column=2)
self.filepath3 = Tk.Button(
fields, text='Select Directory',
command=lambda: self.select_directory(None, self.path_entry))
self.filepath3.grid(row=3, column=2)
self.filepath4 = Tk.Button(
fields, text='Select Directory',
command=lambda: self.select_directory(None, self.psf_path_entry))
self.filepath4.grid(row=4, column=2)
lab1 = Tk.Label(fields, text='WFSS PSF Path:')
lab1.grid(row=4, column=0)
self.psf_path_entry = Tk.Entry(fields, width=30)
self.psf_path_entry.grid(row=4, column=1)
wd = os.getcwd()
if len(wd) == 0:
wd = './'
if wd[-1] != '/':
wd = wd+'/'
self.psf_path_entry.insert(0, wd)
lab1 = Tk.Label(fields, text='Sky Position (degrees):')
lab1.grid(row=5, column=0)
self.position_entry = Tk.Entry(fields, width=30)
self.position_entry.grid(row=5, column=1)
buttonframe = Tk.Frame(frame1)
buttonframe.pack()
b1 = Tk.Button(
buttonframe, text='Compute Scene', command=self.make_scene)
b1.pack(side=Tk.LEFT)
b1 = Tk.Button(
buttonframe, text='Save Scene Image', command=self.save_scene)
b1.pack(side=Tk.LEFT)
b1 = Tk.Button(
buttonframe, text='Read Scene Image', command=self.load_scene)
b1.pack(side=Tk.LEFT)
b1 = Tk.Button(
buttonframe, text='Display Scene Image', command=self.display_scene)
b1.pack(side=Tk.LEFT)
f1 = Tk.Frame(frame1)
f1.pack()
typeframe = Tk.Frame(f1)
typeframe.pack(side=Tk.LEFT)
lab1 = Tk.Label(typeframe, text='Display type:')
lab1.pack(side=Tk.LEFT)
tframe = Tk.Frame(typeframe)
tframe.pack(side=Tk.LEFT)
self.typevar = Tk.IntVar()
rb1 = Tk.Radiobutton(tframe, text='Scene', variable=self.typevar,
value=0, command=lambda: self.redisplay(None))
rb1.pack(side=Tk.LEFT)
rb1 = Tk.Radiobutton(tframe, text='POM', variable=self.typevar,
value=1, command=lambda: self.redisplay(None))
rb1.pack(side=Tk.LEFT)
rb1 = Tk.Radiobutton(tframe, text='Image Area', variable=self.typevar,
value=2, command=lambda: self.redisplay(None))
rb1.pack(side=Tk.LEFT)
self.typevar.set(2)
showframe = Tk.Frame(f1)
showframe.pack(side=Tk.LEFT)
lab1 = Tk.Label(showframe, text=' Image type:')
lab1.pack(side=Tk.LEFT)
tframe = Tk.Frame(showframe)
tframe.pack()
self.imagevar = Tk.IntVar()
rb1 = Tk.Radiobutton(tframe, text='Direct', variable=self.imagevar,
value=0, command=lambda: self.redisplay(None))
rb1.pack(side=Tk.LEFT)
rb1 = Tk.Radiobutton(tframe, text='Dispersed', variable=self.imagevar,
value=1, command=lambda: self.redisplay(None))
rb1.pack(side=Tk.LEFT)
rb1 = Tk.Radiobutton(tframe, text='Unconvolved',
variable=self.imagevar,
value=2, command=lambda: self.redisplay(None))
rb1.pack(side=Tk.LEFT)
self.imagevar.set(1)
anglefield = Tk.Frame(frame1)
anglefield.pack()
self.anglevar = Tk.DoubleVar()
self.angle_slider = Tk.Scale(anglefield, orient=Tk.HORIZONTAL,
length = 720, from_=0., to=360.,
resolution=0.01,
label='Rotation Angle (degrees)',
variable=self.anglevar)
self.angle_slider.bind("<ButtonRelease-1>", self.apply_angle)
self.angle_slider.bind("<KeyPress>", self.apply_angle)
self.angle_slider.pack()
stepframe = Tk.Frame(frame1)
stepframe.pack()
# lab1 = Tk.Label(stepframe, text='Step Mode')
# lab1.pack(side=Tk.LEFT)
self.step_button = Tk.Button(
stepframe, text='Step +', command=lambda: self.run_step(True))
self.step_button.pack(side=Tk.LEFT)
self.step_button = Tk.Button(
stepframe, text='Step -', command=lambda: self.run_step(False))
self.step_button.pack(side=Tk.LEFT)
lab1 = Tk.Label(stepframe, text='Minimum')
lab1.pack(side=Tk.LEFT)
self.min_angle_entry = Tk.Entry(stepframe, width=7)
self.min_angle_entry.pack(side=Tk.LEFT)
self.min_angle_entry.insert(0, '0.0')
lab1 = Tk.Label(stepframe, text='Maximum')
lab1.pack(side=Tk.LEFT)
self.max_angle_entry = Tk.Entry(stepframe, width=7)
self.max_angle_entry.pack(side=Tk.LEFT)
self.max_angle_entry.insert(0, '360.0')
lab1 = Tk.Label(stepframe, text='Step')
lab1.pack(side=Tk.LEFT)
self.angle_step_entry = Tk.Entry(stepframe, width=7)
self.angle_step_entry.pack(side=Tk.LEFT)
self.angle_step_entry.insert(0, '0.5')
# b1 = Tk.Button(
# stepframe, text='Movie', command=self.run_movie)
# b1.pack(side=Tk.LEFT)
buttonframe = Tk.Frame(frame1)
buttonframe.pack()
b1 = Tk.Button(buttonframe, text='Close', command=self.root.destroy)
b1.pack(side=Tk.LEFT)
def set_filter(self):
"""
Set the filter name according to the radio button selection.
Parameters
----------
No parameters
Returns
-------
None
"""
var = self.filtervar.get()
self.filtername = FILTERNAMES[var]
def set_grism(self):
"""
Set the grism name according to the radio button selection.
Parameters
----------
No parameters
Returns
-------
None
"""
var = self.grismvar.get()
self.grismname = GRISMNAMES[var]
def apply_angle(self, event):
"""
Apply a selected rotation angle value from the slider.
Parameters
----------
event : tkinter event variable
Returns
-------
None.
"""
self.redisplay(None)
general_utilities.put_message(
self.message_area,
'Angle set to: %f degrees.\n' % (self.anglevar.get()))
def redisplay(self, event):
"""
Redisplay the image in the window, possibly due to an event.
Parameters
----------
event : nominally a tkinter event variable, but can be None as well.
Returns
-------
None.
"""
image_option = self.imagevar.get()
if self.imagewin is None:
pass
else:
option = False
angle = self.anglevar.get()
display_option = self.typevar.get()
image_option = self.imagevar.get()
total_option = image_option*10+display_option
rotated_image = scene_image.rotate_image(self.scene_image, angle)
self.generate_image(rotated_image)
if (self.last_type is None) or (total_option != self.last_type):
option = True
self.last_type = total_option
self.imagewin.displayImage(getrange=option, angle=angle)
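    # (added note) total_option above packs the two radio-button states into a
    # single integer, image_option*10 + display_option; e.g. image 'Dispersed'
    # (1) with display 'Image Area' (2) gives 12.  A change in either selection
    # therefore flips `option` to True and forces displayImage to recompute the
    # display range.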
def make_scene(self):
"""
Create the scene image.
Parameters
----------
None
Returns
-------
None
"""
try:
background = float(self.background_entry.get())
background = max(background, 0.)
general_utilities.put_message(
self.message_area,
'Background level set to: %f ADU/s.\n' % (background))
starname = self.star_entry.get()
galaxies_image = None
extname = self.galaxy_entry.get()
position_str = self.position_entry.get()
psf_path = self.psf_path_entry.get()
if len(psf_path) == 0:
psf_path = './'
if psf_path[-1] != '/':
psf_path = psf_path+'/'
if ',' in position_str:
position = position_str.split(',')
else:
                position_str = position_str.strip()
position = position_str.split(' ')
newpos = []
for loop in range(len(position)):
try:
p1 = float(position[loop])
newpos.append(p1)
except:
pass
if len(newpos) >= 2:
position = [newpos[0], newpos[1]]
position = numpy.asarray(position)
else:
general_utilities.put_message(
self.message_area,
'Position string not properly defined, will use mean of star positions.\n')
position = None
path = self.path_entry.get()
if len(path) == 0:
path = './'
if path[-1] != '/':
path = path+'/'
if position is None:
position = self.mean_position(path+starname)
outstr = '%15.8f %15.8f' % (position[0], position[1])
general_utilities.put_value(outstr, self.position_entry)
simple = not self.siaf
stars_image, star_list = scene_image.make_star_image(
path+starname, position, self.filtername, path=psf_path,
simple=simple)
            if stars_image is not None:
general_utilities.put_message(
self.message_area,
'Have made star scene image from file %s.\n' % (starname))
else:
general_utilities.put_message(
self.message_area,
'Error in making the star scene image from file %s.\n' % (
starname))
return
try:
galaxies_image = scene_image.make_galaxy_image(
path+extname, position, self.filtername, path=psf_path,
simple=simple)
except:
galaxies_image = None
            if galaxies_image is not None:
general_utilities.put_message(
self.message_area,
'Have made galaxies scene image from file %s.\n' % (extname))
stars_image = stars_image+galaxies_image
stars_image = stars_image+background
self.scene_image = stars_image
except:
general_utilities.put_message(
self.message_area,
'An error encountered making the scene image.\n')
def save_scene(self):
"""
Save the current scene image to a FITS file.
Parameters
----------
None
Returns
-------
None
"""
try:
outfile = tkinter.filedialog.asksaveasfilename(
filetypes=[('FITS', '*.fits')], title='Output FITS File Name')
            if isinstance(outfile, str) and (len(outfile) > 0):
hdu = fits.PrimaryHDU(self.scene_image)
hdu.writeto(outfile)
values = outfile.split('/')
filename = values[-1]
general_utilities.put_message(
self.message_area,
'Have saved the current scene image to: %s.\n' % (filename))
except:
pass
def load_scene(self):
"""
Load a scene image from a FITS file.
Parameters
----------
None
Returns
-------
None
"""
try:
infile = tkinter.filedialog.askopenfilename(
filetypes=[('FITS', '*.fits')], title='Output FITS File Name')
            if isinstance(infile, str) and (len(infile) > 0):
try:
newimage = fits.getdata(infile)
except:
try:
newimage = fits.getdata(infile, ext=1)
except:
newimage = None
else:
newimage = None
            if newimage is None:
                general_utilities.put_message(
                    self.message_area,
                    'Could not read an image from: %s.\n' % (infile))
                return
            dims = newimage.shape
            if (len(dims) == 2) and (dims[0] == 3631) and (dims[1] == 3631):
self.scene_image = newimage
general_utilities.put_message(
self.message_area,
'Have read the image from: %s.\n' % (infile))
else:
general_utilities.put_message(
self.message_area,
                    'Could not read a 3631x3631 scene image from: %s.\n' % (infile))
from __future__ import print_function
from .backtrack import mfe, boltzmann_sample, enumerative_backtrack
from .parameters import get_params
from .util.wrapped_array import WrappedArray, initialize_matrix
from .util.secstruct_util import *
from .util.output_util import _show_results, _show_matrices
from .util.sequence_util import initialize_sequence_and_ligated, initialize_all_ligated, get_num_strand_connections
from .util.constants import KT_IN_KCAL
from .util.assert_equal import assert_equal
from .derivatives import _get_log_derivs
from math import log, exp
##################################################################################################
def partition( sequences, circle = False, params = '', mfe = False, calc_bpp = False,
n_stochastic = 0, do_enumeration = False, structure = None, force_base_pairs = None, no_coax = False,
verbose = False, suppress_all_output = False,
deriv_params = None,
calc_Kd_deriv_DP = False, use_simple_recursions = False, deriv_check = False ):
'''
Wrapper function into Partition() class
Returns Partition object p which holds results like:
p.Z = final partition function (where unfolded state has unity weight)
p.bpp = matrix of base pair probabilities (if requested by user with calc_bpp = True)
p.struct_MFE = minimum free energy secondary structure in dot-parens notation
p.bps_MFE = minimum free energy secondary structure as sorted list of base pairs
p.dZ_dKd_DP = derivative of Z w.r.t. Kd computed in-line with dynamic programming (if requested by user with calc_Kd_deriv_DP = True)
'''
if isinstance(params,str): params = get_params( params, suppress_all_output )
if no_coax: params.K_coax = 0.0
p = Partition( sequences, params )
p.calc_all_elements = calc_bpp or (deriv_params != None)
p.use_simple_recursions = use_simple_recursions
p.circle = circle
p.options.calc_deriv_DP = calc_Kd_deriv_DP
p.structure = get_structure_string( structure )
p.force_base_pairs = get_structure_string( force_base_pairs )
p.suppress_all_output = suppress_all_output
p.deriv_params = deriv_params
p.deriv_check = deriv_check
p.run()
if calc_bpp: p.get_bpp_matrix()
if mfe: p.calc_mfe()
if n_stochastic > 0: p.stochastic_backtrack( n_stochastic )
if do_enumeration: p.enumerative_backtrack()
if verbose: p.show_matrices()
if not suppress_all_output: p.show_results()
p.run_cross_checks()
return p
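# Usage sketch (hedged: the sequence and options below are illustrative, not
# taken from the original source):
#   p = partition(['CGCG'], mfe=True, calc_bpp=True)
#   p.Z           # partition function (unfolded state has unit weight)
#   p.struct_MFE  # minimum free energy structure in dot-parens notation
#   p.bpp         # base pair probability matrix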
##################################################################################################
class Partition:
'''
Statistical mechanical model for RNA folding, testing a bunch of extensions and with lots of cross-checks.
(C) <NAME>, Stanford University, 2018
'''
def __init__( self, sequences, params ):
'''
Required user input.
sequences = string with sequence, or array of strings (sequences of interacting strands)
params = AlphaFoldParams object
'''
self.sequences = sequences
self.params = params
self.circle = False # user can update later --> circularize sequence
self.use_simple_recursions = False
self.calc_all_elements = False
self.calc_bpp = False
self.base_pair_types = params.base_pair_types
self.suppress_all_output = False
self.structure = None
self.force_base_pairs = None
self.deriv_params = None
self.options = PartitionOptions()
# for output:
self.Z = 0
self.dG = None
self.bpp = []
self.bps_MFE = []
self.struct_MFE = ''
self.struct_stochastic = []
self.struct_enumerate = []
self.log_derivs = []
self.derivs = []
return
##############################################################################################
def run( self ):
'''
Do the dynamic programming to fill partition function matrices
'''
initialize_sequence_information( self ) # N, sequence, ligated, all_ligated
initialize_dynamic_programming_matrices( self ) # ( Z_BP, C_eff, Z_linear, Z_cut, Z_coax, etc. )
initialize_force_base_pair( self )
# do the dynamic programming
for offset in range( 1, self.N ): #length of subfragment
for i in range( self.N ): #index of subfragment
if (not self.calc_all_elements) and ( i + offset ) >= self.N: continue
j = (i + offset) % self.N; # N cyclizes
for Z in self.Z_all: Z.update( self, i, j )
for i in range( self.N): self.Z_final.update( self, i )
self.log_derivs = self.get_log_derivs( self.deriv_params )
fill_in_outputs( self )
# boring member functions -- defined later.
def get_bpp_matrix( self ): _get_bpp_matrix( self ) # fill base pair probability matrix
def calc_mfe( self ): _calc_mfe( self )
def stochastic_backtrack( self, N ): _stochastic_backtrack( self, N )
def enumerative_backtrack( self ): _enumerative_backtrack( self )
def show_results( self ): _show_results( self )
def show_matrices( self ): _show_matrices( self )
def get_log_derivs( self, deriv_params ): return _get_log_derivs( self, deriv_params )
def run_cross_checks( self ): _run_cross_checks( self )
def num_strand_connections( self ): return get_num_strand_connections( self.sequences, self.circle)
##################################################################################################
def fill_in_outputs( self ):
self.Z = self.Z_final.val(0)
if self.Z > 0.0: self.dG = -KT_IN_KCAL * log( self.Z )
self.dZ_dKd_DP = self.Z_final.deriv(0)
self.derivs = []
if self.deriv_params:
for n,log_deriv in enumerate(self.log_derivs):
param_val = self.params.get_parameter_value( self.deriv_params[n] )
val = 0.0
if param_val != 0.0: val = log_deriv * self.Z /param_val
self.derivs.append( val )
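    # (added note) The loop above is the chain rule: the recursions return
    # log-derivatives d(log Z)/d(log p) for each parameter p, so
    #   dZ/dp = Z * d(log Z)/d(log p) / p,
    # which is what log_deriv * self.Z / param_val computes; the param_val != 0
    # guard avoids dividing by zero for switched-off parameters.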
def initialize_sequence_information( self ):
'''
Create sequence information from sequences of strands:
INPUT:
sequences = sequences of interacting strands (array of strings)
circle = user asks for nucleotides N and 1 to be ligated ('circularized') (bool)
OUTPUT:
sequence = concatenated sequence (string, length N)
    ligated = True where nucleotides i and i+1 are connected, i.e. not a cut ('nick','chainbreak') (array of bool, length N)
all_ligated = no cutpoint exists between i and j (N X N)
'''
# initialize sequence
self.sequence, self.ligated, self.sequences = initialize_sequence_and_ligated( self.sequences, self.circle, use_wrapped_array = self.use_simple_recursions )
self.N = len( self.sequence )
self.all_ligated = initialize_all_ligated( self.ligated )
##################################################################################################
class PartitionOptions:
def __init__( self ):
self.calc_deriv_DP = False
self.calc_contrib = False
##################################################################################################
def initialize_dynamic_programming_matrices( self ):
'''
A bunch of zero matrices. Only non-trivial thing is
initialization of (i,i) [diagonal]:
Z_BP(i,i) = 0
C_eff(i,i) = C_init (units of M)
Z_linear(i,i) = 1
And: the order of initialization in the list Z_all will
    determine the actual order of updates during dynamic programming at each (i,j).
'''
from .recursions.explicit_recursions import update_Z_BPq, update_Z_BP, update_Z_cut, update_Z_coax, update_C_eff_basic, update_C_eff_no_BP_singlet, update_C_eff_no_coax_singlet, update_C_eff, update_Z_final, update_Z_linear
from .recursions.explicit_dynamic_programming import DynamicProgrammingMatrix, DynamicProgrammingList
if self.use_simple_recursions: # over-ride with simpler recursions that are easier for user to input.
from .recursions.recursions import update_Z_BPq, update_Z_BP, update_Z_cut, update_Z_coax, update_C_eff_basic, update_C_eff_no_BP_singlet, update_C_eff_no_coax_singlet, update_C_eff, update_Z_final, update_Z_linear
from .recursions.dynamic_programming import DynamicProgrammingMatrix, DynamicProgrammingList
N = self.N
# Collection of all N X N dynamic programming matrices -- order in this list will
# determine order of updates.
self.Z_all = Z_all = []
# some preliminary helpers
self.Z_cut = DynamicProgrammingMatrix( N, DPlist = Z_all, update_func = update_Z_cut, options = self.options, name = 'Z_cut' );
# base pairs and co-axial stacks
self.Z_BPq = {}
for base_pair_type in self.base_pair_types:
# the bpt = base_pair_type holds the base_pair_type info in the lambda (Python FAQ)
update_func = lambda partition,i,j,bpt=base_pair_type: update_Z_BPq(partition,i,j,bpt)
self.Z_BPq[ base_pair_type ] = DynamicProgrammingMatrix( N, DPlist = Z_all,
update_func = update_func, options = self.options, name = 'Z_BPq_%s' % base_pair_type.get_tag() )
self.Z_BP = DynamicProgrammingMatrix( N, DPlist = Z_all, update_func = update_Z_BP, options = self.options, name = 'Z_BP' );
self.Z_coax = DynamicProgrammingMatrix( N, DPlist = Z_all, update_func = update_Z_coax, options = self.options, name = 'Z_coax' );
# C_eff makes use of information on Z_BP, so compute last
C_init = self.params.C_init
self.C_eff_basic = DynamicProgrammingMatrix( N, diag_val = C_init, DPlist = Z_all, update_func = update_C_eff_basic, options = self.options, name = 'C_eff_basic' );
self.C_eff_no_BP_singlet = DynamicProgrammingMatrix( N, diag_val = C_init, DPlist = Z_all, update_func = update_C_eff_no_BP_singlet, options = self.options, name = 'C_eff_basic_no_BP_singlet' );
self.C_eff_no_coax_singlet = DynamicProgrammingMatrix( N, diag_val = C_init, DPlist = Z_all, update_func = update_C_eff_no_coax_singlet, options = self.options, name = 'C_eff_basic_no_coax_singlet' );
self.C_eff = DynamicProgrammingMatrix( N, diag_val = C_init, DPlist = Z_all, update_func = update_C_eff, options = self.options, name = 'C_eff' );
self.Z_linear = DynamicProgrammingMatrix( N, diag_val = 1.0, DPlist = Z_all, update_func = update_Z_linear, options = self.options, name = 'Z_linear' );
# Last DP 1-D list (not a 2-D N x N matrix)
self.Z_final = DynamicProgrammingList( N, update_func = update_Z_final, options = self.options, name = 'Z_final' )
self.params.check_C_eff_stack()
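    # (illustrative note) After this initialization the diagonals encode the
    # base cases of the recursion:
    #   Z_BP(i,i)     = 0       -- a base cannot pair with itself
    #   C_eff(i,i)    = C_init  -- effective molarity of a zero-length loop (M)
    #   Z_linear(i,i) = 1       -- a single unpaired base has unit weight
    # All off-diagonal entries start at zero and are filled by Partition.run()
    # in order of increasing subfragment length.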
##################################################################################################
def initialize_force_base_pair( self ):
self.allow_base_pair = None
self.in_forced_base_pair = None
if self.structure != None:
assert( self.force_base_pairs == None )
bp_list = bps_from_secstruct( self.structure )
elif self.force_base_pairs != None:
assert( self.structure == None )
bp_list = bps_from_secstruct( self.force_base_pairs )
else:
return
N = self.N
self.in_forced_base_pair = [False] * N
if self.use_simple_recursions: self.in_forced_base_pair = WrappedArray( N, False )
for i,j in bp_list:
self.in_forced_base_pair[ i ] = True
self.in_forced_base_pair[ j ] = True
self.Z_linear.set_val( i, i, 0.0 )
self.Z_linear.set_val( j, j, 0.0 )
self.C_eff.set_val( i, i, 0.0 )
self.C_eff.set_val( j, j, 0.0 )
if self.structure != None:
self.allow_base_pair = initialize_matrix( N, False )
for i,j in bp_list:
self.allow_base_pair[ i ][ j ] = True
self.allow_base_pair[ j ][ i ] = True
else:
assert( self.force_base_pairs != None )
# allow the specified base pairs (indeed, force them), but
# now disallow any base pairs that might cross with them.
self.allow_base_pair = initialize_matrix( N, True )
for i,j in bp_list:
# no crossing pairs
for m in range( i+1, j ):
for n in range( j+1, i+N ):
                    if
__author__ = 'abel'
import tempfile
import os
import numpy as np
def int_to_xyz(molecule, no_dummy=True):
internal = molecule.get_full_z_matrix()
coordinates = [[0.0, 0.0, 0.0]]
for line in internal[1:]:
bi = int(line[0]) #bond index
B = line[1] #bond value
ai = int(line[2]) #Angle index
A = line[3] #Angle value
ci = int(line[4]) #Dihedral index
C = line[5] #Dihedral value
bond = np.array(coordinates[ai-1]) - np.array(coordinates[bi-1])
if np.linalg.norm(bond) == 0:
bond = np.array([1, 0, 0])
bond2 = np.array(coordinates[ci-1]) - np.array(coordinates[ai-1])
if np.linalg.norm(bond2) == 0:
bond2 = np.array([0, 1, 0])
origin = bond/np.linalg.norm(bond)*B
ref2 = bond
ref3 = np.cross(bond, bond2)
# Check case of linear structure
if np.linalg.norm(ref3) == 0:
ref3 = [0.0, 0.0, 0.1]
inter = np.dot(rotation_matrix(ref3, np.deg2rad(A)), origin)
final = np.dot(rotation_matrix(ref2, np.deg2rad(C)), inter)
final = final + np.array(coordinates[bi-1])
coordinates.append(final)
coordinates = np.array(coordinates)
if no_dummy:
# mask = np.argwhere(molecule.get_atomic_elements_with_dummy()[:,0] == 'X')
mask = np.argwhere((molecule.get_atomic_elements_with_dummy()[:,0] == 'X') |
(molecule.get_atomic_elements_with_dummy()[:,0] == 'x')).flatten()
coordinates = np.delete(coordinates,mask,axis=0)
return np.array(coordinates, dtype=float)
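# Worked example (a hypothetical 3-atom Z-matrix, not from the original tests).
# Each row is [bond idx, bond, angle idx, angle, dihedral idx, dihedral] with
# 1-based indices, so
#   [[0, 0.0, 0, 0.0,   0, 0.0],
#    [1, 1.0, 0, 0.0,   0, 0.0],      # atom 2: 1.0 A from atom 1
#    [1, 1.0, 2, 104.5, 0, 0.0]]      # atom 3: 1.0 A from atom 1, 104.5 deg to atom 2
# yields a bent, water-like geometry with atom 1 pinned at the origin.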
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
if np.dot(axis, axis) == 0.0:
        print('Warning: the reference rotation axis has zero norm')
exit()
axis = np.asarray(axis)
theta = np.asarray(theta)
axis = axis/np.sqrt(np.dot(axis, axis))
a = np.cos(theta/2)
b, c, d = -axis*np.sin(theta/2)
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
[2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
[2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
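# Sanity check (a sketch): rotating the x axis by 90 degrees counterclockwise
# about z should give the y axis.
#   >>> np.allclose(np.dot(rotation_matrix([0, 0, 1], np.pi / 2), [1, 0, 0]),
#   ...             [0, 1, 0])
#   True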
class Structure:
def __init__(self,
coordinates=None,
internal=None,
z_matrix=None,
int_label=None,
atom_types=None,
atomic_elements=None,
atomic_numbers=None,
connectivity=None,
file_name=None,
charge=0,
multiplicity=1,
                 # TODO: find a better place for this
int_weights=None):
self._coordinates = coordinates
self._internal = internal
self._z_matrix = z_matrix
self._int_label = int_label
self._atom_types = atom_types
self._atomic_numbers = atomic_numbers
self._connectivity = connectivity
self._atomic_elements = atomic_elements
self._charge = charge
self._multiplicity = multiplicity
self._file_name = file_name
self._int_weights = int_weights
self._atomic_masses = None
self._number_of_atoms = None
self._number_of_internal = None
self._energy = None
self._modes = None
self._full_z_matrix = None
def get_coordinates(self):
if self._coordinates is None:
self._coordinates = int_to_xyz(self)
return self._coordinates.copy()
def set_coordinates(self, coordinates):
self._coordinates = coordinates
self._number_of_atoms = None
self._energy = None
def get_internal(self):
if self._internal is None:
print('No internal coordinates available\n Load internal file')
exit()
return self._internal.copy()
def set_internal(self, internal):
self._internal = internal
self._energy = None
self._coordinates = int_to_xyz(self)
self._full_z_matrix = None
def get_full_z_matrix(self):
if self._full_z_matrix is None:
num_z_atoms = self.get_z_matrix().shape[0]
self._full_z_matrix = np.zeros((num_z_atoms,6))
for row, i in enumerate(self.get_z_matrix()[1:]):
for col, k in enumerate(i[0]):
try:
self._full_z_matrix[row+1, col] = float(k)
except ValueError:
self._full_z_matrix[row+1, col] = self.get_int_dict()[k]
return self._full_z_matrix
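    # (added note) Symbolic Z-matrix entries (e.g. a label like 'R1') fail the
    # float() conversion above and are resolved through get_int_dict(), which
    # maps each label from get_int_label() to its current value in
    # get_internal(); purely numeric entries are used as-is.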
def get_z_matrix(self):
if self._z_matrix is None:
print('No Z-matrix available\n Load zmatrix file')
exit()
return self._z_matrix
def set_z_matrix(self, z_matrix):
self._z_matrix = z_matrix
def get_int_label(self):
return self._int_label
def set_int_label(self, int_label):
self._int_label = int_label
def get_int_dict(self):
self._internal_dict = {}
for i, label in enumerate(self.get_int_label()[:,0]):
self._internal_dict.update({label:self.get_internal()[i, 0]})
return self._internal_dict
def get_int_weights(self):
return self._int_weights
def set_int_weights(self, int_weights):
self._int_weights = int_weights
def get_atomic_elements_with_dummy(self):
# print([i for i in self._atomic_elements if i != "X"])
return self._atomic_elements
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self, file_name):
self._file_name = file_name
@property
def charge(self):
return self._charge
@charge.setter
def charge(self, charge):
self._charge = charge
@property
def multiplicity(self):
return self._multiplicity
@multiplicity.setter
def multiplicity(self, multiplicity):
self._multiplicity = multiplicity
def get_atom_types(self):
if self._atom_types is None:
print('No atom types available')
exit()
return self._atom_types
def set_atom_types(self, atom_types):
self._atom_types = atom_types
def get_atomic_numbers(self):
if self._atomic_numbers is None:
self._atomic_numbers = np.array(range(1, self.get_number_of_atoms()+1))[None].T
return self._atomic_numbers
def set_atomic_numbers(self, atomic_numbers):
self._atomic_numbers = atomic_numbers
def get_atomic_elements(self):
if self._atomic_elements is None:
self._atomic_elements = np.array(atom_data)[[self.get_atomic_numbers()]][:,1:2]
return np.array([[i[0].upper()] for i in self._atomic_elements if i != "X"], dtype=str)
def set_atomic_elements(self, atomic_elements):
self._atomic_elements = atomic_elements
def get_connectivity(self):
if self._connectivity is None:
print('No atom connectivity available')
exit()
return self._connectivity
def set_connectivity(self, connectivity):
self._connectivity = connectivity
# Real methods
def get_number_of_atoms(self):
if self._number_of_atoms is None:
self._number_of_atoms = self.get_coordinates().shape[0]
return self._number_of_atoms
def get_number_of_internal(self):
if self._number_of_internal is None:
self._number_of_internal = self.get_internal().shape[0]
return self._number_of_internal
def get_energy(self, method=None):
if self._energy is None:
self._energy = method.single_point(self)
return self._energy
def get_modes(self, method=None):
if self._modes is None:
self._modes, self._energy = method.vibrations(self)
return self._modes
def get_atomic_masses(self):
if self._atomic_masses is None:
try:
masses_string = np.array(atom_data)[:,3:4][[np.where(np.array(atom_data)==element)[0][0] for element in self.get_atomic_elements()]]
self._atomic_masses = np.array(masses_string, dtype=float)
except TypeError:
print('Error reading element labels')
exit()
return self._atomic_masses
atom_data = [
[ 0, "X", "X", 0], # 0
[ 1, "H", "Hydrogen", 1.00794], # 1
[ 2, "He", "Helium", 4.002602], # 2
[ 3, "Li", "Lithium", 6.941], # 3
[ 4, "Be", "Beryllium", 9.012182], # 4
[ 5, "B", "Boron", 10.811], # 5
[ 6, "C", "Carbon", 12.0107], # 6
[ 7, "N", "Nitrogen", 14.0067], # 7
[ 8, "O", "Oxygen", 15.9994], # 8
[ 9, "F", "Fluorine", 18.9984032], # 9
[ 10, "Ne", "Neon", 20.1797], # 10
[ 11, "Na", "Sodium", 22.98976928], # 11
[ 12, "Mg", "Magnesium", 24.3050], # 12
[ 13, "Al", "Aluminium", 26.9815386], # 13
[ 14, "Si", "Silicon", 28.0855], # 14
[ 15, "P", "Phosphorus", 30.973762], # 15
[ 16, "S", "Sulfur", 32.065], # 16
[ 17, "Cl", "Chlorine", 35.453], # 17
[ 18, "Ar", "Argon", 39.948], # 18
[ 19, "K", "Potassium", 39.0983], # 19
[ 20, "Ca", "Calcium", 40.078], # 20
[ 21, "Sc", "Scandium", 44.955912], # 21
[ 22, "Ti", "Titanium", 47.867], # 22
[ 23, "V", "Vanadium", 50.9415], # 23
[ 24, "Cr", "Chromium", 51.9961], # 24
[ 25, "Mn", "Manganese", 54.938045], # 25
[ 26, "Fe", "Iron", 55.845], # 26
[ 27, "Co", "Cobalt", 58.933195], # 27
[ 28, "Ni", "Nickel", 58.6934], # 28
[ 29, "Cu", "Copper", 63.546], # 29
[ 30, "Zn", "Zinc", 65.38], # 30
[ 31, "Ga", "Gallium", 69.723], # 31
[ 32, "Ge", "Germanium", 72.64], # 32
[ 33, "As", "Arsenic", 74.92160], # 33
[ 34, "Se", "Selenium", 78.96], # 34
[ 35, "Br", "Bromine", 79.904], # 35
[ 36, "Kr", "Krypton", 83.798], # 36
[ 37, "Rb", "Rubidium", 85.4678], # 37
[ 38, "Sr", "Strontium", 87.62], # 38
[ 39, "Y", "Yttrium", 88.90585], # 39
[ 40, "Zr", "Zirconium", 91.224], # 40
[ 41, "Nb", "Niobium", 92.90638], # 41
[ 42, "Mo", "Molybdenum", 95.96], # 42
[ 43, "Tc", "Technetium", 0], # 43
[ 44, "Ru", "Ruthenium", 101.07], # 44
[ 45, "Rh", "Rhodium", 102.90550], # 45
[ 46, "Pd", "Palladium", 106.42], # 46
[ 47, "Ag", "Silver", 107.8682], # 47
[ 48, "Cd", "Cadmium", 112.411], # 48
[ 49, "In", "Indium", 114.818], # 49
[ 50, "Sn", "Tin", 118.710], # 50
[ 51, "Sb", "Antimony", 121.760], # 51
[ 52, "Te", "Tellurium", 127.60], # 52
[ 53, "I", "Iodine", 126.90447], # 53
[ 54, "Xe", "Xenon", 131.293], # 54
[ 55, "Cs", "Caesium", 132.9054519], # 55
[ 56, "Ba", "Barium", 137.327], # 56
[ 57, "La", "Lanthanum", 138.90547], # 57
[ 58, "Ce", "Cerium", 140.116], # 58
[ 59, "Pr", "Praseodymium", 140.90765], # 59
[ 60, "Nd", "Neodymium", 144.242], # 60
[ 61, "Pm", "Promethium", 0], # 61
[ 62, "Sm", "Samarium", 150.36], # 62
[ 63, "Eu", "Europium", 151.964], # 63
[ 64, "Gd", "Gadolinium", 157.25], # 64
[ 65, "Tb", "Terbium", 158.92535], # 65
[ 66, "Dy", "Dysprosium", 162.500], # 66
[ 67, "Ho", "Holmium", 164.93032], # 67
[ 68, "Er", "Erbium", 167.259], # 68
[ 69, "Tm", "Thulium", 168.93421], # 69
[ 70, "Yb", "Ytterbium", 173.054], # 70
[ 71, "Lu", "Lutetium", 174.9668], # 71
[ 72, "Hf", "Hafnium", 178.49], # 72
[ 73, "Ta", "Tantalum", 180.94788], # 73
[ 74, "W", "Tungsten", 183.84], # 74
[ 75, "Re", "Rhenium", 186.207], # 75
[ 76, "Os", "Osmium", 190.23], # 76
[ 77, "Ir", "Iridium", 192.217], # 77
[ 78, "Pt", "Platinum", 195.084], # 78
[ 79, "Au", "Gold", 196.966569], # 79
[ 80, "Hg", "Mercury", 200.59], # 80
[ 81, "Tl", "Thallium", 204.3833], # 81
[ 82, "Pb", "Lead", 207.2], # 82
[ 83, "Bi", "Bismuth", 208.98040], # 83
[ 84, "Po", "Polonium", 0], # 84
[ 85, "At", "Astatine", 0], # 85
[ 86, "Rn", "Radon", 0], # 86
[ 87, "Fr", "Francium", 0], # 87
[ 88, "Ra", "Radium", 0], # 88
## Data processing
## Processing JSON file
import csv
import os
import collections
import json
import logging
from copy import deepcopy
import numpy as np
import torch
PAD = '<pad>'
UNK = '<unk>'
USR = 'YOU:'
SYS = 'THEM:'
BOD = '<d>'
EOD = '</d>'
BOS = '<s>'
EOS = '<eos>'
SEL = '<selection>'
#SPECIAL_TOKENS_DEAL = [PAD, UNK, USR, SYS, BOD, EOS]
#SPECIAL_TOKENS = [PAD, UNK, USR, SYS, BOS, BOD, EOS, EOD]
SPECIAL_TOKENS = [PAD, UNK, EOS, BOS]
STOP_TOKENS = [EOS, SEL]
DECODING_MASKED_TOKENS = [PAD, UNK, USR, SYS, BOD]
DATA_VER = 2
def normalize_slot_name(slot):
slot = slot.replace('At', ' at').replace('By', ' by') # Inform slot
slot = slot.replace('Addr', 'address').replace('Ref', 'reference') # Request slot
slot = slot.lower()
return slot
def normalize_inform_act(act):
domain=act.split('-')[0]
slot = act.split('-')[2]
if slot == 'price':
slot = 'pricerange'
elif slot == 'stay':
slot = 'book stay'
elif slot == "day":
slot = 'book day'
elif slot == "people":
slot = 'book people'
elif slot == "time":
slot = 'book time'
elif slot == "leave":
slot = "leave at"
elif slot == "arrive":
slot = "arrive by"
elif slot == "depart":
slot = "departure"
elif slot =="dest":
slot = "destination"
if act == 'train-inform-day':
slot = 'day'
return domain+'-inform-'+slot
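# Examples of the two normalizers above (inputs are illustrative):
#   normalize_slot_name('leaveAt')             -> 'leave at'
#   normalize_slot_name('Addr')                -> 'address'
#   normalize_inform_act('hotel-inform-price') -> 'hotel-inform-pricerange'
#   normalize_inform_act('train-inform-day')   -> 'train-inform-day'
# (the train day slot is kept as a special case, while other domains map
# 'day' to 'book day')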
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_len, label_id, output_ids, db_feat):
self.input_ids = input_ids
self.input_len = input_len
self.label_id = label_id
self.output_ids = output_ids # system
self.db_feat = db_feat
class Processor(object):
"""Processor for the belief tracking dataset"""
logger = logging.getLogger()
def __init__(self, config):
self.input_template = {'guid':'', 'prev_sys': '', 'user': '', 'sys': '', 'sys_delex':'',
'bs': {}, 'user_domain':{}, 'user_act': {},
'source':{}, 'kb':'', 'db':{}}
# Ontology for SUMBT
ontology = json.load(open(os.path.join(config.data_dir, "ontology.json"), "r"))
for slot in ontology.keys():
if not "none" in ontology[slot]:
ontology[slot].append("none")
ontology_act = json.load(open(os.path.join(config.data_dir, "ontology_act.json"), "r"))
ontology_request = json.load(open(os.path.join(config.data_dir, "ontology_req.json"), "r"))
# sorting the ontology according to the alphabetic order of the slots
self.ontology = collections.OrderedDict(sorted(ontology.items()))
self.target_slot = list(self.ontology.keys())
# Belief state
for slot in self.target_slot:
self.input_template['bs'][slot] = 'none'
# domain
# act (general, inform, request)
self.ontology_domain = []
self.ontology_act = []
self.act_idx=[0] #starting idx of general, inform, request
for domain_act in ontology_act:
domain = domain_act.split('-')[0]
if 'general' in domain:
self.ontology_act.append(domain_act)
self.input_template['user_act'][domain_act] = False # initial value is false
else:
if domain not in self.ontology_domain:
self.ontology_domain.append(domain)
self.input_template['user_domain'][domain] = False
self.act_idx.append(len(self.ontology_act))
for slot in ontology.keys():
domain = slot.split('-')[0]
slot = slot.split('-')[1]
act = domain+'-inform-'+slot
self.ontology_act.append(act)
self.input_template['user_act'][act] = False
self.act_idx.append(len(self.ontology_act))
for slot in ontology_request:
domain = slot.split('-')[0]
slot = slot.split('-')[1]
act = domain+'-request-'+slot
self.ontology_act.append(act)
self.input_template['user_act'][act] = False
# Policy and Decoder (For MultiWOZ)
self.bs_size = len(self.ontology)
self.db_size = config.db_size
self.max_vocab_size = config.dec_max_vocab_size
self.vocab = None
self.vocab_dict = None
self.unk_id = None
self.pad_id = None
self.dec_max_seq_len = config.dec_max_seq_len
self.load_dec_dict(config.data_dir)
def load_dec_dict(self, data_dir):
vocab_dir = os.path.join(data_dir, "dec_vocab.json")
if os.path.exists(vocab_dir):
self.vocab_dict = json.load(open(vocab_dir,'r'))
self.vocab = list(self.vocab_dict)
self.unk_id = self.vocab_dict[UNK]
self.pad_id = self.vocab_dict[PAD]
def get_train_examples(self, data_dir, accumulation=False):
"""See base class."""
created_dir = os.path.join(data_dir, "train_created.json")
original_dir = os.path.join(data_dir, "train.json")
if os.path.exists(created_dir):
data = json.load(open(created_dir, 'r'))
self.load_dec_dict(data_dir)
else:
data = self._create_examples(json.load(open(original_dir, 'r')), "train", accumulation)
json.dump(data, open(created_dir, 'w'), indent=4)
vocab_dir = os.path.join(data_dir, "dec_vocab.json")
json.dump(self.vocab_dict, open(vocab_dir, 'w'), indent=4)
return data
def get_dev_examples(self, data_dir, accumulation=False):
"""See base class."""
created_dir = os.path.join(data_dir, "val_created.json")
original_dir = os.path.join(data_dir, "val.json")
if os.path.exists(created_dir):
data = json.load(open(created_dir, 'r'))
else:
data = self._create_examples(json.load(open(original_dir, 'r')), "dev", accumulation)
json.dump(data, open(created_dir, 'w'), indent=4)
return data
def get_test_examples(self, data_dir, accumulation=False):
"""See base class."""
created_dir = os.path.join(data_dir, "test_created.json")
original_dir = os.path.join(data_dir, "test.json")
if os.path.exists(created_dir):
data = json.load(open(created_dir, 'r'))
else:
data = self._create_examples(json.load(open(original_dir, 'r')), "test", accumulation)
json.dump(data, open(created_dir, 'w'), indent=4)
return data
def get_labels(self):
""" return list of value numbers, number of slots (for request), number of general acts """
return [ self.ontology[slot] for slot in self.target_slot], self.ontology_domain, self.ontology_act
def _construct_vocab(self, vocab_count):
# Note: construct vocab using only system utterances
vocab_count = sorted(vocab_count.items(), key=(lambda x:x[1]), reverse = True)
raw_vocab_size = len(vocab_count)
keep_vocab_size = min(self.max_vocab_size, raw_vocab_size)
num_all_words = np.sum([c for t, c in vocab_count])
oov_rate = 100 - 100*np.sum([c for t, c in vocab_count[0:keep_vocab_size]]) / float(num_all_words)
self.logger.info('cut off at word {} with frequency={},\n'.format(vocab_count[keep_vocab_size - 1][0],
vocab_count[keep_vocab_size - 1][1]) +
'OOV rate = {:.2f}%'.format(oov_rate))
vocab_count = vocab_count[0:keep_vocab_size]
self.vocab = SPECIAL_TOKENS + [t for t, cnt in vocab_count if t not in SPECIAL_TOKENS]
self.vocab_dict = {t: idx for idx, t in enumerate(self.vocab)}
self.unk_id = self.vocab_dict[UNK]
self.pad_id = self.vocab_dict[PAD]
self.logger.info("Raw vocab size {} in train set and final vocab size {}".format(raw_vocab_size, len(self.vocab)))
def _sent2id(self, sent):
return [self.vocab_dict.get(t, self.unk_id) for t in sent]
def id2sent(self, id_list):
return [self.vocab[i] for i in id_list]
def _create_examples(self, dialogs, set_type, accumulation=False):
"""Creates examples and consturct decoder vocabulary"""
examples = []
req_err_list = {}
req_num = 0
req_err = 0
info_err_list = {}
info_num = 0
info_err = 0
num_turn = 0
no_act = 0
multi_domain = 0
vocab_count = {}
for did, dialog in dialogs.items():
prev_sys_text = ''
tid = 0
input = deepcopy(self.input_template)
            # processed_goal = self._process_goal(dialog['goal'])  # TODO: why is goal processing needed in LaRL?
for turn in dialog['log']:
                input['guid'] = "%s-%s-%s" % (set_type, did, tid)  # set type - dialogue id - turn index
input['prev_sys'] = prev_sys_text
if len(turn['metadata']) > 0: # sys turn
next_sys_text = turn['text'].lower()
next_sys_delex_text = turn['text_delex'].lower()
input['sys'] = next_sys_text
input['sys_delex'] = next_sys_delex_text
                    # Construct decoder vocab
if set_type == 'train':
for word in next_sys_delex_text.split(): # TODO!!
if word in vocab_count:
vocab_count[word] += 1
else:
vocab_count[word] = 1
# extract slot-values in belief states
for domain, slot_value in turn['metadata'].items():
for slot, value in slot_value['semi'].items():
slot = normalize_slot_name(slot)
if value != '' and value != 'none':
domain_slot = domain.lower().strip() + '-' + slot.lower().strip()
input['bs'][domain_slot] = value
for slot, value in slot_value['book'].items():
if slot == 'booked':
continue
else:
slot = normalize_slot_name(slot)
domain_slot = domain.lower().strip() + '-book ' + slot.lower().strip()
input['bs'][domain_slot] = value
# extract db features
db_feat_temp = turn['db']
new_booking_db_feat = db_feat_temp[-3:]
db_feat_temp = db_feat_temp[:-3]
for item in new_booking_db_feat:
if item == 0:
db_feat_temp.extend([1.0, 0.0])
else:
db_feat_temp.extend([0.0, 1.0])
input['db'] = db_feat_temp # Number of found DB search results
assert len(db_feat_temp) == 62
if 'KB' in turn:
input['kb'] = turn['KB']
input['source'] = turn['source']
else:
#print('ERROR: %s' % input['guid'])
input['kb'] = 0
input['source'] = {}
if input['user'] != '':
tid += 1
examples.append(input)
input = deepcopy(self.input_template)
prev_sys_text = next_sys_text
else: # user turn
num_turn += 1
input['user'] = turn['text'].lower()
if len(turn['dialog_act']) == 0:
no_act += 1
# extract user dialog act, domain
for act, pairs in turn['dialog_act'].items():
act = act.lower()
domain = act.split('-')[0]
if domain in self.ontology_domain:
input['user_domain'][domain] = True
if act in self.ontology_act: # general
input['user_act'][act] = True
elif 'inform' in act:
for pair in pairs:
domain = act.split('-')[0].lower()
slot = normalize_slot_name(pair[0])
if slot != 'none':
domain_slot = domain+ '-inform-' + slot
domain_slot = normalize_inform_act(domain_slot)
if domain_slot in self.ontology_act: # for some typos
input['user_act'][domain_slot] = True
info_num += 1
else:
if domain_slot not in info_err_list:
info_err_list[domain_slot]=None
info_err += 1
elif 'request' in act:
for pair in pairs:
domain = act.split('-')[0].lower()
slot = normalize_slot_name(pair[0])
domain_slot = domain+ '-request-' + slot
if domain_slot in self.ontology_act: # for some typos
input['user_act'][domain_slot] = True
req_num += 1
else:
if domain_slot not in req_err_list:
req_err_list[domain_slot]=None
req_err += 1
if sum(input['user_domain'].values()) > 1:
multi_domain += 1
if set_type == 'train':
self._construct_vocab(vocab_count)
print("Inform numbers: %d, Errors(Inform slots not exist) : %d" % (info_num, info_err))
print("Request numbers: %d, Errors(request slots not exist) : %d" % (req_num, req_err))
print("User Act tagging error: total turns %d: no_act %d (%.3e) multi_domain %d (%.3e) "
% (num_turn, no_act, no_act/num_turn, multi_domain, multi_domain/num_turn))
return examples
def convert_examples_to_features(self, examples, labels, max_seq_length, tokenizer, max_turn_length):
"""Loads a data file into a list of `InputBatch`s."""
label_list, domain_list, act_list = labels
label_map = [{label: i for i, label in enumerate(labels)} for labels in label_list]
slot_dim = len(label_list)
domain_dim = len(domain_list)
act_dim = len(act_list)
padding_label = ([-1]*slot_dim, [-1]*domain_dim, [-1]*act_dim)
features = []
prev_dialogue_idx = None
all_padding = [0] * max_seq_length
all_padding_out = [0] * self.dec_max_seq_len
all_padding_len = [0, 0]
all_padding_db = [0] * self.db_size
max_turn = 0
for (_, example) in enumerate(examples):
            if max_turn <
<filename>reflred/bruker.py
"""
Data loader for Bruker table-top X-ray source raw file format.
"""
import sys
import struct
import logging
import numpy
if sys.version_info[0] >= 3:
def tostr(s):
return s.decode('ascii')
else:
def tostr(s):
return s
MEAS_FLAG = {
0: 'unmeasured',
1: 'measured',
2: 'active',
3: 'aborted',
4: 'interrupted',
10: '.SIM file K.S.',
}
SCAN_TYPE = {
0: 'locked coupled', # twotheta, with theta = twotheta/2
1: 'unlocked coupled', # twotheta, with theta = twotheta/2 + initial offset
2: 'detector scan', # twotheta
3: 'rocking curve', # theta
4: 'chi scan', # sample tilt
5: 'phi scan', # sample rotation
6: 'x scan', # sample translation x
7: 'y scan', # sample translation y
8: 'z scan', # sample translation z
9: 'aux1 scan',
10: 'aux2 scan',
11: 'aux3 scan',
12: 'psi scan',
13: 'hkl scan',
14: 'rec. space scan',
20: 'unlocked coupled HR XRD',
129: 'PSD fixed scan',
130: 'PSD fast scan',
}
VARYING_BIT = (
'two_theta',
'theta',
'chi',
'phi',
'x',
'y',
'z',
'aux1',
'aux2',
'aux3',
'time',
'temp',
)
def _make_struct(fields, data, offset=0):
names = list(zip(*fields))[0]
types = "<"+"".join(dtype for _, dtype in fields)
values = struct.unpack_from(types, data, offset=offset)
new_values = []
for (_, t), v in zip(fields, values):
if t[-1] == 's':
try:
output = tostr(v).strip('\0')
except UnicodeDecodeError:
output = "ERROR: could not decode"
new_values.append(output)
else:
new_values.append(v)
offset += struct.calcsize(types)
return dict(zip(names, new_values)), offset
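# Example (a sketch, not from the original tests): an 8-byte string followed by
# a little-endian int32 consumes 12 bytes and advances the offset accordingly.
#   >>> _make_struct((('magic', '8s'), ('flag', 'i')),
#   ...              b'RAW1.01\x00' + struct.pack('<i', 1))
#   ({'magic': 'RAW1.01', 'flag': 1}, 12)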
RAW_HEADER = (
('raw_id', '8s'), # 0
('meas_flag', 'i'), # 8 0: unmeasured, 1: measured, 2: active, 3: aborted, 4: interrupted
('no_of_tot_meas_ranges', 'i'), # 12
('date', '10s'), # 16 mm/dd/yy
('time', '10s'), # 26 hh:mm:ss
('user', '72s'), # 36
('site', '218s'), # 108
('samplename', '60s'), # 326
('comment', '160s'), # 386
('res0', '2s'), # 546 2 byte alignment
('goniom_model', 'i'), # 548
('goniom_stage', 'i'), # 552
('sample_changer', 'i'), # 556
('goniom_ctrl', 'i'), # 560
('goniom_radius', 'f'), # 564 diameter of the goniom (mm)
('fixed_inc_divsli', 'f'), # 568 diverg.slit fix incidence
('fixed_inc_samplesli', 'f'), # 572 sample slit fix inc.
('fixed_inc_soller', 'i'), # 576 sollerslit 0: None, 1: 2, 2: 4 deg
('fixed_inc_monochromator', 'i'), # 580
('fixed_dif_antisli', 'f'), # 584 anti slit diffracted site
('fixed_dif_detsli', 'f'), # 588 detecgtor slit diffr. site
('fixed_dif_2ndsoller', 'i'), # 592 soller slit 0: None, 1: 2, 2: 4 deg
('fixed_dif_thinfilm', 'i'), # 596 thin film 0: None, 1: 0.4, 2: 0.15, 3: 0.08 deg
('fixed_dif_betafilter', 'i'), # 600
('fixed_dif_analyzer', 'i'), # 604
('anode', '4s'), # 608 anode material
#('res1', '4s'), # 612 8 byte alignment
('actual_QUANT_offset', 'f'), # 612 the actual offset used by QUANT
('alpha_average', 'd'), # 624 (alpha_1+alpha_2)/2 Ang
('alpha_1', 'd'), # 632 alpha1 wavelength Ang
('alpha_2', 'd'), # 640 alpha2 wavelength Ang
('beta', 'd'), # 648
('alpha_21', 'd'), # 652 alpha_2/alpha_1
('wave_unit', '4s'), # 656 A or nm; not really used
('beta_rel_int', 'f'), # 660 relative int.of beta
('total_sample_run_time', 'f'),# 664
#('reserved', '36s'), # 668
('reserved', '32s'), # 668
('PSDopening', 'f'), # 700 angular opening in deg
('no_of_tot_meas_ranges_in_dq1', 'i'), # 704
('reserved_1', '1s'), # 708
('further_dql_reading', '1s'), # 709
('cQuantSequence', '1s'), # 710
('HWindicator', '1s'), # 711
)
RAW_RANGE_HEADER = (
('length_of_RAW_RANGE_HEADER', 'i'),# 0 header only, not optional parameters
('no_of_measured_data', 'i'), # 4
('theta_start', 'd'), # 8
('two_theta_start', 'd'), # 16
('chi_start', 'd'), # 24
('phi_start', 'd'), # 32
('x_start', 'd'), # 40
('y_start', 'd'), # 48
('z_start', 'd'), # 56
('divslit_start', 'd'), # 64 Div.slit start if divslit_code==fix
('divslit_code', '6s'), # 72 V20, V6, V4, Vxxx, unkn, or fix
('res2', '2s'), # 78
('antislit_start', 'd'), # 80 Anti.slit start if antislit_code==fix
('antislit_code', '6s'), # 88 V20, V6, V4, Vxxx, unkn, or fix
('res3', '2s'), # 94
('detector_1', 'i'), # 96
('det_HV_1', 'f'), # 100 high voltage
('det_AG_1', 'f'), # 104 amplifier gain
('det_LL_1', 'f'), # 108 lower level
('det_UL_1', 'f'), # 112 upper level
('detector_2', 'i'), # 116 measuring channel
('res4', '8s'), # 120
('det_LL_2', 'f'), # 128 Lower level in % of full scale
('det_UL_2', 'f'), # 132 Upper level in % of full scale
('detslit_code', '5s'), # 136 in, out, unkn
('res5', '3s'), # 141
('aux1_start', 'd'), # 144 start of auxil. axis 1 (up to now 0.0 )
('aux2_start', 'd'), # 152
('aux3_start', 'd'), # 160
('scan_mode', 'i'), # 168 0: step (SS), 1: continuous (SC)
('res6', '4s'), # 172
('increment_1', 'd'), # 176 increment of the scan
('increment_2', 'd'), # 184 unused, so 0.0
('step_time', 'f'), # 192
('scan_type', 'i'), # 196 SCAN_TYPE
('meas_delay_time', 'f'), # 200 delay time before measurement
('range_sample_started', 'f'), # 204 the time the range was started relative to the sample
('rot_speed', 'f'), # 208
('temperature', 'f'), # 212 temperature in K
('heating_cooling_rate', 'f'), # 216
('temp_delay_time', 'f'), # 220
('generator_voltage', 'i'), # 224 kV
('generator_current', 'i'), # 228 mA
('display_plane_number', 'i'), # 232 needed for 3d display
('res7', '4s'), # 236
('act_used_lambda', 'd'), # 240 actually selected wavelength
('varying_parameters', 'i'), # 248 VARYING_BIT
('data_record_length', 'i'), # 252 4 + 8 x num bits in varying_parameters
('total_size_of_extra_records', 'i'), # 256
('smoothing_width', 'f'), # 260
('sim_meas_cond', 'i'), # 264
('res8', '4s'), # 268
('increment_3', 'd'), # 272
('reserved', '24s'), # 280
)
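# (added note) data_record_length is consistent with VARYING_BIT: each data
# point stores a 4-byte float count plus one 8-byte double per varying axis.
# For example, a range where only two_theta and theta vary
# (varying_parameters == 0b11) has data_record_length == 4 + 8*2 == 20.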
EXTRA_RECORD = {
100: ( # OSC
('osci_circle', 'i'),
('res9', '4s'),
('osci_amplitude', 'd'),
('osci_speed', 'f'),
('reserved', '12s'),
),
110: ( # PSD
('act_two_theta', 'd'),
('first_unused_channel', 'i'),
('reserved', '20s'),
),
120: ( # OQM
('mode', 'i'),
('two_theta_actual', 'f'),
('time_actual', 'f'),
('counts', 'f'),
('compound', '32s'),
('peak_id', 'i'),
('reserved', '60s'),
),
150: ( # ORM
('first_two_theta', 'f'),
('last_two_theta', 'f'),
('reserved', '16s'),
),
190: ( # OLC
('two_theta_offset', 'f'),
('int_offset', 'f'),
('reserved', '16s'),
),
200: ( # AD
('act_two_theta', 'd'),
('tthbeg', 'f'),
('tthend', 'f'),
('chibeg', 'f'),
('chiend', 'f'),
('normal', 'i'),
('program', '20s'),
('reserved', '16s'),
),
300: ( # HRXRD
('szSubstrate', '40s'),
('sz_hkl', '9s'),
('sz_mno', '9s'),
('sz_pqr', '9s'),
('szDiffractionSetup', '1s'),
('fAnalyzerOffs', 'f'),
('iIncidenceFlag', 'i'),
('iAlignmentSet', 'i'),
('fLambdaTheory', 'd'),
('fLambdaDetermined', 'd'),
('iHRXRDScanType', 'i'),
('iMonochrAnalysComb', 'i'),
('fSim_FF', 'd'),
('fCoupleFactor', 'd'),
('fThetaBragg', 'd'),
('fTauBragg', 'd'),
('fTauMis', 'd'),
('fLattice', 'd'),
('fPsi', 'd'),
('fScanRelStart', 'd'),
('fScanRelStop', 'd'),
('fOmegaMiddle', 'd'),
('f2ThetaMiddle', 'd'),
('fThetaMiddle', 'd'),
('fPhiMiddle', 'd'),
('fChiMiddle', 'd'),
('fXMiddle', 'd'),
('fYMiddle', 'd'),
('fZMiddle', 'd'),
('fQParallelMiddle', 'd'),
('fQVerticalMiddle', 'd'),
('fHeidenhainPeak', 'd'),
('fXOriginal', 'd'),
('fYOriginal', 'd'),
('fZOriginal', 'd'),
('fOmegaOrigianl', 'd'),
('fOmege1Original', 'd'),
('fOmega2Original', 'd'),
('iSimLength', 'i'),
# needs simdat, simusubstrate, layer block header
),
10140: ( # OCM
('comment', '0s'),
# needs comment, which is total length of the record
),
10250: ( # REFSIM
('ABS_FLAG', '1s'),
('SIZ_FLAG', '1s'),
('BEAM_FLAG', '1s'),
('SWIDTH_FLAG', '1s'),
('SDIST_FLAG', '1s'),
('res1', '3s'),
('SamSize', 'f'),
('BeamSize', 'f'),
('slitwidth', 'f'),
('slitdist', 'f'),
('SimType', '1s'),
('res2', '3s'),
('abs_fac', 'f'),
('layers', 'h'),
('multis', 'h'),
('variable_data1', 'i'),
('variable_data2', 'i'),
# needs rlayer, mulit
),
}
def load(filename):
"""
Load Bruker X-ray data from *filename*.
"""
with open(filename, 'rb') as f:
data = f.read()
if data[:7] != b"RAW1.01":
raise ValueError("Could not load %r: not a Bruker XRD RAW file"%
filename)
return loads(data)
def loads(data):
"""
Load Bruker X-ray data from the byte stream contained in *data*.
"""
if data[:7] != b"RAW1.01":
raise ValueError("not a Bruker XRD RAW file")
# process the main header
offset = 0
header, offset = _make_struct(RAW_HEADER, data, offset)
#assert offset == 712
# for each range, read in the range
ranges = []
for _ in range(header['no_of_tot_meas_ranges']):
# range starts with the range header
rheader, offset = _make_struct(RAW_RANGE_HEADER, data, offset)
extra_length = rheader['total_size_of_extra_records']
# can have multiple extra records, so append them as they appear
rheader['extra'] = []
while extra_length > 0:
# find the extra record type, and use it to parse the extra data
            rectype, reclen = struct.unpack_from('<ii', data, offset=offset)
if rectype not in EXTRA_RECORD:
raise ValueError('unknown measurement type %d'%rectype)
rextra, _ = _make_struct(EXTRA_RECORD[rectype], data, offset+8)
# note: some extra records (e.g., HRXRD simulations) have variable
# data stored after the record. We're skipping this for now.
# plug record type and length into each record
rextra['record_type'] = rectype
rextra['record_length'] = reclen
# save the extra in the range header
rheader['extra'].append(rextra)
# move to the next extra header
offset += reclen
extra_length -= reclen
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""ResNe(X)t Head helper."""
import torch
import torch.nn as nn
from detectron2.layers import ROIAlign
class ResNetRoIHead(nn.Module):
"""
ResNe(X)t RoI head.
"""
def __init__(
self,
dim_in,
num_classes,
pool_size,
resolution,
scale_factor,
dropout_rate=0.0,
act_func="softmax",
aligned=True,
):
"""
The `__init__` method of any subclass should also contain these
arguments.
ResNetRoIHead takes p pathways as input where p in [1, infty].
Args:
dim_in (list): the list of channel dimensions of the p inputs to the
ResNetHead.
num_classes (int): the channel dimensions of the p outputs to the
ResNetHead.
pool_size (list): the list of kernel sizes of p spatial temporal
poolings, temporal pool kernel size, spatial pool kernel size,
spatial pool kernel size in order.
resolution (list): the list of spatial output size from the ROIAlign.
scale_factor (list): the list of ratio to the input boxes by this
number.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
aligned (bool): if False, use the legacy implementation. If True,
align the results more perfectly.
Note:
Given a continuous coordinate c, its two neighboring pixel indices
(in our pixel model) are computed by floor (c - 0.5) and ceil
(c - 0.5). For example, c=1.3 has pixel neighbors with discrete
indices [0] and [1] (which are sampled from the underlying signal at
continuous coordinates 0.5 and 1.5). But the original roi_align
(aligned=False) does not subtract the 0.5 when computing neighboring
pixel indices and therefore it uses pixels with a slightly incorrect
alignment (relative to our pixel model) when performing bilinear
interpolation.
With `aligned=True`, we first appropriately scale the ROI and then
shift it by -0.5 prior to calling roi_align. This produces the
correct neighbors; It makes negligible differences to the model's
performance if ROIAlign is used together with conv layers.
"""
super(ResNetRoIHead, self).__init__()
assert (
len({len(pool_size), len(dim_in)}) == 1
), "pathway dimensions are not consistent."
self.num_pathways = len(pool_size)
for pathway in range(self.num_pathways):
temporal_pool = nn.AvgPool3d(
[pool_size[pathway][0], 1, 1], stride=1
)
self.add_module("s{}_tpool".format(pathway), temporal_pool)
roi_align = ROIAlign(
resolution[pathway],
spatial_scale=1.0 / scale_factor[pathway],
sampling_ratio=0,
aligned=aligned,
)
self.add_module("s{}_roi".format(pathway), roi_align)
spatial_pool = nn.MaxPool2d(resolution[pathway], stride=1)
self.add_module("s{}_spool".format(pathway), spatial_pool)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
# Perform FC in a fully convolutional manner. The FC layer will be
# initialized with a different std comparing to convolutional layers.
self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
# Softmax for evaluation and testing.
if act_func == "softmax":
self.act = nn.Softmax(dim=1)
elif act_func == "sigmoid":
self.act = nn.Sigmoid()
else:
raise NotImplementedError(
"{} is not supported as an activation"
"function.".format(act_func)
)
def forward(self, inputs, bboxes):
assert (
len(inputs) == self.num_pathways
), "Input tensor does not contain {} pathway".format(self.num_pathways)
pool_out = []
for pathway in range(self.num_pathways):
t_pool = getattr(self, "s{}_tpool".format(pathway))
out = t_pool(inputs[pathway])
assert out.shape[2] == 1
out = torch.squeeze(out, 2)
roi_align = getattr(self, "s{}_roi".format(pathway))
out = roi_align(out, bboxes)
s_pool = getattr(self, "s{}_spool".format(pathway))
pool_out.append(s_pool(out))
# B C H W.
x = torch.cat(pool_out, 1)
# Perform dropout.
if hasattr(self, "dropout"):
x = self.dropout(x)
x = x.view(x.shape[0], -1)
x = self.projection(x)
x = self.act(x)
return x
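# A minimal usage sketch (not part of the original file; all shapes and
# hyperparameters below are illustrative assumptions, not values from any
# released config).
def _example_roi_head():  # hypothetical helper, for illustration only
    head = ResNetRoIHead(
        dim_in=[2048],           # single pathway with 2048 channels
        num_classes=80,
        pool_size=[[8, 1, 1]],   # temporal pooling over 8 frames
        resolution=[[7, 7]],     # ROIAlign output size
        scale_factor=[16],       # backbone stride relative to the input
        dropout_rate=0.5,
    )
    clips = [torch.rand(2, 2048, 8, 14, 14)]  # (B, C, T, H, W)
    # One box in (batch_index, x1, y1, x2, y2) format, as expected by ROIAlign.
    boxes = torch.tensor([[0.0, 16.0, 16.0, 128.0, 128.0]])
    return head(clips, boxes)  # -> (num_boxes, num_classes) scores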
class ResNetBasicHead(nn.Module):
"""
ResNe(X)t 3D head.
This layer performs a fully-connected projection during training, when the
input size is 1x1x1. It performs a convolutional projection during testing
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
def __init__(
self,
dim_in,
num_classes,
pool_size,
dropout_rate=0.0,
act_func="softmax",
):
"""
The `__init__` method of any subclass should also contain these
arguments.
ResNetBasicHead takes p pathways as input where p in [1, infty].
Args:
dim_in (list): the list of channel dimensions of the p inputs to the
ResNetHead.
num_classes (int): the channel dimensions of the p outputs to the
ResNetHead.
pool_size (list): the list of kernel sizes of p spatial temporal
poolings, temporal pool kernel size, spatial pool kernel size,
spatial pool kernel size in order.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
"""
super(ResNetBasicHead, self).__init__()
assert (
len({len(pool_size), len(dim_in)}) == 1
), "pathway dimensions are not consistent."
self.num_pathways = len(pool_size)
for pathway in range(self.num_pathways):
if pool_size[pathway] is None:
avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1)
self.add_module("pathway{}_avgpool".format(pathway), avg_pool)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
# Perform FC in a fully convolutional manner. The FC layer will be
# initialized with a different std comparing to convolutional layers.
self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
# Softmax for evaluation and testing.
if act_func == "softmax":
self.act = nn.Softmax(dim=4)
elif act_func == "sigmoid":
self.act = nn.Sigmoid()
else:
raise NotImplementedError(
"{} is not supported as an activation"
"function.".format(act_func)
)
def forward(self, inputs):
assert (
len(inputs) == self.num_pathways
), "Input tensor does not contain {} pathway".format(self.num_pathways)
pool_out = []
for pathway in range(self.num_pathways):
m = getattr(self, "pathway{}_avgpool".format(pathway))
pool_out.append(m(inputs[pathway]))
x = torch.cat(pool_out, 1)
# (N, C, T, H, W) -> (N, T, H, W, C).
x = x.permute((0, 2, 3, 4, 1))
# Perform dropout.
if hasattr(self, "dropout"):
x = self.dropout(x)
x = self.projection(x)
        # no sigmoid or softmax needed for regression
x = x.view(x.shape[0], -1)
return x
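# A minimal usage sketch (not part of the original file; the shapes are
# illustrative assumptions for a two-pathway, SlowFast-style input).
def _example_basic_head():  # hypothetical helper, for illustration only
    head = ResNetBasicHead(
        dim_in=[2048, 256],      # channels of the slow and fast pathways
        num_classes=400,
        pool_size=[None, None],  # None -> AdaptiveAvgPool3d to (1, 1, 1)
        dropout_rate=0.5,
    )
    slow = torch.rand(2, 2048, 8, 7, 7)   # (B, C, T, H, W)
    fast = torch.rand(2, 256, 32, 7, 7)
    return head([slow, fast])  # -> (B, num_classes) regression outputs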
class X3DHead(nn.Module):
"""
X3D head.
This layer performs a fully-connected projection during training, when the
input size is 1x1x1. It performs a convolutional projection during testing
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
def __init__(
self,
dim_in,
dim_inner,
dim_out,
num_classes,
pool_size,
dropout_rate=0.0,
act_func="softmax",
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
norm_module=nn.BatchNorm3d,
bn_lin5_on=False,
):
"""
The `__init__` method of any subclass should also contain these
arguments.
X3DHead takes a 5-dim feature tensor (BxCxTxHxW) as input.
Args:
            dim_in (int): the channel dimension C of the input.
            num_classes (int): the channel dimensions of the output.
            pool_size (list): a single entry list of kernel size for
                spatiotemporal pooling over the TxHxW dimensions.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
bn_lin5_on (bool): if True, perform normalization on the features
before the classifier.
"""
super(X3DHead, self).__init__()
self.pool_size = pool_size
self.dropout_rate = dropout_rate
self.num_classes = num_classes
self.act_func = act_func
self.eps = eps
self.bn_mmt = bn_mmt
self.inplace_relu = inplace_relu
self.bn_lin5_on = bn_lin5_on
self._construct_head(dim_in, dim_inner, dim_out, norm_module)
def _construct_head(self, dim_in, dim_inner, dim_out, norm_module):
self.conv_5 = nn.Conv3d(
dim_in,
dim_inner,
kernel_size=(1, 1, 1),
stride=(1, 1, 1),
padding=(0, 0, 0),
bias=False,
)
self.conv_5_bn = norm_module(
num_features=dim_inner, eps=self.eps, momentum=self.bn_mmt
)
self.conv_5_relu = nn.ReLU(self.inplace_relu)
if self.pool_size is None:
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = nn.AvgPool3d(self.pool_size, stride=1)
self.lin_5 = nn.Conv3d(
dim_inner,
dim_out,
kernel_size=(1, 1, 1),
stride=(1, 1, 1),
padding=(0, 0, 0),
bias=False,
)
if self.bn_lin5_on:
self.lin_5_bn = norm_module(
num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
)
self.lin_5_relu = nn.ReLU(self.inplace_relu)
if self.dropout_rate > 0.0:
self.dropout = nn.Dropout(self.dropout_rate)
# Perform FC in a fully convolutional manner. The FC layer will be
# initialized with a different std comparing to convolutional layers.
self.projection = nn.Linear(dim_out, self.num_classes, bias=True)
# Softmax for evaluation and testing.
if self.act_func == "softmax":
self.act = nn.Softmax(dim=4)
elif self.act_func == "sigmoid":
self.act = nn.Sigmoid()
else:
raise NotImplementedError(
"{} is not supported as an activation"
"function.".format(self.act_func)
)
def forward(self, inputs):
# In its current design the X3D head is only useable for a single
# pathway input.
assert len(inputs) == 1, "Input tensor does not contain 1 pathway"
x = self.conv_5(inputs[0])
x = self.conv_5_bn(x)
x = self.conv_5_relu(x)
        # The tail of this method follows the layers built in _construct_head.
        x = self.avg_pool(x)
        x = self.lin_5(x)
        if self.bn_lin5_on:
            x = self.lin_5_bn(x)
        x = self.lin_5_relu(x)
        # (N, C, T, H, W) -> (N, T, H, W, C).
        x = x.permute((0, 2, 3, 4, 1))
        # Perform dropout.
        if hasattr(self, "dropout"):
            x = self.dropout(x)
        x = self.projection(x)
        # Apply the activation and spatiotemporal averaging at inference time.
        if not self.training:
            x = self.act(x)
            x = x.mean([1, 2, 3])
        x = x.view(x.shape[0], -1)
        return x
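# A minimal usage sketch (not part of the original file; the dimensions are
# illustrative assumptions, not values from any released X3D config).
def _example_x3d_head():  # hypothetical helper, for illustration only
    head = X3DHead(
        dim_in=192,
        dim_inner=432,
        dim_out=2048,
        num_classes=400,
        pool_size=None,  # None -> AdaptiveAvgPool3d to (1, 1, 1)
        dropout_rate=0.5,
    )
    x = torch.rand(2, 192, 16, 7, 7)  # (B, C, T, H, W)
    return head([x])  # -> (B, num_classes)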
#!/usr/bin/env python
# sections marked by "FIGURE_START, shouldbeone" indicate that a new figure is going to be made.
r'''
COMPANION SCRIPT #1, TESTED ONLY ON PYTHON 2.7, FOR:
Mannige RV (2017) (article title TBD).
'''
output_fig_dir = '../manuscript/automated_figures/'
# This takes a LONG time to calculate (many hours).
length_dependent_csv_file = "local_imports/data_length.csv"
master_ss_file = "local_imports/data_ss.csv"
fake_ss_file = "local_imports/data_fake_ss.csv"
type_to_label = {'L':'$L$ (residues)','phi':'$\phi (^\circ)$', 'psi':'$\psi (^\circ)$', 'omega':'$\omega$', 'rg':'$r_{\mathrm{g}}$','re':'$r_{\mathrm{e}}$',
'd':'$d$', 'd2':'$|d|$' ,'theta':r'$\theta (^\circ)$', 'R':'$\mathcal{R}$'} #, (\phi+\psi+2\pi)/4\pi$'}
type_to_label = {'L':'$L$ (residues)','phi':'$\phi$', 'psi':'$\psi$', 'omega':'$\omega$', 'rg':'$r_{\mathrm{g}}$','re':'$r_{\mathrm{e}}$',
'd':'$d$', 'd2':'$|d|$' ,'theta':r'$\theta$', 'R':'$\mathcal{R}$'} #, (\phi+\psi+2\pi)/4\pi$'}
show_graphs = 1 # if set, open each figure via a simple command line call to the PDF viewer below
pdf_viewer = "evince" # The author's viewer of choice (Linux-based).
# GLOBAL IMPORTS:
import os, sys, copy, random, string
import matplotlib.pyplot as plt # For utilizing colormap interpolation
from matplotlib import lines
from matplotlib.colors import LogNorm
import numpy as np
import Bio.PDB # Biopython's PDB module
import pandas as pd
import matplotlib as mpl
from scipy import interpolate
import scipy.ndimage
import scipy.stats # for KDE
import urllib
if (sys.version_info > (3, 0)):
import urllib.request as urllib
# LOCAL IMPORTS
sys.path.insert(0, "./local_imports/") # for the local imports
import Geometry, PeptideBuilder, locallib
# -----------------------
# A PRETTY GRAPH IMPORT
seaborn_exists = 1
try:
import seaborn as sns
except:
seaborn_exists = 0
if seaborn_exists:
sns.set_style('ticks') # *
#sns.set_context("paper", font_scale=1.5)#, rc={"lines.linewidth": 4})
# -----------------------
# ===================================================================================
# SUPER IMPORTANT FOR AESTHETICS (call after every sns.style() function)
def set_grays():
black_color = np.ones(3)*.5 # Darker is closer to 0; lighter is closer to 1
text_color = np.ones(3)*.35 #
# CHECK ALL OTHER PARAMETERS: print plt.rcParams
#sns.set_style("whitegrid")
plt.rc_context({'axes.labelcolor':text_color,'axes.linewidth': 1.0,'legend.edgecolor': black_color,
'lines.color':black_color,'lines.linewidth':1.0,'text.color':text_color,'axes.edgecolor':black_color,
'xtick.color':text_color,'ytick.color':text_color})
	#to retrieve any of the parameter values, just do, e.g.:
#some_value = plt.rcParams['lines.color']
# Shows all other parameters to muck about with:
#print plt.rcParams
# For using latex commands in labels, etc.
#mpl.rc('text',usetex=True)
#mpl.rc('text.latex', preamble='\usepackage{color}')
#set_grays()
# ===================================================================================
import itertools
if seaborn_exists:
	sns_palette = sns.color_palette() # another possibility: sns.color_palette("colorblind")
else:
	sns_palette = [c['color'] for c in mpl.rcParams['axes.prop_cycle']] # fall back to matplotlib's default color cycle
palette = itertools.cycle(sns_palette)
marker_shapes = itertools.cycle(['o','s', '^','p'])
# ===================================================================================
# SETTING UP A CUSTOM COLORMAP
#
COLORSWITCH = 0.5; bc = [1,1,1] # background (white)
import colorsys
r = [colorsys.rgb_to_hsv(1,0,0)[0],0.75,0.5]
y = [colorsys.rgb_to_hsv(1,1,0)[0],0.5,0.75]
c3 = colorsys.hsv_to_rgb(*r) # the '*' converts [a,b,c] into a,b,c
c4 = colorsys.hsv_to_rgb(*y)
# Now the color map dictionary
cdict = {
'red': ((0.00, c3[0], c3[0]), (COLORSWITCH, bc[0], bc[0]), (1.0, c4[0], c4[0])),
'green': ((0.00, c3[1], c3[1]), (COLORSWITCH, bc[1], bc[1]), (1.0, c4[1], c4[1])),
'blue': ((0.00, c3[2], c3[2]), (COLORSWITCH, bc[2], bc[2]), (1.0, c4[2], c4[2]))
}
ss_name_to_label = {'H':r'$\alpha$','E':r'$\beta$','G':r'$3_{10}$','P':r'$\mathrm{ppII}$'}
ss_name_to_label_long = {'H':r'$\alpha$ (H)','E':r'E ($\beta$)','G':r'G ($3_{10}$)','P':r'P ($\mathrm{pp2}$)'}
from matplotlib.colors import LinearSegmentedColormap # For making your own colormaps
cmap = LinearSegmentedColormap('chirality_r', cdict)
plt.register_cmap(cmap=cmap)
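# Illustrative note (not in the original script): once registered, the custom
# diverging map can be fetched by name like any built-in colormap, e.g.
#	cm = plt.get_cmap('chirality_r')
#	cm(0.0), cm(COLORSWITCH), cm(1.0) # red end, white switch point, yellow end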
ss_codes = {}
ss_codes['dssp'] = {
'H':0, # (0) Alpha - helix
'G':1, # (1) 3.10 - helix
'I':2, # (2) Pi - helix
'T':3, # (3) hydrogen bonded turn
'S':4, # (4) bend
' ':5, # (5) coil
'B':6, # (6) residue in isolated beta-bridge
'E':7, # (7) extended strand, participates in beta ladder
'U':8 # (8) not found
}
#code taken from <NAME> and <NAME> (1999)
ss_codes['xtlsstr'] = {
'H': 0, # Alpha -helix
'G': 1, # 3.10 - helix
'g': 2, # 3.10 - helix C-cap
'T': 3, # turns (hydrogen-bonded)
'N': 4, # turns -'like' (unhydrogen-bonded)
'P': 5, # polyproline II type (3-1) helix
'p': 6, # polyproline II type (3-1) helix
'-': 7, # coils
'E': 8, # extended strand
'U': 9, # not found
}
# code taken from Cubellis et al., 2005
ss_codes['segno'] = {
'H':0,# Alpha -helix
'G':1,# 3.10 -helix
'I':2,# pi -helix
'P':3,# Polyproline II
'O':4,# coil
'E':5,# extended strand (in beta-sheet)
'b':6,# extended strand (alone)
'U':7,# not found
}
def do_d(X):
return X
#return np.abs(X)
def do_theta(X):
return X #-1.0*np.cos(np.radians(X)/2.0)
#return X
def prepare_master_ss_file():
## SS Database study
# Mansiaux, <NAME> (2011) Assignment of PolyProline II Conformation and Analysis
# of Sequence-Structure Relationship. PLoS One 6(3): e18401.
ss_database_filename = "local_imports/PPII/PPII_cullpdb_pc30_res2.5_R1.0_d090511_chains7172.db"
#obtained from: http://www.dsimb.inserm.fr/~debrevern/DOWN/DB/PPII/PPII_cullpdb_pc30_res2.5_R1.0_d090511_chains7172.db
#reported in: http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0018401
if not os.path.isfile(ss_database_filename):
#print "Local PPII database '%s' not available. Exiting." %(ss_database_filename)
url = 'http://www.dsimb.inserm.fr/~debrevern/DOWN/DB/PPII/PPII_cullpdb_pc30_res2.5_R1.0_d090511_chains7172.db'
urllib.urlretrieve(url, ss_database_filename)
#Download from:
#http://www.dsimb.inserm.fr/~debrevern/DOWN/DB/PPII/PPII_cullpdb_pc30_res2.5_R1.0_d090511_chains7172.db
# Ignore all lines starting with ">"
ppII_colnames = ['aminoacids', 'proteinblocks', 'dssp', 'xtlsstr', 'segno', 'pross',
'relacc','phi','psi','omega','calphax','calphay','calphaz','temp', 'u1', 'u2','u3'] # the last three are 'u' for unknown
	ppII_df = pd.read_csv(ss_database_filename, sep=r'\s+', header=None, names=ppII_colnames, comment='>')
	ppII_df = ppII_df[(np.abs(ppII_df['phi']) != 999)
	                & (np.abs(ppII_df['psi']) != 999)
	                & (np.abs(ppII_df['omega']) != 999)
	                & ( (np.abs(ppII_df['phi']) > 1) & (np.abs(ppII_df['psi']) > 1) ) # a weird space where phi=psi=0
	                ]
#df = ppII_df[(ppII_df['phi'] > 0)]
#sns.jointplot(df['phi'],df['psi'], kind="hex", color="k"); plt.show();
#locallib.plot_ramachandran_histogram(df['phi'],df['psi'],levels=[0.9], fill=1,lines=0, show = 1,smooth=2)#!!!
#exit()
print "obtaining d,theta,R"
ds = []
thetas = []
Rs = []
total = len(ppII_df['phi'])
counter = 0
for phi,psi,omega in zip(ppII_df['phi'],ppII_df['psi'],ppII_df['omega']):
d = 999
theta = 999
R = 999
if phi and psi and omega:
if -180.0 < phi and phi < 180.0 and -180.0 < psi and psi < 180.0:
#h, theta, d = locallib.calculate_handedness_from_theory(phi, psi, omega)
d,theta,rs = locallib.calculate_d_theta_r(phi, psi, omega)
R = locallib.R(phi=phi, psi=psi)
ds.append(d)
thetas.append(theta)
Rs.append(R)
counter += 1
if counter % 1000 == 0:
sys.stdout.write("\r\tCompleted %d percent\t\t" %(int(100.*float(counter)/total)))
sys.stdout.flush()
print
print "populating the new column entries"
ppII_df = ppII_df.assign(R=Rs)
ppII_df = ppII_df.assign(d=ds)
ppII_df = ppII_df.assign(theta=thetas)
print "Saving to fn='%s' [open using 'pd.read_csv(fn)]'" %(master_ss_file)
ppII_df.to_csv(master_ss_file)
print "DONE"
def prepare_main_csv_file():
# Generates structures for cis and trans backbones, calculates for each structure
# the radius of gyration (rg) and the end-to-end distance (re), and saves them to a csv file
# Relevant values saved to the file:
columns = ['L','phi','psi','omega','d','theta','rg','re','R','R2']
step_size = 1 # angle step size
directory = os.path.split(length_dependent_csv_file)[0]
if not os.path.isdir(directory):
os.makedirs(directory)
print "Populating the file '%s' with the values %s"%(length_dependent_csv_file,str(columns))
outputf = locallib.open_file(length_dependent_csv_file,'w')
outputf.write(",".join(columns)+"\n")
counter = 0
phipsi_range = np.arange(-180,180.1,step_size)
omegas = [180,0]
lengths = [16*2,16] # length of peptide
total_points = len(omegas)*len(lengths)*(len(phipsi_range)**2) # just for counting
for L in lengths:
		for omega in omegas:
print "\n--------\nCALCULATING VALUES FOR OMEGA = "+str(omega)+" PEPTIDES"
for phi in phipsi_range:
for psi in phipsi_range:
counter += 1
if counter % 20 == 0.0:
str_phi = "{0: >4}".format(int(phi))
str_psi = "{0: >4}".format(int(psi))
sys.stdout.write("\r\tTotal completed %d percent\t" %(int(100.*float(counter)/total_points)))
sys.stdout.flush()
# ------------------
# Building a peptide (Tien et al., PeptideBuilder, PeerJ, 2013)
angles = []
for n in range(int(L)):
angles.append((phi,psi,omega))
st1 = []
st1 = locallib.build_structure(angles)
# Only rg and re require structures to get values out
rg = locallib.calculate_rg(st1)
re = locallib.calculate_re(st1)
# ------------------
# d, theta, and R do not need structures to be calculated
d, theta, rs = locallib.calculate_d_theta_r(phi, psi, omega)
R = locallib.R(phi=phi, psi=psi)
R2 = (phi + psi + 360.)/(720.) # min = -360, max = 360
str_vals = []
for v in [L,phi,psi,omega,d,theta,rg,re,R,R2]:
str_vals.append(str(v))
outputf.write(",".join(str_vals)+"\n")
sys.stdout.write("\n")
#
outputf.close()
print "DONE"
def write_fake_secondary_structure_database(sstype = 'segno',omega=180.0,sample_points=100000, sigma=5):
fake_ss_centroids = []
angle_range = [-45-90,-45,45,45+90]
for x in angle_range:
for y in angle_range:
if not (x,y) in fake_ss_centroids:
fake_ss_centroids.append((x,y,omega))
phi_start = -180
psi_start = -180
dictionary = {'phi':[],'psi':[],'omega':[],'d':[],'theta':[],'R':[],'R2':[],sstype:[]}
ss_index = 0 # This will act as the secondary structure name
for phi,psi,omega in fake_ss_centroids:
ss_index+=1
sys.stdout.write("\r\tSecondary structures generated: %d of %d\t\t" %(ss_index,len(fake_ss_centroids)))
sys.stdout.flush()
phis = []
psis = []
for trial in range(sample_points):
rama_phi = random.gauss(phi, sigma=sigma)
rama_psi = random.gauss(psi, sigma=sigma)
phis.append(rama_phi)
psis.append(rama_psi)
'''
rama_phi2 = (rama_phi - phi_start) % 360. + phi_start # 'phi_start' declared at the beginning of this file
rama_psi2 = (rama_psi - psi_start) % 360. + psi_start # 'psi_start' declared at the beginning of this file
phis.append(rama_phi2)
psis.append(rama_psi2)
if rama_phi != rama_phi2 or rama_psi != rama_psi2:
phis.append(rama_phi)
psis.append(rama_psi)
'''
#sns.jointplot(np.array(phis), np.array(psis)); plt.show();
for phi,psi in zip(phis,psis):
original_phi = (phi - phi_start ) % 360. + phi_start # 'phi_start' declared at the beginning of this file
original_psi = (psi - psi_start ) % 360. + psi_start # 'psi_start' declared at the beginning of this file
d,theta,rs = locallib.calculate_d_theta_r(original_phi,original_psi,omega) # Miyazawa values Mannige, PeerJ, 2017
R = locallib.R(phi=original_phi,psi=original_psi) # Ramachandran number from Mannige, Kundu, Whitelam, PLoS ONE, 2016
R2 = (original_phi+original_psi + 360.)/(720.) # min = -360, max = 360
dictionary['phi'].append(phi)
dictionary['psi'].append(psi)
dictionary['omega'].append(omega)
dictionary['d'].append(d)
dictionary['theta'].append(theta)
dictionary['R'].append(R)
dictionary['R2'].append(R2)
dictionary[sstype].append(ss_index)
print
df = pd.DataFrame(dictionary)
df.to_csv(fake_ss_file)
print "Done"
#write_fake_secondary_structure_database()
#exit()
#
def calc_rg(d,L):
#return 0.475398 * L * d
#Delta = (0.477931 + 0.955227/2)/2
Delta = 0.48
return Delta * L * np.abs(d)
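# Worked example (illustration only; units follow whatever locallib uses):
# for an L=32 residue chain with per-residue displacement |d|=1.5,
# calc_rg(1.5, 32) = 0.48 * 32 * 1.5 = 23.04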
#
def get_rg_estimates(x,y=None,c='k',show=0,L=None):
x2 = []
y2 = []
minval = np.min(x)
maxval = np.max(x)
steps = float(maxval-minval)/200.0
for currentx in np.arange(minval,maxval+steps/2.,steps):
x2.append(currentx)
y2.append(calc_rg(currentx,L))
#
if show and y:
plt.plot(x2, y2, c=c)
plt.show()
#
return x2,y2
#
def calc_re(d,L):
#return 0.70833*d*(L**(2.0 - 0.90765))
#Delta = (0.477931*2 + 0.955227)/2
Delta = 0.48*2
return Delta * L * np.abs(d)
#
def get_re_estimates(x,y=None,c='k',show=0,L=None):
x2 = []
y2 = []
minval = np.min(x)
maxval = np.max(x)
steps = float(maxval-minval)/200.0
for currentx in np.arange(minval,maxval+steps/2.,steps):
x2.append(currentx)
y2.append(calc_re(currentx,L))
#
if show and y:
plt.plot(x2, y2,c=c)
plt.show()
#
return x2,y2
def get_his(v,extent=None,bins=120,norm=1):
extent = [v.min(),v.max()]
a,b = np.histogram(v,bins=bins,range=extent)
X = []; Y = [];
for i in range(len(a)):
X.append(float(b[i]+b[i+1])/2)
Y.append(float(a[i])/2)
#
X = np.array(X)
Y = np.array(Y)
if norm:
Y = Y/Y.max()
return X,Y
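# Hedged usage sketch (illustration only): a max-normalized histogram trace,
# where X holds the bin centers and Y the normalized counts, e.g.
#	X, Y = get_his(np.random.normal(0.0, 1.0, 10000))
#	plt.plot(X, Y) # a bell-shaped trace peaking at ~1.0 near x=0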
if 0:
# The radius of gyration (rg) and the end-to-end distance (re) correlated well with Miyazawa's d.
	# However, there is a scaling factor that appears to be required for perfect correlation, and that
	# scaling factor will likely be correlated with $N$. Here, we generate a set of Rg, Re, and d, and
# try a bunch of scaling factors to see if one allows for perfect correlation.
# First, we draw the relationship between the deviation in values such as Rg/d as a function of N
step = 40
show = 1 # if you want to know what is going on (graphically)
ci = -1
colors = ['b','r','g','k']
Ns = []
rgs_over_d = []
res_over_d = []
columns = ['L','phi','psi','omega','d','theta','rg','re','R']
df = pd.DataFrame(columns=columns)
total_points = len(np.arange(-180,181,step))**2.0
for L in np.arange(1,9)*5:
L = float(L)
ds = []
rgs = []
		res = []
import time
import os
import io
import zipfile
import pickle
from typing import Union, Type, Optional, Dict, Any, List, Tuple, Callable
from abc import ABC, abstractmethod
from collections import deque
import gym
import torch as th
import numpy as np
from stable_baselines3.common import logger
from stable_baselines3.common.policies import BasePolicy, get_policy_from_name
from stable_baselines3.common.utils import set_random_seed, get_schedule_fn, update_learning_rate, get_device
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, unwrap_vec_normalize, VecNormalize, VecTransposeImage
from stable_baselines3.common.preprocessing import is_image_space
from stable_baselines3.common.save_util import data_to_json, json_to_data, recursive_getattr, recursive_setattr
from stable_baselines3.common.type_aliases import GymEnv, TensorDict, RolloutReturn, MaybeCallback
from stable_baselines3.common.callbacks import BaseCallback, CallbackList, ConvertCallback, EvalCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.buffers import ReplayBuffer
class BaseRLModel(ABC):
"""
The base RL model
:param policy: (Type[BasePolicy]) Policy object
:param env: (Union[GymEnv, str]) The environment to learn from
(if registered in Gym, can be str. Can be None for loading trained models)
:param policy_base: (Type[BasePolicy]) The base policy used by this method
:param learning_rate: (float or callable) learning rate for the optimizer,
it can be a function of the current progress (from 1 to 0)
:param policy_kwargs: (Dict[str, Any]) Additional arguments to be passed to the policy on creation
:param verbose: (int) The verbosity level: 0 none, 1 training information, 2 debug
:param device: (Union[th.device, str]) Device on which the code should run.
By default, it will try to use a Cuda compatible device and fallback to cpu
if it is not possible.
:param support_multi_env: (bool) Whether the algorithm supports training
with multiple environments (as in A2C)
:param create_eval_env: (bool) Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param monitor_wrapper: (bool) When creating an environment, whether to wrap it
or not in a Monitor wrapper.
:param seed: (Optional[int]) Seed for the pseudo random generators
:param use_sde: (bool) Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: (int) Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
"""
def __init__(self,
policy: Type[BasePolicy],
env: Union[GymEnv, str],
policy_base: Type[BasePolicy],
learning_rate: Union[float, Callable],
policy_kwargs: Dict[str, Any] = None,
verbose: int = 0,
device: Union[th.device, str] = 'auto',
support_multi_env: bool = False,
create_eval_env: bool = False,
monitor_wrapper: bool = True,
seed: Optional[int] = None,
use_sde: bool = False,
sde_sample_freq: int = -1):
if isinstance(policy, str) and policy_base is not None:
self.policy_class = get_policy_from_name(policy_base, policy)
else:
self.policy_class = policy
self.device = get_device(device)
if verbose > 0:
print(f"Using {self.device} device")
self.env = None # type: Optional[GymEnv]
# get VecNormalize object if needed
self._vec_normalize_env = unwrap_vec_normalize(env)
self.verbose = verbose
self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs
self.observation_space = None # type: Optional[gym.spaces.Space]
self.action_space = None # type: Optional[gym.spaces.Space]
self.n_envs = None
self.num_timesteps = 0
self.eval_env = None
self.seed = seed
self.action_noise = None # type: Optional[ActionNoise]
self.start_time = None
self.policy = None
self.learning_rate = learning_rate
self.lr_schedule = None # type: Optional[Callable]
self._last_obs = None # type: Optional[np.ndarray]
# When using VecNormalize:
self._last_original_obs = None # type: Optional[np.ndarray]
self._episode_num = 0
# Used for gSDE only
self.use_sde = use_sde
self.sde_sample_freq = sde_sample_freq
# Track the training progress (from 1 to 0)
# this is used to update the learning rate
self._current_progress = 1
# Buffers for logging
self.ep_info_buffer = None # type: Optional[deque]
self.ep_success_buffer = None # type: Optional[deque]
# For logging
self._n_updates = 0 # type: int
# Create and wrap the env if needed
if env is not None:
if isinstance(env, str):
if create_eval_env:
eval_env = gym.make(env)
if monitor_wrapper:
eval_env = Monitor(eval_env, filename=None)
self.eval_env = DummyVecEnv([lambda: eval_env])
if self.verbose >= 1:
print("Creating environment from the given name, wrapped in a DummyVecEnv.")
env = gym.make(env)
if monitor_wrapper:
env = Monitor(env, filename=None)
env = DummyVecEnv([lambda: env])
env = self._wrap_env(env)
self.observation_space = env.observation_space
self.action_space = env.action_space
self.n_envs = env.num_envs
self.env = env
if not support_multi_env and self.n_envs > 1:
raise ValueError("Error: the model does not support multiple envs requires a single vectorized"
" environment.")
def _wrap_env(self, env: GymEnv) -> VecEnv:
if not isinstance(env, VecEnv):
if self.verbose >= 1:
print("Wrapping the env in a DummyVecEnv.")
env = DummyVecEnv([lambda: env])
if is_image_space(env.observation_space) and not isinstance(env, VecTransposeImage):
if self.verbose >= 1:
print("Wrapping the env in a VecTransposeImage.")
env = VecTransposeImage(env)
return env
@abstractmethod
def _setup_model(self) -> None:
"""
Create networks, buffer and optimizers
"""
raise NotImplementedError()
def _get_eval_env(self, eval_env: Optional[GymEnv]) -> Optional[GymEnv]:
"""
Return the environment that will be used for evaluation.
:param eval_env: (Optional[GymEnv]))
:return: (Optional[GymEnv])
"""
if eval_env is None:
eval_env = self.eval_env
if eval_env is not None:
eval_env = self._wrap_env(eval_env)
assert eval_env.num_envs == 1
return eval_env
def _setup_lr_schedule(self) -> None:
"""Transform to callable if needed."""
self.lr_schedule = get_schedule_fn(self.learning_rate)
def _update_current_progress(self, num_timesteps: int, total_timesteps: int) -> None:
"""
Compute current progress (from 1 to 0)
:param num_timesteps: current number of timesteps
:param total_timesteps:
"""
self._current_progress = 1.0 - float(num_timesteps) / float(total_timesteps)
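        # Worked example: num_timesteps=25000 of total_timesteps=100000
        # gives _current_progress = 1.0 - 0.25 = 0.75 (progress runs 1 -> 0).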
def _update_learning_rate(self, optimizers: Union[List[th.optim.Optimizer], th.optim.Optimizer]) -> None:
"""
Update the optimizers learning rate using the current learning rate schedule
and the current progress (from 1 to 0).
:param optimizers: (Union[List[th.optim.Optimizer], th.optim.Optimizer])
An optimizer or a list of optimizers.
"""
# Log the current learning rate
logger.logkv("learning_rate", self.lr_schedule(self._current_progress))
if not isinstance(optimizers, list):
optimizers = [optimizers]
for optimizer in optimizers:
update_learning_rate(optimizer, self.lr_schedule(self._current_progress))
@staticmethod
def safe_mean(arr: Union[np.ndarray, list, deque]) -> np.ndarray:
"""
Compute the mean of an array if there is at least one element.
For empty array, return NaN. It is used for logging only.
:param arr:
:return:
"""
return np.nan if len(arr) == 0 else np.mean(arr)
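        # Examples: safe_mean([]) -> nan (logged as-is); safe_mean([1, 2, 3]) -> 2.0.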
def get_env(self) -> Optional[VecEnv]:
"""
Returns the current environment (can be None if not defined).
:return: (Optional[VecEnv]) The current environment
"""
return self.env
def get_vec_normalize_env(self) -> Optional[VecNormalize]:
"""
Return the ``VecNormalize`` wrapper of the training env
if it exists.
:return: Optional[VecNormalize] The ``VecNormalize`` env.
"""
return self._vec_normalize_env
@staticmethod
def check_env(env: GymEnv, observation_space: gym.spaces.Space, action_space: gym.spaces.Space):
"""
Checks the validity of the environment to load vs the one used for training.
Checked parameters:
- observation_space
- action_space
:param env: (GymEnv)
:param observation_space: (gym.spaces.Space)
:param action_space: (gym.spaces.Space)
"""
if (observation_space != env.observation_space
# Special cases for images that need to be transposed
and not (is_image_space(env.observation_space)
and observation_space == VecTransposeImage.transpose_space(env.observation_space))):
raise ValueError(f'Observation spaces do not match: {observation_space} != {env.observation_space}')
if action_space != env.action_space:
raise ValueError(f'Action spaces do not match: {action_space} != {env.action_space}')
def set_env(self, env: GymEnv) -> None:
"""
Checks the validity of the environment, and if it is coherent, set it as the current environment.
        Furthermore, wrap any non-vectorized env into a vectorized one.
        Checked parameters:
- observation_space
- action_space
:param env: The environment for learning a policy
"""
self.check_env(env, self.observation_space, self.action_space)
# it must be coherent now
# if it is not a VecEnv, make it a VecEnv
env = self._wrap_env(env)
self.n_envs = env.num_envs
self.env = env
def get_torch_variables(self) -> Tuple[List[str], List[str]]:
"""
Get the name of the torch variable that will be saved.
``th.save`` and ``th.load`` will be used with the right device
instead of the default pickling strategy.
:return: (Tuple[List[str], List[str]])
            names of the variables with state dicts to save, names of additional torch tensors
"""
state_dicts = ["policy"]
return state_dicts, []
@abstractmethod
def learn(self, total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 100,
tb_log_name: str = "run",
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True) -> 'BaseRLModel':
"""
Return a trained model.
:param total_timesteps: (int) The total number of samples to train on
        :param callback: (function (dict, dict) -> boolean) Function called at every step with the state of the algorithm.
It takes the local and global variables. If it returns False, training is aborted.
:param log_interval: (int) The number of timesteps before logging.
:param tb_log_name: (str) the name of the run for tensorboard log
:param reset_num_timesteps: (bool) whether or not to reset the current timestep number (used in logging)
:param eval_env: (gym.Env) Environment that will be used to evaluate the agent
        :param eval_freq: (int) Evaluate the agent every ``eval_freq`` timesteps
        :param n_eval_episodes: (int) Number of episodes to evaluate the agent on
        :param eval_log_path: (Optional[str]) Path to a folder where the evaluations will be saved
        :return: (BaseRLModel) the trained model
        """
<filename>TurtleArt/talogo.py
# -*- coding: utf-8 -*-
#Copyright (c) 2007-8, Playful Invention Company.
#Copyright (c) 2008-11, <NAME>
#Copyright (c) 2008-10, <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import gtk
from time import time
from operator import isNumberType
from UserDict import UserDict
from taconstants import TAB_LAYER, DEFAULT_SCALE, PREFIX_DICTIONARY
from tapalette import block_names, value_blocks
from tautils import get_pixbuf_from_journal, convert, data_from_file, \
text_media_type, round_int, debug_output
from util.RtfParser import RtfTextOnly
from gettext import gettext as _
media_blocks_dictionary = {} # new media blocks get added here
primitive_dictionary = {} # new block primitives get added here
class noKeyError(UserDict):
__missing__ = lambda x, y: 0
class symbol:
def __init__(self, name):
self.name = name
self.nargs = None
self.fcn = None
def __str__(self):
return self.name
def __repr__(self):
return '#' + self.name
class logoerror(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class HiddenBlock:
def __init__(self, name, value=None):
self.name = name
self.values = []
if value is not None:
self.values.append(value)
self.primitive = None
else:
self.primitive = name
self.connections = []
self.docks = []
# Utility functions
def _just_stop():
""" yield False to stop stack """
yield False
def _millisecond():
""" Current time in milliseconds """
return time() * 1000
class LogoCode:
""" A class for parsing Logo code """
def __init__(self, tw):
self.tw = tw
self.oblist = {}
DEFPRIM = {'(': [1, lambda self, x: self._prim_opar(x)],
'define': [2, self._prim_define],
'nop': [0, lambda self: None]}
for p in iter(DEFPRIM):
if len(DEFPRIM[p]) == 2:
self.def_prim(p, DEFPRIM[p][0], DEFPRIM[p][1])
else:
self.def_prim(p, DEFPRIM[p][0], DEFPRIM[p][1], DEFPRIM[p][2])
self.symtype = type(self._intern('print'))
self.listtype = type([])
self.symnothing = self._intern('%nothing%')
self.symopar = self._intern('(')
self.iline = None
self.cfun = None
self.arglist = None
self.ufun = None
self.procstop = False
self.running = False
self.istack = []
self.stacks = {}
self.boxes = {'box1': 0, 'box2': 0}
self.heap = []
self.iresults = None
self.step = None
self.bindex = None
self.hidden_turtle = None
self.keyboard = 0
self.trace = 0
self.update_values = False
self.gplay = None
self.filepath = None
self.pixbuf = None
self.dsobject = None
self.start_time = None
self.body_height = int((self.tw.canvas.height / 40) * self.tw.scale)
self.scale = DEFAULT_SCALE
def stop_logo(self):
""" Stop logo is called from the Stop button on the toolbar """
self.tw.step_time = 0
self.step = _just_stop()
for plugin in self.tw._plugins:
plugin.stop()
if self.tw.gst_available:
from tagplay import stop_media
stop_media(self)
self.tw.active_turtle.show()
def def_prim(self, name, args, fcn, rprim=False):
""" Define the primitives associated with the blocks """
sym = self._intern(name)
sym.nargs, sym.fcn = args, fcn
sym.rprim = rprim
def _intern(self, string):
""" Add any new objects to the symbol list. """
if string in self.oblist:
return self.oblist[string]
sym = symbol(string)
self.oblist[string] = sym
return sym
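    # Interning guarantees a single shared object per name, e.g.
    #     self._intern('forward') is self._intern('forward')  -> True
    # so primitives attached via def_prim are visible wherever the name recurs.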
def run_blocks(self, blk, blocks, run_flag):
""" Given a block to run... """
for k in self.stacks.keys():
self.stacks[k] = None
self.stacks['stack1'] = None
self.stacks['stack2'] = None
self.tw.saving_svg = False
# Save state in case there is a hidden macro expansion
self.save_blocks = None
self.save_blk = blk
self.save_while_blks = []
if self.trace > 0:
self.update_values = True
else:
self.update_values = False
for b in blocks:
b.unhighlight()
for b in blocks:
# Hidden macro expansions
if b.name in ['while', 'until', 'forever']:
action_blk, new_blocks = self._expand_forever(b, blk, blocks)
blocks = new_blocks[:]
if b == blk:
blk = action_blk
for b in blocks:
if b.name == 'hat1':
code = self._blocks_to_code(b)
self.stacks['stack1'] = self._readline(code)
elif b.name == 'hat2':
code = self._blocks_to_code(b)
self.stacks['stack2'] = self._readline(code)
elif b.name == 'hat':
if b.connections is not None and len(b.connections) > 1 and \
b.connections[1] is not None:
code = self._blocks_to_code(b)
try:
x = b.connections[1].values[0]
except IndexError:
self.tw.showlabel('#nostack')
return None
if type(convert(x, float, False)) == float:
if int(float(x)) == x:
x = int(x)
self.stacks['stack3' + str(x)] = self._readline(code)
code = self._blocks_to_code(blk)
if self.save_blocks is not None:
# Undo any hidden macro expansion
blocks = self.save_blocks[:]
blk = self.save_blk
for b in self.save_while_blks:
if b[1] is not None:
b[0].connections[0].connections[b[1]] = b[0]
if b[2] is not None:
b[0].connections[-1].connections[b[2]] = b[0]
if b[3] is not None:
b[0].connections[-2].connections[b[3]] = b[0]
if run_flag:
# debug_output("running code: %s" % (code), self.tw.running_sugar)
self.start_time = time()
self._setup_cmd(code)
if not self.tw.hide:
self.tw.display_coordinates()
else:
return code
def _blocks_to_code(self, blk):
""" Convert a stack of blocks to pseudocode. """
if blk is None:
return ['%nothing%', '%nothing%']
code = []
dock = blk.docks[0]
if len(dock) > 4: # There could be a '(', ')', '[' or ']'.
code.append(dock[4])
if blk.name == 'savesvg':
self.tw.saving_svg = True
if blk.primitive is not None: # make a tuple (prim, blk)
# special case: expand 'while' and 'until' primitives
try:
code.append((blk.primitive,
self.tw.block_list.list.index(blk)))
except ValueError:
code.append(blk.primitive) # Hidden block
elif len(blk.values) > 0: # Extract the value from content blocks.
if blk.name == 'number':
try:
code.append(float(blk.values[0]))
except ValueError:
code.append(float(ord(blk.values[0][0])))
elif blk.name == 'string' or \
blk.name == 'title': # deprecated block
if type(blk.values[0]) == float or type(blk.values[0]) == int:
if int(blk.values[0]) == blk.values[0]:
blk.values[0] = int(blk.values[0])
code.append('#s' + str(blk.values[0]))
else:
code.append('#s' + blk.values[0])
elif blk.name in PREFIX_DICTIONARY:
if blk.values[0] is not None:
code.append(PREFIX_DICTIONARY[blk.name] + \
str(blk.values[0]))
else:
code.append(PREFIX_DICTIONARY[blk.name] + 'None')
elif blk.name in media_blocks_dictionary:
code.append('#smedia_' + blk.name.upper())
else:
return ['%nothing%']
else:
return ['%nothing%']
if blk.connections is not None and len(blk.connections) > 0:
for i in range(1, len(blk.connections)):
b = blk.connections[i]
dock = blk.docks[i]
if len(dock) > 4: # There could be a '(', ')', '[' or ']'.
for c in dock[4]:
code.append(c)
if b is not None:
code.extend(self._blocks_to_code(b))
elif blk.docks[i][0] not in ['flow', 'unavailable']:
code.append('%nothing%')
return code
def _setup_cmd(self, string):
""" Execute the psuedocode. """
self.hidden_turtle = self.tw.active_turtle
self.hidden_turtle.hide() # Hide the turtle while we are running.
self.procstop = False
blklist = self._readline(string)
self.step = self._start_eval(blklist)
def _readline(self, line):
"""
Convert the pseudocode into a list of commands.
The block associated with the command is stored as the second element
in a tuple, e.g., (#forward, 16)
"""
# debug_output(line, self.tw.running_sugar)
res = []
while line:
token = line.pop(0)
bindex = None
if type(token) == tuple:
(token, bindex) = token
if isNumberType(token):
res.append(token)
elif token.isdigit():
res.append(float(token))
elif token[0] == '-' and token[1:].isdigit():
res.append(-float(token[1:]))
elif token[0] == '"':
res.append(token[1:])
elif token[0:2] == "#s":
res.append(token[2:])
elif token == '[':
res.append(self._readline(line))
elif token == ']':
return res
elif bindex is None or type(bindex) is not int:
res.append(self._intern(token))
else:
res.append((self._intern(token), bindex))
return res
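    # Illustrative example (assumed input, for clarity only): the line
    #     ['forward', '100', 'repeat', '4', '[', 'right', '90', ']']
    # becomes [#forward, 100.0, #repeat, 4.0, [#right, 90.0]],
    # where #name denotes an interned symbol.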
def _start_eval(self, blklist):
""" Step through the list. """
if self.tw.running_sugar:
self.tw.activity.stop_turtle_button.set_icon("stopiton")
elif self.tw.interactive_mode:
self.tw.toolbar_shapes['stopiton'].set_layer(TAB_LAYER)
self.running = True
self.icall(self.evline, blklist)
yield True
if self.tw.running_sugar:
self.tw.activity.stop_turtle_button.set_icon("stopitoff")
elif self.tw.interactive_mode:
self.tw.toolbar_shapes['stopiton'].hide()
yield False
self.running = False
def icall(self, fcn, *args):
""" Add a function and its arguments to the program stack. """
self.istack.append(self.step)
self.step = fcn(*(args))
def evline(self, blklist):
""" Evaluate a line of code from the list. """
oldiline = self.iline
self.iline = blklist[:]
self.arglist = None
while self.iline:
token = self.iline[0]
self.bindex = None
if type(token) == tuple:
(token, self.bindex) = self.iline[0]
# If the blocks are visible, highlight the current block.
if not self.tw.hide and self.bindex is not None:
self.tw.block_list.list[self.bindex].highlight()
# In debugging modes, we pause between steps and show the turtle.
if self.tw.step_time > 0:
self.tw.active_turtle.show()
endtime = _millisecond() + self.tw.step_time * 100.
while _millisecond() < endtime:
yield True
self.tw.active_turtle.hide()
# 'Stand-alone' booleans are handled here.
if token == self.symopar:
token = self.iline[1]
if type(token) == tuple:
(token, self.bindex) = self.iline[1]
# coding=utf-8
# Copyright 2021 The jax_verify Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides optimizers to use with the NonConvexBound in `nonconvex.py`.
"""
import abc
import math
from typing import Tuple, Callable, Dict, Optional, List
import jax
import jax.numpy as jnp
from jax_verify.src import bound_propagation
from jax_verify.src import ibp
from jax_verify.src import utils
from jax_verify.src.nonconvex import nonconvex
import numpy as np
import optax
Tensor = jnp.ndarray
Index = bound_propagation.Index
ParamSet = nonconvex.ParamSet
BranchPlane = Tuple[Index, int, float]
BranchConstraint = Tuple[BranchPlane, int]
class BoundOptimizer(metaclass=abc.ABCMeta):
"""Abstract Class to define the API of optimizer.
Each subclass defines an optimization algorithm to solve the optimization
problem defined by a NonConvexBound. This is done by overloading the
`optimize` function.
"""
@abc.abstractmethod
def optimize_fun(self, non_convex_bound: nonconvex.NonConvexBound
) -> Callable[[ParamSet, ParamSet], ParamSet]:
pass
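# A minimal sketch (not part of jax_verify) of the optimize_fun contract: a
# no-op optimizer that returns the initial primal variables unchanged.
class _IdentityOptimizer(BoundOptimizer):  # hypothetical, for illustration only
  def optimize_fun(self, non_convex_bound: nonconvex.NonConvexBound
                   ) -> Callable[[ParamSet, ParamSet], ParamSet]:
    def optimize(objectives: ParamSet, ini_var_set: ParamSet) -> ParamSet:
      # A real optimizer would minimize the dual built from `objectives` here.
      return ini_var_set
    return optimize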
class OptimizingConcretizer(nonconvex.Concretizer):
"""Concretizer based on optimizing the intermediate bounds.
This needs to be initialized with an optimizer, and concrete bounds will be
obtained by solving the relaxation for the intermediate activations.
"""
def __init__(
self, optimizer: BoundOptimizer,
max_parallel_nodes: int = 512,
branching_constraints: Optional[List[BranchConstraint]] = None,
branching_optimizer: Optional[optax.GradientTransformation] = None,
branching_opt_number_steps: int = 0):
self._optimizer = optimizer
self._max_parallel_nodes = max_parallel_nodes
self._branching_constraints = []
self._branching_optimizer = None
self._branching_opt_number_steps = branching_opt_number_steps
if branching_constraints is not None:
self._branching_constraints = branching_constraints
# Keep the optimizer for the branching constraints dual variables.
if (branching_optimizer is None) or (branching_opt_number_steps == 0):
raise ValueError('If branching constraints are imposed, an optimizer '
'and a number of optimization steps for the lagrangian'
' variables corresponding to them needs to be '
'provided.')
self._branching_optimizer = branching_optimizer
self._branching_opt_number_steps = branching_opt_number_steps
def accept_input(self, *_args, **_kwargs):
# There is no need to update anything.
pass
def accept_primitive(self, *_args, **_kwargs):
# For now, the optimizer is not dependent on what propagation has been
# achieved, so just reuse the same.
# Potentially, in the future, we could handle adapting the hyperparameters.
pass
def get_bounds(self, to_opt_bound: nonconvex.NonConvexBound
) -> bound_propagation.Bound:
optimize_fun = self._optimizer.optimize_fun(to_opt_bound)
def optimize_chunk(chunk_index: int) -> Tuple[Tensor, Tensor]:
var_shapes, chunk_objectives = _create_opt_problems(
to_opt_bound, chunk_index, self._max_parallel_nodes)
ini_var_set = {key: 0.5 * jnp.ones(shape)
for key, shape in var_shapes.items()}
def solve_problem(objectives: ParamSet) -> Tensor:
# Optimize the bound for primal variables.
opt_var_set = optimize_fun(objectives, ini_var_set)
# Compute the resulting bound
_, bound_vals = to_opt_bound.dual(jax.lax.stop_gradient(opt_var_set),
objectives)
return bound_vals
if any(node_idx <= to_opt_bound.index
for ((node_idx, *_), _) in self._branching_constraints):
# There exists constraints that needs to be taken into account.
# The dual vars per constraint are scalars, but we need to apply them
# for each of the optimization objective.
nb_targets = chunk_objectives[to_opt_bound.index].shape[0]
# Create the dual variables for them.
active_branching_constraints = [
(node_idx, neuron_idx, val, side)
for (node_idx, neuron_idx, val), side in self._branching_constraints
if node_idx <= to_opt_bound.index
]
nb_constraints = len(active_branching_constraints)
dual_vars = [jnp.zeros([nb_targets])] * nb_constraints
# Define the objective function to optimize. The branching constraints
# are lifted into the objective function.
def unbranched_objective(dual_vars: ParamSet) -> Tuple[float, Tensor]:
objectives = chunk_objectives.copy()
base_term = jnp.zeros([nb_targets])
for ((node_idx, neuron_idx, val, side),
branch_dvar) in zip(active_branching_constraints, dual_vars):
# Adjust the objective function to incorporate the dual variables.
if node_idx not in objectives:
objectives[node_idx] = jnp.zeros(var_shapes[node_idx])
# The branching constraint is encoded as:
# side * neuron >= side * val
# (when side==1, this is neuron >= lb,
# and when side==-1, this is -neuron >= -ub )
# To put in a canonical form \lambda_b() <= 0, this is:
# \lambda_b() = side * val - side * neuron
# Lifting the branching constraints takes us from the problem:
# min_{z} f(z)
# s.t. \mu_i() <= z_i <= \eta_i() \forall i
# \lambda_b() <= 0 \forall b
#
# to
# max_{\rho_b} min_{z} f(z) + \rho_b \lambda_b()
# s.t \mu_i() <= z_i <= \eta_i() \forall i
# s.t rho_b >= 0
# Add the term corresponding to the dual variables to the linear
# objective function.
coeff_to_add = -side * branch_dvar
index_to_update = jax.ops.index[:, neuron_idx]
flat_node_obj = jnp.reshape(objectives[node_idx], (nb_targets, -1))
flat_updated_node_obj = jax.ops.index_add(flat_node_obj,
index_to_update,
coeff_to_add)
updated_node_obj = jnp.reshape(flat_updated_node_obj,
var_shapes[node_idx])
objectives[node_idx] = updated_node_obj
# Don't forget the terms based on the bound.
base_term = base_term + (side * val * branch_dvar)
network_term = solve_problem(objectives)
bound = network_term + base_term
return bound.sum(), bound
def evaluate_bound(ini_dual_vars: List[Tensor]) -> Tensor:
ini_state = self._branching_optimizer.init(ini_dual_vars)
eval_and_grad_fun = jax.grad(unbranched_objective, argnums=0,
has_aux=True)
# The carry consists of:
# - The best set of dual variables seen so far.
# - The current set of dual variables.
# - The best bound obtained so far.
# - The state of the optimizer.
# For each of the step, we will:
# - Evaluate the bounds by the current set of dual variables.
# - Update the best set of dual variables if progress was achieved.
# - Do an optimization step on the current set of dual variables.
# This way, we are guaranteed that we keep track of the dual variables
# producing the best bound at the end.
def opt_step(
carry: Tuple[List[Tensor], List[Tensor],
Tensor, optax.OptState], _
) -> Tuple[Tuple[List[Tensor], List[Tensor],
Tensor, optax.OptState], None]:
best_lagdual, lagdual, best_bound, state = carry
# Compute the bound and their gradients.
lagdual_grads, new_bound = eval_and_grad_fun(lagdual)
# Update the lagrangian dual variables for the best bound seen.
improve_best = new_bound > best_bound
new_best_lagdual = []
for best_dvar, new_dvar in zip(best_lagdual, lagdual):
new_best_lagdual.append(jnp.where(improve_best,
new_dvar, best_dvar))
# Update the best bound seen
new_best_bound = jnp.maximum(best_bound, new_bound)
# Perform optimization step
updates, new_state = self._branching_optimizer.update(
lagdual_grads, state, lagdual)
unc_dual = optax.apply_updates(lagdual, updates)
new_lagdual = jax.tree_map(lambda x: jnp.maximum(x, 0.), unc_dual)
return ((new_best_lagdual, new_lagdual, new_best_bound, new_state),
None)
dummy_bound = -float('inf')*jnp.ones([nb_targets])
initial_carry = (ini_dual_vars, ini_dual_vars, dummy_bound, ini_state)
(best_lagdual, *_), _ = jax.lax.scan(
opt_step, initial_carry, None,
length=self._branching_opt_number_steps)
_, bound_vals = unbranched_objective(
jax.lax.stop_gradient(best_lagdual))
return bound_vals
bound_vals = evaluate_bound(dual_vars)
else:
bound_vals = solve_problem(chunk_objectives)
chunk_lbs, chunk_ubs = _unpack_opt_problem(bound_vals)
return chunk_lbs, chunk_ubs
return _chunked_optimization(to_opt_bound.shape,
self._max_parallel_nodes,
optimize_chunk)
def _chunked_optimization(
bound_shape: Tuple[int, ...],
max_parallel_nodes: int,
optimize_chunk: Callable[[int], Tuple[Tensor, Tensor]],
) -> ibp.IntervalBound:
"""Perform optimization of the target in chunks.
Args:
bound_shape: Shape of the bound to compute
    max_parallel_nodes: How many activations to optimize at once. If 0,
      optimize all the nodes simultaneously.
optimize_chunk: Function to optimize a chunk and return updated bounds.
Returns:
bounds: Optimized bounds.
"""
nb_opt = int(np.prod(bound_shape))
if (max_parallel_nodes == 0) or (nb_opt <= max_parallel_nodes):
flat_lbs, flat_ubs = optimize_chunk(0)
else:
nb_opt_chunk = math.ceil(nb_opt / max_parallel_nodes)
chunk_indices = jnp.arange(nb_opt_chunk)
(map_lbs, map_ubs) = jax.lax.map(optimize_chunk, chunk_indices)
# Remove the padding elements
flat_lbs = jnp.reshape(map_lbs, (-1,))[:nb_opt]
flat_ubs = jnp.reshape(map_ubs, (-1,))[:nb_opt]
lbs = jnp.reshape(flat_lbs, bound_shape)
ubs = jnp.reshape(flat_ubs, bound_shape)
bounds = ibp.IntervalBound(lbs, ubs)
return bounds
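# A minimal sketch (not part of jax_verify) of the chunking contract:
# optimize_chunk returns flat per-chunk (lower, upper) arrays, possibly padded,
# and _chunked_optimization trims the padding and restores the bound shape.
def _example_chunked_bounds() -> ibp.IntervalBound:  # hypothetical helper
  bound_shape = (3, 5)  # 15 scalar bounds, solved 4 at a time (last chunk padded)
  def fake_chunk(chunk_index: jnp.ndarray) -> Tuple[Tensor, Tensor]:
    flat = jnp.zeros([4])  # max_parallel_nodes entries per chunk
    return flat - 1.0, flat + 1.0  # constant dummy lower/upper bounds
  return _chunked_optimization(bound_shape, 4, fake_chunk)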
def _create_opt_problems(
non_convex_bound: nonconvex.NonConvexBound,
chunk_index: int,
nb_parallel_nodes: int,
) -> Tuple[Dict[Index, Tuple[int, ...]], ParamSet]:
"""Define the objective function and the necessary variables shape.
Iteratively yields the objectives to minimize in order to limit memory usage.
Args:
non_convex_bound: Bound for which to create the optimization problems.
chunk_index: Index of the optimization chunk to generate.
nb_parallel_nodes: How large should the optimization chunks be. If 0,
optimize all problems at once.
Returns:
var_to_opt: shapes of the variables to optimize to compute the bounds.
objectives_by_layer: Objectives to minimize, in the form of a dictionary
mapping the position of activations to the linear coefficients of the
objective function.
"""
# Create the objective matrix
lb_obj = utils.objective_chunk(
non_convex_bound.shape, chunk_index, nb_parallel_nodes)
# Get the objective for the upper bounds.
ub_obj = -lb_obj
obj = jnp.concatenate([lb_obj, ub_obj], axis=0)
# Generate the shape of the variables necessary to solve the problem
var_to_opt = {}
for pos, var_shape in non_convex_bound.variables.items():
var_to_opt[pos] = (obj.shape[0],) + var_shape
objectives_by_layer = {non_convex_bound.index: obj}
return var_to_opt, objectives_by_layer
def _unpack_opt_problem(dual_vals: Tensor) -> Tuple[Tensor, Tensor]:
"""Extract the lower bounds and upper bounds from the result |