text stringlengths 0 1.05M | meta dict |
|---|---|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from collections import defaultdict
import copy
class BehaviorTree():
    """
    Object representing a decomposed tree of behavior. Behaves much like
    a dictionary, with an additional representation of the tree structure
    of the data.

    Parameters
    ----------
    structure : list, optional
        Representation of parent-child relationships between nodes.
        A subtree is a two-element list ``[root, [child, child, ...]]``;
        a leaf is just the node name.
    contents : defaultdict, optional
        Initial node values
    units : defaultdict, optional
        Mapping of node names to units

    Examples
    --------
    >>> tree = BehaviorTree(['Consumption',
    ...                      ['AS Prob', 'Intensity']])
    >>> tree['Consumption'] = 5
    >>> tree['AS Prob'] = 1
    >>> tree['Intensity'] = 2
    >>> tree.structure
    ['Consumption', ['AS Prob', 'Intensity']]
    """

    def __init__(self, structure=None, contents=None, units=None):
        # None sentinels instead of mutable defaults: each instance gets
        # its own defaultdicts.
        if contents is None:
            contents = defaultdict(float)
        if units is None:
            units = defaultdict(str)
        self.contents = contents
        self.structure = structure
        self.units = units

    def copy(self):
        """
        Return a deep copy of the tree; the copy shares no mutable state
        with the original.
        """
        return BehaviorTree(structure=copy.deepcopy(self.structure),
                            contents=copy.deepcopy(self.contents),
                            units=copy.deepcopy(self.units))

    def __str__(self):
        """
        Return a string representation for printing: a plain dict dump
        when no structure is known, otherwise an indented tree.
        """
        if self.structure is None:
            return self.contents.__str__()
        else:
            return self._display_tree()

    def __getitem__(self, key):
        """
        Return the value of the node specified by `key`.

        Parameters
        ----------
        key : string
            Name of a node in the tree
        """
        return self.contents[key]

    def __setitem__(self, key, value):
        """
        Set the value of `key` to `value`.

        Parameters
        ----------
        key : string
            Name of a node in the tree
        value
            Value to be used for that node
        """
        self.contents[key] = value

    def __iter__(self):
        """
        Return an iterator over the nodes in the tree.
        """
        return iter(self.contents)

    def _display_tree(self, level=0, struct=None):
        """
        Return a string representation of the tree structure,
        based on relationships in the attribute `structure`.

        Parameters
        ----------
        level : int
            The indentation level for display
        struct : list or str, optional
            Subtree to display; defaults to the whole tree. Threading the
            subtree through this parameter replaces the previous behaviour
            of deep-copying the entire tree once per child node (O(n^2)).
        """
        if struct is None:
            struct = self.structure
        if isinstance(struct, list):
            root, children = struct[0], struct[1]
        else:
            root, children = struct, []
        value = self[root]
        # Node values may be scalars or lists (e.g. after `merge`).
        if isinstance(value, list):
            formatted_value = " ".join("{:.6f}".format(v) for v in value)
        else:
            formatted_value = "{:.6f}".format(value)
        result = "\n" + " " * 5 * level + \
            "{}: {} {}".format(root, formatted_value, self.units[root])
        for child in children:
            result += self._display_tree(level + 1, struct=child)
        return result

    @staticmethod
    def merge(*args):
        """
        Merge several trees into one compound tree.

        Parameters
        ----------
        *args : variable-length argument list of BehaviorTree objects
            One or more trees to be merged.

        Returns
        -------
        new_tree : BehaviorTree
            Tree with same structure as input trees, where each node
            is a list containing the node values from the input trees.

        Examples
        --------
        >>> tree = BehaviorTree(contents={'a': 1, 'b': 2})
        >>> tree2 = BehaviorTree(contents={'a': -1.5, 'b': 20})
        >>> BehaviorTree.merge(tree, tree2).contents
        defaultdict(list, {'a': [1, -1.5], 'b': [2, 20]})
        """
        if len(args) == 0:
            raise TypeError("Expected at least one argument")
        new_tree = BehaviorTree(structure=args[0].structure,
                                contents=defaultdict(list))
        for tree in args:
            # All inputs must share the same structure. AssertionError is
            # kept (rather than ValueError) for backward compatibility
            # with existing callers.
            assert tree.structure == new_tree.structure
            for k in tree:
                new_tree[k].append(tree[k])
        return new_tree

    def summarize(self, f):
        """
        Computes a summary statistic for the nodes in a tree.

        Parameters
        ----------
        f : function
            A function to be applied to the contents in each node. Most
            often, this will take in a list and return a single number.

        Returns
        -------
        A new tree with the same structure, where each node value tree[k]
        is replaced by f(tree[k]).
        """
        results = self.copy()
        for k in results:
            results[k] = f(results[k])
        return results
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/behavior/behavior_tree.py",
"copies": "3",
"size": "5368",
"license": "bsd-2-clause",
"hash": -8635206925353445000,
"line_mean": 28.3333333333,
"line_max": 73,
"alpha_frac": 0.5219821162,
"autogenerated": false,
"ratio": 4.700525394045534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 183
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import caniusepython3 as ciu
from . import utils
from . import classifier_finder
class pypi_scanner(object):
    """Scan PyPI for Python-2-only packages whose home page is on GitHub."""

    def __init__(self):
        pass

    def _get_all_github_packages(self):
        # Search PyPI for packages whose registered home page mentions github.
        with ciu.pypi.pypi_client() as client:
            hits = client.search({'home_page': 'github'})
            return [hit['name'] for hit in hits]

    def _browse_classifier(self, classifiers):
        """
        classifiers - list of classifiers
        """
        with ciu.pypi.pypi_client() as client:
            hits = client.browse(list(classifiers))
            return [hit[0] for hit in hits]

    def _get_all_python_packages(self):
        # Every package name registered on PyPI.
        with ciu.pypi.pypi_client() as client:
            return client.list_packages()

    def _get_all_python3_packages(self):
        # Packages advertising a "Programming Language :: Python :: 3"
        # trove classifier (including sub-classifiers).
        finder = classifier_finder.classifier_finder(
            'Programming Language :: Python :: 3')
        return self._browse_classifier(finder.get_classifiers())

    def _get_python2_only_packages(self):
        """
        returns a list of all PyPI packages that
        is python 2 compatible only.
        """
        every_package = set(self._get_all_python_packages())
        python3 = set(self._get_all_python3_packages())
        return list(every_package.difference(python3))

    def get_python2_github_packages(self):
        """
        returns a list of python 2 only packages with github repos
        """
        python2_only = set(self._get_python2_only_packages())
        on_github = set(self._get_all_github_packages())
        return list(python2_only.intersection(on_github))
"repo_name": "PythonCharmers/autoporter",
"path": "pypi_scanner.py",
"copies": "1",
"size": "2078",
"license": "mit",
"hash": 5686388699354582000,
"line_mean": 32.5322580645,
"line_max": 86,
"alpha_frac": 0.6385948027,
"autogenerated": false,
"ratio": 3.9356060606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5074200863306061,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import pandas as pd
from mousestyles.path_diversity.path_features import angle_between
def compute_advanced(path_obj):
    r"""
    Returns dictionary containing several advanced features of path.

    The features are the radius, center angles, area covered by the path,
    area of the rectangle spanned by the path, and
    absolute distance between start and end points.

    Parameters
    ----------
    path_obj : pandas.DataFrame
        Must contain the columns 'x' and 'y' (the path coordinates);
        the validation below checks for exactly these lowercase keys.
        The length must be greater than 2.

    Returns
    -------
    radius : list
        each element is the distance between center point and
        each point in the path. The length equals to the length
        of the path_obj.
    center_angles : list
        each element is the center angle generated by 2 adjacent
        radius. The length equals to the length of the radius minus 1.
    area_cov : numpy float object
        area covered by the path.
        Computed by radius and center angles.
    area_rec : numpy float object
        area of the rectangle spanned by the path.
    abs_distance : numpy float object
        the distance between the start and end points in a path.

    Examples
    --------
    >>> movement = data.load_movement(1,2,1)
    >>> adv_features = compute_advanced(movement[5:10])
    """
    if not isinstance(path_obj, pd.core.frame.DataFrame):
        raise TypeError("path_obj must be pandas DataFrame")
    if not set(path_obj.keys()).issuperset(['x', 'y']):
        raise ValueError("the keys of path_obj must contain 'x', 'y'")
    if len(path_obj) <= 2:
        raise ValueError("path_obj must contain at least 3 rows")
    # Computes edge points (axis-aligned bounding box of the path)
    edge_points = {'xmin': np.min(path_obj.x), 'xmax': np.max(path_obj.x),
                   'ymin': np.min(path_obj.y), 'ymax': np.max(path_obj.y)}
    # Computes area of rectangle
    area_rec = (edge_points['xmax'] - edge_points['xmin']) * \
        (edge_points['ymax'] - edge_points['ymin'])
    # Computes center point (of the bounding box, not the centroid)
    center = {'x': (edge_points['xmin'] + edge_points['xmax']) / 2,
              'y': (edge_points['ymin'] + edge_points['ymax']) / 2}
    # Computes radius: one vector from the center to each path point
    indices = path_obj.index
    vectors = [path_obj.loc[i, 'x':'y'] -
               [center['x'], center['y']] for i in indices]
    radius = [np.linalg.norm(v) for v in vectors]
    # Computes center angles between consecutive radius vectors
    center_angles = [angle_between(list(v1), list(v2)) for
                     v1, v2 in zip(vectors[1:], vectors[:-1])]
    # Computes area covered as a sum of triangle areas
    # (1/2 * r1 * r2 * sin(theta) per consecutive pair)
    zipped = zip(radius[1:], radius[:-1], center_angles)
    areas = [v1 * v2 * np.sin(theta) / 2 for v1, v2, theta in zipped]
    area_cov = sum(areas)
    # Computes distance between start and end points
    initial = path_obj.loc[path_obj.index[0], 'x':'y']
    end = path_obj.loc[path_obj.index[-1], 'x':'y']
    abs_distance = np.sqrt((end['x'] - initial['x']) ** 2 +
                           (end['y'] - initial['y']) ** 2)
    return {'radius': radius, 'center_angles': center_angles,
            'area_cov': area_cov, 'area_rec': area_rec,
            'abs_distance': abs_distance}
| {
"repo_name": "changsiyao/mousestyles",
"path": "mousestyles/path_diversity/path_features_advanced.py",
"copies": "3",
"size": "3266",
"license": "bsd-2-clause",
"hash": 1282599519518683400,
"line_mean": 33.7446808511,
"line_max": 74,
"alpha_frac": 0.6059399878,
"autogenerated": false,
"ratio": 3.7800925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5886032580392593,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
"""
label
==========
Module for string-like labels
"""
# Public names exported by this module; the Label class itself is not listed.
__all__ = ['prime_label', 'unprime_label', 'noprime_label', 'prime_level',
           'unique_label']
import uuid
class Label(str):
    """Wrapper class for priming labels.

    A ``Label`` behaves exactly like the string it wraps but can record a
    ``parent`` label, forming a chain from a primed label back to the label
    it was derived from. The parent may be another ``Label`` or a plain str.
    """

    def __new__(cls, value, **kwargs):
        # str is immutable, so the string value must be fixed in __new__;
        # the extra keyword arguments are consumed by __init__.
        return str.__new__(cls, value)

    def __init__(self, label, parent=None):
        self._parent = parent
        if isinstance(label, Label):
            # if input is a Label object copy its properties
            if parent is None:
                self._parent = label._parent

    @property
    def parent(self):
        """Return parent label (None if this label has no parent)."""
        return self._parent

    @property
    def origin(self):
        """Return origin label (the root of the parent chain)."""
        origin = self
        # Stop before stepping onto a None parent. Previously a chain
        # rooted at a parentless Label walked one step too far and
        # returned None instead of the root label (inconsistent with
        # the `parents` property below, which checks for None).
        while hasattr(origin, "parent") and origin.parent is not None:
            origin = origin.parent
        return origin

    @property
    def parents(self):
        """Return number of parents (i.e. priming level) for label"""
        tmp = self
        level = 0
        while hasattr(tmp, "parent"):
            if tmp.parent is not None:
                tmp = tmp.parent
                level += 1
            else:
                break
        return level
def prime_label(label, prime="'"):
    """Return `label` with one `prime` appended; the result remembers
    `label` as its parent."""
    primed_name = str(label) + prime
    return Label(primed_name, parent=label)
def unprime_label(label, prime="'"):
    """Strip a single trailing `prime` from `label`, returning its parent.

    Raises ValueError if `label` has no parent or if the parent plus
    `prime` does not reproduce `label`.
    """
    try:
        parent = label.parent
    except AttributeError:
        raise ValueError("label is not primed")
    if label == str(parent) + prime:
        return parent
    raise ValueError("label is not primed with \"" + prime + "\"")
def noprime_label(label):
    """Remove all primes from a label object.

    Objects without an `origin` attribute (e.g. plain strings) are
    returned unchanged.
    """
    return getattr(label, "origin", label)
def prime_level(label):
    """Return number of primes on label object.

    Objects without a `parents` attribute (e.g. plain strings) are
    treated as unprimed and yield 0.
    """
    return getattr(label, "parents", 0)
def unique_label():
    """Generate a long, random string that is very likely to be unique."""
    random_id = uuid.uuid4()
    return "{0}".format(random_id)
| {
"repo_name": "andrewdarmawan/tncontract",
"path": "tncontract/label.py",
"copies": "1",
"size": "2196",
"license": "mit",
"hash": -1028319730142160600,
"line_mean": 22.8695652174,
"line_max": 74,
"alpha_frac": 0.5683060109,
"autogenerated": false,
"ratio": 4.339920948616601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 92
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
"""
matrices
==========
Often used matrices
"""
import numpy as np
#
# Pauli spin 1/2 operators:
#
def sigmap():
    """Raising (sigma+) operator for a spin-1/2 system."""
    return np.matrix([[0.0, 1.0],
                      [0.0, 0.0]])
def sigmam():
    """Lowering (sigma-) operator for a spin-1/2 system."""
    return np.matrix([[0.0, 0.0],
                      [1.0, 0.0]])
def sigmax():
    """Pauli x operator (the sum sigma- + sigma+, written out)."""
    return np.matrix([[0.0, 1.0],
                      [1.0, 0.0]])
def sigmay():
    """Pauli y operator (-1j*sigma+ + 1j*sigma-, written out)."""
    return np.matrix([[0.0 + 0.0j, -1.0j],
                      [1.0j, 0.0 + 0.0j]])
def sigmaz():
    """Pauli z operator: diagonal matrix with eigenvalues +1 and -1."""
    return np.matrix(np.diag([1.0, -1.0]))
def destroy(dim):
    """
    Destruction (lowering) operator.

    Parameters
    ----------
    dim : int
        Dimension of Hilbert space.
    """
    # Matrix elements sqrt(1), sqrt(2), ..., sqrt(dim - 1) on the
    # first superdiagonal.
    offdiag = np.sqrt(np.arange(1, dim))
    return np.matrix(np.diag(offdiag, 1))
def create(dim):
    """
    Creation (raising) operator.

    Parameters
    ----------
    dim : int
        Dimension of Hilbert space.
    """
    # Hermitian conjugate of the (real) destruction operator: the same
    # sqrt(n) entries, placed on the first subdiagonal.
    return np.matrix(np.diag(np.sqrt(np.arange(1, dim)), -1))
def identity(dim):
    """
    Identity operator

    Parameters
    ----------
    dim : int
        Dimension of Hilbert space.
    """
    return np.matrix(np.eye(dim))
def basis(dim, i):
    """
    dim x 1 column vector with all zeros except a one at row i
    """
    column = np.matrix(np.zeros((dim, 1)))
    column[i, 0] = 1.0
    return column
| {
"repo_name": "andrewdarmawan/tncontract",
"path": "tncontract/matrices.py",
"copies": "1",
"size": "1234",
"license": "mit",
"hash": -4919004184372474000,
"line_mean": 14.425,
"line_max": 62,
"alpha_frac": 0.5380875203,
"autogenerated": false,
"ratio": 3.437325905292479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9475413425592479,
"avg_score": 0,
"num_lines": 80
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
"""
onedim_core
==========
Core module for onedimensional tensor networks
"""
# Public names exported by this module via `from ... import *`.
__all__ = ['MatrixProductState', 'MatrixProductStateCanonical',
           'MatrixProductOperator', 'OneDimensionalTensorNetwork',
           'check_canonical_form_mps',
           'contract_mps_mpo', 'contract_multi_index_tensor_with_one_dim_array',
           'contract_virtual_indices', 'frob_distance_squared',
           'inner_product_mps', 'ladder_contract', 'left_canonical_form_mps',
           'mps_complex_conjugate', 'reverse_mps', 'right_canonical_form_mps',
           'svd_compress_mps', 'variational_compress_mps', 'tensor_to_mpo',
           'tensor_to_mps',
           'right_canonical_to_canonical', 'left_canonical_to_canonical',
           'canonical_to_right_canonical', 'canonical_to_left_canonical',
           ]
import numpy as np
from tncontract import tensor as tsr
from tncontract.label import unique_label
from tncontract.onedim.onedim_utils import init_mps_allzero
class OneDimensionalTensorNetwork:
    """
    A one-dimensional tensor network. MatrixProductState and
    MatrixProductOperator are subclasses of this class.

    An instance of `OneDimensionalTensorNetwork` contains a one-dimensional
    array of tensors in its `data` attribute. This one dimensional array is
    specified in the `tensors` argument when initialising the array. Each
    tensor in `data` requires a left index and a right index. The right index
    is taken to be contracted with the left index of the next tensor in the
    array, while the left index is taken to be contracted with the right index
    of the previous tensor in the array. All left indices are assumed to have
    the same label, and likewise for the right indices. They are specified in
    the initialisation of array (by default they are assumed to be "left" and
    "right" respectively) and will be stored in the attributes `left_label`
    and `right_label` of the OneDimensionalTensorNetwork instance.
    """

    def __init__(self, tensors, left_label="left", right_label="right"):
        self.left_label = left_label
        self.right_label = right_label
        # Copy input tensors to the data attribute (so the network never
        # shares tensor memory with the caller).
        self.data = np.array([x.copy() for x in tensors])
        # Every tensor will have three indices corresponding to "left",
        # "right" and "phys" labels. If only two are specified for left and
        # right boundary tensors (for open boundary conditions) an extra
        # dummy index of dimension 1 will be added.
        for x in self.data:
            if left_label not in x.labels: x.add_dummy_index(left_label)
            if right_label not in x.labels: x.add_dummy_index(right_label)

    # Container emulation: delegate to the underlying numpy array so the
    # network can be iterated, indexed and sliced like a sequence of tensors.
    def __iter__(self):
        return self.data.__iter__()

    def __len__(self):
        return self.data.__len__()

    def __getitem__(self, key):
        return self.data.__getitem__(key)

    def __setitem__(self, key, value):
        self.data.__setitem__(key, value)

    def copy(self):
        """Alternative the standard copy method, returning a
        OneDimensionalTensorNetwork that is not
        linked in memory to the previous ones."""
        return OneDimensionalTensorNetwork([x.copy() for x in self],
                                           self.left_label, self.right_label)

    def reverse(self):
        """Reverse the site order and swap the roles of the left and right
        virtual labels."""
        self.data = self.data[::-1]
        temp = self.left_label
        self.left_label = self.right_label
        self.right_label = temp

    def complex_conjugate(self):
        """Will complex conjugate every entry of every tensor in array."""
        for x in self.data:
            x.conjugate()

    def swap_gate(self, i, threshold=1e-15):
        """
        Apply a swap gate swapping all "physical" (i.e., non-"left" and
        non-"right") indices for site i and i+1 of a
        OneDimensionalTensorNetwork.

        Parameters
        ----------
        i : int
        threshold : float
            Lower bound on the magnitude of singular values to keep. Singular
            values less than or equal to this value will be truncated.

        Notes
        -----
        The swap is implemented by SVD as described
        in Y.-Y. Shi et al, Phys. Rev. A 74, 022320 (2006).
        """
        A = self[i]
        B = self[i + 1]
        # Physical labels are everything except the two virtual labels.
        A_phys_labels = [l for l in A.labels if l != self.left_label and
                         l != self.right_label]
        B_phys_labels = [l for l in B.labels if l != self.left_label and
                         l != self.right_label]
        # Prime A's physical labels so they remain distinct from B's after
        # contracting the shared virtual bond.
        A.prime_label(A_phys_labels)
        t = tsr.contract(A, B, self.right_label, self.left_label)
        # Split the two-site tensor so B's physical labels end up on the
        # left site and A's (primed) on the right site.
        U, V, _ = tsr.truncated_svd(t, [self.left_label] + B_phys_labels,
                                    chi=0, threshold=threshold,
                                    absorb_singular_values='both')
        U.replace_label('svd_in', self.right_label)
        self[i] = U
        V.unprime_label(A_phys_labels)
        V.replace_label('svd_out', self.left_label)
        self[i + 1] = V

    def replace_labels(self, old_labels, new_labels):
        """Run `Tensor.replace_label` method on every tensor in `self` then
        replace `self.left_label` and `self.right_label` appropriately."""
        # Accept a single label or a list of labels.
        if not isinstance(old_labels, list):
            old_labels = [old_labels]
        if not isinstance(new_labels, list):
            new_labels = [new_labels]
        for x in self.data:
            x.replace_label(old_labels, new_labels)
        # Keep the network's own virtual-label attributes in sync.
        if self.left_label in old_labels:
            self.left_label = new_labels[old_labels.index(self.left_label)]
        if self.right_label in old_labels:
            self.right_label = new_labels[old_labels.index(self.right_label)]

    def standard_virtual_labels(self, suffix=""):
        """Replace `self.left_label` with "left"+`suffix` and
        `self.right_label` with "right"+`suffix`."""
        self.replace_labels([self.left_label, self.right_label],
                            ["left" + suffix, "right" + suffix])

    def unique_virtual_labels(self):
        """Replace `self.left_label` and `self.right_label` with unique labels
        generated by tensor.unique_label()."""
        self.replace_labels([self.left_label, self.right_label],
                            [unique_label(), unique_label()])

    def leftdim(self, site):
        """Return left index dimension for site"""
        return self.data[site].index_dimension(self.left_label)

    def rightdim(self, site):
        """Return right index dimension for site"""
        return self.data[site].index_dimension(self.right_label)

    def bonddims(self):
        """Return list of all bond dimensions"""
        if self.nsites == 0:
            return []
        # Left bond of the first site followed by the right bond of every
        # site, giving nsites+1 entries.
        bonds = [self.leftdim(0)]
        for i in range(self.nsites):
            bonds.append(self.rightdim(i))
        return bonds

    @property
    def nsites(self):
        # Total number of site tensors in the network.
        return len(self.data)

    @property
    def nsites_physical(self):
        # Same as nsites here; presumably overridden by subclasses whose
        # networks contain non-physical sites -- TODO confirm.
        return self.nsites
class MatrixProductState(OneDimensionalTensorNetwork):
"""Matrix product state"is a list of tensors, each having and index
labelled "phys" and at least one of the indices "left", "right"
Input is a list of tensors, with three up to three index labels, If the
labels aren't already specified as "left", "right", "phys" need to specify
which labels correspond to these using arguments left_label, right_label,
phys_label. The tensors input will be copied, and will not point in memory
to the original ones."""
    def __init__(self, tensors, left_label="left", right_label="right",
                 phys_label="phys"):
        # Virtual (left/right) labels and dummy boundary indices are handled
        # by the base-class constructor; record which label marks the
        # physical index of each site tensor.
        OneDimensionalTensorNetwork.__init__(self, tensors,
            left_label=left_label, right_label=right_label)
        self.phys_label = phys_label
def __repr__(self):
return ("MatrixProductState(tensors=%r, left_label=%r, right_label=%r,"
"phys_label=%r)" % (self.data, self.left_label, self.right_label,
self.phys_label))
def __str__(self):
return ("MatrixProductState object: " +
"sites = " + str(len(self)) +
", left_label = " + self.left_label +
", right_label = " + self.right_label +
", phys_label = " + self.phys_label)
def copy(self):
"""Return an MPS that is not linked in memory to the original."""
return MatrixProductState([x.copy() for x in self], self.left_label,
self.right_label, self.phys_label)
    def left_canonise(self, start=0, end=-1, chi=None, threshold=1e-14,
                      normalise=False, qr_decomposition=False):
        """
        Perform left canonisation of MPS.

        Left canonisation refers to putting the MatrixProductState in a form
        where the tensors are isometric maps from the left and physical
        indices to the right index. This is achieved using successive
        singular-value decompositions and exploiting the gauge freedom of the
        MPS. For more details, see U. Schollwock, Ann. Phys. 326 (2011)
        96-192. If no arguments are supplied, every tensor will be put in
        this form, i.e. the MPS will be put in left-canonical form.
        Canonisation of a segment is also possible by specifying the `start`
        and `end` parameters. Truncating singular values can be performed by
        specifying `chi` and `threshold`. If `normalise`=True and the entire
        MPS is to be left-canonised, the resulting MPS will represent a
        normalised state. If only a segment of the MPS is to be left
        canonised, then `normalise` will have no effect (the resulting state
        will have same norm as input).

        Parameters
        ----------
        start : int
        end : int
            The segment of the MPS to be left canonised. All tensors from
            `start` to `end`-1 will be left canonised. `end`=-1 implies that
            the MPS will be canonised to the right boundary.
        chi : int
            Maximum number of singular values of each tensor to keep after
            performing singular-value decomposition.
        threshold : float
            Lower bound on the magnitude of singular values to keep. Singular
            values less than or equal to this value will be truncated.
        normalise : bool
            False value indicates resulting state will have same norm as
            original. True value indicates that, if the entire MPS is to be
            left canonised, it will be divided by a factor such that it is
            normalised (have norm=1). Has no effect if only a segment of the
            MPS is to be left canonised (resulting state will have the same
            norm as input).
        qr_decomposition : bool
            True specifies that a QR decomposition is performed rather than an
            SVD (which may improve performance). No truncation of singular
            values is possible with a QR decomposition, thus `chi` and
            `threshold` arguments are ignored.
        """
        N = len(self)
        if end == -1:
            end = N
        if qr_decomposition:
            for i in range(start, end):
                if i == N - 1:
                    # The final QR has no right index, so R are just
                    # scalars. R is the norm of the state.
                    norm = np.linalg.norm(self[i].data)
                    # If the norm of the state is zero, convert self a zero
                    # product state.
                    if norm==0.0:
                        for k in range(N):
                            self[k].data=np.zeros((self[k].index_dimension(self.phys_label), 1,1))
                            self[k].labels=[self.phys_label, self.left_label, self.right_label]
                        return
                    if normalise == True and start == 0:  # Whole chain is canonised
                        self[i].data = self[i].data / norm
                    return
                else:
                    qr_label = unique_label()
                    Q, R = tsr.tensor_qr(self[i], [self.phys_label,
                                                   self.left_label],
                                         qr_label=qr_label)
                    # Replace tensor at site i with Q
                    Q.replace_label(qr_label + "in", self.right_label)
                    self[i] = Q
                    # Absorb R into next tensor
                    self[i + 1] = tsr.contract(R, self[i + 1],
                                               self.right_label,
                                               self.left_label)
                    self[i + 1].replace_label(qr_label + "out",
                                              self.left_label)
        else:
            # At each step will divide by a constant so that the largest
            # singular value of S is 1. Will store the product of these
            # constants in `norm`
            norm = 1
            for i in range(start, end):
                if i == N - 1:
                    # The final SVD has no right index, so S and V are just
                    # scalars. S is the norm of the state.
                    # If the norm of the state is zero, convert self a zero
                    # product state.
                    if np.linalg.norm(self[i].data)==0.0:
                        for k in range(N):
                            self[k].data=np.zeros((self[k].index_dimension(self.phys_label), 1,1))
                            self[k].labels=[self.phys_label, self.left_label, self.right_label]
                        return
                    if normalise == True and start == 0:  # Whole chain is canonised
                        self[i].data = self[i].data / np.linalg.norm(self[i].data)
                    else:
                        # Reapply the accumulated scale factors so the state
                        # keeps its original norm.
                        self[i].data = self[i].data * norm
                    return
                else:
                    svd_label = unique_label()
                    U, S, V = tsr.tensor_svd(self[i], [self.phys_label,
                                                       self.left_label],
                                             svd_label=svd_label)
                    # Truncate to threshold and to specified chi
                    singular_values = np.diag(S.data)
                    largest_singular_value = singular_values[0]
                    if largest_singular_value==0.0:
                        # Return an MPS of same size but all entries zero
                        # And virtual bond dimension 1
                        # i.e. a product state of zeros
                        for k in range(N):
                            self[k].data=np.zeros((self[k].index_dimension(self.phys_label), 1,1))
                            self[k].labels=[self.phys_label, self.left_label, self.right_label]
                        return
                    # Normalise S
                    singular_values = singular_values / largest_singular_value
                    norm *= largest_singular_value
                    singular_values_to_keep = singular_values[singular_values >
                                                              threshold]
                    if chi:
                        singular_values_to_keep = singular_values_to_keep[:chi]
                    S.data = np.diag(singular_values_to_keep)
                    # Truncate corresponding singular index of U and V
                    U.data = U.data[:, :, 0:len(singular_values_to_keep)]
                    V.data = V.data[0:len(singular_values_to_keep)]
                    U.replace_label(svd_label + "in", self.right_label)
                    self[i] = U
                    # Absorb V then S into the next site tensor.
                    self[i + 1] = tsr.contract(V, self[i + 1],
                                               self.right_label,
                                               self.left_label)
                    self[i + 1] = tsr.contract(S, self[i + 1],
                                               [svd_label + "in"],
                                               [svd_label + "out"])
                    self[i + 1].replace_label(svd_label + "out",
                                              self.left_label)
            # Reabsorb normalisation factors into next tensor
            # Note if i==N-1 (end of chain), this will not be reached
            # and normalisation factors will be taken care of in the earlier
            # block.
            if i == end - 1:
                self[i + 1].data *= norm
def right_canonise(self, start=0, end=-1, chi=None, threshold=1e-14,
normalise=False, qr_decomposition=False):
"""Perform right canonisation of MPS. Identical to `left_canonise`
except that process is mirrored (i.e. canonisation is performed from
right to left). `start` and `end` specify the interval to be canonised.
Notes
-----
The first tensor to be canonised is `end`-1 and the final tensor to be
canonised is `start`"""
self.reverse()
N = len(self)
if end == -1:
end = N
self.left_canonise(start=N - end, end=N - start, chi=chi,
threshold=threshold, normalise=normalise,
qr_decomposition=qr_decomposition)
self.reverse()
def replace_labels(self, old_labels, new_labels):
"""run `tensor.replace_label` method on every tensor in `self` then
replace `self.left_label`, `self.right_label` and `self.phys_label`
appropriately."""
if not isinstance(old_labels, list):
old_labels = [old_labels]
if not isinstance(new_labels, list):
new_labels = [new_labels]
for x in self.data:
x.replace_label(old_labels, new_labels)
if self.left_label in old_labels:
self.left_label = new_labels[old_labels.index(self.left_label)]
if self.right_label in old_labels:
self.right_label = new_labels[old_labels.index(self.right_label)]
if self.phys_label in old_labels:
self.phys_label = new_labels[old_labels.index(self.phys_label)]
def standard_labels(self, suffix=""):
"""
overwrite self.labels, self.left_label, self.right_label,
self.phys_label with standard labels "left", "right", "phys"
"""
self.replace_labels([self.left_label, self.right_label,
self.phys_label], ["left" + suffix, "right" + suffix, "phys" + suffix])
    def check_canonical_form(self, threshold=1e-14, print_output=True):
        """Determines which tensors in the MPS are left canonised, and which
        are right canonised. Returns the index of the first tensor (starting
        from left) that is not left canonised, and the first tensor (starting
        from right) that is not right canonised. If print_output=True, will
        print useful information concerning whether a given MPS is in a
        canonical form (left, right, mixed)."""
        mps_cc = mps_complex_conjugate(self)
        # Scan from the left: a site is left-canonised iff contracting it
        # with its conjugate over (phys, left) gives the identity.
        first_site_not_left_canonised = len(self) - 1
        for i in range(len(self) - 1):
            I = tsr.contract(self[i], mps_cc[i],
                             [self.phys_label, self.left_label],
                             [mps_cc.phys_label, mps_cc.left_label])
            # Check if tensor is left canonised.
            if np.linalg.norm(I.data - np.identity(I.data.shape[0])) > threshold:
                first_site_not_left_canonised = i
                break
        # Scan from the right analogously, contracting over (phys, right).
        first_site_not_right_canonised = 0
        for i in range(len(self) - 1, 0, -1):
            I = tsr.contract(self[i], mps_cc[i],
                             [self.phys_label, self.right_label],
                             [mps_cc.phys_label, mps_cc.right_label])
            # Check if tensor is right canonised.
            if np.linalg.norm(I.data - np.identity(I.data.shape[0])) > threshold:
                first_site_not_right_canonised = i
                break
        if print_output:
            # When the two scan results coincide, the MPS is in a canonical
            # form: fully left, fully right, or mixed with an orthogonality
            # centre at that site.
            if first_site_not_left_canonised == first_site_not_right_canonised:
                if first_site_not_left_canonised == len(self) - 1:
                    if abs(np.linalg.norm(self[-1].data) - 1) > threshold:
                        print("MPS in left canonical form (unnormalised)")
                    else:
                        print("MPS in left canonical form (normalised)")
                elif first_site_not_left_canonised == 0:
                    if abs(np.linalg.norm(self[0].data) - 1) > threshold:
                        print("MPS in right canonical form (unnormalised)")
                    else:
                        print("MPS in right canonical form (normalised)")
                else:
                    print("MPS in mixed canonical form with orthogonality "
                          "centre at site " +
                          str(first_site_not_right_canonised))
            else:
                if first_site_not_left_canonised == 0:
                    print("No tensors left canonised")
                else:
                    print("Tensors left canonised up to site " +
                          str(first_site_not_left_canonised))
                if first_site_not_right_canonised == len(self) - 1:
                    print("No tensors right canonised")
                else:
                    print("Tensors right canonised up to site " +
                          str(first_site_not_right_canonised))
        return (first_site_not_left_canonised, first_site_not_right_canonised)
    def svd_compress(self, chi=None, threshold=1e-15, normalise=False,
                     reverse=False):
        """Compress MPS to a given bond dimension `chi` or to a minimum
        singular value `threshold` using SVD compression as described in U.
        Schollwock, Ann. Phys. 326 (2011) 96-192. This is achieved by
        performing two successive canonisations. If `reverse` is False,
        canonisation is first performed from left to right (with QR
        decomposition) then the resulting state is canonised from right to
        left (using SVD decomposition). The resulting MPS is in right
        canonical form. If `reverse` is True this is mirrored, resulting in
        a state in left canonical form. """
        if reverse:
            self.reverse()
        # First pass: cheap QR sweep puts the state in left canonical form
        # without any truncation.
        self.left_canonise(normalise=False, qr_decomposition=True)
        # Normalise the state temporarily
        norm = self.norm(canonical_form="left")
        self[-1].data /= norm
        # Second pass: SVD sweep back performs the actual truncation.
        self.right_canonise(chi=chi, threshold=threshold, normalise=False)
        if normalise == False:
            # Restore the original norm on the first site.
            self[0].data *= norm
        if reverse:
            self.reverse()
def variational_compress(self, chi, max_iter=10, initial_guess=None,
tolerance=1e-15, normalise=False):
"""Compress MPS to a given bond dimension `chi` or to the same bond
dimensions as an optional input MPS `initial_guess` using an iterative
compression procedure described in U. Schollwock, Ann. Phys. 326 (2011)
96-192. The algorithm will start from an initial guess for the target
MPS, either computed with the `svd_compress` method or, if supplied,
with the `initial_guess` keyword argument. It will sweep over the
chain, successively optimising individual tensors until convergence.
The output MPS will be in right canonical form. Should be more
accurate, albeit slower, than `svd_compress` method.
Parameters
----------
chi : int
Bond dimension of resulting MPS.
max_iter : int
Maximum number of full updates to perform, where a full update
consists of a sweep from right to left, then left to right. If
convergence is not reached after `max_iter` full updates, an error
will be returned.
initial_guess : MatrixProductState
Starting point for variational algorithm. Output MPS will have the
same bond dimension as `initial_guess`. If not provided, an SVD
compression of the input MPS will be computed and used as the
starting point.
tolerance : float
After a full update is completed, the difference in norm with the
target state for the last two sweeps is computed. The algorithm
will be regarded as having converged and will stop if this
difference is less than `tolerance`.
"""
if initial_guess == None:
mps = self.copy()
# Make sure state is in left canonical form to start
mps.svd_compress(chi=chi, reverse=True)
else:
mps = initial_guess
# Put state in left canonical form
mps.left_canonise(qr_decomposition=True)
# Give mps1 unique labels
mps.replace_labels([mps.left_label, mps.right_label, mps.phys_label],
[unique_label(), unique_label(), unique_label()])
le_label = unique_label()
left_environments = ladder_contract(mps, self, mps.phys_label,
self.phys_label, return_intermediate_contractions=True,
right_output_label=le_label, complex_conjugate_array1=True)
def variational_sweep(mps1, mps2, left_environments):
"""Iteratively update mps1, to minimise frobenius distance to mps2
by sweeping from right to left. Expects mps1 to be in right
canonical form."""
# Get the base label of left_environments
le_label = left_environments[0].labels[0][:-1]
# Generate some unique labels to avoid conflicts
re_label = unique_label()
lq_label = unique_label()
right_environments = []
norms = [mps1[-1].norm()]
for i in range(mps2.nsites - 1, 0, -1):
# Optimise the tensor at site i by contracting with left and
# right environments
updated_tensor = tsr.contract(mps2[i], left_environments[i - 1],
mps2.left_label, le_label + "2")
if i != mps2.nsites - 1:
updated_tensor = tsr.contract(updated_tensor,
right_environment, mps2.right_label, re_label + "2")
updated_tensor.replace_label(re_label + "1",
mps1.right_label)
updated_tensor.replace_label([le_label + "1", mps2.phys_label]
, [mps1.left_label, mps1.phys_label])
# Right canonise the tensor at site i using LQ decomposition
# Absorb L into tensor at site i-1
L, Q = tsr.tensor_lq(updated_tensor, mps1.left_label,
lq_label=lq_label)
Q.replace_label(lq_label + "out", mps1.left_label)
L.replace_label(lq_label + "in", mps1.right_label)
mps1[i] = Q
mps1[i - 1] = tsr.contract(mps1[i - 1], L, mps1.right_label,
mps1.left_label)
# Compute norm of mps
# Taking advantage of canonical form
norms.append(mps1[i - 1].norm())
# Compute next column of right_environment
if i == mps2.nsites - 1:
right_environment = tsr.contract(tsr.conjugate(mps1[i]),
mps2[i], mps1.phys_label, self.phys_label)
right_environment.remove_all_dummy_indices(
labels=[mps1.right_label, mps2.right_label])
else:
right_environment.contract(tsr.conjugate(mps1[i]),
re_label + "1", mps1.right_label)
right_environment.contract(mps2[i], [mps1.phys_label,
re_label + "2"], [self.phys_label, self.right_label])
right_environment.replace_label([mps1.left_label,
mps2.left_label], [re_label + "1", re_label + "2"])
right_environments.append(right_environment.copy())
# At second last site, compute final tensor
if i == 1:
updated_tensor = tsr.contract(mps2[0], right_environment,
mps2.right_label, re_label + "2")
updated_tensor.replace_label([mps2.phys_label,
re_label + "1"],
[mps1.phys_label, mps1.right_label])
mps1[0] = updated_tensor
return right_environments, np.array(norms)
for i in range(max_iter):
left_environments, norms1 = variational_sweep(mps, self,
left_environments)
mps.reverse()
self.reverse()
le_label = left_environments[0].labels[0][:-1]
left_environments, norms2 = variational_sweep(mps, self,
left_environments)
mps.reverse()
self.reverse()
# Compute differences between norms of successive updates in second
# sweep. As shown in U. Schollwock, Ann. Phys. 326 (2011) 96-192,
# these quantities are equivalent to the differences between the
# frobenius norms between the target state and the variational
# state.
if np.all(np.abs(norms2[1:] - norms2[:-1]) / norms2[1:] < tolerance):
mps.replace_labels([mps.left_label, mps.right_label,
mps.phys_label], [self.left_label, self.right_label,
self.phys_label])
if normalise == True:
mps[-1].data /= mps.norm(canonical_form="left")
return mps
elif i == max_iter - 1: # Has reached the last iteration
raise RuntimeError("variational_compress did not converge.")
    def physical_site(self, n):
        """Return position of the n'th physical site (pos=n). Implemented for
        compatibility with MatrixProductStateCanonical, where physical and
        singular-value sites are interleaved."""
        return n
def physdim(self, site):
"""Return physical index dimesion for site"""
return self.data[site].index_dimension(self.phys_label)
def norm(self, canonical_form=False):
"""Return norm of mps.
Parameters
----------
canonical_form : str
If `canonical_form` is "left", the state will be assumed to be in
left canonical form, if "right" the state will be assumed to be in
right canonical form. In these cases the norm can be read off the
last tensor (much more efficient).
"""
if canonical_form == "left":
return np.linalg.norm(self[-1].data)
elif canonical_form == "right":
return np.linalg.norm(self[0].data)
else:
return np.sqrt(inner_product_mps(self, self))
    def apply_gate(self, gate, firstsite, gate_outputs=None, gate_inputs=None,
                   chi=None, threshold=1e-15, canonise='left'):
        """
        Apply Tensor `gate` on sites `firstsite`, `firstsite`+1, ...,
        `firstsite`+`nsites`-1, where `nsites` is the length of gate_inputs.

        The physical index of the nth site is contracted with the nth label of
        `gate_inputs`. After the contraction the MPS is put back into the
        original form by SVD, and the nth site's physical index is given
        by the nth label of `gate_outputs` (but relabeled to `self.phys_label`
        to preserve the original MPS form).

        Parameters
        ----------
        gate : Tensor
            Tensor representing the multisite gate.
        firstsite : int
            First site of MPS involved in the gate
        gate_outputs : list of str, optional
            Output labels corresponding to the input labels given by
            `gate_inputs`. Must have the same length as `gate_inputs`.
            If `None` the first half of `gate.labels` will be taken as output
            labels.
        gate_inputs : list of str, optional
            Input labels. The first index of the list is contracted with
            `firstsite`, the second with `firstsite`+1 etc.
            If `None` the second half of `gate.labels` will be taken as input
            labels.
        threshold : float
            Lower bound on the magnitude of singular values to keep. Singular
            values less than or equal to this value will be truncated.
        chi : int
            Maximum number of singular values of each tensor to keep after
            performing singular-value decomposition.
        canonise : str {'left', 'right'}
            Direction in which to canonise the sites after applying gate.

        Notes
        -----
        At the end of the gate all physical indices are relabeled to
        `self.phys_label`.

        Only use this for gates acting on small number of sites.
        """
        # Set gate_outputs and gate_inputs to default values if not given:
        # by convention the first half of the gate labels are outputs and
        # the second half are inputs.
        if gate_outputs is None and gate_inputs is None:
            gate_outputs = gate.labels[:int(len(gate.labels) / 2)]
            gate_inputs = gate.labels[int(len(gate.labels) / 2):]
        elif gate_outputs is None:
            gate_outputs = [x for x in gate.labels if x not in gate_inputs]
        elif gate_inputs is None:
            gate_inputs = [x for x in gate.labels if x not in gate_outputs]

        nsites = len(gate_inputs)
        if len(gate_outputs) != nsites:
            raise ValueError("len(gate_outputs) != len(gate_inputs)")

        # contract the sites involved in the gate into one big tensor first
        t = contract_virtual_indices(self, firstsite, firstsite + nsites,
                                     periodic_boundaries=False)
        # contract all physical indices with gate input indices
        t = tsr.contract(t, gate, self.phys_label, gate_inputs)
        # split big tensor into MPS form by exact SVD
        if canonise == 'right':
            # Splitting proceeds left-to-right, which yields left-canonical
            # tensors; to right-canonise instead, build the MPS with the
            # left/right roles swapped and reverse it afterwards.
            phys_labels = gate_outputs[::-1]
            left_label = 'right'
            right_label = 'left'
        else:
            phys_labels = gate_outputs
            left_label = 'left'
            right_label = 'right'
        mps = tensor_to_mps(t, phys_labels=phys_labels,
                mps_phys_label=self.phys_label, left_label=left_label,
                right_label=right_label, chi=chi, threshold=threshold)
        if canonise == 'right':
            mps.reverse()
        # Splice the re-split tensors back into this MPS in place.
        self.data[firstsite:firstsite + nsites] = mps.data
    def expval(self, gate, firstsite,
               left_canonised_up_to=0, right_canonised_up_to=-1,
               gate_outputs=None, gate_inputs=None,
               ):
        """
        Compute multi-site expectation value for operator `gate` applied to
        `firstsite`, `firstsite`+1, ..., `firstsite`+n-1 where `n` is the
        number of gate inputs.

        Assumes that MPS has been canonised such that everything to the left
        of `left_canonised_up_to` is left-canonised and everything to the right
        of `right_canonised_up_to` is right-canonised.

        Parameters
        ----------
        gate : Tensor
            Tensor representing the multisite gate.
        firstsite : int
            First site of MPS involved in the gate
        gate_outputs : list of str, optional
            Output labels corresponding to the input labels given by
            `gate_inputs`. Must have the same length as `gate_inputs`.
            If `None` the first half of `gate.labels` will be taken as output
            labels.
        gate_inputs : list of str, optional
            Input labels. The first index of the list is contracted with
            `firstsite`, the second with `firstsite`+1 etc.
            If `None` the second half of `gate.labels` will be taken as input
            labels.
        left_canonised_up_to : int
            Everything to the left of this is assumed to be left-canonised.
        right_canonised_up_to : int
            Everything to the right of this is assumed to be right-canonised.
            The default -1 means the right end of the chain.

        Returns
        -------
        t : Tensor
            Contracted tensor <mps|O|mps>

        Notes
        -----
        The MPS is left-canonised up to `firstsite` and right-canonised up to
        `firstsite+n` after the operation.
        """
        # Set gate_outputs and gate_inputs to default values if not given:
        # by convention the first half of the gate labels are outputs and
        # the second half are inputs.
        if gate_outputs is None and gate_inputs is None:
            gate_outputs = gate.labels[:int(len(gate.labels) / 2)]
            gate_inputs = gate.labels[int(len(gate.labels) / 2):]
        elif gate_outputs is None:
            gate_outputs = [x for x in gate.labels if x not in gate_inputs]
        elif gate_inputs is None:
            gate_inputs = [x for x in gate.labels if x not in gate_outputs]
        nsites = len(gate_inputs)
        if len(gate_outputs) != nsites:
            raise ValueError("len(gate_outputs) != len(gate_inputs)")

        N = len(self)
        if right_canonised_up_to == -1:
            right_canonised_up_to = N
        # Move left/right orthogonality centres to firstsite/firstsite+nsites
        # so the environment tensors outside the gate region contract to
        # identities.
        if left_canonised_up_to < firstsite:
            self.left_canonise(left_canonised_up_to, firstsite)
        if right_canonised_up_to > firstsite + nsites:
            self.right_canonise(firstsite + nsites, right_canonised_up_to)

        # contract the MPS sites first
        t = contract_virtual_indices(self, firstsite, firstsite + nsites,
                                     periodic_boundaries=False)
        td = t.copy()
        td.conjugate()
        # contract all physical indices with gate indices (ket with inputs,
        # conjugated bra with outputs)
        exp = tsr.contract(t, gate, self.phys_label, gate_inputs)
        exp = tsr.contract(td, exp, self.phys_label, gate_outputs)
        # contract boundary indices (trace over the dangling virtual legs)
        exp.tr(self.left_label, self.left_label, index1=0,
               index2=1)
        exp.tr(self.right_label, self.right_label, index1=0,
               index2=1)
        return exp
    def ptrace(self, firstsite, lastsite=None,
               left_canonised_up_to=0, right_canonised_up_to=-1):
        """
        Compute local density matrix for sites `firstsite` to `lastsite`
        assuming left and right canonisation up to boundaries.

        Parameters
        ----------
        firstsite : int
            First physical site of MPS not traced out
        lastsite : int
            Last physical site of MPS not traced out. By default `lastsite` is
            set to `firstsite`.
        left_canonised_up_to : int
            Everything to the left of this is assumed to be left-canonised.
        right_canonised_up_to : int
            Everything to the right of this is assumed to be right-canonised.
            The default -1 means the right end of the chain.

        Returns
        -------
        t : Tensor
            Local density matrix

        Notes
        -----
        The MPS is left-canonised up to `firstsite` and right-canonised up to
        `lastsite`+1 after the operation.
        """
        if right_canonised_up_to == -1:
            right_canonised_up_to = self.nsites
        if lastsite is None:
            lastsite = firstsite
        # Move left/right orthogonality centres to firstsite/lastsite+1 so
        # the traced-out environment contracts to identities.
        if left_canonised_up_to < firstsite:
            self.left_canonise(left_canonised_up_to, firstsite)
        if right_canonised_up_to > lastsite:
            self.right_canonise(lastsite + 1, right_canonised_up_to)
        start = self.physical_site(firstsite)
        end = self.physical_site(lastsite)
        t = contract_virtual_indices(self, start, end + 1,
                                     periodic_boundaries=False)
        td = t.copy()
        td.conjugate()
        # rename physical labels so bra and ket indices remain distinct in
        # the resulting density matrix
        for i, l in enumerate(t.labels):
            if l == self.phys_label:
                t.labels[i] = l + "_out" + str(i)
                td.labels[i] = l + "_in" + str(i)
        # contract ket with conjugated bra over the virtual boundary indices
        rho = (t[self.left_label, self.right_label]
               * td[self.left_label, self.right_label])
        return rho
class MatrixProductStateCanonical(OneDimensionalTensorNetwork):
    """
    Matrix product state in canonical form with every other tensor assumed to
    be a diagonal matrix of singular values. The site numbering is

        0      1      2      3    ...  N-2    N-1
        Lambda Gamma  Lambda Gamma ... Gamma  Lambda

    where the Gammas are rank three tensors and the Lambdas diagonal matrices
    of singular values. The left-most and right-most Lambda matrices are
    trivial one-by-one matrices inserted for convenience. For a canonical form
    MPS created from a right-canonical MPS using e.g.
    `right_canonical_to_canonical` the right-most Lambda is equal to the norm
    of the state, and vice versa if created from a left-canonical MPS.

    The n'th physical site index (i=2*n+1) can conveniently be accessed with
    the `physical_site` method.

    Notes
    -----
    Convenient for TEBD type algorithms. Many methods assume canonical form
    for efficiency.

    See U. Schollwock, Ann. Phys. 326 (2011) 96-192 section 4.6.
    """

    def __init__(self, tensors, left_label="left", right_label="right",
                 phys_label="phys"):
        OneDimensionalTensorNetwork.__init__(self, tensors,
                left_label=left_label, right_label=right_label)
        self.phys_label = phys_label

    def __repr__(self):
        return ("MatrixProductStateCanonical(tensors=%r, left_label=%r,"
                "right_label=%r, phys_label=%r)" % (self.data, self.left_label,
                self.right_label, self.phys_label))

    def __str__(self):
        return ("MatrixProductStateCanonical object: " +
                "sites (incl. singular value sites)= " + str(len(self)) +
                ", left_label = " + self.left_label +
                ", right_label = " + self.right_label +
                ", phys_label = " + self.phys_label)

    def copy(self):
        """Return an MPS that is not linked in memory to the original."""
        return MatrixProductStateCanonical([x.copy() for x in self],
                self.left_label, self.right_label, self.phys_label)

    def replace_labels(self, old_labels, new_labels):
        """Run `tensor.replace_label` method on every tensor in `self` then
        replace `self.left_label`, `self.right_label` and `self.phys_label`
        appropriately."""
        if not isinstance(old_labels, list):
            old_labels = [old_labels]
        if not isinstance(new_labels, list):
            new_labels = [new_labels]
        for x in self.data:
            x.replace_label(old_labels, new_labels)
        if self.left_label in old_labels:
            self.left_label = new_labels[old_labels.index(self.left_label)]
        if self.right_label in old_labels:
            self.right_label = new_labels[old_labels.index(self.right_label)]
        if self.phys_label in old_labels:
            self.phys_label = new_labels[old_labels.index(self.phys_label)]

    def standard_labels(self, suffix=""):
        """
        Overwrite self.labels, self.left_label, self.right_label,
        self.phys_label with standard labels "left", "right", "phys"
        (with optional `suffix` appended).
        """
        self.replace_labels([self.left_label, self.right_label,
            self.phys_label], ["left" + suffix, "right" + suffix,
            "phys" + suffix])

    def physical_site(self, n):
        """Return position of n'th physical site (pos=2*n+1)."""
        return 2 * n + 1

    def singular_site(self, n):
        """Return position of n'th singular value site (pos=2*n)."""
        return 2 * n

    def physdim(self, site):
        """Return physical index dimension for `physical_site(site)`."""
        return self.data[self.physical_site(site)].index_dimension(
            self.phys_label)

    def singulardim(self, site):
        """Return chi for the chi by chi singular value matrix at
        `singular_site(site)`."""
        return self.data[self.singular_site(site)].index_dimension(
            self.left_label)

    def bonddims(self):
        """Return list of all bond dimensions. Note that for
        MatrixProductStateCanonical every other site is a diagonal chi by chi
        matrix. Hence, this function returns an output of the form
        [chi0, chi0, chi1, chi1, chi2, chi2, ...]"""
        return super(MatrixProductStateCanonical, self).bonddims()

    @property
    def nsites_physical(self):
        # Sites alternate Lambda/Gamma, beginning and ending with a Lambda,
        # so nsites == 2 * nsites_physical + 1.
        return int((self.nsites - 1) / 2)

    def norm(self, canonical_form=True):
        """Return norm of mps.

        Parameters
        ----------
        canonical_form : bool
            If `canonical_form` is `True`, the state will be assumed to be in
            canonical form. In this case the norm can be read off from the
            edge singular value matrices (much more efficient).
        """
        if canonical_form is True:
            return np.linalg.norm(self[-1].data) * np.linalg.norm(self[0].data)
        else:
            return np.sqrt(inner_product_mps(self, self))

    def check_canonical_form(self, threshold=1e-14, print_output=True):
        """Check if MPS is in canonical form, by checking for every site:
        1) if A=Lambda Gamma gives the identity under
        `A[phys_label, left_label]*Ad[phys_label, left_label]` where `Ad` is
        the conjugate tensor,
        2) if B=Gamma Lambda gives the identity under
        `B[phys_label, right_label]*Bd[phys_label, right_label]` where `Bd` is
        the conjugate tensor.

        Returns a list of sites not satisfying 1), a list not satisfying 2),
        and a list containing any un-normalised left-most or right-most sites.

        If print_output=True, will print useful information concerning whether
        a given MPS is in canonical form."""
        not_left_canonised = []
        not_right_canonised = []
        not_normalised = []
        for i in range(self.nsites_physical):
            # A = Lambda * Gamma: absorb the singular values to the left
            A = (self[self.physical_site(i) - 1][self.right_label,]
                 * self[self.physical_site(i)][self.left_label,])
            Ad = tsr.conjugate(A)
            I = tsr.contract(A, Ad,
                             [self.phys_label, self.left_label],
                             [self.phys_label, self.left_label])
            # Check if tensor is left canonised.
            if np.linalg.norm(I.data - np.identity(I.data.shape[0])) > threshold:
                if i == 0 or i == self.nsites_physical - 1:
                    # At the edges a deviation that is a pure rescaling of the
                    # identity (only one non-negligible entry) just indicates
                    # a missing normalisation, not a broken canonical form.
                    If = I.data.flatten()
                    if len(If[np.abs(If) > threshold]) > 1:
                        not_left_canonised.append(i)
                    else:
                        not_normalised.append(i)
                else:
                    not_left_canonised.append(i)
        for i in range(self.nsites_physical):
            # B = Gamma * Lambda: absorb the singular values to the right
            B = (self[self.physical_site(i)][self.right_label,]
                 * self[self.physical_site(i) + 1][self.left_label,])
            Bd = tsr.conjugate(B)
            I = tsr.contract(B, Bd,
                             [self.phys_label, self.right_label],
                             [self.phys_label, self.right_label])
            # Check if tensor is right canonised.
            if np.linalg.norm(I.data - np.identity(I.data.shape[0])) > threshold:
                if i == 0 or i == self.nsites_physical - 1:
                    If = I.data.flatten()
                    if len(If[np.abs(If) > threshold]) > 1:
                        not_right_canonised.append(i)
                    else:
                        not_normalised.append(i)
                else:
                    not_right_canonised.append(i)
        if print_output:
            if len(not_left_canonised) == 0 and len(not_right_canonised) == 0:
                if len(not_normalised) == 0:
                    print("MPS in canonical form (normalised)")
                else:
                    print("MPS in canonical form (unnormalised)")
            else:
                print("Physical sites not left-canonical:")
                print(not_left_canonised)
                print("Physical sites not right-canonical:")
                print(not_right_canonised)
        return not_left_canonised, not_right_canonised, not_normalised

    def compress_bond(self, singular_site, chi=None, threshold=1e-15):
        """Compress bonds connecting to `singular_site(singular_site)` by
        truncating singular values.
        """
        # contract the MPS sites first
        site = self.singular_site(singular_site)
        # Bond dimension one cannot be compressed further. NOTE: previously
        # this early-exit check called `self.singulardim(site)` with the raw
        # tensor position; `singulardim` expects the logical bond index (it
        # maps via `singular_site` internally), so the wrong Lambda matrix
        # was inspected for any bond other than the first.
        if self.singulardim(singular_site) == 1:
            return
        start = site - 2
        end = site + 2
        # Prime the right physical label so the two physical indices of the
        # contracted two-site tensor remain distinguishable.
        self[end - 1].prime_label(self.phys_label)
        t = contract_virtual_indices(self, start, end + 1,
                                     periodic_boundaries=False)
        # Remember inverse boundary singular values so they can be divided
        # back out after the SVD.
        S1_inv = self[start].copy()
        S1_inv.inv()
        S2_inv = self[end].copy()
        S2_inv.inv()
        # SVD and compress
        U, S, V = tsr.truncated_svd(t, [self.phys_label, self.left_label],
                chi=chi, threshold=threshold, absorb_singular_values=None)
        U.replace_label("svd_in", self.right_label)
        V.replace_label("svd_out", self.left_label)
        S.replace_label(["svd_out", "svd_in"], [self.left_label,
                                               self.right_label])
        self[start + 1] = S1_inv[self.right_label,] * U[self.left_label,]
        self[start + 2] = S
        self[end - 1] = V[self.right_label,] * S2_inv[self.left_label,]
        self[end - 1].unprime_label(self.phys_label)

    def compress_all(self, chi=None, threshold=1e-15, normalise=False):
        raise NotImplementedError

    def apply_gate(self, gate, firstsite, gate_outputs=None, gate_inputs=None,
                   chi=None, threshold=1e-15):
        """
        Apply multi-site gate to `physical_site(firstsite)`,
        `physical_site(firstsite+1)`, ... and perform optimal compression,
        assuming canonical form.

        Currently only implemented for 1-site and 2-site gates.

        Parameters
        ----------
        gate : Tensor
            Tensor representing the multisite gate.
        firstsite : int
            First site of MPS involved in the gate
        gate_outputs : list of str, optional
            Output labels corresponding to the input labels given by
            `gate_inputs`. Must have the same length as `gate_inputs`.
            If `None` the first half of `gate.labels` will be taken as output
            labels.
        gate_inputs : list of str, optional
            Input labels. The first index of the list is contracted with
            `firstsite`, the second with `firstsite`+1 etc.
            If `None` the second half of `gate.labels` will be taken as input
            labels.
        threshold : float
            Lower bound on the magnitude of singular values to keep. Singular
            values less than or equal to this value will be truncated.
        chi : int
            Maximum number of singular values of each tensor to keep after
            performing singular-value decomposition.

        Notes
        -----
        At the end of the gate all physical indices are relabeled to
        `self.phys_label`.

        Only use this for gates acting on small number of sites.
        """
        # Set gate_outputs and gate_inputs to default values if not given:
        # by convention the first half of the gate labels are outputs and
        # the second half are inputs.
        if gate_outputs is None and gate_inputs is None:
            gate_outputs = gate.labels[:int(len(gate.labels) / 2)]
            gate_inputs = gate.labels[int(len(gate.labels) / 2):]
        elif gate_outputs is None:
            gate_outputs = [x for x in gate.labels if x not in gate_inputs]
        elif gate_inputs is None:
            gate_inputs = [x for x in gate.labels if x not in gate_outputs]
        nsites = len(gate_inputs)
        if len(gate_outputs) != nsites:
            raise ValueError("len(gate_outputs) != len(gate_inputs)")
        if nsites > 2:
            raise NotImplementedError("gate acting on more than two sites.")
        # contract the MPS sites first (including the boundary Lambdas)
        start = self.physical_site(firstsite) - 1
        end = self.physical_site(firstsite + nsites - 1) + 1
        t = contract_virtual_indices(self, start, end + 1,
                                     periodic_boundaries=False)
        # contract all physical indices with gate input indices
        t = tsr.contract(t, gate, self.phys_label, gate_inputs)
        # split big tensor into MPS form by exact SVD; remember inverse
        # boundary singular values so they can be divided back out
        S1_inv = self[start].copy()
        S1_inv.inv()
        S2_inv = self[end].copy()
        S2_inv.inv()
        if nsites == 1:
            t.replace_label([gate_outputs[0]], [self.phys_label])
            t = S1_inv[self.right_label,] * t[self.left_label,]
            self[start + 1] = t[self.right_label,] * S2_inv[self.left_label,]
            # if chi is not None:
            #     self.compress_bond(firstsite)
            #     self.compress_bond(firstsite+1)
        elif nsites == 2:
            U, S, V = tsr.truncated_svd(t, [gate_outputs[0], self.left_label],
                    chi=chi, threshold=threshold, absorb_singular_values=None)
            U.replace_label(["svd_in", gate_outputs[0]],
                            [self.right_label, self.phys_label])
            V.replace_label(["svd_out", gate_outputs[1]],
                            [self.left_label, self.phys_label])
            S.replace_label(["svd_out", "svd_in"], [self.left_label,
                                                    self.right_label])
            self[start + 1] = S1_inv[self.right_label,] * U[self.left_label,]
            self[start + 2] = S
            self[start + 3] = V[self.right_label,] * S2_inv[self.left_label,]

    def swap_gate(self, i, chi=None, threshold=1e-15):
        """
        Apply a swap gate swapping all "physical" (i.e., non-"left" and
        non-"right") indices for site `physical_site(i)` and
        `physical_site(i+1)` of a MatrixProductStateCanonical object.

        Parameters
        ----------
        i : int
        threshold : float
            Lower bound on the magnitude of singular values to keep. Singular
            values less than or equal to this value will be truncated.
        chi : int
            Maximum number of singular values of each tensor to keep after
            performing singular-value decomposition.

        Notes
        -----
        The swap is implemented by SVD as described
        in Y.-Y. Shi et al, Phys. Rev. A 74, 022320 (2006).
        """
        # contract the MPS sites first
        start = self.physical_site(i) - 1
        end = self.physical_site(i + 1) + 1
        # Prime the left physical label so the two physical indices of the
        # contracted tensor remain distinguishable.
        self[start + 1].prime_label(self.phys_label)
        t = contract_virtual_indices(self, start, end + 1,
                                     periodic_boundaries=False)
        # remember inverse singular values at the boundaries
        S1_inv = self[start].copy()
        S1_inv.inv()
        S2_inv = self[end].copy()
        S2_inv.inv()
        # Splitting with the unprimed physical label on the left side of the
        # SVD effects the swap of the two physical indices.
        U, S, V = tsr.truncated_svd(t, [self.left_label, self.phys_label],
                chi=chi, threshold=threshold, absorb_singular_values=None)
        V.unprime_label(self.phys_label)
        U.replace_label("svd_in", self.right_label)
        V.replace_label('svd_out', self.left_label)
        S.replace_label(["svd_out", "svd_in"], [self.left_label,
                                               self.right_label])
        self[start + 1] = S1_inv[self.right_label,] * U[self.left_label,]
        self[start + 2] = S
        self[start + 3] = V[self.right_label,] * S2_inv[self.left_label,]

    def expval(self, gate, firstsite, gate_outputs=None, gate_inputs=None):
        """
        Compute multi-site expectation value for operator `gate` applied to
        `physical_site(firstsite)`, `physical_site(firstsite+1)`, ...,
        assuming canonical form.

        Parameters
        ----------
        gate : Tensor
            Tensor representing the multisite gate.
        firstsite : int
            First site of MPS involved in the gate
        gate_outputs : list of str, optional
            Output labels corresponding to the input labels given by
            `gate_inputs`. Must have the same length as `gate_inputs`.
            If `None` the first half of `gate.labels` will be taken as output
            labels.
        gate_inputs : list of str, optional
            Input labels. The first index of the list is contracted with
            `firstsite`, the second with `firstsite`+1 etc.
            If `None` the second half of `gate.labels` will be taken as input
            labels.

        Returns
        -------
        t : Tensor
            Contracted tensor <mps|O|mps>
        """
        # Set gate_outputs and gate_inputs to default values if not given:
        # by convention the first half of the gate labels are outputs and
        # the second half are inputs.
        if gate_outputs is None and gate_inputs is None:
            gate_outputs = gate.labels[:int(len(gate.labels) / 2)]
            gate_inputs = gate.labels[int(len(gate.labels) / 2):]
        elif gate_outputs is None:
            gate_outputs = [x for x in gate.labels if x not in gate_inputs]
        elif gate_inputs is None:
            gate_inputs = [x for x in gate.labels if x not in gate_outputs]
        nsites = len(gate_inputs)
        if len(gate_outputs) != nsites:
            raise ValueError("len(gate_outputs) != len(gate_inputs)")
        # contract the MPS sites first (including the boundary Lambdas)
        start = self.physical_site(firstsite) - 1
        end = self.physical_site(firstsite + len(gate_inputs) - 1) + 1
        t = contract_virtual_indices(self, start, end + 1,
                                     periodic_boundaries=False)
        td = t.copy()
        td.conjugate()
        # contract all physical indices with gate indices (ket with inputs,
        # conjugated bra with outputs)
        exp = t[self.phys_label,] * gate[gate_inputs]
        exp = td[self.phys_label,] * exp[gate_outputs]
        # contract boundary indices (trace over the dangling virtual legs)
        exp.tr(self.left_label, self.left_label, index1=0, index2=1)
        exp.tr(self.right_label, self.right_label, index1=0, index2=1)
        return exp

    def ptrace(self, firstsite, lastsite=None):
        """
        Compute local density matrix for sites
        `physical_site(firstsite)` to `physical_site(lastsite)`
        assuming canonical form.

        Parameters
        ----------
        firstsite : int
            First physical site of MPS not traced out
        lastsite : int
            Last physical site of MPS not traced out. By default `lastsite` is
            set to `firstsite`.

        Returns
        -------
        t : Tensor
            Local density matrix
        """
        if lastsite is None:
            lastsite = firstsite
        # include the boundary Lambda matrices in the contraction
        start = self.physical_site(firstsite) - 1
        end = self.physical_site(lastsite) + 1
        t = contract_virtual_indices(self, start, end + 1,
                                     periodic_boundaries=False)
        td = t.copy()
        td.conjugate()
        # rename physical labels so bra and ket indices remain distinct in
        # the resulting density matrix
        for i, l in enumerate(t.labels):
            if l == self.phys_label:
                t.labels[i] = l + "_out" + str(i)
                td.labels[i] = l + "_in" + str(i)
        # contract ket with conjugated bra over the virtual boundary indices
        rho = (t[self.left_label, self.right_label]
               * td[self.left_label, self.right_label])
        return rho
class MatrixProductOperator(OneDimensionalTensorNetwork):
    # TODO currently assumes open boundaries
    """Matrix product operator: a list of tensors, each having physical
    indices and at least one of the virtual indices "left", "right".

    Input is a list of tensors with up to four index labels each. If the
    labels aren't already specified as "left", "right", "physin", "physout",
    specify which labels correspond to these using the arguments
    left_label, right_label, physin_label and physout_label."""

    def __init__(self, tensors, left_label="left", right_label="right",
                 physout_label="physout", physin_label="physin"):
        OneDimensionalTensorNetwork.__init__(self, tensors, left_label,
                                             right_label)
        self.physout_label = physout_label
        self.physin_label = physin_label

    def __repr__(self):
        # Fixed: the last field of the format string was previously
        # misspelled as "phsin_labe".
        return ("MatrixProductOperator(tensors=%r, left_label=%r,"
                " right_label=%r, physout_label=%r, physin_label=%r)"
                % (self.data, self.left_label, self.right_label,
                   self.physout_label, self.physin_label))

    def __str__(self):
        return ("MatrixProductOperator object: " +
                "sites = " + str(len(self)) +
                ", left_label = " + self.left_label +
                ", right_label = " + self.right_label +
                ", physout_label = " + self.physout_label +
                ", physin_label = " + self.physin_label)

    # TODO replace copy method

    def physoutdim(self, site):
        """Return the output physical index dimension for `site`."""
        return self.data[site].index_dimension(self.physout_label)

    def physindim(self, site):
        """Return the input physical index dimension for `site`."""
        return self.data[site].index_dimension(self.physin_label)
def contract_multi_index_tensor_with_one_dim_array(tensor, array, label1,
                                                   label2):
    """Contract a single tensor carrying N indices labelled `label1` with a
    one-dimensional tensor array of length N.

    Working from the left, the `label2` index of each tensor in `array` is
    contracted with the first uncontracted `label1` index of `tensor`; all
    virtual (bond) indices of `array` are contracted as well. It is assumed
    that only the indices to be contracted carry the labels `label1` and
    `label2`.
    """
    # Temporarily rename the target indices so they cannot clash with labels
    # already present in `array`.
    temp_label = unique_label()
    tensor.replace_label(label1, temp_label)

    result = tsr.contract(tensor, array[0], temp_label, label2,
                          index_slice1=[0])
    for site_index in range(1, len(array)):
        # TODO make this work
        result = tsr.contract(result, array[site_index],
                              [array.right_label, temp_label],
                              [array.left_label, label2],
                              index_slice1=[0, 1])

    # Close the boundary: contract leftmost with rightmost virtual index.
    result.contract_internal(array.right_label, array.left_label)
    # Undo the temporary relabelling on the input tensor.
    tensor.replace_label(temp_label, label1)
    return result
def contract_virtual_indices(array_1d, start=0, end=None,
                             periodic_boundaries=True):
    """
    Contract all virtual (bond) indices of a segment of a
    OneDimensionalTensorNetwork and return the resulting Tensor.

    Params
    -----
    array_1d : OneDimensionalTensorNetwork
    start : int
        First site of segment to be contracted
    end : int
        Last site of segment to be contracted
    periodic_boundaries : bool
        If `True` the leftmost and rightmost virtual indices are contracted
        with each other.
    """
    contracted = array_1d[start].copy()
    for site_tensor in array_1d[start + 1:end]:
        contracted = tsr.contract(contracted, site_tensor,
                                  array_1d.right_label, array_1d.left_label)
    if periodic_boundaries:
        # Join the two boundary indices. For open boundaries of dimension
        # one this simply removes them.
        contracted.contract_internal(array_1d.right_label,
                                     array_1d.left_label)
    return contracted
def left_canonical_form_mps(orig_mps, chi=0, threshold=1e-14,
                            normalise=False):
    """
    Return a left-canonical copy of `orig_mps`; the input is not modified.

    See also
    --------
    Tensor.left_canonise()
    """
    canonical = orig_mps.copy()
    canonical.left_canonise(chi=chi, threshold=threshold,
                            normalise=normalise)
    return canonical
def right_canonical_form_mps(orig_mps, chi=0, threshold=1e-14,
                             normalise=False):
    """Computes right canonical form of an MPS (cf.
    `left_canonical_form_mps`). The input MPS is not modified."""
    mps = orig_mps.copy()
    mps.right_canonise(chi=chi, threshold=threshold, normalise=normalise)
    return mps
def canonical_form_mps(orig_mps, chi=0, threshold=1e-14,
                       normalise=False):
    """Return `orig_mps` converted to canonical (Vidal) form: first
    right-canonise a copy, then interleave the singular value matrices."""
    right_mps = orig_mps.copy()
    right_mps.right_canonise(chi=chi, threshold=threshold,
                             normalise=normalise)
    return right_canonical_to_canonical(right_mps, threshold=threshold)
def reverse_mps(orig_mps):
    """Return a reversed copy of `orig_mps`; the original is untouched."""
    reversed_copy = orig_mps.copy()
    reversed_copy.reverse()
    return reversed_copy
def check_canonical_form_mps(mps, threshold=1e-14, print_output=True):
    """Convenience wrapper delegating to `mps.check_canonical_form`."""
    return mps.check_canonical_form(threshold=threshold,
                                    print_output=print_output)
def svd_compress_mps(orig_mps, chi, threshold=1e-15, normalise=False):
    """SVD-compress `orig_mps` to bond dimension `chi`: left-canonise first,
    then right-canonise with truncation, following Schollwock."""
    left_mps = left_canonical_form_mps(orig_mps, threshold=threshold,
                                       normalise=normalise)
    return right_canonical_form_mps(left_mps, chi=chi, threshold=threshold,
                                    normalise=normalise)
def variational_compress_mps(mps, chi, max_iter=10, initial_guess=None,
                             tolerance=1e-15, normalise=False):
    """Variationally compress `mps` to bond dimension `chi`.

    Thin wrapper around `mps.variational_compress`; see that method for a
    full description of the parameters.

    Parameters
    ----------
    mps : MatrixProductState
    chi : int
        Target bond dimension.
    max_iter : int
        Maximum number of full sweeps.
    initial_guess : MatrixProductState, optional
        Starting point for the variational algorithm.
    tolerance : float
        Convergence tolerance on the norm differences between sweeps.
    normalise : bool
        If `True`, normalise the output MPS. Previously this option of the
        underlying method could not be reached through this wrapper.
    """
    return mps.variational_compress(chi, max_iter=max_iter,
            initial_guess=initial_guess, tolerance=tolerance,
            normalise=normalise)
def mps_complex_conjugate(mps):
    """Return a copy of `mps` in which every entry of every tensor has been
    complex conjugated. Index labels are left unchanged."""
    new_mps = mps.copy()
    for x in new_mps.data:
        x.conjugate()
    return new_mps
def ladder_contract(array1, array2, label1, label2, start=0, end=None,
        complex_conjugate_array1=False, left_output_label="left",
        right_output_label="right", return_intermediate_contractions=False):
    """
    Contract two one-dimensional tensor networks. Indices labelled `label1` in
    `array1` and indices labelled `label2` in `array2` are contracted pairwise
    and all virtual indices are contracted. The contraction pattern
    resembles a ladder when represented graphically.
    Parameters
    ----------
    array1 : OneDimensionalTensorNetwork
    array2 : OneDimensionalTensorNetwork
        The one-dimensional networks to be contracted.
    label1 : str
    label2 : str
        The index labelled `label1` is contracted with the index labelled
        `label2` for every site in array.
    start : int
    end : int
        The endpoints of the interval to be contracted. The leftmost tensors
        involved in the contraction are `array1[start]` and `array2[start]`,
        while the rightmost tensors are `array1[end]` and `array2[end]`.
    complex_conjugate_array1 : bool
        Whether the complex conjugate of `array1` will be used, rather than
        `array1` itself. This is useful if, for instance, the two arrays are
        matrix product states and the inner product is to be taken (Note that
        inner_product_mps could be used in this case).
    right_output_label : str
        Base label assigned to right-going indices of output tensor.
        Right-going indices will be assigned labels `right_output_label`+"1"
        and `right_output_label`+"2" corresponding, respectively, to `array1`
        and `array2`.
    left_output_label : str
        Base label assigned to left-going indices of output tensor. Left-going
        indices will be assigned labels `left_output_label`+"1" and
        `left_output_label`+"2" corresponding, respectively, to `array1` and
        `array2`.
    return_intermediate_contractions : bool
        If true, a list of tensors is returned. If the contraction is performed
        from left to right (see Notes below), the i-th entry contains the
        contraction up to the i-th contracted pair. If contraction is performed
        from right to left, this order is reversed (so the last entry
        corresponds to the contraction of the right-most pair tensors, which
        are first to be contracted).
    Returns
    -------
    tensor : Tensor
        Tensor obtained by contracting the two arrays. The tensor may have left
        indices, right indices, both or neither depending on the interval
        specified.
    intermediate_contractions : list
        If `return_intermediate_contractions` is true a list
        `intermediate_contractions` is returned containing a list of tensors
        corresponding to contraction up to a particular column.
    Notes
    -----
    If the interval specified contains the left open boundary, contraction is
    performed from left to right. If not and if interval contains right
    boundary, contraction is performed from right to left. If the interval
    does not contain either boundary, contraction is performed from left to
    right.
    """
    # If no end specified, will contract to end
    if end is None:
        end = min(array1.nsites, array2.nsites) - 1  # index of the last site
    if end < start:
        raise ValueError("Badly defined interval (end before start).")
    # Work on copies so the input networks are left unmodified.
    a1 = array1.copy()
    a2 = array2.copy()
    if complex_conjugate_array1:
        a1.complex_conjugate()
    # Give all contracted indices unique labels so no conflicts with other
    # labels in array1, array2
    a1.unique_virtual_labels()
    a2.unique_virtual_labels()
    rung_label = unique_label()
    a1.replace_labels(label1, rung_label)
    a2.replace_labels(label2, rung_label)
    intermediate_contractions = []
    if start == 0:  # Start contraction from left
        for i in range(0, end + 1):
            if i == 0:
                C = tsr.contract(a1[0], a2[0], rung_label, rung_label)
            else:
                C.contract(a1[i], a1.right_label, a1.left_label)
                C.contract(a2[i], [a2.right_label, rung_label],
                           [a2.left_label, rung_label])
            if return_intermediate_contractions:
                t = C.copy()
                t.replace_label([a1.right_label, a2.right_label],
                        [right_output_label + "1", right_output_label + "2"])
                # Remove dummy indices except the right indices
                t.remove_all_dummy_indices(labels=[x for x in t.labels if x
                        not in [right_output_label + "1",
                                right_output_label + "2"]])
                intermediate_contractions.append(t)
        C.replace_label([a1.right_label, a2.right_label],
                [right_output_label + "1", right_output_label + "2"])
        C.remove_all_dummy_indices()
    elif end == a1.nsites - 1 and end == a2.nsites - 1:  # Contract from right
        for i in range(end, start - 1, -1):
            if i == end:
                C = tsr.contract(a1[end], a2[end], rung_label, rung_label)
            else:
                C.contract(a1[i], a1.left_label, a1.right_label)
                C.contract(a2[i], [a2.left_label, rung_label],
                           [a2.right_label, rung_label])
            if return_intermediate_contractions:
                t = C.copy()
                t.replace_label([a1.left_label, a2.left_label],
                        [left_output_label + "1", left_output_label + "2"])
                # Remove dummy indices except the left indices
                t.remove_all_dummy_indices(labels=[x for x in t.labels if x
                        not in [left_output_label + "1",
                                left_output_label + "2"]])
                # Prepend so the returned list is ordered left to right.
                intermediate_contractions.insert(0, t)
        C.replace_label([a1.left_label, a2.left_label],
                [left_output_label + "1", left_output_label + "2"])
        C.remove_all_dummy_indices()
    else:
        # When an interval does not contain a boundary, contract in pairs first
        # then together
        for i in range(start, end + 1):
            t = tsr.contract(a1[i], a2[i], rung_label, rung_label)
            if i == start:
                C = t
            else:
                C.contract(t, [a1.right_label, a2.right_label],
                           [a1.left_label, a2.left_label])
            if return_intermediate_contractions:
                t = C.copy()
                t.replace_label([a1.right_label, a2.right_label, a1.left_label,
                        a2.left_label], [right_output_label + "1",
                        right_output_label + "2", left_output_label + "1",
                        left_output_label + "2"])
                # Remove dummy indices except the left and right indices
                t.remove_all_dummy_indices(labels=[x for x in t.labels if x
                        not in [right_output_label + "1",
                                right_output_label + "2",
                                left_output_label + "1",
                                left_output_label + "2"]])
                t.remove_all_dummy_indices()
                intermediate_contractions.append(t)
        C.replace_label([a1.right_label, a2.right_label, a1.left_label,
                a2.left_label], [right_output_label + "1",
                right_output_label + "2", left_output_label + "1",
                left_output_label + "2"])
        C.remove_all_dummy_indices()
    if return_intermediate_contractions:
        return intermediate_contractions
    else:
        return C
def inner_product_mps(mps_bra, mps_ket, complex_conjugate_bra=True,
        return_whole_tensor=False):
    """Compute the inner product of two MatrixProductState objects."""
    # MPS supplied in canonical form are converted to left-canonical form
    # before contracting.
    bra = (canonical_to_left_canonical(mps_bra)
           if isinstance(mps_bra, MatrixProductStateCanonical) else mps_bra)
    ket = (canonical_to_left_canonical(mps_ket)
           if isinstance(mps_ket, MatrixProductStateCanonical) else mps_ket)
    result = ladder_contract(bra, ket, mps_bra.phys_label,
            mps_ket.phys_label, complex_conjugate_array1=complex_conjugate_bra)
    return result if return_whole_tensor else result.data
def frob_distance_squared(mps1, mps2):
    """Squared Frobenius distance between two MPS, computed from the three
    inner products <1|1> + <2|2> - 2 Re<1|2>."""
    overlap = inner_product_mps(mps1, mps2)
    return (inner_product_mps(mps1, mps1) + inner_product_mps(mps2, mps2)
            - 2 * np.real(overlap))
def contract_mps_mpo(mps, mpo):
    """Contract the physical index of `mps` with the physin index of `mpo`.

    Left and right virtual indices are combined. The resulting MPS keeps the
    left/right labels of `mps`, and its physical label becomes
    `mpo.physout_label`.
    """
    if isinstance(mps, MatrixProductStateCanonical):
        raise NotImplementedError(("Function not implemented for"
                                   + "MatrixProductStateCanonical"))
    contracted_sites = []
    for site in range(len(mps)):
        t = tsr.contract(mps[site], mpo[site], mps.phys_label,
                mpo.physin_label)
        # Merge the doubled left/right virtual indices into single indices.
        t.consolidate_indices()
        contracted_sites.append(t)
    return MatrixProductState(contracted_sites, mps.left_label,
            mps.right_label, mpo.physout_label)
def tensor_to_mps(tensor, phys_labels=None, mps_phys_label='phys',
        left_label='left', right_label='right', chi=0, threshold=1e-15):
    """
    Split a tensor into MPS form by exact SVD
    Parameters
    ----------
    tensor : Tensor
    phys_labels : list of str, optional
        Can be used to specify the order of the physical indices for the MPS.
    mps_phys_label : str
        Physical labels of the resulting MPS will be renamed to this value.
    left_label : str
        Label for index of `tensor` that will be regarded as the leftmost index
        of the resulting MPS if it exists (must be unique).
        Also used as `left_label` for the resulting MPS.
    right_label : str
        Label for index of `tensor` that will be regarded as the rightmost
        index of the resulting MPS if it exists (must be unique).
        Also used as `right_label` for the resulting MPS.
    chi : int, optional
        Maximum number of singular values of each tensor to keep after
        performing singular-value decomposition.
    threshold : float
        Lower bound on the magnitude of singular values to keep. Singular
        values less than or equal to this value will be truncated.
    Returns
    -------
    MatrixProductState
    Notes
    -----
    The resulting MPS is left-canonised.
    """
    # Default: every index other than the boundary indices is physical.
    if phys_labels is None:
        phys_labels = [x for x in tensor.labels if x not in
                       [left_label, right_label]]
    nsites = len(phys_labels)
    V = tensor.copy()
    mps = []
    for k in range(nsites - 1):
        # `[left_label] * (left_label in V.labels)` includes the left boundary
        # index among the SVD row indices only when it is actually present.
        # Singular values are absorbed to the right, so U is left-canonical.
        U, V, _ = tsr.truncated_svd(V, [left_label] * (left_label in V.labels)
                + [phys_labels[k]], chi=chi, threshold=threshold,
                absorb_singular_values='right')
        U.replace_label('svd_in', right_label)
        U.replace_label(phys_labels[k], mps_phys_label)
        mps.append(U)
    # t = tsr.contract(S, V, ['svd_in'], ['svd_out'])
    # The remaining tensor V becomes the final (rightmost) site tensor.
    V.replace_label('svd_out', left_label)
    V.replace_label(phys_labels[nsites - 1], mps_phys_label)
    mps.append(V)
    return MatrixProductState(mps, phys_label=mps_phys_label,
            left_label=left_label, right_label=right_label)
def tensor_to_mpo(tensor, physout_labels=None, physin_labels=None,
        mpo_physout_label='physout', mpo_physin_label='physin',
        left_label='left', right_label='right', chi=0, threshold=1e-15):
    """
    Split a tensor into MPO form by exact SVD
    Parameters
    ----------
    tensor : Tensor
    physout_labels : list of str, optional
        The output physical indices for the MPO. First site of MPO has output
        index corresponding to physout_labels[0] etc.
        If `None` the first half of `tensor.labels` will be taken as output
        labels.
    physin_labels : list of str, optional
        The input physical indices for the MPO. First site of MPO has input
        index corresponding to physin_labels[0] etc.
        If `None` the second half of `tensor.labels` will be taken as input
        labels.
    mpo_physout_label : str
        Physical output labels of the resulting MPO will be renamed to this.
    mpo_physin_label : str
        Physical input labels of the resulting MPO will be renamed to this.
    left_label : str
        Label for index of `tensor` that will be regarded as the leftmost index
        of the resulting MPO if it exists (must be unique).
        Also used as `left_label` for the resulting MPO.
    right_label : str
        Label for index of `tensor` that will be regarded as the rightmost
        index of the resulting MPO if it exists (must be unique).
        Also used as `right_label` for the resulting MPO.
    chi : int, optional
        Maximum number of singular values of each tensor to keep after
        performing singular-value decomposition.
    threshold : float
        Lower bound on the magnitude of singular values to keep. Singular
        values less than or equal to this value will be truncated.
    """
    # Set physout_labels and physin_labels to default values if not given
    phys_labels = [x for x in tensor.labels if x not in
                   [left_label, right_label]]
    if physout_labels is None and physin_labels is None:
        physout_labels = phys_labels[:int(len(phys_labels) / 2)]
        physin_labels = phys_labels[int(len(phys_labels) / 2):]
    elif physout_labels is None:
        physout_labels = [x for x in phys_labels if x not in physin_labels]
    elif physin_labels is None:
        physin_labels = [x for x in phys_labels if x not in physout_labels]
    nsites = len(physin_labels)
    if len(physout_labels) != nsites:
        raise ValueError("len(physout_labels) != len(physin_labels)")
    V = tensor.copy()
    mpo = []
    for k in range(nsites - 1):
        # Split off one site at a time; each site keeps one physout/physin
        # pair plus the left virtual index (when present).
        U, V, _ = tsr.truncated_svd(V, [left_label] * (left_label in V.labels)
                + [physout_labels[k], physin_labels[k]],
                chi=chi, threshold=threshold)
        U.replace_label('svd_in', right_label)
        U.replace_label(physout_labels[k], mpo_physout_label)
        U.replace_label(physin_labels[k], mpo_physin_label)
        mpo.append(U)
    # The remaining tensor V becomes the final (rightmost) site tensor.
    V.replace_label('svd_out', left_label)
    V.replace_label(physout_labels[nsites - 1], mpo_physout_label)
    V.replace_label(physin_labels[nsites - 1], mpo_physin_label)
    mpo.append(V)
    return MatrixProductOperator(mpo, physout_label=mpo_physout_label,
            physin_label=mpo_physin_label, left_label=left_label,
            right_label=right_label)
def right_canonical_to_canonical(mps, threshold=1e-14):
    """
    Turn an MPS in right canonical form into an MPS in canonical form

    Singular values smaller than `threshold` are truncated at each bond.
    Returns a MatrixProductStateCanonical with the same left/right/phys
    labels as `mps`.
    """
    N = mps.nsites
    # Trivial 1x1 singular-value tensor for the open left boundary.
    S_prev = tsr.Tensor([[1.0]], labels=[mps.left_label, mps.right_label])
    S_prev_inv = S_prev.copy()
    B = mps[0]
    tensors = []
    svd_label = unique_label()
    for i in range(N):
        U, S, V = tsr.tensor_svd(B, [mps.phys_label, mps.left_label],
                svd_label=svd_label)
        # Truncate to threshold
        singular_values = np.diag(S.data)
        singular_values_to_keep = singular_values[singular_values >
                                                  threshold]
        S.data = np.diag(singular_values_to_keep)
        # Truncate corresponding singular index of U and V
        U.data = U.data[:, :, 0:len(singular_values_to_keep)]
        V.data = V.data[0:len(singular_values_to_keep)]
        U.replace_label(svd_label + "in", mps.right_label)
        V.replace_label(svd_label + "out", mps.left_label)
        S.replace_label([svd_label + "out", svd_label + "in"],
                [mps.left_label, mps.right_label])
        # Gamma tensor: strip off the previous bond's singular values.
        G = S_prev_inv[mps.right_label,] * U[mps.left_label,]
        tensors.append(S_prev)
        tensors.append(G)
        if i == N - 1:
            # The final SVD has no right index, so S and V are just scalars.
            # S is the norm of the state.
            tensors.append(S)
        else:
            # Absorb S and V into the next site tensor for the next SVD.
            V = S[mps.right_label,] * V[mps.left_label,]
            B = V[mps.right_label,] * mps[i + 1][mps.left_label,]
            # Store S and S^{-1} for next iteration
            S_prev = S.copy()
            S_prev_inv = S_prev.copy()
            S_prev_inv.inv()
            # S_prev_inv.data = np.diag(1./singular_values_to_keep)
    # Construct MPS in canonical form
    return MatrixProductStateCanonical(tensors,
            left_label=mps.left_label, right_label=mps.right_label,
            phys_label=mps.phys_label)
def left_canonical_to_canonical(mps, threshold=1e-14):
    """
    Turn an MPS in left canonical form into an MPS in canonical form
    """
    # Reverse the chain, reuse the right-canonical routine, then reverse
    # the result back.
    reversed_mps = reverse_mps(mps)
    canonical = right_canonical_to_canonical(reversed_mps,
            threshold=threshold)
    canonical.reverse()
    return canonical
def canonical_to_right_canonical(mps):
    """
    Turn an MPS in canonical form into an MPS in right canonical form
    """
    nphys = mps.nsites_physical
    # Absorb each singular-value tensor into the physical site on its left
    # ... except the last physical site, which is taken as-is.
    tensors = [mps[mps.physical_site(k)][mps.right_label,]
               * mps[mps.physical_site(k) + 1][mps.left_label,]
               for k in range(nphys - 1)]
    tensors.append(mps[mps.physical_site(nphys - 1)])
    # Reinstate the overall norm carried by the two boundary tensors.
    tensors[0].data = (tensors[0].data * np.linalg.norm(mps[-1].data)
                       * np.linalg.norm(mps[0].data))
    return MatrixProductState(tensors,
            left_label=mps.left_label, right_label=mps.right_label,
            phys_label=mps.phys_label)
def canonical_to_left_canonical(mps):
    """
    Turn an MPS in canonical form into an MPS in left canonical form
    """
    nphys = mps.nsites_physical
    # First physical site is taken as-is; every later site absorbs the
    # singular-value tensor immediately to its left.
    tensors = [mps[mps.physical_site(0)]]
    tensors += [mps[mps.physical_site(k) - 1][mps.right_label,]
                * mps[mps.physical_site(k)][mps.left_label,]
                for k in range(1, nphys)]
    # Reinstate the overall norm carried by the two boundary tensors.
    tensors[-1].data = (tensors[-1].data * np.linalg.norm(mps[-1].data)
                        * np.linalg.norm(mps[0].data))
    return MatrixProductState(tensors,
            left_label=mps.left_label, right_label=mps.right_label,
            phys_label=mps.phys_label)
| {
"repo_name": "andrewdarmawan/tncontract",
"path": "tncontract/onedim/onedim_core.py",
"copies": "1",
"size": "84888",
"license": "mit",
"hash": -5950812712196611000,
"line_mean": 43.0290456432,
"line_max": 112,
"alpha_frac": 0.5755466026,
"autogenerated": false,
"ratio": 3.984042802834749,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5059589405434749,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
"""
onedim_utils
==========
Module with various functions for MPS/MPOs.
"""
__all__ = ['init_mps_random', 'init_mps_allzero', 'init_mps_logical',
'onebody_sum_mpo', 'expvals_mps', 'ptrace_mps']
import numpy as np
from tncontract import tensor as tnc
from tncontract.onedim import onedim_core as onedim
# from tncontract import onedim as onedim
def init_mps_random(nsites, physdim, bonddim=1, left_label='left',
        right_label='right', phys_label='phys'):
    """
    Create an MPS with `nsites` sites and random tensors with physical
    dimensions given by `physdim` and bond dimensions given by
    `bonddim`. Open boundary conditions are used. The MPS is not normalized.
    Parameters
    ----------
    nsites : int
    physdim : int or list of ints
    bonddim : int or list of ints, optional
        The nth element of `bonddim` determines the right and left index of
        the tensors at sites n and n+1, respectively. The length of `bonddim`
        should be `nsites`-1. If `bonddim` is an int this is this is used for
        all bonds.
    left_label : str
    right_label : str
    phys_label : str
    """
    # Broadcast scalar dimensions to per-site / per-bond lists.
    if not np.iterable(physdim):
        physdim = [physdim] * nsites
    if not np.iterable(bonddim):
        bonddim = [bonddim] * (nsites - 1)
    # Pad with trivial boundary bonds of dimension one (open boundaries).
    bonddim = [1] + bonddim + [1]
    tensors = []
    for site in range(nsites):
        rand_tensor = tnc.Tensor(np.random.rand(
            physdim[site], bonddim[site], bonddim[site + 1]),
            [phys_label, left_label, right_label])
        # Rescale singular values so the state's norm does not blow up
        # with the number of sites.
        U, S, V = tnc.tensor_svd(rand_tensor, [phys_label, left_label])
        S.data = S.data / S.data[0, 0]
        rand_tensor = U["svd_in",] * S["svd_out",]
        rand_tensor = rand_tensor["svd_in",] * V["svd_out",]
        tensors.append(rand_tensor)
    return onedim.MatrixProductState(tensors, left_label=left_label,
            right_label=right_label, phys_label=phys_label)
def init_mps_allzero(nsites, physdim, left_label='left',
        right_label='right', phys_label='phys'):
    """
    Create an MPS with `nsites` sites in the "all zero" state |00..0>.
    Parameters
    ----------
    nsites : int
    physdim : int or list of ints
    left_label : str
    right_label : str
    phys_label : str
    """
    if not np.iterable(physdim):
        physdim = [physdim] * nsites
    tensors = []
    for site in range(nsites):
        # Basis vector |0> at this site, with trivial virtual bonds.
        vec = np.zeros(physdim[site])
        vec[0] = 1.0
        tensors.append(tnc.Tensor(vec.reshape(physdim[site], 1, 1),
                [phys_label, left_label, right_label]))
    return onedim.MatrixProductState(tensors, left_label=left_label,
            right_label=right_label, phys_label=phys_label)
def init_mps_logical(nsites, basis_state, physdim, left_label='left',
        right_label='right', phys_label='phys'):
    """
    Create an MPS with `nsites` sites in the logical basis state |ijk..l>.
    Parameters
    ----------
    nsites : int
    basis_state : int or list of ints
        Site `i` will be in the state |`basis_state[i]`> (or simply
        |`basis_state`> if a single int is provided).
    physdim : int or list of ints
    left_label : str
    right_label : str
    phys_label : str
    """
    if not np.iterable(physdim):
        physdim = [physdim] * nsites
    # Fix: the docstring promises a single int is accepted, but indexing
    # `basis_state[j]` would fail on an int — broadcast it to all sites.
    if not np.iterable(basis_state):
        basis_state = [basis_state] * nsites
    tensors = []
    for j in range(nsites):
        t = np.zeros(physdim[j])
        t[basis_state[j]] = 1.0
        t = tnc.Tensor(t.reshape(physdim[j], 1, 1), [phys_label, left_label,
                                                     right_label])
        tensors.append(t)
    return onedim.MatrixProductState(tensors, left_label=left_label,
            right_label=right_label, phys_label=phys_label)
def onebody_sum_mpo(terms, output_label=None):
    """
    Construct an MPO from a sum of onebody operators, using the recipe from
    the Supplemental Material of [1]_ (Eqs. (3) and (4))
    Parameters
    ---------
    terms : list
        A list containing the terms in the sum. Each term should be 2D
        array-like, e.g., a rank-two Tensor or numpy array.
    output_label : str, optional
        Specify the label corresponding to the output index. Must be the same
        for each element of `terms`. If not specified the first index is taken
        to be the output index.
    Returns
    ------
    MatrixProductOperator
    References
    ----------
    .. [1] E. Sanchez-Burillo et al., Phys. Rev. Lett. 113, 263604 (2014)
    """
    tensors = []
    for i, term1 in enumerate(terms):
        if output_label is not None:
            # Work on a copy so the caller's term is not reordered in place.
            term = term1.copy()
            term.move_index(output_label, 0)
        else:
            term = term1
        # The virtual bond has dimension two: one channel carries the
        # identity, the other carries the accumulated sum of terms.
        if i == 0:
            # Leftmost site: only a right virtual index.
            B = np.zeros(shape=term.shape + (2,), dtype=complex)
            for k in range(term.shape[0]):
                for l in range(term.shape[1]):
                    B[k, l, :] = [term[k, l], k == l]
            tensors.append(tnc.Tensor(B, ['physout', 'physin', 'right']))
        elif i == len(terms) - 1:
            # Rightmost site: only a left virtual index.
            B = np.zeros(shape=term.shape + (2,), dtype=complex)
            for k in range(term.shape[0]):
                for l in range(term.shape[1]):
                    B[k, l, :] = [k == l, term[k, l]]
            tensors.append(tnc.Tensor(B, ['physout', 'physin', 'left']))
        else:
            # Bulk site: 2x2 virtual structure (lower-triangular transfer).
            B = np.zeros(shape=term.shape + (2, 2), dtype=complex)
            for k in range(term.shape[0]):
                for l in range(term.shape[1]):
                    B[k, l, :, :] = [[k == l, 0], [term[k, l], k == l]]
            tensors.append(tnc.Tensor(B, ['physout', 'physin',
                                          'left', 'right']))
    return onedim.MatrixProductOperator(tensors, left_label='left',
            right_label='right', physin_label='physin',
            physout_label='physout')
def expvals_mps(mps, oplist=[], sites=None, output_label=None, canonised=None):
    # TODO: Why canonised gives strange results?
    # NOTE(review): mutable default `oplist=[]` — harmless here because it is
    # never mutated, but `oplist=None` would be the safer idiom.
    """
    Return single site expectation values <op>_i for all i
    Parameters
    ----------
    mps : MatrixProductState
    oplist : list or Tensor
        List of rank-two tensors representing the operators at each site.
        If a single `Tensor` is given this will be used for all sites.
    sites : int or list of ints, optional
        Sites for which to compute expectation values. If None all
        sites will be returned.
    output_label : str, optional
        Specify the label corresponding to the output index. Must be the same
        for each element of `terms`. If not specified the first index is taken
        to be the output index.
    canonised : {'left', 'right', None}, optional
        Flag to specify that `mps` is already in left or right canonical form.
    Returns
    ------
    array
        Complex array of same length as `mps` of expectation values.
    Notes
    -----
    After the function call, `mps` will be in left (right) canonical form for
    `canonised = 'right'` (`canonised = 'left'`).
    """
    if sites is None:
        sites = range(len(mps))
    if not np.iterable(sites):
        sites = [sites]
    N = len(sites)
    expvals = np.zeros(N, dtype=complex)
    # A single operator is broadcast to every requested site.
    if not isinstance(oplist, list):
        oplist_new = [oplist] * N
    else:
        oplist_new = oplist
    # For a left-canonical MPS, reverse the chain so the sweep below (which
    # assumes right canonical form) can be reused; undone at the end.
    if canonised == 'left':
        mps.reverse()
        oplist_new = oplist_new[::-1]
    elif canonised != 'right':
        mps.right_canonise()
    center = 0
    for i, site in enumerate(sites):
        # Move orthogonality center to site k
        mps.left_canonise(center, site)
        center = site
        # compute exp value for site k
        op = oplist_new[i]
        A = mps[site]
        if output_label is None:
            out_label = op.labels[0]
            in_label = op.labels[1]
        else:
            out_label = output_label
            # NOTE(review): `is not` compares string identity, not equality;
            # relies on label interning — confirm, `!=` would be safer.
            in_label = [x for x in op.labels if x is not out_label][0]
        Ad = A.copy()
        Ad.conjugate()
        # <A| op |A>: contract op into A, then close with the conjugate.
        exp = tnc.contract(A, op, mps.phys_label, in_label)
        exp = tnc.contract(Ad, exp, mps.phys_label, out_label)
        # Trace over the doubled left and right virtual indices.
        exp.contract_internal(mps.left_label, mps.left_label, index1=0,
                index2=1)
        exp.contract_internal(mps.right_label, mps.right_label, index1=0,
                index2=1)
        expvals[i] = exp.data
    # Undo the reversal performed for the 'left' case.
    if canonised == 'left':
        mps.reverse()
        oplist_new = oplist_new[::-1]
        expvals = expvals[::-1]
    return expvals
def ptrace_mps(mps, sites=None, canonised=None):
    # TODO: Why canonised gives strange results?
    """
    Return single site reduced density matrix rho_i for all i in sites.
    Parameters
    ----------
    mps : MatrixProductState
    sites : int or list of ints, optional
        Sites for which to compute the reduced density matrix. If None all
        sites will be returned.
    canonised : {'left', 'right', None}, optional
        Flag to specify that `mps` is already in left or right canonical form.
    Returns
    ------
    list
        List of same length as `mps` with rank-two tensors representing the
        reduced density matrices.
    Notes
    -----
    `mps` will be in left canonical form after the function call.
    """
    rho_list = []
    # For a left-canonical MPS, reverse the chain so the sweep below (which
    # assumes right canonical form) can be reused; undone at the end.
    if canonised == 'left':
        mps.reverse()
    elif canonised != 'right':
        mps.right_canonise()
    if sites is None:
        sites = range(len(mps))
    if not np.iterable(sites):
        sites = [sites]
    center = 0
    for site in sites:
        # Move orthogonality center to site k
        mps.left_canonise(center, site)
        center = site
        A = mps[center]
        Ad = A.copy()
        Ad.conjugate()
        # Prime the conjugate's physical label so rho keeps two open
        # physical indices after contracting the virtual ones.
        Ad.prime_label(mps.phys_label)
        rho = tnc.contract(A, Ad, [mps.left_label, mps.right_label],
                [mps.left_label, mps.right_label])
        rho_list.append(rho)
    # Undo the reversal performed for the 'left' case.
    if canonised == 'left':
        mps.reverse()
        rho_list = rho_list[::-1]
    return rho_list
| {
"repo_name": "andrewdarmawan/tncontract",
"path": "tncontract/onedim/onedim_utils.py",
"copies": "1",
"size": "10246",
"license": "mit",
"hash": 2428405209330774500,
"line_mean": 31.7348242812,
"line_max": 108,
"alpha_frac": 0.5649033769,
"autogenerated": false,
"ratio": 3.632045373980858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4696948750880858,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
"""
square_lattice
==========
Core module for square lattice tensor networks
"""
import numpy as np
import tncontract.onedim as od
import tncontract as tn
class SquareLatticeTensorNetwork():
    """Base class for square lattices, e.g. square-lattice PEPS and PEPO.
    The argument "tensors" is a two-dimensional array (a list of lists or 2D
    numpy array) of Tensor objects.
    Each tensor is expected to have four indices: up, down, left, right.
    The labels corresponding these indices are specified using the required
    arguments : left_label, right_label, up_label, down_label
    If the state has open boundaries, the edge indices of tensors on the
    boundary should have dimension 1. If not, the tensors will be put in this
    form."""
    def __init__(self, tensors, up_label="up", right_label="right",
                 down_label="down", left_label="left",
                 copy_data=True):
        self.up_label = up_label
        self.right_label = right_label
        self.down_label = down_label
        self.left_label = left_label
        if copy_data:
            # Creates copies of tensors in memory
            copied_tensors = []
            for row in tensors:
                copied_tensors.append([x.copy() for x in row])
            self.data = np.array(copied_tensors)
        else:
            # This will not create copies of tensors in memory
            # (just link to originals)
            self.data = np.array(tensors)
        # Every tensor will have four indices corresponding to
        # "left", "right" and "up", "down" labels.
        # Missing boundary indices are padded as dummy indices of dim 1.
        for i, x in np.ndenumerate(self.data):
            if left_label not in x.labels: x.add_dummy_index(left_label)
            if right_label not in x.labels: x.add_dummy_index(right_label)
            if up_label not in x.labels: x.add_dummy_index(up_label)
            if down_label not in x.labels: x.add_dummy_index(down_label)
    # Add container emulation
    def __iter__(self):
        """Iterate over rows of the underlying 2D tensor array."""
        return self.data.__iter__()
    def __len__(self):
        """Number of rows in the lattice."""
        return self.data.__len__()
    def __getitem__(self, key):
        """Index like a 2D numpy array, e.g. ``self[i, j]`` or slices."""
        return self.data.__getitem__(key)
    def __setitem__(self, key, value):
        """Assign tensors by 2D index, delegating to the numpy array."""
        self.data.__setitem__(key, value)
    def copy(self):
        """Return a copy of SquareLatticeTensorNetwork that is not linked in
        memory to the original."""
        return SquareLatticeTensorNetwork(self.data,
                up_label=self.up_label, right_label=self.right_label,
                down_label=self.down_label, left_label=self.left_label,
                copy_data=True)
    @property
    def shape(self):
        """(rows, cols) shape of the lattice."""
        return self.data.shape
    def is_left_right_periodic(self):
        """Check whether state is periodic by checking whether the left
        indices of the first column have dimension greater than one"""
        for x in self[:, 0]:
            if x.index_dimension(self.left_label) > 1:
                return True
        return False
    def can_contract(self):
        """Check whether the virtual indices of the tensor network can be
        contracted, based on bond dimensions.

        Returns True if every pair of neighbouring bond dimensions matches;
        otherwise prints the mismatched site pairs and returns False."""
        rows, cols = self.data.shape
        left_unmatched=[]
        up_unmatched=[]
        for i in range(1,rows):
            for j in range(1,cols):
                #Check all horizontal and vertical bonds
                if (self[i,j].index_dimension(self.up_label)!=
                        self[i-1,j].index_dimension(self.down_label)):
                    left_unmatched.append((i,j))
                if (self[i,j].index_dimension(self.left_label)!=
                        self[i,j-1].index_dimension(self.right_label)):
                    up_unmatched.append((i,j))
        if len(left_unmatched) == 0 and len(up_unmatched) == 0:
            return True
        else:
            print("Unmatched bonds found between the following sites:")
            for k in left_unmatched:
                print("("+str(k[0]-1)+", "+ str(k[1])+")"+" and "+str(k))
            for k in up_unmatched:
                print("("+str(k[0])+", "+ str(k[1]-1)+")"+" and "+str(k))
            return False
    def exact_contract(self, until_column=-1):
        """Will perform exact contraction of all virtual indices of the square
        lattice, starting from the top-left, contracting the whole first
        column, then contracting one column at a time."""
        rows, cols = self.data.shape
        mpo = column_to_mpo(self, 0)
        C = od.contract_virtual_indices(mpo)
        for i in range(1, cols):
            if i == until_column + 1:
                # Return the contraction early
                return C
            mpo = column_to_mpo(self, i)
            C = od.contract_multi_index_tensor_with_one_dim_array(C, mpo,
                    self.right_label, self.left_label)
            C.remove_all_dummy_indices([self.left_label, self.up_label,
                                        self.down_label])
        return C
    def mps_contract(self, chi, compression_type="svd", until_column=-1,
            max_iter=10, tolerance=1e-14, return_all_columns = False):
        """Approximately contract a square lattice tensor network using MPS
        evolution and compression. Will contract from left to right.
        If `return_all_columns` is true, will return a list of MPS
        corresponding to the contraction up to each column.

        `compression_type` is either "svd" or "variational"; `chi` bounds
        the bond dimension of the compressed boundary MPS. Returns 0.0 if
        the boundary MPS norm vanishes at any step."""
        nrows, ncols = self.shape
        if return_all_columns:
            column_list=[]
        # Divide matrix product state by its norm after each compression
        # but keep these factors in the variable `norm`
        norm = np.longdouble(1)
        for col in range(ncols - 1):
            if col == 0:
                mps_to_compress = column_to_mpo(self, 0)
            else:
                # Apply the next column (as an MPO) to the boundary MPS.
                column_mpo = column_to_mpo(self, col)
                mps_to_compress = od.contract_mps_mpo(compressed_mps,
                        column_mpo)
            if compression_type == "svd":
                compressed_mps = od.svd_compress_mps(mps_to_compress, chi,
                        normalise=False, threshold=tolerance)
                # Normalise MPS (although keep normalisation factor in `norm`)
                mps_norm = compressed_mps.norm(canonical_form="right")
                #Return 0 if the norm of the MPS is zero
                if mps_norm==0.0:
                    return 0.0
                compressed_mps[0].data = compressed_mps[0].data / mps_norm
                norm *= mps_norm
            elif compression_type == "variational":
                compressed_mps = mps_to_compress.variational_compress(
                        chi, max_iter=max_iter, tolerance=tolerance)
                # Normalise MPS (although keep normalisation factor in `norm`)
                mps_norm = compressed_mps.norm(canonical_form="left")
                #Return 0 if the norm of the MPS is zero
                if mps_norm==0.0:
                    return 0.0
                compressed_mps[-1].data = compressed_mps[-1].data / mps_norm
                norm *= mps_norm
            if return_all_columns:
                mps_copy=compressed_mps.copy()
                mps_copy[0].data *= norm
                column_list.append(mps_copy)
            if col == until_column:
                if return_all_columns:
                    return column_list
                elif compression_type == "svd":
                    #Convert to longdouble to store small or large values
                    #Note this will be stored on the first tensor
                    compressed_mps[0].data = np.longdouble(
                            compressed_mps[0].data)
                    compressed_mps[0].data *= norm
                    return compressed_mps
                elif compression_type == "variational":
                    compressed_mps[-1].data *= norm
                    return compressed_mps
        # For final column, compute contraction exactly
        final_column_mps = column_to_mpo(self, ncols - 1)
        full_contraction = od.inner_product_mps(compressed_mps,
                final_column_mps, return_whole_tensor=True,
                complex_conjugate_bra=False) * norm
        if return_all_columns:
            column_list.append(full_contraction)
            return column_list
        else:
            return full_contraction
    def col_to_1D_array(self, col):
        """
        Will extract column col from square_tn (which is assumed to be a
        SquareLatticeTensorNetwork object), and convert the column into a
        MatrixProductState object (if first or last column without periodic
        boundary conditions) or a MatrixProductOperator object.
        """
        new_data = self[:, col].copy()
        return od.OneDimensionalTensorNetwork(new_data,
                left_label=self.up_label,
                right_label=self.down_label)
    def fliplr(self):
        """
        Returns left-right mirror image of TN. Note: will not modify labels of
        constituent tensors, but will switch the `left_label` and `right_label`
        attributes of `SquareLatticeTensorNetwork`.
        """
        mirror_tn=self.copy()
        mirror_data=np.fliplr(mirror_tn)
        mirror_tn.data=mirror_data
        # Swap the label attributes so left/right semantics stay consistent
        # with the mirrored geometry.
        mirror_tn.right_label=self.left_label
        mirror_tn.left_label=self.right_label
        return mirror_tn
class SquareLatticePEPS(SquareLatticeTensorNetwork):
    """Square-lattice PEPS: a SquareLatticeTensorNetwork whose site tensors
    carry an additional physical index labelled `phys_label`."""
    def __init__(self, tensors, up_label="up", right_label="right",
                 down_label="down", left_label="left", phys_label="phys",
                 copy_data=True):
        SquareLatticeTensorNetwork.__init__(self, tensors, up_label,
                right_label, down_label, left_label,
                copy_data=copy_data)
        self.phys_label = phys_label
    def copy(self):
        """Return a copy of SquareLatticePEPS that is not linked in
        memory to the original."""
        return SquareLatticePEPS(self.data,
                up_label=self.up_label, right_label=self.right_label,
                down_label=self.down_label, left_label=self.left_label,
                phys_label=self.phys_label, copy_data=True)
    def outer_product(self, physin_label="physin", physout_label="physout"):
        """
        Take the outer product of this PEPS with itself, returning a PEPO.
        The outer product of each tensor in the PEPS is taken and
        virtual indices are consolidated. Returns an instance of SquareLatticePEPO."""
        tensor_array=[]
        for row in range(self.shape[0]):
            new_row=[]
            for col in range(self.shape[1]):
                #This takes the outer product of two tensors
                #Without contracting any indices
                outer = tn.contract(self[row,col], self[row,col], [], [])
                #Replace the first physical label with physin label
                # (labels.index finds the first occurrence, so the two
                # copies of phys_label are renamed one at a time)
                outer.labels[outer.labels.index(self.phys_label)]=physin_label
                #Replace the second physical label with physout label
                outer.labels[outer.labels.index(self.phys_label)]=physout_label
                #Consolidate indices
                outer.consolidate_indices(labels=[self.left_label,
                        self.right_label, self.up_label, self.down_label])
                new_row.append(outer)
            tensor_array.append(new_row)
        return SquareLatticePEPO(tensor_array, up_label=self.up_label,
                down_label=self.down_label, right_label=self.right_label,
                left_label=self.left_label, physin_label=physin_label,
                physout_label=physout_label)
    #Alias for outer_product
    density_operator = outer_product
def inner_product_peps(peps_ket, peps_bra, exact_contract=True,
        complex_conjugate_bra=True, compression_type="svd", chi=2,
        max_iter=10, tolerance=1e-14, contract_virtual=True):
    """Compute the inner product <bra|ket> of two PEPS of equal shape.

    Parameters
    ----------
    peps_ket, peps_bra : SquareLatticePEPS
        The ket and bra networks.
    exact_contract : bool, optional
        If True (default), contract the resulting scalar network exactly;
        otherwise use the approximate MPS contraction. (The previous
        default was the string "True", which is always truthy, so
        ``exact_contract=False`` behaved correctly only when passed
        explicitly; the boolean default is behaviorally equivalent.)
    complex_conjugate_bra : bool, optional
        NOTE(review): currently unused -- the bra tensors are always
        conjugated below. Kept for interface compatibility.
    compression_type, chi, max_iter, tolerance
        Forwarded to ``mps_contract`` when ``exact_contract`` is False.
    contract_virtual : bool, optional
        If False, return the uncontracted network of site overlaps
        instead of a scalar.
    """
    # Tensors formed by contracting the physical indices of ket and bra.
    new_tensors = []
    for i in range(peps_ket.shape[0]):
        new_row = []
        for j in range(peps_ket.shape[1]):
            # NOTE(review): the physical index label "phys" is hard-coded;
            # presumably this should use peps_ket.phys_label /
            # peps_bra.phys_label -- confirm before using custom labels.
            t = (tn.tensor.conjugate(peps_bra[i, j])["phys"] *
                 peps_ket[i, j]["phys"])
            t.consolidate_indices()
            new_row.append(t)
        new_tensors.append(new_row)
    ip = SquareLatticeTensorNetwork(new_tensors)
    if not contract_virtual:
        return ip
    if exact_contract:
        return ip.exact_contract()
    return ip.mps_contract(chi, compression_type=compression_type,
            max_iter=max_iter, tolerance=tolerance)
def outer_product_peps(peps1, peps2, physin_label="physin",
        physout_label="physout"):
    """Return the outer product of two PEPS networks as a PEPO.

    If `peps1` and `peps2` correspond to states |a> and |b>, the result is
    the density operator |a><b|: `physout_label` replaces the physical
    label of `peps1` (the |a> side) and `physin_label` replaces the
    physical label of the conjugated `peps2` (the <b| side). The two input
    PEPS must have the same shape.
    """
    #TODO input PEPS must have the same left right up down labels. Check this
    #TODO careful for conflicting phys labels of peps1 and peps2
    if peps1.shape != peps2.shape:
        raise ValueError("Peps input do not have same dimension.")
    rows = []
    for i in range(peps1.shape[0]):
        current_row = []
        for j in range(peps1.shape[1]):
            # Outer product of the two site tensors: contract no indices.
            site = tn.contract(peps1[i, j],
                    tn.tensor.conjugate(peps2[i, j]), [], [])
            # First occurrence of the phys label belongs to peps1 -> output.
            site.labels[site.labels.index(peps1.phys_label)] = physout_label
            # Remaining occurrence belongs to conjugated peps2 -> input.
            site.labels[site.labels.index(peps2.phys_label)] = physin_label
            # Merge the doubled virtual indices into single fat indices.
            site.consolidate_indices(labels=[peps1.left_label,
                peps1.right_label, peps1.up_label, peps1.down_label])
            current_row.append(site)
        rows.append(current_row)
    return SquareLatticePEPO(rows, up_label=peps1.up_label,
            down_label=peps1.down_label, right_label=peps1.right_label,
            left_label=peps1.left_label, physin_label=physin_label,
            physout_label=physout_label)
class SquareLatticePEPO(SquareLatticeTensorNetwork):
    """Projected entangled pair operator (PEPO) on a square lattice.

    Each site tensor carries the four virtual labels plus an ingoing
    (`physin_label`) and an outgoing (`physout_label`) physical index.
    """
    def __init__(self, tensors, up_label="up", right_label="right",
            down_label="down", left_label="left", physin_label="physin",
            physout_label="physout", copy_data=True):
        SquareLatticeTensorNetwork.__init__(self, tensors, up_label,
                right_label, down_label, left_label, copy_data=copy_data)
        # Labels of the ingoing/outgoing physical indices on every site.
        self.physin_label = physin_label
        self.physout_label = physout_label
    def copy(self):
        """Return a copy of SquareLatticePEPO that is not linked in
        memory to the original."""
        return SquareLatticePEPO(self.data,
                up_label=self.up_label, right_label=self.right_label,
                down_label=self.down_label, left_label=self.left_label,
                physin_label=self.physin_label,
                physout_label=self.physout_label, copy_data=True)
    def trace(self):
        """Contract the physin and physout indices of every tensor. Returns
        an instance of SquareLatticeTensorNetwork."""
        tensor_array=[]
        for i in range(self.shape[0]):
            row=[]
            for j in range(self.shape[1]):
                # Trace each site on a copy so the PEPO itself is unchanged.
                tmp=self[i,j].copy()
                tmp.trace(self.physin_label, self.physout_label)
                row.append(tmp)
            tensor_array.append(row)
        return SquareLatticeTensorNetwork(tensor_array, up_label=self.up_label,
                down_label=self.down_label, right_label=self.right_label,
                left_label=self.left_label)
def apply_pepo_to_peps(peps, pepo):
    """Apply a PEPO to a PEPS site-by-site and return the resulting PEPS.

    At each site the PEPS physical index is contracted with the PEPO's
    `physin` index; the PEPO's `physout` index then becomes the new
    physical index, and doubled virtual indices are consolidated.
    """
    nrows, ncols = peps.shape
    result_rows = []
    for r in range(nrows):
        row = []
        for c in range(ncols):
            site = peps[r, c][peps.phys_label] * pepo[r, c][pepo.physin_label]
            site.replace_label(pepo.physout_label, peps.phys_label)
            site.consolidate_indices()
            row.append(site)
        result_rows.append(row)
    return SquareLatticePEPS(result_rows,
            up_label=peps.up_label, right_label=peps.right_label,
            down_label=peps.down_label, left_label=peps.left_label,
            phys_label=peps.phys_label)
def column_to_mpo(square_tn, col):
    """
    Extract column `col` from `square_tn` (assumed to be a
    SquareLatticeTensorNetwork) and convert it into a MatrixProductState
    (for the first or last column, without periodic boundary conditions)
    or a MatrixProductOperator (for any interior column).
    """
    column = square_tn[:, col].copy()
    last_col = square_tn.shape[1] - 1
    if col == 0 or col == last_col:
        # Boundary column: one horizontal index is a dummy and is stripped,
        # leaving an MPS whose physical index points into the lattice.
        if col == 0:
            new_mps = od.MatrixProductState(column, square_tn.up_label,
                    square_tn.down_label, square_tn.right_label)
            dummy_label = square_tn.left_label
        else:
            new_mps = od.MatrixProductState(column, square_tn.up_label,
                    square_tn.down_label, square_tn.left_label)
            dummy_label = square_tn.right_label
        for site in new_mps.data:
            site.remove_all_dummy_indices(dummy_label)
        return new_mps
    # Interior column: both horizontal indices survive, giving an MPO.
    return od.MatrixProductOperator(column, square_tn.up_label,
            square_tn.down_label, square_tn.right_label,
            square_tn.left_label)
| {
"repo_name": "andrewdarmawan/tncontract",
"path": "tncontract/twodim/square_lattice.py",
"copies": "1",
"size": "18383",
"license": "mit",
"hash": 913164034327373000,
"line_mean": 43.0839328537,
"line_max": 101,
"alpha_frac": 0.5824946962,
"autogenerated": false,
"ratio": 3.9330338040222506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.501552850022225,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
""" Shapes for drawing genes and gene features.
"""
_contributors = [
"Darcy Jones <darcy.ab.jones@gmail.com>"
]
############################ Import all modules ##############################
from math import sin
from math import radians
import numpy as np
import matplotlib
from matplotlib.patches import PathPatch
from matplotlib.path import Path
################################## Classes ###################################
class Shape(matplotlib.patches.Patch):
    """ Base class for drawing genomic features.

    Shape objects are templates for later drawing: subclasses implement
    `_draw_vertices` to populate `self._vertices`, and this base class
    assembles them with `self._codes` into a matplotlib `Path`.
    """
    def __init__(
            self,
            start,
            end,
            strand=None,
            width=1,
            offset=0,
            by_axis=None,
            name=None,
            **kwargs
            ):
        """ Store the feature geometry and build the initial path.

        Parameters
        ----------
        start, end : float
            Feature coordinates along the drawing axis.
        strand : int, optional
            Orientation flag; subclasses treat -1 as "reverse".
        width : float, optional
            Extent perpendicular to the drawing axis.
        offset : float, optional
            Displacement perpendicular to the drawing axis.
        by_axis : str, optional
            If "y", subclasses swap the x/y columns of the vertices.
        name : str, optional
            Optional feature name.
        **kwargs
            Forwarded to matplotlib.patches.Patch.
        """
        super().__init__(**kwargs)
        self._start = start
        self._end = end
        self._strand = strand
        self._offset = offset
        self._width = width
        self._by_axis = by_axis
        self.name = name
        # Build the path once up front; setters rebuild it on change.
        self._draw_path()
        return
    def __repr__(self):
        # NOTE(review): uses the subclass docstring as the display name,
        # so subclasses must keep a short one-line docstring.
        clss = type(self).__doc__.strip().strip(".")
        return ("{obj}(start={_start}, "
                "end={_end}, "
                "offset={_offset}, "
                "width={_width}, "
                "by_axis={_by_axis})").format(obj=clss, **self.__dict__)
    def _draw_vertices(self):
        """ Populate self._vertices; must be overridden by subclasses. """
        raise NotImplementedError
        return  # unreachable, kept as-is
    def _draw_path(self):
        # Recompute vertices, then update the existing path in place or
        # create it on the first call (before self._path exists).
        self._draw_vertices()
        try:
            self._path.vertices = self._vertices
        except AttributeError:
            self._path = Path(self._vertices, self._codes)
    @property
    def path(self):
        return self._path
    def get_path(self):
        # Alias to path (matplotlib's Patch API calls get_path()).
        return self.path
    @property
    def vertices(self):
        return self._vertices
    @vertices.setter
    def vertices(self, vertices):
        # NOTE(review): _draw_path() re-runs _draw_vertices(), which
        # recomputes self._vertices from start/end -- so the value
        # assigned here is discarded in subclasses. Confirm intent.
        self._vertices = vertices
        self._draw_path()
        return
    @property
    def codes(self):
        return self._codes
    @codes.setter
    def codes(self, codes):
        self._codes = codes
        self._draw_path()
        return
    @property
    def start(self):
        return self._start
    @start.setter
    def start(self, start):
        self._start = start
        self._draw_path()
        return
    @property
    def end(self):
        return self._end
    @end.setter
    def end(self, end):
        self._end = end
        self._draw_path()
        return
    @property
    def strand(self):
        return self._strand
    @strand.setter
    def strand(self, strand):
        self._strand = strand
        self._draw_path()
        return
    @property
    def offset(self):
        return self._offset
    @offset.setter
    def offset(self, offset):
        self._offset = offset
        self._draw_path()
        return
    @property
    def width(self):
        return self._width
    @width.setter
    def width(self, width):
        self._width = width
        self._draw_path()
        return
    @property
    def by_axis(self):
        return self._by_axis
    @by_axis.setter
    def by_axis(self, by_axis):
        self._by_axis = by_axis
        self._draw_path()
class Rectangle(Shape):
    """ Rectangle. """
    def __init__(
            self,
            start,
            end,
            strand=None,
            width=1,
            offset=0,
            by_axis=None,
            name=None,
            **kwargs
            ):
        # Fixed codes for a closed four-sided polygon.
        self._codes = [
            Path.MOVETO,
            Path.LINETO,
            Path.LINETO,
            Path.LINETO,
            Path.CLOSEPOLY
        ]
        super().__init__(
            start=start,
            end=end,
            strand=strand,
            offset=offset,
            width=width,
            by_axis=by_axis,
            name=name,
            **kwargs
        )
        return
    def _draw_vertices(self):
        """ Compute the four corners (plus closing point) of the box. """
        lo = self.offset
        hi = self.offset + self.width
        self._vertices = np.array([
            [self.start, lo],   # bottom left
            [self.start, hi],   # top left
            [self.end, hi],     # top right
            [self.end, lo],     # bottom right
            [self.start, lo]    # back to bottom left
        ])
        if self.by_axis == "y":
            # Swap columns to draw along the y axis instead of x.
            self._vertices = self._vertices[:, ::-1]
        return
class Triangle(Shape):
    """ Triangle. """
    def __init__(
            self,
            start,
            end,
            strand=None,
            width=1,
            offset=0,
            by_axis=None,
            name=None,
            **kwargs
            ):
        # Fixed codes for a closed three-sided polygon.
        self._codes = [
            Path.MOVETO,
            Path.LINETO,
            Path.LINETO,
            Path.CLOSEPOLY
        ]
        super().__init__(
            start=start,
            end=end,
            strand=strand,
            offset=offset,
            width=width,
            by_axis=by_axis,
            name=name,
            **kwargs
        )
        return
    def _draw_vertices(self):
        """ Triangle with its tip at `end`; reversed when strand == -1. """
        base = self.start
        tip = self.end
        if self.strand == -1:
            # Reverse orientation: point the triangle the other way.
            base, tip = self.end, self.start
        lo = self.offset
        self._vertices = np.array([
            [base, lo],
            [base, lo + self.width],
            [tip, lo + (self.width / 2)],
            [base, lo]
        ])
        if self.by_axis == "y":
            # Swap columns to draw along the y axis instead of x.
            self._vertices = self._vertices[:, ::-1]
        return
class Arrow(Shape):
    """ Arrow. """
    def __init__(
            self,
            start,
            end,
            strand=None,
            head_length=1,
            tail_width=None,
            width=1,
            offset=0,
            by_axis=None,
            name=None,
            **kwargs
            ):
        """ Arrow from `start` to `end` with a triangular head.

        Parameters
        ----------
        head_length : float, optional
            Length of the arrow head along the drawing axis.
        tail_width : float, optional
            Width of the tail; defaults to `width` when None.

        All other parameters are as for `Shape`.
        """
        # Closed seven-segment polygon: rectangular tail plus triangular head.
        self._codes = [
            Path.MOVETO,
            Path.LINETO,
            Path.LINETO,
            Path.LINETO,
            Path.LINETO,
            Path.LINETO,
            Path.LINETO,
            Path.CLOSEPOLY
        ]
        # Must be set before Shape.__init__, which triggers _draw_path().
        self._tail_width = tail_width
        self._head_length = head_length
        super().__init__(
            start=start,
            end=end,
            strand=strand,
            offset=offset,
            width=width,
            by_axis=by_axis,
            name=name,
            **kwargs
        )
        return
    @property
    def head_length(self):
        return self._head_length
    @head_length.setter
    def head_length(self, head_length):
        self._head_length = head_length
        # Fix: rebuild the path (not just the vertex array) so the patch
        # reflects the new head length -- previously this called
        # _draw_vertices(), leaving self._path stale, inconsistent with
        # every other setter in Shape.
        self._draw_path()
    @property
    def tail_width(self):
        return self._tail_width
    @tail_width.setter
    def tail_width(self, tail_width):
        self._tail_width = tail_width
        # Fix: rebuild the path, consistent with Shape's other setters
        # (was _draw_vertices(), which never updated self._path).
        self._draw_path()
    def _draw_vertices(self):
        """ Compute the eight arrow vertices. """
        start = self.start
        end = self.end
        strand = self.strand
        width = self.width
        offset = self.offset
        head_length = self.head_length
        tail_width = self.tail_width
        if tail_width is None:
            tail_width = self.width
        # Clamp the head so it never extends beyond the feature span.
        if abs(head_length) > abs(end - start):
            head_length = end - start
            if start > end:
                head_length *= -1
        # Reverse strand: draw the arrow pointing the other way.
        if strand == -1:
            start = self.end
            end = self.start
            head_length *= -1
        # Centre a narrower tail within the full width.
        tail_offset = offset + (width - tail_width) / 2
        self._vertices = np.array([
            [start, tail_offset],
            [start, tail_offset + tail_width],
            [end - head_length, tail_offset + tail_width],
            [end - head_length, offset + width],
            [end, offset + (width / 2)],
            [end - head_length, offset],
            [end - head_length, tail_offset],
            [start, tail_offset]
        ])
        if self.by_axis == "y":
            self._vertices = self._vertices[:,::-1]
        return
class OpenTriangle(Shape):
    """ . """
    def __init__(
            self,
            start,
            end,
            strand=None,
            width=1,
            offset=0,
            by_axis=None,
            name=None,
            **kwargs
            ):
        # Two line segments, not closed: an open "tent" over the span.
        self._codes = [
            Path.MOVETO,
            Path.LINETO,
            Path.LINETO
        ]
        super().__init__(
            start=start,
            end=end,
            strand=strand,
            offset=offset,
            width=width,
            by_axis=by_axis,
            name=name,
            **kwargs
        )
        return
    def _draw_vertices(self):
        """ Apex sits midway between start and end, `width` above offset. """
        apex_x = (self.start + self.end) / 2
        base_y = self.offset
        self._vertices = np.array([
            [self.start, base_y],
            [apex_x, base_y + self.width],
            [self.end, base_y],
        ])
        if self.by_axis == "y":
            # Swap columns to draw along the y axis instead of x.
            self._vertices = self._vertices[:, ::-1]
        return
class OpenRectangle(Shape):
    """ . """
    def __init__(
            self,
            start,
            end,
            strand=None,
            width=1,
            offset=0,
            by_axis=None,
            name=None,
            **kwargs
            ):
        # Three line segments, not closed: a rectangle with an open bottom.
        self._codes = [
            Path.MOVETO,
            Path.LINETO,
            Path.LINETO,
            Path.LINETO
        ]
        super().__init__(
            start=start,
            end=end,
            strand=strand,
            offset=offset,
            width=width,
            by_axis=by_axis,
            name=name,
            **kwargs
        )
        return
    def _draw_vertices(self):
        """ Compute the four corners of the open box. """
        lo = self.offset
        hi = self.offset + self.width
        self._vertices = np.array([
            [self.start, lo],   # bottom left
            [self.start, hi],   # top left
            [self.end, hi],     # top right
            [self.end, lo],     # bottom right
        ])
        if self.by_axis == "y":
            # Swap columns to draw along the y axis instead of x.
            self._vertices = self._vertices[:, ::-1]
        return
class OpenSemicircle(Shape):
    """ . """
    def __init__(
            self,
            start,
            end,
            strand=None,
            width=1,
            offset=0,
            by_axis=None,
            name=None,
            **kwargs
            ):
        # A single cubic Bezier: the same four control points as an open
        # rectangle, rendered as a curve by the CURVE4 codes.
        self._codes = [
            Path.MOVETO,
            Path.CURVE4,
            Path.CURVE4,
            Path.CURVE4,
        ]
        super().__init__(
            start=start,
            end=end,
            strand=strand,
            offset=offset,
            width=width,
            by_axis=by_axis,
            name=name,
            **kwargs
        )
        return
    def _draw_vertices(self):
        """ Compute the Bezier control points (box corners). """
        lo = self.offset
        hi = self.offset + self.width
        self._vertices = np.array([
            [self.start, lo],   # bottom left
            [self.start, hi],   # top left
            [self.end, hi],     # top right
            [self.end, lo],     # bottom right
        ])
        if self.by_axis == "y":
            # Swap columns to draw along the y axis instead of x.
            self._vertices = self._vertices[:, ::-1]
        return
# NOTE(review): the block below is a module-level string literal that
# disables a set of unfinished shape stubs; it has no runtime effect.
'''
class Hexagon(Shape):
    """ Hexagon. """
    def __init__(self):
        return
class Ellipse(Shape):
    """ Ellipse. """
    def __init__(self):
        return
class Trapeziod(Shape):
    """ Trapeziod. """
    def __init__(self):
        return
class SineWave(Shape):
    """ . """
    def __init__(self):
        return
class SawtoothWave(Shape):
    """ . """
    def __init__(self):
        return
class SquareWave(Shape):
    """ . """
    def __init__(self):
        return
class TriangleWave(Shape):
    """ . """
    def __init__(self):
        return
class Helix(Shape):
    """ . """
    def __init__(self):
        return
class DoubleHelix(Shape):
    """ . """
    def __init__(self):
        return
'''
| {
"repo_name": "darcyabjones/bioplotlib",
"path": "bioplotlib/feature_shapes.py",
"copies": "1",
"size": "12495",
"license": "bsd-3-clause",
"hash": 5244658280130930000,
"line_mean": 18.9600638978,
"line_max": 78,
"alpha_frac": 0.4384153661,
"autogenerated": false,
"ratio": 4.286449399656947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011757885725068831,
"num_lines": 626
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# Prefer the faster third-party simplejson; fall back to the stdlib json.
try:
    import simplejson as json
    json # silence pyflakes
except ImportError:
    import json
qwerty = r'''
`~ 1! 2@ 3# 4$ 5% 6^ 7& 8* 9( 0) -_ =+
qQ wW eE rR tT yY uU iI oO pP [{ ]} \|
aA sS dD fF gG hH jJ kK lL ;: '"
zZ xX cC vV bB nN mM ,< .> /?
'''
dvorak = r'''
`~ 1! 2@ 3# 4$ 5% 6^ 7& 8* 9( 0) [{ ]}
'" ,< .> pP yY fF gG cC rR lL /? =+ \|
aA oO eE uU iI dD hH tT nN sS -_
;: qQ jJ kK xX bB mM wW vV zZ
'''
keypad = r'''
/ * -
7 8 9 +
4 5 6
1 2 3
0 .
'''
mac_keypad = r'''
= / *
7 8 9 -
4 5 6 +
1 2 3
0 .
'''
def get_slanted_adjacent_coords(x, y):
    '''
    Return the six coordinates adjacent to (x, y) on a standard keyboard,
    where each row is slanted one position to the right of the row above.
    Adjacencies are listed clockwise: the key to the left, the two keys
    above, the key to the right, then the two keys below. (Only the
    near-diagonal keys count: g is adjacent to t, y, b, v but not r, u,
    n, c.)
    '''
    return [
        (x - 1, y),                  # left
        (x, y - 1), (x + 1, y - 1),  # two above
        (x + 1, y),                  # right
        (x, y + 1), (x - 1, y + 1),  # two below
    ]
def get_aligned_adjacent_coords(x, y):
    '''
    Return the eight coordinates adjacent to (x, y), clockwise, on a
    keypad layout where every row is vertically aligned.
    '''
    return [
        (x - 1, y),                                  # left
        (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),  # three above
        (x + 1, y),                                  # right
        (x + 1, y + 1), (x, y + 1), (x - 1, y + 1),  # three below
    ]
def build_graph(layout_str, slanted):
    '''
    builds an adjacency graph as a dictionary: {character: [adjacent_characters]}.
    adjacent characters occur in a clockwise order.
    for example:
    * on qwerty layout, 'g' maps to ['fF', 'tT', 'yY', 'hH', 'bB', 'vV']
    * on keypad layout, '7' maps to [None, None, None, '=', '8', '5', '4', None]
    '''
    tokens = layout_str.split()
    token_size = len(tokens[0])
    assert all(len(token) == token_size for token in tokens), 'token length mismatch:\n ' + layout_str
    # x position unit length is token length plus 1 for the following whitespace.
    x_unit = token_size + 1
    neighbour_coords = get_slanted_adjacent_coords if slanted else get_aligned_adjacent_coords
    # Map from grid coordinate (x, y) to the characters of the key there.
    position_table = {}
    for y, line in enumerate(layout_str.split('\n')):
        # Each slanted (qwerty-style) row is indented one more space in.
        slant = y - 1 if slanted else 0
        for token in line.split():
            x, remainder = divmod(line.index(token) - slant, x_unit)
            assert remainder == 0, 'unexpected x offset for %s in:\n%s' % (token, layout_str)
            position_table[(x, y)] = token
    adjacency_graph = {}
    for (x, y), chars in position_table.items():
        for char in chars:
            # List position encodes direction (for qwerty: 0 left, 1 top,
            # 2 top right, ...); None pads edge keys so every adjacency
            # list has the same length.
            adjacency_graph[char] = [position_table.get(coord)
                                     for coord in neighbour_coords(x, y)]
    return adjacency_graph
# Regenerate the serialized adjacency graphs consumed by zxcvbn at runtime.
if __name__ == '__main__':
    with open('../generated/adjacency_graphs.json', 'w') as f:
        out = {}
        # (name, (layout string, is_slanted)) for each supported layout.
        for graph_name, args in [('qwerty', (qwerty, True)),
                                 ('dvorak', (dvorak, True)),
                                 ('keypad', (keypad, False)),
                                 ('mac_keypad', (mac_keypad, False))]:
            graph = build_graph(*args)
            out[graph_name] = graph
        json.dump(out, f)
| {
"repo_name": "moreati/python-zxcvbn",
"path": "zxcvbn/scripts/build_keyboard_adjacency_graph.py",
"copies": "2",
"size": "3690",
"license": "mit",
"hash": 7756545510550058000,
"line_mean": 38.2553191489,
"line_max": 156,
"alpha_frac": 0.5726287263,
"autogenerated": false,
"ratio": 3.0420445177246496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.461467324402465,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division
# vim: set fileencoding=utf-8 :
import sys
from nose_pyversion import PyVersion
def test_version_match():
    """Test that PyVersion returns None on matching filenames and False on
    non-matching filenames."""
    # Fix: was a Python-2-only `print 'testing'` statement, a SyntaxError
    # on Python 3 despite the file's __future__ imports.
    print('testing')
    separator = '-'
    pyversion = PyVersion()
    pyversion.separator = separator
    # A filename with no version suffix is accepted (None = no opinion).
    assert pyversion.wantFile('file.py') is None
    # Filenames matching the running interpreter's version are accepted.
    assert pyversion.wantFile('file-py{0}.py'.format(*sys.version_info
                                                     )) is None
    assert pyversion.wantFile('file-py{0}{1}.py'.format(*sys.version_info
                                                        )) is None
    assert pyversion.wantFile('file-py{0}{1}{2}.py'.format(*sys.version_info
                                                           )) is None
    # Cannot test this any other way; if the micro and minor versions are
    # equal this case cannot be distinguished.
    # Fix: was `is not`, which compares int identity rather than value.
    if sys.version_info.micro != sys.version_info.minor:
        assert pyversion.wantFile('file-py{0}{2}.py'.format(*sys.version_info)) is False
    major, minor, micro = sys.version_info[0:3]
    # Filenames for other interpreter versions are rejected.
    assert pyversion.wantFile('file-py{0}{1}.py'.format(
        major + 1,
        minor
    )) is False
    assert pyversion.wantFile('file-py{0}{1}.py'.format(
        major,
        minor - 1 if minor > 0 else minor + 1
    )) is False
    assert pyversion.wantFile('file-py{0}{1}{2}.py'.format(
        major,
        minor - 1 if minor > 0 else minor + 1,
        micro
    )) is False
    assert pyversion.wantFile('file-py{0}{1}{2}.py'.format(
        major,
        minor,
        micro - 1 if micro > 0 else micro + 1
    )) is False
| {
"repo_name": "danielholmstrom/nose-pyversion",
"path": "tests/test_pyversion.py",
"copies": "1",
"size": "1772",
"license": "mit",
"hash": 9017448563256716000,
"line_mean": 34.44,
"line_max": 88,
"alpha_frac": 0.5637697517,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50637697517,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Advanced GlyphProcessor
from vanilla import FloatingWindow, Slider
from mojo.glyphPreview import GlyphPreview
from mojo.roboFont import version as roboFontVersion
from GlyphProcessor import GlyphProcessorUI
from fontTools.misc.transform import Identity
class RotatedGlyphPreview(GlyphProcessorUI):
    """Floating RoboFont window showing the current glyph rotated by a
    user-controlled angle (slider, -180..180 degrees)."""
    def __init__(self):
        # NOTE(review): this class defines `_getExtensionID` but calls
        # `getExtensionID()` here -- presumably GlyphProcessorUI provides
        # the public wrapper; confirm against the base class.
        self.extensionID = self.getExtensionID()
        self._initSettings()
        self._loadSettings()
        self.w = self._buildUI()
        self._addObservers()
        self.setUpBaseWindowBehavior()
        # Seed the preview with the currently selected glyph.
        self._currentGlyphChangedObserver()
        self.w.open()
    def _getExtensionID(self):
        # Unique key under which this extension's settings are stored.
        return "com.netzallee.rotatedGlyphPreview"
    def _buildUI(self):
        """Create the floating window: glyph preview plus rotation slider."""
        w = FloatingWindow((300, 340), "Rotated Glyph Preview", (300, 340), (2560, 1440))
        w.preview = GlyphPreview((2, 2, -2, -40))
        w.rotationSlider = Slider((10, -30, -10, 20),
            minValue=-180,
            maxValue=180,
            value=self.settings["rotation"],
            tickMarkCount=5,
            stopOnTickMarks=False,
            continuous=True,
            callback=self._setRotation,
            sizeStyle="small",
        )
        return w
    def _getObservers(self):
        # Redraw whenever the editor redraws or the current glyph changes.
        return {
            "draw": ["_currentGlyphChangedObserver",],
            "currentGlyphChanged": ["_currentGlyphChangedObserver",],
        }
    def _currentGlyphChangedObserver(self, info=None):
        """Recompute the preview scale/centre for the current font, then draw."""
        if CurrentFont() is not None:
            # Normalise the preview to a 1000-unit em.
            self._scale = 1000 / CurrentFont().info.unitsPerEm
            # Vertical rotation centre: midpoint of ascender and descender.
            self._y = (CurrentFont().info.ascender + CurrentFont().info.descender) / 2 * self._scale
        else:
            self._scale = 1
            self._y = 500
        self._draw()
    def _setRotation(self, sender):
        """Slider callback: store the angle (snapped near 0/±90) and redraw."""
        _rotation = sender.get()
        # Snap to the nearest cardinal angle when within 3 degrees.
        if 87 <= _rotation <= 93:
            _rotation = 90
        elif -93 <= _rotation <= -87:
            _rotation = -90
        elif -3 <= _rotation <= 3:
            _rotation = 0
        self.settings["rotation"] = _rotation
        self._draw()
    def _deepAppendGlyph(self, glyph, gToAppend, font, offset=(0, 0)):
        """Append `gToAppend` to `glyph` with all components recursively
        flattened to outlines, applying each component's transform/offset."""
        if not gToAppend.components:
            glyph.appendGlyph(gToAppend, offset)
        else:
            for component in gToAppend.components:
                if component.baseGlyph not in font.keys():
                    # avoid traceback in the case where the selected glyph
                    # is referencing a component whose glyph is not in the font
                    continue
                compGlyph = font[component.baseGlyph].copy()
                if component.transformation != (1, 0, 0, 1, 0, 0):
                    # if component is skewed and/or is shifted:
                    matrix = component.transformation[0:4]
                    if matrix != (1, 0, 0, 1): # if component is skewed
                        transformObj = Identity.transform(matrix + (0, 0))
                        # ignore the original component's shifting values
                        compGlyph.transform(transformObj)
                # add the two tuples of offset:
                totalOffset = tuple(map(sum, zip(component.offset, offset)))
                glyph.appendGlyph(compGlyph, totalOffset)
            for contour in gToAppend:
                glyph.appendContour(contour, offset)
        # if the assembled glyph still has components, recursively
        # remove and replace them 1-by-1 by the glyphs they reference:
        if glyph.components:
            nestedComponent = glyph.components[-1]
            glyph.removeComponent(nestedComponent)
            glyph = self._deepAppendGlyph(glyph, font[nestedComponent.baseGlyph], font, nestedComponent.offset)
        return glyph
    def _draw(self):
        """Rebuild the flattened, scaled, rotated preview glyph and show it."""
        cG = CurrentGlyph()
        if cG is not None:
            self.previewGlyph = self._deepAppendGlyph(RGlyph(), cG, CurrentFont())
            self.previewGlyph.width = cG.width * self._scale
            # RoboFont 2 renamed scale()/rotate() to scaleBy()/rotateBy().
            if roboFontVersion >= "2.0b":
                self.previewGlyph.scaleBy((self._scale, self._scale))
                self.previewGlyph.rotateBy(self.settings["rotation"], (self.previewGlyph.width / 2, self._y))
            else:
                self.previewGlyph.scale((self._scale, self._scale))
                self.previewGlyph.rotate(self.settings["rotation"], (self.previewGlyph.width / 2, self._y))
        else:
            # No glyph selected: show an empty preview.
            self.previewGlyph = RGlyph()
        self.w.preview.setGlyph(self.previewGlyph)
    def _initSettings(self):
        # Defaults used before any stored settings are loaded.
        self.settings = {
            "rotation": 0,
        }
# Launch the preview window when the extension is loaded.
OpenWindow(RotatedGlyphPreview)
| {
"repo_name": "jenskutilek/RoboFont",
"path": "Extensions/GlyphRotator.roboFontExt/lib/GlyphPreviewRotate.py",
"copies": "1",
"size": "4724",
"license": "mit",
"hash": -408211972285068100,
"line_mean": 36.1968503937,
"line_max": 111,
"alpha_frac": 0.5774767146,
"autogenerated": false,
"ratio": 4.294545454545455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5372022169145455,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
#!/bin/env python
# standard library
import os
import sys
import argparse
import ast
# dependencies
import numpy as np
from skimage import img_as_ubyte
import toolz as tz
from sklearn import neighbors
from sklearn.preprocessing import StandardScaler
# local imports
from . import io
from . import screens
from .screens import cellomics
from . import preprocess as pre
from . import cluster
from .io import temporary_hdf5_dataset
from six.moves import map, zip
# Top-level argparse parser; each subcommand registers itself on `subpar`.
parser = argparse.ArgumentParser(description="Run the microscopium functions.")
subpar = parser.add_subparsers()
def get_command(argv):
    """Return the subcommand name used on the command line.

    Parameters
    ----------
    argv : [string]
        The argument vector; ``argv[0]`` is the program name and
        ``argv[1]`` the subcommand.

    Returns
    -------
    cmd : string
        The command name.
    """
    cmd = argv[1]
    return cmd
def main():
    """Run the command-line interface."""
    args = parser.parse_args()
    cmd = get_command(sys.argv)
    # Dispatch table from subcommand name to its handler.
    handlers = {
        'crop': run_crop,
        'mask': run_mask,
        'illum': run_illum,
        'montage': run_montage,
        'features': run_features,
    }
    handler = handlers.get(cmd)
    if handler is None:
        sys.stderr.write("Error: command %s not found. Run %s -h for help." %
                         (cmd, sys.argv[0]))
        sys.exit(2)  # 2 is commonly a command-line error
    handler(args)
# Command-line interface for the `crop` subcommand.
crop = subpar.add_parser('crop', help="Crop images.")
crop.add_argument('crops', nargs=4, metavar='INT',
                  help='xstart, xstop, ystart, ystop. "None" also allowed.')
crop.add_argument('images', nargs='+', metavar='IM', help="The input images.")
crop.add_argument('-o', '--output-suffix',
                  default='.crop.tif', metavar='SUFFIX',
                  help="What suffix to attach to the cropped images.")
crop.add_argument('-O', '--output-dir',
                  help="Directory in which to output the cropped images.")
crop.add_argument('-c', '--compress', metavar='INT', type=int, default=1,
                  help='Use TIFF compression in the range 0 (no compression) '
                       'to 9 (max compression, slowest) (default 1).')
def run_crop(args):
    """Run image cropping.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    def _to_slice_end(text):
        # The CLI accepts "None" (or any non-integer) for an open slice end.
        try:
            return int(text)
        except ValueError:
            return None
    xstart, xstop, ystart, ystop = (_to_slice_end(c) for c in args.crops)
    slices = (slice(xstart, xstop), slice(ystart, ystop))
    for imfn in args.images:
        im = io.imread(imfn)
        imout = pre.crop(im, slices)
        fnout = os.path.splitext(imfn)[0] + args.output_suffix
        if args.output_dir is not None:
            fnout = os.path.join(args.output_dir, os.path.split(fnout)[1])
        io.imsave(fnout, imout, compress=args.compress)
# Command-line interface for the `mask` subcommand.
mask = subpar.add_parser('mask', help="Estimate a mask over image artifacts.")
mask.add_argument('images', nargs='+', metavar='IM', help="The input images.")
mask.add_argument('-o', '--offset', metavar='INT', default=0, type=int,
                  help='Offset the automatic mask threshold by this amount.')
mask.add_argument('-v', '--verbose', action='store_true',
                  help='Print runtime information to stdout.')
mask.add_argument('-c', '--close', metavar='RADIUS', default=0, type=int,
                  help='Perform morphological closing of masks of this radius.')
mask.add_argument('-e', '--erode', metavar='RADIUS', default=0, type=int,
                  help='Perform morphological erosion of masks of this radius.')
def run_mask(args):
    """Run mask generation.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    created, processed = pre.write_max_masks(args.images, args.offset,
                                             args.close, args.erode)
    if args.verbose:
        print("%i masks created out of %i images processed"
              % (created, processed))
# Command-line interface for the `illum` subcommand.
illum = subpar.add_parser('illum',
                          help="Estimate and correct illumination.")
illum.add_argument('images', nargs='*', metavar='IMG', default=[],
                   help="The input images.")
illum.add_argument('-f', '--file-list', type=lambda x: open(x, 'r'),
                   metavar='FN',
                   help='Text file with one image filename per line.')
illum.add_argument('-o', '--output-suffix',
                   default='.illum.tif', metavar='SUFFIX',
                   help="What suffix to attach to the corrected images.")
illum.add_argument('-l', '--stretchlim', metavar='[0.0-1.0]', type=float,
                   default=0.0, help='Stretch image range before all else.')
illum.add_argument('-L', '--stretchlim-output', metavar='[0.0-1.0]', type=float,
                   default=0.0, help='Stretch image range before output.')
illum.add_argument('-q', '--quantile', metavar='[0.0-1.0]', type=float,
                   default=0.05,
                   help='Use this quantile to determine illumination.')
illum.add_argument('-r', '--radius', metavar='INT', type=int, default=51,
                   help='Radius in which to find quantile.')
illum.add_argument('-s', '--save-illumination', metavar='FN',
                   help='Save the illumination field to a file.')
illum.add_argument('-c', '--compress', metavar='INT', type=int, default=1,
                   help='Use TIFF compression in the range 0 (no compression) '
                        'to 9 (max compression, slowest) (default 1).')
illum.add_argument('-v', '--verbose', action='store_true',
                   help='Print runtime information to stdout.')
illum.add_argument('--method', metavar='STR', default='median',
                   help='How to collapse filtered images to illumination '
                        'field. options: median (default), mean.')
illum.add_argument('--random-seed', type=int, default=None,
                   help='The random seed for sampling illumination image.')
def run_illum(args):
    """Run illumination correction.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    # Merge filenames from a list file with those given positionally.
    if args.file_list is not None:
        args.images.extend([fn.rstrip() for fn in args.file_list])
    il = pre.find_background_illumination(args.images, args.radius,
                                          args.quantile, args.stretchlim,
                                          args.method)
    if args.verbose:
        print('illumination field:', type(il), il.dtype, il.min(), il.max())
    if args.save_illumination is not None:
        # Normalise to [0, 1] so the field can be saved as an image.
        io.imsave(args.save_illumination, il / il.max())
    base_fns = [pre.basefn(fn) for fn in args.images]
    ims_out = [fn + args.output_suffix for fn in base_fns]
    corrected = pre.correct_multiimage_illumination(args.images, il,
                                                    args.stretchlim_output,
                                                    args.random_seed)
    for im, fout in zip(corrected, ims_out):
        io.imsave(fout, im, compress=args.compress)
# Command-line interface for the `montage` subcommand.
montage = subpar.add_parser('montage',
                            help='Montage and channel stack images.')
montage.add_argument('images', nargs='*', metavar='IM',
                     help="The input images.")
montage.add_argument('-c', '--compress', metavar='INT', type=int, default=1,
                     help='Use TIFF compression in the range 0 '
                          '(no compression) '
                          'to 9 (max compression, slowest) (default 1).')
montage.add_argument('-o', '--montage-order', type=ast.literal_eval,
                     default=cellomics.SPIRAL_CLOCKWISE_RIGHT_25,
                     help='The shape of the final montage.')
montage.add_argument('-O', '--channel-order', type=ast.literal_eval,
                     default=[0, 1, 2],
                     help='The position of red, green, and blue channels '
                          'in the stream.')
montage.add_argument('-s', '--suffix', default='.montage.tif',
                     help='The suffix for saved images after conversion.')
montage.add_argument('-d', '--output-dir', default=None,
                     help='The output directory for the images. Defaults to '
                          'the input directory.')
def run_montage(args):
    """Run montaging and channel concatenation.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    ims = map(io.imread, args.images)
    ims_out = pre.montage_stream(ims, montage_order=args.montage_order,
                                 channel_order=args.channel_order)
    def out_fn(fn):
        # Build the output filename from the semantic parts of the input
        # name, dropping the per-field/per-channel components that the
        # montage collapses away.
        sem = cellomics.cellomics_semantic_filename(fn)
        out_fn = '_'.join([str(sem[k])
                           for k in sem
                           if k not in ['field', 'channel', 'suffix']
                           and sem[k] != ''])
        outdir = (args.output_dir if args.output_dir is not None
                  else sem['directory'])
        out = os.path.join(outdir, out_fn) + args.suffix
        return out
    # One output montage consumes `step` input images:
    # (fields per montage) * (number of channels).
    step = np.array(args.montage_order).size * len(args.channel_order)
    out_fns = (out_fn(fn) for fn in args.images[::step])
    for im, fn in zip(ims_out, out_fns):
        try:
            io.imsave(fn, im, compress=args.compress)
        except ValueError:
            # Fallback: rescale and convert to 8-bit when the montage's
            # dtype cannot be written directly.
            im = img_as_ubyte(pre.stretchlim(im, 0.001, 0.999))
            io.imsave(fn, im, compress=args.compress)
# --- 'features' subcommand: CLI options for feature-vector computation ---
features = subpar.add_parser('features',
                             help="Map images to feature vectors.")
features.add_argument('images', nargs='*', metavar='IM',
                      help="The input images.")
features.add_argument('-s', '--screen', default='cellomics',
                      help="The name of the screen being run. Feature maps "
                           "appropriate for the screen should be in the "
                           "'screens' package.")
features.add_argument('-c', '--n-components', type=int, default=2,
                      help='The number of components to compute for PCA.')
features.add_argument('-b', '--pca-batch-size', type=int, default=384,
                      help='The number of samples needed for each step of the '
                           'incremental PCA.')
features.add_argument('-n', '--num-neighbours', type=int, default=25,
                      help='The number of nearest neighbours to output '
                           'per sample.')
features.add_argument('-S', '--sample-size', type=int, default=None,
                      help='For feature computations that depend on objects, '
                           'sample this many objects.')
features.add_argument('--random-seed', type=int, default=None,
                      help='Set random seed, for testing and debugging only.')
features.add_argument('-e', '--emitter', default='json',
                      help='Format to output features during computation.')
features.add_argument('-G', '--global-threshold', action='store_true',
                      help='Use sampled intensity from all images to obtain '
                           'a global threshold.')
def run_features(args):
    """Run image feature computation.

    Computes a feature vector per input image using the screen-specific
    feature map, then makes three passes over the stored vectors:
    (1) emit raw features while fitting an online scaler and incremental
    PCA, (2) emit standardised and PCA-transformed vectors, (3) emit each
    sample's nearest neighbours.

    Parameters
    ----------
    args : argparse.Namespace
        The arguments parsed by the argparse library.
    """
    if args.global_threshold:
        images = map(io.imread, args.images)
        thresholds = pre.global_threshold(images, args.random_seed)
    else:
        thresholds = None
    # Fresh iterator: the first image is consumed by next() below to get
    # the feature names, then re-attached with tz.cons so every image is
    # still processed exactly once.
    images = map(io.imread, args.images)
    screen_info = screens.d[args.screen]
    index_function, fmap = screen_info['index'], screen_info['fmap']
    fmap = tz.partial(fmap, threshold=thresholds,
                      sample_size=args.sample_size,
                      random_seed=args.random_seed)
    indices = list(map(index_function, args.images))
    f0, feature_names = fmap(next(images))
    feature_vectors = tz.cons(f0, (fmap(im)[0] for im in images))
    online_scaler = StandardScaler()
    online_pca = cluster.OnlineIncrementalPCA(n_components=args.n_components,
                                              batch_size=args.pca_batch_size)
    nimages, nfeatures = len(args.images), len(f0)
    emit = io.emitter_function(args.emitter)
    with temporary_hdf5_dataset((nimages, nfeatures), 'float') as dset:
        # First pass: compute the features, compute the mean and SD,
        # compute the PCA
        for i, (idx, v) in enumerate(zip(indices, feature_vectors)):
            emit({'_id': idx, 'feature_vector': list(v)})
            dset[i] = v
            online_scaler.partial_fit(v.reshape(1, -1))
            online_pca.add_sample(v)
        # Second pass: standardise the feature vectors, compute PCA-transform
        for i, (idx, v) in enumerate(zip(indices, dset)):
            v_std = online_scaler.transform(v)
            v_pca = online_pca.transform(v)
            dset[i] = v_std
            emit({'_id': idx, 'feature_vector_std': list(v_std),
                  'pca_vector': list(v_pca)})
            # (removed a duplicated `online_pca.transform(v)` call whose
            # result was discarded -- pure wasted work)
        # Third pass: Compute the nearest neighbors graph.
        # THIS ANNOYINGLY INSTANTIATES FULL ARRAY -- no out-of-core
        # solution that I'm aware of...
        ng = neighbors.kneighbors_graph(dset, args.num_neighbours,
                                        include_self=False, mode='distance')
        for idx, row in zip(indices, ng):
            emit({'_id': idx, 'neighbours': [indices[i] for i in row.indices]})
if __name__ == '__main__':
    # Entry point when the module is executed as a script.
    main()
| {
"repo_name": "starcalibre/microscopium",
"path": "microscopium/main.py",
"copies": "1",
"size": "13464",
"license": "bsd-3-clause",
"hash": -1731179684482274000,
"line_mean": 43.4356435644,
"line_max": 80,
"alpha_frac": 0.5805109923,
"autogenerated": false,
"ratio": 3.9082728592162552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9965025949453054,
"avg_score": 0.004751580412640364,
"num_lines": 303
} |
from __future__ import absolute_import, division, print_function
# Can't import unicode_literals in setup.py currently
# http://stackoverflow.com/a/23175131
import codecs
import os
from setuptools import setup
import sys
# Prevent spurious errors during `python setup.py test`, a la
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html:
try:
    import multiprocessing  # noqa: F401  -- imported only for its side effect
except ImportError:
    pass
# Refuse to run on Python <= 2.6.4.
if sys.version_info[:3] <= (2, 6, 4):
    print("Please upgrade to a python >= 2.6.5!", file=sys.stderr)
    sys.exit(1)
# On Python 3, require at least 3.3.
if sys.version_info[0] == 3 and sys.version_info[1] < 3:
    print("Please upgrade to a python >= 3.3!", file=sys.stderr)
    sys.exit(1)
def read(fname):
    """Return the stripped UTF-8 text of *fname*, resolved relative to this file."""
    full_path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(full_path, 'r', 'utf8') as handle:
        return handle.read().strip()
def find_install_requires():
    """Parse requirements.txt into a list, skipping blanks and comment lines.

    Appends the `total-ordering` backport when functools lacks it
    (Pythons older than 2.7).
    """
    reqs = []
    for raw in read('requirements.txt').splitlines():
        stripped = raw.strip()
        if stripped and not raw.startswith('#'):
            reqs.append(stripped)
    try:
        from functools import total_ordering  # noqa: F401
    except ImportError:
        reqs.append('total-ordering==0.1')
    return reqs
def find_tests_require():
    """Parse test-requirements.txt into a list, skipping blanks and comments."""
    wanted = []
    for raw in read('test-requirements.txt').splitlines():
        stripped = raw.strip()
        if stripped and not raw.startswith('#'):
            wanted.append(stripped)
    return wanted
# Package metadata and build configuration.
# (Fixed: the call previously ended with "),", turning the whole statement
# into a discarded one-element tuple.)
setup(
    name='configman',
    version=read('configman/version.txt'),
    description=(
        'Flexible reading and writing of namespaced configuration options'
    ),
    long_description=read('README.md'),
    author='K Lars Lohn, Peter Bengtsson',
    author_email='lars@mozilla.com, peterbe@mozilla.com',
    url='https://github.com/mozilla/configman',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Intended Audience :: Developers',
        'Environment :: Console',
    ],
    packages=['configman'],
    package_data={'configman': ['*/*', 'version.txt']},
    install_requires=find_install_requires(),
    tests_require=find_tests_require(),
    test_suite='nose.collector',
    zip_safe=False,
)
| {
"repo_name": "twobraids/configman",
"path": "setup.py",
"copies": "2",
"size": "2497",
"license": "mpl-2.0",
"hash": 2713235367187858400,
"line_mean": 30.6075949367,
"line_max": 74,
"alpha_frac": 0.6315578694,
"autogenerated": false,
"ratio": 3.7324364723467864,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 79
} |
from __future__ import absolute_import, division, print_function
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 11:40:26 2018
@author: rachael
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 6 12:03:20 2017
@author: rachael
Create a dummy gsd file with 4 molecules of 17 beads each in 6 snapshots
Used to check cluster analysis
"""
import gsd.hoomd
import numpy as np
def quatMultiply(q1, q2):
    """Compose two quaternions.

    Parameters
    ----------
    q1: 1 x 4 numpy array
        quaternion applied first
    q2: 1 x 4 numpy array
        quaternion applied second

    Returns
    -------
    qM: 1 x 4 numpy array
        quaternion representing the rotation of q1 followed by the
        rotation of q2

    Notes
    -----
    q2 * q1 is the correct order for applying rotation q1 and then
    rotation q2
    """
    w, x, y, z = q2
    # Left-multiplication matrix of q2: dotting it with q1 yields q2 * q1.
    left_q2 = np.array([[w, -x, -y, -z],
                        [x,  w, -z,  y],
                        [y,  z,  w, -x],
                        [z, -y,  x,  w]])
    return np.dot(left_q2, q1)
def createOneMol(comPos, qrot, typeIDs):
    """Returns a molecule, which is a list of typeids and positions

    Parameters
    ----------
    comPos: 1 x 3 numpy array
        position of the center of mass
    qrot: 1 x 4 numpy array
        quaternion representing the orientation of the molecule
    typeIDs: ints
        IDs of beads, in the order centraltype, LB, AB

    Returns
    -------
    pos: 17 x 3 numpy array
        represents the positions of all the beads in the molecule
    typeinds: 1 x 17 numpy array
        represents the molecule types of all the beads in the molecule
        central bead, centraltype = typeID
        large beads, LB = 1
        aromatic beads, AB = 2
    diams: 1 x 17 numpy array
        gives the diameters of all the beads

    Notes
    -----
    For consistency, track the pairs of indices going into the aromatics in
    the order
    [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]]
    small beads are at a radius of 0.475 and an angle of theta = 10 degrees
    """
    sRad = 0.475
    th = 10 * (np.pi / 180)
    # (removed a dead `pos = np.zeros([17,3])` -- pos is rebuilt below)
    typeinds = np.array([typeIDs[0], typeIDs[1], typeIDs[1], typeIDs[1],
                         typeIDs[1], typeIDs[2], typeIDs[2], typeIDs[2],
                         typeIDs[2], typeIDs[2], typeIDs[2], typeIDs[2],
                         typeIDs[2], typeIDs[2], typeIDs[2], typeIDs[2],
                         typeIDs[2]])
    diams = np.zeros(17)
    for i in range(len(diams)):
        if typeinds[i] == typeIDs[2]:
            diams[i] = 0.125   # aromatic (small) beads
        else:
            diams[i] = 1.0     # central + large beads
    baseLocations = np.array([[0., 0., 0.], [0.5, 0., 0.], [-0.5, 0., 0.],
                              [1.5, 0., 0.], [-1.5, 0., 0.],
                              [-0.5, sRad*np.cos(th), sRad*np.sin(th)],
                              [-0.5, sRad*np.cos(th), -sRad*np.sin(th)],
                              [0., sRad*np.cos(th), sRad*np.sin(th)],
                              [0., sRad*np.cos(th), -sRad*np.sin(th)],
                              [0.5, sRad*np.cos(th), sRad*np.sin(th)],
                              [0.5, sRad*np.cos(th), -sRad*np.sin(th)],
                              [-0.5, -sRad*np.cos(th), sRad*np.sin(th)],
                              [-0.5, -sRad*np.cos(th), -sRad*np.sin(th)],
                              [0., -sRad*np.cos(th), sRad*np.sin(th)],
                              [0., -sRad*np.cos(th), -sRad*np.sin(th)],
                              [0.5, -sRad*np.cos(th), sRad*np.sin(th)],
                              [0.5, -sRad*np.cos(th), -sRad*np.sin(th)]])
    pos = np.zeros(np.shape(baseLocations))
    for rind in range(np.shape(baseLocations)[0]):
        r = baseLocations[rind, :]
        q = qrot[0]
        qvec = qrot[1:4]
        # Rotate r by quaternion qrot: v' = v + 2*q_w*(q_v x v)
        #                                   + 2*q_v x (q_v x v)
        rp = r + 2. * q * np.cross(qvec, r) \
            + 2. * np.cross(qvec, np.cross(qvec, r))
        pos[rind, :] = rp
    pos += comPos
    return (pos, typeinds, diams)
def createSnapshot(coms,qrots,step,typelist):
    """Create HOOMD snapshot with the given molecules

    Parameters
    ----------
    coms: N x 3 numpy array
        the positions of the centers of masses of the N molecules in the system
    qrots: N x 4 numpy array
        the orientations of the N molecules in the system
    step: int
        timestep
    typelist: list
        the type of the central bead of each molecule, corresponding to
        typeid of 0

    Returns
    -------
    snap: HOOMD snapshot
    """
    snap = gsd.hoomd.Snapshot()
    molno = np.shape(coms)[0]
    beadsPerMol = 17
    snap.particles.N = molno * beadsPerMol
    snap.configuration.step = step
    snap.configuration.box = [20,20,20,0,0,0]
    # Central-bead types first, then the shared large/aromatic bead types.
    utypelist = np.unique(typelist)
    snap.particles.types = list(utypelist)+['LB','AB']
    snap.particles.position = np.zeros([molno * beadsPerMol,3])
    snap.particles.body = np.zeros(molno * beadsPerMol)
    snap.particles.typeid = np.zeros(molno * beadsPerMol)
    snap.particles.diameter = np.zeros(molno * beadsPerMol)
    for moli in range(molno):
        # All 17 beads of a molecule share one rigid-body id (= moli).
        snap.particles.body[(moli * beadsPerMol): \
                            (moli * beadsPerMol + beadsPerMol)] \
            = moli * np.ones(beadsPerMol)
        # NOTE(review): np.argwhere returns a 2-D array (eg [[i]]), not an
        # int; this relies on numpy coercion when building typeinds in
        # createOneMol -- confirm it behaves on the numpy version in use.
        typeIDs = [np.argwhere(utypelist==typelist[moli]),len(utypelist),
                   len(utypelist)+1]
        (pos,typeinds,diams) = createOneMol(coms[moli,:],qrots[moli,:],typeIDs)
        snap.particles.position[(moli * beadsPerMol): \
                                (moli * beadsPerMol + beadsPerMol)] = pos
        snap.particles.typeid[(moli * beadsPerMol): \
                              (moli * beadsPerMol + beadsPerMol)] = typeinds
        snap.particles.diameter[(moli * beadsPerMol): \
                                (moli * beadsPerMol + beadsPerMol)] = diams
    return snap
if __name__ == "__main__":
    # Build six hand-crafted snapshots of 4 molecules each and write them
    # into several .gsd trajectory files used to exercise cluster analysis.
    #quaternion = (cos(th/2),sin(th/2) omhat) => rotation of th about omhat
    molno = 4
    ats = 17
    pN = molno * ats  # total particle count (not used below)
    df4 = gsd.hoomd.open('dummyfull2type_run1.gsd','wb')
    """Snapshot 1"""
    # Four well-separated molecules, all unrotated.
    coms1 = np.array([[0.,0.,0.],[0.,3.,0.],[0.,0.,-3],[0.,3.,-3.]])
    qrot1 = np.array([[1.,0.,0.,0.],[1.,0.,0.,0.],[1.,0.,0.,0],[1.,0.,0.,0.]])
    typelist = [u'EA',u'EB',u'EB',u'EA']
    snap1 = createSnapshot(coms1,qrot1,0,typelist)
    df4.append(snap1)
    """Snapshot 2"""
    coms2 = np.array([[0.,0.,0.],[-1.5,2.5,0.],[0.,3.,-3.],[1.,3.5,-3.5]])
    qrot2 = np.array([[1.,0.,0.,0.],[np.cos(np.pi/4),0.,0.,np.sin(np.pi/4)],
                      [np.cos(np.pi/4),0.,0.,np.sin(np.pi/4)],
                      quatMultiply(np.array([np.cos(np.pi/4),0.,
                                             np.sin(np.pi/4),0.]),
                                   np.array([np.cos(np.pi/4),0.,0.,
                                             np.sin(np.pi/4)]))])
    snap2 = createSnapshot(coms2,qrot2,1,typelist)
    df4.append(snap2)
    """Snapshot 3"""
    coms3 = np.array([[0.,0.,0.],[0.,1.,0.],[-4.5,-1.0,0.],[-4.,0.,0.]])
    qrot3 = np.array([[1.,0.,0.,0.],[-1.,0.,0.,0.],
                      [1.,0.,0.,0.],[1.,0.,0.,0.]])
    snap3 = createSnapshot(coms3,qrot3,2,typelist)
    df4.append(snap3)
    """Snapshot 4"""
    coms4 = np.array([[0.,0.,0.],[0.,1.,0.],[-4.,0.,0.],[-4.,1.,0.]])
    qrot4 = qrot3
    snap4 = createSnapshot(coms4,qrot4,3,typelist)
    df4.append(snap4)
    """Snapshot 5"""
    coms5 = np.array([[0.,0.,0.],[0.,1.,0.],[0.5,2.,-0.5],[0.5,3.,-0.5]])
    qrot5 = np.array([[1.,0.,0.,0.],[-1.,0.,0.,0.],
                      [np.cos(np.pi/4),0.,np.sin(np.pi/4),0.],
                      [np.cos(np.pi/4),0.,-np.sin(np.pi/4),0.]])
    snap5 = createSnapshot(coms5,qrot5,4,typelist)
    df4.append(snap5)
    """Snapshot 6"""
    coms6 = np.array([[0.,0.,0.],[0.,-0.5,np.sqrt(3)/2],
                      [0.,0.5,np.sqrt(3)/2],[0.,0.,-1.]])
    qrot6 = np.array([[np.cos(np.pi/4),np.sin(np.pi/4),0.,0.],
                      [np.cos(np.pi/12),-np.sin(np.pi/12),0.,0.],
                      [np.cos(np.pi/12),np.sin(np.pi/12),0.,0.],
                      [np.cos(np.pi/4),np.sin(np.pi/4),0.,0.]])
    snap6 = createSnapshot(coms6,qrot6,5,typelist)
    df4.append(snap6)
    # Additional runs reuse the snapshots above in different orders so the
    # cluster-analysis tests see varied trajectories.
    df4_2 = gsd.hoomd.open('dummyfull2type_run2.gsd','wb')
    df4_2.append(snap2)
    df4_2.append(snap2)
    df4_2.append(snap3)
    df4_2.append(snap5)
    df4_2.append(snap6)
    df4_2.append(snap6)
    df4_3 = gsd.hoomd.open('dummyfull2type_run3.gsd','wb')
    df4_3.append(snap2)
    df4_3.append(snap2)
    df4_3.append(snap4)
    df4_3.append(snap4)
    df4_3.append(snap5)
    df4_3.append(snap5)
    df4_4 = gsd.hoomd.open('dummyfull2type_run4.gsd','wb')
    df4_4.append(snap1)
    df4_4.append(snap3)
    df4_4.append(snap4)
    df4_4.append(snap4)
    df4_4.append(snap6)
    df4_4.append(snap6)
    df4_5 = gsd.hoomd.open('dummyfull2type_run5.gsd','wb')
    df4_5.append(snap2)
    df4_5.append(snap2)
    df4_5.append(snap3)
    df4_5.append(snap5)
    df4_5.append(snap5)
    df4_5.append(snap5)
| {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/data/dummgsdfull2type.py",
"copies": "1",
"size": "9037",
"license": "mit",
"hash": -1667528193522650600,
"line_mean": 36.1893004115,
"line_max": 79,
"alpha_frac": 0.5227398473,
"autogenerated": false,
"ratio": 2.8338037002195047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8722669348078662,
"avg_score": 0.026774839888168676,
"num_lines": 243
} |
from __future__ import (absolute_import, division, print_function)
from addie.processing.idl.export_table import ExportTable
from addie.processing.idl.import_table import ImportTable
from addie.processing.idl.table_handler import TableHandler
from addie.processing.idl.populate_background_widgets import PopulateBackgroundWidgets
class UndoHandler(object):
    """Undo/redo support for the post-processing table.

    Snapshots of the table (as exported text) are kept in the parent's
    ``undo_table`` dict, keyed by stringified index; ``parent.undo_index``
    points at the currently displayed snapshot.
    """

    def __init__(self, parent=None):
        # parent is the main window holding the table widget and undo state
        self.parent = parent
        self.table = self.parent.postprocessing_ui.table

    def save_table(self, first_save=False):
        """Capture the current table contents as a new undo snapshot."""
        if not first_save:
            self.parent.undo_button_enabled = True
        # retrieve table settings
        o_export_table = ExportTable(parent=self.parent)
        o_export_table.collect_data()
        o_export_table.format_data()
        _new_entry = o_export_table.output_text
        self.add_new_entry_to_table(new_entry=_new_entry)

    def add_new_entry_to_table(self, new_entry=''):
        """Shift all stored snapshots down one slot and store *new_entry* last.

        The newest snapshot always lives at key str(parent.max_undo_list);
        older ones have their keys decremented, so the oldest falls off.
        """
        undo_table = self.parent.undo_table
        new_dict = {}
        if not undo_table == {}:
            for key in undo_table:
                _new_key = str(int(key) - 1)
                new_dict[_new_key] = undo_table[key]
            undo_table = new_dict
        undo_table[str(self.parent.max_undo_list)] = new_entry
        self.parent.undo_table = undo_table

    def undo_table(self):
        """Step back one snapshot (no-op at index 0)."""
        if self.parent.undo_index == 0:
            return
        self.parent.undo_index -= 1
        self.load_table()
        self.check_undo_widgets()

    def redo_table(self):
        """Step forward one snapshot (no-op at the newest index)."""
        if self.parent.undo_index == self.parent.max_undo_list:
            return
        self.parent.undo_index += 1
        self.load_table()
        self.check_undo_widgets()

    def load_table(self):
        """Reload the table from the snapshot at parent.undo_index."""
        # block signals so repopulating the table doesn't trigger handlers
        self.table.blockSignals(True)
        _table_to_reload = self.parent.undo_table[str(self.parent.undo_index)]
        o_table = TableHandler(parent=self.parent)
        o_table._clear_table()
        o_import = ImportTable(parent=self.parent)
        o_import._list_row = _table_to_reload
        o_import.parser()
        o_import.populate_gui()
        _pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
        _pop_back_wdg.run()
        self.table.blockSignals(False)

    def check_undo_widgets(self):
        """Enable/disable the undo and redo buttons for the current index."""
        _undo_index = self.parent.undo_index
        if _undo_index == 0:
            _undo_status = False
            _redo_status = True
        elif _undo_index == 10:
            # NOTE(review): hard-coded 10 -- presumably this should be
            # self.parent.max_undo_list (as used in redo_table); confirm.
            _undo_status = True
            _redo_status = False
        elif str(_undo_index-1) not in self.parent.undo_table:
            _undo_status = False
            _redo_status = True
        else:
            _undo_status = True
            _redo_status = True
        # buttons in main gui (Edit menu bar) removed for now !
        # self.parent.ui.actionRedo.setEnabled(redo_status)
        # self.parent.ui.actionUndo.setEnabled(undo_status)
        self.parent.undo_button_enabled = _undo_status
        self.parent.redo_button_enabled = _redo_status
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/undo_handler.py",
"copies": "1",
"size": "2998",
"license": "mit",
"hash": -389354023364675140,
"line_mean": 31.5869565217,
"line_max": 86,
"alpha_frac": 0.6117411608,
"autogenerated": false,
"ratio": 3.674019607843137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4785760768643137,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
import pytest
import yaml
# Load the shared test variables once at import time; the parametrized
# tests below consume them.
with open('tests/vars/filebeat.vars', 'r') as f:
    try:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # and can construct arbitrary Python objects from the input file.
        yml_vars = yaml.safe_load(f)
    except yaml.YAMLError as e:
        print(e)
# begin testing
# parametrize all of the values in the list so that we test all of them even if one fails
@pytest.mark.parametrize("package", yml_vars.get('packages'))
# Test for packages that are installed
def test_packages_installed(host, package):
    """Assert each package listed in filebeat.vars is installed (as root)."""
    with host.sudo():
        assert host.package(package).is_installed
@pytest.mark.parametrize("service", yml_vars.get('services'))
# test for services that are enabled
def test_service_enabled(host, service):
    """Assert each listed service is both enabled at boot and running."""
    assert host.service(service).is_enabled
    assert host.service(service).is_running
# Can use this if we want to split up enabled and running into separate checks
# @pytest.mark.parametrize("service", services)
# # test for services that are running
# def test_service_running(host, service):
# assert host.service(service).is_running
@pytest.mark.parametrize("dir_path", yml_vars.get('dir_paths'))
# test for directories that should of been made
def test_directories(host, dir_path):
    """Assert each expected directory exists and is a directory (as root)."""
    with host.sudo():
        assert host.file(dir_path).exists
        assert host.file(dir_path).is_directory
@pytest.mark.parametrize("file_path", yml_vars.get('file_paths'))
# test for files that should exist
def test_files(host, file_path):
    """Assert each expected file exists with the right owner/group/mode.

    Each entry is a list: [path, optional "user:group", optional octal mode].
    Missing optional fields are skipped (IndexError paths below).
    """
    file_p = file_path[0]
    try:
        file_u, file_g = file_path[1].split(':')
    except IndexError:
        file_u = None
        file_g = None
    try:
        # normalise the expected mode to octal-string form for comparison
        file_m = oct(int(file_path[2], 8))
    except IndexError:
        file_m = None
    with host.sudo():
        assert host.file(file_p).exists
        assert host.file(file_p).is_file
        # filebeat.yml must reference the suricata and fsf log inputs
        if file_p.split('/')[-1] == 'filebeat.yml':
            assert host.file(file_p).contains('/data/suricata/eve.json')
            assert host.file(file_p).contains('/data/fsf/rockout.log')
        if file_u:
            assert host.file(file_p).user == file_u
        if file_g:
            assert host.file(file_p).group == file_g
        if file_m:
            assert oct(host.file(file_p).mode) == file_m
| {
"repo_name": "rocknsm/rock",
"path": "tests/test_filebeat.py",
"copies": "4",
"size": "2381",
"license": "apache-2.0",
"hash": -7012542108731036000,
"line_mean": 34.0147058824,
"line_max": 89,
"alpha_frac": 0.6467870643,
"autogenerated": false,
"ratio": 3.5066273932253313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6153414457525331,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object)
import logging
import os
from os.path import join, splitext, exists, isdir, isfile
from pathlib2 import Path
import subprocess
import re
import csv
from collections import OrderedDict
from dateutil import parser as dateparser
import xmltodict
logger = logging.getLogger()
def parse_samplesheet(file_path, standardize_keys=True):
    """Parse an Illumina SampleSheet.csv into a list of sample dicts.

    Handles both the old plain-CSV format (IEM v3) and the newer
    INI-style CSV format (IEM v4).

    Parameters
    ----------
    file_path : str
        Path to the SampleSheet.csv file.
    standardize_keys : bool
        When True, strip underscores from column names so old and new
        formats share keys (eg Sample_ID -> SampleID).

    Returns
    -------
    samples : list of dict
        One dict per sample row.
    chemistry : str or None
        The assay/chemistry string, when it can be determined.
    """
    # Old plain CSV format, IEM v3:
    # FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,
    # Operator,SampleProject
    #
    # last line: #_IEMVERSION_3_TruSeq LT,,,,,,,,,

    # Newer INI-style CSV format, IEM v4:
    # [Header] / [Reads] / [Settings] sections, then [Data] followed by the
    # CSV sample table:
    # Lane,Sample_ID,Sample_Name,...,Sample_Project,Description

    # 'rU' mode was deprecated and removed in Python 3.11; plain 'r'
    # already performs universal-newline translation on Python 3.
    with open(file_path, "r") as f:
        lines = f.readlines()

    chemistry = None

    # IEM v4 INI-style CSV
    if '[Header]' in lines[0]:
        section = None
        for i, l in enumerate(lines):
            if l[0] == '[':
                section = l[1:].split(']')[0]
            if section == 'Header' and l.startswith('Assay,'):
                chemistry = l.split(',')[1].strip()
            if section == 'Data':
                data_index = i
                break
        reader = csv.DictReader(lines[data_index + 1:])
        if standardize_keys:
            samples = []
            # remove any underscores to make names consistent between
            # old and new style samplesheets (eg Sample_ID -> SampleID)
            for r in reader:
                r = {k.replace('_', ''): r[k] for k in r.keys()}
                samples.append(r)
        else:
            samples = [row for row in reader]
        return samples, chemistry
    else:  # Plain CSV (IEM v3 ?)
        reader = csv.DictReader(lines)
        samples = [row for row in reader]
        # The chemistry is encoded in the trailing "#_IEMVERSION_..." line;
        # guard against an empty sheet (previously raised IndexError).
        if samples:
            lastlinebit = samples[-1].get('FCID', None)
            if lastlinebit is not None:
                chemistry = lastlinebit.split('_')[-1].strip()
            del samples[-1:]
        return samples, chemistry
def filter_samplesheet_by_project(file_path, proj_id,
                                  project_column_label='SampleProject'):
    """Return samplesheet lines (CRLF-terminated) for a single project.

    Keeps the CSV header, every row whose project column equals *proj_id*,
    and any '#'-prefixed comment/footer lines. INI-style (IEM v4) headers
    are skipped down to the [Data] section first.

    Parameters
    ----------
    file_path : str
        Path to the SampleSheet.csv file.
    proj_id : str
        Project identifier to keep.
    project_column_label : str
        Column name (compared with underscores removed) holding the
        project id.

    Returns
    -------
    list of str
        Selected lines, each terminated with Windows-style CRLF.
    """
    # FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,
    # Operator,SampleProject
    #
    # last line: #_IEMVERSION_3_TruSeq LT,,,,,,,,,
    outlines = []
    # 'rU' mode was removed in Python 3.11; 'r' already handles newlines.
    with open(file_path, "r") as f:
        header = f.readline().strip()
        # skip any INI-style headers down to the CSV sample list in [Data]
        if '[Header]' in header:
            while '[Data]' not in header:
                header = f.readline()
            header = f.readline().strip()
        s = header.split(',')
        # old samplesheet formats have no underscores in column labels,
        # newer (IEMv4) ones do. by removing underscores here, we can find
        # 'SampleProject' and 'Sample_Project', whichever exists
        s_no_underscores = [c.replace('_', '') for c in s]
        project_column_index = s_no_underscores.index(project_column_label)
        outlines.append(header + '\r\n')
        for l in f:
            s = l.strip().split(',')
            if s[project_column_index] == proj_id or l[0] == '#':
                outlines.append(l.strip() + '\r\n')
    return outlines
def samplesheet_to_dict(samplesheet_rows, key='SampleID'):
    """Index samplesheet rows by *key*, dropping the key column from values.

    Later rows with a duplicate key overwrite earlier ones. Input rows are
    not mutated.
    """
    return {row[key]: {k: v for k, v in row.items() if k != key}
            for row in samplesheet_rows}
def get_project_ids_from_samplesheet(samplesheet, include_no_index_name=None):
    """Return the unique SampleProject values from *samplesheet* rows.

    Order is unspecified (set-derived). When *include_no_index_name* is
    given, it is appended so the 'Undetermined_indices' directory can be
    treated as a (special) project.
    """
    projects = list({row['SampleProject'] for row in samplesheet})
    if include_no_index_name is not None:
        projects.append(include_no_index_name)
    return projects
def get_number_of_reads_fastq(filepath):
    """
    Count the number of reads in a (gzipped) FASTQ file.
    Assumes fours lines per read.
    :type filepath: str
    :rtype: int
    """
    # The backticked `wc -l` runs with stdin attached to the zcat pipe, so
    # it counts the decompressed lines; the shell then divides by 4.
    # NOTE(review): filepath is interpolated into a shell string; paths
    # with spaces or shell metacharacters will break (or worse) -- consider
    # subprocess with an argument list and shell=False.
    num = subprocess.check_output("zcat %s | echo $((`wc -l`/4))" % filepath,
                                  shell=True)
    return int(num.strip())
def get_read_length_fastq(filepath):
    """
    Return the length of the first read in a (gzipped) FASTQ file.

    :param filepath: Path to the (gzipped) FASTQ file
    :type filepath: str
    :return: Length of the first read in the FASTQ file
    :rtype: int
    """
    # head -n 2 | tail -n 1 extracts the sequence line of the first record;
    # wc -m counts its characters including the newline, hence the -1.
    # NOTE(review): filepath is interpolated into a shell string; see
    # get_number_of_reads_fastq for the same injection caveat.
    num = subprocess.check_output("zcat < %s | "
                                  "head -n 2 | "
                                  "tail -n 1 | "
                                  "wc -m" % filepath,
                                  shell=True)
    return int(num.strip()) - 1
# Copypasta from: https://goo.gl/KpWo1w
# def unique(seq):
# seen = set()
# seen_add = seen.add
# return [x for x in seq if not (x in seen or seen_add(x))]
def rta_complete_parser(run_path):
    """
    Parses RTAComplete.txt files in completed Illumina runs.
    Returns the Illumina RTA (Realtime Analysis) version and the
    date & time the run finished transferring from the instrument.
    Copes with files generated by both RTA 1.x and RTA 2.x.

    :type run_path: str
    :rtype: (datetime.DateTime, str)
    """
    with open(join(run_path, "RTAComplete.txt"), 'r') as fh:
        first_line = fh.readline()

    if first_line.startswith('RTA'):
        # RTA 2.x, eg: "RTA 2.7.3 completed on 3/25/2016 3:31:22 AM"
        parts = first_line.split(' completed on ')
        version = parts[0]
        day, time = parts[1].split(' ', 1)
    else:
        # RTA 1.x, eg: "6/11/2014,20:00:49.935,Illumina RTA 1.17.20"
        day, time, version = first_line.split(',')

    end_time = dateparser.parse("%s %s" % (day, time))
    return end_time, version
def runinfo_parser(run_path):
    """
    Parse <run_path>/RunInfo.xml for run id, run number, flowcell id,
    instrument id and the read-cycle layout.

    Matches some or all of the fields defined in schema:
    http://www.tardis.edu.au/schemas/sequencing/run/illumina

    :type run_path: str
    :rtype: dict
    """
    with open(join(run_path, "RunInfo.xml"), 'r') as f:
        runinfo = xmltodict.parse(f.read())['RunInfo']['Run']

    info = {u'run_id': runinfo['@Id'],
            u'run_number': runinfo['@Number'],
            u'flowcell_id': runinfo['Flowcell'],
            u'instrument_id': runinfo['Instrument']}

    # NOTE(review): xmltodict yields a dict (not a list) when there is only
    # a single <Read> element; this loop assumes multiple reads -- confirm.
    reads = runinfo['Reads']['Read']

    cycle_list = []
    # index_reads = []
    for read in reads:
        if read['@IsIndexedRead'] == 'Y':
            # index_reads.append(read['@Number'])
            # we wrap the index reads in brackets
            cycle_list.append("(%s)" % read['@NumCycles'])
        else:
            cycle_list.append(read['@NumCycles'])

    info['read_cycles'] = ', '.join(cycle_list)
    # info['index_reads'] = ', '.join(index_reads)

    # Currently not capturing this metadata
    # runinfo['RunInfo']['Run']['FlowcellLayout']['@LaneCount']
    # runinfo['RunInfo']['Run']['FlowcellLayout']['@SurfaceCount']
    # runinfo['RunInfo']['Run']['FlowcellLayout']['@SwathCount']
    # runinfo['RunInfo']['Run']['FlowcellLayout']['@TileCount']
    return info
def illumina_config_parser(run_path):
    """
    Extract data from an Illumina run Config/*_Effective.cfg file.

    Returns a dictionary with key/values, where keys have the [section]
    from the Windows INI-style file prepended, separated by a colon, eg

    {"section_name:variable_name": "value"}

    Returns None (after logging an error) when no *Effective.cfg exists.

    :type run_path: str
    :rtype: dict
    """
    # locate the effective config file (the last match wins)
    config_filename = None
    for candidate in os.listdir(join(run_path, 'Config')):
        if "Effective.cfg" in candidate:
            config_filename = candidate
    if not config_filename:
        logger.error("Cannot find Config/*Effective.cfg file")
        return

    # ConfigParser chokes on these files despite their INI-like look,
    # so parse them by hand.
    allinfo = {}
    section = None
    with open(join(run_path, 'Config', config_filename), 'r') as cfg:
        for line in cfg:
            if line[0] == ';':        # comment line
                continue
            if line[0] == '[':        # section header
                section = line[1:].split(']')[0]
            if '=' in line:
                parts = line.split('=')
                key = parts[0].strip()
                value = parts[1]
                if ';' in line:       # drop trailing inline comment
                    value = parts[1].split(';')[0]
                allinfo['%s:%s' % (section, key)] = value.strip()
    return allinfo
# TODO: Better demultiplexer version & commandline detection
# since we don't have a really reliable way of guessing how the demultiplexing
# was done (beyond detecting DemultiplexConfig.xml for bcl2fastq 1.8.4),
# we should probably require the processing pipeline to output some
# an optional metadata file containing the version and commandline used
# (and possibly other stuff)
# Alternative - allow config / commandline to specify pattern/path to
# bcl2fastq (or other demultiplexer) stdout log - extract version and
# commandline from there. This should work for bcl2fastq 2.17 but not
# 1.8.4
def get_demultiplexer_info(demultiplexed_output_path):
    """
    Determine which demultiplexing program (eg bcl2fastq) and commandline
    options that were used to partition reads from the run into individual
    samples (based on index).

    eg. {'version': 'bcl2fastq 1.8.3,
         'commandline_options':
         '--input-dir ./Data/Intensities/BaseCalls ' +
         '--output-dir ./130613_DMO177_0029_AH0EPTADXX.pc ' +
         '--sample-sheet ./SampleSheet.csv --no-eamss'}

    :param demultiplexed_output_path: bcl2fastq (or similar) output path
    :type demultiplexed_output_path: str
    :return: Name and version number of program used for demultiplexing reads.
    :rtype: dict
    """
    version_info = {'version': '',
                    'version_number': '',
                    'commandline_options': ''}
    # Parse DemultiplexConfig.xml to extract the bcl2fastq version
    # This works for bcl2fastq v1.8.4, but bcl2fastq2 v2.x doesn't seem
    # to generate this file
    demulti_config_path = join(demultiplexed_output_path,
                               "DemultiplexConfig.xml")
    if exists(demulti_config_path):
        with open(demulti_config_path, 'r') as f:
            xml = xmltodict.parse(f.read())
            version = xml['DemultiplexConfig']['Software']['@Version']
            version = ' '.join(version.split('-'))
            cmdline = xml['DemultiplexConfig']['Software']['@CmdAndArgs']
            cmdline = cmdline.split(' ', 1)[1]
            version_info = {'version': version,
                            'version_number': version.split()[1].lstrip('v'),
                            'commandline_options': cmdline}
    else:
        # if we can't find DemultiplexConfig.xml, assume the locally installed
        # bcl2fastq2 (v2.x) version was used
        # TODO: This assumption is just misleading for things like MiSeq runs
        #       the might have been demultiplexed on the instrument and
        #       copied over. We probably shouldn't do this, just leave it blank
        #       until we find a better way to determine the demultiplexer
        #       (which might amount to having it specified on the commandline
        #       or in an extra metadata file)
        try:
            raw = subprocess.check_output(
                "/usr/local/bin/bcl2fastq --version",
                stderr=subprocess.STDOUT,
                shell=True)
            # check_output returns bytes on Python 3; decode before string
            # comparison (previously `'bcl2fastq' in out[1]` raised
            # TypeError on Python 3).
            out = raw.decode('utf-8', errors='replace').splitlines()
            if len(out) >= 2 and 'bcl2fastq' in out[1]:
                version = out[1].strip()
                version_info = {
                    'version': version,
                    'version_number': version.split()[1].lstrip('v'),
                    'commandline_options': None}
        except subprocess.CalledProcessError:
            pass

    # if we can't determine the bcl2fastq (or other demultiplexer) based
    # on config & log files, or the locally installed version, try to guess if
    # bcl2fastq 1.x or 2.x was used based on 'Project_' prefixes
    if not version_info.get('version'):
        if any([proj_dir.startswith('Project_')
                for proj_dir in os.listdir(demultiplexed_output_path)]):
            version_info['version'] = 'bcl2fastq 1.0unknown'
            version_info['version_number'] = '1.0unknown'
        else:
            version_info['version'] = 'bcl2fastq 2.0unknown'
            version_info['version_number'] = '2.0unknown'

    return version_info
"""
# get the version of locally installed tagdust
out = subprocess.check_output("tagdust --version", shell=True)
if len(out) >= 1 and 'Tagdust' in out[1]:
version = out[1].strip()
return {'version': version,
'commandline_options': None}
"""
def get_run_id_from_path(run_path):
    """Return the run id: the final path component of *run_path*.

    Trailing (and leading) path separators are stripped first, so
    '/data/runs/RUN1/' and '/data/runs/RUN1' both yield 'RUN1'.
    """
    trimmed = run_path.strip(os.path.sep)
    return os.path.basename(trimmed)
# TODO: We could actually make patterns like these one a config option
# (either raw Python regex with named groups, or write a translator to
# simplify the syntax so we can write {sample_id}_bla_{index} in the config
# and have it converted to a regex with named groups internally)
# https://docs.python.org/2/howto/regex.html#non-capturing-and-named-groups
#
# (All these problems of differences between v2.x and v1.8.4 and CSV vs.
# IEMv4 SampleSheets are begging for a SampleSheet data container
# object to abstract out differences in sample sheets, and an IlluminaRun
# object with a list of DemultiplexedProject objects to abstract out
# differences in directory structure)
def parse_sample_info_from_filename(filepath, suffix='.fastq.gz'):
    """Extract sample metadata from an Illumina fastq.gz filename.

    Tries, in order: 'Undetermined indices' names, bcl2fastq 2.x names
    ({sample_name}_S{n}_L00{lane}_R{read}_001.fastq.gz) and bcl2fastq
    1.8.4 names ({sample_name}_{index}_L00{lane}_R{read}_001.fastq.gz).

    Returns a dict of the captured fields (with sample_number, lane, read
    and set_number converted to int) or None when nothing matches.
    """
    filename = os.path.basename(filepath)

    # Components of regexes to match common fastq.gz filenames output
    # by Illumina software. It's easier and more extensible to combine
    # these than attempt to create and maintain one big regex
    sample_name_re = r'(?P<sample_name>.*)'
    undetermined_sample_name_re = r'(?P<sample_name>.*_Undetermined)'
    index_re = r'(?P<index>[ATGC-]{6,33})'
    lane_re = r'L0{0,3}(?P<lane>\d+)'
    # Usually R1 or R2, can be I1 etc if index reads are output seperately
    read_re = r'[RI](?P<read>\d)'
    set_number_re = r'(?P<set_number>\d+)'
    sample_number_re = r'S(?P<sample_number>\d+)'
    extension_re = r'%s$' % suffix

    candidate_patterns = [
        # Undetermined indices files like this:
        # lane1_Undetermined_L001_R1_001.fastq.gz
        r'_'.join([undetermined_sample_name_re, lane_re, read_re,
                   set_number_re]),
        # bcl2fastq 2.x style filenames:
        # {sample_name}_{sample_number}_L00{lane}_R{read}_001.fastq.gz
        r'_'.join([sample_name_re, sample_number_re, lane_re, read_re,
                   set_number_re]),
        # bcl2fastq 1.8.4 style filenames:
        # {sample_name}_{index}_L00{lane}_R{read}_001.fastq.gz
        r'_'.join([sample_name_re, index_re, lane_re, read_re,
                   set_number_re]),
    ]

    match = None
    for pattern in candidate_patterns:
        match = re.match(pattern + extension_re, filename)
        if match is not None:
            break
    if match is None:
        return None

    info = match.groupdict()
    # these captures are always decimal strings; expose them as ints
    for field in ('sample_number', 'lane', 'read', 'set_number'):
        if field in info:
            info[field] = int(info[field])
    return info
def get_sample_project_mapping(basepath,
                               samplesheet=None,
                               suffix='.fastq.gz',
                               absolute_paths=False,
                               catch_undetermined=True):
    """
    Given a path containing fastq.gz files, possibly nested in Project/Sample
    directories, return a data structure mapping fastq-samples to projects.

    TODO: The SampleSheet.csv may be used as a hint but is not required.

    :param basepath: Path to directory tree of fastq.gz files - eg, bcl2fastq
                     output directory
    :type basepath: str
    :return: Dictionary of lists, {project_id : [relative fastq.gz paths]}
    :rtype: OrderedDict
    """
    from fs.opener import opener

    # Collect every matching file (relative to basepath), sorted for
    # deterministic output.
    with opener.opendir(basepath) as vfs:
        fq_files = sorted(
            fn.lstrip('/').lstrip('\\')
            for fn in vfs.walkfiles()
            if suffix in fn
        )

    project_mapping = OrderedDict()
    for fqpath in fq_files:
        # Depth of the relative path decides how much structure we have:
        # project/sample/file, project/file, or a bare file in the root.
        parts = Path(fqpath).parts
        project = ''
        fqfile = fqpath
        if len(parts) == 3:
            project, sample_id, fqfile = map(str, parts)
        elif len(parts) == 2:
            project, fqfile = map(str, parts)
        elif len(parts) == 1:
            fqfile = str(parts[0])
        if catch_undetermined and 'Undetermined' in fqfile:
            project = u'Undetermined_indices'
        # TODO: we currently don't deal with Project_ prefixes; really the
        #       project ID doesn't include Project_. If we strip it here we
        #       may need to keep the project directory in the fastq paths so
        #       downstream join(bcl2fastq_output_dir, project_id, fastq_file)
        #       calls still work.
        # TODO: also incorporate sample_id in this datastructure
        record = join(basepath, fqpath) if absolute_paths else fqpath
        project_mapping.setdefault(project, []).append(record)
    # TODO: Use the SampleSheet.csv to validate or hint
    # TODO: Also assign sample_id, sample_name, lane, read, etc - we could
    #       use parse_sample_info_from_filename for this, and/or use the
    #       FASTQ header(s)
    return project_mapping
def undetermined_reads_in_root(basepath):
    """
    Detect where the 'Undetermined' reads live in a demultiplexing output
    directory.

    Returns True when Undetermined_* fastq.gz files sit bare in the root of
    the bcl2fastq output directory (bcl2fastq 2.x style), or False when they
    are grouped in their own project directory (bcl2fastq 1.8.4 style).

    :param basepath: bcl2fastq output directory to inspect
    :type basepath: str
    :return: True if Undetermined reads are files in the root
    :rtype: bool
    """
    undetermined_entries = (
        name for name in os.listdir(basepath) if 'Undetermined_' in name
    )
    for name in undetermined_entries:
        path = join(basepath, name)
        if isfile(path):
            return True
        if isdir(path):
            return False
    raise Exception("No Undetermined_indices files or directories found.")
def get_sample_id_from_fastq_filename(filepath, suffix='.fastq.gz'):
    """
    Takes:
        15-02380-CE11-T13-L1_AACCAG_L001_R1_001.fastq.gz

    Returns a sample ID that should be unique within a run,
    consisting of the sample name, index, lane and read pair:
        15-02380-CE11-T13-L1_AACCAG_L001_R1_001

    :param filepath: a filename (possibly including a path)
    :type filepath: str
    :param suffix: the file extension to strip (default '.fastq.gz', which
                   preserves the historical behaviour)
    :type suffix: str
    :return: Unique sample ID
    :rtype: str
    """
    # split (rather than rstrip) so anything after the suffix is dropped too
    return os.path.basename(filepath).split(suffix)[0]
def get_sample_name_from_fastq_filename(filepath):
    """
    Takes:
        15-02380-CE11-T13-L1_AACCAG_L001_R1_001.fastq.gz

    Returns just the sample name:
        15-02380-CE11-T13-L1

    :param filepath: a filename (possibly including a path)
    :type filepath: str
    :return: Short sample name
    :rtype: str
    """
    info = parse_sample_info_from_filename(filepath)
    return info['sample_name']
"repo_name": "pansapiens/mytardis_ngs_ingestor",
"path": "mytardis_ngs_ingestor/illumina/run_info.py",
"copies": "1",
"size": "20841",
"license": "bsd-3-clause",
"hash": 9026288671173119000,
"line_mean": 33.9697986577,
"line_max": 102,
"alpha_frac": 0.5912384243,
"autogenerated": false,
"ratio": 3.659525899912204,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4750764324212204,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from builtins import *
import struct
import array
import json
from .client import PCICV3Client
class PCICFormatRecord:
    """Builder for a 'records' element of a PCIC output format."""

    def __init__(self, recordId):
        self.recordMap = {
            "type": "records",
            "id": str(recordId),
            "elements": [],
        }

    def addStringElement(self, id, value):
        """Append a fixed string element with the given id and value."""
        self.recordMap["elements"].append({
            "type": "string",
            "value": str(value),
            "id": str(id),
        })

    def addBlobElement(self, id):
        """Append a blob (binary chunk) element with the given id."""
        self.recordMap["elements"].append({
            "type": "blob",
            "id": str(id),
        })

    def toMap(self):
        """Return the record as a plain dict."""
        return self.recordMap

    def toString(self):
        """Return the record serialized as a JSON string."""
        return json.dumps(self.recordMap)
class PCICFormat:
    """A PCIC output format, either parsed from a JSON string or built up
    element by element."""

    def __init__(self, formatString = None):
        if formatString is not None:
            # parse a format previously serialized with toString()
            self.formatMap = json.loads(formatString)
        else:
            # start from an empty flexible/ascii format
            self.formatMap = {
                "layouter": "flexible",
                "format": {"dataencoding": "ascii"},
                "elements": [],
            }

    def addStringElement(self, id, value):
        """Append a fixed string element with the given id and value."""
        self.formatMap["elements"].append({
            "type": "string",
            "value": str(value),
            "id": str(id),
        })

    def addBlobElement(self, id):
        """Append a blob (binary chunk) element with the given id."""
        self.formatMap["elements"].append({
            "type": "blob",
            "id": str(id),
        })

    def addRecordElement(self, record):
        """Append a PCICFormatRecord's map as a 'records' element."""
        self.formatMap["elements"].append(record.toMap())

    def toString(self):
        """Return the format serialized as a JSON string."""
        return json.dumps(self.formatMap)

    @staticmethod
    def blobs(*blobIds):
        """Convenience constructor: a format containing only the given blob
        elements."""
        fmt = PCICFormat()
        for blob_id in blobIds:
            fmt.addBlobElement(blob_id)
        return fmt
class PCICParser:
    """Parses binary PCIC answers according to a :class:`PCICFormat`.

    Set ``debug = True`` to print chunk header details while parsing.
    """

    # Maps PCIC pixel format codes to array.array type codes.
    # (Code 7 is intentionally absent - it is treated as unknown.)
    PIXEL_FORMAT_TYPECODES = {
        0: 'B',  # unsigned 8 bit
        1: 'b',  # signed 8 bit
        2: 'H',  # unsigned 16 bit
        3: 'h',  # signed 16 bit
        4: 'I',  # unsigned 32 bit
        5: 'i',  # signed 32 bit
        6: 'f',  # 32 bit float
        8: 'd',  # 64 bit float
    }

    def __init__(self, format = None):
        self.format = format
        self.debug = False

    def parseBlob(self, answer, answerIndex):
        """
        Parse one binary chunk starting at ``answerIndex`` in ``answer``.

        :return: (chunkType, chunkSize, image) where image is an array.array
                 of the chunk payload, or None if the header version or the
                 pixel format is unknown.
        :raises RuntimeError: if the answer is shorter than the chunk claims.
        """
        # extract version independent information from chunk header
        chunkType, chunkSize, headerSize, headerVersion = struct.unpack(
            'IIII', bytes(answer[answerIndex:answerIndex+16]))
        # extract full chunk header information
        if headerVersion == 1:
            chunkType, chunkSize, headerSize, headerVersion, imageWidth, imageHeight, pixelFormat, timeStamp, frameCount = struct.unpack('IIIIIIIII', bytes(answer[answerIndex:answerIndex+headerSize]))
        elif headerVersion == 2:
            chunkType, chunkSize, headerSize, headerVersion, imageWidth, imageHeight, pixelFormat, timeStamp, frameCount, statusCode, timeStampSec, timeStampNsec = struct.unpack('IIIIIIIIIIII', bytes(answer[answerIndex:answerIndex+headerSize]))
        else:
            print("Unknown chunk header version %d!" % headerVersion)
            return chunkType, chunkSize, None
        if self.debug:
            print('''Data chunk:
    Chunk type: %d
    Chunk size: %d
    Header size: %d
    Header version: %d
    Image width: %d
    Image height: %d
    Pixel format: %d
    Time stamp: %d
    Frame counter: %d''' % (chunkType, chunkSize, headerSize, headerVersion, imageWidth, imageHeight, pixelFormat, timeStamp, frameCount))
        # check payload size
        if len(answer) < answerIndex + chunkSize:
            # BUGFIX: the message was previously passed to RuntimeError
            # unformatted (format string and count as two separate args)
            raise RuntimeError("data truncated ({} bytes missing)".format(
                answerIndex + chunkSize - len(answer)))
        # skip the header to reach the chunk payload data
        answerIndex += headerSize
        payload = bytes(answer[answerIndex:answerIndex + chunkSize - headerSize])
        # convert the payload according to the chunk's pixel format
        typecode = self.PIXEL_FORMAT_TYPECODES.get(pixelFormat)
        if typecode is None:
            print("Unknown pixel format %d!" % pixelFormat)
            image = None
        else:
            image = array.array(typecode, payload)
        return chunkType, chunkSize, image

    def parseElement(self, answer, answerIndex, element, result):
        """
        Parse a single format element at ``answerIndex``, storing its value
        in ``result`` keyed by the element id.

        :return: the index just past the bytes consumed by this element
        :raises RuntimeError: on a string mismatch or unknown element type
        """
        if element["type"] == "string":
            readString = answer[answerIndex:answerIndex + len(element["value"])].decode("utf-8")
            if self.debug:
                print("String: '{}'".format(readString))
            if readString == element["value"]:
                result[element["id"]] = element["value"]
                return answerIndex + len(element["value"])
            else:
                raise RuntimeError("read result '{}' does not match format (expected '{}')"
                                   .format(readString, element["value"]))
        elif element["type"] == "blob":
            chunkType, chunkSize, blobData = self.parseBlob(answer, answerIndex)
            # repeated element ids accumulate their blobs in a list
            if element["id"] in result:
                if isinstance(result[element["id"]], list):
                    result[element["id"]].append(blobData)
                else:
                    result[element["id"]] = [result[element["id"]], blobData]
            else:
                result[element["id"]] = blobData
            return answerIndex + chunkSize
        elif element["type"] == "records":
            if self.debug:
                print("Record: '{}'".format(element["id"]))
            recordResult, answerIndex = self.parseRecord(answer, answerIndex, element["elements"])
            result[element["id"]] = recordResult
            return answerIndex
        raise RuntimeError("cannot handle element type {}".format(element["type"]))

    def parseRecord(self, answer, answerIndex, recordElements):
        """Parse repeated record iterations until the answer is exhausted.

        :return: (list of per-iteration result dicts, final index)
        """
        recordResult = []
        # currently only one record at the end of the answer is supported
        while answerIndex < len(answer):
            iterationResult = {}
            for element in recordElements:
                answerIndex = self.parseElement(answer, answerIndex, element, iterationResult)
            recordResult.append(iterationResult)
        return recordResult, answerIndex

    def parseAnswer(self, answer):
        """Parse a complete PCIC answer against ``self.format``.

        :return: dict mapping element ids to their parsed values
        """
        result = {}
        answerIndex = 0
        for element in self.format.formatMap["elements"]:
            answerIndex = self.parseElement(answer, answerIndex, element, result)
        return result
class FormatClient(PCICV3Client):
    """PCIC client that negotiates a result format with the device and
    parses incoming frames accordingly."""

    def __init__(self, address, port, format=None):
        super(FormatClient, self).__init__(address, port)
        # suppress asynchronous result output while (re)configuring
        self.sendCommand("p0")
        self.format = format
        if self.format is None:
            # no format given: query the format configured by the active
            # application (the "C?" answer carries a 9 digit length prefix)
            formatstring = self.sendCommand("C?").decode("utf-8")[9:]
            self.format = PCICFormat(str(formatstring))
        else:
            # push the provided format to the device for this connection
            formatString = self.format.toString()
            answer = self.sendCommand("c%09d%s" % (len(formatString), formatString))
            if str(answer, 'utf-8') != "*":
                raise RuntimeError("could not change PCIC format (format string is '{}')".format(formatString))
        self.parser = PCICParser(self.format)
        # re-enable result output
        self.sendCommand("p1")

    def readNextFrame(self):
        """Read the next asynchronous answer; if it is a frame (ticket
        0000), parse it and return the result dict, otherwise return {}."""
        result = {}
        ticket, answer = self.readNextAnswer()
        if ticket == b"0000":
            self.parser.debug = self.debug
            result = self.parser.parseAnswer(answer)
        return result
| {
"repo_name": "cfreundl/o3d3xx-python",
"path": "o3d3xx/pcic/format_client.py",
"copies": "1",
"size": "7364",
"license": "mit",
"hash": -1740997495905059300,
"line_mean": 34.2344497608,
"line_max": 235,
"alpha_frac": 0.7118413905,
"autogenerated": false,
"ratio": 3.4013856812933025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9454561537124724,
"avg_score": 0.03173310693371581,
"num_lines": 209
} |
from __future__ import (absolute_import, division, print_function)
from builtins import *
import struct
import array
from o3d3xx.pcic.client import PCICV3Client
from o3d3xx.pcic.cwrappers import IntrinsicCalibration, ExtrinsicCalibration
class ImageClient(PCICV3Client):
    """PCIC client preconfigured to stream every known image/blob type from
    the device and decode each frame into a dict keyed by image kind."""

    def __init__(self, address, port):
        """Connect, push a fixed PCIC format requesting all blob types, and
        re-enable asynchronous result output."""
        super(ImageClient, self).__init__(address, port)
        # disable all result output
        self.sendCommand("p0")
        # format string for all images
        pcicConfig = "{ \"layouter\": \"flexible\", \"format\": { \"dataencoding\": \"ascii\" }, \"elements\": [ { \"type\": \"string\", \"value\": \"star\", \"id\": \"start_string\" }, { \"type\": \"blob\", \"id\": \"normalized_amplitude_image\" }, { \"type\": \"blob\", \"id\": \"distance_image\" }, { \"type\": \"blob\", \"id\": \"x_image\" }, { \"type\": \"blob\", \"id\": \"y_image\" }, { \"type\": \"blob\", \"id\": \"z_image\" }, { \"type\": \"blob\", \"id\": \"confidence_image\" }, { \"type\": \"blob\", \"id\": \"diagnostic_data\" }, { \"type\": \"blob\", \"id\": \"extrinsic_calibration\" }, { \"type\": \"blob\", \"id\": \"intrinsic_calibration\" },{ \"type\": \"blob\", \"id\": \"inverse_intrinsic_calibration\" },{ \"type\": \"string\", \"value\": \"stop\", \"id\": \"end_string\" } ] }"
        answer = self.sendCommand("c%09d%s" % (len(pcicConfig), pcicConfig))
        if str(answer, 'utf-8') != "*":
            # NOTE(review): bare 'raise' outside an except block raises
            # RuntimeError('No active exception to re-raise'); an explicit
            # exception with a message would be clearer.
            raise
        # enable result output again
        self.sendCommand("p1")

    def readNextFrame(self):
        """Read the next asynchronous answer and, when it is a frame
        (ticket 0000), decode all chunks between the 'star'/'stop' markers.

        Returns a dict with keys such as 'distance', 'amplitude', 'x', 'y',
        'z', 'confidence', 'diagnostic', etc., or an empty dict when the
        answer was not a frame."""
        result = {}
        # look for asynchronous output
        ticket, answer = self.readNextAnswer()
        if ticket == b"0000":
            answerIndex = 0
            # read start sequence
            data = answer[answerIndex:answerIndex+4]
            answerIndex += 4
            if self.debugFull == True:
                print('Read 4 Bytes start sequence: "%s"' % data)
            if data != b"star":
                print(data)
                # NOTE(review): bare 'raise' -- see note in __init__.
                raise
            chunkCounter = 1
            while True:
                # read next 4 bytes
                data = answer[answerIndex:answerIndex+4]
                answerIndex += 4
                # stop if frame finished
                if data == b"stop":
                    break
                # else read rest of image header
                data += answer[answerIndex:answerIndex+12]
                answerIndex += 12
                if self.debugFull == True:
                    print('Read %d Bytes image header: "%r"' % (len(data), data))
                # extract information about chunk
                chunkType, chunkSize, headerSize, headerVersion = struct.unpack('IIII', bytes(data))
                # read rest of chunk header
                data += answer[answerIndex:answerIndex+headerSize-16]
                answerIndex += headerSize-16
                if headerVersion == 1:
                    chunkType, chunkSize, headerSize, headerVersion, imageWidth, imageHeight, pixelFormat, timeStamp, frameCount = struct.unpack('IIIIIIIII', bytes(data))
                elif headerVersion == 2:
                    chunkType, chunkSize, headerSize, headerVersion, imageWidth, imageHeight, pixelFormat, timeStamp, frameCount, statusCode, timeStampSec, timeStampNsec = struct.unpack('IIIIIIIIIIII', bytes(data))
                else:
                    # NOTE(review): falls through with imageWidth/pixelFormat
                    # etc. undefined (or stale from the previous chunk); a
                    # NameError is possible if this happens on the first chunk.
                    print("Unknown chunk header version %d!" % headerVersion)
                if self.debug == True:
                    print('''Data chunk %d:
    Chunk type: %d
    Chunk size: %d
    Header size: %d
    Header version: %d
    Image width: %d
    Image height: %d
    Pixel format: %d
    Time stamp: %d
    Frame counter: %d''' % (chunkCounter, chunkType, chunkSize, headerSize, headerVersion, imageWidth, imageHeight, pixelFormat, timeStamp, frameCount))
                # read chunk data
                data = answer[answerIndex:answerIndex+chunkSize-headerSize]
                answerIndex += chunkSize-headerSize
                # distinguish pixel type
                if pixelFormat == 0:
                    image = array.array('B', bytes(data))
                elif pixelFormat == 1:
                    image = array.array('b', bytes(data))
                elif pixelFormat == 2:
                    image = array.array('H', bytes(data))
                elif pixelFormat == 3:
                    image = array.array('h', bytes(data))
                elif pixelFormat == 4:
                    image = array.array('I', bytes(data))
                elif pixelFormat == 5:
                    image = array.array('i', bytes(data))
                elif pixelFormat == 6:
                    image = array.array('f', bytes(data))
                elif pixelFormat == 8:
                    image = array.array('d', bytes(data))
                else:
                    image = None
                # distance image
                if chunkType == 100:
                    result['distance'] = image
                # amplitude image
                elif chunkType == 101:
                    result['amplitude'] = image
                # intensity image
                elif chunkType == 102:
                    result['intensity'] = image
                # raw amplitude image
                elif chunkType == 103:
                    result['rawAmplitude'] = image
                # X image
                elif chunkType == 200:
                    result['x'] = image
                # Y image
                elif chunkType == 201:
                    result['y'] = image
                # Z image
                elif chunkType == 202:
                    result['z'] = image
                # confidence image
                elif chunkType == 300:
                    result['confidence'] = image
                # raw image
                elif chunkType == 301:
                    if 'raw' not in result:
                        result['raw'] = []
                    result['raw'].append(image)
                # diagnostic data
                elif chunkType == 302:
                    diagnosticData = {}
                    payloadSize = chunkSize - headerSize
                    # the diagnostic data blob contains at least four temperatures plus the evaluation time
                    if payloadSize >= 20:
                        # temperatures are reported in tenths of a degree
                        illuTemp, frontendTemp1, frontendTemp2, imx6Temp, evalTime = struct.unpack('=iiiiI', bytes(data[0:20]))
                        diagnosticData = dict([('illuTemp', illuTemp/10.0), ('frontendTemp1', frontendTemp1/10.0), ('frontendTemp2', frontendTemp2/10.0), ('imx6Temp', imx6Temp/10.0), ('evalTime', evalTime)])
                        # check whether framerate is also provided
                        if payloadSize == 24:
                            diagnosticData['frameRate'] = struct.unpack('=I', bytes(data[20:24]))[0]
                    result['diagnostic'] = diagnosticData
                elif chunkType == 400:
                    result['extrinsicCalibration'] = ExtrinsicCalibration.from_buffer(data)
                elif chunkType == 401:
                    result['intrinsicCalibration'] = IntrinsicCalibration.from_buffer(data)
                elif chunkType == 402:
                    result['inverseIntrinsicCalibration'] = IntrinsicCalibration.from_buffer(data)
                chunkCounter = chunkCounter + 1
        # return amplitudeImage, intensityImage, distanceImage, xImage, yImage, zImage, confidenceImage, diagnosticData, rawImage, rawAmplitudeImage
        return result
| {
"repo_name": "cfreundl/o3d3xx-python",
"path": "o3d3xx/pcic/image_client.py",
"copies": "1",
"size": "6030",
"license": "mit",
"hash": -3349663749085248000,
"line_mean": 34.2631578947,
"line_max": 795,
"alpha_frac": 0.6416252073,
"autogenerated": false,
"ratio": 3.35,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44916252073,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from builtins import (object, str)
from past.builtins import basestring
import os
import re
class RenderError(Exception):
    """Raised when rendering fails, e.g. when no output destination is set."""
    pass
class Context(object):
    """
    Generic template engine interface.

    Subclasses implement ``_render_str_to_str`` (and may override
    ``get_template``) for a concrete template engine; this base class
    handles template lookup, file output and directory walking.
    """

    def __init__(self, **kwargs):
        """
        Keyword arguments:
        tmpl_dir -- single base directory templates are stored in
        search_path -- list of directories searched for templates
                       (overrides tmpl_dir if both are given)
        out_dir -- default output directory
        env -- default set of variables to render with
        """
        self._search_path = []
        if 'tmpl_dir' in kwargs:
            self._search_path = [kwargs.get('tmpl_dir')]
        if 'search_path' in kwargs:
            self._search_path = kwargs.get('search_path', [])
        self.out_dir = kwargs.get('out_dir', None)
        self.env = kwargs.get('env', {})

    @property
    def search_path(self):
        """List of directories searched for templates."""
        return self._search_path

    @search_path.setter
    def search_path(self, path_list):
        # accept a single string for convenience and wrap it in a list
        if isinstance(path_list, basestring):
            self._search_path = [path_list]
        else:
            self._search_path = path_list

    @search_path.deleter
    def search_path(self):
        self._search_path = []

    def find_template(self, name):
        """Return the full path of the first template named `name` found on
        the search path, or None if no such file exists."""
        for tmpl_dir in self.search_path:
            tmpl_file = os.path.join(tmpl_dir, name)
            if os.path.exists(tmpl_file):
                return tmpl_file
        return None

    def render(self, src, env=None, out_dir=None, out_file=None):
        """
        Render template `src` with `env`, writing the result to `out_file`
        if given, otherwise to out_dir/src.

        Raises RenderError when neither out_file nor an output directory is
        available.
        """
        if not env:
            env = self.env
        if not out_dir:
            out_dir = self.out_dir
        if out_file:
            dest = out_file
        else:
            # BUGFIX: only require out_dir when no explicit out_file is
            # given (a second unconditional check used to raise even when
            # out_file was supplied); leftover debug prints removed.
            if not out_dir:
                raise RenderError("no output directory (out_dir) set")
            dest = os.path.join(str(out_dir), src)
        self._render_file(src, env, dest)

    def render_file(self):
        # placeholder for engine-specific single-file rendering
        pass

    def render_env(self, env=None):
        # NOTE(review): currently a no-op beyond defaulting env; kept for
        # interface compatibility.
        if not env:
            env = self.env

    def render_string(self, instr, env=None):
        """
        Render the string `instr` with `env` and return the output string.
        """
        if not env:
            env = self.env
        return self._render_str_to_str(instr, env)

    def render_walk(self, env=None, prefix='', skip=None, tmpl_dir=None, out_dir=None):
        """
        Walks a directory and recursively renders all files.

        env -- override environment [default: self.env]
        skip -- regex to skip files [default: None]
                matched against the relative source path and the filename
        prefix -- prefix output files with this [default: '']

        Returns a list of generated output file paths.
        """
        if not env:
            env = self.env
        if not out_dir:
            out_dir = self.out_dir
        if tmpl_dir:
            return self.__render_walk(env, tmpl_dir, out_dir, prefix=prefix, skip=skip)
        # BUGFIX: results from each search path entry used to be discarded;
        # collect and return them as the docstring promises.
        generated = []
        for tmpl_dir in self.search_path:
            generated.extend(
                self.__render_walk(env, tmpl_dir, out_dir, prefix=prefix,
                                   skip=skip))
        return generated

    def __render_walk(self, env, tmpl_dir, out_dir, prefix, skip):
        if skip:
            skip_re = re.compile(skip)
        generated = []
        # BUGFIX: apply the prefix to the output base exactly once; the old
        # code re-joined prefix onto out_dir inside the walk loop,
        # compounding it for every directory visited.
        out_base = os.path.join(out_dir, prefix)
        for root, dirs, files in os.walk(tmpl_dir):
            rel_dir = os.path.relpath(root, tmpl_dir)
            if rel_dir == '.':
                rel_dir = ''
            elif skip and skip_re.search(rel_dir):
                continue
            for file in files:
                if skip and skip_re.search(file):
                    continue
                targ_dir = os.path.join(out_base, rel_dir)
                if not os.path.exists(targ_dir):
                    os.makedirs(targ_dir)
                dest_file = os.path.join(targ_dir, file)
                generated.append(dest_file)
                env["filename"] = os.path.join(rel_dir, prefix + file)
                self.render(os.path.join(rel_dir, file), out_file=dest_file, env=env)
        return generated

    def _render(self, src, env):
        """
        Render template file `src` with `env` and return the result string.
        """
        abs_path = self.find_template(src)
        with open(abs_path) as tmpl:
            return self._render_str_to_str(tmpl.read(), env)

    def _render_file(self, src, env, dest):
        """
        Render template `src` with `env` and write the result to `dest`.
        """
        with open(dest, "w") as out:
            out.write(self._render(src, env))

    def dump(self, src, env):
        # NOTE(review): relies on self.ctx.get_template being provided by a
        # subclass/engine -- Context itself defines no `ctx` attribute.
        tmpl = self.ctx.get_template(src)
        print(tmpl.render(env))
class Template(object):
    """Placeholder for a per-template object API.

    Intended to wrap a template source together with a Context; currently
    unimplemented."""
    def __init__(self, **kwargs):
        # NOTE(review): stub -- the commented attributes below sketch the
        # intended design (a source plus a rendering context).
        pass
        #self.src = file, string, obj
        #self.ctx = Context
    def render_string(self):
        # stub: render this template from a string source
        pass
    def render_file(self):
        # stub: render this template from a file source
        pass
| {
"repo_name": "20c/twentyc.tmpl",
"path": "tmpl/context.py",
"copies": "1",
"size": "5542",
"license": "apache-2.0",
"hash": -5983411186830081000,
"line_mean": 28.1684210526,
"line_max": 97,
"alpha_frac": 0.5541320823,
"autogenerated": false,
"ratio": 3.853963838664812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49080959209648123,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import range
import functools
# Collection types whose contents may be split into batches. numpy arrays
# are included when numpy is installed.
ITERABLE_TYPES = (
    list,
    set,
    tuple,
)

# Error message used when a batchable function is called without its
# batch argument.
MISSING_POSITIONAL_ERR = 'Required positional argument (pos 1) not found'

try:
    from numpy import ndarray
except ImportError:
    pass
else:
    ITERABLE_TYPES = ITERABLE_TYPES + (ndarray,)


def isiterable(v):
    """Return True when ``v`` is one of the batchable collection types."""
    return isinstance(v, ITERABLE_TYPES)
def split(to_batch, batch_size):
    """
    Yield successive slices of `to_batch` containing at most `batch_size`
    elements each.

    Sets (which do not support slicing) are converted to a tuple first, so
    their batches come out in arbitrary order.
    """
    # BUGFIX: use isinstance (covering set subclasses and frozenset, which
    # previously crashed at the slicing step) instead of an exact type check.
    if isinstance(to_batch, (set, frozenset)):
        to_batch = tuple(to_batch)
    # Yield each slice directly instead of materializing all batches first.
    for start in range(0, len(to_batch), batch_size):
        yield to_batch[start:start + batch_size]
def batchable(func=None, batch_size=100):
    """
    Decorator that splits a method's first positional argument into batches
    and invokes the wrapped function once per batch.

    Works both bare and with arguments, so all of these are valid::

        @batchable
        def func(self, ...): ...

        @batchable(batch_size=10)
        def func(self, ...): ...

    The batch size may also be overridden per call via a ``batch_size``
    keyword argument. Non-iterable arguments are wrapped in a single-element
    batch.
    """
    # Called with arguments only: return a decorator bound to batch_size.
    if func is None:
        return functools.partial(batchable, batch_size=batch_size)

    @functools.wraps(func)
    def do_batch(*args, **kwargs):
        if len(args) <= 1:
            raise TypeError(MISSING_POSITIONAL_ERR)
        _batch_size = kwargs.pop('batch_size', batch_size)
        _self, to_batch = args[0], args[1]
        args = args[2:]
        if not isiterable(to_batch):
            to_batch = [to_batch]
        if isinstance(to_batch, set):
            to_batch = list(to_batch)
        for batch in split(to_batch, _batch_size):
            if _self is None:
                func(batch, *args, **kwargs)
            else:
                func(_self, batch, *args, **kwargs)

    return do_batch
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/utils.py",
"copies": "1",
"size": "1742",
"license": "apache-2.0",
"hash": -2104155372064612600,
"line_mean": 22.8630136986,
"line_max": 75,
"alpha_frac": 0.5792192882,
"autogenerated": false,
"ratio": 3.4701195219123506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45493388101123505,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import str
from panoptes_client.subject_workflow_status import SubjectWorkflowStatus
from panoptes_client.panoptes import (
LinkCollection,
LinkResolver,
PanoptesAPIException,
PanoptesObject,
)
from panoptes_client.set_member_subject import SetMemberSubject
from panoptes_client.subject import Subject
from panoptes_client.utils import batchable
from redo import retry
class SubjectSetLinkCollection(LinkCollection):
    """LinkCollection specialised for SubjectSet: fast subject membership
    checks via SetMemberSubject, and guards against linking workflows
    directly (that must go through Workflow.links)."""

    def __contains__(self, obj):
        if self._cls != Subject:
            return super(SubjectSetLinkCollection, self).__contains__(obj)
        # Fast path for subjects: query SetMemberSubject instead of paging
        # through every linked subject.
        subject_id = str(obj.id) if isinstance(obj, Subject) else str(obj)
        linked_count = SetMemberSubject.where(
            subject_set_id=self._parent.id,
            subject_id=subject_id,
        ).object_count
        return linked_count == 1

    def add(self, objs):
        from panoptes_client.workflow import Workflow
        if self._cls == Workflow:
            raise NotImplementedError(
                'Workflows and SubjectSets can only be linked via '
                'Workflow.links'
            )
        return super(SubjectSetLinkCollection, self).add(objs)

    def remove(self, objs):
        from panoptes_client.workflow import Workflow
        if self._cls == Workflow:
            raise NotImplementedError(
                'Workflows and SubjectSets can only be unlinked via '
                'Workflow.links'
            )
        return super(SubjectSetLinkCollection, self).remove(objs)
class SubjectSet(PanoptesObject):
    _api_slug = 'subject_sets'
    _link_slug = 'subject_sets'
    _edit_attributes = (
        'display_name',
        {
            'links': (
                'project',
            ),
            'metadata': (
                'category',
            )
        },
    )
    _link_collection = SubjectSetLinkCollection

    @property
    def subjects(self):
        """
        A generator which yields :py:class:`.Subject` objects which are in
        this subject set.

        Examples::

            for subject in subject_set.subjects:
                print(subject.id)
        """
        for member in SetMemberSubject.where(subject_set_id=self.id):
            yield member.links.subject

    def set_raw(self, raw, etag=None, loaded=True):
        # Make sure the raw representation always carries a subjects link
        # list, even when the API response omits it.
        raw.setdefault('links', {}).setdefault('subjects', [])
        return super(SubjectSet, self).set_raw(raw, etag, loaded)

    def add(self, subjects):
        """
        A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to::

            subject_set.links.add(subjects)
        """
        return self.links.subjects.add(subjects)

    def remove(self, subjects):
        """
        A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to::

            subject_set.links.remove(subjects)
        """
        return self.links.subjects.remove(subjects)

    def subject_workflow_statuses(self, workflow_id):
        """
        A generator which yields :py:class:`.SubjectWorkflowStatus` objects
        for subjects in this subject set and for the supplied workflow id.

        Examples::

            for status in subject_set.subject_workflow_statuses(1234):
                print(status.retirement_reason)
        """
        linked_ids = ', '.join(subject.id for subject in self.subjects)
        for status in SubjectWorkflowStatus.where(
                subject_ids=linked_ids, workflow_id=workflow_id):
            yield status

    def __contains__(self, subject):
        """
        A wrapper around :py:meth:`.LinkCollection.__contains__`. Equivalent
        to::

            subject in subject_set.links.subjects
        """
        return subject in self.links.subjects
# Make API responses containing 'subject_sets'/'subject_set' links resolve
# to SubjectSet instances.
LinkResolver.register(SubjectSet)
LinkResolver.register(SubjectSet, 'subject_set')
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/subject_set.py",
"copies": "1",
"size": "3956",
"license": "apache-2.0",
"hash": 2122100662621487400,
"line_mean": 28.7443609023,
"line_max": 100,
"alpha_frac": 0.6076845298,
"autogenerated": false,
"ratio": 4.385809312638581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5493493842438582,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import str
from panoptes_client.panoptes import (
PanoptesAPIException,
PanoptesObject,
)
from panoptes_client.subject import Subject
from panoptes_client.utils import batchable
class Collection(PanoptesObject):
    _api_slug = 'collections'
    _link_slug = 'collections'
    _edit_attributes = (
        'name',
        'description',
        'display_name',
        'private',
        {
            'links': (
                'project',
            ),
        },
    )

    @classmethod
    def find(cls, id='', slug=None):
        """
        Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
        as well as ID.

        Examples::

            collection_1234 = Collection.find(1234)
            my_collection = Collection.find(slug="example/my-collection")
        """
        if not (id or slug):
            return None
        try:
            return cls.where(id=id, slug=slug).next()
        except StopIteration:
            raise PanoptesAPIException(
                "Could not find collection with slug='{}'".format(slug)
            )

    @property
    def subjects(self):
        """
        A generator which yields each :py:class:`.Subject` in this
        collection.
        """
        return self.links.subjects

    def add(self, subjects):
        """
        A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to::

            collection.links.add(subjects)
        """
        return self.links.subjects.add(subjects)

    def remove(self, subjects):
        """
        A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to::

            collection.links.remove(subjects)
        """
        return self.links.subjects.remove(subjects)

    def set_default_subject(self, subject):
        """
        Sets the subject's location media URL as a link; it displays as the
        default subject on PFE.

        - **subject** can be a single :py:class:`.Subject` instance or a
          single subject ID.

        Examples::

            collection.set_default_subject(1234)
            collection.set_default_subject(Subject(1234))
        """
        if isinstance(subject, Subject):
            _subject_id = subject.id
        elif isinstance(subject, (int, str)):
            _subject_id = str(subject)
        else:
            raise TypeError
        self.http_post(
            '{}/links/default_subject'.format(self.id),
            json={'default_subject': _subject_id},
        )
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/collection.py",
"copies": "1",
"size": "2627",
"license": "apache-2.0",
"hash": -3511388702785620500,
"line_mean": 25.27,
"line_max": 79,
"alpha_frac": 0.562618957,
"autogenerated": false,
"ratio": 4.385642737896494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5448261694896493,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import str
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.project import Project
from panoptes_client.user import User
class ProjectPreferences(PanoptesObject):
    """
    Contains the settings for a :py:class:`.User` on a :py:class:`.Project`.
    """
    _api_slug = 'project_preferences'
    _link_slug = 'project_preferences'
    _edit_attributes = (
        'preferences',
        'settings',
    )

    @staticmethod
    def _resolve_ids(user, project):
        # Accept User/Project instances or raw IDs; anything else is a
        # TypeError. Both values must be of the same kind.
        if isinstance(user, User) and isinstance(project, Project):
            return user.id, project.id
        if isinstance(user, (int, str)) and isinstance(project, (int, str)):
            return user, project
        raise TypeError

    @classmethod
    def find(cls, id='', user=None, project=None):
        """
        Like :py:meth:`.PanoptesObject.find` but can also query by user and
        project.

        - **user** and **project** can be either a :py:class:`.User` and
          :py:class:`.Project` instance respectively, or they can be given
          as IDs. If either argument is given, the other is also required.
        """
        if not id:
            if not (user and project):
                raise ValueError('Both user and project required')
            _user_id, _project_id = cls._resolve_ids(user, project)
            id = cls.where(user_id=_user_id, project_id=_project_id).next().id
        return super(ProjectPreferences, cls).find(id)

    @classmethod
    def save_settings(cls, project=None, user=None, settings=None):
        """
        Save settings for a user without first fetching their preferences.

        - **user** and **project** can be either a :py:class:`.User` and
          :py:class:`.Project` instance respectively, or they can be given
          as IDs. If either argument is given, the other is also required.
        - **settings** is a :py:class:`dict` containing the settings to be
          saved.
        """
        if not isinstance(settings, dict):
            raise TypeError
        _user_id, _project_id = cls._resolve_ids(user, project)
        cls.http_post(
            'update_settings',
            json={
                'project_preferences': {
                    'user_id': _user_id,
                    'project_id': _project_id,
                    'settings': settings,
                }
            }
        )
# Register so that `project_preferences` links on other objects resolve to
# this class.
LinkResolver.register(ProjectPreferences)
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/project_preferences.py",
"copies": "1",
"size": "3085",
"license": "apache-2.0",
"hash": 2364196555182518300,
"line_mean": 32.5326086957,
"line_max": 78,
"alpha_frac": 0.5196110211,
"autogenerated": false,
"ratio": 4.57037037037037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5589981391470371,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import str
import getpass
import logging
import os
import requests
import threading
import pkg_resources
from datetime import datetime, timedelta
from redo import retrier
import six
from panoptes_client.utils import isiterable, batchable
# Maximum attempts for requests made with retry=True (see
# Panoptes.http_request).
HTTP_RETRY_LIMIT = 5
# Base sleep (seconds) between retry attempts, passed to redo.retrier.
RETRY_BACKOFF_INTERVAL = 5
# Setting PANOPTES_DEBUG in the environment turns on verbose logging.
if os.environ.get('PANOPTES_DEBUG'):
    logging.basicConfig(level=logging.DEBUG)
class Panoptes(object):
    """
    The low-level Panoptes HTTP client class. Use this class to log into the
    API. In most cases you can just call :py:meth:`.Panoptes.connect` once and
    all subsequent API requests will be authenticated.

    If you want to configure multiple clients, e.g. to perform operations as
    multiple users, you should initialise the client as a context manager,
    using the `with` statement instead of using :py:meth:`.Panoptes.connect`.

    In this example, we modify a project by authenticating as the project
    owner, then log in as a regular user to add a subject to a collection,
    then switch back to the project owner's account to retire some subjects::

        owner_client = Panoptes(username='example-project-owner', password='')

        with owner_client:
            project = Project(1234)
            project.display_name = 'New name'
            project.save()

        with Panoptes(username='example-user', password=''):
            Collection(1234).add(Subject(1234))

        with owner_client:
            Workflow(1234).retire_subjects([1234, 5678, 9012])

    Using the `with` statement in this way ensures it is clear which user will
    be used for each action.
    """

    # Headers sent with every request; the per-method entries are layered on
    # top of 'default' in http_request().
    _http_headers = {
        'default': {
            'Accept': 'application/vnd.api+json; version=1',
            'User-Agent': 'panoptes-python-client/version=' + pkg_resources.require('panoptes_client')[0].version
        },
        'GET': {},
        'PUT': {
            'Content-Type': 'application/json',
        },
        'POST': {
            'Content-Type': 'application/json',
        },
        'DELETE': {
            'Content-Type': 'application/json',
        },
    }

    # OAuth client IDs keyed by API endpoint.
    _endpoint_client_ids = {
        'default': (
            'ce310d45f951de68c4cc8ef46ca38cc0a008f607a2026680295757bfef99f43c'
        ),
        'https://panoptes-staging.zooniverse.org': (
            'e094b63362fdef0548e0bbcc6e6cb5996c422d3a770074ef972432d57d41049c'
        ),
    }

    # Thread-local storage for the "current" client, so `with` blocks and
    # connect() are safe to use from multiple threads.
    _local = threading.local()

    @classmethod
    def connect(cls, *args, **kwargs):
        """
        connect(username=None, password=None, endpoint=None, admin=False)

        Configures the Panoptes client for use.

        Note that there is no need to call this unless you need to pass one or
        more of the below arguments. By default, the client will connect to
        the public Zooniverse.org API as an anonymous user.

        All arguments are optional:

        - **username** is your Zooniverse.org username.
        - **password** is your Zooniverse.org password.
        - **endpoint** is the HTTP API endpoint you'd like to connect to.
          Defaults to **https://www.zooniverse.org**. Should not include a
          trailing slash.
        - **admin** is a boolean, switching on admin mode if ``True``. Has no
          effect if the given username is not a Zooniverse.org administrator.

        Examples::

            Panoptes.connect(username='example', password='example')
            Panoptes.connect(endpoint='https://panoptes.example.com')
        """
        cls._local.panoptes_client = cls(*args, **kwargs)
        cls._local.panoptes_client.login()
        return cls._local.panoptes_client

    @classmethod
    def client(cls, *args, **kwargs):
        # Return the current thread's client if one has been configured,
        # otherwise fall back to a fresh (anonymous) client.
        local_client = getattr(cls._local, "panoptes_client", None)
        if not local_client:
            return cls(*args, **kwargs)
        return local_client

    def __init__(
        self,
        endpoint=None,
        client_id=None,
        client_secret=None,
        redirect_url=None,
        username=None,
        password=None,
        login=None,
        admin=False
    ):
        self.session = requests.session()

        self.endpoint = endpoint or os.environ.get(
            'PANOPTES_ENDPOINT',
            'https://www.zooniverse.org'
        )

        # Initialise auth state *before* attempting to log in, so a
        # successful login() below is not clobbered. (Previously logged_in
        # was reset to False after login(), which forced a redundant second
        # sign-in on the first API request.)
        self.logged_in = False
        self.bearer_token = None
        self.refresh_token = None
        self.username = None
        self.password = None

        self._auth(login, username, password)
        self.login()

        self.redirect_url = \
            redirect_url or os.environ.get('PANOPTES_REDIRECT_URL')
        self.client_secret = \
            client_secret or os.environ.get('PANOPTES_CLIENT_SECRET')

        if client_id:
            self.client_id = client_id
        elif os.environ.get('PANOPTES_CLIENT_ID'):
            self.client_id = os.environ.get('PANOPTES_CLIENT_ID')
        else:
            self.client_id = self._endpoint_client_ids.get(
                self.endpoint,
                self._endpoint_client_ids['default']
            )

        self.admin = admin
        self.logger = logging.getLogger('panoptes_client')

    def __enter__(self):
        # Remember the previously-active client so nested `with` blocks
        # restore it on exit.
        self._local.previous_client = getattr(
            self._local,
            'panoptes_client',
            None,
        )
        self._local.panoptes_client = self
        return self

    def __exit__(self, *exc):
        self._local.panoptes_client = self._local.previous_client

    def http_request(
        self,
        method,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """
        Make a single HTTP request against the API and return the raw
        ``requests`` response.

        - **method** is the uppercase HTTP verb ('GET', 'PUT', 'POST',
          'DELETE'), used both for the request and to pick default headers.
        - **etag** is sent as an ``If-Match`` header when given.
        - **endpoint** overrides this client's endpoint; otherwise `path` is
          resolved under ``<endpoint>/api``.
        - **retry**, if True, retries HTTP 5xx responses up to
          HTTP_RETRY_LIMIT times before raising PanoptesAPIException.
        """
        _headers = self._http_headers['default'].copy()
        _headers.update(self._http_headers[method])
        _headers.update(headers)
        headers = _headers

        token = self.get_bearer_token()

        if self.logged_in:
            headers.update({
                'Authorization': 'Bearer %s' % token,
            })

        if etag:
            headers.update({
                'If-Match': etag,
            })

        if endpoint:
            url = endpoint + '/' + path
        else:
            url = self.endpoint + '/api' + path

        # Copy before modifying: `params` may be the caller's dict (or the
        # shared mutable default), and the admin flag must not leak into
        # unrelated later requests.
        params = dict(params)
        # Setting the parameter at all (even False) turns on admin mode
        if self.admin:
            params.update({'admin': self.admin})

        if params:
            self.logger.debug(
                "params={}".format(params)
            )
        if json:
            self.logger.debug(
                "json={}".format(json)
            )

        if retry:
            retry_attempts = HTTP_RETRY_LIMIT
        else:
            retry_attempts = 1

        for _ in retrier(
            attempts=retry_attempts,
            sleeptime=RETRY_BACKOFF_INTERVAL,
        ):
            response = self.session.request(
                method,
                url,
                params=params,
                headers=headers,
                json=json,
            )
            # Only server errors are retried; 4xx responses are final.
            if response.status_code < 500:
                break
        else:
            raise PanoptesAPIException(
                'Received HTTP status code {} from API'.format(
                    response.status_code
                )
            )
        return response

    def json_request(
        self,
        method,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """
        Make an HTTP request and decode the JSON response.

        Returns a ``(json_body, etag)`` tuple; ``json_body`` is None for
        empty responses (e.g. HTTP 204). Raises PanoptesAPIException if the
        decoded body carries an ``error``/``errors`` key.
        """
        response = self.http_request(
            method=method,
            path=path,
            params=params,
            headers=headers,
            json=json,
            etag=etag,
            endpoint=endpoint,
            retry=retry,
        )
        if (
            response.status_code == 204 or
            int(response.headers.get('Content-Length', -1)) == 0 or
            len(response.text) == 0
        ):
            json_response = None
        else:
            json_response = response.json()
            if 'errors' in json_response:
                raise PanoptesAPIException(', '.join(
                    map(lambda e: e.get('message', ''),
                        json_response['errors']
                        )
                ))
            elif 'error' in json_response:
                raise PanoptesAPIException(json_response['error'])

        return (json_response, response.headers.get('ETag'))

    def get_request(
        self,
        path,
        params={},
        headers={},
        endpoint=None,
        retry=False,
    ):
        """GET returning the raw response object."""
        return self.http_request(
            'GET',
            path,
            params=params,
            headers=headers,
            endpoint=endpoint,
            retry=retry,
        )

    def get(
        self,
        path,
        params={},
        headers={},
        endpoint=None,
        retry=False,
    ):
        """GET returning a ``(json_body, etag)`` tuple."""
        return self.json_request(
            'GET',
            path,
            params=params,
            headers=headers,
            endpoint=endpoint,
            retry=retry,
        )

    def put_request(
        self,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """PUT returning the raw response object."""
        return self.http_request(
            'PUT',
            path,
            params=params,
            headers=headers,
            json=json,
            etag=etag,
            # Fix: previously hard-coded to None, silently dropping any
            # caller-supplied endpoint.
            endpoint=endpoint,
            retry=retry,
        )

    def put(
        self,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """PUT returning a ``(json_body, etag)`` tuple."""
        return self.json_request(
            'PUT',
            path,
            params=params,
            headers=headers,
            json=json,
            etag=etag,
            endpoint=endpoint,
            retry=retry,
        )

    def post_request(
        self,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """POST returning the raw response object."""
        return self.http_request(
            # Fix: must be uppercase — http_request indexes _http_headers
            # by this value and 'post' would raise KeyError.
            'POST',
            path,
            params=params,
            headers=headers,
            json=json,
            etag=etag,
            endpoint=endpoint,
            retry=retry,
        )

    def post(
        self,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """POST returning a ``(json_body, etag)`` tuple."""
        return self.json_request(
            'POST',
            path,
            params=params,
            headers=headers,
            json=json,
            etag=etag,
            endpoint=endpoint,
            retry=retry,
        )

    def delete_request(
        self,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """DELETE returning the raw response object."""
        return self.http_request(
            # Fix: must be uppercase (see post_request).
            'DELETE',
            path,
            params=params,
            headers=headers,
            json=json,
            etag=etag,
            # Fix: previously hard-coded to None (see put_request).
            endpoint=endpoint,
            retry=retry,
        )

    def delete(
        self,
        path,
        params={},
        headers={},
        json=None,
        etag=None,
        endpoint=None,
        retry=False,
    ):
        """DELETE returning a ``(json_body, etag)`` tuple."""
        return self.json_request(
            'DELETE',
            path,
            params=params,
            headers=headers,
            json=json,
            etag=etag,
            endpoint=endpoint,
            retry=retry,
        )

    def _auth(self, auth_type, username, password):
        # Resolve credentials from the chosen auth source. The 'keyring'
        # branch is not implemented yet and deliberately leaves the
        # credentials unset.
        if username is None or password is None:
            if auth_type == 'interactive':
                username, password = self.interactive_login()
            elif auth_type == 'keyring':
                # Get credentials from python keyring
                pass
            else:
                username = os.environ.get('PANOPTES_USERNAME')
                password = os.environ.get('PANOPTES_PASSWORD')

        self.username = username
        self.password = password

    def login(self, username=None, password=None):
        """
        Sign in to the API with the given (or previously stored)
        credentials.

        No-ops if already logged in or if no credentials are available.
        Raises PanoptesAPIException if the API rejects the sign-in.
        """
        if self.logged_in:
            return

        if not username:
            username = self.username
        else:
            self.username = username

        if not password:
            password = self.password
        else:
            self.password = password

        if not username or not password:
            return

        login_data = {
            'authenticity_token': self.get_csrf_token(),
            'user': {
                'login': username,
                'password': password,
                'remember_me': True,
            },
        }
        response = self.session.post(
            self.endpoint + '/users/sign_in',
            json=login_data,
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
            }
        )
        if response.status_code != 200:
            raise PanoptesAPIException(
                response.json().get('error', 'Login failed')
            )
        self.logged_in = True
        return response

    def interactive_login(self):
        """Prompt for credentials on the terminal (password hidden)."""
        print('Enter your Zooniverse credentials...')
        username = input('Username: ')
        password = getpass.getpass()
        return username, password

    def get_csrf_token(self):
        """Fetch a CSRF token from the sign-in page for use by login()."""
        url = self.endpoint + '/users/sign_in'
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        }
        return self.session.get(url, headers=headers).headers['x-csrf-token']

    def get_bearer_token(self):
        """
        Return a valid OAuth bearer token, requesting or refreshing one
        first if necessary. Returns None for anonymous (not logged in,
        password grant) clients.
        """
        if not self.valid_bearer_token():
            grant_type = 'password'

            if self.client_secret:
                grant_type = 'client_credentials'

            if not self.logged_in:
                if grant_type == 'password':
                    # The password grant relies on the signed-in session, so
                    # make sure we are logged in first.
                    if not self.login():
                        return

            if (self.bearer_token and self.refresh_token):
                bearer_data = {
                    'grant_type': 'refresh_token',
                    'refresh_token': self.refresh_token,
                    'client_id': self.client_id,
                }
            else:
                bearer_data = {
                    'grant_type': grant_type,
                    'client_id': self.client_id,
                }
                if grant_type == 'client_credentials':
                    bearer_data['client_secret'] = self.client_secret
                    bearer_data['url'] = self.redirect_url

            token_response = self.session.post(
                self.endpoint + '/oauth/token',
                bearer_data
            ).json()

            if 'errors' in token_response:
                raise PanoptesAPIException(token_response['errors'])

            self.bearer_token = token_response['access_token']
            if (self.bearer_token and grant_type == 'client_credentials'):
                self.logged_in = True
            if 'refresh_token' in token_response:
                self.refresh_token = token_response['refresh_token']
            else:
                self.refresh_token = None
            # Track expiry so valid_bearer_token() can renew proactively.
            self.bearer_expires = (
                datetime.now()
                + timedelta(seconds=token_response['expires_in'])
            )
        return self.bearer_token

    def valid_bearer_token(self):
        # Return invalid if there is no token
        if not self.has_bearer_token():
            return False

        now = datetime.now()
        expires = self.bearer_expires
        # Buffer to allow time for requests
        # to fire without expiring in transit
        buffer_ = timedelta(minutes=2)
        # Add time to now --> pretend time is later
        # Effect of making token expire earlier
        return now + buffer_ <= expires

    def has_bearer_token(self):
        """Return True if a bearer token has been obtained (valid or not)."""
        return self.bearer_token is not None
class PanoptesObject(object):
    """
    The base class of all Panoptes model classes. You should never need to
    create instances of this class, but the methods defined here are common to
    all the model subclasses.

    `PanoptesObject`s support lazy loading of attributes, where data is loaded
    from the API only when it is first accessed. You can do this by passing an
    object ID to the constructor::

        project = Project(1234)
        print(project.display_name)

    This will not make any HTTP requests until the `print` statement.
    """

    # Attribute names stored normally on the instance instead of being
    # proxied through self.raw (see __getattr__/__setattr__).
    RESERVED_ATTRIBUTES = (
        '_loaded',
        'etag',
        'links',
        'modified_attributes',
        'raw',
    )

    @classmethod
    def url(cls, *args):
        # Build '/<api_slug>/<arg>/...', skipping empty/None args.
        return '/'.join(['', cls._api_slug] + [str(a) for a in args if a])

    @classmethod
    def http_get(cls, path, params={}, headers={}, retry=True, **kwargs):
        """GET `path` under this class's API slug via the current client."""
        return Panoptes.client().get(
            cls.url(path),
            params,
            headers,
            retry=retry,
            **kwargs
        )

    @classmethod
    def http_post(cls, path, params={}, headers={}, json=None, **kwargs):
        """POST to `path` under this class's API slug via the current client."""
        return Panoptes.client().post(
            cls.url(path),
            params,
            headers,
            json,
            **kwargs
        )

    @classmethod
    def http_put(cls, path, params={}, headers={}, json=None, **kwargs):
        """PUT to `path` under this class's API slug via the current client."""
        return Panoptes.client().put(
            cls.url(path),
            params,
            headers,
            json,
            **kwargs
        )

    @classmethod
    def http_delete(cls, path, params={}, headers={}, json=None, **kwargs):
        """DELETE `path` under this class's API slug via the current client."""
        return Panoptes.client().delete(
            cls.url(path),
            params,
            headers,
            json,
            **kwargs
        )

    @classmethod
    def where(cls, **kwargs):
        """
        Returns a generator which yields instances matching the given query
        arguments.

        For example, this would yield all :py:class:`.Project`::

            Project.where()

        And this would yield all launch approved :py:class:`.Project`::

            Project.where(launch_approved=True)
        """
        _id = kwargs.pop('id', '')
        return cls.paginated_results(*cls.http_get(_id, params=kwargs))

    @classmethod
    def find(cls, _id):
        """
        Returns the individual instance with the given ID, if it exists. Raises
        :py:class:`PanoptesAPIException` if the object with that ID is not
        found.
        """
        if not _id:
            return None
        try:
            return next(cls.where(id=_id))
        except StopIteration:
            raise PanoptesAPIException(
                "Could not find {} with id='{}'".format(cls.__name__, _id)
            )

    @classmethod
    def paginated_results(cls, response, etag):
        # Wrap a raw API response in a lazily-paginating iterator.
        return ResultPaginator(cls, response, etag)

    def __init__(self, raw={}, etag=None):
        self._loaded = False
        self.links = LinkResolver(self)
        # Accept either a raw attribute dict or a bare object ID. Use
        # isinstance rather than a type comparison so dict subclasses work.
        if isinstance(raw, dict):
            self.set_raw(raw, etag)
        else:
            self.set_raw({}, loaded=False)
            self.raw['id'] = raw

    def __getattr__(self, name):
        try:
            # Unknown attributes trigger a lazy fetch (except 'id' and the
            # reserved bookkeeping names), then fall through to self.raw.
            if (
                name not in PanoptesObject.RESERVED_ATTRIBUTES
                and name != 'id'
                and not self._loaded
            ):
                self.reload()
                return getattr(self, name)
            return self.raw[name]
        except KeyError:
            if name == 'id':
                return None
            raise AttributeError("'%s' object has no attribute '%s'" % (
                self.__class__.__name__,
                name
            ))

    def __setattr__(self, name, value):
        # Reserved attributes are stored normally; everything else goes
        # through self.raw with an editability check.
        if name in PanoptesObject.RESERVED_ATTRIBUTES:
            return super(PanoptesObject, self).__setattr__(name, value)

        if not self._loaded:
            self.reload()

        if name not in self.raw:
            return super(PanoptesObject, self).__setattr__(name, value)
        if name not in self._edit_attributes:
            raise ReadOnlyAttributeException(
                '{} is read-only'.format(name)
            )

        self.raw[name] = value
        self.modified_attributes.add(name)

    def __repr__(self):
        return '<{} {}>'.format(
            self.__class__.__name__,
            self.id
        )

    def set_raw(self, raw, etag=None, loaded=True):
        """Replace this object's raw attribute dict and reset edit state."""
        self.raw = {}
        self.raw.update(self._savable_dict(include_none=True))
        self.raw.update(raw)
        self.etag = etag
        self.modified_attributes = set()

        self._loaded = loaded

    def _savable_dict(
        self,
        attributes=None,
        modified_attributes=None,
        include_none=False,
    ):
        # Build the dict of editable attributes to submit to the API,
        # optionally restricted to the modified ones.
        if not attributes:
            attributes = self._edit_attributes
        out = []
        for key in attributes:
            if isinstance(key, dict):
                # Nested attribute groups, e.g. {'links': (...)}.
                for subkey, subattributes in key.items():
                    if (
                        subkey == 'links' and
                        hasattr(self, 'links') and
                        modified_attributes and
                        'links' in modified_attributes
                    ):
                        out.append(
                            (subkey, self.links._savable_dict(subattributes))
                        )
                    else:
                        links_out = (subkey, self._savable_dict(
                            attributes=subattributes,
                            include_none=include_none
                        ))
                        if links_out[1]:
                            out.append(links_out)
            elif modified_attributes and key not in modified_attributes:
                continue
            else:
                value = self.raw.get(key)
                if value is not None or include_none:
                    out.append((key, value))
        return dict(out)

    def save(self):
        """
        Saves the object. If the object has not been saved before (i.e. it's
        new), then a new object is created. Otherwise, any changes are
        submitted to the API.
        """
        if not self.id:
            save_method = Panoptes.client().post
            force_reload = False
        else:
            if not self.modified_attributes:
                return
            if not self._loaded:
                self.reload()
            save_method = Panoptes.client().put
            force_reload = True

        response, response_etag = save_method(
            self.url(self.id),
            json={self._api_slug: self._savable_dict(
                modified_attributes=self.modified_attributes
            )},
            etag=self.etag
        )

        raw_resource_response = response[self._api_slug][0]
        self.set_raw(raw_resource_response, response_etag)

        if force_reload:
            # Mark stale so linked/derived data is re-fetched on next access.
            self._loaded = False

        return response

    def reload(self):
        """
        Re-fetches the object from the API, discarding any local changes.
        Returns without doing anything if the object is new.
        """
        if not self.id:
            return
        reloaded_object = self.__class__.find(self.id)
        self.set_raw(
            reloaded_object.raw,
            reloaded_object.etag
        )

    def delete(self):
        """
        Deletes the object. Returns without doing anything if the object is
        new.
        """
        if not self.id:
            return
        if not self._loaded:
            self.reload()
        return self.http_delete(self.id, etag=self.etag)
class ResultPaginator(object):
    """
    Iterator over a paginated API response, fetching subsequent pages from
    the API on demand and yielding one model instance per result.
    """

    def __init__(self, object_class, response, etag):
        if response is None:
            response = {}
        self.object_class = object_class
        self.set_page(response)
        self.etag = etag

    def __iter__(self):
        return self

    def __next__(self):
        # Serve from the current page while results remain.
        if self.object_index < self.object_count:
            current = self.object_list[self.object_index]
            self.object_index += 1
            return self.object_class(current, etag=self.etag)

        # Current page exhausted: follow the next-page link if there is one.
        if self.object_count and self.next_href:
            next_response, _ = Panoptes.client().get(self.next_href)
            self.set_page(next_response)
            return next(self)

        raise StopIteration

    # Python 2 iteration protocol alias.
    next = __next__

    def set_page(self, response):
        """Load pagination state and results from one API response page."""
        slug = self.object_class._api_slug
        self.meta = response.get('meta', {}).get(slug, {})
        self.page = self.meta.get('page', 1)
        self.page_count = self.meta.get('page_count', 1)
        self.next_href = self.meta.get('next_href')
        self.object_list = response.get(slug, [])
        self.object_count = len(self.object_list)
        self.object_index = 0
class LinkResolver(object):
    """
    Resolves the raw ``links`` dict of a :py:class:`PanoptesObject` into
    model instances and :py:class:`LinkCollection` objects.

    Model classes register themselves (via :py:meth:`register`) under their
    link slug so that link names can be mapped back to classes.
    """

    # Maps link slug -> model class; shared by all resolvers.
    types = {}
    # Link slugs which may not be modified through the API.
    readonly = set()

    @classmethod
    def register(cls, object_class, link_slug=None, readonly=False):
        """
        Register `object_class` under `link_slug` (defaults to the class's
        own ``_link_slug``). Pass ``readonly=True`` to forbid modification
        of links with this slug.
        """
        if not link_slug:
            link_slug = object_class._link_slug
        cls.types[link_slug] = object_class
        if readonly:
            cls.readonly.add(link_slug)

    @classmethod
    def isreadonly(cls, link_slug):
        """Return True if links with this slug may not be modified."""
        return link_slug in cls.readonly

    def __init__(self, parent):
        self.parent = parent

    def __getattr__(self, name):
        # Lazily load the parent before reading its links.
        if not self.parent._loaded:
            self.parent.reload()
        linked_object = self.parent.raw['links'][name]
        object_class = LinkResolver.types.get(name)
        # Fix: the original tested `type(linked_object == dict)`, which is
        # always truthy (it is the bool type itself). The intended check is
        # whether the linked object is a dict carrying its own 'type' key.
        if (
            not object_class and
            isinstance(linked_object, dict) and
            'type' in linked_object
        ):
            object_class = LinkResolver.types.get(linked_object['type'])

        if isinstance(linked_object, LinkCollection):
            return linked_object
        if isinstance(linked_object, list):
            # Wrap ID lists in a (possibly subclassed) LinkCollection and
            # cache it so repeated access returns the same collection.
            lc = getattr(self.parent, '_link_collection', LinkCollection)(
                object_class,
                name,
                self.parent,
                linked_object
            )
            self.parent.raw['links'][name] = lc
            return lc

        if isinstance(linked_object, dict) and 'id' in linked_object:
            return object_class(linked_object['id'])
        else:
            return object_class(linked_object)

    def __setattr__(self, name, value):
        # 'raw' and 'parent' are normal attributes; anything else is
        # treated as a link assignment on the parent object.
        reserved_names = ('raw', 'parent')
        if name not in reserved_names and name not in dir(self):
            if not self.parent._loaded:
                self.parent.reload()
            if isinstance(value, PanoptesObject):
                value = value.id
            self.parent.raw['links'][name] = value
            self.parent.modified_attributes.add('links')
        else:
            super(LinkResolver, self).__setattr__(name, value)

    def _savable_dict(self, edit_attributes):
        # Serialise only the editable links; iterables become ID lists.
        out = []
        for key, value in self.parent.raw['links'].items():
            if key not in edit_attributes:
                continue
            if isiterable(value):
                out.append((key, [getattr(o, 'id', o) for o in value]))
            else:
                if value:
                    out.append((key, value))
        return dict(out)
class LinkCollection(object):
    """
    Represents the set of :py:class:`.PanoptesObject` instances of a single
    class which are linked to a parent :py:class:`.PanoptesObject`.

    Supports indexing, iteration, and membership testing::

        project = Project(1234)

        print(project.links.workflows[2].display_name)

        for workflow in project.links.workflows:
            print(workflow.id)

        if Workflow(5678) in project.links.workflows:
            print('Workflow found')

        # Integers, strings, and PanoptesObjects are all OK
        if 9012 not in project.links.workflows:
            print('Workflow not found')
    """

    def __init__(self, cls, slug, parent, linked_objects):
        self._cls = cls
        self._slug = slug
        self._parent = parent
        self._linked_object_ids = list(linked_objects)
        self.readonly = LinkResolver.isreadonly(slug)

    def __contains__(self, obj):
        if isinstance(obj, self._cls):
            target = str(obj.id)
        else:
            target = str(obj)
        return target in self._linked_object_ids

    def __getitem__(self, i):
        return self._cls(self._linked_object_ids[i])

    def __iter__(self):
        # Lazily instantiate one model object per linked ID.
        return (self._cls(obj_id) for obj_id in self._linked_object_ids)

    def __repr__(self):
        rendered = ", ".join(
            "<{} {}>".format(self._cls.__name__, obj_id)
            for obj_id in self._linked_object_ids
        )
        return "[{}]".format(rendered)

    def _check_modifiable(self):
        # Common guard for add()/remove(): the slug must be writable and
        # the parent must already exist server-side.
        if self.readonly:
            raise NotImplementedError(
                '{} links can\'t be modified'.format(self._slug)
            )
        if not self._parent.id:
            raise ObjectNotSavedException(
                "Links can not be modified before the object has been saved."
            )

    @batchable
    def add(self, objs):
        """
        Adds the given `objs` to this `LinkCollection`.

        - **objs** can be a list of :py:class:`.PanoptesObject` instances, a
          list of object IDs, a single :py:class:`.PanoptesObject` instance,
          or a single object ID.

        Examples::

            organization.links.projects.add(1234)
            organization.links.projects.add(Project(1234))
            workflow.links.subject_sets.add([1,2,3,4])
            workflow.links.subject_sets.add([Project(12), Project(34)])
        """
        self._check_modifiable()
        new_ids = [
            obj_id for obj_id in self._build_obj_list(objs)
            if obj_id not in self
        ]
        if not new_ids:
            return

        self._parent.http_post(
            '{}/links/{}'.format(self._parent.id, self._slug),
            json={self._slug: new_ids},
            retry=True,
        )
        self._linked_object_ids.extend(new_ids)

    @batchable
    def remove(self, objs):
        """
        Removes the given `objs` from this `LinkCollection`.

        - **objs** can be a list of :py:class:`.PanoptesObject` instances, a
          list of object IDs, a single :py:class:`.PanoptesObject` instance,
          or a single object ID.

        Examples::

            organization.links.projects.remove(1234)
            organization.links.projects.remove(Project(1234))
            workflow.links.subject_sets.remove([1,2,3,4])
            workflow.links.subject_sets.remove([Project(12), Project(34)])
        """
        self._check_modifiable()
        doomed = [
            obj_id for obj_id in self._build_obj_list(objs)
            if obj_id in self
        ]
        if not doomed:
            return

        self._parent.http_delete(
            '{}/links/{}/{}'.format(
                self._parent.id, self._slug, ",".join(doomed)
            ),
            retry=True,
        )
        self._linked_object_ids = [
            obj_id for obj_id in self._linked_object_ids
            if obj_id not in doomed
        ]

    def _build_obj_list(self, objs):
        # Normalise a mixed list of model instances / raw IDs into ID
        # strings, rejecting anything else.
        normalised = []
        for obj in objs:
            if isinstance(obj, self._cls):
                normalised.append(str(obj.id))
            elif isinstance(obj, (int, six.string_types,)):
                normalised.append(str(obj))
            else:
                raise TypeError
        return normalised
class PanoptesAPIException(Exception):
    """
    Raised whenever the API returns an error. The exception will contain the
    raw error message from the API.
    """
class ReadOnlyAttributeException(Exception):
    """
    Raised if an attempt is made to modify an attribute of a
    :py:class:`PanoptesObject` which the API does not allow to be modified.
    """
class ObjectNotSavedException(Exception):
    """
    Raised if an attempt is made to perform an operation on an unsaved
    :py:class:`PanoptesObject` which requires the object to be saved first.
    """
class Talk(object):
    """
    Thin wrapper which routes Panoptes client requests at the Talk API
    endpoint instead of the main API.
    """

    def __init__(self, endpoint='https://talk.zooniverse.org/'):
        self.endpoint = endpoint

    def http_get(self, *args, **kwargs):
        return Panoptes.client().get(
            *args, **dict(kwargs, endpoint=self.endpoint))

    def http_post(self, *args, **kwargs):
        return Panoptes.client().post(
            *args, **dict(kwargs, endpoint=self.endpoint))

    def http_put(self, *args, **kwargs):
        return Panoptes.client().put(
            *args, **dict(kwargs, endpoint=self.endpoint))

    def http_delete(self, *args, **kwargs):
        return Panoptes.client().delete(
            *args, **dict(kwargs, endpoint=self.endpoint))

    def get_data_request(self, section, kind):
        """Fetch existing Talk data requests for a section/kind."""
        query = {
            'section': section,
            'kind': kind,
        }
        return self.http_get('data_requests', params=query)

    def post_data_request(self, section, kind):
        """Create a new Talk data request for a section/kind."""
        payload = {
            'data_requests': {
                'section': section,
                'kind': kind,
            }
        }
        return self.http_post('data_requests', json=payload)
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/panoptes.py",
"copies": "1",
"size": "33546",
"license": "apache-2.0",
"hash": -4061062087590434000,
"line_mean": 27.5741056218,
"line_max": 113,
"alpha_frac": 0.5228641269,
"autogenerated": false,
"ratio": 4.335228741276816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023532760120305462,
"num_lines": 1174
} |
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.enums import InterfaceType
def config_first_detected_device(board_num, dev_id_list=None):
    """Registers the first detected DAQ device with the Universal Library.

    When ``dev_id_list`` is given, the first detected device whose product
    ID appears in that list is used instead.

    Parameters
    ----------
    board_num : int
        The board number to assign to the board when configuring the device.
    dev_id_list : list[int], optional
        A list of product IDs used to filter the results. Default is None.
        See UL documentation for device IDs.
    """
    ul.ignore_instacal()

    detected = ul.get_daq_device_inventory(InterfaceType.ANY)
    if not detected:
        raise Exception('Error: No DAQ devices found')

    print('Found', len(detected), 'DAQ device(s):')
    for dev in detected:
        print(' {} ({}) - Device ID = {}'.format(
            dev.product_name, dev.unique_id, dev.product_id))

    device = detected[0]
    if dev_id_list:
        device = next(
            (dev for dev in detected if dev.product_id in dev_id_list),
            None)
        if not device:
            raise Exception(
                'Error: No DAQ device found in device ID list: '
                + ','.join(str(dev_id) for dev_id in dev_id_list))

    # Add the first DAQ device to the UL with the specified board number
    ul.create_daq_device(board_num, device)
| {
"repo_name": "mccdaq/mcculw",
"path": "examples/console/console_examples_util.py",
"copies": "1",
"size": "1538",
"license": "mit",
"hash": -5797878202145285000,
"line_mean": 36.512195122,
"line_max": 80,
"alpha_frac": 0.6397919376,
"autogenerated": false,
"ratio": 3.769607843137255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9907894210125994,
"avg_score": 0.00030111412225233364,
"num_lines": 41
} |
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from mcculw.enums import FunctionType, InfoType, BoardInfo, ChannelType
class DaqiInfo:
    """Reports the DAQ input capabilities of the device at ``board_num``.

    NOTE: This class is primarily used to provide hardware information for the
    library examples and may change some hardware configuration values. It is
    recommended that values provided by this class be hard-coded in production
    code.

    Parameters
    ----------
    board_num : int
        The board number associated with the device when created with
        :func:`.create_daq_device` or configured with Instacal.
    """

    def __init__(self, board_num):
        self._board_num = board_num

    @property
    def is_supported(self):
        # The device supports DAQ input iff querying DAQIFUNCTION status
        # does not raise.
        try:
            ul.get_status(self._board_num, FunctionType.DAQIFUNCTION)
        except ul.ULError:
            return False
        return True

    @property
    def supported_channel_types(self):
        if not self.is_supported:
            return []
        type_count = ul.get_config(
            InfoType.BOARDINFO, self._board_num, 0,
            BoardInfo.DAQINUMCHANTYPES)
        return [
            ChannelType(ul.get_config(
                InfoType.BOARDINFO, self._board_num, type_index,
                BoardInfo.DAQICHANTYPE))
            for type_index in range(type_count)
        ]

    @property
    def supports_setpoints(self):
        if not self.is_supported:
            return False
        # Probe with an empty setpoint configuration; unsupported devices
        # raise a ULError.
        try:
            ul.daq_set_setpoints(
                self._board_num, [], [], [], [], [], [], [], [], 0)
        except ULError:
            return False
        return True
| {
"repo_name": "mccdaq/mcculw",
"path": "mcculw/device_info/daqi_info.py",
"copies": "1",
"size": "2042",
"license": "mit",
"hash": 473801311233238600,
"line_mean": 31.935483871,
"line_max": 78,
"alpha_frac": 0.6023506366,
"autogenerated": false,
"ratio": 4.263048016701461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5365398653301461,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
import os
import tkinter as tk
from tkinter import messagebox
from mcculw import ul
from mcculw.enums import InterfaceType, ErrorCode
from mcculw.ul import ULError
class UIExample(tk.Frame, object):
    """Provides a base class for all UI-based examples in this package."""

    def __init__(self, master=None):
        super(UIExample, self).__init__(master)
        self.board_num = 0

        # The window icon lives next to this module.
        example_dir = os.path.dirname(os.path.realpath(__file__))
        icon_path = os.path.join(example_dir, 'MCC.ico')

        # Configure the containing top-level window.
        master.iconbitmap(icon_path)
        master.wm_title(type(self).__name__)
        master.minsize(width=400, height=75)
        master.grid_columnconfigure(0, weight=1)
        master.grid_rowconfigure(0, weight=1)
        self.grid(sticky=tk.NSEW)

    def create_unsupported_widgets(self, error=False):
        # Tell the user why this example cannot run on the selected board.
        if error:
            reason = "was not found."
        else:
            reason = "is not compatible with this example."
        message_label = tk.Label(self, fg="red")
        message_label["text"] = "Board " + str(self.board_num) + " " + reason
        message_label.pack(fill=tk.X, side=tk.LEFT, anchor=tk.NW)

        button_frame = tk.Frame(self)
        button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)

    def configure_first_detected_device(self):
        ul.ignore_instacal()
        available = ul.get_daq_device_inventory(InterfaceType.ANY)
        if not available:
            raise ULError(ErrorCode.BADBOARD)
        # Add the first DAQ device to the UL with the specified board number
        ul.create_daq_device(self.board_num, available[0])
def show_ul_error(ul_error):
    """Show a modal error dialog describing the given UL error."""
    messagebox.showerror("Error", 'A UL Error occurred.\n\n' + str(ul_error))
def validate_positive_int_entry(p):
    """Validate a tkinter entry's prospective contents as a non-negative int.

    Intended for use as a tkinter ``validatecommand`` callback with the
    ``%P`` substitution (the would-be value of the entry).

    Parameters
    ----------
    p : str or None
        Prospective contents of the entry widget.

    Returns
    -------
    bool
        True when ``p`` is an empty string (so the user can clear the
        field while typing) or parses as an integer >= 0; False for
        ``None``, non-numeric text, or negative values.
    """
    if not p:
        # Empty string is a valid in-progress entry; None is not.
        return p is not None
    try:
        return int(p) >= 0
    except ValueError:
        return False
def validate_float_entry(p):
    """Validate a tkinter entry's prospective contents as a float.

    Intended for use as a tkinter ``validatecommand`` callback with the
    ``%P`` substitution (the would-be value of the entry).

    Parameters
    ----------
    p : str or None
        Prospective contents of the entry widget.

    Returns
    -------
    bool
        True when ``p`` is an empty string (so the user can clear the
        field while typing) or parses as a float (negatives allowed);
        False for ``None`` or non-numeric text.
    """
    if not p:
        # Empty string is a valid in-progress entry; None is not.
        return p is not None
    try:
        float(p)
    except ValueError:
        return False
    return True
| {
"repo_name": "mccdaq/mcculw",
"path": "examples/ui/ui_examples_util.py",
"copies": "1",
"size": "2374",
"license": "mit",
"hash": -4298652586721410000,
"line_mean": 29.0506329114,
"line_max": 80,
"alpha_frac": 0.6267902275,
"autogenerated": false,
"ratio": 3.657935285053929,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.978394414046454,
"avg_score": 0.0001562744178777934,
"num_lines": 79
} |
from __future__ import (absolute_import, division, print_function)
from code import compile_command, InteractiveInterpreter
from PySide import QtCore, QtGui
import sys
from traceback import print_exc
from .Ui_ConsoleWindow import Ui_ConsoleWindow
class ConsoleWindow(QtGui.QMainWindow, Ui_ConsoleWindow):
    """An interactive Python console window.

    Wraps a ``code.InteractiveInterpreter`` whose namespace exposes the
    parent window (``retwin``), the parent's application (``retapp``),
    this console itself (``conwin``), and the ``sys`` module.  Output,
    errors, and echoed input are written to the ``consoleOutput`` widget
    through ``ConsoleOutputWriter`` instances with distinct text formats.
    """

    def __init__(self, parent=None):
        super(ConsoleWindow, self).__init__(parent)
        self.setupUi(self)
        # Interpreter namespace for the embedded console; assumes the
        # parent exposes an `application` attribute — TODO confirm.
        self.interp = InteractiveInterpreter({
            '__name__': "__console__",
            '__doc__': None,
            'retwin': parent,
            'retapp': parent.application,
            'conwin': self,
            'sys': sys,
        })
        # Green normal-weight text for stdout.
        outFormat = QtGui.QTextCharFormat()
        outFormat.setForeground(QtGui.QBrush(QtGui.QColor(0, 128, 0)))
        outFormat.setFontWeight(QtGui.QFont.Normal)
        # Red text for stderr.
        errFormat = QtGui.QTextCharFormat()
        errFormat.setForeground(QtGui.QBrush(QtGui.QColor(255, 0, 0)))
        # Bold text for echoed user input.
        entryFormat = QtGui.QTextCharFormat()
        entryFormat.setFontWeight(QtGui.QFont.Bold)
        self.stdout = ConsoleOutputWriter(
            self.consoleOutput, "<stdout>", outFormat)
        self.stderr = ConsoleOutputWriter(
            self.consoleOutput, "<stderr>", errFormat)
        self.entry = ConsoleOutputWriter(
            self.consoleOutput, "<stdin>", entryFormat)
        # The entry widget calls back into this window to run commands.
        self.consoleEntry.evaluate = self.evaluate
        return

    def evaluate(self, text):
        """Compile and run `text` in the embedded interpreter.

        Returns False when `text` is an incomplete command (caller should
        keep accumulating input), True once the command has been executed
        (successfully or not).  Tracebacks are printed to the redirected
        stderr rather than propagated.
        """
        # Redirect stdout, stderr while executing the command
        try:
            saved_stdout, saved_stderr = sys.stdout, sys.stderr
            sys.stdout, sys.stderr = self.stdout, self.stderr
            try:
                compiled = compile_command(text, "<console>")
                if compiled is None:
                    # Incomplete input (e.g. an open block): ask for more.
                    return False
                # Echo the command, continuation-prompt style, before running.
                self.entry.write(">>> " + text.replace("\n", "\n... ") + "\n")
                self.interp.runcode(compiled)
            except Exception as e:
                # Syntax errors from compile_command land here; the
                # traceback goes to the redirected stderr widget.
                print_exc()
        finally:
            # Always restore the real stdout/stderr, even on error.
            sys.stdout, sys.stderr = saved_stdout, saved_stderr
        return True
class ConsoleOutputWriter(object):
    """A write-only, file-like object that appends text to a QTextEdit.

    Instances implement enough of the Python file protocol to stand in
    for ``sys.stdout`` / ``sys.stderr``: every read-side operation yields
    an empty result, while ``write``/``writelines`` append text to the
    target widget, optionally using a fixed ``QTextCharFormat``.
    """

    def __init__(self, target, name="<output>", textFormat=None):
        super(ConsoleOutputWriter, self).__init__()
        self.target = target          # QTextEdit receiving the output
        self.name = name              # pseudo file name, e.g. "<stdout>"
        self.softspace = 0            # file-protocol bookkeeping slot
        self.textFormat = textFormat  # optional QTextCharFormat for writes
        return

    # --- read side of the protocol: all no-ops / empty results ---

    def close(self):
        return

    def flush(self):
        return

    def next(self):
        raise StopIteration()

    def read(self, size=0):
        return ""

    def readline(self, size=0):
        return ""

    def readlines(self, sizehint=0):
        return []

    def xreadlines(self):
        return []

    def seek(self, offset, whence=0):
        return

    def tell(self):
        return 0

    def truncate(self, size=None):
        return

    # --- write side: the part that actually does work ---

    def write(self, data):
        """Append `data` at the end of the target widget's document."""
        document = self.target.document()
        cursor = QtGui.QTextCursor(document)
        cursor.movePosition(cursor.End)
        if self.textFormat is None:
            cursor.insertText(data)
        else:
            cursor.insertText(data, self.textFormat)
        # Keep the newly written text visible, but never stomp on an
        # active user selection in the widget.
        if not self.target.textCursor().hasSelection():
            self.target.setTextCursor(cursor)
            self.target.ensureCursorVisible()
        return

    def writelines(self, seq):
        for chunk in seq:
            self.write(chunk)
        return

    @property
    def closed(self):
        return False

    @property
    def encoding(self):
        return None

    @property
    def mode(self):
        return 'w'

    @property
    def newlines(self):
        return None
# Local variables:
# mode: Python
# tab-width: 8
# indent-tabs-mode: nil
# End:
# vi: set expandtab tabstop=8
| {
"repo_name": "dacut/ret",
"path": "ret/ui/qt/ConsoleWindow.py",
"copies": "1",
"size": "3903",
"license": "bsd-2-clause",
"hash": -5045925384434309000,
"line_mean": 25.1946308725,
"line_max": 78,
"alpha_frac": 0.5857033051,
"autogenerated": false,
"ratio": 4.196774193548387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5282477498648387,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Counter, defaultdict
from decimal import Decimal
from operator import itemgetter
from manhattan import visitor
from .rollups import AllRollup, LocalDayRollup, LocalWeekRollup, BrowserRollup
from .cache import DeferredLRUCache
from .model import VisitorHistory, Test, Goal
from .persistence.sql import SQLPersistentStore
# Default rollup configuration: aggregate counters over all time, by
# US/Pacific calendar day and week, and by browser.
default_rollups = {
    'all': AllRollup(),
    'pst_day': LocalDayRollup('America/Los_Angeles'),
    'pst_week': LocalWeekRollup('America/Los_Angeles'),
    'browser': BrowserRollup(),
}
class Backend(object):
    """Aggregate a stream of visitor records into persistent counters.

    Consumes page/goal/split/pixel records, de-duplicates per-visitor
    events, and accumulates conversion, goal-value, impression, and
    variant-conversion counters in memory.  Counters are flushed to a
    SQL-backed store every `flush_every` records together with a stream
    pointer so processing can resume after a restart.

    NOTE(review): this class uses ``dict.iteritems()`` in several
    methods, which is Python 2-only — confirm the supported runtime.
    """

    def __init__(self, sqlalchemy_url, rollups=None, complex_goals=None,
                 flush_every=500, cache_size=2000):
        # Mapping of rollup key -> rollup strategy used to bucket counters.
        self.rollups = rollups or default_rollups
        # Iterable of (name, include, exclude) goal-set definitions.
        self.complex_goals = complex_goals or []
        self.store = store = SQLPersistentStore(sqlalchemy_url)
        # Write-behind LRU caches layered over the persistent store.
        self.visitors = DeferredLRUCache(get_backend=store.get_visitor_history,
                                         put_backend=store.put_visitor_history,
                                         max_size=cache_size)
        self.tests = DeferredLRUCache(get_backend=store.get_test,
                                      put_backend=store.put_test,
                                      max_size=cache_size)
        self.goals = DeferredLRUCache(get_backend=store.get_goal,
                                      put_backend=store.put_goal,
                                      max_size=cache_size)
        # Resume from the last persisted position in the record stream.
        self.pointer = self.store.get_pointer()
        self.records_since_flush = 0
        self.flush_every = flush_every
        self.reset_counters()

    def get_pointer(self):
        """Return the current (possibly unflushed) stream pointer."""
        return self.pointer

    def reset_counters(self):
        """Zero all in-memory incremental accumulators (after a flush)."""
        self.inc_conversions = Counter()
        self.inc_values = defaultdict(Decimal)
        self.inc_variant_conversions = Counter()
        self.inc_variant_values = Counter()
        self.inc_impressions = Counter()

    def handle(self, rec, ptr):
        """Process one stream record `rec` at stream position `ptr`.

        Records from a visitor are queued until a 'pixel' record proves
        the visitor is not a bot; flushes to the store happen every
        `flush_every` records.
        """
        try:
            history = self.visitors.get(rec.vid)
        except KeyError:
            # First time we've seen this visitor.
            history = VisitorHistory()
        if rec.key == 'pixel':
            # Pixel fired: visitor is human; replay any queued events.
            history.nonbot = True
            # NOTE(review): this loop rebinds `rec`; the later
            # `self.visitors.put(rec.vid, ...)` then uses the last queued
            # record's vid.  Presumably all queued records share the same
            # vid as the history — confirm.
            for rec in history.nonbot_queue:
                self.handle_nonbot(rec, history)
        elif history.nonbot:
            self.handle_nonbot(rec, history)
        else:
            history.nonbot_queue.append(rec)
            # Limit nonbot queue to most recent 500 events.
            del history.nonbot_queue[:-500]
        self.visitors.put(rec.vid, history)
        self.pointer = ptr
        self.records_since_flush += 1
        if self.records_since_flush > self.flush_every:
            self.flush()
            self.records_since_flush = 0

    def handle_nonbot(self, rec, history):
        """Dispatch a confirmed-human record to the right recorder."""
        assert rec.key in ('page', 'goal', 'split')
        ts = int(float(rec.timestamp))
        site_id = int(rec.site_id)
        if rec.key == 'page':
            # Page views double as the implicit 'viewed page' goal.
            history.ips.add(rec.ip)
            history.user_agents.add(rec.user_agent)
            self.record_conversion(history,
                                   vid=rec.vid,
                                   name=u'viewed page',
                                   timestamp=ts,
                                   site_id=site_id)
        elif rec.key == 'goal':
            self.record_conversion(history,
                                   vid=rec.vid,
                                   name=rec.name,
                                   timestamp=ts,
                                   site_id=site_id,
                                   value=rec.value,
                                   value_type=rec.value_type,
                                   value_format=rec.value_format)
        else:  # split
            self.record_impression(history,
                                   vid=rec.vid,
                                   name=rec.test_name,
                                   selected=rec.selected,
                                   timestamp=ts,
                                   site_id=site_id)

    def record_impression(self, history, vid, name, selected, timestamp,
                          site_id):
        """Record that a visitor saw variant `selected` of test `name`."""
        variant = name, selected
        history.variants.add(variant)
        try:
            test = self.tests.get(name)
        except KeyError:
            test = Test()
            test.first_timestamp = timestamp
        test.last_timestamp = timestamp
        test.variants.add(variant)
        self.tests.put(name, test)
        # Record this impression in appropriate time buckets both on the
        # history object and in the current incremental accumulators.
        for rollup_key, rollup in self.rollups.iteritems():
            bucket_id = rollup.get_bucket(timestamp, history)
            key = (name, selected, rollup_key, bucket_id, site_id)
            if key not in history.impression_keys:
                # Count each (variant, bucket, site) once per visitor.
                history.impression_keys.add(key)
                self.inc_impressions[key] += 1

    def iter_rollups(self, timestamp, history):
        """Yield (rollup_key, bucket_id) for every configured rollup."""
        for rollup_key, rollup in self.rollups.iteritems():
            bucket_id = rollup.get_bucket(timestamp, history)
            yield rollup_key, bucket_id

    def record_complex_goals(self, history, new_name, timestamp, site_id):
        """Update complex (multi-goal) conversions after `new_name` fires."""
        for complex_name, include, exclude in self.complex_goals:
            # If all goals have now been satisfied in the 'include' set,
            # trigger a +1 delta on this complex goal in the current
            # rollups, and track that as a complex goal conversion in this
            # visitor history.
            if (new_name in include) and (history.goals >= include):
                new_keys = []
                for rollup_key, bucket_id in self.iter_rollups(timestamp,
                                                               history):
                    conv_key = (complex_name, rollup_key, bucket_id, site_id)
                    new_keys.append(conv_key)
                    self.inc_conversions[conv_key] += 1
                history.complex_keys[complex_name] = new_keys
            # If we are adding the first goal in the 'exclude' set, trigger
            # a -1 delta for all conversions on that complex goal in the
            # visitor history.
            if history.goals & exclude == set([new_name]):
                for key in history.complex_keys.pop(complex_name, []):
                    self.inc_conversions[key] -= 1

    def record_conversion(self, history, vid, name, timestamp, site_id,
                          value=None, value_type='', value_format=''):
        """Record a goal conversion (with optional monetary-style value)."""
        try:
            goal = self.goals.get(name)
        except KeyError:
            goal = Goal()
            goal.value_type = value_type
            goal.value_format = value_format
        self.goals.put(name, goal)
        if value:
            value = Decimal(value)
        if name not in history.goals:
            history.goals.add(name)
            # If this is a 'new' goal for this visitor, process complex
            # conversion goals.
            self.record_complex_goals(history, name, timestamp, site_id)
        # Record this goal conversion in appropriate time buckets both on the
        # history object and in the current incremental accumulators.
        for rollup_key, bucket_id in self.iter_rollups(timestamp, history):
            conv_key = (name, rollup_key, bucket_id, site_id)
            if conv_key not in history.conversion_keys:
                # Count each (goal, bucket, site) once per visitor.
                history.conversion_keys.add(conv_key)
                self.inc_conversions[conv_key] += 1
                if value:
                    self.inc_values[conv_key] += value
            # Attribute the conversion to every variant the visitor has
            # seen, de-duplicated independently of the plain conversion.
            for test_name, selected in history.variants:
                vc_key = (name, test_name, selected, rollup_key, bucket_id,
                          site_id)
                if vc_key not in history.variant_conversion_keys:
                    history.variant_conversion_keys.add(vc_key)
                    self.inc_variant_conversions[vc_key] += 1
                    if value:
                        self.inc_variant_values[vc_key] += value

    def flush(self):
        """Write caches, counter deltas, and the pointer in one txn."""
        self.store.begin()
        self.visitors.flush()
        self.tests.flush()
        self.goals.flush()
        # Add local counter state onto existing persisted counters.
        self.store.increment_conversion_counters(self.inc_conversions,
                                                 self.inc_values)
        self.store.increment_impression_counters(self.inc_impressions)
        self.store.increment_variant_conversion_counters(
            self.inc_variant_conversions, self.inc_variant_values)
        self.reset_counters()
        self.store.update_pointer(self.pointer)
        self.store.commit()

    def count(self, goal=None, variant=None, rollup_key='all', bucket_id=0,
              site_id=None):
        """Return flushed + unflushed counts for a goal, variant, or both.

        goal+variant -> variant conversions; goal only -> conversions;
        variant only -> impressions.
        """
        assert goal or variant, "must specify goal or variant"
        if goal and variant:
            test_name, selected = variant
            key = goal, test_name, selected, rollup_key, bucket_id, site_id
            local = self.inc_variant_conversions[key]
            flushed = self.store.count_variant_conversions(*key)[0]
        elif goal:
            key = goal, rollup_key, bucket_id, site_id
            local = self.inc_conversions[key]
            flushed = self.store.count_conversions(*key)[0]
        else:
            # variant
            name, selected = variant
            key = name, selected, rollup_key, bucket_id, site_id
            local = self.inc_impressions[key]
            flushed = self.store.count_impressions(*key)
        return local + flushed

    def goal_value(self, goal, variant=None, rollup_key='all', bucket_id=0,
                   site_id=None):
        """Return the value of `goal`, interpreted per its value_type.

        Untyped goals fall back to a plain conversion count; typed goals
        are summed, averaged over conversions, or divided by page views
        (SUM / AVERAGE / PER respectively).
        """
        goal_obj = self.goals.get(goal)
        if not goal_obj.value_type:
            # Valueless goal: just count conversions.
            return self.count(goal, variant, rollup_key=rollup_key,
                              bucket_id=bucket_id, site_id=site_id)
        if variant:
            test_name, selected = variant
            key = goal, test_name, selected, rollup_key, bucket_id, site_id
            local = self.inc_variant_values[key]
            flushed = self.store.count_variant_conversions(*key)[1]
        else:
            key = goal, rollup_key, bucket_id, site_id
            local = self.inc_values[key]
            flushed = self.store.count_conversions(*key)[1]
        # Route the flushed value through str() so floats coerce cleanly
        # into Decimal arithmetic.
        value = local + Decimal(str(flushed))
        if goal_obj.value_type == visitor.SUM:
            return value
        elif goal_obj.value_type == visitor.AVERAGE:
            count = self.count(goal, variant, rollup_key=rollup_key,
                               bucket_id=bucket_id, site_id=site_id)
            return value / count if count > 0 else 0
        else:
            # visitor.PER
            count = self.count(u'viewed page', variant,
                               rollup_key=rollup_key,
                               bucket_id=bucket_id, site_id=site_id)
            return value / count if count > 0 else 0

    def all_tests(self):
        """Return (name, first_timestamp, last_timestamp) triples,
        newest last-activity first, merging flushed and unflushed state.
        """
        # Start with flushed.  (NOTE: `all` shadows the builtin here.)
        all = self.store.all_tests()
        # Update from unflushed (so that dirty entries overwrite the flushed).
        all.update(self.tests.entries)
        # Sort by last timestamp descending.
        all = [(name, test.first_timestamp, test.last_timestamp)
               for name, test in all.iteritems()]
        all.sort(key=itemgetter(2), reverse=True)
        return all

    def results(self, test_name, goals, site_id=None):
        """Return goal values per variant of `test_name`.

        Keys of the returned dict are variant selections (populations);
        values are lists of goal values in the order of `goals`.
        """
        # Return a dict: keys are populations, values are a list of values for
        # the goals specified.
        test = self.tests.get(test_name)
        ret = {}
        for variant in test.variants:
            values = []
            for goal in goals:
                values.append(self.goal_value(goal, variant, site_id=site_id))
            ret[variant[1]] = values
        return ret
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/backend/__init__.py",
"copies": "1",
"size": "11943",
"license": "mit",
"hash": 7187486088291852000,
"line_mean": 37.6504854369,
"line_max": 79,
"alpha_frac": 0.5513690028,
"autogenerated": false,
"ratio": 4.200844178684489,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252213181484489,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict, namedtuple
import functools
import itertools
import re
import nfldb
__pdoc__ = {}
def tag_players(db, players):
"""
Given a list of `nflfan.RosterPlayer` objects, set the
`nflfan.RosterPlayer.player` attribute to its corresponding
`nfldb.Player` object. (Except for roster players corresponding to
an entire team, e.g., a defense.)
"""
ids = [p.player_id for p in players if p.is_player]
q = nfldb.Query(db).player(player_id=ids)
dbps = dict([(p.player_id, p) for p in q.as_players()])
return [p._replace(player=dbps.get(p.player_id, None)) for p in players]
def score_roster(db, schema, roster, phase=nfldb.Enums.season_phase.Regular):
"""
Given a database connection, a `nflfan.ScoreSchema`, and
a `nflfan.Roster`, return a new `nflfan.Roster` with
`nflfan.RosterPlayer` objects with the `playing` and `points`
attributes set.
`phase` may be set to a different phase of the season, but
traditional fantasy sports are only played during the regular
season, which is the default.
Each `nflfan.RosterPlayer` in the roster given will also have the
`nflfan.RosterPlayer.player` attribute set to its corresponding
`nfldb.Player` object. (Except for roster players corresponding to
an entire team, e.g., a defense.)
"""
tagged = tag_players(db, roster.players)
scored = score_players(db, schema, tagged, phase=phase)
return roster._replace(players=scored)
def score_players(db, schema, players, phase=nfldb.Enums.season_phase.Regular):
"""
Given a database connection, a `nflfan.ScoreSchema`, and a list of
`nflfan.RosterPlayer`, return a list of new `nflfan.RosterPlayer`
objects with the `playing` and `points` attributes set.
`phase` may be set to a different phase of the season, but
traditional fantasy sports are only played during the regular
season, which is the default.
N.B. `players` is a list because of performance reasons. Namely,
scoring can use fewer SQL queries when given a collection of players
to score as opposed to a single player.
"""
if len(players) == 0:
return []
season, week = players[0].season, players[0].week
def player_query(ids):
return _game_query(db, players[0], phase=phase).player(player_id=ids)
def week_games():
q = _game_query(db, players[0], phase=phase)
games = {}
for game in q.as_games():
games[game.home_team] = game
games[game.away_team] = game
return games
def tag(games, pps, fgs, rp):
if rp.is_empty:
return rp
game = games.get(rp.team, None)
if rp.is_defense:
pts = _score_defense_team(schema, db, game, rp, phase)
else:
pp = pps.get(rp.player_id, None)
pp_fgs = fgs.get(rp.player_id, [])
pts = _score_player(schema, pp, pp_fgs)
return rp._replace(game=game, points=pts)
games = week_games()
pids = [p.player_id for p in players if p.is_player]
fgs = _pp_field_goals(db, players, phase=phase)
pps = dict([(p.player_id, p) for p in player_query(pids).as_aggregate()])
return map(functools.partial(tag, games, pps, fgs), players)
def _score_defense_team(schema, db, game, rplayer,
phase=nfldb.Enums.season_phase.Regular):
"""
Given a defensive `nflfan.RosterPlayer`, a nfldb database
connection and a `nflfan.ScoreSchema`, return the total defensive
fantasy points for the team.
"""
assert rplayer.is_defense
if game is None:
return 0.0
q = _game_query(db, rplayer, phase=phase)
q.play_player(team=rplayer.team)
teampps = q.as_aggregate()
if len(teampps) == 0:
return 0.0
s = 0.0
stats = lambda pp: _pp_stats(pp, _is_defense_stat)
for cat, v in itertools.chain(*map(stats, teampps)):
s += schema.settings.get(cat, 0.0) * v
pa = _defense_points_allowed(schema, db, game, rplayer, phase=phase)
pacat = schema._pick_range_setting('defense_pa', pa)
s += schema.settings.get(pacat, 0.0)
return s
def _defense_points_allowed(schema, db, game, rplayer,
phase=nfldb.Enums.season_phase.Regular):
"""
Return the total number of points allowed by a defensive team
`nflfan.RosterPlayer`.
"""
assert rplayer.is_defense
assert game is not None
if rplayer.team == game.home_team:
pa = game.away_score
else:
pa = game.home_score
if schema.settings.get('defense_pa_style', '') == 'yahoo':
# It is simpler to think of PA in this case as subtracting certain
# point allocations from the total scored. More details here:
# http://goo.gl/t5YMFC
#
# Only get the player stats for defensive plays on the opposing
# side. Namely, the only points not in PA are points scored against
# rplayer's offensive unit.
fg_blk_tds = nfldb.Query(db)
fg_blk_tds.play_player(defense_misc_tds=1, kicking_fga=1)
notcount = nfldb.QueryOR(db)
notcount.play_player(defense_safe=1, defense_int_tds=1,
defense_frec_tds=1)
notcount.orelse(fg_blk_tds)
q = _game_query(db, rplayer, phase=phase)
q.play_player(gsis_id=game.gsis_id, team__ne=rplayer.team)
q.andalso(notcount)
for pp in q.as_aggregate():
pa -= 2 * pp.defense_safe
pa -= 6 * pp.defense_int_tds
pa -= 6 * pp.defense_frec_tds
pa -= 6 * pp.defense_misc_tds
return pa
def score_details(schema, pp, fgs=None):
"""
Given an nfldb database connection, a `nflfan.ScoreSchema` and
a `nfldb.PlayPlayer` object, return a dictionary mapping the name of
a score statistic to a tuple of statistic and total point value
corresponding to the statistics in `pp`.
`fgs` should be a a list of `nfldb.PlayPlayer`, where each
describes a *single* field goal `attempt.
"""
fgs = fgs or []
def add(d, cat, stat, pts):
if pts == 0:
return
if cat in d:
d[cat] = (stat + d[cat][0], pts + d[cat][1])
else:
d[cat] = (stat, pts)
d = {}
for cat, v in _pp_stats(pp, lambda cat: not _is_defense_stat(cat)):
add(d, cat, v, v * schema.settings.get(cat, 0.0))
for field, pts, start, end in schema._bonuses():
v = getattr(pp, field, 0.0)
if start <= v <= end:
add(d, field, v, pts)
for pp in fgs:
for cat, v in _pp_stats(pp, lambda cat: cat.startswith('kicking_fg')):
if cat in ('kicking_fgm_yds', 'kicking_fgmissed_yds'):
prefix = re.sub('_yds$', '', cat)
scat = schema._pick_range_setting(prefix, v)
if scat is not None:
add(d, scat, 1, schema.settings.get(scat, 0.0))
return d
def _score_player(schema, pp, fgs=[]):
"""
Given a `nfldb.PlayPlayer` object, return the total fantasy points
according to the `nflfan.ScoreSchema` given.
`fgs` should be a a list of `nfldb.PlayPlayer`, where each
describes a *single* field goal `attempt.
"""
if not pp:
return 0.0
s = 0.0
for cat, v in _pp_stats(pp, lambda cat: not _is_defense_stat(cat)):
s += v * schema.settings.get(cat, 0.0)
for field, pts, start, end in schema._bonuses():
if start <= getattr(pp, field, 0.0) <= end:
s += pts
for pp in fgs:
for cat, v in _pp_stats(pp, lambda cat: cat.startswith('kicking_fg')):
if cat in ('kicking_fgm_yds', 'kicking_fgmissed_yds'):
prefix = re.sub('_yds$', '', cat)
score_cat = schema._pick_range_setting(prefix, v)
if score_cat is not None:
s += schema.settings.get(score_cat, 0.0)
return s
def _pp_field_goals(db, rplayers, phase=nfldb.Enums.season_phase.Regular):
"""
Given a nfldb connection and a list of `nflfan.RosterPlayer` objects,
return a dictionary mapping player id to a list of `nfldb.PlaPlayer`,
where each describes a *single* field goal attempt.
This dictionary can be passed to `nflfan._score_player`.
"""
if len(rplayers) == 0:
return {}
q = _game_query(db, rplayers[0], phase=phase).play_player(kicking_fga=1)
d = defaultdict(list)
for pp in q.as_play_players():
d[pp.player_id].append(pp)
return d
def _pp_stats(pp, predicate=None):
for cat in pp.fields:
if predicate is not None and not predicate(cat):
continue
yield (cat, float(getattr(pp, cat)))
def _is_defense_stat(name):
return name.startswith('defense_')
def _game_query(db, rp, phase=nfldb.Enums.season_phase.Regular):
q = nfldb.Query(db)
return q.game(season_year=rp.season, season_type=phase, week=rp.week)
class ScoreSchema (namedtuple('ScoreSchema', 'name settings')):
__pdoc__['ScoreSchema.name'] = \
"""The name given to this schema in the configuration."""
__pdoc__['ScoreSchema.settings'] = \
"""
A dictionary mapping a scoring category to its point value. The
interpretation of the point value depends on the scoring
category.
"""
def _pick_range_setting(self, prefix, v):
match = re.compile('%s_([0-9]+)_([0-9]+)' % prefix)
for cat in self.settings.keys():
m = match.match(cat)
if not m:
continue
start, end = int(m.group(1)), int(m.group(2))
if start <= v <= end:
return cat
return None
def _bonuses(self):
match = re.compile('^bonus_(.+)_([0-9]+)_([0-9]+)$')
for cat, pts in self.settings.items():
m = match.match(cat)
if not m:
continue
field, start, end = m.group(1), int(m.group(2)), int(m.group(3))
yield field, pts, start, end
| {
"repo_name": "BurntSushi/nflfan",
"path": "nflfan/score.py",
"copies": "2",
"size": "10123",
"license": "unlicense",
"hash": 6210435830840019000,
"line_mean": 34.1493055556,
"line_max": 79,
"alpha_frac": 0.6066383483,
"autogenerated": false,
"ratio": 3.3321263989466754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4938764747246675,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import heapq
import re
import sys
from psycopg2.extensions import cursor as tuple_cursor
from nfldb.db import Tx
import nfldb.types as types
# Python 2/3 compatibility: alias the broadest available string type.
try:
    strtype = basestring
except NameError:  # I have lofty hopes for Python 3.
    strtype = str

# Docstring overrides consumed by the `pdoc` documentation generator.
__pdoc__ = {}

_sql_max_in = 4500
"""The maximum number of expressions to allow in a `IN` expression."""
def aggregate(objs):
    """
    Given any collection of Python objects that provide a
    `play_players` attribute, `aggregate` will return a list of
    `PlayPlayer` objects with statistics aggregated (summed) over each
    player. (As a special case, if an element in `objs` is itself a
    `nfldb.PlayPlayer` object, then it is used and a `play_players`
    attribute is not required.)

    For example, `objs` could be a mixed list of `nfldb.Game` and
    `nfldb.Play` objects.

    The order of the list returned is stable with respect to the
    order of players obtained from each element in `objs`.

    It is recommended to use `nfldb.Query.aggregate` and
    `nfldb.Query.as_aggregate` instead of this function since summing
    statistics in the database is much faster. However, this function
    is provided for aggregation that cannot be expressed by the query
    interface.
    """
    # OrderedDict preserves first-seen player order, which provides the
    # stability guarantee documented above.
    summed = OrderedDict()
    for obj in objs:
        # A bare PlayPlayer acts as its own singleton collection.
        pps = [obj] if isinstance(obj, types.PlayPlayer) else obj.play_players
        for pp in pps:
            if pp.player_id not in summed:
                # Copy on first sighting so accumulation below does not
                # mutate the caller's objects.
                summed[pp.player_id] = pp._copy()
            else:
                summed[pp.player_id]._add(pp)
    return summed.values()
def current(db):
    """
    Returns a triple of `nfldb.Enums.season_phase`, season year and week
    corresponding to values that `nfldb` thinks are current.

    Note that this only queries the database. Only the `nfldb-update`
    script fetches the current state from NFL.com.

    The values retrieved may be `None` if the season is over or if they
    haven't been updated yet by the `nfldb-update` script.
    """
    with Tx(db, factory=tuple_cursor) as cursor:
        cursor.execute('SELECT season_type, season_year, week FROM meta')
        row = cursor.fetchone()
    # BUG FIX: the `(None, None, None)` fallback used to sit after an
    # unconditional `return` inside the `with` block and was unreachable,
    # so an empty `meta` table made this function return a bare `None`
    # instead of an unpackable triple.
    if row is None:
        return tuple([None] * 3)
    return row
def player_search(db, full_name, team=None, position=None, limit=1):
    """
    Given a database handle and a player's full name, this function
    searches the database for players with full names *similar* to the
    one given. Similarity is measured by the
    [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance).

    Results are returned as tuples. The first element is the is a
    `nfldb.Player` object and the second element is the Levenshtein
    distance. When `limit` is `1` (the default), then the return value
    is a tuple. When `limit` is more than `1`, then the return value
    is a list of tuples.

    If no results are found, then `(None, None)` is returned when
    `limit == 1` or the empty list is returned when `limit > 1`.

    If `team` is not `None`, then only players **currently** on the
    team provided will be returned. Any players with an unknown team
    are therefore omitted.

    If `position` is not `None`, then only players **currently**
    at that position will be returned. Any players with an unknown
    position are therefore omitted.

    In order to use this function, the PostgreSQL `levenshtein`
    function must be available. If running this functions gives
    you an error about "No function matches the given name and
    argument types", then you can install the `levenshtein` function
    into your database by running the SQL query `CREATE EXTENSION
    fuzzystrmatch` as a superuser like `postgres`. For example:

        #!bash
        psql -U postgres -c 'CREATE EXTENSION fuzzystrmatch;' nfldb
    """
    assert isinstance(limit, int) and limit >= 1

    select_leven = 'levenshtein(full_name, %s) AS distance'
    q = '''
        SELECT %s, %s
        FROM player
        %s
        ORDER BY distance ASC LIMIT %d
    '''
    qteam, qposition = '', ''
    results = []
    with Tx(db) as cursor:
        # Bind the optional filters and the name into SQL fragments up
        # front; mogrify escapes the values safely.
        if team is not None:
            qteam = cursor.mogrify('team = %s', (team,))
        if position is not None:
            qposition = cursor.mogrify('position = %s', (position,))
        select_leven = cursor.mogrify(select_leven, (full_name,))
        q = q % (
            types.select_columns(types.Player),
            select_leven,
            _prefix_and(qteam, qposition),
            limit
        )
        # NOTE(review): `full_name` was already bound into `select_leven`
        # via mogrify above; passing it again here looks redundant —
        # confirm the executed query still contains a placeholder for it.
        cursor.execute(q, (full_name,))
        for row in cursor.fetchall():
            results.append((types.Player.from_row(db, row), row['distance']))
    if limit == 1:
        if len(results) == 0:
            return (None, None)
        return results[0]
    return results
def _append_conds(conds, tabtype, kwargs):
    """
    Append a `Comparison` to the condition list `conds` for every entry
    in `kwargs` whose name — once any comparison suffix (`__eq`, `__ne`,
    ...) is stripped — is a SQL field of `tabtype`.
    """
    fields = tabtype._sql_fields
    for kw, value in kwargs.items():
        if _no_comp_suffix(kw) in fields:
            conds.append(Comparison(tabtype, kw, value))
def _no_comp_suffix(s):
"""Removes the comparison operator suffix from a search field."""
return re.sub('__(eq|ne|gt|lt|ge|le)$', '', s)
def _comp_suffix(s):
"""
Returns the comparison operator suffix given a search field.
This does not include the `__` (double underscore).
If no suffix is present, then `eq` is returned.
"""
suffixes = ['eq', 'ne', 'lt', 'le', 'gt', 'ge']
for suffix in suffixes:
if s.endswith(suffix):
return suffix
return 'eq'
def _sql_where(cur, tables, andalso, orelse, prefix=None, aggregate=False):
    """
    Returns a valid SQL condition expression given a list of
    conjunctions and disjunctions. The list of disjunctions
    is given the lowest precedent via grouping with parentheses.
    """
    disjunctions = []
    andsql = _cond_where_sql(cur, andalso, tables, prefix=prefix,
                             aggregate=aggregate)
    andsql = ' AND '.join(andsql)
    if len(andsql) > 0:
        andsql = '(%s)' % andsql
        # BUG FIX: only include the conjunction clause when it is
        # non-empty.  Previously the empty string was appended
        # unconditionally, producing malformed SQL such as
        # "( OR (...))" whenever only disjunctions were present.
        disjunctions.append(andsql)
    disjunctions += _cond_where_sql(cur, orelse, tables, prefix=prefix,
                                    aggregate=aggregate)
    if len(disjunctions) == 0:
        return ''
    return '(%s)' % (' OR '.join(disjunctions))
def _cond_where_sql(cursor, conds, tables, prefix=None, aggregate=False):
    """
    Returns a list of valid SQL comparisons derived from a list of
    `nfldb.Condition` objects in `conds` and restricted to the list
    of table names `tables`.
    """
    pieces = []
    for cond in conds:
        # Sub-queries always participate; plain comparisons only when
        # their table is among the requested tables.
        relevant = isinstance(cond, Query) or (
            isinstance(cond, Comparison) and cond._table in tables)
        if not relevant:
            continue
        sql = cond._sql_where(cursor, tables, prefix=prefix,
                              aggregate=aggregate)
        if sql:
            pieces.append(sql)
    return pieces
def _prefix_and(*exprs, **kwargs):
"""
Given a list of SQL expressions, return a valid `WHERE` clause for
a SQL query with the exprs AND'd together.
Exprs that are empty are omitted.
A keyword argument `prefix` can be used to change the value of
`WHERE ` to something else (e.g., `HAVING `).
"""
anded = ' AND '.join('(%s)' % expr for expr in exprs if expr)
if len(anded) == 0:
return ''
return kwargs.get('prefix', 'WHERE ') + anded
def _sql_pkey_in(cur, pkeys, ids, prefix=''):
"""
Returns a SQL IN expression of the form `(pkey1, pkey2, ..., pkeyN)
IN ((val1, val2, ..., valN), ...)` where `pkeyi` is a member of
the list `pkeys` and `(val1, val2, ..., valN)` is a member in the
`nfldb.query.IdSet` `ids`.
If `prefix` is set, then it is used as a prefix for each `pkeyi`.
"""
pkeys = ['%s%s' % (prefix, pk) for pk in pkeys]
if ids.is_full:
return None
elif len(ids) == 0:
nulls = ', '.join(['NULL'] * len(pkeys))
return '(%s) IN ((%s))' % (', '.join(pkeys), nulls)
return '(%s) IN %s' % (', '.join(pkeys), cur.mogrify('%s', (tuple(ids),)))
def _pk_play(cur, ids, tables=['game', 'drive']):
    """
    A convenience function for calling `_sql_pkey_in` when selecting
    from the `play` or `play_player` tables. Namely, it only uses a
    SQL IN expression for the `nfldb.query.IdSet` `ids` when it has
    fewer than `nfldb.query._sql_max_in` values.

    `tables` should be a list of tables to specify which primary keys
    should be used. By default, only the `game` and `drive` tables
    are allowed, since they are usually within the limits of a SQL
    IN expression.
    """
    # The mutable default for `tables` is never mutated here, so it is
    # safe, if unconventional.
    pk = None
    is_play = 'play' in tables or 'play_player' in tables
    # NOTE(review): because this is an elif-chain, the drive and play id
    # sets are consulted only when 'game' is absent from `tables` — even
    # when the game id set is full and yields no restriction (pk stays
    # None).  Confirm that fall-through was not intended.
    if 'game' in tables and pk is None:
        pk = _sql_pkey_in(cur, ['gsis_id'], ids['game'])
    elif 'drive' in tables and len(ids['drive']) <= _sql_max_in:
        pk = _sql_pkey_in(cur, ['gsis_id', 'drive_id'], ids['drive'])
    elif is_play and len(ids['play']) <= _sql_max_in:
        pk = _sql_pkey_in(cur, ['gsis_id', 'drive_id', 'play_id'], ids['play'])
    return pk
def _play_set(ids):
"""
Returns a value representing a set of plays in correspondence
with the given `ids` dictionary mapping `play` or `drive` to
`nfldb.query.IdSet`s. The value may be any combination of drive and
play identifiers. Use `nfldb.query._in_play_set` for membership
testing.
"""
if not ids['play'].is_full:
return ('play', ids['play'])
elif not ids['drive'].is_full:
return ('drive', ids['drive'])
else:
return None
def _in_play_set(pset, play_pk):
"""
Given a tuple `(gsis_id, drive_id, play_id)`, return `True`
if and only if it exists in the play set `pset`.
Valid values for `pset` can be constructed with
`nfldb.query._play_set`.
"""
if pset is None: # No criteria for drive/play. Always true, then!
return True
elif pset[0] == 'play':
return play_pk in pset[1]
elif pset[0] == 'drive':
return play_pk[0:2] in pset[1]
assert False, 'invalid play_set value'
class Condition (object):
    """
    An abstract class that describes the interface of components
    in a SQL query.

    Concrete subclasses (e.g. comparisons and sub-queries) must
    implement `_tables` and `_sql_where`; every method here fails
    with an assertion to catch direct use.
    """
    def __init__(self):
        assert False, "Condition class cannot be instantiated."

    def _tables(self):
        """Returns a `set` of tables used in this condition."""
        assert False, "subclass responsibility"

    def _sql_where(self, cursor, table, prefix=None, aggregate=False):
        """
        Returns an escaped SQL string that can be safely substituted
        into the WHERE clause of a SELECT query for a particular
        `table`.

        The `prefix` parameter specifies a prefix to be used for each
        column written. If it's empty, then no prefix is used.

        If `aggregate` is `True`, then aggregate conditions should
        be used instead of regular conditions.
        """
        assert False, "subclass responsibility"
class Comparison (Condition):
    """
    A representation of a single comparison in a `nfldb.Query`.

    This corresponds to a field name, a value and one of the following
    operators: `=`, `!=`, `<`, `<=`, `>` or `>=`. A value may be a list
    or a tuple, in which case PostgreSQL's `ANY` is used along with the
    given operator.
    """
    def __init__(self, tabtype, kw, value):
        """
        Introduces a new condition given a user specified keyword `kw`
        with a `tabtype` (e.g., `nfldb.Play`) and a user provided
        value. The operator to be used is inferred from the suffix of
        `kw`. If `kw` has no suffix or a `__eq` suffix, then `=` is
        used. A suffix of `__ge` means `>=` is used, `__lt` means `<`,
        and so on.

        If `value` is of the form `sql(...)` then the value represented
        by `...` is written to the SQL query without escaping.
        """
        self.operator = '='
        """The operator used in this condition."""
        self.tabtype = tabtype
        """The table type for this column."""
        self.column = None
        """The SQL column name in this condition."""
        self.value = value
        """The Python value to compare the SQL column to."""

        suffixes = {
            '__eq': '=', '__ne': '!=',
            '__lt': '<', '__le': '<=', '__gt': '>', '__ge': '>=',
        }
        # Each keyword may carry at most one of the (disjoint) suffixes
        # above; strip it to recover the column name.
        for suffix, op in suffixes.items():
            if kw.endswith(suffix):
                self.operator = op
                self.column = kw[:-len(suffix)]
                break
        if self.column is None:
            self.column = kw

    @property
    def _table(self):
        # The SQL table backing this condition's table type.
        return self.tabtype._table

    def _tables(self):
        return {self.tabtype._table}

    def __str__(self):
        parts = (self._table, self.column, self.operator, self.value)
        return '%s.%s %s %s' % parts

    def _sql_where(self, cursor, tables, prefix=None, aggregate=False):
        field = self.tabtype._as_sql(self.column, prefix=prefix)
        if aggregate:
            field = 'SUM(%s)' % field
        paramed = '%s %s %s' % (field, self.operator, '%s')
        if isinstance(self.value, strtype) and self.value.startswith('sql('):
            # Raw SQL requested: splice the text between `sql(` and the
            # trailing `)` into the query without escaping.
            return paramed % self.value[4:-1]
        if isinstance(self.value, (tuple, list)):
            paramed = paramed % 'ANY (%s)'
            self.value = list(self.value)  # Coerce tuples to pg ARRAYs...
        return cursor.mogrify(paramed, (self.value,))
def QueryOR(db):
    """
    Creates a disjunctive `nfldb.Query` object, where every
    condition is combined disjunctively. Namely, it is an alias for
    `nfldb.Query(db, orelse=True)`.

    `db` should be a psycopg2 database connection, exactly as given
    to `nfldb.Query`.
    """
    return Query(db, orelse=True)
class Query (Condition):
"""
A query represents a set of criteria to search nfldb's PostgreSQL
database. Its primary feature is to provide a high-level API for
searching NFL game, drive, play and player data very quickly.
The basic workflow is to specify all of the search criteria that
you want, and then use one of the `as_*` methods to actually
perform the search and return results from the database.
For example, to get all Patriots games as `nfldb.Game` objects from
the 2012 regular season, we could do:
#!python
q = Query(db).game(season_year=2012, season_type='Regular', team='NE')
for game in q.as_games():
print game
Other comparison operators like `<` or `>=` can also be used. To use
them, append a suffix like `__lt` to the end of a field name. So to get
all games with a home score greater than or equal to 50:
#!python
q = Query(db).game(home_score__ge=50)
for game in q.as_games():
print game
Other suffixes are available: `__lt` for `<`, `__le` for `<=`,
`__gt` for `>`, `__ge` for `>=`, `__ne` for `!=` and `__eq` for
`==`. Although, the `__eq` suffix is used by default and is
therefore never necessary to use.
More criteria can be specified by chaining search criteria. For
example, to get only plays as `nfldb.Play` objects where Tom Brady
threw a touchdown pass:
#!python
q = Query(db).game(season_year=2012, season_type='Regular')
q.player(full_name="Tom Brady").play(passing_tds=1)
for play in q.as_plays():
print play
By default, all critera specified are combined conjunctively (i.e.,
all criteria must be met for each result returned). However,
sometimes you may want to specify disjunctive criteria (i.e., any
of the criteria can be met for a result to be returned). To do this
for a single field, simply use a list. For example, to get all
Patriot games from the 2009 to 2013 seasons:
#!python
q = Query(db).game(season_type='Regular', team='NE')
q.game(season_year=[2009, 2010, 2011, 2012, 2013])
for game in q.as_games():
print game
Disjunctions can also be applied to multiple fields by creating a
`nfldb.Query` object with `nfldb.QueryOR`. For example, to find
all games where either team had more than 50 points:
#!python
q = QueryOR(db).game(home_score__ge=50, away_score__ge=50)
for game in q.as_games():
print game
Finally, multiple queries can be combined with `nfldb.Query.andalso`.
For example, to restrict the last search to games in the 2012 regular
season:
#!python
big_score = QueryOR(db).game(home_score__ge=50, away_score__ge=50)
q = Query(db).game(season_year=2012, season_type='Regular')
q.andalso(big_score)
for game in q.as_games():
print game
This is only the beginning of what can be done. More examples that run
the gamut can be found on
[nfldb's wiki](https://github.com/BurntSushi/nfldb/wiki).
"""
def __init__(self, db, orelse=False):
    """
    Introduces a new `nfldb.Query` object.

    Criteria can be added with any combination of the
    `nfldb.Query.game`, `nfldb.Query.drive`, `nfldb.Query.play`,
    `nfldb.Query.player` and `nfldb.Query.aggregate` methods. Results
    can then be retrieved with any of the `as_*` methods:
    `nfldb.Query.as_games`, `nfldb.Query.as_drives`,
    `nfldb.Query.as_plays`, `nfldb.Query.as_play_players`,
    `nfldb.Query.as_players` and `nfldb.Query.as_aggregate`.

    Note that if aggregate criteria are specified with
    `nfldb.Query.aggregate`, then the **only** way to retrieve
    results is with the `nfldb.Query.as_aggregate` method. Invoking
    any of the other `as_*` methods will raise an assertion error.
    """
    self._db = db
    """A psycopg2 database connection object."""

    self._sort_exprs = None
    """Expressions used to sort the results."""

    self._limit = None
    """The number of results to limit the search to."""

    self._sort_tables = []
    """The tables to restrain limiting criteria to."""

    self._andalso = []
    """A list of conjunctive conditions."""

    self._orelse = []
    """
    A list of disjunctive conditions applied to
    `Query._andalso`.
    """

    self._default_cond = self._orelse if orelse else self._andalso
    """
    An alias to whichever condition list new criteria should be
    appended to by default (disjunctive when `orelse` is true).
    """

    # The aggregate counter-parts of the condition lists above; they
    # are populated by `Query.aggregate` and consumed only by
    # `Query.as_aggregate`.
    self._agg_andalso = []
    self._agg_orelse = []
    self._agg_default_cond = self._agg_orelse if orelse \
        else self._agg_andalso
def sort(self, exprs):
    """
    Specify sorting criteria for the result set returned by
    using sort expressions. A sort expression is a tuple with
    two elements: a field to sort by and the order to use. The
    field should correspond to an attribute of the objects you're
    returning and the order should be `asc` for ascending (smallest
    to biggest) or `desc` for descending (biggest to smallest).
    For example, `('passing_yds', 'desc')` would sort plays by the
    number of passing yards in the play, with the biggest coming
    first.

    Remember that a sort field must be an attribute of the
    results being returned. For example, you can't sort plays by
    `home_score`, which is an attribute of a `nfldb.Game` object.
    If you require this behavior, you will need to do it in Python
    with its `sorted` built in function. (Or alternatively, use
    two separate queries if the result set is large.)

    You may provide multiple sort expressions. For example,
    `[('gsis_id', 'asc'), ('time', 'asc'), ('play_id', 'asc')]`
    would sort plays in the order in which they occurred within
    each game.

    `exprs` may also just be a string specifying a single
    field which defaults to a descending order. For example,
    `sort('passing_yds')` sorts plays by passing yards in
    descending order.

    If `exprs` is set to the empty list, then sorting will be
    disabled for this query.

    Note that sorting criteria can be combined with
    `nfldb.Query.limit` to limit results which can dramatically
    speed up larger searches. For example, to fetch the top 10
    passing plays in the 2012 season:

        #!python
        q = Query(db).game(season_year=2012, season_type='Regular')
        q.sort('passing_yds').limit(10)
        for p in q.as_plays():
            print p

    A more naive approach might be to fetch all plays and sort them
    with Python:

        #!python
        q = Query(db).game(season_year=2012, season_type='Regular')
        plays = q.as_plays()
        plays = sorted(plays, key=lambda p: p.passing_yds, reverse=True)
        for p in plays[:10]:
            print p

    But this is over **43 times slower** on my machine than using
    `nfldb.Query.sort` and `nfldb.Query.limit`. (The performance
    difference is due to making PostgreSQL perform the search and
    restricting the number of results returned to process.)

    Returns `self` so that calls can be chained.
    """
    # The expressions are normalized lazily by the `_sorter` property.
    self._sort_exprs = exprs
    return self
def limit(self, count):
    """
    Limits the number of results to the integer `count`. If `count` is
    `0` (the default), then no limiting is done.

    See the documentation for `nfldb.Query.sort` for an example on how
    to combine it with `nfldb.Query.limit` to get results quickly.

    Returns `self` so that calls can be chained.
    """
    self._limit = count
    return self
@property
def _sorter(self):
    # Builds a fresh `Sorter` on each access so the most recent
    # sort/limit criteria are always reflected.
    return Sorter(self._sort_exprs, self._limit,
                  restraining=self._sort_tables)
def _assert_no_aggregate(self):
    # Guard for the non-aggregate `as_*` methods: aggregate criteria
    # can only be honored by `as_aggregate`, so refuse to proceed if
    # any were specified.
    assert not (self._agg_andalso or self._agg_orelse), \
        'aggregate criteria are only compatible with as_aggregate'
def andalso(self, *conds):
    """
    Adds the list of `nfldb.Query` objects in `conds` to this
    query's list of conjunctive conditions.
    """
    # Mutate the list in place so that `_default_cond` (which may
    # alias it) continues to point at the same object.
    self._andalso.extend(conds)
    return self
def orelse(self, *conds):
    """
    Adds the list of `nfldb.Query` objects in `conds` to this
    query's list of disjunctive conditions.
    """
    # In-place extension preserves the `_default_cond` alias.
    self._orelse.extend(conds)
    return self
def game(self, **kw):
    """
    Specify search criteria for an NFL game. The possible fields
    correspond to columns in the `game` table (or derived columns).
    They are documented as instance variables in the `nfldb.Game`
    class. Additionally, there are some special fields that provide
    convenient access to common conditions:

    * **team** - Find games that the team given played in, regardless
      of whether it is the home or away team.

    Please see the documentation for `nfldb.Query` for examples on
    how to specify search criteria. Please
    [open an issue](https://github.com/BurntSushi/nfldb/issues/new)
    if you can think of other special fields to add.
    """
    _append_conds(self._default_cond, types.Game, kw)

    # `team` is sugar for "played as the home team OR the away team":
    # expand it to a nested disjunctive query combined conjunctively
    # with everything else.
    if 'team' in kw:
        team = kw['team']
        either = Query(self._db, orelse=True)
        either.game(home_team=team, away_team=team)
        self.andalso(either)
    return self
def drive(self, **kw):
    """
    Specify search criteria for a drive. The possible fields
    correspond to columns in the `drive` table (or derived
    columns). They are documented as instance variables in the
    `nfldb.Drive` class.

    Please see the documentation for `nfldb.Query` for examples on
    how to specify search criteria.
    """
    # Criteria land on the default condition list, which is either
    # conjunctive or disjunctive depending on how this query was made.
    _append_conds(self._default_cond, types.Drive, kw)
    return self
def play(self, **kw):
    """
    Specify search criteria for a play. The possible fields
    correspond to columns in the `play` or `play_player` tables (or
    derived columns). They are documented as instance variables in
    the `nfldb.Play` and `nfldb.PlayPlayer` classes. Additionally,
    the fields listed on the
    [statistical categories](http://goo.gl/1qYG3C)
    wiki page may be used. That includes **both** `play` and
    `player` statistical categories.

    Please see the documentation for `nfldb.Query` for examples on
    how to specify search criteria.
    """
    _append_conds(self._default_cond, types.Play, kw)
    _append_conds(self._default_cond, types.PlayPlayer, kw)

    # Technically, it isn't necessary to handle derived fields manually
    # since their SQL can be generated automatically, but it can be
    # much faster to express them in terms of boolean logic with other
    # fields rather than generate them.
    for field, value in kw.items():
        base = _no_comp_suffix(field)
        if base not in types.PlayPlayer._derived_sums:
            continue
        suff = _comp_suffix(field)
        components = types.PlayPlayer._derived_sums[base]
        ors = dict(('%s__%s' % (f, suff), value) for f in components)
        self.andalso(Query(self._db, orelse=True).play(**ors))
    return self
def player(self, **kw):
    """
    Specify search criteria for a player. The possible fields
    correspond to columns in the `player` table (or derived
    columns). They are documented as instance variables in the
    `nfldb.Player` class.

    Please see the documentation for `nfldb.Query` for examples on
    how to specify search criteria.
    """
    _append_conds(self._default_cond, types.Player, kw)
    return self
def aggregate(self, **kw):
    """
    This is just like `nfldb.Query.play`, except the search
    parameters are applied to aggregate statistics.

    For example, to retrieve all quarterbacks who passed for at
    least 4000 yards in the 2012 season:

        #!python
        q = Query(db).game(season_year=2012, season_type='Regular')
        q.aggregate(passing_yds__ge=4000)
        for pp in q.as_aggregate():
            print pp.player, pp.passing_yds

    Aggregate results can also be sorted:

        #!python
        for pp in q.sort('passing_yds').as_aggregate():
            print pp.player, pp.passing_yds

    Note that this method can **only** be used with
    `nfldb.Query.as_aggregate`. Use with any of the other
    `as_*` methods will result in an assertion error. Note
    though that regular criteria can still be specified with
    `nfldb.Query.game`, `nfldb.Query.play`, etc. (Regular criteria
    restrict *what to aggregate* while aggregate criteria restrict
    *aggregated results*.)
    """
    # Aggregate criteria go to separate condition lists consumed only
    # by `as_aggregate` (enforced by `_assert_no_aggregate`).
    _append_conds(self._agg_default_cond, types.Play, kw)
    _append_conds(self._agg_default_cond, types.PlayPlayer, kw)
    return self
def as_games(self):
    """
    Executes the query and returns the results as a list of
    `nfldb.Game` objects.
    """
    self._assert_no_aggregate()
    self._sort_tables = [types.Game]
    ids = self._ids('game', self._sorter)
    with Tx(self._db) as cursor:
        where = _prefix_and(_sql_pkey_in(cursor, ['gsis_id'],
                                         ids['game']))
        cursor.execute('SELECT %s FROM game %s %s' % (
            types.select_columns(types.Game),
            where,
            self._sorter.sql(tabtype=types.Game),
        ))
        results = [types.Game.from_row(self._db, row)
                   for row in cursor.fetchall()]
    return results
def as_drives(self):
    """
    Executes the query and returns the results as a list of
    `nfldb.Drive` objects.
    """
    self._assert_no_aggregate()
    self._sort_tables = [types.Drive]
    ids = self._ids('drive', self._sorter)
    results = []
    with Tx(self._db) as cursor:
        pkey = _pk_play(cursor, ids, tables=self._tables())
        cursor.execute('SELECT %s FROM drive %s %s' % (
            types.select_columns(types.Drive),
            _prefix_and(pkey),
            self._sorter.sql(tabtype=types.Drive),
        ))
        # `_pk_play` may only have restricted on game ids, so filter
        # rows down to the exact drives selected by `_ids`.
        for row in cursor.fetchall():
            if (row['gsis_id'], row['drive_id']) in ids['drive']:
                results.append(types.Drive.from_row(self._db, row))
    return results
def _as_plays(self):
    """
    Executes the query and returns the results as a dictionary
    of `nfldb.Play` objects that don't have the `play_player`
    attribute filled. The keys of the dictionary are play id
    tuples with the spec `(gsis_id, drive_id, play_id)`.

    The primary key membership SQL expression is also returned.
    """
    self._assert_no_aggregate()
    ids = self._ids('play', self._sorter)
    pset = _play_set(ids)
    tables = self._tables()
    tables.add('play')

    plays = OrderedDict()
    with Tx(self._db, factory=tuple_cursor) as cursor:
        pkey = _pk_play(cursor, ids, tables=tables)
        cursor.execute('SELECT %s FROM play %s %s' % (
            types.select_columns(types.Play),
            _prefix_and(pkey),
            self._sorter.sql(tabtype=types.Play),
        ))
        make = types.Play._from_tuple
        for row in cursor.fetchall():
            # Tuple cursor: the first three columns are the play's
            # primary key `(gsis_id, drive_id, play_id)`.
            pid = (row[0], row[1], row[2])
            if _in_play_set(pset, pid):
                plays[pid] = make(self._db, row)
    return plays, pkey
def as_plays(self, fill=True):
    """
    Executes the query and returns the results as a list of
    `nfldb.Play` objects with the `nfldb.Play.play_players`
    attribute filled with player statistics.

    If `fill` is `False`, then player statistics will not be added
    to each `nfldb.Play` object returned. This can significantly
    speed things up if you don't need to access player statistics.

    Note that when `fill` is `False`, the `nfldb.Play.play_player`
    attribute is still available, but the data will be retrieved
    on-demand for each play. Also, if `fill` is `False`, then any
    sorting criteria specified to player statistics will be
    ignored.
    """
    self._assert_no_aggregate()
    self._sort_tables = [types.Play, types.PlayPlayer]
    plays, pkey = self._as_plays()
    if not fill:
        return plays.values()

    with Tx(self._db, factory=tuple_cursor) as cursor:
        cursor.execute('SELECT %s FROM play_player %s %s' % (
            types.select_columns(types.PlayPlayer),
            _prefix_and(pkey),
            self._sorter.sql(tabtype=types.PlayPlayer),
        ))
        make = types.PlayPlayer._from_tuple
        for row in cursor.fetchall():
            # Attach each statistic to its parent play, if selected.
            play = plays.get((row[0], row[1], row[2]))
            if play is None:
                continue
            if play._play_players is None:
                play._play_players = []
            play._play_players.append(make(self._db, row))
    return self._sorter.sorted(plays.values())
def as_play_players(self):
    """
    Executes the query and returns the results as a list of
    `nfldb.PlayPlayer` objects.

    This provides a way to access player statistics directly
    by bypassing play data. Usually the results of this method
    are passed to `nfldb.aggregate`. It is recommended to use
    `nfldb.Query.aggregate` and `nfldb.Query.as_aggregate` when
    possible, since it is significantly faster to sum statistics in
    the database as opposed to Python.
    """
    self._assert_no_aggregate()
    self._sort_tables = [types.PlayPlayer]
    ids = self._ids('play_player', self._sorter)
    pset = _play_set(ids)
    player_pks = None
    tables = self._tables()
    tables.add('play_player')
    results = []
    q = 'SELECT %s FROM play_player %s %s'
    with Tx(self._db, factory=tuple_cursor) as cursor:
        pkey = _pk_play(cursor, ids, tables=tables)
        # Normally we wouldn't need to add this restriction on players,
        # but the identifiers in `ids` correspond to either plays or
        # players, and not their combination.
        # Bug fix: this previously read `or 'play_player'` — a truthy
        # string literal that made the condition unconditionally true.
        # The intended test is membership in `tables`.
        if 'player' in tables or 'play_player' in tables:
            player_pks = _sql_pkey_in(cursor, ['player_id'],
                                      ids['player'])
        q = q % (
            types.select_columns(types.PlayPlayer),
            _prefix_and(player_pks, pkey),
            self._sorter.sql(tabtype=types.PlayPlayer),
        )
        cursor.execute(q)
        init = types.PlayPlayer._from_tuple
        for t in cursor.fetchall():
            pid = (t[0], t[1], t[2])
            if _in_play_set(pset, pid):
                results.append(init(self._db, t))
    return results
def as_players(self):
    """
    Executes the query and returns the results as a list of
    `nfldb.Player` objects.
    """
    self._assert_no_aggregate()
    self._sort_tables = [types.Player]
    ids = self._ids('player', self._sorter)
    with Tx(self._db) as cursor:
        where = _prefix_and(
            _sql_pkey_in(cursor, ['player_id'], ids['player']))
        cursor.execute('SELECT %s FROM player %s %s' % (
            types.select_columns(types.Player),
            where,
            self._sorter.sql(tabtype=types.Player),
        ))
        players = [types.Player.from_row(self._db, row)
                   for row in cursor.fetchall()]
    return players
def as_aggregate(self):
    """
    Executes the query and returns the results as aggregated
    `nfldb.PlayPlayer` objects. This method is meant to be a more
    restricted but much faster version of `nfldb.aggregate`.
    Namely, this method uses PostgreSQL to compute the aggregate
    statistics while `nfldb.aggregate` computes them in Python
    code.

    If any sorting criteria is specified, it is applied to the
    aggregate *player* values only.
    """
    # The central approach here is to buck the trend of the other
    # `as_*` methods and do a JOIN to perform our search.
    # We do this because `IN` expressions are limited in the number
    # of sub-expressions they can contain, and since we can't do our
    # usual post-filtering with Python (since it's an aggregate),
    # we must resort to doing all the filtering in PostgreSQL.
    #
    # The only other option I can think of is to load the identifiers
    # into a temporary table and use a subquery with an `IN` expression,
    # which I'm told isn't subject to the normal limitations. However,
    # I'm not sure if it's economical to run a query against a big
    # table with so many `OR` expressions. More convincingly, the
    # approach I've used below seems to be *fast enough*.
    #
    # Ideas and experiments are welcome. Using a join seems like the
    # most sensible approach at the moment (and it's simple!), but I'd
    # like to experiment with other ideas in the future.
    tables, agg_tables = self._tables(), self._agg_tables()
    # NOTE(review): `gids` and `player_ids` are never reassigned below,
    # so `player_ids` is always None when spliced into the WHERE clause
    # and `gids` is unused — confirm whether id-based pre-filtering was
    # intended here.
    gids, player_ids = None, None
    joins = defaultdict(str)
    results = []
    with Tx(self._db) as cur:
        # Only join the tables actually referenced by the criteria;
        # a missing key yields '' thanks to the defaultdict.
        if 'game' in tables:
            joins['game'] = '''
                LEFT JOIN game
                ON play_player.gsis_id = game.gsis_id
            '''
        if 'drive' in tables:
            joins['drive'] = '''
                LEFT JOIN drive
                ON play_player.gsis_id = drive.gsis_id
                AND play_player.drive_id = drive.drive_id
            '''
        if 'play' in tables or 'play' in agg_tables:
            joins['play'] = '''
                LEFT JOIN play
                ON play_player.gsis_id = play.gsis_id
                AND play_player.drive_id = play.drive_id
                AND play_player.play_id = play.play_id
            '''
        if 'player' in tables:
            joins['player'] = '''
                LEFT JOIN player
                ON play_player.player_id = player.player_id
            '''
        # Regular criteria filter the rows being aggregated (WHERE);
        # aggregate criteria filter the aggregated groups (HAVING).
        where = self._sql_where(cur, ['game', 'drive', 'play',
                                      'play_player', 'player'])
        having = self._sql_where(cur, ['play', 'play_player'],
                                 prefix='', aggregate=True)
        q = '''
            SELECT play_player.player_id, {sum_fields}
            FROM play_player
            {join_game}
            {join_drive}
            {join_play}
            {join_player}
            {where}
            GROUP BY play_player.player_id
            {having}
            {order}
        '''.format(
            sum_fields=types._sum_fields(types.PlayPlayer),
            join_game=joins['game'], join_drive=joins['drive'],
            join_play=joins['play'], join_player=joins['player'],
            where=_prefix_and(player_ids, where, prefix='WHERE '),
            having=_prefix_and(having, prefix='HAVING '),
            order=self._sorter.sql(tabtype=types.PlayPlayer, prefix=''),
        )
        cur.execute(q)

        # Python 2: dict.keys() returns a list, so `+` concatenates.
        fields = (types._player_categories.keys()
                  + types.PlayPlayer._sql_derived)
        for row in cur.fetchall():
            # Only keep non-zero statistics to keep the objects small.
            stats = {}
            for f in fields:
                v = row[f]
                if v != 0:
                    stats[f] = v
            pp = types.PlayPlayer(self._db, None, None, None,
                                  row['player_id'], None, stats)
            results.append(pp)
    return results
def _tables(self):
    """Returns all the tables referenced in the search criteria."""
    referenced = set()
    for cond in self._andalso:
        referenced.update(cond._tables())
    for cond in self._orelse:
        referenced.update(cond._tables())
    return referenced
def _agg_tables(self):
    """
    Returns all the tables referenced in the aggregate search criteria.
    """
    referenced = set()
    for cond in self._agg_andalso:
        referenced.update(cond._tables())
    for cond in self._agg_orelse:
        referenced.update(cond._tables())
    return referenced
def show_where(self, aggregate=False):
    """
    Returns an approximate WHERE clause corresponding to the
    criteria specified in `self`. Note that the WHERE clause given
    is never explicitly used for performance reasons, but one hopes
    that it describes the criteria in `self`.

    If `aggregate` is `True`, then aggregate criteria for the
    `play` and `play_player` tables is shown with aggregate
    functions applied.
    """
    # Show criteria for all tables.
    tables = ['game', 'drive', 'play', 'play_player', 'player']
    with Tx(self._db) as cur:
        # The `with` block always returns (or propagates an
        # exception); a trailing unreachable `return ''` was removed.
        return self._sql_where(cur, tables, aggregate=aggregate)
def _sql_where(self, cur, tables, prefix=None, aggregate=False):
    """
    Returns a WHERE expression representing the search criteria
    in `self` and restricted to the tables in `tables`.

    If `aggregate` is `True`, then the appropriate aggregate
    functions are used.
    """
    # Pick the condition lists to render, then delegate to the
    # module-level `_sql_where` helper.
    if aggregate:
        ands, ors = self._agg_andalso, self._agg_orelse
    else:
        ands, ors = self._andalso, self._orelse
    return _sql_where(cur, tables, ands, ors,
                      prefix=prefix, aggregate=aggregate)
def _ids(self, as_table, sorter, tables=None):
    """
    Returns a dictionary of primary keys matching the criteria
    specified in this query for the following tables: game, drive,
    play and player. The returned dictionary will have a key for
    each table with a corresponding `IdSet`, which may be empty
    or full.

    Each `IdSet` contains primary key values for that table. In the
    case of the `drive` and `play` table, those values are tuples.
    """
    # This method is where most of the complexity in this module lives,
    # since it is where most of the performance considerations are made.
    # Namely, the search criteria in `self` are spliced out by table
    # and used to find sets of primary keys for each table. The primary
    # keys are then used to filter subsequent searches on tables.
    #
    # The actual data returned is confined to the identifiers returned
    # from this method.

    # Initialize sets to "full". This distinguishes an empty result
    # set and a lack of search.
    ids = dict([(k, IdSet.full())
                for k in ('game', 'drive', 'play', 'player')])

    # A list of fields for each table for easier access by table name.
    table_types = {
        'game': types.Game,
        'drive': types.Drive,
        'play': types.Play,
        'play_player': types.PlayPlayer,
        'player': types.Player,
    }

    def merge(add):
        # Intersects newly found identifiers into the running sets.
        # Tables missing from `add` are treated as unrestricted (full).
        for table, idents in ids.items():
            ids[table] = idents.intersection(add.get(table, IdSet.full()))

    def osql(table):
        # ORDER BY/LIMIT SQL for `table`, emitted only when limiting at
        # this intermediate stage cannot change the final result set.
        if table == 'play_player' and as_table == 'play':
            # A special case to handle weird sorting issues since
            # some tables use the same column names.
            # When sorting plays, we only want to allow sorting on
            # player statistical fields and nothing else (like gsis_id,
            # play_id, etc.).
            player_stat = False
            for field, _ in sorter.exprs:
                is_derived = field in types.PlayPlayer._sql_derived
                if field in types._player_categories or is_derived:
                    player_stat = True
                    break
            if not player_stat:
                return ''
        elif table != as_table:
            return ''
        return sorter.sql(tabtype=table_types[table], only_limit=True)

    def ids_game(cur):
        # Game ids matching the `game` criteria.
        game = IdSet.empty()
        cur.execute('''
            SELECT gsis_id FROM game %s %s
        ''' % (_prefix_and(self._sql_where(cur, ['game'])), osql('game')))
        for row in cur.fetchall():
            game.add(row[0])
        return {'game': game}

    def ids_drive(cur):
        # Drive ids matching the `drive` criteria, restricted to the
        # games found so far.
        idexp = pkin(['gsis_id'], ids['game'])
        cur.execute('''
            SELECT gsis_id, drive_id FROM drive %s %s
        ''' % (_prefix_and(idexp, where('drive')), osql('drive')))
        game, drive = IdSet.empty(), IdSet.empty()
        for row in cur.fetchall():
            game.add(row[0])
            drive.add((row[0], row[1]))
        return {'game': game, 'drive': drive}

    def ids_play(cur):
        # Play ids matching the `play` criteria, restricted to the
        # games/drives found so far and post-filtered in Python.
        cur.execute('''
            SELECT gsis_id, drive_id, play_id FROM play %s %s
        ''' % (_prefix_and(_pk_play(cur, ids), where('play')),
               osql('play')))
        pset = _play_set(ids)
        game, drive, play = IdSet.empty(), IdSet.empty(), IdSet.empty()
        for row in cur.fetchall():
            pid = (row[0], row[1], row[2])
            if not _in_play_set(pset, pid):
                continue
            game.add(row[0])
            drive.add(pid[0:2])
            play.add(pid)
        return {'game': game, 'drive': drive, 'play': play}

    def ids_play_player(cur):
        # Like `ids_play`, but against `play_player`, which also
        # yields the matching player ids.
        cur.execute('''
            SELECT gsis_id, drive_id, play_id, player_id
            FROM play_player %s %s
        ''' % (_prefix_and(_pk_play(cur, ids), where('play_player')),
               osql('play_player')))
        pset = _play_set(ids)
        game, drive, play = IdSet.empty(), IdSet.empty(), IdSet.empty()
        player = IdSet.empty()
        for row in cur.fetchall():
            pid = (row[0], row[1], row[2])
            if not _in_play_set(pset, pid):
                continue
            game.add(row[0])
            drive.add(pid[0:2])
            play.add(pid)
            player.add(row[3])
        return {'game': game, 'drive': drive, 'play': play,
                'player': player}

    def ids_player(cur):
        # Player ids matching the `player` criteria; when not querying
        # player meta directly, also narrows games/drives/plays to
        # those involving the matched players.
        cur.execute('''
            SELECT player_id FROM player %s %s
        ''' % (_prefix_and(where('player')), osql('player')))
        player = IdSet.empty()
        for row in cur.fetchall():
            player.add(row[0])
        # Don't filter games/drives/plays/play_players if we're just
        # retrieving the player meta directly.
        if as_table == 'player':
            return {'player': player}
        player_pks = pkin(['player_id'], player)
        cur.execute('''
            SELECT gsis_id, drive_id, play_id, player_id
            FROM play_player %s
        ''' % (_prefix_and(_pk_play(cur, ids), player_pks)))
        pset = _play_set(ids)
        game, drive, play = IdSet.empty(), IdSet.empty(), IdSet.empty()
        player = IdSet.empty()
        for row in cur.fetchall():
            pid = (row[0], row[1], row[2])
            if not _in_play_set(pset, pid):
                continue
            game.add(row[0])
            drive.add(pid[0:2])
            play.add(pid)
            player.add(row[3])
        return {'game': game, 'drive': drive, 'play': play,
                'player': player}

    with Tx(self._db, factory=tuple_cursor) as cur:
        def pkin(pkeys, ids, prefix=''):
            # Closure over `cur` used by the `ids_*` helpers above.
            return _sql_pkey_in(cur, pkeys, ids, prefix=prefix)

        def where(table):
            # WHERE criteria restricted to a single table.
            return self._sql_where(cur, [table])

        def should_search(table):
            # A table is worth querying if it has criteria or if
            # sort/limit restraints apply to it.
            tabtype = table_types[table]
            return where(table) or sorter.is_restraining(tabtype)

        # NOTE(review): `tables` is normalized here but never used
        # afterwards — confirm whether table-based pruning was intended.
        if tables is None:
            tables = self._tables()

        # Start with games since it has the smallest space.
        if should_search('game'):
            merge(ids_game(cur))
        if should_search('drive'):
            merge(ids_drive(cur))
        if should_search('play'):
            merge(ids_play(cur))
        if should_search('play_player'):
            merge(ids_play_player(cur))
        if should_search('player') or as_table == 'player':
            merge(ids_player(cur))
    return ids
class Sorter (object):
    """
    A representation of sort, order and limit criteria that can
    be applied in a SQL query or to a Python sequence.
    """
    @staticmethod
    def _normalize_order(order):
        """Uppercases `order` and asserts it is `ASC` or `DESC`."""
        order = order.upper()
        assert order in ('ASC', 'DESC'), 'order must be "asc" or "desc"'
        return order

    @staticmethod
    def cmp_to_key(mycmp):  # Taken from Python 2.7's functools
        """Convert a cmp= function into a key= function"""
        class K(object):
            __slots__ = ['obj']

            def __init__(self, obj, *args):
                self.obj = obj

            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0

            def __gt__(self, other):
                return mycmp(self.obj, other.obj) > 0

            def __eq__(self, other):
                return mycmp(self.obj, other.obj) == 0

            def __le__(self, other):
                return mycmp(self.obj, other.obj) <= 0

            def __ge__(self, other):
                return mycmp(self.obj, other.obj) >= 0

            def __ne__(self, other):
                return mycmp(self.obj, other.obj) != 0

            def __hash__(self):
                raise TypeError('hash not implemented')
        return K

    def __init__(self, exprs=None, limit=None, restraining=None):
        """
        Introduces new sorting criteria.

        `exprs` may be `None` (no sorting), a single field name or a
        `(field, order)` tuple, or a sequence of either. `limit` caps
        the number of results when truthy. `restraining` lists the
        table types that limiting criteria may be applied to.
        """
        def normal_expr(e):
            # Normalizes a sort expression to a `(field, ORDER)` pair;
            # a bare field name defaults to descending order.
            if isinstance(e, strtype):
                return (e, 'DESC')
            elif isinstance(e, tuple):
                return (e[0], Sorter._normalize_order(e[1]))
            else:
                raise ValueError(
                    "Sortby expressions must be strings "
                    "or two-element tuples like (column, order). "
                    "Got value '%s' with type '%s'." % (e, type(e)))

        self.limit = int(limit or 0)
        self.exprs = []
        # Bug fix: `restraining` previously defaulted to a shared
        # mutable `[]`; every Sorter built without the argument aliased
        # the same list. Use a `None` sentinel and a fresh list instead.
        self.restraining = [] if restraining is None else restraining
        if exprs is not None:
            if isinstance(exprs, strtype) or isinstance(exprs, tuple):
                self.exprs = [normal_expr(exprs)]
            else:
                for expr in exprs:
                    self.exprs.append(normal_expr(expr))

    def sorted(self, xs):
        """
        Sorts an iterable `xs` according to the criteria in `self`.

        If there are no sorting criteria specified, then this is
        equivalent to the identity function (modulo any limit).
        """
        key = Sorter.cmp_to_key(self._cmp)
        if len(self.exprs) > 0:
            if self.limit > 0:
                # Limiting with a heap avoids a full sort.
                xs = heapq.nsmallest(self.limit, xs, key=key)
            else:
                xs = sorted(xs, key=key)
        elif self.limit > 0:
            xs = xs[:self.limit]
        return xs

    def sql(self, tabtype, only_limit=False, prefix=None):
        """
        Return a SQL `ORDER BY ... LIMIT` expression corresponding to
        the criteria in `self`. If there are no ordering expressions
        in the sorting criteria, then an empty string is returned
        regardless of any limit criteria. (That is, specifying a limit
        requires at least one order expression.)

        If `tabtype` is specified, then only sort fields belonging to
        that table type are used in the ORDER BY expression.

        If `only_limit` is `True`, then a SQL expression will only be
        returned if there is a limit of at least `1` specified in the
        sorting criteria. This is useful when an `ORDER BY` is only
        used to limit the results rather than influence an ordering
        returned to a client.

        The value of `prefix` is passed to the `tabtype._as_sql`
        function.
        """
        if only_limit and self.limit < 1:
            return ''
        exprs = self.exprs
        if tabtype is not None:
            exprs = [(f, o) for f, o in exprs if f in tabtype._sql_fields]
        if len(exprs) == 0:
            return ''
        as_sql = lambda f: tabtype._as_sql(f, prefix=prefix)
        s = ' ORDER BY '
        s += ', '.join('%s %s' % (as_sql(f), o) for f, o in exprs)
        if self.limit > 0:
            s += ' LIMIT %d' % self.limit
        return s

    def is_restraining(self, tabtype):
        """
        Returns `True` if and only if there exist sorting criteria
        *with* a limit that correspond to fields in the given table
        type.
        """
        if self.limit < 1:
            return False
        if tabtype not in self.restraining:
            return False
        for field, _ in self.exprs:
            if field in tabtype._sql_fields:
                return True
        return False

    def _cmp(self, a, b):
        # Python 2's `cmp` builtin; fields missing on either operand
        # are skipped rather than treated as smaller/larger.
        compare, geta = cmp, getattr
        for field, order in self.exprs:
            x, y = geta(a, field, None), geta(b, field, None)
            if x is None or y is None:
                continue
            c = compare(x, y)
            if order == 'DESC':
                c *= -1
            if c != 0:
                return c
        return 0
class IdSet (object):
    """
    An incomplete wrapper for Python sets to represent collections
    of identifier sets. Namely, this allows for a set to be "full"
    so that every membership test returns `True` without actually
    storing every identifier.
    """
    @staticmethod
    def full():
        """Returns a set considered to contain every identifier."""
        return IdSet(None)

    @staticmethod
    def empty():
        """Returns a set containing no identifiers."""
        return IdSet([])

    def __init__(self, seq):
        # `None` is the internal sentinel for a "full" set.
        self._set = None if seq is None else set(seq)

    @property
    def is_full(self):
        return self._set is None

    def add(self, x):
        # Adding to a full set first demotes it to a concrete
        # (initially empty) set.
        if self.is_full:
            self._set = set()
        self._set.add(x)

    def intersection(self, s2):
        """
        Returns the intersection of two id sets, where either can be
        full. Note that `s2` **must** be a `IdSet`, which differs from
        the standard library `set.intersection` function which can
        accept arbitrary sequences.
        """
        # A full set is the identity element of intersection.
        if self.is_full:
            return s2
        if s2.is_full:
            return self
        return IdSet(self._set & s2._set)

    def __contains__(self, x):
        return self.is_full or x in self._set

    def __iter__(self):
        assert not self.is_full, 'cannot iterate on full set'
        return iter(self._set)

    def __len__(self):
        # A full set reports the largest representable size (Python 2's
        # sys.maxint). WTF? Maybe this should be an assert error?
        if self.is_full:
            return sys.maxint
        return len(self._set)
| {
"repo_name": "webflint/nfldb",
"path": "nfldb/query.py",
"copies": "1",
"size": "54383",
"license": "unlicense",
"hash": 4634951192889220000,
"line_mean": 35.7701149425,
"line_max": 79,
"alpha_frac": 0.5684129232,
"autogenerated": false,
"ratio": 4.026282668246095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000011084139704496836,
"num_lines": 1479
} |
from __future__ import absolute_import, division, print_function
from collections import deque
from .core import VAR, END, PatternSet
from ..util import copy_doc
class DynamicPatternSet(PatternSet):
    """A set of patterns.

    Forms a structure for fast matching over a set of patterns. This allows for
    matching of terms to patterns for many patterns at the same time.

    Attributes
    ----------
    patterns : list
        A list of `Pattern`s included in the `PatternSet`.
    """

    def __init__(self, context, patterns):
        self.context = context
        self._net = Node()
        self.patterns = []
        for p in patterns:
            self.add(p)

    def add(self, pat):
        """Add a pat to the DynamicPatternSet.

        Parameters
        ----------
        pat : Pattern
        """
        if self.context != pat.context:
            # The two adjacent literals are joined by implicit
            # concatenation; the trailing space keeps the message from
            # reading "the samecontext".
            raise ValueError("All patterns in a PatternSet must have the same "
                             "context")
        vars = pat.vars
        curr_node = self._net
        ind = len(self.patterns)
        # List of variables, in order they appear in the POT of the term
        for t in map(self.context.head, self.context.traverse(pat.pat)):
            prev_node = curr_node
            if t in vars:
                t = VAR
            if t in curr_node.edges:
                curr_node = curr_node.edges[t]
            else:
                curr_node.edges[t] = Node()
                curr_node = curr_node.edges[t]
        # We've reached a leaf node. Add the term index to this leaf.
        prev_node.edges[t].patterns.append(ind)
        self.patterns.append(pat)

    @copy_doc(PatternSet.match_iter)
    def match_iter(self, term):
        S = self.context.traverse(term, 'copyable')
        for m, syms in _match(S, self._net):
            for i in m:
                pat = self.patterns[i]
                subs = _process_match(pat, syms)
                if subs is not None:
                    yield pat, subs
class Node(tuple):
    """A Discrimination Net node.

    Implemented as an immutable 2-tuple of ``(edges, patterns)`` with
    named accessors for each slot."""
    __slots__ = ()

    def __new__(cls, edges=None, patterns=None):
        return tuple.__new__(cls, (edges or {}, patterns or []))

    @property
    def edges(self):
        """A dictionary, where the keys are edges, and the values are nodes"""
        return self[0]

    @property
    def patterns(self):
        """A list of all patterns that currently match at this node"""
        return self[1]
def _match(S, N):
    """Structural matching of term S to discrimination net node N.

    Yields ``(patterns, matches)`` pairs: the pattern indices stored at
    a reached leaf, and the tuple of subterms bound to variables along
    the way. Backtracks through saved choice points to enumerate every
    possible matching."""
    stack = deque()
    restore_state_flag = False
    # matches are stored in a tuple, because all mutations result in a copy,
    # preventing operations from changing matches stored on the stack.
    matches = ()
    while True:
        if S.term is END:
            yield N.patterns, matches
        try:
            # This try-except block is to catch hashing errors from un-hashable
            # types. This allows for variables to be matched with un-hashable
            # objects.
            n = N.edges.get(S.current, None)
            if n and not restore_state_flag:
                stack.append((S.copy(), N, matches))
                N = n
                S.next()
                continue
        except TypeError:
            pass
        n = N.edges.get(VAR, None)
        if n:
            restore_state_flag = False
            matches = matches + (S.term,)
            S.skip()
            N = n
            continue
        try:
            # Backtrack to the most recent choice point.
            (S, N, matches) = stack.pop()
            restore_state_flag = True
        except IndexError:
            # The original bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; an empty deque raises IndexError, which
            # is the only condition meant to end the search.
            return
def _process_match(pat, syms):
    """Process a match to determine if it is correct, and to find the correct
    substitution that will convert the term into the pattern.

    Parameters
    ----------
    pat : Pattern
    syms : iterable
        Iterable of subterms that match a corresponding variable.

    Returns
    -------
    A dictionary of {vars : subterms} describing the substitution to make the
    pattern equivalent with the term. Returns `None` if the match is
    invalid."""
    varlist = pat._varlist
    if len(varlist) != len(syms):
        raise RuntimeError("length of varlist doesn't match length of syms.")
    subs = {}
    for var, sym in zip(varlist, syms):
        # A variable may occur several times in the pattern; every
        # occurrence must have matched the same subterm, otherwise the
        # match is inconsistent.
        if subs.setdefault(var, sym) != sym:
            return None
    return subs
| {
"repo_name": "jcrist/pinyon",
"path": "pinyon/matching/dynamic.py",
"copies": "1",
"size": "4528",
"license": "bsd-3-clause",
"hash": 2694093549872312300,
"line_mean": 29.1866666667,
"line_max": 79,
"alpha_frac": 0.5591872792,
"autogenerated": false,
"ratio": 4.287878787878788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004166666666666667,
"num_lines": 150
} |
from __future__ import absolute_import, division, print_function
from collections import deque
class Pattern(object):
    """A pattern.

    Parameters
    ----------
    context : Context
    pat : term
    vars: tuple, optional
        Tuple of variables found in the pattern.
    """

    def __init__(self, context, pat, vars=()):
        self.context = context
        self.pat = pat
        self.vars = vars
        self._build(context)

    def __repr__(self):
        return "Pattern({0}, {1})".format(self.pat, self.vars)

    def _build(self, context):
        """Initialized once a context is set"""
        lookup = {}
        ordered_vars = []
        # Walk the pattern in traversal order, recording where each
        # variable occurs and the order in which variables appear.
        for term, path in context.traverse(self.pat, 'path'):
            if term not in self.vars:
                continue
            lookup.setdefault(term, []).append(path)
            ordered_vars.append(term)
        # For deterministic nets
        self._path_lookup = lookup
        # For nondeterministic nets
        self._varlist = ordered_vars
class PatternSet(object):
    """A set of patterns.

    Forms a structure for fast matching over a set of patterns. This allows
    for matching of terms to patterns for many patterns at the same time.

    Attributes
    ----------
    context : Context
    patterns : list
        A list of `Pattern`s included in the `PatternSet`.
    """

    def match_iter(self, term):
        """A generator that lazily finds matchings for term from the
        PatternSet.

        Parameters
        ----------
        term : term

        Yields
        ------
        Tuples of `(pat, subs)`, where `pat` is the pattern being matched,
        and `subs` is a dictionary mapping the variables in the pattern to
        their matching values in the term."""
        pass

    def match_all(self, term):
        """Finds all matchings for term in the PatternSet.

        Equivalent to ``list(pat_set.match_iter(term))``.

        Parameters
        ----------
        term : term

        Returns
        -------
        List containing tuples of `(pat, subs)`, where `pat` is the pattern
        being matched, and `subs` is a dictionary mapping the variables in
        the pattern to their matching values in the term."""
        return list(self.match_iter(term))

    def match_one(self, term):
        """Finds the first matching for term in the PatternSet.

        Parameters
        ----------
        term : term

        Returns
        -------
        A tuple `(pat, subs)`, where `pat` is the pattern being matched, and
        `subs` is a dictionary mapping the variables in the pattern to their
        matching values in the term. In the case of no matchings, a tuple
        (None, None) is returned."""
        for pat, subs in self.match_iter(term):
            return pat, subs
        return (None, None)
class Token(object):
    """A named sentinel object.

    Instances are used to mark special positions while traversing a term
    or pattern; elsewhere in this module they are compared by identity."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return self.name
# A variable to represent *all* variables in a discrimination net
VAR = Token('?')
# Represents the end of the traversal of an expression. We can't use `None`,
# 'False', etc... here, as anything may be an argument to a function.
# (Callers compare against END with `is`, so its unique identity is what
# matters, not its name.)
END = Token('end')
class Traverser(object):
    """Stack based preorder traversal of terms.

    The traverser is copyable, so its state can be saved as a choice
    point and restored later when backtracking."""

    def __init__(self, context, term, stack=None):
        self.term = term
        self.context = context
        self._stack = stack if stack else deque([END])

    def __iter__(self):
        while self.term is not END:
            yield self.term
            self.next()

    def copy(self):
        """Return a snapshot of the traversal in its current state.

        This allows the traversal to be pushed onto a stack, for easy
        backtracking."""
        return Traverser(self.context, self.term, deque(self._stack))

    def next(self):
        """Proceed to the next term in the preorder traversal."""
        children = self.context.args(self.term)
        if children:
            # Descend into the first child; queue the remaining children
            # (in order) for later visits.
            self.term = children[0]
            self._stack.extend(reversed(children[1:]))
        else:
            # Leaf reached — resume from the most recently queued term.
            self.term = self._stack.pop()

    @property
    def current(self):
        return self.context.head(self.term)

    @property
    def arity(self):
        return len(self.context.args(self.term))

    def skip(self):
        """Skip over all subterms of the current level in the traversal"""
        self.term = self._stack.pop()
| {
"repo_name": "jcrist/pinyon",
"path": "pinyon/matching/core.py",
"copies": "1",
"size": "4691",
"license": "bsd-3-clause",
"hash": -5329010465052763000,
"line_mean": 26.9226190476,
"line_max": 80,
"alpha_frac": 0.588787039,
"autogenerated": false,
"ratio": 4.327490774907749,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5416277813907748,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Hashable
from types import GeneratorType
from ._vendor.six import wraps
# TODO: spend time filling out functionality and make these more robust
def memoize(func):
    """
    Decorator that caches the decorated function's result for every
    distinct combination of arguments, returning the cached value on
    subsequent calls. All argument values must be hashable; results
    that are generators or otherwise un-hashable raise a `TypeError`
    rather than being cached.

    >>> @memoize
    ... def foo(x):
    ...     print('running function with', x)
    ...     return x+3
    ...
    >>> foo(10)
    running function with 10
    13
    >>> foo(10)
    13
    >>> foo(11)
    running function with 11
    14
    """
    func._result_cache = {}  # pylint: disable-msg=W0212

    @wraps(func)
    def _memoized_func(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        cache = func._result_cache  # pylint: disable-msg=W0212
        try:
            return cache[key]
        except KeyError:
            pass
        result = func(*args, **kwargs)
        if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
            raise TypeError("Can't memoize a generator or non-hashable object!")
        cache[key] = result
        return result
    return _memoized_func
def memoizemethod(method):
    """
    Decorator that caches a method's results on the instance (`self`)
    for each combination of inputs, returning the cached result on
    subsequent calls. Argument values must be hashable; results that
    are generators or otherwise un-hashable raise a `TypeError`.

    >>> class Foo (object):
    ...   @memoizemethod
    ...   def foo(self, x, y=0):
    ...     print('running method with', x, y)
    ...     return x + y + 3
    ...
    >>> foo1 = Foo()
    >>> foo1.foo(10)
    running method with 10 0
    13
    >>> foo1.foo(10)
    13
    >>> foo1.foo(11, y=7)
    running method with 11 7
    21
    """

    @wraps(method)
    def _wrapper(self, *args, **kwargs):
        # NOTE: check self.__dict__ directly rather than using hasattr(),
        # because hasattr() falls back to the class. If the class itself
        # has a memoized classmethod that ran before this instance method,
        # the instance would otherwise share the class-level cache, making
        # results global rather than per-instance.
        if '_memoized_results' not in self.__dict__:
            self._memoized_results = {}
        cache = self._memoized_results
        key = (method.__name__, args, tuple(sorted(kwargs.items())))
        if key in cache:
            return cache[key]
        try:
            result = method(self, *args, **kwargs)
        except KeyError as e:
            if '__wrapped__' not in str(e):
                raise
            # is this the right thing to do? happened during py3 conversion
            result = None
        if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
            raise TypeError("Can't memoize a generator or non-hashable object!")
        return cache.setdefault(key, result)
    return _wrapper
# class memoizemethod(object):
# """cache the return value of a method
#
# This class is meant to be used as a decorator of methods. The return value
# from a given method invocation will be cached on the instance whose method
# was invoked. All arguments passed to a method decorated with memoize must
# be hashable.
#
# If a memoized method is invoked directly on its class the result will not
# be cached. Instead the method will be invoked like a static method:
# class Obj(object):
# @memoize
# def add_to(self, arg):
# return self + arg
# Obj.add_to(1) # not enough arguments
# Obj.add_to(1, 2) # returns 3, result is not cached
# """
# def __init__(self, func):
# self.func = func
# def __get__(self, obj, objtype=None):
# if obj is None:
# return self.func
# return partial(self, obj)
# def __call__(self, *args, **kw):
# obj = args[0]
# try:
# cache = obj.__cache
# except AttributeError:
# cache = obj.__cache = {}
# key = (self.func, args[1:], frozenset(kw.items()))
# try:
# res = cache[key]
# except KeyError:
# res = cache[key] = self.func(*args, **kw)
# return res
def clear_memoized_methods(obj, *method_names):
    """
    Clear the memoized method or @memoizedproperty results for the given
    method names from the given object. Names that were never memoized
    (and objects that have no caches at all) are ignored.

    >>> v = [0]
    >>> def inc():
    ...     v[0] += 1
    ...     return v[0]
    ...
    >>> class Foo(object):
    ...     @memoizemethod
    ...     def foo(self):
    ...         return inc()
    ...     @memoizedproperty
    ...     def g(self):
    ...         return inc()
    ...
    >>> f = Foo()
    >>> f.foo(), f.foo()
    (1, 1)
    >>> clear_memoized_methods(f, 'foo')
    >>> (f.foo(), f.foo(), f.g, f.g)
    (2, 2, 3, 3)
    >>> clear_memoized_methods(f, 'g', 'no_problem_if_undefined')
    >>> f.g, f.foo(), f.g
    (4, 2, 4)
    """
    for key in list(getattr(obj, '_memoized_results', {}).keys()):
        # key[0] is the method name
        if key[0] in method_names:
            del obj._memoized_results[key]
    # Use getattr with a default so objects that never evaluated a
    # @memoizedproperty (and therefore have no `_cache_` dict yet) are
    # handled gracefully instead of raising AttributeError.
    property_dict = getattr(obj, '_cache_', {})
    for prop in method_names:
        inner_attname = '__%s' % prop
        if inner_attname in property_dict:
            del property_dict[inner_attname]
def memoizedproperty(func):
    """
    Decorator turning a method into a read-only property whose value is
    computed once per instance and then cached in ``self._cache_``;
    subsequent accesses return the cached value without re-running the
    getter.

    >>> class Foo (object):
    ...   _x = 1
    ...   @memoizedproperty
    ...   def foo(self):
    ...     self._x += 1
    ...     print('updating and returning {0}'.format(self._x))
    ...     return self._x
    ...
    >>> foo1 = Foo()
    >>> foo1.foo
    updating and returning 2
    2
    >>> foo1.foo
    2
    """
    inner_attname = '__%s' % func.__name__

    def new_fget(self):
        # The per-instance cache dict is created lazily on first access.
        if not hasattr(self, '_cache_'):
            self._cache_ = dict()
        cache = self._cache_
        try:
            return cache[inner_attname]
        except KeyError:
            value = cache[inner_attname] = func(self)
            return value
    return property(new_fget)
# def memoized_property(fget):
# """
# Return a property attribute for new-style classes that only calls its getter on the first
# access. The result is stored and on subsequent accesses is returned, preventing the need to
# call the getter any more.
# Example::
# >>> class C(object):
# ... load_name_count = 0
# ... @memoized_property
# ... def name(self):
# ... "name's docstring"
# ... self.load_name_count += 1
# ... return "the name"
# >>> c = C()
# >>> c.load_name_count
# 0
# >>> c.name
# "the name"
# >>> c.load_name_count
# 1
# >>> c.name
# "the name"
# >>> c.load_name_count
# 1
# """
# attr_name = '_{0}'.format(fget.__name__)
#
# @wraps(fget)
# def fget_memoized(self):
# if not hasattr(self, attr_name):
# setattr(self, attr_name, fget(self))
# return getattr(self, attr_name)
#
# return property(fget_memoized)
class classproperty(object):  # pylint: disable=C0103
    # from celery.five
    """Descriptor implementing a property readable on both the class and
    its instances; the getter receives the class, like a classmethod."""

    @staticmethod
    def _wrap(fn):
        # Coerce a plain function into a classmethod; leave classmethods
        # (and None) untouched.
        if fn is not None and not isinstance(fn, classmethod):
            fn = classmethod(fn)
        return fn

    def __init__(self, getter=None, setter=None):
        self.__get = self._wrap(getter)
        self.__set = self._wrap(setter)
        info = self.__get.__get__(object)  # just need the info attrs.
        self.__doc__ = info.__doc__
        self.__name__ = info.__name__
        self.__module__ = info.__module__

    def __get__(self, obj, type_=None):
        if obj and type_ is None:
            type_ = obj.__class__
        return self.__get.__get__(obj, type_)()

    def __set__(self, obj, value):
        if obj is None:
            return self
        return self.__set.__get__(obj)(value)

    def setter(self, setter):
        return self.__class__(self.__get, setter)
# memoize & clear:
# class method
# function
# classproperty
# property
# staticproperty?
# memoizefunction
# memoizemethod
# memoizeproperty
#
#
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/auxlib/decorators.py",
"copies": "1",
"size": "10507",
"license": "apache-2.0",
"hash": -8497004042429920000,
"line_mean": 29.4550724638,
"line_max": 99,
"alpha_frac": 0.5556295803,
"autogenerated": false,
"ratio": 3.7364864864864864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47921160667864865,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import numpy as np
from datashape.dispatch import dispatch
from datashape import from_numpy
from dask.array.core import Array, from_array
from dask.bag.core import Bag
import dask.bag as db
from dask.compatibility import long
from odo import append, chunks, convert, discover, TextFile
from ..utils import filter_kwargs
@discover.register(Array)
def discover_dask_array(a, **kwargs):
    """Return the datashape of a dask array, built from its shape and dtype."""
    return from_numpy(a.shape, a.dtype)
arrays = [np.ndarray]
try:
import h5py
except ImportError:
pass
else:
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
try:
import bcolz
except ImportError:
pass
else:
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
@convert.register(Array, tuple(arrays), cost=1.)
def array_to_dask(x, name=None, chunks=None, **kwargs):
    """Wrap an in-memory or on-disk array as a dask ``Array``.

    A `chunks` specification is mandatory; without one a ValueError is
    raised. Extra keyword arguments accepted by ``from_array`` are
    forwarded to it."""
    if chunks is None:
        raise ValueError("chunks cannot be None")
    extra = filter_kwargs(from_array, kwargs)
    return from_array(x, chunks=chunks, name=name, **extra)
@convert.register(np.ndarray, Array, cost=10.)
def dask_to_numpy(x, **kwargs):
    """Materialize a dask ``Array`` into an in-memory NumPy ndarray."""
    return np.array(x)
@convert.register(float, Array, cost=10.)
def dask_to_float(x, **kwargs):
    """Evaluate a dask ``Array`` and return the computed result.

    Registered as the Array -> float conversion."""
    return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
    """Append the contents of dask ``Array`` `arr` into the out-of-core
    container `out` and return `out`.

    Unless `inplace`, `out` is first grown along axis 0 to make room;
    all trailing dimensions must already match."""
    if not inplace:
        # Resize output dataset to accept new data
        assert out.shape[1:] == arr.shape[1:]
        resize(out, out.shape[0] + arr.shape[0])  # elongate
    arr.store(out)
    return out
@convert.register(Iterator, Bag)
def bag_to_iterator(x, **kwargs):
    """Expose a dask ``Bag`` as a plain Python iterator."""
    return iter(x)
# NOTE(review): this def reuses (and shadows) the `bag_to_iterator` name
# defined just above; the earlier function was already captured by its
# @convert.register decorator, so only the registrations matter — confirm
# before renaming.
@convert.register(Bag, chunks(TextFile))
def bag_to_iterator(x, **kwargs):
    """Build a dask ``Bag`` from a chunked collection of text files."""
    return db.from_filenames([tf.path for tf in x])
# NOTE(review): reuses the `bag_to_iterator` name again; only the
# @convert.register registration matters here.
@convert.register(Bag, list)
def bag_to_iterator(x, **kwargs):
    """Build a dask ``Bag`` from an in-memory sequence."""
    return db.from_sequence(x, **filter_kwargs(db.from_sequence, kwargs))
| {
"repo_name": "alexmojaki/odo",
"path": "odo/backends/dask.py",
"copies": "4",
"size": "2234",
"license": "bsd-3-clause",
"hash": 3725827628877032400,
"line_mean": 23.0215053763,
"line_max": 73,
"alpha_frac": 0.6781557744,
"autogenerated": false,
"ratio": 3.1732954545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 93
} |
from __future__ import absolute_import, division, print_function
from collections import Iterator
import numpy as np
import pandas as pd
from datashape.dispatch import dispatch
from datashape import from_numpy
import dask
from dask.array.core import Array, from_array
from dask.bag.core import Bag
import dask.bag as db
from dask.compatibility import long
import dask.dataframe as dd
from odo import append, chunks, convert, discover, TextFile
from ..utils import filter_kwargs
@discover.register(Array)
def discover_dask_array(a, **kwargs):
    """Return the datashape of a dask array, built from its shape and dtype."""
    return from_numpy(a.shape, a.dtype)
arrays = [np.ndarray]
try:
import h5py
except ImportError:
pass
else:
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
try:
import bcolz
except ImportError:
pass
else:
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
@convert.register(Array, tuple(arrays), cost=1.)
def array_to_dask(x, name=None, chunks=None, **kwargs):
    """Wrap an in-memory or on-disk array as a dask ``Array``.

    A `chunks` specification is mandatory; without one a ValueError is
    raised. Extra keyword arguments accepted by ``from_array`` are
    forwarded to it."""
    if chunks is None:
        raise ValueError("chunks cannot be None")
    extra = filter_kwargs(from_array, kwargs)
    return from_array(x, chunks=chunks, name=name, **extra)
@convert.register(np.ndarray, Array, cost=10.)
def dask_to_numpy(x, **kwargs):
    """Materialize a dask ``Array`` into an in-memory NumPy ndarray."""
    return np.array(x)
# One implementation serves three registered conversions: dask DataFrame,
# Series, and Array all materialize via .compute().
@convert.register(pd.DataFrame, dd.DataFrame, cost=200)
@convert.register(pd.Series, dd.Series, cost=200)
@convert.register(float, Array, cost=200)
def dask_to_other(x, **kwargs):
    """Evaluate a dask collection and return the concrete, in-memory
    result."""
    return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
    """Append the contents of dask ``Array`` `arr` into the out-of-core
    container `out` and return `out`.

    Unless `inplace`, `out` is first grown along axis 0 to make room;
    all trailing dimensions must already match."""
    if not inplace:
        # Resize output dataset to accept new data
        assert out.shape[1:] == arr.shape[1:]
        resize(out, out.shape[0] + arr.shape[0])  # elongate
    arr.store(out)
    return out
@convert.register(Iterator, Bag)
def bag_to_iterator(x, **kwargs):
    """Expose a dask ``Bag`` as a plain Python iterator."""
    return iter(x)
# NOTE(review): this def reuses (and shadows) the `bag_to_iterator` name
# defined just above; the earlier function was already captured by its
# @convert.register decorator, so only the registrations matter — confirm
# before renaming.
@convert.register(Bag, chunks(TextFile))
def bag_to_iterator(x, **kwargs):
    """Build a dask ``Bag`` from a chunked collection of text files."""
    return db.from_filenames([tf.path for tf in x])
# NOTE(review): reuses the `bag_to_iterator` name again; only the
# @convert.register registration matters here.
@convert.register(Bag, list)
def bag_to_iterator(x, **kwargs):
    """Build a dask ``Bag`` from an in-memory sequence."""
    return db.from_sequence(x, **filter_kwargs(db.from_sequence, kwargs))
@convert.register(dd.DataFrame, pd.DataFrame, cost=1.)
def pandas_dataframe_to_dask_dataframe(x, npartitions=None, **kwargs):
    """Partition a pandas DataFrame into a dask DataFrame.

    `npartitions` is mandatory; without it a ValueError is raised. Extra
    keyword arguments accepted by ``dd.from_pandas`` are forwarded."""
    if npartitions is None:
        raise ValueError("npartitions cannot be None")
    extra = filter_kwargs(dd.from_pandas, kwargs)
    return dd.from_pandas(x, npartitions=npartitions, **extra)
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/dask.py",
"copies": "1",
"size": "2732",
"license": "bsd-3-clause",
"hash": -6813219361897801000,
"line_mean": 24.7735849057,
"line_max": 73,
"alpha_frac": 0.6852122987,
"autogenerated": false,
"ratio": 3.2293144208037825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4414526719503783,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import Mapping
import itertools
import functools
import heapq
zip_longest = (itertools.izip_longest if hasattr(itertools, 'izip_longest')
else itertools.zip_longest)
if hasattr(functools, 'reduce'):
reduce = functools.reduce
#
# Public Interfaces
#
def viewablelist(rawlist=None):
    '''
    This immutable list maintains the intermediate results of map/reduce
    functions so that when the list is sliced or combined, the results of
    the map/reduce can be efficiently updated. (using previous results for
    parts of the original list that remain unchanged)
    >>> adder = lambda l: l.reduce(lambda x,y:x+y, initializer=0)
    >>> numbers = viewablelist([5, 10, 5, 0])
    >>> adder(numbers)
    20
    >>> numbers = numbers[1:]
    >>> adder(numbers)
    15
    >>> numbers = numbers + [10, 5]
    >>> adder(numbers)
    30
    Internally, viewablelist uses a binary tree to store its values. In
    each node, it caches results of the reduce function.
    Both the mapper and reducer function must be pure (they cannot modify
    anything, just return new values). Additionally, a reducer function, f,
    must be associative; that is: f(f(x,y),z) == f(x, f(y,x))
    The reducer function need not be commutative (f(x,y) == f(y,x)), however.
    For instance, string concatenation is associative but not commutative;
    this example efficiently maintains a big string which depends on many
    small strings:
    >>> concat = lambda l: l.reduce(lambda x, y: x + ' ' + y, initializer='')
    >>> l = viewablelist(['the', 'quick', 'brown', 'fox'])
    >>> concat(l)
    'the quick brown fox'
    >>> concat(l[:2] + ['stealthy'] + l[2:])
    'the quick stealthy brown fox'
    In this example, we maintain a sorted view of a list using the map and
    reduce functions. Now we can make arbitrary modifications to the
    original list and the view will update efficiently. (in linear time for
    most changes)
    >>> import heapq # (heapq.merge() sorts two already sorted lists)
    >>> def sorter(l):
    ...     l = l.map(lambda x:[x])
    ...     return l.reduce(lambda x, y: list(heapq.merge(x,y)),
    ...                     initializer=())
    >>> l = viewablelist([9, 3, 7, 5, 1])
    >>> sorter(l)
    [1, 3, 5, 7, 9]
    >>> sorter(l + [ 4 ])
    [1, 3, 4, 5, 7, 9]
    >>> sorter(l + l)
    [1, 1, 3, 3, 5, 5, 7, 7, 9, 9]
    If instead, we only wanted the largest 2 values, we could simply add a
    [-2:] slice to the reduce function and be done with it. In this example,
    we efficiently maintain the largest two values over a rolling window of
    numbers:
    >>> import heapq
    >>> def sorter(l):
    ...     l = l.map(lambda x:[x])
    ...     return l.reduce(lambda x, y: list(heapq.merge(x,y))[-2:],
    ...                     initializer=())
    >>> l = viewablelist([9, 7, 3, 5, 1, 2, 4])
    >>> # window starts with first 4 elements and moves right:
    >>> sorter(l[0:4])
    [7, 9]
    >>> sorter(l[1:5])
    [5, 7]
    >>> sorter(l[2:6])
    [3, 5]
    >>> sorter(l[3:7])
    [4, 5]
    The implementation of the backing tree (for both viewablelist and
    viewabledict) is adapted from this weight-based tree implementation in
    scheme:
    ftp://ftp.cs.indiana.edu/pub/scheme-repository/code/struct/wttree.scm
    The tree should maintain an approximate balance under creation, slices and
    concatenation:
    >>> viewablelist(range(100))._depth()
    7
    >>> viewablelist(range(100))[40:50]._depth()
    4
    >>> l = reduce(lambda x,y:x+y, [viewablelist(range(10)) for _ in range(10)])
    >>> l._depth()
    9
    >>> l[40:50]._depth()
    4
    '''
    # Empty (or None) input maps to the shared empty-list singleton.
    if not rawlist:
        return _EMPTY_LIST
    def _helper(start, end):
        # Recursively build a balanced tree over rawlist[start:end]:
        # the middle element becomes this node's value, and the halves
        # on either side become its subtrees.
        if start >= end:
            return None
        mid = (start + end) // 2
        return ViewableList(
            rawlist[mid], _helper(start, mid), _helper(mid + 1, end))
    return _helper(0, len(rawlist))
def viewabledict(given=None):
    '''
    This immutable dictionary maintains the intermediate results of map/reduce
    functions so that when the dictionary is sliced or combined, the results of
    the map/reduce can be efficiently updated. (using previous results for
    parts of the original dictionary that remain unchanged)
    TODO: more useful example here perhaps...
    >>> persons = viewabledict({'jim':23, 'sally':27, 'chiban':27})
    >>> def by_age(p):
    ...     p = p.items().map(lambda kv: {kv[1]: [kv[0]]})
    ...     return p.reduce(lambda x, y:{k: x.get(k,[]) + y.get(k,[])
    ...                                  for k in set(x.keys()).union(y.keys())})
    >>> by_age(persons)
    {27: ['chiban', 'sally'], 23: ['jim']}
    >>> by_age(persons + {'bill': 30})
    {27: ['chiban', 'sally'], 30: ['bill'], 23: ['jim']}
    Internally, viewabledict is implemented as a binary tree, ordered by its
    keys. In each node, it caches results of the reduce function. The standard
    sequential getters over these dictionaries (keys(), iteritems(), etc) all
    iterate in key order. items(), keys(), and values() all return
    viewablelists, which are frequently used to make further map/reduces.
    >>> vals = lambda l: l.values().reduce(lambda x, y: x + ' ' + y)
    >>> words = viewabledict({3:'brown', 1:'the', 2:'quick', 4:'fox'})
    >>> vals(words)
    'the quick brown fox'
    >>> vals(words + {2.5: 'stealthy'})
    'the quick stealthy brown fox'
    Unlike regular python dictionaries, sub-dictionaries can be sliced from
    viewabledicts using the python slice operator, "[:]".
    It is common and efficient to split these objects with slices, like so:
    >>> viewabledict({'alice': 1, 'jim': 2, 'sunny': 3})['betty':]
    viewabledict({'jim': 2, 'sunny': 3})
    >>> viewabledict({2:2, 3:3, 4:4})[:3]
    viewabledict({2: 2})
    Unlike list slices, both the minimum and maximum bounds are exclusive:
    >>> viewabledict({'alice':1, 'jim':2, 'sunny':3})['alice':'sunny'].keys()
    viewablelist(['jim'])
    viewabledicts can also be (immutably) combined with the plus operator, and
    you can make incremental new dictionaries with the set() and remove()
    methods:
    >>> viewabledict({1: 1}) + viewabledict({2: 2})
    viewabledict({1: 1, 2: 2})
    >>> viewabledict({1: 1}).set(2, 2)
    viewabledict({1: 1, 2: 2})
    >>> viewabledict({1: 1}).remove(1)
    viewabledict({})
    '''
    # Empty (or None) input maps to the shared empty-dict singleton.
    if not given:
        return _EMPTY_DICT
    if isinstance(given, ViewableList):
        return _from_viewablelist(given)
    if isinstance(given, Mapping):
        # Normalize a mapping to a sequence of (key, value) pairs.
        given = given.items()
    all_kv = sorted(given)
    if len(all_kv) == 0:
        return _EMPTY_DICT
    def _helper(start, end):
        # Recursively build a balanced, key-ordered tree from the sorted
        # pairs: the middle pair becomes this node, the halves on either
        # side become its subtrees.
        if start >= end:
            return None
        mid = (start + end) // 2
        k, v = all_kv[mid]
        return ViewableDict(
            k, v, _helper(start, mid), _helper(mid + 1, end))
    return _helper(0, len(all_kv))
def to_viewable(val, skip_types=()):
    '''
    Recursively converts a structure of lists and dictionaries into
    their viewable variants. Types listed in `skip_types` (and any
    value that is neither a list/tuple nor a mapping) are returned
    unchanged.
    '''
    if isinstance(val, skip_types):
        return val
    if isinstance(val, (list, tuple)):
        converted = [to_viewable(item, skip_types) for item in val]
        return viewablelist(converted)
    if isinstance(val, Mapping):
        pairs = {}
        for key, sub in val.items():
            pairs[to_viewable(key, skip_types)] = to_viewable(sub, skip_types)
        return viewabledict(pairs)
    return val
def from_viewable(val):
    '''
    Recursively converts a structure of viewablelists and viewabledicts
    into dictionaries and tuples; any other value is returned unchanged.
    '''
    if isinstance(val, Mapping):
        return {from_viewable(k): from_viewable(v) for k, v in val.items()}
    if isinstance(val, (ViewableList, MappedList, tuple, list)):
        return tuple(from_viewable(item) for item in val)
    return val
#
# Implementation
#
def fnkey(fn):
    """
    Produce a comparison key for a function.

    Inner functions and lambdas are freshly created each time they are
    evaluated, so naive equality treats logically-identical callables as
    distinct. When a function captures nothing (its closure is empty) it
    can be identified by the function object itself; when it does capture
    values, the key pairs its compiled bytecode with a repr of the
    captured cell contents. `None` maps to `None`.
    """
    if fn is None:
        return None
    closure = fn.__closure__
    if closure is None:
        return fn
    captured = repr([cell.cell_contents for cell in closure])
    return (fn.__code__, captured)
class MapReduceLogic(object):
    """
    Container for a map function, a reduce function, and a default value
    used when the reduce function does not have enough inputs. Distinct
    instances holding the same functions are generally considered
    different (function comparison goes through `fnkey`), so create only
    one per use-case and reuse it.
    """
    __slots__ = ('reducer', 'mapper', 'initializer')

    def __init__(self, reducer=None, mapper=None, initializer=None):
        self.reducer = reducer
        self.mapper = mapper
        self.initializer = initializer

    def __iter__(self):
        # Iterates as the (reducer, mapper, initializer) triple.
        yield self.reducer
        yield self.mapper
        yield self.initializer

    def __eq__(self, other):
        mine = tuple(self)
        theirs = tuple(other)
        return (fnkey(mine[0]) == fnkey(theirs[0])
                and fnkey(mine[1]) == fnkey(theirs[1])
                and mine[2] == theirs[2])

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        hsh = hash(fnkey(self.reducer))
        hsh = hsh * 31 + hash(fnkey(self.mapper))
        return hsh * 31 + hash(self.initializer)
def _identity(x):
    # Default key function: returns its argument unchanged.
    return x
#
# Implementation: viewablelist
#
class ViewableIterable(object):
    # Mixin providing lazy map/flatmap/group_by for viewable sequences
    # (ViewableList and MappedList supply iteration and _map_reduce).
    __slots__ = ()

    def map(self, fn):
        '''
        Applies a (pure) function to each element of this list. All maps are
        lazy; they will not be evaluated until the list is inspected or
        manipulated in a more complex manner.
        >>> list(viewablelist([1,2,3]).map(lambda x:x+1))
        [2, 3, 4]
        >>> list(viewablelist([1,2,3]).map(lambda x:x+1).map(lambda x:x+1))
        [3, 4, 5]
        >>> viewablelist([1,2,3]).map(lambda x:x+1).reduce(lambda x,y:x+y)
        9
        '''
        # _cached_partial keeps the wrapper's identity stable so that the
        # memoized map-reduce caches can be hit on later runs.
        return MappedList(_cached_partial(_wrap_fn_in_list, fn), self)

    def flatmap(self, fn):
        '''
        This transforms each member of the list into zero or more members,
        which are all spliced together into a resulting list.
        >>> list(viewablelist([2]).flatmap(lambda x:[x,x]))
        [2, 2]
        >>> list(viewablelist([2,3,4]).flatmap(lambda x:[]))
        []
        >>> list(viewablelist([1,2]).flatmap(lambda x:[x,x]))
        [1, 1, 2, 2]
        >>> list(viewablelist([]).flatmap(lambda x:[x,x]))
        []
        '''
        return MappedList(fn, self)

    def group_by(self, keyfn):
        '''
        Groups this list into sublists of items, I, for which keyfn(I) is
        equal. The return is a viewabledict that is keyed by keyfn(I). The
        values are viewablelists of those items which produce the
        corresponding key. Within each group, the items retain the same
        ordering they had in the original list.
        For example, this partitions a list into evens and odds:
        >>> viewablelist([2,5,3,0,9,11]).group_by(lambda x: x%2)
        viewabledict({0: viewablelist([2, 0]), 1: viewablelist([5, 3, 9, 11])})
        >>> viewablelist([2,5,2,2,5]).group_by(lambda x: x)
        viewabledict({2: viewablelist([2, 2, 2]), 5: viewablelist([5, 5])})
        '''
        # Map each item to a singleton {key: [item]} dict, then merge the
        # dicts pairwise, concatenating groups that share a key.
        ret = self.map(_cached_partial(_create_group, keyfn))
        return ret.reduce(_combine_groups)
@functools.total_ordering
class ViewableList(ViewableIterable):
    '''
    A tree-based implementation of a list that remembers previous
    results of mapreduces and can re-use them for later runs.
    '''
    __slots__ = ('_left', '_right', '_val', '_count', '_reducevals')

    def __init__(self, val, left, right):
        # A node stores one element (`val`) plus optional subtrees; the
        # empty list is represented by a node whose value is _NO_VAL.
        self._left = None
        self._right = None
        self._count = 0
        self._val = val
        self._count = 0 if val is _NO_VAL else 1
        self._reducevals = {}  # memoized map-reduce results, keyed by logic
        if left:
            self._left = left
            self._count += self._left._count
        if right:
            self._right = right
            self._count += self._right._count

    def __len__(self):
        '''
        >>> len(viewablelist([]))
        0
        >>> len(viewablelist([1,2]))
        2
        >>> len(viewablelist([1,2]) + viewablelist([3]))
        3
        '''
        return self._count

    def __getitem__(self, index):
        '''
        >>> list(viewablelist([2, 3, 4])[:1])
        [2]
        >>> list(viewablelist([2, 3])[1:])
        [3]
        >>> list(viewablelist([2, 3])[2:])
        []
        >>> list(viewablelist([2, 3])[:-2])
        []
        >>> list(viewablelist([2,3,4,5])[2:])
        [4, 5]
        >>> list(viewablelist([9, 7, 3, 5, 1, 2, 4])[2:-1])
        [3, 5, 1, 2]
        >>> list(viewablelist(range(10))[::3])
        [0, 3, 6, 9]
        >>> viewablelist([2, 3, 4])[1]
        3
        >>> viewablelist([2, 3, 4])[-1]
        4
        '''
        if isinstance(index, slice):
            # Extended slices (step != 1) fall back to materializing the
            # whole list; unit-step slices are done via tree splits.
            if index.step is not None and index.step != 1:
                return viewablelist(list(self)[index])
            count = self._count
            start, end, _ = index.indices(count)
            cur = self
            if end < count:
                cur = _lsplit_lt(cur, end)
            if start > 0:
                cur = _lsplit_gte(cur, start)
            return viewablelist() if cur is None else cur
        else:
            # Single-element lookup: descend by comparing against the size
            # of the left subtree.
            count, left, right = self._count, self._left, self._right
            index = count + index if index < 0 else index
            if left is not None:
                left_ct = left._count
                if index < left_ct:
                    return left[index]
                elif left_ct == index:
                    return self._val
                else:
                    return right[index - (left_ct + 1)]
            elif index == 0:
                return self._val
            else:
                return right[index - 1]

    def _depth(self):
        # Height of the tree (0 for the empty list); used in doctests to
        # verify balancing.
        depth = 0 if self._val is _NO_VAL else 1
        l, r = self._left, self._right
        if l is not None:
            depth = max(depth, l._depth() + 1)
        if r is not None:
            depth = max(depth, r._depth() + 1)
        return depth

    def __add__(self, other):
        '''
        >>> (viewablelist([]) + viewablelist([3]))._depth()
        1
        >>> (viewablelist([0]) + viewablelist([1,2,3]))._depth()
        3
        >>> (viewablelist([0,1,2,3,4]) + viewablelist([5]))._depth()
        4
        >>> # (enough changes to trigger rebalance)
        >>> (viewablelist([0,1,2,3,4]) + viewablelist([5]) +
        ...  viewablelist([6]))._depth()
        4
        '''
        if not isinstance(other, ViewableList):
            other = viewablelist(other)
        return _lconcat2(self, other)

    def __repr__(self):
        return 'viewablelist({0})'.format(str(list(self)))

    def __str__(self):
        return self.__repr__()

    def __iter__(self):
        '''
        >>> list(viewablelist([]) + viewablelist([3]))
        [3]
        >>> list(range(100)) == list(viewablelist(range(100)))
        True
        '''
        # Iterative in-order traversal with an explicit stack (avoids
        # recursion limits on deep trees).
        if self._count == 0:
            return
        cur, seconds = self, []
        while True:
            while cur is not None:
                seconds.append(cur)
                cur = cur._left
            if not seconds:
                return
            cur = seconds.pop()
            yield cur._val
            cur = cur._right

    def __ne__(self, other):
        return not self == other

    def __eq__(self, other):
        '''
        >>> viewablelist([]) != viewablelist([1])
        True
        >>> viewablelist([1,3]) != viewablelist([1,2])
        True
        >>> viewablelist([1,2]) + viewablelist([3]) == viewablelist([1,2,3])
        True
        >>> viewablelist([]) + viewablelist([3]) == viewablelist([3])
        True
        '''
        # Element-wise comparison; _NO_VAL as fillvalue makes unequal
        # lengths compare unequal.
        if not hasattr(other, '__iter__'):
            return False
        return all(a == b for a, b in zip_longest(
            self.__iter__(), other.__iter__(), fillvalue=_NO_VAL))

    def __hash__(self):
        '''
        >>> hash(viewablelist([1,2]) + viewablelist([3])) == hash(viewablelist([1,2,3]))
        True
        >>> hash(viewablelist([1])) != hash(viewablelist([2]))
        True
        '''
        # Hash depends only on contents, not tree shape, so equal lists
        # hash equal regardless of how they were built.
        hsh = 0
        for p in self:
            hsh = (hsh * 31) + hash(p)
        return hsh

    def __lt__(self, other):
        '''
        >>> viewablelist([]) < viewablelist([3])
        True
        >>> viewablelist([3]) < viewablelist([3])
        False
        >>> viewablelist([4]) < viewablelist([3, 4])
        False
        >>> # @functools.total_ordering gives us other comparison operators too:
        >>> viewablelist([3]) >= viewablelist([3])
        True
        >>> viewablelist([3]) >= viewablelist([3, 0])
        False
        >>> bool(viewablelist())
        False
        >>> bool(viewablelist([2]))
        True
        '''
        # Lexicographic ordering, like built-in lists.
        for a, b in zip(self.__iter__(), other.__iter__()):
            if a < b:
                return True
            elif b < a:
                return False
        return len(self) < len(other)

    def sorted(self, key=_identity):
        '''
        Sorts a list, optionally by a specified key function.
        If the key function returns an identical value for multiple list items,
        those items retain the order they had in the original list (the sort
        is stable).
        >>> viewablelist([4, 9, 0]).sorted()
        viewablelist([0, 4, 9])
        >>> viewablelist([4, 3, 1, 0, 2]).sorted(key=lambda x: x%2)
        viewablelist([4, 0, 2, 3, 1])
        >>> viewablelist([]).sorted()
        viewablelist([])
        '''
        # Merge sort expressed as a map-reduce: wrap each item as a
        # singleton list, then pairwise-merge sorted sublists.
        reducer = _cached_partial(_merge_sorted_lists, key)
        return self.map(_wrap_in_list).reduce(reducer, initializer=_EMPTY_LIST)

    def filter(self, fn):
        # Filtering is a flatmap that keeps or drops each element.
        flatfn = _cached_partial(_filter_to_flat, fn)
        return MappedList(flatfn, self)

    def reduce(self, reducer, initializer=None):
        # Reduce with the identity mapper. `initializer` is returned for
        # the empty list (and used for empty mapper output).
        mr = MapReduceLogic(mapper=_wrap_in_list,
                            reducer=reducer,
                            initializer=initializer)
        return self._map_reduce(mr)

    def _map_reduce(self, logic):
        # Core memoized traversal: results are cached per-node, keyed by
        # the MapReduceLogic instance, so unchanged subtrees are re-used.
        rv = self._reducevals.get(logic, _NO_VAL)
        if rv is not _NO_VAL:
            return rv
        reducer, mapper, initializer = logic
        ct = self._count
        if ct == 0:
            return initializer
        c, left, right = self._val, self._left, self._right
        cur = mapper(c)  # mapper is a flatmap: returns a sequence
        if reducer:
            # Fold this node's mapped value(s) with both subtrees' results.
            if len(cur) <= 1:
                cur = cur[0] if len(cur) == 1 else initializer
            else:
                cur = reduce(reducer, cur)
            if left:
                cur = reducer(left._map_reduce(logic), cur)
            if right:
                cur = reducer(cur, right._map_reduce(logic))
        else:
            # No reducer: build a new ViewableList, preserving structure
            # where possible (single mapped value keeps the node shape).
            ltree = left._map_reduce(logic) if left else None
            rtree = right._map_reduce(logic) if right else None
            if len(cur) == 1:
                cur = ViewableList(cur[0], ltree, rtree)
            else:
                cur = viewablelist(cur)
                if left:
                    cur = ltree + cur
                if right:
                    cur = cur + rtree
        self._reducevals[logic] = cur
        return cur
@functools.total_ordering
class MappedList(ViewableIterable):
    '''
    We implement maps lazily; this object represents a mapping function
    applied to a list.
    '''

    def __init__(self, fn, vl):
        # fn: flatmap function (item -> sequence); vl: the underlying list.
        self.inner = vl
        self.fn = fn
        self.realized = _NO_VAL  # cache for the materialized result

    def _realize(self):
        # Materialize (once) the mapped result as a real ViewableList.
        if self.realized is not _NO_VAL:
            return self.realized
        logic = MapReduceLogic(mapper=self.fn,
                               reducer=None,
                               initializer=_EMPTY_LIST)
        self.realized = self.inner._map_reduce(logic)
        return self.realized

    # Sequence behavior is delegated to the realized list.
    def __len__(self):
        return self._realize().__len__()

    def __getitem__(self, index):
        return self._realize().__getitem__(index)

    def __iter__(self):
        return self._realize().__iter__()

    def __add__(self, other):
        return self._realize().__add__(other)

    def __ne__(self, other):
        return self._realize().__ne__(other)

    def __eq__(self, other):
        return self._realize().__eq__(other)

    def __lt__(self, other):
        return self._realize().__lt__(other)

    def filter(self, fn):
        return self._realize().filter(fn)

    def sorted(self, key=_identity):
        return self._realize().sorted(key=key)

    def reduce(self, reducer, initializer=None):
        # Reduce without materializing: our mapper is fused into the logic
        # in _map_reduce below.
        return self._map_reduce(MapReduceLogic(
            reducer=reducer, initializer=initializer))

    def _map_reduce(self, logic):
        # Compose this object's mapping function with the logic's mapper so
        # the underlying tree can run (and memoize) a single fused pass.
        fn = _cached_partial(_compose_flatmaps, (logic.mapper, self.fn))
        mr = MapReduceLogic(reducer=logic.reducer,
                            initializer=logic.initializer,
                            mapper=fn)
        return self.inner._map_reduce(mr)
#
# Internal tree management functions for viewablelist
#
def _lsingle_l(av, x, r):
    # Single left rotation: lift `r` to the root, preserving in-order.
    bv, y, z = r._val, r._left, r._right
    return ViewableList(bv, ViewableList(av, x, y), z)
def _lsingle_r(bv, l, z):
    # Single right rotation: lift `l` to the root, preserving in-order.
    av, x, y = l._val, l._left, l._right
    return ViewableList(av, x, ViewableList(bv, y, z))
def _ldouble_l(av, x, r):
    # Double left rotation: lift r's left child to the root.
    cv, rl, z = r._val, r._left, r._right
    bv, y1, y2 = rl._val, rl._left, rl._right
    return ViewableList(bv,
                        ViewableList(av, x, y1),
                        ViewableList(cv, y2, z))
def _ldouble_r(cv, l, z):
    # Double right rotation: lift l's right child to the root.
    av, x, lr = l._val, l._left, l._right
    bv, y1, y2 = lr._val, lr._left, lr._right
    return ViewableList(bv,
                        ViewableList(av, x, y1),
                        ViewableList(cv, y2, z))
_LTREE_RATIO = 5  # max allowed size ratio between sibling subtrees

def _ltjoin(v, l, r):
    # Join value `v` with subtrees `l` and `r`, rebalancing with a single
    # or double rotation when one side outweighs the other by more than
    # _LTREE_RATIO (weight-balanced tree scheme).
    ln, rn = l._count if l else 0, r._count if r else 0
    if ln + rn < 2:
        return ViewableList(v, l, r)
    if rn > _LTREE_RATIO * ln:
        # right is too big
        rl, rr = r._left, r._right
        rln = rl._count if rl else 0
        rrn = rr._count if rr else 0
        if rln < rrn:
            return _lsingle_l(v, l, r)
        else:
            return _ldouble_l(v, l, r)
    elif ln > _LTREE_RATIO * rn:
        # left is too big
        ll, lr = l._left, l._right
        lln = ll._count if ll else 0
        lrn = lr._count if lr else 0
        if lrn < lln:
            return _lsingle_r(v, l, r)
        else:
            return _ldouble_r(v, l, r)
    else:
        return ViewableList(v, l, r)
def _lpopmin(node):
    # Remove the leftmost (first) element; returns (value, remaining tree).
    left, right = node._left, node._right
    if left is None:
        return (node._val, right)
    popped, tree = _lpopmin(left)
    return popped, _ltjoin(node._val, tree, right)
def _lconcat2(node1, node2):
    # Concatenate two trees by popping node2's minimum to use as the join
    # value between them.
    if node1 is None or node1._count == 0:
        return node2
    if node2 is None or node2._count == 0:
        return node1
    min_val, node2 = _lpopmin(node2)
    return _ltjoin(min_val, node1, node2)
def _lconcat3(v, l, r):
    # Join `l`, `v`, `r` where the two trees may have very different sizes:
    # recurse down the larger side until the pieces are balanced enough to
    # join directly.
    if l is None or r is None:
        return _ltjoin(v, l, r)
    else:
        n1, n2 = l._count, r._count
        if _LTREE_RATIO * n1 < n2:
            v2, l2, r2 = r._val, r._left, r._right
            return _ltjoin(v2, _lconcat3(v, l, l2), r2)
        elif _LTREE_RATIO * n2 < n1:
            v1, l1, r1 = l._val, l._left, l._right
            return _ltjoin(v1, l1, _lconcat3(v, r1, r))
        else:
            return ViewableList(v, l, r)
def _lsplit_lt(node, x):
    # Return the subtree of elements whose index is < x (a prefix).
    if node is None or node._count == 0:
        return node
    left, right = node._left, node._right
    lc = left._count if left else 0
    if x < lc:
        return _lsplit_lt(left, x)
    elif lc < x:
        return _lconcat3(node._val, left, _lsplit_lt(right, x - (lc + 1)))
    else:
        return node._left
def _lsplit_gte(node, x):
    # Return the subtree of elements whose index is >= x (a suffix).
    if node is None or node._count == 0:
        return node
    left, right = node._left, node._right
    lc = left._count if left else 0
    if lc < x:
        return _lsplit_gte(node._right, x - (lc + 1))
    elif x < lc:
        return _lconcat3(node._val, _lsplit_gte(left, x), right)
    else:
        return _lconcat2(ViewableList(node._val, None, None), right)
#
# Implementation: viewabledict
#
# Sentinel cache keys used in a node's _reducevals memo to store derived
# views (keys/values/items lists, and list->dict conversions).
_CACHE_KEY_KEYS = object()
_CACHE_KEY_VALUES = object()
_CACHE_KEY_ITEMS = object()
_CACHE_KEY_FROM_LIST = object()
def _first(l):
    # Key function: first element of a pair.
    return l[0]
def _second(l):
    # Key function: second element of a pair.
    return l[1]
def _from_viewablelist(l):
    # Convert a ViewableList of (key, value) pairs into a ViewableDict,
    # preserving the tree structure and caching the result on each node.
    #
    # Fixes over the original, which could not run as written:
    # - used non-existent attributes `l.val`/`l.left`/`l.right` instead of
    #   `l._val`/`l._left`/`l._right`;
    # - tested `l is _NO_VAL` (never true for a node) instead of checking
    #   the node's value sentinel, and did so after already reading it;
    # - referenced undefined names `key`/`value` when building the dict;
    # - recursed via two differently-spelled, undefined methods
    #   (`to_viewabledict` / `to_viewable_dict`).
    #
    # NOTE(review): the pairs are assumed to already be in sorted key
    # order (as produced by e.g. _combine_groups) — verify against callers.
    if l is None or l._val is _NO_VAL:
        return _EMPTY_DICT
    pair = l._val
    if len(pair) != 2:
        raise ValueError(
            'Dictionary must be given a list of pairs; {} is not a pair'
            .format(repr(pair)))
    cache = l._reducevals
    ret = cache.get(_CACHE_KEY_FROM_LIST)
    if ret:
        return ret
    left, right = l._left, l._right
    ret = ViewableDict(pair[0], pair[1],
                       _from_viewablelist(left) if left else None,
                       _from_viewablelist(right) if right else None)
    cache[_CACHE_KEY_FROM_LIST] = ret
    return ret
class ViewableDict(Mapping):
    '''
    A tree-based implementation of a dictionary that remembers previous
    results of mapreduces and can re-use them for later runs.
    '''
    __slots__ = ('_left', '_right', '_key', '_val', '_count', '_reducevals')

    def __init__(self, key, val, left, right):
        # A node holds one key/value pair plus optional subtrees; the empty
        # dictionary is represented by a node whose key is _NO_VAL.
        self._left = None
        self._right = None
        self._count = 0
        self._key = key
        self._val = val
        self._count = 0 if key is _NO_VAL else 1
        self._reducevals = {}  # memoized map-reduce results, keyed by logic
        if left:
            self._left = left
            self._count += self._left._count
        if right:
            self._right = right
            self._count += self._right._count

    def __len__(self):
        '''
        >>> len(viewabledict({}))
        0
        >>> len(viewabledict({1:1,2:2}))
        2
        >>> len(viewabledict({1:1,2:2}) + viewabledict({3:3}))
        3
        '''
        return self._count

    def __getitem__(self, index):
        '''
        Key lookup, or, for slices, a sub-dictionary of keys strictly
        between the slice bounds (both bounds are exclusive; steps are
        not supported).
        '''
        if isinstance(index, slice):
            start, stop, step = index.start, index.stop, index.step
            if step is not None:
                raise ValueError('Slices with steps are not supported')
            cur = self
            if start is not None:
                cur = _dsplit_gt(cur, start)
            if stop is not None:
                cur = _dsplit_lt(cur, stop)
            # NOTE(review): the split helpers can return None for an empty
            # result; callers appear to tolerate this — verify.
            return cur
        else:
            v = self.get(index, _NO_VAL)
            if v is _NO_VAL:
                raise KeyError(index)
            return v

    def get(self, index, default=None):
        '''
        >>> l = viewabledict({1:1,2:2,3:3})
        >>> l[1], l[2], l[3]
        (1, 2, 3)
        >>> 9 in l, 1 in l
        (False, True)
        >>> viewabledict({}).get(1, 'missing')
        'missing'
        '''
        # The empty dictionary stores the _NO_VAL sentinel as its key;
        # comparing a real key against it would raise a TypeError on
        # Python 3, so bail out early. (Bug fix: the original crashed on
        # lookups in an empty viewabledict.)
        if self._count == 0:
            return default
        key = self._key
        if index <= key:
            if index == key:
                return self._val
            left = self._left
            if left is None:
                return default
            else:
                return left.get(index, default)
        else:
            right = self._right
            if right is None:
                return default
            else:
                return right.get(index, default)

    def _depth(self):
        # Height of the tree (0 for the empty dict); used in doctests to
        # verify balancing.
        depth = 0 if self._key is _NO_VAL else 1
        l, r = self._left, self._right
        if l is not None:
            depth = max(depth, l._depth() + 1)
        if r is not None:
            depth = max(depth, r._depth() + 1)
        return depth

    def set(self, key, val):
        '''
        Returns a new viewable dictionary with a given key set to a given value
        >>> viewabledict({3:3, 4:4}).set(3, 0)
        viewabledict({3: 0, 4: 4})
        '''
        return _dadd(self, key, val)

    def remove(self, key):
        '''
        Returns a new viewable dictionary with a given key removed.
        >>> viewabledict({3:3, 4:4}).remove(3)
        viewabledict({4: 4})
        >>> viewabledict({3:3, 4:4}).remove(99)
        viewabledict({3: 3, 4: 4})
        '''
        # Union of the strictly-less and strictly-greater halves drops the
        # key if present.
        return (_dunion(_dsplit_lt(self, key), _dsplit_gt(self, key))
                or _EMPTY_DICT)

    def __add__(self, other):
        '''
        >>> viewabledict({3:3})._depth()
        1
        >>> (viewabledict({}) + viewabledict({3:3}))._depth()
        1
        >>> (viewabledict({0:0}) + viewabledict({1:1,2:2,3:3}))._depth()
        3
        >>> (viewabledict({0:0,1:1,2:2,3:3,4:4}) + viewabledict({5:5}))._depth()
        3
        >>> (viewabledict({0:0,1:1,2:2,3:3,4:4}) + viewabledict({5:5})
        ...  + viewabledict({6:6}))._depth()
        4
        '''
        # Dictionary union; on key conflicts the right operand wins
        # (see _dunion).
        if not isinstance(other, ViewableDict):
            other = viewabledict(other)
        return _dunion(self, other)

    def __repr__(self):
        return 'viewabledict({0})'.format(str(self.to_dict()))

    def __str__(self):
        return self.__repr__()

    def __iter__(self):
        # Iterate keys in sorted order (Mapping protocol).
        for k, v in self.iteritems():
            yield k

    def iteritems(self):
        '''
        >>> list(viewabledict({}).iteritems())
        []
        >>> list(viewabledict({1:1, 2:2}).iteritems())
        [(1, 1), (2, 2)]
        >>> list((viewabledict({1:1,2:2}) + viewabledict({3:3})).iteritems())
        [(1, 1), (2, 2), (3, 3)]
        >>> list(viewabledict({1:1,2:2,3:3}).iteritems())
        [(1, 1), (2, 2), (3, 3)]
        '''
        # Iterative in-order traversal with an explicit stack.
        if self._count == 0:
            return
        cur, seconds = self, []
        while True:
            while cur is not None:
                seconds.append(cur)
                cur = cur._left
            if not seconds:
                return
            cur = seconds.pop()
            yield (cur._key, cur._val)
            cur = cur._right

    def items(self):
        '''
        >>> viewabledict({}).items()
        viewablelist([])
        >>> viewabledict({1:1, 2:2}).items()
        viewablelist([viewablelist([1, 1]), viewablelist([2, 2])])
        >>> (viewabledict({1:1,2:2}) + viewabledict({3:3})).items()
        viewablelist([viewablelist([1, 1]), viewablelist([2, 2]), viewablelist([3, 3])])
        >>> viewabledict({1:1,2:2,3:3}).items()
        viewablelist([viewablelist([1, 1]), viewablelist([2, 2]), viewablelist([3, 3])])
        '''
        # Build (and cache) a viewablelist of [key, value] pairs that
        # mirrors this tree's structure.
        if self._count == 0:
            return _EMPTY_LIST
        cache = self._reducevals
        ret = cache.get(_CACHE_KEY_ITEMS)
        if ret:
            return ret
        l, r = self._left, self._right
        ret = ViewableList(viewablelist([self._key, self._val]),
                           l.items() if l else None,
                           r.items() if r else None)
        cache[_CACHE_KEY_ITEMS] = ret
        return ret

    def keys(self):
        '''
        >>> viewabledict({1:2, 2:3}).keys()
        viewablelist([1, 2])
        '''
        # Build (and cache) the sorted key list, mirroring the tree shape.
        if self._count == 0:
            return _EMPTY_LIST
        cache = self._reducevals
        ret = cache.get(_CACHE_KEY_KEYS)
        if ret:
            return ret
        l, r = self._left, self._right
        ret = ViewableList(self._key,
                           l.keys() if l else None,
                           r.keys() if r else None)
        cache[_CACHE_KEY_KEYS] = ret
        return ret

    def values(self):
        '''
        >>> viewabledict({1:2, 2:3}).values()
        viewablelist([2, 3])
        >>> viewabledict().values()
        viewablelist([])
        '''
        # Build (and cache) the value list in key order.
        if self._count == 0:
            return _EMPTY_LIST
        cache = self._reducevals
        ret = cache.get(_CACHE_KEY_VALUES)
        if ret:
            return ret
        l, r = self._left, self._right
        ret = ViewableList(self._val,
                           l.values() if l else None,
                           r.values() if r else None)
        cache[_CACHE_KEY_VALUES] = ret
        return ret

    def memoized(self, fn):
        '''
        When using ViewableDictionary as a record or struct, you often want
        to process the whole object rather than reducing the key-value pairs.
        This function lets you transform the dictionary in whatever way you
        want, and caches the result so that it will not be recomputed if the
        dictionary hasn't changed.
        >>> d = viewabledict({'name':'Bob', 'age':20})
        >>> def compute_birth(person):
        ...     print('Computing birth year')
        ...     return person.set('birth_year', 2017 - person['age'])
        >>> sorted((k,v) for k,v in d.memoized(compute_birth).items())
        Computing birth year
        [('age', 20), ('birth_year', 1997), ('name', 'Bob')]
        >>> sorted((k,v) for k,v in d.memoized(compute_birth).items())
        [('age', 20), ('birth_year', 1997), ('name', 'Bob')]
        '''
        cache = self._reducevals
        fkey = fnkey(fn)
        if fkey in cache:
            return cache[fkey]
        ret = fn(self)
        cache[fkey] = ret
        return ret

    def __ne__(self, other):
        return not self == other

    def __eq__(self, other):
        '''
        >>> viewabledict({}) == viewabledict({})
        True
        >>> viewabledict({}) != viewabledict({1:1})
        True
        >>> viewabledict({1:1,3:3}) != viewabledict({1:1,2:2})
        True
        >>> viewabledict({1:1,2:2}) + viewabledict({3:3}) == viewabledict({1:1,2:2,3:3})
        True
        >>> viewabledict({}) + viewabledict({3:3}) == viewabledict({3:3})
        True
        '''
        # Pairwise comparison of sorted items; _NO_VAL fillvalue makes
        # different sizes compare unequal.
        # NOTE(review): `other` must also provide iteritems() — plain dicts
        # on Python 3 do not; verify against callers.
        if not isinstance(other, Mapping):
            return False
        return all(a == b for a, b in zip_longest(
            self.iteritems(), other.iteritems(), fillvalue=_NO_VAL))

    def __hash__(self):
        '''
        >>> hash(viewabledict({1:1,2:2}) + viewabledict({3:3})) == hash(viewabledict({1:1,2:2,3:3}))
        True
        >>> hash(viewabledict({1:1})) != hash(viewabledict({2:2}))
        True
        '''
        # Hash depends only on contents (in key order), not tree shape.
        hsh = 0
        for p in self.iteritems():
            hsh = (hsh * 31) + hash(p)
        return hsh

    def to_dict(self):
        # Materialize as a plain dict via in-order traversal.
        ret = {}

        def visit(node):
            left, key, val, right = node._left, node._key, node._val, node._right
            if left is not None:
                visit(left)
            if val is not _NO_VAL:
                ret[key] = val
            if right is not None:
                visit(right)

        visit(self)
        return ret

    def map_values(self, mapper):
        '''
        This can be used to efficiently transform just the values of a viewable
        dictionary. This specialized method exists, because we can efficiently
        recreate the same tree structure when the keys are known not to change.
        The mapping function is given two arguments: the key and the value,
        and is expected to return a transformed value.
        For other chainable transformations, use items(), keys(), or values(),
        which all return viewable lists.
        >>> d = viewabledict({'chiban':7, 'bob':40})
        >>> d.map_values(lambda name, age: age + 1)
        viewabledict({'bob': 41, 'chiban': 8})
        '''
        mr = MapReduceLogic(mapper=mapper,
                            reducer=None)
        return self._map_reduce(mr)

    def _map_reduce(self, logic):
        # Core memoized traversal; results are cached per-node keyed by the
        # MapReduceLogic instance.
        rv = self._reducevals.get(logic, _NO_VAL)
        if rv is not _NO_VAL:
            return rv
        reducer, mapper, initializer = logic
        ct = self._count
        if ct == 0:
            return initializer
        key, val, left, right = self._key, self._val, self._left, self._right
        newval = mapper(key, val)
        if reducer is None:
            # Pure value mapping: rebuild the tree, re-using unchanged nodes.
            ltree = left._map_reduce(logic) if left else None
            rtree = right._map_reduce(logic) if right else None
            if newval is val and ltree is left and rtree is right:
                cur = self  # nothing has changed, return ourself
            else:
                cur = ViewableDict(key, newval, ltree, rtree)
        else:
            # Fold the mapped value with both subtrees' results.
            # (Bug fix: the original never initialized `cur` for leaf nodes
            # — a NameError — and passed `newval` instead of the running
            # accumulator when both subtrees were present, dropping the
            # left subtree's contribution.)
            cur = newval
            if left:
                cur = reducer(left._map_reduce(logic), cur)
            if right:
                cur = reducer(cur, right._map_reduce(logic))
        self._reducevals[logic] = cur
        return cur
#
# Internal tree management functions for viewabledict
#
def _dsingle_l(ak, av, x, r):
    # Single left rotation: lift `r` to the root, preserving key order.
    bk, bv, y, z = r._key, r._val, r._left, r._right
    return ViewableDict(bk, bv, ViewableDict(ak, av, x, y), z)
def _dsingle_r(bk, bv, l, z):
    # Single right rotation: lift `l` to the root, preserving key order.
    ak, av, x, y = l._key, l._val, l._left, l._right
    return ViewableDict(ak, av, x, ViewableDict(bk, bv, y, z))
def _ddouble_l(ak, av, x, r):
    # Double left rotation: lift r's left child to the root.
    ck, cv, rl, z = r._key, r._val, r._left, r._right
    bk, bv, y1, y2 = rl._key, rl._val, rl._left, rl._right
    return ViewableDict(bk, bv,
                        ViewableDict(ak, av, x, y1),
                        ViewableDict(ck, cv, y2, z))
def _ddouble_r(ck, cv, l, z):
    # Double right rotation: lift l's right child to the root.
    ak, av, x, lr = l._key, l._val, l._left, l._right
    bk, bv, y1, y2 = lr._key, lr._val, lr._left, lr._right
    return ViewableDict(bk, bv,
                        ViewableDict(ak, av, x, y1),
                        ViewableDict(ck, cv, y2, z))
_DTREE_RATIO = 5  # max allowed size ratio between sibling subtrees

def _dtjoin(k, v, l, r):
    # Join key/value with subtrees `l` and `r`, rebalancing with a single
    # or double rotation when one side outweighs the other by more than
    # _DTREE_RATIO (weight-balanced tree scheme).
    ln, rn = l._count if l else 0, r._count if r else 0
    if ln + rn < 2:
        return ViewableDict(k, v, l, r)
    if rn > _DTREE_RATIO * ln:
        # right is too big
        rl, rr = r._left, r._right
        rln = rl._count if rl else 0
        rrn = rr._count if rr else 0
        if rln < rrn:
            return _dsingle_l(k, v, l, r)
        else:
            return _ddouble_l(k, v, l, r)
    elif ln > _DTREE_RATIO * rn:
        # left is too big
        ll, lr = l._left, l._right
        lln = ll._count if ll else 0
        lrn = lr._count if lr else 0
        if lrn < lln:
            return _dsingle_r(k, v, l, r)
        else:
            return _ddouble_r(k, v, l, r)
    else:
        return ViewableDict(k, v, l, r)
def _dadd(node, k, v):
    # Insert (or replace) key `k` with value `v`, returning a new tree.
    if node is None:
        return ViewableDict(k, v, None, None)
    key, val, l, r = node._key, node._val, node._left, node._right
    if k < key:
        return _dtjoin(key, val, _dadd(l, k, v), r)
    elif key < k:
        return _dtjoin(key, val, l, _dadd(r, k, v))
    else:
        # Same key: replace the value in place (no rebalance needed).
        return ViewableDict(key, v, l, r)
def _dconcat3(k, v, l, r):
    # Join `l`, (k, v), `r` where all keys in `l` are < k < all keys in `r`
    # and the trees may have very different sizes: recurse down the larger
    # side until the pieces are balanced enough to join directly.
    if l is None:
        return _dadd(r, k, v)
    elif r is None:
        return _dadd(l, k, v)
    else:
        n1, n2 = l._count, r._count
        if _DTREE_RATIO * n1 < n2:
            k2, v2, l2, r2 = r._key, r._val, r._left, r._right
            # Bug fix: the original called the undefined name `_tjoin`
            # here (a NameError at runtime); the dict-tree join is
            # `_dtjoin`, matching the mirrored branch below and the
            # list-tree counterpart `_lconcat3`.
            return _dtjoin(k2, v2, _dconcat3(k, v, l, l2), r2)
        elif _DTREE_RATIO * n2 < n1:
            k1, v1, l1, r1 = l._key, l._val, l._left, l._right
            return _dtjoin(k1, v1, l1, _dconcat3(k, v, r1, r))
        else:
            return ViewableDict(k, v, l, r)
def _dsplit_lt(node, x):
    # Return the subtree of entries whose keys are strictly < x.
    if node is None or node._count == 0:
        return node
    k = node._key
    if x < k:
        return _dsplit_lt(node._left, x)
    elif k < x:
        return _dconcat3(node._key, node._val, node._left, _dsplit_lt(node._right, x))
    else:
        return node._left
def _dsplit_gt(node, x):
    # Return the subtree of entries whose keys are strictly > x.
    if node is None or node._count == 0:
        return node
    k = node._key
    if k < x:
        return _dsplit_gt(node._right, x)
    elif x < k:
        return _dconcat3(node._key, node._val, _dsplit_gt(node._left, x), node._right)
    else:
        return node._right
def _dunion(tree1, tree2):
    # Union of two trees; on duplicate keys tree2's value wins (tree2's
    # root is kept while tree1 is split around it).
    if tree1 is None or tree1._count == 0:
        return tree2
    if tree2 is None or tree2._count == 0:
        return tree1
    ak, av, l, r = tree2._key, tree2._val, tree2._left, tree2._right
    l1 = _dsplit_lt(tree1, ak)
    r1 = _dsplit_gt(tree1, ak)
    return _dconcat3(ak, av, _dunion(l1, l), _dunion(r1, r))
#
# _cached_partial and utilities
#
_PARTIALS_CACHE = {}
def _cached_partial(fn, value_to_apply):
'''
This is used to wrap functions in a way that preserves identity of
the resulting function objects. For example,
>>> first = lambda l: l[0]
>>> sorter_by = lambda keyfn : (lambda l: sorted(l, key=keyfn))
>>> sorter_by(first) == sorter_by(first)
False
>>> _cached_partial(sorter_by, first) == _cached_partial(sorter_by, first)
True
'''
cache_key = (fn, value_to_apply)
ret = _PARTIALS_CACHE.get(cache_key)
if not ret:
ret = fn(value_to_apply)
_PARTIALS_CACHE[cache_key] = ret
return ret
def _l(*items):
    # Shorthand: build a viewablelist from positional arguments.
    return viewablelist(items)
def _wrap_in_list(v):
    # Identity flatmap: wrap a single value as a one-element leaf list.
    return ViewableList(v, None, None)
def _filter_to_flat(fn):
    # Turn a predicate into a flatmap function: keep -> [x], drop -> ().
    return lambda x: _wrap_in_list(x) if fn(x) else ()
def _compose_flatmaps(fns):
    # Compose two flatmap functions (f1 applied after f2); _wrap_in_list
    # and None both act as the identity and are elided.
    f1, f2 = fns
    if f1 is _wrap_in_list or f1 is None: return f2
    if f2 is _wrap_in_list or f2 is None: return f1
    return lambda v: viewablelist([v1 for v2 in f2(v) for v1 in f1(v2)])
def _wrap_fn_in_list(fn):
    # Lift a plain item->item function into a flatmap (item -> [fn(item)]).
    return lambda x: _wrap_in_list(fn(x))
def _mergesorted(i1, i2, keyfn=_identity):
    '''
    Lazily merge two already-sorted iterators. The merge is stable: on
    ties the element from `i1` is yielded first.
    >>> list(_mergesorted(iter([]), iter([])))
    []
    >>> list(_mergesorted(iter([5]), iter([])))
    [5]
    >>> list(_mergesorted(iter([]), iter([5])))
    [5]
    >>> list(_mergesorted(iter([1,5]), iter([])))
    [1, 5]
    >>> list(_mergesorted(iter([]), iter([1,5])))
    [1, 5]
    >>> list(_mergesorted(iter([1,5]), iter([3])))
    [1, 3, 5]
    >>> list(_mergesorted(iter([9]), iter([1,5])))
    [1, 5, 9]
    >>> list(_mergesorted(iter([0]), iter([1,5])))
    [0, 1, 5]
    '''
    v1, v2, _StopIteration = _NO_VAL, _NO_VAL, StopIteration
    try:
        v1 = next(i1)
    except _StopIteration:
        pass
    try:
        v2 = next(i2)
        if v1 is not _NO_VAL:
            while True:
                # `<=` keeps the merge stable (i1 wins ties).
                if keyfn(v1) <= keyfn(v2):
                    yield v1
                    try:
                        v1 = next(i1)
                    except _StopIteration:
                        v1 = _NO_VAL
                        break
                else:
                    yield v2
                    try:
                        v2 = next(i2)
                    except _StopIteration:
                        v2 = _NO_VAL
                        break
    except _StopIteration:
        pass
    # Drain whichever side still has elements (at most one of these two
    # sections does any work; _NO_VAL marks an exhausted side).
    if v1 is not _NO_VAL:
        yield v1
        for v in i1:
            yield v
    if v2 is not _NO_VAL:
        yield v2
        for v in i2:
            yield v
def _create_group(keyfn):
    # Mapper for group_by: item -> {key: [item]} singleton dictionary.
    return lambda val: viewabledict({keyfn(val): _l(val)})
def _combine_groups(groups1, groups2):
    # Reducer for group_by: merge two group dictionaries. When both contain
    # the same key, groups1's items precede groups2's (order preservation).
    prev_key, prev_values = _NO_VAL, None
    result = []
    for (key, values) in _mergesorted(groups1.iteritems(), groups2.iteritems(), _first):
        if key == prev_key:
            values = prev_values + values
        elif prev_key is not _NO_VAL:
            result.append((prev_key, prev_values))
        prev_key, prev_values = key, values
    # add the last group
    if prev_key is not _NO_VAL:
        result.append((prev_key, prev_values))
    return viewabledict(result)
def _merge_sorted_lists(keyfn):
    # Returns a reducer that merges two key-sorted viewablelists into one.
    def merge(l1, l2):
        # TODO might be nice to have a specialized version of this which
        # re-uses uninterrupted subtrees from one side; it would boost
        # lists that are mostly sorted already.
        min1, max1 = keyfn(l1[0]), keyfn(l1[-1])
        min2, max2 = keyfn(l2[0]), keyfn(l2[-1])
        # Fast paths: when the key ranges do not overlap, plain
        # concatenation suffices.
        if max1 <= min2:
            return l1 + l2
        if max2 < min1:  # not "<=" here because it would make the sort unstable
            return l2 + l1
        return viewablelist(list(_mergesorted(iter(l1), iter(l2), keyfn)))
    return merge
#
# Other
#
# Shared sentinels: _NO_VAL marks "no element here" inside tree nodes and
# caches; the two singletons below are the canonical empty collections.
_NO_VAL = object()
_EMPTY_LIST = ViewableList(_NO_VAL, None, None)
_EMPTY_DICT = ViewableDict(_NO_VAL, _NO_VAL, None, None)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "pschanely/ScenicOverlook",
"path": "scenicoverlook/__init__.py",
"copies": "1",
"size": "44017",
"license": "bsd-3-clause",
"hash": 4939636739225903000,
"line_mean": 30.942670537,
"line_max": 100,
"alpha_frac": 0.5308176386,
"autogenerated": false,
"ratio": 3.4580092701704768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9467058039914229,
"avg_score": 0.004353773771249462,
"num_lines": 1378
} |
from __future__ import absolute_import, division, print_function
from collections import namedtuple
from io import StringIO
from shlex import shlex
from typing import List, Iterable
from . import _gpg
Entry = namedtuple("Entry", ["key", "user", "password", "notes"])
def _normalized_key(key: str) -> str:
return key.replace(" ", "_").lower()
class Store:
    """Password store: a list of entries loaded from a (possibly
    GPG-encrypted) text database."""

    def __init__(self, path: str, entries: Iterable[Entry]) -> None:
        """Create a store from already-parsed entries.

        Keys are normalized (lowercased, spaces -> underscores) on the way in.
        """
        self.entries = [e._replace(key=_normalized_key(e.key)) for e in entries]
        self.path = path

    def search(self, key_pattern: str, user_pattern: str) -> List[Entry]:
        """Search database for given key and user pattern.

        Both patterns are substring matches; the key pattern is normalized
        first so it matches the normalized stored keys.
        """
        key_pattern = _normalized_key(key_pattern)
        results = []
        for entry in self.entries:
            if key_pattern in entry.key and user_pattern in entry.user:
                results.append(entry)
        # sort results according to key (stability of sorted() ensures that
        # the order of accounts for any given key remains untouched)
        return sorted(results, key=lambda e: e.key)

    @staticmethod
    def load(path: str) -> "Store":
        """Load password store from file, decrypting if necessary."""
        if _gpg.is_encrypted(path):
            src_bytes = _gpg.decrypt(path)
        else:
            # Use a context manager so the file handle is closed promptly.
            # (The original called open(...).read() and leaked the handle
            # until garbage collection.)
            with open(path, "rb") as f:
                src_bytes = f.read()
        src = src_bytes.decode("utf-8")
        # parse database source
        ext = _gpg.unencrypted_ext(path)
        assert ext not in [
            ".yml",
            ".yaml",
        ], "YAML support was removed in version 0.12.0"
        entries = _parse_entries(src)
        return Store(path, entries)
class SyntaxError(Exception):
    """Raised for malformed lines in the password database.

    The message includes the 1-based line number, the reason (parser
    state), and the offending line.
    """

    def __init__(self, lineno: int, line: str, reason: str) -> None:
        message = "line %s: %s (%r)" % (lineno + 1, reason, line)
        super().__init__(message)
# Parser states (the strings double as the `reason` in SyntaxError messages).
_EXPECT_ENTRY = "expecting entry"
_EXPECT_ENTRY_OR_NOTES = "expecting entry or notes"
def _parse_entries(src: str) -> List[Entry]:
    """Parse the password database source into a list of entries.

    Line format: ``KEY [USER] PASSWORD [NOTES...]`` (shell-style quoting).
    Indented lines continue the notes of the preceding entry; blank lines
    and lines starting with ``#`` are skipped (and terminate a notes
    section).

    Raises SyntaxError (the module-local class) on malformed input.
    """
    entries = []  # type: List[Entry]
    state = _EXPECT_ENTRY
    for lineno, line in enumerate(src.splitlines()):
        # empty lines are skipped (but also terminate the notes section)
        sline = line.strip()
        if not sline or line.startswith("#"):
            state = _EXPECT_ENTRY
            continue
        # non-empty line with leading spaces is interpreted as a notes line
        if line[0] in [" ", "\t"]:
            if state != _EXPECT_ENTRY_OR_NOTES:
                raise SyntaxError(lineno, line, state)
            # append line to the previous entry's notes
            notes = entries[-1].notes
            if notes:
                notes += "\n"
            notes += sline
            entries[-1] = entries[-1]._replace(notes=notes)
            continue
        # otherwise, parse as an entry (chain the shlex ValueError so the
        # original cause stays visible in tracebacks)
        sio = StringIO(line)
        lexer = shlex(sio, posix=True)  # type: ignore
        lexer.whitespace_split = True
        try:
            key = lexer.get_token()
        except ValueError as e:
            raise SyntaxError(lineno, line, str(e)) from e
        key = key.rstrip(":")
        assert key
        try:
            user = lexer.get_token()
        except ValueError as e:
            raise SyntaxError(lineno, line, str(e)) from e
        try:
            password = lexer.get_token()
        except ValueError as e:
            raise SyntaxError(lineno, line, str(e)) from e
        if not user and not password:
            raise SyntaxError(lineno, line, state)
        if not password:
            # only a single field after the key: treat it as the password
            # (no user). The original also had a dead `password = password`
            # branch and a dead notes assignment here; both removed.
            password = user
            user = u""
        # remainder of the line (already exhausted for short lines) is notes
        notes = sio.read().strip()
        entries.append(Entry(key, user, password, notes))
        state = _EXPECT_ENTRY_OR_NOTES
    return entries
| {
"repo_name": "catch22/pw",
"path": "pw/store.py",
"copies": "1",
"size": "3891",
"license": "mit",
"hash": -2185641017606514400,
"line_mean": 29.3984375,
"line_max": 134,
"alpha_frac": 0.568234387,
"autogenerated": false,
"ratio": 4.152614727854856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5220849114854856,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import logging
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import plot, xlabel, ylabel, legend
from scipy.signal import butter, filtfilt
from pambox.central import EC
from pambox.speech import MrSepsm
log = logging.getLogger(__name__)
Ears = namedtuple('Ears', ['left', 'right'])
class BsEPSM(MrSepsm):
"""Binaural implementation of the sEPSM model.
Implementation used in [chabot-leclerc2016]_.
References
----------
.. [chabot-leclerc2016]
"""
    def __init__(self,
                 fs=22050,
                 name='BinauralMrSepsm',
                 cf=MrSepsm._default_center_cf,
                 modf=MrSepsm._default_modf,
                 downsamp_factor=10,
                 noise_floor=0.001,
                 snr_env_limit=0.001,
                 sigma_e=0.25,
                 sigma_d=105e-6,
                 fast_cancel=True,
                 debug=False,
                 win_len=0.02,
                 ec_padding_windows=10
                 ):
        """Initialize the binaural mr-sEPSM model.

        The first group of parameters (fs, cf, modf, downsamp_factor,
        noise_floor, snr_env_limit) is forwarded to the monaural MrSepsm
        base model; `sigma_e`/`sigma_d` and the window parameters configure
        the EC (equalization-cancellation) stage.

        Parameters
        ----------
        fs : int
            Sampling frequency in Hz.
        sigma_e, sigma_d : float
            EC-process gain and delay jitter parameters.
        fast_cancel : bool
            Passed through to the EC processor.
        win_len : float
            EC analysis window length, in seconds.
        ec_padding_windows : int
            Number of padding windows used by the EC processor.
        """
        MrSepsm.__init__(self, fs, cf, modf, downsamp_factor, noise_floor,
                         snr_env_limit, output_time_signals=True)
        self.name = name
        self.overlap = 0.5  # EC analysis windows overlap by 50%
        self.win_len = win_len
        self.sigma_e = sigma_e
        self.sigma_d = sigma_d
        self.ec_padding_windows = ec_padding_windows
        self.fast_cancel = fast_cancel
        self._key_alpha = 'alpha'
        self._key_tau = 'tau'
        self.debug = debug
        self.env_lp_cutoff = 770  # Hz, from breebaart2001binaurala
        self.env_lp_order = 5  # from breebaart2001binaurala
        self._ec_process = EC(fs, win_len=self.win_len, overlap=self.overlap,
                              sigma_d=self.sigma_d, sigma_e=self.sigma_e,
                              fast_cancel=fast_cancel,
                              padding_windows=self.ec_padding_windows)
def _ec_equalize(self, left, right):
alphas, taus = self._ec_process.equalize(left, right, self.cf)
return alphas, taus
def _ec_cancel(self, left, right, alphas, taus):
cancelled_mix = self._ec_process.cancel(
left, right, alphas, taus)
cancelled_mix = np.abs(cancelled_mix)
return cancelled_mix
def _apply_bu_process(self, left, right, bands=None):
"""Apply EC process between left and right envelopes (mix and noise),
apply sEPSM processing to the resulting signals and calculate the
SNR_env.
Parameters
----------
left, right : dictionary
Outputs of the mr-sEPSM model. The dictionaries must have a
'chan_envs' key.
bands : list of int
Indices of the channels to process. If `None`, all channels are
processes. Defaults to None.
Returns
-------
bu_mr_snr_env_matrix : ndarray
Multi-resolution SNRenv matrix of shape N_CHAN x N_SAMPLES.
alphas : ndarray of float
Optimal gains, in samples, calculated by the "equalize"
process.
tau : ndarray of integers
Optimal time delays, in samples, calculated by the "equalize"
process.
"""
left_mix_envs = left['chan_envs'][-2]
right_mix_envs = right['chan_envs'][-2]
left_noise_envs = left['chan_envs'][-1]
right_noise_envs = right['chan_envs'][-1]
# ... then we find the alpha and tau parameters the minimize the noise
# energy...
alphas, taus = self._ec_equalize(left_noise_envs, right_noise_envs)
# ... then we perform the cancellation with those alphas and taus for
# the mixture and noise...
cancelled_mix = self._ec_cancel(left_mix_envs, right_mix_envs, alphas,
taus)
cancelled_noise = self._ec_cancel(left_noise_envs, right_noise_envs,
alphas, taus)
# ... then we apply the same processing as the mr-sEPSM, until we
# have the multi-resolution excitation patterns...
mix_mr_exc_ptns = self._apply_sepsm_processing(cancelled_mix[
np.newaxis])
noise_mr_exc_ptns = self._apply_sepsm_processing(cancelled_noise[
np.newaxis])
# --- BU SNRenv ---
# ... we can finally calculate the BU SNRenv by calculating the
# SNRenv.
bu_mr_snr_env_matrix, _ = self._snr_env(
mix_mr_exc_ptns,
noise_mr_exc_ptns
)
return bu_mr_snr_env_matrix, alphas, taus, mix_mr_exc_ptns, noise_mr_exc_ptns
def _apply_sepsm_processing(self, envs):
filtered_envs, _ = self._mod_filtering(envs)
mr_exc_ptns = self._mr_env_powers(envs, filtered_envs)
return mr_exc_ptns[0]
def _apply_be_process(self, left, right):
# --- Better ear (BE) ---
be_mr_snr_env_matrix = self._better_ear(
left['mr_snr_env_matrix'],
right['mr_snr_env_matrix'],
left['bands_above_thres_idx'],
right['bands_above_thres_idx']
)
return be_mr_snr_env_matrix
def _better_ear(self, left, right, left_idx, right_idx):
"""Return the better-ear SNRenv for bands above threshold only.
Parameters
----------
left, right: ndarray
SNR_env values, of shape (N_CHAN, N_WIN).
left_idx, right_idx: array_like
Index of the bands above threshold for the left and right ear,
respectively.
"""
left_idx = np.asarray(left_idx)
right_idx = np.asarray(right_idx)
be_snr_env = np.zeros_like(left)
for side, idx in zip((left, right), (left_idx, right_idx)):
try:
be_snr_env[idx] = np.maximum(be_snr_env[idx], side[idx])
except IndexError:
# BE SNRenv is not modified.
pass
return be_snr_env
def _calc_bu_bands_above_thres(self, left, right):
"""Calculate bands above threshold for binaural unmasking.
A band is considered above threshold if both bands are above
threshold (logical 'and').
Parameters
----------
left, right : dictionaries
Outputs from the mr-sEPSM prediction. Must have a
'bands_above_thres_idx' key.
Returns
-------
idx : array
Indices of the bands that are above threshold in at least one side.
"""
left_bands_idx = left["bands_above_thres_idx"]
right_bands_idx = right["bands_above_thres_idx"]
# BU mask is when _both sides_ are above threshold.
indices = list(set(left_bands_idx) & set(right_bands_idx))
return indices
def _calc_be_bands_above_thres(self, left, right):
"""True if at least one side is above threshold.
Parameters
----------
left, right : dictionaries
Outputs from the mr-sEPSM prediction. Must have a
'bands_above_thres_idx' key.
Returns
-------
idx : array
Indices of the bands that are above threshold in at least one side.
"""
left_bands_idx = left["bands_above_thres_idx"]
right_bands_idx = right["bands_above_thres_idx"]
indices = list(set(left_bands_idx) | set(right_bands_idx))
return indices
def _apply_ba_process(self, be, bu, be_indices, bu_indices):
"""Applies the binaural-advantage process.
The BA advantage selection is actually the exact same thing as the BE
process: only bands above threshold for *that* signal are
considered for the comparison.
Parameters
----------
be, bu : ndarray
Better-ear and Binaural-unmasking SNRenv.
be_indices, bu_indices : lists of integers
List of the indices for the channels that were above threshold
for each input.
Returns
-------
ba : ndarray
Combination of the better-ear and binaural unmasking SNRenv.
"""
ba = self._better_ear(be, bu, be_indices, bu_indices)
return ba
def predict(self, clean=None, mixture=None, noise=None):
"""Predict intelligibility.
Parameters
----------
clean, mixture, noise : ndarray
Binaural input signals.
Returns
-------
res : dict
Model predictions and internal values. Model predictions are
stored as a dictionary under the key `'p'`.
"""
# Calculate the mr-sEPSM prediction for each ear in one call..
binaural_res = [super(BsEPSM, self).predict(clean=c, mix=m, noise=n)
for c, m, n in zip(clean, mixture, noise)]
# ... and save them independently...
ears_res = Ears(*binaural_res)
log.debug('Left bands above threshold {}.'.format(
ears_res.left["bands_above_thres_idx"]))
log.debug('Right bands above threshold {}.'.format(ears_res.right[
"bands_above_thres_idx"]))
# ... then apply the binaural unmasking (BU) process, which includes the
# EC process and the mr-sEPSM process applied to the cancelled
# signals...
bu_mr_snr_env_matrix, alphas, taus, bu_mix_mr_exc_ptns, \
bu_noise_mr_exc_ptns \
= self._apply_bu_process(ears_res.left, ears_res.right)
# ... in "parallel", we apply the better-ear (BE) process to the
# multi-resolution SNRenv...
be_mr_snr_env_matrix = self._apply_be_process(ears_res.left,
ears_res.right)
# ... then we select the bands that are considered "above threshold"
# for the BU, BE and binaural advantage (BA)...
bu_idx_above_thres = self._calc_bu_bands_above_thres(ears_res.left,
ears_res.right)
log.debug('BU bands above threshold {}.'.format(bu_idx_above_thres))
be_idx_above_thres = self._calc_be_bands_above_thres(ears_res.left,
ears_res.right)
log.debug('BE bands above threshold {}.'.format(be_idx_above_thres))
ba_idx_above_thres = list(
set(be_idx_above_thres) | set(bu_idx_above_thres))
log.debug('BA bands above threshold {}.'.format(ba_idx_above_thres))
# ... then we combine the BE and BU as part of the "binaural
# advantage"...
ba_mr_snr_env_matrix = self._apply_ba_process(
be_mr_snr_env_matrix,
bu_mr_snr_env_matrix,
be_idx_above_thres,
bu_idx_above_thres)
# ... we can now averaging over time the multi-resolution
# representation...
time_av_bu_snr_env = self._time_average(bu_mr_snr_env_matrix)
time_av_be_snr_env = self._time_average(be_mr_snr_env_matrix)
time_av_ba_snr_env = self._time_average(ba_mr_snr_env_matrix)
# ... and combine the SNRenv for the bands that are above threshold
# for each output type.
bu_snr_env = self._optimal_combination(
time_av_bu_snr_env,
bu_idx_above_thres)
be_snr_env = self._optimal_combination(
time_av_be_snr_env,
be_idx_above_thres)
ba_snr_env = self._optimal_combination(
time_av_ba_snr_env,
ba_idx_above_thres)
# Additional variation, where the multi-resolution representation is
# not average over time at first. The whole mr representation is
# combined optimally.
full_bu_snr_env = self._optimal_combination(
bu_mr_snr_env_matrix,
bu_idx_above_thres
)
full_be_snr_env = self._optimal_combination(
be_mr_snr_env_matrix,
be_idx_above_thres
)
full_ba_snr_env = self._optimal_combination(
ba_mr_snr_env_matrix,
ba_idx_above_thres
)
res = {
'p': {
'be_snr_env': be_snr_env,
'bu_snr_env': bu_snr_env,
'ba_snr_env': ba_snr_env,
'full_be_snr_env': full_be_snr_env,
'full_bu_snr_env': full_bu_snr_env,
'full_ba_snr_env': full_ba_snr_env,
'snr_env_l': ears_res.left['p']['snr_env'],
'snr_env_r': ears_res.right['p']['snr_env']
},
}
if self.debug:
res.update({
'be_matrix': be_mr_snr_env_matrix,
'bu_matrix': bu_mr_snr_env_matrix,
'ba_matrix': ba_mr_snr_env_matrix,
'be_idx_above_threshold': be_idx_above_thres,
'bu_idx_above_threshold': bu_idx_above_thres,
'ba_idx_above_threshold': ba_idx_above_thres,
'ears': ears_res,
'time_av_be_snr_env': time_av_be_snr_env,
'time_av_ba_snr_env': time_av_ba_snr_env,
'time_av_bu_snr_env': time_av_bu_snr_env,
'bu_mix_mr_exc_ptns': bu_mix_mr_exc_ptns,
'bu_noise_mr_exc_ptns': bu_noise_mr_exc_ptns,
self._key_alpha: alphas,
self._key_tau: taus
})
return res # Results for each ear's sEPSM model.
def plot_alpha(self, res):
alphas = res[self._key_alpha]
t = np.arange(alphas.shape[-1]) * self.overlap * self.win_len
plot(t, alphas.T)
xlabel('Time (sec)')
ylabel('$alpha_0$ gains (L / R)')
legend(self.cf,
loc='outside',
bbox_to_anchor=(1.05, 1))
def plot_alpha_hist(self, res, ymax=None):
alphas = res[self._key_alpha]
plt.boxplot(alphas.T, labels=self.cf)
plt.setp(plt.xticks()[1], rotation=30)
xlabel('Channel frequency (Hz)')
ylabel(r'$\alpha_0$ gains (L / R)')
plt.ylim([0, ymax])
plt.xticks(rotation=30)
def plot_tau(self, res):
tau = res[self._key_tau]
t = np.arange(tau.shape[-1]) * self.overlap * self.win_len
plot(t, tau.T)
xlabel('Time (sec)')
ylabel('Tau (s)')
legend(self.cf,
loc='outside',
bbox_to_anchor=(1.05, 1))
def plot_tau_hist(self, res, cfs=None, bins=None, return_ax=False):
"""Plot histogram of tau values.
Parameters
----------
res : dict
Results from the `predict` function.
cfs : list
Index of center frequencies to plot.
bins : int
Number of bins in the histogram. If `None`, uses bins between
-700 us and 700 us. Default is `None`.
return_ax : bool, optional
If True, returns the figure Axes. Default is False.
"""
taus = res[self._key_tau]
edges = np.max(np.abs(taus))
if bins is None:
bins = np.arange(-edges, edges, 20e-6)
# Put together all ITDs if no particular channel is chosen...
if cfs is None:
fig, ax = plt.subplots(1, 1)
ax.hist(taus.ravel(), bins=bins)
ax.set_ylabel('Count for all channels')
else:
# ... or create N subplots if more than one channel is chosen.
try:
iter(cfs)
except TypeError:
cfs = (cfs, )
cfs = cfs[::-1]
fig, axes = plt.subplots(len(cfs), 1, sharex=True, sharey=True)
try:
iter(axes)
except TypeError:
axes = (axes,)
for ax, cf in zip(axes, cfs):
ax.hist(taus[cf], bins=bins)
[ax.set_ylabel('@ {} Hz'.format(self.cf[i_cf]))
for ax, i_cf in zip(axes, cfs)]
ax.set_xlabel('Interaural delay (ms)')
# ax.set_xlim((-800e-6, 800e-6))
ticks = ax.get_xticks()
ax.set_xticklabels(ticks * 1e3)
if return_ax and cfs is None:
return axes
else:
return ax
def _extract_env(self, channel_sigs):
"""Extracts the envelope via half-wave rectification and low-pass
filtering and jitters the envelopes.
Parameters
----------
channel_sigs : ndarray
Peripheral subband signals.
Returns
-------
env : ndarray
"""
envelopes = np.maximum(channel_sigs, 0)
b, a = butter(self.env_lp_order, self.env_lp_cutoff * 2. / self.fs)
envelopes = filtfilt(b, a, envelopes)
epsilons, deltas = self._ec_process.create_jitter(envelopes[0])
for i_sig, signal in enumerate(envelopes):
envelopes[i_sig] = self._ec_process.apply_jitter(signal, epsilons, deltas)
return envelopes
def _mod_sensitivity(self, envs):
"""Doesn't do anything to the envelopes"""
return envs | {
"repo_name": "achabotl/pambox",
"path": "pambox/speech/bsepsm.py",
"copies": "1",
"size": "17156",
"license": "bsd-3-clause",
"hash": 5207471945058299000,
"line_mean": 35.1962025316,
"line_max": 86,
"alpha_frac": 0.5461063185,
"autogenerated": false,
"ratio": 3.652544177134341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4698650495634341,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from collections import OrderedDict
import copy
import simplejson
import numpy as np
import os
from qtpy.QtCore import Qt
from addie.processing.mantid.master_table.geometry_handler import table2mantid
from addie.processing.mantid.master_table.periodic_table.material_handler import \
retrieving_molecular_mass_and_number_of_atoms_worked
from addie.processing.mantid.master_table.tree_definition import SAMPLE_FIRST_COLUMN, NORMALIZATION_FIRST_COLUMN
from addie.processing.mantid.master_table.utilities import Utilities
from addie.utilities import math_tools
_export_dictionary = OrderedDict()
_element = {"Runs": "",
"Background": {"Runs": "",
"Background": {"Runs": "",
},
},
"Material": "",
"Density": {"MassDensity": np.NaN,
"UseMassDensity": True,
"NumberDensity": np.NaN,
"UseNumberDensity": False,
"Mass": np.NaN,
"UseMass": False},
"PackingFraction": np.NaN,
"Geometry": {"Shape": "",
"Radius": np.NaN,
"Radius2": np.NaN,
"Height": np.NaN,
},
"AbsorptionCorrection": {"Type": "",
},
"MultipleScatteringCorrection": {"Type": "",
},
"InelasticCorrection": {"Type": "",
"Order": "",
"Self": True,
"Interference": False,
"FitSpectrumWith": "GaussConvCubicSpline",
"LambdaBinningForFit": "",
"LambdaBinningForCalc": "",
},
}
_data = {"Facility": "SNS",
"Instrument": "NOM",
"Title": "",
"Sample": copy.deepcopy(_element),
"Normalization": copy.deepcopy(_element),
"Calibration": {"Filename": ""},
"HighQLinearFitRange": np.NaN,
"Merging": {"QBinning": [],
"SumBanks": [],
"Characterizations": "",
"Grouping": {"Initial": "",
"Output": "",
},
},
"CacheDir": "./tmp",
"OutputDir": "./output",
"AlignAndFocusArgs": {},
}
class TableFileExporter:
    """Export the ADDIE master table to a pre-reduction JSON file.

    Reads widget values from the Qt master table (`parent.processing_ui`)
    and converts them, row by row, into the dictionary layout expected by
    the reduction software, including post-processing of the `Density` and
    `Geometry` sections into their reduction-ready forms.
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.table_ui = parent.processing_ui.h3_table
        # Strings that mark "no value" cells in the table.
        self.__nan_list = ['N/A', 'None']
        # generic elements to take from the ui
        self.facility = self.parent.facility
        self.instrument = self.parent.instrument["short_name"]
        self.cachedir = self.parent.cache_folder
        self.outputdir = self.parent.output_folder
        self.intermediate_grouping_file = self.parent.intermediate_grouping['filename']
        self.output_grouping_file = self.parent.output_grouping['filename']
        self.calibration = str(
            self.parent.processing_ui.calibration_file.text())

    def export(self, filename='', row=None):
        """create dictionary of all rows unless `row` argument is specified,
        which then only retrieves the single row

        :param filename: Filename to export the table to as JSON dump
        :type filename: str
        :param row: Row index to export to filename as JSON dump (optional)
        :type row: int
        """
        if not filename:
            raise RuntimeError('Cannot export data to empty filename')
        # put together the data to write out
        if row is not None:
            dictionary = self.retrieve_row_info(row)
        else:
            dictionary = self.retrieve_row_infos()
        # create the directory (and any intermediates) if it doesn't
        # already exist; `direc` is '' when filename has no directory part
        direc = os.path.dirname(filename)
        if direc and not os.path.exists(direc):
            os.makedirs(direc)
        # write out the configuration
        with open(filename, 'w') as outfile:
            simplejson.dump(dictionary, outfile, indent=2, ignore_nan=True)

    def isActive(self, row):
        """Determine if `row` is activated for reduction

        :param row: Row to check if activated
        :type row: int
        :return: If the row is active
        :rtype: bool
        """
        # column 0 is 'Activate'
        return self._get_checkbox_state(row=row, column=0)

    def getRunDescr(self, row):
        """Get an <instrument>_<run number(s)> description of a given `row`

        :param row: Row index to retrieve the description
        :type row: int
        :return: String of <instrument>_<run number(s)> for row
        :rtype: str
        """
        runnumbers = self._get_item_value(row=row, column=SAMPLE_FIRST_COLUMN)
        if not runnumbers:
            return ''
        return '{}_{}'.format(self.instrument, runnumbers)

    def _get_checkbox_state(self, row=-1, column=-1):
        """Determine if checkbox is selected for cell in table at (row, column)

        :param row: Row index
        :type row: int
        :param column: Column index
        :type column: int
        :return: True if the checkbox at (row, column) is checked
        :rtype: bool
        """
        state = self.table_ui.cellWidget(row, column).children()[
            1].checkState()
        return state == Qt.Checked

    def _get_item_value(self, row=-1, column=-1):
        """Get item from cell in table at (row, column)

        :param row: Row index
        :type row: int
        :param column: Column index
        :type column: int
        :return: String of item in cell at (row, column)
        :rtype: str
        """
        item = str(self.table_ui.item(row, column).text())
        return item

    def _get_text_value(self, row=-1, column=-1):
        """Get text value from cell in table at (row, column)

        :param row: Row index
        :type row: int
        :param column: Column index
        :type column: int
        :return: Text value in cell at (row, column)
        :rtype: str
        """
        widget = self.table_ui.cellWidget(row, column).children()[1]
        return str(widget.text())

    def _get_selected_value(self, row=-1, column=-1):
        """Get string of selected value from cell in table at (row, column)

        :param row: Row index
        :type row: int
        :param column: Column index
        :type column: int
        :return: String of selected value in cell at (row, column)
        :rtype: str
        """
        widget = self.table_ui.cellWidget(row, column).children()[1]
        return str(widget.currentText())

    def _retrieve_element_infos(self, element='sample', row=-1):
        """From the given row, and the given element (choices: [`sample`, `normalization`]),
        retrieve the widgets values as a pre-reduction JSON dictionary

        TODO: break this method down to smaller chunks for each key of dictionary

        :param element: Either the `sample` or `normalization` part of row
        :type element: str
        :param row: Row index
        :type row: int
        :return: Dictionary for the pre-reduction JSON formed from element's part of row in table
        :rtype: dict
        """
        dict_element = copy.deepcopy(_element)
        key = self.get_key_from_row(row)
        # Sample and Normalization occupy parallel column ranges.
        if element == 'sample':
            column = SAMPLE_FIRST_COLUMN
        else:
            column = NORMALIZATION_FIRST_COLUMN
        runs = self._get_item_value(row=row, column=column)
        dict_element['Runs'] = runs
        column += 1
        background_runs = self._get_item_value(row=row, column=column)
        dict_element["Background"]["Runs"] = background_runs
        column += 1
        background_background = self._get_item_value(row=row, column=column)
        dict_element["Background"]["Background"]["Runs"] = background_background
        column += 1
        material = self._get_text_value(row=row, column=column)
        dict_element["Material"] = material
        # mass density
        column += 1
        mass_density = str(
            self.parent.master_table_list_ui[key][element]['mass_density']['text'].text())
        dict_element["Density"]["MassDensity"] = mass_density
        dict_element["Density"]["UseMassDensity"] = \
            self.parent.master_table_list_ui[key][element]['mass_density_infos']['mass_density']['selected']
        dict_element["Density"]["NumberDensity"] = \
            self.parent.master_table_list_ui[key][element]['mass_density_infos']['number_density']['value']
        dict_element["Density"]["UseNumberDensity"] = \
            self.parent.master_table_list_ui[key][element]['mass_density_infos']['number_density']['selected']
        dict_element["Density"]["Mass"] = \
            self.parent.master_table_list_ui[key][element]['mass_density_infos']['mass']['value']
        dict_element["Density"]["UseMass"] = \
            self.parent.master_table_list_ui[key][element]['mass_density_infos']['mass']['selected']
        column += 1
        packing_fraction = self._get_item_value(row=row, column=column)
        if packing_fraction and packing_fraction not in self.__nan_list:
            dict_element["PackingFraction"] = float(
                packing_fraction.strip("\""))
        column += 1
        shape = self._get_selected_value(row=row, column=column)
        dict_element["Geometry"]["Shape"] = shape
        column += 1
        # Radius is always present; radius2/height only apply to some shapes.
        radius = str(
            self.parent.master_table_list_ui[key][element]['geometry']['radius']['value'].text())
        radius2 = 'N/A'
        height = 'N/A'
        if shape in ['Cylinder', 'Hollow Cylinder']:
            height = str(
                self.parent.master_table_list_ui[key][element]['geometry']['height']['value'].text())
        elif shape == 'Sphere':
            pass
        if shape == "Hollow Cylinder":
            radius2 = str(
                self.parent.master_table_list_ui[key][element]['geometry']['radius2']['value'].text())
        # np.nan (not the removed np.NaN alias) marks unset dimensions.
        dict_element["Geometry"]["Radius"] = np.nan if (
            radius in self.__nan_list) else float(radius)
        dict_element["Geometry"]["Radius2"] = np.nan if (
            radius2 in self.__nan_list) else float(radius2)
        dict_element["Geometry"]["Height"] = np.nan if (
            height in self.__nan_list) else float(height)
        column += 1
        abs_correction = self._get_selected_value(row=row, column=column)
        dict_element["AbsorptionCorrection"]["Type"] = abs_correction
        column += 1
        multiple_scattering_correction = self._get_selected_value(
            row=row, column=column)
        dict_element["MultipleScatteringCorrection"]["Type"] = multiple_scattering_correction
        column += 1
        inelastic_correction = self._get_selected_value(row=row, column=column)
        dict_element["InelasticCorrection"]["Type"] = inelastic_correction
        # if inelastic_correction.lower() == 'placzek':
        placzek_infos = self.parent.master_table_list_ui[key][element]['placzek_infos']
        if inelastic_correction not in self.__nan_list:
            dict_element["InelasticCorrection"]["Order"] = placzek_infos["order_text"]
            dict_element["InelasticCorrection"]["Self"] = placzek_infos["is_self"]
            dict_element["InelasticCorrection"]["Interference"] = placzek_infos["is_interference"]
            fit_spectrum_text = placzek_infos["fit_spectrum_text"].replace(
                ".",
                "").replace(
                " ",
                "")
            dict_element["InelasticCorrection"]["FitSpectrumWith"] = fit_spectrum_text
            dict_element["InelasticCorrection"]["LambdaBinningForFit"] = "{},{},{}".format(
                placzek_infos["lambda_fit_min"],
                placzek_infos["lambda_fit_delta"],
                placzek_infos["lambda_fit_max"])
            dict_element["InelasticCorrection"]["LambdaBinningForCalc"] = "{},{},{}".format(
                placzek_infos["lambda_calc_min"],
                placzek_infos["lambda_calc_delta"],
                placzek_infos["lambda_calc_max"])
        else:
            # No inelastic correction selected: drop the whole section.
            dict_element.pop("InelasticCorrection")
        return dict_element

    def _get_key_value_dict(self, row=-1):
        """Get key from row, and return the AlignAndFocusArgs info values

        :param row: Row index
        :type row: int
        :return: Dictionary of the AlignAndFocusArgs info
        :rtype: dict
        """
        key = self.get_key_from_row(row)
        key_value_dict = self.parent.master_table_list_ui[key]['align_and_focus_args_infos']
        return key_value_dict

    def _check_only_one_density_method_selected(self, dictionary):
        """Check the density section of pre-reduction JSON only has one method selected.
        Raises exception if does not, just pass if complies.

        :param dictionary: Pre-reduction JSON with preliminary `Density` section
        :type dictionary: dict
        """
        density = dictionary['Density']
        opts = iter([density['UseMassDensity'],
                     density['UseNumberDensity'], density['UseMass']])
        if not math_tools.oneAndOnlyOneTrue(opts):
            raise Exception(
                "Must use one and only one way to calculated MassDensity")

    def _get_mass_density_from_number_density(self, dictionary):
        """Take pre-reduction JSON with `NumberDensity` as selected method
        to calculate the mass density

        :param dictionary: Pre-reduction JSON with preliminary `Density` section
        :type dictionary: dict
        :return: mass density
        :rtype: float
        """
        if 'Material' not in dictionary:
            raise Exception(
                "Must define chemical formula to use NumberDensity for reduction")
        density = dictionary['Density']
        number_density = density['NumberDensity']
        chemical_formula = dictionary['Material']
        mass, natoms = retrieving_molecular_mass_and_number_of_atoms_worked(
            chemical_formula)
        mass_density = math_tools.number_density2mass_density(
            number_density, natoms, mass)
        return mass_density

    def _get_mass_density_from_mass(self, dictionary):
        """Take pre-reduction JSON with `Mass` as selected method
        to calculate the mass density

        :param dictionary: Pre-reduction JSON with preliminary `Density` section
        :type dictionary: dict
        :return: mass density
        :rtype: float
        """
        if 'Material' not in dictionary:
            raise Exception(
                "Must define chemical formula to use Mass for reduction")
        if 'Geometry' not in dictionary:
            raise Exception(
                "Must define a geometry to use Mass for reduction")
        mass = dictionary['Density']['Mass']
        volume = math_tools.get_volume_from_geometry(dictionary['Geometry'])
        mass_density = math_tools.mass2mass_density(mass, volume)
        return mass_density

    def get_key_from_row(self, row):
        """Get key from row

        :param row: Row index
        :type row: int
        :return: Get key for the row
        :rtype: str
        """
        o_util = Utilities(parent=self.parent)
        key = o_util.get_row_key_from_row_index(row=row)
        return key

    def retrieve_row_info(self, row):
        """Retrieve a single row's information in a pre-reduction JSON format

        :param row: Row index
        :type row: int
        :return: Dictionary for the row in a pre-reduction JSON format
        :rtype: dict
        """
        activate = self._get_checkbox_state(row=row, column=0)
        title = self._get_item_value(row=row, column=1)
        _export_dictionary_sample = self._retrieve_element_infos(
            element='sample', row=row)
        _export_dictionary_normalization = self._retrieve_element_infos(
            element='normalization', row=row)
        _key_value_dict = self._get_key_value_dict(row=row)
        dictionary = {
            'Activate': activate,
            'Title': title,
            'Sample': _export_dictionary_sample,
            'Normalization': _export_dictionary_normalization,
            'Calibration': {
                "Filename": self.calibration},
            'Facility': self.facility,
            'Instrument': self.instrument,
            'CacheDir': self.cachedir,
            'OutputDir': self.outputdir,
            "Merging": {
                "QBinning": [],
                "SumBanks": [],
                "Characterizations": "",
                "Grouping": {
                    "Initial": self.intermediate_grouping_file,
                    "Output": self.output_grouping_file,
                },
            },
            'AlignAndFocusArgs': _key_value_dict,
        }
        return dictionary

    def retrieve_row_infos(self):
        """Retrieve all of the rows' information in a pre-reduction JSON format

        :return: Dictionary for all the rows in the table in a pre-reduction JSON format
        :rtype: dict
        """
        full_export_dictionary = OrderedDict()
        nbr_row = self.table_ui.rowCount()
        for row in range(nbr_row):
            # force 3 digits index (to make sure loading back the table will be
            # done in the same order)
            full_export_dictionary["{:03}".format(
                row)] = self.retrieve_row_info(row)
        return full_export_dictionary

    def density_selection_for_reduction(self, dictionary):
        """Processing of the pre-reduction JSON's `Density` to return
        a reduction-ready `MassDensity` section in the passed dictionary

        :param dictionary: Pre-reduction JSON with preliminary `Density` section
        :type dictionary: dict
        :return: JSON dictionary with reduction-ready `MassDensity` section
        :rtype: dict
        """
        # Default value for mass density
        mass_density = 1.0
        # return if density section not defined
        if 'Density' not in dictionary:
            dictionary['MassDensity'] = mass_density
            return dictionary
        # ensure one and only one way is selected for calculating MassDensity
        self._check_only_one_density_method_selected(dictionary)
        # convert to mass density
        density = dictionary['Density']
        if density['UseMassDensity']:
            mass_density = density['MassDensity']
        if density['UseNumberDensity']:
            mass_density = self._get_mass_density_from_number_density(
                dictionary)
        if density['UseMass']:
            mass_density = self._get_mass_density_from_mass(dictionary)
        # Post-process for output: take out overall Density and add MassDensity
        # key
        dictionary.pop('Density')
        dictionary['MassDensity'] = float(mass_density)
        return dictionary

    def _remove_keys_from_with_nan_values(
            self, dictionary, selected_values=None):
        """Remove keys in a dictionary if the value is NaN

        :param dictionary: Dictionary with keys we want to check
        :type dictionary: dict
        :param selected_values: Subset of keys to check (defaults to all)
        :type selected_values: list
        :return: Dictionary with keys removed where value is NaN
        :rtype: dict
        """
        # Set default to check all keys unless selected_values defined
        if selected_values is None:
            selected_values = list(dictionary.keys())
        # Warn if selected_values is not a proper subset of the keys in the
        # dict
        if not set(selected_values).issubset(dictionary.keys()):
            err_string = "Found keys that are not part dictionary\n"
            err_string += "    List with 'erroneous' key: {} \n".format(
                ",".join(selected_values))
            err_string += "    Dictionary keys: {} \n".format(
                ",".join(dictionary.keys()))
            raise Exception(err_string)
        # Remove keys with NaN values
        for key in selected_values:
            try:
                if np.isnan(dictionary[key]):
                    dictionary.pop(key)
            except TypeError:
                # Non-numeric value (str, list, ...): cannot be NaN, keep it.
                pass
        return dictionary

    def _check_necessary_geometry_keys_exist(self, geometry):
        """ Check we have necessary keys for the specified geometry shape

        :param geometry: Geometry from ADDIE Table (pre-reduction-ready)
        :type geometry: dict
        """
        # Grab shape we need to check against
        shape = geometry['Shape']
        # Find necessary keys from geometry_handler.table2mantid dict
        shape_dict = table2mantid[shape].copy()
        necessary_keys = list(shape_dict.keys())
        # Make sure all necessary keys exist
        for key in necessary_keys:
            if key not in geometry:
                err_string = "Did not find key {} in geometry {}".format(
                    key, geometry)
                raise Exception(err_string)

    def _map_table_to_mantid_geometry(self, geometry):
        """ Map from table geometry to mantid geometry using geometry_handler.table2mantid

        :param geometry: Geometry from ADDIE Table (pre-reduction-ready and checked)
        :type geometry: dict
        :return: Reduction-ready geometry dictionary
        :rtype: dict
        """
        # Grab shape we need to check against
        shape = geometry['Shape']
        # Get map from geometry_handler.table2mantid dict
        shape_dict = table2mantid[shape].copy()
        # Construct new geometry dict with mantid keys and do value processing
        # for the mapping
        new_geometry = dict()
        for k, v in geometry.items():
            new_key = shape_dict[k]["Key"]
            if "ValueProcessor" in shape_dict[k]:
                value_processor = shape_dict[k]["ValueProcessor"]
                new_value = value_processor(v)
            else:
                new_value = v
            new_geometry[new_key] = new_value
        return new_geometry

    def geometry_selection_for_reduction(self, dictionary):
        """Processing of the pre-reduction-ready JSON's `Geometry` to return
        a reduction-ready `Geometry` section in the passed dictionary

        :param dictionary: Pre-reduction-ready JSON with preliminary `Geometry` section
        :type dictionary: dict
        :return: JSON dictionary with reduction-ready `Geometry` section
        :rtype: dict
        """
        # Default value for geometry
        geometry = {'Shape': 'Cylinder', 'Radius': 1.0}
        # return a default geometry if not specified
        if 'Geometry' not in dictionary:
            dictionary['Geometry'] = geometry
            print("No Geometry found, default geometry added:", geometry)
            return dictionary
        # Remove all NaN values from Geometry
        dictionary['Geometry'] = self._remove_keys_from_with_nan_values(
            dictionary['Geometry'])
        # return if no shape in Geometry, will use default in Mantid
        if 'Shape' not in dictionary['Geometry']:
            return dictionary
        # Get geometry and check if we have the necessary geometry keys for the
        # shape
        geometry = dictionary['Geometry']
        self._check_necessary_geometry_keys_exist(geometry)
        # Construct new geometry dict based on table to mantid mapping
        geometry = self._map_table_to_mantid_geometry(geometry)
        dictionary['Geometry'] = geometry
        return dictionary

    def convert_from_row_to_reduction(self, json_input):
        """Convert a pre-reduction row dictionary into its reduction-ready
        form by processing the `Density` and `Geometry` sections of both
        the Sample and Normalization elements (in place).

        :param json_input: Pre-reduction JSON for one row
        :type json_input: dict
        :return: JSON dictionary ready for reduction
        :rtype: dict
        """
        reduction_input = json_input
        for element in ["Sample", "Normalization"]:
            element_section = reduction_input[element]
            element_section = self.density_selection_for_reduction(
                element_section)
            self.geometry_selection_for_reduction(element_section)
        return reduction_input
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/master_table_exporter.py",
"copies": "1",
"size": "24786",
"license": "mit",
"hash": -3520647653553857500,
"line_mean": 37.0153374233,
"line_max": 112,
"alpha_frac": 0.5824255628,
"autogenerated": false,
"ratio": 4.377605086541858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5460030649341858,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import numpy as np
import simdna
from simdna.synthetic import (
RepeatedEmbedder,
SubstringEmbedder,
ReverseComplementWrapper,
UniformPositionGenerator,
InsideCentralBp,
LoadedEncodeMotifs,
PwmSamplerFromLoadedMotifs,
UniformIntegerGenerator,
ZeroOrderBackgroundGenerator,
EmbedInABackground,
GenerateSequenceNTimes,
RandomSubsetOfEmbedders,
IsInTraceLabelGenerator,
EmbeddableEmbedder,
PairEmbeddableGenerator,
)
from simdna.util import DiscreteDistribution
# Load the ENCODE motif collection once at import time; every PWM sampler
# in this module draws from this shared object.
loaded_motifs = LoadedEncodeMotifs(
    simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)
def get_distribution(GC_fraction):
    """Return a DiscreteDistribution over A/C/G/T for the given GC fraction.

    The GC mass is split evenly between G and C, and the AT mass evenly
    between A and T.
    """
    at_weight = (1 - GC_fraction) / 2
    gc_weight = GC_fraction / 2
    return DiscreteDistribution(
        {'A': at_weight, 'C': gc_weight, 'G': gc_weight, 'T': at_weight})
def simple_motif_embedding(motif_name, seq_length, num_seqs, GC_fraction):
    """
    Simulates sequences with a motif embedded anywhere in the sequence.

    Parameters
    ----------
    motif_name : str
        encode motif name; if None, plain background sequences are produced
    seq_length : int
        length of sequence
    num_seqs: int
        number of sequences
    GC_fraction : float
        GC basepair fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # No motif name means no embedders: pure background sequences.
    if motif_name is None:
        embedders = []
    else:
        pwm_sampler = PwmSamplerFromLoadedMotifs(loaded_motifs, motif_name)
        embedders = [SubstringEmbedder(ReverseComplementWrapper(pwm_sampler))]
    background = ZeroOrderBackgroundGenerator(
        seq_length, discreteDistribution=get_distribution(GC_fraction))
    sequence_source = GenerateSequenceNTimes(
        EmbedInABackground(background, embedders), num_seqs)
    generated = tuple(sequence_source.generateSequences())
    sequence_arr = np.array([record.seq for record in generated])
    embedding_arr = [record.embeddings for record in generated]
    return sequence_arr, embedding_arr
def motif_density(motif_name,
                  seq_length,
                  num_seqs,
                  min_counts,
                  max_counts,
                  GC_fraction,
                  central_bp=None):
    """
    Returns sequences with motif density, along with embeddings array.

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of each background sequence
    num_seqs : int
        number of sequences to generate
    min_counts : int
        minimum number of motif instances per sequence
    max_counts : int
        maximum number of motif instances per sequence
    GC_fraction : float
        GC fraction of the zero-order background
    central_bp : int, optional
        if given, motif placement is restricted to the central
        `central_bp` bases; otherwise motifs may land anywhere

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    embedding_arr : list
        Per-sequence embedding objects.
    """
    substring_generator = PwmSamplerFromLoadedMotifs(loaded_motifs, motif_name)
    # Only constrain placement to the sequence center when requested.
    if central_bp is not None:
        position_generator = InsideCentralBp(central_bp)
    else:
        position_generator = UniformPositionGenerator()
    quantity_generator = UniformIntegerGenerator(min_counts, max_counts)
    # Repeat the (reverse-complement-aware) substring embedder a sampled
    # number of times per sequence.
    embedders = [
        RepeatedEmbedder(
            SubstringEmbedder(
                ReverseComplementWrapper(substring_generator),
                position_generator), quantity_generator)
    ]
    embed_in_background = EmbedInABackground(
        ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        embedders)
    generated_sequences = tuple(
        GenerateSequenceNTimes(embed_in_background, num_seqs).generateSequences())
    sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    return sequence_arr, embedding_arr
def simulate_single_motif_detection(motif_name, seq_length, num_pos, num_neg,
                                    GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequence with a motif
          embedded anywhere in the sequence
        - Negative class sequence without the motif

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positives carry the motif; negatives are pure background
    # (motif_name=None disables embedding).
    pos_seqs, pos_embeddings = simple_motif_embedding(
        motif_name, seq_length, num_pos, GC_fraction)
    neg_seqs, neg_embeddings = simple_motif_embedding(
        None, seq_length, num_neg, GC_fraction)
    sequence_arr = np.concatenate((pos_seqs, neg_seqs))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    return sequence_arr, y, pos_embeddings + neg_embeddings
def simulate_motif_counting(motif_name, seq_length, pos_counts, neg_counts,
                            num_pos, num_neg, GC_fraction):
    """
    Generates data for motif counting task.

    Parameters
    ----------
    motif_name : str
    seq_length : int
    pos_counts : list
        (min_counts, max_counts) for positive set.
    neg_counts : list
        (min_counts, max_counts) for negative set.
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    pos_count_sequence_array, positive_embedding_arr = motif_density(
        motif_name, seq_length, num_pos, pos_counts[0], pos_counts[1],
        GC_fraction)
    # BUG FIX: the negative set was previously generated with `num_pos`
    # sequences, which made `sequence_arr` and `y` disagree in length
    # whenever num_pos != num_neg.
    neg_count_sequence_array, negative_embedding_arr = motif_density(
        motif_name, seq_length, num_neg, neg_counts[0], neg_counts[1],
        GC_fraction)
    sequence_arr = np.concatenate((pos_count_sequence_array,
                                   neg_count_sequence_array))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_motif_density_localization(motif_name, seq_length, center_size,
                                        min_motif_counts, max_motif_counts,
                                        num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequences with multiple motif instances
          in center of the sequence.
        - Negative class sequences with multiple motif instances
          anywhere in the sequence.

    The number of motif instances is uniformly sampled
    between minimum and maximum motif counts.

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    center_size : int
        length of central part of the sequence where motifs can be positioned
    min_motif_counts : int
        minimum number of motif instances
    max_motif_counts : int
        maximum number of motif instances
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positives: placement restricted to the central `center_size` bases.
    localized_density_sequence_array, positive_embedding_arr = motif_density(
        motif_name, seq_length, num_pos, min_motif_counts, max_motif_counts,
        GC_fraction, center_size)
    # Negatives: same density, but motifs may land anywhere.
    unlocalized_density_sequence_array, negative_embedding_arr = motif_density(
        motif_name, seq_length, num_neg, min_motif_counts, max_motif_counts,
        GC_fraction)
    sequence_arr = np.concatenate((localized_density_sequence_array,
                                   unlocalized_density_sequence_array))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_multi_motif_embedding(motif_names, seq_length, min_num_motifs,
                                   max_num_motifs, num_seqs, GC_fraction):
    """
    Generates data for multi motif recognition task.

    Parameters
    ----------
    motif_names : list
        List of strings (encode motif names).
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_seqs : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : ndarray
        Contains labels for each motif.
    embedding_arr: 1darray
        Array of embedding objects.
    """

    def get_embedder(motif_name):
        # One named embedder per motif; the name is also what the
        # IsInTraceLabelGenerator below keys on.
        substring_generator = PwmSamplerFromLoadedMotifs(loaded_motifs, motif_name)
        return SubstringEmbedder(
            ReverseComplementWrapper(substring_generator), name=motif_name)

    embedders = [get_embedder(motif_name) for motif_name in motif_names]
    # Embed a random subset of the candidate motifs, with subset size
    # drawn from [min_num_motifs, max_num_motifs].
    quantity_generator = UniformIntegerGenerator(min_num_motifs, max_num_motifs)
    combined_embedder = [RandomSubsetOfEmbedders(quantity_generator, embedders)]
    embed_in_background = EmbedInABackground(
        ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        combined_embedder)
    generated_sequences = tuple(
        GenerateSequenceNTimes(embed_in_background, num_seqs).generateSequences())
    sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    # Per-sequence boolean label vector, one entry per motif name.
    label_generator = IsInTraceLabelGenerator(np.asarray(motif_names))
    y = np.array(
        [
            label_generator.generateLabels(generated_seq)
            for generated_seq in generated_sequences
        ],
        dtype=bool)
    embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    return sequence_arr, y, embedding_arr
def simulate_differential_accessibility(
    pos_motif_names, neg_motif_names, seq_length, min_num_motifs,
    max_num_motifs, num_pos, num_neg, GC_fraction):
    """
    Generates data for differential accessibility task.

    Parameters
    ----------
    pos_motif_names : list
        List of strings.
    neg_motif_names : list
        List of strings.
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positive and negative classes each get their own motif vocabulary;
    # the per-motif labels from the helper are discarded.
    pos_seqs, _, pos_embeddings = simulate_multi_motif_embedding(
        pos_motif_names, seq_length, min_num_motifs, max_num_motifs,
        num_pos, GC_fraction)
    neg_seqs, _, neg_embeddings = simulate_multi_motif_embedding(
        neg_motif_names, seq_length, min_num_motifs, max_num_motifs,
        num_neg, GC_fraction)
    labels = np.array([[True]] * num_pos + [[False]] * num_neg)
    return (np.concatenate((pos_seqs, neg_seqs)), labels,
            pos_embeddings + neg_embeddings)
def simulate_heterodimer_grammar(motif1, motif2, seq_length, min_spacing,
                                 max_spacing, num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences with motif1 and motif2:
        - Positive class sequences with motif1 and motif2 positioned
          min_spacing and max_spacing apart
        - Negative class sequences with independent motif1 and motif2
          positioned anywhere in the sequence, not as a heterodimer grammar

    Parameters
    ----------
    seq_length : int, length of sequence
    GC_fraction : float, GC fraction in background sequence
    num_pos : int, number of positive class sequences
    num_neg : int, number of negative class sequences
    motif1 : str, encode motif name
    motif2 : str, encode motif name
    min_spacing : int, minimum inter motif spacing
    max_spacing : int, maximum inter motif spacing

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: list
        List of embedding objects.
    """
    motif1_generator = ReverseComplementWrapper(
        PwmSamplerFromLoadedMotifs(loaded_motifs, motif1))
    motif2_generator = ReverseComplementWrapper(
        PwmSamplerFromLoadedMotifs(loaded_motifs, motif2))
    # Positives: the two motifs are embedded as a pair with a sampled
    # separation between min_spacing and max_spacing.
    separation_generator = UniformIntegerGenerator(min_spacing, max_spacing)
    embedder = EmbeddableEmbedder(
        PairEmbeddableGenerator(motif1_generator, motif2_generator,
                                separation_generator))
    embed_in_background = EmbedInABackground(
        ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        [embedder])
    generated_sequences = tuple(
        GenerateSequenceNTimes(embed_in_background, num_pos).generateSequences())
    grammar_sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    positive_embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    # Negatives: both motifs present (exactly 2 embedders chosen) but
    # placed independently, with no spacing constraint.
    nongrammar_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
        [motif1, motif2], seq_length, 2, 2, num_neg, GC_fraction)
    sequence_arr = np.concatenate((grammar_sequence_arr, nongrammar_sequence_arr))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
| {
"repo_name": "Agent007/deepchem",
"path": "deepchem/molnet/dnasim.py",
"copies": "1",
"size": "13518",
"license": "mit",
"hash": -5705314250062408000,
"line_mean": 33.0503778338,
"line_max": 86,
"alpha_frac": 0.6755437195,
"autogenerated": false,
"ratio": 3.86559908492994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008143229779472853,
"num_lines": 397
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import numpy as np
import simdna
from simdna.synthetic import (
RepeatedEmbedder, SubstringEmbedder, ReverseComplementWrapper,
UniformPositionGenerator, InsideCentralBp,
LoadedEncodeMotifs, PwmSamplerFromLoadedMotifs,
UniformIntegerGenerator, ZeroOrderBackgroundGenerator,
EmbedInABackground, GenerateSequenceNTimes,
RandomSubsetOfEmbedders, IsInTraceLabelGenerator,
EmbeddableEmbedder, PairEmbeddableGenerator,
)
from simdna.util import DiscreteDistribution
# Load the ENCODE motif collection once at import time; every PWM sampler
# in this module draws from this shared object.
loaded_motifs = LoadedEncodeMotifs(simdna.ENCODE_MOTIFS_PATH,
                                   pseudocountProb=0.001)
def get_distribution(GC_fraction):
    """Return a DiscreteDistribution over A/C/G/T with the given GC content."""
    half_at = (1 - GC_fraction) / 2
    half_gc = GC_fraction / 2
    weights = {'A': half_at, 'C': half_gc, 'G': half_gc, 'T': half_at}
    return DiscreteDistribution(weights)
def simple_motif_embedding(motif_name, seq_length, num_seqs, GC_fraction):
    """
    Simulates sequences with a motif embedded anywhere in the sequence.

    Parameters
    ----------
    motif_name : str
        encode motif name; if None, plain background sequences are produced
    seq_length : int
        length of sequence
    num_seqs: int
        number of sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # No motif name means no embedders: pure background sequences.
    if motif_name is None:
        embedders = []
    else:
        substring_generator = PwmSamplerFromLoadedMotifs(
            loaded_motifs, motif_name)
        embedders = [SubstringEmbedder(
            ReverseComplementWrapper(substring_generator))]
    embed_in_background = EmbedInABackground(
        ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        embedders)
    generated_sequences = tuple(GenerateSequenceNTimes(
        embed_in_background, num_seqs).generateSequences())
    sequence_arr = np.array([generated_seq.seq for generated_seq in generated_sequences])
    embedding_arr = [generated_seq.embeddings for generated_seq in generated_sequences]
    return sequence_arr, embedding_arr
def motif_density(motif_name, seq_length, num_seqs,
                  min_counts, max_counts, GC_fraction,
                  central_bp=None):
    """
    returns sequences with motif density, along with embeddings array.
    """
    pwm_sampler = PwmSamplerFromLoadedMotifs(loaded_motifs, motif_name)
    # Restrict placement to the sequence center only when requested.
    if central_bp is None:
        placement = UniformPositionGenerator()
    else:
        placement = InsideCentralBp(central_bp)
    count_sampler = UniformIntegerGenerator(min_counts, max_counts)
    embedders = [RepeatedEmbedder(
        SubstringEmbedder(ReverseComplementWrapper(pwm_sampler), placement),
        count_sampler)]
    background = ZeroOrderBackgroundGenerator(
        seq_length, discreteDistribution=get_distribution(GC_fraction))
    sequence_source = GenerateSequenceNTimes(
        EmbedInABackground(background, embedders), num_seqs)
    generated = tuple(sequence_source.generateSequences())
    sequence_arr = np.array([record.seq for record in generated])
    embedding_arr = [record.embeddings for record in generated]
    return sequence_arr, embedding_arr
def simulate_single_motif_detection(motif_name, seq_length,
                                    num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequence with a motif
          embedded anywhere in the sequence
        - Negative class sequence without the motif

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    motif_sequence_arr, positive_embedding_arr = simple_motif_embedding(
        motif_name, seq_length, num_pos, GC_fraction)
    # motif_name=None yields pure background (negative) sequences.
    random_sequence_arr, negative_embedding_arr = simple_motif_embedding(
        None, seq_length, num_neg, GC_fraction)
    sequence_arr = np.concatenate((motif_sequence_arr, random_sequence_arr))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_motif_counting(motif_name, seq_length, pos_counts, neg_counts,
                            num_pos, num_neg, GC_fraction):
    """
    Generates data for motif counting task.

    Parameters
    ----------
    motif_name : str
    seq_length : int
    pos_counts : list
        (min_counts, max_counts) for positive set.
    neg_counts : list
        (min_counts, max_counts) for negative set.
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    pos_count_sequence_array, positive_embedding_arr = motif_density(
        motif_name, seq_length, num_pos,
        pos_counts[0], pos_counts[1], GC_fraction)
    # BUG FIX: the negative set was previously generated with `num_pos`
    # sequences, which made `sequence_arr` and `y` disagree in length
    # whenever num_pos != num_neg.
    neg_count_sequence_array, negative_embedding_arr = motif_density(
        motif_name, seq_length, num_neg,
        neg_counts[0], neg_counts[1], GC_fraction)
    sequence_arr = np.concatenate(
        (pos_count_sequence_array, neg_count_sequence_array))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_motif_density_localization(
        motif_name, seq_length, center_size, min_motif_counts,
        max_motif_counts, num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequences with multiple motif instances
          in center of the sequence.
        - Negative class sequences with multiple motif instances
          anywhere in the sequence.

    The number of motif instances is uniformly sampled
    between minimum and maximum motif counts.

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    center_size : int
        length of central part of the sequence where motifs can be positioned
    min_motif_counts : int
        minimum number of motif instances
    max_motif_counts : int
        maximum number of motif instances
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positives: placement restricted to the central `center_size` bases.
    localized_density_sequence_array, positive_embedding_arr = motif_density(
        motif_name, seq_length, num_pos,
        min_motif_counts, max_motif_counts, GC_fraction, center_size)
    # Negatives: same density, but motifs may land anywhere.
    unlocalized_density_sequence_array, negative_embedding_arr = motif_density(
        motif_name, seq_length, num_neg,
        min_motif_counts, max_motif_counts, GC_fraction)
    sequence_arr = np.concatenate(
        (localized_density_sequence_array, unlocalized_density_sequence_array))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_multi_motif_embedding(motif_names, seq_length, min_num_motifs,
                                   max_num_motifs, num_seqs, GC_fraction):
    """
    Generates data for multi motif recognition task.

    Parameters
    ----------
    motif_names : list
        List of strings (encode motif names).
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_seqs : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : ndarray
        Contains labels for each motif.
    embedding_arr: 1darray
        Array of embedding objects.
    """

    def get_embedder(motif_name):
        # One named embedder per motif; the name is also what the
        # IsInTraceLabelGenerator below keys on.
        substring_generator = PwmSamplerFromLoadedMotifs(
            loaded_motifs, motif_name)
        return SubstringEmbedder(
            ReverseComplementWrapper(substring_generator),
            name=motif_name)

    embedders = [get_embedder(motif_name) for motif_name in motif_names]
    # Embed a random subset of the candidate motifs, with subset size
    # drawn from [min_num_motifs, max_num_motifs].
    quantity_generator = UniformIntegerGenerator(
        min_num_motifs, max_num_motifs)
    combined_embedder = [RandomSubsetOfEmbedders(
        quantity_generator, embedders)]
    embed_in_background = EmbedInABackground(
        ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        combined_embedder)
    generated_sequences = tuple(GenerateSequenceNTimes(
        embed_in_background, num_seqs).generateSequences())
    sequence_arr = np.array([generated_seq.seq for generated_seq in generated_sequences])
    # Per-sequence boolean label vector, one entry per motif name.
    label_generator = IsInTraceLabelGenerator(np.asarray(motif_names))
    y = np.array([label_generator.generateLabels(generated_seq)
                  for generated_seq in generated_sequences], dtype=bool)
    embedding_arr = [generated_seq.embeddings for generated_seq in generated_sequences]
    return sequence_arr, y, embedding_arr
def simulate_differential_accessibility(
        pos_motif_names, neg_motif_names, seq_length,
        min_num_motifs, max_num_motifs, num_pos, num_neg, GC_fraction):
    """
    Generates data for differential accessibility task.

    Parameters
    ----------
    pos_motif_names : list
        List of strings.
    neg_motif_names : list
        List of strings.
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Each class gets its own motif vocabulary; the helper's per-motif
    # labels are discarded, only the binary class label is kept.
    pos_motif_sequence_arr, _, positive_embedding_arr = simulate_multi_motif_embedding(
        pos_motif_names, seq_length,
        min_num_motifs, max_num_motifs, num_pos, GC_fraction)
    neg_motif_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
        neg_motif_names, seq_length,
        min_num_motifs, max_num_motifs, num_neg, GC_fraction)
    sequence_arr = np.concatenate(
        (pos_motif_sequence_arr, neg_motif_sequence_arr))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_heterodimer_grammar(
        motif1, motif2, seq_length,
        min_spacing, max_spacing, num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences with motif1 and motif2:
        - Positive class sequences with motif1 and motif2 positioned
          min_spacing and max_spacing apart
        - Negative class sequences with independent motif1 and motif2
          positioned anywhere in the sequence, not as a heterodimer grammar

    Parameters
    ----------
    seq_length : int, length of sequence
    GC_fraction : float, GC fraction in background sequence
    num_pos : int, number of positive class sequences
    num_neg : int, number of negative class sequences
    motif1 : str, encode motif name
    motif2 : str, encode motif name
    min_spacing : int, minimum inter motif spacing
    max_spacing : int, maximum inter motif spacing

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: list
        List of embedding objects.
    """
    motif1_generator = ReverseComplementWrapper(PwmSamplerFromLoadedMotifs(loaded_motifs, motif1))
    motif2_generator = ReverseComplementWrapper(PwmSamplerFromLoadedMotifs(loaded_motifs, motif2))
    # Positives: the pair is embedded with a sampled separation between
    # min_spacing and max_spacing.
    separation_generator = UniformIntegerGenerator(min_spacing, max_spacing)
    embedder = EmbeddableEmbedder(PairEmbeddableGenerator(
        motif1_generator, motif2_generator, separation_generator))
    embed_in_background = EmbedInABackground(ZeroOrderBackgroundGenerator(
        seq_length, discreteDistribution=get_distribution(GC_fraction)), [embedder])
    generated_sequences = tuple(GenerateSequenceNTimes(
        embed_in_background, num_pos).generateSequences())
    grammar_sequence_arr = np.array([generated_seq.seq for generated_seq in generated_sequences])
    positive_embedding_arr = [generated_seq.embeddings for generated_seq in generated_sequences]
    # Negatives: both motifs present (exactly 2 embedders chosen) but
    # placed independently, with no spacing constraint.
    nongrammar_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
        [motif1, motif2], seq_length, 2, 2, num_neg, GC_fraction)
    sequence_arr = np.concatenate(
        (grammar_sequence_arr, nongrammar_sequence_arr))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
| {
"repo_name": "lilleswing/deepchem",
"path": "contrib/dragonn/simulations.py",
"copies": "6",
"size": "13615",
"license": "mit",
"hash": 1127913743723948900,
"line_mean": 36.0980926431,
"line_max": 98,
"alpha_frac": 0.6701432244,
"autogenerated": false,
"ratio": 3.933834151979197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7603977376379196,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import numpy as np
def get_distribution(GC_fraction):
    """Return a DiscreteDistribution over A/C/G/T with the given GC content."""
    # Lazy import keeps simdna optional until the simulation is actually run.
    from simdna.util import DiscreteDistribution
    at_weight = (1 - GC_fraction) / 2
    gc_weight = GC_fraction / 2
    return DiscreteDistribution(
        {'A': at_weight, 'C': gc_weight, 'G': gc_weight, 'T': at_weight})
def simple_motif_embedding(motif_name, seq_length, num_seqs, GC_fraction):
    """
    Simulates sequences with a motif embedded anywhere in the sequence.

    Parameters
    ----------
    motif_name : str
        encode motif name; if None, plain background sequences are produced
    seq_length : int
        length of sequence
    num_seqs: int
        number of sequences
    GC_fraction : float
        GC basepair fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Lazy imports keep simdna optional until the simulation is actually run.
    import simdna
    from simdna import synthetic
    # No motif name means no embedders: pure background sequences.
    if motif_name is None:
        embedders = []
    else:
        loaded_motifs = synthetic.LoadedEncodeMotifs(
            simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)
        substring_generator = synthetic.PwmSamplerFromLoadedMotifs(
            loaded_motifs, motif_name)
        embedders = [
            synthetic.SubstringEmbedder(
                synthetic.ReverseComplementWrapper(substring_generator))
        ]
    embed_in_background = synthetic.EmbedInABackground(
        synthetic.ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        embedders)
    generated_sequences = tuple(
        synthetic.GenerateSequenceNTimes(embed_in_background,
                                         num_seqs).generateSequences())
    sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    return sequence_arr, embedding_arr
def motif_density(motif_name,
                  seq_length,
                  num_seqs,
                  min_counts,
                  max_counts,
                  GC_fraction,
                  central_bp=None):
    """
    Returns sequences with motif density, along with embeddings array.

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of each background sequence
    num_seqs : int
        number of sequences to generate
    min_counts : int
        minimum number of motif instances per sequence
    max_counts : int
        maximum number of motif instances per sequence
    GC_fraction : float
        GC fraction of the zero-order background
    central_bp : int, optional
        if given, motif placement is restricted to the central
        `central_bp` bases; otherwise motifs may land anywhere
    """
    # Lazy imports keep simdna optional until the simulation is actually run.
    import simdna
    from simdna import synthetic
    loaded_motifs = synthetic.LoadedEncodeMotifs(
        simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)
    substring_generator = synthetic.PwmSamplerFromLoadedMotifs(
        loaded_motifs, motif_name)
    # Only constrain placement to the sequence center when requested.
    if central_bp is not None:
        position_generator = synthetic.InsideCentralBp(central_bp)
    else:
        position_generator = synthetic.UniformPositionGenerator()
    quantity_generator = synthetic.UniformIntegerGenerator(min_counts, max_counts)
    embedders = [
        synthetic.RepeatedEmbedder(
            synthetic.SubstringEmbedder(
                synthetic.ReverseComplementWrapper(substring_generator),
                position_generator), quantity_generator)
    ]
    embed_in_background = synthetic.EmbedInABackground(
        synthetic.ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        embedders)
    generated_sequences = tuple(
        synthetic.GenerateSequenceNTimes(embed_in_background,
                                         num_seqs).generateSequences())
    sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    return sequence_arr, embedding_arr
def simulate_single_motif_detection(motif_name, seq_length, num_pos, num_neg,
                                    GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequence with a motif
          embedded anywhere in the sequence
        - Negative class sequence without the motif

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positives carry the motif; negatives are pure background
    # (motif_name=None disables embedding).
    pos_seqs, pos_embeddings = simple_motif_embedding(
        motif_name, seq_length, num_pos, GC_fraction)
    neg_seqs, neg_embeddings = simple_motif_embedding(
        None, seq_length, num_neg, GC_fraction)
    sequence_arr = np.concatenate((pos_seqs, neg_seqs))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    return sequence_arr, y, pos_embeddings + neg_embeddings
def simulate_motif_counting(motif_name, seq_length, pos_counts, neg_counts,
                            num_pos, num_neg, GC_fraction):
    """
    Generates data for motif counting task.

    Parameters
    ----------
    motif_name : str
    seq_length : int
    pos_counts : list
        (min_counts, max_counts) for positive set.
    neg_counts : list
        (min_counts, max_counts) for negative set.
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    pos_count_sequence_array, positive_embedding_arr = motif_density(
        motif_name, seq_length, num_pos, pos_counts[0], pos_counts[1],
        GC_fraction)
    # BUG FIX: the negative set was previously generated with `num_pos`
    # sequences, which made `sequence_arr` and `y` disagree in length
    # whenever num_pos != num_neg.
    neg_count_sequence_array, negative_embedding_arr = motif_density(
        motif_name, seq_length, num_neg, neg_counts[0], neg_counts[1],
        GC_fraction)
    sequence_arr = np.concatenate((pos_count_sequence_array,
                                   neg_count_sequence_array))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_motif_density_localization(motif_name, seq_length, center_size,
                                        min_motif_counts, max_motif_counts,
                                        num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequences with multiple motif instances
          in center of the sequence.
        - Negative class sequences with multiple motif instances
          anywhere in the sequence.

    The number of motif instances is uniformly sampled
    between minimum and maximum motif counts.

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    center_size : int
        length of central part of the sequence where motifs can be positioned
    min_motif_counts : int
        minimum number of motif instances
    max_motif_counts : int
        maximum number of motif instances
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positives: placement restricted to the central `center_size` bases.
    localized_density_sequence_array, positive_embedding_arr = motif_density(
        motif_name, seq_length, num_pos, min_motif_counts, max_motif_counts,
        GC_fraction, center_size)
    # Negatives: same density, but motifs may land anywhere.
    unlocalized_density_sequence_array, negative_embedding_arr = motif_density(
        motif_name, seq_length, num_neg, min_motif_counts, max_motif_counts,
        GC_fraction)
    sequence_arr = np.concatenate((localized_density_sequence_array,
                                   unlocalized_density_sequence_array))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_multi_motif_embedding(motif_names, seq_length, min_num_motifs,
                                   max_num_motifs, num_seqs, GC_fraction):
    """
    Generates data for multi motif recognition task.

    Parameters
    ----------
    motif_names : list
        List of strings (encode motif names).
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_seqs : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : ndarray
        Contains labels for each motif.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Lazy imports keep simdna optional until the simulation is actually run.
    import simdna
    from simdna import synthetic
    loaded_motifs = synthetic.LoadedEncodeMotifs(
        simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)

    def get_embedder(motif_name):
        # One named embedder per motif; the name is also what the
        # IsInTraceLabelGenerator below keys on.
        substring_generator = synthetic.PwmSamplerFromLoadedMotifs(
            loaded_motifs, motif_name)
        return synthetic.SubstringEmbedder(
            synthetic.ReverseComplementWrapper(substring_generator),
            name=motif_name)

    embedders = [get_embedder(motif_name) for motif_name in motif_names]
    # Embed a random subset of the candidate motifs, with subset size
    # drawn from [min_num_motifs, max_num_motifs].
    quantity_generator = synthetic.UniformIntegerGenerator(
        min_num_motifs, max_num_motifs)
    combined_embedder = [
        synthetic.RandomSubsetOfEmbedders(quantity_generator, embedders)
    ]
    embed_in_background = synthetic.EmbedInABackground(
        synthetic.ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        combined_embedder)
    generated_sequences = tuple(
        synthetic.GenerateSequenceNTimes(embed_in_background,
                                         num_seqs).generateSequences())
    sequence_arr = np.array(
        [generated_seq.seq for generated_seq in generated_sequences])
    # Per-sequence boolean label vector, one entry per motif name.
    label_generator = synthetic.IsInTraceLabelGenerator(np.asarray(motif_names))
    y = np.array(
        [
            label_generator.generateLabels(generated_seq)
            for generated_seq in generated_sequences
        ],
        dtype=bool)
    embedding_arr = [
        generated_seq.embeddings for generated_seq in generated_sequences
    ]
    return sequence_arr, y, embedding_arr
def simulate_differential_accessibility(
        pos_motif_names, neg_motif_names, seq_length, min_num_motifs,
        max_num_motifs, num_pos, num_neg, GC_fraction):
    """
    Generates data for the differential accessibility task: positive
    class sequences embed motifs from `pos_motif_names`, negative class
    sequences embed motifs from `neg_motif_names`.

    Parameters
    ----------
    pos_motif_names : list
        List of strings.
    neg_motif_names : list
        List of strings.
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr : list
        Embedding objects for each sequence.
    """
    pos_seqs, _, pos_embeddings = simulate_multi_motif_embedding(
        pos_motif_names, seq_length, min_num_motifs, max_num_motifs,
        num_pos, GC_fraction)
    neg_seqs, _, neg_embeddings = simulate_multi_motif_embedding(
        neg_motif_names, seq_length, min_num_motifs, max_num_motifs,
        num_neg, GC_fraction)
    sequence_arr = np.concatenate((pos_seqs, neg_seqs))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = pos_embeddings + neg_embeddings
    return sequence_arr, y, embedding_arr
def simulate_heterodimer_grammar(motif1, motif2, seq_length, min_spacing,
                                 max_spacing, num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences containing motif1 and motif2:

        - Positive class: motif1 and motif2 embedded as a pair, separated
          by between min_spacing and max_spacing background bases.
        - Negative class: motif1 and motif2 embedded independently,
          anywhere in the sequence, not as a heterodimer grammar.

    Parameters
    ----------
    motif1 : str, encode motif name
    motif2 : str, encode motif name
    seq_length : int, length of sequence
    min_spacing : int, minimum inter motif spacing
    max_spacing : int, maximum inter motif spacing
    num_pos : int, number of positive class sequences
    num_neg : int, number of negative class sequences
    GC_fraction : float, GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr : list
        List of embedding objects.
    """
    import simdna
    from simdna import synthetic
    loaded_motifs = synthetic.LoadedEncodeMotifs(
        simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)

    def pwm_sampler(name):
        # Sample the motif PWM on either strand.
        return synthetic.ReverseComplementWrapper(
            synthetic.PwmSamplerFromLoadedMotifs(loaded_motifs, name))

    spacing_sampler = synthetic.UniformIntegerGenerator(min_spacing,
                                                        max_spacing)
    pair_embedder = synthetic.EmbeddableEmbedder(
        synthetic.PairEmbeddableGenerator(pwm_sampler(motif1),
                                          pwm_sampler(motif2),
                                          spacing_sampler))
    embed_in_background = synthetic.EmbedInABackground(
        synthetic.ZeroOrderBackgroundGenerator(
            seq_length, discreteDistribution=get_distribution(GC_fraction)),
        [pair_embedder])
    positives = tuple(
        synthetic.GenerateSequenceNTimes(embed_in_background,
                                         num_pos).generateSequences())
    grammar_sequence_arr = np.array([seq.seq for seq in positives])
    positive_embedding_arr = [seq.embeddings for seq in positives]
    # Negatives: both motifs embedded independently with no spacing
    # constraint (subset size fixed at 2, i.e. both embedders used).
    nongrammar_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
        [motif1, motif2], seq_length, 2, 2, num_neg, GC_fraction)
    sequence_arr = np.concatenate((grammar_sequence_arr,
                                   nongrammar_sequence_arr))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
| {
"repo_name": "ktaneishi/deepchem",
"path": "deepchem/molnet/dnasim.py",
"copies": "1",
"size": "14130",
"license": "mit",
"hash": 4402554632414927400,
"line_mean": 33.8888888889,
"line_max": 86,
"alpha_frac": 0.6715498938,
"autogenerated": false,
"ratio": 3.8733552631578947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.999951883470967,
"avg_score": 0.009077264449645068,
"num_lines": 405
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import re
import gzip
import os
import json
from simdna import random
import numpy as np
# Map from DNA base to its index in array/PWM encodings.
DEFAULT_LETTER_TO_INDEX = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
# Default background mononucleotide frequencies (40% GC).
DEFAULT_BACKGROUND_FREQ = OrderedDict(
    [('A', 0.3), ('C', 0.2), ('G', 0.2), ('T', 0.3)])
# Default background dinucleotide frequencies; the 16 entries sum to 1.
DEFAULT_DINUC_FREQ = OrderedDict([
    ('AA',0.095),
    ('AC',0.050),
    ('AG',0.071),
    ('AT',0.075),
    ('CA',0.073),
    ('CC',0.054),
    ('CG',0.010),
    ('CT',0.072),
    ('GA',0.060),
    ('GC',0.044),
    ('GG',0.054),
    ('GT',0.050),
    ('TA',0.064),
    ('TC',0.060),
    ('TG',0.073),
    ('TT',0.095),
    ])
class DiscreteDistribution(object):
    """A discrete probability distribution over arbitrary sortable values."""

    def __init__(self, valToFreq):
        """
        valToFreq: dict where the keys are the possible things to sample
        and the values are their frequencies; frequencies must sum to 1
        (within 1e-5).
        """
        self.valToFreq = valToFreq
        # Sort the keys so behaviour is deterministic given the RNG state.
        self.keysOrder = sorted(valToFreq)
        # Array holding just the probabilities, aligned with keysOrder.
        self.freqArr = [valToFreq[k] for k in self.keysOrder]
        assert abs(sum(self.freqArr) - 1.0) < 10**-5
        # Index in freqArr -> the value that index represents.
        self.indexToVal = {i: v for i, v in enumerate(self.keysOrder)}

    def sample(self):
        """Draw one value according to the stored frequencies."""
        return self.indexToVal[sampleFromProbsArr(self.freqArr)]
# Shared default base distribution built from the background
# mononucleotide frequencies above.
DEFAULT_BASE_DISCRETE_DISTRIBUTION = DiscreteDistribution(
    DEFAULT_BACKGROUND_FREQ)
def get_file_handle(filename, mode="r"):
    """
    Retrieve an open file handle, transparently using gzip for
    .gz / .gzip files.

    WARNING: must close file handle returned from this function

    :param filename: str, path to file
    :param mode: str, 'r'=read; 'w'=write, etc. according to `open`
    :return: open file handle
    """
    # Bug fix: the old check used unescaped regexes ('.gz$' matched ANY
    # character before "gz", e.g. "foogz", and '.gzip' matched anywhere
    # in the name). Extension suffix matching is what was intended.
    if filename.endswith('.gz') or filename.endswith('.gzip'):
        if mode == "r":
            # gzip handles yield bytes; read in binary mode.
            mode = "rb"
        elif mode == "w":
            # Remove any pre-existing file first so a gzip write starts
            # from scratch rather than risking an append.
            if os.path.isfile(filename):
                os.remove(filename)
        return gzip.open(filename, mode)
    else:
        return open(filename, mode)
def default_tab_seppd(s):
    """Strip the trailing newline from `s`, then split it on tabs."""
    return trim_newline(s).split("\t")
def trim_newline(s):
    """Remove trailing carriage-return / line-feed characters from `s`."""
    return s.rstrip('\r\n')
def perform_action_on_each_line_of_file(
        file_handle, action, transformation=default_tab_seppd, ignore_input_title=False):
    """
    Read the file and perform `action` on each (transformed) line, then
    close the handle.

    :param file_handle: file, open file handle (closed on return)
    :param action: function handle, what to do with each line
    :param transformation: function handle, manipulate line before action
    :param ignore_input_title: bool, skip the first line when True
    :return:
    """
    line_number = 0
    for raw_line in file_handle:
        line_number += 1
        if hasattr(raw_line, "decode"):
            # Bytes (e.g. from a gzip handle): decode before processing.
            raw_line = raw_line.decode("utf-8")
        process_line(raw_line, line_number, ignore_input_title,
                     transformation, action)
    file_handle.close()
def process_line(line, i, ignore_input_title,
                 transformation, action):
    """
    Line-by-line file processor; used in motif loading
    and simdata loading functions.

    :param line: str, line from file
    :param i: int, 1-based line index
    :param ignore_input_title: bool, skip the first line when set
    :param transformation: function handle, manipulate line before action
    :param action: function handle, what to do with line
    :return:
    """
    # Skip only the title line (i == 1) and only when asked to.
    skip = (i <= 1) and (ignore_input_title is not False)
    if not skip:
        action(transformation(line), i)
class VariableWrapper():
    """Mutable box giving reference-type access to an immutable value."""

    def __init__(self, var):
        # Reassign .var to "mutate" the value through shared references.
        self.var = var
def enum(**enums):
    """
    Constructs an ad-hoc enum class from a set of kwargs.

    Callables are wrapped in staticmethod so they stay plain functions
    when accessed on the class. All values are also exposed via `.vals`
    (list) and `.the_dict` (the original kwargs mapping).

    :param enums: name -> value mapping
    :return: a new class with one attribute per kwarg
    """
    class Enum(object):
        pass

    for name, value in enums.items():
        attr = staticmethod(value) if callable(value) else value
        setattr(Enum, name, attr)
    Enum.vals = list(enums.values())
    Enum.the_dict = enums
    return Enum
def combine_enums(*enums):
    """Merge several enum objects (see `enum`) into one; later names win."""
    merged = OrderedDict()
    for one_enum in enums:
        merged.update(one_enum.the_dict)
    return enum(**merged)
def sampleFromProbsArr(arrWithProbs):
    """Samples from a discrete distribution.

    Arguments:
        arrWithProbs: array of probabilities

    Returns:
        an index, sampled with the probability of that index in
        array of probabilities.
    """
    # NOTE(review): `random` is simdna's RNG module (file-level import),
    # assumed to expose a numpy-style `choice(n, p=...)` -- confirm.
    # Probabilities are renormalized by their sum so small floating-point
    # drift in the input does not break the sampler.
    arrWithProbs = np.array(arrWithProbs)
    return random.choice(len(arrWithProbs), p=arrWithProbs/arrWithProbs.sum())
# Base -> complementary base, preserving case; N/n map to themselves.
reverseComplementLookup = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
                           'a': 't', 't': 'a', 'g': 'c', 'c': 'g', 'N': 'N', 'n': 'n'}


def reverseComplement(sequence):
    """
    Get the reverse complement of a sequence: reverse the string, then
    replace each base by its complement.

    :param sequence: str, sequence of elements in reverseComplementLookup
    :return: str, reverse complement
    """
    complemented = (reverseComplementLookup[base]
                    for base in reversed(sequence))
    return "".join(complemented)
def sampleWithoutReplacement(arr, numToSample):
    # Draw numToSample distinct items from arr uniformly at random using a
    # partial Fisher-Yates shuffle on a copy (arr itself is untouched).
    # NOTE(review): `random` is simdna's RNG module (file-level import),
    # assumed to expose a [0,1) `random()` -- confirm.
    arrayCopy = [x for x in arr]
    for i in range(numToSample):
        # Pick a random index in [i, len) and swap it into position i.
        randomIndex = int(random.random() * (len(arrayCopy) - i)) + i
        swapIndices(arrayCopy, i, randomIndex)
    return arrayCopy[0:numToSample]
def swapIndices(arr, idx1, idx2):
    """Swap the elements of `arr` at positions `idx1` and `idx2`, in place."""
    arr[idx1], arr[idx2] = arr[idx2], arr[idx1]
def get_file_name_parts(file_name):
    """
    Decompose a unix file path into (directory, core name, extension)
    with a regex and wrap the pieces in a FileNameParts object.

    :param file_name: a unix file path
    :return: FileNameParts
    """
    matched = re.compile(r"^(.*/)?([^\./]+)(\.[^/]*)?$").search(file_name)
    return FileNameParts(matched.group(1),
                         matched.group(2),
                         matched.group(3))
class FileNameParts(object):
    """
    Wrapper on a decomposed file path (directory, core name, extension)
    used to derive transformed file names.

    Warning: this will break for non-unix systems.
    """

    def __init__(self, directory, core_file_name, extension):
        # Fall back to the working directory when the path had no
        # directory component (regex group was None).
        self.directory = directory if (directory is not None) else os.getcwd()
        self.core_file_name = core_file_name
        self.extension = extension

    def get_full_path(self):
        """Return directory + name + extension as a single path string."""
        # Bug fix: previously referenced the nonexistent attribute
        # `self.file_name`, so this method always raised AttributeError.
        return self.directory + "/" + self.get_core_file_name_and_extension()

    def get_core_file_name_and_extension(self):
        """Return the file name with its extension ('' when there is none)."""
        # `or ""` guards against extension being None (path had no dot).
        return self.core_file_name + (self.extension or "")

    def get_transformed_core_file_name(self, transformation, extension=None):
        """
        Apply `transformation` to the core name; append `extension` if
        given, otherwise the original extension (when present).
        """
        to_return = transformation(self.core_file_name)
        if (extension is not None):
            to_return = to_return + extension
        else:
            if (self.extension is not None):
                to_return = to_return + self.extension
        return to_return

    def get_transformed_file_path(self, transformation, extension=None):
        """Like get_transformed_core_file_name, but with the directory prefix."""
        return (self.directory+"/"+
                self.get_transformed_core_file_name(transformation,
                                                    extension=extension))
def format_as_json(jsonable_data):
    """Pretty-print `jsonable_data` as 4-space-indented JSON text."""
    return json.dumps(jsonable_data,
                      indent=4,
                      separators=(',', ': '))
class ArgumentToAdd(object):
    """
    Renders a runtime argument as a short string chunk, used to
    auto-generate output file names (see `addArguments`).
    """

    def __init__(self, val, argumentName=None, argNameAndValSep="-"):
        self.val = val
        self.argumentName = argumentName
        self.argNameAndValSep = argNameAndValSep

    def argNamePrefix(self):
        """Return a 'name-' style prefix, or '' for anonymous arguments."""
        if self.argumentName is None:
            return ""
        return self.argumentName + str(self.argNameAndValSep)

    def transform(self):
        """Render as 'name-value'; non-string sequences join items with ','."""
        is_sequence = (isinstance(self.val, str) == False and
                       hasattr(self.val, "__len__"))
        rendered = (','.join([str(el) for el in self.val])
                    if is_sequence else str(self.val))
        return self.argNamePrefix() + rendered
class FloatArgument(ArgumentToAdd):
    """
    ArgumentToAdd for float values: the '.' in the rendered value is
    replaced with 'p' so the result is filename-safe (0.5 -> '0p5').
    """
    # __init__ and argNamePrefix were byte-for-byte copies of the base
    # class implementations; they are now simply inherited.

    def transform(self):
        """Render as 'name-0p5' style."""
        return self.argNamePrefix() + str(self.val).replace(".", "p")
class BooleanArgument(ArgumentToAdd):
    # Flag-style argument: contributes just its name, no value.
    def transform(self):
        # addArguments() skips arguments whose val is False/None, so by
        # the time transform() runs the flag must be truthy.
        assert self.val # should be True if you're calling transformation
        return self.argumentName
class CoreFileNameArgument(ArgumentToAdd):
    # Renders the value as a file's core name (directory and extension
    # stripped). NOTE(review): depends on a local `fileProcessing`
    # module imported lazily -- confirm it is importable at runtime.
    def transform(self):
        import fileProcessing as fp
        return self.argNamePrefix() + fp.getCoreFileName(self.val)
class ArrArgument(ArgumentToAdd):
    """Argument whose value is a sequence; items are joined by `sep`."""

    def __init__(self, val, argumentName, sep="+", toStringFunc=str):
        super(ArrArgument, self).__init__(val, argumentName)
        self.sep = sep
        # Converts each item of the sequence to its string form.
        self.toStringFunc = toStringFunc

    def transform(self):
        """Render as 'name-item1+item2+...'."""
        rendered = [self.toStringFunc(item) for item in self.val]
        return self.argNamePrefix() + self.sep.join(rendered)
class ArrOfFileNamesArgument(ArrArgument):
    # ArrArgument whose items are file paths rendered via their core
    # file names. NOTE(review): relies on the local `fileProcessing`
    # module -- confirm it is importable at runtime.
    def __init__(self, val, argumentName, sep="+"):
        import fileProcessing as fp
        super(ArrOfFileNamesArgument, self).__init__(val, argumentName,
                                                     sep, toStringFunc=lambda x: fp.getCoreFileName(x))
def addArguments(string, args, joiner="_"):
    """
    Append the rendering of each ArgumentToAdd in `args` to `string`,
    separated by `joiner`. Arguments whose value is None, False, or an
    empty sequence are skipped.
    """
    for arg in args:
        is_empty_seq = hasattr(arg.val, "__len__") and len(arg.val) == 0
        if arg.val is None or arg.val is False or is_empty_seq:
            continue
        string += joiner + arg.transform()
    return string
| {
"repo_name": "kundajelab/simdna",
"path": "simdna/simdnautil/util.py",
"copies": "1",
"size": "10029",
"license": "mit",
"hash": 7032133110976493000,
"line_mean": 29.5762195122,
"line_max": 114,
"alpha_frac": 0.6229933194,
"autogenerated": false,
"ratio": 3.6898454746136866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48128387940136863,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import numpy as np
import tensorflow as tf
class ConvNet(object):
    """Basic implementation of ConvNet class compatible with tfutils.

    Layer methods (conv, fc, pool, lrn, ...) consume ``self.output`` by
    default, overwrite it with the new layer's tensor, and record the
    layer's configuration in ``self._params`` keyed by variable scope.
    """

    def __init__(
            self,
            seed=None,
            **kwargs):
        # seed: forwarded to weight initializers and dropout for
        # reproducibility.
        self.seed = seed
        # Tensor produced by the most recently added layer.
        self.output = None
        self._params = OrderedDict()
        self._layers = OrderedDict()

    @property
    def params(self):
        return self._params

    @property
    def layers(self):
        return self._layers

    @params.setter
    def params(self, value):
        # Record the layer config under the current variable scope name,
        # sub-keyed by the layer type given in value['type'].
        name = tf.get_variable_scope().name
        if name not in self._params:
            self._params[name] = OrderedDict()
        self._params[name][value['type']] = value

    @property
    def graph(self):
        # Serialized GraphDef of the default graph.
        return tf.get_default_graph().as_graph_def()

    def initializer(
            self, kind='xavier',
            stddev=0.01):
        """Return a weight initializer: 'xavier', 'trunc_norm', or
        'variance_scaling_initializer' (stddev applies to trunc_norm)."""
        if kind == 'xavier':
            init = tf.contrib.layers.xavier_initializer(seed=self.seed)
        elif kind == 'trunc_norm':
            init = tf.truncated_normal_initializer(
                mean=0, stddev=stddev,
                seed=self.seed)
        elif kind == 'variance_scaling_initializer':
            init = tf.contrib.layers.variance_scaling_initializer(
                seed=self.seed)
        else:
            raise ValueError('Please provide an appropriate initialization '
                             'method: xavier or trunc_norm')
        return init

    @tf.contrib.framework.add_arg_scope
    def batchnorm(
            self, is_training,
            in_layer=None, decay=0.997,
            epsilon=1e-5):
        """Apply fused batch normalization over the last (channel) axis."""
        # NOTE(review): `not in_layer` would misbehave if an actual
        # Tensor were passed; callers rely on the default None here.
        # Consider `if in_layer is None`.
        if not in_layer:
            in_layer = self.output
        self.output = tf.layers.batch_normalization(
            inputs=in_layer, axis=-1,
            momentum=decay, epsilon=epsilon,
            center=True, scale=True,
            training=is_training, fused=True,
        )
        return self.output

    @tf.contrib.framework.add_arg_scope
    def conv(self,
             out_shape,
             ksize=3,
             stride=1,
             padding='SAME',
             init='xavier',
             stddev=.01,
             bias=0,
             activation='relu',
             train=True,
             add_bn=False,
             weight_decay=None,
             in_layer=None,
             layer='conv',
             ):
        """2-D convolution + bias, with optional batchnorm / activation.

        ksize may be an int (square kernel) or an (h, w) pair; variables
        are created (or reused) in scope `layer`.
        """
        # Set parameters
        if in_layer is None:
            in_layer = self.output
        if not weight_decay:
            weight_decay = 0.
        # Get conv kernel shape; input channels come from the last axis.
        in_shape = in_layer.get_shape().as_list()[-1]
        conv2d_strides = [1, stride, stride, 1]
        if isinstance(ksize, int):
            ksize1 = ksize
            ksize2 = ksize
        else:
            ksize1, ksize2 = ksize
        conv_k_shape = [ksize1, ksize2, in_shape, out_shape]
        # Define variable
        bias_init = tf.constant_initializer(bias)
        with tf.variable_scope(layer, reuse=tf.AUTO_REUSE):
            kernel = tf.get_variable(
                initializer=self.initializer(init, stddev=stddev),
                shape=conv_k_shape,
                dtype=tf.float32,
                regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                name='weights')
            biases = tf.get_variable(
                initializer=bias_init,
                shape=[out_shape],
                dtype=tf.float32,
                regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                name='bias')
        # Do the actual computation
        conv = tf.nn.conv2d(
            in_layer, kernel,
            strides=conv2d_strides,
            padding=padding)
        self.output = tf.nn.bias_add(
            conv, biases,
            name='conv')
        # Whether adding batch normalization
        if add_bn:
            with tf.variable_scope(layer, reuse=tf.AUTO_REUSE):
                self.output = self.batchnorm(train)
        # Add activation
        if activation:
            self.output = self.activation(kind=activation)
        # Set parameters (this dict will be appended to the final params)
        self.params = {'input': in_layer.name,
                       'type': 'conv',
                       'num_filters': out_shape,
                       'stride': stride,
                       'kernel_size': (ksize1, ksize2),
                       'padding': padding,
                       'init': init,
                       'stddev': stddev,
                       'bias': bias,
                       'activation': activation,
                       'weight_decay': weight_decay,
                       'seed': self.seed}
        self._layers[layer] = self.output
        return self.output

    @tf.contrib.framework.add_arg_scope
    def fc(self,
           out_shape,
           init='xavier',
           stddev=.01,
           bias=1,
           activation='relu',
           dropout=.5,
           in_layer=None,
           weight_decay=None,
           layer='fc',
           ):
        """Fully-connected layer: flattens the input, then matmul + bias,
        with optional activation and dropout."""
        # Set parameters
        if weight_decay is None:
            weight_decay = 0.
        if in_layer is None:
            in_layer = self.output
        resh = tf.layers.Flatten()(in_layer)  # keep the batch size dim
        in_shape = resh.get_shape().as_list()[-1]
        # Define variable
        bias_init = tf.constant_initializer(bias)
        with tf.variable_scope(layer, reuse=tf.AUTO_REUSE):
            kernel = tf.get_variable(
                initializer=self.initializer(init, stddev=stddev),
                shape=[in_shape, out_shape],
                dtype=tf.float32,
                regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                name='weights')
            biases = tf.get_variable(
                initializer=bias_init,
                shape=[out_shape],
                dtype=tf.float32,
                regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                name='bias')
        # Do the actual computation
        fcm = tf.matmul(resh, kernel)
        self.output = tf.nn.bias_add(fcm, biases, name='fc')
        # Add activation or dropout
        if activation is not None:
            self.activation(kind=activation)
        if dropout is not None:
            self.dropout(dropout=dropout)
        self.params = {'input': in_layer.name,
                       'type': 'fc',
                       'num_filters': out_shape,
                       'init': init,
                       'bias': bias,
                       'stddev': stddev,
                       'activation': activation,
                       'dropout': dropout,
                       'weight_decay': weight_decay,
                       'seed': self.seed}
        self._layers[layer] = self.output
        return self.output

    @tf.contrib.framework.add_arg_scope
    def lrn(
            self,
            depth_radius=2,
            bias=1,
            alpha=0.0001,
            beta=.75,
            in_layer=None,
            layer='lrn'):
        """Local response normalization (AlexNet-style)."""
        if in_layer is None:
            in_layer = self.output
        with tf.variable_scope(layer, reuse=tf.AUTO_REUSE):
            self.output = tf.nn.lrn(
                in_layer,
                depth_radius=np.float(depth_radius),
                bias=np.float(bias),
                alpha=alpha,
                beta=beta,
                name='norm')
        self.params = {'input': in_layer.name,
                       'type': 'lrnorm',
                       'depth_radius': depth_radius,
                       'bias': bias,
                       'alpha': alpha,
                       'beta': beta}
        self._layers[layer] = self.output
        return self.output

    @tf.contrib.framework.add_arg_scope
    def pool(self,
             ksize=3,
             stride=2,
             padding='SAME',
             pool_type='maxpool',
             layer='pool',
             in_layer=None):
        """Max ('maxpool') or average pooling; ksize and stride may be
        ints or (h, w) pairs."""
        # Set parameters
        if in_layer is None:
            in_layer = self.output
        if isinstance(ksize, int):
            ksize1 = ksize
            ksize2 = ksize
        else:
            ksize1, ksize2 = ksize
        if isinstance(stride, int):
            stride1 = stride
            stride2 = stride
        else:
            stride1, stride2 = stride
        ksizes = [1, ksize1, ksize2, 1]
        strides = [1, stride1, stride2, 1]
        # Do the pooling
        if pool_type=='maxpool':
            pool_func = tf.nn.max_pool
        else:
            pool_func = tf.nn.avg_pool
        self.output = pool_func(
            in_layer,
            ksize=ksizes,
            strides=strides,
            padding=padding,
            name=layer,
        )
        # Set params, return the value
        self.params = {
            'input':in_layer.name,
            'type':pool_type,
            'kernel_size': (ksize1, ksize2),
            'stride': stride,
            'padding': padding}
        self._layers[layer] = self.output
        return self.output

    def activation(self, kind='relu', in_layer=None):
        """Apply an activation function; only 'relu' is supported."""
        if in_layer is None:
            in_layer = self.output
        if kind == 'relu':
            out = tf.nn.relu(in_layer, name='relu')
        else:
            raise ValueError("Activation '{}' not defined".format(kind))
        self.output = out
        return out

    def dropout(self, dropout=.5, in_layer=None, **kwargs):
        """Apply dropout to the current output.

        NOTE(review): in the TF1 API the second positional argument of
        tf.nn.dropout is keep_prob, so `dropout` here is the probability
        of KEEPING a unit -- confirm against the TF version in use.
        """
        if in_layer is None:
            in_layer = self.output
        self.output = tf.nn.dropout(
            in_layer, dropout,
            seed=self.seed, name='dropout',
            **kwargs)
        return self.output
def mnist(inputs, train=True, seed=0):
    """Build a small 3-layer MLP (128 -> 32 -> 10 logits) for MNIST.

    :param inputs: input tensor of flattened images.
    :param train: bool, unused here (no dropout in this model).
    :param seed: RNG seed for the model's initializers.
    :return: ConvNet instance with `.output` set to the logits.
    """
    # Bug fix: `seed` was accepted but never forwarded, so the model was
    # not reproducible; pass it through to ConvNet.
    m = ConvNet(seed=seed)
    fc_kwargs = {
        'init': 'xavier',
        'dropout': None,
    }
    m.fc(128, layer='hidden1', in_layer=inputs, **fc_kwargs)
    m.fc(32, layer='hidden2', **fc_kwargs)
    m.fc(10, activation=None, layer='softmax_linear', **fc_kwargs)
    return m
def alexnet(inputs, train=True, norm=True, seed=0, **kwargs):
    """Build AlexNet: 5 conv layers (with optional LRN after conv1 and
    conv2) followed by 3 fully-connected layers.

    :param inputs: input image tensor.
    :param train: bool; enables 0.5 dropout on fc6/fc7 when True.
    :param norm: bool; apply local response normalization when True.
    :param seed: RNG seed forwarded to the model's initializers.
    :return: ConvNet instance with `.output` set to the fc8 logits.
    """
    # Define model class and default kwargs for different types of layers
    m = ConvNet(seed=seed)
    conv_kwargs = {
        'add_bn': False,
        'init': 'xavier',
        'weight_decay': .0001,
    }
    pool_kwargs = {
        'pool_type': 'maxpool',
    }
    fc_kwargs = {
        'init': 'trunc_norm',
        'weight_decay': .0001,
        'stddev': .01,
    }
    dropout = .5 if train else None
    # Actually define the network
    m.conv(
        96, 11, 4, padding='VALID',
        layer='conv1', in_layer=inputs, **conv_kwargs)
    if norm:
        m.lrn(depth_radius=5, bias=1, alpha=.0001, beta=.75, layer='conv1')
    m.pool(3, 2, **pool_kwargs)
    m.conv(256, 5, 1, layer='conv2', **conv_kwargs)
    if norm:
        m.lrn(depth_radius=5, bias=1, alpha=.0001, beta=.75, layer='conv2')
    m.pool(3, 2, **pool_kwargs)
    m.conv(384, 3, 1, layer='conv3', **conv_kwargs)
    m.conv(384, 3, 1, layer='conv4', **conv_kwargs)
    m.conv(256, 3, 1, layer='conv5', **conv_kwargs)
    m.pool(3, 2, **pool_kwargs)
    m.fc(4096, dropout=dropout, bias=.1, layer='fc6', **fc_kwargs)
    m.fc(4096, dropout=dropout, bias=.1, layer='fc7', **fc_kwargs)
    m.fc(1000, activation=None, dropout=None, bias=0, layer='fc8', **fc_kwargs)
    return m
def mnist_tfutils(inputs, train=True, **kwargs):
    # tfutils-style entry point: builds the mnist model on the 'images'
    # entry of the inputs dict and returns (output tensor, params dict).
    m = mnist(inputs['images'], train=train)
    return m.output, m.params
def alexnet_tfutils(inputs, **kwargs):
    # tfutils-style entry point: builds alexnet on the 'images' entry of
    # the inputs dict and returns (output tensor, params dict).
    m = alexnet(inputs['images'], **kwargs)
    return m.output, m.params
| {
"repo_name": "neuroailab/tfutils",
"path": "tfutils/model_tool.py",
"copies": "1",
"size": "11940",
"license": "mit",
"hash": -8229830301554185000,
"line_mean": 30.7553191489,
"line_max": 79,
"alpha_frac": 0.4960636516,
"autogenerated": false,
"ratio": 4.124352331606218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013650205636165343,
"num_lines": 376
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
class DeferredLRUCache(object):
    """
    An LRU cache with deferred (on-request) writing. When values are put
    into the cache, they are flagged as dirty, but are not written to the
    backend until ``.flush()`` is called. When the cache is flushed, all
    dirty values will be written back. Note that ``.flush()`` must be
    called frequently enough that the cache does not self-evict dirty
    values--this will fail.

    This is NOT thread-safe.
    """

    def __init__(self, get_backend, put_backend, max_size=2000):
        """
        Create a new LRU cache.

        :param get_backend:
            Function which fetches one value from the persistent backend.
        :type get_backend:
            Callable with the signature ``func(key)``, returning a ``value``.
        :param put_backend:
            Function which flushes dirty values from the cache to the
            persistent backend.
        :type put_backend:
            Callable with the signature ``func(dirty)`` where ``dirty``
            is a dict mapping keys to values; return value is ignored.
            (Doc fix: the old docstring claimed ``func(key, value)``,
            but ``flush()`` has always passed a single dict.)
        :param max_size:
            Maximum number of entries to allow in the cache.
        :type max_size:
            int
        """
        self.max_size = max_size
        self.get_backend = get_backend
        self.put_backend = put_backend
        # OrderedDict insertion order doubles as recency order: the
        # least recently used entry sits at the front.
        self.entries = OrderedDict()
        self.dirty = set()

    def get(self, key):
        """
        Fetch a value from the cache, reading it from the persistent
        backend if necessary.
        """
        try:
            value = self.entries[key]
        except KeyError:
            value = self.get_backend(key)
            self.entries[key] = value
        else:
            # Cache hit: mark the key as most recently used.
            self.entries.move_to_end(key)
        self.prune()
        return value

    def put(self, key, value):
        """
        Put a value into the cache, flagging it as dirty to be written
        back to the persistent backend on the next ``flush()`` call.
        """
        self.entries[key] = value
        # Assignment to an existing key keeps its old position; make the
        # key most recently used explicitly.
        self.entries.move_to_end(key)
        self.dirty.add(key)
        self.prune()

    def prune(self):
        """
        Prune the cache object back down to the desired size, evicting
        least recently used entries first.
        """
        while len(self.entries) > self.max_size:
            key, value = self.entries.popitem(last=False)
            # Evicting a dirty entry would silently lose data; callers
            # must flush() often enough that this never happens.
            assert key not in self.dirty

    def flush(self):
        """
        Flush dirty cache entries to the persistent backend as one dict.
        """
        to_put = {key: self.entries[key] for key in self.dirty}
        self.put_backend(to_put)
        self.dirty = set()
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/backend/cache.py",
"copies": "1",
"size": "2674",
"license": "mit",
"hash": 5444441400656695000,
"line_mean": 32.425,
"line_max": 79,
"alpha_frac": 0.5833956619,
"autogenerated": false,
"ratio": 4.33387358184765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.541726924374765,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from .compat import standard_library
import json, ijson
from itertools import chain
def _build_value(data):
''' Build a value (number, array, whatever) from an ijson stream.
'''
for (prefix, event, value) in data:
if event in ('string', 'null', 'boolean'):
return value
elif event == 'number':
return int(value) if (int(value) == float(value)) else float(value)
elif event == 'start_array':
return _build_list(data)
elif event == 'start_map':
return _build_map(data)
else:
# MOOP.
raise ValueError((prefix, event, value))
def _build_list(data):
''' Build a list from an ijson stream.
Stop when 'end_array' is reached.
'''
output = list()
for (prefix, event, value) in data:
if event == 'end_array':
break
else:
# let _build_value() handle the array item.
_data = chain([(prefix, event, value)], data)
output.append(_build_value(_data))
return output
def _build_map(data):
''' Build a dictionary from an ijson stream.
Stop when 'end_map' is reached.
'''
output = dict()
for (prefix, event, value) in data:
if event == 'end_map':
break
elif event == 'map_key':
output[value] = _build_value(data)
else:
# MOOP.
raise ValueError((prefix, event, value))
return output
def sample_geojson(stream, max_features):
    ''' Read a stream of input GeoJSON and return a string with a limited feature count.

    :param stream: file-like object containing GeoJSON text.
    :param max_features: int, maximum number of features to retain.
    :return: str, JSON text of a FeatureCollection with at most
        max_features features.
    '''
    data, features = ijson.parse(stream), list()

    for (prefix1, event1, value1) in data:
        if event1 != 'start_map':
            # A root GeoJSON object is a map.
            raise ValueError((prefix1, event1, value1))

        for (prefix2, event2, value2) in data:
            if event2 == 'map_key' and value2 == 'type':
                prefix3, event3, value3 = next(data)
                # Bug fix: this check previously used `and`, so any
                # string-valued "type" (e.g. "Topology") slipped through
                # unrejected. We only want GeoJSON feature collections.
                if event3 != 'string' or value3 != 'FeatureCollection':
                    raise ValueError((prefix3, event3, value3))
            elif event2 == 'map_key' and value2 == 'features':
                prefix4, event4, value4 = next(data)
                if event4 != 'start_array':
                    # We only want lists of features here.
                    raise ValueError((prefix4, event4, value4))
                for (prefix5, event5, value5) in data:
                    if event5 == 'end_array' or len(features) == max_features:
                        break
                    # let _build_value() handle the feature.
                    _data = chain([(prefix5, event5, value5)], data)
                    features.append(_build_value(_data))
                geojson = dict(type='FeatureCollection', features=features)
                return json.dumps(geojson)

    # Empty stream: no root object was ever seen.
    raise ValueError()
| {
"repo_name": "slibby/machine",
"path": "openaddr/sample.py",
"copies": "1",
"size": "3223",
"license": "isc",
"hash": 5574996323803394000,
"line_mean": 30.9108910891,
"line_max": 88,
"alpha_frac": 0.519081601,
"autogenerated": false,
"ratio": 4.349527665317139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025752788951952856,
"num_lines": 101
} |
from __future__ import absolute_import, division, print_function
from .compat import standard_library
import json
from csv import DictReader
from io import StringIO
from base64 import b64decode
from operator import itemgetter
from os.path import join, dirname, splitext, relpath
from dateutil.parser import parse as parse_datetime
from urllib.parse import urljoin
from os import environ
from re import compile
import json, pickle
import requests
from . import S3, __version__
from .compat import expand_uri
# Sort constants for summarize_runs()
GLASS_HALF_FULL = 1
GLASS_HALF_EMPTY = 2
def _get_cached(memcache, key):
''' Get a thing from the cache, or None.
'''
if not memcache:
return None
pickled = memcache.get(key)
if pickled is None:
return None
try:
value = pickle.loads(pickled)
except Exception as e:
return None
else:
return value
def _set_cached(memcache, key, value):
''' Put a thing in the cache, if it exists.
'''
if not memcache:
return
pickled = pickle.dumps(value, protocol=2)
memcache.set(key, pickled)
def is_coverage_complete(source):
    ''' True when the source dict declares coverage via ISO 3166,
        US Census, or geometry.
    '''
    if 'coverage' not in source:
        return False
    cov = source['coverage']
    return ('ISO 3166' in cov) or ('US Census' in cov) or ('geometry' in cov)
def state_conform_type(state):
    ''' Guess the conform type from the extension of the cached file,
        or None when there is no cached file.
    '''
    cache = state.get('cache')
    if cache is None:
        return None
    if cache.endswith('.zip'):
        # Shapefiles are cached as zip archives; polygonal geometry gets
        # its own conform type.
        if state.get('geometry type', 'Point') in ('Polygon', 'MultiPolygon'):
            return 'shapefile-polygon'
        return 'shapefile'
    if cache.endswith('.json'):
        return 'geojson'
    if cache.endswith('.csv'):
        return 'csv'
    return None
def convert_run(memcache, run, url_template):
    ''' Flatten a Run model object into a plain dict for template rendering.

    Results are memoized in memcache, keyed on the run id and the code
    version so a deploy invalidates old conversions.

    :param memcache: optional memcache client (may be None).
    :param run: Run model object.
    :param url_template: URI template for the run's GitHub source link.
    :return: dict of display-ready fields.
    '''
    cache_key = 'converted-run-{}-{}'.format(run.id, __version__)
    cached_run = _get_cached(memcache, cache_key)

    if cached_run is not None:
        return cached_run

    try:
        source = json.loads(b64decode(run.source_data).decode('utf8'))
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; malformed source data still
        # falls back to an empty dict.
        source = {}

    run_state = run.state or {}

    converted_run = {
        'address count': run_state.get('address count'),
        'cache': run_state.get('cache'),
        'cache time': run_state.get('cache time'),
        'cache_date': run.datetime_tz.strftime('%Y-%m-%d'),
        'conform': bool(source.get('conform', False)),
        'conform type': state_conform_type(run_state),
        'coverage complete': is_coverage_complete(source),
        'fingerprint': run_state.get('fingerprint'),
        'geometry type': run_state.get('geometry type'),
        'href': expand_uri(url_template, run.__dict__),
        'output': run_state.get('output'),
        'process time': run_state.get('process time'),
        'processed': run_state.get('processed'),
        'sample': run_state.get('sample'),
        'sample_link': expand_uri('/runs/{id}/sample.html', dict(id=run.id)),
        'shortname': splitext(relpath(run.source_path, 'sources'))[0],
        'skip': bool(source.get('skip', False)),
        'source': relpath(run.source_path, 'sources'),
        'type': source.get('type', '').lower(),
        'version': run_state.get('version')
        }

    _set_cached(memcache, cache_key, converted_run)
    return converted_run
def run_counts(runs):
    ''' Aggregate source/cache/process/address counts over a list of runs.
    '''
    states = [(run.state or {}) for run in runs]
    return {
        'sources': len(runs),
        'cached': sum(1 for state in states if state.get('cache')),
        'processed': sum(1 for state in states if state.get('processed')),
        'addresses': sum(int(state.get('address count') or 0) for state in states),
        }
def sort_run_dicts(dicts, sort_order):
    ''' Sort run dicts in place according to sort_order
        (GLASS_HALF_FULL or GLASS_HALF_EMPTY).
    '''
    # Bug fix: these comparisons used `is`, which only worked because
    # CPython interns small integers; use value equality instead.
    if sort_order == GLASS_HALF_FULL:
        # Put the happy, successful stuff up front.
        key = lambda d: (not bool(d['processed']), not bool(d['cache']), d['source'])
    elif sort_order == GLASS_HALF_EMPTY:
        # Put the stuff that needs help up front.
        key = lambda d: (bool(d['cache']), bool(d['processed']), d['source'])
    else:
        raise ValueError('Unknown sort order "{}"'.format(sort_order))

    dicts.sort(key=key)
def nice_integer(number):
    ''' Format a number like '999,999,999' by repeatedly inserting a
        comma before each trailing group of three digits.
    '''
    text = str(number)
    grouper = compile(r'^(\d+)(\d\d\d)\b')
    while grouper.match(text):
        text = grouper.sub(r'\1,\2', text)
    return text
def break_state(string):
    ''' Adds <wbr> tag and returns an HTML-safe string.
    '''
    pattern = compile(r'^(.+)/([^/]+)$')
    # Bug fix: the escape calls had degenerated into no-ops
    # (e.g. replace('&', '&')), so the result was never HTML-safe as the
    # docstring promises. Escape ampersand first so it doesn't re-escape
    # the entities produced by the other two replacements.
    string = string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if pattern.match(string):
        # Allow a line break opportunity before the final path component.
        string = pattern.sub(r'\1/<wbr>\2', string)
    return string
def summarize_runs(memcache, runs, datetime, owner, repository, sort_order):
    ''' Return summary data for set.html template.
    '''
    repo_url = expand_uri(u'https://github.com/{owner}/{repository}/',
                          dict(owner=owner, repository=repository))
    url_template = urljoin(repo_url, u'blob/{commit_sha}/{+source_path}')
    states = [convert_run(memcache, run, url_template) for run in runs]
    sort_run_dicts(states, sort_order)
    return dict(states=states, last_modified=datetime, counts=run_counts(runs))
| {
"repo_name": "slibby/machine",
"path": "openaddr/summarize.py",
"copies": "1",
"size": "5532",
"license": "isc",
"hash": -1113512025995896400,
"line_mean": 28.9027027027,
"line_max": 85,
"alpha_frac": 0.598879248,
"autogenerated": false,
"ratio": 3.735313977042539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4834193225042539,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from configparser import ConfigParser
import os.path
import sys
from future import standard_library
standard_library.install_aliases()
# This module reads config.ini (located next to this file) at import time
# and exposes its values as module-level constants.
parser = ConfigParser()
path = os.path.split(__file__)[0]
parser.read(os.path.join(path, 'config.ini'))
# File names with full path
THERMOSTATS_FILE = parser.get('raw_data_files', 'THERMOSTATS_FILE')
POSTAL_FILE = parser.get('raw_data_files', 'POSTAL_FILE')
# File headers (strings: headings for each column in the raw text files)
CYCLE_FIELD1 = parser.get('file_headers', 'CYCLE_FIELD1')
CYCLE_FIELD2 = parser.get('file_headers', 'CYCLE_FIELD2')
CYCLE_FIELD3 = parser.get('file_headers', 'CYCLE_FIELD3')
CYCLE_FIELD4 = parser.get('file_headers', 'CYCLE_FIELD4')
CYCLE_FIELD5 = parser.get('file_headers', 'CYCLE_FIELD5')
CYCLE_FIELD6 = parser.get('file_headers', 'CYCLE_FIELD6')
CYCLE_FIELD7 = parser.get('file_headers', 'CYCLE_FIELD7')
CYCLE_FIELDS = tuple([CYCLE_FIELD1, CYCLE_FIELD2, CYCLE_FIELD3, CYCLE_FIELD4,
                       CYCLE_FIELD5, CYCLE_FIELD6, CYCLE_FIELD7])
# NOTE(review): CYCLE_START_TIME and CYCLE_END_TIME are reassigned from the
# 'df_index_names' / 'df_column_names' sections near the bottom of this
# module, overwriting the 'file_headers' values read here -- confirm that is
# intentional.
CYCLE_START_TIME = parser.get('file_headers', 'CYCLE_START_TIME')
CYCLE_END_TIME = parser.get('file_headers', 'CYCLE_END_TIME')
# Ints: 0-based column position within the raw file (left to right)
CYCLE_ID_INDEX = int(parser.get('file_headers', 'CYCLE_ID_INDEX'))
CYCLE_TYPE_INDEX = int(parser.get('file_headers', 'CYCLE_TYPE_INDEX'))
CYCLE_START_INDEX = int(parser.get('file_headers', 'CYCLE_START_INDEX'))
CYCLE_END_TIME_INDEX = int(parser.get('file_headers', 'CYCLE_END_TIME_INDEX'))
# Number of per-record data columns listed just above.
CYCLE_RECORD_COLS = sum([1 for col in [CYCLE_TYPE_INDEX, CYCLE_START_INDEX,
                                       CYCLE_END_TIME_INDEX]])
unique_cycle_field_pos = int(parser.get('file_headers', 'UNIQUE_CYCLE_FIELD_INDEX'))
# Column heading that is unique to cycles data file
UNIQUE_CYCLE_FIELD_INDEX = CYCLE_FIELDS[unique_cycle_field_pos]
# String in record indicating cooling mode
CYCLE_TYPE_COOL = parser.get('record_values', 'CYCLE_TYPE_COOL')
# Inside observation file column names
INSIDE_FIELD1 = parser.get('file_headers', 'INSIDE_FIELD1')
INSIDE_FIELD2 = parser.get('file_headers', 'INSIDE_FIELD2')
INSIDE_FIELD3 = parser.get('file_headers', 'INSIDE_FIELD3')
SENSOR_FIELDS = tuple([INSIDE_FIELD1, INSIDE_FIELD2, INSIDE_FIELD3])
# SENSOR_ID_FIELD is the string heading of corresponding field
# SENSOR_ID_INDEX gives the index of the INSIDE field containing device ID
# in the tuple SENSOR_FIELDS.
SENSOR_ID_FIELD = SENSOR_FIELDS[int(parser.get('file_headers', 'SENSOR_ID_INDEX'))]
# Ints: 0-based positions of fields in raw file
SENSOR_ID_INDEX = int(parser.get('file_headers', 'SENSOR_ID_INDEX'))
SENSORS_LOG_DATE_INDEX = int(parser.get('file_headers', 'SENSORS_LOG_DATE_INDEX'))
SENSORS_DATA_INDEX = int(parser.get('file_headers', 'SENSORS_DATA_INDEX'))
# INSIDE_TEMP_FIELD is the string heading of corresponding field
# INSIDE_TEMP_INDEX is index of field containing inside temperature
# NOTE(review): SENSORS_DATA_INDEX is documented above as a raw-file column
# position, yet here it also indexes the SENSOR_FIELDS tuple -- confirm the
# two index spaces are meant to coincide.
INSIDE_TEMP_FIELD = SENSOR_FIELDS[int(parser.get('file_headers', 'SENSORS_DATA_INDEX'))]
# Outside observation file column names
OUTSIDE_FIELD1 = parser.get('file_headers', 'OUTSIDE_FIELD1')
OUTSIDE_FIELD2 = parser.get('file_headers', 'OUTSIDE_FIELD2')
OUTSIDE_FIELD3 = parser.get('file_headers', 'OUTSIDE_FIELD3')
GEOSPATIAL_FIELDS = tuple([OUTSIDE_FIELD1, OUTSIDE_FIELD2, OUTSIDE_FIELD3])
OUTSIDE_TIMESTAMP_LABEL = parser.get('file_headers', 'OUTSIDE_TIMESTAMP_LABEL')
OUTSIDE_DEGREES_LABEL = parser.get('file_headers', 'OUTSIDE_DEGREES_LABEL')
# Column heading that is unique to outside data file
UNIQUE_GEOSPATIAL_FIELD = GEOSPATIAL_FIELDS[int(parser.get('file_headers', 'UNIQUE_GEOSPATIAL_FIELD_INDEX'))]
# Ints: 0-based positions of fields in raw files
GEOSPATIAL_ID_INDEX = int(parser.get('file_headers', 'GEOSPATIAL_ID_INDEX'))
GEOSPATIAL_LOG_DATE_INDEX = int(parser.get('file_headers', 'GEOSPATIAL_LOG_DATE_INDEX'))
GEOSPATIAL_OBSERVATION_INDEX = int(parser.get('file_headers', 'GEOSPATIAL_OBSERVATION_INDEX'))
# Thermostat file metadata file column names
SENSOR_DEVICE_ID = parser.get('file_headers', 'SENSOR_DEVICE_ID')
SENSOR_LOCATION_ID = parser.get('file_headers', 'SENSOR_LOCATION_ID')
SENSOR_ZIP_CODE = parser.get('file_headers', 'SENSOR_ZIP_CODE')
# Postal file containing zip codes and other geographic metadata
POSTAL_FILE_ZIP = parser.get('file_headers', 'POSTAL_FILE_ZIP')
POSTAL_TWO_LETTER_STATE = parser.get('file_headers', 'POSTAL_TWO_LETTER_STATE')
# Dataframe index names
INSIDE_DEVICE_ID = parser.get('df_index_names', 'INSIDE_DEVICE_ID')
INSIDE_LOG_DATE = parser.get('df_index_names', 'INSIDE_LOG_DATE')
OUTSIDE_LOCATION_ID = parser.get('df_index_names', 'OUTSIDE_LOCATION_ID')
OUTSIDE_LOG_DATE = parser.get('df_index_names', 'OUTSIDE_LOG_DATE')
CYCLE_DEVICE_ID = parser.get('df_index_names', 'CYCLE_DEVICE_ID')
CYCLE_START_TIME = parser.get('df_index_names', 'CYCLE_START_TIME')
# Dataframe column_names
CYCLE_END_TIME = parser.get('df_column_names', 'CYCLE_END_TIME')
INSIDE_DEGREES = parser.get('df_column_names', 'INSIDE_DEGREES')
OUTSIDE_DEGREES = parser.get('df_column_names', 'OUTSIDE_DEGREES')
##########
# TESTING
##########
# Directory
# Fall back to the repository's tests/data directory when not configured.
if parser.get('test_files', 'TEST_DIR') == '':
    TEST_DIR = os.path.abspath('../tests/data')
else:
    TEST_DIR = parser.get('test_files', 'TEST_DIR')
# Ints
SENSOR_ID1 = int(parser.get('test_ids_and_states', 'SENSOR_ID1'))
SENSOR_ID2 = int(parser.get('test_ids_and_states', 'SENSOR_ID2'))
SENSOR_IDS = [SENSOR_ID1, SENSOR_ID2]
LOCATION_ID1 = int(parser.get('test_ids_and_states', 'LOCATION_ID1'))
LOCATION_ID2 = int(parser.get('test_ids_and_states', 'LOCATION_ID2'))
LOCATION_IDS = [LOCATION_ID1, LOCATION_ID2]
# Two-letter abbreviation
STATE = parser.get('test_ids_and_states', 'STATE')
# File names
options_vals = ['TEST_CYCLES_FILE', 'TEST_SENSOR_OBS_FILE', 'TEST_GEOSPATIAL_OBS_FILE',
                'TEST_SENSORS_FILE', 'TEST_POSTAL_FILE']
# Dynamically create one module-level constant per option name, holding the
# configured file name joined onto TEST_DIR (vars() here is the module
# namespace); the explicit assignments below make the names visible to
# static tooling.
for option_val in options_vals:
    vars()[option_val] = os.path.join(TEST_DIR, parser.get('test_files', option_val))
TEST_CYCLES_FILE = vars()['TEST_CYCLES_FILE']
TEST_SENSOR_OBS_FILE = vars()['TEST_SENSOR_OBS_FILE']
TEST_GEOSPATIAL_OBS_FILE = vars()['TEST_GEOSPATIAL_OBS_FILE']
TEST_SENSORS_FILE = vars()['TEST_SENSORS_FILE']
TEST_POSTAL_FILE = vars()['TEST_POSTAL_FILE']
test_pickle_section = 'test_pickle_files'
# Python 2 pickle fixtures live in a separate config section.
# NOTE(review): the substring test on sys.version is fragile (it could match
# other version strings containing '2.7'); sys.version_info would be safer --
# confirm before changing.
if '2.7' in sys.version:
    test_pickle_section += '_py2'
options_vals = ['CYCLES_PICKLE_FILE_OUT', 'SENSOR_PICKLE_FILE_OUT', 'GEOSPATIAL_PICKLE_FILE_OUT',
                'CYCLES_PICKLE_FILE', 'SENSOR_PICKLE_FILE', 'GEOSPATIAL_PICKLE_FILE',
                'ALL_STATES_CYCLES_PICKLED_OUT', 'ALL_STATES_SENSOR_OBS_PICKLED_OUT', 'ALL_STATES_GEOSPATIAL_OBS_PICKLED_OUT',
                'ALL_STATES_CYCLES_PICKLED', 'ALL_STATES_SENSOR_OBS_PICKLED', 'ALL_STATES_GEOSPATIAL_OBS_PICKLED']
# Same dynamic-assignment pattern as above, for the pickle fixture paths.
for option_val in options_vals:
    vars()[option_val] = os.path.join(TEST_DIR, parser.get(test_pickle_section, option_val))
CYCLES_PICKLE_FILE_OUT = vars()['CYCLES_PICKLE_FILE_OUT']
SENSOR_PICKLE_FILE_OUT = vars()['SENSOR_PICKLE_FILE_OUT']
GEOSPATIAL_PICKLE_FILE_OUT = vars()['GEOSPATIAL_PICKLE_FILE_OUT']
CYCLES_PICKLE_FILE = vars()['CYCLES_PICKLE_FILE']
SENSOR_PICKLE_FILE = vars()['SENSOR_PICKLE_FILE']
GEOSPATIAL_PICKLE_FILE = vars()['GEOSPATIAL_PICKLE_FILE']
ALL_STATES_CYCLES_PICKLED_OUT = vars()['ALL_STATES_CYCLES_PICKLED_OUT']
ALL_STATES_SENSOR_OBS_PICKLED_OUT = vars()['ALL_STATES_SENSOR_OBS_PICKLED_OUT']
ALL_STATES_GEOSPATIAL_OBS_PICKLED_OUT = vars()['ALL_STATES_GEOSPATIAL_OBS_PICKLED_OUT']
ALL_STATES_CYCLES_PICKLED = vars()['ALL_STATES_CYCLES_PICKLED']
ALL_STATES_SENSOR_OBS_PICKLED = vars()['ALL_STATES_SENSOR_OBS_PICKLED']
ALL_STATES_GEOSPATIAL_OBS_PICKLED = vars()['ALL_STATES_GEOSPATIAL_OBS_PICKLED']
| {
"repo_name": "nickpowersys/CaaR",
"path": "caar/configparser_read.py",
"copies": "1",
"size": "7740",
"license": "bsd-3-clause",
"hash": 1103075953985575300,
"line_mean": 45.9090909091,
"line_max": 126,
"alpha_frac": 0.7237726098,
"autogenerated": false,
"ratio": 2.8033321260412896,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.402710473584129,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
from functools import wraps
import multiprocess as mp
import os.path as op
import errno
import sys
import os
import pandas as pd
import numpy as np
import click
import six
from .. import util
class DelimitedTuple(click.types.ParamType):
    """click parameter type: a delimiter-separated string parsed into a
    tuple, with each item converted by the given type.

    Parameters
    ----------
    sep : str
        Delimiter between items (default ",").
    type : type or click ParamType
        Type each item is converted to (default str).
    """

    def __init__(self, sep=",", type=str):
        self.sep = sep
        self.type = click.types.convert_type(type)

    @property
    def name(self):
        return "separated[%s]" % self.sep

    def convert(self, value, param, ctx):
        # needs to pass through value = None unchanged
        # needs to be idempotent
        # needs to be able to deal with param and context being None
        if value is None:
            return value
        elif isinstance(value, six.string_types):
            # Bug fix: split on the configured delimiter rather than a
            # hard-coded comma, so non-default `sep` values take effect.
            parts = value.split(self.sep)
        else:
            parts = value
        return tuple(self.type(x, param, ctx) for x in parts)
def parse_kv_list_param(arg, item_sep=",", kv_sep="="):
    """Parse a delimited key=value list (e.g. "a=1,b=2") into a dict.

    The string is rewritten into a YAML flow mapping and parsed with
    yaml.safe_load, so values receive YAML's scalar typing. Raises
    click.BadParameter when the rewritten text is not valid YAML.
    """
    import yaml
    from six import StringIO

    normalized = arg if item_sep == "," else arg.replace(item_sep, ",")
    yaml_text = "{" + normalized.replace(kv_sep, ": ") + "}"
    try:
        return yaml.safe_load(StringIO(yaml_text))
    except yaml.YAMLError:
        raise click.BadParameter(
            "Error parsing key-value pairs: {}".format(yaml_text))
def parse_field_param(arg, includes_colnum=True, includes_agg=True):
    """Parse a field descriptor of the form ``name[=colnum][:prop=value,...]``.

    Returns a tuple ``(name, colnum, dtype, agg)``: ``colnum`` is converted
    from the user's 1-based numbering to 0-based, ``dtype`` is a numpy dtype,
    and ``agg`` an aggregation name; each is None when not supplied.
    Raises click.BadParameter on any malformed component.
    """
    # Split off the optional ":prop=value,..." property suffix.
    parts = arg.split(":")
    prefix = parts[0]
    if len(parts) == 1:
        props = None
    elif len(parts) == 2:
        props = parts[1]
    else:
        raise click.BadParameter(arg)
    if includes_colnum:
        # The prefix may carry "name=colnum" with a 1-based column number.
        parts = prefix.split("=")
        name = parts[0]
        if len(parts) == 1:
            colnum = None
        elif len(parts) == 2:
            try:
                colnum = int(parts[1]) - 1
            except ValueError:
                raise click.BadParameter(
                    "Not a number: '{}'".format(parts[1]), param_hint=arg
                )
            if colnum < 0:
                raise click.BadParameter("Field numbers start at 1.", param_hint=arg)
        else:
            raise click.BadParameter(arg)
    else:
        name = parts[0]
        colnum = None
    dtype = None
    agg = None
    if props is not None:
        # Properties are comma-separated "prop=value" pairs; only "dtype"
        # (and "agg", when enabled) are recognized.
        for item in props.split(","):
            try:
                prop, value = item.split("=")
            except ValueError:
                raise click.BadParameter(arg)
            if prop == "dtype":
                dtype = np.dtype(value)
            elif prop == "agg" and includes_agg:
                agg = value
            else:
                raise click.BadParameter(
                    "Invalid property: '{}'.".format(prop), param_hint=arg
                )
    return name, colnum, dtype, agg
def parse_bins(arg):
    """Resolve a BINS argument into ``(chromsizes, bins)``.

    ``arg`` is either ``<chromsizes file>:<binsize in bp>`` (bins generated
    by uniform binning of the chromosomes) or a path to a 3-column
    BED-like file of pre-made bins, in which case chromosome sizes are
    inferred from the last bin of each chromosome. Raises ValueError for
    anything else.
    """
    # Provided chromsizes and binsize
    if ":" in arg:
        chromsizes_file, binsize = arg.split(":")
        if not op.exists(chromsizes_file):
            raise ValueError('File "{}" not found'.format(chromsizes_file))
        try:
            binsize = int(binsize)
        except ValueError:
            raise ValueError(
                'Expected integer binsize argument (bp), got "{}"'.format(binsize)
            )
        chromsizes = util.read_chromsizes(chromsizes_file, all_names=True)
        bins = util.binnify(chromsizes, binsize)
    # Provided bins
    elif op.exists(arg):
        try:
            bins = pd.read_csv(
                arg,
                sep="\t",
                names=["chrom", "start", "end"],
                usecols=[0, 1, 2],
                dtype={"chrom": str},
            )
        # NOTE(review): pd.parser.CParserError is a long-removed pandas
        # alias (modern pandas exposes pd.errors.ParserError); confirm the
        # pinned pandas version still provides it.
        except pd.parser.CParserError as e:
            raise ValueError('Failed to parse bins file "{}": {}'.format(arg, str(e)))
        # Infer each chromosome's size from the end of its last bin.
        chromtable = (
            bins.drop_duplicates(["chrom"], keep="last")[["chrom", "end"]]
            .reset_index(drop=True)
            .rename(columns={"chrom": "name", "end": "length"})
        )
        chroms, lengths = list(chromtable["name"]), list(chromtable["length"])
        chromsizes = pd.Series(index=chroms, data=lengths)
    else:
        raise ValueError(
            "Expected BINS to be either <Path to bins file> or "
            "<Path to chromsizes file>:<binsize in bp>."
        )
    return chromsizes, bins
def check_ncpus(arg_value):
    """Validate a CPU-count argument: must be a positive integer, and is
    clamped to the number of CPUs actually available."""
    n_cpus = int(arg_value)
    if n_cpus <= 0:
        raise click.BadParameter("n_cpus must be >= 1")
    return min(n_cpus, mp.cpu_count())
@contextmanager
def on_broken_pipe(handler):
    """Context manager that passes broken-pipe (EPIPE) IOErrors raised in
    its body to *handler*; any other exception propagates unchanged."""
    try:
        yield
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            # Not a broken pipe error. Bubble up.
            raise
        handler(exc)
def exit_on_broken_pipe(exit_code):
    """
    Decorator factory: run the wrapped function, and exit cleanly with
    *exit_code* when it fails with a broken pipe (EPIPE). Prevents the
    "[Errno 32] Broken pipe" output message.

    Notes
    -----
    A SIGPIPE is delivered when writing to a pipe whose read end has been
    closed (e.g. when piping output to head(1)). Python traps the signal
    and raises it as an IOError (PY2) / OSError subclass aliased by
    IOError (PY3) with POSIX errno 32 (EPIPE). Some programs exit with
    128 + signal.SIGPIPE == 141, but the documented Python convention is a
    generic error exit. The analogous error for a socket shut down for
    writing is ESHUTDOWN (108), raised as BrokenPipeError on PY3.

    [1] https://docs.python.org/3.7/library/signal.html#note-on-sigpipe
    [2] https://www.quora.com/How-can-you-avoid-a-broken-pipe-error-on-Python
    """
    def decorator(func):
        @wraps(func)
        def decorated(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except IOError as exc:
                if exc.errno != errno.EPIPE:
                    # Not a broken pipe error. Bubble up.
                    raise
                # Broken pipe: Python flushes standard streams at exit, so
                # point stdout at devnull to avoid a second BrokenPipeError
                # during shutdown, then exit with the requested code.
                devnull = os.open(os.devnull, os.O_WRONLY)
                os.dup2(devnull, sys.stdout.fileno())
                sys.exit(exit_code)
        return decorated
    return decorator
| {
"repo_name": "mirnylab/cooler",
"path": "cooler/cli/_util.py",
"copies": "1",
"size": "6640",
"license": "bsd-3-clause",
"hash": 8058319075600214000,
"line_mean": 29.8837209302,
"line_max": 86,
"alpha_frac": 0.5647590361,
"autogenerated": false,
"ratio": 4.088669950738916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033110931860732537,
"num_lines": 215
} |
from __future__ import absolute_import, division, print_function
from copy import copy, deepcopy
from frozendict import frozendict
from namedlist import namedlist
from numpy import allclose, float64, nan
from pandas import DataFrame
from sympy import Min, Piecewise, Symbol
from HelpyFuncs.SymPy import sympy_theanify
from .Security import Security
def parse_security_info_set(security_info_set):
    """Normalize one security specification.

    Accepts a dict (returned as-is), a (Security, number) pair (returned
    as-is), a (label, number) or (number, label) pair (-> {label: number}),
    a 1-element sequence (unwrapped recursively), a bare label string
    (-> {label: 1}), or a bare Security (-> (security, None)). Anything
    else yields None.
    """
    if isinstance(security_info_set, dict):
        return security_info_set
    if isinstance(security_info_set, (list, tuple)):
        if len(security_info_set) == 2:
            first, second = security_info_set
            if isinstance(first, Security) and isinstance(second, (int, float)):
                return security_info_set
            if isinstance(first, str) and isinstance(second, (int, float)):
                return {first: second}
            if isinstance(first, (int, float)) and isinstance(second, str):
                return {second: first}
        elif len(security_info_set) == 1:
            return parse_security_info_set(security_info_set[0])
    elif isinstance(security_info_set, str):
        return {security_info_set: 1}
    elif isinstance(security_info_set, Security):
        return security_info_set, None
def parse_security_info_sets(security_info_sets):
    """Normalize a collection of security specifications.

    Dicts pass through; sequences are parsed item by item and merged into
    a dict (or collected into a list of pairs, depending on what the
    first item parses to); a bare label becomes {label: 1}; a bare
    Security becomes a 1-tuple of (security, None).
    """
    if isinstance(security_info_sets, dict):
        return security_info_sets
    if isinstance(security_info_sets, (list, tuple)):
        parsed = parse_security_info_set(security_info_sets[0])
        if isinstance(parsed, dict):
            for extra in security_info_sets[1:]:
                parsed.update(parse_security_info_set(extra))
        elif isinstance(parsed, (list, tuple)):
            parsed = [parsed]
            for extra in security_info_sets[1:]:
                parsed.append(parse_security_info_set(extra))
        return parsed
    if isinstance(security_info_sets, str):
        return {security_info_sets: 1}
    if isinstance(security_info_sets, Security):
        return (security_info_sets, None),
# Mutable record pairing a unit count with its Security: n = units outstanding.
n_security_factory = namedlist('N_Security', ['n', 'security'])
class CapitalStructure:
    """
    A company's capital structure: the securities outstanding, their LIFO
    liquidation order, optional conversion ratios into common shares, and
    per-owner holdings.

    Fixes applied relative to the original:
    - Python-2-only constructs (bare ``reduce`` and a tuple-parameter
      lambda) replaced with ``sum``-based equivalents, so the class also
      compiles on Python 3.
    - ``conversion_scenarios`` no longer uses a mutable default argument,
      and no longer shares the inner per-owner dicts between its two
      recursion branches (the shallow ``.copy()`` let one branch clobber
      the other's entries).
    - Constructing an empty ``CapitalStructure()`` (as ``copy(deep=False)``
      does internally) no longer raises ``IndexError``.
    """

    def __init__(self, *securities_and_optional_conversion_ratios):
        # security label -> n_security_factory(n=<units outstanding>, security=<Security>)
        self.outstanding = {}
        # list of lists of labels; list position = liquidation round (LIFO)
        self.lifo_liquidation_order = []
        # security label -> optional common-share conversion ratio
        self.optional_conversion_ratios = {}
        # owner name -> {security label: quantity held}
        self.ownerships = {}
        for securities_and_optional_conversion_ratio in securities_and_optional_conversion_ratios:
            self.create_securities(securities_and_optional_conversion_ratio)
        # The first security created is treated as the common share; guard
        # the empty case so CapitalStructure() itself is constructible.
        self.common_share_label = self[0][0] if self.lifo_liquidation_order else None

    def __contains__(self, item):
        return item in self.outstanding

    def __getitem__(self, item):
        # str -> the (n, security) record; int -> labels of that liquidation round
        if isinstance(item, str):
            return self.outstanding[item]
        elif isinstance(item, int):
            return self.lifo_liquidation_order[item]

    def __iter__(self):
        return iter(self.lifo_liquidation_order)

    def __len__(self):
        return len(self.lifo_liquidation_order)

    def copy(self, deep=True):
        """Return a copy of this capital structure."""
        if deep:  # to deep-copy; this is the safest option
            return deepcopy(self)
        else:  # to shallow-copy; NOTE: this can be unclear & unsafe
            capital_structure = CapitalStructure()
            capital_structure.outstanding = self.outstanding.copy()
            capital_structure.lifo_liquidation_order = copy(self.lifo_liquidation_order)
            capital_structure.optional_conversion_ratios = self.optional_conversion_ratios.copy()
            capital_structure.ownerships = self.ownerships.copy()
            # Fix: carry the common-share label over explicitly; the fresh
            # empty structure above has none.
            capital_structure.common_share_label = self.common_share_label
            capital_structure.waterfall()
            return capital_structure

    def show(self, ownerships=False):
        """Return a DataFrame summary: per-owner holdings when
        ``ownerships`` is True, otherwise per-security liquidation order,
        outstanding units and conversion ratio."""
        if ownerships:
            df = DataFrame(columns=['Owner', 'Security', 'Quantity'])
            i = 0
            for owner, holdings in self.ownerships.items():
                for security_label, quantity in holdings.items():
                    df.loc[i] = owner, security_label, quantity
                    i += 1
            df.Quantity = df.Quantity.astype(float)
        else:
            df = DataFrame(columns=['Liquidation Order (LIFO)', 'Outstanding', 'Conversion Ratio'])
            for i in range(len(self)):
                security_labels = self[i]
                for security_label in security_labels:
                    n, security = self[security_label]
                    optional_conversion_ratio = self.optional_conversion_ratios.get(security_label)
                    df.loc[security_label] = i, n, optional_conversion_ratio
            df['Liquidation Order (LIFO)'] = df['Liquidation Order (LIFO)'].astype(int)
        return df

    def __repr__(self):
        return str(self.show())

    def create_securities(self, securities, liquidation_order=None, insert=False, inplace=True, deep=True):
        """Register new securities (zero units outstanding) in the given
        liquidation round; by default each one opens a new round."""
        if inplace:
            capital_structure = self
        else:
            capital_structure = self.copy(deep=deep)
        securities_and_optional_common_share_conversion_ratios = parse_security_info_sets(securities)
        if len(securities_and_optional_common_share_conversion_ratios) > 1:
            # An explicit round only makes sense for a single security.
            liquidation_order = None
        for security, optional_common_share_conversion_ratio in securities_and_optional_common_share_conversion_ratios:
            capital_structure.outstanding[security.label] = n_security_factory(n=0, security=security)
            # NOTE(review): the bound below is len(self), not
            # len(capital_structure); when inplace=False the two diverge as
            # securities are added -- confirm this is intended.
            if (liquidation_order is None) or liquidation_order >= len(self):
                capital_structure.lifo_liquidation_order.append([security.label])
            elif insert:
                capital_structure.lifo_liquidation_order.insert(liquidation_order, [security.label])
            else:
                capital_structure.lifo_liquidation_order[liquidation_order].append(security.label)
            if optional_common_share_conversion_ratio is not None:
                capital_structure.optional_conversion_ratios[security.label] = optional_common_share_conversion_ratio
        if not inplace:
            return capital_structure

    def waterfall(self):
        """Recompute each security's symbolic value expression as a
        function of enterprise value, paying senior (later) rounds first;
        round 0 (common shares) splits whatever remains."""
        v = Symbol('enterprise_val')
        for lifo_liquidation_order in reversed(range(len(self))):
            security_labels = self[lifo_liquidation_order]
            if lifo_liquidation_order:
                # Total claimed by this round
                # (py3 fix: sum(...) replaces reduce(lambda x, y: x + y, map(...)))
                total_claim_val_this_round = sum(
                    self[security_label].n * self[security_label].security.claim_val_expr
                    for security_label in security_labels)
                claimable = Min(total_claim_val_this_round, v)
                for security_label in security_labels:
                    n, security = self[security_label]
                    # Pro-rate the claimable value over this round's claims.
                    security.val_expr = \
                        Piecewise(
                            ((claimable / total_claim_val_this_round) * security.claim_val_expr,
                             total_claim_val_this_round > 0),
                            (claimable,
                             True))
                    security.val = sympy_theanify(security.val_expr)
                v -= claimable
            else:
                n, common_share = self[security_labels[0]]
                common_share.val_expr = v / n
                common_share.val = sympy_theanify(common_share.val_expr)

    def issue(self, owner='', securities=None, inplace=True, deep=True):
        """Issue securities to *owner*, increasing outstanding counts and
        the owner's holdings, then refresh the waterfall."""
        if inplace:
            capital_structure = self
        else:
            capital_structure = self.copy(deep=deep)
        if securities is not None:
            security_labels_and_quantities = parse_security_info_sets(securities)
            for security_label, quantity in security_labels_and_quantities.items():
                capital_structure[security_label].n += quantity
                if owner in capital_structure.ownerships:
                    if security_label in capital_structure.ownerships[owner]:
                        capital_structure.ownerships[owner][security_label] += quantity
                    else:
                        capital_structure.ownerships[owner][security_label] = quantity
                else:
                    capital_structure.ownerships[owner] = {security_label: quantity}
            capital_structure.waterfall()
        if not inplace:
            return capital_structure

    def transfer(self, from_owner='', to_owner='', securities=None, inplace=True, deep=True):
        """Move holdings between owners (capped at what *from_owner*
        holds); outstanding counts are unchanged. With securities=None,
        transfers everything *from_owner* holds."""
        if inplace:
            capital_structure = self
        else:
            capital_structure = self.copy(deep=deep)
        if securities is None:
            security_labels_and_quantities = capital_structure.ownerships[from_owner]
        else:
            security_labels_and_quantities = parse_security_info_sets(securities)
        for security_label, quantity in security_labels_and_quantities.items():
            transferred_quantity = min(capital_structure.ownerships[from_owner][security_label], quantity)
            capital_structure.ownerships[from_owner][security_label] -= transferred_quantity
            # Drop emptied positions / owners to keep the records tidy.
            if allclose(capital_structure.ownerships[from_owner][security_label], 0.):
                del capital_structure.ownerships[from_owner][security_label]
            if not capital_structure.ownerships[from_owner]:
                del capital_structure.ownerships[from_owner]
            if to_owner in capital_structure.ownerships:
                if security_label in capital_structure.ownerships[to_owner]:
                    capital_structure.ownerships[to_owner][security_label] += transferred_quantity
                else:
                    capital_structure.ownerships[to_owner][security_label] = transferred_quantity
            else:
                capital_structure.ownerships[to_owner] = {security_label: transferred_quantity}
        if not inplace:
            return capital_structure

    def redeem(self, owners=None, securities=None, inplace=True, deep=True):
        """Retire the selected holdings: reduce outstanding counts and the
        owners' positions, then refresh the waterfall."""
        if inplace:
            capital_structure = self
        else:
            capital_structure = self.copy(deep=deep)
        if (owners is not None) or (securities is not None):
            owners_holdings_to_redeem = \
                capital_structure.parse_owners_securities_holdings(
                    owners=owners,
                    securities=securities)
            for owner, holdings_to_redeem in owners_holdings_to_redeem.items():
                for security_label, quantity in holdings_to_redeem.items():
                    capital_structure[security_label].n -= quantity
                    if allclose(capital_structure[security_label].n, 0.):
                        capital_structure[security_label].n = 0.
                    capital_structure.ownerships[owner][security_label] -= quantity
                    if allclose(capital_structure.ownerships[owner][security_label], 0.):
                        del capital_structure.ownerships[owner][security_label]
                    if not capital_structure.ownerships[owner]:
                        del capital_structure.ownerships[owner]
            capital_structure.waterfall()
        if not inplace:
            return capital_structure

    def convert_to_common(self, owners=None, securities=None, inplace=True, deep=True):
        """Convert the selected convertible holdings into common shares at
        their registered conversion ratios."""
        if inplace:
            capital_structure = self
        else:
            capital_structure = self.copy(deep=deep)
        if (owners is not None) or (securities is not None):
            owners_holdings_to_convert = \
                capital_structure.parse_owners_securities_holdings(
                    owners=owners,
                    securities=securities)
            for owner, holdings_to_convert in owners_holdings_to_convert.items():
                for security_label, quantity in holdings_to_convert.items():
                    if security_label in capital_structure.optional_conversion_ratios:
                        conversion_ratio = capital_structure.optional_conversion_ratios[security_label]
                        capital_structure.redeem(
                            owners=owner,
                            securities={security_label: quantity})
                        capital_structure.issue(
                            owner=owner,
                            securities={capital_structure.common_share_label: quantity * conversion_ratio})
            capital_structure.waterfall()
        if not inplace:
            return capital_structure

    def conversion_scenarios(self, conversions_tried=None, conversions_to_try=None):
        """Enumerate every combination of convert / don't-convert choices
        over the remaining (owner, convertible security) pairs. Returns a
        dict mapping a frozen description of the choices made to the
        resulting CapitalStructure."""
        # Fix: avoid a shared mutable default argument.
        if conversions_tried is None:
            conversions_tried = {}
        convertibles = set(self.optional_conversion_ratios)
        conversion_possibilities = set()
        for owner, holdings in self.ownerships.items():
            for security_label in set(holdings) & convertibles:
                conversion_possibilities.add((owner, security_label))
        if conversions_to_try is None:
            conversions_to_try = conversion_possibilities
        else:
            conversions_to_try &= conversion_possibilities
        if conversions_to_try:
            owner, security_label = conversions_to_try.pop()
            # Fix: copy the inner per-owner dicts too, so the "don't
            # convert" and "convert" branches below cannot mutate each
            # other's records through shared inner dicts.
            conversions_tried_0 = {o: dict(c) for o, c in conversions_tried.items()}
            if owner in conversions_tried_0:
                conversions_tried_0[owner][security_label] = False
            else:
                conversions_tried_0[owner] = {security_label: False}
            d = self.conversion_scenarios(
                conversions_tried=conversions_tried_0,
                conversions_to_try=conversions_to_try.copy())
            conversions_tried_1 = {o: dict(c) for o, c in conversions_tried.items()}
            if owner in conversions_tried_1:
                conversions_tried_1[owner][security_label] = True
            else:
                conversions_tried_1[owner] = {security_label: True}
            d.update(
                self.convert_to_common(
                    owners=owner,
                    securities=security_label,
                    inplace=False)
                .conversion_scenarios(
                    conversions_tried=conversions_tried_1,
                    conversions_to_try=conversions_to_try.copy()))
            return d
        else:
            # Leaf: freeze the decision record so it can serve as a dict key.
            return {frozendict({owners: frozendict(conversions) for owners, conversions in conversions_tried.items()}):
                    self.copy()}

    def val(self, pareto_equil_conversions=False, **kwargs):
        """Value every security and every owner's stake at the enterprise
        value in **kwargs. With pareto_equil_conversions, searches the
        conversion scenarios for one no owner can unilaterally improve on.
        """
        if self.optional_conversion_ratios and pareto_equil_conversions:
            conversion_scenario_capital_structures = self.conversion_scenarios()
            conversion_scenarios = conversion_scenario_capital_structures.keys()
            conversion_scenario_val_results = \
                {conversion_scenario: capital_structure.val(pareto_equil_conversions=False, **kwargs)
                 for conversion_scenario, capital_structure in conversion_scenario_capital_structures.items()}
            conversion_scenario_ownership_vals = \
                {conversion_scenario: val_results['ownership_vals']
                 for conversion_scenario, val_results in conversion_scenario_val_results.items()}
            for conversion_scenario, ownership_vals in conversion_scenario_ownership_vals.items():
                pareto = True
                for owner, conversions in conversion_scenario.items():
                    for alternative_conversion_scenario in conversion_scenarios:
                        # An alternative is a unilateral deviation when every
                        # OTHER owner's choices match the current scenario.
                        pareto_alternative = True
                        for another_owner, another_owner_conversions in alternative_conversion_scenario.items():
                            if another_owner != owner:
                                pareto_alternative &= \
                                    (conversion_scenario[another_owner] ==
                                     alternative_conversion_scenario[another_owner])
                                if not pareto_alternative:
                                    break
                        if pareto_alternative:
                            pareto &= \
                                (ownership_vals[owner] >=
                                 conversion_scenario_ownership_vals[alternative_conversion_scenario][owner])
                        else:
                            continue
                if pareto:
                    return dict(
                        conversion_scenario=conversion_scenario,
                        capital_structure=conversion_scenario_capital_structures[conversion_scenario],
                        security_vals=conversion_scenario_val_results[conversion_scenario]['security_vals'],
                        ownership_vals=ownership_vals)
        else:
            # Record the trivial "nobody converts" scenario for the result.
            convertibles = set(self.optional_conversion_ratios)
            conversion_scenario = {}
            for owner, holdings in self.ownerships.items():
                for security_label in set(holdings) & convertibles:
                    if owner in conversion_scenario:
                        conversion_scenario[owner][security_label] = False
                    else:
                        conversion_scenario[owner] = {security_label: False}
            security_vals = \
                {security_label: float64(self[security_label].security.val(**kwargs))
                 for security_label in self.outstanding}
            ownership_vals = {}
            for owner, holdings in self.ownerships.items():
                # py3 fix: sum(...) replaces reduce over a tuple-parameter
                # lambda (a SyntaxError on Python 3).
                ownership_vals[owner] = sum(
                    quantity * security_vals[security_label]
                    for security_label, quantity in holdings.items())
            return dict(
                conversion_scenario=conversion_scenario,
                capital_structure=self.copy(),
                security_vals=security_vals,
                ownership_vals=ownership_vals)

    def __call__(self, pareto_equil_conversions=False, ownerships=False, **kwargs):
        """Return a DataFrame of valuations: per-owner stakes (with share
        of common) when ``ownerships``, otherwise per-security values."""
        val_results = self.val(pareto_equil_conversions=pareto_equil_conversions, **kwargs)
        capital_structure = val_results['capital_structure']
        security_vals = val_results['security_vals']
        if ownerships:
            df = DataFrame(columns=['Owner', 'Security', 'Val', 'Share'])
            common_share_val = capital_structure[self.common_share_label].n * security_vals[self.common_share_label]
            i = 0
            for owner, holdings in capital_structure.ownerships.items():
                for security_label, quantity in holdings.items():
                    security_val = quantity * security_vals[security_label]
                    if security_label == self.common_share_label:
                        share_in_common = security_val / common_share_val
                    else:
                        share_in_common = nan
                    df.loc[i] = owner, security_label, security_val, share_in_common
                    i += 1
            df.loc['TOTAL'] = 2 * ('',) + (df.Val.sum(), df.Share.sum())
        else:
            df = capital_structure.show()
            df['Val / Unit'] = [security_vals[security_label] for security_label in df.index]
            df['Val'] = df.Outstanding * df['Val / Unit']
            df.loc['TOTAL'] = 4 * [''] + [df.Val.sum()]
        return df

    def parse_owners_securities_holdings(self, owners=None, securities=None):
        """Resolve an (owners, securities) selection into
        {owner: {security label: quantity}}, capping quantities at actual
        holdings. Either argument may be None (meaning "all")."""
        if (owners is not None) or (securities is not None):
            d = {}
            if owners is None:
                if isinstance(securities, str):
                    securities = securities,
                for security_label in securities:
                    for owner, holdings in self.ownerships.items():
                        if security_label in holdings:
                            quantity = holdings[security_label]
                            if owner in d:
                                d[owner][security_label] = quantity
                            else:
                                d[owner] = {security_label: quantity}
            elif isinstance(owners, (list, tuple)):
                if isinstance(securities, str):
                    securities = securities,
                for security_label in securities:
                    for owner in owners:
                        holdings = self.ownerships[owner]
                        if security_label in holdings:
                            quantity = holdings[security_label]
                            if owner in d:
                                d[owner][security_label] = quantity
                            else:
                                d[owner] = {security_label: quantity}
            elif isinstance(owners, str):
                owner = owners
                d[owner] = {}
                if isinstance(securities, dict):
                    for security_label, quantity in securities.items():
                        if security_label in self.ownerships[owner]:
                            d[owner][security_label] = \
                                min(self.ownerships[owner][security_label], quantity)
                elif isinstance(securities, str):
                    security_label = securities
                    if security_label in self.ownerships[owner]:
                        d[owner][security_label] = \
                            self.ownerships[owner][security_label]
                elif isinstance(securities, (list, tuple)):
                    for security_info_set in securities:
                        if isinstance(security_info_set, (list, tuple)):
                            # NOTE(review): a pair that matches neither
                            # (label, number) nor (number, label) leaves
                            # security_label/quantity unbound and would raise
                            # below -- confirm malformed pairs cannot occur.
                            if isinstance(security_info_set[0], str) and \
                                    isinstance(security_info_set[1], (int, float)):
                                security_label, quantity = security_info_set
                            elif isinstance(security_info_set[0], (int, float)) and \
                                    isinstance(security_info_set[1], str):
                                quantity, security_label = security_info_set
                            if security_label in self.ownerships[owner]:
                                d[owner][security_label] = \
                                    min(self.ownerships[owner][security_label], quantity)
                        elif isinstance(security_info_set, str):
                            security_label = security_info_set
                            if security_label in self.ownerships[owner]:
                                d[owner][security_label] = \
                                    self.ownerships[owner][security_label]
            return d
| {
"repo_name": "MBALearnsToCode/FinSymPy",
"path": "CorpFin/Capital.py",
"copies": "2",
"size": "22730",
"license": "mit",
"hash": -1337137771020233200,
"line_mean": 43.39453125,
"line_max": 119,
"alpha_frac": 0.5651561813,
"autogenerated": false,
"ratio": 4.518886679920477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6084042861220478,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from copy import deepcopy
from datashape.predicates import isscalar
from multipledispatch import MDNotImplementedError
from .expressions import *
from .strings import *
from .arithmetic import *
from .collections import *
from .split_apply_combine import *
from .broadcast import *
from .reductions import *
from ..dispatch import dispatch
def lean_projection(expr):
    """Insert projections so the expression pipes through only needed fields.

    >>> t = symbol('t', 'var * {a: int, b: int, c: int, d: int}')
    >>> lean_projection(t.sort('a').b)
    t[['a', 'b']].sort('a', ascending=True).b
    """
    thin_expr, _ = _lean(expr, fields=expr.fields)
    return thin_expr
@dispatch(Symbol)
def _lean(expr, fields=None):
    """Project a symbol down to ``fields`` unless it is already that thin.

    >>> s = symbol('s', '{x: int, y: int}')
    >>> _lean(s, ('x',))
    (s['x'], ('x',))
    >>> _lean(s, ())
    (s, ())
    >>> s = symbol('s', 'int')
    >>> _lean(s, ())
    (s, ())
    >>> _lean(s, ('s',))
    (s, ())
    """
    # Only project when something was requested AND the symbol is wider
    # than the request.
    if fields and not set(expr.fields).issubset(fields):
        return expr[sorted(fields)], fields
    return expr, fields
@dispatch(Projection)
def _lean(expr, fields=None):
    """Rebuild the projection on a leaned child, preserving column order."""
    leaned_child, _ = _lean(expr._child, fields=fields)
    ordered = sorted(fields, key=expr.fields.index)
    return leaned_child[ordered], fields
@dispatch(Field)
def _lean(expr, fields=None):
    """A field access needs its own name in addition to ``fields``."""
    needed = set(fields) | set([expr._name])
    leaned_child, _ = _lean(expr._child, fields=needed)
    return leaned_child[expr._name], needed
@dispatch(Arithmetic)
def _lean(expr, fields=None):
    """Lean both operands of a binary arithmetic expression.

    The required fields are the requested ``fields`` plus whatever either
    operand needs.
    """
    # Bug fix: the original bound the lhs result to ``right_fields`` and the
    # rhs result to ``left_fields``.  The swap did not change the computed
    # union below, but the corrected names no longer mislead readers.
    lhs, lhs_fields = _lean(expr.lhs, fields=())
    rhs, rhs_fields = _lean(expr.rhs, fields=())
    new_fields = set(fields) | set(lhs_fields) | set(rhs_fields)
    return type(expr)(lhs, rhs), new_fields
@dispatch(object)
def _lean(expr, fields=None):
    """Fallback: plain (non-expression) values pass through untouched."""
    return expr, fields
@dispatch(Label)
def _lean(expr, fields=None):
    """Lean the child expression, then re-apply the label."""
    leaned_child, needed = _lean(expr._child, fields=())
    return leaned_child.label(expr._name), needed
@dispatch(ReLabel)
def _lean(expr, fields=None):
    """Translate requested names back through the relabelling, lean the
    child, then re-apply only the renames that still matter.
    """
    # Removed a dead local (``labels = dict(expr.labels)``) that was never
    # read.
    reverse_labels = dict((new, old) for old, new in expr.labels)
    child_fields = set(reverse_labels.get(f, f) for f in fields)
    leaned_child, new_fields = _lean(expr._child, fields=child_fields)
    keep = dict((old, new) for old, new in expr.labels
                if old in leaned_child.fields)
    return leaned_child.relabel(**keep), new_fields
@dispatch(ElemWise)
def _lean(expr, fields=None):
    """Elementwise op over a scalar-measured child keeps all child fields."""
    if not isscalar(expr._child.dshape.measure):
        # Defer to the next implementation in the dispatch chain.
        raise MDNotImplementedError()
    child_fields = set(expr._child.fields)
    leaned_child, _ = _lean(expr._child, fields=child_fields)
    return expr._subs({expr._child: leaned_child}), child_fields
@dispatch(Broadcast)
def _lean(expr, fields=None):
    """A broadcast additionally needs every column it actively computes on."""
    needed = set(fields) | set(expr.active_columns())
    leaned_child, _ = _lean(expr._child, fields=needed)
    return expr._subs({expr._child: leaned_child}), needed
@dispatch(Selection)
def _lean(expr, fields=None):
    """A selection needs ``fields`` plus whatever its predicate reads."""
    # The leaned predicate itself is discarded; only its field set matters.
    predicate, pred_fields = _lean(expr.predicate, fields=fields)
    needed = set(fields) | set(pred_fields)
    leaned_child, _ = _lean(expr._child, fields=needed)
    return expr._subs({expr._child: leaned_child}), needed
@dispatch(Like)
def _lean(expr, fields=None):
    """Pattern matching needs the pattern columns as well as ``fields``."""
    needed = set(fields) | set(expr.patterns.keys())
    leaned_child, new_fields = _lean(expr._child, fields=needed)
    return expr._subs({expr._child: leaned_child}), new_fields
@dispatch(Sort)
def _lean(expr, fields=None):
    """Sorting needs the sort-key columns in addition to ``fields``."""
    keys = expr.key if isinstance(expr.key, (list, set, tuple)) else [expr.key]
    needed = set(fields) | set(keys)
    leaned_child, _ = _lean(expr._child, fields=needed)
    return leaned_child.sort(key=expr.key, ascending=expr.ascending), needed
@dispatch(Head)
def _lean(expr, fields=None):
    """``head`` is field-agnostic; lean the child and re-apply it."""
    leaned_child, child_fields = _lean(expr._child, fields=fields)
    return leaned_child.head(expr.n), child_fields
@dispatch(Reduction)
def _lean(expr, fields=None):
    """A reduction needs whichever columns its child actively uses."""
    child = expr._child
    try:
        needed = child.active_columns()
    except AttributeError:
        # Child has no notion of active columns; fall back to all fields.
        needed = child.fields
    # Drop falsy (empty/None) names before leaning.
    leaned_child, child_fields = _lean(child,
                                       fields=set(f for f in needed if f))
    return expr._subs({expr._child: leaned_child}), child_fields
@dispatch(Summary)
def _lean(expr, fields=None):
    """Keep only the named summaries that were requested, leaning each."""
    kept = dict()
    new_fields = set()
    for name, val in zip(expr.names, expr.values):
        if name not in fields:
            continue
        leaned_val, val_fields = _lean(val, fields=set())
        kept[name] = leaned_val
        new_fields |= set(val_fields)
    return summary(**kept), new_fields
@dispatch(By)
def _lean(expr, fields=None):
    # Lean grouper and apply separately, each restricted to the requested
    # fields it actually exposes.
    fields = set(fields)
    grouper, grouper_fields = _lean(expr.grouper,
                                    fields=fields.intersection(expr.grouper.fields))
    apply, apply_fields = _lean(expr.apply,
                                fields=fields.intersection(expr.apply.fields))
    new_fields = set(apply_fields) | set(grouper_fields)
    # The shared ancestor of both branches; thin it only when it is wider
    # than the union of fields the two branches need, and substitute the
    # thinned ancestor back into both branches.
    child = common_subexpression(grouper, apply)
    if len(child.fields) > len(new_fields):
        child, _ = _lean(child, fields=new_fields)
        grouper = grouper._subs({expr._child: child})
        apply = apply._subs({expr._child: child})
    return By(grouper, apply), new_fields
@dispatch(Distinct)
def _lean(expr, fields=None):
    """``distinct`` must see every field of the expression, not just the
    requested subset, so the child is leaned against ``expr.fields``.
    """
    leaned_child, new_fields = _lean(expr._child, fields=expr.fields)
    return expr._subs({expr._child: leaned_child}), new_fields
@dispatch(Merge)
def _lean(expr, fields=None):
    """Lean each requested merged column, then the shared child."""
    new_fields = set()
    for f in expr.fields:
        if f in fields:
            # Only the field set of each leaned column is needed here.
            _, needed = _lean(expr[f], fields=set([f]))
            new_fields.update(needed)
    leaned_child, _ = _lean(expr._child, fields=new_fields)
    return expr._subs({expr._child: leaned_child})[sorted(fields)], new_fields
@dispatch((Join, Concat))
def _lean(expr, fields=None):
    """Joins and concatenations are left untouched (no projection pushdown)."""
    return expr, fields
@dispatch(Expr)
def _lean(expr, fields=None):
    """Catch-all for expression types without a lean-projection rule.

    Parameters
    ----------
    expr : Expression
        An expression to be optimized
    fields : Iterable of strings
        The fields that will be needed from this expression

    Raises
    ------
    NotImplementedError
        Always; each expression type must register its own implementation.
    """
    raise NotImplementedError()
@dispatch(Selection)
def simple_selections(expr):
    """Cast all ``Selection`` nodes into ``SimpleSelection`` nodes.

    This causes the compute core to not treat the predicate as an input.

    Parameters
    ----------
    expr : Expr
        The expression to traverse.

    Returns
    -------
    simplified : Expr
        The expression with ``Selection``s replaced with ``SimpleSelection``s.
    """
    # Fixed docstring typos ("siplified", "replaces") from the original.
    # Recurse into both the child and the predicate so nested selections
    # are rewritten as well.
    return SimpleSelection(
        simple_selections(expr._child),
        simple_selections(expr.predicate),
    )
@dispatch(Expr)
def simple_selections(expr):
    """Recursively rewrite every input of ``expr``."""
    replacements = {inp: simple_selections(inp) for inp in expr._inputs}
    return expr._subs(replacements)
| {
"repo_name": "ChinaQuants/blaze",
"path": "blaze/expr/optimize.py",
"copies": "1",
"size": "6959",
"license": "bsd-3-clause",
"hash": -6373915542598529000,
"line_mean": 25.3598484848,
"line_max": 84,
"alpha_frac": 0.6312688605,
"autogenerated": false,
"ratio": 3.5235443037974683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9654049222357055,
"avg_score": 0.00015278838808250572,
"num_lines": 264
} |
from __future__ import (absolute_import, division, print_function)
from ctypes import byref, create_string_buffer
from logging import getLogger
import ret.dis as dis
from ret.state import (
ArchitectureSimulator, bit_t, int32_t, Function, NamedExpression, State,
Type, UnknownType, void,
)
simlog = getLogger("ret.iad32.simall")
class IA32PrefixMap(object):
    """Mutable record of the IA-32 instruction prefixes currently in effect."""

    __slots__ = ["operand_size_override", "address_size_override",
                 "segment_override", "lock", "repeat"]

    def __init__(self):
        super(IA32PrefixMap, self).__init__()
        # Size-override and lock prefixes are simple on/off flags.
        for flag_name in ("operand_size_override", "address_size_override",
                          "lock"):
            setattr(self, flag_name, False)
        # These two carry a value when the prefix is present, None otherwise.
        self.segment_override = None
        self.repeat = None
class IA32RegisterMap(object):
    """Symbolic values for the IA-32 general registers and status flags."""

    __slots__ = ['eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi',
                 'of', 'df', 'sf', 'zf', 'af', 'pf', 'cf']
    # Shared "unknown value" placeholders used as register/flag defaults.
    unknown32 = NamedExpression("?", int32_t)
    unknown_bit = NamedExpression("?", bit_t)

    def __init__(self, eax=unknown32, ecx=unknown32, edx=unknown32,
                 ebx=unknown32, esp=NamedExpression(0, int32_t),
                 ebp=unknown32, esi=unknown32,
                 edi=unknown32, of=unknown_bit, df=unknown_bit, sf=unknown_bit,
                 zf=unknown_bit, af=unknown_bit, pf=unknown_bit,
                 cf=unknown_bit):
        super(IA32RegisterMap, self).__init__()
        # Each slot name matches a parameter name, so assign them in bulk.
        values = locals()
        for slot in IA32RegisterMap.__slots__:
            setattr(self, slot, values[slot])
class IA32Function(Function):
    # A function within an IA-32 binary.  On construction it derives the
    # entry state from the declared arguments and then walks every reachable
    # instruction inside [start_address, end_address), recording a State per
    # (address, predecessor-address) pair.
    def __init__(self, simulator, name, start_address, end_address,
                 arguments=None, return_type=None, calling_convention="cdecl"):
        super(IA32Function, self).__init__(
            simulator, name, start_address, end_address, arguments,
            (return_type if return_type is not None
             else UnknownType(bit_size=32)), calling_convention)
        self.instruction_states = {} # addr -> {prev_addr -> state}
        self.instructions = {} # addr -> instr
        self.evaluate_arguments()
        self.simulate_all()
        return
    def evaluate_arguments(self):
        """\
        Examine the arguments and calculate the start state upon entry into the
        function.
        """
        # cdecl-style stack on entry: return pointer first, then arguments.
        stack = [NamedExpression("returnptr", self.simulator.void_ptr)]
        for argid, arg in enumerate(self.arguments):
            if isinstance(arg, NamedExpression):
                stack.append(arg)
            elif isinstance(arg, Type):
                # Bare type: synthesize a placeholder name for the argument.
                stack.append(NamedExpression("arg%0d" % argid, arg))
            elif isinstance(arg, Expression):
                # NOTE(review): ``Expression`` is not among this module's
                # imports from ret.state, so reaching this branch would raise
                # NameError -- confirm the intended import.
                stack.append(NamedExpression(arg.name, arg.type))
            else:
                raise TypeError(
                    "argument %d (%r) is not an Expression or type "
                    "object" % (argid, arg))
        register_map = IA32RegisterMap(
            esp=NamedExpression(
                "esp", self.simulator.get_pointer_type(int32_t)))
        # Do we already have an existing state?
        if self.entry_state is not None:
            # Yes; manipulate it directly.
            self.entry_state.stack = stack
            self.entry_state.register_map = register_map
        else:
            # No; just create a new one.
            self.entry_state = State(
                self.start_address, previous_state=None, stack=stack,
                register_map=register_map)
            self._insert_state(self.entry_state)
        return
    def simulate_all(self):
        # Worklist walk over all states reachable from the entry state.
        to_process = set([self.entry_state])
        while len(to_process) > 0:
            state = to_process.pop()
            # If we've disassembled this before, go ahead and reuse the
            # instruction.
            insn = self.instructions.get(state.address)
            if insn is None:
                insn = self.simulator.simulate(state.address)
                self.instructions[state.address] = insn
            state.instruction = insn
            simlog.debug("Simulating 0x%x %s", state.address, str(insn))
            # Successor addresses depend on the control-flow kind of insn.
            next_addresses = []
            if insn.type == dis.insn_jmp:
                # Unconditional jump.
                next_addresses.append(insn.operands[0].value)
            elif insn.type in (dis.insn_jcc, dis.insn_call, dis.insn_callcc):
                # Conditional jump or subroutine call. Append the jump target
                # and the next instruction
                next_addresses.append(insn.operands[0].value)
                next_addresses.append(insn.addr + insn.size)
            elif insn.type == dis.insn_return:
                # Return; we don't know where to return to, so don't target
                # anything.
                pass
            else:
                # Normal instruction
                next_addresses.append(insn.addr + insn.size)
            # FIXME: Simulate stack effects
            # FIXME: Simulate register effects
            # FIXME: Simulate memory effects
            for next_address in next_addresses:
                if not (self.start_address <= next_address < self.end_address):
                    # Not within function bounds; skip it.
                    continue
                # Do we have a state for this?
                next_state = self._get_state(next_address, state.address)
                if next_state is not None:
                    # Yep; no need to process it.
                    continue
                # Create a new state and push it onto the queue
                next_state = State(
                    next_address, previous_state=state, stack=state.stack,
                    register_map=state.register_map)
                to_process.add(next_state)
                self._insert_state(next_state)
        return
    def _insert_state(self, state):
        """\
        iafn._insert_state(state)
        Insert the given state into the instruction_states dict.
        """
        addr_states = self.instruction_states.get(state.address)
        if addr_states is None:
            addr_states = {}
            self.instruction_states[state.address] = addr_states
        addr_states[state.prev_address] = state
        return
    def _get_state(self, address, prev_address):
        """\
        iafn._get_state(address, prev_address) -> State/None
        Return the state defined by the address, prev_address key, if any.
        """
        addr_states = self.instruction_states.get(address)
        if addr_states is None:
            return None
        return addr_states.get(prev_address)
class IA32Simulator(ArchitectureSimulator):
    """Architecture simulator for 32-bit IA-32 code backed by libdisasm."""

    def __init__(self, name="IA32", bits=""):
        super(IA32Simulator, self).__init__(name=name, pointer_size_bits=32)
        self.bits = bits

    def _get_bits(self):
        return self._bits

    def _set_bits(self, bits):
        # Bug fix: ``Array`` was referenced below without ever being
        # imported, so the ctypes-buffer branch raised NameError instead of
        # matching.
        from ctypes import Array
        if isinstance(bits, basestring):
            # Bug fix: the original asserted ``len(bits) > 0``, which the
            # constructor's own default (bits="") violated; an empty code
            # buffer is now permitted.
            self._bits = create_string_buffer(len(bits))
            for i in xrange(len(bits)):
                self._bits[i] = bits[i]
        elif isinstance(bits, Array):
            self._bits = bits
        else:
            raise TypeError("bits must be a string or ctypes String buffer")
    # ``bits`` may be assigned either a string or an existing ctypes buffer.
    bits = property(_get_bits, _set_bits)

    def create_function(self, name, start_address, end_address, arguments=None,
                        return_type=None, calling_convention="cdecl"):
        """Build an IA32Function covering [start_address, end_address)."""
        return IA32Function(self, name, start_address, end_address, arguments,
                            return_type, calling_convention)

    @staticmethod
    def error_report(code, data, arg):
        """libdisasm error callback: log the report and discard it."""
        simlog.error("disassembly error: code=%d, data=0x%x", code, data)
        return

    def simulate(self, address):
        """\
        sim.simulate(address) -> insn

        Simulate the instruction at the specified address, returning a
        dis.x86_insn_t structure.

        Raises ValueError when libdisasm cannot decode the bytes at
        *address*.
        """
        insn = dis.x86_insn_t()
        cb = dis.DISASM_REPORTER(IA32Simulator.error_report)
        dis.libdisasm.x86_init(0, cb, None)
        result = dis.libdisasm.x86_disasm(self.bits, len(self.bits),
                                          0, address, byref(insn))
        if result == 0:
            raise ValueError("Unable to disassemble address 0x%x" % address)
        return insn
# Local variables:
# mode: Python
# tab-width: 8
# indent-tabs-mode: nil
# End:
# vi: set expandtab tabstop=8
| {
"repo_name": "dacut/ret",
"path": "ret/ia32.py",
"copies": "1",
"size": "8610",
"license": "bsd-2-clause",
"hash": -4776463256657530000,
"line_mean": 34.5785123967,
"line_max": 79,
"alpha_frac": 0.5679442509,
"autogenerated": false,
"ratio": 3.986111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012764696180153207,
"num_lines": 242
} |
from __future__ import absolute_import, division, print_function
from datetime import date, datetime
from ..expr.core import *
from ..expr.table import *
from ..expr.scalar import *
from ..dispatch import dispatch
__all__ = ['compute']
base = (int, float, str, bool, date, datetime)
@dispatch(base, object)
def compute(a, b):
    """Scalars compute to themselves, regardless of the data argument."""
    return a
@dispatch(Expr, dict)
def compute(t, d):
    """Compute ``t`` against a mapping of expression nodes to data sources."""
    if t in d:
        return d[t]

    # Restrict the mapping to nodes that actually occur in the expression.
    relevant = set(t.traverse())
    d = dict((k, v) for k, v in d.items() if k in relevant)

    if len(d) == 1:
        # Common case: a single data source.  Switch to the standard
        # dispatching scheme.
        return compute(t, list(d.values())[0])

    if hasattr(t, 'parent'):
        parent = compute(t.parent, d)
        t2 = t.subs({t.parent: TableSymbol('', t.parent.schema)})
        return compute(t2, parent)

    raise NotImplementedError("No method found to compute on multiple Tables")
@dispatch(Join, dict)
def compute(t, d):
    """Compute each side of the join, then dispatch on the two results."""
    lhs = compute(t.lhs, d)
    rhs = compute(t.rhs, d)
    placeholders = {t.lhs: TableSymbol('_lhs', t.lhs.schema),
                    t.rhs: TableSymbol('_rhs', t.rhs.schema)}
    return compute(t.subs(placeholders), lhs, rhs)
@dispatch(Join, object)
def compute(t, o):
    """A self-join: use the same data object for both sides."""
    return compute(t, o, o)
def columnwise_funcstr(t, variadic=True, full=False):
    """Render a columnwise expression as a lambda source string.

    >>> t = TableSymbol('t', '{x: real, y: real, z: real}')
    >>> cw = t['x'] + t['z']
    >>> columnwise_funcstr(cw)
    'lambda x, z: x + z'

    >>> columnwise_funcstr(cw, variadic=False)
    'lambda (x, z): x + z'

    >>> columnwise_funcstr(cw, variadic=False, full=True)
    'lambda (x, y, z): x + z'
    """
    # ``full`` takes every parent column; otherwise only the active ones.
    columns = t.parent.columns if full else t.active_columns()
    arglist = ', '.join(map(str, columns))
    template = 'lambda %s: ' if variadic else 'lambda (%s): '
    return template % arglist + eval_str(t.expr)
| {
"repo_name": "aterrel/blaze",
"path": "blaze/compute/core.py",
"copies": "1",
"size": "1953",
"license": "bsd-3-clause",
"hash": 7564154597021644000,
"line_mean": 23.4125,
"line_max": 78,
"alpha_frac": 0.5801331285,
"autogenerated": false,
"ratio": 3.217462932454695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9294751024635082,
"avg_score": 0.0005690072639225181,
"num_lines": 80
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from numpy import nan, isnan
from pandas import DataFrame
from sympy import Eq, Expr, Max, Min, Piecewise, Symbol, symbols
from sympy.printing.theanocode import theano_function
from HelpyFuncs.SymPy import sympy_eval_by_theano
def terminal_value(
        terminal_cash_flow=0.,
        long_term_discount_rate=.01,
        long_term_growth_rate=0.):
    """Gordon-growth terminal value of a perpetuity.

    The final cash flow is grown one period and capitalized at the spread
    between the discount rate and the growth rate.
    """
    grown_cash_flow = terminal_cash_flow * (1 + long_term_growth_rate)
    return grown_cash_flow / (long_term_discount_rate - long_term_growth_rate)
def present_value(amount=0., discount_rate=0., nb_periods=0.):
    """Discount *amount* back *nb_periods* periods at *discount_rate*."""
    discount_factor = (1 + discount_rate) ** nb_periods
    return amount / discount_factor
def net_present_value(
        cash_flows=(0,),
        discount_rate=0.):
    """Net present value of *cash_flows*.

    ``cash_flows[i]`` arrives at the end of period ``i`` (index 0 is
    undiscounted).

    Improvements over the original: uses the built-in ``sum`` instead of the
    Python-2-only ``reduce`` builtin (making the function Python-3
    compatible) and returns 0 for an empty sequence instead of raising.
    """
    return sum(cf / ((1 + discount_rate) ** i)
               for i, cf in enumerate(cash_flows))
class ValModel: # base class for UnlevValModel & LevValModel below
    # Symbolic discounted-cash-flow valuation model.  Subclasses override
    # set_model_structure() to populate ``*___input`` symbol attributes and
    # the input_attrs / output_attrs name lists; this base class then gathers
    # the input symbols, optionally compiles each output to a Theano function,
    # and evaluates outputs into a pandas DataFrame when called.
    # NOTE(review): this module uses Python-2 idioms (e.g. list + range below);
    # confirm the target interpreter before reuse.
    def __init__(self, venture_name='', year_0=0, nb_pro_forma_years_excl_0=1, compile=True):
        # set Venture Name and corresponding variable prefixes
        self.venture_name = venture_name
        if venture_name:
            self.venture_name_prefix = '%s___' % venture_name
        else:
            self.venture_name_prefix = ''
        # set pro forma period timeline
        self.year_0 = year_0
        self.nb_pro_forma_years_excl_0 = nb_pro_forma_years_excl_0
        self.nb_pro_forma_years_incl_0 = nb_pro_forma_years_excl_0 + 1
        self.final_pro_forma_year = year_0 + nb_pro_forma_years_excl_0
        self.index_range = range(self.nb_pro_forma_years_incl_0)
        self.index_range_from_1 = range(1, self.nb_pro_forma_years_incl_0)
        # list all Input & Output attributes & symbols, and set model structure
        self.input_attrs = []
        self.output_attrs = []
        self.set_model_structure()
        # gather all Input symbols and set their default values
        self.input_symbols = []
        self.input_defaults = {}
        for input_attr in self.input_attrs:
            a = getattr(self, '%s___input' % input_attr)
            if isinstance(a, (list, tuple)):
                # A leading NaN marks a per-year series with no year-0 entry;
                # such inputs default to -1., others to 0.
                if (not isinstance(a[0], Symbol)) and isnan(a[0]):
                    for i in self.index_range_from_1:
                        self.input_symbols.append(a[i])
                        self.input_defaults[a[i].name] = -1.
                else:
                    for i in self.index_range:
                        self.input_symbols.append(a[i])
                        self.input_defaults[a[i].name] = 0.
            else:
                self.input_symbols.append(a)
                self.input_defaults[a.name] = 0.
        # compile Outputs if so required
        self.compile = compile
        if compile:
            # Helper: render a timedelta without its fractional seconds.
            def format_time_delta(time_delta):
                time_delta_str = str(time_delta)
                return time_delta_str[:time_delta_str.index('.')]
            print('Compiling:')
            tic_0 = datetime.now()
            # NOTE(review): ``toc`` below is referenced after the loop; an
            # empty output_attrs list would leave it undefined.
            for output in self.output_attrs:
                print(' %s... ' % output, end='')
                a = getattr(self, output)
                tic = datetime.now()
                if isinstance(a, (list, tuple)):
                    if (not isinstance(a[0], Expr)) and isnan(a[0]):
                        setattr(
                            self, output,
                            [nan] + [theano_function(self.input_symbols, [a[i]]) for i in self.index_range_from_1])
                    else:
                        setattr(
                            self, output,
                            [theano_function(self.input_symbols, [a[i]]) for i in self.index_range])
                else:
                    setattr(self, output, theano_function(self.input_symbols, [a]))
                toc = datetime.now()
                print('done after %s (%s so far)' % (format_time_delta(toc - tic), format_time_delta(toc - tic_0)))
            print('done after %s' % format_time_delta(toc - tic_0))
    def set_model_structure(self):
        # Hook for subclasses: build symbols/expressions and fill
        # input_attrs / output_attrs.
        pass
    def __call__(self, outputs=None, append_to_results_data_frame=None, **kwargs):
        # Evaluate the requested outputs with the supplied input values and
        # return {output_name: value, ..., 'data_frame': DataFrame}.
        if not outputs:
            outputs = self.output_attrs
        # Start from defaults, then overlay caller-supplied values.
        inputs = self.input_defaults.copy()
        for k, v in kwargs.items():
            attr = '%s___input' % k
            if hasattr(self, attr):
                a = getattr(self, attr)
                if isinstance(a, (list, tuple)):
                    for i in range(len(v)):
                        if isinstance(a[i], Symbol) and not isnan(v[i]):
                            inputs[a[i].name] = v[i]
                else:
                    inputs[a.name] = v
        # Evaluate one output: recurse into series, call compiled Theano
        # functions, pass NaN placeholders through, and fall back to
        # symbolic evaluation for uncompiled expressions.
        def calc(x):
            if isinstance(x, (list, tuple)):
                return [calc(i) for i in x]
            elif callable(x):
                return float(x(**inputs))
            elif (not isinstance(x, Expr)) and isnan(x):
                return nan
            else:
                return float(sympy_eval_by_theano(sympy_expr=x, symbols=self.input_symbols, **inputs))
        results = {}
        if isinstance(append_to_results_data_frame, DataFrame):
            df = append_to_results_data_frame
        else:
            # NOTE(review): list + range is Python-2 only; confirm interpreter.
            df = DataFrame(index=['Year 0'] + range(self.year_0 + 1, self.final_pro_forma_year + 1))
        print('Calculating:')
        for output in outputs:
            if output in self.output_attrs:
                print(' %s' % output)
                result = calc(getattr(self, output))
                results[output] = result
                if isinstance(result, (list, tuple)):
                    df[output] = result
                else:
                    # Scalar outputs occupy a single row of the frame.
                    df[output] = ''
                    if output in ('StabilizedDiscountRate', 'TV', 'TV_RevenueMultiple', 'TV_EBITMultiple', 'ITS_TV'):
                        df.loc[self.final_pro_forma_year, output] = result
                    else:
                        df.loc['Year 0', output] = result
            else:
                # Not a model output: echo the caller-supplied value, if any.
                df[output] = ''
                if output in kwargs:
                    v = kwargs[output]
                    if isinstance(v, (list, tuple)):
                        df.ix[range(len(v)), output] = v
                    elif output in \
                            ('StabilizedBeta', 'StabilizedDiscountRate', 'LongTermGrowthRate', 'TV_RevenueMultiple'):
                        df.loc[self.final_pro_forma_year, output] = v
                    else:
                        df.loc['Year 0', output] = v
        print('done!')
        results['data_frame'] = df
        return results
class UnlevValModel(ValModel):
    # Unlevered (all-equity) DCF valuation model.  Builds symbolic SymPy
    # expressions for the full pro forma: revenue, operating costs, EBIT,
    # taxes, fixed assets, depreciation, capex, net working capital, free
    # cash flow, discount rates, terminal value, and the unlevered valuation.
    # Many quantities follow the same pattern: an explicit ``*___input``
    # symbol wins when nonzero, otherwise the value is derived (via a ratio
    # or growth-rate input) inside a Piecewise.
    def __init__(self, venture_name='', year_0=0, nb_pro_forma_years_excl_0=1, val_all_years=False, compile=True):
        # val_all_years: if True, produce a valuation per pro forma year
        # instead of a single year-0 valuation.
        self.val_all_years = val_all_years
        ValModel.__init__(
            self,
            venture_name=venture_name,
            year_0=year_0,
            nb_pro_forma_years_excl_0=nb_pro_forma_years_excl_0,
            compile=compile)
    def set_model_structure(self):
        # Build every symbolic input and derived expression, then register
        # the input/output attribute name lists consumed by ValModel.
        # model Revenue
        self.Revenue___input = \
            symbols(
                self.venture_name_prefix +
                'Revenue___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.RevenueGrowth___input = \
            (nan,) + \
            symbols(
                self.venture_name_prefix +
                'RevenueGrowth___%d:%d' % (self.year_0 + 1, self.final_pro_forma_year + 1))
        self.Revenue = [self.Revenue___input[0]]
        for i in self.index_range_from_1:
            self.Revenue.append(
                Piecewise(
                    ((1. + self.RevenueGrowth___input[i]) * self.Revenue[-1],
                     Eq(self.Revenue___input[i], 0.)),
                    (self.Revenue___input[i],
                     True)))
        self.RevenueChange = \
            [nan] + \
            [self.Revenue[i] - self.Revenue[i - 1]
             for i in self.index_range_from_1]
        # NOTE(review): the fallback branch of this Piecewise (and the
        # analogous ones below) yields the boolean product itself rather
        # than NaN when either year's value is non-positive -- confirm
        # intent.
        self.RevenueGrowth = \
            [nan] + \
            [Piecewise(
                (self.RevenueChange[i] / self.Revenue[i - 1],
                 Eq((self.Revenue[i - 1] > 0.) * (self.Revenue[i] > 0.), 1.)),
                ((self.Revenue[i - 1] > 0.) * (self.Revenue[i] > 0.),
                 True))
             for i in self.index_range_from_1]
        # model OpEx
        self.OpEx___input = \
            symbols(
                self.venture_name_prefix +
                'OpEx___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.OpEx = self.OpEx___input
        self.OpEx_over_Revenue = \
            [self.OpEx[i] / self.Revenue[i]
             for i in self.index_range]
        self.OpExGrowth = \
            [nan] + \
            [Piecewise(
                (self.OpEx[i] / self.OpEx[i - 1] - 1.,
                 Eq((self.OpEx[i - 1] > 0.) * (self.OpEx[i] > 0.), 1.)),
                ((self.OpEx[i - 1] > 0.) * (self.OpEx[i] > 0.),
                 True))
             for i in self.index_range_from_1]
        # model EBIT
        self.EBIT___input = \
            symbols(
                self.venture_name_prefix +
                'EBIT___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.EBITMargin___input = \
            symbols(
                self.venture_name_prefix +
                'EBITMargin___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.EBIT = \
            [Piecewise(
                (Piecewise(
                    (self.EBITMargin___input[i] * self.Revenue[i],
                     Eq(self.OpEx[i], 0.)),
                    (self.Revenue[i] - self.OpEx[i],
                     True)),
                 Eq(self.EBIT___input[i], 0.)),
                (self.EBIT___input[i],
                 True))
             for i in self.index_range]
        self.EBITMargin = \
            [self.EBIT[i] / self.Revenue[i]
             for i in self.index_range]
        self.EBITGrowth = \
            [nan] + \
            [Piecewise(
                (self.EBIT[i] / self.EBIT[i - 1] - 1.,
                 Eq((self.EBIT[i - 1] > 0.) * (self.EBIT[i] > 0.), 1.)),
                ((self.EBIT[i - 1] > 0.) * (self.EBIT[i] > 0.),
                 True))
             for i in self.index_range_from_1]
        # model EBIAT
        self.CorpTaxRate___input = \
            Symbol(
                self.venture_name_prefix +
                'CorpTaxRate')
        self.OpeningTaxLoss___input = \
            Symbol(
                self.venture_name_prefix +
                'OpeningTaxLoss')
        # Tax-loss carry-forward: only positive taxable EBIT is taxed.
        self.TaxLoss = []
        self.TaxableEBIT = []
        self.EBIAT = []
        for i in self.index_range:
            if i:
                self.TaxLoss += [Min(self.TaxableEBIT[-1], 0.)]
            else:
                self.TaxLoss += [self.OpeningTaxLoss___input]
            self.TaxableEBIT += [self.TaxLoss[i] + self.EBIT[i]]
            self.EBIAT += [self.EBIT[i] - self.CorpTaxRate___input * Max(self.TaxableEBIT[i], 0.)]
        # model CLOSING Fixed Assets NET of cumulative Depreciation
        self.FA___input = \
            symbols(
                self.venture_name_prefix +
                'FA___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.FA_over_Revenue___input = \
            symbols(
                self.venture_name_prefix +
                'FA_over_Revenue___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.FAGrowth___input = \
            (nan,) + \
            symbols(
                self.venture_name_prefix +
                'FAGrowth___%d:%d' % (self.year_0 + 1, self.final_pro_forma_year + 1))
        self.FA = [self.FA___input[0]]
        for i in self.index_range_from_1:
            self.FA.append(
                Piecewise(
                    (Piecewise(
                        ((1. + self.FAGrowth___input[i]) * self.FA[-1],
                         Eq(self.FA_over_Revenue___input[i], 0.)),
                        (self.FA_over_Revenue___input[i] * self.Revenue[i],
                         True)),
                     Eq(self.FA___input[i], 0.)),
                    (self.FA___input[i],
                     True)))
        self.FA_over_Revenue = \
            [self.FA[i] / self.Revenue[i]
             for i in self.index_range]
        self.FAGrowth = \
            [nan] + \
            [Piecewise(
                (self.FA[i] / self.FA[i - 1] - 1.,
                 Eq((self.FA[i - 1] > 0.) * (self.FA[i] > 0.), 1.)),
                ((self.FA[i - 1] > 0.) * (self.FA[i] > 0.),
                 True))
             for i in self.index_range_from_1]
        # model Depreciation
        self.Depreciation___input = \
            symbols(
                self.venture_name_prefix +
                'Depreciation___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.Depreciation_over_prevFA___input = \
            Symbol(
                self.venture_name_prefix +
                'Depreciation_over_prevFA')
        self.Depreciation = \
            [self.Depreciation___input[0]] + \
            [Piecewise(
                (self.Depreciation_over_prevFA___input * self.FA[i - 1],
                 Eq(self.Depreciation___input[i], 0.)),
                (self.Depreciation___input[i],
                 True))
             for i in self.index_range_from_1]
        self.Depreciation_over_prevFA = \
            [nan] + \
            [self.Depreciation[i] / self.FA[i - 1]
             for i in self.index_range_from_1]
        # model Capital Expenditure
        self.CapEx___input = \
            symbols(
                self.venture_name_prefix +
                'CapEx___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.CapEx_over_Revenue___input = \
            symbols(
                self.venture_name_prefix +
                'CapEx_over_Revenue___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.CapEx_over_RevenueChange___input = \
            Symbol(
                self.venture_name_prefix +
                'CapEx_over_RevenueChange')
        self.CapExGrowth___input = \
            (nan,) + \
            symbols(
                self.venture_name_prefix +
                'CapExGrowth___%d:%d' % (self.year_0 + 1, self.final_pro_forma_year + 1))
        # Four-level fallback: explicit CapEx > ratio-to-revenue >
        # ratio-to-revenue-change > growth (or balance-sheet identity when
        # growth == -1).
        self.CapEx = [self.CapEx___input[0]]
        for i in self.index_range_from_1:
            self.CapEx.append(
                Piecewise(
                    (Piecewise(
                        (Piecewise(
                            (Piecewise(
                                (self.FA[i] + self.Depreciation[i] - self.FA[i - 1],
                                 Eq(self.CapExGrowth___input[i], -1.)),
                                ((1. + self.CapExGrowth___input[i]) * self.CapEx[-1],
                                 True)),
                             Eq(self.CapEx_over_RevenueChange___input, 0.)),
                            (self.CapEx_over_RevenueChange___input * self.RevenueChange[i],
                             True)),
                         Eq(self.CapEx_over_Revenue___input[i], 0.)),
                        (self.CapEx_over_Revenue___input[i] * self.Revenue[i],
                         True)),
                     Eq(self.CapEx___input[i], 0.)),
                    (self.CapEx___input[i],
                     True)))
        self.CapEx_over_Revenue = \
            [self.CapEx[i] / self.Revenue[i]
             for i in self.index_range]
        self.CapEx_over_RevenueChange = \
            [nan] + \
            [self.CapEx[i] / self.RevenueChange[i]
             for i in self.index_range_from_1]
        self.CapExGrowth = \
            [nan] + \
            [Piecewise(
                (self.CapEx[i] / self.CapEx[i - 1] - 1.,
                 Eq((self.CapEx[i - 1] > 0.) * (self.CapEx[i] > 0.), 1.)),
                ((self.CapEx[i - 1] > 0.) * (self.CapEx[i] > 0.),
                 True))
             for i in self.index_range_from_1]
        # model Net Working Capital and its change
        self.NWC___input = \
            symbols(
                self.venture_name_prefix +
                'NWC___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.NWC_over_Revenue___input = \
            symbols(
                self.venture_name_prefix +
                'NWC_over_Revenue___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.NWCGrowth___input = \
            (nan,) + \
            symbols(
                self.venture_name_prefix +
                'NWCGrowth___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.NWC = [self.NWC___input[0]]
        for i in self.index_range_from_1:
            self.NWC.append(
                Piecewise(
                    (Piecewise(
                        ((1. + self.NWCGrowth___input[i]) * self.NWC[-1],
                         Eq(self.NWC_over_Revenue___input[i], 0.)),
                        (self.NWC_over_Revenue___input[i] * self.Revenue[i], True)),
                     Eq(self.NWC___input[i], 0.)),
                    (self.NWC___input[i],
                     True)))
        self.NWC_over_Revenue = \
            [self.NWC[i] / self.Revenue[i]
             for i in self.index_range]
        self.NWCGrowth = \
            [nan] + \
            [Piecewise(
                (self.NWC[i] / self.NWC[i - 1] - 1.,
                 Eq((self.NWC[i - 1] > 0.) * (self.NWC[i] > 0.), 1.)),
                ((self.NWC[i - 1] > 0.) * (self.NWC[i] > 0.),
                 True))
             for i in self.index_range_from_1]
        self.NWCChange___input = \
            symbols(
                self.venture_name_prefix +
                'NWCChange___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.NWCChange_over_Revenue___input = \
            symbols(
                self.venture_name_prefix +
                'NWCChange_over_Revenue___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.NWCChange_over_RevenueChange___input = \
            Symbol(
                self.venture_name_prefix +
                'NWCChange_over_RevenueChange')
        self.NWCChange = [self.NWCChange___input[0]]
        for i in self.index_range_from_1:
            self.NWCChange.append(
                Piecewise(
                    (Piecewise(
                        (Piecewise(
                            (self.NWC[i] - self.NWC[i - 1],
                             Eq(self.NWCChange_over_RevenueChange___input, 0.)),
                            (self.NWCChange_over_RevenueChange___input * self.RevenueChange[i],
                             True)),
                         Eq(self.NWCChange_over_Revenue___input[i], 0.)),
                        (self.NWCChange_over_Revenue___input[i] * self.Revenue[i],
                         True)),
                     Eq(self.NWCChange___input[i], 0.)),
                    (self.NWCChange___input[i],
                     True)))
        self.NWCChange_over_Revenue = \
            [self.NWCChange[i] / self.Revenue[i]
             for i in self.index_range]
        self.NWCChange_over_RevenueChange = \
            [nan] + \
            [self.NWCChange[i] / self.RevenueChange[i]
             for i in self.index_range_from_1]
        # model Free Cash Flows before Terminal Value
        self.FCF = \
            [self.EBIAT[i] + self.Depreciation[i] - self.CapEx[i] - self.NWCChange[i]
             for i in self.index_range]
        # model Discount Rates
        self.RiskFreeRate___input = \
            Symbol(
                self.venture_name_prefix +
                'RiskFreeRate')
        self.PublicMarketReturn___input = \
            Symbol(
                self.venture_name_prefix +
                'PublicMarketReturn')
        self.PublicMarketPremium___input = \
            Symbol(
                self.venture_name_prefix +
                'PublicMarketPremium')
        self.PublicMarketPremium___expr = \
            Piecewise(
                (self.PublicMarketReturn___input - self.RiskFreeRate___input,
                 Eq(self.PublicMarketPremium___input, 0.)),
                (self.PublicMarketPremium___input,
                 True))
        self.PublicMarketPremium = self.PublicMarketPremium___expr
        self.InvestmentManagerFeePremium___input = \
            Symbol(
                self.venture_name_prefix +
                'InvestmentManagerFeePremium')
        self.ProFormaPeriodBeta___input = \
            Symbol(
                self.venture_name_prefix +
                'ProFormaPeriodBeta')
        self.ProFormaPeriodAssetDiscountRate___input = \
            Symbol(
                self.venture_name_prefix +
                'ProFormaPeriodAssetDiscountRate')
        # CAPM: risk-free rate + beta * market premium, unless overridden.
        self.ProFormaPeriodAssetDiscountRate___expr = \
            Piecewise(
                (self.RiskFreeRate___input + self.ProFormaPeriodBeta___input * self.PublicMarketPremium,
                 Eq(self.ProFormaPeriodAssetDiscountRate___input, 0.)),
                (self.ProFormaPeriodAssetDiscountRate___input,
                 True))
        self.ProFormaPeriodAssetDiscountRate = self.ProFormaPeriodAssetDiscountRate___expr
        self.ProFormaPeriodDiscountRate___input = \
            Symbol(
                self.venture_name_prefix +
                'ProFormaPeriodDiscountRate')
        self.ProFormaPeriodDiscountRate = \
            Piecewise(
                (self.ProFormaPeriodAssetDiscountRate + self.InvestmentManagerFeePremium___input,
                 Eq(self.ProFormaPeriodDiscountRate___input, 0.)),
                (self.ProFormaPeriodDiscountRate___input,
                 True))
        self.StabilizedBeta___input = \
            Symbol(
                self.venture_name_prefix +
                'StabilizedBeta')
        self.StabilizedDiscountRate___input = \
            Symbol(
                self.venture_name_prefix +
                'StabilizedDiscountRate')
        self.StabilizedDiscountRate___expr = \
            Piecewise(
                (Piecewise(
                    (self.ProFormaPeriodDiscountRate,
                     Eq(self.StabilizedBeta___input, 0.)),
                    (self.RiskFreeRate___input + self.StabilizedBeta___input * self.PublicMarketPremium,
                     True)),
                 Eq(self.StabilizedDiscountRate___input, 0.)),
                (self.StabilizedDiscountRate___input,
                 True))
        self.StabilizedDiscountRate = self.StabilizedDiscountRate___expr
        # model Long-Term Growth Rate
        self.LongTermGrowthRate___input = \
            Symbol(
                self.venture_name_prefix +
                'LongTermGrowthRate')
        # model Terminal Value
        self.TV_RevenueMultiple___input = \
            Symbol(
                self.venture_name_prefix +
                'TV_RevenueMultiple')
        # Terminal value: Gordon-growth perpetuity unless a revenue
        # multiple is supplied.
        self.TV = \
            Piecewise(
                (terminal_value(
                    terminal_cash_flow=self.FCF[-1],
                    long_term_discount_rate=self.StabilizedDiscountRate,
                    long_term_growth_rate=self.LongTermGrowthRate___input),
                 Eq(self.TV_RevenueMultiple___input, 0.)),
                (self.TV_RevenueMultiple___input * self.Revenue[-1],
                 True))
        self.TV_RevenueMultiple = \
            self.TV / self.Revenue[-1]
        self.TV_EBITMultiple = \
            Piecewise(
                (self.TV / self.EBIT[-1], self.EBIT[-1] > 0),
                (0., True))
        # model Unlevered Valuation
        # Year-0 cash flow is excluded from the valuation.
        FCF = [0.] + self.FCF[1:]
        if self.val_all_years:
            self.Val_of_FCF = \
                [net_present_value(
                    cash_flows=FCF[i:],
                    discount_rate=self.ProFormaPeriodDiscountRate)
                 for i in self.index_range]
            self.Val_of_TV = \
                [present_value(
                    amount=self.TV,
                    discount_rate=self.ProFormaPeriodDiscountRate,
                    nb_periods=self.nb_pro_forma_years_excl_0 - i)
                 for i in self.index_range]
            self.Unlev_Val = \
                [self.Val_of_FCF[i] + self.Val_of_TV[i]
                 for i in self.index_range]
        else:
            self.Val_of_FCF = \
                net_present_value(
                    cash_flows=FCF,
                    discount_rate=self.ProFormaPeriodDiscountRate)
            self.Val_of_TV = \
                present_value(
                    amount=self.TV,
                    discount_rate=self.StabilizedDiscountRate,
                    nb_periods=self.nb_pro_forma_years_excl_0)
            self.Unlev_Val = self.Val_of_FCF + self.Val_of_TV
        self.input_attrs = \
            ['Revenue', 'RevenueGrowth',
             'OpEx',
             'EBIT', 'EBITMargin',
             'CorpTaxRate', 'OpeningTaxLoss',
             'FA', 'FA_over_Revenue', 'FAGrowth',
             'Depreciation', 'Depreciation_over_prevFA',
             'CapEx', 'CapEx_over_Revenue', 'CapEx_over_RevenueChange', 'CapExGrowth',
             'NWC', 'NWC_over_Revenue', 'NWCGrowth',
             'NWCChange',
             'NWCChange_over_Revenue', 'NWCChange_over_RevenueChange',
             'RiskFreeRate', 'PublicMarketReturn', 'PublicMarketPremium', 'InvestmentManagerFeePremium',
             'ProFormaPeriodBeta', 'ProFormaPeriodAssetDiscountRate', 'ProFormaPeriodDiscountRate',
             'StabilizedBeta', 'StabilizedDiscountRate',
             'LongTermGrowthRate',
             'TV_RevenueMultiple']
        self.output_attrs = \
            ['PublicMarketPremium',
             'Revenue', 'RevenueChange', 'RevenueGrowth',
             'OpEx', 'OpEx_over_Revenue', 'OpExGrowth',
             'EBIT', 'EBITMargin', 'EBITGrowth',
             'TaxLoss', 'TaxableEBIT', 'EBIAT',
             'FA', 'FA_over_Revenue', 'FAGrowth',
             'Depreciation', 'Depreciation_over_prevFA',
             'CapEx', 'CapEx_over_Revenue', 'CapEx_over_RevenueChange', 'CapExGrowth',
             'NWC', 'NWC_over_Revenue', 'NWCGrowth',
             'NWCChange', 'NWCChange_over_Revenue',
             'NWCChange_over_RevenueChange',
             'FCF',
             'StabilizedDiscountRate', 'TV', 'TV_RevenueMultiple', 'TV_EBITMultiple',
             'ProFormaPeriodAssetDiscountRate', 'ProFormaPeriodDiscountRate', 'Unlev_Val'
             # skipping Val_of_FCF & Val_of_TV to save compilation time
             ]
class LevValModel(ValModel):
    """Levered Valuation Model.

    Wraps an already-built unlevered ValModel and layers on debt,
    interest expense and the Interest Tax Shield (ITS), producing the
    levered valuation: Lev_Val = Unlev_Val + Val_of_ITS_incl_TV.
    """
    def __init__(self, unlev_val_model):
        # Keep the unlevered model around: its input symbols and expressions
        # are reused when this model's structure is built.
        self.unlev_val_model = unlev_val_model
        ValModel.__init__(
            self,
            venture_name=unlev_val_model.venture_name,
            year_0=unlev_val_model.year_0,
            nb_pro_forma_years_excl_0=unlev_val_model.nb_pro_forma_years_excl_0,
            compile=unlev_val_model.compile)
    def set_model_structure(self):
        """Build the symbolic (SymPy) structure of the levered model."""
        # get certain Input symbols from the Unlevered Valuation Model
        self.CorpTaxRate___input = self.unlev_val_model.CorpTaxRate___input
        self.RiskFreeRate___input = self.unlev_val_model.RiskFreeRate___input
        self.PublicMarketReturn___input = self.unlev_val_model.PublicMarketReturn___input
        self.PublicMarketPremium___input = self.unlev_val_model.PublicMarketPremium___input
        self.InvestmentManagerFeePremium___input = self.unlev_val_model.InvestmentManagerFeePremium___input
        self.ProFormaPeriodBeta___input = self.unlev_val_model.ProFormaPeriodBeta___input
        self.ProFormaPeriodAssetDiscountRate___input = self.unlev_val_model.ProFormaPeriodAssetDiscountRate___input
        self.ProFormaPeriodDiscountRate___input = self.unlev_val_model.ProFormaPeriodDiscountRate___input
        self.StabilizedBeta___input = self.unlev_val_model.StabilizedBeta___input
        self.StabilizedDiscountRate___input = self.unlev_val_model.StabilizedDiscountRate___input
        # model Unlevered Valuation
        if self.unlev_val_model.val_all_years:
            # NOTE(review): unlike the other inputs below, these symbols are
            # not prefixed with venture_name_prefix -- confirm intentional.
            self.Unlev_Val___input = \
                symbols(
                    'Unlev_Val___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        else:
            self.Unlev_Val___input = \
                Symbol('Unlev_Val')
        self.Unlev_Val = self.Unlev_Val___input
        # model Debt-to-Equity ("D / E") Ratios
        self.DERatio___input = \
            Symbol(
                self.venture_name_prefix +
                'DERatio')
        self.ProFormaPeriodDERatio___input = \
            Symbol(
                self.venture_name_prefix +
                'ProFormaPeriodDERatio')
        # A pro-forma-period-specific D/E ratio of 0. means "fall back to the
        # terminal D/E ratio".
        pro_forma_period_d_e_ratio = \
            Piecewise(
                (self.DERatio___input,
                 Eq(self.ProFormaPeriodDERatio___input, 0.)),
                (self.ProFormaPeriodDERatio___input,
                 True))
        self.DERatios = \
            self.nb_pro_forma_years_excl_0 * [pro_forma_period_d_e_ratio] + [self.DERatio___input]
        # model Debt
        self.Debt___input = \
            symbols(
                self.venture_name_prefix +
                'Debt___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        if self.unlev_val_model.val_all_years:
            # Debt input of 0. means "derive debt from the D/E ratio and the
            # unlevered valuation of that year".
            self.Debt = \
                [Piecewise(
                    ((1. - 1. / (1. + self.DERatios[i])) * self.Unlev_Val[i],
                     Eq(self.Debt___input[i], 0.)),
                    (self.Debt___input[i],
                     True))
                 for i in self.index_range]
        else:
            self.Debt = self.Debt___input
        # model Interest Rates
        self.InterestRate___input = \
            Symbol(
                self.venture_name_prefix +
                'InterestRate')
        self.ProFormaPeriodInterestRate___input = \
            Symbol(
                self.venture_name_prefix +
                'ProFormaPeriodInterestRate')
        pro_forma_period_interest_rate = \
            Piecewise(
                (self.InterestRate___input,
                 Eq(self.ProFormaPeriodInterestRate___input, 0.)),
                (self.ProFormaPeriodInterestRate___input,
                 True))
        self.InterestRates = \
            self.nb_pro_forma_years_excl_0 * [pro_forma_period_interest_rate] + [self.InterestRate___input]
        # Per-year interest-rate overrides: a per-year input of 0. keeps the
        # rate derived above.
        self.InterestRates___input = \
            symbols(
                self.venture_name_prefix +
                'InterestRates___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.InterestRates = \
            [Piecewise(
                (self.InterestRates[i],
                 Eq(self.InterestRates___input[i], 0.)),
                (self.InterestRates___input[i],
                 True))
             for i in self.index_range]
        # model Interest Expense & InterestTaxShield
        self.InterestExpense___input = \
            symbols(
                self.venture_name_prefix +
                'InterestExpense___%d:%d' % (self.year_0, self.final_pro_forma_year + 1))
        self.InterestExpense = \
            [Piecewise(
                (self.InterestRates[i] * self.Debt[i],
                 Eq(self.InterestExpense___input[i], 0.)),
                (self.InterestExpense___input[i],
                 True))
             for i in self.index_range]
        # BUG FIX: this was map(lambda ...), which returns a lazy iterator on
        # Python 3, so the self.ITS[-1] / self.ITS[1:] indexing below raised
        # TypeError. A list comprehension behaves identically on Python 2.
        self.ITS = \
            [self.CorpTaxRate___input * interest_expense
             for interest_expense in self.InterestExpense]
        # model Interest Tax Shield (ITS) Discount Rate
        self.DebtBeta___input = \
            Symbol(
                self.venture_name_prefix +
                'DebtBeta')
        self.DebtDiscountRate___input = \
            Symbol(
                self.venture_name_prefix +
                'DebtDiscountRate')
        self.DebtDiscountRate = \
            Piecewise(
                (Piecewise(
                    (self.DebtBeta___input + self.DebtDiscountRate___input,
                     # use this expression because '0.' throws SymPy / Theano bug
                     Eq(self.DebtBeta___input, 0.)),
                    (self.RiskFreeRate___input +
                     self.DebtBeta___input * self.unlev_val_model.PublicMarketPremium___expr,
                     True)),
                 Eq(self.DebtDiscountRate___input, 0.)),
                (self.DebtDiscountRate___input,
                 True))
        # If no debt discount rate is supplied, discount the ITS at the
        # corresponding asset / stabilized discount rate instead.
        self.ProFormaPeriodITSDiscountRate = \
            Piecewise(
                (self.unlev_val_model.ProFormaPeriodAssetDiscountRate___expr,
                 Eq(self.DebtDiscountRate, 0.)),
                (self.DebtDiscountRate,
                 True))
        self.StabilizedITSDiscountRate = \
            Piecewise(
                (self.unlev_val_model.StabilizedDiscountRate___expr,
                 Eq(self.DebtDiscountRate, 0.)),
                (self.DebtDiscountRate,
                 True))
        # model Terminal Value of Interest Tax Shield
        self.ITS_TV = \
            terminal_value(
                terminal_cash_flow=self.ITS[-1],
                long_term_discount_rate=self.StabilizedITSDiscountRate,
                long_term_growth_rate=0.)
        # model Valuation of Interest Tax Shield, and Levered Valuation
        # Year 0's tax shield is excluded from the NPV (already realized).
        ITS = [0.] + self.ITS[1:]
        if self.unlev_val_model.val_all_years:
            self.Val_of_ITS = \
                [net_present_value(
                    cash_flows=ITS[i:],
                    discount_rate=self.ProFormaPeriodITSDiscountRate)
                 for i in self.index_range]
            self.Val_of_ITS_TV = \
                [present_value(
                    amount=self.ITS_TV,
                    discount_rate=self.StabilizedITSDiscountRate,
                    nb_periods=self.nb_pro_forma_years_excl_0 - i)
                 for i in self.index_range]
            self.Val_of_ITS_incl_TV = \
                [self.Val_of_ITS[i] + self.Val_of_ITS_TV[i]
                 for i in self.index_range]
            self.Lev_Val = \
                [self.Unlev_Val[i] + self.Val_of_ITS_incl_TV[i]
                 for i in self.index_range]
        else:
            self.Val_of_ITS = \
                net_present_value(
                    cash_flows=ITS,
                    discount_rate=self.ProFormaPeriodITSDiscountRate)
            self.Val_of_ITS_TV = \
                present_value(
                    amount=self.ITS_TV,
                    discount_rate=self.StabilizedITSDiscountRate,
                    nb_periods=self.nb_pro_forma_years_excl_0)
            self.Val_of_ITS_incl_TV = self.Val_of_ITS + self.Val_of_ITS_TV
            self.Lev_Val = self.Unlev_Val + self.Val_of_ITS_incl_TV
        self.input_attrs = \
            ['Unlev_Val',
             'CorpTaxRate',
             'RiskFreeRate', 'PublicMarketReturn', 'PublicMarketPremium', 'InvestmentManagerFeePremium',
             'ProFormaPeriodBeta', 'ProFormaPeriodAssetDiscountRate', 'ProFormaPeriodDiscountRate',
             'StabilizedBeta', 'StabilizedDiscountRate',
             'DERatio', 'ProFormaPeriodDERatio', 'Debt',
             'InterestRate', 'ProFormaPeriodInterestRate', 'InterestRates',
             'InterestExpense',
             'DebtBeta', 'DebtDiscountRate']
        self.output_attrs = \
            ['ProFormaPeriodITSDiscountRate', 'StabilizedITSDiscountRate',
             'Unlev_Val',
             'DERatios', 'Debt',
             'InterestRates',
             'InterestExpense', 'ITS',
             'ITS_TV',
             'Val_of_ITS_incl_TV',   # skipping Val_of_ITS & Val_of_ITS_TV to save compilation time
             'Lev_Val']
| {
"repo_name": "MBALearnsToCode/CorpFin",
"path": "CorpFin/Valuation.py",
"copies": "2",
"size": "35647",
"license": "mit",
"hash": 7526294460787289000,
"line_mean": 37.4956803456,
"line_max": 117,
"alpha_frac": 0.4961988386,
"autogenerated": false,
"ratio": 3.6791206522860977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000740970356981737,
"num_lines": 926
} |
from __future__ import absolute_import, division, print_function
from datetime import datetime
import errno
import os
import random
import string
import subprocess
import tempfile
from document_clipper import exceptions
class ShellCommand(object):
    """
    Thin convenience wrapper around subprocess for running external tools.
    Based on textract, thxs :)
    """
    def run(self, args):
        """Execute *args* as a subprocess.

        Returns a (stdout, stderr) tuple on success. Raises
        exceptions.ShellCommandError when the executable is missing or the
        command exits with a non-zero status.
        """
        try:
            proc = subprocess.Popen(
                args,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            )
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise exceptions.ShellCommandError(' '.join(args), e.errno or -1, '', e.strerror or '')
            # Executable not found: mirror the shell's exit code 127.
            raise exceptions.ShellCommandError(
                ' '.join(args), 127, '', '',
            )
        # communicate() instead of wait(): wait() can deadlock when the child
        # fills its pipe buffers on large files.
        stdout, stderr = proc.communicate()
        # A broken pipe / non-zero exit is an error (unlike Fabric).
        if proc.returncode != 0:
            raise exceptions.ShellCommandError(
                ' '.join(args), proc.returncode, stdout, stderr,
            )
        return stdout, stderr
    def temp_dir(self):
        """Create a fresh temporary directory and return its path."""
        return tempfile.mkdtemp()
class PDFToTextCommand(ShellCommand):
    """
    Wrapper for the ``pdftotext`` tool from Poppler utils.
    """
    def run(self, file_name, page):
        """Return the UTF-8 text of a single *page* of the PDF *file_name*."""
        command = ['pdftotext', '-enc', 'UTF-8', '-f', str(page), '-l',
                   str(page), file_name, '-']
        stdout, _ = super(PDFToTextCommand, self).run(command)
        return stdout
class PDFToImagesCommand(ShellCommand):
    """
    Wrapper for the ``pdfimages`` tool from Poppler utils.
    """
    def run(self, file_name, page):
        """Dump the images of *page* into a fresh temp dir; return its path."""
        out_dir = self.temp_dir()
        command = ['pdfimages', '-f', str(page), '-l', str(page), '-j',
                   file_name, '%s/%s' % (out_dir, str(page))]
        super(PDFToImagesCommand, self).run(command)
        return out_dir
class PDFListImagesCommand(ShellCommand):
    """
    Uses ``pdfimages -list`` to report whether a page contains images.
    """
    def run(self, file_name, page):
        """Return the raw ``pdfimages -list`` output for *page*."""
        command = ['pdfimages', '-f', str(page), '-l', str(page),
                   '-list', file_name]
        stdout, _ = super(PDFListImagesCommand, self).run(command)
        return stdout
    def has_images(self, out):
        """True when the listing output *out* mentions at least one image."""
        return b'image' in out
class FixPdfCommand(ShellCommand):
    """
    Creates a new PDF file from a possibly-corrupted or bad-formatted PDF file.
    """
    def run(self, input_file_path):
        """Re-render the PDF via pdftocairo; best effort.

        Returns the path of the fixed copy (original is deleted), or the
        original path unchanged when pdftocairo fails for any reason.
        """
        in_filename = os.path.basename(input_file_path)
        random.seed(datetime.now())
        filename_prefix = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(7))
        path_to_corrected_pdf = u"/tmp/%s_%s" % (filename_prefix, in_filename)
        try:
            super(FixPdfCommand, self).run(['/usr/bin/pdftocairo', '-pdf', '-origpagesizes',
                                            input_file_path, path_to_corrected_pdf])
        except Exception:
            # Any failure (including ShellCommandError) leaves the input as-is.
            return input_file_path
        os.remove(input_file_path)
        return path_to_corrected_pdf
class PdfToXMLCommand(ShellCommand):
    """Converts a PDF into the XML representation produced by pdftohtml."""
    def run(self, pdf_file_path):
        """Return the pdftohtml XML of *pdf_file_path* as text."""
        with tempfile.NamedTemporaryFile(mode='r', suffix='.xml') as xml_file:
            # pdftohtml appends '.xml' itself, so hand it the bare base name.
            out_base = os.path.splitext(xml_file.name)[0]
            super(PdfToXMLCommand, self).run(['pdftohtml', '-xml', '-nodrm', '-zoom', '1.5',
                                              '-enc', 'UTF-8', '-noframes', pdf_file_path, out_base])
            xml_data = xml_file.read()
        try:
            # Python 2 reads bytes here; decode them.
            return xml_data.decode('utf-8')
        except AttributeError:
            # Python 3 text-mode file already yields str.
            return xml_data
| {
"repo_name": "reclamador/document_clipper",
"path": "document_clipper/utils.py",
"copies": "1",
"size": "4296",
"license": "mit",
"hash": 1515979164283441200,
"line_mean": 32.3023255814,
"line_max": 116,
"alpha_frac": 0.5526070764,
"autogenerated": false,
"ratio": 4.232512315270936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009165349915923084,
"num_lines": 129
} |
from __future__ import absolute_import, division, print_function
from ete3 import Tree
import sys
import operator
#read in a rooted treelist, print out a table of the most probable root splits and their probabilities
# Load the rooted trees, one newick string per line.
with open(sys.argv[1]) as tree_file:
    trees = [Tree(line.rstrip()) for line in tree_file]
num_trees = len(trees)
# Taxon names, taken from the leaves of the first tree.
leaf_names = [leaf.name for leaf in trees[0]]
num_taxa = str(len(leaf_names))
roots = []  # canonical label (set of leaf names) of each tree's root split
for t in trees:
    # get the taxa on either side of the root node
    sides = t.get_children()
    side1 = set()
    side2 = set()
    for leaf in sides[0]:
        side1.add(leaf.name)
    for leaf in sides[1]:
        side2.add(leaf.name)
    # use the smaller set as the label for this root position
    if len(side1) < len(side2):
        roots.append(side1)
    elif len(side2) < len(side1):
        roots.append(side2)
    else:
        # BUG FIX: for an equal-sized split the old code always kept side1,
        # so the same root bipartition could receive two different labels
        # depending on the order of the root's children, splitting its count
        # across two entries. Pick the lexicographically smaller side so the
        # label is deterministic regardless of child ordering.
        roots.append(min(side1, side2, key=sorted))
# get unique sets
unique_roots = set(frozenset(i) for i in roots)
print(len(unique_roots))
print(unique_roots)
# Count how many trees exhibit each unique root split in a single O(n) pass
# (the old code rescanned the whole list once per unique split: O(n * m)).
root_probs = {}
for rset in roots:
    key = frozenset(rset)
    root_probs[key] = root_probs.get(key, 0) + 1
# Sort by sample count, most probable root split first.
sorted_root_probs = sorted(root_probs.items(), key=operator.itemgetter(1), reverse=True)
for element in sorted_root_probs:
    print(str(element[0]) + "\t" + str(float(element[1])/float(num_trees)))
| {
"repo_name": "Tancata/phylo",
"path": "sgts/marginal_root_probabilities.py",
"copies": "1",
"size": "1523",
"license": "mit",
"hash": -1187991832647919000,
"line_mean": 27.7358490566,
"line_max": 102,
"alpha_frac": 0.6513460276,
"autogenerated": false,
"ratio": 3.1861924686192467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9311514097576352,
"avg_score": 0.0052048797285789315,
"num_lines": 53
} |
from __future__ import absolute_import, division, print_function
from ete3 import Tree
import sys
#read in a rooted treelist, print out a .trees file that RootAnnotator might (?) like
# Load the rooted trees, one newick string per line.
with open(sys.argv[1]) as tree_file:
    trees = [Tree(line.rstrip()) for line in tree_file]
# Taxon names from the first tree; all trees are assumed to share them.
leaf_names = [leaf.name for leaf in trees[0]]
num_taxa = str(len(leaf_names))
# Map each taxon name to its 1-based index, as NEXUS Translate blocks expect.
leaf_map = {name: str(i) for i, name in enumerate(leaf_names, 1)}
# Emit the NEXUS header and taxa block.
print('#NEXUS\n\nBegin taxa\n\tDimensions ntax=' + num_taxa + ';\n\tTaxlabels')
for taxon in leaf_names:
    print('\t\t' + taxon)
print('\t\t;\nEnd;\n\nBegin trees;\n\tTranslate')
# Translate table: every entry except the last is comma-terminated.
for taxon in leaf_names:
    suffix = '' if taxon == leaf_names[-1] else ','
    print('\t\t' + leaf_map[taxon] + ' ' + taxon + suffix)
print('\t\t;')
# Relabel leaves with their indices and emit each tree.
for tree_count, t in enumerate(trees, 1):
    for leaf in t:
        leaf.name = leaf_map[leaf.name]
    print('tree ' + str(tree_count) + ' = ' + str(t.write()))
print('End;')
| {
"repo_name": "Tancata/phylo",
"path": "sgts/format_treelist_for_rootannotator.py",
"copies": "1",
"size": "1107",
"license": "mit",
"hash": -2690309039003195000,
"line_mean": 22.0625,
"line_max": 85,
"alpha_frac": 0.6151761518,
"autogenerated": false,
"ratio": 2.7675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38826761517999997,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
def filenames(hdfs, path):
    """
    Filenames in Hadoop File System under specific path

    Parameters
    ----------
    hdfs: pywebhdfs.webhdfs.PyWebHdfsClient instance
    path: string
        Directory on HDFS

    Path can be either

    1.  A directory -- 'user/data/myfiles/'
    2.  A globstring -- 'user/data/myfiles/*/*.json'

    Returns all filenames within this directory and all subdirectories,
    recursing into every subdirectory.
    """
    if '*' in path:
        # Glob: list everything under the directory before the first '*',
        # then keep only the names matching the pattern.
        base = path[:path.find('*')].rsplit('/', 1)[0]
        pattern = path + '*'
        return [name for name in filenames(hdfs, base)
                if fnmatch(name, pattern)]
    path = path.strip('/')
    entries = hdfs.list_dir(path)['FileStatuses']['FileStatus']
    files = []
    directories = []
    for entry in entries:
        full_name = '%s/%s' % (path, entry['pathSuffix'])
        if entry['type'] == 'FILE':
            files.append(full_name)
        elif entry['type'] == 'DIRECTORY':
            directories.append(full_name)
    for directory in directories:
        files.extend(filenames(hdfs, directory))
    return files
| {
"repo_name": "wiso/dask",
"path": "dask/hdfs_utils.py",
"copies": "14",
"size": "1078",
"license": "bsd-3-clause",
"hash": 3593010836461802000,
"line_mean": 29.8,
"line_max": 70,
"alpha_frac": 0.594619666,
"autogenerated": false,
"ratio": 3.92,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from functools import partial
from .compatibility import _strtypes
from .compute.spark import *
from .data.utils import coerce
from .dispatch import dispatch
from .expr import Expr
from datashape import discover, var
from collections import Iterator, Iterable
__all__ = ['pyspark', 'coerce']
@dispatch(_strtypes, RDD)
def coerce(dshape, rdd):
    """Coerce every partition of *rdd* to the given datashape."""
    coerce_partition = partial(coerce, dshape)
    return rdd.mapPartitions(coerce_partition)
@dispatch(type, RDD)
def into(a, rdd, **kwargs):
    """Re-dispatch on the concrete RDD subclass of *rdd*."""
    handler = into.dispatch(a, type(rdd))
    return handler(a, rdd, **kwargs)
@dispatch(object, RDD)
def into(o, rdd, **kwargs):
    """Collect the RDD locally, then convert the resulting list into *o*."""
    collected = rdd.collect()
    return into(o, collected)
@dispatch((tuple, list, set), RDD)
def into(a, b, **kwargs):
    """Collect *b* and convert it into the container type of *a*.

    List-like rows (that are not already plain tuples) are converted to
    tuples so the result is uniform and hashable.
    """
    if not isinstance(a, type):
        a = type(a)
    b = b.collect()
    # BUG FIX: guard against an empty RDD -- b[0] raised IndexError.
    if b and isinstance(b[0], (tuple, list)) and not type(b[0]) == tuple:
        b = map(tuple, b)
    return a(b)
@dispatch(SparkContext, (Expr, RDD, object) + _strtypes)
def into(sc, o, **kwargs):
    """Distribute *o* as an RDD on the SparkContext *sc*."""
    data = into(list, o, **kwargs)
    return sc.parallelize(data)
@dispatch(RDD)
def discover(rdd):
    """Infer a variable-length datashape from the RDD's first 50 elements."""
    sample = rdd.take(50)
    return var * discover(sample).subshape[0]
| {
"repo_name": "vitan/blaze",
"path": "blaze/spark.py",
"copies": "1",
"size": "1155",
"license": "bsd-3-clause",
"hash": -6546688460158879000,
"line_mean": 23.5744680851,
"line_max": 67,
"alpha_frac": 0.670995671,
"autogenerated": false,
"ratio": 3.2172701949860723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4388265865986072,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from functools import total_ordering
class Type(object):
    """Base class for every value type tracked by the analysis."""

    def restrict_with(self, other):
        """Narrow this type against *other*.

        Identical types stay as-is; differing types collapse into a
        ConfusedType holding both alternatives.
        """
        if self == other:
            return self
        return ConfusedType({self, other})
@total_ordering
class VoidType(Type):
    """The "void" type; every instance compares equal to every other."""

    def __repr__(self):
        return "void"

    def __eq__(self, other):
        # All VoidType instances denote the same type.
        return isinstance(other, VoidType)

    def __lt__(self, other):
        # Arbitrary-but-stable cross-class ordering by class identity.
        return id(self.__class__) < id(other.__class__)

    def __hash__(self):
        return 0


# The canonical void instance.
void = VoidType()
@total_ordering
class UnknownType(Type):
    """Placeholder for a value whose type is not yet known.

    Each instance is distinct: equality, ordering and hashing are by
    identity, and restriction always defers to the other type.
    """

    def __init__(self, bit_size):
        super(UnknownType, self).__init__()
        # Width in bits of the value of unknown type.
        self.bit_size = bit_size
        return

    def restrict_with(self, other):
        # Any concrete information wins over "unknown".
        return other

    def __repr__(self):
        return "unknown%d" % self.bit_size

    def __eq__(self, other):
        return self is other

    def __lt__(self, other):
        return id(self) < id(other)

    def __hash__(self):
        return hash((self.__class__, id(self)))
@total_ordering
class ConfusedType(Type):
    """A type observed to be several incompatible types.

    ``subtypes`` holds the set of alternatives seen so far; restricting
    with further types unions them in.
    """

    def __init__(self, subtypes):
        super(ConfusedType, self).__init__()
        self.subtypes = set(subtypes)
        return

    def restrict_with(self, other):
        """Merge *other* (or its alternatives) into this confused type."""
        if isinstance(other, ConfusedType):
            return ConfusedType(self.subtypes.union(other.subtypes))
        else:
            return ConfusedType(self.subtypes.union({other}))

    def __repr__(self):
        return ("confused(%s)" %
                ",".join([repr(st) for st in sorted(self.subtypes)]))

    def __eq__(self, other):
        return (isinstance(other, ConfusedType) and
                self.subtypes == other.subtypes)

    def __lt__(self, other):
        if not isinstance(other, ConfusedType):
            return id(self.__class__) < id(other.__class__)
        return sorted(self.subtypes) < sorted(other.subtypes)

    def __hash__(self):
        # BUG FIX: ``self.subtypes`` is a mutable set, which is unhashable,
        # so hashing any ConfusedType raised TypeError. Hash an immutable
        # frozenset snapshot instead.
        return hash((self.__class__, frozenset(self.subtypes)))
@total_ordering
class IntegerType(Type):
    """A fixed-width integer type; equality and ordering are by bit width."""

    def __init__(self, bit_size):
        super(IntegerType, self).__init__()
        self.bit_size = bit_size
        return

    def __repr__(self):
        # 1-bit integers get their own spelling.
        if self.bit_size == 1:
            return "bit_t"
        return "int%d_t" % (self.bit_size,)

    def __eq__(self, other):
        return (isinstance(other, IntegerType) and
                self.bit_size == other.bit_size)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return id(self.__class__) < id(other.__class__)
        return self.bit_size < other.bit_size

    def __hash__(self):
        return hash((self.__class__, self.bit_size))


# Common integer widths.
bit_t = IntegerType(1)
int8_t = IntegerType(8)
int16_t = IntegerType(16)
int32_t = IntegerType(32)
int64_t = IntegerType(64)
int128_t = IntegerType(128)
int256_t = IntegerType(256)
@total_ordering
class CharacterType(IntegerType):
    """An integer type that represents a character code unit."""

    def __init__(self, bit_size, name):
        super(CharacterType, self).__init__(bit_size)
        self.name = name
        return

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        # NOTE(review): this is asymmetric with IntegerType.__eq__ --
        # int8_t == char_t is True but char_t == int8_t is False. Left
        # unchanged to preserve existing behavior; verify intent.
        return (isinstance(other, CharacterType) and
                self.bit_size == other.bit_size)

    def __lt__(self, other):
        if not isinstance(other, CharacterType):
            return id(self.__class__) < id(other.__class__)
        return self.bit_size < other.bit_size

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ sets __hash__ to None on
        # Python 3, making char_t/ucs16_t/ucs32_t unhashable (unusable in
        # sets or as dict keys). Restore the hash formula inherited from
        # IntegerType on Python 2.
        return hash((self.__class__, self.bit_size))


char_t = CharacterType(8, "char")
ucs16_t = CharacterType(16, "ucs16")
ucs32_t = CharacterType(32, "ucs32")
@total_ordering
class ArrayType(Type):
    """An array of ``component_type``; ``length`` is None when unknown."""

    def __init__(self, component_type, length):
        super(ArrayType, self).__init__()
        self.component_type = component_type
        self.length = length
        return

    def restrict_with(self, other):
        """Arrays of the same component type: a known length beats unknown."""
        if (isinstance(other, ArrayType) and
                self.component_type == other.component_type):
            if self.length is None:
                return other
            if other.length is None:
                return self
        return super(ArrayType, self).restrict_with(other)

    def __repr__(self):
        if self.length is None:
            return "%r[]" % (self.component_type,)
        return "%r[%d]" % (self.component_type, self.length)

    def __eq__(self, other):
        if not isinstance(other, ArrayType):
            return False
        return (self.component_type == other.component_type and
                self.length == other.length)

    def __lt__(self, other):
        if not isinstance(other, ArrayType):
            return id(self.__class__) < id(other.__class__)
        return ((self.component_type, self.length) <
                (other.component_type, other.length))

    def __hash__(self):
        return hash((self.__class__, self.component_type, self.length))


# A char[] of unknown length.
char_array_t = ArrayType(char_t, None)
@total_ordering
class StructureMember(object):
    """A single named, typed member at a fixed byte offset in a structure."""

    def __init__(self, name, offset, type):
        super(StructureMember, self).__init__()
        self.offset = offset
        self.name = name
        self.type = type
        return

    def __repr__(self):
        return "%s@0x%x(%r)" % (self.name, self.offset, self.type)

    def __eq__(self, other):
        if not isinstance(other, StructureMember):
            return False
        return ((self.offset, self.name, self.type) ==
                (other.offset, other.name, other.type))

    def __lt__(self, other):
        if not isinstance(other, StructureMember):
            return id(self.__class__) < id(other.__class__)
        # Order by offset first, then name, then type.
        return ((self.offset, self.name, self.type) <
                (other.offset, other.name, other.type))

    def __hash__(self):
        return hash((self.__class__, self.offset, self.name, self.type))
@total_ordering
class StructureType(Type):
    """A named aggregate type; members are indexed by byte offset.

    Identity (equality, ordering, hash) is by name only, so two structures
    with the same name compare equal regardless of their members.
    """

    def __init__(self, name, components):
        super(StructureType, self).__init__()
        if not all(isinstance(el, StructureMember) for el in components):
            raise TypeError("components must be a sequence of "
                            "StructureMember objects")
        self.name = name
        # Map offset -> member for fast lookup by position.
        self.components = {comp.offset: comp for comp in components}
        return

    def __repr__(self):
        return "struct %s" % (self.name,)

    def __eq__(self, other):
        return (isinstance(other, StructureType) and
                self.name == other.name)

    def __lt__(self, other):
        if not isinstance(other, StructureType):
            return id(self.__class__) < id(other.__class__)
        return self.name < other.name

    def __hash__(self):
        return hash((self.__class__, self.name))
@total_ordering
class PointerType(IntegerType):
    """A pointer: a ``bit_size``-bit integer referring to ``reference_type``."""

    def __init__(self, bit_size, reference_type):
        super(PointerType, self).__init__(bit_size)
        self.reference_type = reference_type
        return

    def restrict_with(self, other):
        """For same-width pointers, a typed pointer wins over void*."""
        if (isinstance(other, PointerType) and
                self.bit_size == other.bit_size):
            if self.reference_type == void:
                return other
            if other.reference_type == void:
                return self
        return super(PointerType, self).restrict_with(other)

    def __repr__(self):
        return "ptr(%r)" % (self.reference_type,)

    def __eq__(self, other):
        if not isinstance(other, PointerType):
            return False
        return (self.bit_size == other.bit_size and
                self.reference_type == other.reference_type)

    def __lt__(self, other):
        if not isinstance(other, PointerType):
            return id(self.__class__) < id(other.__class__)
        return ((self.bit_size, self.reference_type) <
                (other.bit_size, other.reference_type))

    def __hash__(self):
        return hash((self.__class__, self.bit_size, self.reference_type))
class Expression(object):
    """Base class for symbolic expressions.

    Arithmetic/bitwise operators build combined expression trees instead of
    computing values; ``name`` renders the expression as source-like text.
    """

    def __init__(self, type):
        super(Expression, self).__init__()
        # The Type of the value this expression yields; None means unknown.
        self.type = type
        return

    @property
    def name(self):
        return "unnamed"

    def __repr__(self):
        type_repr = repr(self.type) if self.type is not None else "unknown"
        return self.name + ":" + type_repr

    def __neg__(self):
        return NegationExpression(self)

    def __pos__(self):
        # Unary plus is the identity.
        return self

    def __invert__(self):
        return BitwiseNegationExpression(self)

    def __mul__(self, other):
        return MultiplicationExpression(self, other)

    def __div__(self, other):
        # Python 2 division protocol.
        return DivisionExpression(self, other)

    def __truediv__(self, other):
        return DivisionExpression(self, other)

    def __add__(self, other):
        # BUG FIX: was misspelled "__add_", so "+" never dispatched here.
        return AdditionExpression(self, other)

    def __sub__(self, other):
        return SubtractionExpression(self, other)

    def __lshift__(self, other):
        return LeftShiftExpression(self, other)

    def __rshift__(self, other):
        return RightShiftExpression(self, other)

    def __and__(self, other):
        return AndExpression(self, other)

    def __xor__(self, other):
        return XorExpression(self, other)

    def __or__(self, other):
        return OrExpression(self, other)

    def __rmul__(self, other):
        return MultiplicationExpression(other, self)

    def __rdiv__(self, other):
        return DivisionExpression(other, self)

    def __rtruediv__(self, other):
        return DivisionExpression(other, self)

    def __radd__(self, other):
        # BUG FIX: was misspelled "__radd_" (see __add__ above).
        return AdditionExpression(other, self)

    def __rsub__(self, other):
        return SubtractionExpression(other, self)

    def __rlshift__(self, other):
        return LeftShiftExpression(other, self)

    def __rrshift__(self, other):
        return RightShiftExpression(other, self)

    def __rand__(self, other):
        return AndExpression(other, self)

    def __rxor__(self, other):
        return XorExpression(other, self)

    def __ror__(self, other):
        return OrExpression(other, self)
class NamedExpression(Expression):
    """An expression that is just a name (register, variable, ...)."""
    precedence = 1

    def __init__(self, name, type):
        super(NamedExpression, self).__init__(type=type)
        self._name = name
        return

    @property
    def name(self):
        return self._name

    def __hash__(self):
        return hash((self.__class__, self.name))
class UnaryExpression(Expression):
    """Base class for prefix operator expressions; subclasses set ``op``."""

    def __init__(self, operand):
        # A unary operation preserves its operand's type.
        super(UnaryExpression, self).__init__(type=operand.type)
        self.operand = operand
        return

    @property
    def name(self):
        """Render ``op operand``, parenthesizing compound operands."""
        inner = self.operand.name
        if isinstance(self.operand, (UnaryExpression, BinaryExpression)):
            return self.op + "(" + inner + ")"
        return self.op + inner

    def __hash__(self):
        return hash((self.__class__, self.op, self.operand))
class BinaryExpression(Expression):
    """Base class for infix operator expressions.

    Subclasses define ``op`` (the printed symbol) and ``precedence``, and may
    set ``enclose_expressive_*`` to force parentheses around a compound
    operand on that side when rendering.
    """
    enclose_expressive_lhs = False
    enclose_expressive_rhs = False

    def __init__(self, lhs, rhs):
        # BUG FIX: Expression.__init__ requires a ``type`` argument, so the
        # previous bare super().__init__() call raised TypeError whenever any
        # binary expression was constructed. Pass type=None ("unknown"),
        # which Expression.__repr__ already handles.
        super(BinaryExpression, self).__init__(type=None)
        self.lhs = lhs
        self.rhs = rhs
        return

    @property
    def name(self):
        """Render ``lhs op rhs``, parenthesizing operands as needed."""
        if ((isinstance(self.lhs, Expression) and
             self.lhs.precedence > self.precedence) or
            (isinstance(self.lhs, (UnaryExpression, BinaryExpression)) and
             self.enclose_expressive_lhs)):
            lhs_str = "(" + self.lhs.name + ")"
        else:
            lhs_str = self.lhs.name
        if ((isinstance(self.rhs, Expression) and
             self.rhs.precedence > self.precedence) or
            (isinstance(self.rhs, (UnaryExpression, BinaryExpression)) and
             self.enclose_expressive_rhs)):
            rhs_str = "(" + self.rhs.name + ")"
        else:
            rhs_str = self.rhs.name
        return lhs_str + " " + self.op + " " + rhs_str

    def __hash__(self):
        return hash((self.__class__, self.op, self.lhs, self.rhs))
# --- Concrete operator nodes -------------------------------------------------
# ``precedence`` follows C-style operator precedence (lower binds tighter),
# ``op`` is the printed symbol, and ``enclose_expressive_*`` forces
# parentheses around a compound operand on that side when rendering.
class NegationExpression(UnaryExpression):
    precedence = 3
    op = "-"
class BitwiseNegationExpression(UnaryExpression):
    precedence = 3
    op = "~"
class MultiplicationExpression(BinaryExpression):
    precedence = 5
    op = "*"
class DivisionExpression(BinaryExpression):
    precedence = 5
    op = "/"
    enclose_expressive_rhs = True
class AdditionExpression(BinaryExpression):
    precedence = 6
    op = "+"
class SubtractionExpression(BinaryExpression):
    precedence = 6
    op = "-"
    enclose_expressive_rhs = True
class LeftShiftExpression(BinaryExpression):
    precedence = 7
    op = "<<"
    enclose_expressive_lhs = True
    enclose_expressive_rhs = True
class RightShiftExpression(BinaryExpression):
    precedence = 7
    op = ">>"
    enclose_expressive_lhs = True
    enclose_expressive_rhs = True
class EqualToExpression(BinaryExpression):
    precedence = 9
    op = "=="
class NotEqualToExpression(BinaryExpression):
    precedence = 9
    op = "!="
class AndExpression(BinaryExpression):
    precedence = 10
    op = "&"
class XorExpression(BinaryExpression):
    precedence = 11
    op = "^"
class OrExpression(BinaryExpression):
    precedence = 12
    op = "|"
class State(object):
    """\
    Representation of the program state (register and stack contents) at a given
    instruction address.
    """

    def __init__(self, address, previous_state=None, stack=None,
                 register_map=None):
        super(State, self).__init__()
        self.address = address
        # Link into the chain of states; successors are appended elsewhere.
        self.previous_state = previous_state
        self.next_states = []
        # Use a fresh list per state -- never a shared default.
        self.stack = [] if stack is None else stack
        self.register_map = register_map
        self.instruction = None
        return

    @property
    def prev_address(self):
        """Address of the preceding state, or None at the chain head."""
        previous = self.previous_state
        if previous is None:
            return None
        return previous.address
class MemoryRange(object):
    """\
    A range of memory and associated attributes.

    Bounds are validated: both must be ints and start must be strictly less
    than the (exclusive) end. Instances are totally ordered by
    (start_address, end_address).
    """
    def __init__(self, start_address, end_address):
        super(MemoryRange, self).__init__()
        # Assign through the address_range property so both bounds are
        # validated together before either underscore attribute exists.
        self.address_range = (start_address, end_address)
        return
    def _get_start_address(self):
        return self._start_address
    def _set_start_address(self, start_address):
        # Validate type first, then ordering against the current end bound.
        if not isinstance(start_address, int):
            raise TypeError("start_address must be an int instead of %s" %
                            type(start_address).__name__)
        if start_address >= self.end_address:
            raise ValueError("start_address 0x%x is not less than "
                             "end_address 0x%x" %
                             (start_address, self._end_address))
        self._start_address = start_address
        return
    start_address = property(
        _get_start_address, _set_start_address, None,
        "The start address of the memory range.")
    def _get_end_address(self):
        return self._end_address
    def _set_end_address(self, end_address):
        if not isinstance(end_address, int):
            raise TypeError("end_address must be an int instead of %s" %
                            type(end_address).__name__)
        if end_address <= self.start_address:
            raise ValueError("end_address 0x%x is not greater than "
                             "start_address 0x%x" %
                             (end_address, self._start_address))
        self._end_address = end_address
        return
    end_address = property(
        _get_end_address, _set_end_address, None,
        "The end address of the memory range (exclusive).")
    def _get_address_range(self):
        return (self._start_address, self._end_address)
    def _set_address_range(self, arange):
        # len() raises TypeError for non-sequences; re-raise with a clearer
        # message, and reject sequences that are not exactly a pair.
        try:
            if len(arange) != 2:
                raise ValueError(
                    "address_range must be a sequence of (start_address, "
                    "end_address)")
        except TypeError:
            raise TypeError(
                "address_range must be a sequence of (start_address, "
                "end_address)")
        start_address, end_address = arange
        if not isinstance(start_address, int):
            raise TypeError("start_address must be an int instead of %s" %
                            type(start_address).__name__)
        if not isinstance(end_address, int):
            raise TypeError("end_address must be an int instead of %s" %
                            type(end_address).__name__)
        if start_address >= end_address:
            raise ValueError("start_address 0x%x is not less than end_address "
                             "0x%x" % (start_address, end_address))
        # Both bounds valid: commit them atomically.
        self._start_address = start_address
        self._end_address = end_address
        return
    address_range = property(
        _get_address_range, _set_address_range, None,
        "The start address and end address (exclusive) of the memory range.")
    def _to_tuple(self):
        """\
        Convert the attributes of the memory range to a tuple for ease of comparisons.
        """
        return (self._start_address, self._end_address)
    # NOTE(review): __eq__ without __hash__ leaves instances unhashable on
    # Python 3 -- appropriate for a mutable range, but confirm no code puts
    # MemoryRange objects in sets or dict keys.
    def __eq__(self, other):
        return self._to_tuple() == other._to_tuple()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __lt__(self, other):
        return self._to_tuple() < other._to_tuple()
    def __le__(self, other):
        return self._to_tuple() <= other._to_tuple()
    def __gt__(self, other):
        return not (self.__le__(other))
    def __ge__(self, other):
        return not (self.__lt__(other))
class Function(object):
    """Metadata about a function discovered in the simulated program."""

    def __init__(self, simulator, name, start_address, end_address,
                 arguments=None, return_type=None, calling_convention="cdecl"):
        super(Function, self).__init__()
        self.simulator = simulator
        self.name = name
        self.start_address = start_address
        self.end_address = end_address
        # Fresh list per instance -- avoid the shared-mutable-default trap.
        self.arguments = [] if arguments is None else arguments
        self.return_type = return_type
        # Filled in once simulation reaches the function's entry point.
        self.entry_state = None
        self.calling_convention = calling_convention
        return
@total_ordering
class ArchitectureSimulator(object):
    """Describes a target architecture: a name plus pointer width in bits."""

    def __init__(self, name, pointer_size_bits):
        super(ArchitectureSimulator, self).__init__()
        self.name = name
        self.pointer_size_bits = pointer_size_bits
        # Pre-built void* for this architecture's pointer width.
        self.void_ptr = self.get_pointer_type(void)
        return

    def get_pointer_type(self, reference_type, cls=PointerType):
        """Build a pointer type of this architecture's width to *reference_type*."""
        return cls(self.pointer_size_bits, reference_type)

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        if not isinstance(other, ArchitectureSimulator):
            return False
        return (self.name == other.name and
                self.pointer_size_bits == other.pointer_size_bits)

    def __lt__(self, other):
        if not isinstance(other, ArchitectureSimulator):
            return id(self.__class__) < id(other.__class__)
        return ((self.name, self.pointer_size_bits) <
                (other.name, other.pointer_size_bits))

    def __hash__(self):
        return hash((self.__class__, self.name, self.pointer_size_bits))
# Local variables:
# mode: Python
# tab-width: 8
# indent-tabs-mode: nil
# End:
# vi: set expandtab tabstop=8
| {
"repo_name": "dacut/ret",
"path": "ret/state.py",
"copies": "1",
"size": "19165",
"license": "bsd-2-clause",
"hash": -1008451455651369600,
"line_mean": 28.8055987558,
"line_max": 79,
"alpha_frac": 0.5819462562,
"autogenerated": false,
"ratio": 4.002715121136173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5084661377336173,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from builtins import *
import logging
import struct
import time
from Crypto.Random.random import StrongRandom as random
from enum import Enum
from . import crypto
from . import util
#
# Common data types
#
def Date(num=None):
    """Encode a timestamp as an 8-byte big-endian I2P Date.

    Accepts an already-packed 8-byte value (decoded first), a numeric
    value in milliseconds, or None for the current time.
    """
    if isinstance(num, bytes):
        (num,) = struct.unpack(b'>Q', num)
    if num is None:
        num = time.time() * 1000
    return struct.pack(b'>Q', int(num))
class Mapping(object):
    """
    i2p dictionary object

    Wire form: 2-byte big-endian payload length followed by
    length-prefixed key=value pairs, each terminated with ';'.
    """
    _log = logging.getLogger('Mapping')
    def __init__(self, opts=None, raw=None):
        """
        :param opts: dict of options to serialize
        :param raw: raw bytes to parse instead of opts
        """
        if raw:
            self.data = raw
            self.opts = {}
            # BUG FIX: struct.unpack returns a tuple; take element [0]
            # so dlen is an int usable for slicing and comparison below.
            dlen = struct.unpack(b'>H', raw[:2])[0]
            data = raw[2:2+dlen]
            while dlen > 0:
                key = String.parse(data)
                data = data[len(key)+1:]
                val = String.parse(data)
                data = data[len(val)+1:]
                dlen = len(data)
                self.opts[key] = val
        else:
            self.opts = opts or {}
            data = bytes()
            # Sort keys for a deterministic serialization.
            for key in sorted(self.opts.keys()):
                # Read via self.opts so the normalized dict is used.
                val = bytes(self.opts[key], 'utf-8')
                key = bytes(key, 'utf-8')
                data += String.create(key)
                data += bytes('=', 'utf-8')
                data += String.create(val)
                data += bytes(';', 'utf-8')
            dlen = len(data)
            self._log.debug('len of Mapping is %d bytes' % dlen)
            self.data = struct.pack(b'>H', dlen) + data
    def serialize(self):
        """Return the wire representation."""
        return self.data
    def __str__(self):
        return str([self.opts])
class String(object):
    """Helpers for I2P length-prefixed strings (1-byte length + data)."""
    @staticmethod
    def parse(data):
        """Read a length-prefixed string from the head of ``data``."""
        size = util.get_as_int(data[0])
        return bytearray(data[:size])
    @staticmethod
    def create(data):
        """Prefix ``data`` with its 1-byte length, encoding str as UTF-8."""
        if not isinstance(data, bytes):
            data = bytearray(data, 'utf-8')
        return struct.pack(b'>B', len(data)) + data
class CertificateType(Enum):
    # Certificate type codes from the I2P common-structures format.
    NULL = 0
    HASHCASH = 1
    HIDDEN = 2
    SIGNED = 3
    MULTI = 4
    KEY = 5
class Certificate(object):
    """Generic I2P certificate: 1-byte type, 2-byte length, payload."""
    _log = logging.getLogger('Certificate')
    def _parse(self, raw, b64=False):
        """Parse (type, data) from raw bytes or a file-like stream.

        :param raw: bytes or an object with .read()
        :param b64: if True, ``raw`` is I2P-base64 text (bytes only)
        :returns: (CertificateType, payload bytes)
        """
        if b64:
            if hasattr(raw, 'read'):
                raise TypeError('b64 flag is incompatible with stream data')
            raw = util.i2p_b64decode(raw)
        # TODO handle length in both cases
        #if len(raw) < 3:
        #    raise ValueError('invalid Certificate')
        if hasattr(raw, 'read'):
            ctype = raw.read(1)
            clen = raw.read(2)
        else:
            ctype = raw[0]
            clen = raw[1:3]
            raw = raw[3:]
        ctype = CertificateType(util.get_as_int(ctype))
        clen = struct.unpack(b'>H', clen)[0]
        if hasattr(raw, 'read'):
            data = raw.read(clen)
        else:
            data = raw[:clen]
            raw = raw[clen:]
        return ctype, data
    def __init__(self, type=CertificateType.NULL, data=bytes(), raw=None, b64=False):
        """
        :param type: CertificateType (or its int value)
        :param data: certificate payload bytes
        :param raw: raw bytes/stream to parse instead of type+data
        :param b64: treat raw (or data) as I2P base64 text
        """
        if raw:
            type, data = self._parse(raw, b64)
        if isinstance(type, str):
            type = type.encode('ascii')
        if isinstance(type, int) or isinstance(type, bytes):
            # NOTE(review): CertificateType(bytes) would raise ValueError;
            # confirm callers only pass int codes through this branch.
            type = CertificateType(type)
        if raw is None and b64:
            data = util.i2p_b64decode(data)
        self.data = data
        self.type = type
        self._log.debug('type=%s data=%s raw=%s' % (type.name, util.i2p_b64encode(data), self.serialize()))
    def __str__(self):
        return '[cert type=%s data=%s]' % (self.type.name, self.data)
    def upconvert(self):
        """Return a KeyCertificate view of a KEY cert, else self."""
        if self.type == CertificateType.KEY:
            return KeyCertificate(data=self.data)
        else:
            return self
    def serialize(self, b64=False):
        """Return type byte + 2-byte length + payload (optionally b64)."""
        data = bytearray()
        data += struct.pack(b'>B', self.type.value)
        data += struct.pack(b'>H', len(self.data))
        data += self.data
        if b64:
            data = util.i2p_b64encode(data)
        return data
class KeyCertificate(Certificate):
    """KEY-type certificate: two 2-byte key-type codes plus any signing
    key material beyond the 128 bytes that fit in the Destination."""
    _log = logging.getLogger('KeyCertificate')
    def __init__(self, sigkey=None, enckey=None, data=bytes(), raw=None,
                 b64=False):
        """Build from a (sigkey, enckey) pair, or from data/raw bytes."""
        if sigkey is not None and enckey is not None:
            data = self._data_from_keys(sigkey, enckey)
        super().__init__(CertificateType.KEY, data, raw, b64)
        if len(self.data) < 4:
            # Payload must hold at least the two 2-byte type codes.
            raise ValueError("data too short: %s" % self.data)
    @staticmethod
    def _data_from_keys(sigkey, enckey):
        """Encode the key-type codes and overflow sigkey bytes."""
        if not isinstance(sigkey, crypto.SigningKey):
            raise TypeError('sigkey is not a SigningKey')
        if not isinstance(enckey, crypto.CryptoKey):
            raise TypeError('enckey is not a CryptoKey')
        data = bytes()
        data += struct.pack(b'>H', sigkey.key_type.code)
        data += struct.pack(b'>H', enckey.key_type.code)
        # XXX Assume no extra crypto key data
        sigpub = sigkey.get_pubkey()
        extra = max(0, len(sigpub) - 128)
        data += sigpub[128:128+extra]
        return data
    @property
    def sigtype(self):
        # First 2 payload bytes: signing key type code.
        return crypto.SigType.get_by_code(struct.unpack(b'>H',
                                                        self.data[:2])[0])
    @property
    def enctype(self):
        # Next 2 payload bytes: encryption key type code.
        return crypto.EncType.get_by_code(struct.unpack(b'>H',
                                                        self.data[2:4])[0])
    @property
    def extra_sigkey_data(self):
        # Signing-key bytes that overflow the 128-byte Destination slot.
        if self.sigtype is None:
            raise ValueError("unknown sig type")
        # XXX Assume no extra crypto key data
        extra = max(0, self.sigtype.pubkey_len - 128)
        return self.data[4:4+extra]
    @property
    def extra_enckey_data(self):
        # XXX Assume no extra crypto key data
        return bytes()
#
# Common data structures
#
class Destination(object):
    """An I2P destination: encryption public key + signing public key +
    certificate (+ padding), optionally with private key material."""
    _log = logging.getLogger('Destination')
    def __init__(self, enckey=None, sigkey=None, cert=None, padding=bytes(),
                 raw=None, b64=False, private=False):
        """Construct a Destination.
        A Destination can be constructed in several ways:
        1. Generate a Destination with default types
               Destination()
        2. Generate a Destination with specified types
               Destination(EncType.EC_P256, SigType.ECDSA_SHA256_P256)
        3. Generate a Destination with one default and one specified type
               Destination(sigkey=SigType.ECDSA_SHA256_P256)
        4. Generate a Destination using types specified in a KeyCertificate
           (the KeyCertificate's extra key data is ignored)
               Destination(cert=keycert)
        5. Read a Destination in from an eepPriv.dat file
               with open(keyfile, 'rb') as rf:
                   Destination(raw=rf, private=True)
        6. Parse a B64 Destination
               Destination(raw=b64string, b64=True)
        7. Construct a Destination from the provided keys
               Destination(enckey, sigkey)
        8. Construct a Destination from the provided keys and cert
               Destination(enckey, sigkey, cert)
        """
        if raw:
            enckey, sigkey, cert, padding = self._parse(raw, b64, private)
        rebuild_cert = False
        # Instantiate key objects when given None or bare type enums.
        if enckey is None or isinstance(enckey, crypto.EncType) or \
                sigkey is None or isinstance(sigkey, crypto.SigType):
            if enckey is None:
                if isinstance(cert, KeyCertificate):
                    enckey = cert.enctype.cls()
                else:
                    enckey = crypto.ElGamalKey()
            elif isinstance(enckey, crypto.EncType):
                enckey = enckey.cls()
            if sigkey is None:
                if isinstance(cert, KeyCertificate):
                    sigkey = cert.sigtype.cls()
                else:
                    sigkey = crypto.DSAKey()
            elif isinstance(sigkey, crypto.SigType):
                sigkey = sigkey.cls()
            rebuild_cert = True
        # Cases:
        # - cert is None, need NULL -> build NULL
        # - cert is None, need KEY -> build KEY
        # - cert is MULTI -> error
        # - need NULL, cert is KEY -> build NULL
        # - need NULL, cert is not KEY -> leave
        # - need KEY, cert is not NULL or KEY -> error
        # - need KEY -> build KEY
        if cert is None:
            if enckey.key_type == crypto.EncType.ELGAMAL_2048 and \
                    sigkey.key_type == crypto.SigType.DSA_SHA1:
                cert = Certificate()
            else:
                cert = KeyCertificate(sigkey, enckey)
        elif rebuild_cert:
            if cert.type == CertificateType.MULTI:
                raise NotImplementedError('Multiple certs not yet supported')
            elif enckey.key_type == crypto.EncType.ELGAMAL_2048 and \
                    sigkey.key_type == crypto.SigType.DSA_SHA1:
                # Without MULTI+KEY, other certs are assumed to be ElG/DSA
                if cert.type == CertificateType.KEY:
                    cert = Certificate()
            elif cert.type != CertificateType.NULL and \
                    cert.type != CertificateType.KEY:
                raise NotImplementedError('Multiple certs not yet supported')
            else:
                cert = KeyCertificate(sigkey, enckey)
        self.enckey = enckey
        self.sigkey = sigkey
        self.cert = cert
        self.padding = padding
    def _parse(self, raw, b64=False, private=False):
        """Parse (enckey, sigkey, cert, padding) from raw bytes/stream.

        :param private: also read private key material (eepPriv.dat)
        """
        if b64:
            if hasattr(raw, 'read'):
                raise TypeError('b64 flag is incompatible with stream data')
            raw = util.i2p_b64decode(raw)
        # TODO handle length in both cases
        #if len(data) < 387:
        #    raise ValueError('invalid Destination')
        # First 384 bytes: enc pubkey + padding + sig pubkey.
        if hasattr(raw, 'read'):
            data = raw.read(384)
        else:
            data = raw[:384]
            raw = raw[384:]
        cert = Certificate(raw=raw)
        # If this is an eepPriv.dat, there will be more key material
        if hasattr(raw, 'read'):
            rest = raw.read()
        else:
            rest = raw[len(cert.serialize()):]
        if cert.type == CertificateType.KEY:
            cert = cert.upconvert()
            # XXX Assume no extra crypto key data
            enc_end = min(256, cert.enctype.pubkey_len)
            sig_start = max(256, 384-cert.sigtype.pubkey_len)
            encpub = data[:enc_end]
            padding = data[enc_end:sig_start]
            sigpub = data[sig_start:384] + \
                cert.extra_sigkey_data
            if len(rest):
                encpriv = rest[:cert.enctype.privkey_len]
                sigpriv = rest[cert.enctype.privkey_len:\
                               cert.enctype.privkey_len+\
                               cert.sigtype.privkey_len]
                return cert.enctype.cls(encpub, encpriv), \
                    cert.sigtype.cls(sigpub, sigpriv), \
                    cert, \
                    padding
            else:
                return cert.enctype.cls(encpub), \
                    cert.sigtype.cls(sigpub), \
                    cert, \
                    padding
        elif cert.type != CertificateType.MULTI:
            # No KeyCert, so defaults to ElGamal/DSA
            encpub = data[:256]
            sigpub = data[256:384]
            if len(rest) and private:
                encpriv = rest[:256]
                sigpriv = rest[256:276]
                return crypto.ElGamalKey(encpub, encpriv), \
                    crypto.DSAKey(sigpub, sigpriv), \
                    cert, \
                    bytes()
            else:
                return crypto.ElGamalKey(encpub), \
                    crypto.DSAKey(sigpub), \
                    cert, \
                    bytes()
        else:
            raise NotImplementedError('Multiple certs not yet supported')
    def __str__(self):
        return '[Destination %s %s cert=%s]' % (
            self.base32(), self.base64(),
            self.cert)
    def has_private(self):
        """
        Returns True if this Destination contains private material,
        False otherwise.
        """
        # NOTE(review): enckey.has_private is not called while
        # sigkey.has_private() is -- verify the former is a property,
        # otherwise this expression is always truthy.
        return self.enckey.has_private or self.sigkey.has_private()
    def to_public(self):
        """
        Return a copy of this Destination without any private key material.
        """
        if self.has_private():
            return Destination(self.enckey.to_public(),
                               self.sigkey.to_public(),
                               self.cert,
                               self.padding)
        else:
            return self
    def sign(self, data):
        """Sign data with this Destination's signing key."""
        return self.sigkey.sign(data)
    def signature_size(self):
        """Size in bytes of signatures made by this Destination."""
        return self.sigkey.key_type.sig_len
    def verify(self, data, sig):
        """Verify sig over data with this Destination's signing key."""
        return self.sigkey.verify(data, sig)
    def __len__(self):
        return len(self.serialize())
    def serialize(self, priv=False):
        """Return the wire form; include private keys when priv=True.

        :raises ValueError: priv=True but no private material present
        """
        if priv and not self.has_private():
            raise ValueError('No private key material in this Destination')
        data = bytes()
        if self.cert.type == CertificateType.KEY:
            encpub = self.enckey.get_pubkey()
            sigpub = self.sigkey.get_pubkey()
            # Keys share the 384-byte block; padding fills the gap.
            enc_end = min(256, self.cert.enctype.pubkey_len)
            sig_start = max(256, 384-self.cert.sigtype.pubkey_len)
            if len(self.padding) == 0:
                # Generate random padding
                pad_len = sig_start - enc_end
                # Wrap with int() for Py2
                self.padding = int(random().getrandbits(pad_len*8)).to_bytes(
                    pad_len, 'big')
            data += encpub[:enc_end]
            data += self.padding
            data += sigpub[:384-sig_start]
            data += self.cert.serialize()
        elif self.cert.type != CertificateType.MULTI:
            data += self.enckey.get_pubkey()
            data += self.sigkey.get_pubkey()
            data += self.cert.serialize()
        else:
            raise NotImplementedError('Multiple certs not yet supported')
        if priv:
            data += self.enckey.get_privkey()
            data += self.sigkey.get_privkey()
        self._log.debug('serialize len=%d' % len(data))
        return data
    def base32(self):
        """Return the .b32 address (SHA-256 of the public serialization)."""
        data = self.serialize()
        return util.i2p_b32encode(crypto.sha256(data)).decode('ascii')
    def base64(self, priv=False):
        """Return the I2P-base64 form of the serialization."""
        return util.i2p_b64encode(self.serialize(priv)).decode('ascii')
class Lease(object):
    """A single lease: router identity hash, tunnel id and end date."""
    _log = logging.getLogger('Lease')
    def __init__(self, ri_hash=None, tid=None, end_date=None):
        self.ri = ri_hash
        self.tid = tid
        self.end_date = end_date
        self._log.debug('ri_hash %d bytes' % len(ri_hash))
    def serialize(self):
        """Return the 44-byte wire form: 32-byte hash, 4-byte tunnel id,
        8-byte end date."""
        out = bytearray()
        out += self.ri
        out += struct.pack(b'>I', self.tid)
        out += self.end_date
        self._log.debug('Lease is %d bytes' % len(out))
        assert len(out) == 44
        return out
    def __repr__(self):
        return '[Lease ri=%s tid=%d]' % ([self.ri], self.tid)
class LeaseSet(object):
    """A set of Leases published for a Destination, with the LeaseSet's
    own encryption and signing public keys."""
    _log = logging.getLogger('LeaseSet')
    def __init__(self, raw=None, dest=None, ls_enckey=None, ls_sigkey=None,
                 leases=None):
        """Parse raw LeaseSet bytes, or assemble one from parts."""
        if raw:
            data = raw
            self.leases = []
            self.dest = Destination(raw=data)
            self._log.debug(self.dest)
            # Verify that the signature matches the Destination
            # (fixed 40-byte trailer => DSA-SHA1 only).
            self.sig = raw[-40:]
            self.dest.verify(raw[:-40], self.sig)
            # Signature matches, now parse the rest
            data = data[len(self.dest):]
            self.enckey = crypto.ElGamalKey(data[:256])
            self._log.debug(self.enckey)
            data = data[256:]
            self.sigkey = crypto.DSAKey(data[:128])
            self._log.debug(self.sigkey)
            data = data[128:]
            numls = data[0]
            data = data[1:]
            # Each Lease is 44 bytes: 32 hash + 4 tunnel id + 8 end date.
            # NOTE(review): the tunnel id is passed as raw bytes here, but
            # Lease.serialize() packs tid with '>I' (expects int) — confirm.
            while numls > 0:
                _l = data[:44]
                l = Lease(_l[:32], _l[32:36], _l[36:44])
                data = data[44:]
                numls -= 1
                self.leases.append(l)
        else:
            self.dest = dest
            self.enckey = ls_enckey
            self.sigkey = ls_sigkey
            self.leases = list(leases)
    def __str__(self):
        return '[LeaseSet leases=%s enckey=%s sigkey=%s dest=%s]' % (
            self.leases,
            [self.enckey.get_pubkey()],
            [self.sigkey.get_pubkey()],
            self.dest)
    def serialize(self):
        """
        serialize and sign LeaseSet
        only works with DSA-SHA1 right now
        """
        data = bytes()
        data += self.dest.serialize()
        data += self.enckey.get_pubkey()
        data += self.sigkey.get_pubkey()
        # Wrap with int() for Py2
        data += int(len(self.leases)).to_bytes(1, 'big')
        for l in self.leases:
            data += l.serialize()
        sig = self.dest.sign(data)
        data += sig
        self._log.debug('LS has length %d' % len(data))
        return data
class I2CPProtocol(Enum):
    # I2CP payload protocol numbers.
    STREAMING = 6
    DGRAM = 17
    RAW = 18
class datagram(object):
    """Base datagram type; two datagrams are equal when both carry a
    ``payload`` attribute and the payloads compare equal."""
    def __eq__(self, obj):
        both_have_payloads = hasattr(self, 'payload') and hasattr(obj, 'payload')
        return both_have_payloads and self.payload == obj.payload
class raw_datagram(object):
    """Raw (unsigned) datagram: the payload is carried verbatim and the
    sender is unknown, so ``dest`` is always None."""
    protocol = I2CPProtocol.RAW
    def __init__(self, dest=None, raw=None, payload=None):
        # Either wrap parsed wire bytes or an outbound payload; the data
        # is stored untouched in both cases.
        self.data = raw if raw else payload
        self.dest = None
    def serialize(self):
        return self.data
class dsa_datagram(datagram):
    """Signed datagram: sender Destination + 40-byte signature over the
    SHA-256 hash of the payload."""
    protocol = I2CPProtocol.DGRAM
    _log = logging.getLogger('datagram-dsa')
    def __init__(self, dest=None, raw=None, payload=None):
        """Parse and verify raw bytes, or sign payload with dest."""
        if raw:
            self._log.debug('rawlen=%d' % len(raw))
            self.data = raw
            self._log.debug('load dgram data: %s' % [raw])
            self.dest = Destination(raw=raw)
            self._log.debug('destlen=%s' % self.dest)
            raw = raw[len(self.dest):]
            self._log.debug('raw=%s' % [raw])
            # Fixed 40-byte signature => DSA-SHA1 only.
            self.sig = raw[:40]
            raw = raw[40:]
            self._log.debug('payloadlen=%d' % len(raw))
            self.payload = raw
            phash = crypto.sha256(self.payload)
            self._log.debug('verify dgram: sig=%s hash=%s' % (
                [self.sig], [phash]))
            self.dest.verify(phash, self.sig)
        else:
            self.dest = dest
            self.payload = payload
            self.data = bytearray()
            self.data += self.dest.serialize()
            payload_hash = crypto.sha256(self.payload)
            self.sig = self.dest.sign(payload_hash)
            self._log.debug('signature=%s' % [self.sig])
            self.data += self.sig
            self.data += payload
    def serialize(self):
        """Return the signed wire form."""
        return self.data
    def __str__(self):
        return '[DSADatagram payload=%s sig=%s]' % (self.payload, self.sig)
class i2cp_payload(object):
    """Gzip-framed I2CP payload: 4-byte length, gzip header, flags,
    ports, protocol byte, then the compressed data."""
    gz_header = b'\x1f\x8b\x08'
    _log = logging.getLogger('i2cp_payload')
    def __init__(self, raw=None, data=None, srcport=0, dstport=0,
                 proto=I2CPProtocol.RAW):
        """Parse a payload from raw bytes, or build one from data+ports."""
        if raw:
            self.dlen = struct.unpack(b'>I', raw[:4])[0]
            self._log.debug('payload len=%d' % self.dlen)
            # NOTE(review): slice end is self.dlen rather than 4+self.dlen;
            # confirm dlen is measured from the start of the message.
            data = raw[4:self.dlen]
            self._log.debug('compressed payload len=%d' % len(data))
            assert data[:3] == self.gz_header
            self.flags = data[3]
            self.srcport = struct.unpack(b'>H', data[4:6])[0]
            self.dstport = struct.unpack(b'>H', data[6:8])[0]
            self.xflags = data[8]
            self.proto = I2CPProtocol(util.get_as_int(data[9]))
            self.data = util.i2p_decompress(data[10:])
            self._log.debug('decompressed=%s' % [self.data])
        else:
            if util.check_portnum(srcport) and util.check_portnum(dstport):
                self._log.debug('payload data len=%d' % len(data))
                self.data = util.i2p_compress(data)
                self._log.debug('compressed payload len=%d' % len(self.data))
                self.srcport = srcport
                self.dstport = dstport
                self.proto = I2CPProtocol(proto)
                self.flags = 0
                self.xflags = 2
            else:
                raise ValueError('invalid ports: srcport=%s dstport=%s' % (
                    [srcport], [dstport]))
    def serialize(self):
        """Return the framed wire form (length prefix + headers + data)."""
        data = bytearray()
        data += self.gz_header
        data += struct.pack(b'>B', self.flags)
        data += struct.pack(b'>H', self.srcport)
        data += struct.pack(b'>H', self.dstport)
        data += struct.pack(b'>B', self.xflags)
        data += struct.pack(b'>B', self.proto.value)
        data += self.data
        dlen = len(data)
        self._log.debug('serialize len=%d' % dlen)
        return struct.pack(b'>I', dlen) + data
    def __str__(self):
        return ('[Payload flags=%s srcport=%s dstport=%s xflags=%s' +
                ' proto=%s data=%s]') % (
                    self.flags,
                    self.srcport,
                    self.dstport,
                    self.xflags,
                    self.proto,
                    self.data)
def to_b32_bytes(val):
    """Return the 32-byte SHA-256 digest identifying ``val``.

    Accepts a Destination (hashed over its binary serialization), a
    ``*.b32.i2p`` hostname (base32-decoded), or raw destination bytes
    (hashed directly).

    :raises TypeError: for any other input type
    """
    if isinstance(val, Destination):
        # BUG FIX: hash the binary serialization, not base64() text —
        # base64() returns str, which the bytes branch below rejects.
        return to_b32_bytes(val.serialize())
    if isinstance(val, bytes):
        if val.lower().endswith(b".b32.i2p"):
            return util.i2p_b32decode(val)
        else:
            # BUG FIX: 'vale' was an undefined name (typo for 'val').
            return crypto.sha256(val)
    raise TypeError("invalid type", val)
| {
"repo_name": "majestrate/i2p-tools",
"path": "pyi2tun/i2p/datatypes.py",
"copies": "1",
"size": "21860",
"license": "mit",
"hash": 5227252736004367000,
"line_mean": 32.0711043873,
"line_max": 107,
"alpha_frac": 0.5280878317,
"autogenerated": false,
"ratio": 3.8384547848990342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9863355885518361,
"avg_score": 0.0006373462161345354,
"num_lines": 661
} |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from builtins import *
import logging
import struct
import time
try:
from Crypto.Random.random import StrongRandom
# Wrap with int() for Py2
random = lambda n : int(StrongRandom().getrandbits(n*8)).to_bytes(n, 'big')
except ImportError:
from os import urandom as random
try:
from . import crypto
except ImportError:
crypto = None
from i2p import util
if crypto:
sha256 = crypto.sha256
else:
sha256 = util.sha256
from enum import Enum
#
# Common data types
#
def Date(num=None):
    """Encode a timestamp as an 8-byte big-endian I2P Date.

    Accepts an already-packed 8-byte value (decoded first), a numeric
    value in milliseconds, or None for the current time.
    """
    if isinstance(num, bytes):
        (num,) = struct.unpack(b'>Q', num)
    if num is None:
        num = time.time() * 1000
    return struct.pack(b'>Q', int(num))
class Mapping(object):
    """
    i2p dictionary object

    Wire form: 2-byte big-endian payload length followed by
    length-prefixed key=value pairs, each terminated with ';'.
    """
    _log = logging.getLogger('Mapping')
    def __init__(self, opts=None, raw=None):
        """
        :param opts: dict of options to serialize
        :param raw: raw bytes to parse instead of opts
        """
        if raw:
            self.data = raw
            self.opts = {}
            # BUG FIX: struct.unpack returns a tuple; take element [0]
            # so dlen is an int usable for slicing and comparison below.
            dlen = struct.unpack(b'>H', raw[:2])[0]
            data = raw[2:2+dlen]
            while dlen > 0:
                key = String.parse(data)
                data = data[len(key)+1:]
                val = String.parse(data)
                data = data[len(val)+1:]
                dlen = len(data)
                self.opts[key] = val
        else:
            self.opts = opts or {}
            data = bytes()
            # Sort keys for a deterministic serialization.
            for key in sorted(self.opts.keys()):
                # Read via self.opts so the normalized dict is used.
                val = bytes(self.opts[key], 'utf-8')
                key = bytes(key, 'utf-8')
                data += String.create(key)
                data += bytes('=', 'utf-8')
                data += String.create(val)
                data += bytes(';', 'utf-8')
            dlen = len(data)
            self._log.debug('len of Mapping is %d bytes' % dlen)
            self.data = struct.pack(b'>H', dlen) + data
    def serialize(self):
        """Return the wire representation."""
        return self.data
    def __str__(self):
        return str([self.opts])
class String(object):
    """Helpers for I2P length-prefixed strings (1-byte length + data)."""
    @staticmethod
    def parse(data):
        """Read a length-prefixed string from the head of ``data``."""
        size = util.get_as_int(data[0])
        return bytearray(data[:size])
    @staticmethod
    def create(data):
        """Prefix ``data`` with its 1-byte length, encoding str as UTF-8."""
        if not isinstance(data, bytes):
            data = bytearray(data, 'utf-8')
        return struct.pack(b'>B', len(data)) + data
class CertificateType(Enum):
    # Certificate type codes from the I2P common-structures format.
    NULL = 0
    HASHCASH = 1
    HIDDEN = 2
    SIGNED = 3
    MULTI = 4
    KEY = 5
class Certificate(object):
    """Generic I2P certificate: 1-byte type, 2-byte length, payload."""
    _log = logging.getLogger('Certificate')
    def _parse(self, raw, b64=False):
        """Parse (type, data) from raw bytes or a file-like stream.

        :param raw: bytes or an object with .read()
        :param b64: if True, ``raw`` is I2P-base64 text (bytes only)
        :returns: (CertificateType, payload bytes)
        """
        if b64:
            if hasattr(raw, 'read'):
                raise TypeError('b64 flag is incompatible with stream data')
            raw = util.i2p_b64decode(raw)
        # TODO handle length in both cases
        #if len(raw) < 3:
        #    raise ValueError('invalid Certificate')
        if hasattr(raw, 'read'):
            ctype = raw.read(1)
            clen = raw.read(2)
        else:
            ctype = raw[0]
            clen = raw[1:3]
            raw = raw[3:]
        ctype = CertificateType(util.get_as_int(ctype))
        clen = struct.unpack(b'>H', clen)[0]
        if hasattr(raw, 'read'):
            data = raw.read(clen)
        else:
            data = raw[:clen]
            raw = raw[clen:]
        return ctype, data
    def __init__(self, type=CertificateType.NULL, data=bytes(), raw=None, b64=False):
        """
        :param type: CertificateType (or its int value)
        :param data: certificate payload bytes
        :param raw: raw bytes/stream to parse instead of type+data
        :param b64: treat raw (or data) as I2P base64 text
        """
        if raw:
            type, data = self._parse(raw, b64)
        if isinstance(type, str):
            type = type.encode('ascii')
        if isinstance(type, int) or isinstance(type, bytes):
            # NOTE(review): CertificateType(bytes) would raise ValueError;
            # confirm callers only pass int codes through this branch.
            type = CertificateType(type)
        if raw is None and b64:
            data = util.i2p_b64decode(data)
        self.data = data
        self.type = type
        self._log.debug('type=%s data=%s raw=%s' % (type.name, util.i2p_b64encode(data), self.serialize()))
    def __str__(self):
        return '[cert type=%s data=%s]' % (self.type.name, self.data)
    def upconvert(self):
        """Return a KeyCertificate view of a KEY cert, else self."""
        if self.type == CertificateType.KEY:
            return KeyCertificate(data=self.data)
        else:
            return self
    def serialize(self, b64=False):
        """Return type byte + 2-byte length + payload (optionally b64)."""
        data = bytearray()
        data += struct.pack(b'>B', self.type.value)
        data += struct.pack(b'>H', len(self.data))
        data += self.data
        if b64:
            data = util.i2p_b64encode(data)
        return data
class KeyCertificate(Certificate):
    """KEY-type certificate: two 2-byte key-type codes plus any signing
    key material beyond the 128 bytes that fit in the Destination."""
    _log = logging.getLogger('KeyCertificate')
    def __init__(self, sigkey=None, enckey=None, data=bytes(), raw=None,
                 b64=False):
        """Build from a (sigkey, enckey) pair, or from data/raw bytes."""
        if sigkey is not None and enckey is not None:
            data = self._data_from_keys(sigkey, enckey)
        super().__init__(CertificateType.KEY, data, raw, b64)
        if len(self.data) < 4:
            # Payload must hold at least the two 2-byte type codes.
            raise ValueError("data too short: %s" % self.data)
    @staticmethod
    def _data_from_keys(sigkey, enckey):
        """Encode the key-type codes and overflow sigkey bytes."""
        if not isinstance(sigkey, crypto.SigningKey):
            raise TypeError('sigkey is not a SigningKey')
        if not isinstance(enckey, crypto.CryptoKey):
            raise TypeError('enckey is not a CryptoKey')
        data = bytes()
        data += struct.pack(b'>H', sigkey.key_type.code)
        data += struct.pack(b'>H', enckey.key_type.code)
        # XXX Assume no extra crypto key data
        sigpub = sigkey.get_pubkey()
        extra = max(0, len(sigpub) - 128)
        data += sigpub[128:128+extra]
        return data
    @property
    def sigtype(self):
        # First 2 payload bytes: signing key type code.
        return crypto.SigType.get_by_code(struct.unpack(b'>H',
                                                        self.data[:2])[0])
    @property
    def enctype(self):
        # Next 2 payload bytes: encryption key type code.
        return crypto.EncType.get_by_code(struct.unpack(b'>H',
                                                        self.data[2:4])[0])
    @property
    def extra_sigkey_data(self):
        # Signing-key bytes that overflow the 128-byte Destination slot.
        if self.sigtype is None:
            raise ValueError("unknown sig type")
        # XXX Assume no extra crypto key data
        extra = max(0, self.sigtype.pubkey_len - 128)
        return self.data[4:4+extra]
    @property
    def extra_enckey_data(self):
        # XXX Assume no extra crypto key data
        return bytes()
#
# Common data structures
#
class Destination(object):
    """An I2P destination: encryption public key + signing public key +
    certificate (+ padding), optionally with private key material.

    When the optional ``crypto`` module is unavailable (crypto is None),
    a Destination can still be held opaquely: the raw input is retained
    for round-tripping and any operation that needs key material raises
    NotImplementedError.
    """
    _log = logging.getLogger('Destination')
    def __init__(self, enckey=None, sigkey=None, cert=None, padding=bytes(),
                 raw=None, b64=False, private=False):
        """Construct a Destination.
        A Destination can be constructed in several ways:
        1. Generate a Destination with default types
               Destination()
        2. Generate a Destination with specified types
               Destination(EncType.EC_P256, SigType.ECDSA_SHA256_P256)
        3. Generate a Destination with one default and one specified type
               Destination(sigkey=SigType.ECDSA_SHA256_P256)
        4. Generate a Destination using types specified in a KeyCertificate
           (the KeyCertificate's extra key data is ignored)
               Destination(cert=keycert)
        5. Read a Destination in from an eepPriv.dat file
               with open(keyfile, 'rb') as rf:
                   Destination(raw=rf, private=True)
        6. Parse a B64 Destination
               Destination(raw=b64string, b64=True)
        7. Construct a Destination from the provided keys
               Destination(enckey, sigkey)
        8. Construct a Destination from the provided keys and cert
               Destination(enckey, sigkey, cert)
        """
        self._crypto = crypto
        if self._crypto is None:
            # No crypto support: keep the raw form for opaque round-trips.
            self._raw = raw
            self._b64 = b64 is True
            self._private = private is True
            return
        if raw:
            enckey, sigkey, cert, padding = self._parse(raw, b64, private)
        rebuild_cert = False
        # Instantiate key objects when given None or bare type enums.
        if enckey is None or isinstance(enckey, self._crypto.EncType) or \
                sigkey is None or isinstance(sigkey, self._crypto.SigType):
            if enckey is None:
                if isinstance(cert, KeyCertificate):
                    enckey = cert.enctype.cls()
                else:
                    enckey = self._crypto.ElGamalKey()
            elif isinstance(enckey, self._crypto.EncType):
                enckey = enckey.cls()
            if sigkey is None:
                if isinstance(cert, KeyCertificate):
                    sigkey = cert.sigtype.cls()
                else:
                    sigkey = self._crypto.DSAKey()
            elif isinstance(sigkey, self._crypto.SigType):
                sigkey = sigkey.cls()
            rebuild_cert = True
        # Cases:
        # - cert is None, need NULL -> build NULL
        # - cert is None, need KEY -> build KEY
        # - cert is MULTI -> error
        # - need NULL, cert is KEY -> build NULL
        # - need NULL, cert is not KEY -> leave
        # - need KEY, cert is not NULL or KEY -> error
        # - need KEY -> build KEY
        if cert is None:
            if enckey.key_type == self._crypto.EncType.ELGAMAL_2048 and \
                    sigkey.key_type == self._crypto.SigType.DSA_SHA1:
                cert = Certificate()
            else:
                cert = KeyCertificate(sigkey, enckey)
        elif rebuild_cert:
            if cert.type == CertificateType.MULTI:
                raise NotImplementedError('Multiple certs not yet supported')
            elif enckey.key_type == self._crypto.EncType.ELGAMAL_2048 and \
                    sigkey.key_type == self._crypto.SigType.DSA_SHA1:
                # Without MULTI+KEY, other certs are assumed to be ElG/DSA
                if cert.type == CertificateType.KEY:
                    cert = Certificate()
            elif cert.type != CertificateType.NULL and \
                    cert.type != CertificateType.KEY:
                raise NotImplementedError('Multiple certs not yet supported')
            else:
                cert = KeyCertificate(sigkey, enckey)
        self.enckey = enckey
        self.sigkey = sigkey
        self.cert = cert
        self.padding = padding
    def _parse(self, raw, b64=False, private=False):
        """Parse (enckey, sigkey, cert, padding) from raw bytes/stream.

        :param private: also read private key material (eepPriv.dat)
        """
        if b64:
            if hasattr(raw, 'read'):
                raise TypeError('b64 flag is incompatible with stream data')
            raw = util.i2p_b64decode(raw)
        # TODO handle length in both cases
        #if len(data) < 387:
        #    raise ValueError('invalid Destination')
        # First 384 bytes: enc pubkey + padding + sig pubkey.
        if hasattr(raw, 'read'):
            data = raw.read(384)
        else:
            data = raw[:384]
            raw = raw[384:]
        cert = Certificate(raw=raw)
        # If this is an eepPriv.dat, there will be more key material
        if hasattr(raw, 'read'):
            rest = raw.read()
        else:
            rest = raw[len(cert.serialize()):]
        if cert.type == CertificateType.KEY:
            cert = cert.upconvert()
            # XXX Assume no extra crypto key data
            enc_end = min(256, cert.enctype.pubkey_len)
            sig_start = max(256, 384-cert.sigtype.pubkey_len)
            encpub = data[:enc_end]
            padding = data[enc_end:sig_start]
            sigpub = data[sig_start:384] + \
                cert.extra_sigkey_data
            if len(rest):
                encpriv = rest[:cert.enctype.privkey_len]
                sigpriv = rest[cert.enctype.privkey_len:\
                               cert.enctype.privkey_len+\
                               cert.sigtype.privkey_len]
                return cert.enctype.cls(encpub, encpriv), \
                    cert.sigtype.cls(sigpub, sigpriv), \
                    cert, \
                    padding
            else:
                return cert.enctype.cls(encpub), \
                    cert.sigtype.cls(sigpub), \
                    cert, \
                    padding
        elif cert.type != CertificateType.MULTI:
            # No KeyCert, so defaults to ElGamal/DSA
            encpub = data[:256]
            sigpub = data[256:384]
            if len(rest) and private:
                encpriv = rest[:256]
                sigpriv = rest[256:276]
                return self._crypto.ElGamalKey(encpub, encpriv), \
                    self._crypto.DSAKey(sigpub, sigpriv), \
                    cert, \
                    bytes()
            else:
                return self._crypto.ElGamalKey(encpub), \
                    self._crypto.DSAKey(sigpub), \
                    cert, \
                    bytes()
        else:
            raise NotImplementedError('Multiple certs not yet supported')
    def __str__(self):
        return '[Destination %s %s]' % (
            self.base32(), self.base64())
    def has_private(self):
        """
        Returns True if this Destination contains private material,
        False otherwise.
        """
        if self._crypto is None:
            return self._private is True
        # NOTE(review): enckey.has_private is not called while
        # sigkey.has_private() is -- verify the former is a property,
        # otherwise this expression is always truthy.
        return self.enckey.has_private or self.sigkey.has_private()
    def to_public(self):
        """
        Return a copy of this Destination without any private key material.
        """
        if self._crypto is None and self._private:
            # BUG FIX: 'raise NotImplemented()' raised
            # "TypeError: exceptions must derive from BaseException" --
            # NotImplemented is a sentinel, not an exception class.
            raise NotImplementedError()
        if self.has_private():
            return Destination(self.enckey.to_public(),
                               self.sigkey.to_public(),
                               self.cert,
                               self.padding)
        else:
            return self
    def sign(self, data):
        """Sign data with this Destination's signing key."""
        if self._crypto is None:
            # BUG FIX: NotImplemented -> NotImplementedError (see above).
            raise NotImplementedError()
        return self.sigkey.sign(data)
    def signature_size(self):
        """Size in bytes of signatures made by this Destination."""
        if self._crypto is None:
            raise NotImplementedError()
        return self.sigkey.key_type.sig_len
    def verify(self, data, sig):
        """Verify sig over data with this Destination's signing key."""
        if self._crypto is None:
            raise NotImplementedError()
        return self.sigkey.verify(data, sig)
    def __len__(self):
        return len(self.serialize())
    def serialize(self, priv=False):
        """Return the wire form; include private keys when priv=True.

        :raises ValueError: priv=True but no private material present
        """
        if self._crypto is None:
            # XXX: converting to public from private not supported without crypto libs
            if (priv is False and self._private is False) or (priv is True and self._private is True):
                if self._b64:
                    return util.i2p_b64decode(self._raw)
                else:
                    return self._raw
            else:
                # BUG FIX: NotImplemented -> NotImplementedError.
                raise NotImplementedError()
        if priv and not self.has_private():
            raise ValueError('No private key material in this Destination')
        data = bytes()
        if self.cert.type == CertificateType.KEY:
            encpub = self.enckey.get_pubkey()
            sigpub = self.sigkey.get_pubkey()
            # Keys share the 384-byte block; padding fills the gap.
            enc_end = min(256, self.cert.enctype.pubkey_len)
            sig_start = max(256, 384-self.cert.sigtype.pubkey_len)
            if len(self.padding) == 0:
                # Generate random padding
                pad_len = sig_start - enc_end
                self.padding = random(pad_len)
            data += encpub[:enc_end]
            data += self.padding
            data += sigpub[:384-sig_start]
            data += self.cert.serialize()
        elif self.cert.type != CertificateType.MULTI:
            data += self.enckey.get_pubkey()
            data += self.sigkey.get_pubkey()
            data += self.cert.serialize()
        else:
            raise NotImplementedError('Multiple certs not yet supported')
        if priv:
            data += self.enckey.get_privkey()
            data += self.sigkey.get_privkey()
        self._log.debug('serialize len=%d' % len(data))
        return data
    def base32(self):
        """Return the .b32 address (SHA-256 of the public serialization)."""
        # Consistency fix: test the captured module (self._crypto) like
        # every other method, instead of the module-level 'crypto' name.
        if self._crypto is None and self._private:
            # BUG FIX: NotImplemented -> NotImplementedError.
            raise NotImplementedError()
        data = self.serialize()
        return util.i2p_b32encode(sha256(data)).decode('ascii')
    def base64(self):
        """Return the I2P-base64 form of the public serialization."""
        if self._crypto is None and self._private:
            # BUG FIX: NotImplemented -> NotImplementedError.
            raise NotImplementedError()
        return util.i2p_b64encode(self.serialize()).decode('ascii')
class Lease(object):
    """A single lease: router identity hash, tunnel id and end date."""
    _log = logging.getLogger('Lease')
    def __init__(self, ri_hash=None, tid=None, end_date=None):
        self.ri = ri_hash
        self.tid = tid
        self.end_date = end_date
        self._log.debug('ri_hash %d bytes' % len(ri_hash))
    def serialize(self):
        """Return the 44-byte wire form: 32-byte hash, 4-byte tunnel id,
        8-byte end date."""
        out = bytearray()
        out += self.ri
        out += struct.pack(b'>I', self.tid)
        out += self.end_date
        self._log.debug('Lease is %d bytes' % len(out))
        assert len(out) == 44
        return out
    def __repr__(self):
        return '[Lease ri=%s tid=%d]' % ([self.ri], self.tid)
class LeaseSet(object):
    """A set of Leases published for a Destination, with the LeaseSet's
    own encryption and signing public keys."""
    _log = logging.getLogger('LeaseSet')
    def __init__(self, raw=None, dest=None, ls_enckey=None, ls_sigkey=None,
                 leases=None):
        """Parse raw LeaseSet bytes, or assemble one from parts."""
        if raw:
            data = raw
            self.leases = []
            self.dest = Destination(raw=data)
            self._log.debug(self.dest)
            # Verify that the signature matches the Destination
            # (fixed 40-byte trailer => DSA-SHA1 only).
            self.sig = raw[-40:]
            self.dest.verify(raw[:-40], self.sig)
            # Signature matches, now parse the rest
            # NOTE(review): this branch uses crypto directly, so it fails
            # with AttributeError when crypto support is unavailable
            # (crypto is None) -- confirm intended.
            data = data[len(self.dest):]
            self.enckey = crypto.ElGamalKey(data[:256])
            self._log.debug(self.enckey)
            data = data[256:]
            self.sigkey = crypto.DSAKey(data[:128])
            self._log.debug(self.sigkey)
            data = data[128:]
            numls = data[0]
            data = data[1:]
            # Each Lease is 44 bytes: 32 hash + 4 tunnel id + 8 end date.
            while numls > 0:
                _l = data[:44]
                l = Lease(_l[:32], _l[32:36], _l[36:44])
                data = data[44:]
                numls -= 1
                self.leases.append(l)
        else:
            self.dest = dest
            self.enckey = ls_enckey
            self.sigkey = ls_sigkey
            self.leases = list(leases)
    def __str__(self):
        return '[LeaseSet leases=%s enckey=%s sigkey=%s dest=%s]' % (
            self.leases,
            [self.enckey.get_pubkey()],
            [self.sigkey.get_pubkey()],
            self.dest)
    def serialize(self):
        """
        serialize and sign LeaseSet
        only works with DSA-SHA1 right now
        """
        data = bytes()
        data += self.dest.serialize()
        data += self.enckey.get_pubkey()
        data += self.sigkey.get_pubkey()
        # Wrap with int() for Py2
        data += int(len(self.leases)).to_bytes(1, 'big')
        for l in self.leases:
            data += l.serialize()
        sig = self.dest.sign(data)
        data += sig
        self._log.debug('LS has length %d' % len(data))
        return data
class I2CPProtocol(Enum):
    # I2CP payload protocol numbers.
    STREAMING = 6
    DGRAM = 17
    RAW = 18
class datagram(object):
    """Base datagram type; two datagrams are equal when both carry a
    ``payload`` attribute and the payloads compare equal."""
    def __eq__(self, obj):
        both_have_payloads = hasattr(self, 'payload') and hasattr(obj, 'payload')
        return both_have_payloads and self.payload == obj.payload
class raw_datagram(object):
    """Raw (unsigned) datagram: the payload is carried verbatim and the
    sender is unknown, so ``dest`` is always None."""
    protocol = I2CPProtocol.RAW
    def __init__(self, dest=None, raw=None, payload=None):
        # Either wrap parsed wire bytes or an outbound payload; the data
        # is stored untouched in both cases.
        self.data = raw if raw else payload
        self.dest = None
    def serialize(self):
        return self.data
class dsa_datagram(datagram):
    """Signed datagram: sender Destination + 40-byte signature over the
    SHA-256 hash of the payload."""
    protocol = I2CPProtocol.DGRAM
    _log = logging.getLogger('datagram-dsa')
    def __init__(self, dest=None, raw=None, payload=None):
        """Parse and verify raw bytes, or sign payload with dest."""
        if raw:
            self._log.debug('rawlen=%d' % len(raw))
            self.data = raw
            self._log.debug('load dgram data: %s' % [raw])
            self.dest = Destination(raw=raw)
            self._log.debug('destlen=%s' % self.dest)
            raw = raw[len(self.dest):]
            self._log.debug('raw=%s' % [raw])
            # Fixed 40-byte signature => DSA-SHA1 only.
            self.sig = raw[:40]
            raw = raw[40:]
            self._log.debug('payloadlen=%d' % len(raw))
            self.payload = raw
            phash = sha256(self.payload)
            self._log.debug('verify dgram: sig=%s hash=%s' % (
                [self.sig], [phash]))
            self.dest.verify(phash, self.sig)
        else:
            self.dest = dest
            self.payload = payload
            self.data = bytearray()
            self.data += self.dest.serialize()
            payload_hash = sha256(self.payload)
            self.sig = self.dest.sign(payload_hash)
            self._log.debug('signature=%s' % [self.sig])
            self.data += self.sig
            self.data += payload
    def serialize(self):
        """Return the signed wire form."""
        return self.data
    def __str__(self):
        return '[DSADatagram payload=%s sig=%s]' % (self.payload, self.sig)
class i2cp_payload(object):
    """
    I2CP message payload: a 4-byte big-endian length prefix followed by a
    gzip-style header carrying flags, ports and protocol, then the
    i2p-compressed body.
    """

    gz_header = b'\x1f\x8b\x08'
    _log = logging.getLogger('i2cp_payload')

    def __init__(self, raw=None, data=None, srcport=0, dstport=0,
                 proto=I2CPProtocol.RAW):
        """
        Parse a payload from wire bytes (``raw``) or build one from
        ``data`` plus port/protocol parameters.

        :raises ValueError: if srcport/dstport are not valid port numbers
        """
        if raw:
            # First 4 bytes: length of the payload body that follows.
            self.dlen = struct.unpack(b'>I', raw[:4])[0]
            self._log.debug('payload len=%d' % self.dlen)
            # BUG FIX: the slice previously stopped at self.dlen instead of
            # 4 + self.dlen, silently dropping the last 4 bytes of every
            # payload (serialize() writes len(data) EXCLUDING the prefix).
            data = raw[4:4 + self.dlen]
            self._log.debug('compressed payload len=%d' % len(data))
            assert data[:3] == self.gz_header
            self.flags = data[3]
            self.srcport = struct.unpack(b'>H', data[4:6])[0]
            self.dstport = struct.unpack(b'>H', data[6:8])[0]
            self.xflags = data[8]
            self.proto = I2CPProtocol(util.get_as_int(data[9]))
            self.data = util.i2p_decompress(data[10:])
            self._log.debug('decompressed=%s' % [self.data])
        else:
            if util.check_portnum(srcport) and util.check_portnum(dstport):
                self._log.debug('payload data len=%d' % len(data))
                self.data = util.i2p_compress(data)
                self._log.debug('compressed payload len=%d' % len(self.data))
                self.srcport = srcport
                self.dstport = dstport
                self.proto = I2CPProtocol(proto)
                self.flags = 0
                self.xflags = 2
            else:
                raise ValueError('invalid ports: srcport=%s dstport=%s' % (
                    [srcport], [dstport]))

    def serialize(self):
        """Return length-prefixed wire bytes for this payload."""
        data = bytearray()
        data += self.gz_header
        data += struct.pack(b'>B', self.flags)
        data += struct.pack(b'>H', self.srcport)
        data += struct.pack(b'>H', self.dstport)
        data += struct.pack(b'>B', self.xflags)
        data += struct.pack(b'>B', self.proto.value)
        data += self.data
        dlen = len(data)
        self._log.debug('serialize len=%d' % dlen)
        return struct.pack(b'>I', dlen) + data

    def __str__(self):
        return ('[Payload flags=%s srcport=%s dstport=%s xflags=%s' +
                ' proto=%s data=%s]') % (
            self.flags,
            self.srcport,
            self.dstport,
            self.xflags,
            self.proto,
            self.data)
def to_b32_bytes(val):
    """
    Resolve ``val`` to its 32-byte address hash.

    Destinations are serialized first; ``*.b32.i2p`` names are base32
    decoded; any other bytes are hashed with sha256.
    """
    if isinstance(val, Destination):
        return to_b32_bytes(val.serialize())
    if not isinstance(val, bytes):
        raise TypeError("invalid type", val)
    if val.lower().endswith(b".b32.i2p"):
        return util.i2p_b32decode(val)
    return sha256(val)
| {
"repo_name": "majestrate/i2p.socket",
"path": "i2p/crypto/datatypes.py",
"copies": "1",
"size": "23219",
"license": "mit",
"hash": 8521610996274852000,
"line_mean": 31.888101983,
"line_max": 107,
"alpha_frac": 0.5297816443,
"autogenerated": false,
"ratio": 3.882126734659756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9901271067986934,
"avg_score": 0.0021274621945644536,
"num_lines": 706
} |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from builtins import range
from builtins import object
import numpy as np
from scipy.ndimage import interpolation, filters
def scale_to_h(img, target_height, order=1, dtype=np.dtype('f'), cval=0):
    """Rescale a 2D image to ``target_height``, preserving the aspect ratio."""
    height, width = img.shape
    scale = target_height * 1.0 / height
    new_shape = (target_height, int(scale * width))
    # affine_transform maps output coordinates back to input coordinates,
    # hence the inverse scale on the diagonal.
    scaled = interpolation.affine_transform(1.0 * img,
                                            np.eye(2) / scale,
                                            order=order,
                                            output_shape=new_shape,
                                            mode='constant',
                                            cval=cval)
    return np.array(scaled, dtype=dtype)
class CenterNormalizer(object):
    """Dewarp text-line images by tracking the vertical center of the ink
    and rescaling the extracted band to a fixed target height."""
    def __init__(self, target_height=48, params=(4, 1.0, 0.3)):
        # params = (range, smoothness, extra): range scales the extraction
        # radius around the center line; smoothness/extra control filtering.
        self.target_height = target_height
        self.range, self.smoothness, self.extra = params
    def setHeight(self, target_height):
        """Change the output height used by normalize()."""
        self.target_height = target_height
    def measure(self, line):
        """Estimate the center curve of the line image.

        Stores shape, center, mad and the extraction radius r on self;
        must be called before dewarp() with the same image shape.
        """
        h, w = line.shape
        # Heavy smoothing collapses the line into a ridge whose per-column
        # argmax approximates the vertical text center.
        smoothed = filters.gaussian_filter(line, (h*0.5, h*self.smoothness),
                                           mode='constant')
        # A tiny uniform component breaks ties in empty columns.
        smoothed += 0.001*filters.uniform_filter(smoothed, (h*0.5, w),
                                                 mode='constant')
        self.shape = (h, w)
        a = np.argmax(smoothed, axis=0)
        a = filters.gaussian_filter(a, h*self.extra)
        self.center = np.array(a, 'i')
        # Mean absolute deviation of ink from the center curve determines
        # how wide a band to extract.
        deltas = np.abs(np.arange(h)[:, np.newaxis]-self.center[np.newaxis, :])
        self.mad = np.mean(deltas[line != 0])
        self.r = int(1+self.range*self.mad)
    def dewarp(self, img, cval=0, dtype=np.dtype('f')):
        """Extract a straightened band of half-width self.r around the
        measured center curve; requires a prior measure() on this shape."""
        assert img.shape == self.shape
        h, w = img.shape
        # Pad above and below so the extracted band never leaves the image.
        padded = np.vstack([cval*np.ones((h, w)), img, cval*np.ones((h, w))])
        center = self.center+h
        dewarped = [padded[center[i]-self.r:center[i]+self.r, i] for i in
                    range(w)]
        dewarped = np.array(dewarped, dtype=dtype).T
        return dewarped
    def normalize(self, img, order=1, dtype=np.dtype('f'), cval=0):
        """Dewarp the line image, then rescale it to self.target_height."""
        dewarped = self.dewarp(img, cval=cval, dtype=dtype)
        h, w = dewarped.shape
        scaled = scale_to_h(dewarped, self.target_height, order=order,
                            dtype=dtype, cval=cval)
        return scaled
| {
"repo_name": "QuLogic/ocropy",
"path": "kraken/lib/lineest.py",
"copies": "1",
"size": "2454",
"license": "apache-2.0",
"hash": -7905637283865201000,
"line_mean": 39.2295081967,
"line_max": 79,
"alpha_frac": 0.5521597392,
"autogenerated": false,
"ratio": 3.6088235294117648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46609832686117647,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from builtins import zip
from builtins import range
from builtins import object
import unicodedata
import numpy as np
from scipy.ndimage import measurements
from scipy.special import expit
# Scale applied to random weight initialization (see Softmax and LSTM).
initial_range = 0.1
class Codec(object):
    """Translate between integer codes and characters."""

    def init(self, charset):
        """Build the two-way code/char tables from ``charset``
        (deduplicated and sorted); returns ``self`` for chaining."""
        self.code2char = {}
        self.char2code = {}
        for code, char in enumerate(sorted(set(charset))):
            self.code2char[code] = char
            self.char2code[char] = code
        return self

    def size(self):
        """The total number of codes (use this for the number of output
        classes when training a classifier."""
        return len(self.code2char)

    def encode(self, s):
        "Encode the string `s` into a code sequence."
        # Unknown characters map to the code for '~'.
        default = self.char2code["~"]
        return [self.char2code.get(char, default) for char in s]

    def decode(self, l):
        "Decode a code sequence into a string."
        return [self.code2char.get(code, "~") for code in l]
def normalize_nfkc(s):
    """Return ``s`` normalized to Unicode NFKC (compatibility composed) form."""
    return unicodedata.normalize('NFKC',s)
def prepare_line(line, pad=16):
    """Prepare a line for recognition; this inverts it, transposes
    it, and pads it."""
    # Normalize to [0, 1], invert so ink becomes high values, and transpose
    # so time runs along axis 0.
    line = line * 1.0 / np.amax(line)
    line = np.amax(line) - line
    line = line.T
    if pad > 0:
        width = line.shape[1]
        padding = np.zeros((pad, width))
        line = np.vstack([padding, line, padding])
    return line
def randu(*shape):
    # ATTENTION: whether you use randu or randn can make a difference.
    """Generate uniformly random values in the range (-1,1).
    This can usually be used as a drop-in replacement for `randn`
    resulting in a different distribution."""
    uniform01 = np.random.rand(*shape)
    return uniform01 * 2 - 1
def sigmoid(x):
    """
    Compute the sigmoid function. We don't bother with clipping the input
    value because IEEE floating point behaves reasonably with this function
    even for infinities.
    Further we use scipy's expit function which is ~50% faster for decently
    sized arrays.

    Accepts scalars or numpy arrays; returns values in (0, 1) with the
    same shape as the input.
    """
    return expit(x)
# These are the nonlinearities used by the LSTM network.
# We don't bother parameterizing them here

def ffunc(x):
    "Nonlinearity used for gates."
    # Logistic sigmoid computed directly via exp.
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def gfunc(x):
    "Nonlinearity used for input to state."
    result = np.tanh(x)
    return result
# ATTENTION: try linear for hfunc

def hfunc(x):
    "Nonlinearity used for output."
    result = np.tanh(x)
    return result
################################################################
# LSTM classification with forward/backward alignment ("CTC")
################################################################

def translate_back(outputs, threshold=0.5, pos=0):
    """Translate back. Thresholds on class 0, then assigns
    the maximum class to each region.

    outputs: (time, classes) posterior matrix; column 0 is treated as the
    blank/separator class. Returns the class sequence, or the raw
    (row, col) maxima when ``pos`` is truthy.
    """
    # Connected runs where the blank probability dips below threshold
    # become candidate character regions.
    labels, n = measurements.label(outputs[:,0] < threshold)
    mask = np.tile(labels.reshape(-1,1), (1,outputs.shape[1]))
    # One (row, col) argmax per labeled region.
    maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask)+1))
    if pos: return maxima
    return [c for (r,c) in maxima if c != 0]
def translate_back_locations(outputs, threshold=0.5):
    """
    Translates back the network output to a class sequence.

    Thresholds on class 0, then assigns the maximum (non-zero) class to each
    region. Difference to translate_back is the output region not just the
    maximum's position is returned.

    Args:
        outputs: (time, classes) posterior matrix; column 0 is the
            blank/separator class.
        threshold: blank probability below which a time step belongs to a
            character region.

    Returns:
        A list with tuples (class, start, end, max). max is the maximum value
        of the softmax layer in the region.
    """
    labels, n = measurements.label(outputs[:,0] < threshold)
    mask = np.tile(labels.reshape(-1,1), (1,outputs.shape[1]))
    maxima = measurements.maximum_position(outputs, mask, np.arange(1, np.amax(mask)+1))
    p = 0
    start = None
    x = []
    # Walk the label sequence recovering the [start, end) extent of each
    # region; p counts regions seen so far (1-based index into maxima).
    for idx, val in enumerate(labels):
        if val != 0 and start is None:
            start = idx
            p += 1
        # BUG FIX: the check was ``if val == 0 and start:`` — a region
        # beginning at index 0 made ``start`` falsy, so that region was
        # never closed and the region counter desynchronized from maxima.
        if val == 0 and start is not None:
            # Region p-1 just ended; drop it if its argmax is the blank class.
            if maxima[p-1][1] == 0:
                start = None
            else:
                x.append((maxima[p-1][1], start, idx, outputs[maxima[p-1]]))
                start = None
    # append last non-zero region to list if no zero region occurs after it
    if start is not None:
        x.append((maxima[p-1][1], start, len(outputs), outputs[maxima[p-1]]))
    return x
class Network:
    """Minimal interface shared by all network layers; subclasses
    implement forward()."""
    def predict(self,xs):
        """Prediction is the same as forward propagation."""
        return self.forward(xs)
class Softmax(Network):
    """A logistic regression network."""
    def __init__(self,Nh,No,initial_range=initial_range,rand=np.random.rand):
        # Nh: number of inputs, No: number of output classes.
        # NOTE(review): the ``rand`` argument is accepted but never used;
        # weights are always drawn via randu() — confirm intent.
        self.Nh = Nh
        self.No = No
        # W2 has an extra column for the bias input prepended in forward().
        self.W2 = randu(No,Nh+1)*initial_range
        self.DW2 = np.zeros((No,Nh+1))
    def ninputs(self):
        return self.Nh
    def noutputs(self):
        return self.No
    def forward(self,ys):
        """Softmax forward pass over a sequence; caches (inputs, outputs)
        on self.state for the subsequent backward() call."""
        n = len(ys)
        inputs,zs = [None]*n,[None]*n
        for i in range(n):
            # Prepend a constant 1 as the bias input.
            inputs[i] = np.concatenate([np.ones(1),ys[i]])
            temp = np.dot(self.W2,inputs[i])
            # Clip pre-activations to avoid overflow in exp.
            temp = np.exp(np.clip(temp,-100,100))
            temp /= np.sum(temp)
            zs[i] = temp
        self.state = (inputs,zs)
        return zs
    def backward(self,deltas):
        """Backward pass: accumulate DW2 and return deltas for the layer
        below (bias column stripped)."""
        inputs,zs = self.state
        n = len(zs)
        assert len(deltas)==len(inputs)
        dzspre,dys = [None]*n,[None]*n
        for i in reversed(list(range(len(zs)))):
            dzspre[i] = deltas[i]
            # Drop the bias component when propagating downward.
            dys[i] = np.dot(dzspre[i],self.W2)[1:]
        # NOTE(review): ``sumouter`` is not defined in this module view —
        # presumably a helper summing outer products over the sequence;
        # confirm it is defined or imported elsewhere in the file.
        self.DW2 = sumouter(dzspre,inputs)
        return dys
    def info(self):
        """Print shape and value range of each weight matrix."""
        vars = sorted("W2".split())
        for v in vars:
            a = np.array(getattr(self,v))
            print(v, a.shape, np.amin(a), np.amax(a))
    def weights(self):
        """Yield (weights, derivatives, name) triples."""
        yield self.W2,self.DW2,"Softmax"
class LSTM(Network):
    """A standard LSTM network. This is a direct implementation of all the forward
    and backward propagation formulas, mainly for speed. (There is another, more
    abstract implementation as well, but that's significantly slower in Python
    due to function call overhead.)"""
    def __init__(self,ni,ns,initial=initial_range,maxlen=5000):
        # na = total input width per step: 1 bias + ni inputs + ns recurrent.
        na = 1+ni+ns
        self.dims = ni,ns,na
        self.init_weights(initial)
        self.allocate(maxlen)
    def init_weights(self,initial):
        "Initialize the weight matrices and derivatives"
        ni,ns,na = self.dims
        # gate weights
        for w in "WGI WGF WGO WCI".split():
            setattr(self,w,randu(ns,na)*initial)
            setattr(self,"D"+w,np.zeros((ns,na)))
        # peep weights
        for w in "WIP WFP WOP".split():
            setattr(self,w,randu(ns)*initial)
            setattr(self,"D"+w,np.zeros(ns))
    def allocate(self,n):
        """Allocate space for the internal state variables.
        `n` is the maximum sequence length that can be processed."""
        ni,ns,na = self.dims
        vars = "cix ci gix gi gox go gfx gf"
        vars += " state output gierr gferr goerr cierr stateerr outerr"
        # NaN-fill so reads of unwritten steps are caught by the final assert.
        for v in vars.split():
            setattr(self,v,np.nan*np.ones((n,ns)))
        self.source = np.nan*np.ones((n,na))
        self.sourceerr = np.nan*np.ones((n,na))
    def reset(self,n):
        """Reset the contents of the internal state variables to `nan`"""
        # NOTE(review): ``n`` is unused; all arrays are wiped in full.
        vars = "cix ci gix gi gox go gfx gf"
        vars += " state output gierr gferr goerr cierr stateerr outerr"
        vars += " source sourceerr"
        for v in vars.split():
            getattr(self,v)[:,:] = np.nan
    def forward(self,xs):
        """Perform forward propagation of activations."""
        ni,ns,na = self.dims
        assert len(xs[0])==ni
        n = len(xs)
        # grow internal state arrays if len(xs) > maxlen
        if n > self.gi.shape[0]:
            self.allocate(n)
        self.last_n = n
        self.reset(n)
        for t in range(n):
            # source[t] = [1 (bias), x_t, h_{t-1}]
            prev = np.zeros(ns) if t==0 else self.output[t-1]
            self.source[t,0] = 1
            self.source[t,1:1+ni] = xs[t]
            self.source[t,1+ni:] = prev
            # Pre-activations for input/forget/output gates and cell input.
            np.dot(self.WGI,self.source[t],out=self.gix[t])
            np.dot(self.WGF,self.source[t],out=self.gfx[t])
            np.dot(self.WGO,self.source[t],out=self.gox[t])
            np.dot(self.WCI,self.source[t],out=self.cix[t])
            if t>0:
                # ATTENTION: peep weights are diagonal matrices
                self.gix[t] += self.WIP*self.state[t-1]
                self.gfx[t] += self.WFP*self.state[t-1]
            self.gi[t] = ffunc(self.gix[t])
            self.gf[t] = ffunc(self.gfx[t])
            self.ci[t] = gfunc(self.cix[t])
            # Cell state: gated input plus (after t=0) gated previous state.
            self.state[t] = self.ci[t]*self.gi[t]
            if t>0:
                self.state[t] += self.gf[t]*self.state[t-1]
            # The output gate peeks at the *current* cell state.
            self.gox[t] += self.WOP*self.state[t]
            self.go[t] = ffunc(self.gox[t])
            self.output[t] = hfunc(self.state[t]) * self.go[t]
        assert not np.isnan(self.output[:n]).any()
        return self.output[:n]
################################################################
# combination classifiers
################################################################

class Stacked(Network):
    """Stack two networks on top of each other."""
    def __init__(self,nets):
        self.nets = nets
    def forward(self,xs):
        """Feed ``xs`` through each network in order, piping outputs along."""
        # Cleanup: the loop index from ``enumerate`` was unused.
        for net in self.nets:
            xs = net.forward(xs)
        return xs
class Reversed(Network):
    """Run a network on the time-reversed input."""
    def __init__(self,net):
        self.net = net
    def forward(self,xs):
        """Reverse the sequence, run the wrapped net, reverse the result."""
        reversed_out = self.net.forward(xs[::-1])
        return reversed_out[::-1]
class Parallel(Network):
    """Run multiple networks in parallel on the same input."""
    def __init__(self,*nets):
        self.nets = nets
    def forward(self,xs):
        """Concatenate, per time step, the outputs of every network."""
        per_net = [net.forward(xs) for net in self.nets]
        # zip(*...) regroups the per-network sequences by time step.
        return [np.concatenate(step) for step in zip(*per_net)]
def BIDILSTM(Ni,Ns,No):
    """A bidirectional LSTM, constructed from regular and reversed LSTMs."""
    assert No>1
    forward_lstm = LSTM(Ni,Ns)
    backward_lstm = Reversed(LSTM(Ni,Ns))
    bidi = Parallel(forward_lstm, backward_lstm)
    # The softmax sees both directions concatenated, hence 2*Ns inputs.
    logreg = Softmax(2*Ns,No)
    return Stacked([bidi, logreg])
class SeqRecognizer(Network):
    """Perform sequence recognition using BIDILSTM and alignment."""
    def __init__(self,ninput,nstates,noutput=-1,codec=None,normalize=normalize_nfkc):
        """Build a bidirectional LSTM recognizer; if a codec is given,
        the number of output classes is taken from it."""
        self.Ni = ninput
        if codec:
            noutput = codec.size()
        assert noutput>0
        self.No = noutput
        self.lstm = BIDILSTM(ninput,nstates,noutput)
        self.normalize = normalize
        self.codec = codec
    def predictSequence(self,xs):
        "Predict an integer sequence of codes."
        assert xs.shape[1]==self.Ni,"wrong image height (image: %d, expected: %d)"%(xs.shape[1],self.Ni)
        # Keep the raw posteriors around for inspection/alignment.
        self.outputs = np.array(self.lstm.forward(xs))
        return translate_back(self.outputs)
    def l2s(self,l):
        "Convert a code sequence into a unicode string after recognition."
        decoded = self.codec.decode(l)
        return u"".join(decoded)
    def predictString(self,xs):
        "Predict output as a string. This uses codec and normalizer."
        return self.l2s(self.predictSequence(xs))
| {
"repo_name": "QuLogic/ocropy",
"path": "kraken/lib/lstm.py",
"copies": "1",
"size": "11417",
"license": "apache-2.0",
"hash": -4976930508321156000,
"line_mean": 34.5669781931,
"line_max": 104,
"alpha_frac": 0.586756591,
"autogenerated": false,
"ratio": 3.492505353319058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4579261944319058,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# from __future__ import unicode_literals
import os
import fnmatch
import sys
import json
def metadatajson():
    """Return the JSON skeleton for a BaseSpace appresult _metadata.json."""
    template = {
        "Name": "",
        "Description": "",
        "HrefAppSession": "",
        "Properties": [
            {
                "Type": "sample[]",
                "Name": "Input.Samples",
                "Items": []
            }
        ]
    }
    return json.dumps(template)
# app specific definitions (not needed for personal app)
parameter_list = []

# load json file describing this app session
jsonfile = open('/data/input/AppSession.json')
jsonObject = json.load(jsonfile)
jsonfile.close()  # FIX: handle was previously left open

# determine the number of properties
numberOfPropertyItems = len(jsonObject['Properties']['Items'])

# loop over properties: collect form parameters and the project ID
sampleID = []
sampleHref = []
sampleName = []
for index in range(numberOfPropertyItems):
    # add parameters to parameters list
    if jsonObject['Properties']['Items'][index]['Name'] == 'Input.textbox':
        parameter = jsonObject['Properties']['Items'][index]['Content']
        parameter_list.append(parameter)
    if jsonObject['Properties']['Items'][index]['Name'] == 'Input.radio':
        parameter = jsonObject['Properties']['Items'][index]['Content']
        parameter_list.append(parameter)
    if jsonObject['Properties']['Items'][index]['Name'] == 'Input.checkbox':
        parameter = jsonObject['Properties']['Items'][index]['Items'][0]
        parameter_list.append(parameter)
    if jsonObject['Properties']['Items'][index]['Name'] == 'Input.numeric':
        parameter = jsonObject['Properties']['Items'][index]['Content']
        parameter_list.append(parameter)
    # set project ID
    if jsonObject['Properties']['Items'][index]['Name'] == 'Input.Projects':
        projectID = jsonObject['Properties']['Items'][index]['Items'][0]['Id']

for index in range(numberOfPropertyItems):
    # set sample parameters
    if jsonObject['Properties']['Items'][index]['Name'] == 'Input.Samples':
        for sample in range(len(jsonObject['Properties']['Items'][index]['Items'])):
            sampleID.append(jsonObject['Properties']['Items'][index]['Items'][sample]['Id'])
            sampleHref.append(jsonObject['Properties']['Items'][index]['Items'][sample]['Href'])
            sampleName.append(jsonObject['Properties']['Items'][index]['Items'][sample]['Name'])
            sampleDir = '/data/input/samples/%s/Data/Intensities/BaseCalls' % (sampleID[sample])
            if not os.path.exists(sampleDir):
                sampleDir = '/data/input/samples/%s' % (sampleID[sample])
            # BUG FIX: previously walked sampleDir[sample] — a single
            # character of the path string — so no files were ever found
            # and the R1/R2 consistency check never ran.
            for root, dirs, files in os.walk(sampleDir):
                R1files = fnmatch.filter(files, '*_R1_*')
                R2files = fnmatch.filter(files, '*_R2_*')
                if len(R1files) != len(R2files):
                    print("number of R1 and R2 files do not match")
                    sys.exit()
            sampleOutDir = '/data/output/appresults/%s/%s' % (projectID, sampleName[sample])
            os.system('mkdir -p "%s"' % sampleOutDir)
            # create output file and print parameters to output file (this is where you would run the command)
            handle = '%s/parameters.csv' % sampleOutDir
            outFile = open(handle, 'w')
            count = 0
            for parameter in parameter_list:
                count += 1
                outFile.write('%s,%s\n' % (count, parameter))
            outFile.close()
            # create metadata file for each appresult
            metaJsonObject = json.loads(metadatajson())
            # fill in per-sample metadata
            metaJsonObject['Name'] = jsonObject['Properties']['Items'][index]['Items'][sample]['Id']
            metaJsonObject['Description'] = 'Sample Description'
            metaJsonObject['HrefAppSession'] = jsonObject['Href']
            for href in sampleHref:
                metaJsonObject['Properties'][0]['Items'].append(href)
            metadataFile = '%s/_metadata.json' % sampleOutDir
            outMetadataFile = open(metadataFile, 'w')
            json.dump(metaJsonObject, outMetadataFile)
            outMetadataFile.close()  # FIX: handle was previously left open
| {
"repo_name": "alaindomissy/docker-crispr",
"path": "files/BACKEND/demo.py",
"copies": "1",
"size": "4083",
"license": "mit",
"hash": -1104634939583395800,
"line_mean": 40.6632653061,
"line_max": 106,
"alpha_frac": 0.6137643889,
"autogenerated": false,
"ratio": 4.14517766497462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008605740500526627,
"num_lines": 98
} |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import re
import zipfile
from pkginfo.distribution import Distribution
# Matches bdist_wininst filenames such as "pkg-1.0.win32-py2.7.exe",
# capturing the targeted Python version.
wininst_file_re = re.compile(r".*py(?P<pyver>\d+\.\d+)\.exe$")
class WinInst(Distribution):
    """Metadata extracted from a ``bdist_wininst`` .exe archive."""

    def __init__(self, filename, metadata_version=None):
        self.filename = filename
        self.metadata_version = metadata_version
        self.extractMetadata()

    @property
    def py_version(self):
        """Python version encoded in the filename, or "any"."""
        match = wininst_file_re.match(self.filename)
        return "any" if match is None else match.group("pyver")

    def read(self):
        """Return the raw PKG-INFO/.egg-info bytes from the archive.

        Raises ValueError when the file is missing, is not a .exe
        archive, or contains no usable metadata.
        """
        fqn = os.path.abspath(os.path.normpath(self.filename))
        if not os.path.exists(fqn):
            raise ValueError('No such file: %s' % fqn)
        if not fqn.endswith('.exe'):
            raise ValueError('Not a known archive format: %s' % fqn)
        archive = zipfile.ZipFile(fqn)
        names = archive.namelist()
        try:
            # Prefer the shallowest candidate path (sort on path depth).
            candidates = [name.split('/') for name in names
                          if name.endswith(".egg-info") or name.endswith("PKG-INFO")]
            for _, parts in sorted((len(parts), parts) for parts in candidates):
                data = archive.read('/'.join(parts))
                if b'Metadata-Version' in data:
                    return data
        finally:
            archive.close()
        raise ValueError('No PKG-INFO/.egg-info in archive: %s' % fqn)
| {
"repo_name": "dstufft/twine",
"path": "twine/wininst.py",
"copies": "9",
"size": "1624",
"license": "apache-2.0",
"hash": 2465787794247158000,
"line_mean": 29.0740740741,
"line_max": 75,
"alpha_frac": 0.5609605911,
"autogenerated": false,
"ratio": 3.941747572815534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9002708163915535,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from guitool_ibeis.__PYQT__ import QtCore, QtGui
from guitool_ibeis.__PYQT__ import QtWidgets
from guitool_ibeis.__PYQT__ import GUITOOL_PYQT_VERSION
import utool as ut
ut.noinject(__name__, '[guitool_ibeis.delegates]', DEBUG=False)
class APIDelegate(QtWidgets.QItemDelegate):
    """Base item delegate providing a fixed size hint."""

    is_persistant_editable = True

    def __init__(self, parent):
        QtWidgets.QItemDelegate.__init__(self, parent)

    def sizeHint(self, option, qindex):
        """Fixed 50x50 size hint for every cell.

        BUG FIX: ``self`` was missing from the signature, so Qt's call
        pattern (self, option, index) raised a TypeError.
        """
        # QStyleOptionViewItem option
        return QtCore.QSize(50, 50)
class ImageDelegate(QtWidgets.QStyledItemDelegate):
    """Delegate that paints a fixed image into each cell."""

    def __init__(self, parent):
        # Cleanup: removed leftover debug output (print(dir(self))).
        QtWidgets.QStyledItemDelegate.__init__(self, parent)

    def paint(self, painter, option, index):
        """Fill the cell background and draw the (hard-coded) image."""
        painter.fillRect(option.rect, QtGui.QColor(191, 222, 185))
        # path = "path\to\my\image.jpg"
        # NOTE(review): the image path is hard-coded and resolved relative
        # to the CWD; consider taking it from the model/index instead.
        self.path = "image.bmp"
        image = QtGui.QImage(str(self.path))
        pixmap = QtGui.QPixmap.fromImage(image)
        # NOTE(review): QPixmap.scaled returns a NEW pixmap; its result is
        # discarded here and the unscaled pixmap is drawn (drawPixmap
        # stretches into option.rect anyway) — confirm intent.
        pixmap.scaled(50, 40, QtCore.Qt.KeepAspectRatio)
        painter.drawPixmap(option.rect, pixmap)
class ComboDelegate(APIDelegate):
    """
    A delegate that places a fully functioning QComboBox in every
    cell of the column to which it's applied
    """
    def __init__(self, parent):
        APIDelegate.__init__(self, parent)

    def createEditor(self, parent, option, index):
        combo = QtWidgets.QComboBox(parent)
        combo.addItems(['option1', 'option2', 'option3'])
        if GUITOOL_PYQT_VERSION == 5:
            # BUG FIX: PyQt5 has no QObject.connect; bound signals must be
            # connected directly via their .connect() method.
            combo.currentIndexChanged.connect(self.currentIndexChanged)
        else:
            # I believe this particular option is broken in pyqt4
            self.connect(combo, QtCore.SIGNAL("currentIndexChanged(int)"),
                         self, QtCore.SLOT("currentIndexChanged()"))
        return combo

    def setEditorData(self, editor, index):
        # Block signals so the programmatic update below does not
        # re-enter currentIndexChanged.
        editor.blockSignals(True)
        editor.setCurrentIndex(int(index.model().data(index).toString()))
        editor.blockSignals(False)

    def setModelData(self, editor, model, index):
        model.setData(index, editor.currentIndex())

    @QtCore.pyqtSlot()
    def currentIndexChanged(self):
        # Commit the editor's data back to the model immediately.
        self.commitData.emit(self.sender())
class ButtonDelegate(APIDelegate):
    """
    A delegate that places a fully functioning QPushButton in every
    cell of the column to which it's applied
    """
    def __init__(self, parent):
        # The parent is not an optional argument for the delegate as
        # we need to reference it in the paint method (see below)
        APIDelegate.__init__(self, parent)

    def paint(self, painter, option, index):
        # This method will be called every time a particular cell is
        # in view and that view is changed in some way. We ask the
        # delegates parent (in this case a table view) if the index
        # in question (the table cell) already has a widget associated
        # with it. If not, create one with the text for this index and
        # connect its clicked signal to a slot in the parent view so
        # we are notified when its used and can do something.
        # NOTE(review): index.data().toString() is PyQt4/QVariant API;
        # under PyQt5 data() returns a plain str — confirm which binding
        # this path is exercised with.
        if not self.parent().indexWidget(index):
            self.parent().setIndexWidget(
                index,
                QtWidgets.QPushButton(
                    index.data().toString(),
                    self.parent(),
                    clicked=self.parent().cellButtonClicked
                )
            )
# DELEGATE_MAP = {
# 'BUTTON': ButtonDelegate,
# 'COMBO': ComboDelegate,
# }
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/guitool_delegates.py",
"copies": "1",
"size": "3638",
"license": "apache-2.0",
"hash": -5985914987802135000,
"line_mean": 34.6666666667,
"line_max": 77,
"alpha_frac": 0.6401869159,
"autogenerated": false,
"ratio": 4.055741360089186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5195928275989186,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from guitool_ibeis.__PYQT__ import QtCore, QtGui
from guitool_ibeis.__PYQT__ import QtWidgets # NOQA
from guitool_ibeis.__PYQT__.QtCore import Qt
import math
import utool
import six
utool.noinject(__name__, '[StripProxyModel]', DEBUG=False)
#STRIPE_PROXY_BASE = QtGui.QAbstractProxyModel
#STRIPE_PROXY_BASE = QtGui.QSortFilterProxyModel
# Pick the identity proxy base from whichever Qt namespace provides it
# (it moved between QtGui and QtCore across Qt binding versions).
try:
    STRIPE_PROXY_BASE = QtGui.QIdentityProxyModel
except Exception:
    STRIPE_PROXY_BASE = QtCore.QIdentityProxyModel
# Metaclass that forwards the listed attributes of the proxy straight
# through to the underlying source model.
STRIP_PROXY_META_CLASS = utool.makeForwardingMetaclass(
    lambda self: self.sourceModel(),
    ['_set_context_id',
     '_get_context_id',
     '_set_changeblocked',
     '_get_changeblocked',
     '_about_to_change',
     '_change',
     '_update',
     '_rows_updated',
     'name'],
    base_class=STRIPE_PROXY_BASE)
# six-compatible base class carrying the forwarding metaclass.
STRIP_PROXY_SIX_BASE = six.with_metaclass(STRIP_PROXY_META_CLASS, STRIPE_PROXY_BASE)
class StripeProxyModel(STRIP_PROXY_SIX_BASE):  # (STRIPE_PROXY_BASE, metaclass=STRIP_PROXY_META_CLASS):
    """Proxy model that folds every ``numduplicates`` source rows into one
    proxy row by widening the proxy's column count correspondingly."""
    #__metaclass__ = STRIP_PROXY_META_CLASS
    def __init__(self, parent=None, numduplicates=1):
        STRIPE_PROXY_BASE.__init__(self, parent=parent)
        # Number of source rows folded into each proxy row.
        self._nd = numduplicates
    def rowCount(self, parent=QtCore.QModelIndex()):
        """Proxy rows = ceil(source rows / numduplicates)."""
        sourceParent = self.mapToSource(parent)
        source_rows = self.sourceModel().rowCount(parent=sourceParent)
        rows = math.ceil(source_rows / self._nd)
        #print('StripeProxyModel.rowCount(): %r %r' % (source_rows, rows))
        return int(rows)
    def columnCount(self, parent=QtCore.QModelIndex()):
        """Proxy columns = source columns * numduplicates."""
        source_cols = self.sourceModel().columnCount(parent=parent)
        cols = self._nd * source_cols
        #print('StripeProxyModel.columnCount(): %r %r' % (source_cols, cols))
        return int(cols)
    def proxy_to_source(self, row, col, parent=QtCore.QModelIndex()):
        """Map proxy (row, col, parent) to source coordinates."""
        source_model = self.sourceModel()
        source_cols = source_model.columnCount(parent=parent)
        r, c, p = row, col, parent
        # Which duplicate this column belongs to selects the source row.
        r2 = int(math.floor(c / source_cols)) + (r * self._nd)
        c2 = c % source_cols
        p2 = p
        return r2, c2, p2
    def source_to_proxy(self, row, col, parent=QtCore.QModelIndex()):
        """Inverse of proxy_to_source."""
        source_model = self.sourceModel()
        source_cols = source_model.columnCount(parent=parent)
        r, c, p = row, col, parent
        r2 = int(math.floor(r / self._nd))
        c2 = ((r % self._nd) * source_cols) + c
        p2 = p
        return r2, c2, p2
    def mapToSource(self, proxyIndex):
        """ returns index into original model """
        if proxyIndex is None:
            return None
        if proxyIndex.isValid():
            r2, c2, p2 = self.proxy_to_source(proxyIndex.row(), proxyIndex.column())
            #print('StripeProxyModel.mapToSource(): %r %r %r; %r %r %r' % (r, c, p, r2, c2, p2))
            sourceIndex = self.sourceModel().index(r2, c2, parent=p2)  # self.sourceModel().root_node[r2]
        else:
            sourceIndex = QtCore.QModelIndex()
        return sourceIndex
    def mapFromSource(self, sourceIndex):
        """ returns index into proxy model """
        if sourceIndex is None:
            return None
        if sourceIndex.isValid():
            r2, c2, p2 = self.source_to_proxy(sourceIndex.row(), sourceIndex.column(), sourceIndex.parent())
            proxyIndex = self.index(r2, c2, p2)
        else:
            proxyIndex = QtCore.QModelIndex()
        return proxyIndex
    def index(self, row, col, parent=QtCore.QModelIndex()):
        # NOTE(review): createIndex's third argument is normally an internal
        # pointer/id rather than a parent index — confirm this is intended.
        if (row, col) != (-1, -1):
            proxyIndex = self.createIndex(row, col, parent)
        else:
            proxyIndex = QtCore.QModelIndex()
        return proxyIndex
    def data(self, proxyIndex, role=Qt.DisplayRole, **kwargs):
        """Delegate data access to the source model via index mapping."""
        sourceIndex = self.mapToSource(proxyIndex)
        return self.sourceModel().data(sourceIndex, role, **kwargs)
    def setData(self, proxyIndex, value, role=Qt.EditRole):
        sourceIndex = self.mapToSource(proxyIndex)
        return self.sourceModel().setData(sourceIndex, value, role)
    def sort(self, column, order):
        # Striped columns alias source columns modulo the source width.
        source_model = self.sourceModel()
        source_cols = source_model.columnCount()
        if source_cols > 0:
            source_model.sort(column % source_cols, order)
    def parent(self, index):
        return self.sourceModel().parent(self.mapToSource(index))
    # def mapSelectionToSource(self, sel):
    # def flags(self, *args, **kwargs):
    #     return self.sourceModel().flags(*args, **kwargs)
    # def headerData(self, *args, **kwargs):
    #     return self.sourceModel().headerData(*args, **kwargs)
    #
    # def hasChildren(self, *args, **kwargs):
    #     return self.sourceModel().hasChildren(*args, **kwargs)
    #
    # def itemData(self, *args, **kwargs):
    #     return self.sourceModel().itemData(*args, **kwargs)
    def _update_rows(self):
        """Forward row refresh to the source model."""
        return self.sourceModel()._update_rows()
    def _get_row_id(self, proxyIndex):
        return self.sourceModel()._get_row_id(self.mapToSource(proxyIndex))
    def _get_level(self, proxyIndex):
        return self.sourceModel()._get_level(self.mapToSource(proxyIndex))
    def _get_adjacent_qtindex(self, proxyIndex, *args, **kwargs):
        """Map to source, find the adjacent index there, map back."""
        qtindex = self.mapToSource(proxyIndex)
        next_qtindex = self.sourceModel()._get_adjacent_qtindex(qtindex, *args, **kwargs)
        next_proxyindex = self.mapFromSource(next_qtindex)
        return next_proxyindex
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/stripe_proxy_model.py",
"copies": "1",
"size": "5464",
"license": "apache-2.0",
"hash": 2820186070646117400,
"line_mean": 36.4246575342,
"line_max": 108,
"alpha_frac": 0.6361639824,
"autogenerated": false,
"ratio": 3.4321608040201004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45683247864201004,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from guitool_ibeis.__PYQT__ import QtCore, QtGui
from guitool_ibeis.__PYQT__ import QtWidgets # NOQA
import six
import utool
import sys
import logging
from six.moves import range
from guitool_ibeis.guitool_decorators import slot_
from guitool_ibeis import guitool_main
import utool as ut
ut.noinject(__name__)
# For some reason QtCore.Qt.ALT doesn't work right
# NOTE(review): 16777251 corresponds to Qt.Key_Alt — confirm against the
# Qt key enum for the binding in use.
ALT_KEY = 16777251
def make_option_dict(options, shortcuts=True):
    """ helper for popup menu callbacks

    Builds a lookup from option label (or its '&' shortcut character)
    to the option's value; submenus (list values) recurse in full-text
    mode and are skipped in shortcut mode.
    """
    keys = ut.take_column(options, 0)
    values = ut.take_column(options, 1)
    if not shortcuts:
        # Map the full option text to its value, recursing into submenus.
        ut.assert_unique(keys, name='option_keys')
        return {
            key: (make_option_dict(val, shortcuts=False)
                  if isinstance(val, list) else val)
            for (key, val) in zip(keys, values)
            if key is not None
        }
    # Shortcut mode: the character following '&' is the hotkey.
    shortcut_keys = [
        key[key.find('&') + 1] if '&' in key else None
        for key in keys
    ]
    try:
        ut.assert_unique(shortcut_keys, name='shortcut_keys', ignore=[None])
    except AssertionError:
        print('shortcut_keys = %r' % (shortcut_keys,))
        print('options = %r' % (ut.repr2(options),))
        raise
    return {
        sc_key: val
        for (sc_key, val) in zip(shortcut_keys, values)
        if sc_key is not None and not isinstance(val, list)
    }
def find_used_chars(name_list):
    """ Move to guitool_ibeis

    Collect the character following each '&' hotkey marker.
    """
    used_chars = []
    for name in name_list:
        amp = name.find('&')
        # Skip names without '&' and names ending in a bare '&'.
        if amp == -1 or amp + 1 >= len(name):
            continue
        used_chars.append(name[amp + 1])
    return used_chars
def make_word_hotlinks(name_list, used_chars=[], after_colon=False):
    """ Move to guitool_ibeis

    Insert an '&' hotkey marker into each name before the first character
    (compared uppercased) not already taken.

    Args:
        name_list (list):
        used_chars (list): (default = [])
        after_colon (bool): only consider characters after the last ':'

    Returns:
        list: hotlinked_name_list

    CommandLine:
        python -m guitool_ibeis.guitool_misc --exec-make_word_hotlinks

    Example:
        >>> # DISABLE_DOCTEST
        >>> from guitool_ibeis.guitool_misc import *  # NOQA
        >>> name_list = ['occlusion', 'occlusion:large', 'lighting']
        >>> hotlinked_name_list = make_word_hotlinks(name_list, [])
        >>> print('hotlinked_name_list = %s' % (str(hotlinked_name_list),))
    """
    taken = set(used_chars)
    result = []
    for name in name_list:
        if after_colon:
            pieces = name.split(':')
            offset = len(':'.join(pieces[:-1])) + 1
            candidates = pieces[-1]
        else:
            offset = 0
            candidates = name
        for pos, ch in enumerate(candidates, start=offset):
            ch = ch.upper()
            if ch in taken:
                continue
            taken.add(ch)
            result.append(name[:pos] + '&' + name[pos:])
            break
        else:
            # No free hotkey character; leave the name unlinked.
            result.append(name)
    return result
class BlockContext(object):
    """Context manager that blocks a Qt widget's signals for its duration,
    restoring the previous blocking state on clean exit."""

    def __init__(self, widget):
        self.widget = widget
        self.was_blocked = None

    def __enter__(self):
        # blockSignals returns the previous blocking state.
        previous = self.widget.blockSignals(True)
        self.was_blocked = previous

    def __exit__(self, type_, value, trace):
        if trace is None:
            self.widget.blockSignals(self.was_blocked)
        else:
            print('[BlockContext] Error in context manager!: ' + str(value))
            return False  # return a falsey value on error
# Qt object that will send messages (as signals) to the frontend gui_write slot
class GUILoggingSender(QtCore.QObject):
    """ Forwards log messages to a GUI slot via the ``write_`` Qt signal.

    Emitting the signal (rather than calling the slot directly) lets Qt
    route the call through its signal/slot machinery.
    """
    write_ = QtCore.pyqtSignal(str)

    def __init__(self, write_slot):
        QtCore.QObject.__init__(self)
        self.write_.connect(write_slot)

    def write_gui(self, msg):
        # str() guards against non-string messages before emitting
        self.write_.emit(str(msg))
class GUILoggingHandler(logging.StreamHandler):
    """
    A handler class which sends messages to a connected QSlot
    """
    def __init__(self, write_slot):
        super(GUILoggingHandler, self).__init__()
        # Sender marshals the formatted message to the GUI via a Qt signal
        self.sender = GUILoggingSender(write_slot)

    def emit(self, record):
        """Format ``record`` and forward it to the GUI slot."""
        try:
            msg = self.format(record) + '\n'
            self.sender.write_.emit(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # FIX: was a bare ``except:`` which also swallowed BaseExceptions
            # such as GeneratorExit; match logging.Handler.emit and only
            # catch Exception here.
            self.handleError(record)
class QLoggedOutput(QtWidgets.QTextEdit):
    """ Read-only text area that mirrors application log output.

    A ``GUILoggingHandler`` is attached lazily (on first show) so hidden
    widgets do not pay for log routing.
    """
    def __init__(self, parent=None, visible=True):
        super(QLoggedOutput, self).__init__(parent)
        #QtWidgets.QTextEdit.__init__(self, parent)
        self.setAcceptRichText(False)
        self.setReadOnly(True)
        self.setVisible(visible)
        # Created lazily by _initialize_handler
        self.logging_handler = None
        if visible:
            self._initialize_handler()

    def setVisible(self, flag):
        if flag and self.logging_handler is None:
            # Make sure handler is initialized on first go
            self._initialize_handler()
        super(QLoggedOutput, self).setVisible(flag)

    def _initialize_handler(self):
        # Route utool logging output into this widget via a Qt signal
        self.logging_handler = GUILoggingHandler(self.gui_write)
        utool.add_logging_handler(self.logging_handler)

    @slot_(str)
    def gui_write(outputEdit, msg_):
        # Slot for teed log output.  NOTE: ``outputEdit`` is ``self``.
        app = guitool_main.get_qtapp()
        # Write msg to text area
        outputEdit.moveCursor(QtGui.QTextCursor.End)
        # TODO: Find out how to do backspaces in textEdit
        msg = str(msg_)
        if msg.find('\b') != -1:
            msg = msg.replace('\b', '') + '\n'
        outputEdit.insertPlainText(msg)
        if app is not None:
            app.processEvents()

    @slot_()
    def gui_flush(outputEdit):
        # Process pending Qt events so freshly written text becomes visible
        app = guitool_main.get_qtapp()
        if app is not None:
            app.processEvents()
def get_cplat_tab_height():
    """Return the platform-appropriate tab bar height in pixels."""
    # OSX draws tabs shorter than other platforms
    return 21 if sys.platform.startswith('darwin') else 30
def get_view_selection_as_str(view):
    """
    Serialize the selected cells of a table ``view`` into a CSV-ish string:
    ', ' between cells on the same row, '\n' between rows. Returns None when
    nothing is selected.

    References:
        http://stackoverflow.com/questions/3135737/copying-part-of-qtableview
    """
    model = view.model()
    selection_model = view.selectionModel()
    qindex_list = selection_model.selectedIndexes()
    qindex_list = sorted(qindex_list)
    # print('[guitool_ibeis] %d cells selected' % len(qindex_list))
    if len(qindex_list) == 0:
        return
    copy_table = []
    previous = qindex_list[0]

    def astext(data):
        """ Helper which casts model data to a string """
        # Escape the separators so the output stays parseable
        if not isinstance(data, six.string_types):
            text = repr(data)
        else:
            text = str(data)
        return text.replace('\n', '<NEWLINE>').replace(',', '<COMMA>')
    #
    # Emit each cell followed by the separator chosen from the NEXT index
    for ix in range(1, len(qindex_list)):
        text = astext(model.data(previous))
        copy_table.append(text)
        qindex = qindex_list[ix]
        if qindex.row() != previous.row():
            copy_table.append('\n')
        else:
            copy_table.append(', ')
        previous = qindex
    # Do last element in list
    text = astext(model.data(qindex_list[-1]))
    copy_table.append(text)
    #copy_table.append('\n')
    copy_str = str(''.join(copy_table))
    return copy_str
def set_qt_object_names(dict_):
    """
    Hack to set qt object names from locals, vars, or general dict context
    """
    for name, obj in dict_.items():
        setter = getattr(obj, 'setObjectName', None)
        if setter is not None:
            setter(name)
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/guitool_misc.py",
"copies": "1",
"size": "7968",
"license": "apache-2.0",
"hash": 8828968360305911000,
"line_mean": 30.2470588235,
"line_max": 177,
"alpha_frac": 0.5857178715,
"autogenerated": false,
"ratio": 3.700882489549466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47866003610494656,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from guitool_ibeis.__PYQT__ import QtGui, QtCore  # NOQA
from guitool_ibeis.__PYQT__ import QtWidgets  # NOQA
#from guitool_ibeis import guitool_components
#(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[APIButtonWidget]', DEBUG=False)
import utool as ut
ut.noinject(__name__, '[api_timestamp_delegate]', DEBUG=False)
# Base class for the timestamp delegate below
DELEGATE_BASE = QtWidgets.QItemDelegate
#DELEGATE_BASE = QtWidgets.QStyledItemDelegate
class APITimestampDelegate(DELEGATE_BASE):
    """ Item delegate for timestamp columns.

    NOTE(review): ``paint`` currently only prints the cell data and draws
    nothing — looks like debugging scaffolding; confirm before relying on it.
    """
    def __init__(dgt, parent=None):
        assert parent is not None, 'parent must be a view'
        DELEGATE_BASE.__init__(dgt, parent)

    def paint(dgt, painter, option, qtindex):
        painter.save()
        data = qtindex.model().data(qtindex, QtCore.Qt.DisplayRole)
        print(data)
        painter.restore()
#def editorEvent(dgt, event, model, option, qtindex):
# event_type = event.type()
# if event_type == QtCore.QEvent.MouseButtonPress:
# # store the position that is clicked
# dgt._pressed = (qtindex.row(), qtindex.column())
# if utool.VERBOSE:
# print('store')
# return True
# elif event_type == QtCore.QEvent.MouseButtonRelease:
# if dgt.is_qtindex_pressed(qtindex):
# print('emit')
# dgt.button_clicked.emit(qtindex)
# pass
# elif dgt._pressed is not None:
# # different place.
# # force a repaint on the pressed cell by emitting a dataChanged
# # Note: This is probably not the best idea
# # but I've yet to find a better solution.
# print('repaint')
# oldIndex = qtindex.model().index(*dgt._pressed)
# dgt._pressed = None
# qtindex.model().dataChanged.emit(oldIndex, oldIndex)
# pass
# dgt._pressed = None
# #print('mouse release')
# return True
# elif event_type == QtCore.QEvent.Leave:
# print('leave')
# elif event_type == QtCore.QEvent.MouseButtonDblClick:
# print('doubleclick')
# else:
# print('event_type = %r' % event_type)
# return DELEGATE_BASE.editorEvent(dgt, event, model, option, qtindex)
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/api_timestamp_delegate.py",
"copies": "1",
"size": "2300",
"license": "apache-2.0",
"hash": 1071705599564313100,
"line_mean": 38.6551724138,
"line_max": 99,
"alpha_frac": 0.5865217391,
"autogenerated": false,
"ratio": 3.5881435257410295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4674665264841029,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from guitool_ibeis.__PYQT__ import QtGui, QtCore # NOQA
from guitool_ibeis.__PYQT__ import QtWidgets # NOQA
from guitool_ibeis import guitool_components
import utool
#(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[APIButtonWidget]', DEBUG=False)
import utool as ut
ut.noinject(__name__, '[api_button_delegate]', DEBUG=False)
#DELEGATE_BASE = QtWidgets.QItemDelegate
DELEGATE_BASE = QtWidgets.QStyledItemDelegate
def rgb_to_qcolor(rgb):
    """Build a QColor from the first three channels of an RGB(A) sequence."""
    channels = rgb[0:3]
    return QtGui.QColor(*channels)
def rgb_to_qbrush(rgb):
    """Build a QBrush whose color comes from an RGB(A) sequence."""
    qcolor = rgb_to_qcolor(rgb)
    return QtGui.QBrush(qcolor)
def paint_button(painter, option, text='button', pressed=True, bgcolor=None,
                 fgcolor=None, clicked=None, button=None, view=None):
    """ Draw a push-button look into a delegate cell.

    NOTE(review): ``bgcolor``, ``fgcolor``, ``clicked`` and ``view`` are
    accepted (they arrive via **butkw from APIButtonDelegate.paint) but are
    currently unused here — confirm before removing them.
    """
    #http://www.qtcentre.org/archive/index.php/t-31029.html
    opt = QtWidgets.QStyleOptionButton()
    opt.text = text
    opt.rect = option.rect
    opt.palette = option.palette
    # Sunken vs raised gives the pressed/unpressed visual state
    if pressed:
        opt.state = QtWidgets.QStyle.State_Enabled | QtWidgets.QStyle.State_Sunken
    else:
        opt.state = QtWidgets.QStyle.State_Enabled | QtWidgets.QStyle.State_Raised
    #style = QtGui.Q Application.style()
    # Use the throwaway button widget's style to render the control
    style = button.style()
    style.drawControl(QtWidgets.QStyle.CE_PushButton, opt, painter, button)
class APIButtonDelegate(DELEGATE_BASE):
    """ Delegate that renders model cells as clickable push buttons.

    The model supplies either a ``(text, callback[, bgcolor])`` tuple or a
    callable that maps a qtindex to such a tuple.  Click handling is done in
    ``editorEvent`` by tracking the pressed cell position.
    """
    button_clicked = QtCore.pyqtSignal(QtCore.QModelIndex)

    def __init__(dgt, parent=None):
        assert parent is not None, 'parent must be a view'
        DELEGATE_BASE.__init__(dgt, parent)
        # FIXME: I don't like this state in the delegate, as it renders all
        # buttons
        dgt._pressed = None
        dgt.button_clicked.connect(dgt.on_button_click)

    def get_index_butkw(dgt, qtindex):
        """ The model data for a button should be a (text, callback) tuple. OR
        it could be a function which accepts an qtindex and returns a button """
        data = qtindex.model().data(qtindex, QtCore.Qt.DisplayRole)
        # Get info
        if isinstance(data, tuple):
            buttontup = data
        elif utool.is_funclike(data):
            func = data
            buttontup = func(qtindex)
        else:
            raise AssertionError('bad type')
        text, callback = buttontup[0:2]
        butkw = {
            #'parent': dgt.parent(),
            'text': text,
            'clicked': callback,
        }
        # Optional third element is a background color
        if len(buttontup) > 2:
            butkw['bgcolor'] = buttontup[2]
            butkw['fgcolor'] = (0, 0, 0)
        return butkw

    def paint(dgt, painter, option, qtindex):
        painter.save()
        butkw = dgt.get_index_butkw(qtindex)
        # FIXME: I don't want to create a widget each time just
        # so I can access the button's style
        button = guitool_components.newButton(**butkw)
        pressed = dgt.is_qtindex_pressed(qtindex)
        view = dgt.parent()
        paint_button(painter, option, button=button, pressed=pressed,
                     view=view, **butkw)
        painter.restore()

    def is_qtindex_pressed(dgt, qtindex):
        # True iff the stored pressed position matches this cell
        return dgt._pressed is not None and dgt._pressed == (qtindex.row(), qtindex.column())

    @QtCore.pyqtSlot(QtCore.QModelIndex)
    def on_button_click(dgt, qtindex):
        # Resolve the callback fresh from the model and invoke it
        if utool.VERBOSE:
            print('pressed button')
        butkw = dgt.get_index_butkw(qtindex)
        callback = butkw['clicked']
        callback()

    def editorEvent(dgt, event, model, option, qtindex):
        # http://stackoverflow.com/questions/14585575/button-delegate-issue
        #print('editor event')
        event_type = event.type()
        if event_type == QtCore.QEvent.MouseButtonPress:
            # store the position that is clicked
            dgt._pressed = (qtindex.row(), qtindex.column())
            if utool.VERBOSE:
                print('store')
            return True
        elif event_type == QtCore.QEvent.MouseButtonRelease:
            # Only fire if press and release happened on the same cell
            if dgt.is_qtindex_pressed(qtindex):
                print('emit')
                dgt.button_clicked.emit(qtindex)
                pass
            elif dgt._pressed is not None:
                # different place.
                # force a repaint on the pressed cell by emitting a dataChanged
                # Note: This is probably not the best idea
                # but I've yet to find a better solution.
                print('repaint')
                oldIndex = qtindex.model().index(*dgt._pressed)
                dgt._pressed = None
                qtindex.model().dataChanged.emit(oldIndex, oldIndex)
                pass
            dgt._pressed = None
            #print('mouse release')
            return True
        elif event_type == QtCore.QEvent.Leave:
            print('leave')
        elif event_type == QtCore.QEvent.MouseButtonDblClick:
            print('doubleclick')
        else:
            print('event_type = %r' % event_type)
        return DELEGATE_BASE.editorEvent(dgt, event, model, option, qtindex)
## graveyard:
# #opt = QtWidgets.QStyleOptionViewItemV4(option)
# #opt.initFrom(button)
# #painter.drawRect(option.rect)
# #print(style)
# #if view is not None:
# #view.style
# ## FIXME: I cant set the colors!
# #if bgcolor is not None:
# # opt.palette.setCurrentColorGroup(QtGui.QPalette.Normal)
# # opt.palette.setBrush(QtGui.QPalette.Normal, QtGui.QPalette.Button, rgb_to_qbrush(bgcolor))
# # opt.palette.setBrush(QtGui.QPalette.Base, rgb_to_qbrush(bgcolor))
# # opt.palette.setBrush(QtGui.QPalette.Window, rgb_to_qbrush(bgcolor))
# # opt.palette.setBrush(QtGui.QPalette.ButtonText, rgb_to_qbrush(bgcolor))
# # #
# # opt.palette.setColor(QtGui.QPalette.Normal, QtGui.QPalette.Button, rgb_to_qcolor(bgcolor))
# # opt.palette.setColor(QtGui.QPalette.Base, rgb_to_qcolor(bgcolor))
# # opt.palette.setColor(QtGui.QPalette.Window, rgb_to_qcolor(bgcolor))
# # opt.palette.setColor(QtGui.QPalette.ButtonText, rgb_to_qcolor(bgcolor))
# #painter.setBrush(rgb_to_qbrush(bgcolor))
# #if fgcolor is not None:
# # opt.palette.setBrush(QtGui.QPalette.Normal, QtGui.QPalette.ButtonText, rgb_to_qbrush(fgcolor))
# #if not qtindex.isValid():
# # return None
# #if view.indexWidget(qtindex):
# # return
# #else:
# # view.setIndexWidget(qtindex, button)
# # # The view already has a button
# # # NOTE: this requires model::qtindex to be overwritten
# # # and return model.createIndex(row, col, object) where
# # # object is specified.
# # view.setIndexWidget(qtindex, None)
# # button = QtWidgets.QPushButton(text, view, clicked=view.cellButtonClicked)
# # pass
# # # dgt._pressed = (qtindex.row(), qtindex.column())
# # # return True
# # pass
# # else:
# # pass
# # # if dgt._pressed == (qtindex.row(), qtindex.column()):
# # # # we are at the same place, so emit
# # # dgt.button_clicked.emit(*dgt._pressed)
# # # elif dgt._pressed:
# # # # different place.
# # # # force a repaint on the pressed cell by emitting a dataChanged
# # # # Note: This is probably not the best idea
# # # # but I've yet to find a better solution.
# # # oldIndex = qtindex.model().qtindex(*dgt._pressed)
# # # dgt._pressed = None
# # # qtindex.model().dataChanged.emit(oldIndex, oldIndex)
# # # dgt._pressed = None
# # # return True
# # # else:
# # # default editor event;
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/api_button_delegate.py",
"copies": "1",
"size": "7686",
"license": "apache-2.0",
"hash": -4918361990995301000,
"line_mean": 39.6666666667,
"line_max": 104,
"alpha_frac": 0.5965391621,
"autogenerated": false,
"ratio": 3.473113420695888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559601140141987,
"avg_score": 0.00201028853078012,
"num_lines": 189
} |
from __future__ import absolute_import, division, print_function
from guitool_ibeis.__PYQT__ import QtGui, QtCore # NOQA
from guitool_ibeis.__PYQT__.QtCore import Qt
import utool
utool.noinject(__name__, '[APIItemView]', DEBUG=False)
#BASE_CLASS = QtGui.QAbstractProxyModel
# Prefer the sort/filter proxy; fall back to the identity proxy when the
# bound Qt version does not expose it under QtGui.
try:
    BASE_CLASS = QtGui.QSortFilterProxyModel
except Exception:
    # NOTE(review): assumes QIdentityProxyModel is available under QtCore
    # in the fallback bindings — verify for the targeted Qt version.
    BASE_CLASS = QtCore.QIdentityProxyModel
    # BASE_CLASS = QtGui.QIdentityProxyModel
class FilterProxyModel(BASE_CLASS):
    """ Proxy model that hides rows based on a type-name -> bool mapping.

    ``filter_dict`` maps the string found in source column 2 of each row to
    whether the row should be shown (missing keys default to shown).
    Several private methods are forwarded verbatim to the source model via
    the forwarding metaclass below.
    """
    __metaclass__ = utool.makeForwardingMetaclass(
        lambda self: self.sourceModel(),
        ['_set_context_id', '_get_context_id', '_set_changeblocked',
         '_get_changeblocked', '_about_to_change', '_change', '_update',
         '_rows_updated', 'name', 'get_header_name'],
        base_class=BASE_CLASS)

    def __init__(self, parent=None):
        BASE_CLASS.__init__(self, parent=parent)
        # row-type-name -> bool; consulted by filterAcceptsRow
        self.filter_dict = {}

    def proxy_to_source(self, row, col, parent=QtCore.QModelIndex()):
        # Identity mapping; filtering is done by filterAcceptsRow
        r2, c2, p2 = row, col, parent
        return r2, c2, p2

    def source_to_proxy(self, row, col, parent=QtCore.QModelIndex()):
        # Identity mapping (inverse of proxy_to_source)
        r2, c2, p2 = row, col, parent
        return r2, c2, p2

    def mapToSource(self, proxyIndex):
        """ returns index into original model """
        if proxyIndex is None:
            return None
        if proxyIndex.isValid():
            # NOTE(review): parent is not forwarded here, so p2 is always the
            # default root index — verify for tree-shaped sources.
            r2, c2, p2 = self.proxy_to_source(proxyIndex.row(), proxyIndex.column())
            sourceIndex = self.sourceModel().index(r2, c2, parent=p2)  # self.sourceModel().root_node[r2]
        else:
            sourceIndex = QtCore.QModelIndex()
        return sourceIndex

    def mapFromSource(self, sourceIndex):
        """ returns index into proxy model """
        if sourceIndex is None:
            return None
        if sourceIndex.isValid():
            r2, c2, p2 = self.source_to_proxy(sourceIndex.row(), sourceIndex.column(), sourceIndex.parent())
            proxyIndex = self.index(r2, c2, p2)
        else:
            proxyIndex = QtCore.QModelIndex()
        return proxyIndex

    def filterAcceptsRow(self, source_row, source_parent):
        # Row visibility is keyed on the string in source column 2
        source = self.sourceModel()
        row_type = str(source.data(source.index(source_row, 2, parent=source_parent)))
        #print('%r \'%r\'' % (source_row, row_type))
        #print(self.filter_dict)
        rv = self.filter_dict.get(row_type, True)
        #print('return value %r' % rv)
        return rv

    def index(self, row, col, parent=QtCore.QModelIndex()):
        # NOTE(review): ``parent`` is passed as createIndex's third argument
        # (the internal-pointer slot) — verify this is intentional.
        if (row, col) != (-1, -1):
            proxyIndex = self.createIndex(row, col, parent)
        else:
            proxyIndex = QtCore.QModelIndex()
        return proxyIndex

    def data(self, proxyIndex, role=Qt.DisplayRole, **kwargs):
        sourceIndex = self.mapToSource(proxyIndex)
        return self.sourceModel().data(sourceIndex, role, **kwargs)

    def setData(self, proxyIndex, value, role=Qt.EditRole):
        sourceIndex = self.mapToSource(proxyIndex)
        return self.sourceModel().setData(sourceIndex, value, role)

    def sort(self, column, order):
        # Sorting is delegated entirely to the source model
        self.sourceModel().sort(column, order)

    def parent(self, index):
        return self.sourceModel().parent(self.mapToSource(index))

    def get_header_data(self, colname, proxyIndex):
        #print('[guitool_ibeis] calling default map to source')
        #print('[guitool_ibeis] proxyIndex=%r' % proxyIndex)
        #proxy_keys = dir(proxyIndex)
        #proxy_vals = [getattr(proxyIndex, key) for key in proxy_keys]
        #proxy_dict = dict(zip(proxy_keys, proxy_vals))
        #print('[guitool_ibeis] proxyIndex.__dict__=%s' % utool.repr2(proxy_dict))
        #utool.embed()
        #sourceIndex = BASE_CLASS.mapToSource(self, proxyIndex)
        sourceIndex = self.mapToSource(proxyIndex)
        #print('[guitool_ibeis] calling set header')
        ret = self.sourceModel().get_header_data(colname, sourceIndex)
        #print('[guitool_ibeis] finished')
        return ret

    def update_filterdict(self, new_dict):
        # Replace the visibility mapping wholesale
        self.filter_dict = new_dict

    def _update_rows(self):
        return self.sourceModel()._update_rows()

    def _get_row_id(self, proxyIndex):
        return self.sourceModel()._get_row_id(self.mapToSource(proxyIndex))
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/filter_proxy_model.py",
"copies": "1",
"size": "4238",
"license": "apache-2.0",
"hash": 9096966263062906000,
"line_mean": 37.5272727273,
"line_max": 108,
"alpha_frac": 0.6309579991,
"autogenerated": false,
"ratio": 3.5228595178719866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4653817516971987,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
#from guitool_ibeis.__PYQT__.QtCore import Qt
import six
from guitool_ibeis.__PYQT__.QtCore import QLocale
import utool as ut
import uuid
import numpy as np
from guitool_ibeis.__PYQT__ import QtGui
from guitool_ibeis.guitool_decorators import checks_qt_error
#if six.PY2:
# from guitool_ibeis.__PYQT__.QtCore import QString
# from guitool_ibeis.__PYQT__.QtCore import QVariant
#elif six.PY3:
QVariant = None
__STR__ = unicode if six.PY2 else str
QString = __STR__
(print, rrr, profile) = ut.inject2(__name__)
SIMPLE_CASTING = True
# Qt.ItemDataRole enum values -> human-readable names.
# FIX: the obsolete aliases BackgroundColorRole/TextColorRole reused keys 8
# and 9 and silently overwrote BackgroundRole/ForegroundRole in the dict
# literal; keep the canonical names and record the aliases in comments.
ItemDataRoles = {
    0  : 'DisplayRole',                # key data to be rendered in the form of text. (QString)
    1  : 'DecorationRole',             # data to be rendered as an icon. (QColor, QIcon or QPixmap)
    2  : 'EditRole',                   # data in a form suitable for editing in an editor. (QString)
    3  : 'ToolTipRole',                # data displayed in the item's tooltip. (QString)
    4  : 'StatusTipRole',              # data displayed in the status bar. (QString)
    5  : 'WhatsThisRole',              # data displayed in "What's This?" mode. (QString)
    6  : 'FontRole',                   # font used for items rendered with default delegate. (QFont)
    7  : 'TextAlignmentRole',          # text alignment of items with default delegate. (Qt::AlignmentFlag)
    8  : 'BackgroundRole',             # background brush for items with default delegate. (QBrush) (obsolete alias: BackgroundColorRole)
    9  : 'ForegroundRole',             # foreground brush for items rendered with default delegate. (QBrush) (obsolete alias: TextColorRole)
    10 : 'CheckStateRole',             # checked state of an item. (Qt::CheckState)
    11 : 'AccessibleTextRole',         # text used by accessibility extensions and plugins (QString)
    12 : 'AccessibleDescriptionRole',  # accessibe description of the item for (QString)
    13 : 'SizeHintRole',               # size hint for item that will be supplied to views. (QSize)
    14 : 'InitialSortOrderRole',       # initial sort order of a header view (Qt::SortOrder).
    32 : 'UserRole',                   # first role that can be used for application-specific purposes.
}
LOCALE = QLocale()
# Custom types of data that can be displayed (usually be a delegate)
QT_PIXMAP_TYPES = set((QtGui.QPixmap, 'PIXMAP'))
QT_ICON_TYPES = set((QtGui.QIcon, 'ICON'))
QT_BUTTON_TYPES = set(('BUTTON',))
QT_COMBO_TYPES = set(('COMBO',))
QT_IMAGE_TYPES = set(list(QT_PIXMAP_TYPES) + list(QT_ICON_TYPES))
# A set of all delegate types
QT_DELEGATE_TYPES = set(list(QT_IMAGE_TYPES) + list(QT_BUTTON_TYPES) + list(QT_COMBO_TYPES))
def qindexinfo(index):
    """Return ``(text, row, col)`` extracted from a Qt model index."""
    variant = index.data()
    if SIMPLE_CASTING:
        text = __STR__(variant)
    else:
        text = __STR__(variant.toString())
    return (text, index.row(), index.column())
#def format_float(data):
# #argument_format = {
# # 'e': format as [-]9.9e[+|-]999
# # 'E': format as [-]9.9E[+|-]999
# # 'f': format as [-]9.9
# # 'g': use e or f format, whichever is the most concise
# # 'G': use E or f format, whichever is the most concise
# #}
# data = 1000000
# print(ut.repr2({
# 'out1': __STR__(QString.number(float(data), format='g', precision=8))
# }))
# QLocale(QLocale.English).toString(123456789, 'f', 2)
def numpy_to_qpixmap(npimg):
    """Convert a numpy image into a QPixmap (as RGB888 data).

    NOTE: assumes ``npimg`` is an (H, W, 3) uint8-compatible array — confirm
    at call sites.
    """
    height, width = npimg.shape[0:2]
    byte_data = npimg.astype(np.uint8)
    qimg = QtGui.QImage(byte_data, width, height, QtGui.QImage.Format_RGB888)
    return QtGui.QPixmap.fromImage(qimg)
def numpy_to_qicon(npimg):
    """Convert a numpy image into a QIcon (via an intermediate QPixmap)."""
    return QtGui.QIcon(numpy_to_qpixmap(npimg))
def locale_float(float_, precision=4):
    """
    Format a float for display using the module-level locale.

    References:
        http://qt-project.org/doc/qt-4.8/qlocale.html#toString-9
    """
    value = float(float_)
    return LOCALE.toString(value, format='g', precision=precision)
#@profile
def cast_into_qt(data):
    """
    Casts python data into a representation suitable for QT (usually a string)
    """
    # FIX: the SIMPLE_CASTING branch and the fallback duplicated the same
    # type-dispatch chain verbatim; merge them.  The order of checks is
    # preserved exactly, and the extra None / unknown-type handling only
    # applies when SIMPLE_CASTING is disabled (with SIMPLE_CASTING on, None
    # falls through to __STR__ and still yields 'None').
    if ut.is_str(data):
        return __STR__(data)
    elif ut.is_float(data):
        #qnumber = QString.number(float(data), format='g', precision=8)
        return locale_float(data)
    elif ut.is_bool(data):
        return bool(data)
    elif ut.is_int(data):
        return int(data)
    elif isinstance(data, uuid.UUID):
        return __STR__(data)
    elif ut.isiterable(data):
        return ', '.join(map(__STR__, data))
    elif SIMPLE_CASTING:
        # Simple mode: anything else is stringified
        return __STR__(data)
    elif data is None:
        return 'None'
    else:
        return 'Unknown qtype: %r for data=%r' % (type(data), data)
@checks_qt_error
def cast_from_qt(var, type_=None):
    """ Casts a QVariant to data """
    if SIMPLE_CASTING:
        if var is None:
            return None
        if type_ is not None:
            return ut.smart_cast(__STR__(var), type_)
        return var
    # TODO: sip api v2 should take care of this.
# TODO: sip api v2 should take care of this.
def infer_coltype(column_list):
    """ Infer Column datatypes """
    try:
        return [type(column[0]) for column in column_list]
    except Exception:
        # Fall back to string columns when any column is empty
        return [__STR__] * len(column_list)
def to_qcolor(color):
    """Convert an RGB(A) sequence into a QColor (only first 3 channels used)."""
    channels = color[0:3]
    return QtGui.QColor(*channels)
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/qtype.py",
"copies": "1",
"size": "5917",
"license": "apache-2.0",
"hash": -2071116270386251800,
"line_mean": 33.0057471264,
"line_max": 100,
"alpha_frac": 0.6195707284,
"autogenerated": false,
"ratio": 3.258259911894273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9351713329420761,
"avg_score": 0.0052234621747024495,
"num_lines": 174
} |
from __future__ import absolute_import, division, print_function
from h2o.frame import H2OFrame
import pandas as pd
from .base import check_frame
from ..utils import flatten_all
__all__ = [
'_check_is_1d_frame',
'as_series',
'is_numeric',
'is_integer',
'is_float',
'value_counts'
]
def _check_is_1d_frame(X):
    """Validate that ``X`` is a single-column H2OFrame.

    Parameters
    ----------
    X : H2OFrame, shape=(n_samples, 1)
        The H2OFrame to check

    Raises
    ------
    AssertionError if the ``X`` variable
    is not a 1-dimensional H2OFrame.

    Returns
    -------
    X : H2OFrame, shape=(n_samples, 1)
        The frame if is 1d
    """
    frame = check_frame(X, copy=False)
    assert frame.shape[1] == 1, 'expected 1d H2OFrame'
    return frame
def as_series(x):
    """Coerce a single-column H2OFrame into a pandas Series.

    Parameters
    ----------
    x : ``H2OFrame``, shape=(n_samples, 1)
        The H2OFrame

    Returns
    -------
    Pandas ``Series``, shape=(n_samples,)
        The pandas series
    """
    frame = _check_is_1d_frame(x)
    colname = frame.columns[0]
    return frame.as_data_frame(use_pandas=True)[colname]
def is_numeric(x):
    """Determine whether a 1d H2OFrame is numeric.

    Parameters
    ----------
    x : H2OFrame, shape=(n_samples, 1)
        The H2OFrame

    Returns
    -------
    bool : True if numeric, else False
    """
    _check_is_1d_frame(x)
    flags = flatten_all(x.isnumeric())
    return flags[0]
def is_integer(x):
    """Determine whether a 1d H2OFrame holds only integer values.

    Parameters
    ----------
    x : H2OFrame, shape=(n_samples, 1)
        The H2OFrame

    Returns
    -------
    bool : True if integers, else False
    """
    _check_is_1d_frame(x)
    if not is_numeric(x):
        return False
    # Integer iff rounding to 0 digits changes nothing
    residual = x.round(digits=0) - x
    return residual.sum() == 0
def is_float(x):
    """Determine whether a 1d H2OFrame holds non-integer numeric values.

    Parameters
    ----------
    x : H2OFrame, shape=(n_samples, 1)
        The H2OFrame

    Returns
    -------
    bool : True if float, else False
    """
    _check_is_1d_frame(x)
    numeric = is_numeric(x)
    # Float == numeric but not all-integer
    return numeric and not is_integer(x)
def value_counts(x):
    """Compute a Pandas-esque ``value_counts`` on a 1d H2OFrame.

    Parameters
    ----------
    x : H2OFrame, shape=(n_samples, 1)
        The H2OFrame

    Returns
    -------
    pd.Series
        Counts of each distinct value
    """
    series = as_series(_check_is_1d_frame(x))
    return series.value_counts()
| {
"repo_name": "tgsmith61591/skutil",
"path": "skutil/h2o/frame.py",
"copies": "1",
"size": "2578",
"license": "bsd-3-clause",
"hash": -8104464553022159000,
"line_mean": 17.1549295775,
"line_max": 64,
"alpha_frac": 0.5589604344,
"autogenerated": false,
"ratio": 3.2632911392405064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4322251573640506,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from horton import *
import numpy as np
from itertools import *
from math import *
from random import *
class Fields(object):
    """Generate sets of external-field vectors used for function fitting."""

    def __init__(self, mol, samples=50, F=0.00005, x=2**0.5, f=None):
        '''
        This class generate fields needed for function fitting

        **Arguments:**

        mol
            molecule object (not used in the constructor; kept for
            interface compatibility)

        samples
            number of sample points in each sphere shell

        F
            the initial field value

        x
            the interval of field

        f
            optional explicit list of shell radii; generated from F and x
            when omitted
        '''
        self.samples = samples
        self.F = F
        self.x = x
        if f is None:
            # Geometric progression of shell radii: F, F*x, F*x**2, ...
            self.f = [self.F * self.x ** i for i in range(5)]
        else:
            # FIX: ``self.f`` was never assigned when an explicit ``f`` was
            # passed, causing an AttributeError later in ``fields()``.
            self.f = f

    def get_l(self, mol):
        '''
        This function mesure the x, y, z distance compoent between
        all possible atom pairs and return back the biggest distance

        **Arguments:**

        mol
            object moleculer come from horton
        '''
        # NOTE(review): ``i`` iterates over atoms (mol.numbers) but slices
        # ``coordinates[:, i]`` (a spatial column); verify the intended axis.
        # Behavior is kept as-is.
        l = []
        for i, term in enumerate(mol.numbers):
            tmp = mol.coordinates[:, i]
            column = []
            for term in combinations(tmp, 2):
                bili = 2.6 + abs(term[0] - term[1])
                column.append(bili)
            l.append(max(column))
        return l

    def polar_coordinate(self, order):
        '''
        This function create a random point on a n dimensional hypersphere
        in polar coordinate

        **Arguments:**

        order
            field order
        '''
        phi = []
        for i in order:
            for term in combinations_with_replacement((0, 1, 2), i):
                phi.append(uniform(0, pi))
        # n-sphere convention: drop one angle, and let the final (azimuthal)
        # angle span [0, 2*pi)
        phi = phi[0:-1]
        phi[-1] = phi[-1] * 2
        return phi

    def sphere_list(self, order, r):
        '''
        This funciton return back a n dimensional hyper sphere field list

        **Arguments:**

        order
            field order

        r
            a list of radius of several sphere shells
            [F0, F0*x, F0*x**2, F0*x**3 ...]
        '''
        field = []
        phi = self.polar_coordinate(order)
        for i, angle in enumerate(phi):
            if i == 0:
                x = r
                x = x * cos(phi[i])
                field.append(x)
            elif i > 0 and i < len(phi) - 1:
                x = r
                for j in range(i):
                    x = x * sin(phi[j])
                x = x * cos(phi[i])
                field.append(x)
            elif i == len(phi) - 1:
                x = r
                for j in range(i):
                    x = x * sin(phi[j])
                x = x * cos(phi[i])
                field.append(x)
                # last cartesian component: product of all sines
                x = r
                for j in range(i + 1):
                    x = x * sin(phi[j])
                field.append(x)
        return field

    def ellipse_list(self, order, r, mol):
        '''
        This funciton return back a n dimensional hyper ellipse field list

        **Arguments:**

        order
            field order

        r
            a list of radius of several sphere shells
            [F0, F0*x, F0*x**2, F0*x**3 ...]

        mol
            object moleculer come from horton
        '''
        l = self.get_l(mol)
        field = self.sphere_list(order, r)
        # Scale each component by the product of molecular extents for the
        # axes appearing in its multi-index
        c = 0
        for i in order:
            for term in combinations_with_replacement((0, 1, 2), i):
                tmp = 1
                for j in term:
                    tmp *= l[j]
                field[c] *= tmp
                c += 1
        return field

    def fields(self, order, mol):
        '''
        This function return back a set of fields with certain field
        pattern

        **Arguments:**

        order
            field order

        mol
            object moleculer come from horton
        '''
        n = 0
        for i in order:
            # FIX: use floor division; with ``from __future__ import
            # division`` the old ``/`` made ``n`` a float, which breaks
            # ``np.zeros(n)`` on modern numpy.
            n += (i + 1) * (i + 2) // 2
        fields = [np.zeros(n)]
        for r in self.f:
            for i in range(self.samples):
                tmp = self.ellipse_list(order, r, mol)
                fields.append(np.array(tmp))
        return fields
| {
"repo_name": "fhqgfss/polar",
"path": "polar/fields_generate.py",
"copies": "1",
"size": "4075",
"license": "mit",
"hash": -3923450697741510000,
"line_mean": 23.5481927711,
"line_max": 75,
"alpha_frac": 0.4645398773,
"autogenerated": false,
"ratio": 4.162410623084781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.512695050038478,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from itertools import izip
import utool
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAbstractItemView
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[rowidtables]', DEBUG=False)
from ibeis.control import DB_SCHEMA
USER_MODE = utool.get_flag('--usermode')
# Define which columns are usable in tables:
# Specified in (type, header, fancy_header) format
# NOTE: the order of this list drives the parallel col_* lists built below.
COLUMN_DEFS = [
    (int,   'gid',        'Image ID'),
    (int,   'rid',        'ROI ID'),
    (int,   'nid',        'Name ID'),
    (int,   'imgsetid',   'ImageSet ID'),
    (int,   'nRids',      '#ROIs'),
    (int,   'nGt',        '#GT'),
    (int,   'nFeats',     '#Features'),
    (str,   'rank',       'Rank'),  # needs to be a string for !Query
    (float, 'unixtime',   'unixtime'),
    (str,   'imagesettext', 'ImageSet'),
    (str,   'gname',      'Image Name'),
    (str,   'name',       'Name'),
    (str,   'notes',      'Notes'),
    (str,   'match_name', 'Matching Name'),
    (str,   'bbox',       'BBOX (x, y, w, h)'),  # Non editables are safe as strs
    (str,   'score',      'Confidence'),
    (str,   'theta',      'Theta'),
    (bool,  'aif',        'All Detected'),
]
def _datatup_cols(ibs, tblname, cx2_score=None):
    """
    Returns maps which map which maps internal column names
    to lazy evaluation functions which compute the data (hence the lambdas)

    Each value takes a list of row ids and returns that column's values;
    nothing is fetched from ``ibs`` until the lambda is invoked.
    """
    printDBG('[gui] _datatup_cols()')
    # Return requested columns
    # TODO: Use partials here?
    if tblname == NAME_TABLE:
        cols = {
            'nid':   lambda nids: nids,
            'name':  lambda nids: ibs.get_names(nids),
            'nRids': lambda nids: ibs.get_name_num_rois(nids),
            'notes': lambda nids: ibs.get_name_notes(nids),
        }
    elif tblname == IMAGE_TABLE:
        cols = {
            'gid':      lambda gids: gids,
            'imgsetid':      lambda gids: ibs.get_image_imgsetids(gids),
            'imagesettext': lambda gids: map(utool.tupstr, ibs.get_image_imagesettext(gids)),
            'aif':      lambda gids: ibs.get_image_aifs(gids),
            'gname':    lambda gids: ibs.get_image_gnames(gids),
            'nRids':    lambda gids: ibs.get_image_num_rois(gids),
            'unixtime': lambda gids: ibs.get_image_unixtime(gids),
            'notes':    lambda nids: ibs.get_image_notes(nids),
        }
    elif tblname in [ROI_TABLE, QRES_TABLE]:
        # ROI_TBL_COLS \subset RES_TBL_COLS
        cols = {
            'rid':    lambda rids: rids,
            'name':   lambda rids: ibs.get_roi_names(rids),
            'gname':  lambda rids: ibs.get_roi_gnames(rids),
            'nGt':    lambda rids: ibs.get_roi_num_groundtruth(rids),
            'theta':  lambda rids: map(utool.theta_str, ibs.get_roi_thetas(rids)),
            'bbox':   lambda rids: map(str, ibs.get_roi_bboxes(rids)),
            'nFeats': lambda rids: ibs.get_roi_num_feats(rids),
            'notes':  lambda rids: ibs.get_roi_notes(rids),
        }
        if tblname == QRES_TABLE:
            # But result table has extra cols
            cols.update({
                'rank':   lambda rids:  utool.padded_str_range(1, len(rids) + 1),
            })
    else:
        # Unknown table -> no columns
        cols = {}
    return cols
# Parallel lists derived from COLUMN_DEFS (order matters)
col_type_list = [tup[0] for tup in COLUMN_DEFS]
col_header_list = [tup[1] for tup in COLUMN_DEFS]
col_fancyheader_list = [tup[2] for tup in COLUMN_DEFS]
# Mappings from (internal) header to (user-seen) fancy header
fancy_headers = dict(izip(col_header_list, col_fancyheader_list))
# Mapping from fancy header to header
reverse_fancy = dict(izip(col_fancyheader_list, col_header_list))
# Mapping from header to type
header_typemap = dict(izip(col_header_list, col_type_list))
# Different python types uuids can be
# We are basically just using int as the UID type now
# We aren't even messing with UUIDs here anymore
# TODO: Clean this section of code up!
UID_TYPE = int
# Maps SQL schema type names to the Qt-facing python type
schema_qt_typemap = {
    'INTEGER': int,
    'UUID': str,
}
# Specialize table rowid types
QT_IMAGE_UID_TYPE = schema_qt_typemap[DB_SCHEMA.IMAGE_UID_TYPE]
QT_ROI_UID_TYPE = schema_qt_typemap[DB_SCHEMA.ROI_UID_TYPE]
QT_NAME_UID_TYPE = schema_qt_typemap[DB_SCHEMA.NAME_UID_TYPE]
def qt_cast(qtinput):
    """ Cast from Qt types to Python types

    Raises:
        ValueError: when ``qtinput`` is not a recognized Qt or python type.
    """
    #printDBG('Casting qtinput=%r' % (qtinput,))
    if isinstance(qtinput, QtCore.QVariant):
        if qtinput.typeName() == 'bool':
            qtoutput = bool(qtinput.toBool())
        elif qtinput.typeName() == 'QString':
            qtoutput = str(qtinput.toString())
        else:
            # FIX: an unhandled QVariant typeName previously fell through and
            # raised UnboundLocalError on ``qtoutput``; raise the intended
            # descriptive error instead.
            raise ValueError('Unknown QtType: type(qtinput)=%r, qtinput=%r' %
                             (type(qtinput), qtinput))
    elif isinstance(qtinput, QtCore.QString):
        qtoutput = str(qtinput)
    #elif isinstance(qtinput, (int, long, str, float)):
    elif isinstance(qtinput, (int, str, unicode)):
        return qtinput
    else:
        raise ValueError('Unknown QtType: type(qtinput)=%r, qtinput=%r' %
                         (type(qtinput), qtinput))
    return qtoutput
def qt_imagesettext_cast(imagesettext):
    """ Convert a Qt-supplied imageset text into a sanitized Python value.

    The placeholder strings 'None', '' and 'database' are normalized to
    None, meaning "no imageset".
    """
    if imagesettext is None:
        return None
    text = qt_cast(imagesettext)
    # Sanitize placeholder values down to None
    return None if text in ('None', '', 'database') else text
# Table names (should reflect SQL tables)
IMAGE_TABLE = 'gids'
ROI_TABLE = 'rids'
NAME_TABLE = 'nids'
QRES_TABLE = 'qres'
# TABLE DEFINITIONS
# Each entry is (tblname, fancyname, headers, editable_headers), where
# `headers` are the internal column names displayed by default and
# `editable_headers` is the subset the user may edit in the GUI.
# tblname, fancyname, headers, editable_headers
TABLE_DEF = [
    (IMAGE_TABLE, 'Image Table',
     ['gid', 'gname', 'nRids', 'aif', 'notes', 'imagesettext', 'unixtime'],
     ['notes', 'aif']),
    (ROI_TABLE, 'ROIs Table',
     ['rid', 'name', 'gname', 'nGt', 'nFeats', 'bbox', 'theta', 'notes'],
     ['name', 'notes']),
    (NAME_TABLE, 'Name Table',
     ['nid', 'name', 'nRids', 'notes'],
     ['name', 'notes']),
    (QRES_TABLE, 'Query Results Table',
     ['rank', 'score', 'name', 'rid'],
     ['name']),
]
# Unzip TABLE_DEF into parallel per-table lists.
tblname_list = [tup[0] for tup in TABLE_DEF]
fancytblname_list = [tup[1] for tup in TABLE_DEF]
tblheaders_list = [tup[2] for tup in TABLE_DEF]
tbleditables_list = [tup[3] for tup in TABLE_DEF]
# A list of default internal headers to display
table_headers = dict(izip(tblname_list, tblheaders_list))
table_editable = dict(izip(tblname_list, tbleditables_list))
fancy_tablenames = dict(izip(tblname_list, fancytblname_list))
# User mode displays a simplified ROI table.
if USER_MODE:
    table_headers[ROI_TABLE] = ['rid', 'name', 'gname', 'nGt', 'notes']
def _get_datatup_list(ibs, tblname, index_list, header_order, extra_cols):
    """
    Used by guiback to get lists of datatuples by internal column names.

    Builds one column of values per header in `header_order` (via the
    getters from `_datatup_cols`, overridden by `extra_cols`) and then
    transposes them into per-row tuples.
    """
    getters = _datatup_cols(ibs, tblname)
    getters.update(extra_cols)

    def _unknown_column(indexes):
        # Placeholder column for headers with no registered getter
        return ['ERROR!' for _ in indexes]

    columns = [getters.get(header, _unknown_column)(index_list)
               for header in header_order]
    # Transpose column-major data into row tuples
    return list(izip(*columns))
def make_header_lists(tbl_headers, editable_list, prop_keys=None):
    """ Build parallel lists of column headers and per-column editability.

    Parameters
    ----------
    tbl_headers : list
        Internal header names for the table columns.
    editable_list : list
        Subset of `tbl_headers` that should be user-editable.
    prop_keys : list, optional
        Extra property columns appended after the table headers; these
        are always editable.

    Returns
    -------
    (col_headers, col_editable) : (list, list of bool)

    Raises
    ------
    ValueError
        If an entry of `editable_list` is not present in the headers.
    """
    # BUGFIX: avoid the mutable default argument ``prop_keys=[]``
    if prop_keys is None:
        prop_keys = []
    col_headers = tbl_headers[:] + prop_keys
    col_editable = [False] * len(tbl_headers) + [True] * len(prop_keys)
    for header in editable_list:
        col_editable[col_headers.index(header)] = True
    return col_headers, col_editable
def _get_table_headers_editable(tblname):
    """ Look up the default headers and editable flags for `tblname`. """
    headers = table_headers[tblname]
    editable = table_editable[tblname]
    printDBG('headers = %r ' % headers)
    printDBG('editable = %r ' % editable)
    # Expand into parallel (header, editable-flag) lists
    return make_header_lists(headers, editable)
def _get_table_datatup_list(ibs, tblname, col_headers, col_editable,
                            extra_cols=None, index_list=None, prefix_cols=None,
                            **kwargs):
    """ Build the list of row datatuples backing a table view.

    Parameters
    ----------
    ibs : controller object providing the get_* accessors
    tblname : str
        One of IMAGE_TABLE, ROI_TABLE, NAME_TABLE, QRES_TABLE.
    col_headers : list of internal column names
    col_editable : list of bool (unused here; kept for interface parity)
    extra_cols : dict, optional
        Extra header -> getter overrides for `_get_datatup_list`.
    index_list : list, optional
        Explicit row ids; when None, valid ids are queried from `ibs`.
    prefix_cols : list of dict, optional
        Rows (header -> value) to prepend before the body rows.

    Returns
    -------
    list of row tuples
    """
    # BUGFIX: replaced mutable default arguments ({} and []) with None
    if extra_cols is None:
        extra_cols = {}
    if prefix_cols is None:
        prefix_cols = []
    imagesettext = kwargs.get('imagesettext')
    if index_list is None:
        # Sentinel imageset strings mean "no imageset filter"
        if imagesettext is None or imagesettext == '' or imagesettext == 'None':
            imgsetid = None
        else:
            imgsetid = ibs.get_imageset_imgsetids(imagesettext)
        index_list = ibs.get_valid_ids(tblname, imgsetid=imgsetid)
    printDBG('[tables] len(index_list) = %r' % len(index_list))
    # Prefix datatup
    prefix_datatup = [[prefix_col.get(header, 'error')
                       for header in col_headers]
                      for prefix_col in prefix_cols]
    body_datatup = _get_datatup_list(ibs, tblname, index_list,
                                     col_headers, extra_cols)
    return prefix_datatup + body_datatup
def emit_populate_table(back, tblname, *args, **kwargs):
    """
    Gather headers, column types, and row data for ``tblname`` and emit
    ``populateTableSignal`` so the GUI thread can fill the table widget.

    Parameters
    ----------
    back : guiback object exposing ``ibs`` and ``populateTableSignal``
    tblname : str
        One of IMAGE_TABLE, ROI_TABLE, NAME_TABLE, QRES_TABLE.
    """
    printDBG('>>>>>>>>>>>>>>>>>>>>>')
    printDBG('[rowidtbls] _populate_table(%r)' % tblname)
    col_headers, col_editable = _get_table_headers_editable(tblname)
    #printDBG('[rowidtbls] col_headers = %r' % col_headers)
    #printDBG('[rowidtbls] col_editable = %r' % col_editable)
    imagesettext = kwargs.get('imagesettext', '')
    datatup_list = _get_table_datatup_list(back.ibs, tblname, col_headers,
                                           col_editable, *args, **kwargs)
    #printDBG('[rowidtbls] datatup_list = %r' % datatup_list)
    row_list = range(len(datatup_list))
    # Populate with fancyheaders.
    col_fancyheaders = [fancy_headers.get(key, key) for key in col_headers]
    col_types = [header_typemap.get(key) for key in col_headers]
    printDBG('[rowidtbls] populateTableSignal.emit(%r, len=%r)' %
             (tblname, len(col_fancyheaders)))
    back.populateTableSignal.emit(tblname,
                                  col_fancyheaders,
                                  col_editable,
                                  col_types,
                                  row_list,
                                  datatup_list,
                                  imagesettext)
def _type_from_data(data):
    """ If type is not given make an educated guess """
    if utool.is_bool(data) or data in ('True', 'False'):
        return bool
    if utool.is_int(data):
        return int
    if utool.is_float(data):
        return float
    return str
def populate_item_table(tbl,
                        col_fancyheaders,
                        col_editable,
                        col_types,
                        row_list,
                        datatup_list):
    """
    Fill a QTableWidget with the given rows, preserving the user's sort.

    Parameters
    ----------
    tbl : QTableWidget to populate
    col_fancyheaders : list of user-visible column titles
    col_editable : list of bool, per-column editability
    col_types : list of python types, one per column
    row_list : iterable of row indices into ``datatup_list``
    datatup_list : list of row tuples, one value per column
    """
    # TODO: for chip table: delete metedata column
    # RCOS TODO:
    # I have a small right-click context menu working
    # Maybe one of you can put some useful functions in these?
    # RCOS TODO: How do we get the clicked item on a right click?
    # RCOS TODO:
    # The data tables should not use the item model
    # Instead they should use the more efficient and powerful
    # QAbstractItemModel / QAbstractTreeModel
    # Remember the current sort so it can be restored after repopulating.
    hheader = tbl.horizontalHeader()
    sort_col = hheader.sortIndicatorSection()
    sort_ord = hheader.sortIndicatorOrder()
    tbl.sortByColumn(0, Qt.AscendingOrder)  # Basic Sorting
    # Suppress signals while rebuilding so no cell-changed slots fire.
    tblWasBlocked = tbl.blockSignals(True)
    tbl.clear()
    tbl.setColumnCount(len(col_fancyheaders))
    tbl.setRowCount(len(row_list))
    tbl.verticalHeader().hide()
    tbl.setHorizontalHeaderLabels(col_fancyheaders)
    tbl.setSelectionMode(QAbstractItemView.SingleSelection)
    tbl.setSelectionBehavior(QAbstractItemView.SelectRows)
    # Sorting must stay disabled during inserts or rows shuffle mid-populate.
    tbl.setSortingEnabled(False)
    # Add items for each row and column
    for row in iter(row_list):
        datatup = datatup_list[row]
        for col, data in enumerate(datatup):
            #type_ = _type_from_data(data)
            type_ = col_types[col]
            item = QtWidgets.QTableWidgetItem()
            try:
                if data is None:
                    # Default case to handle None inputs
                    item.setText(str(data))
                elif type_ == bool:
                    # Booleans render as a checkbox rather than text.
                    check_state = Qt.Checked if bool(data) else Qt.Unchecked
                    item.setCheckState(check_state)
                    item.setData(Qt.DisplayRole, bool(data))
                elif type_ == int:
                    item.setData(Qt.DisplayRole, int(data))
                elif type_ == float:
                    item.setData(Qt.DisplayRole, float(data))
                elif type_ == str:
                    item.setText(str(data))
                elif type_ == list:
                    item.setText(str(data))
                else:
                    raise Exception('Unknown datatype:' + repr(type_) +
                                    'has the type of this column been defined?')
                # Mark as editable or not
                if col_editable[col] and type_ != bool:
                    item.setFlags(item.flags() | Qt.ItemIsEditable)
                    # Tint editable cells so the user can spot them.
                    item.setBackground(QtGui.QColor(250, 240, 240))
                else:
                    item.setFlags(item.flags() ^ Qt.ItemIsEditable)
                item.setTextAlignment(Qt.AlignHCenter)
                tbl.setItem(row, col, item)
            except Exception as ex:
                utool.printex(ex, key_list=['type_', 'data', 'col', 'row',
                                            'tblname', 'col_types'])
                raise
    #printDBG(dbg_col2_dtype)
    tbl.setSortingEnabled(True)
    tbl.sortByColumn(sort_col, sort_ord)  # Move back to old sorting
    tbl.show()
    tbl.blockSignals(tblWasBlocked)
def populate_imageset_tab(front, imagesettext):
    """ Ensure the frontend has a tab for the given imageset. """
    front.ui.ensureImageSetTab(front, imagesettext)
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "_broken/uidtables.py",
"copies": "1",
"size": "13690",
"license": "apache-2.0",
"hash": 8445031291241380000,
"line_mean": 37.6723163842,
"line_max": 94,
"alpha_frac": 0.588312637,
"autogenerated": false,
"ratio": 3.3686023622047245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9441617500275805,
"avg_score": 0.0030594997857837864,
"num_lines": 354
} |
from __future__ import absolute_import, division, print_function
from itertools import product
from math import ceil
import numpy as np
import pandas as pd
import h5py
try:
from sparse import COO
except ImportError:
raise ImportError("The 'sparse' package is required to use dask")
from dask.base import tokenize
import dask.dataframe as dd
import dask.array as da
from ..core import CSRReader, query_rect
from ..util import parse_cooler_uri, partition
def _get_group_info(path, grouppath, keys):
    """Inspect an HDF5 table group and derive dask metadata for it.

    Returns ``(nrows, keys, meta, categoricals)``: the row count, the
    resolved column keys, an empty DataFrame acting as a compound "dtype",
    and a mapping of column name -> ordered category list for columns
    stored with an HDF5 enum dtype.
    """
    with h5py.File(path, "r") as f:
        grp = f[grouppath]
        if keys is None:
            keys = list(grp.keys())
        nrows = len(grp[keys[0]])

        # Enum-typed columns become ordered pandas Categoricals
        categoricals = {}
        for key in keys:
            enum_dict = h5py.check_dtype(enum=grp[key].dtype)
            if enum_dict is not None:
                categoricals[key] = sorted(enum_dict, key=enum_dict.__getitem__)

        # Meta is an empty dataframe that serves as a compound "dtype"
        columns = {key: np.array([], dtype=grp[key].dtype) for key in keys}
        meta = pd.DataFrame(columns, columns=keys)
        for key in categoricals:
            meta[key] = pd.Categorical(
                [], categories=categoricals[key], ordered=True)
    return nrows, keys, meta, categoricals
def _slice_dataset(filepath, grouppath, key, slc, lock=None):
    """Read one slice of a single 1D HDF5 dataset.

    When ``lock`` is given, the read is serialized behind it so concurrent
    dask workers do not touch the HDF5 file simultaneously.
    """
    try:
        if lock is not None:
            lock.acquire()
        with h5py.File(filepath, "r") as f:
            dset = f[grouppath][key]
            return dset[slc]
    finally:
        if lock is not None:
            lock.release()
def _slice_group(filepath, grouppath, keys, slc, lock=None):
    """Read one slice of every column dataset in an HDF5 table group.

    Returns a dict of column name -> numpy array. ``lock``, if given,
    serializes HDF5 access across workers.
    """
    try:
        if lock is not None:
            lock.acquire()
        with h5py.File(filepath, "r") as f:
            grp = f[grouppath]
            return {key: grp[key][slc] for key in keys}
    finally:
        if lock is not None:
            lock.release()
def _restore_categories(data, categorical_columns):
for key, category_dict in categorical_columns.items():
data[key] = pd.Categorical.from_codes(data[key], category_dict, ordered=True)
return data
def read_table(group_uri, keys=None, chunksize=int(10e6), index=None, lock=None):
    """
    Create a dask dataframe around a column-oriented table in HDF5.

    A table is a group containing equal-length 1D datasets.

    Parameters
    ----------
    group_uri : str
        URI to the HDF5 group storing the table.
    keys : list, optional
        list of HDF5 Dataset keys, default is to use all keys in the group
    chunksize : int, optional
        Chunk size
    index : str, optional
        Sorted column to use as index
    lock : multiprocessing.Lock, optional
        Lock to serialize HDF5 read/write access. Default is no lock.

    Returns
    -------
    :class:`dask.dataframe.DataFrame`

    Notes
    -----
    Learn more about the `dask <https://docs.dask.org/en/latest/>`_ project.
    """
    filepath, grouppath = parse_cooler_uri(group_uri)
    nrows, keys, meta, categoricals = _get_group_info(filepath, grouppath, keys)
    # Make a unique task name
    token = tokenize(filepath, grouppath, chunksize, keys)
    task_name = "daskify-h5py-table-" + token
    # Partition the table.  Divisions are the index labels at each partition
    # boundary (row numbers here, since the default integer index is used);
    # the final division must be the label of the last row.
    divisions = (0,) + tuple(range(-1, nrows, chunksize))[1:]
    if divisions[-1] != nrows - 1:
        divisions = divisions + (nrows - 1,)
    # Build the task graph
    dsk = {}
    for i in range(0, int(ceil(nrows / chunksize))):
        slc = slice(i * chunksize, (i + 1) * chunksize)
        # Each task reads one slice of every column...
        data_dict = (_slice_group, filepath, grouppath, keys, slc, lock)
        if categoricals:
            # ...and, if needed, restores categorical columns from codes.
            data_dict = (_restore_categories, data_dict, categoricals)
        dsk[task_name, i] = (pd.DataFrame, data_dict, None, meta.columns)
    # Generate ddf from dask graph
    df = dd.DataFrame(dsk, task_name, meta, divisions)
    if index is not None:
        df = df.set_index(index, sorted=True, drop=False)
    return df
def _array_select(clr, i0, i1, j0, j1, field, sparse_array):
    """Query the rectangle [i0:i1, j0:j1] of a cooler's matrix.

    Returns a :class:`sparse.COO` array, or its dense numpy equivalent
    when ``sparse_array`` is False.
    """
    is_upper = clr._is_symm_upper
    with clr.open("r") as h5:
        dtype = h5['pixels'][field].dtype
        reader = CSRReader(h5, field, max_chunk=500000000)
        if is_upper:
            # Upper-triangle storage: mirror across the diagonal on query
            i, j, v = query_rect(reader.query, i0, i1, j0, j1, duplex=True)
        else:
            i, j, v = reader.query(i0, i1, j0, j1)
        if not len(v):
            # Empty selections come back untyped; coerce to the column dtype
            v = v.astype(dtype)
        arr = COO((i - i0, j - j0), v, shape=(i1 - i0, j1 - j0))
        return arr if sparse_array else arr.todense()
def load_dask_array(
    clr, i0, i1, j0, j1, field="count", sparse_array=False, chunksize=256
):
    """
    Create a parallel Dask array around the matrix representation of a cooler.

    Parameters
    ----------
    clr : :class:`cooler.Cooler`
        Cooler object
    i0, i1 : int
        Row query range
    j0, j1 : int
        Column query range
    field : str
        Value column to query
    sparse_array : bool, optional
        Create a dask array backed by :class:`sparse.COO` sparse arrays
        instead of dense numpy arrays (default).
    chunksize : int, optional
        Length of the rowwise chunks to partition the underlying data into.

    Returns
    -------
    :class:`dask.array.Array`
    """
    # BUGFIX: the token previously hashed ``i1`` in place of ``j1``, so two
    # selections differing only in j1 could collide to the same task name.
    token = tokenize(clr.uri, i0, i1, j0, j1, field, chunksize)
    task_name = "cooler-array-slice-" + token
    shape = (i1 - i0, j1 - j0)
    # Zero-size query used only to infer the output meta (dtype / format)
    meta = _array_select(clr, 0, 0, 0, 0, field, sparse_array)
    # Partition rows into chunks of ``chunksize``; columns stay whole
    slices = [(lo, hi, j0, j1) for lo, hi in partition(0, shape[0], chunksize)]
    chunks = (tuple(s[1] - s[0] for s in slices), (shape[1],))
    keys = list(product([task_name], *[range(len(dim)) for dim in chunks]))
    values = [(_array_select, clr, *slc, field, sparse_array) for slc in slices]
    dsk = dict(zip(keys, values))
    return da.Array(dsk, task_name, chunks, meta=meta, shape=shape)
| {
"repo_name": "mirnylab/cooler",
"path": "cooler/sandbox/dask.py",
"copies": "1",
"size": "5791",
"license": "bsd-3-clause",
"hash": -4737585745493024000,
"line_mean": 29.9679144385,
"line_max": 86,
"alpha_frac": 0.6118114315,
"autogenerated": false,
"ratio": 3.4145047169811322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4526316148481132,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from logging import getLogger
from PySide import QtCore, QtGui
from ret.elf import RetkitElfDocument
from .ConsoleWindow import ConsoleWindow
from .Ui_RetWindow import Ui_RetWindow
log = getLogger("ret.ui.qt")
class FunctionTableModel(QtCore.QAbstractTableModel):
    """Qt table model exposing a document's function list to a QTableView.

    Columns are: start address, end address, name, return type, arguments,
    and calling convention (see ``headerText``).
    """

    # Column titles in display order; indices must match the row built in
    # data() and the key list in sort_functions.
    headerText = ['Start address', 'End address', 'Name', 'Return type',
                  'Arguments', 'Convention']

    def __init__(self, document, parent=None):
        """Create a model over ``document.functions``."""
        super(FunctionTableModel, self).__init__(parent)
        self.document = document
        return

    def columnCount(self, parent):
        # Fixed column set; must equal len(headerText).
        return 6

    def rowCount(self, parent):
        # One row per function in the document.
        return len(self.document.functions)

    def headerData(self, pos, orientation, role):
        """Provide horizontal display headers; defer everything else to Qt."""
        if (role == QtCore.Qt.ItemDataRole.DisplayRole and
            orientation == QtCore.Qt.Orientation.Horizontal and
            pos < len(self.headerText)):
            return self.headerText[pos]
        else:
            return None

    def data(self, index, role):
        """Return the display string for the cell at ``index``."""
        row = index.row()
        col = index.column()
        if (role != QtCore.Qt.ItemDataRole.DisplayRole or
            row >= len(self.document.functions) or
            col >= len(self.headerText)):
            return None
        fn = self.document.functions[row]
        # Build the full display row, then pick the requested column; the
        # order here must match headerText.
        return ["0x%08x" % fn.start_address, "0x%08x" % fn.end_address,
                fn.name, str(fn.return_type),
                str(fn.arguments) if fn.arguments else "void",
                fn.calling_convention][col]

    def parent(self, index):
        # Flat table: no hierarchy, so every item has an invalid parent.
        return self.createIndex(-1, -1)

    # Per-column sort keys; order must match headerText.
    sort_functions = [
        lambda fn: fn.start_address,
        lambda fn: fn.end_address,
        lambda fn: fn.name,
        lambda fn: str(fn.return_type),
        lambda fn: str(fn.arguments) if fn.arguments else "void",
        lambda fn: fn.calling_convention,
    ]

    def sort(self, column, order):
        """Sort the underlying function list in place by ``column``."""
        log.debug("sort: column=%r order=%r", column, order)
        reverse = (order == QtCore.Qt.SortOrder.DescendingOrder)
        self.document.functions.sort(
            key=self.sort_functions[column],
            reverse=reverse)
        # Notify attached views that every cell may have moved.
        self.dataChanged.emit(
            self.createIndex(0, 0),
            self.createIndex(len(self.document.functions) - 1, 5))
        return
class FunctionDisassemblyModel(QtCore.QAbstractTableModel):
    """Qt table model showing one function's instructions, one per row.

    The instruction address is exposed through the *vertical* header (see
    headerData), so the table body has a single 'Instruction' column.
    """

    headerText = ['Address', 'Instruction']

    def __init__(self, function):
        """\
        FunctionDisassemblyModel(function) -> model

        Create a new FunctionDisassemblyModel object for the given function
        (normally a ret.state.Function object or subclass thereof). This allows the
        function disassembly to be viewed in a QTableView object.
        """
        super(FunctionDisassemblyModel, self).__init__()
        self.function = function
        # Instructions sorted by address give a stable row order.
        self.instructions = list(function.instructions.values())
        self.instructions.sort(key=lambda instr: instr.addr)
        return

    def columnCount(self, parent):
        # Only the instruction text is a body column; the address column
        # from headerText is rendered via the vertical header instead.
        return 1

    def rowCount(self, parent):
        return len(self.instructions)

    def headerData(self, pos, orientation, role):
        """Horizontal: column titles.  Vertical: hex address of the row."""
        if role == QtCore.Qt.ItemDataRole.DisplayRole:
            try:
                if orientation == QtCore.Qt.Orientation.Horizontal:
                    return self.headerText[pos]
                else:
                    return hex(self.instructions[pos].addr)
            except IndexError:
                pass
        return None

    def data(self, index, role):
        """Return the disassembly text for the instruction at ``index``."""
        row = index.row()
        col = index.column()
        if (role != QtCore.Qt.ItemDataRole.DisplayRole or
            row >= len(self.instructions) or
            col >= 2):
            return None
        return str(self.instructions[row])

    def parent(self, index):
        # Flat table: no hierarchy.
        return self.createIndex(-1, -1)
class RetWindow(QtGui.QMainWindow, Ui_RetWindow):
    """Main application window.

    Hosts the function table, one disassembly view per opened function,
    and an attached console window.
    """

    def __init__(self, application, parent=None):
        """Build the UI and wire up function-table sorting."""
        super(RetWindow, self).__init__(parent)
        self.setupUi(self)
        self.application = application
        self.document = None  # set by load()
        self.functionsTableView.horizontalHeader().setClickable(True)
        # Clicking a column header re-sorts the function table.
        QtCore.QObject.connect(
            self.functionsTableView.horizontalHeader(),
            QtCore.SIGNAL("sortIndicatorChanged(int, Qt::SortOrder)"),
            self.functionsTableView.sortByColumn)
        self.functionsTableView.sortByColumn(
            0, QtCore.Qt.SortOrder.AscendingOrder)
        # Maps id(function) -> QTableView so each function opens one view.
        self.functionDisassemblyViews = {}
        self.consoleWindow = ConsoleWindow(self)
        return

    def open(self):
        """Prompt for an object file and load it.

        The file loads into this window if it has no document yet;
        otherwise a fresh RetWindow is created for it.
        """
        # Renamed the second return value; `filter` shadowed the builtin.
        filename, selected_filter = QtGui.QFileDialog.getOpenFileName(
            self, "Open object file", "",
            "Retkit documents (*.retkit);;Shared libraries (*.so);;"
            "All files (*)")
        if filename is None or len(filename) == 0:
            return  # dialog cancelled
        if self.document is None:
            target = self
        else:
            # BUGFIX: QWidget.parent is a method; the original passed the
            # bound method itself instead of calling it, so the new window
            # received a callable rather than a parent widget.
            target = RetWindow(self.application, self.parent())
        target.load(filename)
        return

    def load(self, filename):
        """Parse ``filename`` into a document and show its functions."""
        ## FIXME: Don't hardcode the document here.
        self.document = RetkitElfDocument(
            filename=None, object_filename=filename)
        model = FunctionTableModel(self.document)
        self.functionsTableView.setModel(model)
        return

    def save(self):
        """Not implemented yet."""
        pass

    def saveAs(self):
        """Not implemented yet."""
        pass

    def close(self):
        """Close this window."""
        super(RetWindow, self).close()
        return

    # The edit-menu actions below are placeholders for now.
    def undo(self):
        return

    def redo(self):
        return

    def cut(self):
        return

    def copy(self):
        return

    def paste(self):
        return

    def delete(self):
        return

    def selectAll(self):
        return

    def about(self):
        return

    def functionDoubleClicked(self, index):
        """Open (or raise) the disassembly view for the clicked function."""
        model = self.functionsTableView.model()
        if model is None:
            log.error("function double clicked but no model is present")
            return None
        fn = model.document.functions[index.row()]
        view = self.functionDisassemblyViews.get(id(fn))
        if view is not None:
            # A view for this function already exists; bring it forward.
            view.raise_()
        else:
            view = QtGui.QTableView(self.contents)
            self.functionDisassemblyViews[id(fn)] = view
            view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
            view.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerItem)
            view.setSortingEnabled(False)
            view.setCornerButtonEnabled(False)
            view.setObjectName("functionDisassembly%x" % id(fn))
            view.horizontalHeader().setVisible(True)
            view.verticalHeader().setVisible(True)
            view.setModel(FunctionDisassemblyModel(fn))
            view.show()
        return

    def showConsole(self):
        """Show the console window, tiling it below a maximized window."""
        if self.isMaximized() and not self.consoleWindow.isVisible():
            # Show the console window below this window.
            desktop = QtGui.QApplication.desktop()
            screenSize = desktop.availableGeometry(self)
            # Compute the size of the window decorations
            frameGeometry = self.frameGeometry()
            clientGeometry = self.geometry()
            decorWidth = frameGeometry.width() - clientGeometry.width()
            decorHeight = frameGeometry.height() - clientGeometry.height()
            # This is the top of the console window's frame.
            consoleTop = (screenSize.bottom() - self.consoleWindow.height())
            # De-maximize ourself and set the geometry accordingly.
            self.setWindowState(
                self.windowState() & ~QtCore.Qt.WindowMaximized)
            self.setGeometry(
                screenSize.left(), screenSize.top(),
                screenSize.width() - decorWidth,
                consoleTop - screenSize.top() - 2 * decorHeight)
            # Position the console window and show it.
            self.consoleWindow.setGeometry(
                screenSize.left(), consoleTop,
                screenSize.width(),
                self.consoleWindow.height())
        self.consoleWindow.show()
# Local variables:
# mode: Python
# tab-width: 8
# indent-tabs-mode: nil
# End:
# vi: set expandtab tabstop=8
| {
"repo_name": "dacut/ret",
"path": "ret/ui/qt/RetWindow.py",
"copies": "1",
"size": "8280",
"license": "bsd-2-clause",
"hash": 3516664332717719600,
"line_mean": 31.2178988327,
"line_max": 77,
"alpha_frac": 0.604589372,
"autogenerated": false,
"ratio": 4.323759791122716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0025324276762402047,
"num_lines": 257
} |
from __future__ import absolute_import, division, print_function
from lxml import etree
from symantecssl import utils
from symantecssl.response_models import (
OrderDetails, OrderDetail, OrderContacts, QuickOrderResponse,
ReissueResponse
)
class ApproverEmail(object):
    """Holds the certificate approver's e-mail address for a request."""

    def __init__(self):
        self.approver_email = ''

    def serialize(self):
        """Serialize the approver e-mail for the request.

        :return: ``ApproverEmail`` element
        """
        node = etree.Element('ApproverEmail')
        node.text = self.approver_email
        return node

    def set_approver_email(self, approver_email):
        """Record the approver's e-mail address for later serialization.

        :param approver_email: approver's e-mail address
        """
        self.approver_email = approver_email
class RequestEnvelope(object):
    """Wraps a serialized request model in a SOAP Envelope/Body pair."""

    def __init__(self, request_model):
        self.request_model = request_model

    def serialize(self):
        """Serialize the entire request via the wrapped request model.

        :return: root ``Envelope`` element for the request
        """
        envelope = etree.Element(
            "{http://schemas.xmlsoap.org/soap/envelope/}Envelope",
            nsmap=utils.SOAP_NS
        )
        body = etree.SubElement(
            envelope, "{http://schemas.xmlsoap.org/soap/envelope/}Body",
            nsmap=utils.SOAP_NS
        )
        body.append(self.request_model.serialize())
        return envelope
class RequestHeader(object):
    """Common request header: partner credentials plus order identifiers."""

    def __init__(self):
        self.partner_code = ''
        self.username = ''
        self.password = ''
        self.product_code = ''
        self.partner_order_id = ''

    def serialize(self, order_type):
        """Serialize the request header.

        Each request model calls this to begin serialization.

        :param order_type: True emits an ``OrderRequestHeader`` (including
            product code and partner order id); False emits a plain
            ``QueryRequestHeader``.
        :return: root element for the request header
        """
        if order_type:
            root = etree.Element("OrderRequestHeader")
            utils.create_subelement_with_text(
                root, 'ProductCode', self.product_code
            )
            utils.create_subelement_with_text(
                root, 'PartnerOrderID', self.partner_order_id
            )
        else:
            root = etree.Element("QueryRequestHeader")
        utils.create_subelement_with_text(
            root, 'PartnerCode', self.partner_code
        )
        # Credentials always travel inside an AuthToken child element.
        auth_token = etree.SubElement(root, "AuthToken")
        utils.create_subelement_with_text(auth_token, "UserName", self.username)
        utils.create_subelement_with_text(auth_token, "Password", self.password)
        return root

    def set_request_header(self, product_code, partner_order_id):
        """Record order identifiers for serialization.

        :param product_code: type of certificate to be ordered (via code).
            See Symantec's API documentation for specific details.
        :param partner_order_id: an ID set by the user to track the order.
        """
        self.product_code = product_code
        self.partner_order_id = partner_order_id
class OrderQueryOptions(object):
    """Flags selecting which sections Symantec returns for a query.

    All options default to True so the full response comes back; set
    individual flags to False to trim the response payload.
    """

    def __init__(
        self, product_detail=True, contacts=True, payment_info=True,
        cert_info=True, fulfillment=True, ca_certs=True, pkcs7_cert=True,
        partner_tags=True, auth_comments=True, auth_statuses=True,
        file_auth_dv_summary=True, trust_services_summary=True,
        trust_services_details=True, vulnerability_scan_summary=True,
        vulnerability_scan_details=True, cert_algorithm_info=True
    ):
        self.product_detail = product_detail
        self.contacts = contacts
        self.payment_info = payment_info
        self.certificate_info = cert_info
        self.fulfillment = fulfillment
        self.ca_certs = ca_certs
        self.pkcs7_cert = pkcs7_cert
        self.partner_tags = partner_tags
        self.authentication_comments = auth_comments
        self.authentication_statuses = auth_statuses
        self.file_auth_dv_summary = file_auth_dv_summary
        self.trust_services_summary = trust_services_summary
        self.trust_services_details = trust_services_details
        self.vulnerability_scan_summary = vulnerability_scan_summary
        self.vulnerability_scan_details = vulnerability_scan_details
        self.certificate_algorithm_info = cert_algorithm_info

    def serialize(self):
        """Serializes and sets the query options for the request.

        The query options are by default set to true. All data is passed
        through to the user. The user is able to change the values to false
        if they wish.

        :return: root ``OrderQueryOptions`` element with one lower-cased
            boolean child per option
        """
        root = etree.Element("OrderQueryOptions")
        element_map = {
            "ReturnProductDetail": self.product_detail,
            "ReturnContacts": self.contacts,
            "ReturnPaymentInfo": self.payment_info,
            # BUGFIX: ReturnCertificateInfo was never emitted even though
            # `certificate_info` is a documented, settable option.
            "ReturnCertificateInfo": self.certificate_info,
            "ReturnFulfillment": self.fulfillment,
            "ReturnCACerts": self.ca_certs,
            "ReturnPKCS7Cert": self.pkcs7_cert,
            "ReturnPartnerTags": self.partner_tags,
            "ReturnAuthenticationComments": self.authentication_comments,
            "ReturnAuthenticationStatuses": self.authentication_statuses,
            "ReturnFileAuthDVSummary": self.file_auth_dv_summary,
            "ReturnTrustServicesSummary": self.trust_services_summary,
            "ReturnTrustServicesDetails": self.trust_services_details,
            # BUGFIX: the Summary element previously emitted the *details*
            # flag, and the Details element was missing entirely.
            "ReturnVulnerabilityScanSummary": self.vulnerability_scan_summary,
            "ReturnVulnerabilityScanDetails": self.vulnerability_scan_details,
            "ReturnCertificateAlgorithmInfo": self.certificate_algorithm_info
        }
        for key, val in element_map.items():
            ele = etree.SubElement(root, key)
            ele.text = str(val).lower()
        return root
class OrganizationInfo(object):
    """Organization name, address, and DUNS number for a certificate order."""

    def __init__(self):
        self.org_name = ''
        self.org_address = ''
        self.address_line_one = ''
        self.address_line_two = ''
        self.address_line_three = ''
        self.city = ''
        self.region = ''
        self.postal_code = ''
        self.country = ''
        self.phone = ''
        self.duns = ''

    def serialize(self):
        """Serialize the organization info for the request.

        :return: root ``OrganizationInfo`` element
        """
        root = etree.Element('OrganizationInfo')
        utils.create_subelement_with_text(
            root, 'OrganizationName', self.org_name
        )
        address = etree.SubElement(root, 'OrganizationAddress')
        address_fields = (
            ('AddressLine1', self.address_line_one),
            ('AddressLine2', self.address_line_two),
            ('AddressLine3', self.address_line_three),
            ('City', self.city),
            ('Region', self.region),
            ('PostalCode', self.postal_code),
            ('Country', self.country),
            ('Phone', self.phone),
        )
        for tag, text in address_fields:
            utils.create_subelement_with_text(address, tag, text)
        utils.create_subelement_with_text(root, 'DUNS', self.duns)
        return root
class OrderParameters(object):
    """Certificate order parameters: CSR, domain, validity, and renewal
    settings."""

    def __init__(self):
        self.csr = ''
        self.domain_name = ''
        self.order_partner_order_id = ''
        self.renewal_indicator = True
        self.renewal_behavior = ''
        self.signature_hash_algorithm = ''
        self.special_instructions = ''
        self.valid_period = '12'
        self.web_server_type = ''
        self.wildcard = False
        self.dnsnames = ''

    def serialize(self):
        """Serialize the OrderParameters section for the request.

        note:: Symantec limits customers to 1, 12, 24, 36, and 48 month
        options for validity period.

        :return: root ``OrderParameters`` element
        """
        root = etree.Element('OrderParameters')
        fields = (
            ('ValidityPeriod', self.valid_period),
            ('DomainName', self.domain_name),
            ('OriginalPartnerOrderID', self.order_partner_order_id),
            ('CSR', self.csr),
            ('WebServerType', self.web_server_type),
            ('RenewalIndicator',
             utils._boolean_to_str(self.renewal_indicator, True)),
            ('RenewalBehavior', self.renewal_behavior),
            ('SignatureHashAlgorithm', self.signature_hash_algorithm),
            ('SpecialInstructions', self.special_instructions),
            ('WildCard', utils._boolean_to_str(self.wildcard, False)),
            ('DNSNames', self.dnsnames),
        )
        for tag, text in fields:
            utils.create_subelement_with_text(root, tag, text)
        return root
class ReissueEmail(object):
    """Holds the e-mail address used for a certificate reissue."""

    def __init__(self):
        self.reissue_email = ''

    def serialize(self):
        """Serialize the ReissueEmail section for the request.

        :return: ``ReissueEmail`` element to be added to the XML request
        """
        node = etree.Element('ReissueEmail')
        node.text = self.reissue_email
        return node
class OrderChange(object):
    """A single SAN change (add/delete/edit) within an order modification."""

    def __init__(self):
        self.change_type = ''
        self.new_value = ''
        self.old_value = ''

    def serialize(self):
        """Serialize the OrderChange section for the request.

        Empty new/old values are omitted from the output.

        :return: ``OrderChange`` element to be added to the XML request
        """
        root = etree.Element('OrderChange')
        utils.create_subelement_with_text(root, 'ChangeType', self.change_type)
        for tag, value in (('NewValue', self.new_value),
                           ('OldValue', self.old_value)):
            if value:
                utils.create_subelement_with_text(root, tag, value)
        return root
class OrderChanges(object):
    """Collection of pending SAN changes for an order modification.

    Attributes
    ----------
    add : list of SANs to add
    delete : list of SANs to remove
    edit : list of (old_name, new_name) pairs to rename
    """

    def __init__(self):
        self.add = []
        self.delete = []
        self.edit = []

    def serialize(self):
        """Serialize the OrderChanges section for the request.

        :return: ``OrderChanges`` element containing one ``OrderChange``
            child per pending SAN change
        """
        root = etree.Element('OrderChanges')
        for san in self.add:
            change = OrderChange()
            change.change_type = 'Add_SAN'
            change.new_value = san
            root.append(change.serialize())
        for san in self.delete:
            change = OrderChange()
            change.change_type = 'Delete_SAN'
            change.old_value = san
            root.append(change.serialize())
        for old_alternate_name, new_alternate_name in self.edit:
            change = OrderChange()
            change.change_type = 'Edit_SAN'
            change.old_value = old_alternate_name
            change.new_value = new_alternate_name
            root.append(change.serialize())
        return root

    @property
    def has_changes(self):
        """Whether any SAN change is pending.

        :return: True if there is at least one add, delete, or edit
        """
        # BUGFIX: previously returned the first truthy list (or the final
        # empty list) rather than the documented True/False boolean.
        return bool(self.add or self.delete or self.edit)
class Request(object):
def __init__(self):
self.partner_code = ''
self.username = ''
self.password = ''
self.partner_order_id = ''
self.from_date = ''
self.to_date = ''
self.request_header = RequestHeader()
self.query_options = OrderQueryOptions()
def set_credentials(self, partner_code, username, password):
"""Sets credentials for serialization.
Sets credentials to allow user to make requests with Symantec SOAPXML
API. These credentials are set with Symantec proper. Contact Symantec
to determine your partner code and username.
:param partner_code: partner code for Symantec SOAPXML API
:param username: username for Symantec SOAPXML API
:param password: password associated with user in Symantec SOAPXML API
"""
self.request_header.partner_code = partner_code
self.request_header.username = username
self.request_header.password = password
def set_time_frame(self, from_date, to_date):
"""Sets time range of request to Symantec.
It is recommended that this time range be kept short if you are
interested in a quick response; however, it will parse a longer time
range just fine. Be wary that it may be a little slow.
:param from_date: ISO8601 Datetime object
:param to_date: ISO8601 Datetime object
"""
self.from_date = from_date.isoformat()
self.to_date = to_date.isoformat()
def set_query_options(
self, product_detail, contacts, payment_info,
cert_info, fulfillment, ca_certs, pkcs7_cert,
partner_tags, auth_comments, auth_statuses,
file_auth_dv_summary, trust_services_summary,
trust_services_details, vulnerability_scan_summary,
vulnerability_scan_details, cert_algorithm_info
):
"""Sets query options for serialization.
Allows the user to change the query options. All query options are
set default to True. There should really be no reason to change these
to False unless you are concerned about performance of a large time
ranged call. Check the Symantec API documentation for specifics on
each of these components.
All parameter explanations assume they are set to True.
:param product_detail: details of the certificate product
:param contacts: contacts for certificate order
:param payment_info: payment information for certificate order
:param cert_info: detailed certificate information
:param fulfillment: section for the actual certificate itself
:param ca_certs: section for the intermediate and root certificates
:param pkcs7_cert: section for the pkcs7 certificate itself
:param partner_tags:
:param auth_comments: comments regarding authentication for the
certificate
:param auth_statuses: status for authentication comments
:param file_auth_dv_summary:
:param trust_services_summary:
:param trust_services_details:
:param vulnerability_scan_summary: results of vulnerability scan
:param vulnerability_scan_details: details of vulnerability scan
:param cert_algorithm_info: certificate algorithm hash (SHA2
defaulted for Symantec as of January 2015)
"""
self.query_options.product_detail = product_detail
self.query_options.contacts = contacts
self.query_options.payment_info = payment_info
self.query_options.certificate_info = cert_info
self.query_options.fulfillment = fulfillment
self.query_options.ca_certs = ca_certs
self.query_options.pkcs7_cert = pkcs7_cert
self.query_options.partner_tags = partner_tags
self.query_options.authentication_comments = auth_comments
self.query_options.authentication_statuses = auth_statuses
self.query_options.file_auth_dv_summary = file_auth_dv_summary
self.query_options.trust_services_summary = trust_services_summary
self.query_options.trust_services_details = trust_services_details
self.query_options.vulnerability_scan_summary = (
vulnerability_scan_summary)
self.query_options.vulnerability_scan_details = (
vulnerability_scan_details)
self.query_options.certificate_algorithm_info = cert_algorithm_info
    def set_partner_order_id(self, partner_order_id):
        """Sets the partner order ID for later order retrieval.

        :param partner_order_id: the partner order id from a previous order
        """
        self.partner_order_id = partner_order_id
class GetModifiedOrderRequest(Request):
    """Request model for the Symantec GetModifiedOrders SOAP call."""

    def __init__(self):
        super(GetModifiedOrderRequest, self).__init__()
        self.response_model = OrderDetails

    def serialize(self):
        """Serialize the modified-orders request to an XML element tree.

        Builds the <GetModifiedOrders> root and attaches the query
        request header, the query options, and the from/to date range
        beneath a single <Request> child.

        :return: root element for the get modified order request
        """
        root = etree.Element('GetModifiedOrders', nsmap=utils.NS)
        request = etree.SubElement(root, 'Request')
        request.append(self.request_header.serialize(order_type=False))
        request.append(self.query_options.serialize())
        # from_date/to_date are assumed to be set on the instance before
        # serialization (e.g. by the Request base) -- TODO confirm
        date_fields = (
            ('FromDate', self.from_date),
            ('ToDate', self.to_date),
        )
        for tag, value in date_fields:
            utils.create_subelement_with_text(request, tag, value)
        return root
class QuickOrderRequest(Request):
    """Request model for the Symantec QuickOrder SOAP call."""
    def __init__(self):
        super(QuickOrderRequest, self).__init__()
        self.order_parameters = OrderParameters()
        self.order_contacts = OrderContacts()
        self.organization_info = OrganizationInfo()
        self.approver_email = ApproverEmail()
        self.response_model = QuickOrderResponse
    def serialize(self):
        """Serializes the quick order request.
        The request model for the QuickOrder call in the Symantec
        SOAP XML API. Serializes all related sections to this request model.
        This will serialize the following:
        Order Request Header
        Order Contacts
        Organization Info
        Approver Email
        :return: root element of the QuickOrderRequest section
        """
        root = etree.Element('QuickOrder', nsmap=utils.NS)
        order_request_header = self.request_header.serialize(order_type=True)
        request = etree.SubElement(root, 'Request')
        order_parameters = self.order_parameters.serialize()
        organization_info = self.organization_info.serialize()
        admin_contact, tech_contact, billing_contact = (
            self.order_contacts.serialize()
        )
        approver_email = self.approver_email.serialize()
        # append each serialized section under <Request>, in this order
        for item in [
            order_request_header, organization_info, order_parameters,
            admin_contact, tech_contact, billing_contact, approver_email
        ]:
            request.append(item)
        return root
    def set_order_parameters(
            self, csr, domain_name, partner_order_id, renewal_indicator,
            renewal_behavior, hash_algorithm,
            special_instructions, valid_period, web_server_type,
            wildcard='false', dns_names=None
    ):
        """Sets the parameters for the order request.
        Allows the user to change the order parameters options.
        Check the Symantec API documentation for specifics on each of these
        components.
        :param csr: the certificate signing request for the order
        :param domain_name: the domain being covered in the certificate
        :param partner_order_id: the original id provided by the user for
            tracking. Used with renewals.
        :param renewal_indicator: flag to set renewals on
        :param renewal_behavior: set to either
            'RenewalNoticesSentAutomatically' or 'RenewalNoticesNotSent'
        :param hash_algorithm: hashing algorithm for certificate
            (ex: SHA2-256)
        :param special_instructions: notes for the approver
        :param valid_period: length of certificate in months. Defaults to 12.
            See Symantec API documentation for specifics per product.
        :param web_server_type: See Symantec API documentation for options
        :param wildcard: optional field. Indicates if the order is a wildcard
            or not. Binary
        :param dns_names: optional field. Comma separated values for SAN
            certificates
        """
        self.order_parameters.csr = csr
        self.order_parameters.domain_name = domain_name
        self.order_parameters.order_partner_order_id = partner_order_id
        self.order_parameters.renewal_indicator = renewal_indicator
        self.order_parameters.renewal_behavior = renewal_behavior
        self.order_parameters.signature_hash_algorithm = hash_algorithm
        self.order_parameters.special_instructions = special_instructions
        self.order_parameters.valid_period = valid_period
        self.order_parameters.web_server_type = web_server_type
        self.order_parameters.wildcard = wildcard
        self.order_parameters.dnsnames = dns_names
class GetOrderByPartnerOrderID(Request):
    """Request model for the Symantec GetOrderByPartnerOrderID SOAP call."""

    def __init__(self):
        super(GetOrderByPartnerOrderID, self).__init__()
        self.response_model = OrderDetail
        self.partner_order_id = ''

    def serialize(self):
        """Serialize the get-order-by-partner-order-ID request.

        Attaches, in order, the query request header, the partner order
        ID, and the query options beneath a <Request> child of the
        <GetOrderByPartnerOrderID> root.

        :return: root element for the get order by partner order id
        """
        # NOTE(review): this call uses utils.DEFAULT_NS (default namespace)
        # while GetModifiedOrders uses the prefixed utils.NS -- confirm the
        # API expects the unprefixed form here.
        root = etree.Element(
            'GetOrderByPartnerOrderID',
            nsmap=utils.DEFAULT_NS
        )
        request = etree.SubElement(root, 'Request')
        request.append(self.request_header.serialize(order_type=False))
        utils.create_subelement_with_text(
            request, 'PartnerOrderID', self.partner_order_id
        )
        request.append(self.query_options.serialize())
        return root
class Reissue(Request):
    """Request model for the Symantec Reissue SOAP call."""

    def __init__(self):
        super(Reissue, self).__init__()
        self.response_model = ReissueResponse
        self.order_parameters = OrderParameters()
        self.order_changes = OrderChanges()
        self.reissue_email = ReissueEmail()

    def add_san(self, alternate_name):
        """Queue a SAN to be added relative to the original order.

        :param alternate_name: the name to be added to reissue request
        """
        self.order_changes.add.append(alternate_name)

    def delete_san(self, alternate_name):
        """Queue a SAN to be removed from the original order.

        :param alternate_name: the name to be deleted from original order
        """
        self.order_changes.delete.append(alternate_name)

    def edit_san(self, old_alternate_name, new_alternate_name):
        """Queue a SAN rename: replace an old name with a new one.

        :param old_alternate_name: the name to be deleted from original order
        :param new_alternate_name: the name to be added to reissue request
        """
        self.order_changes.edit.append(
            (old_alternate_name, new_alternate_name)
        )

    def serialize(self):
        """Serialize the Reissue request to an XML element tree.

        Attaches the order request header, the order parameters, and the
        reissue email beneath a <Request> child of the <Reissue> root.
        Queued order changes are serialized only when any exist.

        :return: root object for the reissue request xml object
        """
        root = etree.Element('Reissue', nsmap=utils.DEFAULT_ONS)
        request = etree.SubElement(root, 'Request')
        for section in (
            self.request_header.serialize(order_type=True),
            self.order_parameters.serialize(),
            self.reissue_email.serialize(),
        ):
            request.append(section)
        if self.order_changes.has_changes:
            request.append(self.order_changes.serialize())
        return root
| {
"repo_name": "glyph/symantecssl",
"path": "symantecssl/request_models.py",
"copies": "3",
"size": "24351",
"license": "apache-2.0",
"hash": 6320309869735386000,
"line_mean": 34.3425253991,
"line_max": 79,
"alpha_frac": 0.633608476,
"autogenerated": false,
"ratio": 4.307624270298956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6441232746298957,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from lxml import etree
from symantecssl import utils
class OrderContacts(object):
    """Container for the admin, tech and billing contacts of an order."""

    def __init__(self):
        self.admin = ContactInfo()
        self.tech = ContactInfo()
        self.billing = ContactInfo()
        self.approval_email = ''

    @classmethod
    def deserialize(cls, xml_node):
        """Deserializes the order contacts section in response.

        :param xml_node: XML node to be parsed. Expected to explicitly be
            Order Contacts XML node.
        :return: parsed order contacts information response.
        """
        contacts = OrderContacts()
        for attr, tag in (('admin', 'AdminContact'),
                          ('tech', 'TechContact'),
                          ('billing', 'BillingContact')):
            node = xml_node.find('.//m:{0}'.format(tag), utils.NS)
            setattr(contacts, attr, ContactInfo.deserialize(node))
        return contacts

    def serialize(self):
        """Serializes the order contacts section for request.

        :return: 3-tuple of the admin, tech and billing contact elements
        """
        return (self.admin.serialize('AdminContact'),
                self.tech.serialize('TechContact'),
                self.billing.serialize('BillingContact'))
class ContactInfo(object):
    """Contact details (name, phone, address, ...) for an order contact.

    Used for the Admin, Tech and Billing contact sections of requests
    and responses.
    """

    def __init__(self):
        self.first_name = ''
        self.last_name = ''
        self.phone = ''
        self.email = ''
        self.title = ''
        self.org_name = ''
        self.address_line_one = ''
        self.address_line_two = ''
        self.city = ''
        self.region = ''
        self.postal_code = ''
        self.country = ''
        self.fax = ''

    @classmethod
    def deserialize(cls, xml_node):
        """Deserializes the contact information section in response.

        Only the name, phone, email and title fields are populated from
        the response; the address-related fields keep their defaults.

        :param xml_node: XML node to be parsed. Expected to explicitly be
            Contact Information XML node.
        :return: parsed contact information response.
        """
        contact = ContactInfo()
        # consistency fix: `utils. NS` (stray space) normalized to utils.NS
        contact.first_name = utils.get_element_text(
            xml_node.find('.//m:FirstName', utils.NS)
        )
        contact.last_name = utils.get_element_text(
            xml_node.find('.//m:LastName', utils.NS)
        )
        contact.phone = utils.get_element_text(
            xml_node.find('.//m:Phone', utils.NS)
        )
        contact.email = utils.get_element_text(
            xml_node.find('.//m:Email', utils.NS)
        )
        contact.title = utils.get_element_text(
            xml_node.find('.//m:Title', utils.NS)
        )
        return contact

    def serialize(self, element_name):
        """Serializes the contact information section in the request.

        :param element_name: contact element type. Limited to Admin, Tech, and
            Billing.
        :return: the contact element that is to be used for request.
        """
        ele = etree.Element(element_name)
        for node, node_text in [
            ('FirstName', self.first_name),
            ('LastName', self.last_name),
            ('Phone', self.phone),
            ('Email', self.email),
            ('Title', self.title),
            ('OrganizationName', self.org_name),
            ('AddressLine1', self.address_line_one),
            ('AddressLine2', self.address_line_two),
            ('City', self.city),
            ('Region', self.region),
            ('PostalCode', self.postal_code),
            ('Country', self.country),
            ('Fax', self.fax)
        ]:
            utils.create_subelement_with_text(ele, node, node_text)
        return ele

    def set_contact_info(
            self, first_name, last_name, phone, email, title,
            org_name=None, address_one=None, address_two=None, city=None,
            region=None, postal_code=None, country=None, fax=None):
        """Sets information for Contact Info to be used in request.

        Note: every optional argument is assigned unconditionally, so an
        omitted optional resets the corresponding attribute to None
        (not to its '' default from __init__).

        :param first_name: contact's first name
        :param last_name: contact's last name
        :param phone: contact's phone number
        :param email: contact's email address
        :param title: contact's job title
        :param org_name: organization the contact belongs to
        :param address_one: line one of address for contact
        :param address_two: line two of address for contact
        :param city: city of contact's address
        :param region: region or state of contact
        :param postal_code: postal code of contact's address
        :param country: country of contact's address
        :param fax: do people still have fax numbers?
        """
        self.first_name = first_name
        self.last_name = last_name
        self.phone = phone
        self.email = email
        self.title = title
        self.org_name = org_name
        self.address_line_one = address_one
        self.address_line_two = address_two
        self.city = city
        self.region = region
        self.postal_code = postal_code
        self.country = country
        self.fax = fax
| {
"repo_name": "glyph/symantecssl",
"path": "symantecssl/models.py",
"copies": "4",
"size": "4920",
"license": "apache-2.0",
"hash": 8629635064549396000,
"line_mean": 31.8,
"line_max": 78,
"alpha_frac": 0.5786585366,
"autogenerated": false,
"ratio": 4.076222038111019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 150
} |
from __future__ import absolute_import, division, print_function
from lxml import etree
# XML namespace maps passed to lxml (``nsmap=``/``find``).  The 'm' prefix
# maps to the GeoTrust query or order service namespace; the DEFAULT_*
# variants bind the same URIs as the default (unprefixed) namespace.
NS = {
    'm': 'http://api.geotrust.com/webtrust/query'
}
ONS = {
    'm': 'http://api.geotrust.com/webtrust/order'
}
# SOAP 1.1 envelope namespace.
SOAP_NS = {
    'soap': 'http://schemas.xmlsoap.org/soap/envelope/'
}
DEFAULT_NS = {
    None: 'http://api.geotrust.com/webtrust/query'
}
DEFAULT_ONS = {
    None: 'http://api.geotrust.com/webtrust/order'
}
def get_element_text(element):
    """Return the text of *element*, or the string "None" when absent.

    :param element: element to check for NoneType
    :return: text of element or "None" text
    """
    if element is None:
        return "None"
    return element.text
def create_subelement_with_text(root_element, element, text):
    """Create a child element under *root_element* and set its text.

    :param root_element: parent element the new element is attached to
    :param element: tag name for the new element
    :param text: text content assigned to the new element
    :return: the newly created child element
    """
    child = etree.SubElement(root_element, element)
    child.text = text
    return child
def _boolean_to_str(value, default):
if isinstance(value, bool):
return str(value).lower()
else:
return str(default).lower()
| {
"repo_name": "cloudkeep/symantecssl",
"path": "symantecssl/utils.py",
"copies": "4",
"size": "1095",
"license": "apache-2.0",
"hash": 824226561915554800,
"line_mean": 18.9090909091,
"line_max": 64,
"alpha_frac": 0.6365296804,
"autogenerated": false,
"ratio": 3.2882882882882885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5924817968688288,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from mantid.api import AnalysisDataService
import mantid.simpleapi as simpleapi
import os
import addie.utilities
class AddieDriver(object):
    """
    Driver for addie application

    Thin wrapper around Mantid algorithms (via mantid.simpleapi) that
    tracks the current S(Q) workspace and the G(r) workspaces derived
    from it.
    """
    def __init__(self):
        """ Initialization
        Returns
        -------
        """
        # name of the MatrixWorkspace for S(Q)
        self._currSqWsName = ''
        # dictionary to record workspace index of a certain S(Q)
        self._sqIndexDict = dict()
        # dictionary of the workspace with rmin, rmax and delta r setup
        self._grWsIndex = 0
        self._grWsNameDict = dict()
    def calculate_sqAlt(self, ws_name, outputType):
        """Convert an S(Q) workspace to an alternate reciprocal-space form.

        :param ws_name: name of the input S(Q) workspace
        :param outputType: 'S(Q)' (no-op) or 'Q[S(Q)-1]'
        :return: name of the (hidden) converted workspace, or ws_name
        """
        if outputType == 'S(Q)':  # don't do anything
            return ws_name
        elif outputType == 'Q[S(Q)-1]':
            outputType = 'F(Q)'
        else:
            # PDConvertReciprocalSpace doesn't currently know how to convert to
            # S(Q)-1
            raise ValueError(
                'Do not know how to convert to {}'.format(outputType))
        outputName = '__{}Alt'.format(ws_name)  # should be hidden
        simpleapi.PDConvertReciprocalSpace(
            InputWorkspace=ws_name,
            OutputWorkspace=outputName,
            From='S(Q)',
            To=outputType)
        return outputName
    def calculate_gr(
            self,
            sq_ws_name,
            pdf_type,
            min_r,
            delta_r,
            max_r,
            min_q,
            max_q,
            pdf_filter,
            rho0):
        """ Calculate G(R)
        :param sq_ws_name: workspace name of S(q)
        :param pdf_type: type of PDF as G(r), g(r) and RDF(r)
        :param min_r: R_min
        :param delta_r: delta R
        :param max_r: R_max
        :param min_q: Q_min for the Fourier transform
        :param max_q: Q_max for the Fourier transform
        :param pdf_filter: type of PDF filter ('lorch' or None)
        :param rho0: average number density used for g(r) and RDF(r) conversions
        :return: string as G(r) workspace's name
        """
        # check
        assert isinstance(
            sq_ws_name,
            str) and AnalysisDataService.doesExist(sq_ws_name)
        assert isinstance(pdf_type, str) and len(pdf_type) > 0, \
            'PDF type is %s is not supported.' % str(pdf_type)
        assert min_r < max_r, 'Rmin must be less than Rmax (%f >= %f)' % (
            min_r, max_r)
        assert delta_r < (max_r - min_r), \
            'Must have more than one bin in G(r) (%f >= %f)' % (
                delta_r, (max_r - min_r))
        assert min_q < max_q, 'Qmin must be less than Qmax (%f >= %f)' % (
            min_q, max_q)
        assert isinstance(
            pdf_filter, str) or pdf_filter is None, 'PDF filter must be a string or None.'
        # set to the current S(q) workspace name
        self._currSqWsName = sq_ws_name
        # set up the parameters for FourierTransform
        # output workspace prefix depends on the requested PDF type
        prefix = 'G'
        if pdf_type.startswith('g'):
            prefix = 'g'
        elif pdf_type.startswith('R'):
            prefix = 'RDF'
        if self._currSqWsName in self._sqIndexDict:
            # for S(q) loaded from file
            ws_seq_index = self._sqIndexDict[self._currSqWsName]
            update_index = True
        else:
            # for S(q) calculated from IPython console
            ws_seq_index = 0
            update_index = False
        if pdf_filter is None:
            pdf_filter = False
        else:
            # BUGFIX: warn while pdf_filter is still the original string.
            # Previously it was collapsed to True before the comparison, so
            # `pdf_filter != 'lorch'` was always true and the warning was
            # printed even for the supported 'lorch' filter.
            if pdf_filter != 'lorch':
                print(
                    '[WARNING] PDF filter {0} is not supported.'.format(pdf_filter))
            pdf_filter = True
        gr_ws_name = '%s(R)_%s_%d' % (prefix, self._currSqWsName, ws_seq_index)
        kwargs = {'OutputWorkspace': gr_ws_name,
                  'Qmin': min_q,
                  'Qmax': max_q,
                  'PDFType': pdf_type,
                  'DeltaR': delta_r,
                  'Rmax': max_r,
                  'Filter': pdf_filter}
        if rho0 is not None:
            kwargs['rho0'] = rho0
        # Print warning about using G(r) and rho0
        if 'rho0' in kwargs and pdf_type == "G(r)":
            print("WARNING: Modifying the density does not affect G(r) function")
        # get the input unit
        sofq_type = 'S(Q)'
        # do the FFT
        simpleapi.PDFFourierTransform(InputWorkspace=self._currSqWsName,
                                      InputSofQType=sofq_type,
                                      **kwargs)
        # check
        assert AnalysisDataService.doesExist(
            gr_ws_name), 'Failed to do Fourier Transform.'
        self._grWsNameDict[(min_q, max_q)] = gr_ws_name
        # update state variable
        if update_index:
            self._sqIndexDict[self._currSqWsName] += 1
        return gr_ws_name
    @staticmethod
    def clone_workspace(src_name, target_name):
        """Clone a workspace in the AnalysisDataService.

        :param src_name: name of the workspace to clone
        :param target_name: name of the cloned workspace
        :return:
        """
        # check (messages replace the previous 'blabla' placeholders)
        assert isinstance(src_name, str), \
            'Source workspace name must be a string, not {0}.'.format(
                type(src_name))
        assert isinstance(target_name, str), \
            'Target workspace name must be a string, not {0}.'.format(
                type(target_name))
        # check existence
        if AnalysisDataService.doesExist(src_name):
            simpleapi.CloneWorkspace(
                InputWorkspace=src_name,
                OutputWorkspace=target_name)
        else:
            raise RuntimeError(
                'Workspace with name {0} does not exist in ADS. CloneWorkspace fails!'.format(src_name))
    @staticmethod
    def delete_workspace(workspace_name, no_throw=False):
        """
        Delete a workspace from Mantid's AnalysisDataService
        Args:
            workspace_name: name of a workspace as a string instance
            no_throw: if True, then it won't throw any exception if the workspace does not exist in AnalysisDataService
        Returns: None
        """
        # check
        assert isinstance(workspace_name, str), \
            'Input workspace name must be a string, but not %s.' % str(type(workspace_name))
        # check whether the workspace exists
        does_exist = AnalysisDataService.doesExist(workspace_name)
        if does_exist:
            # delete
            simpleapi.DeleteWorkspace(Workspace=workspace_name)
        elif not no_throw:
            raise RuntimeError('Workspace %s does not exist.' % workspace_name)
        return
    @staticmethod
    def edit_matrix_workspace(
            sq_name,
            scale_factor,
            shift,
            edited_sq_name=None):
        """
        Edit the matrix workspace of S(Q) by scaling and shift
        :param sq_name: name of the SofQ workspace
        :param scale_factor: multiplicative factor applied to the data
        :param shift: additive offset applied after scaling
        :param edited_sq_name: workspace for the edited S(Q)
        :return:
        """
        # get the workspace
        if AnalysisDataService.doesExist(sq_name) is False:
            raise RuntimeError(
                'S(Q) workspace {0} cannot be found in ADS.'.format(sq_name))
        if edited_sq_name is not None:
            simpleapi.CloneWorkspace(
                InputWorkspace=sq_name,
                OutputWorkspace=edited_sq_name)
            sq_ws = AnalysisDataService.retrieve(edited_sq_name)
        else:
            sq_ws = AnalysisDataService.retrieve(sq_name)
        # scale then shift; workspace arithmetic produces new workspaces
        sq_ws = sq_ws * scale_factor
        sq_ws = sq_ws + shift
        # NOTE(review): when edited_sq_name is None this branch calls
        # DeleteWorkspace/RenameWorkspace with None -- confirm callers
        # always supply edited_sq_name.
        if sq_ws.name() != edited_sq_name:
            simpleapi.DeleteWorkspace(Workspace=edited_sq_name)
            simpleapi.RenameWorkspace(
                InputWorkspace=sq_ws,
                OutputWorkspace=edited_sq_name)
        assert sq_ws is not None, 'S(Q) workspace cannot be None.'
        print('[DB...BAT] S(Q) workspace that is edit is {0}'.format(sq_ws))
    # RMCProfile format. The 1st column tells how many X,Y pairs,
    # the second is a comment line with information regarding the data
    # (title, multiplier for data, etc.), and then the X,Y pairs for G(r) or S(Q) data.
    @staticmethod
    def export_to_rmcprofile(
            ws_name,
            output_file_name,
            comment='',
            ws_index=0):
        """ Export a workspace 2D to a 2 column data for RMCProfile
        :param ws_name: name of the workspace to export
        :param output_file_name: path of the output ASCII file
        :param comment: comment written on the second line of the file
        :param ws_index: index of the spectrum to export
        """
        # check inputs (fixed: report type(...), not str(...), on mismatch)
        assert isinstance(
            ws_name, str), 'Workspace name {0} must be a string but not a {1}.'.format(
            ws_name, type(ws_name))
        assert isinstance(
            output_file_name, str), 'Output file name {0} must be a string but not a {1}.'.format(
            output_file_name, type(output_file_name))
        assert isinstance(
            comment, str), 'Comment {0} must be a string but not a {1}.'.format(
            comment, type(comment))
        assert isinstance(
            ws_index, int), 'Workspace index must be an integer but not a {0}.'.format(
            type(ws_index))
        # convert to point data from histogram
        simpleapi.ConvertToPointData(
            InputWorkspace=ws_name,
            OutputWorkspace=ws_name)
        # get workspace for vecX and vecY
        if AnalysisDataService.doesExist(ws_name):
            workspace = AnalysisDataService.retrieve(ws_name)
        else:
            raise RuntimeError(
                'Workspace {0} does not exist in ADS.'.format(ws_name))
        if not 0 <= ws_index < workspace.getNumberHistograms():
            raise RuntimeError(
                'Workspace index {0} is out of range.'.format(ws_index))
        vec_x = workspace.readX(ws_index)
        vec_y = workspace.readY(ws_index)
        # write to buffer
        wbuf = ''
        wbuf += '{0}\n'.format(len(vec_x))
        wbuf += '{0}\n'.format(comment)
        for index in range(len(vec_x)):
            wbuf += ' {0} {1}\n'.format(vec_x[index], vec_y[index])
        # write to file; context manager guarantees the handle is closed
        # even if the write fails
        try:
            with open(output_file_name, 'w') as ofile:
                ofile.write(wbuf)
        except IOError as io_err:
            msg = 'Unable to export data to file {0} in RMCProfile format due to {1}.'
            msg = msg.format(output_file_name, io_err)
            raise RuntimeError(msg)
    def get_bank_numbers(self, ws_name):
        '''Returns the list of spectrum numbers in the workspace'''
        wksp = addie.utilities.workspaces.get_ws(ws_name)
        banks = [wksp.getSpectrum(i).getSpectrumNo()
                 for i in range(wksp.getNumberHistograms())]
        return banks
    def convert_bragg_data(self, ws_name, x_unit):
        """Convert the workspace's X axis to x_unit in place (no-op if
        already in that unit)."""
        wksp = addie.utilities.workspaces.get_ws(ws_name)
        curr_unit = wksp.getAxis(0).getUnit().unitID()
        if curr_unit != x_unit:
            simpleapi.ConvertUnits(
                InputWorkspace=ws_name,
                OutputWorkspace=ws_name,
                Target=x_unit,
                EMode='Elastic')
    def get_bragg_data(self, ws_name, wkspindex, x_unit):
        """ Get Bragg diffraction data of 1 bank

        :param ws_name: name of the diffraction workspace
        :param wkspindex: workspace (bank) index, non-negative
        :param x_unit: target unit for the X axis
        :return: data arrays for the requested spectrum
        """
        # check
        assert isinstance(wkspindex, int) and wkspindex >= 0
        bank_ws = addie.utilities.workspaces.get_ws(ws_name)
        # convert units if necessary
        curr_unit = bank_ws.getAxis(0).getUnit().unitID()
        if curr_unit != x_unit:
            simpleapi.ConvertToHistogram(
                InputWorkspace=ws_name, OutputWorkspace=ws_name)
            simpleapi.ConvertUnits(
                InputWorkspace=ws_name,
                OutputWorkspace=ws_name,
                Target=x_unit,
                EMode='Elastic')
        return addie.utilities.workspaces.get_ws_data(ws_name, wkspindex)
    def get_current_sq_name(self):
        """
        Get the (workspace) name of current S(Q)
        Returns:
        """
        return self._currSqWsName
    def get_current_workspaces(self):
        """
        Get current workspaces' names
        Returns
        -------
        a list of strings
        """
        return AnalysisDataService.getObjectNames()
    def get_gr(self, min_q, max_q):
        """Get G(r)
        :param min_q: Qmin used when the G(r) was calculated
        :param max_q: Qmax used when the G(r) was calculated
        :return: 3-tuple for numpy.array
        """
        # find key in dictionary; keys are (Qmin, Qmax) pairs, so the error
        # message now reports the Q-range (the old one claimed R-range)
        error_msg = 'Q-range ({0}, {1}) is not supported. Current stored G(R) parameters' \
                    ' are {2}.'.format(min_q, max_q, list(self._grWsNameDict.keys()))
        assert (min_q, max_q) in self._grWsNameDict, error_msg
        # get the workspace
        gr_ws_name = self._grWsNameDict[(min_q, max_q)]
        return addie.utilities.workspaces.get_ws_data(gr_ws_name)
    def get_sq(self, sq_name=None):
        """Get S(Q)
        :param sq_name: workspace name, or None for the current S(Q)
        :return: 3-tuple of numpy array as Q, S(Q) and Sigma(Q)
        """
        # check
        assert isinstance(sq_name, str) or sq_name is None, 'Input S(Q) must either a string or None but not {0}.' \
                                                            ''.format(type(sq_name))
        # set up default
        if sq_name is None:
            sq_name = self._currSqWsName
        if not AnalysisDataService.doesExist(sq_name):
            raise RuntimeError(
                'S(Q) matrix workspace {0} does not exist.'.format(sq_name))
        return addie.utilities.workspaces.get_ws_data(sq_name)
    def load_gr(self, gr_file_name):
        """
        Load an ASCII file containing G(r)
        :param gr_file_name: path to the G(r) file
        :return: 2-tuple (success flag, workspace name or error message)
        """
        # check
        assert len(gr_file_name) > 0
        # load; workspace named after the file's base name
        gr_ws_name = os.path.basename(gr_file_name).split('.')[0]
        simpleapi.LoadAscii(
            Filename=gr_file_name,
            OutputWorkspace=gr_ws_name,
            Unit='Empty')
        # check output
        if not AnalysisDataService.doesExist(gr_ws_name):
            return False, 'Unable to load file %s as target workspace %s cannot be found.' % (
                gr_ws_name, gr_ws_name)
        return True, gr_ws_name
    def load_sq(self, file_name):
        """
        Load S(Q) to a numpy
        Guarantees: the file is loaded to self._currSQX, _currSQY and _currSQE
        Parameters
        ----------
        file_name :: name of the S(Q)
        Returns
        -------
        3-tuple: workspace name and the range of Q
        """
        # generate S(Q) workspace name
        sq_ws_name = os.path.basename(file_name).split('.')[0]
        # call mantid LoadAscii
        ext = file_name.upper().split('.')[-1]
        if ext == 'NXS':
            simpleapi.LoadNexusProcessed(
                Filename=file_name, OutputWorkspace=sq_ws_name)
            simpleapi.ConvertUnits(
                InputWorkspace=sq_ws_name,
                OutputWorkspace=sq_ws_name,
                EMode='Elastic',
                Target='MomentumTransfer')
            simpleapi.ConvertToPointData(
                InputWorkspace=sq_ws_name,
                OutputWorkspace=sq_ws_name)  # TODO REMOVE THIS LINE
        elif ext in ('DAT', 'TXT'):
            # BUGFIX: ext is upper-cased above, so the old comparison against
            # lowercase 'txt' could never match .txt files
            simpleapi.LoadAscii(
                Filename=file_name,
                OutputWorkspace=sq_ws_name,
                Unit='MomentumTransfer')
            # The S(Q) file is in fact S(Q)-1 in sq file. So need to add 1 to
            # the workspace
            out_ws = AnalysisDataService.retrieve(sq_ws_name)
            out_ws += 1
        assert AnalysisDataService.doesExist(
            sq_ws_name), 'Unable to load S(Q) file %s.' % file_name
        # set to the current S(Q) workspace name
        self._currSqWsName = sq_ws_name
        self._sqIndexDict[self._currSqWsName] = 0
        # get range of Q from the loading
        sq_ws = AnalysisDataService.retrieve(sq_ws_name)
        q_min = sq_ws.readX(0)[0]
        q_max = sq_ws.readX(0)[-1]
        return sq_ws_name, q_min, q_max
    def save_ascii(self, ws_name, file_name, filetype, comment=''):
        """
        save ascii for G(r) or S(Q)
        Args:
            ws_name: name of the workspace to save
            file_name: path of the output file
            filetype: xye, csv, rmcprofile, dat, gr or sq
            comment: user comment to the file
        Returns:
        """
        assert isinstance(
            filetype, str), 'GofR file type {0} must be a supported string.'.format(filetype)
        if filetype == 'xye':
            simpleapi.SaveAscii(
                InputWorkspace=ws_name,
                Filename=file_name,
                Separator='Space')
        elif filetype == 'csv':
            simpleapi.SaveAscii(
                InputWorkspace=ws_name,
                Filename=file_name,
                Separator='CSV')
        elif filetype == 'rmcprofile' or filetype == 'dat':
            self.export_to_rmcprofile(ws_name, file_name, comment=comment)
        elif filetype == 'gr':
            wksp = AnalysisDataService.retrieve(ws_name)
            wksp.getAxis(0).setUnit("Label").setLabel("r", "Angstrom")
            simpleapi.SavePDFGui(InputWorkspace=wksp, Filename=file_name)
        elif filetype == 'sq':
            simpleapi.SaveAscii(
                InputWorkspace=ws_name,
                Filename=file_name,
                Separator='Space')
        else:
            # non-supported type
            raise RuntimeError(
                'G(r) or S(Q) file type "{0}" is not supported.'.format(filetype))
    @staticmethod
    def write_gss_file(ws_name_list, gss_file_name):
        """
        Write a list of MatrixWorkspaces to a single GSAS file
        Args:
            ws_name_list: names of the workspaces to conjoin (at least 2)
            gss_file_name: path of the output GSAS file
        Returns:
        """
        # check
        assert isinstance(ws_name_list, list) and len(ws_name_list) > 1, \
            'There must be at least 2 workspaces for conjoining operation.'
        assert isinstance(gss_file_name, str)
        # write the first workspace, then append the rest (unused index
        # variable removed)
        append_mode = False
        for ws_name in ws_name_list:
            simpleapi.SaveGSS(InputWorkspace=ws_name, Filename=gss_file_name,
                              Format='SLOG', Bank=1, Append=append_mode)
            append_mode = True
| {
"repo_name": "neutrons/FastGR",
"path": "addie/addiedriver.py",
"copies": "1",
"size": "17891",
"license": "mit",
"hash": 60205820418282310,
"line_mean": 33.472061657,
"line_max": 119,
"alpha_frac": 0.5454138953,
"autogenerated": false,
"ratio": 3.9088922875245795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9952195556868957,
"avg_score": 0.00042212519112454206,
"num_lines": 519
} |
from __future__ import absolute_import, division, print_function
from meta_util_git import set_userid, unixpath, repo_list
# Register the git identity and which machines/repos it may push to.
set_userid(userid='Erotemic',
           owned_computers=['Hyrule', 'BakerStreet', 'Ooo'],
           permitted_repos=['pyrf', 'detecttools'])
# USER DEFINITIONS
HOME_DIR = unixpath('~')
CODE_DIR = unixpath('~/code')
LATEX_DIR = unixpath('~/latex')
BUNDLE_DPATH = unixpath('~/local/vim/vimfiles/bundle')
# Non local project repos
# repo_list presumably returns (urls, local checkout paths under the given
# directory) -- confirm in meta_util_git
IBEIS_REPOS_URLS, IBEIS_REPOS = repo_list([
    'https://github.com/Erotemic/utool.git',
    'https://github.com/Erotemic/guitool.git',
    'https://github.com/Erotemic/plottool.git',
    'https://github.com/Erotemic/vtool.git',
    'https://github.com/Erotemic/hesaff.git',
    'https://github.com/Erotemic/ibeis.git',
    'https://github.com/bluemellophone/pyrf.git',
    'https://github.com/bluemellophone/detecttools.git',
], CODE_DIR)
# Third-party (template) repositories checked out alongside the code
TPL_REPOS_URLS, TPL_REPOS = repo_list([
    'https://github.com/Erotemic/opencv',
], CODE_DIR)
CODE_REPO_URLS = IBEIS_REPOS_URLS + TPL_REPOS_URLS
CODE_REPOS = IBEIS_REPOS + TPL_REPOS
# Vim plugin bundles cloned into BUNDLE_DPATH
VIM_REPO_URLS, VIM_REPOS = repo_list([
    'https://github.com/dbarsam/vim-vimtweak.git',
    'https://github.com/bling/vim-airline.git',
    'https://github.com/davidhalter/jedi-vim.git',
    'https://github.com/ervandew/supertab.git',
    'https://github.com/mhinz/vim-startify.git',
    'https://github.com/scrooloose/nerdcommenter.git',
    'https://github.com/scrooloose/nerdtree.git',
    'https://github.com/scrooloose/syntastic.git',
    'https://github.com/terryma/vim-multiple-cursors.git',
    'https://github.com/tpope/vim-repeat.git',
    'https://github.com/tpope/vim-sensible.git',
    'https://github.com/tpope/vim-surround.git',
    'https://github.com/tpope/vim-unimpaired.git',
    'https://github.com/vim-scripts/Conque-Shell.git',
    'https://github.com/vim-scripts/csv.vim.git',
    'https://github.com/vim-scripts/highlight.vim.git',
    #'https://github.com/koron/minimap-vim.git',
    #'https://github.com/zhaocai/GoldenView.Vim.git',
], BUNDLE_DPATH)
# Plugins that also need `git submodule update` after cloning
VIM_REPOS_WITH_SUBMODULES = [
    'jedi-vim',
    'syntastic',
]
# Local project repositories
PROJECT_REPOS = CODE_REPOS
| {
"repo_name": "bluemellophone/detecttools",
"path": "detecttools/gitutil/__REPOS__.py",
"copies": "1",
"size": "2199",
"license": "apache-2.0",
"hash": 7338392238598697000,
"line_mean": 33.359375,
"line_max": 64,
"alpha_frac": 0.6807639836,
"autogenerated": false,
"ratio": 2.6557971014492754,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.38365610850492754,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from mybuild._compat import *
import functools
import unittest
from mybuild.req import pgraph
from mybuild.req.solver import (ComparableSolution,
create_trunk,
solve_trunk,
solve,
SolveError)
class HandyPgraph(pgraph.Pgraph):
    """Pgraph exposing each registered node type as a factory attribute.

    After construction, ``g.SomeNodeType(...)`` is shorthand for
    ``g.new_node(SomeNodeType, ...)`` for every node type whose name does
    not clash with an existing attribute.
    """

    def __init__(self):
        super(HandyPgraph, self).__init__()
        for klass in type(self)._iter_all_node_types():
            attr_name = klass.__name__
            if hasattr(self, attr_name):
                continue
            setattr(self, attr_name, functools.partial(self.new_node, klass))
class Named(object):
    """Mixin adding an optional ``name`` keyword, used by ``__repr__``."""

    @classmethod
    def _new(cls, *args, **kwargs):
        # cache node instances by their kwargs unless told otherwise
        kwargs.setdefault('cache_kwargs', True)
        return super(Named, cls)._new(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        self._name = kwargs.pop('name', None)
        super(Named, self).__init__(*args, **kwargs)

    def __repr__(self):
        # any falsy name (None, '') falls through to the default repr
        if self._name:
            return self._name
        return super(Named, self).__repr__()
@HandyPgraph.node_type
class NamedAtom(Named, pgraph.Atom):
    """Atom node that can carry a human-readable name."""
    pass
class StarArgsToArg(object):
    """For compatibility with tests, to let them pass operands in *args."""
    @classmethod
    def _new(cls, *operands, **kwargs):
        # pack the star-args into a single operands tuple for the base type
        return super(StarArgsToArg, cls)._new(operands, **kwargs)
@HandyPgraph.node_type
class Or(Named, StarArgsToArg, pgraph.Or):
    """Named Or node accepting operands as positional arguments."""
    pass
@HandyPgraph.node_type
class And(Named, StarArgsToArg, pgraph.And):
    """Named And node accepting operands as positional arguments."""
    pass
@HandyPgraph.node_type
class AtMostOne(Named, StarArgsToArg, pgraph.AtMostOne):
    """Named AtMostOne node accepting operands as positional arguments."""
    pass
@HandyPgraph.node_type
class AllEqual(Named, StarArgsToArg, pgraph.AllEqual):
    """Named AllEqual node accepting operands as positional arguments."""
    pass
class SolverTestCaseBase(unittest.TestCase):
    """Base test case providing a fresh HandyPgraph and an atom factory."""

    def setUp(self):
        self.pgraph = HandyPgraph()

    def atoms(self, names):
        """Create one named atom per entry of *names*."""
        return [self.pgraph.NamedAtom(name=each) for each in names]
class TrunkTestCase(SolverTestCaseBase):
    """Test cases which do not involve branching."""
    def test_initial_const(self):
        # a node made constant True must come out True in the solution
        g = self.pgraph
        A, = self.atoms('A')
        N = g.new_const(True, A)
        solution = solve(g, {})
        self.assertIs(True, solution[A])
    def test_implication_1(self):
        # N = ~A: asserting N forces A to False
        g = self.pgraph
        A, = self.atoms('A')
        N = g.Not(A)
        solution = solve(g, {N: True})
        self.assertIs(True, solution[N])
        self.assertIs(False, solution[A])
    def test_implication_2(self):
        # asserting AtMostOne False forces every operand to False
        g = self.pgraph
        A,B,C = self.atoms('ABC')
        N = g.AtMostOne(A,B,C)
        solution = solve(g, {N: False})
        self.assertIs(False, solution[A])
        self.assertIs(False, solution[B])
        self.assertIs(False, solution[C])
    def test_implication_3(self):
        # with AtMostOne True and A True, the other operands must be False
        g = self.pgraph
        A,B,C = self.atoms('ABC')
        N = g.AtMostOne(A,B,C)
        solution = solve(g, {N: True, A: True})
        self.assertIs(False, solution[B])
        self.assertIs(False, solution[C])
    def test_neglast_1(self):
        g = self.pgraph
        A,B,C,D = self.atoms('ABCD')
        # (A|B) & (C|D) & (B|~C) & ~B
        N = g.And(g.Or(A,B), g.Or(C,D), g.Or(B, g.Not(C)), g.Not(B))
        solution = solve(g, {N: True})
        self.assertIs(True, solution[N])
        self.assertIs(True, solution[A])
        self.assertIs(False, solution[B])
        self.assertIs(False, solution[C])
        self.assertIs(True, solution[D])
    def test_neglast_2(self):
        g = self.pgraph
        A,B = self.atoms('AB')
        # (A=>B) & A
        N = g.And(g.Implies(A,B), A)
        solution = solve(g, {N: True})
        self.assertIs(True, solution[N])
        self.assertIs(True, solution[A])
        self.assertIs(True, solution[B])
    def test_neglast_3(self):
        g = self.pgraph
        A,B = self.atoms('AB')
        # (A=>B) & ~B
        N = g.And(g.Implies(A,B), g.Not(B))
        solution = solve(g, {N: True})
        self.assertIs(True, solution[N])
        self.assertIs(False, solution[A])
        self.assertIs(False, solution[B])
    def test_neglast_4(self):
        g = self.pgraph
        A,B,C = self.atoms('ABC')
        N = g.AtMostOne(A,B,C)
        # NOTE(review): asserting N with A and B False forces C True, i.e.
        # AtMostOne asserted True behaves as "exactly one" -- confirm
        # against pgraph's semantics.
        solution = solve(g, {N: True, A: False, B: False})
        self.assertIs(True, solution[C])
    def test_violation_1(self):
        g = self.pgraph
        A, = self.atoms('A')
        # A & ~A
        N = g.And(A, g.Not(A))
        with self.assertRaises(SolveError):
            solve(g, {N: True})
    def test_violation_2(self):
        g = self.pgraph
        A,B,C = self.atoms('ABC')
        N = g.AtMostOne(A,B,C)
        # two operands True violates the at-most-one constraint
        with self.assertRaises(SolveError):
            solve(g, {A: True, B: True})
class BranchTestCase(SolverTestCaseBase):
    # Cases that require the solver to branch (try assignments and resolve
    # contradictions) rather than pure trunk propagation.

    def sneaky_pair_and(self, a, b, **kwargs):
        """(A | B) & (~A | B) & (A | ~B)"""
        # Only satisfiable with a = b = True, but no single clause forces it,
        # so the solver has to branch.
        g = self.pgraph
        # return g.And(g.Or(a, b),
        #              g.Or(g.Not(a), b),
        #              g.Or(a, g.Not(b)), **kwargs)
        # solved the same way as an expr above, but gives lesser logs
        return g.And(g.Or(a[True], b[True]),
                     g.Or(a[False], b[True]),
                     g.Or(a[True], b[False]), **kwargs)

    def sneaky_chain(self):
        # Nest sneaky_pair_and four levels deep so each level forces another
        # round of branching.
        g = self.pgraph
        A, B, C, D, E = self.atoms('ABCDE')
        # (E | Z) & (~E | Z) & (E | ~Z), where
        # Z = (D | Y) & (~D | Y) & (D | ~Y), where
        # Y = (C | X) & (~C | X) & (C | ~X), where
        # X = (A | B) & (~A | B) & (A | ~B)
        X = self.sneaky_pair_and(A, B, name='X')
        Y = self.sneaky_pair_and(C, X, name='Y')
        Z = self.sneaky_pair_and(D, Y, name='Z')
        P = self.sneaky_pair_and(E, Z)
        return P, (X, Y, Z), (A, B, C, D, E)

    def test_contradiction_1(self):
        g = self.pgraph
        A,B = self.atoms('AB')
        # (A|B) & (~A | A&~A)  -- the A=True branch is contradictory
        N = g.And(g.Or(A, B), g.Or(g.Not(A), g.And(A, g.Not(A))))
        solution = solve(g, {N: True})
        self.assertIs(False, solution[A])
        self.assertIs(True, solution[B])

    def test_contradiction_2(self):
        g = self.pgraph
        A,B = self.atoms('AB')
        nA,nB = map(g.Not, (A,B))
        # (A + ~A&~B + ~B) & (B + B&~A)
        # solution = solve(g, {N: True})
        solution = solve(g, {
            g.Or(A, g.And(nA, nB), nB): True,
            g.Or(B, g.And(nA, B)): True
        })
        self.assertIs(True, solution[A])
        self.assertIs(True, solution[B])

    def test_15(self):
        g = self.pgraph
        A,B,C = self.atoms('ABC')
        # (A | A&~A) & (A=>B) & (B=>C) & (C=>A)
        # the >> chain adds the implication edges directly to the graph
        A[True] >> B[True] >> C[True] >> A[True]
        N = g.Or(A, g.And(A, g.Not(A)))
        solution = solve(g, {N: True})
        self.assertIs(True, solution[A])
        self.assertIs(True, solution[B])
        self.assertIs(True, solution[C])

    def test_resolve_0(self):
        g = self.pgraph
        A, B = self.atoms('AB')
        # Declaring these two exprs equivalent makes the sneaky pair
        # unsatisfiable, so resolution must end in SolveError.
        x = g.And(B[False], A[False], g.Or(A[True], B[True]))
        y = B[True]
        x.equivalent(y)
        with self.assertRaises(SolveError):
            solve(g, {self.sneaky_pair_and(A, B): True})

    def test_resolve_1(self):
        g = self.pgraph
        A, B = self.atoms('AB')
        solution = solve(g, {self.sneaky_pair_and(A, B): True})
        self.assertIs(True, solution[A])
        self.assertIs(True, solution[B])

    def test_resolve_2(self):
        g = self.pgraph
        A, B, C = self.atoms('ABC')
        # (C | X) & (~C | X) & (C | ~X), where
        # X = (A | B) & (~A | B) & (A | ~B)
        X = self.sneaky_pair_and(A, B, name='X')
        P = self.sneaky_pair_and(C, X)
        solution = solve(g, {P: True})
        self.assertIs(True, solution[A])
        self.assertIs(True, solution[B])
        self.assertIs(True, solution[C])
        self.assertIs(True, solution[X])

    def test_resolve_4(self):
        # The fully nested chain: everything must resolve to True.
        g = self.pgraph
        P, pair_ands, atoms = self.sneaky_chain()
        solution = solve(g, {P: True})
        for node in (pair_ands + atoms):
            self.assertIs(True, solution[node], "{0} is not True".format(node))

    def test_trunk_base(self):
        # The base of a solved trunk equals the initial (unsolved) trunk.
        g = self.pgraph
        P, pair_ands, atoms = self.sneaky_chain()
        initial_trunk = create_trunk(g, {P: True})
        solved_trunk = solve_trunk(g, {P: True})
        self.assertEqual(ComparableSolution(initial_trunk),
                         ComparableSolution(solved_trunk.base))
        self.assertEqual(ComparableSolution(initial_trunk.base),
                         ComparableSolution(solved_trunk.base))
| {
"repo_name": "embox/mybuild",
"path": "tests/test_solver.py",
"copies": "2",
"size": "8677",
"license": "bsd-2-clause",
"hash": -8386382138881340000,
"line_mean": 26.8108974359,
"line_max": 79,
"alpha_frac": 0.5239137951,
"autogenerated": false,
"ratio": 3.070417551309271,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4594331346409271,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import scipy, numpy
import sys
import logging
# Dependency sanity checks: warn (but continue) if the interpreter or the
# key libraries are not one of the versions this tool was tested against.
# The Python check deliberately compares str() of the version tuple, so the
# expected value is the literal "(2, 7)".
__EXPECTED_VERSION__ = '(2, 7)'
if str(sys.version_info[:2]) != __EXPECTED_VERSION__:
    print("Unexpected Python version, attempting to continue. Check the dependencies and version numbers are compatible to execute this module.")
    print("Expected "+str(__EXPECTED_VERSION__))
    print("Actual "+str(sys.version_info[:2]))
# NOTE(review): the bare name 'OpenGL' is assumed to be bound by the star
# imports above — confirm; otherwise this line raises NameError.
EXPECTED_VERSION_OPENGL = ['3.1.1b1','3.1.1a1','3.0.2', '3.1.0']
if str(OpenGL.__version__) not in EXPECTED_VERSION_OPENGL:
    print("Unexpected OpenGL version, attempting to continue. Check the dependencies and version numbers are compatible to execute this module.")
    print("Expected "+str(EXPECTED_VERSION_OPENGL))
    print("Actual "+str(OpenGL.__version__))
EXPECTED_VERSION_SCIPY = ['0.18.1','0.19.0','1.2.1']
if str(scipy.__version__) not in EXPECTED_VERSION_SCIPY:
    print("Unexpected scipy version, attempting to continue. Check the dependencies and version numbers are compatible to execute this module.")
    print("Expected "+str(EXPECTED_VERSION_SCIPY))
    print("Actual "+str(scipy.__version__))
EXPECTED_VERSION_NUMPY = ['1.11.2','1.12.1','1.16.3']
if str(numpy.__version__) not in EXPECTED_VERSION_NUMPY:
    print("Unexpected numpy version, attempting to continue. Check the dependencies and version numbers are compatible to execute this module.")
    print("Expected "+str(EXPECTED_VERSION_NUMPY))
    print("Actual "+str(numpy.__version__))
# Project-local modules (imported after the version checks above).
import tool_managers
from data_3d import WaveFront
from modes import Updateable_Line
from options import Key_Events, get_parsed_commandline_options
from service import GracefulKiller, GracefulShutdown
class GL_StateData(object):
    # Shared class-level viewport/rotation state: mutated by the input
    # handlers and read every frame by the renderer.
    X_AXIS = 20.0           # rotation about X, degrees
    Y_AXIS = 42.0           # rotation about Y, degrees
    Z_AXIS = 0.0            # rotation about Z, degrees
    do_rotate = True        # auto-rotate the scene each frame
    rotate_rate = 0.25      # degrees of Y rotation applied per frame
    viewport_depth = -27.0  # camera translation along Z (zoom level)
    default_x = 0           # camera translation along X
    default_z = 0           # passed as the Y component of glTranslatef in DrawGLScene

    @staticmethod
    def toggle_rotate():
        # Flip the auto-rotation flag on/off.
        GL_StateData.do_rotate = not GL_StateData.do_rotate
class OpenGLInputHandler:
    """Translates GLUT keyboard/mouse callbacks into GL_StateData updates.

    The "updateable line" feature referenced by old code was disabled
    (its construction in __init__ is gone), so all related key/drag
    handling has been removed rather than left as dead code.
    """

    def __init__(self):
        self.keyEvents = Key_Events()
        # Last observed press positions, used to compute drag deltas.
        self.lastMouseX, self.lastMouseY = 0, 0
        self.lastRightMouseX, self.lastRightMouseY = 0, 0
        self.left_dragging = False
        self.right_dragging = False

    def keyPressed(self, *args):
        """GLUT keyboard/special-key callback; args[0] is the key.

        ESC shuts down, SPACE toggles auto-rotation, z/x pan along X,
        and the move_viewport keys zoom or rotate the scene.
        """
        ESCAPE = '\033'
        SPACE = ' '
        rate = 1
        move_viewport = [GLUT_KEY_UP, GLUT_KEY_DOWN, 'q', 'e', 'w', 's', 'a', 'd']
        self.keyEvents.key_pressed(args[0])
        s = ""
        if args[0] in [ESCAPE]:
            GracefulShutdown.do_shutdown()
        elif args[0] == SPACE:
            GL_StateData.toggle_rotate()
        elif args[0] == 'z':
            GL_StateData.default_x += rate
        elif args[0] == 'x':
            GL_StateData.default_x -= rate
        elif args[0] in move_viewport:
            if args[0] == GLUT_KEY_UP:
                # never zoom past the origin
                if GL_StateData.viewport_depth <= 0:
                    GL_StateData.viewport_depth += rate
                    s = "zoom in"
            elif args[0] == GLUT_KEY_DOWN:
                GL_StateData.viewport_depth -= rate
                s = "zoom out"
            elif args[0] == 'e':
                GL_StateData.X_AXIS += rate
                s = "rotate +X"
            elif args[0] == 'q':
                GL_StateData.X_AXIS -= rate
                s = "rotate -X"
            elif args[0] == 'a':
                GL_StateData.Y_AXIS += rate
                s = "rotate +Y"
            elif args[0] == 'd':
                GL_StateData.Y_AXIS -= rate
                s = "rotate -Y"
            elif args[0] == 'w':
                GL_StateData.Z_AXIS += rate
                s = "rotate +Z"
            elif args[0] == 's':
                GL_StateData.Z_AXIS -= rate
                s = "rotate -Z"
            print(str((GL_StateData.X_AXIS, GL_StateData.Y_AXIS, GL_StateData.Z_AXIS,
                       GL_StateData.viewport_depth)) + " " + s)

    def drag(self, x, y):
        """GLUT motion callback while a mouse button is held."""
        # On left click drag, move the viewport..
        if self.left_dragging:
            relativeMoveX = self.lastMouseX - x
            relativeMoveY = self.lastMouseY - y
            GL_StateData.Y_AXIS -= (relativeMoveX * 0.01)
            GL_StateData.X_AXIS -= (relativeMoveY * 0.01)
        elif self.right_dragging:
            # Right-drag used to nudge the (now disabled) updateable line.
            # BUGFIX: the previous code referenced x1/y1 whose defining line
            # was commented out, raising NameError on any right-click drag.
            # The deltas are still computed for when the feature returns:
            # self.updateable_line.set_xyz(x1 + relativeMoveX * 0.01,
            #                              y1 - relativeMoveY * 0.01, z1)
            relativeMoveX = self.lastRightMouseX - x
            relativeMoveY = self.lastRightMouseY - y

    def mouse(self, button, state, x, y):
        """GLUT mouse button callback: track drag state and wheel zoom."""
        # NOTE(review): this fires on both press and release (state is
        # ignored), so dragging flags stay set after release — confirm
        # whether that is intended before changing it.
        self.left_dragging = (button == GLUT_LEFT_BUTTON)
        self.right_dragging = (button == GLUT_RIGHT_BUTTON)
        wheelUp = (button == 3)    # freeglut reports the wheel as buttons 3/4
        wheelDown = (button == 4)
        if self.left_dragging:
            self.lastMouseX = x
            self.lastMouseY = y
        if self.right_dragging:
            self.lastRightMouseX = x
            self.lastRightMouseY = y
        elif wheelUp:
            if GL_StateData.viewport_depth <= 0:
                GL_StateData.viewport_depth += 1
        elif wheelDown:
            GL_StateData.viewport_depth -= 1
class OpenGLRunner(object):
    """Owns the GLUT window and main loop; delegates input and drawing."""
    __window = 0
    # __lights = [(15, 15, 10), (-20.0, -20.0, 20.0), (0.0, -20.0, 0.0), (-20.0, 0.0, 0.0)]
    # World positions of the point lights placed around the scene (max 8,
    # see enable_lighting()).
    __lights = [(10, 0, 0), (0, 10, 0), (0, 0, 10),
                (20, 0, 0), (0, 20, 0), (0, 0, 20),
                #(-20.0, 0, 0), (0.0, -20.0, 0.0), (0, 0.0, -20.0)
                ]

    def __init__(self, input_handler, draw_callback_func):
        # Bind the handler callbacks once so GLUT registration is simple.
        self.__keyPressed_func = input_handler.keyPressed
        self.__mouse_func = input_handler.mouse
        self.__drag_func = input_handler.drag
        self.__draw_callback_func = draw_callback_func

    def InitGL(self, Width, Height):
        """One-time GL setup: depth test, lighting, materials, projection."""
        global GL_LESS, GL_DEPTH_TEST, GL_CULL_FACE, GL_FRONT_AND_BACK, \
            GL_AMBIENT_AND_DIFFUSE, GL_SHININESS, GL_PROJECTION, GL_MODELVIEW, GL_SMOOTH
        glClearColor(1.0, 1.0, 1.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        # NOTE(review): trailing comma makes this a 1-tuple expression;
        # harmless but almost certainly unintended.
        glEnable(GL_DEPTH_TEST),
        glEnable(GL_CULL_FACE)
        self.enable_lighting()
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, self.__vec(0.2, 0.2, 0.2, 1))
        glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, self.__vec(1, 1, 1, .2))
        glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 99)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(Width) / float(Height), 0.1, 1000.0)
        glMatrixMode(GL_MODELVIEW)

    def DrawGLScene(self):
        """Per-frame display/idle callback: apply pan/zoom/rotation, draw."""
        global GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_MODELVIEW
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        glTranslatef(GL_StateData.default_x, GL_StateData.default_z, GL_StateData.viewport_depth)
        glRotatef(GL_StateData.X_AXIS,1.0,0.0,0.0)
        glRotatef(GL_StateData.Y_AXIS,0.0,1.0,0.0)
        glRotatef(GL_StateData.Z_AXIS,0.0,0.0,1.0)
        self.__draw_callback_func()
        if GL_StateData.do_rotate:
            # advance the auto-rotation for the next frame
            #GL_StateData.X_AXIS = GL_StateData.X_AXIS - 0.05
            GL_StateData.Y_AXIS = GL_StateData.Y_AXIS - GL_StateData.rotate_rate
            #GL_StateData.Z_AXIS = GL_StateData.Z_AXIS - 0.05
        glutSwapBuffers()

    def enable_lighting(self):
        """Enable GL lighting and position each configured point light."""
        global GL_LIGHTING, GL_POSITION, GL_SPECULAR, GL_DIFFUSE, GL_AMBIENT
        glEnable(GL_LIGHTING)
        li_num = 16384  # GL_LIGHT0, max of 8
        for l in OpenGLRunner.__lights:
            glEnable(li_num)
            glLight(li_num, GL_POSITION, self.__vec(l[0], l[1], l[2], 1))  # http://pyopengl.sourceforge.net/documentation/manual-3.0/glLight.html
            # NOTE(review): ambient blue component is 5, not .5 — possibly
            # a typo; confirm the intended light colour.
            glLight(li_num, GL_AMBIENT, self.__vec(.5, .5, 5, 1))
            glLight(li_num, GL_DIFFUSE, self.__vec(.5, .5, .5, 1))
            glLight(li_num, GL_SPECULAR, self.__vec(1, 1, 1, 1))
            li_num += 1
            if li_num > 16391:  # past GL_LIGHT7
                print("MAX NUM OF LIGHTS EXCEEDED. Truncating num of lights at 8.")
                break

    def select_menu(self, choice):
        """Right-click menu callback: dispatch the numbered menu entry."""
        global GL_StateData
        def _toggle_rotate():
            GL_StateData.toggle_rotate()
        def _rotate_slower():
            GL_StateData.rotate_rate = GL_StateData.rotate_rate * 0.5
        def _rotate_faster():
            GL_StateData.rotate_rate = GL_StateData.rotate_rate * 2
        def _exit():
            self.begin_shutdown()
        # dispatch table keyed by menu entry id (see right_click_menu)
        {
            1: _toggle_rotate,
            2: _rotate_slower,
            3: _rotate_faster,
            4: _exit
        }[choice]()
        glutPostRedisplay()
        return 0

    def right_click_menu(self):
        """Attach a right-click GLUT context menu backed by select_menu."""
        from ctypes import c_int,c_void_p
        import platform
        #platform specific imports:
        if (platform.system() == 'Windows'):
            #Windows
            from ctypes import WINFUNCTYPE
            CMPFUNCRAW = WINFUNCTYPE(c_int, c_int)
        else:
            #Linux
            from ctypes import CFUNCTYPE
            CMPFUNCRAW = CFUNCTYPE(c_int, c_int)
        # wrap the bound method in the platform calling convention so GLUT
        # can invoke it from C
        myfunc = CMPFUNCRAW(self.select_menu)
        color_submenu = glutCreateMenu( myfunc )
        glutAddMenuEntry("Toggle Rotation", 1)
        glutAddMenuEntry("Rotate Slower", 2)
        glutAddMenuEntry("Rotate Faster", 3)
        glutAddMenuEntry("Exit", 4)
        glutAttachMenu(GLUT_RIGHT_BUTTON)

    def draw(self):
        """Create the window, register all callbacks, enter the main loop."""
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(800,600)
        glutInitWindowPosition(0,5)
        OpenGLRunner.__window = glutCreateWindow(b'LightStage - Target Illumination Score Tool')
        glViewport(0, 0, 500, 500);
        # let glutMainLoop() return on window close instead of exiting
        glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS)
        self.right_click_menu()
        glutDisplayFunc(self.DrawGLScene)
        glutIdleFunc(self.DrawGLScene)
        glutKeyboardFunc(self.__keyPressed_func)
        glutSpecialFunc(self.__keyPressed_func)
        glutMouseFunc(self.__mouse_func)
        glutMotionFunc(self.__drag_func)
        self.InitGL(640, 480)
        glutMainLoop()
        self.begin_shutdown()

    def begin_shutdown(self):
        # Delegate shutdown to the service layer.
        GracefulShutdown.do_shutdown()

    # Define a simple function to create ctypes arrays of floats:
    @staticmethod
    def __vec(*args):
        return (GLfloat * len(args))(*args)
class LightStageApp(object):
    """Top-level application: wires input handling, tooling, and run modes."""

    def __init__(self):
        GracefulKiller()
        WaveFront()
        tool_managers.define_help()
        self.__input_handler = OpenGLInputHandler()
        self.__keyEvents = self.__input_handler.keyEvents
        self.__tool = tool_managers.Tool()

    def main(self):
        """Run the mode selected by the EVALUATION commandline option."""
        opts, _unused_args = get_parsed_commandline_options()
        if opts.EVALUATION in (1, 4):
            # interactive OpenGL demo
            runner = OpenGLRunner(self.__input_handler, self.__draw_callback)
            runner.draw()
        elif opts.EVALUATION in (2, 3):
            # headless evaluation / parameter tuning
            self.__tool.run()

    def __draw_callback(self):
        # todo: This is a hack. Rework tool_managers.py to encapsulate keypress (and other) globals into a Config class, then this call should affect that object state.
        tool_managers.update_configs_via_keypress(self.__keyEvents)
        self.__tool.run()
if __name__ == "__main__":
    # Script entry point: configure root logging, then launch the app.
    logging.basicConfig(format='%(message)s',level=logging.WARNING)
    x = LightStageApp()
    x.main()
| {
"repo_name": "LightStage-Aber/LightStage-Repo",
"path": "src/run.py",
"copies": "1",
"size": "13183",
"license": "apache-2.0",
"hash": -8618785350845284000,
"line_mean": 36.6657142857,
"line_max": 168,
"alpha_frac": 0.5577637867,
"autogenerated": false,
"ratio": 3.4055799535003874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4463343740200387,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from operator import add
import os.path as op
from cooler import tools
import cooler
# Locate the test data directory relative to this test module.
testdir = op.realpath(op.dirname(__file__))
datadir = op.join(testdir, "data")
def test_datapipe():
    # Exercise MultiplexDataPipe: pipe chaining, gather, reduce, iteration,
    # and the prepare() initializer.
    inputs = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    keys = ['a', 'b', 'c', 'd']
    dp = tools.MultiplexDataPipe(inputs.get, keys, map)
    dp = dp.pipe(lambda x: x)
    # extra positional args to pipe() are forwarded to the callable
    dp = dp.pipe((lambda x, const: x * const), 10)
    # a list of callables is applied in sequence
    dp = dp.pipe([
        lambda x: x + 1,
        lambda x: x - 1,
    ])
    out = dp.gather()
    assert out == [10, 20, 30, 40]
    out = dp.reduce(add, 0)
    assert out == 100
    assert sum(i for i in dp) == 100
    dp = tools.MultiplexDataPipe(inputs.get, keys, map)
    dp = dp.prepare(lambda x: x)
    # prepare initializer modifies the function signature
    dp = (
        dp
        .pipe(lambda x0, x: x + 100)
        .pipe(lambda x0, x: x0)
    )
    out = dp.gather()
    assert out == [1, 2, 3, 4]
def test_chunkgetter():
    # chunkgetter() should return a callable mapping a (lo, hi) pixel span
    # to a dict of cooler table columns.
    path = op.join(datadir, "toy.symm.upper.2.cool")
    clr = cooler.Cooler(path)
    lo, hi = 1, 3
    getter = tools.chunkgetter(clr)
    chunk = getter((lo, hi))
    assert isinstance(chunk, dict)
    # chroms are excluded by default
    assert 'chroms' not in chunk
    assert 'bins' in chunk
    assert 'pixels' in chunk
    assert len(chunk['pixels']['bin1_id']) == 2
    getter = tools.chunkgetter(clr, include_chroms=True)
    chunk = getter((lo, hi))
    assert isinstance(chunk, dict)
    assert 'chroms' in chunk
    assert 'bins' in chunk
    assert 'pixels' in chunk
    # use_lock must not change the returned data
    getter = tools.chunkgetter(clr, use_lock=True)
    chunk = getter((lo, hi))
    assert isinstance(chunk, dict)
    assert len(chunk['pixels']['bin1_id']) == 2
def test_split():
    # Smoke test: split() accepts either a chunksize or explicit spans.
    path = op.join(datadir, "toy.symm.upper.2.cool")
    clr = cooler.Cooler(path)
    tools.split(clr, map, chunksize=2)
    tools.split(clr, map, spans=[(0, 2), (2, 4)])
| {
"repo_name": "mirnylab/cooler",
"path": "tests/test_tools.py",
"copies": "1",
"size": "1916",
"license": "bsd-3-clause",
"hash": 6089784645849570000,
"line_mean": 25.985915493,
"line_max": 64,
"alpha_frac": 0.5981210856,
"autogenerated": false,
"ratio": 3.080385852090032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4178506937690032,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from os.path import expanduser, normpath, realpath, join
import os
from itertools import izip
import platform
# Module-level state configured via set_userid().
USER_ID = None        # git username substring used to detect owned repos
IS_USER = False       # True when running on one of the owner's machines
PERMITTED_REPOS = []  # repo dirnames the user may push to without owning
# (old_fragment, new_fragment) pairs keyed by url scheme; used by
# fix_repo_url() to rewrite github-style urls between https and ssh forms.
format_dict = {
    'https': ('.com/', 'https://'),
    'ssh': ('.com:', 'git@'),
}
def get_computer_name():
    """Return this machine's network name (platform.node())."""
    return platform.node()
def get_repo_dirs(repo_urls, checkout_dir):
    """Map each repo url to its checkout path under checkout_dir."""
    repo_dirs = [join(checkout_dir, get_repo_dname(url)) for url in repo_urls]
    return repo_dirs
def get_repo_dname(repo_url):
    """Break url into a dirname.

    Takes the text after the last '/' or ':' separator and strips a
    trailing '.git' suffix, e.g. 'git@host.com:user/repo.git' -> 'repo'.
    """
    # the repo name starts after whichever separator occurs last
    pos = max(repo_url.rfind('/'), repo_url.rfind(':'))
    repodir = repo_url[pos + 1:]
    # BUGFIX: str.replace('.git', '') removed *every* occurrence, corrupting
    # names that contain '.git' mid-string; only the suffix should go.
    if repodir.endswith('.git'):
        repodir = repodir[:-len('.git')]
    return repodir
def set_userid(userid=None,
               owned_computers=None,
               permitted_repos=None):
    """Configure the module-level user identity globals.

    Args:
        userid: git username substring identifying owned repos.
        owned_computers: collection of hostnames belonging to the user.
        permitted_repos: repo dirnames the user may push to without owning.
    """
    # Check to see if you are on one of the user's computers.
    # BUGFIX: the previous signature used mutable defaults ({} / []) that
    # were bound directly into module globals; use None sentinels instead.
    global IS_USER
    global USER_ID
    global PERMITTED_REPOS
    if owned_computers is None:
        owned_computers = {}
    if permitted_repos is None:
        permitted_repos = []
    PERMITTED_REPOS = permitted_repos
    USER_ID = userid
    IS_USER = get_computer_name() in owned_computers
def truepath(path):
    """Resolve ~, symlinks, and redundant separators in *path*."""
    expanded = expanduser(path)
    resolved = realpath(expanded)
    return normpath(resolved)
def unixpath(path):
    """Return the resolved path with forward slashes only."""
    resolved = truepath(path)
    return resolved.replace('\\', '/')
def cd(dir_):
    """Change the working directory, echoing the command shell-style."""
    target = truepath(dir_)
    print('> cd ' + target)
    os.chdir(target)
def fix_repo_url(repo_url, in_type='https', out_type='ssh', format_dict=format_dict):
    """Rewrite repo_url from the in_type url scheme to the out_type scheme."""
    # Substitute each fragment of the source scheme with the corresponding
    # fragment of the target scheme (e.g. 'https://' -> 'git@').
    for src_part, dst_part in zip(format_dict[in_type], format_dict[out_type]):
        repo_url = repo_url.replace(src_part, dst_part)
    return repo_url
def ensure_ssh_url(repo_url):
    """Convert an https github url to its ssh (git@) form."""
    return fix_repo_url(repo_url, in_type='https', out_type='ssh')
def repo_list(repo_urls, checkout_dir):
    """Return (urls, unix-style checkout dirs) for the given repos."""
    repo_dirs = get_repo_dirs(repo_urls, checkout_dir)
    repo_dirs = map(unixpath, repo_dirs)
    return repo_urls, repo_dirs
def can_push(repo_url):
    """True when the configured user owns repo_url or is permitted on it."""
    owns_repo = USER_ID is not None and USER_ID in repo_url
    is_permitted = get_repo_dname(repo_url) in PERMITTED_REPOS
    return owns_repo or is_permitted
def url_list(repo_urls):
    """Normalize urls; on the owner's machines, rewrite pushable repos to ssh."""
    if IS_USER:
        repo_urls = [ensure_ssh_url(url) if can_push(url) else url
                     for url in repo_urls]
    return map(unixpath, repo_urls)
def cmd(command):
    """Echo and run a shell command.

    SECURITY: os.system runs through the shell; never pass untrusted
    input to this helper.
    """
    print('> ' + command)
    os.system(command)
#def url_list2(*args):
# """ Output is gaurenteed to be a list of paths """
# url_list = args
# if len(args) == 1:
# # There is one argument
# arg = args[0]
# if isinstance(arg, (str, unicode)):
# if arg.find('\n') == -1:
# # One string long
# url_list = [arg]
# else:
# # One multiline string
# url_list = textwrap.dedent(arg).strip().split('\n')
# else:
# url_list = arg
# if IS_USER:
# def userid_in(path):
# return IS_USER is not None and\
# path.find(USER_ID) != -1
# url_list = [path if userid_in(path) else fix_repo_url(path, 'https', 'ssh')
# for path in url_list]
# return map(unixpath, url_list)
#def repo_list2(*args):
# if len(args) < 1:
# return url_list(*args)
# elif len(args) == 2:
# repo_urls = url_list(args[0])
# checkout_dir = args[1]
# repo_dirs = map(unixpath, get_repo_dirs(repo_urls, checkout_dir))
# return repo_urls, repo_dirs
| {
"repo_name": "bluemellophone/detecttools",
"path": "detecttools/gitutil/meta_util_git.py",
"copies": "1",
"size": "3467",
"license": "apache-2.0",
"hash": 4236241261945843000,
"line_mean": 25.465648855,
"line_max": 85,
"alpha_frac": 0.5895586963,
"autogenerated": false,
"ratio": 3.160437556973564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.924482203314294,
"avg_score": 0.001034844026124855,
"num_lines": 131
} |
from __future__ import absolute_import, division, print_function
from os.path import join as pjoin
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
#from Cython.Build import cythonize
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = ''  # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = ''  # Uncomment this for full releases

# Construct full version string from these: empty components are skipped.
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))

# Trove classifiers consumed by PyPI.
CLASSIFIERS = ["Development Status :: 3 - Alpha",
               "Environment :: Console",
               "Intended Audience :: Science/Research",
               "License :: OSI Approved :: MIT License",
               "Operating System :: OS Independent",
               "Programming Language :: Python",
               "Topic :: Scientific/Engineering"]

# Description should be a one-liner:
description = "clustering: data analysis for simple MD cluster data"
# Long description will go up on the pypi page
long_description = """
Clustering
========
Clustering is a suite of code primarily intended for finding clusters and
performing data analysis on them. These clusters are physical clusters in the
data. Also instantiated is fits to the mass-averaged cluster size versus time by
the Smoluchowski model.
License
=======
``clustering`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2017--, Rachael Mansbach, University of Illinois at
Urbana-Champaign
"""

# Packaging metadata re-exported by setup.py.
NAME = "clustering"
MAINTAINER = "Rachael Mansbach"
MAINTAINER_EMAIL = "ramansbach@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/ramansbach/clustering"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Rachael Mansbach"
AUTHOR_EMAIL = "ramansbach@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'clustering': [pjoin('data', '*')]}
REQUIRES = ["numpy","scipy","Cython","scikit"]
BEXT = {'build_ext': build_ext}
# Cython extension modules with their C sources; both need numpy headers.
CYTHONMODS=[Extension("cdistances",
                      sources=["cdistances.pyx","conoptdistance.c",
                               "aligndistance.c","subsquashrng.c",
                               "gyrtensxy.c"],
                      include_dirs=[numpy.get_include()]),
            Extension("cfractald",
                      sources=["cfractald.pyx","corrdim.c","getcoms.c"],
                      include_dirs=[numpy.get_include()]) ]
| {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/version.py",
"copies": "1",
"size": "2920",
"license": "mit",
"hash": -5033089658255687000,
"line_mean": 35.5,
"line_max": 80,
"alpha_frac": 0.6753424658,
"autogenerated": false,
"ratio": 3.777490297542044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49528327633420444,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = ''  # use '' for first of series, number for 1 and above
_version_extra = 'dev1'
_version_extra = ''  # full release: this active line clears the dev tag above

# Construct full version string from these: empty components are skipped.
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))

# Trove classifiers consumed by PyPI.
CLASSIFIERS = ["Development Status :: 3 - Alpha",
               "Environment :: Console",
               "Intended Audience :: Science/Research",
               "License :: OSI Approved :: MIT License",
               "Operating System :: OS Independent",
               "Programming Language :: Python",
               "Topic :: Scientific/Engineering"]

# Description should be a one-liner:
description = "gparse: a simple python package for parsing Gaussian log files."
# Long description will go up on the pypi page
long_description = """
gparse
========
gparse is a python package for parsing Gaussian 16 log files for coordinate and
energy information.
License
=======
``gparse`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2020--, Luke D Gibson, The University of Washington
Department of Chemical Engineering.
"""

# Packaging metadata re-exported by setup.py.
NAME = "gparse"
MAINTAINER = "Luke D Gibson"
MAINTAINER_EMAIL = "ldgibson@uw.edu"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/UWPRG/Gaussian/gparse"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Luke D Gibson"
AUTHOR_EMAIL = "ldgibson@uw.edu"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'gparse': [pjoin('data', '*')]}
REQUIRES = ['numpy', 'pandas']
| {
"repo_name": "UWPRG/Gaussian",
"path": "gparse/gparse/version.py",
"copies": "1",
"size": "2139",
"license": "mit",
"hash": -6637178724243616000,
"line_mean": 31.4090909091,
"line_max": 79,
"alpha_frac": 0.693314633,
"autogenerated": false,
"ratio": 3.553156146179402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4746470779179402,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from os.path import join
import six
import utool as ut
from six.moves import range, zip, map # NOQA
from ibeis.algo.hots import _pipeline_helpers as plh # NOQA
from ibeis.algo.hots.neighbor_index import NeighborIndex, get_support_data
# Inject utool-style print/reload/profile helpers into this module.
(print, rrr, profile) = ut.inject2(__name__, '[neighbor_index]', DEBUG=False)
USE_HOTSPOTTER_CACHE = not ut.get_argflag('--nocache-hs')
# uuid caching is only meaningful when the hotspotter cache is enabled
NOCACHE_UUIDS = ut.get_argflag('--nocache-uuids') and USE_HOTSPOTTER_CACHE
# LRU cache for nn_indexers. Ensures that only a few are ever in memory
#MAX_NEIGHBOR_CACHE_SIZE = ut.get_argval('--max-neighbor-cachesize', type_=int, default=2)
MAX_NEIGHBOR_CACHE_SIZE = ut.get_argval('--max-neighbor-cachesize', type_=int, default=1)
# Background process for building indexes
CURRENT_THREAD = None
# Global map to keep track of UUID lists with prebuild indexers.
UUID_MAP = ut.ddict(dict)
NEIGHBOR_CACHE = ut.get_lru_cache(MAX_NEIGHBOR_CACHE_SIZE)
class UUIDMapHyrbridCache(object):
    """
    Class that lets multiple ways of writing to the uuid_map
    be swapped in and out interchangably

    The active backend is the in-memory dict (read/write_uuid_map_dict);
    the shelf and cPkl backends are kept below, commented out, as
    alternative implementations.

    TODO: the global read / write should periodically sync itself to disk and it
    should be loaded from disk initially
    """

    def __init__(self):
        # One uuid_map dict per uuid_map_fpath key; lives in memory only
        # until dump()/load() is called explicitly.
        self.uuid_maps = ut.ddict(dict)
        #self.uuid_map_fpath = uuid_map_fpath
        #self.init(uuid_map_fpath, min_reindex_thresh)

    def init(self, *args, **kwargs):
        # Select the in-memory dict backend and remember the call args.
        self.args = args
        self.kwargs = kwargs
        #self.read_func = self.read_uuid_map_cpkl
        #self.write_func = self.write_uuid_map_cpkl
        self.read_func = self.read_uuid_map_dict
        self.write_func = self.write_uuid_map_dict

    def dump(self, cachedir):
        # Persist the whole hybrid cache to cachedir as a cPkl file.
        # TODO: DUMP AND LOAD THIS HYBRID CACHE TO DISK
        #write_uuid_map_cpkl
        fname = 'uuid_maps_hybrid_cache.cPkl'
        cpkl_fpath = join(cachedir, fname)
        ut.lock_and_save_cPkl(cpkl_fpath, self.uuid_maps)

    def load(self, cachedir):
        """
        Returns a cache UUIDMap
        """
        fname = 'uuid_maps_hybrid_cache.cPkl'
        cpkl_fpath = join(cachedir, fname)
        self.uuid_maps = ut.lock_and_load_cPkl(cpkl_fpath)

    #def __call__(self):
    #    return self.read_func(*self.args, **self.kwargs)

    #def __setitem__(self, daids_hashid, visual_uuid_list):
    #    uuid_map_fpath = self.uuid_map_fpath
    #    self.write_func(uuid_map_fpath, visual_uuid_list, daids_hashid)

    #@profile
    #def read_uuid_map_shelf(self, uuid_map_fpath, min_reindex_thresh):
    #    #with ut.EmbedOnException():
    #    with lockfile.LockFile(uuid_map_fpath + '.lock'):
    #        with ut.shelf_open(uuid_map_fpath) as uuid_map:
    #            candidate_uuids = {
    #                key: val for key, val in six.iteritems(uuid_map)
    #                if len(val) >= min_reindex_thresh
    #            }
    #    return candidate_uuids

    #@profile
    #def write_uuid_map_shelf(self, uuid_map_fpath, visual_uuid_list, daids_hashid):
    #    print('Writing %d visual uuids to uuid map' % (len(visual_uuid_list)))
    #    with lockfile.LockFile(uuid_map_fpath + '.lock'):
    #        with ut.shelf_open(uuid_map_fpath) as uuid_map:
    #            uuid_map[daids_hashid] = visual_uuid_list

    #@profile
    #def read_uuid_map_cpkl(self, uuid_map_fpath, min_reindex_thresh):
    #    with lockfile.LockFile(uuid_map_fpath + '.lock'):
    #        #with ut.shelf_open(uuid_map_fpath) as uuid_map:
    #        try:
    #            uuid_map = ut.load_cPkl(uuid_map_fpath)
    #            candidate_uuids = {
    #                key: val for key, val in six.iteritems(uuid_map)
    #                if len(val) >= min_reindex_thresh
    #            }
    #        except IOError:
    #            return {}
    #    return candidate_uuids

    #@profile
    #def write_uuid_map_cpkl(self, uuid_map_fpath, visual_uuid_list, daids_hashid):
    #    """
    #    let the multi-indexer know about any big caches we've made multi-indexer.
    #    Also lets nnindexer know about other prebuilt indexers so it can attempt to
    #    just add points to them as to avoid a rebuild.
    #    """
    #    print('Writing %d visual uuids to uuid map' % (len(visual_uuid_list)))
    #    with lockfile.LockFile(uuid_map_fpath + '.lock'):
    #        try:
    #            uuid_map = ut.load_cPkl(uuid_map_fpath)
    #        except IOError:
    #            uuid_map = {}
    #        uuid_map[daids_hashid] = visual_uuid_list
    #        ut.save_cPkl(uuid_map_fpath, uuid_map)

    @profile
    def read_uuid_map_dict(self, uuid_map_fpath, min_reindex_thresh):
        """ uses in memory dictionary instead of disk """
        uuid_map = self.uuid_maps[uuid_map_fpath]
        # Only offer uuid lists big enough to be worth reusing as a base.
        candidate_uuids = {
            key: val for key, val in six.iteritems(uuid_map)
            if len(val) >= min_reindex_thresh
        }
        return candidate_uuids

    @profile
    def write_uuid_map_dict(self, uuid_map_fpath, visual_uuid_list, daids_hashid):
        """
        uses in memory dictionary instead of disk

        let the multi-indexer know about any big caches we've made multi-indexer.
        Also lets nnindexer know about other prebuilt indexers so it can attempt to
        just add points to them as to avoid a rebuild.
        """
        if NOCACHE_UUIDS:
            print('uuid cache is off')
            return
        #with ut.EmbedOnException():
        uuid_map = self.uuid_maps[uuid_map_fpath]
        uuid_map[daids_hashid] = visual_uuid_list
# Singleton hybrid cache shared by this module's read/write helpers.
UUID_MAP_CACHE = UUIDMapHyrbridCache()

#@profile
def get_nnindexer_uuid_map_fpath(qreq_):
    """
    Return the uuid-map cPkl path for this query request's configuration.

    The filename is conditioned on the flann/feature/chip config strings
    (plus featweight when fg weighting is active), so different pipeline
    configurations get separate uuid maps.

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache get_nnindexer_uuid_map_fpath --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', p='default:fgw_thresh=.3')
        >>> uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
        >>> result = str(ut.path_ndir_split(uuid_map_fpath, 3))
        >>> print(result)
        .../_ibeis_cache/flann/uuid_map_mzwwsbjisbkdxorl.cPkl
        .../_ibeis_cache/flann/uuid_map_FLANN(8_kdtrees_fgwthrsh=0.3)_Feat(hesaff+sift)_Chip(sz700,width).cPkl
        .../_ibeis_cache/flann/uuid_map_FLANN(8_kdtrees)_Feat(hesaff+sift)_Chip(sz700,width).cPkl
        .../_ibeis_cache/flann/uuid_map_FLANN(8_kdtrees)_FEAT(hesaff+sift_)_CHIP(sz450).cPkl
    """
    flann_cachedir = qreq_.ibs.get_flann_cachedir()
    # Have uuid shelf conditioned on the baseline flann and feature parameters
    flann_cfgstr = qreq_.qparams.flann_cfgstr
    feat_cfgstr = qreq_.qparams.feat_cfgstr
    chip_cfgstr = qreq_.qparams.chip_cfgstr
    featweight_cfgstr = qreq_.qparams.featweight_cfgstr
    # only include the featweight cfg when foreground weighting is in effect
    if qreq_.qparams.fgw_thresh is None or qreq_.qparams.fgw_thresh == 0:
        uuid_map_cfgstr = ''.join((flann_cfgstr, feat_cfgstr, chip_cfgstr))
    else:
        uuid_map_cfgstr = ''.join((flann_cfgstr, featweight_cfgstr, feat_cfgstr, chip_cfgstr))
    #uuid_map_ext    = '.shelf'
    uuid_map_ext = '.cPkl'
    uuid_map_prefix = 'uuid_map'
    # condense the cfgstr into a filesystem-safe filename
    uuid_map_fname = ut.consensed_cfgstr(uuid_map_prefix, uuid_map_cfgstr) + uuid_map_ext
    uuid_map_fpath = join(flann_cachedir, uuid_map_fname)
    return uuid_map_fpath
def build_nnindex_cfgstr(qreq_, daid_list):
    """
    builds a string that uniquely identified an indexer built with parameters
    from the input query requested and indexing descriptor from the input
    annotation ids

    Args:
        qreq_ (QueryRequest): query request object with hyper-parameters
        daid_list (list):

    Returns:
        str: nnindex_cfgstr

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-build_nnindex_cfgstr

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(db='testdb1')
        >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list, cfgdict=dict(fg_on=False))
        >>> nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        >>> result = str(nnindex_cfgstr)
        >>> print(result)
        _VUUIDS((6)ylydksaqdigdecdd)_FLANN(8_kdtrees)_FeatureWeight(detector=cnn,sz256,thresh=20,ksz=20,enabled=False)_FeatureWeight(detector=cnn,sz256,thresh=20,ksz=20,enabled=False)
        _VUUIDS((6)ylydksaqdigdecdd)_FLANN(8_kdtrees)_FEATWEIGHT(OFF)_FEAT(hesaff+sift_)_CHIP(sz450)
    """
    qparams = qreq_.qparams
    # FIXME; need to include probchip (or better yet just use depcache)
    #probchip_cfgstr = qreq_.qparams.chip_cfgstr
    components = [
        get_data_cfgstr(qreq_.ibs, daid_list),
        qparams.flann_cfgstr,
        qparams.featweight_cfgstr,
        qparams.feat_cfgstr,
        qparams.chip_cfgstr,
    ]
    return ''.join(components)
def clear_memcache():
    """ Empty the process-wide in-memory neighbor indexer cache. """
    global NEIGHBOR_CACHE
    NEIGHBOR_CACHE.clear()
def clear_uuid_cache(qreq_):
    """
    Delete the on-disk uuid map (and its lock file) for this query config.

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-clear_uuid_cache

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', p='default:fg_on=True')
        >>> fgws_list = clear_uuid_cache(qreq_)
        >>> result = str(fgws_list)
        >>> print(result)
    """
    print('[nnindex] clearing uuid cache')
    fpath = get_nnindexer_uuid_map_fpath(qreq_)
    for target in (fpath, fpath + '.lock'):
        ut.delete(target)
    print('[nnindex] finished uuid cache clear')
def print_uuid_cache(qreq_):
    """
    Print the contents of the on-disk uuid map for this query config.

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-print_uuid_cache

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='PZ_Master0', p='default:fg_on=False')
        >>> print_uuid_cache(qreq_)
    """
    # BUGFIX: the message previously said '[nnindex] clearing uuid cache'
    # (copy-pasted from clear_uuid_cache); this function only reads/prints.
    print('[nnindex] printing uuid cache')
    uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
    # min_reindex_thresh=0 means every cached entry is reported
    candidate_uuids = UUID_MAP_CACHE.read_uuid_map_dict(uuid_map_fpath, 0)
    print(candidate_uuids)
def request_ibeis_nnindexer(qreq_, verbose=True, **kwargs):
    """
    CALLED BY QUERYREQUST::LOAD_INDEXER
    IBEIS interface into neighbor_index_cache

    Args:
        qreq_ (QueryRequest): hyper-parameters

    Returns:
        NeighborIndexer: nnindexer

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_ibeis_nnindexer

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> nnindexer, qreq_, ibs = test_nnindexer(None)
        >>> nnindexer = request_ibeis_nnindexer(qreq_)
    """
    daid_list = qreq_.get_internal_daids()
    if not hasattr(qreq_.qparams, 'use_augmented_indexer'):
        qreq_.qparams.use_augmented_indexer = True
    # NOTE: the augmented path is deliberately hard-disabled with `False and`
    # because request_augmented_ibeis_nnindexer can currently segfault.
    use_augmented = False and qreq_.qparams.use_augmented_indexer
    if use_augmented:
        return request_augmented_ibeis_nnindexer(qreq_, daid_list, **kwargs)
    return request_memcached_ibeis_nnindexer(qreq_, daid_list, **kwargs)
def request_augmented_ibeis_nnindexer(qreq_, daid_list, verbose=True,
                                      use_memcache=True, force_rebuild=False,
                                      memtrack=None):
    r"""
    DO NOT USE. THIS FUNCTION CAN CURRENTLY CAUSE A SEGFAULT

    tries to give you an indexer for the requested daids using the least amount
    of computation possible. By loading and adding to a partially build nnindex
    if possible and if that fails fallbs back to request_memcache.

    Args:
        qreq_ (QueryRequest): query request object with hyper-parameters
        daid_list (list):

    Returns:
        str: nnindex_cfgstr

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_augmented_ibeis_nnindexer

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ZEB_PLAIN = ibeis.const.TEST_SPECIES.ZEB_PLAIN
        >>> ibs = ibeis.opendb('testdb1')
        >>> use_memcache, max_covers, verbose = True, None, True
        >>> daid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:6]
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> qreq_.qparams.min_reindex_thresh = 1
        >>> min_reindex_thresh = qreq_.qparams.min_reindex_thresh
        >>> # CLEAR CACHE for clean test
        >>> clear_uuid_cache(qreq_)
        >>> # LOAD 3 AIDS INTO CACHE
        >>> aid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:3]
        >>> # Should fallback
        >>> nnindexer = request_augmented_ibeis_nnindexer(qreq_, aid_list)
        >>> # assert the fallback
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result2 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result2, ([4, 5, 6], [[1, 2, 3]]), 'pre augment')
        >>> # Should augment
        >>> nnindexer = request_augmented_ibeis_nnindexer(qreq_, daid_list)
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result3 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result3, ([], [[1, 2, 3, 4, 5, 6]]), 'post augment')
        >>> # Should fallback
        >>> nnindexer2 = request_augmented_ibeis_nnindexer(qreq_, daid_list)
        >>> assert nnindexer is nnindexer2
    """
    global NEIGHBOR_CACHE
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    if not force_rebuild:
        # Look for one cached indexer covering a strict subset of daid_list;
        # equality means nothing needs augmenting (the memcache path handles it)
        new_daid_list, covered_aids_list = group_daids_by_cached_nnindexer(
            qreq_, daid_list, min_reindex_thresh, max_covers=1)
        can_augment = (
            len(covered_aids_list) > 0 and
            not ut.list_set_equal(covered_aids_list[0], daid_list))
    else:
        can_augment = False
    if verbose:
        print('[aug] Requesting augmented nnindexer')
    if can_augment:
        covered_aids = covered_aids_list[0]
        if verbose:
            print('[aug] Augmenting index %r old daids with %d new daids' %
                  (len(covered_aids), len(new_daid_list)))
        # Load the base covered indexer
        # THIS SHOULD LOAD NOT REBUILD IF THE UUIDS ARE COVERED
        base_nnindexer = request_memcached_ibeis_nnindexer(
            qreq_, covered_aids, verbose=verbose, use_memcache=use_memcache)
        # Remove this indexer from the memcache because we are going to change it
        if NEIGHBOR_CACHE.has_key(base_nnindexer.cfgstr):  # NOQA
            print('Removing key from memcache')
            NEIGHBOR_CACHE[base_nnindexer.cfgstr] = None
            del NEIGHBOR_CACHE[base_nnindexer.cfgstr]
        # Add support data for the uncovered annotations in-place
        new_vecs_list, new_fgws_list, new_fxs_list = get_support_data(qreq_, new_daid_list)
        base_nnindexer.add_support(new_daid_list, new_vecs_list, new_fgws_list, new_fxs_list, verbose=True)
        # FIXME: pointer issues
        nnindexer = base_nnindexer
        # Change to the new cfgstr
        nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        nnindexer.cfgstr = nnindex_cfgstr
        cachedir = qreq_.ibs.get_flann_cachedir()
        nnindexer.save(cachedir)
        # Write to inverse uuid map so future requests can find this cover
        if len(daid_list) > min_reindex_thresh:
            uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
            daids_hashid = get_data_cfgstr(qreq_.ibs, daid_list)
            visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)
            UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list, daids_hashid)
        # Write to memcache
        if ut.VERBOSE:
            print('[aug] Wrote to memcache=%r' % (nnindex_cfgstr,))
        NEIGHBOR_CACHE[nnindex_cfgstr] = nnindexer
        return nnindexer
    else:
        #if ut.VERBOSE:
        if verbose:
            print('[aug] Nothing to augment, fallback to memcache')
        # Fallback
        nnindexer = request_memcached_ibeis_nnindexer(
            qreq_, daid_list, verbose=verbose, use_memcache=use_memcache,
            force_rebuild=force_rebuild, memtrack=memtrack
        )
        return nnindexer
def request_memcached_ibeis_nnindexer(qreq_, daid_list, use_memcache=True,
                                      verbose=ut.NOT_QUIET, veryverbose=False,
                                      force_rebuild=False, memtrack=None,
                                      prog_hook=None):
    r"""
    Return an indexer for daid_list, served from the in-memory cache when
    possible and falling through to the disk cache / a rebuild otherwise.

    FOR INTERNAL USE ONLY
    takes custom daid list. might not be the same as what is in qreq_

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_memcached_ibeis_nnindexer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> qreq_.qparams.min_reindex_thresh = 3
        >>> ZEB_PLAIN = ibeis.const.TEST_SPECIES.ZEB_PLAIN
        >>> daid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:3]
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> verbose = True
        >>> use_memcache = True
        >>> # execute function
        >>> nnindexer = request_memcached_ibeis_nnindexer(qreq_, daid_list, use_memcache)
        >>> # verify results
        >>> result = str(nnindexer)
        >>> print(result)
    """
    global NEIGHBOR_CACHE
    #try:
    if veryverbose:
        print('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %r' % (len(NEIGHBOR_CACHE),))
        # the lru cache wont be recognized by get_object_size_str, cast to pure python objects
        print('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' % (ut.get_object_size_str(NEIGHBOR_CACHE.items()),))
    #if memtrack is not None:
    #    memtrack.report('IN REQUEST MEMCACHE')
    nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
    # neighbor memory cache
    if not force_rebuild and use_memcache and NEIGHBOR_CACHE.has_key(nnindex_cfgstr):  # NOQA (has_key is for a lru cache)
        if veryverbose or ut.VERYVERBOSE or ut.VERBOSE:
            print('... nnindex memcache hit: cfgstr=%s' % (nnindex_cfgstr,))
        nnindexer = NEIGHBOR_CACHE[nnindex_cfgstr]
    else:
        if veryverbose or ut.VERYVERBOSE or ut.VERBOSE:
            print('... nnindex memcache miss: cfgstr=%s' % (nnindex_cfgstr,))
        # Write to inverse uuid
        nnindexer = request_diskcached_ibeis_nnindexer(
            qreq_, daid_list, nnindex_cfgstr, verbose,
            force_rebuild=force_rebuild, memtrack=memtrack,
            prog_hook=prog_hook)
        # Memcache writes are always enabled; the flag exists as a debug knob
        NEIGHBOR_CACHE_WRITE = True
        if NEIGHBOR_CACHE_WRITE:
            # Write to memcache
            if ut.VERBOSE or ut.VERYVERBOSE:
                print('[disk] Write to memcache=%r' % (nnindex_cfgstr,))
            NEIGHBOR_CACHE[nnindex_cfgstr] = nnindexer
        else:
            if ut.VERBOSE or ut.VERYVERBOSE:
                print('[disk] Did not write to memcache=%r' % (nnindex_cfgstr,))
    return nnindexer
def request_diskcached_ibeis_nnindexer(qreq_, daid_list, nnindex_cfgstr=None,
                                       verbose=True, force_rebuild=False,
                                       memtrack=None, prog_hook=None):
    r"""
    builds new NeighborIndexer which will try to use a disk cached flann if
    available

    Args:
        qreq_ (QueryRequest): query request object with hyper-parameters
        daid_list (list):
        nnindex_cfgstr (str): identifier; computed from qreq_ when None
        verbose (bool):

    Returns:
        NeighborIndexer: nnindexer

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_diskcached_ibeis_nnindexer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        >>> verbose = True
        >>> # execute function
        >>> nnindexer = request_diskcached_ibeis_nnindexer(qreq_, daid_list, nnindex_cfgstr, verbose)
        >>> # verify results
        >>> result = str(nnindexer)
        >>> print(result)
    """
    if nnindex_cfgstr is None:
        nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
    cfgstr = nnindex_cfgstr
    cachedir = qreq_.ibs.get_flann_cachedir()
    flann_params = qreq_.qparams.flann_params
    # NOTE: mutates the qparams flann_params dict in place
    flann_params['checks'] = qreq_.qparams.checks
    #if memtrack is not None:
    #    memtrack.report('[PRE SUPPORT]')
    # Get annot descriptors to index
    if prog_hook is not None:
        prog_hook.set_progress(1, 3, 'Loading support data for indexer')
    print('[nnindex] Loading support data for indexer')
    vecs_list, fgws_list, fxs_list = get_support_data(qreq_, daid_list)
    if memtrack is not None:
        memtrack.report('[AFTER GET SUPPORT DATA]')
    try:
        nnindexer = new_neighbor_index(
            daid_list, vecs_list, fgws_list, fxs_list, flann_params, cachedir,
            cfgstr=cfgstr, verbose=verbose, force_rebuild=force_rebuild,
            memtrack=memtrack, prog_hook=prog_hook)
    except Exception as ex:
        ut.printex(ex, True, msg_='cannot build inverted index',
                   key_list=['ibs.get_infostr()'])
        raise
    # Record these uuids in the disk based uuid map so they can be augmented if
    # needed
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    if len(daid_list) > min_reindex_thresh:
        uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
        daids_hashid = get_data_cfgstr(qreq_.ibs, daid_list)
        visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)
        UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list, daids_hashid)
        if memtrack is not None:
            memtrack.report('[AFTER WRITE_UUID_MAP]')
    return nnindexer
def group_daids_by_cached_nnindexer(qreq_, daid_list, min_reindex_thresh,
                                    max_covers=None):
    r"""
    Partition ``daid_list`` into aids already covered by cached indexers and
    aids that still need an index built.

    Returns:
        tuple: (uncovered_aids, covered_aids_list) -- a sorted list of aids
        with no cached cover, and a list of aid groups each covered by one
        cached indexer.

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-group_daids_by_cached_nnindexer

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> ZEB_PLAIN = ibeis.const.TEST_SPECIES.ZEB_PLAIN
        >>> daid_list = ibs.get_valid_aids(species=ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> # Set the params a bit lower
        >>> max_covers = None
        >>> qreq_.qparams.min_reindex_thresh = 1
        >>> min_reindex_thresh = qreq_.qparams.min_reindex_thresh
        >>> # STEP 0: CLEAR THE CACHE
        >>> clear_uuid_cache(qreq_)
        >>> # STEP 1: ASSERT EMPTY INDEX
        >>> daid_list = ibs.get_valid_aids(species=ZEB_PLAIN)[0:3]
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result1 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result1, ([1, 2, 3], []), 'pre request')
        >>> # TEST 2: SHOULD MAKE 123 COVERED
        >>> nnindexer = request_memcached_ibeis_nnindexer(qreq_, daid_list)
        >>> uncovered_aids, covered_aids_list = group_daids_by_cached_nnindexer(
        ...     qreq_, daid_list, min_reindex_thresh, max_covers)
        >>> result2 = uncovered_aids, covered_aids_list
        >>> ut.assert_eq(result2, ([], [[1, 2, 3]]), 'post request')
    """
    ibs = qreq_.ibs
    # read which annotations have prebuilt caches
    uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
    candidate_uuids = UUID_MAP_CACHE.read_uuid_map_dict(uuid_map_fpath, min_reindex_thresh)
    # find a maximum independent set cover of the requested annotations
    # (percentage comments below are profiling notes from the original author)
    annot_vuuid_list = ibs.get_annot_visual_uuids(daid_list)  # 3.2 %
    covertup = ut.greedy_max_inden_setcover(
        candidate_uuids, annot_vuuid_list, max_covers)  # 0.2 %
    uncovered_vuuids, covered_vuuids_list, accepted_keys = covertup
    # return the grouped covered items (so they can be loaded) and
    # the remaining uuids which need to have an index computed.
    #
    uncovered_aids_ = ibs.get_annot_aids_from_visual_uuid(uncovered_vuuids)  # 28.0%
    covered_aids_list_ = ibs.unflat_map(
        ibs.get_annot_aids_from_visual_uuid, covered_vuuids_list)  # 68%
    # FIXME:
    uncovered_aids = sorted(uncovered_aids_)
    #covered_aids_list = list(map(sorted, covered_aids_list_))
    covered_aids_list = covered_aids_list_
    return uncovered_aids, covered_aids_list
def get_data_cfgstr(ibs, daid_list):
    """ part 2 data hash id

    Delegates to the controller to hash the visual uuids of ``daid_list``.
    """
    return ibs.get_annot_hashid_visual_uuid(daid_list)
def new_neighbor_index(daid_list, vecs_list, fgws_list, fxs_list, flann_params, cachedir,
                       cfgstr, force_rebuild=False, verbose=True,
                       memtrack=None, prog_hook=None):
    r"""
    constructs neighbor index independent of ibeis

    Args:
        daid_list (list): data annotation ids
        vecs_list (list): descriptor vectors, one entry per annotation
        fgws_list (list): foreground weights, one entry per annotation
        fxs_list (list): feature indexes, one entry per annotation
        flann_params (dict): flann build parameters
        cachedir (str): directory where the flann index may be cached
        cfgstr (str): config string identifying this index
        force_rebuild (bool): rebuild even when a cached index exists
        memtrack (None): optional memory tracker used for debug reports
        prog_hook (None): optional progress reporting hook

    Returns:
        nnindexer

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-new_neighbor_index

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', a='default:species=zebra_plains', p='default:fgw_thresh=.999')
        >>> daid_list = qreq_.daids
        >>> nnindex_cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
        >>> ut.exec_funckw(new_neighbor_index, globals())
        >>> cfgstr = nnindex_cfgstr
        >>> cachedir = qreq_.ibs.get_flann_cachedir()
        >>> flann_params = qreq_.qparams.flann_params
        >>> # Get annot descriptors to index
        >>> vecs_list, fgws_list, fxs_list = get_support_data(qreq_, daid_list)
        >>> nnindexer = new_neighbor_index(daid_list, vecs_list, fgws_list, fxs_list, flann_params, cachedir, cfgstr, verbose=True)
        >>> result = ('nnindexer.ax2_aid = %s' % (str(nnindexer.ax2_aid),))
        >>> print(result)
        nnindexer.ax2_aid = [1 2 3 4 5 6]
    """
    nnindexer = NeighborIndex(flann_params, cfgstr)
    #if memtrack is not None:
    #    memtrack.report('CREATEED NEIGHTOB INDEX')
    # Initialize neighbor with unindexed data
    nnindexer.init_support(daid_list, vecs_list, fgws_list, fxs_list, verbose=verbose)
    if memtrack is not None:
        memtrack.report('AFTER INIT SUPPORT')
    # Load or build the indexing structure
    nnindexer.ensure_indexer(cachedir, verbose=verbose,
                             force_rebuild=force_rebuild, memtrack=memtrack,
                             prog_hook=prog_hook)
    if memtrack is not None:
        memtrack.report('AFTER LOAD OR BUILD')
    return nnindexer
def test_nnindexer(dbname='testdb1', with_indexer=True, use_memcache=True):
    r"""
    Convenience constructor of an (indexer, query request, controller) triple
    for interactive testing.

    Ignore:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> nnindexer, qreq_, ibs = test_nnindexer('PZ_Master1')
        >>> S = np.cov(nnindexer.idx2_vec.T)
        >>> import plottool as pt
        >>> pt.ensure_pylab_qt4()
        >>> pt.plt.imshow(S)

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> nnindexer, qreq_, ibs = test_nnindexer()
    """
    import ibeis
    ibs = ibeis.opendb(db=dbname)
    daid_list = [7, 8, 9, 10, 11]
    # use_memcache isn't used here because we aren't lazy loading the indexer
    qreq_ = ibs.new_query_request(daid_list, daid_list,
                                  use_memcache=use_memcache,
                                  cfgdict=dict(fg_on=False))
    nnindexer = None
    if with_indexer:
        # we do an explicit creation of an indexer for these tests
        nnindexer = request_ibeis_nnindexer(qreq_, use_memcache=use_memcache)
    return nnindexer, qreq_, ibs
# ------------
# NEW
def check_background_process():
    r"""
    checks to see if the process has finished and then
    writes the uuid map to disk

    Returns:
        bool: True when the background thread finished and its results were
        committed; False when no thread exists or it is still running.
    """
    global CURRENT_THREAD
    if CURRENT_THREAD is None or CURRENT_THREAD.is_alive():
        print('[FG] background thread is not ready yet')
        return False
    # Get info set in background process
    finishtup = CURRENT_THREAD.finishtup
    (uuid_map_fpath, daids_hashid, visual_uuid_list, min_reindex_thresh) = finishtup
    # Clean up background process
    CURRENT_THREAD.join()
    CURRENT_THREAD = None
    # Write data to current uuidcache (the background process has its own
    # memory space, so the foreground must perform this write)
    if len(visual_uuid_list) > min_reindex_thresh:
        UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list, daids_hashid)
    return True
def can_request_background_nnindexer():
    """ Return True when no background indexing process is currently running. """
    return CURRENT_THREAD is None or not CURRENT_THREAD.is_alive()
def request_background_nnindexer(qreq_, daid_list):
    r""" FIXME: Duplicate code

    Spawn a background process that builds and disk-caches an indexer for
    ``daid_list``.  check_background_process() later commits the uuid map.

    Args:
        qreq_ (QueryRequest): query request object with hyper-parameters
        daid_list (list):

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache --test-request_background_nnindexer

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index_cache import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> daid_list = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
        >>> qreq_ = ibs.new_query_request(daid_list, daid_list)
        >>> # execute function
        >>> request_background_nnindexer(qreq_, daid_list)
        >>> # verify results
        >>> result = str(False)
        >>> print(result)
    """
    global CURRENT_THREAD
    print('Requesting background reindex')
    if not can_request_background_nnindexer():
        # Make sure this function doesn't run if it is already running
        print('REQUEST DENIED')
        return False
    print('REQUEST ACCPETED')
    daids_hashid = qreq_.ibs.get_annot_hashid_visual_uuid(daid_list)
    cfgstr = build_nnindex_cfgstr(qreq_, daid_list)
    cachedir = qreq_.ibs.get_flann_cachedir()
    # Save inverted cache uuid mappings for
    min_reindex_thresh = qreq_.qparams.min_reindex_thresh
    # Grab the keypoints names and image ids before query time?
    flann_params = qreq_.qparams.flann_params
    # Get annot descriptors to index
    vecs_list, fgws_list, fxs_list = get_support_data(qreq_, daid_list)
    # Dont hash rowids when given enough info in nnindex_cfgstr
    flann_params['cores'] = 2  # Only ues a few cores in the background
    # Build/Load the flann index
    uuid_map_fpath = get_nnindexer_uuid_map_fpath(qreq_)
    visual_uuid_list = qreq_.ibs.get_annot_visual_uuids(daid_list)
    # set temporary attribute for when the thread finishes
    finishtup = (uuid_map_fpath, daids_hashid, visual_uuid_list, min_reindex_thresh)
    # NOTE(review): only seven positional args are forwarded here while
    # background_flann_func declares eleven parameters -- confirm the last
    # four are optional (the uuid-map write is handled in the foreground by
    # check_background_process via `finishtup`).
    CURRENT_THREAD = ut.spawn_background_process(
        background_flann_func, cachedir, daid_list, vecs_list, fgws_list, fxs_list,
        flann_params, cfgstr)
    CURRENT_THREAD.finishtup = finishtup
def background_flann_func(cachedir, daid_list, vecs_list, fgws_list, fxs_list,
                          flann_params, cfgstr,
                          uuid_map_fpath=None, daids_hashid=None,
                          visual_uuid_list=None, min_reindex_thresh=0):
    r""" FIXME: Duplicate code

    Build and disk-cache a flann indexer; intended to run in a background
    process.

    BUGFIX: request_background_nnindexer spawns this function with only the
    first seven arguments, which previously raised a TypeError because the
    trailing four parameters had no defaults.  They are now optional; when
    ``uuid_map_fpath`` is None the uuid-map write is skipped (the foreground
    process performs that write via check_background_process).
    """
    print('[BG] Starting Background FLANN')
    # FIXME. dont use flann cache
    nnindexer = NeighborIndex(flann_params, cfgstr)
    # Initialize neighbor with unindexed data
    nnindexer.init_support(daid_list, vecs_list, fgws_list, fxs_list, verbose=True)
    # Load or build the indexing structure
    nnindexer.ensure_indexer(cachedir, verbose=True)
    # Optional uuid-map write (only meaningful when called with full args)
    if uuid_map_fpath is not None and visual_uuid_list is not None:
        if len(visual_uuid_list) > min_reindex_thresh:
            UUID_MAP_CACHE.write_uuid_map_dict(uuid_map_fpath, visual_uuid_list, daids_hashid)
    print('[BG] Finished Background FLANN')
if __name__ == '__main__':
    r"""
    Run this module's doctests.

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index_cache
        python -m ibeis.algo.hots.neighbor_index_cache --allexamples
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
| {
"repo_name": "SU-ECE-17-7/ibeis",
"path": "ibeis/algo/hots/neighbor_index_cache.py",
"copies": "1",
"size": "33269",
"license": "apache-2.0",
"hash": -6750878788668231000,
"line_mean": 40.3279503106,
"line_max": 183,
"alpha_frac": 0.6260482732,
"autogenerated": false,
"ratio": 3.2725752508361206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9388456964475924,
"avg_score": 0.00203331191203929,
"num_lines": 805
} |
from __future__ import absolute_import, division, print_function
from panoptes_client.subject_workflow_status import SubjectWorkflowStatus
# Python 2/3 string-type compatibility: on py2 `unicode` must also be
# accepted as a string type; on py3 the NameError leaves the tuple as (str,).
_OLD_STR_TYPES = (str,)
try:
    _OLD_STR_TYPES = _OLD_STR_TYPES + (unicode,)
except NameError:
    pass
from builtins import range, str
import logging
import requests
import threading
import time
from copy import deepcopy
from concurrent.futures import ThreadPoolExecutor
# Prefer python-magic (arbitrary MIME detection); fall back to stdlib imghdr
# (images only) when it cannot be imported.
try:
    import magic
    MEDIA_TYPE_DETECTION = 'magic'
except ImportError:
    import pkg_resources
    try:
        # python-magic is installed but failed to import: the libmagic
        # shared library is probably missing/broken, so warn explicitly.
        pkg_resources.require("python-magic")
        logging.getLogger('panoptes_client').warn(
            'Broken libmagic installation detected. The python-magic module is'
            ' installed but can\'t be imported. Please check that both '
            'python-magic and the libmagic shared library are installed '
            'correctly. Uploading media other than images may not work.'
        )
    except pkg_resources.DistributionNotFound:
        pass

    import imghdr
    MEDIA_TYPE_DETECTION = 'imghdr'
from panoptes_client.panoptes import (
LinkResolver,
Panoptes,
PanoptesAPIException,
PanoptesObject,
)
from redo import retry
# Max attempts for saves and media uploads before giving up
UPLOAD_RETRY_LIMIT = 5
# Seconds slept between retry attempts
RETRY_BACKOFF_INTERVAL = 5
# Worker threads used by the asynchronous save pool
ASYNC_SAVE_THREADS = 5
class Subject(PanoptesObject):
    """
    A Panoptes subject: a unit of media (with locations and metadata) that
    belongs to a project and is classified by volunteers.
    """
    _api_slug = 'subjects'
    _link_slug = 'subjects'
    _edit_attributes = (
        'locations',
        'metadata',
        {
            'links': (
                'project',
            ),
        },
    )

    # Thread-local storage; holds the executor installed by async_saves()
    _local = threading.local()

    @classmethod
    def async_saves(cls):
        """
        Returns a context manager to allow asynchronously creating subjects.

        Using this context manager will create a pool of threads which will
        create multiple subjects at once and upload any local files
        simultaneously.

        The recommended way to use this is with the `with` statement::

            with Subject.async_saves():
                local_files = [...]
                for filename in local_files:
                    s = Subject()
                    s.links.project = 1234
                    s.add_location(filename)
                    s.save()

        Alternatively, you can manually shut down the thread pool::

            pool = Subject.async_saves()
            local_files = [...]
            try:
                for filename in local_files:
                    s = Subject()
                    s.links.project = 1234
                    s.add_location(filename)
                    s.save()
            finally:
                pool.shutdown()
        """
        cls._local.save_exec = ThreadPoolExecutor(
            max_workers=ASYNC_SAVE_THREADS
        )
        return cls._local.save_exec

    def __init__(self, raw={}, etag=None):
        # NOTE(review): `raw={}` is a mutable default argument; it appears to
        # be passed straight through to PanoptesObject without mutation here,
        # but confirm before relying on that.
        super(Subject, self).__init__(raw, etag)
        if not self.locations:
            self.locations = []
        if not self.metadata:
            self.metadata = {}
        # Snapshot used by save() to detect metadata modifications
        self._original_metadata = {}
        # Raw bytes queued for upload, parallel to self.locations
        self._media_files = []

    def save(self, client=None):
        """
        Like :py:meth:`.PanoptesObject.save`, but also uploads any local files
        which have previously been added to the subject with
        :py:meth:`add_location`. Automatically retries uploads on error.

        If multiple local files are to be uploaded, several files will be
        uploaded simultaneously to save time.
        """
        if not client:
            client = Panoptes.client()

        async_save = hasattr(self._local, 'save_exec')

        with client:
            if async_save:
                try:
                    # The recursive call will exec in a new thread, so
                    # self._local.save_exec will be undefined above
                    self._async_future = self._local.save_exec.submit(
                        self.save,
                        client=client,
                    )
                    return
                except RuntimeError:
                    # Executor already shut down; fall through to a
                    # synchronous save in this thread.
                    del self._local.save_exec
                    async_save = False

            if not self.metadata == self._original_metadata:
                self.modified_attributes.add('metadata')

            # Save the subject itself, retrying on API errors
            response = retry(
                super(Subject, self).save,
                attempts=UPLOAD_RETRY_LIMIT,
                sleeptime=RETRY_BACKOFF_INTERVAL,
                retry_exceptions=(PanoptesAPIException,),
                log_args=False,
            )

            if not response:
                return

            try:
                if async_save:
                    upload_exec = self._local.save_exec
                else:
                    upload_exec = ThreadPoolExecutor(
                        max_workers=ASYNC_SAVE_THREADS,
                    )

                # Pair each signed upload location from the API response with
                # the queued media bytes; None entries are remote URLs that
                # need no upload.
                for location, media_data in zip(
                    response['subjects'][0]['locations'],
                    self._media_files
                ):
                    if not media_data:
                        continue

                    for media_type, url in location.items():
                        upload_exec.submit(
                            retry,
                            self._upload_media,
                            args=(url, media_data, media_type),
                            attempts=UPLOAD_RETRY_LIMIT,
                            sleeptime=RETRY_BACKOFF_INTERVAL,
                            retry_exceptions=(
                                requests.exceptions.RequestException,
                            ),
                            log_args=False,
                        )

            finally:
                if not async_save:
                    upload_exec.shutdown()

    def _upload_media(self, url, media_data, media_type):
        # PUT the raw media bytes to the signed URL returned by the API
        upload_response = requests.put(
            url,
            headers={
                'Content-Type': media_type,
                'x-ms-blob-type': 'BlockBlob',
            },
            data=media_data,
        )
        upload_response.raise_for_status()
        return upload_response

    @property
    def async_save_result(self):
        """
        Retrieves the result of this subject's asynchronous save.

        - Returns `True` if the subject was saved successfully.
        - Raises `concurrent.futures.CancelledError` if the save was cancelled.
        - If the save failed, raises the relevant exception.
        - Returns `False` if the subject hasn't finished saving or if the
          subject has not been queued for asynchronous save.
        """
        if hasattr(self, "_async_future") and self._async_future.done():
            # result() re-raises any exception captured by the save thread
            self._async_future.result()
            return True
        else:
            return False

    def set_raw(self, raw, etag=None, loaded=True):
        # Refresh the metadata snapshot whenever fully-loaded data arrives
        super(Subject, self).set_raw(raw, etag, loaded)
        if loaded and self.metadata:
            self._original_metadata = deepcopy(self.metadata)
        elif loaded:
            self._original_metadata = None

    def subject_workflow_status(self, workflow_id):
        """
        Returns SubjectWorkflowStatus of Subject in Workflow

        Example::
            subject.subject_workflow_status(4321)
        """
        return next(SubjectWorkflowStatus.where(subject_id=self.id, workflow_id=workflow_id))

    def add_location(self, location):
        """
        Add a media location to this subject.

        - **location** can be an open :py:class:`file` object, a path to a
          local file, or a :py:class:`dict` containing MIME types and URLs for
          remote media.

        Examples::
            subject.add_location(my_file)
            subject.add_location('/data/image.jpg')
            subject.add_location({'image/png': 'https://example.com/image.png'})
        """
        if type(location) is dict:
            # Remote media: store as-is, nothing to upload
            self.locations.append(location)
            self._media_files.append(None)
            return
        elif type(location) in (str,) + _OLD_STR_TYPES:
            f = open(location, 'rb')
        else:
            f = location

        try:
            media_data = f.read()
            if MEDIA_TYPE_DETECTION == 'magic':
                # python-magic returns a full MIME type directly
                media_type = magic.from_buffer(media_data, mime=True)
            else:
                # imghdr returns an image subtype (or None for non-images)
                media_type = imghdr.what(None, media_data)
                if not media_type:
                    raise UnknownMediaException(
                        'Could not detect file type. Please try installing '
                        'libmagic: https://panoptes-python-client.readthedocs.'
                        'io/en/latest/user_guide.html#uploading-non-image-'
                        'media-types'
                    )
                media_type = 'image/{}'.format(media_type)
            self.locations.append(media_type)
            self._media_files.append(media_data)
        finally:
            f.close()
class UnknownMediaException(Exception):
    """Raised when the media type of an added file cannot be detected."""
# Make Subject resolvable from API link payloads under both key spellings
LinkResolver.register(Subject)
LinkResolver.register(Subject, 'subject')
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/subject.py",
"copies": "1",
"size": "8984",
"license": "apache-2.0",
"hash": -4494473678493628400,
"line_mean": 31.5507246377,
"line_max": 93,
"alpha_frac": 0.535284951,
"autogenerated": false,
"ratio": 4.684045881126173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5719330832126174,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
from past.builtins import basestring
from tmpl import context
class Jinja2Template(context.Template):
    """Adapter wrapping a compiled jinja2 template in the tmpl interface."""

    def __init__(self, tmpl, **kwargs):
        # tmpl: a compiled jinja2.Template instance
        self.tmpl = tmpl
        super(Jinja2Template, self).__init__(**kwargs)

    def render(self, env):
        """
        renders from template, return object
        """
        return self.tmpl.render(env)

    def load(self):
        # interface stub: jinja2 templates are pre-compiled by the engine
        pass

    def loads(self):
        # interface stub: see load()
        pass
class Jinja2Engine(context.Context):
    """template interface class for jinja2"""

    @staticmethod
    def can_load():
        """Return True when the jinja2 package is importable."""
        import imp
        try:
            imp.find_module('jinja2')
            return True
        except ImportError:
            return False

    def __init__(self, **kwargs):
        import jinja2
        super(Jinja2Engine, self).__init__(**kwargs)
        self.engine = jinja2.Environment(loader=jinja2.FileSystemLoader(self._search_path))
        # line-based statement/comment syntax: `. if x` statements, `;.` comments
        self.engine.line_statement_prefix = '.'
        self.engine.line_comment_prefix = ';.'
        self.engine.keep_trailing_newline = True
        self.engine.lstrip_blocks = True
        self.engine.trim_blocks = True

    @property
    def search_path(self):
        return self.engine.loader.searchpath

    @search_path.setter
    def search_path(self, path):
        # NOTE(review): the list branch does not update self._search_path,
        # unlike the string branch -- confirm whether that asymmetry is
        # intentional before changing it.
        if isinstance(path, basestring):
            self._search_path = [path]
            self.engine.loader.searchpath = [path]
        else:
            self.engine.loader.searchpath = path

    @search_path.deleter
    def search_path(self):
        self.engine.loader.searchpath = []

    def get_template(self, name):
        """ finds template in search path
        returns Template object
        """
        # BUGFIX: previously wrapped twice (Jinja2Template(Jinja2Template(...)))
        # which added a useless delegation layer around the jinja2 template.
        return Jinja2Template(self.engine.get_template(name))

    def make_template(self, tmpl_str):
        """ makes template object from a string """
        # BUGFIX: previously referenced the undefined name `Template`
        # (NameError at runtime); compile with the configured environment.
        return Jinja2Template(self.engine.from_string(tmpl_str))

    def _render(self, src, env):
        """
        renders from template, return object
        """
        return self.engine.get_template(src).render(env)

    def _render_str_to_str(self, instr, env):
        """
        renders contents of instr with env returns string
        """
        return self.engine.from_string(instr).render(env)
class DjangoTemplate(context.Template):
    """Adapter wrapping a compiled django template in the tmpl interface."""

    def __init__(self, tmpl, **kwargs):
        # tmpl: a django.template.Template instance
        self.tmpl = tmpl
        super(DjangoTemplate, self).__init__(**kwargs)

    def render(self, env):
        """
        renders from template, return object
        """
        from django.template import Context
        # django requires the environment to be wrapped in a Context object
        return self.tmpl.render(Context(env))

    def load(self):
        # interface stub
        pass

    def loads(self):
        # interface stub
        pass
class DjangoEngine(context.Context):
    """template interface class for Django"""

    @staticmethod
    def can_load():
        """Return True when the django package is importable."""
        import imp
        try:
            imp.find_module('django')
            return True
        except ImportError:
            # FIX: removed stray debug print for consistency with
            # Jinja2Engine.can_load
            return False

    def __init__(self, **kwargs):
        import django
        from django.conf import settings
        from django.template import Template
        # Configure a minimal default template backend exactly once per process
        if not settings.configured:
            settings.configure(
                TEMPLATES=[
                    {
                        'BACKEND':
                            'django.template.backends.django.DjangoTemplates'
                    }
                ]
            )
            django.setup()
        self.tmpl_ctor = Template
        super(DjangoEngine, self).__init__(**kwargs)

    def get_template(self, name):
        """ finds template in search path
        returns Template object

        Raises:
            LookupError: when no file matching `name` can be found
        """
        filename = self.find_template(name)
        if not filename:
            raise LookupError("template not found")
        # BUGFIX: the file handle was previously leaked
        # (`open(filename).read()`); close it deterministically.
        with open(filename) as fobj:
            return self.make_template(fobj.read())

    def make_template(self, tmpl_str):
        """ makes template object from a string """
        return DjangoTemplate(self.tmpl_ctor(tmpl_str))

    def _render_str_to_str(self, instr, env):
        """
        renders contents of instr with env returns string
        """
        return self.make_template(instr).render(env)
| {
"repo_name": "20c/twentyc.tmpl",
"path": "tmpl/engine.py",
"copies": "1",
"size": "4201",
"license": "apache-2.0",
"hash": -5203081131354617000,
"line_mean": 24.4606060606,
"line_max": 91,
"alpha_frac": 0.5803380148,
"autogenerated": false,
"ratio": 4.436114044350581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000774829035698601,
"num_lines": 165
} |
from __future__ import absolute_import, division, print_function
from plottool_ibeis import custom_figure
import utool as ut
#(print, print_, printDBG, rrr, profile) = utool.inject(__name__,
# '[interact_helpers]',
# DEBUG=False)
ut.noinject(__name__, '[interact_helpers]')
#==========================
# HELPERS
#==========================
# RCOS TODO: We should change the fnum, pnum figure layout into one managed by
# gridspec.
def detect_keypress(fig):
    """Attach shift-key tracking handlers to *fig*.

    BUG FIX: the original stored the flag in a handler-local variable
    (flagged NOQA), which was discarded as soon as the handler returned.
    The state is now stored on the figure so other handlers can inspect
    ``fig.shift_is_held``.
    """
    fig.shift_is_held = False

    def on_key_press(event):
        if event.key == 'shift':
            fig.shift_is_held = True

    def on_key_release(event):
        if event.key == 'shift':
            fig.shift_is_held = False
    fig.canvas.mpl_connect('key_press_event', on_key_press)
    fig.canvas.mpl_connect('key_release_event', on_key_release)
def clicked_inside_axis(event):
    """Return True when the mpl *event* landed inside a plot axis."""
    if event is None:
        return False
    return event.inaxes is not None and event.xdata is not None
def clicked_outside_axis(event):
    """Return True when the mpl *event* did not land inside any axis."""
    inside = clicked_inside_axis(event)
    return not inside
def begin_interaction(type_, fnum):
    """Start an interaction: make a cleared figure for *fnum*, reset any
    stale button-press callback on it, and return the figure."""
    if ut.VERBOSE:
        header = str(type_).upper()
        print('\n<<<< BEGIN %s INTERACTION >>>>' % (header,))
        print('[inter] starting %s interaction, fnum=%r' % (type_, fnum))
    fig = custom_figure.figure(fnum=fnum, docla=True, doclf=True)
    ax = custom_figure.gca()
    disconnect_callback(fig, 'button_press_event', axes=[ax])
    return fig
def disconnect_callback(fig, callback_type, **kwargs):
    """Detach a previously registered mpl callback from *fig*.

    The callback id and function are looked up in the figure's ``__dict__``
    under ``<type>_cbid`` / ``<type>_func`` (as stored by
    ``connect_callback``); the id is cleared afterwards. Any axes passed via
    ``axes=[...]`` get their ``_hs_viztype`` tag reset.

    Returns:
        tuple: (cbid, cbfn) — both None when nothing was registered.
    """
    for ax in kwargs.get('axes', []):
        ax._hs_viztype = ''
    id_key = callback_type + '_cbid'
    fn_key = callback_type + '_func'
    cbid = fig.__dict__.get(id_key, None)
    cbfn = fig.__dict__.get(fn_key, None)
    if cbid is None:
        cbfn = None
    else:
        fig.canvas.mpl_disconnect(cbid)
    fig.__dict__[id_key] = None
    return cbid, cbfn
def connect_callback(fig, callback_type, callback_fn):
    """Register *callback_fn* on *fig* via ``fig.canvas.mpl_connect``.

    The connection id and the function are stashed in the figure's
    ``__dict__`` (keys ``<type>_cbid`` / ``<type>_func``) so the callback
    keeps a live reference and can later be removed with
    ``disconnect_callback``. A None callback is a no-op.

    References:
        http://matplotlib.org/users/event_handling.html
        (valid types include button_press_event, button_release_event,
        draw_event, key_press_event, key_release_event, motion_notify_event,
        pick_event, resize_event, scroll_event, figure_enter_event,
        figure_leave_event, axes_enter_event, axes_leave_event)
    """
    if callback_fn is None:
        return
    cbid = fig.canvas.mpl_connect(callback_type, callback_fn)
    fig.__dict__[callback_type + '_cbid'] = cbid
    fig.__dict__[callback_type + '_func'] = callback_fn
#REGIESTERED_INTERACTIONS = []
#def register_interaction(interaction):
# global REGIESTERED_INTERACTIONS
# REGIESTERED_INTERACTIONS.append(interaction)
| {
"repo_name": "Erotemic/plottool",
"path": "plottool_ibeis/interact_helpers.py",
"copies": "1",
"size": "3185",
"license": "apache-2.0",
"hash": 1946829528197544200,
"line_mean": 29.0471698113,
"line_max": 90,
"alpha_frac": 0.598744113,
"autogenerated": false,
"ratio": 3.546770601336303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4645514714336303,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from psychopy import core
import serial
import serial.tools.list_ports
import time
from decimal import Decimal
from threading import Thread, Event
class SRBox():
    """Driver for a PST serial response box (5 buttons, 5 lamps).

    When *port* is None, every serial port on the system is probed until one
    opens. ``waitKeys``/``recordKeys``/``get_keys`` mirror the PsychoPy
    keyboard API; ``set_light``/``set_lights``/``update_lights``/
    ``blink_lights`` drive the lamps.
    """

    def __init__(self, port=None, baudrate=19200, timeout=0):
        """Open the box on *port* (or autodetect); raise RuntimeError on failure."""
        self.port = port
        self.baudrate = baudrate
        self.timeout = timeout
        # BUG FIX: _box must exist even when there are no ports to probe.
        self._box = None
        if port is None:
            ports = []
            for p in serial.tools.list_ports.comports():
                ports.append(p.device)
            for dev in ports:
                try:
                    self._box = serial.Serial(dev, baudrate=self.baudrate, timeout=self.timeout)
                    self.port = dev
                    break
                except Exception:  # best-effort probe: any failure -> next port
                    self._box = None
                    self.port = None
                    continue
        else:
            self._box = serial.Serial(port, baudrate=self.baudrate, timeout=self.timeout)
        if self._box is None:
            raise RuntimeError('Could not connect to SRBox')
        self._light_codes = [0b00001, 0b00010, 0b00100, 0b01000, 0b10000]
        self._lights = [False] * 5
        self.update_lights()
        self._button_codes = [0b00001, 0b00010, 0b00100, 0b01000, 0b10000]
        self._reading = False
        self._record_thread = None
        # BUG FIX: was misspelled `_recorded_pressed`; get_keys() reads
        # `_recorded_presses`, which would raise AttributeError.
        self._recorded_presses = None

    def _signal(self, byte):
        """Send one command byte (int or single-character str) to the box."""
        if type(byte) is int:
            # BUG FIX: was `chr(int)` — chr() of the builtin type object,
            # which always raised TypeError.
            byte = chr(byte)
        return self._box.write(byte)

    def _read(self):
        """Block until one byte arrives from the box and return it.

        NOTE(review): the '' comparison assumes the Python 2 str-based
        pyserial API; on Python 3 reads return bytes — confirm target version.
        """
        byte = ''
        while byte == '':
            byte = self._box.read(1)
        return byte

    def start_input(self):
        """Flush both buffers and tell the box to start streaming button state."""
        self._box.reset_input_buffer()
        self._box.reset_output_buffer()
        self._signal(chr(0b10100000))
        self._reading = True

    def stop_input(self):
        """Flush both buffers and tell the box to stop streaming button state."""
        self._box.reset_input_buffer()
        self._box.reset_output_buffer()
        self._signal(chr(0b00100000))
        self._reading = False

    def close(self):
        """Flush buffers and close the serial connection."""
        self._box.reset_input_buffer()
        self._box.reset_output_buffer()
        self._reading = False
        self._box.close()

    def waitKeys(self, keyList=None, maxWait=None, timeStamped=False):
        """Block until a (matching) button press and return the pressed keys.

        keyList restricts which buttons count; maxWait is a timeout in
        seconds (None = wait forever); timeStamped may be a clock object with
        a ``getTime()`` method, in which case ``(keys, time)`` is returned.
        Returns [] on timeout.
        """
        if not self._reading:
            self.start_input()
        if timeStamped:
            clock = timeStamped.getTime
            timeStamped = True
        else:
            clock = time.time
        if maxWait is None:
            run_infinite = True
        else:
            run_infinite = False
        start_time = clock()
        current_time = start_time
        while run_infinite or (current_time - start_time < maxWait):
            current_time = clock()
            keys = ord(self._read())
            if keys == 0:
                continue
            pressed = self._keys_pressed(keys)
            if keyList is not None:
                valid_keys = [key for key in pressed if key in keyList]
            else:
                # BUG FIX: was misspelled `valids_keys`, which left
                # `valid_keys` unbound (NameError) when keyList is None.
                valid_keys = pressed
            if len(valid_keys) > 0:
                self.stop_input()
                if timeStamped:
                    return valid_keys, current_time
                else:
                    return valid_keys
        return []

    def recordKeys(self, keyList=None, timeStamped=False, maxWait=30):
        """Record button presses on a background thread; fetch via get_keys()."""
        if self._record_thread is not None:
            raise RuntimeError('Cannot call recordKeys() more than once without calling getKeys()')
        if self._reading:
            raise RuntimeError('Cannot record keys pressed before recordKeys() is called')
        # BUG FIX: streaming was never enabled, so the recorder thread would
        # block forever on _read(); also forward maxWait, which was dropped.
        self.start_input()
        self._record_thread = Thread(target=self._recorder,
                                     args=(keyList, timeStamped, maxWait))
        self._record_thread.start()

    def _recorder(self, keyList=None, timeStamped=False, maxWait=30):
        """Worker loop: collect presses into ``self._recorded_presses``."""
        self._continue_recording = True
        if timeStamped:
            clock = timeStamped.getTime
            timeStamped = True
        else:
            clock = time.time
        if maxWait is None or maxWait is False:
            run_infinite = True
        else:
            run_infinite = False
        self._recorded_presses = []
        start_time = clock()
        current_time = start_time
        last_keys = 0
        while self._continue_recording and (run_infinite or (current_time - start_time < maxWait)):
            current_time = clock()
            keys = ord(self._read())
            if keys == last_keys:
                continue
            last_keys = keys
            keys = self._keys_pressed(keys)
            if keyList is not None:
                valid_keys = [key for key in keys if key in keyList]
            else:
                # BUG FIX: was misspelled `valids_keys`.
                valid_keys = keys
            if len(valid_keys) > 0:
                if timeStamped:
                    self._recorded_presses.append((valid_keys, current_time - start_time))
                else:
                    self._recorded_presses.extend(valid_keys)
        if last_keys != 0 and timeStamped:
            # BUG FIX: the trailing entry was appended as (time, keys);
            # keep (keys, time) to match the entries appended above.
            self._recorded_presses.append((self._keys_pressed(last_keys), current_time - start_time))
        if not timeStamped:
            self._recorded_presses = list(set(self._recorded_presses))

    def get_keys(self, keyList=None, timeout=-1, timeStamp=False):
        """Stop recording and return the presses gathered by recordKeys()."""
        if self._recorded_presses is None:
            raise RuntimeError('recordKeys() method must be called before keys are available')
        self._continue_recording = False
        self.stop_input()
        if self._record_thread is not None:
            self._record_thread.join()
            # BUG FIX: reset so recordKeys() may be called again.
            self._record_thread = None
        presses = self._recorded_presses
        self._recorded_presses = None
        return presses

    def _keys_pressed(self, keys):
        """Decode a status byte into an ascending list of button numbers (1-5)."""
        pressed = []
        for bit in range(5):
            if (keys & (self._button_codes[bit])) != 0:
                pressed.append(bit + 1)
        return pressed

    def update_lights(self):
        """Transmit the current ``self._lights`` state to the box lamps."""
        status = 0b1100000  # lamp-control command base byte
        for i, light in enumerate(self._lights):
            if light:
                status += self._light_codes[i]
        self._signal(chr(status))

    def set_light(self, light, on, update=False):
        """Set lamp *light* (1-5) to *on*; transmit only when update=True."""
        self._lights[light - 1] = on
        if update:
            self.update_lights()

    def set_lights(self, lights, update=False):
        """Set all lamps from a 5-element bool list or a {light: on} dict."""
        if type(lights) is list and len(lights) == len(self._lights):
            self._lights = lights
        elif type(lights) is dict:
            # BUG FIX: iterating a dict yields only keys; use items() so
            # the (light, on) unpacking works.
            for light, on in lights.items():
                self.set_light(light, on)
        if update:
            self.update_lights()

    def blink_lights(self, lights, interval=0.25, duration=1):
        """Toggle *lights* every *interval* seconds for *duration* seconds,
        then switch them all off."""
        if type(lights) is int:
            # BUG FIX: tuple(int) raises TypeError; wrap in a 1-tuple.
            lights = (lights,)
        set_on = True
        start = time.time()
        interval_start = start
        while time.time() - start < duration:
            if time.time() - interval_start >= interval:
                for light in lights:
                    self.set_light(light, set_on)
                self.update_lights()
                interval_start = time.time()
                set_on = (not set_on)
        for light in lights:
            self.set_light(light, False)
        self.update_lights()
| {
"repo_name": "NickAnderegg/rpacr-mazeexperiment",
"path": "mazeexperiment/experiment/srbox.py",
"copies": "2",
"size": "8061",
"license": "mit",
"hash": 2455211230294723600,
"line_mean": 30.1235521236,
"line_max": 99,
"alpha_frac": 0.5298350081,
"autogenerated": false,
"ratio": 4.098118962887646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012521131247767473,
"num_lines": 259
} |
from __future__ import absolute_import, division, print_function
from pudb.py3compat import PY3
# {{{ breakpoint validity
def generate_executable_lines_for_code(code):
    """Yield the line numbers carrying executable statements in *code*.

    Decodes the (pre-3.10) ``co_lnotab`` table: every second byte is a line
    increment relative to ``co_firstlineno``.
    """
    lineno = code.co_firstlineno
    yield lineno
    if PY3:
        # bytes indexing already yields ints on Python 3
        for delta in code.co_lnotab[1::2]:
            lineno += delta
            yield lineno
    else:
        for char in code.co_lnotab[1::2]:
            lineno += ord(char)
            yield lineno
def get_executable_lines_for_file(filename):
    """Return the set of executable line numbers in *filename*.

    Compiles the file and walks every nested code object found in
    ``co_consts`` (inspired by rpdb2).
    """
    # inspired by rpdb2
    from linecache import getlines
    from types import CodeType
    source = "".join(getlines(filename))
    pending = [compile(source, filename, "exec")]
    execable_lines = set()
    while pending:
        code = pending.pop()
        execable_lines.update(generate_executable_lines_for_code(code))
        pending.extend(const
                       for const in code.co_consts
                       if isinstance(const, CodeType))
    return execable_lines
def get_breakpoint_invalid_reason(filename, lineno):
    """Return a human-readable reason why a breakpoint at
    (*filename*, *lineno*) is invalid, or None when it is valid."""
    # simple logic stolen from pdb
    import linecache
    if not linecache.getline(filename, lineno):
        return "Line is beyond end of file."
    try:
        executable_lines = get_executable_lines_for_file(filename)
    except SyntaxError:
        return "File failed to compile."
    if lineno not in executable_lines:
        return "No executable statement found in line."
def lookup_module(filename):
    """Translate a (possibly incomplete) file or module name into an
    absolute file name; return None when nothing matches.
    """
    # stolen from pdb
    import os
    import sys
    if os.path.isabs(filename) and os.path.exists(filename):
        return filename
    candidate = os.path.join(sys.path[0], filename)
    if os.path.exists(candidate):
        return candidate
    ext = os.path.splitext(filename)[1]
    if not ext:
        filename = filename + '.py'
    if os.path.isabs(filename):
        return filename
    for dirname in sys.path:
        # resolve symlinked sys.path entries before joining
        while os.path.islink(dirname):
            dirname = os.readlink(dirname)
        fullname = os.path.join(dirname, filename)
        if os.path.exists(fullname):
            return fullname
    return None
# }}}
# {{{ file encoding detection
# stolen from Python 3.1's tokenize.py, by Ka-Ping Yee
import re
# BUG FIX: use a raw string — "\s"/"\w" in a plain literal are invalid
# escape sequences (DeprecationWarning today, an error in future Pythons).
cookie_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)")
from codecs import lookup, BOM_UTF8
if PY3:
    # compare against str lines on Python 3
    BOM_UTF8 = BOM_UTF8.decode()
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised. If the encoding cookie is an
    invalid charset, raise a SyntaxError.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None

    def read_or_stop():
        # readline may be exhausted; treat that the same as an empty line
        try:
            return readline()
        except StopIteration:
            return ''

    def find_cookie(line):
        # return the encoding named by a PEP 263 cookie on `line`, or None
        try:
            if PY3:
                line_string = line
            else:
                # cookie must be ASCII-decodable per PEP 263
                line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None
        matches = cookie_re.findall(line_string)
        if not matches:
            return None
        encoding = matches[0]
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)
        if bom_found and codec.name != 'utf-8':
            # This behaviour mimics the Python interpreter
            raise SyntaxError('encoding problem: utf-8')
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        # strip the 3-byte BOM before cookie inspection
        first = first[3:]
        if not first:
            return 'utf-8', []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    # PEP 263 allows the cookie on line 1 or 2; check the second line
    second = read_or_stop()
    if not second:
        return 'utf-8', [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return 'utf-8', [first, second]
# }}}
# {{{ traceback formatting
class StringExceptionValueWrapper:
    """Wrap a plain-string exception value so it quacks enough like an
    exception object for traceback formatting: str() yields the message and
    ``__context__``/``__cause__`` read as None."""

    __context__ = None
    __cause__ = None

    def __init__(self, string_val):
        self.string_val = string_val

    def __str__(self):
        return self.string_val
def format_exception(exc_tuple):
    """Format *exc_tuple* like ``traceback.format_exception`` while
    tolerating string exception values.

    Works around http://bugs.python.org/issue17413 — see also
    https://github.com/inducer/pudb/issues/61.
    """
    from traceback import format_exception
    if not PY3:
        return format_exception(*exc_tuple)
    exc_type, exc_value, exc_tb = exc_tuple
    if isinstance(exc_value, str):
        exc_value = StringExceptionValueWrapper(exc_value)
    return format_exception(
        exc_type, exc_value, exc_tb,
        chain=hasattr(exc_value, "__context__"))
# }}}
# vim: foldmethod=marker
| {
"repo_name": "albfan/pudb",
"path": "pudb/lowlevel.py",
"copies": "1",
"size": "5447",
"license": "mit",
"hash": -6303367642947772000,
"line_mean": 25.7009803922,
"line_max": 78,
"alpha_frac": 0.6146502662,
"autogenerated": false,
"ratio": 3.9788166544923302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.509346692069233,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest
import gc
def test_simple_unittest(testdir):
    """A passing and a failing TestCase method are each reported as such."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            def testpassing(self):
                self.assertEquals('foo', 'foo')
            def test_failing(self):
                self.assertEquals('foo', 'bar')
    """)
    reprec = testdir.inline_run(testpath)
    assert reprec.matchreport("testpassing").passed
    assert reprec.matchreport("test_failing").failed
def test_runTest_method(testdir):
    """runTest is collected only when the TestCase has no other test methods."""
    testdir.makepyfile("""
        import unittest
        class MyTestCaseWithRunTest(unittest.TestCase):
            def runTest(self):
                self.assertEquals('foo', 'foo')
        class MyTestCaseWithoutRunTest(unittest.TestCase):
            def runTest(self):
                self.assertEquals('foo', 'foo')
            def test_something(self):
                pass
    """)
    result = testdir.runpytest("-v")
    result.stdout.fnmatch_lines("""
        *MyTestCaseWithRunTest::runTest*
        *MyTestCaseWithoutRunTest::test_something*
        *2 passed*
    """)
def test_isclasscheck_issue53(testdir):
    """An instance with a permissive __getattr__ must not be collected (#53)."""
    testpath = testdir.makepyfile("""
        import unittest
        class _E(object):
            def __getattr__(self, tag):
                pass
        E = _E()
    """)
    result = testdir.runpytest(testpath)
    assert result.ret == EXIT_NOTESTSCOLLECTED
def test_setup(testdir):
    """Both unittest setUp and pytest setup_method/teardown_method run."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            def setUp(self):
                self.foo = 1
            def setup_method(self, method):
                self.foo2 = 1
            def test_both(self):
                self.assertEquals(1, self.foo)
                assert self.foo2 == 1
            def teardown_method(self, method):
                assert 0, "42"
    """)
    reprec = testdir.inline_run("-s", testpath)
    assert reprec.matchreport("test_both", when="call").passed
    rep = reprec.matchreport("test_both", when="teardown")
    assert rep.failed and '42' in str(rep.longrepr)
def test_setUpModule(testdir):
    """Module-level setUpModule/tearDownModule hooks are honored."""
    testpath = testdir.makepyfile("""
        l = []
        def setUpModule():
            l.append(1)
        def tearDownModule():
            del l[0]
        def test_hello():
            assert l == [1]
        def test_world():
            assert l == [1]
    """)
    result = testdir.runpytest(testpath)
    result.stdout.fnmatch_lines([
        "*2 passed*",
    ])
def test_setUpModule_failing_no_teardown(testdir):
    """tearDownModule must not run when setUpModule raised."""
    testpath = testdir.makepyfile("""
        l = []
        def setUpModule():
            0/0
        def tearDownModule():
            l.append(1)
        def test_hello():
            pass
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=0, failed=1)
    call = reprec.getcalls("pytest_runtest_setup")[0]
    assert not call.item.module.l
def test_new_instances(testdir):
    """Each test method runs on a fresh TestCase instance."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            def test_func1(self):
                self.x = 2
            def test_func2(self):
                assert not hasattr(self, 'x')
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=2)
def test_teardown(testdir):
    """tearDown runs after the test; a second TestCase observes its effect."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            l = []
            def test_one(self):
                pass
            def tearDown(self):
                self.l.append(None)
        class Second(unittest.TestCase):
            def test_check(self):
                self.assertEquals(MyTestCase.l, [None])
    """)
    reprec = testdir.inline_run(testpath)
    passed, skipped, failed = reprec.countoutcomes()
    assert failed == 0, failed
    assert passed == 2
    assert passed + skipped + failed == 2
def test_teardown_issue1649(testdir):
    """
    Are TestCase objects cleaned up? Often unittest TestCase objects set
    attributes that are large and expensive during setUp.
    The TestCase will not be cleaned up if the test fails, because it
    would then exist in the stackframe.
    """
    testpath = testdir.makepyfile("""
        import unittest
        class TestCaseObjectsShouldBeCleanedUp(unittest.TestCase):
            def setUp(self):
                self.an_expensive_object = 1
            def test_demo(self):
                pass
    """)
    testdir.inline_run("-s", testpath)
    # force collection so any lingering TestCase instance becomes visible
    gc.collect()
    for obj in gc.get_objects():
        assert type(obj).__name__ != 'TestCaseObjectsShouldBeCleanedUp'
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue148(testdir):
    """A class-level @unittest.skip suppresses setUpClass/tearDownClass (#148)."""
    testpath = testdir.makepyfile("""
        import unittest
        @unittest.skip("hello")
        class MyTestCase(unittest.TestCase):
            @classmethod
            def setUpClass(self):
                xxx
            def test_one(self):
                pass
            @classmethod
            def tearDownClass(self):
                xxx
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(skipped=1)
def test_method_and_teardown_failing_reporting(testdir):
    """A failing test plus failing tearDown report one failure and one error."""
    testdir.makepyfile("""
        import unittest, pytest
        class TC(unittest.TestCase):
            def tearDown(self):
                assert 0, "down1"
            def test_method(self):
                assert False, "down2"
    """)
    result = testdir.runpytest("-s")
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        "*tearDown*",
        "*assert 0*",
        "*test_method*",
        "*assert False*",
        "*1 failed*1 error*",
    ])
def test_setup_failure_is_shown(testdir):
    """A setUp failure is reported and the test body never executes."""
    testdir.makepyfile("""
        import unittest
        import pytest
        class TC(unittest.TestCase):
            def setUp(self):
                assert 0, "down1"
            def test_method(self):
                print ("never42")
                xyz
    """)
    result = testdir.runpytest("-s")
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        "*setUp*",
        "*assert 0*down1*",
        "*1 failed*",
    ])
    assert 'never42' not in result.stdout.str()
def test_setup_setUpClass(testdir):
    """setUpClass/tearDownClass run exactly once around all class tests."""
    testpath = testdir.makepyfile("""
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            x = 0
            @classmethod
            def setUpClass(cls):
                cls.x += 1
            def test_func1(self):
                assert self.x == 1
            def test_func2(self):
                assert self.x == 1
            @classmethod
            def tearDownClass(cls):
                cls.x -= 1
        def test_teareddown():
            assert MyTestCase.x == 0
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=3)
def test_setup_class(testdir):
    """pytest-style setup_class/teardown_class work on a unittest TestCase."""
    testpath = testdir.makepyfile("""
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            x = 0
            def setup_class(cls):
                cls.x += 1
            def test_func1(self):
                assert self.x == 1
            def test_func2(self):
                assert self.x == 1
            def teardown_class(cls):
                cls.x -= 1
        def test_teareddown():
            assert MyTestCase.x == 0
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=3)
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_adderrorandfailure_defers(testdir, type):
    """result.addError/addFailure with a raw excinfo tuple must not raise."""
    testdir.makepyfile("""
        from unittest import TestCase
        import pytest
        class MyTestCase(TestCase):
            def run(self, result):
                excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
                try:
                    result.add%s(self, excinfo._excinfo)
                except KeyboardInterrupt:
                    raise
                except:
                    pytest.fail("add%s should not raise")
            def test_hello(self):
                pass
    """ % (type, type))
    result = testdir.runpytest()
    assert 'should not raise' not in result.stdout.str()
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_custom_exception_info(testdir, type):
    """An excinfo that cannot be re-wrapped still produces a useful report."""
    testdir.makepyfile("""
        from unittest import TestCase
        import py, pytest
        import _pytest._code
        class MyTestCase(TestCase):
            def run(self, result):
                excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
                # we fake an incompatible exception info
                from _pytest.monkeypatch import MonkeyPatch
                mp = MonkeyPatch()
                def t(*args):
                    mp.undo()
                    raise TypeError()
                mp.setattr(_pytest._code, 'ExceptionInfo', t)
                try:
                    excinfo = excinfo._excinfo
                    result.add%(type)s(self, excinfo)
                finally:
                    mp.undo()
            def test_hello(self):
                pass
    """ % locals())
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "NOTE: Incompatible Exception Representation*",
        "*ZeroDivisionError*",
        "*1 failed*",
    ])
def test_testcase_totally_incompatible_exception_info(testdir):
    """addError with a non-excinfo payload is flagged, not crashed on."""
    item, = testdir.getitems("""
        from unittest import TestCase
        class MyTestCase(TestCase):
            def test_hello(self):
                pass
    """)
    item.addError(None, 42)
    excinfo = item._excinfo.pop(0)
    assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr())
def test_module_level_pytestmark(testdir):
    """A module-level pytestmark (xfail) applies to TestCase methods."""
    testpath = testdir.makepyfile("""
        import unittest
        import pytest
        pytestmark = pytest.mark.xfail
        class MyTestCase(unittest.TestCase):
            def test_func1(self):
                assert 0
    """)
    reprec = testdir.inline_run(testpath, "-s")
    reprec.assertoutcome(skipped=1)
def test_trial_testcase_skip_property(testdir):
    """A twisted.trial class-level `skip` attribute skips the test."""
    pytest.importorskip('twisted.trial.unittest')
    testpath = testdir.makepyfile("""
        from twisted.trial import unittest
        class MyTestCase(unittest.TestCase):
            skip = 'dont run'
            def test_func(self):
                pass
    """)
    reprec = testdir.inline_run(testpath, "-s")
    reprec.assertoutcome(skipped=1)
def test_trial_testfunction_skip_property(testdir):
    """A twisted.trial per-function `skip` attribute skips the test."""
    pytest.importorskip('twisted.trial.unittest')
    testpath = testdir.makepyfile("""
        from twisted.trial import unittest
        class MyTestCase(unittest.TestCase):
            def test_func(self):
                pass
            test_func.skip = 'dont run'
    """)
    reprec = testdir.inline_run(testpath, "-s")
    reprec.assertoutcome(skipped=1)
def test_trial_testcase_todo_property(testdir):
    """A twisted.trial class-level `todo` marks the failing test as expected."""
    pytest.importorskip('twisted.trial.unittest')
    testpath = testdir.makepyfile("""
        from twisted.trial import unittest
        class MyTestCase(unittest.TestCase):
            todo = 'dont run'
            def test_func(self):
                assert 0
    """)
    reprec = testdir.inline_run(testpath, "-s")
    reprec.assertoutcome(skipped=1)
def test_trial_testfunction_todo_property(testdir):
    """A twisted.trial per-function `todo` marks the failing test as expected."""
    pytest.importorskip('twisted.trial.unittest')
    testpath = testdir.makepyfile("""
        from twisted.trial import unittest
        class MyTestCase(unittest.TestCase):
            def test_func(self):
                assert 0
            test_func.todo = 'dont run'
    """)
    reprec = testdir.inline_run(testpath, "-s")
    reprec.assertoutcome(skipped=1)
class TestTrialUnittest(object):
    """Integration tests for twisted.trial TestCase support in pytest."""

    def setup_class(cls):
        # skip the whole class when twisted is not installed
        cls.ut = pytest.importorskip("twisted.trial.unittest")

    def test_trial_testcase_runtest_not_collected(self, testdir):
        """runTest on a trial TestCase follows the same collection rules."""
        testdir.makepyfile("""
            from twisted.trial.unittest import TestCase
            class TC(TestCase):
                def test_hello(self):
                    pass
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
        testdir.makepyfile("""
            from twisted.trial.unittest import TestCase
            class TC(TestCase):
                def runTest(self):
                    pass
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_trial_exceptions_with_skips(self, testdir):
        """All trial/pytest skip, xfail and todo flavors are reported correctly."""
        testdir.makepyfile("""
            from twisted.trial import unittest
            import pytest
            class TC(unittest.TestCase):
                def test_hello(self):
                    pytest.skip("skip_in_method")
                @pytest.mark.skipif("sys.version_info != 1")
                def test_hello2(self):
                    pass
                @pytest.mark.xfail(reason="iwanto")
                def test_hello3(self):
                    assert 0
                def test_hello4(self):
                    pytest.xfail("i2wanto")
                def test_trial_skip(self):
                    pass
                test_trial_skip.skip = "trialselfskip"
                def test_trial_todo(self):
                    assert 0
                test_trial_todo.todo = "mytodo"
                def test_trial_todo_success(self):
                    pass
                test_trial_todo_success.todo = "mytodo"
            class TC2(unittest.TestCase):
                def setup_class(cls):
                    pytest.skip("skip_in_setup_class")
                def test_method(self):
                    pass
        """)
        # expected totals depend on whether an unexpected success is a failure
        from _pytest.compat import _is_unittest_unexpected_success_a_failure
        should_fail = _is_unittest_unexpected_success_a_failure()
        result = testdir.runpytest("-rxs")
        result.stdout.fnmatch_lines_random([
            "*XFAIL*test_trial_todo*",
            "*trialselfskip*",
            "*skip_in_setup_class*",
            "*iwanto*",
            "*i2wanto*",
            "*sys.version_info*",
            "*skip_in_method*",
            "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
        ])
        assert result.ret == (1 if should_fail else 0)

    def test_trial_error(self, testdir):
        """Deferred crashes and leftover DelayedCalls surface as errors."""
        testdir.makepyfile("""
            from twisted.trial.unittest import TestCase
            from twisted.internet.defer import Deferred
            from twisted.internet import reactor
            class TC(TestCase):
                def test_one(self):
                    crash
                def test_two(self):
                    def f(_):
                        crash
                    d = Deferred()
                    d.addCallback(f)
                    reactor.callLater(0.3, d.callback, None)
                    return d
                def test_three(self):
                    def f():
                        pass # will never get called
                    reactor.callLater(0.3, f)
                # will crash at teardown
                def test_four(self):
                    def f(_):
                        reactor.callLater(0.3, f)
                        crash
                    d = Deferred()
                    d.addCallback(f)
                    reactor.callLater(0.3, d.callback, None)
                    return d
                # will crash both at test time and at teardown
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*ERRORS*",
            "*DelayedCalls*",
            "*test_four*",
            "*NameError*crash*",
            "*test_one*",
            "*NameError*crash*",
            "*test_three*",
            "*DelayedCalls*",
            "*test_two*",
            "*crash*",
        ])

    def test_trial_pdb(self, testdir):
        """A failing trial test drops into pdb showing the assertion message."""
        p = testdir.makepyfile("""
            from twisted.trial import unittest
            import pytest
            class TC(unittest.TestCase):
                def test_hello(self):
                    assert 0, "hellopdb"
        """)
        child = testdir.spawn_pytest(p)
        child.expect("hellopdb")
        child.sendeof()
def test_djangolike_testcase(testdir):
    """A TestCase overriding __call__ with pre/post hooks (Django style) works."""
    # contributed from Morten Breekevold
    testdir.makepyfile("""
        from unittest import TestCase, main
        class DjangoLikeTestCase(TestCase):
            def setUp(self):
                print ("setUp()")
            def test_presetup_has_been_run(self):
                print ("test_thing()")
                self.assertTrue(hasattr(self, 'was_presetup'))
            def tearDown(self):
                print ("tearDown()")
            def __call__(self, result=None):
                try:
                    self._pre_setup()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    import sys
                    result.addError(self, sys.exc_info())
                    return
                super(DjangoLikeTestCase, self).__call__(result)
                try:
                    self._post_teardown()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    import sys
                    result.addError(self, sys.exc_info())
                    return
            def _pre_setup(self):
                print ("_pre_setup()")
                self.was_presetup = True
            def _post_teardown(self):
                print ("_post_teardown()")
    """)
    result = testdir.runpytest("-s")
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*_pre_setup()*",
        "*setUp()*",
        "*test_thing()*",
        "*tearDown()*",
        "*_post_teardown()*",
    ])
def test_unittest_not_shown_in_traceback(testdir):
    """unittest internals (failUnlessEqual frames) are hidden from tracebacks."""
    testdir.makepyfile("""
        import unittest
        class t(unittest.TestCase):
            def test_hello(self):
                x = 3
                self.assertEquals(x, 4)
    """)
    res = testdir.runpytest()
    assert "failUnlessEqual" not in res.stdout.str()
def test_unorderable_types(testdir):
    """Dynamically created TestCase classes must not trigger sort TypeErrors."""
    testdir.makepyfile("""
        import unittest
        class TestJoinEmpty(unittest.TestCase):
            pass
        def make_test():
            class Test(unittest.TestCase):
                pass
            Test.__name__ = "TestFoo"
            return Test
        TestFoo = make_test()
    """)
    result = testdir.runpytest()
    assert "TypeError" not in result.stdout.str()
    assert result.ret == EXIT_NOTESTSCOLLECTED
def test_unittest_typerror_traceback(testdir):
    """A test method with an extra required argument reports a TypeError."""
    testdir.makepyfile("""
        import unittest
        class TestJoinEmpty(unittest.TestCase):
            def test_hello(self, arg1):
                pass
    """)
    result = testdir.runpytest()
    assert "TypeError" in result.stdout.str()
    assert result.ret == 1
@pytest.mark.skipif("sys.version_info < (2,7)")
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
    """@unittest.expectedFailure on a failing test reports xfail (or trial OK)."""
    script = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            @unittest.expectedFailure
            def test_failing_test_is_xfail(self):
                assert False
        if __name__ == '__main__':
            unittest.main()
    """)
    if runner == 'pytest':
        result = testdir.runpytest("-rxX")
        result.stdout.fnmatch_lines([
            "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
            "*1 xfailed*",
        ])
    else:
        result = testdir.runpython(script)
        result.stderr.fnmatch_lines([
            "*1 test in*",
            "*OK*(expected failures=1)*",
        ])
    assert result.ret == 0
@pytest.mark.skipif("sys.version_info < (2,7)")
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
    """@unittest.expectedFailure on a passing test is an unexpected success."""
    script = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            @unittest.expectedFailure
            def test_passing_test_is_fail(self):
                assert True
        if __name__ == '__main__':
            unittest.main()
    """)
    # whether an unexpected success counts as a failure depends on the
    # Python/unittest version
    from _pytest.compat import _is_unittest_unexpected_success_a_failure
    should_fail = _is_unittest_unexpected_success_a_failure()
    if runner == 'pytest':
        result = testdir.runpytest("-rxX")
        result.stdout.fnmatch_lines([
            "*MyTestCase*test_passing_test_is_fail*",
            "*1 failed*" if should_fail else "*1 xpassed*",
        ])
    else:
        result = testdir.runpython(script)
        result.stderr.fnmatch_lines([
            "*1 test in*",
            "*(unexpected successes=1)*",
        ])
    assert result.ret == (1 if should_fail else 0)
@pytest.mark.parametrize('fix_type, stmt', [
    ('fixture', 'return'),
    ('yield_fixture', 'yield'),
])
def test_unittest_setup_interaction(testdir, fix_type, stmt):
    """Autouse pytest fixtures (class and function scoped) work on TestCases."""
    testdir.makepyfile("""
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            @pytest.{fix_type}(scope="class", autouse=True)
            def perclass(self, request):
                request.cls.hello = "world"
                {stmt}
            @pytest.{fix_type}(scope="function", autouse=True)
            def perfunction(self, request):
                request.instance.funcname = request.function.__name__
                {stmt}
            def test_method1(self):
                assert self.funcname == "test_method1"
                assert self.hello == "world"
            def test_method2(self):
                assert self.funcname == "test_method2"
            def test_classattr(self):
                assert self.__class__.hello == "world"
    """.format(fix_type=fix_type, stmt=stmt))
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*3 passed*")
def test_non_unittest_no_setupclass_support(testdir):
    """setUpClass/tearDownClass are NOT honored on plain (non-unittest) classes."""
    testpath = testdir.makepyfile("""
        class TestFoo(object):
            x = 0
            @classmethod
            def setUpClass(cls):
                cls.x = 1
            def test_method1(self):
                assert self.x == 0
            @classmethod
            def tearDownClass(cls):
                cls.x = 1
        def test_not_teareddown():
            assert TestFoo.x == 0
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=2)
def test_no_teardown_if_setupclass_failed(testdir):
    """tearDownClass must not run when setUpClass raised."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            x = 0
            @classmethod
            def setUpClass(cls):
                cls.x = 1
                assert False
            def test_func1(self):
                cls.x = 10
            @classmethod
            def tearDownClass(cls):
                cls.x = 100
        def test_notTornDown():
            assert MyTestCase.x == 1
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=1, failed=1)
def test_issue333_result_clearing(testdir):
    """Issue #333: a conftest hook that raises after ``__multicall__.execute()``
    must not clear the original test failure — the ZeroDivisionError failure
    is still reported.  (Uses the legacy ``__multicall__`` hook API.)"""
    testdir.makeconftest("""
        def pytest_runtest_call(__multicall__, item):
            __multicall__.execute()
            assert 0
    """)
    testdir.makepyfile("""
        import unittest
        class TestIt(unittest.TestCase):
            def test_func(self):
                0/0
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(failed=1)
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_raise_skip_issue748(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
def test_one(self):
raise unittest.SkipTest('skipping due to reasons')
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*test_foo.py*skipping due to reasons*
*1 skipped*
""")
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue1169(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.skip("skipping due to reasons")
def test_skip(self):
self.fail()
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*skipping due to reasons*
*1 skipped*
""")
def test_class_method_containing_test_issue1558(testdir):
    """Issue #1558: setting ``__test__ = False`` on a TestCase method excludes
    it from collection, so only the other method runs."""
    testdir.makepyfile(test_foo="""
        import unittest
        class MyTestCase(unittest.TestCase):
            def test_should_run(self):
                pass
            def test_should_not_run(self):
                pass
            test_should_not_run.__test__ = False
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
| {
"repo_name": "flub/pytest",
"path": "testing/test_unittest.py",
"copies": "1",
"size": "25123",
"license": "mit",
"hash": 3406843532787892000,
"line_mean": 29.8257668712,
"line_max": 95,
"alpha_frac": 0.5417346654,
"autogenerated": false,
"ratio": 4.433992234380515,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5475726899780515,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest
import gc
def test_simple_unittest(testdir):
    """Basic smoke test: pytest collects and runs unittest.TestCase methods,
    reporting one pass and one failure."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            def testpassing(self):
                self.assertEqual('foo', 'foo')
            def test_failing(self):
                self.assertEqual('foo', 'bar')
    """)
    reprec = testdir.inline_run(testpath)
    assert reprec.matchreport("testpassing").passed
    assert reprec.matchreport("test_failing").failed
def test_runTest_method(testdir):
    """A ``runTest`` method is collected only when the TestCase defines no
    other ``test_*`` methods; otherwise the ``test_*`` methods win."""
    testdir.makepyfile("""
        import unittest
        class MyTestCaseWithRunTest(unittest.TestCase):
            def runTest(self):
                self.assertEqual('foo', 'foo')
        class MyTestCaseWithoutRunTest(unittest.TestCase):
            def runTest(self):
                self.assertEqual('foo', 'foo')
            def test_something(self):
                pass
    """)
    result = testdir.runpytest("-v")
    result.stdout.fnmatch_lines("""
        *MyTestCaseWithRunTest::runTest*
        *MyTestCaseWithoutRunTest::test_something*
        *2 passed*
    """)
def test_isclasscheck_issue53(testdir):
    """Issue #53: an instance whose ``__getattr__`` answers everything must not
    be mistaken for a test class during collection — nothing is collected."""
    testpath = testdir.makepyfile("""
        import unittest
        class _E(object):
            def __getattr__(self, tag):
                pass
        E = _E()
    """)
    result = testdir.runpytest(testpath)
    assert result.ret == EXIT_NOTESTSCOLLECTED
def test_setup(testdir):
    """Both unittest ``setUp`` and pytest-style ``setup_method`` run for the
    same TestCase, and a failing ``teardown_method`` is reported in the
    teardown phase."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            def setUp(self):
                self.foo = 1
            def setup_method(self, method):
                self.foo2 = 1
            def test_both(self):
                self.assertEqual(1, self.foo)
                assert self.foo2 == 1
            def teardown_method(self, method):
                assert 0, "42"
    """)
    reprec = testdir.inline_run("-s", testpath)
    assert reprec.matchreport("test_both", when="call").passed
    rep = reprec.matchreport("test_both", when="teardown")
    assert rep.failed and '42' in str(rep.longrepr)
def test_setUpModule(testdir):
    """Module-level ``setUpModule``/``tearDownModule`` hooks are invoked even
    for plain (non-TestCase) test functions."""
    testpath = testdir.makepyfile("""
        l = []
        def setUpModule():
            l.append(1)
        def tearDownModule():
            del l[0]
        def test_hello():
            assert l == [1]
        def test_world():
            assert l == [1]
    """)
    result = testdir.runpytest(testpath)
    result.stdout.fnmatch_lines([
        "*2 passed*",
    ])
def test_setUpModule_failing_no_teardown(testdir):
    """When ``setUpModule`` raises, ``tearDownModule`` must not run: the
    module-level list stays empty."""
    testpath = testdir.makepyfile("""
        l = []
        def setUpModule():
            0/0
        def tearDownModule():
            l.append(1)
        def test_hello():
            pass
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=0, failed=1)
    call = reprec.getcalls("pytest_runtest_setup")[0]
    assert not call.item.module.l
def test_new_instances(testdir):
    """Each TestCase method runs on a fresh instance — state set in one test
    method is not visible in the next."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            def test_func1(self):
                self.x = 2
            def test_func2(self):
                assert not hasattr(self, 'x')
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=2)
def test_teardown(testdir):
    """``tearDown`` runs after the test: a second TestCase observes the side
    effect the first TestCase's teardown left behind."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            l = []
            def test_one(self):
                pass
            def tearDown(self):
                self.l.append(None)
        class Second(unittest.TestCase):
            def test_check(self):
                self.assertEqual(MyTestCase.l, [None])
    """)
    reprec = testdir.inline_run(testpath)
    passed, skipped, failed = reprec.countoutcomes()
    assert failed == 0, failed
    assert passed == 2
    assert passed + skipped + failed == 2
def test_teardown_issue1649(testdir):
    """
    Are TestCase objects cleaned up? Often unittest TestCase objects set
    attributes that are large and expensive during setUp.
    The TestCase will not be cleaned up if the test fails, because it
    would then exist in the stackframe.
    """
    testpath = testdir.makepyfile("""
        import unittest
        class TestCaseObjectsShouldBeCleanedUp(unittest.TestCase):
            def setUp(self):
                self.an_expensive_object = 1
            def test_demo(self):
                pass
    """)
    testdir.inline_run("-s", testpath)
    gc.collect()
    # After a full collection no instance of the TestCase may survive.
    for obj in gc.get_objects():
        assert type(obj).__name__ != 'TestCaseObjectsShouldBeCleanedUp'
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue148(testdir):
testpath = testdir.makepyfile("""
import unittest
@unittest.skip("hello")
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
xxx
def test_one(self):
pass
@classmethod
def tearDownClass(self):
xxx
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(skipped=1)
def test_method_and_teardown_failing_reporting(testdir):
    """When both the test method and ``tearDown`` fail, pytest reports the
    method failure AND a separate teardown error (``1 failed, 1 error``)."""
    testdir.makepyfile("""
        import unittest, pytest
        class TC(unittest.TestCase):
            def tearDown(self):
                assert 0, "down1"
            def test_method(self):
                assert False, "down2"
    """)
    result = testdir.runpytest("-s")
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        "*tearDown*",
        "*assert 0*",
        "*test_method*",
        "*assert False*",
        "*1 failed*1 error*",
    ])
def test_setup_failure_is_shown(testdir):
    """A failing ``setUp`` is reported with its assertion message, and the test
    body never executes (its print output must not appear)."""
    testdir.makepyfile("""
        import unittest
        import pytest
        class TC(unittest.TestCase):
            def setUp(self):
                assert 0, "down1"
            def test_method(self):
                print ("never42")
                xyz
    """)
    result = testdir.runpytest("-s")
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        "*setUp*",
        "*assert 0*down1*",
        "*1 failed*",
    ])
    assert 'never42' not in result.stdout.str()
def test_setup_setUpClass(testdir):
    """``setUpClass`` runs once before the class's tests and ``tearDownClass``
    restores the counter afterwards, as observed by a later module-level test."""
    testpath = testdir.makepyfile("""
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            x = 0
            @classmethod
            def setUpClass(cls):
                cls.x += 1
            def test_func1(self):
                assert self.x == 1
            def test_func2(self):
                assert self.x == 1
            @classmethod
            def tearDownClass(cls):
                cls.x -= 1
        def test_teareddown():
            assert MyTestCase.x == 0
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=3)
def test_setup_class(testdir):
    """pytest-style ``setup_class``/``teardown_class`` (no ``@classmethod``,
    no camelCase) also work on a unittest.TestCase subclass."""
    testpath = testdir.makepyfile("""
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            x = 0
            def setup_class(cls):
                cls.x += 1
            def test_func1(self):
                assert self.x == 1
            def test_func2(self):
                assert self.x == 1
            def teardown_class(cls):
                cls.x -= 1
        def test_teareddown():
            assert MyTestCase.x == 0
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=3)
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_adderrorandfailure_defers(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import pytest
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
try:
result.add%s(self, excinfo._excinfo)
except KeyboardInterrupt:
raise
except:
pytest.fail("add%s should not raise")
def test_hello(self):
pass
""" % (type, type))
result = testdir.runpytest()
assert 'should not raise' not in result.stdout.str()
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_custom_exception_info(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import py, pytest
import _pytest._code
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
# we fake an incompatible exception info
from _pytest.monkeypatch import MonkeyPatch
mp = MonkeyPatch()
def t(*args):
mp.undo()
raise TypeError()
mp.setattr(_pytest._code, 'ExceptionInfo', t)
try:
excinfo = excinfo._excinfo
result.add%(type)s(self, excinfo)
finally:
mp.undo()
def test_hello(self):
pass
""" % locals())
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"NOTE: Incompatible Exception Representation*",
"*ZeroDivisionError*",
"*1 failed*",
])
def test_testcase_totally_incompatible_exception_info(testdir):
    """Passing something that is not exception info at all (here the int 42)
    to ``addError`` yields an 'Unknown Incompatible' marker in the repr."""
    item, = testdir.getitems("""
        from unittest import TestCase
        class MyTestCase(TestCase):
            def test_hello(self):
                pass
    """)
    item.addError(None, 42)
    excinfo = item._excinfo.pop(0)
    assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr())
def test_module_level_pytestmark(testdir):
    """A module-level ``pytestmark = pytest.mark.xfail`` applies to TestCase
    methods too: the failing test is reported as xfail (counted as skipped
    by ``assertoutcome``)."""
    testpath = testdir.makepyfile("""
        import unittest
        import pytest
        pytestmark = pytest.mark.xfail
        class MyTestCase(unittest.TestCase):
            def test_func1(self):
                assert 0
    """)
    reprec = testdir.inline_run(testpath, "-s")
    reprec.assertoutcome(skipped=1)
class TestTrialUnittest(object):
    """Integration tests for twisted.trial TestCases under pytest.

    The whole class is skipped when twisted is not importable
    (``pytest.importorskip`` in ``setup_class``).
    """

    def setup_class(cls):
        # Skips the entire class if twisted.trial is unavailable.
        cls.ut = pytest.importorskip("twisted.trial.unittest")
        # on windows trial uses a socket for a reactor and apparently doesn't close it properly
        # https://twistedmatrix.com/trac/ticket/9227
        cls.ignore_unclosed_socket_warning = ('-W', 'always')

    def test_trial_testcase_runtest_not_collected(self, testdir):
        """``runTest`` on a trial TestCase follows the same collection rules
        as stdlib unittest: both variants run exactly one test."""
        testdir.makepyfile("""
            from twisted.trial.unittest import TestCase
            class TC(TestCase):
                def test_hello(self):
                    pass
        """)
        reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(passed=1)
        testdir.makepyfile("""
            from twisted.trial.unittest import TestCase
            class TC(TestCase):
                def runTest(self):
                    pass
        """)
        reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(passed=1)

    def test_trial_exceptions_with_skips(self, testdir):
        """Mix of pytest skips/xfails and trial's own ``skip``/``todo``
        attributes all get reported with their reasons; the expected summary
        depends on whether unexpected successes count as failures."""
        testdir.makepyfile("""
            from twisted.trial import unittest
            import pytest
            class TC(unittest.TestCase):
                def test_hello(self):
                    pytest.skip("skip_in_method")
                @pytest.mark.skipif("sys.version_info != 1")
                def test_hello2(self):
                    pass
                @pytest.mark.xfail(reason="iwanto")
                def test_hello3(self):
                    assert 0
                def test_hello4(self):
                    pytest.xfail("i2wanto")
                def test_trial_skip(self):
                    pass
                test_trial_skip.skip = "trialselfskip"
                def test_trial_todo(self):
                    assert 0
                test_trial_todo.todo = "mytodo"
                def test_trial_todo_success(self):
                    pass
                test_trial_todo_success.todo = "mytodo"
            class TC2(unittest.TestCase):
                def setup_class(cls):
                    pytest.skip("skip_in_setup_class")
                def test_method(self):
                    pass
        """)
        from _pytest.compat import _is_unittest_unexpected_success_a_failure
        should_fail = _is_unittest_unexpected_success_a_failure()
        result = testdir.runpytest("-rxs", *self.ignore_unclosed_socket_warning)
        result.stdout.fnmatch_lines_random([
            "*XFAIL*test_trial_todo*",
            "*trialselfskip*",
            "*skip_in_setup_class*",
            "*iwanto*",
            "*i2wanto*",
            "*sys.version_info*",
            "*skip_in_method*",
            "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
        ])
        assert result.ret == (1 if should_fail else 0)

    def test_trial_error(self, testdir):
        """Deferred/reactor failures in trial tests surface as pytest ERRORs,
        including DelayedCalls left over at teardown."""
        testdir.makepyfile("""
            from twisted.trial.unittest import TestCase
            from twisted.internet.defer import Deferred
            from twisted.internet import reactor
            class TC(TestCase):
                def test_one(self):
                    crash
                def test_two(self):
                    def f(_):
                        crash
                    d = Deferred()
                    d.addCallback(f)
                    reactor.callLater(0.3, d.callback, None)
                    return d
                def test_three(self):
                    def f():
                        pass # will never get called
                    reactor.callLater(0.3, f)
                # will crash at teardown
                def test_four(self):
                    def f(_):
                        reactor.callLater(0.3, f)
                        crash
                    d = Deferred()
                    d.addCallback(f)
                    reactor.callLater(0.3, d.callback, None)
                    return d
                # will crash both at test time and at teardown
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*ERRORS*",
            "*DelayedCalls*",
            "*test_four*",
            "*NameError*crash*",
            "*test_one*",
            "*NameError*crash*",
            "*test_three*",
            "*DelayedCalls*",
            "*test_two*",
            "*crash*",
        ])

    def test_trial_pdb(self, testdir):
        """A failing trial test drops into pdb when run via spawn, showing the
        assertion message."""
        p = testdir.makepyfile("""
            from twisted.trial import unittest
            import pytest
            class TC(unittest.TestCase):
                def test_hello(self):
                    assert 0, "hellopdb"
        """)
        child = testdir.spawn_pytest(p)
        child.expect("hellopdb")
        child.sendeof()

    def test_trial_testcase_skip_property(self, testdir):
        """A class-level trial ``skip`` attribute skips all its tests."""
        testpath = testdir.makepyfile("""
            from twisted.trial import unittest
            class MyTestCase(unittest.TestCase):
                skip = 'dont run'
                def test_func(self):
                    pass
        """)
        reprec = testdir.inline_run(testpath, "-s")
        reprec.assertoutcome(skipped=1)

    def test_trial_testfunction_skip_property(self, testdir):
        """A per-function trial ``skip`` attribute skips that test."""
        testpath = testdir.makepyfile("""
            from twisted.trial import unittest
            class MyTestCase(unittest.TestCase):
                def test_func(self):
                    pass
                test_func.skip = 'dont run'
        """)
        reprec = testdir.inline_run(testpath, "-s")
        reprec.assertoutcome(skipped=1)

    def test_trial_testcase_todo_property(self, testdir):
        """A class-level trial ``todo`` attribute turns failures into
        expected failures (reported as skipped by assertoutcome)."""
        testpath = testdir.makepyfile("""
            from twisted.trial import unittest
            class MyTestCase(unittest.TestCase):
                todo = 'dont run'
                def test_func(self):
                    assert 0
        """)
        reprec = testdir.inline_run(testpath, "-s")
        reprec.assertoutcome(skipped=1)

    def test_trial_testfunction_todo_property(self, testdir):
        """A per-function trial ``todo`` attribute behaves like the class-level
        one for a single test."""
        testpath = testdir.makepyfile("""
            from twisted.trial import unittest
            class MyTestCase(unittest.TestCase):
                def test_func(self):
                    assert 0
                test_func.todo = 'dont run'
        """)
        reprec = testdir.inline_run(testpath, "-s", *self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(skipped=1)
def test_djangolike_testcase(testdir):
    """A Django-style TestCase overriding ``__call__`` to wrap the run with
    ``_pre_setup``/``_post_teardown`` hooks runs all phases in order."""
    # contributed from Morten Breekevold
    testdir.makepyfile("""
        from unittest import TestCase, main
        class DjangoLikeTestCase(TestCase):
            def setUp(self):
                print ("setUp()")
            def test_presetup_has_been_run(self):
                print ("test_thing()")
                self.assertTrue(hasattr(self, 'was_presetup'))
            def tearDown(self):
                print ("tearDown()")
            def __call__(self, result=None):
                try:
                    self._pre_setup()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    import sys
                    result.addError(self, sys.exc_info())
                    return
                super(DjangoLikeTestCase, self).__call__(result)
                try:
                    self._post_teardown()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    import sys
                    result.addError(self, sys.exc_info())
                    return
            def _pre_setup(self):
                print ("_pre_setup()")
                self.was_presetup = True
            def _post_teardown(self):
                print ("_post_teardown()")
    """)
    result = testdir.runpytest("-s")
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*_pre_setup()*",
        "*setUp()*",
        "*test_thing()*",
        "*tearDown()*",
        "*_post_teardown()*",
    ])
def test_unittest_not_shown_in_traceback(testdir):
    """Internal unittest frames (e.g. ``failUnlessEqual``) are hidden from the
    failure traceback shown to the user."""
    testdir.makepyfile("""
        import unittest
        class t(unittest.TestCase):
            def test_hello(self):
                x = 3
                self.assertEqual(x, 4)
    """)
    res = testdir.runpytest()
    assert "failUnlessEqual" not in res.stdout.str()
def test_unorderable_types(testdir):
    """Collecting classes that Python cannot order (dynamically created
    TestCase subclasses) must not raise TypeError; with no actual test
    methods nothing is collected."""
    testdir.makepyfile("""
        import unittest
        class TestJoinEmpty(unittest.TestCase):
            pass
        def make_test():
            class Test(unittest.TestCase):
                pass
            Test.__name__ = "TestFoo"
            return Test
        TestFoo = make_test()
    """)
    result = testdir.runpytest()
    assert "TypeError" not in result.stdout.str()
    assert result.ret == EXIT_NOTESTSCOLLECTED
def test_unittest_typerror_traceback(testdir):
    """A test method with an extra required parameter fails with a visible
    TypeError rather than being silently mis-handled."""
    testdir.makepyfile("""
        import unittest
        class TestJoinEmpty(unittest.TestCase):
            def test_hello(self, arg1):
                pass
    """)
    result = testdir.runpytest()
    assert "TypeError" in result.stdout.str()
    assert result.ret == 1
@pytest.mark.skipif("sys.version_info < (2,7)")
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
script = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_failing_test_is_xfail(self):
assert False
if __name__ == '__main__':
unittest.main()
""")
if runner == 'pytest':
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines([
"*XFAIL*MyTestCase*test_failing_test_is_xfail*",
"*1 xfailed*",
])
else:
result = testdir.runpython(script)
result.stderr.fnmatch_lines([
"*1 test in*",
"*OK*(expected failures=1)*",
])
assert result.ret == 0
@pytest.mark.skipif("sys.version_info < (2,7)")
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
script = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_passing_test_is_fail(self):
assert True
if __name__ == '__main__':
unittest.main()
""")
from _pytest.compat import _is_unittest_unexpected_success_a_failure
should_fail = _is_unittest_unexpected_success_a_failure()
if runner == 'pytest':
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines([
"*MyTestCase*test_passing_test_is_fail*",
"*1 failed*" if should_fail else "*1 xpassed*",
])
else:
result = testdir.runpython(script)
result.stderr.fnmatch_lines([
"*1 test in*",
"*(unexpected successes=1)*",
])
assert result.ret == (1 if should_fail else 0)
@pytest.mark.parametrize('fix_type, stmt', [
    ('fixture', 'return'),
    ('yield_fixture', 'yield'),
])
def test_unittest_setup_interaction(testdir, fix_type, stmt):
    """Autouse pytest fixtures (class- and function-scoped) cooperate with a
    unittest.TestCase: attributes set via ``request.cls`` / ``request.instance``
    are visible inside the test methods.  Parametrized over both the plain
    ``fixture`` and the legacy ``yield_fixture`` decorators."""
    testdir.makepyfile("""
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            @pytest.{fix_type}(scope="class", autouse=True)
            def perclass(self, request):
                request.cls.hello = "world"
                {stmt}
            @pytest.{fix_type}(scope="function", autouse=True)
            def perfunction(self, request):
                request.instance.funcname = request.function.__name__
                {stmt}
            def test_method1(self):
                assert self.funcname == "test_method1"
                assert self.hello == "world"
            def test_method2(self):
                assert self.funcname == "test_method2"
            def test_classattr(self):
                assert self.__class__.hello == "world"
    """.format(fix_type=fix_type, stmt=stmt))
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*3 passed*")
def test_non_unittest_no_setupclass_support(testdir):
    """``setUpClass``/``tearDownClass`` are unittest-specific hooks: on a plain
    (non-TestCase) class pytest must NOT call them, so ``x`` stays 0."""
    testpath = testdir.makepyfile("""
        class TestFoo(object):
            x = 0
            @classmethod
            def setUpClass(cls):
                cls.x = 1
            def test_method1(self):
                assert self.x == 0
            @classmethod
            def tearDownClass(cls):
                cls.x = 1
        def test_not_teareddown():
            assert TestFoo.x == 0
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=2)
def test_no_teardown_if_setupclass_failed(testdir):
    """When ``setUpClass`` raises, ``tearDownClass`` must not run: ``x`` keeps
    the value 1 set before the failing assert (one setup failure, one pass)."""
    testpath = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            x = 0
            @classmethod
            def setUpClass(cls):
                cls.x = 1
                assert False
            def test_func1(self):
                cls.x = 10
            @classmethod
            def tearDownClass(cls):
                cls.x = 100
        def test_notTornDown():
            assert MyTestCase.x == 1
    """)
    reprec = testdir.inline_run(testpath)
    reprec.assertoutcome(passed=1, failed=1)
def test_issue333_result_clearing(testdir):
    """Issue #333: a conftest hook that raises after ``__multicall__.execute()``
    must not clear the original test failure — the ZeroDivisionError failure
    is still reported.  (Uses the legacy ``__multicall__`` hook API.)"""
    testdir.makeconftest("""
        def pytest_runtest_call(__multicall__, item):
            __multicall__.execute()
            assert 0
    """)
    testdir.makepyfile("""
        import unittest
        class TestIt(unittest.TestCase):
            def test_func(self):
                0/0
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(failed=1)
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_raise_skip_issue748(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
def test_one(self):
raise unittest.SkipTest('skipping due to reasons')
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*test_foo.py*skipping due to reasons*
*1 skipped*
""")
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue1169(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.skip("skipping due to reasons")
def test_skip(self):
self.fail()
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*skipping due to reasons*
*1 skipped*
""")
def test_class_method_containing_test_issue1558(testdir):
    """Issue #1558: setting ``__test__ = False`` on a TestCase method excludes
    it from collection, so only the other method runs."""
    testdir.makepyfile(test_foo="""
        import unittest
        class MyTestCase(unittest.TestCase):
            def test_should_run(self):
                pass
            def test_should_not_run(self):
                pass
            test_should_not_run.__test__ = False
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
| {
"repo_name": "MichaelAquilina/pytest",
"path": "testing/test_unittest.py",
"copies": "1",
"size": "25470",
"license": "mit",
"hash": 6252194707594892000,
"line_mean": 29.5762304922,
"line_max": 95,
"alpha_frac": 0.539183353,
"autogenerated": false,
"ratio": 4.452018877818563,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5491202230818563,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.