code
stringlengths
2k
1.04M
repo_path
stringlengths
5
517
parsed_code
stringlengths
0
1.04M
quality_prob
float64
0.02
0.95
learning_prob
float64
0.02
0.93
"""Tests for functional operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import functional_ops from tensorflow.python.platform import test class FunctionalOpsTest(test.TestCase): def testIfWithDefun(self): # Defun should only be used in graph mode with ops.Graph().as_default(): @function.Defun(dtypes.float32) def Then(x): return x + 1 @function.Defun(dtypes.float32) def Else(x): return x - 1 inputs = [10.] result = self.evaluate(functional_ops.If(False, inputs, Then, Else)) self.assertEqual([9.0], result) def testIfWithFunction(self): @def_function.function( input_signature=[tensor_spec.TensorSpec((), dtypes.float32)]) def Then(x): return x + 1 @def_function.function( input_signature=[tensor_spec.TensorSpec((), dtypes.float32)]) def Else(x): return x - 1 inputs = [10.] then_cf = Then.get_concrete_function() else_cf = Else.get_concrete_function() result = self.evaluate(functional_ops.If(False, inputs, then_cf, else_cf)) self.assertEqual([9.0], result) def testIfWithFunctionComposite(self): signature = [tensor_spec.TensorSpec([], dtypes.float32)] @def_function.function(input_signature=signature) def Then(x): return sparse_tensor.SparseTensor([[0]], [x + 1], [1]) @def_function.function(input_signature=signature) def Else(x): return sparse_tensor.SparseTensor([[0]], [x - 1], [1]) inputs = [10.] then_cf = Then.get_concrete_function() else_cf = Else.get_concrete_function() result = functional_ops.If(False, inputs, then_cf, else_cf) self.assertIsInstance(result, sparse_tensor.SparseTensor) self.assertAllEqual([9.0], result.values) if __name__ == '__main__': test.main()
tensorflow/python/ops/functional_ops_test.py
"""Tests for functional operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import functional_ops from tensorflow.python.platform import test class FunctionalOpsTest(test.TestCase): def testIfWithDefun(self): # Defun should only be used in graph mode with ops.Graph().as_default(): @function.Defun(dtypes.float32) def Then(x): return x + 1 @function.Defun(dtypes.float32) def Else(x): return x - 1 inputs = [10.] result = self.evaluate(functional_ops.If(False, inputs, Then, Else)) self.assertEqual([9.0], result) def testIfWithFunction(self): @def_function.function( input_signature=[tensor_spec.TensorSpec((), dtypes.float32)]) def Then(x): return x + 1 @def_function.function( input_signature=[tensor_spec.TensorSpec((), dtypes.float32)]) def Else(x): return x - 1 inputs = [10.] then_cf = Then.get_concrete_function() else_cf = Else.get_concrete_function() result = self.evaluate(functional_ops.If(False, inputs, then_cf, else_cf)) self.assertEqual([9.0], result) def testIfWithFunctionComposite(self): signature = [tensor_spec.TensorSpec([], dtypes.float32)] @def_function.function(input_signature=signature) def Then(x): return sparse_tensor.SparseTensor([[0]], [x + 1], [1]) @def_function.function(input_signature=signature) def Else(x): return sparse_tensor.SparseTensor([[0]], [x - 1], [1]) inputs = [10.] then_cf = Then.get_concrete_function() else_cf = Else.get_concrete_function() result = functional_ops.If(False, inputs, then_cf, else_cf) self.assertIsInstance(result, sparse_tensor.SparseTensor) self.assertAllEqual([9.0], result.values) if __name__ == '__main__': test.main()
0.87901
0.366675
import pytest from convtools import conversion as c from convtools.base import Breakpoint def test_base_zip(): meta = {1: "a", 2: "b", 3: "c"} input_data = {"items": [1, 2, 3], "meta": meta} converter = ( c.zip( c.item("items"), c.repeat(c.item("meta")), ) .as_type(list) .gen_converter(debug=False) ) assert converter(input_data) == [ (1, meta), (2, meta), (3, meta), ] converter = ( c.zip( item=c.item("items"), meta=c.repeat(c.item("meta")), ) .as_type(list) .gen_converter(debug=False) ) assert converter(input_data) == [ {"item": 1, "meta": meta}, {"item": 2, "meta": meta}, {"item": 3, "meta": meta}, ] input_data = [ ([1, 2, 3], {1: "a", 2: "b", 3: "c"}), ([4, 5, 6], {4: "a", 5: "b", 6: "c"}), ] converter = ( c.iter(c.zip(c.item(0), c.repeat(c.item(1)))) .flatten() .iter(c.item(1, c.item(0))) .pipe(c.call_func(",".join, c.this())) .gen_converter(debug=False) ) assert converter(input_data) == "a,b,c,a,b,c" with pytest.raises(ValueError): c.zip(1, 2, a=1) def test_zip_in_aggregate(): input_data = [ ("kitchen", "size", 10), ("kitchen", "temperature", 40), ("living_room", "size", 12), ("living_room", "color", "white"), ] converter = ( c.group_by(c.item(1)) .aggregate( { "prop": c.item(1), "values": c.zip( room=c.ReduceFuncs.Array(c.item(0)), value=c.ReduceFuncs.Array(c.item(2)), ).as_type(list), } ) .gen_converter(debug=True) ) assert converter(input_data) == [ { "prop": "size", "values": [ {"room": "kitchen", "value": 10}, {"room": "living_room", "value": 12}, ], }, {"prop": "temperature", "values": [{"room": "kitchen", "value": 40}]}, { "prop": "color", "values": [{"room": "living_room", "value": "white"}], }, ] def test_flatten(): assert c.flatten().as_type(list).execute([[1], [2]]) == [1, 2] def test_min_max(): assert c.min(0, 1).execute(None) == 0 assert c.min(2, 1).execute(None) == 1 assert c.max(0, 1).execute(None) == 1 assert c.max(2, 1).execute(None) == 2 assert c.min(c.item(0), c.item(1)).execute((0, 1)) == 0 assert c((2, 1)).pipe(c.min(c.item(0), 
c.item(1))).execute(None) == 1 with pytest.raises(TypeError): c.min(c.this()).execute(-1) with pytest.raises(TypeError): c.max(c.this()).execute(-1) def test_breakpoint(): before = Breakpoint.debug_func l = [] def add_to_list(obj): l.append(obj) return obj Breakpoint.debug_func = staticmethod(add_to_list) try: c.list_comp(c.this().breakpoint()).execute([1, 2, 3]) c.list_comp(c.breakpoint()).execute([3, 4]) finally: Breakpoint.debug_func = before assert l == [1, 2, 3, 3, 4]
tests/test_shortcuts.py
import pytest from convtools import conversion as c from convtools.base import Breakpoint def test_base_zip(): meta = {1: "a", 2: "b", 3: "c"} input_data = {"items": [1, 2, 3], "meta": meta} converter = ( c.zip( c.item("items"), c.repeat(c.item("meta")), ) .as_type(list) .gen_converter(debug=False) ) assert converter(input_data) == [ (1, meta), (2, meta), (3, meta), ] converter = ( c.zip( item=c.item("items"), meta=c.repeat(c.item("meta")), ) .as_type(list) .gen_converter(debug=False) ) assert converter(input_data) == [ {"item": 1, "meta": meta}, {"item": 2, "meta": meta}, {"item": 3, "meta": meta}, ] input_data = [ ([1, 2, 3], {1: "a", 2: "b", 3: "c"}), ([4, 5, 6], {4: "a", 5: "b", 6: "c"}), ] converter = ( c.iter(c.zip(c.item(0), c.repeat(c.item(1)))) .flatten() .iter(c.item(1, c.item(0))) .pipe(c.call_func(",".join, c.this())) .gen_converter(debug=False) ) assert converter(input_data) == "a,b,c,a,b,c" with pytest.raises(ValueError): c.zip(1, 2, a=1) def test_zip_in_aggregate(): input_data = [ ("kitchen", "size", 10), ("kitchen", "temperature", 40), ("living_room", "size", 12), ("living_room", "color", "white"), ] converter = ( c.group_by(c.item(1)) .aggregate( { "prop": c.item(1), "values": c.zip( room=c.ReduceFuncs.Array(c.item(0)), value=c.ReduceFuncs.Array(c.item(2)), ).as_type(list), } ) .gen_converter(debug=True) ) assert converter(input_data) == [ { "prop": "size", "values": [ {"room": "kitchen", "value": 10}, {"room": "living_room", "value": 12}, ], }, {"prop": "temperature", "values": [{"room": "kitchen", "value": 40}]}, { "prop": "color", "values": [{"room": "living_room", "value": "white"}], }, ] def test_flatten(): assert c.flatten().as_type(list).execute([[1], [2]]) == [1, 2] def test_min_max(): assert c.min(0, 1).execute(None) == 0 assert c.min(2, 1).execute(None) == 1 assert c.max(0, 1).execute(None) == 1 assert c.max(2, 1).execute(None) == 2 assert c.min(c.item(0), c.item(1)).execute((0, 1)) == 0 assert c((2, 1)).pipe(c.min(c.item(0), 
c.item(1))).execute(None) == 1 with pytest.raises(TypeError): c.min(c.this()).execute(-1) with pytest.raises(TypeError): c.max(c.this()).execute(-1) def test_breakpoint(): before = Breakpoint.debug_func l = [] def add_to_list(obj): l.append(obj) return obj Breakpoint.debug_func = staticmethod(add_to_list) try: c.list_comp(c.this().breakpoint()).execute([1, 2, 3]) c.list_comp(c.breakpoint()).execute([3, 4]) finally: Breakpoint.debug_func = before assert l == [1, 2, 3, 3, 4]
0.536556
0.632091
from config import Names as N from control.record import Record from control.html import HtmlElements as H class AssessmentR(Record): """Logic for assessment records. Assessment records that are part of a workflow have customised titles, showing the creator and create data of the assessment. !!! hint If the `assessment` record is not part of the workflow, the behaviour of this class falls back to the base class `control.record.Record`. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def title(self, record=None, *args, **kwargs): inActualCls = self.inActualCls(record) wfitem = self.wfitem if not wfitem: return super().title(*args, **kwargs) markup = kwargs.get("markup", True) datetime = self.field(N.dateCreated).wrapBare(markup=markup) date = datetime.split(maxsplit=1)[0] creator = self.field(N.creator).wrapBare(markup=markup) valBare = f"""on {date} by {creator}""" return ( H.span(f"""on {date} by {creator}""", cls=f"small {inActualCls}") if markup else valBare ) def field(self, fieldName, **kwargs): """Customised factory function to wrap a field object around the data of a field. This function only comes into play when the assigning reviewers. Office users assign reviewers by editing the fields `reviewerE` and `reviewerF`. But they may only do so if the assessment is submitted and not withdrawn and there is not yet a final verdict. If those conditions apply, the base version of `field` will be called with a `mayEdit=False` parameter. """ if fieldName in {N.reviewerE, N.reviewerF}: wfitem = self.wfitem if wfitem: (stage,) = wfitem.info(N.assessment, N.stage) if stage not in { N.submitted, N.submittedRevised, N.reviewRevise, }: kwargs[N.mayEdit] = False return super().field(fieldName, **kwargs)
server/control/cust/assessment_record.py
from config import Names as N from control.record import Record from control.html import HtmlElements as H class AssessmentR(Record): """Logic for assessment records. Assessment records that are part of a workflow have customised titles, showing the creator and create data of the assessment. !!! hint If the `assessment` record is not part of the workflow, the behaviour of this class falls back to the base class `control.record.Record`. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def title(self, record=None, *args, **kwargs): inActualCls = self.inActualCls(record) wfitem = self.wfitem if not wfitem: return super().title(*args, **kwargs) markup = kwargs.get("markup", True) datetime = self.field(N.dateCreated).wrapBare(markup=markup) date = datetime.split(maxsplit=1)[0] creator = self.field(N.creator).wrapBare(markup=markup) valBare = f"""on {date} by {creator}""" return ( H.span(f"""on {date} by {creator}""", cls=f"small {inActualCls}") if markup else valBare ) def field(self, fieldName, **kwargs): """Customised factory function to wrap a field object around the data of a field. This function only comes into play when the assigning reviewers. Office users assign reviewers by editing the fields `reviewerE` and `reviewerF`. But they may only do so if the assessment is submitted and not withdrawn and there is not yet a final verdict. If those conditions apply, the base version of `field` will be called with a `mayEdit=False` parameter. """ if fieldName in {N.reviewerE, N.reviewerF}: wfitem = self.wfitem if wfitem: (stage,) = wfitem.info(N.assessment, N.stage) if stage not in { N.submitted, N.submittedRevised, N.reviewRevise, }: kwargs[N.mayEdit] = False return super().field(fieldName, **kwargs)
0.689724
0.459319
"""Quadratic DeepOBS dataset.""" import numpy as np import tensorflow as tf from . import dataset class quadratic(dataset.DataSet): """DeepOBS data set class to create an n dimensional stochastic quadratic\ testproblem. This toy data set consists of a fixed number (``train_size``) of iid draws from a zero-mean normal distribution in ``dim`` dimensions with isotropic covariance specified by ``noise_level``. Args: batch_size (int): The mini-batch size to use. Note that, if ``batch_size`` is not a divider of the dataset size (``1000`` for train and test) the remainder is dropped in each epoch (after shuffling). dim (int): Dimensionality of the quadratic. Defaults to ``100``. train_size (int): Size of the dataset; will be used for train, train eval and test datasets. Defaults to ``1000``. noise_level (float): Standard deviation of the data points around the mean. The data points are drawn from a Gaussian distribution. Defaults to ``0.6``. Attributes: batch: A tensor ``X`` of shape ``(batch_size, dim)`` yielding elements from the dataset. Executing these tensors raises a ``tf.errors.OutOfRangeError`` after one epoch. train_init_op: A tensorflow operation initializing the dataset for the training phase. train_eval_init_op: A tensorflow operation initializing the testproblem for evaluating on training data. test_init_op: A tensorflow operation initializing the testproblem for evaluating on test data. phase: A string-value tf.Variable that is set to ``train``, ``train_eval`` or ``test``, depending on the current phase. This can be used by testproblems to adapt their behavior to this phase. """ def __init__(self, batch_size, dim=100, train_size=1000, noise_level=0.6): """Creates a new Quadratic instance. Args: batch_size (int): The mini-batch size to use. Note that, if ``batch_size`` is not a divider of the dataset size (``1000`` for train and test) the remainder is dropped in each epoch (after shuffling). dim (int): Dimensionality of the quadratic. Defaults to ``100``. 
train_size (int): Size of the dataset; will be used for train, train eval and test datasets. Defaults to ``1000``. noise_level (float): Standard deviation of the data points around the mean. The data points are drawn from a Gaussian distribution. Defaults to ``0.6``. """ self._name = "quadratic" self._dim = dim self._train_size = train_size self._noise_level = noise_level super(quadratic, self).__init__(batch_size) def _make_dataset(self, X, shuffle=True): """Creates a quadratic data set (helper used by ``.make_*_datset`` below). Args: X (np.array): Numpy array containing the ``x`` values of the data points. data_y (np.array): Numpy array containing the ``y`` values of the data points. shuffle (bool): Switch to turn on or off shuffling of the data set. Defaults to ``True``. Returns: A tf.data.Dataset yielding batches of quadratic data. """ with tf.name_scope(self._name): with tf.device('/cpu:0'): data = tf.data.Dataset.from_tensor_slices(X) if shuffle: data = data.shuffle(buffer_size=20000) data = data.batch(self._batch_size, drop_remainder=True) data = data.prefetch(buffer_size=4) return data def _make_train_dataset(self): """Creates the quadratic training dataset. Returns: A tf.data.Dataset instance with batches of training data. """ # Draw data from a random generator with a fixed seed to always get the # same data. rng = np.random.RandomState(42) X = rng.normal(0.0, self._noise_level, (self._train_size, self._dim)) X = np.float32(X) return self._make_dataset(X, shuffle=True) def _make_train_eval_dataset(self): """Creates the quadratic train eval dataset. Returns: A tf.data.Dataset instance with batches of training eval data. """ return self._train_dataset.take(-1) # Take all. def _make_test_dataset(self): """Creates the quadratic test dataset. Returns: A tf.data.Dataset instance with batches of test data. """ # Draw data from a random generator with a fixed seed to always get the # same data. 
rng = np.random.RandomState(43) X = rng.normal(0.0, self._noise_level, (self._train_size, self._dim)) X = np.float32(X) return self._make_dataset(X, shuffle=False)
deepobs/tensorflow/datasets/quadratic.py
"""Quadratic DeepOBS dataset.""" import numpy as np import tensorflow as tf from . import dataset class quadratic(dataset.DataSet): """DeepOBS data set class to create an n dimensional stochastic quadratic\ testproblem. This toy data set consists of a fixed number (``train_size``) of iid draws from a zero-mean normal distribution in ``dim`` dimensions with isotropic covariance specified by ``noise_level``. Args: batch_size (int): The mini-batch size to use. Note that, if ``batch_size`` is not a divider of the dataset size (``1000`` for train and test) the remainder is dropped in each epoch (after shuffling). dim (int): Dimensionality of the quadratic. Defaults to ``100``. train_size (int): Size of the dataset; will be used for train, train eval and test datasets. Defaults to ``1000``. noise_level (float): Standard deviation of the data points around the mean. The data points are drawn from a Gaussian distribution. Defaults to ``0.6``. Attributes: batch: A tensor ``X`` of shape ``(batch_size, dim)`` yielding elements from the dataset. Executing these tensors raises a ``tf.errors.OutOfRangeError`` after one epoch. train_init_op: A tensorflow operation initializing the dataset for the training phase. train_eval_init_op: A tensorflow operation initializing the testproblem for evaluating on training data. test_init_op: A tensorflow operation initializing the testproblem for evaluating on test data. phase: A string-value tf.Variable that is set to ``train``, ``train_eval`` or ``test``, depending on the current phase. This can be used by testproblems to adapt their behavior to this phase. """ def __init__(self, batch_size, dim=100, train_size=1000, noise_level=0.6): """Creates a new Quadratic instance. Args: batch_size (int): The mini-batch size to use. Note that, if ``batch_size`` is not a divider of the dataset size (``1000`` for train and test) the remainder is dropped in each epoch (after shuffling). dim (int): Dimensionality of the quadratic. Defaults to ``100``. 
train_size (int): Size of the dataset; will be used for train, train eval and test datasets. Defaults to ``1000``. noise_level (float): Standard deviation of the data points around the mean. The data points are drawn from a Gaussian distribution. Defaults to ``0.6``. """ self._name = "quadratic" self._dim = dim self._train_size = train_size self._noise_level = noise_level super(quadratic, self).__init__(batch_size) def _make_dataset(self, X, shuffle=True): """Creates a quadratic data set (helper used by ``.make_*_datset`` below). Args: X (np.array): Numpy array containing the ``x`` values of the data points. data_y (np.array): Numpy array containing the ``y`` values of the data points. shuffle (bool): Switch to turn on or off shuffling of the data set. Defaults to ``True``. Returns: A tf.data.Dataset yielding batches of quadratic data. """ with tf.name_scope(self._name): with tf.device('/cpu:0'): data = tf.data.Dataset.from_tensor_slices(X) if shuffle: data = data.shuffle(buffer_size=20000) data = data.batch(self._batch_size, drop_remainder=True) data = data.prefetch(buffer_size=4) return data def _make_train_dataset(self): """Creates the quadratic training dataset. Returns: A tf.data.Dataset instance with batches of training data. """ # Draw data from a random generator with a fixed seed to always get the # same data. rng = np.random.RandomState(42) X = rng.normal(0.0, self._noise_level, (self._train_size, self._dim)) X = np.float32(X) return self._make_dataset(X, shuffle=True) def _make_train_eval_dataset(self): """Creates the quadratic train eval dataset. Returns: A tf.data.Dataset instance with batches of training eval data. """ return self._train_dataset.take(-1) # Take all. def _make_test_dataset(self): """Creates the quadratic test dataset. Returns: A tf.data.Dataset instance with batches of test data. """ # Draw data from a random generator with a fixed seed to always get the # same data. 
rng = np.random.RandomState(43) X = rng.normal(0.0, self._noise_level, (self._train_size, self._dim)) X = np.float32(X) return self._make_dataset(X, shuffle=False)
0.968066
0.957278
import testtools class TempestException(Exception): """Base Tempest Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = "An unknown exception occurred" def __init__(self, *args, **kwargs): super(TempestException, self).__init__() try: self._error_string = self.message % kwargs except Exception: # at least get the core message out if something happened self._error_string = self.message if len(args) > 0: # If there is a non-kwarg parameter, assume it's the error # message or reason description and tack it on to the end # of the exception message # Convert all arguments into their string representations... args = ["%s" % arg for arg in args] self._error_string = (self._error_string + "\nDetails: %s" % '\n'.join(args)) def __str__(self): return self._error_string class RestClientException(TempestException, testtools.TestCase.failureException): pass class InvalidConfiguration(TempestException): message = "Invalid Configuration" class InvalidCredentials(TempestException): message = "Invalid Credentials" class InvalidServiceTag(TempestException): message = "Invalid service tag" class InvalidIdentityVersion(TempestException): message = "Invalid version %(identity_version)s of the identity service" class TimeoutException(TempestException): message = "Request timed out" class BuildErrorException(TempestException): message = "Server %(server_id)s failed to build and is in ERROR status" class ImageKilledException(TempestException): message = "Image %(image_id)s 'killed' while waiting for '%(status)s'" class AddImageException(TempestException): message = "Image %(image_id)s failed to become ACTIVE in the allotted time" class VolumeBuildErrorException(TempestException): message = "Volume %(volume_id)s failed to build and is in ERROR status" class VolumeRestoreErrorException(TempestException): message = "Volume %(volume_id)s failed to restore 
and is in ERROR status" class SnapshotBuildErrorException(TempestException): message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status" class VolumeBackupException(TempestException): message = "Volume backup %(backup_id)s failed and is in ERROR status" class StackBuildErrorException(TempestException): message = ("Stack %(stack_identifier)s is in %(stack_status)s status " "due to '%(stack_status_reason)s'") class EndpointNotFound(TempestException): message = "Endpoint not found" class IdentityError(TempestException): message = "Got identity error" class ServerUnreachable(TempestException): message = "The server is not reachable via the configured network" # NOTE(andreaf) This exception is added here to facilitate the migration # of get_network_from_name and preprov_creds to tempest.lib, and it should # be migrated along with them class InvalidTestResource(TempestException): message = "%(name) is not a valid %(type), or the name is ambiguous" class RFCViolation(RestClientException): message = "RFC Violation" class InvalidHttpSuccessCode(RestClientException): message = "The success code is different than the expected one" class BadRequest(RestClientException): message = "Bad request" class ResponseWithNonEmptyBody(RFCViolation): message = ("RFC Violation! Response with %(status)d HTTP Status Code " "MUST NOT have a body") class ResponseWithEntity(RFCViolation): message = ("RFC Violation! 
Response with 205 HTTP Status Code " "MUST NOT have an entity") class InvalidHTTPResponseHeader(RestClientException): message = "HTTP response header is invalid" class InvalidStructure(TempestException): message = "Invalid structure of table with details" class CommandFailed(Exception): def __init__(self, returncode, cmd, output, stderr): super(CommandFailed, self).__init__() self.returncode = returncode self.cmd = cmd self.stdout = output self.stderr = stderr def __str__(self): return ("Command '%s' returned non-zero exit status %d.\n" "stdout:\n%s\n" "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
ceilometer/tests/tempest/exceptions.py
import testtools class TempestException(Exception): """Base Tempest Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = "An unknown exception occurred" def __init__(self, *args, **kwargs): super(TempestException, self).__init__() try: self._error_string = self.message % kwargs except Exception: # at least get the core message out if something happened self._error_string = self.message if len(args) > 0: # If there is a non-kwarg parameter, assume it's the error # message or reason description and tack it on to the end # of the exception message # Convert all arguments into their string representations... args = ["%s" % arg for arg in args] self._error_string = (self._error_string + "\nDetails: %s" % '\n'.join(args)) def __str__(self): return self._error_string class RestClientException(TempestException, testtools.TestCase.failureException): pass class InvalidConfiguration(TempestException): message = "Invalid Configuration" class InvalidCredentials(TempestException): message = "Invalid Credentials" class InvalidServiceTag(TempestException): message = "Invalid service tag" class InvalidIdentityVersion(TempestException): message = "Invalid version %(identity_version)s of the identity service" class TimeoutException(TempestException): message = "Request timed out" class BuildErrorException(TempestException): message = "Server %(server_id)s failed to build and is in ERROR status" class ImageKilledException(TempestException): message = "Image %(image_id)s 'killed' while waiting for '%(status)s'" class AddImageException(TempestException): message = "Image %(image_id)s failed to become ACTIVE in the allotted time" class VolumeBuildErrorException(TempestException): message = "Volume %(volume_id)s failed to build and is in ERROR status" class VolumeRestoreErrorException(TempestException): message = "Volume %(volume_id)s failed to restore 
and is in ERROR status" class SnapshotBuildErrorException(TempestException): message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status" class VolumeBackupException(TempestException): message = "Volume backup %(backup_id)s failed and is in ERROR status" class StackBuildErrorException(TempestException): message = ("Stack %(stack_identifier)s is in %(stack_status)s status " "due to '%(stack_status_reason)s'") class EndpointNotFound(TempestException): message = "Endpoint not found" class IdentityError(TempestException): message = "Got identity error" class ServerUnreachable(TempestException): message = "The server is not reachable via the configured network" # NOTE(andreaf) This exception is added here to facilitate the migration # of get_network_from_name and preprov_creds to tempest.lib, and it should # be migrated along with them class InvalidTestResource(TempestException): message = "%(name) is not a valid %(type), or the name is ambiguous" class RFCViolation(RestClientException): message = "RFC Violation" class InvalidHttpSuccessCode(RestClientException): message = "The success code is different than the expected one" class BadRequest(RestClientException): message = "Bad request" class ResponseWithNonEmptyBody(RFCViolation): message = ("RFC Violation! Response with %(status)d HTTP Status Code " "MUST NOT have a body") class ResponseWithEntity(RFCViolation): message = ("RFC Violation! 
Response with 205 HTTP Status Code " "MUST NOT have an entity") class InvalidHTTPResponseHeader(RestClientException): message = "HTTP response header is invalid" class InvalidStructure(TempestException): message = "Invalid structure of table with details" class CommandFailed(Exception): def __init__(self, returncode, cmd, output, stderr): super(CommandFailed, self).__init__() self.returncode = returncode self.cmd = cmd self.stdout = output self.stderr = stderr def __str__(self): return ("Command '%s' returned non-zero exit status %d.\n" "stdout:\n%s\n" "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
0.605566
0.251883
import rosgraph import rosparam import rospkg import rospy import rosservice from re import compile BLACK_LIST_PARAM = ['/rosdistro', '/rosversion', '/run_id'] BLACK_LIST_TOPIC = ["/tf", "/tf_static", "/rosout", "/clock"] BLACK_LIST_SERV = ["/set_logger_level", "/get_loggers"] BLACK_LIST_NODE = ["/rosout"] ACTION_FILTER = ['goal', 'cancel'] ACTION_FILTER2 = ['status', 'result', 'feedback'] def get_param(param_name): if not rospy.has_param('~desired_rossystem'): raise KeyError("Private parameter 'desired_rossystem' not set") return rospy.get_param('~desired_rossystem') def _check_black_list(name, black_list): for bl_ in black_list: if bl_ in name: return False return True def _init_node_dict(nodes, name): nodes[name] = {'parameters' : dict(), 'publishers' : dict(), 'subscribers' : dict(), 'service_servers' : dict(), 'service_clients' :dict(), 'action_servers' :dict(), 'action_clients' : dict() } def _check_actions(publishers, subscribers, action_clients, action_servers): pubs_ = [pub for pub in publishers.keys()] subs_ = [sub for sub in subscribers.keys()] remove_pubs = list() remove_subs = list() # Check Action client for topic_name, topic_type in publishers.items(): if topic_name.endswith(ACTION_FILTER[0]): _action_name = topic_name[:-len(ACTION_FILTER[0]) - 1] cancel_topic = _action_name + '/' + ACTION_FILTER[1] if not (cancel_topic in pubs_): continue remove_pubs.append(topic_name) remove_pubs.append(cancel_topic) for name in ACTION_FILTER2: topic = _action_name + '/' + name if not (topic in subs_): continue remove_subs.append(topic) _action_type = topic_type[:-10] # Hardcoded ActionGoal if _action_name not in action_clients.keys(): action_clients[_action_name] = [_action_type] else: action_clients[_action_name].append(_action_type) # Check Action Server for topic_name, topic_type in subscribers.items(): if topic_name.endswith(ACTION_FILTER[0]): _action_name = topic_name[:-len(ACTION_FILTER[0]) - 1] cancel_topic = _action_name + '/' + ACTION_FILTER[1] if not 
(cancel_topic in subs_): continue remove_subs.append(topic_name) remove_subs.append(cancel_topic) for name in ACTION_FILTER2: topic = _action_name + '/' + name if not (topic in pubs_): continue remove_pubs.append(topic) _action_type = topic_type[:-10] # Hardcode ActionGoal action_servers[_action_name] = _action_type for topic in remove_pubs: publishers.pop(topic) for topic in remove_subs: subscribers.pop(topic) def create_ros_graph_snapshot(): master = rosgraph.Master('snapshot') params = list() topics_dict = dict() if not(master.is_online()): print("Error: ROSMaster not found") return list() state = master.getSystemState() #get the system state pubs, subs, services = state #get all topics type topic_list = master.getTopicTypes() for topic, topic_type in topic_list: topics_dict[topic] = topic_type components = dict() for pub, nodes in pubs: if not _check_black_list(pub, BLACK_LIST_TOPIC): continue for node in nodes: if not _check_black_list(node, BLACK_LIST_NODE): continue if node not in components: _init_node_dict(components, node) components[node]['publishers'][pub] = topics_dict[pub] for sub, nodes in subs: if not _check_black_list(sub, BLACK_LIST_TOPIC): continue for node in nodes: if not _check_black_list(node, BLACK_LIST_NODE): continue if node not in components: _init_node_dict(components, node) components[node]['subscribers'][sub] = topics_dict[sub] for serv, nodes in services: if not _check_black_list(serv, BLACK_LIST_SERV): continue for node in nodes: if not _check_black_list(node, BLACK_LIST_NODE): continue if node not in components: _init_node_dict(components, node) try: components[node]['service_servers'][serv] = rosservice.get_service_type(serv) except rosservice.ROSServiceIOException as e: pass for name in components: publishers = components[name]['publishers'] subscribers = components[name]['subscribers'] action_clients = components[name]['action_clients'] action_servers = components[name]['action_servers'] _check_actions(publishers, subscribers, 
action_clients, action_servers) # Get parameters params = master.getParamNames() component_names = list(components.keys()) for name in component_names: r = compile(name + "*") # Python2.x: param_node_ns = filter(r.match, params) param_node_ns = list(filter(r.match, params)) # remove the params which belong to the node's namespace from the list # the params that remain at the end of the loop are global params g_params = [param for param in params if param not in param_node_ns] params = g_params for param in param_node_ns: if param not in BLACK_LIST_PARAM and not(param.startswith('/roslaunch')): p = master.getParam(param) components[name]['parameters'][param] = [p, type(p)] # the remaining params are global params if len(params) > 0: components['global_parameters'] = dict() for param in params: if param not in BLACK_LIST_PARAM and not(param.startswith('/roslaunch')): p = master.getParam(param) components['global_parameters'][param] = [p, type(p)] return components
src/rosgraph_monitor/graph.py
import rosgraph import rosparam import rospkg import rospy import rosservice from re import compile BLACK_LIST_PARAM = ['/rosdistro', '/rosversion', '/run_id'] BLACK_LIST_TOPIC = ["/tf", "/tf_static", "/rosout", "/clock"] BLACK_LIST_SERV = ["/set_logger_level", "/get_loggers"] BLACK_LIST_NODE = ["/rosout"] ACTION_FILTER = ['goal', 'cancel'] ACTION_FILTER2 = ['status', 'result', 'feedback'] def get_param(param_name): if not rospy.has_param('~desired_rossystem'): raise KeyError("Private parameter 'desired_rossystem' not set") return rospy.get_param('~desired_rossystem') def _check_black_list(name, black_list): for bl_ in black_list: if bl_ in name: return False return True def _init_node_dict(nodes, name): nodes[name] = {'parameters' : dict(), 'publishers' : dict(), 'subscribers' : dict(), 'service_servers' : dict(), 'service_clients' :dict(), 'action_servers' :dict(), 'action_clients' : dict() } def _check_actions(publishers, subscribers, action_clients, action_servers): pubs_ = [pub for pub in publishers.keys()] subs_ = [sub for sub in subscribers.keys()] remove_pubs = list() remove_subs = list() # Check Action client for topic_name, topic_type in publishers.items(): if topic_name.endswith(ACTION_FILTER[0]): _action_name = topic_name[:-len(ACTION_FILTER[0]) - 1] cancel_topic = _action_name + '/' + ACTION_FILTER[1] if not (cancel_topic in pubs_): continue remove_pubs.append(topic_name) remove_pubs.append(cancel_topic) for name in ACTION_FILTER2: topic = _action_name + '/' + name if not (topic in subs_): continue remove_subs.append(topic) _action_type = topic_type[:-10] # Hardcoded ActionGoal if _action_name not in action_clients.keys(): action_clients[_action_name] = [_action_type] else: action_clients[_action_name].append(_action_type) # Check Action Server for topic_name, topic_type in subscribers.items(): if topic_name.endswith(ACTION_FILTER[0]): _action_name = topic_name[:-len(ACTION_FILTER[0]) - 1] cancel_topic = _action_name + '/' + ACTION_FILTER[1] if not 
(cancel_topic in subs_): continue remove_subs.append(topic_name) remove_subs.append(cancel_topic) for name in ACTION_FILTER2: topic = _action_name + '/' + name if not (topic in pubs_): continue remove_pubs.append(topic) _action_type = topic_type[:-10] # Hardcode ActionGoal action_servers[_action_name] = _action_type for topic in remove_pubs: publishers.pop(topic) for topic in remove_subs: subscribers.pop(topic) def create_ros_graph_snapshot(): master = rosgraph.Master('snapshot') params = list() topics_dict = dict() if not(master.is_online()): print("Error: ROSMaster not found") return list() state = master.getSystemState() #get the system state pubs, subs, services = state #get all topics type topic_list = master.getTopicTypes() for topic, topic_type in topic_list: topics_dict[topic] = topic_type components = dict() for pub, nodes in pubs: if not _check_black_list(pub, BLACK_LIST_TOPIC): continue for node in nodes: if not _check_black_list(node, BLACK_LIST_NODE): continue if node not in components: _init_node_dict(components, node) components[node]['publishers'][pub] = topics_dict[pub] for sub, nodes in subs: if not _check_black_list(sub, BLACK_LIST_TOPIC): continue for node in nodes: if not _check_black_list(node, BLACK_LIST_NODE): continue if node not in components: _init_node_dict(components, node) components[node]['subscribers'][sub] = topics_dict[sub] for serv, nodes in services: if not _check_black_list(serv, BLACK_LIST_SERV): continue for node in nodes: if not _check_black_list(node, BLACK_LIST_NODE): continue if node not in components: _init_node_dict(components, node) try: components[node]['service_servers'][serv] = rosservice.get_service_type(serv) except rosservice.ROSServiceIOException as e: pass for name in components: publishers = components[name]['publishers'] subscribers = components[name]['subscribers'] action_clients = components[name]['action_clients'] action_servers = components[name]['action_servers'] _check_actions(publishers, subscribers, 
action_clients, action_servers) # Get parameters params = master.getParamNames() component_names = list(components.keys()) for name in component_names: r = compile(name + "*") # Python2.x: param_node_ns = filter(r.match, params) param_node_ns = list(filter(r.match, params)) # remove the params which belong to the node's namespace from the list # the params that remain at the end of the loop are global params g_params = [param for param in params if param not in param_node_ns] params = g_params for param in param_node_ns: if param not in BLACK_LIST_PARAM and not(param.startswith('/roslaunch')): p = master.getParam(param) components[name]['parameters'][param] = [p, type(p)] # the remaining params are global params if len(params) > 0: components['global_parameters'] = dict() for param in params: if param not in BLACK_LIST_PARAM and not(param.startswith('/roslaunch')): p = master.getParam(param) components['global_parameters'][param] = [p, type(p)] return components
0.240061
0.093844
import olefile import shutil import os import zipfile import tempfile import sys from glob import iglob from pathlib import Path def stomp_vba(original_file, stomped_file): if olefile.isOleFile(original_file): # Make copy of file to modify. shutil.copyfile(original_file, stomped_file) stomp_it(stomped_file) elif zipfile.is_zipfile(original_file): # unzip to temporary location tmpdir = tempfile.TemporaryDirectory(prefix="stomp_") with zipfile.ZipFile(original_file) as zf: zf.extractall(tmpdir.name) # iterate through files, call stomp on any ole file file_list = [f for f in iglob(tmpdir.name + '/**/*', recursive=True) if os.path.isfile(f)] for f in file_list: if olefile.isOleFile(f): stomp_it(f) # write the new zip file os.chdir(Path(stomped_file).resolve().parent) shutil.make_archive(stomped_file, 'zip', tmpdir.name) if os.path.exists(stomped_file): os.remove(stomped_file) os.rename(stomped_file + '.zip', stomped_file) # This method originally from <NAME> (@bigmacjpg) def stomp_it(stomped_file): # Open file to mangle the VBA streams. with olefile.OleFileIO(stomped_file, write_mode=True) as ole: # Mangle the macro VBA streams. for stream_name in ole.listdir(): # Got macros? data = ole.openstream(stream_name).read() marker = b"Attrib" if marker not in data: continue # Find where to write. start = data.rindex(marker) # Stomp the rest of the data. new_data = data[:start] for i in range(start, len(data)): # Stomp with random bytes. new_data += os.urandom(1) # Write out the garbage data. ole.write_stream(stream_name, new_data) if __name__ == "__main__": files = [] if sys.argv: if len(sys.argv) == 2: file_to_stomp_path = sys.argv[1] else: print("Please provide a path to a file to be VBA stomped") if os.path.isfile(file_to_stomp_path): stomp_vba(file_to_stomp_path, file_to_stomp_path + ".stomped") if os.path.isfile(file_to_stomp_path + ".stomped"): print("[*] Stomped VBA - new file at: " + str(file_to_stomp_path + ".stomped")) else: print("[!] 
Failed to stomp file: " + file_to_stomp_path)
internals/stomp_vba.py
import olefile import shutil import os import zipfile import tempfile import sys from glob import iglob from pathlib import Path def stomp_vba(original_file, stomped_file): if olefile.isOleFile(original_file): # Make copy of file to modify. shutil.copyfile(original_file, stomped_file) stomp_it(stomped_file) elif zipfile.is_zipfile(original_file): # unzip to temporary location tmpdir = tempfile.TemporaryDirectory(prefix="stomp_") with zipfile.ZipFile(original_file) as zf: zf.extractall(tmpdir.name) # iterate through files, call stomp on any ole file file_list = [f for f in iglob(tmpdir.name + '/**/*', recursive=True) if os.path.isfile(f)] for f in file_list: if olefile.isOleFile(f): stomp_it(f) # write the new zip file os.chdir(Path(stomped_file).resolve().parent) shutil.make_archive(stomped_file, 'zip', tmpdir.name) if os.path.exists(stomped_file): os.remove(stomped_file) os.rename(stomped_file + '.zip', stomped_file) # This method originally from <NAME> (@bigmacjpg) def stomp_it(stomped_file): # Open file to mangle the VBA streams. with olefile.OleFileIO(stomped_file, write_mode=True) as ole: # Mangle the macro VBA streams. for stream_name in ole.listdir(): # Got macros? data = ole.openstream(stream_name).read() marker = b"Attrib" if marker not in data: continue # Find where to write. start = data.rindex(marker) # Stomp the rest of the data. new_data = data[:start] for i in range(start, len(data)): # Stomp with random bytes. new_data += os.urandom(1) # Write out the garbage data. ole.write_stream(stream_name, new_data) if __name__ == "__main__": files = [] if sys.argv: if len(sys.argv) == 2: file_to_stomp_path = sys.argv[1] else: print("Please provide a path to a file to be VBA stomped") if os.path.isfile(file_to_stomp_path): stomp_vba(file_to_stomp_path, file_to_stomp_path + ".stomped") if os.path.isfile(file_to_stomp_path + ".stomped"): print("[*] Stomped VBA - new file at: " + str(file_to_stomp_path + ".stomped")) else: print("[!] 
Failed to stomp file: " + file_to_stomp_path)
0.123868
0.167083
# Copyright (c) 2021-2022 scmanjarrez. All rights reserved. # This work is licensed under the terms of the MIT license. import genshinstats.errors as err import genshinstats as gs import paimon_gui as gui import utils as ut STATE = ut.CMD.NOP HELP = ( "Hello Traveler, use the following commands to interact with me:" "\n\n" "❔ /menu - Interact with me using UI." "\n" "❔ /redeem <code>[#]</code> - Redeem the gift code." "\n\n" "<b>Bot Usage</b>\n" "❔ /help - List of commands." "\n" "❔ /cancel - Cancel current action." "\n\n" "<i><b>Note:</b> Arguments inside brackets are optional.</i>" ) def _state(state=ut.CMD.NOP): global STATE STATE = state def allowed(uid): return uid == int(ut.config('admin')) def bot_help(update, context): uid = ut.uid(update) if allowed(uid): ut.send(update, HELP) def menu(update, context): uid = ut.uid(update) if allowed(uid): gui.main_menu(update) def _redeem(code): try: gs.redeem_code(code) except err.GenshinStatsException as e: msg = e.msg else: msg = "Code redeemed successfully." return msg def redeem(update, context): uid = ut.uid(update) if allowed(uid): if context.args: msg = _redeem(context.args[0]) else: msg = "Tell me the gift code to redeem:" _state(ut.CMD.GIFT) ut.send(update, msg) def text(update, context): uid = ut.uid(update) if allowed(uid): msg = "❗ Send only one argument." args = update.message.text.split() if len(args) == 1: if STATE == ut.CMD.GIFT: msg = _redeem(args[0]) else: _state(uid) ut.send(update, msg) def cancel(update, context): uid = ut.uid(update) if allowed(uid): if STATE != ut.CMD.NOP: msg = (f"The command <code>{STATE.value}</code> " f"has been cancelled. Anything else I can do for you?" f"\n\n" f"Send /help for a list of commands.") _state() else: msg = ("No active command to cancel. " "I wasn't doing anything anyway.\nZzzzz...") ut.send(update, msg)
paimon_cli.py
# Copyright (c) 2021-2022 scmanjarrez. All rights reserved. # This work is licensed under the terms of the MIT license. import genshinstats.errors as err import genshinstats as gs import paimon_gui as gui import utils as ut STATE = ut.CMD.NOP HELP = ( "Hello Traveler, use the following commands to interact with me:" "\n\n" "❔ /menu - Interact with me using UI." "\n" "❔ /redeem <code>[#]</code> - Redeem the gift code." "\n\n" "<b>Bot Usage</b>\n" "❔ /help - List of commands." "\n" "❔ /cancel - Cancel current action." "\n\n" "<i><b>Note:</b> Arguments inside brackets are optional.</i>" ) def _state(state=ut.CMD.NOP): global STATE STATE = state def allowed(uid): return uid == int(ut.config('admin')) def bot_help(update, context): uid = ut.uid(update) if allowed(uid): ut.send(update, HELP) def menu(update, context): uid = ut.uid(update) if allowed(uid): gui.main_menu(update) def _redeem(code): try: gs.redeem_code(code) except err.GenshinStatsException as e: msg = e.msg else: msg = "Code redeemed successfully." return msg def redeem(update, context): uid = ut.uid(update) if allowed(uid): if context.args: msg = _redeem(context.args[0]) else: msg = "Tell me the gift code to redeem:" _state(ut.CMD.GIFT) ut.send(update, msg) def text(update, context): uid = ut.uid(update) if allowed(uid): msg = "❗ Send only one argument." args = update.message.text.split() if len(args) == 1: if STATE == ut.CMD.GIFT: msg = _redeem(args[0]) else: _state(uid) ut.send(update, msg) def cancel(update, context): uid = ut.uid(update) if allowed(uid): if STATE != ut.CMD.NOP: msg = (f"The command <code>{STATE.value}</code> " f"has been cancelled. Anything else I can do for you?" f"\n\n" f"Send /help for a list of commands.") _state() else: msg = ("No active command to cancel. " "I wasn't doing anything anyway.\nZzzzz...") ut.send(update, msg)
0.52683
0.129816
from torch.utils.data import DataLoader import torchvision.datasets as datasets import torchvision.transforms as transforms from sotabenchapi.core import BenchmarkResult, check_inputs from torchbench.utils import send_model_to_device, default_data_to_device from torchbench.image_classification.utils import evaluate_classification class ImageNet: """`ImageNet <https://www.sotabench.com/benchmark/imagenet>`_ benchmark. Examples: Evaluate a ResNeXt model from the torchvision repository: .. code-block:: python from torchbench.image_classification import ImageNet from torchvision.models.resnet import resnext101_32x8d import torchvision.transforms as transforms import PIL # Define the transforms need to convert ImageNet data to expected # model input normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) input_transform = transforms.Compose([ transforms.Resize(256, PIL.Image.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ]) # Run the benchmark ImageNet.benchmark( model=resnext101_32x8d(pretrained=True), paper_model_name='ResNeXt-101-32x8d', paper_arxiv_id='1611.05431', input_transform=input_transform, batch_size=256, num_gpu=1 ) If the model you are implementing does not have *paper* results on sotabench, you can add them: .. code-block:: python ... 
mynet_paper_results = { 'Top 1 Accuracy': 0.754, 'Top 5 Accuracy': 0.8565 } # Run the benchmark ImageNet.benchmark( model=mynet101(pretrained=True), paper_model_name='MyNet', paper_arxiv_id='2099.05431', paper_results=mynet_paper_results, input_transform=input_transform, batch_size=256, num_gpu=1 ) """ dataset = datasets.ImageNet normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) input_transform = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ] ) send_data_to_device = default_data_to_device task = "Image Classification" @classmethod @check_inputs def benchmark( cls, model, model_description=None, input_transform=None, target_transform=None, model_output_transform=None, send_data_to_device=None, device: str = "cuda", data_root: str = "./.data/vision/imagenet", num_workers: int = 4, batch_size: int = 128, pin_memory: bool = False, num_gpu: int = 1, paper_model_name: str = None, paper_arxiv_id: str = None, paper_pwc_id: str = None, paper_results: dict = None, pytorch_hub_url: str = None, force: bool = False ) -> BenchmarkResult: """Benchmarking function. Args: model: a PyTorch module, (e.g. a ``nn.Module`` object), that takes in ImageNet inputs and outputs ImageNet predictions. model_description (str, optional): Optional model description. input_transform (transforms.Compose, optional): Composing the transforms used to transform the dataset, e.g. applying resizing (e.g ``transforms.Resize``), center cropping, to tensor transformations and normalization. target_transform (torchvision.transforms.Compose, optional): Composing any transforms used to transform the target. This is usually not used for ImageNet. model_output_transform (callable, optional): An optional function that takes in model output (after being passed through your ``model`` forward pass) and transforms it. Afterwards, the output will be passed into an evaluation function. 
send_data_to_device (callable, optional): An optional function specifying how the model is sent to a device; see ``torchbench.utils.send_model_to_device`` for the default treatment. device (str): Default is 'cuda' - this is the device that the model is sent to in the default treatment. data_root (str): The location of the ImageNet dataset - change this parameter when evaluating locally if your ImageNet data is located in a different folder (or alternatively if you want to download to an alternative location). num_workers (int): The number of workers to use for the DataLoader. batch_size (int) : The batch_size to use for evaluation; if you get memory errors, then reduce this (half each time) until your model fits onto the GPU. num_gpu (int): Number of GPUs - note that sotabench.com workers only support 1 GPU for now. paper_model_name (str, optional): The name of the model from the paper - if you want to link your build to a machine learning paper. See the ImageNet benchmark page for model names, https://www.sotabench.com/benchmark/imagenet, e.g. on the paper leaderboard tab. paper_arxiv_id (str, optional): Optional linking to ArXiv if you want to link to papers on the leaderboard; put in the corresponding paper's ArXiv ID, e.g. '1611.05431'. paper_pwc_id (str, optional): Optional linking to Papers With Code; put in the corresponding papers with code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional' paper_results (dict, optional) : If the paper you are reproducing does not have model results on sotabench.com, you can specify the paper results yourself through this argument, where keys are metric names, values are metric values. e.g:: {'Top 1 Accuracy': 0.543, 'Top 5 Accuracy': 0.654}. Ensure that the metric names match those on the sotabench leaderboard - for ImageNet it should be 'Top 1 Accuracy' and 'Top 5 Accuracy'. 
pytorch_hub_url (str, optional): Optional linking to PyTorch Hub url if your model is linked there; e.g: 'nvidia_deeplearningexamples_waveglow'. """ print("Benchmarking on ImageNet...") config = locals() model, device = send_model_to_device( model, device=device, num_gpu=num_gpu ) model.eval() if not input_transform: input_transform = cls.input_transform if not send_data_to_device: send_data_to_device = cls.send_data_to_device try: test_dataset = cls.dataset( data_root, split="val", transform=input_transform, target_transform=target_transform, download=True, ) except Exception: test_dataset = cls.dataset( data_root, split="val", transform=input_transform, target_transform=target_transform, download=False, ) test_loader = DataLoader( test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, ) test_results, speed_mem_metrics, run_hash = evaluate_classification( model=model, test_loader=test_loader, model_output_transform=model_output_transform, send_data_to_device=send_data_to_device, device=device, force=force ) print( " * Acc@1 {top1:.3f} Acc@5 {top5:.3f}".format( top1=test_results["Top 1 Accuracy"], top5=test_results["Top 5 Accuracy"], ) ) return BenchmarkResult( task=cls.task, config=config, dataset=cls.dataset.__name__, results=test_results, speed_mem_metrics=speed_mem_metrics, pytorch_hub_id=pytorch_hub_url, model=paper_model_name, model_description=model_description, arxiv_id=paper_arxiv_id, pwc_id=paper_pwc_id, paper_results=paper_results, run_hash=run_hash, )
torchbench/image_classification/imagenet.py
from torch.utils.data import DataLoader import torchvision.datasets as datasets import torchvision.transforms as transforms from sotabenchapi.core import BenchmarkResult, check_inputs from torchbench.utils import send_model_to_device, default_data_to_device from torchbench.image_classification.utils import evaluate_classification class ImageNet: """`ImageNet <https://www.sotabench.com/benchmark/imagenet>`_ benchmark. Examples: Evaluate a ResNeXt model from the torchvision repository: .. code-block:: python from torchbench.image_classification import ImageNet from torchvision.models.resnet import resnext101_32x8d import torchvision.transforms as transforms import PIL # Define the transforms need to convert ImageNet data to expected # model input normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) input_transform = transforms.Compose([ transforms.Resize(256, PIL.Image.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ]) # Run the benchmark ImageNet.benchmark( model=resnext101_32x8d(pretrained=True), paper_model_name='ResNeXt-101-32x8d', paper_arxiv_id='1611.05431', input_transform=input_transform, batch_size=256, num_gpu=1 ) If the model you are implementing does not have *paper* results on sotabench, you can add them: .. code-block:: python ... 
mynet_paper_results = { 'Top 1 Accuracy': 0.754, 'Top 5 Accuracy': 0.8565 } # Run the benchmark ImageNet.benchmark( model=mynet101(pretrained=True), paper_model_name='MyNet', paper_arxiv_id='2099.05431', paper_results=mynet_paper_results, input_transform=input_transform, batch_size=256, num_gpu=1 ) """ dataset = datasets.ImageNet normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) input_transform = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ] ) send_data_to_device = default_data_to_device task = "Image Classification" @classmethod @check_inputs def benchmark( cls, model, model_description=None, input_transform=None, target_transform=None, model_output_transform=None, send_data_to_device=None, device: str = "cuda", data_root: str = "./.data/vision/imagenet", num_workers: int = 4, batch_size: int = 128, pin_memory: bool = False, num_gpu: int = 1, paper_model_name: str = None, paper_arxiv_id: str = None, paper_pwc_id: str = None, paper_results: dict = None, pytorch_hub_url: str = None, force: bool = False ) -> BenchmarkResult: """Benchmarking function. Args: model: a PyTorch module, (e.g. a ``nn.Module`` object), that takes in ImageNet inputs and outputs ImageNet predictions. model_description (str, optional): Optional model description. input_transform (transforms.Compose, optional): Composing the transforms used to transform the dataset, e.g. applying resizing (e.g ``transforms.Resize``), center cropping, to tensor transformations and normalization. target_transform (torchvision.transforms.Compose, optional): Composing any transforms used to transform the target. This is usually not used for ImageNet. model_output_transform (callable, optional): An optional function that takes in model output (after being passed through your ``model`` forward pass) and transforms it. Afterwards, the output will be passed into an evaluation function. 
send_data_to_device (callable, optional): An optional function specifying how the model is sent to a device; see ``torchbench.utils.send_model_to_device`` for the default treatment. device (str): Default is 'cuda' - this is the device that the model is sent to in the default treatment. data_root (str): The location of the ImageNet dataset - change this parameter when evaluating locally if your ImageNet data is located in a different folder (or alternatively if you want to download to an alternative location). num_workers (int): The number of workers to use for the DataLoader. batch_size (int) : The batch_size to use for evaluation; if you get memory errors, then reduce this (half each time) until your model fits onto the GPU. num_gpu (int): Number of GPUs - note that sotabench.com workers only support 1 GPU for now. paper_model_name (str, optional): The name of the model from the paper - if you want to link your build to a machine learning paper. See the ImageNet benchmark page for model names, https://www.sotabench.com/benchmark/imagenet, e.g. on the paper leaderboard tab. paper_arxiv_id (str, optional): Optional linking to ArXiv if you want to link to papers on the leaderboard; put in the corresponding paper's ArXiv ID, e.g. '1611.05431'. paper_pwc_id (str, optional): Optional linking to Papers With Code; put in the corresponding papers with code URL slug, e.g. 'u-gat-it-unsupervised-generative-attentional' paper_results (dict, optional) : If the paper you are reproducing does not have model results on sotabench.com, you can specify the paper results yourself through this argument, where keys are metric names, values are metric values. e.g:: {'Top 1 Accuracy': 0.543, 'Top 5 Accuracy': 0.654}. Ensure that the metric names match those on the sotabench leaderboard - for ImageNet it should be 'Top 1 Accuracy' and 'Top 5 Accuracy'. 
pytorch_hub_url (str, optional): Optional linking to PyTorch Hub url if your model is linked there; e.g: 'nvidia_deeplearningexamples_waveglow'. """ print("Benchmarking on ImageNet...") config = locals() model, device = send_model_to_device( model, device=device, num_gpu=num_gpu ) model.eval() if not input_transform: input_transform = cls.input_transform if not send_data_to_device: send_data_to_device = cls.send_data_to_device try: test_dataset = cls.dataset( data_root, split="val", transform=input_transform, target_transform=target_transform, download=True, ) except Exception: test_dataset = cls.dataset( data_root, split="val", transform=input_transform, target_transform=target_transform, download=False, ) test_loader = DataLoader( test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, ) test_results, speed_mem_metrics, run_hash = evaluate_classification( model=model, test_loader=test_loader, model_output_transform=model_output_transform, send_data_to_device=send_data_to_device, device=device, force=force ) print( " * Acc@1 {top1:.3f} Acc@5 {top5:.3f}".format( top1=test_results["Top 1 Accuracy"], top5=test_results["Top 5 Accuracy"], ) ) return BenchmarkResult( task=cls.task, config=config, dataset=cls.dataset.__name__, results=test_results, speed_mem_metrics=speed_mem_metrics, pytorch_hub_id=pytorch_hub_url, model=paper_model_name, model_description=model_description, arxiv_id=paper_arxiv_id, pwc_id=paper_pwc_id, paper_results=paper_results, run_hash=run_hash, )
0.958895
0.626795
from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv import numpy as np import os import sys from observations.util import maybe_download_and_extract def paulsen(path): """Neurotransmission in Guinea Pig Brains The `paulsen` data frame has 346 rows and 1 columns. Sections were prepared from the brain of adult guinea pigs. Spontaneous currents that flowed into individual brain cells were then recorded and the peak amplitude of each current measured. The aim of the experiment was to see if the current flow was quantal in nature (i.e. that it is not a single burst but instead is built up of many smaller bursts of current). If the current was indeed quantal then it would be expected that the distribution of the current amplitude would be multimodal with modes at regular intervals. The modes would be expected to decrease in magnitude for higher current amplitudes. This data frame contains the following column: `y` The current flowing into individual brain cells. The currents are measured in pico-amperes. The data were kindly made available by Dr. <NAME> from the Department of Pharmacology at the University of Oxford. <NAME>. and <NAME>. (1994) The quantal size at retinogeniculate synapses determined from spontaneous and evoked EPSCs in guinea-pig thalamic slices. *Journal of Physiology*, **480**, 505–511. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `paulsen.csv`. Returns: Tuple of np.ndarray `x_train` with 346 rows and 1 columns and dictionary `metadata` of column headers (feature names). 
""" import pandas as pd path = os.path.expanduser(path) filename = 'paulsen.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/boot/paulsen.csv' maybe_download_and_extract(path, url, save_file_name='paulsen.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return x_train, metadata
observations/r/paulsen.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv import numpy as np import os import sys from observations.util import maybe_download_and_extract def paulsen(path): """Neurotransmission in Guinea Pig Brains The `paulsen` data frame has 346 rows and 1 columns. Sections were prepared from the brain of adult guinea pigs. Spontaneous currents that flowed into individual brain cells were then recorded and the peak amplitude of each current measured. The aim of the experiment was to see if the current flow was quantal in nature (i.e. that it is not a single burst but instead is built up of many smaller bursts of current). If the current was indeed quantal then it would be expected that the distribution of the current amplitude would be multimodal with modes at regular intervals. The modes would be expected to decrease in magnitude for higher current amplitudes. This data frame contains the following column: `y` The current flowing into individual brain cells. The currents are measured in pico-amperes. The data were kindly made available by Dr. <NAME> from the Department of Pharmacology at the University of Oxford. <NAME>. and <NAME>. (1994) The quantal size at retinogeniculate synapses determined from spontaneous and evoked EPSCs in guinea-pig thalamic slices. *Journal of Physiology*, **480**, 505–511. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `paulsen.csv`. Returns: Tuple of np.ndarray `x_train` with 346 rows and 1 columns and dictionary `metadata` of column headers (feature names). 
""" import pandas as pd path = os.path.expanduser(path) filename = 'paulsen.csv' if not os.path.exists(os.path.join(path, filename)): url = 'http://dustintran.com/data/r/boot/paulsen.csv' maybe_download_and_extract(path, url, save_file_name='paulsen.csv', resume=False) data = pd.read_csv(os.path.join(path, filename), index_col=0, parse_dates=True) x_train = data.values metadata = {'columns': data.columns} return x_train, metadata
0.742328
0.504455
import datetime import json import re import warnings import zipfile from io import BytesIO, StringIO import numpy as np import pandas as pd import requests from mssdk.futures import cons from mssdk.futures.requests_fun import requests_link calendar = cons.get_calendar() def get_cffex_daily(date: str = "20100401") -> pd.DataFrame: """ 中国金融期货交易所日交易数据 http://www.cffex.com.cn/rtj/ :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象; 为空时为当天 :return: pandas.DataFrame 中国金融期货交易所日: symbol 合约代码 date 日期 open 开盘价 high 最高价 low 最低价 close 收盘价 volume 成交量 open_interest 持仓量 turnover 成交额 settle 结算价 pre_settle 前结算价 variety 合约类别 或 None(给定日期没有交易数据) """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None url = f"http://www.cffex.com.cn/sj/historysj/{date[:-2]}/zip/{date[:-2]}.zip" r = requests.get(url) try: with zipfile.ZipFile(BytesIO(r.content)) as file: with file.open(f"{date}_1.csv") as my_file: data = my_file.read().decode("gb2312") data_df = pd.read_csv(StringIO(data)) except: return None data_df = data_df[data_df["合约代码"] != "小计"] data_df = data_df[data_df["合约代码"] != "合计"] data_df = data_df[~data_df["合约代码"].str.contains("IO")] data_df.reset_index(inplace=True, drop=True) data_df["合约代码"] = data_df["合约代码"].str.strip() symbol_list = data_df["合约代码"].to_list() variety_list = [re.compile(r"[a-zA-Z_]+").findall(item)[0] for item in symbol_list] if data_df.shape[1] == 15: data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover", "open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_", "_"] else: data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover", "open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_"] data_df["date"] = date data_df["variety"] = variety_list data_df = data_df[ ["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle", "pre_settle", 
"variety"]] return data_df def get_ine_daily(date: str = "20200106") -> pd.DataFrame: """ 上海国际能源交易中心-日频率-量价数据 上海国际能源交易中心: 原油期货(上市时间: 20180326); 20号胶期货(上市时间: 20190812) trade_price: http://www.ine.cn/statements/daily/?paramid=kx trade_note: http://www.ine.cn/data/datanote.dat :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象,默认为当前交易日 :type date: str or datetime.date :return: 上海国际能源交易中心-日频率-量价数据 :rtype: pandas.DataFrame or None """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn(f"{day.strftime('%Y%m%d')}非交易日") return None url = f"http://www.ine.cn/data/dailydata/kx/kx{day.strftime('%Y%m%d')}.dat" r = requests.get(url) result_df = pd.DataFrame() try: data_json = r.json() except: return None temp_df = pd.DataFrame(data_json["o_curinstrument"]).iloc[:-1, :] temp_df = temp_df[temp_df["DELIVERYMONTH"] != "小计"] temp_df = temp_df[~temp_df["PRODUCTNAME"].str.contains("总计")] try: result_df["symbol"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip() + temp_df["DELIVERYMONTH"] except: result_df["symbol"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0] + temp_df["DELIVERYMONTH"] result_df["date"] = day.strftime("%Y%m%d") result_df["open"] = temp_df["OPENPRICE"] result_df["high"] = temp_df["HIGHESTPRICE"] result_df["low"] = temp_df["LOWESTPRICE"] result_df["close"] = temp_df["CLOSEPRICE"] result_df["volume"] = temp_df["VOLUME"] result_df["open_interest"] = temp_df["OPENINTEREST"] result_df["turnover"] = 0 result_df["settle"] = temp_df["SETTLEMENTPRICE"] result_df["pre_settle"] = temp_df["PRESETTLEMENTPRICE"] try: result_df["variety"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip() except: result_df["variety"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0] result_df = result_df[result_df["symbol"] != "总计"] result_df = result_df[~result_df["symbol"].str.contains("efp")] return result_df def 
get_czce_daily(date: str = "20050525") -> pd.DataFrame: """ 郑州商品交易所-日频率-量价数据 :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象,默认为当前交易日; 日期需要大于 20100824 :type date: str or datetime.date :return: 郑州商品交易所-日频率-量价数据 :rtype: pandas.DataFrame or None """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn(f"{day.strftime('%Y%m%d')}非交易日") return None if day > datetime.date(2010, 8, 24): if day > datetime.date(2015, 9, 19): u = cons.CZCE_DAILY_URL_3 url = u % (day.strftime("%Y"), day.strftime("%Y%m%d")) elif day < datetime.date(2015, 9, 19): u = cons.CZCE_DAILY_URL_2 url = u % (day.strftime("%Y"), day.strftime("%Y%m%d")) listed_columns = cons.CZCE_COLUMNS output_columns = cons.OUTPUT_COLUMNS try: r = requests.get(url) html = r.text except requests.exceptions.HTTPError as reason: if reason.response.status_code != 404: print( cons.CZCE_DAILY_URL_3 % (day.strftime("%Y"), day.strftime("%Y%m%d")), reason, ) return if html.find("您的访问出错了") >= 0 or html.find("无期权每日行情交易记录") >= 0: return html = [ i.replace(" ", "").split("|") for i in html.split("\n")[:-4] if i[0][0] != "小" ] if day > datetime.date(2015, 9, 19): if html[1][0] not in ["品种月份", "品种代码", "合约代码"]: return dict_data = list() day_const = int(day.strftime("%Y%m%d")) for row in html[2:]: m = cons.FUTURES_SYMBOL_PATTERN.match(row[0]) if not m: continue row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)} for i, field in enumerate(listed_columns): if row[i + 1] == "\r" or row[i + 1] == '': row_dict[field] = 0.0 elif field in [ "volume", "open_interest", "oi_chg", "exercise_volume", ]: row[i + 1] = row[i + 1].replace(",", "") row_dict[field] = int(row[i + 1]) else: row[i + 1] = row[i + 1].replace(",", "") row_dict[field] = float(row[i + 1]) dict_data.append(row_dict) return pd.DataFrame(dict_data)[output_columns] elif day < datetime.date(2015, 9, 19): dict_data = list() day_const = int(day.strftime("%Y%m%d")) for row 
in html[1:]: row = row[0].split(",") m = cons.FUTURES_SYMBOL_PATTERN.match(row[0]) if not m: continue row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)} for i, field in enumerate(listed_columns): if row[i + 1] == "\r": row_dict[field] = 0.0 elif field in [ "volume", "open_interest", "oi_chg", "exercise_volume", ]: row_dict[field] = int(float(row[i + 1])) else: row_dict[field] = float(row[i + 1]) dict_data.append(row_dict) return pd.DataFrame(dict_data)[output_columns] if day <= datetime.date(2010, 8, 24): u = cons.CZCE_DAILY_URL_1 url = u % day.strftime("%Y%m%d") listed_columns = cons.CZCE_COLUMNS_2 output_columns = cons.OUTPUT_COLUMNS df = pd.read_html(url)[1].dropna(how="any") dict_data = list() day_const = int(day.strftime("%Y%m%d")) for row in df.to_dict(orient="records"): row = list(row.values()) m = cons.FUTURES_SYMBOL_PATTERN.match(row[0]) if not m: continue row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)} for i, field in enumerate(listed_columns): if row[i + 1] == "\r": row_dict[field] = 0.0 elif field in ["volume", "open_interest", "oi_chg", "exercise_volume"]: row_dict[field] = int(row[i + 1]) else: row_dict[field] = float(row[i + 1]) dict_data.append(row_dict) return pd.DataFrame(dict_data)[output_columns] def get_shfe_v_wap(date: str = "20131017") -> pd.DataFrame: """ 获取上期所日成交均价数据 Parameters ------ date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天 Return ------- DataFrame 郑商所日交易数据(DataFrame): symbol 合约代码 date 日期 time_range v_wap时段,分09:00-10:15和09:00-15:00两类 v_wap 加权平均成交均价 或 None(给定日期没有数据) """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None try: json_data = json.loads( requests_link( cons.SHFE_V_WAP_URL % (day.strftime("%Y%m%d")), headers=cons.headers, encoding="utf-8", ).text ) except: return None if len(json_data["o_currefprice"]) == 0: return None try: df = 
pd.DataFrame(json_data["o_currefprice"]) df["INSTRUMENTID"] = df["INSTRUMENTID"].str.strip() df[":B1"].astype("int16") return df.rename(columns=cons.SHFE_V_WAP_COLUMNS)[ list(cons.SHFE_V_WAP_COLUMNS.values()) ] except: return None def get_shfe_daily(date: str = "20160104") -> pd.DataFrame: """ 上海期货交易所-日频率-量价数据 http://www.shfe.com.cn/statements/dataview.html?paramid=kx :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象, 默认为当前交易日 :type date: str or datetime.date :return: 上海期货交易所-日频率-量价数据 :rtype: pandas.DataFrame or None 上期所日交易数据(DataFrame): symbol 合约代码 date 日期 open 开盘价 high 最高价 low 最低价 close 收盘价 volume 成交量 open_interest 持仓量 turnover 成交额 settle 结算价 pre_settle 前结算价 variety 合约类别 或 None(给定交易日没有交易数据) """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None try: json_data = json.loads( requests_link( cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")), headers=cons.shfe_headers, ).text ) except requests.HTTPError as reason: if reason.response != 404: print(cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")), reason) return if len(json_data["o_curinstrument"]) == 0: return df = pd.DataFrame( [ row for row in json_data["o_curinstrument"] if row["DELIVERYMONTH"] not in ["小计", "合计"] and row["DELIVERYMONTH"] != "" ] ) try: df["variety"] = df["PRODUCTGROUPID"].str.upper().str.strip() except KeyError as e: df["variety"] = df["PRODUCTID"].str.upper().str.split('_', expand=True).iloc[:, 0].str.strip() df["symbol"] = df["variety"] + df["DELIVERYMONTH"] df["date"] = day.strftime("%Y%m%d") v_wap_df = get_shfe_v_wap(day) if v_wap_df is not None: df = pd.merge( df, v_wap_df[v_wap_df.time_range == "9:00-15:00"], on=["date", "symbol"], how="left", ) df["turnover"] = df.v_wap * df.VOLUME else: df["VOLUME"] = df["VOLUME"].apply(lambda x: 0 if x == "" else x) df["turnover"] = df["VOLUME"] * df["SETTLEMENTPRICE"] df.rename(columns=cons.SHFE_COLUMNS, inplace=True) 
df = df[~df["symbol"].str.contains("efp")] return df[cons.OUTPUT_COLUMNS] def get_dce_daily(date: str = "20030115") -> pd.DataFrame: """ 大连商品交易所日交易数据 http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rxq/index.html :param date: 交易日, e.g., 20200416 :type date: str :return: 具体交易日的个品种行情数据 :rtype: pandas.DataFrame """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None url = "http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html" headers = { "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8", "Cache-Control": "no-cache", "Connection": "keep-alive", "Content-Length": "86", "Content-Type": "application/x-www-form-urlencoded", "Host": "www.dce.com.cn", "Origin": "http://www.dce.com.cn", "Pragma": "no-cache", "Referer": "http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36", } params = { "dayQuotes.variety": "all", "dayQuotes.trade_type": "0", "year": date[:4], "month": str(int(date[4:6]) - 1), "day": date[6:], "exportFlag": "excel", } r = requests.post(url, data=params, headers=headers) data_df = pd.read_excel(BytesIO(r.content)) data_df = data_df[~data_df["商品名称"].str.contains("小计")] data_df = data_df[~data_df["商品名称"].str.contains("总计")] data_df["variety"] = data_df["商品名称"].map(lambda x: cons.DCE_MAP[x]) data_df["symbol"] = data_df["variety"] + data_df["交割月份"].astype(int).astype(str) del data_df["商品名称"] del data_df["交割月份"] data_df.columns = ["open", "high", "low", "close", "pre_settle", "settle", "_", "_", "volume", "open_interest", "_", "turnover", "variety", "symbol"] data_df["date"] = 
date data_df = data_df[ ["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle", "pre_settle", "variety"]] data_df = data_df.applymap(lambda x: x.replace(",", "")) data_df = data_df.astype({"open": "float", "high": "float", "low": "float", "close": "float", "volume": "float", "open_interest": "float", "turnover": "float", "settle": "float", "pre_settle": "float", }) return data_df def get_futures_daily(start_date: str = "20210421", end_date: str = "20210426", market: str = "INE", index_bar: bool = False) -> pd.DataFrame: """ 交易所日交易数据 :param start_date: 开始日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天 :type start_date: str :param end_date: 结束数据 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天 :type end_date: str :param market: 'CFFEX' 中金所, 'CZCE' 郑商所, 'SHFE' 上期所, 'DCE' 大商所 之一, 'INE' 上海国际能源交易中心。默认为中金所 :type market: str :param index_bar: 是否合成指数K线, 默认为 False 否则影响 roll_yield 的计算 :type index_bar: bool :return: 交易所日交易数据 :rtype: pandas.DataFrame """ if market.upper() == "CFFEX": f = get_cffex_daily elif market.upper() == "CZCE": f = get_czce_daily elif market.upper() == "SHFE": f = get_shfe_daily elif market.upper() == "DCE": f = get_dce_daily elif market.upper() == "INE": f = get_ine_daily else: print("Invalid Market Symbol") return None start_date = ( cons.convert_date(start_date) if start_date is not None else datetime.date.today() ) end_date = ( cons.convert_date(end_date) if end_date is not None else cons.convert_date(cons.get_latest_data_date(datetime.datetime.now())) ) df_list = list() while start_date <= end_date: df = f(date=str(start_date).replace("-", "")) if df is not None: df_list.append(df) if index_bar: df_list.append(get_futures_index(df)) start_date += datetime.timedelta(days=1) if len(df_list) > 0: temp_df = pd.concat(df_list).reset_index(drop=True) temp_df = temp_df[~temp_df['symbol'].str.contains("efp")] return temp_df def get_futures_index(df: pd.DataFrame) -> pd.DataFrame: """ 指数日交易数据, 指数合成 
:param df: 爬到的原始合约日线行情 :type df: pandas.DataFrame :return: 持仓量加权指数日线行情 :rtype: pandas.DataFrame """ index_dfs = [] for var in set(df["variety"]): df_cut = df[df["variety"] == var] df_cut = df_cut[df_cut["open_interest"] != 0] df_cut = df_cut[df_cut["close"] != np.nan] df_cut = df_cut[df_cut["volume"] != int(0)] if len(df_cut.index) > 0: index_df = pd.Series(index=df_cut.columns, dtype="object") index_df[["volume", "open_interest", "turnover"]] = df_cut[ ["volume", "open_interest", "turnover"] ].sum() if "efp" in df_cut.iloc[-1, 0]: df_cut = df_cut.iloc[:-1, :] df_cut.replace("", 0, inplace=True) # 20201026 部分数据开盘价空缺 index_df[["open", "high", "low", "close", "settle", "pre_settle"]] = np.dot( np.array( df_cut[["open", "high", "low", "close", "settle", "pre_settle"]] ).T, np.array((df_cut["open_interest"].astype(float))), ) / np.sum(df_cut["open_interest"].astype(float)) index_df[["date", "variety"]] = df_cut[["date", "variety"]].iloc[0, :] index_df["symbol"] = index_df["variety"] + "99" index_dfs.append(index_df) return pd.concat(index_dfs, axis=1).T if __name__ == "__main__": get_futures_daily_df = get_futures_daily(start_date='20200105', end_date='20200201', market="INE", index_bar=False) print(get_futures_daily_df) get_dce_daily_df = get_dce_daily(date="20210427") print(get_dce_daily_df) get_cffex_daily_df = get_cffex_daily(date="20101101") print(get_cffex_daily_df) get_ine_daily_df = get_ine_daily(date="20210426") print(get_ine_daily_df) get_czce_daily_df = get_czce_daily(date="20210416") print(get_czce_daily_df) get_shfe_daily_df = get_shfe_daily(date="20160104") print(get_shfe_daily_df)
mssdk/futures/futures_daily_bar.py
import datetime import json import re import warnings import zipfile from io import BytesIO, StringIO import numpy as np import pandas as pd import requests from mssdk.futures import cons from mssdk.futures.requests_fun import requests_link calendar = cons.get_calendar() def get_cffex_daily(date: str = "20100401") -> pd.DataFrame: """ 中国金融期货交易所日交易数据 http://www.cffex.com.cn/rtj/ :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象; 为空时为当天 :return: pandas.DataFrame 中国金融期货交易所日: symbol 合约代码 date 日期 open 开盘价 high 最高价 low 最低价 close 收盘价 volume 成交量 open_interest 持仓量 turnover 成交额 settle 结算价 pre_settle 前结算价 variety 合约类别 或 None(给定日期没有交易数据) """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None url = f"http://www.cffex.com.cn/sj/historysj/{date[:-2]}/zip/{date[:-2]}.zip" r = requests.get(url) try: with zipfile.ZipFile(BytesIO(r.content)) as file: with file.open(f"{date}_1.csv") as my_file: data = my_file.read().decode("gb2312") data_df = pd.read_csv(StringIO(data)) except: return None data_df = data_df[data_df["合约代码"] != "小计"] data_df = data_df[data_df["合约代码"] != "合计"] data_df = data_df[~data_df["合约代码"].str.contains("IO")] data_df.reset_index(inplace=True, drop=True) data_df["合约代码"] = data_df["合约代码"].str.strip() symbol_list = data_df["合约代码"].to_list() variety_list = [re.compile(r"[a-zA-Z_]+").findall(item)[0] for item in symbol_list] if data_df.shape[1] == 15: data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover", "open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_", "_"] else: data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover", "open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_"] data_df["date"] = date data_df["variety"] = variety_list data_df = data_df[ ["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle", "pre_settle", 
"variety"]] return data_df def get_ine_daily(date: str = "20200106") -> pd.DataFrame: """ 上海国际能源交易中心-日频率-量价数据 上海国际能源交易中心: 原油期货(上市时间: 20180326); 20号胶期货(上市时间: 20190812) trade_price: http://www.ine.cn/statements/daily/?paramid=kx trade_note: http://www.ine.cn/data/datanote.dat :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象,默认为当前交易日 :type date: str or datetime.date :return: 上海国际能源交易中心-日频率-量价数据 :rtype: pandas.DataFrame or None """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn(f"{day.strftime('%Y%m%d')}非交易日") return None url = f"http://www.ine.cn/data/dailydata/kx/kx{day.strftime('%Y%m%d')}.dat" r = requests.get(url) result_df = pd.DataFrame() try: data_json = r.json() except: return None temp_df = pd.DataFrame(data_json["o_curinstrument"]).iloc[:-1, :] temp_df = temp_df[temp_df["DELIVERYMONTH"] != "小计"] temp_df = temp_df[~temp_df["PRODUCTNAME"].str.contains("总计")] try: result_df["symbol"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip() + temp_df["DELIVERYMONTH"] except: result_df["symbol"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0] + temp_df["DELIVERYMONTH"] result_df["date"] = day.strftime("%Y%m%d") result_df["open"] = temp_df["OPENPRICE"] result_df["high"] = temp_df["HIGHESTPRICE"] result_df["low"] = temp_df["LOWESTPRICE"] result_df["close"] = temp_df["CLOSEPRICE"] result_df["volume"] = temp_df["VOLUME"] result_df["open_interest"] = temp_df["OPENINTEREST"] result_df["turnover"] = 0 result_df["settle"] = temp_df["SETTLEMENTPRICE"] result_df["pre_settle"] = temp_df["PRESETTLEMENTPRICE"] try: result_df["variety"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip() except: result_df["variety"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0] result_df = result_df[result_df["symbol"] != "总计"] result_df = result_df[~result_df["symbol"].str.contains("efp")] return result_df def 
get_czce_daily(date: str = "20050525") -> pd.DataFrame: """ 郑州商品交易所-日频率-量价数据 :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象,默认为当前交易日; 日期需要大于 20100824 :type date: str or datetime.date :return: 郑州商品交易所-日频率-量价数据 :rtype: pandas.DataFrame or None """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn(f"{day.strftime('%Y%m%d')}非交易日") return None if day > datetime.date(2010, 8, 24): if day > datetime.date(2015, 9, 19): u = cons.CZCE_DAILY_URL_3 url = u % (day.strftime("%Y"), day.strftime("%Y%m%d")) elif day < datetime.date(2015, 9, 19): u = cons.CZCE_DAILY_URL_2 url = u % (day.strftime("%Y"), day.strftime("%Y%m%d")) listed_columns = cons.CZCE_COLUMNS output_columns = cons.OUTPUT_COLUMNS try: r = requests.get(url) html = r.text except requests.exceptions.HTTPError as reason: if reason.response.status_code != 404: print( cons.CZCE_DAILY_URL_3 % (day.strftime("%Y"), day.strftime("%Y%m%d")), reason, ) return if html.find("您的访问出错了") >= 0 or html.find("无期权每日行情交易记录") >= 0: return html = [ i.replace(" ", "").split("|") for i in html.split("\n")[:-4] if i[0][0] != "小" ] if day > datetime.date(2015, 9, 19): if html[1][0] not in ["品种月份", "品种代码", "合约代码"]: return dict_data = list() day_const = int(day.strftime("%Y%m%d")) for row in html[2:]: m = cons.FUTURES_SYMBOL_PATTERN.match(row[0]) if not m: continue row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)} for i, field in enumerate(listed_columns): if row[i + 1] == "\r" or row[i + 1] == '': row_dict[field] = 0.0 elif field in [ "volume", "open_interest", "oi_chg", "exercise_volume", ]: row[i + 1] = row[i + 1].replace(",", "") row_dict[field] = int(row[i + 1]) else: row[i + 1] = row[i + 1].replace(",", "") row_dict[field] = float(row[i + 1]) dict_data.append(row_dict) return pd.DataFrame(dict_data)[output_columns] elif day < datetime.date(2015, 9, 19): dict_data = list() day_const = int(day.strftime("%Y%m%d")) for row 
in html[1:]: row = row[0].split(",") m = cons.FUTURES_SYMBOL_PATTERN.match(row[0]) if not m: continue row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)} for i, field in enumerate(listed_columns): if row[i + 1] == "\r": row_dict[field] = 0.0 elif field in [ "volume", "open_interest", "oi_chg", "exercise_volume", ]: row_dict[field] = int(float(row[i + 1])) else: row_dict[field] = float(row[i + 1]) dict_data.append(row_dict) return pd.DataFrame(dict_data)[output_columns] if day <= datetime.date(2010, 8, 24): u = cons.CZCE_DAILY_URL_1 url = u % day.strftime("%Y%m%d") listed_columns = cons.CZCE_COLUMNS_2 output_columns = cons.OUTPUT_COLUMNS df = pd.read_html(url)[1].dropna(how="any") dict_data = list() day_const = int(day.strftime("%Y%m%d")) for row in df.to_dict(orient="records"): row = list(row.values()) m = cons.FUTURES_SYMBOL_PATTERN.match(row[0]) if not m: continue row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)} for i, field in enumerate(listed_columns): if row[i + 1] == "\r": row_dict[field] = 0.0 elif field in ["volume", "open_interest", "oi_chg", "exercise_volume"]: row_dict[field] = int(row[i + 1]) else: row_dict[field] = float(row[i + 1]) dict_data.append(row_dict) return pd.DataFrame(dict_data)[output_columns] def get_shfe_v_wap(date: str = "20131017") -> pd.DataFrame: """ 获取上期所日成交均价数据 Parameters ------ date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天 Return ------- DataFrame 郑商所日交易数据(DataFrame): symbol 合约代码 date 日期 time_range v_wap时段,分09:00-10:15和09:00-15:00两类 v_wap 加权平均成交均价 或 None(给定日期没有数据) """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None try: json_data = json.loads( requests_link( cons.SHFE_V_WAP_URL % (day.strftime("%Y%m%d")), headers=cons.headers, encoding="utf-8", ).text ) except: return None if len(json_data["o_currefprice"]) == 0: return None try: df = 
pd.DataFrame(json_data["o_currefprice"]) df["INSTRUMENTID"] = df["INSTRUMENTID"].str.strip() df[":B1"].astype("int16") return df.rename(columns=cons.SHFE_V_WAP_COLUMNS)[ list(cons.SHFE_V_WAP_COLUMNS.values()) ] except: return None def get_shfe_daily(date: str = "20160104") -> pd.DataFrame: """ 上海期货交易所-日频率-量价数据 http://www.shfe.com.cn/statements/dataview.html?paramid=kx :param date: 日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象, 默认为当前交易日 :type date: str or datetime.date :return: 上海期货交易所-日频率-量价数据 :rtype: pandas.DataFrame or None 上期所日交易数据(DataFrame): symbol 合约代码 date 日期 open 开盘价 high 最高价 low 最低价 close 收盘价 volume 成交量 open_interest 持仓量 turnover 成交额 settle 结算价 pre_settle 前结算价 variety 合约类别 或 None(给定交易日没有交易数据) """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None try: json_data = json.loads( requests_link( cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")), headers=cons.shfe_headers, ).text ) except requests.HTTPError as reason: if reason.response != 404: print(cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")), reason) return if len(json_data["o_curinstrument"]) == 0: return df = pd.DataFrame( [ row for row in json_data["o_curinstrument"] if row["DELIVERYMONTH"] not in ["小计", "合计"] and row["DELIVERYMONTH"] != "" ] ) try: df["variety"] = df["PRODUCTGROUPID"].str.upper().str.strip() except KeyError as e: df["variety"] = df["PRODUCTID"].str.upper().str.split('_', expand=True).iloc[:, 0].str.strip() df["symbol"] = df["variety"] + df["DELIVERYMONTH"] df["date"] = day.strftime("%Y%m%d") v_wap_df = get_shfe_v_wap(day) if v_wap_df is not None: df = pd.merge( df, v_wap_df[v_wap_df.time_range == "9:00-15:00"], on=["date", "symbol"], how="left", ) df["turnover"] = df.v_wap * df.VOLUME else: df["VOLUME"] = df["VOLUME"].apply(lambda x: 0 if x == "" else x) df["turnover"] = df["VOLUME"] * df["SETTLEMENTPRICE"] df.rename(columns=cons.SHFE_COLUMNS, inplace=True) 
df = df[~df["symbol"].str.contains("efp")] return df[cons.OUTPUT_COLUMNS] def get_dce_daily(date: str = "20030115") -> pd.DataFrame: """ 大连商品交易所日交易数据 http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rxq/index.html :param date: 交易日, e.g., 20200416 :type date: str :return: 具体交易日的个品种行情数据 :rtype: pandas.DataFrame """ day = cons.convert_date(date) if date is not None else datetime.date.today() if day.strftime("%Y%m%d") not in calendar: warnings.warn("%s非交易日" % day.strftime("%Y%m%d")) return None url = "http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html" headers = { "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8", "Cache-Control": "no-cache", "Connection": "keep-alive", "Content-Length": "86", "Content-Type": "application/x-www-form-urlencoded", "Host": "www.dce.com.cn", "Origin": "http://www.dce.com.cn", "Pragma": "no-cache", "Referer": "http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36", } params = { "dayQuotes.variety": "all", "dayQuotes.trade_type": "0", "year": date[:4], "month": str(int(date[4:6]) - 1), "day": date[6:], "exportFlag": "excel", } r = requests.post(url, data=params, headers=headers) data_df = pd.read_excel(BytesIO(r.content)) data_df = data_df[~data_df["商品名称"].str.contains("小计")] data_df = data_df[~data_df["商品名称"].str.contains("总计")] data_df["variety"] = data_df["商品名称"].map(lambda x: cons.DCE_MAP[x]) data_df["symbol"] = data_df["variety"] + data_df["交割月份"].astype(int).astype(str) del data_df["商品名称"] del data_df["交割月份"] data_df.columns = ["open", "high", "low", "close", "pre_settle", "settle", "_", "_", "volume", "open_interest", "_", "turnover", "variety", "symbol"] data_df["date"] = 
date data_df = data_df[ ["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle", "pre_settle", "variety"]] data_df = data_df.applymap(lambda x: x.replace(",", "")) data_df = data_df.astype({"open": "float", "high": "float", "low": "float", "close": "float", "volume": "float", "open_interest": "float", "turnover": "float", "settle": "float", "pre_settle": "float", }) return data_df def get_futures_daily(start_date: str = "20210421", end_date: str = "20210426", market: str = "INE", index_bar: bool = False) -> pd.DataFrame: """ 交易所日交易数据 :param start_date: 开始日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天 :type start_date: str :param end_date: 结束数据 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象 为空时为当天 :type end_date: str :param market: 'CFFEX' 中金所, 'CZCE' 郑商所, 'SHFE' 上期所, 'DCE' 大商所 之一, 'INE' 上海国际能源交易中心。默认为中金所 :type market: str :param index_bar: 是否合成指数K线, 默认为 False 否则影响 roll_yield 的计算 :type index_bar: bool :return: 交易所日交易数据 :rtype: pandas.DataFrame """ if market.upper() == "CFFEX": f = get_cffex_daily elif market.upper() == "CZCE": f = get_czce_daily elif market.upper() == "SHFE": f = get_shfe_daily elif market.upper() == "DCE": f = get_dce_daily elif market.upper() == "INE": f = get_ine_daily else: print("Invalid Market Symbol") return None start_date = ( cons.convert_date(start_date) if start_date is not None else datetime.date.today() ) end_date = ( cons.convert_date(end_date) if end_date is not None else cons.convert_date(cons.get_latest_data_date(datetime.datetime.now())) ) df_list = list() while start_date <= end_date: df = f(date=str(start_date).replace("-", "")) if df is not None: df_list.append(df) if index_bar: df_list.append(get_futures_index(df)) start_date += datetime.timedelta(days=1) if len(df_list) > 0: temp_df = pd.concat(df_list).reset_index(drop=True) temp_df = temp_df[~temp_df['symbol'].str.contains("efp")] return temp_df def get_futures_index(df: pd.DataFrame) -> pd.DataFrame: """ 指数日交易数据, 指数合成 
:param df: 爬到的原始合约日线行情 :type df: pandas.DataFrame :return: 持仓量加权指数日线行情 :rtype: pandas.DataFrame """ index_dfs = [] for var in set(df["variety"]): df_cut = df[df["variety"] == var] df_cut = df_cut[df_cut["open_interest"] != 0] df_cut = df_cut[df_cut["close"] != np.nan] df_cut = df_cut[df_cut["volume"] != int(0)] if len(df_cut.index) > 0: index_df = pd.Series(index=df_cut.columns, dtype="object") index_df[["volume", "open_interest", "turnover"]] = df_cut[ ["volume", "open_interest", "turnover"] ].sum() if "efp" in df_cut.iloc[-1, 0]: df_cut = df_cut.iloc[:-1, :] df_cut.replace("", 0, inplace=True) # 20201026 部分数据开盘价空缺 index_df[["open", "high", "low", "close", "settle", "pre_settle"]] = np.dot( np.array( df_cut[["open", "high", "low", "close", "settle", "pre_settle"]] ).T, np.array((df_cut["open_interest"].astype(float))), ) / np.sum(df_cut["open_interest"].astype(float)) index_df[["date", "variety"]] = df_cut[["date", "variety"]].iloc[0, :] index_df["symbol"] = index_df["variety"] + "99" index_dfs.append(index_df) return pd.concat(index_dfs, axis=1).T if __name__ == "__main__": get_futures_daily_df = get_futures_daily(start_date='20200105', end_date='20200201', market="INE", index_bar=False) print(get_futures_daily_df) get_dce_daily_df = get_dce_daily(date="20210427") print(get_dce_daily_df) get_cffex_daily_df = get_cffex_daily(date="20101101") print(get_cffex_daily_df) get_ine_daily_df = get_ine_daily(date="20210426") print(get_ine_daily_df) get_czce_daily_df = get_czce_daily(date="20210416") print(get_czce_daily_df) get_shfe_daily_df = get_shfe_daily(date="20160104") print(get_shfe_daily_df)
0.196865
0.171304
import csv import itertools import os import multiprocessing import sys import numpy.random from typing import Tuple from matplotlib import pyplot as plt from tqdm import tqdm from pandas import DataFrame from cvrp.aco_cvrp_solver import AntColonyCVRPSolver from cvrp.augerat_loader import load_augerat_example from cvrp.cvrp_solver import CVRPDefinition, CVRPSolver from cvrp.greedy_cvrp_solver import GreedyCVRPSolver from cvrp.util import route_len class TestResult: def __init__(self, problem: CVRPDefinition, solver: CVRPSolver, rlen: float, rlen_std_dev = 0.0): self.customers_count = len(problem.graph.nodes) - 1 self.truck_capacity = problem.truck_capacity self.truck_route_limit = problem.truck_route_limit self.solver_desc = solver.get_info() self.rlen = rlen self.rlen_std_dev = rlen_std_dev class AvgTestResult: def __init__( self, customers_count: int, truck_capacity: float, truck_route_limit: int, solver_desc: str, rlen_avg: float, rlen_std_dev: float ): self.customers_count = customers_count self.truck_capacity = truck_capacity self.truck_route_limit = truck_route_limit self.solver_desc = solver_desc self.rlen_avg = rlen_avg self.rlen_std_dev = rlen_std_dev class PlotData: def __init__(self, filename: str): self.filename = filename self.labels = [] self.scores = [] cvrp_instances = [ 'A-n33-k5.vrp', # 'B-n41-k6.vrp', # 'B-n50-k8.vrp', 'A-n60-k9.vrp', # 'A-n69-k9.vrp', # 'A-n80-k10.vrp', ] cvrp_solvers = [ AntColonyCVRPSolver(iterations = 1000), AntColonyCVRPSolver(iterations = 3000), AntColonyCVRPSolver(iterations = 2500, permute_routes = True), AntColonyCVRPSolver(iterations = 2000, candidate_fraction = 0.25), AntColonyCVRPSolver(iterations = 1500, ants_per_customer = 2) ] SAMPLE_COUNT = 20 def run_test(test_case: Tuple[CVRPDefinition, AntColonyCVRPSolver, numpy.random.Generator]) -> TestResult: problem = test_case[0] solver = test_case[1] solver.set_rng(rng = test_case[2]) solution = solver.solve_cvrp(problem) return TestResult(problem, solver, 
route_len(solution)) if __name__ == '__main__': problems = [load_augerat_example(instance) for instance in cvrp_instances] rngs = [numpy.random.default_rng() for _ in range(SAMPLE_COUNT)] test_cases = itertools.product(problems, cvrp_solvers, rngs) cases_count = len(problems) * len(cvrp_solvers) * SAMPLE_COUNT process_count = int(sys.argv[1]) results = [] with multiprocessing.Pool(process_count) as process_pool: for result in tqdm(process_pool.imap_unordered(run_test, test_cases), total = cases_count): results.append(result) results_df = DataFrame(data = { 'customers_count': [result.customers_count for result in results], 'truck_capacity': [result.truck_capacity for result in results], 'truck_route_limit': [result.truck_route_limit for result in results], 'solver_desc': [result.solver_desc for result in results], 'rlen': [result.rlen for result in results], }) df_avg = results_df \ .groupby(['customers_count', 'solver_desc', 'truck_capacity', 'truck_route_limit'])['rlen'] \ .agg({ 'mean', 'std' }).reset_index() results_avg = [AvgTestResult( record['customers_count'], record['truck_capacity'], record['truck_route_limit'], record['solver_desc'], record['mean'], record['std'] ) for _, record in df_avg.iterrows()] greedy_solver = GreedyCVRPSolver() for cvrp in problems: route = greedy_solver.solve_cvrp(cvrp) result = TestResult(cvrp, greedy_solver, route_len(route)) avg_result = AvgTestResult( result.customers_count, result.truck_capacity, result.truck_route_limit, result.solver_desc, result.rlen, 0.0 ) results_avg.append(avg_result) if not os.path.exists('out'): os.mkdir('out') with open('out/results.csv', mode = 'wt') as file: csv_writer = csv.writer(file) csv_writer.writerow( ['Customers count', 'solver', 'truck capacity', 'truck route limit', 'avg route len', 'std deviation'] ) for result in results_avg: row = [ result.customers_count, result.solver_desc, result.truck_capacity, result.truck_route_limit, result.rlen_avg, result.rlen_std_dev ] 
csv_writer.writerow(row) plot_data_map = { } for result in results_avg: if result.customers_count not in plot_data_map: plot_data_map[result.customers_count] = PlotData(filename = f'plot_n{result.customers_count}') plot_data_map[result.customers_count].labels.append(result.solver_desc) plot_data_map[result.customers_count].scores.append(result.rlen_avg) for plot_data in plot_data_map.values(): plt.clf() plt.bar(x = range(len(plot_data.labels)), height = plot_data.scores, tick_label = plot_data.labels) plt.savefig(f'out/{plot_data.filename}.png')
test.py
import csv import itertools import os import multiprocessing import sys import numpy.random from typing import Tuple from matplotlib import pyplot as plt from tqdm import tqdm from pandas import DataFrame from cvrp.aco_cvrp_solver import AntColonyCVRPSolver from cvrp.augerat_loader import load_augerat_example from cvrp.cvrp_solver import CVRPDefinition, CVRPSolver from cvrp.greedy_cvrp_solver import GreedyCVRPSolver from cvrp.util import route_len class TestResult: def __init__(self, problem: CVRPDefinition, solver: CVRPSolver, rlen: float, rlen_std_dev = 0.0): self.customers_count = len(problem.graph.nodes) - 1 self.truck_capacity = problem.truck_capacity self.truck_route_limit = problem.truck_route_limit self.solver_desc = solver.get_info() self.rlen = rlen self.rlen_std_dev = rlen_std_dev class AvgTestResult: def __init__( self, customers_count: int, truck_capacity: float, truck_route_limit: int, solver_desc: str, rlen_avg: float, rlen_std_dev: float ): self.customers_count = customers_count self.truck_capacity = truck_capacity self.truck_route_limit = truck_route_limit self.solver_desc = solver_desc self.rlen_avg = rlen_avg self.rlen_std_dev = rlen_std_dev class PlotData: def __init__(self, filename: str): self.filename = filename self.labels = [] self.scores = [] cvrp_instances = [ 'A-n33-k5.vrp', # 'B-n41-k6.vrp', # 'B-n50-k8.vrp', 'A-n60-k9.vrp', # 'A-n69-k9.vrp', # 'A-n80-k10.vrp', ] cvrp_solvers = [ AntColonyCVRPSolver(iterations = 1000), AntColonyCVRPSolver(iterations = 3000), AntColonyCVRPSolver(iterations = 2500, permute_routes = True), AntColonyCVRPSolver(iterations = 2000, candidate_fraction = 0.25), AntColonyCVRPSolver(iterations = 1500, ants_per_customer = 2) ] SAMPLE_COUNT = 20 def run_test(test_case: Tuple[CVRPDefinition, AntColonyCVRPSolver, numpy.random.Generator]) -> TestResult: problem = test_case[0] solver = test_case[1] solver.set_rng(rng = test_case[2]) solution = solver.solve_cvrp(problem) return TestResult(problem, solver, 
route_len(solution)) if __name__ == '__main__': problems = [load_augerat_example(instance) for instance in cvrp_instances] rngs = [numpy.random.default_rng() for _ in range(SAMPLE_COUNT)] test_cases = itertools.product(problems, cvrp_solvers, rngs) cases_count = len(problems) * len(cvrp_solvers) * SAMPLE_COUNT process_count = int(sys.argv[1]) results = [] with multiprocessing.Pool(process_count) as process_pool: for result in tqdm(process_pool.imap_unordered(run_test, test_cases), total = cases_count): results.append(result) results_df = DataFrame(data = { 'customers_count': [result.customers_count for result in results], 'truck_capacity': [result.truck_capacity for result in results], 'truck_route_limit': [result.truck_route_limit for result in results], 'solver_desc': [result.solver_desc for result in results], 'rlen': [result.rlen for result in results], }) df_avg = results_df \ .groupby(['customers_count', 'solver_desc', 'truck_capacity', 'truck_route_limit'])['rlen'] \ .agg({ 'mean', 'std' }).reset_index() results_avg = [AvgTestResult( record['customers_count'], record['truck_capacity'], record['truck_route_limit'], record['solver_desc'], record['mean'], record['std'] ) for _, record in df_avg.iterrows()] greedy_solver = GreedyCVRPSolver() for cvrp in problems: route = greedy_solver.solve_cvrp(cvrp) result = TestResult(cvrp, greedy_solver, route_len(route)) avg_result = AvgTestResult( result.customers_count, result.truck_capacity, result.truck_route_limit, result.solver_desc, result.rlen, 0.0 ) results_avg.append(avg_result) if not os.path.exists('out'): os.mkdir('out') with open('out/results.csv', mode = 'wt') as file: csv_writer = csv.writer(file) csv_writer.writerow( ['Customers count', 'solver', 'truck capacity', 'truck route limit', 'avg route len', 'std deviation'] ) for result in results_avg: row = [ result.customers_count, result.solver_desc, result.truck_capacity, result.truck_route_limit, result.rlen_avg, result.rlen_std_dev ] 
csv_writer.writerow(row) plot_data_map = { } for result in results_avg: if result.customers_count not in plot_data_map: plot_data_map[result.customers_count] = PlotData(filename = f'plot_n{result.customers_count}') plot_data_map[result.customers_count].labels.append(result.solver_desc) plot_data_map[result.customers_count].scores.append(result.rlen_avg) for plot_data in plot_data_map.values(): plt.clf() plt.bar(x = range(len(plot_data.labels)), height = plot_data.scores, tick_label = plot_data.labels) plt.savefig(f'out/{plot_data.filename}.png')
0.493409
0.239911
from cmstk.filetypes import TextFile from typing import Optional, Tuple class KpointsFile(TextFile): """File wrapper for a VASP KPOINTS file. Args: filepath: Filepath to a KPOINTS file. comment: Top line file descriptor. n_kpoints: Number of K-Points. mesh_shift: Shift of the K-Point mesh in 3D. mesh_size: Size of the K-Point mesh in 3D. mesh_type: Mesh generation scheme. Attributes: filepath: Filepath to a KPOINTS file. comment: Top line file descriptor. n_kpoints: Number of K-Points. mesh_shift: Shift of the K-Point mesh in 3D. mesh_size: Size of the K-Point mesh in 3D. mesh_type: Mesh generation scheme. """ def __init__(self, filepath: Optional[str] = None, comment: Optional[str] = None, n_kpoints: Optional[int] = None, mesh_shift: Optional[Tuple[int, int, int]] = None, mesh_size: Optional[Tuple[int, int, int]] = None, mesh_type: Optional[str] = None) -> None: if filepath is None: filepath = "KPOINTS" if comment is None: comment = "Automatically generated by cmstk." self._comment = comment if n_kpoints is None: n_kpoints = 0 self._n_kpoints = n_kpoints if mesh_shift is None: mesh_shift = (0, 0, 0) self._mesh_shift = mesh_shift if mesh_size is None: mesh_size = (5, 5, 5) self._mesh_size = mesh_size if mesh_type is None: mesh_type = "Monkhorst-Pack" self._mesh_type = mesh_type super().__init__(filepath) @property def comment(self) -> str: if self._comment is None: self._comment = self.lines[0] return self._comment @comment.setter def comment(self, value: str) -> None: self._comment = value @property def n_kpoints(self) -> int: if self._n_kpoints is None: self._n_kpoints = int(self.lines[1]) return self._n_kpoints @n_kpoints.setter def n_kpoints(self, value: int) -> None: self._n_kpoints = value @property def mesh_shift(self) -> Tuple[int, int, int]: if self._mesh_shift is None: shift = self.lines[4].split() self._mesh_shift = tuple(map(int, [shift[0], shift[1], shift[2]])) return self._mesh_shift @mesh_shift.setter def mesh_shift(self, value: Tuple[int, int, int]) 
-> None: self._mesh_shift = value @property def mesh_size(self) -> Tuple[int, int, int]: if self._mesh_size is None: size = self.lines[3].split() self._mesh_size = tuple(map(int, [size[0], size[1], size[2]])) return self._mesh_size @mesh_size.setter def mesh_size(self, value: Tuple[int, int, int]) -> None: self._mesh_size = value @property def mesh_type(self) -> str: if self._mesh_type is None: self._mesh_type = self.lines[2] return self._mesh_type @mesh_type.setter def mesh_type(self, value: str) -> None: self._mesh_type = value def write(self, path: Optional[str] = None) -> None: """Writes a KPOINTS file. Args: path: The filepath to write to. """ if path is None: path = self.filepath with open(path, "w") as f: for s in [self.comment, self.n_kpoints, self.mesh_type]: f.write("{}\n".format(s)) for s in [self.mesh_size, self.mesh_shift]: f.write("{} {} {}\n".format(*s))
cmstk/vasp/kpoints.py
from cmstk.filetypes import TextFile from typing import Optional, Tuple class KpointsFile(TextFile): """File wrapper for a VASP KPOINTS file. Args: filepath: Filepath to a KPOINTS file. comment: Top line file descriptor. n_kpoints: Number of K-Points. mesh_shift: Shift of the K-Point mesh in 3D. mesh_size: Size of the K-Point mesh in 3D. mesh_type: Mesh generation scheme. Attributes: filepath: Filepath to a KPOINTS file. comment: Top line file descriptor. n_kpoints: Number of K-Points. mesh_shift: Shift of the K-Point mesh in 3D. mesh_size: Size of the K-Point mesh in 3D. mesh_type: Mesh generation scheme. """ def __init__(self, filepath: Optional[str] = None, comment: Optional[str] = None, n_kpoints: Optional[int] = None, mesh_shift: Optional[Tuple[int, int, int]] = None, mesh_size: Optional[Tuple[int, int, int]] = None, mesh_type: Optional[str] = None) -> None: if filepath is None: filepath = "KPOINTS" if comment is None: comment = "Automatically generated by cmstk." self._comment = comment if n_kpoints is None: n_kpoints = 0 self._n_kpoints = n_kpoints if mesh_shift is None: mesh_shift = (0, 0, 0) self._mesh_shift = mesh_shift if mesh_size is None: mesh_size = (5, 5, 5) self._mesh_size = mesh_size if mesh_type is None: mesh_type = "Monkhorst-Pack" self._mesh_type = mesh_type super().__init__(filepath) @property def comment(self) -> str: if self._comment is None: self._comment = self.lines[0] return self._comment @comment.setter def comment(self, value: str) -> None: self._comment = value @property def n_kpoints(self) -> int: if self._n_kpoints is None: self._n_kpoints = int(self.lines[1]) return self._n_kpoints @n_kpoints.setter def n_kpoints(self, value: int) -> None: self._n_kpoints = value @property def mesh_shift(self) -> Tuple[int, int, int]: if self._mesh_shift is None: shift = self.lines[4].split() self._mesh_shift = tuple(map(int, [shift[0], shift[1], shift[2]])) return self._mesh_shift @mesh_shift.setter def mesh_shift(self, value: Tuple[int, int, int]) 
-> None: self._mesh_shift = value @property def mesh_size(self) -> Tuple[int, int, int]: if self._mesh_size is None: size = self.lines[3].split() self._mesh_size = tuple(map(int, [size[0], size[1], size[2]])) return self._mesh_size @mesh_size.setter def mesh_size(self, value: Tuple[int, int, int]) -> None: self._mesh_size = value @property def mesh_type(self) -> str: if self._mesh_type is None: self._mesh_type = self.lines[2] return self._mesh_type @mesh_type.setter def mesh_type(self, value: str) -> None: self._mesh_type = value def write(self, path: Optional[str] = None) -> None: """Writes a KPOINTS file. Args: path: The filepath to write to. """ if path is None: path = self.filepath with open(path, "w") as f: for s in [self.comment, self.n_kpoints, self.mesh_type]: f.write("{}\n".format(s)) for s in [self.mesh_size, self.mesh_shift]: f.write("{} {} {}\n".format(*s))
0.9017
0.287677
from datetime import datetime from itertools import chain, zip_longest from pathlib import Path from typing import Iterable, List, Tuple import jinja2 from pytestdocgen.object import TestCase, TestDir here: Path = Path(__file__).parent # Jinja env with minor tweaks j_env = jinja2.Environment( loader=jinja2.FileSystemLoader(str(here / "templates")), trim_blocks=True, lstrip_blocks=True, ) # A template for test case tc_template = j_env.get_template("test_case.md") utc_now: str = datetime.utcnow().strftime("UTC %Y-%m-%d %H:%M:%S") def next_section_to_template( former_section: Iterable[str], current_section: Iterable[str] ) -> Iterable[Tuple[int, str]]: """ Find a section (structured directory scheme) to be rendered Args: former_section: A last section we rendered current_section: A current section we are working on Yields: Index(depth, 0-based) and the name of section """ for fs_idx_value, cs_idx_value in zip_longest( enumerate(former_section), enumerate(current_section) ): if not fs_idx_value or ( cs_idx_value and fs_idx_value[1] != cs_idx_value[1] ): yield cs_idx_value def general_header(td: TestDir) -> str: """Produce general header""" title = "Test case documentation\n" title += len(title) * "=" + "\n" return ( f'{title}<div style="text-align: right">' f"<p>version: {utc_now}</p>" f"</div>\n" ) def general_footer(td: TestDir) -> str: """Produce general footer""" return f"*documentation created by PyTestDocGen@{utc_now}*" def worth_to_put_in_snippet(code_line: str) -> bool: """Check if a line of source code is worth to be in a code snippet""" if "async " in code_line or "def " in code_line: return True if code_line.strip().startswith("assert"): return True return False def tc_to_markdown(tc: TestCase): """Render test case to markdown""" file_name = "/".join( chain([tc.file.root_dir.name], tc.rel_dir, [tc.file.file_name]) ) tc_position = ( f"{tc.pos[0][0]}:{tc.pos[0][1]} - {tc.pos[1][0]}:{tc.pos[1][1]}" ) code_snippet = "\n".join( [x for x in tc.code.splitlines() if 
worth_to_put_in_snippet(x)] ) return tc_template.render( name=tc.name, file=file_name, pos=tc_position, snippet=code_snippet, summary=tc.parsed_doc.summary, description=tc.parsed_doc.description, sections=tc.parsed_doc.sections, decorators=tc.decorators, ) def td_to_markdown( td: TestDir, custom_header: str = None, custom_footer: str = None ) -> str: """ Render TestDir to the Markdown string Args: td: Instantiated TestDir object custom_header: Custom header to put in the output custom_footer: Custom footer to put in the output Returns: Markdown string """ result: List[str] = [custom_header] if custom_header else [ general_header(td) ] result.append("\n") former_section: Iterable[str] = [] former_page: str = "" for section in td.test_cases: for section_to_render in next_section_to_template( former_section, section ): if section_to_render[0] == 0: result.append("\n***\n") section_name = section_to_render[1].replace("_", " ") section_name = section_name[0].upper() + section_name[1:] section_str = "#" * (section_to_render[0] + 1) + " " section_str += section_name + "\n\n" result.append(section_str) former_section = section for tc in td.test_cases[section]: assert isinstance(tc, TestCase) if former_page != tc.file.page_name: result.append( "#" * 4 + " Test Page: " + tc.file.page_name.replace("_", " ") + "\n" ) former_page = tc.file.page_name result.append(tc_to_markdown(tc)) if custom_footer: result.append(custom_footer) else: result.append(general_footer(td)) result.append("\n") return "".join(result)
pytestdocgen/gendoc.py
from datetime import datetime from itertools import chain, zip_longest from pathlib import Path from typing import Iterable, List, Tuple import jinja2 from pytestdocgen.object import TestCase, TestDir here: Path = Path(__file__).parent # Jinja env with minor tweaks j_env = jinja2.Environment( loader=jinja2.FileSystemLoader(str(here / "templates")), trim_blocks=True, lstrip_blocks=True, ) # A template for test case tc_template = j_env.get_template("test_case.md") utc_now: str = datetime.utcnow().strftime("UTC %Y-%m-%d %H:%M:%S") def next_section_to_template( former_section: Iterable[str], current_section: Iterable[str] ) -> Iterable[Tuple[int, str]]: """ Find a section (structured directory scheme) to be rendered Args: former_section: A last section we rendered current_section: A current section we are working on Yields: Index(depth, 0-based) and the name of section """ for fs_idx_value, cs_idx_value in zip_longest( enumerate(former_section), enumerate(current_section) ): if not fs_idx_value or ( cs_idx_value and fs_idx_value[1] != cs_idx_value[1] ): yield cs_idx_value def general_header(td: TestDir) -> str: """Produce general header""" title = "Test case documentation\n" title += len(title) * "=" + "\n" return ( f'{title}<div style="text-align: right">' f"<p>version: {utc_now}</p>" f"</div>\n" ) def general_footer(td: TestDir) -> str: """Produce general footer""" return f"*documentation created by PyTestDocGen@{utc_now}*" def worth_to_put_in_snippet(code_line: str) -> bool: """Check if a line of source code is worth to be in a code snippet""" if "async " in code_line or "def " in code_line: return True if code_line.strip().startswith("assert"): return True return False def tc_to_markdown(tc: TestCase): """Render test case to markdown""" file_name = "/".join( chain([tc.file.root_dir.name], tc.rel_dir, [tc.file.file_name]) ) tc_position = ( f"{tc.pos[0][0]}:{tc.pos[0][1]} - {tc.pos[1][0]}:{tc.pos[1][1]}" ) code_snippet = "\n".join( [x for x in tc.code.splitlines() if 
worth_to_put_in_snippet(x)] ) return tc_template.render( name=tc.name, file=file_name, pos=tc_position, snippet=code_snippet, summary=tc.parsed_doc.summary, description=tc.parsed_doc.description, sections=tc.parsed_doc.sections, decorators=tc.decorators, ) def td_to_markdown( td: TestDir, custom_header: str = None, custom_footer: str = None ) -> str: """ Render TestDir to the Markdown string Args: td: Instantiated TestDir object custom_header: Custom header to put in the output custom_footer: Custom footer to put in the output Returns: Markdown string """ result: List[str] = [custom_header] if custom_header else [ general_header(td) ] result.append("\n") former_section: Iterable[str] = [] former_page: str = "" for section in td.test_cases: for section_to_render in next_section_to_template( former_section, section ): if section_to_render[0] == 0: result.append("\n***\n") section_name = section_to_render[1].replace("_", " ") section_name = section_name[0].upper() + section_name[1:] section_str = "#" * (section_to_render[0] + 1) + " " section_str += section_name + "\n\n" result.append(section_str) former_section = section for tc in td.test_cases[section]: assert isinstance(tc, TestCase) if former_page != tc.file.page_name: result.append( "#" * 4 + " Test Page: " + tc.file.page_name.replace("_", " ") + "\n" ) former_page = tc.file.page_name result.append(tc_to_markdown(tc)) if custom_footer: result.append(custom_footer) else: result.append(general_footer(td)) result.append("\n") return "".join(result)
0.756268
0.328853
from __future__ import print_function import sys import re import os import six input_file = sys.argv[1] output_dir = sys.argv[2] sys.argv[:] = [] if not os.path.exists(output_dir): os.makedirs(output_dir) import ROOT ROOT.gROOT.SetBatch(True) ROOT.gROOT.SetStyle("Plain") ROOT.gStyle.SetPalette(1) def get_by_type(directory, type): for key in directory.GetListOfKeys(): object = key.ReadObj() if isinstance(object, type): yield object def gini_index(signal, background): signal_integral = signal.GetIntegral() background_integral = background.GetIntegral() total = signal.Integral() + background.Integral() linear = ROOT.TGraph(signal.GetNbinsX()+1) signal_fraction = ROOT.TGraph(signal.GetNbinsX()+1) for ibin in range(0, signal.GetNbinsX()+1): total_fraction_of_sample = ( signal_integral[ibin] + background_integral[ibin])/2.0 linear.SetPoint( ibin, total_fraction_of_sample, total_fraction_of_sample) total_fraction_of_signal = signal_integral[ibin] signal_fraction.SetPoint( ibin, total_fraction_of_sample, total_fraction_of_signal) return 0.5-signal_fraction.Integral() colors = { 'Signal' : ROOT.EColor.kRed, 'Background' : ROOT.EColor.kBlue, } if __name__ == "__main__": # Exit gracefully if not os.path.exists(input_file): print("WARNING: no training control plot .root file found!") sys.exit(0) file = ROOT.TFile(input_file) correlation_canvas = ROOT.TCanvas("corr", "corr", 2000, 1000) correlation_canvas.Divide(2) signal_correlation = file.Get("CorrelationMatrixS") background_correlation = file.Get("CorrelationMatrixB") background_correlation.SetMarkerColor(ROOT.EColor.kBlack) for index, plot in enumerate([signal_correlation, background_correlation]): correlation_canvas.cd(index+1) plot.SetMarkerColor(ROOT.EColor.kBlack) plot.Draw("col") plot.Draw("text, same") plot.GetXaxis().SetLabelSize(0.03) plot.GetYaxis().SetLabelSize(0.03) ROOT.gPad.SetMargin(0.2, 0.1, 0.2, 0.1) correlation_canvas.SaveAs(os.path.join( output_dir, "correlations.png")) method_canvas = 
ROOT.TCanvas("method", "method", 800, 800) method_canvas.cd() # Find the MVA result directory for method_dir in [dir for dir in get_by_type(file, ROOT.TDirectory) if 'Method_' in dir.GetName()]: method_canvas.SetLogy(False) # Strip prefix method_type = method_dir.GetName().replace('Method_', '') print(method_type) result_dir = method_dir.Get(method_type) signal_mva_out = result_dir.Get("MVA_%s_S" % method_type) background_mva_out = result_dir.Get("MVA_%s_B" % method_type) signal_mva_out.SetLineColor(colors['Signal']) background_mva_out.SetLineColor(colors['Background']) stack = ROOT.THStack("stack", "MVA Output") stack.Add(signal_mva_out, "HIST") stack.Add(background_mva_out, "HIST") stack.Draw("nostack") method_canvas.SaveAs(os.path.join( output_dir, "%s_mva_output.png" % method_type)) perf_curve = result_dir.Get("MVA_%s_effBvsS" % method_type) perf_curve.Draw() perf_curve.SetMinimum(1e-4) method_canvas.SetLogy(True) method_canvas.SaveAs(os.path.join(output_dir, "%s_performance.png" % method_type)) input_var_dir = file.Get("InputVariables_NoTransform") if not input_var_dir: input_var_dir = file.Get("InputVariables_Id") matcher = re.compile("(?P<name>[^_]*)__(?P<type>[A-Za-z0-9]*)_Id") input_distributions = {} for histo in get_by_type(input_var_dir, ROOT.TH1F): rawname = histo.GetName() match = matcher.match(rawname) name = match.group('name') type = match.group('type') histo.Scale(1.0/histo.Integral()) histo.SetLineColor(colors[type]) histo_info = input_distributions.setdefault(name, {}) histo_info[type] = histo variable_canvas = ROOT.TCanvas("var", "var", 1000, 1000) for variable, histograms in six.iteritems(input_distributions): maximum = max(histograms[type].GetMaximum() for type in ['Signal', 'Background']) for type in ['Signal', 'Background']: histograms[type].SetLineWidth(2) # Tgraph integral not in ROOT 5.27? 
gini = gini_index(histograms['Signal'], histograms['Background']) histograms['Signal'].SetMaximum(1.2*maximum) histograms['Signal'].SetTitle(variable + " gini: %0.2f" % gini) histograms['Signal'].Draw() histograms['Background'].Draw('same') variable_canvas.SaveAs(os.path.join( output_dir, variable + ".png"))
RecoTauTag/TauTagTools/test/training/training_control_plots.py
from __future__ import print_function import sys import re import os import six input_file = sys.argv[1] output_dir = sys.argv[2] sys.argv[:] = [] if not os.path.exists(output_dir): os.makedirs(output_dir) import ROOT ROOT.gROOT.SetBatch(True) ROOT.gROOT.SetStyle("Plain") ROOT.gStyle.SetPalette(1) def get_by_type(directory, type): for key in directory.GetListOfKeys(): object = key.ReadObj() if isinstance(object, type): yield object def gini_index(signal, background): signal_integral = signal.GetIntegral() background_integral = background.GetIntegral() total = signal.Integral() + background.Integral() linear = ROOT.TGraph(signal.GetNbinsX()+1) signal_fraction = ROOT.TGraph(signal.GetNbinsX()+1) for ibin in range(0, signal.GetNbinsX()+1): total_fraction_of_sample = ( signal_integral[ibin] + background_integral[ibin])/2.0 linear.SetPoint( ibin, total_fraction_of_sample, total_fraction_of_sample) total_fraction_of_signal = signal_integral[ibin] signal_fraction.SetPoint( ibin, total_fraction_of_sample, total_fraction_of_signal) return 0.5-signal_fraction.Integral() colors = { 'Signal' : ROOT.EColor.kRed, 'Background' : ROOT.EColor.kBlue, } if __name__ == "__main__": # Exit gracefully if not os.path.exists(input_file): print("WARNING: no training control plot .root file found!") sys.exit(0) file = ROOT.TFile(input_file) correlation_canvas = ROOT.TCanvas("corr", "corr", 2000, 1000) correlation_canvas.Divide(2) signal_correlation = file.Get("CorrelationMatrixS") background_correlation = file.Get("CorrelationMatrixB") background_correlation.SetMarkerColor(ROOT.EColor.kBlack) for index, plot in enumerate([signal_correlation, background_correlation]): correlation_canvas.cd(index+1) plot.SetMarkerColor(ROOT.EColor.kBlack) plot.Draw("col") plot.Draw("text, same") plot.GetXaxis().SetLabelSize(0.03) plot.GetYaxis().SetLabelSize(0.03) ROOT.gPad.SetMargin(0.2, 0.1, 0.2, 0.1) correlation_canvas.SaveAs(os.path.join( output_dir, "correlations.png")) method_canvas = 
ROOT.TCanvas("method", "method", 800, 800) method_canvas.cd() # Find the MVA result directory for method_dir in [dir for dir in get_by_type(file, ROOT.TDirectory) if 'Method_' in dir.GetName()]: method_canvas.SetLogy(False) # Strip prefix method_type = method_dir.GetName().replace('Method_', '') print(method_type) result_dir = method_dir.Get(method_type) signal_mva_out = result_dir.Get("MVA_%s_S" % method_type) background_mva_out = result_dir.Get("MVA_%s_B" % method_type) signal_mva_out.SetLineColor(colors['Signal']) background_mva_out.SetLineColor(colors['Background']) stack = ROOT.THStack("stack", "MVA Output") stack.Add(signal_mva_out, "HIST") stack.Add(background_mva_out, "HIST") stack.Draw("nostack") method_canvas.SaveAs(os.path.join( output_dir, "%s_mva_output.png" % method_type)) perf_curve = result_dir.Get("MVA_%s_effBvsS" % method_type) perf_curve.Draw() perf_curve.SetMinimum(1e-4) method_canvas.SetLogy(True) method_canvas.SaveAs(os.path.join(output_dir, "%s_performance.png" % method_type)) input_var_dir = file.Get("InputVariables_NoTransform") if not input_var_dir: input_var_dir = file.Get("InputVariables_Id") matcher = re.compile("(?P<name>[^_]*)__(?P<type>[A-Za-z0-9]*)_Id") input_distributions = {} for histo in get_by_type(input_var_dir, ROOT.TH1F): rawname = histo.GetName() match = matcher.match(rawname) name = match.group('name') type = match.group('type') histo.Scale(1.0/histo.Integral()) histo.SetLineColor(colors[type]) histo_info = input_distributions.setdefault(name, {}) histo_info[type] = histo variable_canvas = ROOT.TCanvas("var", "var", 1000, 1000) for variable, histograms in six.iteritems(input_distributions): maximum = max(histograms[type].GetMaximum() for type in ['Signal', 'Background']) for type in ['Signal', 'Background']: histograms[type].SetLineWidth(2) # Tgraph integral not in ROOT 5.27? 
gini = gini_index(histograms['Signal'], histograms['Background']) histograms['Signal'].SetMaximum(1.2*maximum) histograms['Signal'].SetTitle(variable + " gini: %0.2f" % gini) histograms['Signal'].Draw() histograms['Background'].Draw('same') variable_canvas.SaveAs(os.path.join( output_dir, variable + ".png"))
0.506103
0.145722
import numpy as np
import cv2

# Constants
ALPHA = 0.5
FONT = cv2.FONT_HERSHEY_PLAIN
TEXT_SCALE = 1.0
TEXT_THICKNESS = 1
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)


def gen_colors(num_colors):
    """Generate different colors, one per class.

    # Arguments
      num_colors: total number of colors/classes.

    # Output
      bgrs: a list of (B, G, R) tuples which correspond to each of
            the colors/classes.
    """
    import random
    import colorsys
    # Evenly spaced hues with fixed saturation/value; shuffled with a
    # fixed seed so the class->color mapping is deterministic per run.
    hsvs = [[float(x) / num_colors, 1., 0.7] for x in range(num_colors)]
    random.seed(1234)
    random.shuffle(hsvs)
    rgbs = list(map(lambda x: list(colorsys.hsv_to_rgb(*x)), hsvs))
    bgrs = [(int(rgb[2] * 255), int(rgb[1] * 255), int(rgb[0] * 255))
            for rgb in rgbs]
    return bgrs


def draw_boxed_text(img, text, topleft, color):
    """Draw a translucent boxed text in white, overlayed on top of a
    colored patch surrounded by a black border. FONT, TEXT_SCALE,
    TEXT_THICKNESS and ALPHA values are constants (fixed) as defined
    on top.

    # Arguments
      img: the input image as a numpy array.
      text: the text to be drawn.
      topleft: XY coordinate of the topleft corner of the boxed text.
      color: color of the patch, i.e. background of the text.

    # Output
      img: note the original image is modified inplace.
    """
    assert img.dtype == np.uint8
    img_h, img_w, _ = img.shape
    if topleft[0] >= img_w or topleft[1] >= img_h:
        # BUG FIX: previously this path returned None; callers assign the
        # result back (img = draw_boxed_text(...)), so losing the image
        # crashed subsequent cv2 calls. Return the untouched image instead.
        return img
    margin = 3
    size = cv2.getTextSize(text, FONT, TEXT_SCALE, TEXT_THICKNESS)
    w = size[0][0] + margin * 2
    h = size[0][1] + margin * 2
    # the patch is used to draw boxed text
    patch = np.zeros((h, w, 3), dtype=np.uint8)
    patch[...] = color
    cv2.putText(patch, text, (margin+1, h-margin-2), FONT, TEXT_SCALE,
                WHITE, thickness=TEXT_THICKNESS, lineType=cv2.LINE_8)
    cv2.rectangle(patch, (0, 0), (w-1, h-1), BLACK, thickness=1)
    w = min(w, img_w - topleft[0])  # clip overlay at image boundary
    h = min(h, img_h - topleft[1])
    # Overlay the boxed text onto region of interest (roi) in img
    roi = img[topleft[1]:topleft[1]+h, topleft[0]:topleft[0]+w, :]
    cv2.addWeighted(patch[0:h, 0:w, :], ALPHA, roi, 1 - ALPHA, 0, roi)
    return img


class BBoxVisualization():
    """BBoxVisualization class implements nice drawing of bounding boxes.

    # Arguments
      cls_dict: a dictionary used to translate class id to its name.
    """

    def __init__(self, cls_dict):
        self.cls_dict = cls_dict
        self.colors = gen_colors(len(cls_dict))

    def draw_bboxes(self, img, box, conf, cls):
        """Draw detected bounding boxes on the original image.

        Returns (img, MSG) where MSG is a navigation string of the form
        '<L|R|A>_<S|X>_<area>': L/R = turn direction derived from the
        relative x positions of class-1 ('on') and class-2 ('off')
        detections (A when both were not seen), S/X = stairs (class 3)
        detected or not, area = 4-digit zero-padded pixel area of the
        'on' detection (0000 when absent, capped at 9999).
        """
        x_center, y_center = 0, 0
        ON = False
        OFF = False
        STAIRS = False
        MSG = 'A'
        ON_AREA = 0
        for bb, cf, cl in zip(box, conf, cls):
            cl = int(cl)
            if cf >= 0.85:  # only draw/consume confident detections
                y_min, x_min, y_max, x_max = bb[0], bb[1], bb[2], bb[3]
                x_center = (x_min + x_max) / 2
                y_center = (y_min + y_max) / 2
                color = self.colors[cl]
                cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color, 2)
                txt_loc = (max(x_min + 2, 0), max(y_min + 2, 0))
                cls_name = self.cls_dict.get(cl, 'CLS{}'.format(cl))
                txt = '{} {:.2f}'.format(cls_name, cf)
                img = draw_boxed_text(img, txt, txt_loc, color)
                if cl == 1:  # 'green on' marker
                    green_on_x = x_center
                    ON = True
                    ON_AREA = (x_max - x_min) * (y_max - y_min)
                if cl == 2:  # 'green off' marker
                    green_off_x = x_center
                    OFF = True
                if cl == 3:  # stairs
                    STAIRS = True
        # Zero-pad the area to exactly 4 digits, saturating at 9999
        # (replaces the old if/elif padding ladder).
        msg_area = str(min(int(ON_AREA), 9999)).zfill(4)
        if ON and OFF:
            # Turn toward the 'on' marker relative to the 'off' marker.
            MSG = 'L' if green_on_x < green_off_x else 'R'
        MSG += '_S' if STAIRS else '_X'
        MSG += '_' + msg_area
        return img, MSG
src/robot_deep_learning/src/utils/visualization.py
import numpy as np
import cv2

# Constants used by draw_boxed_text (font, overlay opacity, palette).
ALPHA = 0.5
FONT = cv2.FONT_HERSHEY_PLAIN
TEXT_SCALE = 1.0
TEXT_THICKNESS = 1
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)


def gen_colors(num_colors):
    """Generate different colors, one per class.

    # Arguments
      num_colors: total number of colors/classes.

    # Output
      bgrs: a list of (B, G, R) tuples which correspond to each of
            the colors/classes.
    """
    import random
    import colorsys
    # Evenly spaced hues; shuffle with a fixed seed so the mapping from
    # class id to color is deterministic across runs.
    hsvs = [[float(x) / num_colors, 1., 0.7] for x in range(num_colors)]
    random.seed(1234)
    random.shuffle(hsvs)
    rgbs = list(map(lambda x: list(colorsys.hsv_to_rgb(*x)), hsvs))
    bgrs = [(int(rgb[2] * 255), int(rgb[1] * 255), int(rgb[0] * 255))
            for rgb in rgbs]
    return bgrs


def draw_boxed_text(img, text, topleft, color):
    """Draw a translucent boxed text in white, overlayed on top of a
    colored patch surrounded by a black border. FONT, TEXT_SCALE,
    TEXT_THICKNESS and ALPHA values are constants (fixed) as defined
    on top.

    # Arguments
      img: the input image as a numpy array.
      text: the text to be drawn.
      topleft: XY coordinate of the topleft corner of the boxed text.
      color: color of the patch, i.e. background of the text.

    # Output
      img: note the original image is modified inplace.
    """
    assert img.dtype == np.uint8
    img_h, img_w, _ = img.shape
    if topleft[0] >= img_w or topleft[1] >= img_h:
        # NOTE(review): this returns None, but callers assign the result
        # back to their image variable (img = draw_boxed_text(...)),
        # which clobbers it. Should probably be `return img` -- verify.
        return
    margin = 3
    size = cv2.getTextSize(text, FONT, TEXT_SCALE, TEXT_THICKNESS)
    w = size[0][0] + margin * 2
    h = size[0][1] + margin * 2
    # the patch is used to draw boxed text
    patch = np.zeros((h, w, 3), dtype=np.uint8)
    patch[...] = color
    cv2.putText(patch, text, (margin+1, h-margin-2), FONT, TEXT_SCALE,
                WHITE, thickness=TEXT_THICKNESS, lineType=cv2.LINE_8)
    cv2.rectangle(patch, (0, 0), (w-1, h-1), BLACK, thickness=1)
    w = min(w, img_w - topleft[0])  # clip overlay at image boundary
    h = min(h, img_h - topleft[1])
    # Overlay the boxed text onto region of interest (roi) in img
    roi = img[topleft[1]:topleft[1]+h, topleft[0]:topleft[0]+w, :]
    cv2.addWeighted(patch[0:h, 0:w, :], ALPHA, roi, 1 - ALPHA, 0, roi)
    return img


class BBoxVisualization():
    """BBoxVisualization class implements nice drawing of bounding boxes.

    # Arguments
      cls_dict: a dictionary used to translate class id to its name.
    """

    def __init__(self, cls_dict):
        self.cls_dict = cls_dict
        self.colors = gen_colors(len(cls_dict))

    def draw_bboxes(self, img, box, conf, cls):
        """Draw detected bounding boxes on the original image.

        Returns (img, MSG); MSG encodes '<L|R|A>_<S|X>_<area>' where
        L/R is a turn direction from the relative x positions of the
        class-1 ('on') and class-2 ('off') detections, S/X flags class-3
        (stairs), and area is the zero-padded pixel area of the 'on' box.
        """
        x_center,y_center = 0,0
        # NOTE(review): the *_y pair below is never read afterwards.
        green_on_y,green_off_y = 0,0
        ON = False
        OFF = False
        STAIRS = False
        MSG = 'A'
        msg_area = 'A'
        ON_AREA = 0
        for bb, cf, cl in zip(box, conf, cls):
            cl = int(cl)
            # Only detections above a fixed 0.85 confidence are used.
            if(cf>=0.85):
                y_min, x_min, y_max, x_max = bb[0], bb[1], bb[2], bb[3]
                x_center = (x_min+x_max)/2
                y_center = (y_min+y_max)/2
                color = self.colors[cl]
                cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color, 2)
                txt_loc = (max(x_min+2, 0), max(y_min+2, 0))
                cls_name = self.cls_dict.get(cl, 'CLS{}'.format(cl))
                txt = '{} {:.2f}'.format(cls_name, cf)
                img = draw_boxed_text(img, txt, txt_loc, color)
                if(cl == 1):  # 'green on' marker
                    green_on_x = x_center
                    ON = True
                    ON_AREA = (x_max-x_min)*(y_max-y_min)
                    #print("area:",ON_AREA)
                if(cl == 2):  # 'green off' marker
                    green_off_x = x_center
                    OFF = True
                if(cl == 3):  # stairs
                    STAIRS = True
        # Manual zero-padding of the area to 4 digits, capped at "9999".
        msg_length = len(str(ON_AREA))
        if(ON_AREA==0):
            msg_area = "0000"
        elif(msg_length==1):
            msg_area = "000" + str(ON_AREA)
        elif(msg_length==2):
            msg_area = "00" + str(ON_AREA)
        elif(msg_length==3):
            msg_area = "0" + str(ON_AREA)
        elif(msg_length==4):
            msg_area = str(ON_AREA)
        else:
            msg_area = "9999"
        if(ON and OFF):
            if(green_on_x<green_off_x):
                #turn left
                MSG = 'L'
            else:
                #turn right
                MSG = 'R'
        if(STAIRS):
            MSG += '_S'
        else:
            MSG += '_X'
        MSG += '_'+msg_area
        return img,MSG
0.60964
0.428473
import yaml
import os
import re
import dateparser
from unidecode import unidecode
import logging as logger
from collections import OrderedDict
from .plugins import lines

OPTIONS_DEFAULT = {
    'remove_whitespace': False,
    'remove_accents': False,
    'lowercase': False,
    'currency': 'EUR',
    'date_formats': [],
    'languages': [],
    'decimal_separator': '.',
    'replace': [],  # example: see templates/fr/fr.free.mobile.yml
}

PLUGIN_MAPPING = {
    'lines': lines
}


class InvoiceTemplate(OrderedDict):
    """
    Represents single template files that live as .yml files on the disk.

    Methods
    -------
    prepare_input(extracted_str)
        Input raw string and do transformations, as set in template file.
    matches_input(optimized_str)
        See if string matches keywords set in template file
    parse_number(value)
        Parse number, remove decimal separator and add other options
    parse_date(value)
        Parses date and returns date after parsing
    coerce_type(value, target_type)
        change type of values
    extract(optimized_str)
        Given a template file and a string, extract matching data fields.
    """

    def __init__(self, *args, **kwargs):
        super(InvoiceTemplate, self).__init__(*args, **kwargs)

        # Merge template-specific options with defaults
        self.options = OPTIONS_DEFAULT.copy()
        if 'options' in self:
            self.options.update(self['options'])

        # BUG FIX: validate language codes *after* merging template
        # options; previously this loop ran over the (empty) defaults
        # and never checked the template's own languages.
        for lang in self.options['languages']:
            assert len(lang) == 2, 'lang code must have 2 letters'

        # Set issuer, if it doesn't exist.
        if 'issuer' not in self.keys():
            self['issuer'] = self['keywords'][0]

    def prepare_input(self, extracted_str):
        """
        Input raw string and do transformations, as set in template file.
        """
        # Remove whitespace
        if self.options['remove_whitespace']:
            optimized_str = re.sub(' +', '', extracted_str)
        else:
            optimized_str = extracted_str

        # Remove accents
        if self.options['remove_accents']:
            optimized_str = unidecode(optimized_str)

        # Convert to lower case
        if self.options['lowercase']:
            optimized_str = optimized_str.lower()

        # Apply template-specific literal replacements
        for replace in self.options['replace']:
            assert len(replace) == 2, 'A replace should be a list of 2 items'
            optimized_str = optimized_str.replace(replace[0], replace[1])

        return optimized_str

    def matches_input(self, optimized_str):
        """See if string matches keywords set in template file"""
        if all([keyword in optimized_str for keyword in self['keywords']]):
            logger.debug('Matched template %s', self['template_name'])
            return True

    def parse_number(self, value):
        """Parse a localized number string into a float.

        Honors the template's decimal separator and strips any
        thousands separators (dot, comma or whitespace).
        """
        assert value.count(self.options['decimal_separator']) < 2,\
            'Decimal separator cannot be present several times'
        # Replace decimal separator by a | placeholder so it survives
        # the thousands-separator strip below.
        amount_pipe = value.replace(self.options['decimal_separator'], '|')
        # Remove all possible thousands separators (raw string fixes the
        # previously invalid '\s' escape).
        amount_pipe_no_thousand_sep = re.sub(r'[.,\s]', '', amount_pipe)
        # Put dot back as the decimal separator.
        return float(amount_pipe_no_thousand_sep.replace('|', '.'))

    def parse_date(self, value):
        """Parses date and returns date after parsing"""
        res = dateparser.parse(
            value,
            date_formats=self.options['date_formats'],
            languages=self.options['languages'])
        logger.debug("result of date parsing=%s", res)
        return res

    def coerce_type(self, value, target_type):
        """Coerce a matched string to int, float or date.

        Empty strings coerce to 0 / 0.0 for numeric targets.
        """
        if target_type == 'int':
            if not value.strip():
                return 0
            return int(self.parse_number(value))
        elif target_type == 'float':
            if not value.strip():
                return 0.0
            return float(self.parse_number(value))
        elif target_type == 'date':
            return self.parse_date(value)
        assert False, 'Unknown type'

    def extract(self, optimized_str):
        """
        Given a template file and a string, extract matching data fields.

        Returns a dict of extracted fields, or None when a date failed
        to parse or a required field is missing.
        """
        logger.debug('START optimized_str ========================')
        logger.debug(optimized_str)
        logger.debug('END optimized_str ==========================')
        logger.debug(
            'Date parsing: languages=%s date_formats=%s',
            self.options['languages'], self.options['date_formats'])
        logger.debug('Float parsing: decimal separator=%s',
                     self.options['decimal_separator'])
        logger.debug("keywords=%s", self['keywords'])
        logger.debug(self.options)

        # Try to find data for each field.
        output = {}
        output['issuer'] = self['issuer']

        for k, v in self['fields'].items():
            if k.startswith('static_'):
                logger.debug("field=%s | static value=%s", k, v)
                output[k.replace('static_', '')] = v
            else:
                logger.debug("field=%s | regexp=%s", k, v)

                # 'sum_amount*' fields with a regex list add up every
                # match instead of taking the first one.
                sum_field = False
                if k.startswith('sum_amount') and type(v) is list:
                    k = k[4:]  # remove 'sum_' prefix
                    sum_field = True
                # Fields can have multiple expressions
                if type(v) is list:
                    res_find = []
                    for v_option in v:
                        res_val = re.findall(v_option, optimized_str)
                        if res_val:
                            # += and extend were equivalent here; unified.
                            res_find.extend(res_val)
                else:
                    res_find = re.findall(v, optimized_str)
                if res_find:
                    logger.debug("res_find=%s", res_find)
                    if k.startswith('date') or k.endswith('date'):
                        output[k] = self.parse_date(res_find[0])
                        if not output[k]:
                            logger.error(
                                "Date parsing failed on date '%s'",
                                res_find[0])
                            return None
                    elif k.startswith('amount'):
                        if sum_field:
                            output[k] = 0
                            for amount_to_parse in res_find:
                                output[k] += self.parse_number(
                                    amount_to_parse)
                        else:
                            output[k] = self.parse_number(res_find[0])
                    else:
                        res_find = list(set(res_find))
                        if len(res_find) == 1:
                            output[k] = res_find[0]
                        else:
                            output[k] = res_find
                else:
                    logger.warning("regexp for field %s didn't match", k)

        output['currency'] = self.options['currency']

        # Run plugins:
        for plugin_keyword, plugin_func in PLUGIN_MAPPING.items():
            if plugin_keyword in self.keys():
                plugin_func.extract(self, optimized_str, output)

        # If required fields were found, return output, else log error.
        if 'required_fields' not in self.keys():
            required_fields = ['date', 'amount', 'invoice_number', 'issuer']
        else:
            required_fields = list(self['required_fields'])

        if set(required_fields).issubset(output.keys()):
            output['desc'] = 'Invoice from %s' % (self['issuer'])
            logger.debug(output)
            return output
        else:
            # BUG FIX: the message had no %s placeholder, so passing
            # `output` raised a logging formatting error at runtime.
            logger.error('Unable to match some fields: %s', output)
            return None
invoice2data/extract/invoice_template.py
import yaml
import os
import re
import dateparser
from unidecode import unidecode
import logging as logger
from collections import OrderedDict
from .plugins import lines

# Default option values; individual templates override these via their
# own 'options' mapping.
OPTIONS_DEFAULT = {
    'remove_whitespace': False,
    'remove_accents': False,
    'lowercase': False,
    'currency': 'EUR',
    'date_formats': [],
    'languages': [],
    'decimal_separator': '.',
    'replace': [],  # example: see templates/fr/fr.free.mobile.yml
}

# Maps a template keyword to the plugin module that handles it.
PLUGIN_MAPPING = {
    'lines': lines
}


class InvoiceTemplate(OrderedDict):
    """
    Represents single template files that live as .yml files on the disk.

    Methods
    -------
    prepare_input(extracted_str)
        Input raw string and do transformations, as set in template file.
    matches_input(optimized_str)
        See if string matches keywords set in template file
    parse_number(value)
        Parse number, remove decimal separator and add other options
    parse_date(value)
        Parses date and returns date after parsing
    coerce_type(value, target_type)
        change type of values
    extract(optimized_str)
        Given a template file and a string, extract matching data fields.
    """

    def __init__(self, *args, **kwargs):
        super(InvoiceTemplate, self).__init__(*args, **kwargs)

        # Merge template-specific options with defaults
        self.options = OPTIONS_DEFAULT.copy()

        # NOTE(review): this validation runs *before* the template's own
        # options are merged below, so it only ever sees the empty
        # default list and never checks the template's languages.
        for lang in self.options['languages']:
            assert len(lang) == 2, 'lang code must have 2 letters'

        if 'options' in self:
            self.options.update(self['options'])

        # Set issuer, if it doesn't exist.
        if 'issuer' not in self.keys():
            self['issuer'] = self['keywords'][0]

    def prepare_input(self, extracted_str):
        """
        Input raw string and do transformations, as set in template file.
        """
        # Remove whitespace
        if self.options['remove_whitespace']:
            optimized_str = re.sub(' +', '', extracted_str)
        else:
            optimized_str = extracted_str

        # Remove accents
        if self.options['remove_accents']:
            optimized_str = unidecode(optimized_str)

        # convert to lower case
        if self.options['lowercase']:
            optimized_str = optimized_str.lower()

        # specific replace
        for replace in self.options['replace']:
            assert len(replace) == 2, 'A replace should be a list of 2 items'
            optimized_str = optimized_str.replace(replace[0], replace[1])

        return optimized_str

    def matches_input(self, optimized_str):
        """See if string matches keywords set in template file"""
        if all([keyword in optimized_str for keyword in self['keywords']]):
            logger.debug('Matched template %s', self['template_name'])
            return True

    def parse_number(self, value):
        """Parse a localized number string into a float, honoring the
        template's decimal separator and stripping thousands separators.
        """
        assert value.count(self.options['decimal_separator']) < 2,\
            'Decimal separator cannot be present several times'
        # replace decimal separator by a |
        amount_pipe = value.replace(self.options['decimal_separator'], '|')
        # remove all possible thousands separators
        amount_pipe_no_thousand_sep = re.sub(
            '[.,\s]', '', amount_pipe)
        # put dot as decimal sep
        return float(amount_pipe_no_thousand_sep.replace('|', '.'))

    def parse_date(self, value):
        """Parses date and returns date after parsing"""
        res = dateparser.parse(
            value, date_formats=self.options['date_formats'],
            languages=self.options['languages'])
        logger.debug("result of date parsing=%s", res)
        return res

    def coerce_type(self, value, target_type):
        # Empty strings coerce to 0 / 0.0 for numeric targets.
        if target_type == 'int':
            if not value.strip():
                return 0
            return int(self.parse_number(value))
        elif target_type == 'float':
            if not value.strip():
                return 0.0
            return float(self.parse_number(value))
        elif target_type == 'date':
            return self.parse_date(value)
        assert False, 'Unknown type'

    def extract(self, optimized_str):
        """
        Given a template file and a string, extract matching data fields.
        """
        logger.debug('START optimized_str ========================')
        logger.debug(optimized_str)
        logger.debug('END optimized_str ==========================')
        logger.debug(
            'Date parsing: languages=%s date_formats=%s',
            self.options['languages'], self.options['date_formats'])
        logger.debug('Float parsing: decimal separator=%s',
                     self.options['decimal_separator'])
        logger.debug("keywords=%s", self['keywords'])
        logger.debug(self.options)

        # Try to find data for each field.
        output = {}
        output['issuer'] = self['issuer']

        for k, v in self['fields'].items():
            if k.startswith('static_'):
                logger.debug("field=%s | static value=%s", k, v)
                output[k.replace('static_', '')] = v
            else:
                logger.debug("field=%s | regexp=%s", k, v)

                # 'sum_amount*' fields with a list of regexes add up all
                # matches instead of taking the first.
                sum_field = False
                if k.startswith('sum_amount') and type(v) is list:
                    k = k[4:]  # remove 'sum_' prefix
                    sum_field = True
                # Fields can have multiple expressions
                if type(v) is list:
                    res_find = []
                    for v_option in v:
                        res_val = re.findall(v_option, optimized_str)
                        if res_val:
                            if sum_field:
                                res_find += res_val
                            else:
                                res_find.extend(res_val)
                else:
                    res_find = re.findall(v, optimized_str)
                if res_find:
                    logger.debug("res_find=%s", res_find)
                    if k.startswith('date') or k.endswith('date'):
                        output[k] = self.parse_date(res_find[0])
                        if not output[k]:
                            logger.error(
                                "Date parsing failed on date '%s'",
                                res_find[0])
                            return None
                    elif k.startswith('amount'):
                        if sum_field:
                            output[k] = 0
                            for amount_to_parse in res_find:
                                output[k] += self.parse_number(
                                    amount_to_parse)
                        else:
                            output[k] = self.parse_number(res_find[0])
                    else:
                        res_find = list(set(res_find))
                        if len(res_find) == 1:
                            output[k] = res_find[0]
                        else:
                            output[k] = res_find
                else:
                    logger.warning("regexp for field %s didn't match", k)

        output['currency'] = self.options['currency']

        # Run plugins:
        for plugin_keyword, plugin_func in PLUGIN_MAPPING.items():
            if plugin_keyword in self.keys():
                plugin_func.extract(self, optimized_str, output)

        # If required fields were found, return output, else log error.
        if 'required_fields' not in self.keys():
            required_fields = ['date', 'amount', 'invoice_number', 'issuer']
        else:
            required_fields = []
            for v in self['required_fields']:
                required_fields.append(v)

        if set(required_fields).issubset(output.keys()):
            output['desc'] = 'Invoice from %s' % (self['issuer'])
            logger.debug(output)
            return output
        else:
            # NOTE(review): the message has no %s placeholder, so the
            # extra `output` argument triggers a logging formatting
            # error at runtime -- verify and fix upstream.
            logger.error('Unable to match some fields:', output)
            return None
0.619586
0.247726
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser


# Classroom (was: кабинет)
class Room(models.Model):
    """A physical classroom, identified only by its room number."""
    room = models.IntegerField()


# Students (was: ученики)
class Student(models.Model):
    """A pupil; class membership is modeled via StudentsGroup."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    gender = models.CharField(max_length=6, choices=(('male','male'), ('female','female')))


# School class/group (was: класс)
class Groups(models.Model):
    """A named school class, e.g. '5A'."""
    name = models.CharField(max_length=5)


# Which class each student belongs to (was: в каком классе учатся ученики)
class StudentsGroup(models.Model):
    """Join table linking a Student to the Groups they study in."""
    student_id = models.ForeignKey(Student, on_delete=models.CASCADE)
    class_id = models.ForeignKey(Groups, on_delete=models.CASCADE, null=True)


# Teacher (was: учитель); doubles as the project's auth user model.
class Teacher(AbstractUser):
    """Custom user model: a teacher optionally tied to a homeroom
    group and a room. Extra fields are prompted by createsuperuser
    via REQUIRED_FIELDS.
    """
    patronymic = models.CharField(max_length=30)  # patronymic (was: отчество)
    group_id = models.ForeignKey(StudentsGroup, on_delete=models.CASCADE, null=True, blank=True)
    room_id = models.ForeignKey(Room, on_delete=models.CASCADE, null=True, blank=True)
    REQUIRED_FIELDS = ["first_name", "last_name", "patronymic", "group_id", "room_id"]


# Subject (was: предмет)
class Subject(models.Model):
    """A taught subject with its responsible teacher and track status."""
    subject = models.CharField(max_length=30)
    teacher_id = models.ForeignKey(Teacher, on_delete=models.CASCADE)
    status = models.CharField(max_length=15, choices=(('basic','basic'), ('profile','profile')))


# Grade (was: оценка)
class Grades(models.Model):
    """A single mark (1-5) for a student in a subject and quarter."""
    student_id = models.ForeignKey(Student, on_delete=models.CASCADE)
    subject_id = models.ForeignKey(Subject, on_delete=models.CASCADE)
    grade = models.CharField(max_length=1, choices=(('1','1'), ('2','2'), ('3','3'),('4','4'), ('5','5')))
    quarter = models.CharField(max_length=1, choices=(('1','1'), ('2','2'), ('3','3'),('4','4')))


# Timetable (was: расписание)
class Timetable(models.Model):
    """One scheduled lesson slot: who teaches what, where, to whom,
    on which weekday (1-6) and in which lesson period (1-6).
    """
    teacher_id = models.ForeignKey(Teacher, on_delete=models.CASCADE)
    room_id = models.ForeignKey(Room, on_delete=models.CASCADE)
    subject_id = models.ForeignKey(Subject, on_delete=models.CASCADE)
    class_id = models.ForeignKey(StudentsGroup, on_delete=models.CASCADE, null=True)
    day_of_week = models.CharField(max_length=30,
                                   choices=(('1','1'), ('2','2'), ('3','3'),('4','4'), ('5','5'), ('6','6')),
                                   default='1')
    lesson = models.CharField(max_length=30,
                              choices=(('1','1'), ('2','2'), ('3','3'),('4','4'), ('5','5'), ('6','6')),
                              default='1')


# Who teaches which subject (was: какой предмет кто ведет)
class Teaching(models.Model):
    """Join table between the auth user (teacher) and a Subject."""
    id_teacher = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    id_subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
students/K33422/Izmaylova_Anna/web_lab3/lr_3 school/school_app/models.py
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser


# Classroom (was: кабинет)
class Room(models.Model):
    """A physical classroom, identified only by its room number."""
    room = models.IntegerField()


# Students (was: ученики)
class Student(models.Model):
    """A pupil; class membership is modeled via StudentsGroup."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    gender = models.CharField(max_length=6, choices=(('male','male'), ('female','female')))


# School class/group (was: класс)
class Groups(models.Model):
    """A named school class, e.g. '5A'."""
    name = models.CharField(max_length=5)


# Which class each student belongs to (was: в каком классе учатся ученики)
class StudentsGroup(models.Model):
    """Join table linking a Student to the Groups they study in."""
    student_id = models.ForeignKey(Student, on_delete=models.CASCADE)
    class_id = models.ForeignKey(Groups, on_delete=models.CASCADE, null=True)


# Teacher (was: учитель); doubles as the project's auth user model.
class Teacher(AbstractUser):
    """Custom user model: a teacher optionally tied to a homeroom
    group and a room. Extra fields are prompted by createsuperuser
    via REQUIRED_FIELDS.
    """
    patronymic = models.CharField(max_length=30)  # patronymic (was: отчество)
    group_id = models.ForeignKey(StudentsGroup, on_delete=models.CASCADE, null=True, blank=True)
    room_id = models.ForeignKey(Room, on_delete=models.CASCADE, null=True, blank=True)
    REQUIRED_FIELDS = ["first_name", "last_name", "patronymic", "group_id", "room_id"]


# Subject (was: предмет)
class Subject(models.Model):
    """A taught subject with its responsible teacher and track status."""
    subject = models.CharField(max_length=30)
    teacher_id = models.ForeignKey(Teacher, on_delete=models.CASCADE)
    status = models.CharField(max_length=15, choices=(('basic','basic'), ('profile','profile')))


# Grade (was: оценка)
class Grades(models.Model):
    """A single mark (1-5) for a student in a subject and quarter."""
    student_id = models.ForeignKey(Student, on_delete=models.CASCADE)
    subject_id = models.ForeignKey(Subject, on_delete=models.CASCADE)
    grade = models.CharField(max_length=1, choices=(('1','1'), ('2','2'), ('3','3'),('4','4'), ('5','5')))
    quarter = models.CharField(max_length=1, choices=(('1','1'), ('2','2'), ('3','3'),('4','4')))


# Timetable (was: расписание)
class Timetable(models.Model):
    """One scheduled lesson slot: who teaches what, where, to whom,
    on which weekday (1-6) and in which lesson period (1-6).
    """
    teacher_id = models.ForeignKey(Teacher, on_delete=models.CASCADE)
    room_id = models.ForeignKey(Room, on_delete=models.CASCADE)
    subject_id = models.ForeignKey(Subject, on_delete=models.CASCADE)
    class_id = models.ForeignKey(StudentsGroup, on_delete=models.CASCADE, null=True)
    day_of_week = models.CharField(max_length=30,
                                   choices=(('1','1'), ('2','2'), ('3','3'),('4','4'), ('5','5'), ('6','6')),
                                   default='1')
    lesson = models.CharField(max_length=30,
                              choices=(('1','1'), ('2','2'), ('3','3'),('4','4'), ('5','5'), ('6','6')),
                              default='1')


# Who teaches which subject (was: какой предмет кто ведет)
class Teaching(models.Model):
    """Join table between the auth user (teacher) and a Subject."""
    id_teacher = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    id_subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
0.26923
0.122628
from __future__ import division import os.path as osp import cameramodels import numpy as np import numpy.ma as ma import PIL import scipy.io import torch from dense_fusion.datasets.ycb.ycb_utils import get_data_list from dense_fusion.datasets.ycb.ycb_utils import get_ycb_video_dataset from dense_fusion.datasets.ycb.ycb_utils import label_names def get_bbox(label, img_height, img_width, base=40): rows = np.any(label, axis=1) cols = np.any(label, axis=0) rmin, rmax = np.where(rows)[0][[0, -1]] cmin, cmax = np.where(cols)[0][[0, -1]] rmax += 1 cmax += 1 r_b = rmax - rmin r_b = int((r_b + base - 1) / base) * base c_b = cmax - cmin c_b = int((c_b + base - 1) / base) * base center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)] rmin = center[0] - int(r_b / 2) rmax = center[0] + int(r_b / 2) cmin = center[1] - int(c_b / 2) cmax = center[1] + int(c_b / 2) if rmin < 0: delt = -rmin rmin = 0 rmax += delt if cmin < 0: delt = -cmin cmin = 0 cmax += delt if rmax > img_height: delt = rmax - img_height rmax = img_height rmin -= delt if cmax > img_width: delt = cmax - img_width cmax = img_width cmin -= delt return rmin, cmin, rmax, cmax class YCBVideoPoseDataset(torch.utils.data.Dataset): def __init__(self, split='train', dataset_path=None): if split not in ['train', 'test']: raise ValueError( "{} split {} is not supported. 
" "Only 'train' and 'test' are supported.".format( self.__class__.__name__, split)) super(YCBVideoPoseDataset, self).__init__() if dataset_path is None: self.data_dir = get_ycb_video_dataset() else: self.data_dir = dataset_path self.label_names = label_names self.ids = get_data_list(split) def __len__(self): return len(self.ids) def __getitem__(self, idx): rgb_img = self.get_image(idx) depth_img = self.get_depth(idx) meta_data = self.load_meta_data(idx) depth_scale = self.get_depth_scale(data=meta_data) depth_img = np.array(depth_img, dtype=np.float32) / depth_scale height, width, _ = rgb_img.shape intrinsic_matrix = self.get_intrinsic(data=meta_data) cm = cameramodels.PinholeCameraModel.from_intrinsic_matrix( intrinsic_matrix, height, width) uv = np.hstack([np.tile(np.arange(width), height)[:, None], np.repeat(np.arange(height), width)[:, None]]) depth = np.array( depth_img / self.get_depth_scale(data=meta_data), 'f') points = cm.batch_project_pixel_to_3d_ray(uv) * depth.reshape(-1, 1) points = points.reshape(height, width, 3) poses = self.get_pose(data=meta_data) label = self.get_label(data=meta_data) label_img = self.get_label_image(idx) bboxes = [] new_label = [] for label_idx in label: mask_label = ma.getmaskarray( ma.masked_equal(label_img, label_idx)) if not np.any(mask_label): continue bboxes.append(get_bbox(mask_label, height, width)) new_label.append(label_idx) new_label = np.array(new_label, dtype=np.int32) return rgb_img, depth_img, label_img, poses, new_label, bboxes def load_meta_data(self, idx): meta_path = osp.join( self.data_dir, '{}-meta.mat'.format(self.ids[idx])) data = scipy.io.loadmat(meta_path) return data def get_label_image(self, idx): img_path = osp.join( self.data_dir, '{}-label.png'.format(self.ids[idx])) img = np.array(PIL.Image.open(img_path)) return img def get_pose(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) rt = data['poses'].transpose((2, 0, 1)) pose = np.zeros((len(rt), 4, 4), dtype=np.float32) 
pose[:, 3, 3] = 1 pose[:, :3, :3] = rt[:, :, :3] pose[:, :3, 3] = rt[:, :, 3] pose = pose.transpose((0, 2, 1)) return pose def get_image(self, i): imgpath = osp.join(self.data_dir, '{}-color.png'.format(self.ids[i])) img = np.array(PIL.Image.open(imgpath)) return img def get_depth(self, i): depthpath = osp.join(self.data_dir, '{}-depth.png'.format(self.ids[i])) depth = np.array(PIL.Image.open(depthpath), dtype=np.uint16) return depth def get_label(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) object_ids = data['cls_indexes'].flatten() object_ids = object_ids return object_ids def get_depth_scale(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) depth_scale = data['factor_depth'][0][0] return depth_scale def get_intrinsic(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) intrinsic_matrix = data['intrinsic_matrix'] return intrinsic_matrix if __name__ == '__main__': import cv2 from dense_fusion.visualizations import vis_bboxes dataset = YCBVideoPoseDataset(split='test') index = 0 prev_index = -1 while True: if prev_index != index: rgb_img, depth_img, label_img, poses, label, bboxes = \ dataset[index] bgr_img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR) bboxes = np.array(bboxes, dtype=np.int32) vis_bboxes(bgr_img, bboxes, label.reshape(-1), label_names=label_names) cv2.imshow(dataset.__class__.__name__, bgr_img) prev_index = index k = cv2.waitKey(10) if k == ord('q'): cv2.destroyAllWindows() break elif k == ord('n'): if index == len(dataset) - 1: print('WARNING: reached edge index of dataset: %d' % index) continue index += 1 elif k == ord('p'): if index == 0: print('WARNING: reached edge index of dataset: %d' % index) continue index -= 1
dense_fusion/datasets/ycb/ycb_video_dataset.py
from __future__ import division import os.path as osp import cameramodels import numpy as np import numpy.ma as ma import PIL import scipy.io import torch from dense_fusion.datasets.ycb.ycb_utils import get_data_list from dense_fusion.datasets.ycb.ycb_utils import get_ycb_video_dataset from dense_fusion.datasets.ycb.ycb_utils import label_names def get_bbox(label, img_height, img_width, base=40): rows = np.any(label, axis=1) cols = np.any(label, axis=0) rmin, rmax = np.where(rows)[0][[0, -1]] cmin, cmax = np.where(cols)[0][[0, -1]] rmax += 1 cmax += 1 r_b = rmax - rmin r_b = int((r_b + base - 1) / base) * base c_b = cmax - cmin c_b = int((c_b + base - 1) / base) * base center = [int((rmin + rmax) / 2), int((cmin + cmax) / 2)] rmin = center[0] - int(r_b / 2) rmax = center[0] + int(r_b / 2) cmin = center[1] - int(c_b / 2) cmax = center[1] + int(c_b / 2) if rmin < 0: delt = -rmin rmin = 0 rmax += delt if cmin < 0: delt = -cmin cmin = 0 cmax += delt if rmax > img_height: delt = rmax - img_height rmax = img_height rmin -= delt if cmax > img_width: delt = cmax - img_width cmax = img_width cmin -= delt return rmin, cmin, rmax, cmax class YCBVideoPoseDataset(torch.utils.data.Dataset): def __init__(self, split='train', dataset_path=None): if split not in ['train', 'test']: raise ValueError( "{} split {} is not supported. 
" "Only 'train' and 'test' are supported.".format( self.__class__.__name__, split)) super(YCBVideoPoseDataset, self).__init__() if dataset_path is None: self.data_dir = get_ycb_video_dataset() else: self.data_dir = dataset_path self.label_names = label_names self.ids = get_data_list(split) def __len__(self): return len(self.ids) def __getitem__(self, idx): rgb_img = self.get_image(idx) depth_img = self.get_depth(idx) meta_data = self.load_meta_data(idx) depth_scale = self.get_depth_scale(data=meta_data) depth_img = np.array(depth_img, dtype=np.float32) / depth_scale height, width, _ = rgb_img.shape intrinsic_matrix = self.get_intrinsic(data=meta_data) cm = cameramodels.PinholeCameraModel.from_intrinsic_matrix( intrinsic_matrix, height, width) uv = np.hstack([np.tile(np.arange(width), height)[:, None], np.repeat(np.arange(height), width)[:, None]]) depth = np.array( depth_img / self.get_depth_scale(data=meta_data), 'f') points = cm.batch_project_pixel_to_3d_ray(uv) * depth.reshape(-1, 1) points = points.reshape(height, width, 3) poses = self.get_pose(data=meta_data) label = self.get_label(data=meta_data) label_img = self.get_label_image(idx) bboxes = [] new_label = [] for label_idx in label: mask_label = ma.getmaskarray( ma.masked_equal(label_img, label_idx)) if not np.any(mask_label): continue bboxes.append(get_bbox(mask_label, height, width)) new_label.append(label_idx) new_label = np.array(new_label, dtype=np.int32) return rgb_img, depth_img, label_img, poses, new_label, bboxes def load_meta_data(self, idx): meta_path = osp.join( self.data_dir, '{}-meta.mat'.format(self.ids[idx])) data = scipy.io.loadmat(meta_path) return data def get_label_image(self, idx): img_path = osp.join( self.data_dir, '{}-label.png'.format(self.ids[idx])) img = np.array(PIL.Image.open(img_path)) return img def get_pose(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) rt = data['poses'].transpose((2, 0, 1)) pose = np.zeros((len(rt), 4, 4), dtype=np.float32) 
pose[:, 3, 3] = 1 pose[:, :3, :3] = rt[:, :, :3] pose[:, :3, 3] = rt[:, :, 3] pose = pose.transpose((0, 2, 1)) return pose def get_image(self, i): imgpath = osp.join(self.data_dir, '{}-color.png'.format(self.ids[i])) img = np.array(PIL.Image.open(imgpath)) return img def get_depth(self, i): depthpath = osp.join(self.data_dir, '{}-depth.png'.format(self.ids[i])) depth = np.array(PIL.Image.open(depthpath), dtype=np.uint16) return depth def get_label(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) object_ids = data['cls_indexes'].flatten() object_ids = object_ids return object_ids def get_depth_scale(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) depth_scale = data['factor_depth'][0][0] return depth_scale def get_intrinsic(self, idx=None, data=None): if data is None: data = self.load_meta_data(idx) intrinsic_matrix = data['intrinsic_matrix'] return intrinsic_matrix if __name__ == '__main__': import cv2 from dense_fusion.visualizations import vis_bboxes dataset = YCBVideoPoseDataset(split='test') index = 0 prev_index = -1 while True: if prev_index != index: rgb_img, depth_img, label_img, poses, label, bboxes = \ dataset[index] bgr_img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR) bboxes = np.array(bboxes, dtype=np.int32) vis_bboxes(bgr_img, bboxes, label.reshape(-1), label_names=label_names) cv2.imshow(dataset.__class__.__name__, bgr_img) prev_index = index k = cv2.waitKey(10) if k == ord('q'): cv2.destroyAllWindows() break elif k == ord('n'): if index == len(dataset) - 1: print('WARNING: reached edge index of dataset: %d' % index) continue index += 1 elif k == ord('p'): if index == 0: print('WARNING: reached edge index of dataset: %d' % index) continue index -= 1
0.6488
0.198511
import logging import traceback import uuid from collections import defaultdict import pymongo import six from blitzdb.backends.base import Backend as BaseBackend from blitzdb.backends.base import NotInTransaction from blitzdb.backends.mongo.queryset import QuerySet from blitzdb.document import Document from blitzdb.helpers import delete_value, get_value, set_value logger = logging.getLogger(__name__) class DotEncoder(object): DOT_MAGIC_VALUE = ":a5b8afc131:" @classmethod def encode(cls,obj,path): def replace_key(key): if isinstance(key,six.string_types): return key.replace(".", cls.DOT_MAGIC_VALUE) return key if isinstance(obj,dict): return {replace_key(key):value for key, value in obj.items()} return obj @classmethod def decode(cls,obj): if isinstance(obj,dict): return {key.replace(cls.DOT_MAGIC_VALUE, "."): value for key, value in obj.items()} return obj class Backend(BaseBackend): """ A MongoDB backend. :param db: An instance of a `pymongo.database.Database <http://api.mongodb.org/python/current/api/pymongo/database.html>`_ class Example usage: .. code-block:: python from pymongo import connection from blitzdb.backends.mongo import Backend as MongoBackend c = connection() my_db = c.test_db #create a new BlitzDB backend using a MongoDB database backend = MongoBackend(my_db) """ standard_encoders = BaseBackend.standard_encoders + [DotEncoder] def __init__(self, db, autocommit=False, use_pk_based_refs = True,**kwargs): super(Backend, self).__init__(**kwargs) self.db = db self._autocommit = autocommit self._save_cache = defaultdict(lambda: {}) self._delete_cache = defaultdict(lambda: {}) self._update_cache = defaultdict(lambda: {}) self._use_pk_based_refs = use_pk_based_refs self.in_transaction = False def begin(self): if self.in_transaction: # we're already in a transaction... 
self.commit() self.in_transaction = True def rollback(self,transaction = None): if not self.in_transaction: raise NotInTransaction("Not in a transaction!") self._save_cache = defaultdict(lambda: {}) self._delete_cache = defaultdict(lambda: {}) self._update_cache = defaultdict(lambda: {}) self.in_transaction = False def commit(self,transaction = None): try: for collection, cache in self._save_cache.items(): for pk, attributes in cache.items(): try: self.db[collection].save(attributes) except: logger.error("Error when saving the document with pk {} in collection {}".format(attributes['pk'], collection)) logger.error("Attributes (excerpt):" + str(dict(attributes.items()[:100]))) raise for collection, cache in self._delete_cache.items(): for pk in cache: self.db[collection].remove({'_id': pk}) for collection, cache in self._update_cache.items(): for pk, attributes in cache.items(): update_dict = {} for key in ('$set', '$unset'): if key in attributes and attributes[key]: update_dict[key] = attributes[key] if update_dict: self.db[collection].update({'_id': pk}, update_dict) finally: #regardless what happens in the 'commit' operation, we clear the cache self._save_cache = defaultdict(lambda: {}) self._delete_cache = defaultdict(lambda: {}) self._update_cache = defaultdict(lambda: {}) self.in_transaction = True @property def autocommit(self): return self._autocommit @autocommit.setter def autocommit(self, value): if value not in (True, False): raise TypeError("Value must be boolean!") self._autocommit = value def delete_by_primary_keys(self, cls, pks): collection = self.get_collection_for_cls(cls) if self.autocommit: for pk in pks: self.db[collection].remove({'_id': pk}) else: self._delete_cache[collection].update({pk: True for pk in pks}) def delete(self, obj): self.call_hook('before_delete',obj) collection = self.get_collection_for_cls(obj.__class__) if obj.pk == None: raise obj.DoesNotExist if self.autocommit: self.db[collection].remove({'_id': obj.pk}) else: 
self._delete_cache[collection][obj.pk] = True if obj.pk in self._save_cache[collection]: del self._save_cache[collection][obj.pk] def save_multiple(self, objs): if not objs: return serialized_attributes_list = [] collection = self.get_collection_for_cls(objs[0].__class__) for obj in objs: self.call_hook('before_save',obj) if obj.pk == None: obj.pk = uuid.uuid4().hex serialized_attributes = self.serialize(obj.attributes) serialized_attributes['_id'] = obj.pk serialized_attributes_list.append(serialized_attributes) for attributes in serialized_attributes_list: if self.autocommit: self.db[collection].save(attributes) else: self._save_cache[collection][attributes['pk']] = attributes if attributes['pk'] in self._delete_cache[collection]: del self._delete_cache[collection][attributes['pk']] def save(self, obj): return self.save_multiple([obj]) def update(self, obj, set_fields=None, unset_fields=None, update_obj=True): collection = self.get_collection_for_cls(obj.__class__) if obj.pk == None: raise obj.DoesNotExist("update() called on document without primary key!") def serialize_fields(fields): if isinstance(fields, (list,tuple)): update_dict = {} for key in fields: try: update_dict[key] = get_value(obj,key) except KeyError: pass elif isinstance(fields,dict): update_dict = fields.copy() else: raise TypeError("fields must be a list/tuple!") return update_dict if set_fields: set_attributes = serialize_fields(set_fields) else: set_attributes = {} if unset_fields: unset_attributes = list(unset_fields) else: unset_attributes = [] self.call_hook('before_update',obj,set_attributes,unset_attributes) set_attributes = {key : self.serialize(value) for key,value in set_attributes.items()} if update_obj: for key,value in set_attributes.items(): set_value(obj,key,value) for key in unset_attributes: delete_value(obj,key) update_dict = {} if set_attributes: update_dict['$set'] = set_attributes if unset_attributes: update_dict['$unset'] = {key : '' for key in unset_attributes} if not 
update_dict: return #nothing to do... if self.autocommit: self.db[collection].update({'_id': obj.pk}, update_dict) else: if obj.pk in self._delete_cache[collection]: raise obj.DoesNotExist("update() on document that is marked for deletion!") if obj.pk in self._update_cache[collection]: update_cache = self._update_cache[collection][obj.pk] if set_attributes: if '$set' not in update_cache: update_cache['$set'] = {} for key, value in set_attributes.items(): if '$unset' in update_cache and key in update_cache['$unset']: del update_cache['$unset'][key] update_cache['$set'][key] = value if unset_attributes: if '$unset' not in update_cache: update_cache['$unset'] = {} for key in unset_attributes: if '$set' in update_cache and key in update_cache['$set']: del update_cache['$set'][key] update_cache['$unset'][key] = '' else: self._update_cache[collection][obj.pk] = update_dict def serialize(self, obj, convert_keys_to_str=True, embed_level=0, encoders=None, autosave=True, for_query=False,path = None): return super(Backend, self).serialize(obj, convert_keys_to_str=convert_keys_to_str, embed_level=embed_level, encoders=encoders, autosave=autosave, path=path, for_query=for_query) def create_indexes(self, cls_or_collection, params_list): for params in params_list: self.create_index(cls_or_collection, **params) def ensure_indexes(self, include_pk=True): for cls in self.classes: meta_attributes = self.get_meta_attributes(cls) if include_pk: self.create_index(cls, fields={'pk': 1},opts = {'unique' : True}) if 'indexes' in meta_attributes: self.create_indexes(cls, meta_attributes['indexes']) def create_index(self, cls_or_collection, *args, **kwargs): if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) else: collection = cls_or_collection if 'fields' not in kwargs: raise AttributeError("You must specify the 'fields' parameter when creating an index!") if 'opts' in kwargs: opts = kwargs['opts'] else: opts = {} try: 
self.db[collection].ensure_index(list(kwargs['fields'].items()), **opts) except pymongo.errors.OperationFailure as failure: traceback.print_exc() #The index already exists with different options, so we drop it and recreate it... self.db[collection].drop_index(list(kwargs['fields'].items())) self.db[collection].ensure_index(list(kwargs['fields'].items()), **opts) def _canonicalize_query(self, query): """ Transform the query dictionary to replace e.g. documents with __ref__ fields. """ def transform_query(q): for encoder in self.query_encoders: q = encoder.encode(q,[]) if isinstance(q, dict): nq = {} for key,value in q.items(): new_key = key if isinstance(value,dict) and len(value) == 1 and list(value.keys())[0].startswith('$'): if list(value.keys())[0] in ('$all','$in'): if list(value.values())[0] and isinstance(list(value.values())[0][0],Document): if self._use_pk_based_refs: new_key+='.pk' else: new_key+='.__ref__' elif isinstance(value,Document): if self._use_pk_based_refs: new_key+='.pk' else: new_key+='.__ref__' nq[new_key] = transform_query(value) return nq elif isinstance(q, (list,QuerySet,tuple)): return [transform_query(x) for x in q] elif isinstance(q,Document): collection = self.get_collection_for_obj(q) if self._use_pk_based_refs: return q.pk else: return "%s:%s" % (collection,q.pk) else: return q return transform_query(query) def get(self, cls_or_collection, properties, raw=False, only=None): if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) else: collection = cls_or_collection cls = self.get_cls_for_collection(collection) queryset = self.filter(cls_or_collection, properties, raw=raw, only=only) if len(queryset) == 0: raise cls.DoesNotExist elif len(queryset) > 1: raise cls.MultipleDocumentsReturned return queryset[0] def filter(self, cls_or_collection, query, raw=False, only=None): """ Filter objects from the database that correspond to a given set of properties. 
See :py:meth:`blitzdb.backends.base.Backend.filter` for documentation of individual parameters .. note:: This function supports most query operators that are available in MongoDB and returns a query set that is based on a MongoDB cursor. """ if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) cls = cls_or_collection else: collection = cls_or_collection cls = self.get_cls_for_collection(collection) canonical_query = self._canonicalize_query(query) args = {} if only: if isinstance(only,tuple): args['projection'] = list(only) else: args['projection'] = only return QuerySet(self, cls, self.db[collection].find(canonical_query, **args), raw=raw, only=only)
blitzdb/backends/mongo/backend.py
import logging import traceback import uuid from collections import defaultdict import pymongo import six from blitzdb.backends.base import Backend as BaseBackend from blitzdb.backends.base import NotInTransaction from blitzdb.backends.mongo.queryset import QuerySet from blitzdb.document import Document from blitzdb.helpers import delete_value, get_value, set_value logger = logging.getLogger(__name__) class DotEncoder(object): DOT_MAGIC_VALUE = ":a5b8afc131:" @classmethod def encode(cls,obj,path): def replace_key(key): if isinstance(key,six.string_types): return key.replace(".", cls.DOT_MAGIC_VALUE) return key if isinstance(obj,dict): return {replace_key(key):value for key, value in obj.items()} return obj @classmethod def decode(cls,obj): if isinstance(obj,dict): return {key.replace(cls.DOT_MAGIC_VALUE, "."): value for key, value in obj.items()} return obj class Backend(BaseBackend): """ A MongoDB backend. :param db: An instance of a `pymongo.database.Database <http://api.mongodb.org/python/current/api/pymongo/database.html>`_ class Example usage: .. code-block:: python from pymongo import connection from blitzdb.backends.mongo import Backend as MongoBackend c = connection() my_db = c.test_db #create a new BlitzDB backend using a MongoDB database backend = MongoBackend(my_db) """ standard_encoders = BaseBackend.standard_encoders + [DotEncoder] def __init__(self, db, autocommit=False, use_pk_based_refs = True,**kwargs): super(Backend, self).__init__(**kwargs) self.db = db self._autocommit = autocommit self._save_cache = defaultdict(lambda: {}) self._delete_cache = defaultdict(lambda: {}) self._update_cache = defaultdict(lambda: {}) self._use_pk_based_refs = use_pk_based_refs self.in_transaction = False def begin(self): if self.in_transaction: # we're already in a transaction... 
self.commit() self.in_transaction = True def rollback(self,transaction = None): if not self.in_transaction: raise NotInTransaction("Not in a transaction!") self._save_cache = defaultdict(lambda: {}) self._delete_cache = defaultdict(lambda: {}) self._update_cache = defaultdict(lambda: {}) self.in_transaction = False def commit(self,transaction = None): try: for collection, cache in self._save_cache.items(): for pk, attributes in cache.items(): try: self.db[collection].save(attributes) except: logger.error("Error when saving the document with pk {} in collection {}".format(attributes['pk'], collection)) logger.error("Attributes (excerpt):" + str(dict(attributes.items()[:100]))) raise for collection, cache in self._delete_cache.items(): for pk in cache: self.db[collection].remove({'_id': pk}) for collection, cache in self._update_cache.items(): for pk, attributes in cache.items(): update_dict = {} for key in ('$set', '$unset'): if key in attributes and attributes[key]: update_dict[key] = attributes[key] if update_dict: self.db[collection].update({'_id': pk}, update_dict) finally: #regardless what happens in the 'commit' operation, we clear the cache self._save_cache = defaultdict(lambda: {}) self._delete_cache = defaultdict(lambda: {}) self._update_cache = defaultdict(lambda: {}) self.in_transaction = True @property def autocommit(self): return self._autocommit @autocommit.setter def autocommit(self, value): if value not in (True, False): raise TypeError("Value must be boolean!") self._autocommit = value def delete_by_primary_keys(self, cls, pks): collection = self.get_collection_for_cls(cls) if self.autocommit: for pk in pks: self.db[collection].remove({'_id': pk}) else: self._delete_cache[collection].update({pk: True for pk in pks}) def delete(self, obj): self.call_hook('before_delete',obj) collection = self.get_collection_for_cls(obj.__class__) if obj.pk == None: raise obj.DoesNotExist if self.autocommit: self.db[collection].remove({'_id': obj.pk}) else: 
self._delete_cache[collection][obj.pk] = True if obj.pk in self._save_cache[collection]: del self._save_cache[collection][obj.pk] def save_multiple(self, objs): if not objs: return serialized_attributes_list = [] collection = self.get_collection_for_cls(objs[0].__class__) for obj in objs: self.call_hook('before_save',obj) if obj.pk == None: obj.pk = uuid.uuid4().hex serialized_attributes = self.serialize(obj.attributes) serialized_attributes['_id'] = obj.pk serialized_attributes_list.append(serialized_attributes) for attributes in serialized_attributes_list: if self.autocommit: self.db[collection].save(attributes) else: self._save_cache[collection][attributes['pk']] = attributes if attributes['pk'] in self._delete_cache[collection]: del self._delete_cache[collection][attributes['pk']] def save(self, obj): return self.save_multiple([obj]) def update(self, obj, set_fields=None, unset_fields=None, update_obj=True): collection = self.get_collection_for_cls(obj.__class__) if obj.pk == None: raise obj.DoesNotExist("update() called on document without primary key!") def serialize_fields(fields): if isinstance(fields, (list,tuple)): update_dict = {} for key in fields: try: update_dict[key] = get_value(obj,key) except KeyError: pass elif isinstance(fields,dict): update_dict = fields.copy() else: raise TypeError("fields must be a list/tuple!") return update_dict if set_fields: set_attributes = serialize_fields(set_fields) else: set_attributes = {} if unset_fields: unset_attributes = list(unset_fields) else: unset_attributes = [] self.call_hook('before_update',obj,set_attributes,unset_attributes) set_attributes = {key : self.serialize(value) for key,value in set_attributes.items()} if update_obj: for key,value in set_attributes.items(): set_value(obj,key,value) for key in unset_attributes: delete_value(obj,key) update_dict = {} if set_attributes: update_dict['$set'] = set_attributes if unset_attributes: update_dict['$unset'] = {key : '' for key in unset_attributes} if not 
update_dict: return #nothing to do... if self.autocommit: self.db[collection].update({'_id': obj.pk}, update_dict) else: if obj.pk in self._delete_cache[collection]: raise obj.DoesNotExist("update() on document that is marked for deletion!") if obj.pk in self._update_cache[collection]: update_cache = self._update_cache[collection][obj.pk] if set_attributes: if '$set' not in update_cache: update_cache['$set'] = {} for key, value in set_attributes.items(): if '$unset' in update_cache and key in update_cache['$unset']: del update_cache['$unset'][key] update_cache['$set'][key] = value if unset_attributes: if '$unset' not in update_cache: update_cache['$unset'] = {} for key in unset_attributes: if '$set' in update_cache and key in update_cache['$set']: del update_cache['$set'][key] update_cache['$unset'][key] = '' else: self._update_cache[collection][obj.pk] = update_dict def serialize(self, obj, convert_keys_to_str=True, embed_level=0, encoders=None, autosave=True, for_query=False,path = None): return super(Backend, self).serialize(obj, convert_keys_to_str=convert_keys_to_str, embed_level=embed_level, encoders=encoders, autosave=autosave, path=path, for_query=for_query) def create_indexes(self, cls_or_collection, params_list): for params in params_list: self.create_index(cls_or_collection, **params) def ensure_indexes(self, include_pk=True): for cls in self.classes: meta_attributes = self.get_meta_attributes(cls) if include_pk: self.create_index(cls, fields={'pk': 1},opts = {'unique' : True}) if 'indexes' in meta_attributes: self.create_indexes(cls, meta_attributes['indexes']) def create_index(self, cls_or_collection, *args, **kwargs): if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) else: collection = cls_or_collection if 'fields' not in kwargs: raise AttributeError("You must specify the 'fields' parameter when creating an index!") if 'opts' in kwargs: opts = kwargs['opts'] else: opts = {} try: 
self.db[collection].ensure_index(list(kwargs['fields'].items()), **opts) except pymongo.errors.OperationFailure as failure: traceback.print_exc() #The index already exists with different options, so we drop it and recreate it... self.db[collection].drop_index(list(kwargs['fields'].items())) self.db[collection].ensure_index(list(kwargs['fields'].items()), **opts) def _canonicalize_query(self, query): """ Transform the query dictionary to replace e.g. documents with __ref__ fields. """ def transform_query(q): for encoder in self.query_encoders: q = encoder.encode(q,[]) if isinstance(q, dict): nq = {} for key,value in q.items(): new_key = key if isinstance(value,dict) and len(value) == 1 and list(value.keys())[0].startswith('$'): if list(value.keys())[0] in ('$all','$in'): if list(value.values())[0] and isinstance(list(value.values())[0][0],Document): if self._use_pk_based_refs: new_key+='.pk' else: new_key+='.__ref__' elif isinstance(value,Document): if self._use_pk_based_refs: new_key+='.pk' else: new_key+='.__ref__' nq[new_key] = transform_query(value) return nq elif isinstance(q, (list,QuerySet,tuple)): return [transform_query(x) for x in q] elif isinstance(q,Document): collection = self.get_collection_for_obj(q) if self._use_pk_based_refs: return q.pk else: return "%s:%s" % (collection,q.pk) else: return q return transform_query(query) def get(self, cls_or_collection, properties, raw=False, only=None): if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) else: collection = cls_or_collection cls = self.get_cls_for_collection(collection) queryset = self.filter(cls_or_collection, properties, raw=raw, only=only) if len(queryset) == 0: raise cls.DoesNotExist elif len(queryset) > 1: raise cls.MultipleDocumentsReturned return queryset[0] def filter(self, cls_or_collection, query, raw=False, only=None): """ Filter objects from the database that correspond to a given set of properties. 
See :py:meth:`blitzdb.backends.base.Backend.filter` for documentation of individual parameters .. note:: This function supports most query operators that are available in MongoDB and returns a query set that is based on a MongoDB cursor. """ if not isinstance(cls_or_collection, six.string_types): collection = self.get_collection_for_cls(cls_or_collection) cls = cls_or_collection else: collection = cls_or_collection cls = self.get_cls_for_collection(collection) canonical_query = self._canonicalize_query(query) args = {} if only: if isinstance(only,tuple): args['projection'] = list(only) else: args['projection'] = only return QuerySet(self, cls, self.db[collection].find(canonical_query, **args), raw=raw, only=only)
0.571169
0.064153
# python3 """Tensorflow-specific implementations of value.FieldSpec.""" from typing import Text, Tuple, Union, Sequence import edward2 as ed # type: ignore from gym import spaces from recsim_ng.core import value import tensorflow as tf FieldValue = value.FieldValue TFInvariant = Union[None, tf.TypeSpec, tf.TensorShape] _DYNAMIC_DIM = None class FieldSpec(value.FieldSpec): """Base Tensorflow field spec; checks shape consistency.""" def __init__(self): self._is_tensor = False self._is_not_tensor = False self._tensor_shape = tf.TensorShape(dims=None) def check_value(self, field_value): """Overrides `value.FieldSpec`. If this is called multiple times then the values must satisfy one of these conditions: - They are all convertible to tensors with compatible `TensorShape`s. - None of them are convertible to tensors. Args: field_value: See `value.FieldSpec`. Returns: See `value.FieldSpec`. """ try: field_value = tf.convert_to_tensor(field_value) except TypeError: pass if isinstance(field_value, tf.Tensor): self._is_tensor = True else: self._is_not_tensor = type(field_value) if self._is_tensor and self._is_not_tensor: return False, "both Tensor and non-Tensor ({}) values".format( self._is_not_tensor.__name__) if self._is_not_tensor: return True, "" shape = field_value.shape if not shape.is_compatible_with(self._tensor_shape): return False, "shapes {} and {} are incompatible".format( shape, self._tensor_shape) self._tensor_shape = self._tensor_shape.merge_with(shape) return True, "" def sanitize(self, field_value, field_name): """Overrides `value.FieldSpec`. If field_value is a tensor, this method will: - Rename the tensor to the name of the corresponding field for ease of debugging AutoGraph issues. - Set the tensor shape to the most specific known field shape so far. Args: field_value: See `value.FieldSpec`. field_name: Name of the field within the ValueSpec. Returns: a sanitized field value.. 
""" if self._is_tensor: # Tensor manipulations reduce a random variable to its sampled value, so # this case must be treated specially to avoid interfering with Edward2 # tracing. Note that the creation of ed.RandomVariable does not trigger # a tracer event as opposed to the creation of a 'named' random variable, # e.g. ed.Bernoulli. if isinstance(field_value, ed.RandomVariable): sample = tf.identity(field_value.value, name=field_name) sample.set_shape(self._tensor_shape) field_value = ed.RandomVariable( field_value.distribution, sample_shape=field_value.sample_shape, value=sample) else: field_value = tf.identity(field_value, name=field_name) field_value.set_shape(self._tensor_shape) return field_value def invariant(self): return self._tensor_shape if self._is_tensor else None class DynamicFieldSpec(FieldSpec): """Field spec for tensors which may change shape across iterations.""" def __init__(self, rank, dynamic_dims): super().__init__() if not dynamic_dims: raise ValueError("dynamic_dims must have at least one element. " "If the field has no dynamic dimensions, please use " "`FieldSpec` instead.") if rank <= max(dynamic_dims): raise ValueError( "dynamic_dims contains higher dimensions than the rank of the tensor." " `rank` must be greater than `max(dynamic_dims)`.") # Promote the tensor shape to make use of the base class compatilibility # check for rank correctness. self._tensor_shape = self._tensor_shape.with_rank(rank) self._dynamic_dims = dynamic_dims self._rank = rank def check_value(self, field_value): """Overrides `value.FieldSpec`. If this is called multiple times then the values must satisfy one of these conditions: - They are all convertible to tensors with compatible `TensorShape`s. - None of them are convertible to tensors. Args: field_value: See `value.FieldSpec`. Returns: See `value.FieldSpec`. 
""" ok, error_msg = super().check_value(field_value) if not ok: return ok, error_msg dynamic_tensor_shape = [ _DYNAMIC_DIM if i in self._dynamic_dims else self._tensor_shape[i] for i in range(self._rank) ] self._tensor_shape = tf.TensorShape(dynamic_tensor_shape) return ok, error_msg class Space(FieldSpec): """Tensorflow field spec with a Gym space.""" def __init__(self, space): super().__init__() self._space = space @property def space(self): return self._space
recsim_ng/lib/tensorflow/field_spec.py
# python3 """Tensorflow-specific implementations of value.FieldSpec.""" from typing import Text, Tuple, Union, Sequence import edward2 as ed # type: ignore from gym import spaces from recsim_ng.core import value import tensorflow as tf FieldValue = value.FieldValue TFInvariant = Union[None, tf.TypeSpec, tf.TensorShape] _DYNAMIC_DIM = None class FieldSpec(value.FieldSpec): """Base Tensorflow field spec; checks shape consistency.""" def __init__(self): self._is_tensor = False self._is_not_tensor = False self._tensor_shape = tf.TensorShape(dims=None) def check_value(self, field_value): """Overrides `value.FieldSpec`. If this is called multiple times then the values must satisfy one of these conditions: - They are all convertible to tensors with compatible `TensorShape`s. - None of them are convertible to tensors. Args: field_value: See `value.FieldSpec`. Returns: See `value.FieldSpec`. """ try: field_value = tf.convert_to_tensor(field_value) except TypeError: pass if isinstance(field_value, tf.Tensor): self._is_tensor = True else: self._is_not_tensor = type(field_value) if self._is_tensor and self._is_not_tensor: return False, "both Tensor and non-Tensor ({}) values".format( self._is_not_tensor.__name__) if self._is_not_tensor: return True, "" shape = field_value.shape if not shape.is_compatible_with(self._tensor_shape): return False, "shapes {} and {} are incompatible".format( shape, self._tensor_shape) self._tensor_shape = self._tensor_shape.merge_with(shape) return True, "" def sanitize(self, field_value, field_name): """Overrides `value.FieldSpec`. If field_value is a tensor, this method will: - Rename the tensor to the name of the corresponding field for ease of debugging AutoGraph issues. - Set the tensor shape to the most specific known field shape so far. Args: field_value: See `value.FieldSpec`. field_name: Name of the field within the ValueSpec. Returns: a sanitized field value.. 
""" if self._is_tensor: # Tensor manipulations reduce a random variable to its sampled value, so # this case must be treated specially to avoid interfering with Edward2 # tracing. Note that the creation of ed.RandomVariable does not trigger # a tracer event as opposed to the creation of a 'named' random variable, # e.g. ed.Bernoulli. if isinstance(field_value, ed.RandomVariable): sample = tf.identity(field_value.value, name=field_name) sample.set_shape(self._tensor_shape) field_value = ed.RandomVariable( field_value.distribution, sample_shape=field_value.sample_shape, value=sample) else: field_value = tf.identity(field_value, name=field_name) field_value.set_shape(self._tensor_shape) return field_value def invariant(self): return self._tensor_shape if self._is_tensor else None class DynamicFieldSpec(FieldSpec): """Field spec for tensors which may change shape across iterations.""" def __init__(self, rank, dynamic_dims): super().__init__() if not dynamic_dims: raise ValueError("dynamic_dims must have at least one element. " "If the field has no dynamic dimensions, please use " "`FieldSpec` instead.") if rank <= max(dynamic_dims): raise ValueError( "dynamic_dims contains higher dimensions than the rank of the tensor." " `rank` must be greater than `max(dynamic_dims)`.") # Promote the tensor shape to make use of the base class compatilibility # check for rank correctness. self._tensor_shape = self._tensor_shape.with_rank(rank) self._dynamic_dims = dynamic_dims self._rank = rank def check_value(self, field_value): """Overrides `value.FieldSpec`. If this is called multiple times then the values must satisfy one of these conditions: - They are all convertible to tensors with compatible `TensorShape`s. - None of them are convertible to tensors. Args: field_value: See `value.FieldSpec`. Returns: See `value.FieldSpec`. 
""" ok, error_msg = super().check_value(field_value) if not ok: return ok, error_msg dynamic_tensor_shape = [ _DYNAMIC_DIM if i in self._dynamic_dims else self._tensor_shape[i] for i in range(self._rank) ] self._tensor_shape = tf.TensorShape(dynamic_tensor_shape) return ok, error_msg class Space(FieldSpec): """Tensorflow field spec with a Gym space.""" def __init__(self, space): super().__init__() self._space = space @property def space(self): return self._space
0.962018
0.557243
import unittest
import warnings

import mxnet as mx
import numpy as np


def test_print_summary():
    """Build a small symbolic network and exercise ``mx.viz.print_summary``,
    both without and with an explicit input shape."""
    data = mx.sym.Variable('data')
    # lr_mult on the bias exercises summary printing of attribute-carrying vars.
    bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)
    emb1 = mx.symbol.Embedding(data=data, name='emb1', input_dim=100,
                               output_dim=28)
    conv1 = mx.symbol.Convolution(data=emb1, name='conv1', num_filter=32,
                                  kernel=(3, 3), stride=(2, 2))
    bn1 = mx.symbol.BatchNorm(data=conv1, name="bn1")
    act1 = mx.symbol.Activation(data=bn1, name='relu1', act_type="relu")
    mp1 = mx.symbol.Pooling(data=act1, name='mp1', kernel=(2, 2),
                            stride=(2, 2), pool_type='max')
    fc1 = mx.sym.FullyConnected(data=mp1, bias=bias, name='fc1',
                                num_hidden=10, lr_mult=0)
    fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10,
                                wd_mult=0.5)
    sc1 = mx.symbol.SliceChannel(data=fc2, num_outputs=10, name="slice_1",
                                 squeeze_axis=0)
    # Summary without shape information ...
    mx.viz.print_summary(sc1)
    # ... and with an explicit input shape so parameter counts are resolved.
    shape = {"data": (1, 3, 28)}
    mx.viz.print_summary(sc1, shape)


def graphviz_exists():
    """Return True if the optional ``graphviz`` package can be imported."""
    try:
        import graphviz  # noqa: F401
    except ImportError:
        return False
    return True


@unittest.skipIf(not graphviz_exists(),
                 "Skip test_plot_network as Graphviz could not be imported")
def test_plot_network():
    # Plotting a graph that reuses a layer name ('fc') must emit exactly one
    # duplicate-name warning mentioning the offending name.  (The old comment
    # claimed this tested a "cyclic graph", which is not what happens here.)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc', num_hidden=128)
    net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
    # Intentionally reuse the name 'fc' to trigger the duplicate-name warning.
    net = mx.sym.FullyConnected(data=net, name='fc', num_hidden=10)
    net = mx.sym.SoftmaxOutput(data=net, name='out')
    with warnings.catch_warnings(record=True) as w:
        # The returned digraph is not needed; we only check the warning
        # (the old code bound it to an unused local).
        mx.viz.plot_network(net,
                            shape={'data': (100, 200)},
                            dtype={'data': np.float32},
                            node_attrs={"fixedsize": "false"})
        assert len(w) == 1
        assert "There are multiple variables with the same name in your graph" in str(w[-1].message)
        assert "fc" in str(w[-1].message)


if __name__ == "__main__":
    import nose
    nose.runmodule()
tests/python/unittest/test_viz.py
import unittest
import warnings

import mxnet as mx
import numpy as np


def test_print_summary():
    """Build a small symbolic network and exercise ``mx.viz.print_summary``,
    both without and with an explicit input shape."""
    data = mx.sym.Variable('data')
    # lr_mult on the bias exercises summary printing of attribute-carrying vars.
    bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)
    emb1 = mx.symbol.Embedding(data=data, name='emb1', input_dim=100,
                               output_dim=28)
    conv1 = mx.symbol.Convolution(data=emb1, name='conv1', num_filter=32,
                                  kernel=(3, 3), stride=(2, 2))
    bn1 = mx.symbol.BatchNorm(data=conv1, name="bn1")
    act1 = mx.symbol.Activation(data=bn1, name='relu1', act_type="relu")
    mp1 = mx.symbol.Pooling(data=act1, name='mp1', kernel=(2, 2),
                            stride=(2, 2), pool_type='max')
    fc1 = mx.sym.FullyConnected(data=mp1, bias=bias, name='fc1',
                                num_hidden=10, lr_mult=0)
    fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10,
                                wd_mult=0.5)
    sc1 = mx.symbol.SliceChannel(data=fc2, num_outputs=10, name="slice_1",
                                 squeeze_axis=0)
    # Summary without shape information ...
    mx.viz.print_summary(sc1)
    # ... and with an explicit input shape so parameter counts are resolved.
    shape = {"data": (1, 3, 28)}
    mx.viz.print_summary(sc1, shape)


def graphviz_exists():
    """Return True if the optional ``graphviz`` package can be imported."""
    try:
        import graphviz  # noqa: F401
    except ImportError:
        return False
    return True


@unittest.skipIf(not graphviz_exists(),
                 "Skip test_plot_network as Graphviz could not be imported")
def test_plot_network():
    # Plotting a graph that reuses a layer name ('fc') must emit exactly one
    # duplicate-name warning mentioning the offending name.  (The old comment
    # claimed this tested a "cyclic graph", which is not what happens here.)
    net = mx.sym.Variable('data')
    net = mx.sym.FullyConnected(data=net, name='fc', num_hidden=128)
    net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
    # Intentionally reuse the name 'fc' to trigger the duplicate-name warning.
    net = mx.sym.FullyConnected(data=net, name='fc', num_hidden=10)
    net = mx.sym.SoftmaxOutput(data=net, name='out')
    with warnings.catch_warnings(record=True) as w:
        # The returned digraph is not needed; we only check the warning
        # (the old code bound it to an unused local).
        mx.viz.plot_network(net,
                            shape={'data': (100, 200)},
                            dtype={'data': np.float32},
                            node_attrs={"fixedsize": "false"})
        assert len(w) == 1
        assert "There are multiple variables with the same name in your graph" in str(w[-1].message)
        assert "fc" in str(w[-1].message)


if __name__ == "__main__":
    import nose
    nose.runmodule()
0.398875
0.530905
import tensorflow as tf
import tensorflow.keras as ks
from kgcnn.layers.conv.dimenet_conv import DimNetInteractionPPBlock, DimNetOutputBlock
from kgcnn.layers.embedding import EmbeddingDimeBlock
from kgcnn.layers.gather import GatherNodes
from kgcnn.layers.geom import SphericalBasisLayer, NodeDistance, EdgeAngle, BesselBasisLayer
from kgcnn.layers.keras import Dense, Concatenate, Add
from kgcnn.layers.pool.pooling import PoolingNodes
from kgcnn.utils.models import update_model_kwargs

# Fast and Uncertainty-Aware Directional Message Passing for Non-Equilibrium Molecules
# <NAME>, <NAME>, <NAME>, <NAME>
# https://arxiv.org/abs/2011.14115

# Default hyper-parameters; merged with user kwargs by @update_model_kwargs.
model_default = {"name": "DimeNetPP",
                 "inputs": [{"shape": [None], "name": "node_attributes", "dtype": "float32", "ragged": True},
                            {"shape": [None, 3], "name": "node_coordinates", "dtype": "float32", "ragged": True},
                            {"shape": [None, 2], "name": "edge_indices", "dtype": "int64", "ragged": True},
                            {"shape": [None, 2], "name": "angle_indices", "dtype": "int64", "ragged": True}],
                 "input_embedding": {"node": {"input_dim": 95, "output_dim": 128,
                                              "embeddings_initializer": {
                                                  "class_name": "RandomUniform",
                                                  "config": {"minval": -1.7320508075688772,
                                                             "maxval": 1.7320508075688772}}}},
                 "output_embedding": "graph",
                 "emb_size": 128, "out_emb_size": 256, "int_emb_size": 64, "basis_emb_size": 8,
                 "num_blocks": 4, "num_spherical": 7, "num_radial": 6,
                 "cutoff": 5.0, "envelope_exponent": 5,
                 "num_before_skip": 1, "num_after_skip": 2, "num_dense_output": 3,
                 "num_targets": 12, "extensive": True, "output_init": "zeros",
                 "activation": "swish", "verbose": 1,
                 }


@update_model_kwargs(model_default)
def make_model(inputs=None,
               input_embedding=None,
               output_embedding=None,
               emb_size=None,
               out_emb_size=None,
               int_emb_size=None,
               basis_emb_size=None,
               num_blocks=None,
               num_spherical=None,
               num_radial=None,
               cutoff=None,
               envelope_exponent=None,
               num_before_skip=None,
               num_after_skip=None,
               num_dense_output=None,
               num_targets=None,
               activation=None,
               extensive=None,
               output_init=None,
               **kwargs):
    """Make DimeNetPP graph network via functional API. Default parameters can be found in :obj:`model_default`.

    Note: DimeNetPP does require a large amount of memory for this implementation, which increases quickly
    with the number of connections in a batch.

    Args:
        inputs (list): List of dictionaries unpacked in :obj:`tf.keras.layers.Input`. Order must match model
            definition.
        input_embedding (dict): Dictionary of embedding arguments for nodes etc. unpacked in `Embedding` layers.
        output_embedding (str): Main embedding task for graph network. Only "graph" is supported.
        emb_size (int): Overall embedding size used for the messages.
        out_emb_size (int): Embedding size for output of `DimNetOutputBlock`.
        int_emb_size (int): Embedding size used for interaction triplets.
        basis_emb_size (int): Embedding size used inside the basis transformation.
        num_blocks (int): Number of graph embedding blocks or depth of the network.
        num_spherical (int): Number of spherical components in `SphericalBasisLayer`.
        num_radial (int): Number of radial components in basis layer.
        cutoff (float): Distance cutoff for basis layer.
        envelope_exponent (int): Exponent in envelope function for basis layer.
        num_before_skip (int): Number of residual layers in interaction block before skip connection.
        num_after_skip (int): Number of residual layers in interaction block after skip connection.
        num_dense_output (int): Number of dense units in output `DimNetOutputBlock`.
        num_targets (int): Number of targets or output embedding dimension of the model.
        activation (str, dict): Activation to use.
        extensive (bool): Graph output for extensive target to apply sum for pooling or mean otherwise.
        output_init (str, dict): Output initializer for kernel.

    Returns:
        tf.keras.models.Model

    Raises:
        ValueError: If `output_embedding` is anything other than "graph".
    """
    # Fail fast on unsupported output modes instead of validating only after
    # the entire (memory-hungry) graph has been built, as the old code did.
    if output_embedding != "graph":
        raise ValueError("Unsupported graph embedding for model `DimeNetPP`.")

    # Model inputs: node features, coordinates, bond indices, angle indices.
    node_input = ks.layers.Input(**inputs[0])
    xyz_input = ks.layers.Input(**inputs[1])
    bond_index_input = ks.layers.Input(**inputs[2])
    angle_index_input = ks.layers.Input(**inputs[3])

    # Atom embedding: only embed when nodes are given as (ragged) index arrays.
    if len(inputs[0]["shape"]) == 1:
        n = EmbeddingDimeBlock(**input_embedding["node"])(node_input)
    else:
        n = node_input

    x = xyz_input
    edi = bond_index_input
    adi = angle_index_input

    # Pairwise distances along edges and their radial basis expansion.
    d = NodeDistance()([x, edi])
    rbf = BesselBasisLayer(num_radial=num_radial, cutoff=cutoff,
                           envelope_exponent=envelope_exponent)(d)

    # Angles between edge pairs and their spherical basis expansion.
    a = EdgeAngle()([x, edi, adi])
    sbf = SphericalBasisLayer(num_spherical=num_spherical, num_radial=num_radial, cutoff=cutoff,
                              envelope_exponent=envelope_exponent)([d, a, adi])

    # Embedding block: concatenate node pairs with the edge basis.
    rbf_emb = Dense(emb_size, use_bias=True, activation=activation,
                    kernel_initializer="kgcnn>glorot_orthogonal")(rbf)
    n_pairs = GatherNodes()([n, edi])
    x = Concatenate(axis=-1)([n_pairs, rbf_emb])
    x = Dense(emb_size, use_bias=True, activation=activation,
              kernel_initializer="kgcnn>glorot_orthogonal")(x)
    ps = DimNetOutputBlock(emb_size, out_emb_size, num_dense_output, num_targets=num_targets,
                           output_kernel_initializer=output_init)([n, x, rbf, edi])

    # Interaction blocks: each block updates edge messages and accumulates a
    # per-node output contribution into `ps`.
    add_xp = Add()
    for i in range(num_blocks):
        x = DimNetInteractionPPBlock(emb_size, int_emb_size, basis_emb_size,
                                     num_before_skip, num_after_skip)([x, rbf, sbf, adi])
        p_update = DimNetOutputBlock(emb_size, out_emb_size, num_dense_output, num_targets=num_targets,
                                     output_kernel_initializer=output_init)([n, x, rbf, edi])
        ps = add_xp([ps, p_update])

    # Graph-level pooling: sum for extensive targets, mean otherwise.
    if extensive:
        main_output = PoolingNodes(pooling_method="sum")(ps)
    else:
        main_output = PoolingNodes(pooling_method="mean")(ps)

    model = tf.keras.models.Model(inputs=[node_input, xyz_input, bond_index_input,
                                          angle_index_input], outputs=main_output)
    return model
kgcnn/literature/DimeNetPP.py
import tensorflow as tf
import tensorflow.keras as ks
from kgcnn.layers.conv.dimenet_conv import DimNetInteractionPPBlock, DimNetOutputBlock
from kgcnn.layers.embedding import EmbeddingDimeBlock
from kgcnn.layers.gather import GatherNodes
from kgcnn.layers.geom import SphericalBasisLayer, NodeDistance, EdgeAngle, BesselBasisLayer
from kgcnn.layers.keras import Dense, Concatenate, Add
from kgcnn.layers.pool.pooling import PoolingNodes
from kgcnn.utils.models import update_model_kwargs

# Fast and Uncertainty-Aware Directional Message Passing for Non-Equilibrium Molecules
# <NAME>, <NAME>, <NAME>, <NAME>
# https://arxiv.org/abs/2011.14115

# Default hyper-parameters; merged with user kwargs by @update_model_kwargs.
model_default = {"name": "DimeNetPP",
                 "inputs": [{"shape": [None], "name": "node_attributes", "dtype": "float32", "ragged": True},
                            {"shape": [None, 3], "name": "node_coordinates", "dtype": "float32", "ragged": True},
                            {"shape": [None, 2], "name": "edge_indices", "dtype": "int64", "ragged": True},
                            {"shape": [None, 2], "name": "angle_indices", "dtype": "int64", "ragged": True}],
                 "input_embedding": {"node": {"input_dim": 95, "output_dim": 128,
                                              "embeddings_initializer": {
                                                  "class_name": "RandomUniform",
                                                  "config": {"minval": -1.7320508075688772,
                                                             "maxval": 1.7320508075688772}}}},
                 "output_embedding": "graph",
                 "emb_size": 128, "out_emb_size": 256, "int_emb_size": 64, "basis_emb_size": 8,
                 "num_blocks": 4, "num_spherical": 7, "num_radial": 6,
                 "cutoff": 5.0, "envelope_exponent": 5,
                 "num_before_skip": 1, "num_after_skip": 2, "num_dense_output": 3,
                 "num_targets": 12, "extensive": True, "output_init": "zeros",
                 "activation": "swish", "verbose": 1,
                 }


@update_model_kwargs(model_default)
def make_model(inputs=None,
               input_embedding=None,
               output_embedding=None,
               emb_size=None,
               out_emb_size=None,
               int_emb_size=None,
               basis_emb_size=None,
               num_blocks=None,
               num_spherical=None,
               num_radial=None,
               cutoff=None,
               envelope_exponent=None,
               num_before_skip=None,
               num_after_skip=None,
               num_dense_output=None,
               num_targets=None,
               activation=None,
               extensive=None,
               output_init=None,
               **kwargs):
    """Make DimeNetPP graph network via functional API. Default parameters can be found in :obj:`model_default`.

    Note: DimeNetPP does require a large amount of memory for this implementation, which increases quickly
    with the number of connections in a batch.

    Args:
        inputs (list): List of dictionaries unpacked in :obj:`tf.keras.layers.Input`. Order must match model
            definition.
        input_embedding (dict): Dictionary of embedding arguments for nodes etc. unpacked in `Embedding` layers.
        output_embedding (str): Main embedding task for graph network. Only "graph" is supported.
        emb_size (int): Overall embedding size used for the messages.
        out_emb_size (int): Embedding size for output of `DimNetOutputBlock`.
        int_emb_size (int): Embedding size used for interaction triplets.
        basis_emb_size (int): Embedding size used inside the basis transformation.
        num_blocks (int): Number of graph embedding blocks or depth of the network.
        num_spherical (int): Number of spherical components in `SphericalBasisLayer`.
        num_radial (int): Number of radial components in basis layer.
        cutoff (float): Distance cutoff for basis layer.
        envelope_exponent (int): Exponent in envelope function for basis layer.
        num_before_skip (int): Number of residual layers in interaction block before skip connection.
        num_after_skip (int): Number of residual layers in interaction block after skip connection.
        num_dense_output (int): Number of dense units in output `DimNetOutputBlock`.
        num_targets (int): Number of targets or output embedding dimension of the model.
        activation (str, dict): Activation to use.
        extensive (bool): Graph output for extensive target to apply sum for pooling or mean otherwise.
        output_init (str, dict): Output initializer for kernel.

    Returns:
        tf.keras.models.Model

    Raises:
        ValueError: If `output_embedding` is anything other than "graph".
    """
    # Fail fast on unsupported output modes instead of validating only after
    # the entire (memory-hungry) graph has been built, as the old code did.
    if output_embedding != "graph":
        raise ValueError("Unsupported graph embedding for model `DimeNetPP`.")

    # Model inputs: node features, coordinates, bond indices, angle indices.
    node_input = ks.layers.Input(**inputs[0])
    xyz_input = ks.layers.Input(**inputs[1])
    bond_index_input = ks.layers.Input(**inputs[2])
    angle_index_input = ks.layers.Input(**inputs[3])

    # Atom embedding: only embed when nodes are given as (ragged) index arrays.
    if len(inputs[0]["shape"]) == 1:
        n = EmbeddingDimeBlock(**input_embedding["node"])(node_input)
    else:
        n = node_input

    x = xyz_input
    edi = bond_index_input
    adi = angle_index_input

    # Pairwise distances along edges and their radial basis expansion.
    d = NodeDistance()([x, edi])
    rbf = BesselBasisLayer(num_radial=num_radial, cutoff=cutoff,
                           envelope_exponent=envelope_exponent)(d)

    # Angles between edge pairs and their spherical basis expansion.
    a = EdgeAngle()([x, edi, adi])
    sbf = SphericalBasisLayer(num_spherical=num_spherical, num_radial=num_radial, cutoff=cutoff,
                              envelope_exponent=envelope_exponent)([d, a, adi])

    # Embedding block: concatenate node pairs with the edge basis.
    rbf_emb = Dense(emb_size, use_bias=True, activation=activation,
                    kernel_initializer="kgcnn>glorot_orthogonal")(rbf)
    n_pairs = GatherNodes()([n, edi])
    x = Concatenate(axis=-1)([n_pairs, rbf_emb])
    x = Dense(emb_size, use_bias=True, activation=activation,
              kernel_initializer="kgcnn>glorot_orthogonal")(x)
    ps = DimNetOutputBlock(emb_size, out_emb_size, num_dense_output, num_targets=num_targets,
                           output_kernel_initializer=output_init)([n, x, rbf, edi])

    # Interaction blocks: each block updates edge messages and accumulates a
    # per-node output contribution into `ps`.
    add_xp = Add()
    for i in range(num_blocks):
        x = DimNetInteractionPPBlock(emb_size, int_emb_size, basis_emb_size,
                                     num_before_skip, num_after_skip)([x, rbf, sbf, adi])
        p_update = DimNetOutputBlock(emb_size, out_emb_size, num_dense_output, num_targets=num_targets,
                                     output_kernel_initializer=output_init)([n, x, rbf, edi])
        ps = add_xp([ps, p_update])

    # Graph-level pooling: sum for extensive targets, mean otherwise.
    if extensive:
        main_output = PoolingNodes(pooling_method="sum")(ps)
    else:
        main_output = PoolingNodes(pooling_method="mean")(ps)

    model = tf.keras.models.Model(inputs=[node_input, xyz_input, bond_index_input,
                                          angle_index_input], outputs=main_output)
    return model
0.935927
0.45175
from ..HeightContainer import UniformTopographyInterface


def scale_dependent_statistical_property(topography, func, n=1, scale_factor=None, distance=None):
    """
    Compute statistical properties of a uniform topography at specific scales.
    The scale is specified either by `scale_factor` or `distance`. These
    properties are statistics of derivatives carried out at specific scales,
    as computed using the `derivative` pipeline function. The specific
    statistical property is computed by the `func` argument. The output of
    `func` needs to be homogeneous, i.e. if an array is returned, this array
    must have the same size independent of the derivative data that is fed
    into `func`.

    Parameters
    ----------
    topography : Topography or UniformLineScan
        Topography or line scan.
    func : callable
        The function that computes the statistical properties:

            func(dx, dy=None) -> np.ndarray

        A function taking the derivative in x-direction and optionally the
        derivative in y-direction (only for topographies, i.e. maps). The
        function needs to be able to ignore the second argument as a
        container can be a mixture of topographies and line scans. The
        function can return a scalar value or an array, but the array size
        must be fixed.
    n : int, optional
        Order of derivative. (Default: 1)
    scale_factor : float or np.ndarray
        Scale factor for rescaling the finite differences stencil. A scale
        factor of unity means the derivative is computed at the size of the
        individual pixel.
    distance : float or np.ndarray
        Characteristic distances at which the derivatives are computed. If
        this is an array, then the statistical property is computed at each
        of these distances.

    Returns
    -------
    statistical_fingerprint : np.ndarray or list of np.ndarray
        Array containing the result of `func`.

    Examples
    --------
    This example yields the height-difference autocorrelation function in
    the x-direction:

    >>> distances, A = t.autocorrelation_from_profile()
    >>> s = t.scale_dependent_statistical_property(lambda x, y: np.var(x), distance=distances[1::20])

    `A` and `s` are identical.
    """
    d = topography.derivative(n=n, scale_factor=scale_factor, distance=distance)
    if topography.dim == 1:
        # Line scan: one x-derivative array per requested scale.
        return [func(_d) for _d in d]
    else:
        # Topography (map): `d` is a (dx, dy) pair of per-scale derivative
        # arrays; zip pairs up the matching x- and y-derivatives per scale.
        # (The previous `dx, dy = d` unpacking here was dead code — its
        # bindings were immediately shadowed by the comprehension targets.)
        return [func(dx, dy) for dx, dy in zip(*d)]


UniformTopographyInterface.register_function(
    'scale_dependent_statistical_property',
    scale_dependent_statistical_property)
SurfaceTopography/Uniform/ScaleDependentStatistics.py
from ..HeightContainer import UniformTopographyInterface


def scale_dependent_statistical_property(topography, func, n=1, scale_factor=None, distance=None):
    """
    Compute statistical properties of a uniform topography at specific scales.
    The scale is specified either by `scale_factor` or `distance`. These
    properties are statistics of derivatives carried out at specific scales,
    as computed using the `derivative` pipeline function. The specific
    statistical property is computed by the `func` argument. The output of
    `func` needs to be homogeneous, i.e. if an array is returned, this array
    must have the same size independent of the derivative data that is fed
    into `func`.

    Parameters
    ----------
    topography : Topography or UniformLineScan
        Topography or line scan.
    func : callable
        The function that computes the statistical properties:

            func(dx, dy=None) -> np.ndarray

        A function taking the derivative in x-direction and optionally the
        derivative in y-direction (only for topographies, i.e. maps). The
        function needs to be able to ignore the second argument as a
        container can be a mixture of topographies and line scans. The
        function can return a scalar value or an array, but the array size
        must be fixed.
    n : int, optional
        Order of derivative. (Default: 1)
    scale_factor : float or np.ndarray
        Scale factor for rescaling the finite differences stencil. A scale
        factor of unity means the derivative is computed at the size of the
        individual pixel.
    distance : float or np.ndarray
        Characteristic distances at which the derivatives are computed. If
        this is an array, then the statistical property is computed at each
        of these distances.

    Returns
    -------
    statistical_fingerprint : np.ndarray or list of np.ndarray
        Array containing the result of `func`.

    Examples
    --------
    This example yields the height-difference autocorrelation function in
    the x-direction:

    >>> distances, A = t.autocorrelation_from_profile()
    >>> s = t.scale_dependent_statistical_property(lambda x, y: np.var(x), distance=distances[1::20])

    `A` and `s` are identical.
    """
    d = topography.derivative(n=n, scale_factor=scale_factor, distance=distance)
    if topography.dim == 1:
        # Line scan: one x-derivative array per requested scale.
        return [func(_d) for _d in d]
    else:
        # Topography (map): `d` is a (dx, dy) pair of per-scale derivative
        # arrays; zip pairs up the matching x- and y-derivatives per scale.
        # (The previous `dx, dy = d` unpacking here was dead code — its
        # bindings were immediately shadowed by the comprehension targets.)
        return [func(dx, dy) for dx, dy in zip(*d)]


UniformTopographyInterface.register_function(
    'scale_dependent_statistical_property',
    scale_dependent_statistical_property)
0.957606
0.795618
from tcutils.util import retry, get_random_name
from vnc_api.vnc_api import RouteAggregate, RouteListType, ServiceInterfaceTag
import fixtures
import re


class RouteAggregateFixture(fixtures.Fixture):
    """Fixture managing the lifecycle of a contrail RouteAggregate object.

    Creates a randomly named RouteAggregate under the connection's project on
    setUp and deletes it on cleanUp.  Helpers attach/detach the aggregate to a
    service-instance interface and verify the aggregated route on the control
    nodes.
    """

    def __init__(self, connections, prefix=None):
        """
        Args:
            connections: test-framework connections object providing VNC API,
                inspect handles and a logger.
            prefix: str or list of str route prefixes to aggregate.
        """
        self.connections = connections
        self.inputs = connections.inputs
        self.logger = self.connections.logger
        self.vnc_lib_h = self.connections.get_vnc_lib_h()
        self.api_s_inspect = self.connections.api_server_inspect
        self.cn_inspect = self.connections.cn_inspect
        self.project = self.connections.project_name
        self.agg_name = get_random_name('agg')
        self.agg_id = None
        self.agg_fq_name = ['default-domain', self.agg_name]
        # Always store the prefixes as a list, even if a single one was given.
        self.prefix = prefix if isinstance(prefix, list) else [prefix]
        self.project_name = self.connections.project_name
        self.obj = None

    def read(self):
        """Refresh self.agg_obj / self.agg_name from the API server."""
        if self.agg_id:
            self.agg_obj = self.vnc_lib_h.route_aggregate_read(id=self.agg_id)
            self.agg_name = self.agg_obj.name
    # end read

    def setUp(self):
        super(RouteAggregateFixture, self).setUp()
        self.create()
    # end setup

    def create(self):
        """Create the RouteAggregate if it has not been created yet."""
        if not self.agg_id:
            project = self.vnc_lib_h.project_read(
                fq_name=['default-domain', self.project_name])
            route_aggregate = RouteAggregate(name=self.agg_name,
                                             parent_obj=project)
            route_aggregate.set_aggregate_route_entries(
                RouteListType(self.prefix))
            self.agg_id = self.vnc_lib_h.route_aggregate_create(
                route_aggregate)
            self.logger.info('created RouteAggregate %s' % self.agg_name)
            self.read()
    # end create

    def attach_route_aggregate_to_si(self, si, interface='left'):
        """Attach the aggregate to service instance `si` on `interface`."""
        self.agg_obj.set_service_instance(
            si, ServiceInterfaceTag(interface_type=interface))
        self.vnc_lib_h.route_aggregate_update(self.agg_obj)
    # end attach_route_aggregate_to_si

    def remove_route_aggregate_from_si(self, si):
        """Detach the aggregate from service instance `si`."""
        self.agg_obj.del_service_instance(si)
        self.vnc_lib_h.route_aggregate_update(self.agg_obj)
    # end remove_route_aggregate_from_si

    def update_route_aggregate(self, prefix, interface=None, si=None):
        """Replace the aggregate's route list and optionally re-attach it.

        Args:
            prefix: str or list of str prefixes to aggregate from now on.
            interface: optional interface type (e.g. 'left') to attach to;
                requires `si`.
            si: service-instance object; mandatory when `interface` is given.
        """
        # Bug fix: the old implementation ignored its `prefix` argument and
        # rebuilt the route list from the stale self.prefix.
        self.prefix = prefix if isinstance(prefix, list) else [prefix]
        self.agg_obj.set_aggregate_route_entries(RouteListType(self.prefix))
        if interface:
            if si is None:
                # The old code referenced an undefined name `si` here and
                # always crashed with NameError; fail with a clear message.
                raise ValueError('A service instance (si) must be passed '
                                 'when an interface is specified')
            self.agg_obj.set_service_instance(
                si, ServiceInterfaceTag(interface_type=interface))
        self.vnc_lib_h.route_aggregate_update(self.agg_obj)
    # end update_route_aggregate

    @retry(delay=1, tries=10)
    def verify_route_aggregate_in_control(self, vn_fixture, vm_fixture,
                                          prefix='', search_value='Aggregate'):
        """Check that every control node of `vm_fixture` has the aggregate.

        Returns True only if `search_value` appears in the route-table entry
        for `prefix` on all control nodes.
        """
        for cn in vm_fixture.get_control_nodes():
            entry = self.cn_inspect[cn].get_cn_route_table_entry(
                prefix, vn_fixture.vn_fq_name + ":" + vn_fixture.vn_name)[0]
            if not re.findall(search_value, str(entry)):
                # Bug fix: the old loop logged success unconditionally, even
                # when the aggregate was missing on a node.
                self.logger.info(
                    'Route Aggregate not (yet) found in control node %s' % cn)
                return False
            self.logger.info(
                'Route Aggregates were found in control node %s' % cn)
        return True
    # end verify_route_aggregate_in_control

    def delete(self):
        """Delete the RouteAggregate from the API server."""
        self.logger.info('Deleting RouteAggregate %s' % self.agg_name)
        self.vnc_lib_h.route_aggregate_delete(id=self.agg_id)
    # end delete

    def cleanUp(self):
        self.delete()
        self.logger.info('Deleted RouteAggregate %s' % self.agg_name)
        super(RouteAggregateFixture, self).cleanUp()
    # end cleanup
fixtures/route_agg.py
from tcutils.util import retry, get_random_name
from vnc_api.vnc_api import RouteAggregate, RouteListType, ServiceInterfaceTag
import fixtures
import re


class RouteAggregateFixture(fixtures.Fixture):
    """Fixture managing the lifecycle of a contrail RouteAggregate object.

    Creates a randomly named RouteAggregate under the connection's project on
    setUp and deletes it on cleanUp.  Helpers attach/detach the aggregate to a
    service-instance interface and verify the aggregated route on the control
    nodes.
    """

    def __init__(self, connections, prefix=None):
        """
        Args:
            connections: test-framework connections object providing VNC API,
                inspect handles and a logger.
            prefix: str or list of str route prefixes to aggregate.
        """
        self.connections = connections
        self.inputs = connections.inputs
        self.logger = self.connections.logger
        self.vnc_lib_h = self.connections.get_vnc_lib_h()
        self.api_s_inspect = self.connections.api_server_inspect
        self.cn_inspect = self.connections.cn_inspect
        self.project = self.connections.project_name
        self.agg_name = get_random_name('agg')
        self.agg_id = None
        self.agg_fq_name = ['default-domain', self.agg_name]
        # Always store the prefixes as a list, even if a single one was given.
        self.prefix = prefix if isinstance(prefix, list) else [prefix]
        self.project_name = self.connections.project_name
        self.obj = None

    def read(self):
        """Refresh self.agg_obj / self.agg_name from the API server."""
        if self.agg_id:
            self.agg_obj = self.vnc_lib_h.route_aggregate_read(id=self.agg_id)
            self.agg_name = self.agg_obj.name
    # end read

    def setUp(self):
        super(RouteAggregateFixture, self).setUp()
        self.create()
    # end setup

    def create(self):
        """Create the RouteAggregate if it has not been created yet."""
        if not self.agg_id:
            project = self.vnc_lib_h.project_read(
                fq_name=['default-domain', self.project_name])
            route_aggregate = RouteAggregate(name=self.agg_name,
                                             parent_obj=project)
            route_aggregate.set_aggregate_route_entries(
                RouteListType(self.prefix))
            self.agg_id = self.vnc_lib_h.route_aggregate_create(
                route_aggregate)
            self.logger.info('created RouteAggregate %s' % self.agg_name)
            self.read()
    # end create

    def attach_route_aggregate_to_si(self, si, interface='left'):
        """Attach the aggregate to service instance `si` on `interface`."""
        self.agg_obj.set_service_instance(
            si, ServiceInterfaceTag(interface_type=interface))
        self.vnc_lib_h.route_aggregate_update(self.agg_obj)
    # end attach_route_aggregate_to_si

    def remove_route_aggregate_from_si(self, si):
        """Detach the aggregate from service instance `si`."""
        self.agg_obj.del_service_instance(si)
        self.vnc_lib_h.route_aggregate_update(self.agg_obj)
    # end remove_route_aggregate_from_si

    def update_route_aggregate(self, prefix, interface=None, si=None):
        """Replace the aggregate's route list and optionally re-attach it.

        Args:
            prefix: str or list of str prefixes to aggregate from now on.
            interface: optional interface type (e.g. 'left') to attach to;
                requires `si`.
            si: service-instance object; mandatory when `interface` is given.
        """
        # Bug fix: the old implementation ignored its `prefix` argument and
        # rebuilt the route list from the stale self.prefix.
        self.prefix = prefix if isinstance(prefix, list) else [prefix]
        self.agg_obj.set_aggregate_route_entries(RouteListType(self.prefix))
        if interface:
            if si is None:
                # The old code referenced an undefined name `si` here and
                # always crashed with NameError; fail with a clear message.
                raise ValueError('A service instance (si) must be passed '
                                 'when an interface is specified')
            self.agg_obj.set_service_instance(
                si, ServiceInterfaceTag(interface_type=interface))
        self.vnc_lib_h.route_aggregate_update(self.agg_obj)
    # end update_route_aggregate

    @retry(delay=1, tries=10)
    def verify_route_aggregate_in_control(self, vn_fixture, vm_fixture,
                                          prefix='', search_value='Aggregate'):
        """Check that every control node of `vm_fixture` has the aggregate.

        Returns True only if `search_value` appears in the route-table entry
        for `prefix` on all control nodes.
        """
        for cn in vm_fixture.get_control_nodes():
            entry = self.cn_inspect[cn].get_cn_route_table_entry(
                prefix, vn_fixture.vn_fq_name + ":" + vn_fixture.vn_name)[0]
            if not re.findall(search_value, str(entry)):
                # Bug fix: the old loop logged success unconditionally, even
                # when the aggregate was missing on a node.
                self.logger.info(
                    'Route Aggregate not (yet) found in control node %s' % cn)
                return False
            self.logger.info(
                'Route Aggregates were found in control node %s' % cn)
        return True
    # end verify_route_aggregate_in_control

    def delete(self):
        """Delete the RouteAggregate from the API server."""
        self.logger.info('Deleting RouteAggregate %s' % self.agg_name)
        self.vnc_lib_h.route_aggregate_delete(id=self.agg_id)
    # end delete

    def cleanUp(self):
        self.delete()
        self.logger.info('Deleted RouteAggregate %s' % self.agg_name)
        super(RouteAggregateFixture, self).cleanUp()
    # end cleanup
0.427277
0.068444
import math import time import torch import torch.nn.functional as F from tensornet.engine.ops.regularizer import l1 from tensornet.engine.ops.checkpoint import ModelCheckpoint from tensornet.engine.ops.tensorboard import TensorBoard from tensornet.data.processing import InfiniteDataLoader from tensornet.utils.progress_bar import ProgressBar class Learner: def __init__( self, train_loader, optimizer, criterion, device='cpu', epochs=1, l1_factor=0.0, val_loader=None, callbacks=None, metrics=None, activate_loss_logits=False, record_train=True ): """Train and validate the model. Args: train_loader (torch.utils.data.DataLoader): Training data loader. optimizer (torch.optim): Optimizer for the model. criterion (torch.nn): Loss Function. device (str or torch.device, optional): Device where the data will be loaded. (default='cpu') epochs (int, optional): Numbers of epochs/iterations to train the model for. (default: 1) l1_factor (float, optional): L1 regularization factor. (default: 0) val_loader (torch.utils.data.DataLoader, optional): Validation data loader. (default: None) callbacks (list, optional): List of callbacks to be used during training. (default: None) metrics (list of str, optional): List of names of the metrics for model evaluation. (default: None) activate_loss_logits (bool, optional): If True, the logits will first pass through the `activate_logits` function before going to the criterion. (default: False) record_train (bool, optional): If False, metrics will be calculated only during validation. 
(default: True) """ self.model = None self.optimizer = optimizer self.criterion = criterion self.train_loader = train_loader self.device = device self.epochs = epochs self.val_loader = val_loader self.l1_factor = l1_factor self.activate_loss_logits = activate_loss_logits self.record_train = record_train self.lr_schedulers = { 'step_lr': None, 'lr_plateau': None, 'one_cycle_policy': None, } self.checkpoint = None self.summary_writer = None if not callbacks is None: self._setup_callbacks(callbacks) # Training self.train_losses = [] # Change in loss self.train_metrics = [] # Change in evaluation metric self.val_losses = [] # Change in loss self.val_metrics = [] # Change in evaluation metric # Set evaluation metrics self.metrics = [] if metrics: self._setup_metrics(metrics) def _setup_callbacks(self, callbacks): """Extract callbacks passed to the class. Args: callbacks (list): List of callbacks. """ for callback in callbacks: if isinstance(callback, torch.optim.lr_scheduler.StepLR): self.lr_schedulers['step_lr'] = callback elif isinstance(callback, torch.optim.lr_scheduler.ReduceLROnPlateau): self.lr_schedulers['lr_plateau'] = callback elif isinstance(callback, torch.optim.lr_scheduler.OneCycleLR): self.lr_schedulers['one_cycle_policy'] = callback elif isinstance(callback, ModelCheckpoint): if callback.monitor.startswith('train_'): if self.record_train: self.checkpoint = callback else: raise ValueError( 'Cannot use checkpoint for a training metric if record_train is set to False' ) else: self.checkpoint = callback elif isinstance(callback, TensorBoard): self.summary_writer = callback def set_model(self, model): """Assign model to learner. Args: model (torch.nn.Module): Model Instance. """ self.model = model if not self.summary_writer is None: self.summary_writer.write_model(self.model) def _accuracy(self, label, prediction, idx=0): """Calculate accuracy. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. 
Returns: accuracy """ self.metrics[idx]['accuracy']['sum'] += prediction.eq( label.view_as(prediction) ).sum().item() self.metrics[idx]['accuracy']['num_steps'] += len(label) self.metrics[idx]['accuracy']['value'] = round( 100 * self.metrics[idx]['accuracy']['sum'] / self.metrics[idx]['accuracy']['num_steps'], 2 ) def _iou(self, label, prediction, idx=0): """Calculate Intersection over Union. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. Returns: IoU """ # Remove 1 channel dimension label = label.squeeze(1) prediction = prediction.squeeze(1) intersection = (prediction * label).sum(2).sum(1) union = (prediction + label).sum(2).sum(1) - intersection # epsilon is added to avoid 0/0 epsilon = 1e-6 iou = (intersection + epsilon) / (union + epsilon) self.metrics[idx]['iou']['sum'] += iou.sum().item() self.metrics[idx]['iou']['num_steps'] += label.size(0) self.metrics[idx]['iou']['value'] = round( self.metrics[idx]['iou']['sum'] / self.metrics[idx]['iou']['num_steps'], 3 ) def _pred_label_diff(self, label, prediction, rel=False): """Calculate the difference between label and prediction. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. rel (bool, optional): If True, return the relative difference. (default: False) Returns: Difference between label and prediction """ # For numerical stability valid_labels = label > 0.0001 _label = label[valid_labels] _prediction = prediction[valid_labels] valid_element_count = _label.size(0) if valid_element_count > 0: diff = torch.abs(_label - _prediction) if rel: diff = torch.div(diff, _label) return diff, valid_element_count def _rmse(self, label, prediction, idx=0): """Calculate Root Mean Square Error. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. 
Returns: Root Mean Square Error """ diff = self._pred_label_diff(label, prediction) rmse = 0 if not diff is None: rmse = math.sqrt(torch.sum(torch.pow(diff[0], 2)) / diff[1]) self.metrics[idx]['rmse']['num_steps'] += label.size(0) self.metrics[idx]['rmse']['sum'] += rmse * label.size(0) self.metrics[idx]['rmse']['value'] = round( self.metrics[idx]['rmse']['sum'] / self.metrics[idx]['rmse']['num_steps'], 3 ) def _mae(self, label, prediction, idx=0): """Calculate Mean Average Error. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. Returns: Mean Average Error """ diff = self._pred_label_diff(label, prediction) mae = 0 if not diff is None: mae = torch.sum(diff[0]).item() / diff[1] self.metrics[idx]['mae']['num_steps'] += label.size(0) self.metrics[idx]['mae']['sum'] += mae * label.size(0) self.metrics[idx]['mae']['value'] = round( self.metrics[idx]['mae']['sum'] / self.metrics[idx]['mae']['num_steps'], 3 ) def _abs_rel(self, label, prediction, idx=0): """Calculate Absolute Relative Error. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. Returns: Absolute Relative Error """ diff = self._pred_label_diff(label, prediction, rel=True) abs_rel = 0 if not diff is None: abs_rel = torch.sum(diff[0]).item() / diff[1] self.metrics[idx]['abs_rel']['num_steps'] += label.size(0) self.metrics[idx]['abs_rel']['sum'] += abs_rel * label.size(0) self.metrics[idx]['abs_rel']['value'] = round( self.metrics[idx]['abs_rel']['sum'] / self.metrics[idx]['abs_rel']['num_steps'], 3 ) def _setup_metrics(self, metrics): """Validate the evaluation metrics passed to the class. Args: metrics (list or dict): Metrics. 
""" if not isinstance(metrics[0], (list, tuple)): metrics = [metrics] for idx, metric_list in enumerate(metrics): metric_dict = {} for metric in metric_list: metric_info = {'value': 0, 'sum': 0, 'num_steps': 0} if metric == 'accuracy': metric_info['func'] = self._accuracy elif metric == 'rmse': metric_info['func'] = self._rmse elif metric == 'mae': metric_info['func'] = self._mae elif metric == 'abs_rel': metric_info['func'] = self._abs_rel elif metric == 'iou': metric_info['func'] = self._iou if 'func' in metric_info: metric_dict[metric] = metric_info if metric_dict: self.metrics.append(metric_dict) self.train_metrics.append({ x: [] for x in metric_dict.keys() }) self.val_metrics.append({ x: [] for x in metric_dict.keys() }) def _calculate_metrics(self, labels, predictions): """Update evaluation metric values. Args: label (torch.Tensor or dict): Ground truth. prediction (torch.Tensor or dict): Prediction. """ predictions = self.activate_logits(predictions) if not isinstance(labels, (list, tuple)): labels = [labels] predictions = [predictions] for idx, (label, prediction) in enumerate(zip(labels, predictions)): # If predictions are one-hot encoded if label.size() != prediction.size(): prediction = prediction.argmax(dim=1, keepdim=True) * 1.0 if idx < len(self.metrics): for metric in self.metrics[idx]: self.metrics[idx][metric]['func']( label, prediction, idx=idx ) def _reset_metrics(self): """Reset metric params.""" for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.metrics[idx][metric]['value'] = 0 self.metrics[idx][metric]['sum'] = 0 self.metrics[idx][metric]['num_steps'] = 0 def _get_pbar_values(self, loss): """Create progress bar description. Args: loss (float): Loss value. 
""" pbar_values = [('loss', round(loss, 2))] if self.metrics and self.record_train: for idx in range(len(self.metrics)): for metric, info in self.metrics[idx].items(): metric_name = metric if len(self.metrics) > 1: metric_name = f'{idx} - {metric}' pbar_values.append((metric_name, info['value'])) return pbar_values def update_training_history(self, loss): """Update the training history. Args: loss (float): Loss value. """ self.train_losses.append(loss) if self.record_train: for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.train_metrics[idx][metric].append( self.metrics[idx][metric]['value'] ) def reset_history(self): """Reset the training history""" self.train_losses = [] self.val_losses = [] for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.train_metrics[idx][metric] = [] self.val_metrics[idx][metric] = [] self._reset_metrics() def activate_logits(self, logits): """Apply activation function to the logits if needed. After this the logits will be sent for calculation of loss or evaluation metrics. Args: logits: Model output Returns: activated logits """ return logits def calculate_criterion(self, logits, targets, train=True): """Calculate loss. Args: logits (torch.Tensor): Prediction. targets (torch.Tensor): Ground truth. train (bool, optional): If True, loss is sent to the L1 regularization function. (default: True) Returns: loss value """ if self.activate_loss_logits: logits = self.activate_logits(logits) if train: return l1(self.model, self.criterion(logits, targets), self.l1_factor) return self.criterion(logits, targets) def fetch_data(self, data): """Fetch data from loader and load it to GPU. Args: data (list or tuple): List containing inputs and targets. Returns: inputs and targets loaded to GPU. """ return data[0].to(self.device), data[1].to(self.device) def train_batch(self, data): """Train the model on a batch of data. Args: data: Input and target data for the model. Returns: Batch loss. 
""" inputs, targets = self.fetch_data(data) self.optimizer.zero_grad() # Set gradients to zero before starting backpropagation y_pred = self.model(inputs) # Predict output loss = self.calculate_criterion(y_pred, targets, train=True) # Calculate loss # Perform backpropagation loss.backward() self.optimizer.step() if self.record_train: self._calculate_metrics(targets, y_pred) # One Cycle Policy for learning rate if not self.lr_schedulers['one_cycle_policy'] is None: self.lr_schedulers['one_cycle_policy'].step() return loss.item() def train_epoch(self): """Run an epoch of model training.""" self.model.train() pbar = ProgressBar(target=len(self.train_loader), width=8) for batch_idx, data in enumerate(self.train_loader, 0): # Train a batch loss = self.train_batch(data) # Update Progress Bar pbar_values = self._get_pbar_values(loss) pbar.update(batch_idx, values=pbar_values) # Update training history self.update_training_history(loss) pbar_values = self._get_pbar_values(loss) pbar.add(1, values=pbar_values) def train_iterations(self): """Train model for the 'self.epochs' number of batches.""" self.model.train() pbar = ProgressBar(target=self.epochs, width=8) iterator = InfiniteDataLoader(self.train_loader) for iteration in range(self.epochs): # Train a batch loss = self.train_batch(iterator.get_batch()) # Update Progress Bar pbar_values = self._get_pbar_values(loss) pbar.update(iteration, values=pbar_values) # Update training history self.update_training_history(loss) pbar.add(1, values=pbar_values) def validate(self, verbose=True): """Validate an epoch of model training. Args: verbose: Print validation loss and accuracy. 
""" start_time = time.time() self.model.eval() val_loss = 0 correct = 0 with torch.no_grad(): for data in self.val_loader: inputs, targets = self.fetch_data(data) output = self.model(inputs) # Get trained model output val_loss += self.calculate_criterion(output, targets, train=False).item() # Sum up batch loss self._calculate_metrics(targets, output) # Calculate evaluation metrics val_loss /= len(self.val_loader.dataset) self.val_losses.append(val_loss) for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.val_metrics[idx][metric].append( self.metrics[idx][metric]['value'] ) end_time = time.time() # Time spent during validation duration = int(end_time - start_time) minutes = duration // 60 seconds = duration % 60 if verbose: log = f'Validation set (took {minutes} minutes, {seconds} seconds): Average loss: {val_loss:.4f}' for idx in range(len(self.metrics)): for metric in self.metrics[idx]: log += f', {metric}: {self.metrics[idx][metric]["value"]}' log += '\n' print(log) def save_checkpoint(self, epoch=None): """Save model checkpoint. Args: epoch (int, optional): Current epoch number. (default: None) """ if not self.checkpoint is None: metric = None params = {} if self.checkpoint.monitor == 'train_loss': metric = self.train_losses[-1] elif self.checkpoint.monitor == 'val_loss': metric = self.val_losses[-1] elif self.metrics: if self.checkpoint.monitor.startswith('train_'): if self.record_train: metric = self.train_metrics[ self.checkpoint.monitor.split('train_')[-1] ][-1] else: metric = self.val_metrics[ self.checkpoint.monitor.split('val_')[-1] ][-1] else: print('Invalid metric function, can\'t save checkpoint.') return self.checkpoint(self.model, metric, epoch) def write_summary(self, epoch, train): """Write training summary in tensorboard. Args: epoch (int): Current epoch number. train (bool): If True, summary will be written for model training else it will be writtern for model validation. 
""" if not self.summary_writer is None: if train: mode = 'train' # Write Images self.summary_writer.write_images( self.model, self.activate_logits, f'prediction_epoch_{epoch}' ) loss = self.train_losses[-1] else: mode = 'val' loss = self.val_losses[-1] # Write Loss self.summary_writer.write_scalar( f'Loss/{mode}', loss, epoch ) if not train or self.record_train: for idx in range(len(self.metrics)): for metric, info in self.metrics[idx].items(): self.summary_writer.write_scalar( f'{idx}/{metric.title()}/{mode}', info['value'], epoch ) def fit(self, start_epoch=1): """Perform model training. Args: start_epoch (int, optional): Start epoch for training. (default: 1) """ self.reset_history() for epoch in range(start_epoch, start_epoch + self.epochs): print(f'Epoch {epoch}:') # Train an epoch self.train_epoch() self.write_summary(epoch, True) self._reset_metrics() # Validate the model if not self.val_loader is None: self.validate() self.write_summary(epoch, False) self._reset_metrics() # Save model checkpoint self.save_checkpoint(epoch) # Call Step LR if not self.lr_schedulers['step_lr'] is None: self.lr_schedulers['step_lr'].step() # Call Reduce LR on Plateau if not self.lr_schedulers['lr_plateau'] is None: self.lr_schedulers['lr_plateau'].step(self.val_losses[-1])
tensornet/engine/learner.py
import math import time import torch import torch.nn.functional as F from tensornet.engine.ops.regularizer import l1 from tensornet.engine.ops.checkpoint import ModelCheckpoint from tensornet.engine.ops.tensorboard import TensorBoard from tensornet.data.processing import InfiniteDataLoader from tensornet.utils.progress_bar import ProgressBar class Learner: def __init__( self, train_loader, optimizer, criterion, device='cpu', epochs=1, l1_factor=0.0, val_loader=None, callbacks=None, metrics=None, activate_loss_logits=False, record_train=True ): """Train and validate the model. Args: train_loader (torch.utils.data.DataLoader): Training data loader. optimizer (torch.optim): Optimizer for the model. criterion (torch.nn): Loss Function. device (str or torch.device, optional): Device where the data will be loaded. (default='cpu') epochs (int, optional): Numbers of epochs/iterations to train the model for. (default: 1) l1_factor (float, optional): L1 regularization factor. (default: 0) val_loader (torch.utils.data.DataLoader, optional): Validation data loader. (default: None) callbacks (list, optional): List of callbacks to be used during training. (default: None) metrics (list of str, optional): List of names of the metrics for model evaluation. (default: None) activate_loss_logits (bool, optional): If True, the logits will first pass through the `activate_logits` function before going to the criterion. (default: False) record_train (bool, optional): If False, metrics will be calculated only during validation. 
(default: True) """ self.model = None self.optimizer = optimizer self.criterion = criterion self.train_loader = train_loader self.device = device self.epochs = epochs self.val_loader = val_loader self.l1_factor = l1_factor self.activate_loss_logits = activate_loss_logits self.record_train = record_train self.lr_schedulers = { 'step_lr': None, 'lr_plateau': None, 'one_cycle_policy': None, } self.checkpoint = None self.summary_writer = None if not callbacks is None: self._setup_callbacks(callbacks) # Training self.train_losses = [] # Change in loss self.train_metrics = [] # Change in evaluation metric self.val_losses = [] # Change in loss self.val_metrics = [] # Change in evaluation metric # Set evaluation metrics self.metrics = [] if metrics: self._setup_metrics(metrics) def _setup_callbacks(self, callbacks): """Extract callbacks passed to the class. Args: callbacks (list): List of callbacks. """ for callback in callbacks: if isinstance(callback, torch.optim.lr_scheduler.StepLR): self.lr_schedulers['step_lr'] = callback elif isinstance(callback, torch.optim.lr_scheduler.ReduceLROnPlateau): self.lr_schedulers['lr_plateau'] = callback elif isinstance(callback, torch.optim.lr_scheduler.OneCycleLR): self.lr_schedulers['one_cycle_policy'] = callback elif isinstance(callback, ModelCheckpoint): if callback.monitor.startswith('train_'): if self.record_train: self.checkpoint = callback else: raise ValueError( 'Cannot use checkpoint for a training metric if record_train is set to False' ) else: self.checkpoint = callback elif isinstance(callback, TensorBoard): self.summary_writer = callback def set_model(self, model): """Assign model to learner. Args: model (torch.nn.Module): Model Instance. """ self.model = model if not self.summary_writer is None: self.summary_writer.write_model(self.model) def _accuracy(self, label, prediction, idx=0): """Calculate accuracy. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. 
Returns: accuracy """ self.metrics[idx]['accuracy']['sum'] += prediction.eq( label.view_as(prediction) ).sum().item() self.metrics[idx]['accuracy']['num_steps'] += len(label) self.metrics[idx]['accuracy']['value'] = round( 100 * self.metrics[idx]['accuracy']['sum'] / self.metrics[idx]['accuracy']['num_steps'], 2 ) def _iou(self, label, prediction, idx=0): """Calculate Intersection over Union. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. Returns: IoU """ # Remove 1 channel dimension label = label.squeeze(1) prediction = prediction.squeeze(1) intersection = (prediction * label).sum(2).sum(1) union = (prediction + label).sum(2).sum(1) - intersection # epsilon is added to avoid 0/0 epsilon = 1e-6 iou = (intersection + epsilon) / (union + epsilon) self.metrics[idx]['iou']['sum'] += iou.sum().item() self.metrics[idx]['iou']['num_steps'] += label.size(0) self.metrics[idx]['iou']['value'] = round( self.metrics[idx]['iou']['sum'] / self.metrics[idx]['iou']['num_steps'], 3 ) def _pred_label_diff(self, label, prediction, rel=False): """Calculate the difference between label and prediction. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. rel (bool, optional): If True, return the relative difference. (default: False) Returns: Difference between label and prediction """ # For numerical stability valid_labels = label > 0.0001 _label = label[valid_labels] _prediction = prediction[valid_labels] valid_element_count = _label.size(0) if valid_element_count > 0: diff = torch.abs(_label - _prediction) if rel: diff = torch.div(diff, _label) return diff, valid_element_count def _rmse(self, label, prediction, idx=0): """Calculate Root Mean Square Error. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. 
Returns: Root Mean Square Error """ diff = self._pred_label_diff(label, prediction) rmse = 0 if not diff is None: rmse = math.sqrt(torch.sum(torch.pow(diff[0], 2)) / diff[1]) self.metrics[idx]['rmse']['num_steps'] += label.size(0) self.metrics[idx]['rmse']['sum'] += rmse * label.size(0) self.metrics[idx]['rmse']['value'] = round( self.metrics[idx]['rmse']['sum'] / self.metrics[idx]['rmse']['num_steps'], 3 ) def _mae(self, label, prediction, idx=0): """Calculate Mean Average Error. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. Returns: Mean Average Error """ diff = self._pred_label_diff(label, prediction) mae = 0 if not diff is None: mae = torch.sum(diff[0]).item() / diff[1] self.metrics[idx]['mae']['num_steps'] += label.size(0) self.metrics[idx]['mae']['sum'] += mae * label.size(0) self.metrics[idx]['mae']['value'] = round( self.metrics[idx]['mae']['sum'] / self.metrics[idx]['mae']['num_steps'], 3 ) def _abs_rel(self, label, prediction, idx=0): """Calculate Absolute Relative Error. Args: label (torch.Tensor): Ground truth. prediction (torch.Tensor): Prediction. Returns: Absolute Relative Error """ diff = self._pred_label_diff(label, prediction, rel=True) abs_rel = 0 if not diff is None: abs_rel = torch.sum(diff[0]).item() / diff[1] self.metrics[idx]['abs_rel']['num_steps'] += label.size(0) self.metrics[idx]['abs_rel']['sum'] += abs_rel * label.size(0) self.metrics[idx]['abs_rel']['value'] = round( self.metrics[idx]['abs_rel']['sum'] / self.metrics[idx]['abs_rel']['num_steps'], 3 ) def _setup_metrics(self, metrics): """Validate the evaluation metrics passed to the class. Args: metrics (list or dict): Metrics. 
""" if not isinstance(metrics[0], (list, tuple)): metrics = [metrics] for idx, metric_list in enumerate(metrics): metric_dict = {} for metric in metric_list: metric_info = {'value': 0, 'sum': 0, 'num_steps': 0} if metric == 'accuracy': metric_info['func'] = self._accuracy elif metric == 'rmse': metric_info['func'] = self._rmse elif metric == 'mae': metric_info['func'] = self._mae elif metric == 'abs_rel': metric_info['func'] = self._abs_rel elif metric == 'iou': metric_info['func'] = self._iou if 'func' in metric_info: metric_dict[metric] = metric_info if metric_dict: self.metrics.append(metric_dict) self.train_metrics.append({ x: [] for x in metric_dict.keys() }) self.val_metrics.append({ x: [] for x in metric_dict.keys() }) def _calculate_metrics(self, labels, predictions): """Update evaluation metric values. Args: label (torch.Tensor or dict): Ground truth. prediction (torch.Tensor or dict): Prediction. """ predictions = self.activate_logits(predictions) if not isinstance(labels, (list, tuple)): labels = [labels] predictions = [predictions] for idx, (label, prediction) in enumerate(zip(labels, predictions)): # If predictions are one-hot encoded if label.size() != prediction.size(): prediction = prediction.argmax(dim=1, keepdim=True) * 1.0 if idx < len(self.metrics): for metric in self.metrics[idx]: self.metrics[idx][metric]['func']( label, prediction, idx=idx ) def _reset_metrics(self): """Reset metric params.""" for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.metrics[idx][metric]['value'] = 0 self.metrics[idx][metric]['sum'] = 0 self.metrics[idx][metric]['num_steps'] = 0 def _get_pbar_values(self, loss): """Create progress bar description. Args: loss (float): Loss value. 
""" pbar_values = [('loss', round(loss, 2))] if self.metrics and self.record_train: for idx in range(len(self.metrics)): for metric, info in self.metrics[idx].items(): metric_name = metric if len(self.metrics) > 1: metric_name = f'{idx} - {metric}' pbar_values.append((metric_name, info['value'])) return pbar_values def update_training_history(self, loss): """Update the training history. Args: loss (float): Loss value. """ self.train_losses.append(loss) if self.record_train: for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.train_metrics[idx][metric].append( self.metrics[idx][metric]['value'] ) def reset_history(self): """Reset the training history""" self.train_losses = [] self.val_losses = [] for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.train_metrics[idx][metric] = [] self.val_metrics[idx][metric] = [] self._reset_metrics() def activate_logits(self, logits): """Apply activation function to the logits if needed. After this the logits will be sent for calculation of loss or evaluation metrics. Args: logits: Model output Returns: activated logits """ return logits def calculate_criterion(self, logits, targets, train=True): """Calculate loss. Args: logits (torch.Tensor): Prediction. targets (torch.Tensor): Ground truth. train (bool, optional): If True, loss is sent to the L1 regularization function. (default: True) Returns: loss value """ if self.activate_loss_logits: logits = self.activate_logits(logits) if train: return l1(self.model, self.criterion(logits, targets), self.l1_factor) return self.criterion(logits, targets) def fetch_data(self, data): """Fetch data from loader and load it to GPU. Args: data (list or tuple): List containing inputs and targets. Returns: inputs and targets loaded to GPU. """ return data[0].to(self.device), data[1].to(self.device) def train_batch(self, data): """Train the model on a batch of data. Args: data: Input and target data for the model. Returns: Batch loss. 
""" inputs, targets = self.fetch_data(data) self.optimizer.zero_grad() # Set gradients to zero before starting backpropagation y_pred = self.model(inputs) # Predict output loss = self.calculate_criterion(y_pred, targets, train=True) # Calculate loss # Perform backpropagation loss.backward() self.optimizer.step() if self.record_train: self._calculate_metrics(targets, y_pred) # One Cycle Policy for learning rate if not self.lr_schedulers['one_cycle_policy'] is None: self.lr_schedulers['one_cycle_policy'].step() return loss.item() def train_epoch(self): """Run an epoch of model training.""" self.model.train() pbar = ProgressBar(target=len(self.train_loader), width=8) for batch_idx, data in enumerate(self.train_loader, 0): # Train a batch loss = self.train_batch(data) # Update Progress Bar pbar_values = self._get_pbar_values(loss) pbar.update(batch_idx, values=pbar_values) # Update training history self.update_training_history(loss) pbar_values = self._get_pbar_values(loss) pbar.add(1, values=pbar_values) def train_iterations(self): """Train model for the 'self.epochs' number of batches.""" self.model.train() pbar = ProgressBar(target=self.epochs, width=8) iterator = InfiniteDataLoader(self.train_loader) for iteration in range(self.epochs): # Train a batch loss = self.train_batch(iterator.get_batch()) # Update Progress Bar pbar_values = self._get_pbar_values(loss) pbar.update(iteration, values=pbar_values) # Update training history self.update_training_history(loss) pbar.add(1, values=pbar_values) def validate(self, verbose=True): """Validate an epoch of model training. Args: verbose: Print validation loss and accuracy. 
""" start_time = time.time() self.model.eval() val_loss = 0 correct = 0 with torch.no_grad(): for data in self.val_loader: inputs, targets = self.fetch_data(data) output = self.model(inputs) # Get trained model output val_loss += self.calculate_criterion(output, targets, train=False).item() # Sum up batch loss self._calculate_metrics(targets, output) # Calculate evaluation metrics val_loss /= len(self.val_loader.dataset) self.val_losses.append(val_loss) for idx in range(len(self.metrics)): for metric in self.metrics[idx]: self.val_metrics[idx][metric].append( self.metrics[idx][metric]['value'] ) end_time = time.time() # Time spent during validation duration = int(end_time - start_time) minutes = duration // 60 seconds = duration % 60 if verbose: log = f'Validation set (took {minutes} minutes, {seconds} seconds): Average loss: {val_loss:.4f}' for idx in range(len(self.metrics)): for metric in self.metrics[idx]: log += f', {metric}: {self.metrics[idx][metric]["value"]}' log += '\n' print(log) def save_checkpoint(self, epoch=None): """Save model checkpoint. Args: epoch (int, optional): Current epoch number. (default: None) """ if not self.checkpoint is None: metric = None params = {} if self.checkpoint.monitor == 'train_loss': metric = self.train_losses[-1] elif self.checkpoint.monitor == 'val_loss': metric = self.val_losses[-1] elif self.metrics: if self.checkpoint.monitor.startswith('train_'): if self.record_train: metric = self.train_metrics[ self.checkpoint.monitor.split('train_')[-1] ][-1] else: metric = self.val_metrics[ self.checkpoint.monitor.split('val_')[-1] ][-1] else: print('Invalid metric function, can\'t save checkpoint.') return self.checkpoint(self.model, metric, epoch) def write_summary(self, epoch, train): """Write training summary in tensorboard. Args: epoch (int): Current epoch number. train (bool): If True, summary will be written for model training else it will be writtern for model validation. 
""" if not self.summary_writer is None: if train: mode = 'train' # Write Images self.summary_writer.write_images( self.model, self.activate_logits, f'prediction_epoch_{epoch}' ) loss = self.train_losses[-1] else: mode = 'val' loss = self.val_losses[-1] # Write Loss self.summary_writer.write_scalar( f'Loss/{mode}', loss, epoch ) if not train or self.record_train: for idx in range(len(self.metrics)): for metric, info in self.metrics[idx].items(): self.summary_writer.write_scalar( f'{idx}/{metric.title()}/{mode}', info['value'], epoch ) def fit(self, start_epoch=1): """Perform model training. Args: start_epoch (int, optional): Start epoch for training. (default: 1) """ self.reset_history() for epoch in range(start_epoch, start_epoch + self.epochs): print(f'Epoch {epoch}:') # Train an epoch self.train_epoch() self.write_summary(epoch, True) self._reset_metrics() # Validate the model if not self.val_loader is None: self.validate() self.write_summary(epoch, False) self._reset_metrics() # Save model checkpoint self.save_checkpoint(epoch) # Call Step LR if not self.lr_schedulers['step_lr'] is None: self.lr_schedulers['step_lr'].step() # Call Reduce LR on Plateau if not self.lr_schedulers['lr_plateau'] is None: self.lr_schedulers['lr_plateau'].step(self.val_losses[-1])
0.934939
0.316739
"""Events functions""" import numpy as np # From Salamon's code # https://github.com/justinsalamon/scaper_waspaa2017/blob/master/urban_sed/util.py def contiguous_regions(act): act = np.asarray(act) onsets = np.where(np.diff(act) == 1)[0] + 1 offsets = np.where(np.diff(act) == -1)[0] + 1 # SPECIAL CASES # If there are no onsets and no offsets (all of act is the same value) if len(onsets) == 0 and len(offsets) == 0: if act[0] == 0: return np.asarray([]) else: return np.asarray([[0, len(act)]]) # If there are no onsets if len(onsets) == 0 and len(offsets) != 0: onsets = np.insert(onsets, 0, 0) # If there are no offsets if len(onsets) != 0 and len(offsets) == 0: offsets = np.insert(offsets, len(offsets), len(act)) # If there's an onset before an offset, first onset is frame 0 if onsets[0] > offsets[0]: onsets = np.insert(onsets, 0, 0) # If there's an onset after the last offset, then we need to add an offset # Offset is last index of activation (so that gives inverse of sed_eval) if onsets[-1] > offsets[-1]: offsets = np.insert(offsets, len(offsets), len(act)) assert len(onsets) == len(offsets) assert (onsets <= offsets).all() return np.asarray([onsets, offsets]).T # From Salamon's code # https://github.com/justinsalamon/scaper_waspaa2017/blob/master/urban_sed/util.py def event_roll_to_event_list(event_roll, event_label_list, time_resolution): """ Convert a event roll matrix to a event list. Parameters ---------- event_roll : ndarray Shape (N_times, N_classes) event_label_list : list of str Label list time_resolution : float Time resolution of the event_roll. Returns ------- list List of dicts with events information. e.g. [{'event_onset': 0.1, 'event_offset': 1.5, 'event_label' : 'dog'}, ...] 
""" event_list = [] for event_id, event_label in enumerate(event_label_list): event_activity = event_roll[:, event_id] event_segments = contiguous_regions(event_activity) * time_resolution for event in event_segments: event_list.append( {'event_onset': event[0], 'event_offset': event[1], 'event_label': event_label}) return event_list def tag_probabilities_to_tag_list(tag_probabilities, label_list, threshold=0.5): """ Convert a tag probabilites matrix to a tag list. Parameters ---------- tag_probabilities : ndarray Shape (N_times, N_classes) label_list : list of str Label list threshold : float Threshold to decide if a tag is present. Returns ------- list List of tags. e.g. ['dog', 'cat', ...] """ tag_binary = (tag_probabilities > threshold).astype(int) tag_indexes = np.argwhere(tag_binary == 1) tag_list = [label_list[index[0]] for index in tag_indexes] return tag_list
dcase_models/util/events.py
"""Events functions""" import numpy as np # From Salamon's code # https://github.com/justinsalamon/scaper_waspaa2017/blob/master/urban_sed/util.py def contiguous_regions(act): act = np.asarray(act) onsets = np.where(np.diff(act) == 1)[0] + 1 offsets = np.where(np.diff(act) == -1)[0] + 1 # SPECIAL CASES # If there are no onsets and no offsets (all of act is the same value) if len(onsets) == 0 and len(offsets) == 0: if act[0] == 0: return np.asarray([]) else: return np.asarray([[0, len(act)]]) # If there are no onsets if len(onsets) == 0 and len(offsets) != 0: onsets = np.insert(onsets, 0, 0) # If there are no offsets if len(onsets) != 0 and len(offsets) == 0: offsets = np.insert(offsets, len(offsets), len(act)) # If there's an onset before an offset, first onset is frame 0 if onsets[0] > offsets[0]: onsets = np.insert(onsets, 0, 0) # If there's an onset after the last offset, then we need to add an offset # Offset is last index of activation (so that gives inverse of sed_eval) if onsets[-1] > offsets[-1]: offsets = np.insert(offsets, len(offsets), len(act)) assert len(onsets) == len(offsets) assert (onsets <= offsets).all() return np.asarray([onsets, offsets]).T # From Salamon's code # https://github.com/justinsalamon/scaper_waspaa2017/blob/master/urban_sed/util.py def event_roll_to_event_list(event_roll, event_label_list, time_resolution): """ Convert a event roll matrix to a event list. Parameters ---------- event_roll : ndarray Shape (N_times, N_classes) event_label_list : list of str Label list time_resolution : float Time resolution of the event_roll. Returns ------- list List of dicts with events information. e.g. [{'event_onset': 0.1, 'event_offset': 1.5, 'event_label' : 'dog'}, ...] 
""" event_list = [] for event_id, event_label in enumerate(event_label_list): event_activity = event_roll[:, event_id] event_segments = contiguous_regions(event_activity) * time_resolution for event in event_segments: event_list.append( {'event_onset': event[0], 'event_offset': event[1], 'event_label': event_label}) return event_list def tag_probabilities_to_tag_list(tag_probabilities, label_list, threshold=0.5): """ Convert a tag probabilites matrix to a tag list. Parameters ---------- tag_probabilities : ndarray Shape (N_times, N_classes) label_list : list of str Label list threshold : float Threshold to decide if a tag is present. Returns ------- list List of tags. e.g. ['dog', 'cat', ...] """ tag_binary = (tag_probabilities > threshold).astype(int) tag_indexes = np.argwhere(tag_binary == 1) tag_list = [label_list[index[0]] for index in tag_indexes] return tag_list
0.888517
0.751089
from __future__ import print_function
import sys
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
from data import VOC_ROOT, VOC_CLASSES as labelmap
from PIL import Image
from data import VOCAnnotationTransform, VOCDetection, BaseTransform, VOC_CLASSES
import torch.utils.data as data
from ssd import build_ssd

parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--trained_model', default='weights/ssd300_COCO_70000.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
                    help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.6, type=float,
                    help='Final confidence threshold')
parser.add_argument('--cuda', default=True, type=bool,
                    help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT,
                    help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str,
                    help="Dummy arg so we can load in Jupyter Notebooks")
args = parser.parse_args()

if args.cuda and torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')

if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)


def test_net(save_folder, net, cuda, testset, transform, thresh):
    """Run the detector over ``testset`` and dump ground truth plus
    predictions to ``<save_folder>test1.txt``.

    Args:
        save_folder (str): directory prefix for the output text file.
        net: SSD network in eval mode.
        cuda (bool): move inputs to the GPU when True.
        testset: dataset providing ``pull_image`` / ``pull_anno``.
        transform: preprocessing transform applied to each image.
        thresh (float): confidence threshold for keeping a detection.
    """
    filename = save_folder + 'test1.txt'
    num_images = len(testset)
    for img_idx in range(num_images):
        print('Testing image {:d}/{:d}....'.format(img_idx + 1, num_images))
        img = testset.pull_image(img_idx)
        img_id, annotation = testset.pull_anno(img_idx)
        x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))

        with open(filename, mode='a') as f:
            f.write('\nGROUND TRUTH FOR: ' + img_id + '\n')
            for box in annotation:
                f.write('label: ' + ' || '.join(str(b) for b in box) + '\n')
        if cuda:
            x = x.cuda()

        y = net(x)      # forward pass
        detections = y.data
        # scale each detection back up to the image size (x1, y1, x2, y2)
        scale = torch.Tensor([img.shape[1], img.shape[0],
                              img.shape[1], img.shape[0]])
        pred_num = 0
        # NOTE(review): class index 0 maps to labelmap[-1]; presumably
        # class 0 is background and never clears the threshold — confirm.
        for cls_idx in range(detections.size(1)):
            j = 0
            # BUG FIX: the original compared against a hard-coded 0.6,
            # silently ignoring the --visual_threshold value passed in as
            # ``thresh``. Also bound ``j`` so a full row of confident
            # detections cannot index past the end of the tensor.
            while (j < detections.size(2)
                   and detections[0, cls_idx, j, 0] >= thresh):
                if pred_num == 0:
                    with open(filename, mode='a') as f:
                        f.write('PREDICTIONS: ' + '\n')
                score = detections[0, cls_idx, j, 0]
                label_name = labelmap[cls_idx - 1]
                pt = (detections[0, cls_idx, j, 1:] * scale).cpu().numpy()
                coords = (pt[0], pt[1], pt[2], pt[3])
                pred_num += 1
                with open(filename, mode='a') as f:
                    f.write(str(pred_num) + ' label: ' + label_name +
                            ' score: ' + str(score) + ' ' +
                            ' || '.join(str(c) for c in coords) + '\n')
                j += 1


def test_voc():
    """Load the trained SSD model and evaluate it on VOC2007 test."""
    # load net
    num_classes = len(VOC_CLASSES) + 1                      # +1 background
    net = build_ssd('test', 300, num_classes)               # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # load data
    testset = VOCDetection(args.voc_root, [('2007', 'test')],
                           None, VOCAnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    test_net(args.save_folder, net, args.cuda, testset,
             BaseTransform(net.size, (104, 117, 123)),
             thresh=args.visual_threshold)


if __name__ == '__main__':
    test_voc()
test.py
from __future__ import print_function import sys import os import argparse import torch import torch.nn as nn import torch.backends.cudnn as cudnn import torchvision.transforms as transforms from torch.autograd import Variable from data import VOC_ROOT, VOC_CLASSES as labelmap from PIL import Image from data import VOCAnnotationTransform, VOCDetection, BaseTransform, VOC_CLASSES import torch.utils.data as data from ssd import build_ssd parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection') parser.add_argument('--trained_model', default='weights/ssd300_COCO_70000.pth', type=str, help='Trained state_dict file path to open') parser.add_argument('--save_folder', default='eval/', type=str, help='Dir to save results') parser.add_argument('--visual_threshold', default=0.6, type=float, help='Final confidence threshold') parser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model') parser.add_argument('--voc_root', default=VOC_ROOT, help='Location of VOC root directory') parser.add_argument('-f', default=None, type=str, help="Dummy arg so we can load in Jupyter Notebooks") args = parser.parse_args() if args.cuda and torch.cuda.is_available(): torch.set_default_tensor_type('torch.cuda.FloatTensor') else: torch.set_default_tensor_type('torch.FloatTensor') if not os.path.exists(args.save_folder): os.mkdir(args.save_folder) def test_net(save_folder, net, cuda, testset, transform, thresh): # dump predictions and assoc. 
ground truth to text file for now filename = save_folder+'test1.txt' num_images = len(testset) for i in range(num_images): print('Testing image {:d}/{:d}....'.format(i+1, num_images)) img = testset.pull_image(i) img_id, annotation = testset.pull_anno(i) x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1) x = Variable(x.unsqueeze(0)) with open(filename, mode='a') as f: f.write('\nGROUND TRUTH FOR: '+img_id+'\n') for box in annotation: f.write('label: '+' || '.join(str(b) for b in box)+'\n') if cuda: x = x.cuda() y = net(x) # forward pass detections = y.data # scale each detection back up to the image scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]) pred_num = 0 for i in range(detections.size(1)): j = 0 while detections[0, i, j, 0] >= 0.6: if pred_num == 0: with open(filename, mode='a') as f: f.write('PREDICTIONS: '+'\n') score = detections[0, i, j, 0] label_name = labelmap[i-1] pt = (detections[0, i, j, 1:]*scale).cpu().numpy() coords = (pt[0], pt[1], pt[2], pt[3]) pred_num += 1 with open(filename, mode='a') as f: f.write(str(pred_num)+' label: '+label_name+' score: ' + str(score) + ' '+' || '.join(str(c) for c in coords) + '\n') j += 1 def test_voc(): # load net num_classes = len(VOC_CLASSES) + 1 # +1 background net = build_ssd('test', 300, num_classes) # initialize SSD net.load_state_dict(torch.load(args.trained_model)) net.eval() print('Finished loading model!') # load data testset = VOCDetection(args.voc_root, [('2007', 'test')], None, VOCAnnotationTransform()) if args.cuda: net = net.cuda() cudnn.benchmark = True # evaluation test_net(args.save_folder, net, args.cuda, testset, BaseTransform(net.size, (104, 117, 123)), thresh=args.visual_threshold) if __name__ == '__main__': test_voc()
0.502441
0.238622
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase

from CrossData.API.models import *


class EndpointPOSTTestCase(APITestCase):
    """Integration tests for POSTing game records to the games endpoint.

    Each fixture is a list containing one game payload with Steam,
    YouTube and Twitch sub-objects, as consumed by the games_view.
    """

    def setUp(self):
        # Endpoint under test.
        self.url = reverse("games_view")
        # Baseline valid payload.
        self.data = [
            {
                "id_steam": 123,
                "name": "<NAME>",
                "positive_reviews_steam": 1923123,
                "negative_reviews_steam": 12121,
                "owners": 130000,
                "average_forever": 2127,
                "average_2weeks": 132,
                "price": "0",
                "languages": ["mandarim", "espanhol"],
                "genres": ["tiro", "porrada"],
                "main_image": "google.com",
                "screenshots": [
                    {
                        "url": "https://steamcdn-a.akamaihd.net/steam/apps/570/ss_86d675fdc73ba10462abb8f5ece7791c5047072c.600x338.jpg?t=1536248487",
                        "palette": [
                            {"r": 8, "g": 16, "b": 2, "hex": "#1aa741"},
                            {"r": 34, "g": 12, "b": 37, "hex": "#2e204d"},
                            {"r": 22, "g": 48, "b": 34, "hex": "#484454"},
                            {"r": 121, "g": 80, "b": 254, "hex": "#b5b49a"},
                            {"r": 19, "g": 26, "b": 21, "hex": "#3b4233"}
                        ]
                    },
                ],
                "release_date": "1 Feb, 1999",
                "r_average": 83,
                "g_average": 82,
                "b_average": 74,
                "count_videos": 1,
                "count_views": 2609773,
                "count_likes": 5555,
                "count_dislikes": 1107,
                "count_comments": 4152,
                "total_views": 46939,
                "streams": [
                    {
                        "language": "en",
                        "game_id": "29595",
                        "started_at": "2018-11-03T12:00:06Z",
                        "type": "live",
                        "viewer_count": 23661
                    },
                ]
            },
        ]
        # NOTE(review): data_2 is byte-for-byte identical to data. The
        # "*_not_*" tests below read as if this were meant to be an
        # invalid payload (they assert a NON-400 status and NON-zero
        # counts) -- confirm against the serializer what this fixture was
        # supposed to exercise.
        self.data_2 = [
            {
                "id_steam": 123,
                "name": "<NAME>",
                "positive_reviews_steam": 1923123,
                "negative_reviews_steam": 12121,
                "owners": 130000,
                "average_forever": 2127,
                "average_2weeks": 132,
                "price": "0",
                "languages": ["mandarim", "espanhol"],
                "genres": ["tiro", "porrada"],
                "main_image": "google.com",
                "screenshots": [
                    {
                        "url": "https://steamcdn-a.akamaihd.net/steam/apps/570/ss_86d675fdc73ba10462abb8f5ece7791c5047072c.600x338.jpg?t=1536248487",
                        "palette": [
                            {"r": 8, "g": 16, "b": 2, "hex": "#1aa741"},
                            {"r": 34, "g": 12, "b": 37, "hex": "#2e204d"},
                            {"r": 22, "g": 48, "b": 34, "hex": "#484454"},
                            {"r": 121, "g": 80, "b": 254, "hex": "#b5b49a"},
                            {"r": 19, "g": 26, "b": 21, "hex": "#3b4233"}
                        ]
                    },
                ],
                "release_date": "1 Feb, 1999",
                "r_average": 83,
                "g_average": 82,
                "b_average": 74,
                "count_videos": 1,
                "count_views": 2609773,
                "count_likes": 5555,
                "count_dislikes": 1107,
                "count_comments": 4152,
                "total_views": 46939,
                "streams": [
                    {
                        "language": "en",
                        "game_id": "29595",
                        "started_at": "2018-11-03T12:00:06Z",
                        "type": "live",
                        "viewer_count": 23661
                    },
                ]
            },
        ]
        # Second valid payload; differs from data only in id_steam.
        self.data_ok_2 = [
            {
                "id_steam": 94123,
                "name": "<NAME>",
                "positive_reviews_steam": 1923123,
                "negative_reviews_steam": 12121,
                "owners": 130000,
                "average_forever": 2127,
                "average_2weeks": 132,
                "price": "0",
                "languages": ["mandarim", "espanhol"],
                "genres": ["tiro", "porrada"],
                "main_image": "google.com",
                "screenshots": [
                    {
                        "url": "https://steamcdn-a.akamaihd.net/steam/apps/570/ss_86d675fdc73ba10462abb8f5ece7791c5047072c.600x338.jpg?t=1536248487",
                        "palette": [
                            {"r": 8, "g": 16, "b": 2, "hex": "#1aa741"},
                            {"r": 34, "g": 12, "b": 37, "hex": "#2e204d"},
                            {"r": 22, "g": 48, "b": 34, "hex": "#484454"},
                            {"r": 121, "g": 80, "b": 254, "hex": "#b5b49a"},
                            {"r": 19, "g": 26, "b": 21, "hex": "#3b4233"}
                        ]
                    },
                ],
                "release_date": "1 Feb, 1999",
                "r_average": 83,
                "g_average": 82,
                "b_average": 74,
                "count_videos": 1,
                "count_views": 2609773,
                "count_likes": 5555,
                "count_dislikes": 1107,
                "count_comments": 4152,
                "total_views": 46939,
                "streams": [
                    {
                        "language": "en",
                        "game_id": "29595",
                        "started_at": "2018-11-03T12:00:06Z",
                        "type": "live",
                        "viewer_count": 23661
                    },
                ]
            },
        ]

    def tearDown(self):
        # Deleting Game cascades to the related Info*/Stream rows
        # created by each test -- presumably via FK on_delete; confirm.
        Game.objects.all().delete()

    def test_status_code(self):
        """A valid payload is created with HTTP 201."""
        response = self.client.post(self.url, self.data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_status_code_not_created(self):
        # NOTE(review): asserts only "not 400"; with data_2 identical to
        # data this currently always passes -- see setUp note.
        response = self.client.post(self.url, self.data_2, format='json')
        self.assertNotEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_game_data_persistence(self):
        """Posting one game creates exactly one Game row."""
        self.client.post(self.url, self.data, format='json')
        self.assertEqual(Game.objects.all().count(), 1)

    def test_game_data_not_persistence(self):
        self.client.post(self.url, self.data_2, format='json')
        self.assertNotEqual(Game.objects.all().count(), 0)

    def test_steam_data_persistence(self):
        """The Steam sub-object is persisted and linked to the game."""
        self.client.post(self.url, self.data, format='json')
        self.assertEqual(InfoSteam.objects.all().count(), 1)
        for info in InfoSteam.objects.all():
            self.assertEqual(info.game.name, self.data[0]['name'])

    def test_steam_data_not_persistence(self):
        self.client.post(self.url, self.data_2, format='json')
        self.assertNotEqual(InfoSteam.objects.all().count(), 0)

    def test_youtube_data_persistence(self):
        """The YouTube sub-object is persisted and linked to the game."""
        self.client.post(self.url, self.data, format='json')
        self.assertEqual(InfoYoutube.objects.all().count(), 1)
        for info in InfoYoutube.objects.all():
            self.assertEqual(info.game.name, self.data[0]['name'])

    def test_youtube_data_not_persistence(self):
        self.client.post(self.url, self.data_2, format='json')
        self.assertNotEqual(InfoYoutube.objects.all().count(), 0)

    def test_twitch_data_persistence(self):
        """The Twitch sub-object is persisted and linked to the game."""
        self.client.post(self.url, self.data, format='json')
        self.assertEqual(InfoTwitch.objects.all().count(), 1)
        for info in InfoTwitch.objects.all():
            self.assertEqual(info.game.name, self.data[0]['name'])

    def test_twitch_data_not_persistence(self):
        self.client.post(self.url, self.data_2, format='json')
        self.assertNotEqual(InfoTwitch.objects.all().count(), 0)

    def test_stream_data_persistence(self):
        """Each stream entry becomes a TwitchStream row."""
        self.client.post(self.url, self.data, format='json')
        self.assertEqual(TwitchStream.objects.all().count(), 1)
        for info in TwitchStream.objects.all():
            self.assertEqual(info.game.name, self.data[0]['name'])

    def test_stream_data_not_persistence(self):
        self.client.post(self.url, self.data_2, format='json')
        self.assertNotEqual(TwitchStream.objects.all().count(), 0)

    def test_data_duplication(self):
        # NOTE(review): expects 1 Game after posting two payloads with
        # different id_steam values (123 and 94123) -- this only holds if
        # the second post is rejected or deduplicated; confirm the
        # intended uniqueness rule.
        self.client.post(self.url, self.data_2, format='json')
        self.client.post(self.url, self.data_ok_2, format='json')
        self.assertEqual(Game.objects.all().count(), 1)
CrossData/API/tests.py
from django.urls import reverse from rest_framework import status from rest_framework.test import APITestCase from CrossData.API.models import * class EndpointPOSTTestCase(APITestCase): def setUp(self): self.url = reverse("games_view") self.data = [ { "id_steam": 123, "name": "<NAME>", "positive_reviews_steam": 1923123, "negative_reviews_steam": 12121, "owners": 130000, "average_forever": 2127, "average_2weeks": 132, "price": "0", "languages": [ "mandarim", "espanhol" ], "genres": [ "tiro", "porrada" ], "main_image": "google.com", "screenshots": [ { "url": "https://steamcdn-a.akamaihd.net/steam/apps/570/ss_86d675fdc73ba10462abb8f5ece7791c5047072c.600x338.jpg?t=1536248487", "palette": [ { "r": 8, "g": 16, "b": 2, "hex": "#1aa741" }, { "r": 34, "g": 12, "b": 37, "hex": "#2e204d" }, { "r": 22, "g": 48, "b": 34, "hex": "#484454" }, { "r": 121, "g": 80, "b": 254, "hex": "#b5b49a" }, { "r": 19, "g": 26, "b": 21, "hex": "#3b4233" } ] }, ], "release_date": "1 Feb, 1999", "r_average": 83, "g_average": 82, "b_average": 74, "count_videos": 1, "count_views": 2609773, "count_likes": 5555, "count_dislikes": 1107, "count_comments": 4152, "total_views": 46939, "streams": [ { "language": "en", "game_id": "29595", "started_at": "2018-11-03T12:00:06Z", "type": "live", "viewer_count": 23661 }, ] }, ] self.data_2 = [ { "id_steam": 123, "name": "<NAME>", "positive_reviews_steam": 1923123, "negative_reviews_steam": 12121, "owners": 130000, "average_forever": 2127, "average_2weeks": 132, "price": "0", "languages": [ "mandarim", "espanhol" ], "genres": [ "tiro", "porrada" ], "main_image": "google.com", "screenshots": [ { "url": "https://steamcdn-a.akamaihd.net/steam/apps/570/ss_86d675fdc73ba10462abb8f5ece7791c5047072c.600x338.jpg?t=1536248487", "palette": [ { "r": 8, "g": 16, "b": 2, "hex": "#1aa741" }, { "r": 34, "g": 12, "b": 37, "hex": "#2e204d" }, { "r": 22, "g": 48, "b": 34, "hex": "#484454" }, { "r": 121, "g": 80, "b": 254, "hex": "#b5b49a" }, { "r": 19, "g": 26, "b": 21, "hex": 
"#3b4233" } ] }, ], "release_date": "1 Feb, 1999", "r_average": 83, "g_average": 82, "b_average": 74, "count_videos": 1, "count_views": 2609773, "count_likes": 5555, "count_dislikes": 1107, "count_comments": 4152, "total_views": 46939, "streams": [ { "language": "en", "game_id": "29595", "started_at": "2018-11-03T12:00:06Z", "type": "live", "viewer_count": 23661 }, ] }, ] self.data_ok_2 = [ { "id_steam": 94123, "name": "<NAME>", "positive_reviews_steam": 1923123, "negative_reviews_steam": 12121, "owners": 130000, "average_forever": 2127, "average_2weeks": 132, "price": "0", "languages": [ "mandarim", "espanhol" ], "genres": [ "tiro", "porrada" ], "main_image": "google.com", "screenshots": [ { "url": "https://steamcdn-a.akamaihd.net/steam/apps/570/ss_86d675fdc73ba10462abb8f5ece7791c5047072c.600x338.jpg?t=1536248487", "palette": [ { "r": 8, "g": 16, "b": 2, "hex": "#1aa741" }, { "r": 34, "g": 12, "b": 37, "hex": "#2e204d" }, { "r": 22, "g": 48, "b": 34, "hex": "#484454" }, { "r": 121, "g": 80, "b": 254, "hex": "#b5b49a" }, { "r": 19, "g": 26, "b": 21, "hex": "#3b4233" } ] }, ], "release_date": "1 Feb, 1999", "r_average": 83, "g_average": 82, "b_average": 74, "count_videos": 1, "count_views": 2609773, "count_likes": 5555, "count_dislikes": 1107, "count_comments": 4152, "total_views": 46939, "streams": [ { "language": "en", "game_id": "29595", "started_at": "2018-11-03T12:00:06Z", "type": "live", "viewer_count": 23661 }, ] }, ] def tearDown(self): Game.objects.all().delete() def test_status_code(self): response = self.client.post(self.url, self.data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_status_code_not_created(self): response = self.client.post(self.url, self.data_2, format='json') self.assertNotEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_game_data_persistence(self): self.client.post(self.url, self.data, format='json') self.assertEqual(Game.objects.all().count(), 1) def 
test_game_data_not_persistence(self): self.client.post(self.url, self.data_2, format='json') self.assertNotEqual(Game.objects.all().count(), 0) def test_steam_data_persistence(self): self.client.post(self.url, self.data, format='json') self.assertEqual(InfoSteam.objects.all().count(), 1) for info in InfoSteam.objects.all(): self.assertEqual(info.game.name, self.data[0]['name']) def test_steam_data_not_persistence(self): self.client.post(self.url, self.data_2, format='json') self.assertNotEqual(InfoSteam.objects.all().count(), 0) def test_youtube_data_persistence(self): self.client.post(self.url, self.data, format='json') self.assertEqual(InfoYoutube.objects.all().count(), 1) for info in InfoYoutube.objects.all(): self.assertEqual(info.game.name, self.data[0]['name']) def test_youtube_data_not_persistence(self): self.client.post(self.url, self.data_2, format='json') self.assertNotEqual(InfoYoutube.objects.all().count(), 0) def test_twitch_data_persistence(self): self.client.post(self.url, self.data, format='json') self.assertEqual(InfoTwitch.objects.all().count(), 1) for info in InfoTwitch.objects.all(): self.assertEqual(info.game.name, self.data[0]['name']) def test_twitch_data_not_persistence(self): self.client.post(self.url, self.data_2, format='json') self.assertNotEqual(InfoTwitch.objects.all().count(), 0) def test_stream_data_persistence(self): self.client.post(self.url, self.data, format='json') self.assertEqual(TwitchStream.objects.all().count(), 1) for info in TwitchStream.objects.all(): self.assertEqual(info.game.name, self.data[0]['name']) def test_stream_data_not_persistence(self): self.client.post(self.url, self.data_2, format='json') self.assertNotEqual(TwitchStream.objects.all().count(), 0) def test_data_duplication(self): self.client.post(self.url, self.data_2, format='json') self.client.post(self.url, self.data_ok_2, format='json') self.assertEqual(Game.objects.all().count(), 1)
0.4917
0.339089
from time import time

from java.awt import Color
from java.awt.image import BufferedImage
from de.qfs.apps.qftest.shared.extensions.image import ImageRep

# Kept for QF-Test script scoping: exports the class name to the global
# Jython namespace shared between script nodes.
global RGBArray


class RGBArray:
    """
    Custom implementation of array holding RGB data of image
    Basic init/get/set operations defined
    """

    def __init__(self, img):
        """
        Load given image buffer into flattened array
        @param{BufferedImage/ImageRep} img - input image
        """
        startTime = time()
        if type(img) == BufferedImage:
            self.width = img.getWidth()
            self.height = img.getHeight()
            # Row-major ARGB ints, one per pixel.
            self.data = img.getRGB(0, 0, self.width, self.height,
                                   None, 0, self.width)
        elif type(img) == ImageRep:
            self.width = img.getWidth()
            self.height = img.getHeight()
            self.data = img.getARGB()
        # Seconds spent loading; exposed for profiling.
        self.loadingTime = time() - startTime

    def Iter(self, ax, val):
        """
        Generator yielding partial indices for flattened 1D array
        corresponding to given indices of given axis in 2D array
        Note: Formulas for conversion of indices between 1D and 2D arrays:
        i = x + y*width
        x = i % width
        y = (i-x)/width
        @param{int} ax - axis; x=0, y=1
        @param{slice/int} val - indices in given axis
        @return{int} subindex; resulting index i = subindex(x)+subindex(y)
        """
        if type(val) == slice:
            if val.start == None:
                start = 0
            else:
                start = val.start
            if val.stop == None:
                if ax == 0:
                    stop = self.width
                elif ax == 1:
                    stop = self.height
            else:
                stop = val.stop
            for i in range(start, stop):
                # BUG FIX: bounds checks below used ">" which accepted
                # i == width (or height). An x equal to width silently
                # wraps to pixel (0, y+1) via i = x + y*width, so the
                # checks must reject i >= dimension.
                if ax == 0:
                    if i < 0 or i >= self.width:
                        raise Exception("x dimension "+str(i) +
                                        " out of range for width "+str(self.width))
                    yield i
                if ax == 1:
                    if i < 0 or i >= self.height:
                        raise Exception("y dimension "+str(i) +
                                        " out of range for height "+str(self.height))
                    yield i*self.width
        else:
            # Single integer index; same off-by-one fix as above.
            if ax == 0:
                if val < 0 or val >= self.width:
                    raise Exception("x dimension "+str(val) +
                                    " out of range for width "+str(self.width))
                yield val
            if ax == 1:
                if val < 0 or val >= self.height:
                    raise Exception("y dimension "+str(val) +
                                    " out of range for height "+str(self.height))
                yield val*self.width

    def __getitem__(self, keys):
        """
        Get items from array
        @param{int/tuple} keys - can refer to both 1D/2D indices
        """
        if type(keys) == tuple:
            x, y = keys
            lst = list()
            for i in self.Iter(0, x):
                for j in self.Iter(1, y):
                    lst.append(self.data[i+j])
            return lst
        elif type(keys) == int:
            i = keys
            return self.data[i]

    def __setitem__(self, keys, val):
        """
        Set array items
        @param{int/tuple} keys - can refer to both 1D/2D indices
        @param{int} val - setter value
        """
        if type(keys) == tuple:
            x, y = keys
            for i in self.Iter(0, x):
                for j in self.Iter(1, y):
                    self.data[i+j] = val
        elif type(keys) == int:
            i = keys
            self.data[i] = val

    @property
    def length(self):
        """
        Get length of flattened 1D array
        """
        return len(self.data)

    @property
    def shape(self):
        """
        Get shape of original 2D array
        """
        return self.width, self.height
rgb-array.py
from time import time from java.awt import Color from java.awt.image import BufferedImage from de.qfs.apps.qftest.shared.extensions.image import ImageRep global RGBArray class RGBArray: """ Custom implementation of array holding RGB data of image Basic init/get/set operations defined """ def __init__(self,img): """ Load given image buffer into flattened array @param{BufferedImage/ImageRep} img - input image """ startTime = time() if type(img) == BufferedImage: self.width = img.getWidth() self.height = img.getHeight() self.data = img.getRGB(0,0, self.width, self.height, None,0, self.width) elif type(img) == ImageRep: self.width = img.getWidth() self.height = img.getHeight() self.data = img.getARGB() self.loadingTime = time() - startTime def Iter(self,ax,val): """ Generator yielding partial indices for flattened 1D array corresponding to given indices of given axis in 2D array Note: Formulas for conversion of indices between 1D and 2D arrays: i = x + y*width x = i % width y = (i-x)/width @param{int} ax - axis; x=0, y=1 @param{slice/int} val - indices in given axis @return{int} subindex; resulting index i = subindex(x)+subindex(y) """ if type(val) == slice: if val.start == None: start = 0 else: start = val.start if val.stop == None: if ax == 0: stop = self.width elif ax == 1: stop = self.height else: stop = val.stop for i in range(start,stop): if ax == 0: if i < 0 or i > self.width: raise Exception("x dimension "+str(i)+ " out of range for width "+str(self.width)) yield i if ax == 1: if i < 0 or i > self.height: raise Exception("y dimension "+str(i)+ " out of range for height "+str(self.height)) yield i*self.width else: if ax == 0: if val < 0 or val > self.width: raise Exception("x dimension "+str(val)+ " out of range for width "+str(self.width)) yield val if ax == 1: if val < 0 or val > self.height: raise Exception("y dimension "+str(val)+ " out of range for height "+str(self.height)) yield val*self.width def __getitem__(self, keys): """ Get items from array 
@param{int/tuple} keys - can refer to both 1D/2D indices """ if type(keys) == tuple: x,y = keys lst = list() for i in self.Iter(0,x): for j in self.Iter(1,y): lst.append(self.data[i+j]) return lst elif type(keys) == int: i = keys return self.data[i] def __setitem__(self, keys, val): """ Set array items @param{int/tuple} keys - can refer to both 1D/2D indices @param{int} val - setter value """ if type(keys) == tuple: x,y = keys for i in self.Iter(0,x): for j in self.Iter(1,y): self.data[i+j] = val elif type(keys) == int: i = keys self.data[i] = val @property def length(self): """ Get length of flattened 1D array """ return len(self.data) @property def shape(self): """ Get shape of original 2D array """ return self.width, self.height
0.789558
0.303783
"""Helpers for parsing Xero API responses: date formats and the
plural/singular mapping of Xero object names."""
from __future__ import unicode_literals

import datetime
import re

import six

# Matches either Xero's legacy .NET JSON date, e.g.
# "/Date(1426849200000+1300)/" (milliseconds since the epoch with an
# optional signed hhmm offset), or an ISO-8601 style
# "YYYY-MM-DDTHH:MM:SS" timestamp. The named groups are consumed by
# parse_date() below.
DATE = re.compile(
    r'^(\/Date\((?P<timestamp>-?\d+)((?P<offset_h>[-+]\d\d)(?P<offset_m>\d\d))?\)\/)'
    r'|'
    r'((?P<year>\d{4})-(?P<month>[0-2]\d)-0?(?P<day>[0-3]\d)'
    r'T'
    r'(?P<hour>[0-5]\d):(?P<minute>[0-5]\d):(?P<second>[0-6]\d))$'
)

# Plural collection name (as it appears in Xero payloads) -> singular
# object name. NOTE(review): "Tasks" maps to "Tasks" rather than "Task"
# -- possibly intentional, confirm against the Xero schema.
OBJECT_NAMES = {
    "Addresses": "Address",
    "Attachments": "Attachment",
    "Accounts": "Account",
    "BankTransactions": "BankTransaction",
    "BankTransfers": "BankTransfer",
    "BrandingThemes": "BrandingTheme",
    "ContactGroups": "ContactGroup",
    "ContactPersons": "ContactPerson",
    "Contacts": "Contact",
    "CreditNotes": "CreditNote",
    "Currencies": "Currency",
    "Employees": "Employee",
    "ExpenseClaims": "ExpenseClaim",
    "Invoices": "Invoice",
    "Items": "Item",
    "Journals": "Journal",
    "ManualJournals": "ManualJournal",
    "Organisation": "Organisation",
    "Overpayments": "Overpayment",
    "Payments": "Payment",
    "PayrollCalendars": "PayrollCalendar",
    "PayRuns": "PayRun",
    "Phones": "Phone",
    "Prepayments": "Prepayment",
    "Projects": "Project",
    "ProjectsUsers": "ProjectsUser",
    "Receipts": "Receipt",
    "RepeatingInvoices": "RepeatingInvoice",
    "Reports": "Report",
    "TaxComponents": "TaxComponent",
    "TaxRates": "TaxRate",
    "TrackingCategories": "TrackingCategory",
    "Tracking": "TrackingCategory",
    "Time": "Time",
    "Tasks": "Tasks",
    "Users": "User",
    "Associations": "Association",
    "Files": "File",
    "Folders": "Folder",
    "Inbox": "Inbox",
    "LineItems": "LineItem",
    "JournalLines": "JournalLine",
    "PurchaseOrders": "PurchaseOrder",
}


def isplural(word):
    # True when the word is a known plural Xero collection name.
    return word in OBJECT_NAMES.keys()


def singular(word):
    # Singular form for a known plural; None for unknown words.
    return OBJECT_NAMES.get(word)


def parse_date(string, force_datetime=False):
    """ Takes a Xero formatted date, e.g. /Date(1426849200000+1300)/

    Returns a datetime.datetime for /Date(...)/ timestamps and for
    ISO-style values with more than three non-zero components (or when
    force_datetime is True), a datetime.date for date-only values, and
    None when the string does not match DATE at all.
    """
    matches = DATE.match(string)
    if not matches:
        return None

    # Keep only matched groups; offsets keep their sign prefix as a
    # string, everything else is converted to int. Components whose
    # value is 0 are deliberately dropped (int(v) falsy), so midnight
    # times collapse to just year/month/day.
    values = dict([
        (
            k, v if v[0] in '+-' else int(v)
        ) for k,v in matches.groupdict().items() if v and int(v)
    ])

    if 'timestamp' in values:
        # /Date(...)/ branch: milliseconds since the Unix epoch, with
        # the hhmm offset applied on top.
        # NOTE(review): the offset is ADDED to the UTC timestamp (local
        # wall-clock result), and offset_m loses its sign when offset_h
        # is zero-valued -- confirm this matches Xero's intent.
        value = datetime.datetime.utcfromtimestamp(0) + datetime.timedelta(
            hours=int(values.get('offset_h', 0)),
            minutes=int(values.get('offset_m', 0)),
            seconds=int(values['timestamp']) / 1000.0
        )
        return value

    # I've made an assumption here, that a DateTime value will not
    # ever be YYYY-MM-DDT00:00:00, which is probably bad. I'm not
    # really sure how to handle this, other than to hard-code the
    # names of the field that are actually Date rather than DateTime.
    if len(values) > 3 or force_datetime:
        return datetime.datetime(**values)

    # Sometimes Xero returns Date(0+0000), so we end up with no
    # values. Return None for this case
    if not values:
        return None

    return datetime.date(**values)


def json_load_object_hook(dct):
    """ Hook for json.parse(...) to parse Xero date formats.

    Replaces any string value that parse_date() recognises with the
    resulting date/datetime; all other values pass through unchanged.
    """
    for key, value in dct.items():
        if isinstance(value, six.string_types):
            value = parse_date(value)
            if value:
                dct[key] = value
    return dct
xero/utils.py
from __future__ import unicode_literals import datetime import re import six DATE = re.compile( r'^(\/Date\((?P<timestamp>-?\d+)((?P<offset_h>[-+]\d\d)(?P<offset_m>\d\d))?\)\/)' r'|' r'((?P<year>\d{4})-(?P<month>[0-2]\d)-0?(?P<day>[0-3]\d)' r'T' r'(?P<hour>[0-5]\d):(?P<minute>[0-5]\d):(?P<second>[0-6]\d))$' ) OBJECT_NAMES = { "Addresses": "Address", "Attachments": "Attachment", "Accounts": "Account", "BankTransactions": "BankTransaction", "BankTransfers": "BankTransfer", "BrandingThemes": "BrandingTheme", "ContactGroups": "ContactGroup", "ContactPersons": "ContactPerson", "Contacts": "Contact", "CreditNotes": "CreditNote", "Currencies": "Currency", "Employees": "Employee", "ExpenseClaims": "ExpenseClaim", "Invoices": "Invoice", "Items": "Item", "Journals": "Journal", "ManualJournals": "ManualJournal", "Organisation": "Organisation", "Overpayments": "Overpayment", "Payments": "Payment", "PayrollCalendars": "PayrollCalendar", "PayRuns": "PayRun", "Phones": "Phone", "Prepayments": "Prepayment", "Projects": "Project", "ProjectsUsers": "ProjectsUser", "Receipts": "Receipt", "RepeatingInvoices": "RepeatingInvoice", "Reports": "Report", "TaxComponents": "TaxComponent", "TaxRates": "TaxRate", "TrackingCategories": "TrackingCategory", "Tracking": "TrackingCategory", "Time": "Time", "Tasks": "Tasks", "Users": "User", "Associations": "Association", "Files": "File", "Folders": "Folder", "Inbox": "Inbox", "LineItems": "LineItem", "JournalLines": "JournalLine", "PurchaseOrders": "PurchaseOrder", } def isplural(word): return word in OBJECT_NAMES.keys() def singular(word): return OBJECT_NAMES.get(word) def parse_date(string, force_datetime=False): """ Takes a Xero formatted date, e.g. 
/Date(1426849200000+1300)/""" matches = DATE.match(string) if not matches: return None values = dict([ ( k, v if v[0] in '+-' else int(v) ) for k,v in matches.groupdict().items() if v and int(v) ]) if 'timestamp' in values: value = datetime.datetime.utcfromtimestamp(0) + datetime.timedelta( hours=int(values.get('offset_h', 0)), minutes=int(values.get('offset_m', 0)), seconds=int(values['timestamp']) / 1000.0 ) return value # I've made an assumption here, that a DateTime value will not # ever be YYYY-MM-DDT00:00:00, which is probably bad. I'm not # really sure how to handle this, other than to hard-code the # names of the field that are actually Date rather than DateTime. if len(values) > 3 or force_datetime: return datetime.datetime(**values) # Sometimes Xero returns Date(0+0000), so we end up with no # values. Return None for this case if not values: return None return datetime.date(**values) def json_load_object_hook(dct): """ Hook for json.parse(...) to parse Xero date formats. """ for key, value in dct.items(): if isinstance(value, six.string_types): value = parse_date(value) if value: dct[key] = value return dct
0.591251
0.436262
import asyncio import copy import itertools import re from base64 import b64decode from datetime import datetime from app.service.base_service import BaseService class PlanningService(BaseService): def __init__(self): self.log = self.add_service('planning_svc', self) async def select_links(self, operation, agent, phase): """ For an operation, phase and agent combination, determine which potential links can be executed :param operation: :param agent: :param phase: :return: a list of links """ await self.get_service('parsing_svc').parse_facts(operation) operation = (await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])))[0] if (not agent['trusted']) and (not operation['allow_untrusted']): self.log.debug('Agent %s untrusted: no link created' % agent['paw']) return [] phase_abilities = [i for p, v in operation['adversary']['phases'].items() if p <= phase for i in v] phase_abilities = sorted(phase_abilities, key=lambda i: i['id']) link_status = await self._default_link_status(operation) links = [] for a in await self.capable_agent_abilities(phase_abilities, agent): links.append( dict(op_id=operation['id'], paw=agent['paw'], ability=a['id'], command=a['test'], score=0, status=link_status, decide=datetime.now(), executor=a['executor'], jitter=self.jitter(operation['jitter']), adversary_map_id=a['adversary_map_id'])) links[:] = await self._trim_links(operation, links, agent) return await self._sort_links(links) async def create_cleanup_links(self, operation): """ For a given operation, create a link for every cleanup action on every executed ability :param operation: :return: None """ op = await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])) link_status = await self._default_link_status(operation) for member in op[0]['host_group']: if (not member['trusted']) and (not op[0]['allow_untrusted']): self.log.debug('Agent %s untrusted: no cleanup-link created' % member['paw']) continue links = [] for link in 
await self.get_service('data_svc').explode_chain(criteria=dict(paw=member['paw'], op_id=op[0]['id'])): ability = (await self.get_service('data_svc').explode_abilities(criteria=dict(id=link['ability'])))[0] if ability['cleanup'] and link['status'] >= 0: links.append(dict(op_id=op[0]['id'], paw=member['paw'], ability=ability['id'], cleanup=1, command=ability['cleanup'], executor=ability['executor'], score=0, jitter=0, decide=datetime.now(), status=link_status)) links[:] = await self._trim_links(op[0], links, member) for link in reversed(links): link.pop('rewards', []) await self.get_service('data_svc').create('core_chain', link) await self.wait_for_phase(op[0]) async def wait_for_phase(self, operation): """ Wait for all started links to be completed :param operation: :return: None """ for member in operation['host_group']: if (not member['trusted']) and (not operation['allow_untrusted']): continue op = await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])) while next((True for lnk in op[0]['chain'] if lnk['paw'] == member['paw'] and not lnk['finish'] and not lnk['status'] == self.LinkState.DISCARD.value), False): await asyncio.sleep(3) if await self._trust_issues(operation, member['paw']): break op = await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])) async def decode(self, encoded_cmd, agent, group): """ Replace all global variables in a command with the values associated to a specific agent :param encoded_cmd: :param agent: :param group: :return: the updated command string """ decoded_cmd = self.decode_bytes(encoded_cmd) decoded_cmd = decoded_cmd.replace('#{server}', agent['server']) decoded_cmd = decoded_cmd.replace('#{group}', group) decoded_cmd = decoded_cmd.replace('#{paw}', agent['paw']) decoded_cmd = decoded_cmd.replace('#{location}', agent['location']) return decoded_cmd @staticmethod async def capable_agent_abilities(phase_abilities, agent): abilities = [] preferred = next((e['executor'] 
for e in agent['executors'] if e['preferred'])) executors = [e['executor'] for e in agent['executors']] for ai in set([pa['ability_id'] for pa in phase_abilities]): total_ability = [ab for ab in phase_abilities if (ab['ability_id'] == ai) and (ab['platform'] == agent['platform']) and (ab['executor'] in executors)] if len(total_ability) > 0: val = next((ta for ta in total_ability if ta['executor'] == preferred), total_ability[0]) abilities.append(val) return abilities """ PRIVATE """ @staticmethod async def _sort_links(links): """ sort links by their score then by the order they are defined in an adversary profile """ return sorted(links, key=lambda k: (-k['score'], k['adversary_map_id'])) async def _trim_links(self, operation, links, agent): host_already_ran = [l['command'] for l in operation['chain'] if l['paw'] == agent['paw']] links[:] = await self._add_test_variants(links, agent, operation) links[:] = [l for l in links if l['command'] not in host_already_ran] links[:] = [l for l in links if not re.findall(r'#{(.*?)}', b64decode(l['command']).decode('utf-8'), flags=re.DOTALL)] self.log.debug('Created %d links for %s' % (len(links), agent['paw'])) return links async def _add_test_variants(self, links, agent, operation): """ Create a list of all possible links for a given phase """ group = agent['host_group'] for link in links: decoded_test = await self.decode(link['command'], agent, group) variables = re.findall(r'#{(.*?)}', decoded_test, flags=re.DOTALL) if variables: agent_facts = await self._get_agent_facts(operation['id'], agent['paw']) relevant_facts = await self._build_relevant_facts(variables, operation.get('facts', []), agent_facts) for combo in list(itertools.product(*relevant_facts)): copy_test = copy.deepcopy(decoded_test) copy_link = copy.deepcopy(link) variant, score, rewards = await self._build_single_test_variant(copy_test, combo) copy_link['command'] = self.encode_string(variant) copy_link['score'] = score copy_link['rewards'] = rewards 
links.append(copy_link) else: link['command'] = self.encode_string(decoded_test) return links @staticmethod def _reward_fact_relationship(combo_set, combo_link, score): if len(combo_set) == 1 and len(combo_link) == 1: score *= 2 return score @staticmethod async def _build_relevant_facts(variables, facts, agent_facts): """ Create a list of ([fact, value, score]) tuples for each variable/fact """ facts = [f for f in facts if f['score'] > 0] relevant_facts = [] for v in variables: variable_facts = [] for fact in [f for f in facts if f['property'] == v]: if fact['property'].startswith('host'): if fact['id'] in agent_facts: variable_facts.append(fact) else: variable_facts.append(fact) relevant_facts.append(variable_facts) return relevant_facts async def _build_single_test_variant(self, copy_test, combo): """ Replace all variables with facts from the combo to build a single test variant """ score, rewards, combo_set_id, combo_link_id = 0, list(), set(), set() for var in combo: score += (score + var['score']) rewards.append(var['id']) copy_test = copy_test.replace('#{%s}' % var['property'], var['value']) combo_set_id.add(var['set_id']) combo_link_id.add(var['link_id']) score = self._reward_fact_relationship(combo_set_id, combo_link_id, score) return copy_test, score, rewards async def _get_agent_facts(self, op_id, paw): """ Collect a list of this agent's facts """ agent_facts = [] for link in await self.get_service('data_svc').dao.get('core_chain', criteria=dict(op_id=op_id, paw=paw)): facts = await self.get_service('data_svc').dao.get('core_fact', criteria=dict(link_id=link['id'])) for f in facts: agent_facts.append(f['id']) return agent_facts async def _trust_issues(self, operation, paw): if not operation['allow_untrusted']: agent = await self.get_service('data_svc').explode_agents(criteria=dict(paw=paw)) return not agent[0]['trusted'] return False async def _default_link_status(self, operation): return self.LinkState.EXECUTE.value if operation['autonomous'] else 
self.LinkState.PAUSE.value
app/service/planning_svc.py
import asyncio import copy import itertools import re from base64 import b64decode from datetime import datetime from app.service.base_service import BaseService class PlanningService(BaseService): def __init__(self): self.log = self.add_service('planning_svc', self) async def select_links(self, operation, agent, phase): """ For an operation, phase and agent combination, determine which potential links can be executed :param operation: :param agent: :param phase: :return: a list of links """ await self.get_service('parsing_svc').parse_facts(operation) operation = (await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])))[0] if (not agent['trusted']) and (not operation['allow_untrusted']): self.log.debug('Agent %s untrusted: no link created' % agent['paw']) return [] phase_abilities = [i for p, v in operation['adversary']['phases'].items() if p <= phase for i in v] phase_abilities = sorted(phase_abilities, key=lambda i: i['id']) link_status = await self._default_link_status(operation) links = [] for a in await self.capable_agent_abilities(phase_abilities, agent): links.append( dict(op_id=operation['id'], paw=agent['paw'], ability=a['id'], command=a['test'], score=0, status=link_status, decide=datetime.now(), executor=a['executor'], jitter=self.jitter(operation['jitter']), adversary_map_id=a['adversary_map_id'])) links[:] = await self._trim_links(operation, links, agent) return await self._sort_links(links) async def create_cleanup_links(self, operation): """ For a given operation, create a link for every cleanup action on every executed ability :param operation: :return: None """ op = await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])) link_status = await self._default_link_status(operation) for member in op[0]['host_group']: if (not member['trusted']) and (not op[0]['allow_untrusted']): self.log.debug('Agent %s untrusted: no cleanup-link created' % member['paw']) continue links = [] for link in 
await self.get_service('data_svc').explode_chain(criteria=dict(paw=member['paw'], op_id=op[0]['id'])): ability = (await self.get_service('data_svc').explode_abilities(criteria=dict(id=link['ability'])))[0] if ability['cleanup'] and link['status'] >= 0: links.append(dict(op_id=op[0]['id'], paw=member['paw'], ability=ability['id'], cleanup=1, command=ability['cleanup'], executor=ability['executor'], score=0, jitter=0, decide=datetime.now(), status=link_status)) links[:] = await self._trim_links(op[0], links, member) for link in reversed(links): link.pop('rewards', []) await self.get_service('data_svc').create('core_chain', link) await self.wait_for_phase(op[0]) async def wait_for_phase(self, operation): """ Wait for all started links to be completed :param operation: :return: None """ for member in operation['host_group']: if (not member['trusted']) and (not operation['allow_untrusted']): continue op = await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])) while next((True for lnk in op[0]['chain'] if lnk['paw'] == member['paw'] and not lnk['finish'] and not lnk['status'] == self.LinkState.DISCARD.value), False): await asyncio.sleep(3) if await self._trust_issues(operation, member['paw']): break op = await self.get_service('data_svc').explode_operation(criteria=dict(id=operation['id'])) async def decode(self, encoded_cmd, agent, group): """ Replace all global variables in a command with the values associated to a specific agent :param encoded_cmd: :param agent: :param group: :return: the updated command string """ decoded_cmd = self.decode_bytes(encoded_cmd) decoded_cmd = decoded_cmd.replace('#{server}', agent['server']) decoded_cmd = decoded_cmd.replace('#{group}', group) decoded_cmd = decoded_cmd.replace('#{paw}', agent['paw']) decoded_cmd = decoded_cmd.replace('#{location}', agent['location']) return decoded_cmd @staticmethod async def capable_agent_abilities(phase_abilities, agent): abilities = [] preferred = next((e['executor'] 
for e in agent['executors'] if e['preferred'])) executors = [e['executor'] for e in agent['executors']] for ai in set([pa['ability_id'] for pa in phase_abilities]): total_ability = [ab for ab in phase_abilities if (ab['ability_id'] == ai) and (ab['platform'] == agent['platform']) and (ab['executor'] in executors)] if len(total_ability) > 0: val = next((ta for ta in total_ability if ta['executor'] == preferred), total_ability[0]) abilities.append(val) return abilities """ PRIVATE """ @staticmethod async def _sort_links(links): """ sort links by their score then by the order they are defined in an adversary profile """ return sorted(links, key=lambda k: (-k['score'], k['adversary_map_id'])) async def _trim_links(self, operation, links, agent): host_already_ran = [l['command'] for l in operation['chain'] if l['paw'] == agent['paw']] links[:] = await self._add_test_variants(links, agent, operation) links[:] = [l for l in links if l['command'] not in host_already_ran] links[:] = [l for l in links if not re.findall(r'#{(.*?)}', b64decode(l['command']).decode('utf-8'), flags=re.DOTALL)] self.log.debug('Created %d links for %s' % (len(links), agent['paw'])) return links async def _add_test_variants(self, links, agent, operation): """ Create a list of all possible links for a given phase """ group = agent['host_group'] for link in links: decoded_test = await self.decode(link['command'], agent, group) variables = re.findall(r'#{(.*?)}', decoded_test, flags=re.DOTALL) if variables: agent_facts = await self._get_agent_facts(operation['id'], agent['paw']) relevant_facts = await self._build_relevant_facts(variables, operation.get('facts', []), agent_facts) for combo in list(itertools.product(*relevant_facts)): copy_test = copy.deepcopy(decoded_test) copy_link = copy.deepcopy(link) variant, score, rewards = await self._build_single_test_variant(copy_test, combo) copy_link['command'] = self.encode_string(variant) copy_link['score'] = score copy_link['rewards'] = rewards 
links.append(copy_link) else: link['command'] = self.encode_string(decoded_test) return links @staticmethod def _reward_fact_relationship(combo_set, combo_link, score): if len(combo_set) == 1 and len(combo_link) == 1: score *= 2 return score @staticmethod async def _build_relevant_facts(variables, facts, agent_facts): """ Create a list of ([fact, value, score]) tuples for each variable/fact """ facts = [f for f in facts if f['score'] > 0] relevant_facts = [] for v in variables: variable_facts = [] for fact in [f for f in facts if f['property'] == v]: if fact['property'].startswith('host'): if fact['id'] in agent_facts: variable_facts.append(fact) else: variable_facts.append(fact) relevant_facts.append(variable_facts) return relevant_facts async def _build_single_test_variant(self, copy_test, combo): """ Replace all variables with facts from the combo to build a single test variant """ score, rewards, combo_set_id, combo_link_id = 0, list(), set(), set() for var in combo: score += (score + var['score']) rewards.append(var['id']) copy_test = copy_test.replace('#{%s}' % var['property'], var['value']) combo_set_id.add(var['set_id']) combo_link_id.add(var['link_id']) score = self._reward_fact_relationship(combo_set_id, combo_link_id, score) return copy_test, score, rewards async def _get_agent_facts(self, op_id, paw): """ Collect a list of this agent's facts """ agent_facts = [] for link in await self.get_service('data_svc').dao.get('core_chain', criteria=dict(op_id=op_id, paw=paw)): facts = await self.get_service('data_svc').dao.get('core_fact', criteria=dict(link_id=link['id'])) for f in facts: agent_facts.append(f['id']) return agent_facts async def _trust_issues(self, operation, paw): if not operation['allow_untrusted']: agent = await self.get_service('data_svc').explode_agents(criteria=dict(paw=paw)) return not agent[0]['trusted'] return False async def _default_link_status(self, operation): return self.LinkState.EXECUTE.value if operation['autonomous'] else 
self.LinkState.PAUSE.value
0.529993
0.154217
import itertools import pytest from opentrons.broker import publish from opentrons.api import Session from opentrons.api.session import _accumulate, _get_labware, _dedupe from tests.opentrons.conftest import state from functools import partial state = partial(state, 'session') @pytest.fixture def labware_setup(): from opentrons import containers, instruments tip_racks = \ [containers.load('tiprack-200ul', slot, slot) for slot in ['1', '4']] plates = \ [containers.load('96-PCR-flat', slot, slot) for slot in ['2', '5']] p100 = instruments.Pipette( name='p100', mount='right', channels=8, tip_racks=tip_racks) p1000 = instruments.Pipette( name='p1000', mount='left', channels=8, tip_racks=tip_racks) commands = [ { 'location': plates[0][0], 'instrument': p100 }, { 'location': plates[1] }, { 'locations': [plates[0][0], plates[1]], 'instrument': p1000 } ] return (p100, p1000), tip_racks, plates, commands async def test_load_from_text(session_manager, protocol): session = session_manager.create(name='<blank>', text=protocol.text) assert session.name == '<blank>' acc = [] def traverse(commands): for command in commands: acc.append(command) traverse(command['children']) traverse(session.commands) # Less commands now that trash is built in assert len(acc) == 75 async def test_async_notifications(main_router): publish('session', {'name': 'foo', 'payload': {'bar': 'baz'}}) # Get async iterator aiter = main_router.notifications.__aiter__() # Then read the first item res = await aiter.__anext__() assert res == {'name': 'foo', 'payload': {'bar': 'baz'}} async def test_load_protocol_with_error(session_manager): with pytest.raises(Exception) as e: session = session_manager.create(name='<blank>', text='blah') assert session is None args, = e.value.args assert args == "name 'blah' is not defined" @pytest.mark.parametrize('protocol_file', ['testosaur.py']) async def test_load_and_run( main_router, protocol, protocol_file, loop ): session = main_router.session_manager.create( 
name='<blank>', text=protocol.text) assert main_router.notifications.queue.qsize() == 0 assert session.command_log == {} assert session.state == 'loaded' main_router.calibration_manager.tip_probe(session.instruments[0]) task = loop.run_in_executor(executor=None, func=session.run) await task assert len(session.command_log) == 7 res = [] index = 0 async for notification in main_router.notifications: payload = notification['payload'] index += 1 # Command log in sync with add-command events emitted if type(payload) is dict: state = payload.get('state') else: state = payload.state res.append(state) if state == 'finished': break assert [key for key, _ in itertools.groupby(res)] == \ ['loaded', 'probing', 'ready', 'running', 'finished'] assert main_router.notifications.queue.qsize() == 0, 'Notification should be empty after receiving "finished" state change event' # noqa session.run() assert len(session.command_log) == 7, \ "Clears command log on the next run" @pytest.fixture def run_session(virtual_smoothie_env): return Session('dino', 'from opentrons import robot') def test_init(run_session): assert run_session.state == 'loaded' assert run_session.name == 'dino' def test_set_state(run_session): states = 'loaded', 'running', 'finished', 'stopped', 'paused' for state in states: run_session.set_state(state) assert run_session.state == state with pytest.raises(ValueError): run_session.set_state('impossible-state') def test_error_append(run_session): foo = Exception('Foo') bar = Exception('Bar') run_session.error_append(foo) run_session.error_append(bar) errors = [ value for value in run_session.errors if isinstance(value.pop('timestamp'), int) ] assert errors == [ {'error': foo}, {'error': bar} ] def test_get_instruments_and_containers(labware_setup, virtual_smoothie_env): instruments, tip_racks, plates, commands = labware_setup p100, p1000 = instruments instruments, containers, interactions = \ _accumulate([_get_labware(command) for command in commands]) session = 
Session(name='', text='') # We are calling dedupe directly for testing purposes. # Normally it is called from within a session session._instruments.extend(_dedupe(instruments)) session._containers.extend(_dedupe(containers)) session._interactions.extend(_dedupe(interactions)) instruments = session.get_instruments() containers = session.get_containers() assert [i.name for i in instruments] == ['p100', 'p1000'] assert [i.axis for i in instruments] == ['a', 'b'] assert [i.id for i in instruments] == [id(p100), id(p1000)] assert [[t.slot for t in i.tip_racks] for i in instruments] == \ [['1', '4'], ['1', '4']] assert [[c.slot for c in i.containers] for i in instruments] == \ [['2'], ['2', '5']] assert [c.slot for c in containers] == ['2', '5'] assert [[i.id for i in c.instruments] for c in containers] == \ [[id(p100), id(p1000)], [id(p1000)]] assert [c.id for c in containers] == [id(plates[0]), id(plates[1])] def test_accumulate(): res = \ _accumulate([ (['a'], ['d'], ['g', 'h']), (['b', 'c'], ['e', 'f'], ['i']) ]) assert res == (['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']) assert _accumulate([]) == ([], [], []) def test_dedupe(): assert ''.join(_dedupe('aaaaabbbbcbbbbcccaa')) == 'abc' def test_get_labware(labware_setup): instruments, tip_racks, plates, commands = labware_setup p100, p1000 = instruments assert _get_labware(commands[0]) == \ ([p100], [plates[0]], [(p100, plates[0])]) assert _get_labware(commands[1]) == \ ([], [plates[1]], []) assert _get_labware(commands[2]) == \ ([p1000], [plates[0], plates[1]], [(p1000, plates[0]), (p1000, plates[1])]) instruments, containers, interactions = \ _accumulate([_get_labware(command) for command in commands]) assert \ [ list(_dedupe(instruments)), list(_dedupe(containers)), list(_dedupe(interactions)) ] == \ [ [p100, p1000], [plates[0], plates[1]], [(p100, plates[0]), (p1000, plates[0]), (p1000, plates[1])] ] async def test_session_model_functional(session_manager, protocol): session = 
session_manager.create(name='<blank>', text=protocol.text) assert [container.name for container in session.containers] == \ ['tiprack', 'trough', 'plate', 'tall-fixed-trash'] names = [instrument.name for instrument in session.instruments] assert names == ['p300_single_v1'] # TODO(artyom 20171018): design a small protocol specifically for the test @pytest.mark.parametrize('protocol_file', ['bradford_assay.py']) async def test_drop_tip_with_trash(session_manager, protocol, protocol_file): """ <NAME> is using drop_tip() with no arguments that assumes tip drop into trash-box. In this test we are confirming that that trash location is being inferred from a command, and trash is listed as a container for a protocol, as well as a container instruments are interacting with. """ session = session_manager.create(name='<blank>', text=protocol.text) assert 'tall-fixed-trash' in [c.name for c in session.get_containers()] containers = sum([i.containers for i in session.get_instruments()], []) assert 'tall-fixed-trash' in [c.name for c in containers] async def test_session_create_error(main_router): with pytest.raises(SyntaxError): main_router.session_manager.create( name='<blank>', text='syntax error ;(') with pytest.raises(TimeoutError): # No state change is expected await main_router.wait_until(lambda _: True) with pytest.raises(ZeroDivisionError): main_router.session_manager.create( name='<blank>', text='1/0') with pytest.raises(TimeoutError): # No state change is expected await main_router.wait_until(lambda _: True)
api/tests/opentrons/api/test_session.py
import itertools import pytest from opentrons.broker import publish from opentrons.api import Session from opentrons.api.session import _accumulate, _get_labware, _dedupe from tests.opentrons.conftest import state from functools import partial state = partial(state, 'session') @pytest.fixture def labware_setup(): from opentrons import containers, instruments tip_racks = \ [containers.load('tiprack-200ul', slot, slot) for slot in ['1', '4']] plates = \ [containers.load('96-PCR-flat', slot, slot) for slot in ['2', '5']] p100 = instruments.Pipette( name='p100', mount='right', channels=8, tip_racks=tip_racks) p1000 = instruments.Pipette( name='p1000', mount='left', channels=8, tip_racks=tip_racks) commands = [ { 'location': plates[0][0], 'instrument': p100 }, { 'location': plates[1] }, { 'locations': [plates[0][0], plates[1]], 'instrument': p1000 } ] return (p100, p1000), tip_racks, plates, commands async def test_load_from_text(session_manager, protocol): session = session_manager.create(name='<blank>', text=protocol.text) assert session.name == '<blank>' acc = [] def traverse(commands): for command in commands: acc.append(command) traverse(command['children']) traverse(session.commands) # Less commands now that trash is built in assert len(acc) == 75 async def test_async_notifications(main_router): publish('session', {'name': 'foo', 'payload': {'bar': 'baz'}}) # Get async iterator aiter = main_router.notifications.__aiter__() # Then read the first item res = await aiter.__anext__() assert res == {'name': 'foo', 'payload': {'bar': 'baz'}} async def test_load_protocol_with_error(session_manager): with pytest.raises(Exception) as e: session = session_manager.create(name='<blank>', text='blah') assert session is None args, = e.value.args assert args == "name 'blah' is not defined" @pytest.mark.parametrize('protocol_file', ['testosaur.py']) async def test_load_and_run( main_router, protocol, protocol_file, loop ): session = main_router.session_manager.create( 
name='<blank>', text=protocol.text) assert main_router.notifications.queue.qsize() == 0 assert session.command_log == {} assert session.state == 'loaded' main_router.calibration_manager.tip_probe(session.instruments[0]) task = loop.run_in_executor(executor=None, func=session.run) await task assert len(session.command_log) == 7 res = [] index = 0 async for notification in main_router.notifications: payload = notification['payload'] index += 1 # Command log in sync with add-command events emitted if type(payload) is dict: state = payload.get('state') else: state = payload.state res.append(state) if state == 'finished': break assert [key for key, _ in itertools.groupby(res)] == \ ['loaded', 'probing', 'ready', 'running', 'finished'] assert main_router.notifications.queue.qsize() == 0, 'Notification should be empty after receiving "finished" state change event' # noqa session.run() assert len(session.command_log) == 7, \ "Clears command log on the next run" @pytest.fixture def run_session(virtual_smoothie_env): return Session('dino', 'from opentrons import robot') def test_init(run_session): assert run_session.state == 'loaded' assert run_session.name == 'dino' def test_set_state(run_session): states = 'loaded', 'running', 'finished', 'stopped', 'paused' for state in states: run_session.set_state(state) assert run_session.state == state with pytest.raises(ValueError): run_session.set_state('impossible-state') def test_error_append(run_session): foo = Exception('Foo') bar = Exception('Bar') run_session.error_append(foo) run_session.error_append(bar) errors = [ value for value in run_session.errors if isinstance(value.pop('timestamp'), int) ] assert errors == [ {'error': foo}, {'error': bar} ] def test_get_instruments_and_containers(labware_setup, virtual_smoothie_env): instruments, tip_racks, plates, commands = labware_setup p100, p1000 = instruments instruments, containers, interactions = \ _accumulate([_get_labware(command) for command in commands]) session = 
Session(name='', text='') # We are calling dedupe directly for testing purposes. # Normally it is called from within a session session._instruments.extend(_dedupe(instruments)) session._containers.extend(_dedupe(containers)) session._interactions.extend(_dedupe(interactions)) instruments = session.get_instruments() containers = session.get_containers() assert [i.name for i in instruments] == ['p100', 'p1000'] assert [i.axis for i in instruments] == ['a', 'b'] assert [i.id for i in instruments] == [id(p100), id(p1000)] assert [[t.slot for t in i.tip_racks] for i in instruments] == \ [['1', '4'], ['1', '4']] assert [[c.slot for c in i.containers] for i in instruments] == \ [['2'], ['2', '5']] assert [c.slot for c in containers] == ['2', '5'] assert [[i.id for i in c.instruments] for c in containers] == \ [[id(p100), id(p1000)], [id(p1000)]] assert [c.id for c in containers] == [id(plates[0]), id(plates[1])] def test_accumulate(): res = \ _accumulate([ (['a'], ['d'], ['g', 'h']), (['b', 'c'], ['e', 'f'], ['i']) ]) assert res == (['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']) assert _accumulate([]) == ([], [], []) def test_dedupe(): assert ''.join(_dedupe('aaaaabbbbcbbbbcccaa')) == 'abc' def test_get_labware(labware_setup): instruments, tip_racks, plates, commands = labware_setup p100, p1000 = instruments assert _get_labware(commands[0]) == \ ([p100], [plates[0]], [(p100, plates[0])]) assert _get_labware(commands[1]) == \ ([], [plates[1]], []) assert _get_labware(commands[2]) == \ ([p1000], [plates[0], plates[1]], [(p1000, plates[0]), (p1000, plates[1])]) instruments, containers, interactions = \ _accumulate([_get_labware(command) for command in commands]) assert \ [ list(_dedupe(instruments)), list(_dedupe(containers)), list(_dedupe(interactions)) ] == \ [ [p100, p1000], [plates[0], plates[1]], [(p100, plates[0]), (p1000, plates[0]), (p1000, plates[1])] ] async def test_session_model_functional(session_manager, protocol): session = 
session_manager.create(name='<blank>', text=protocol.text) assert [container.name for container in session.containers] == \ ['tiprack', 'trough', 'plate', 'tall-fixed-trash'] names = [instrument.name for instrument in session.instruments] assert names == ['p300_single_v1'] # TODO(artyom 20171018): design a small protocol specifically for the test @pytest.mark.parametrize('protocol_file', ['bradford_assay.py']) async def test_drop_tip_with_trash(session_manager, protocol, protocol_file): """ <NAME> is using drop_tip() with no arguments that assumes tip drop into trash-box. In this test we are confirming that that trash location is being inferred from a command, and trash is listed as a container for a protocol, as well as a container instruments are interacting with. """ session = session_manager.create(name='<blank>', text=protocol.text) assert 'tall-fixed-trash' in [c.name for c in session.get_containers()] containers = sum([i.containers for i in session.get_instruments()], []) assert 'tall-fixed-trash' in [c.name for c in containers] async def test_session_create_error(main_router): with pytest.raises(SyntaxError): main_router.session_manager.create( name='<blank>', text='syntax error ;(') with pytest.raises(TimeoutError): # No state change is expected await main_router.wait_until(lambda _: True) with pytest.raises(ZeroDivisionError): main_router.session_manager.create( name='<blank>', text='1/0') with pytest.raises(TimeoutError): # No state change is expected await main_router.wait_until(lambda _: True)
0.582729
0.500244
import numpy as np import torch.nn as nn from thop import profile from thop import clever_format class OpsCounter(): def __init__(self, count_backward=False): self.verbose = False self.multiplier=2 if count_backward else 1 # counts foward + backward pass MACs self.task_mac_counter, self.task_params_counter, self.task_time = 0, 0, 0 self.macs, self.params, self.time = [], [], [] def set_base_params(self, base_model): # feature extractor params feature_extractor_params = 0 for param in base_model.feature_extractor.parameters(): feature_extractor_params += param.numel() # classifier params classifier_params = 0 if isinstance(base_model.classifier, nn.Module): for param in base_model.classifier.parameters(): classifier_params += param.numel() feature_adapter_params, set_encoder_params = 0, 0 if base_model.args.adapt_features: # feature adapter params for param in base_model.feature_adapter.parameters(): feature_adapter_params += param.numel() # set encoder params for param in base_model.set_encoder.parameters(): set_encoder_params += param.numel() self.base_params_counter = feature_extractor_params + classifier_params + feature_adapter_params + set_encoder_params feature_extractor_params, classifier_params, feature_adapter_params, set_encoder_params = clever_format([feature_extractor_params, classifier_params, feature_adapter_params, set_encoder_params], "%.2f") self.params_break_down = "feature extractor: {0:}, classifier: {1:}, feature adapter: {2:}, set encoder: {3:}".format(feature_extractor_params, classifier_params, feature_adapter_params, set_encoder_params) def add_macs(self, num_macs): self.task_mac_counter += num_macs def add_params(self, num_params): self.task_params_counter += num_params def log_time(self, time): self.task_time += time def compute_macs(self, module, *inputs): list_inputs = [] for input in inputs: list_inputs.append(input) custom_ops = module.thop_custom_ops if hasattr(module, 'thop_custom_ops') else {} macs, params = profile(module, 
inputs=inputs, custom_ops=custom_ops, verbose=self.verbose) self.add_macs(macs * self.multiplier) def task_complete(self): self.macs.append(self.task_mac_counter) self.params.append(self.base_params_counter + self.task_params_counter) self.time.append(self.task_time) self.task_mac_counter = 0 self.task_params_counter = 0 self.task_time = 0 def get_macs(self): return clever_format([self.macs[-1]], "%.2f") def get_mean_stats(self): mean_ops = np.mean(self.macs) std_ops = np.std(self.macs) mean_params = np.mean(self.params) mean_ops, std_ops, mean_params = clever_format([mean_ops, std_ops, mean_params], "%.2f") mean_time = np.mean(self.time) std_time = np.std(self.time) return "MACs to personalise: {0:} ({1:}) time to personalise: {2:.2f}s ({3:.2f}s) #learnable params {4:} ({5:})".format(mean_ops, std_ops, mean_time, std_time, mean_params, self.params_break_down)
utils/ops_counter.py
import numpy as np import torch.nn as nn from thop import profile from thop import clever_format class OpsCounter(): def __init__(self, count_backward=False): self.verbose = False self.multiplier=2 if count_backward else 1 # counts foward + backward pass MACs self.task_mac_counter, self.task_params_counter, self.task_time = 0, 0, 0 self.macs, self.params, self.time = [], [], [] def set_base_params(self, base_model): # feature extractor params feature_extractor_params = 0 for param in base_model.feature_extractor.parameters(): feature_extractor_params += param.numel() # classifier params classifier_params = 0 if isinstance(base_model.classifier, nn.Module): for param in base_model.classifier.parameters(): classifier_params += param.numel() feature_adapter_params, set_encoder_params = 0, 0 if base_model.args.adapt_features: # feature adapter params for param in base_model.feature_adapter.parameters(): feature_adapter_params += param.numel() # set encoder params for param in base_model.set_encoder.parameters(): set_encoder_params += param.numel() self.base_params_counter = feature_extractor_params + classifier_params + feature_adapter_params + set_encoder_params feature_extractor_params, classifier_params, feature_adapter_params, set_encoder_params = clever_format([feature_extractor_params, classifier_params, feature_adapter_params, set_encoder_params], "%.2f") self.params_break_down = "feature extractor: {0:}, classifier: {1:}, feature adapter: {2:}, set encoder: {3:}".format(feature_extractor_params, classifier_params, feature_adapter_params, set_encoder_params) def add_macs(self, num_macs): self.task_mac_counter += num_macs def add_params(self, num_params): self.task_params_counter += num_params def log_time(self, time): self.task_time += time def compute_macs(self, module, *inputs): list_inputs = [] for input in inputs: list_inputs.append(input) custom_ops = module.thop_custom_ops if hasattr(module, 'thop_custom_ops') else {} macs, params = profile(module, 
inputs=inputs, custom_ops=custom_ops, verbose=self.verbose) self.add_macs(macs * self.multiplier) def task_complete(self): self.macs.append(self.task_mac_counter) self.params.append(self.base_params_counter + self.task_params_counter) self.time.append(self.task_time) self.task_mac_counter = 0 self.task_params_counter = 0 self.task_time = 0 def get_macs(self): return clever_format([self.macs[-1]], "%.2f") def get_mean_stats(self): mean_ops = np.mean(self.macs) std_ops = np.std(self.macs) mean_params = np.mean(self.params) mean_ops, std_ops, mean_params = clever_format([mean_ops, std_ops, mean_params], "%.2f") mean_time = np.mean(self.time) std_time = np.std(self.time) return "MACs to personalise: {0:} ({1:}) time to personalise: {2:.2f}s ({3:.2f}s) #learnable params {4:} ({5:})".format(mean_ops, std_ops, mean_time, std_time, mean_params, self.params_break_down)
0.861887
0.159643
from pyfluminus.api import name, modules, get_announcements, current_term from pyfluminus.structs import Module from pyfluminus.fluminus import get_links_for_module from app import db from app.models import User, User_Mods from app.extra_api import get_class_grps from datetime import datetime def get_active_mods(auth): """Gets all active mods taken by authenticated student. :param auth: Authentication token issued from Luminus :type auth: dict :return: Mods and their details in a dictionary e.g.: {CS2030 : {"name" : Module name, "id" : Module id, "term" : Term this module is taken in}} :rtype: dict """ mods = modules(auth).data mods_dict = {} for mod in mods: mods_dict[mod.code] = {"name" : mod.name, "id" : mod.id, "term" : mod.term} return mods_dict def get_all_announcement(auth): """Gets all announcements the current authenticated user has. :param auth: Authentication token issued from Luminus :type auth: dict :return: Dictionary of all announcements the user has, grouped by modules :rtype: dict """ mods = modules(auth).data announcements_list = {} for mod in mods: announcements_list[mod.code] = get_announcements(auth, mod.id, False).data return announcements_list def get_current_term(auth): """Gets the current semester this student is in. 
:param auth: Authentication token issued by Luminus :type auth: dict :return: Current term of the student :rtype: dict """ return current_term(auth).data def response_json(status, count, data): """Generates JSON for http responses :param status: True if data is valid and there are no errors, False otherwise :type valid: boolean :param code: http response code :type code: int :param count: Total number of fields in data :type count: int :param data: json structure for the actual data :type data: dict :return: Dictionary comprising of all the params to be sent to client as JSON :rtype: dict """ return { "status" : status, #"code" : code, "count" : count, "data" : data } def add_mods(auth, uId): mods = get_active_mods(auth) for key in mods: mod_id = mods[key]["id"] class_grp = get_class_grps(auth, mod_id) if class_grp is not None: print(class_grp) m = User_Mods(code=key, mod_id=mod_id, name=mods[key]["name"], class_grp=class_grp, term=mods[key]["term"], sem=1, student=uId) m.get_timings() db.session.add(m) db.session.commit() def update_mods(auth, uId): mods = get_active_mods(auth) old_mods = User.query.get(uId).mods for mod in old_mods: db.session.delete(mod) db.session.commit() add_mods(auth, uId) def get_mod_files(auth): mods = modules(auth).data files = [] for module in mods: if module is None: continue data = get_links_for_module(auth, module) files.append(data) return files def get_single_mod_files(auth, code): mods = modules(auth).data for mod in mods: if mod is None: continue if mod.code == code: return get_links_for_module(auth, mod) return None def get_single_mod_announcements(auth, mod_id): msgs = get_announcements(auth, mod_id, False).data for msg in msgs: msg['datetime'] = msg['datetime'].strftime("%a, %d %b %Y, %H:%M:%S") return msgs
app/util.py
from pyfluminus.api import name, modules, get_announcements, current_term from pyfluminus.structs import Module from pyfluminus.fluminus import get_links_for_module from app import db from app.models import User, User_Mods from app.extra_api import get_class_grps from datetime import datetime def get_active_mods(auth): """Gets all active mods taken by authenticated student. :param auth: Authentication token issued from Luminus :type auth: dict :return: Mods and their details in a dictionary e.g.: {CS2030 : {"name" : Module name, "id" : Module id, "term" : Term this module is taken in}} :rtype: dict """ mods = modules(auth).data mods_dict = {} for mod in mods: mods_dict[mod.code] = {"name" : mod.name, "id" : mod.id, "term" : mod.term} return mods_dict def get_all_announcement(auth): """Gets all announcements the current authenticated user has. :param auth: Authentication token issued from Luminus :type auth: dict :return: Dictionary of all announcements the user has, grouped by modules :rtype: dict """ mods = modules(auth).data announcements_list = {} for mod in mods: announcements_list[mod.code] = get_announcements(auth, mod.id, False).data return announcements_list def get_current_term(auth): """Gets the current semester this student is in. 
:param auth: Authentication token issued by Luminus :type auth: dict :return: Current term of the student :rtype: dict """ return current_term(auth).data def response_json(status, count, data): """Generates JSON for http responses :param status: True if data is valid and there are no errors, False otherwise :type valid: boolean :param code: http response code :type code: int :param count: Total number of fields in data :type count: int :param data: json structure for the actual data :type data: dict :return: Dictionary comprising of all the params to be sent to client as JSON :rtype: dict """ return { "status" : status, #"code" : code, "count" : count, "data" : data } def add_mods(auth, uId): mods = get_active_mods(auth) for key in mods: mod_id = mods[key]["id"] class_grp = get_class_grps(auth, mod_id) if class_grp is not None: print(class_grp) m = User_Mods(code=key, mod_id=mod_id, name=mods[key]["name"], class_grp=class_grp, term=mods[key]["term"], sem=1, student=uId) m.get_timings() db.session.add(m) db.session.commit() def update_mods(auth, uId): mods = get_active_mods(auth) old_mods = User.query.get(uId).mods for mod in old_mods: db.session.delete(mod) db.session.commit() add_mods(auth, uId) def get_mod_files(auth): mods = modules(auth).data files = [] for module in mods: if module is None: continue data = get_links_for_module(auth, module) files.append(data) return files def get_single_mod_files(auth, code): mods = modules(auth).data for mod in mods: if mod is None: continue if mod.code == code: return get_links_for_module(auth, mod) return None def get_single_mod_announcements(auth, mod_id): msgs = get_announcements(auth, mod_id, False).data for msg in msgs: msg['datetime'] = msg['datetime'].strftime("%a, %d %b %Y, %H:%M:%S") return msgs
0.614278
0.304882
import numpy as np from .utils_complexity_attractor import _attractor_lorenz def complexity_simulate( duration=10, sampling_rate=1000, method="ornstein", hurst_exponent=0.5, **kwargs ): """**Simulate chaotic time series** This function generates a chaotic signal using different algorithms and complex systems. * **Mackey-Glass:** Generates time series using the discrete approximation of the Mackey-Glass delay differential equation described by Grassberger & Procaccia (1983). * **Ornstein-Uhlenbeck** * **Lorenz** * **Random walk** Parameters ---------- duration : int Desired length of duration (s). sampling_rate : int The desired sampling rate (in Hz, i.e., samples/second). duration : int The desired length in samples. method : str The method. can be ``"hurst"`` for a (fractional) Ornstein-Uhlenbeck process, ``"lorenz"`` for the first dimension of a Lorenz system, ``"mackeyglass"`` to use the Mackey-Glass equation, or ``random`` to generate a random-walk. hurst_exponent : float Defaults to ``0.5``. **kwargs Other arguments. Returns ------- array Simulated complexity time series. Examples ------------ **Lorenz System** .. ipython:: python import neurokit2 as nk signal = nk.complexity_simulate(duration=5, sampling_rate=1000, method="lorenz") @savefig p_complexity_simulate1.png scale=100% nk.signal_plot(signal) @suppress plt.close() .. ipython:: python @savefig p_complexity_simulate2.png scale=100% nk.complexity_attractor(nk.complexity_embedding(signal, delay = 5), alpha=1, color="blue") @suppress plt.close() **Ornstein System** .. ipython:: python signal = nk.complexity_simulate(duration=30, sampling_rate=100, method="ornstein") @savefig p_complexity_simulate3.png scale=100% nk.signal_plot(signal, color = "red") @suppress plt.close() .. ipython:: python @savefig p_complexity_simulate4.png scale=100% nk.complexity_attractor(nk.complexity_embedding(signal, delay = 100), alpha=1, color="red") @suppress plt.close() **Mackey-Glass System** .. 
ipython:: python signal = nk.complexity_simulate(duration=1, sampling_rate=1000, method="mackeyglass") @savefig p_complexity_simulate5.png scale=100% nk.signal_plot(signal, color = "green") @suppress plt.close() .. ipython:: python @savefig p_complexity_simulate6.png scale=100% nk.complexity_attractor(nk.complexity_embedding(signal, delay = 25), alpha=1, color="green") @suppress plt.close() **Random walk** .. ipython:: python signal = nk.complexity_simulate(duration=30, sampling_rate=100, method="randomwalk") @savefig p_complexity_simulate7.png scale=100% nk.signal_plot(signal, color = "orange") @suppress plt.close() .. ipython:: python @savefig p_complexity_simulate8.png scale=100% nk.complexity_attractor(nk.complexity_embedding(signal, delay = 100), alpha=1, color="orange") @suppress plt.close() """ method = method.lower() if method in ["fractal", "fractional", "hurst", "ornsteinuhlenbeck", "ornstein"]: signal = _complexity_simulate_ornstein( duration=duration, sampling_rate=sampling_rate, hurst_exponent=hurst_exponent, **kwargs ) elif method in ["lorenz"]: # x-dimension of Lorenz system signal = _attractor_lorenz(sampling_rate=sampling_rate, duration=duration, **kwargs)[:, 0] elif method in ["mackeyglass"]: signal = _complexity_simulate_mackeyglass( duration=duration, sampling_rate=sampling_rate, **kwargs ) else: signal = _complexity_simulate_randomwalk(int(duration * sampling_rate)) return signal # ============================================================================= # Methods # ============================================================================= def _complexity_simulate_mackeyglass( duration=10, sampling_rate=1000, x0="fixed", a=0.2, b=0.1, c=10.0, n=1000, discard=250 ): """Generate time series using the Mackey-Glass equation. Generates time series using the discrete approximation of the Mackey-Glass delay differential equation described by Grassberger & Procaccia (1983). 
Taken from nolitsa (https://github.com/manu-mannattil/nolitsa/blob/master/nolitsa/data.py#L223). Parameters ---------- duration : int Duration of the time series to be generated. sampling_rate : float Sampling step of the time series. It is useful to pick something between tau/100 and tau/10, with tau/sampling_rate being a factor of n. This will make sure that there are only whole number indices. Defaults to 1000. x0 : array Initial condition for the discrete map. Should be of length n. Can be "fixed", "random", or a vector of size n. a : float Constant a in the Mackey-Glass equation. Defaults to 0.2. b : float Constant b in the Mackey-Glass equation. Defaults to 0.1. c : float Constant c in the Mackey-Glass equation. Defaults to 10.0 n : int The number of discrete steps into which the interval between t and t + tau should be divided. This results in a time step of tau/n and an n + 1 dimensional map. Defaults to 1000. discard : int Number of n-steps to discard in order to eliminate transients. A total of n*discard steps will be discarded. Defaults to 250. Returns ------- array Simulated complexity time series. """ length = duration * sampling_rate tau = sampling_rate / 2 * 100 sampling_rate = int(n * sampling_rate / tau) grids = int(n * discard + sampling_rate * length) x = np.zeros(grids) if isinstance(x0, str): if x0 == "random": x[:n] = 0.5 + 0.05 * (-1 + 2 * np.random.random(n)) else: x[:n] = np.ones(n) else: x[:n] = x0 A = (2 * n - b * tau) / (2 * n + b * tau) B = a * tau / (2 * n + b * tau) for i in range(n - 1, grids - 1): x[i + 1] = A * x[i] + B * ( x[i - n] / (1 + x[i - n] ** c) + x[i - n + 1] / (1 + x[i - n + 1] ** c) ) return x[n * discard :: sampling_rate] def _complexity_simulate_ornstein( duration=10, sampling_rate=1000, theta=0.3, sigma=0.1, hurst_exponent=0.7 ): """This is based on https://github.com/LRydin/MFDFA. Parameters ---------- duration : int The desired length in samples. 
sampling_rate : int The desired sampling rate (in Hz, i.e., samples/second). Defaults to 1000Hz. theta : float Drift. Defaults to 0.3. sigma : float Diffusion. Defaults to 0.1. hurst_exponent : float Defaults to 0.7. Returns ------- array Simulated complexity time series. """ # Time array length = duration * sampling_rate # The fractional Gaussian noise dB = (duration ** hurst_exponent) * _complexity_simulate_fractionalnoise( size=length, hurst_exponent=hurst_exponent ) # Initialise the array y y = np.zeros([length]) # Integrate the process for i in range(1, length): y[i] = y[i - 1] - theta * y[i - 1] * (1 / sampling_rate) + sigma * dB[i] return y def _complexity_simulate_fractionalnoise(size=1000, hurst_exponent=0.5): """Generates fractional Gaussian noise. Generates fractional Gaussian noise with a Hurst index H in (0,1). If H = 1/2 this is simply Gaussian noise. The current method employed is the Davies-Harte method, which fails for H ≈ 0. Looking for help to implement a Cholesky decomposition method and the Hosking's method. This is based on https://github.com/LRydin/MFDFA/blob/master/MFDFA/fgn.py and the work of <NAME> fbm in https://github.com/crflynn/fbm See also Davies, <NAME>., and <NAME>. 'Tests for Hurst effect.' Biometrika 74, no.1 (1987): 95-101. Parameters ---------- size : int Length of fractional Gaussian noise to generate. hurst_exponent : float Hurst exponent H in (0,1). Returns ------- array Simulated complexity time series. 
""" # Sanity checks assert isinstance(size, int), "Size must be an integer number" assert isinstance(hurst_exponent, float), "Hurst index must be a float in (0,1)" # Generate linspace k = np.linspace(0, size - 1, size) # Correlation function cor = 0.5 * ( np.abs(k - 1) ** (2 * hurst_exponent) - 2 * np.abs(k) ** (2 * hurst_exponent) + np.abs(k + 1) ** (2 * hurst_exponent) ) # Eigenvalues of the correlation function eigenvals = np.sqrt(np.fft.fft(np.concatenate([cor[:], 0, cor[1:][::-1]], axis=None).real)) # Two normal distributed noises to be convoluted gn = np.random.normal(0.0, 1.0, size) gn2 = np.random.normal(0.0, 1.0, size) # This is the Davies–Harte method w = np.concatenate( [ (eigenvals[0] / np.sqrt(2 * size)) * gn[0], (eigenvals[1:size] / np.sqrt(4 * size)) * (gn[1:] + 1j * gn2[1:]), (eigenvals[size] / np.sqrt(2 * size)) * gn2[0], (eigenvals[size + 1 :] / np.sqrt(4 * size)) * (gn[1:][::-1] - 1j * gn2[1:][::-1]), ], axis=None, ) # Perform fft. Only first N entry are useful f = np.fft.fft(w).real[:size] * ((1.0 / size) ** hurst_exponent) return f def _complexity_simulate_randomwalk(size=1000): """Random walk.""" steps = np.random.choice(a=[-1, 0, 1], size=size - 1) return np.concatenate([np.zeros(1), steps]).cumsum(0)
neurokit2/complexity/utils_complexity_simulate.py
import numpy as np

from .utils_complexity_attractor import _attractor_lorenz


def complexity_simulate(
    duration=10, sampling_rate=1000, method="ornstein", hurst_exponent=0.5, **kwargs
):
    """**Simulate chaotic time series**

    This function generates a chaotic signal using different algorithms and complex systems.

    * **Mackey-Glass:** Generates time series using the discrete approximation of the
      Mackey-Glass delay differential equation described by Grassberger & Procaccia (1983).
    * **Ornstein-Uhlenbeck**
    * **Lorenz**
    * **Random walk**

    Parameters
    ----------
    duration : int
        Desired length of the signal (in seconds).
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    method : str
        The method. Can be ``"hurst"`` for a (fractional) Ornstein-Uhlenbeck process,
        ``"lorenz"`` for the first dimension of a Lorenz system, ``"mackeyglass"`` to use the
        Mackey-Glass equation, or ``"random"`` to generate a random-walk.
    hurst_exponent : float
        Defaults to ``0.5``. Only used for the Ornstein-Uhlenbeck method.
    **kwargs
        Other arguments, passed on to the method-specific simulator.

    Returns
    -------
    array
        Simulated complexity time series.

    Examples
    ------------
    **Lorenz System**

    .. ipython:: python

      import neurokit2 as nk

      signal = nk.complexity_simulate(duration=5, sampling_rate=1000, method="lorenz")
      @savefig p_complexity_simulate1.png scale=100%
      nk.signal_plot(signal)
      @suppress
      plt.close()

    .. ipython:: python

      @savefig p_complexity_simulate2.png scale=100%
      nk.complexity_attractor(nk.complexity_embedding(signal, delay = 5), alpha=1, color="blue")
      @suppress
      plt.close()

    **Ornstein System**

    .. ipython:: python

      signal = nk.complexity_simulate(duration=30, sampling_rate=100, method="ornstein")
      @savefig p_complexity_simulate3.png scale=100%
      nk.signal_plot(signal, color = "red")
      @suppress
      plt.close()

    .. ipython:: python

      @savefig p_complexity_simulate4.png scale=100%
      nk.complexity_attractor(nk.complexity_embedding(signal, delay = 100), alpha=1, color="red")
      @suppress
      plt.close()

    **Mackey-Glass System**

    .. ipython:: python

      signal = nk.complexity_simulate(duration=1, sampling_rate=1000, method="mackeyglass")
      @savefig p_complexity_simulate5.png scale=100%
      nk.signal_plot(signal, color = "green")
      @suppress
      plt.close()

    .. ipython:: python

      @savefig p_complexity_simulate6.png scale=100%
      nk.complexity_attractor(nk.complexity_embedding(signal, delay = 25), alpha=1, color="green")
      @suppress
      plt.close()

    **Random walk**

    .. ipython:: python

      signal = nk.complexity_simulate(duration=30, sampling_rate=100, method="randomwalk")
      @savefig p_complexity_simulate7.png scale=100%
      nk.signal_plot(signal, color = "orange")
      @suppress
      plt.close()

    .. ipython:: python

      @savefig p_complexity_simulate8.png scale=100%
      nk.complexity_attractor(nk.complexity_embedding(signal, delay = 100), alpha=1, color="orange")
      @suppress
      plt.close()

    """
    method = method.lower()
    if method in ["fractal", "fractional", "hurst", "ornsteinuhlenbeck", "ornstein"]:
        signal = _complexity_simulate_ornstein(
            duration=duration, sampling_rate=sampling_rate, hurst_exponent=hurst_exponent, **kwargs
        )
    elif method in ["lorenz"]:
        # x-dimension of Lorenz system
        signal = _attractor_lorenz(sampling_rate=sampling_rate, duration=duration, **kwargs)[:, 0]
    elif method in ["mackeyglass"]:
        signal = _complexity_simulate_mackeyglass(
            duration=duration, sampling_rate=sampling_rate, **kwargs
        )
    else:
        signal = _complexity_simulate_randomwalk(int(duration * sampling_rate))
    return signal


# =============================================================================
# Methods
# =============================================================================
def _complexity_simulate_mackeyglass(
    duration=10, sampling_rate=1000, x0="fixed", a=0.2, b=0.1, c=10.0, n=1000, discard=250
):
    """Generate time series using the Mackey-Glass equation.

    Generates time series using the discrete approximation of the Mackey-Glass delay
    differential equation described by Grassberger & Procaccia (1983). Taken from nolitsa
    (https://github.com/manu-mannattil/nolitsa/blob/master/nolitsa/data.py#L223).

    Parameters
    ----------
    duration : int
        Duration of the time series to be generated.
    sampling_rate : float
        Sampling step of the time series. It is useful to pick something between tau/100 and
        tau/10, with tau/sampling_rate being a factor of n. This will make sure that there are
        only whole number indices. Defaults to 1000.
    x0 : array
        Initial condition for the discrete map. Should be of length n. Can be "fixed",
        "random", or a vector of size n.
    a : float
        Constant a in the Mackey-Glass equation. Defaults to 0.2.
    b : float
        Constant b in the Mackey-Glass equation. Defaults to 0.1.
    c : float
        Constant c in the Mackey-Glass equation. Defaults to 10.0.
    n : int
        The number of discrete steps into which the interval between t and t + tau should be
        divided. This results in a time step of tau/n and an n + 1 dimensional map.
        Defaults to 1000.
    discard : int
        Number of n-steps to discard in order to eliminate transients. A total of n*discard
        steps will be discarded. Defaults to 250.

    Returns
    -------
    array
        Simulated complexity time series.

    """
    length = duration * sampling_rate
    # NOTE(review): tau is derived from the sampling rate here rather than being a free
    # parameter as in the nolitsa original — presumably deliberate; confirm against callers.
    tau = sampling_rate / 2 * 100
    sampling_rate = int(n * sampling_rate / tau)
    grids = int(n * discard + sampling_rate * length)
    x = np.zeros(grids)

    # Initial condition: fixed (all ones), random perturbation around 0.5, or user-supplied.
    if isinstance(x0, str):
        if x0 == "random":
            x[:n] = 0.5 + 0.05 * (-1 + 2 * np.random.random(n))
        else:
            x[:n] = np.ones(n)
    else:
        x[:n] = x0

    # Coefficients of the discrete (trapezoidal) approximation of the delay equation.
    A = (2 * n - b * tau) / (2 * n + b * tau)
    B = a * tau / (2 * n + b * tau)

    for i in range(n - 1, grids - 1):
        x[i + 1] = A * x[i] + B * (
            x[i - n] / (1 + x[i - n] ** c) + x[i - n + 1] / (1 + x[i - n + 1] ** c)
        )
    # Drop transients and downsample to the requested step.
    return x[n * discard :: sampling_rate]


def _complexity_simulate_ornstein(
    duration=10, sampling_rate=1000, theta=0.3, sigma=0.1, hurst_exponent=0.7
):
    """Simulate a (fractional) Ornstein-Uhlenbeck process.

    This is based on https://github.com/LRydin/MFDFA.

    Parameters
    ----------
    duration : int
        Desired length of the signal (in seconds).
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second). Defaults to 1000Hz.
    theta : float
        Drift. Defaults to 0.3.
    sigma : float
        Diffusion. Defaults to 0.1.
    hurst_exponent : float
        Defaults to 0.7.

    Returns
    -------
    array
        Simulated complexity time series.

    """
    # Number of samples. Cast to int so that a float `duration` (e.g. 0.5 s) does not
    # crash np.zeros()/range() below (the random-walk branch of complexity_simulate()
    # already applies the same cast).
    length = int(duration * sampling_rate)

    # The fractional Gaussian noise increments, scaled by duration**H.
    dB = (duration ** hurst_exponent) * _complexity_simulate_fractionalnoise(
        size=length, hurst_exponent=hurst_exponent
    )

    # Initialise the output array
    y = np.zeros([length])

    # Integrate the process with an Euler scheme: dy = -theta*y*dt + sigma*dB.
    for i in range(1, length):
        y[i] = y[i - 1] - theta * y[i - 1] * (1 / sampling_rate) + sigma * dB[i]
    return y


def _complexity_simulate_fractionalnoise(size=1000, hurst_exponent=0.5):
    """Generates fractional Gaussian noise.

    Generates fractional Gaussian noise with a Hurst index H in (0,1). If H = 1/2 this is
    simply Gaussian noise. The current method employed is the Davies-Harte method, which
    fails for H ≈ 0. Looking for help to implement a Cholesky decomposition method and
    Hosking's method.

    This is based on https://github.com/LRydin/MFDFA/blob/master/MFDFA/fgn.py and the fbm
    package (https://github.com/crflynn/fbm).

    See also Davies & Harte. 'Tests for Hurst effect.' Biometrika 74, no.1 (1987): 95-101.

    Parameters
    ----------
    size : int
        Length of fractional Gaussian noise to generate.
    hurst_exponent : float
        Hurst exponent H in (0,1).

    Returns
    -------
    array
        Simulated complexity time series.

    """
    # Sanity checks
    assert isinstance(size, int), "Size must be an integer number"
    assert isinstance(hurst_exponent, float), "Hurst index must be a float in (0,1)"

    # Generate linspace
    k = np.linspace(0, size - 1, size)

    # Autocovariance function of fractional Gaussian noise.
    cor = 0.5 * (
        np.abs(k - 1) ** (2 * hurst_exponent)
        - 2 * np.abs(k) ** (2 * hurst_exponent)
        + np.abs(k + 1) ** (2 * hurst_exponent)
    )

    # Eigenvalues of the circulant embedding of the correlation function.
    # NOTE(review): for H close to 1 the FFT can yield (slightly) negative values, making
    # the sqrt complex — a known limitation of Davies-Harte; confirm acceptable here.
    eigenvals = np.sqrt(np.fft.fft(np.concatenate([cor[:], 0, cor[1:][::-1]], axis=None).real))

    # Two normally distributed noises to be convoluted
    gn = np.random.normal(0.0, 1.0, size)
    gn2 = np.random.normal(0.0, 1.0, size)

    # This is the Davies–Harte method
    w = np.concatenate(
        [
            (eigenvals[0] / np.sqrt(2 * size)) * gn[0],
            (eigenvals[1:size] / np.sqrt(4 * size)) * (gn[1:] + 1j * gn2[1:]),
            (eigenvals[size] / np.sqrt(2 * size)) * gn2[0],
            (eigenvals[size + 1 :] / np.sqrt(4 * size)) * (gn[1:][::-1] - 1j * gn2[1:][::-1]),
        ],
        axis=None,
    )

    # Perform fft. Only the first `size` entries are useful.
    f = np.fft.fft(w).real[:size] * ((1.0 / size) ** hurst_exponent)

    return f


def _complexity_simulate_randomwalk(size=1000):
    """Random walk: cumulative sum of i.i.d. steps drawn from {-1, 0, 1}, starting at 0."""
    steps = np.random.choice(a=[-1, 0, 1], size=size - 1)
    return np.concatenate([np.zeros(1), steps]).cumsum(0)
0.9243
0.659857
__version__ = "1.33.0" __author__ = "AccelByte" __email__ = "<EMAIL>" # pylint: disable=line-too-long from .handlers_get_users_presence_response import HandlersGetUsersPresenceResponse from .handlers_user_presence import HandlersUserPresence from .log_app_message_declaration import LogAppMessageDeclaration from .model_bulk_add_friends_request import ModelBulkAddFriendsRequest from .model_bulk_users_free_form_notification_request_v1 import ModelBulkUsersFreeFormNotificationRequestV1 from .model_chat_message_response import ModelChatMessageResponse from .model_create_template_request import ModelCreateTemplateRequest from .model_create_topic_request import ModelCreateTopicRequest from .model_create_topic_request_v1 import ModelCreateTopicRequestV1 from .model_free_form_notification_request import ModelFreeFormNotificationRequest from .model_free_form_notification_request_v1 import ModelFreeFormNotificationRequestV1 from .model_get_all_notification_template_slug_resp import ModelGetAllNotificationTemplateSlugResp from .model_get_all_notification_topics_response import ModelGetAllNotificationTopicsResponse from .model_get_friends_response import ModelGetFriendsResponse from .model_get_stored_notification_resp import ModelGetStoredNotificationResp from .model_get_user_friends_response import ModelGetUserFriendsResponse from .model_get_user_incoming_friends_response import ModelGetUserIncomingFriendsResponse from .model_get_user_outgoing_friends_response import ModelGetUserOutgoingFriendsResponse from .model_localization import ModelLocalization from .model_notification_response import ModelNotificationResponse from .model_notification_template_response import ModelNotificationTemplateResponse from .model_notification_topic_response import ModelNotificationTopicResponse from .model_notification_topic_response_v1 import ModelNotificationTopicResponseV1 from .model_notification_with_template_request import ModelNotificationWithTemplateRequest from 
.model_notification_with_template_request_v1 import ModelNotificationWithTemplateRequestV1 from .model_pagination import ModelPagination from .model_request_friends_request import ModelRequestFriendsRequest from .model_template_content import ModelTemplateContent from .model_template_localization import ModelTemplateLocalization from .model_template_localization_response import ModelTemplateLocalizationResponse from .model_template_response import ModelTemplateResponse from .model_topic_by_namespaces_response import ModelTopicByNamespacesResponse from .model_update_template_request import ModelUpdateTemplateRequest from .model_update_topic_request import ModelUpdateTopicRequest from .model_user_accept_friend_request import ModelUserAcceptFriendRequest from .model_user_cancel_friend_request import ModelUserCancelFriendRequest from .model_user_get_friendship_status_response import ModelUserGetFriendshipStatusResponse from .model_user_reject_friend_request import ModelUserRejectFriendRequest from .model_user_unfriend_request import ModelUserUnfriendRequest from .models_admin_add_profanity_filter_into_list_request import ModelsAdminAddProfanityFilterIntoListRequest from .models_admin_add_profanity_filters_filter_request import ModelsAdminAddProfanityFiltersFilterRequest from .models_admin_add_profanity_filters_request import ModelsAdminAddProfanityFiltersRequest from .models_admin_create_profanity_list_request import ModelsAdminCreateProfanityListRequest from .models_admin_delete_profanity_filter_request import ModelsAdminDeleteProfanityFilterRequest from .models_admin_get_profanity_list_filters_v1_response import ModelsAdminGetProfanityListFiltersV1Response from .models_admin_get_profanity_lists_list_response import ModelsAdminGetProfanityListsListResponse from .models_admin_set_profanity_rule_for_namespace_request import ModelsAdminSetProfanityRuleForNamespaceRequest from .models_admin_update_profanity_list import ModelsAdminUpdateProfanityList from 
.models_admin_verify_message_profanity_request import ModelsAdminVerifyMessageProfanityRequest from .models_admin_verify_message_profanity_response import ModelsAdminVerifyMessageProfanityResponse from .models_blocked_by_player_data import ModelsBlockedByPlayerData from .models_blocked_player_data import ModelsBlockedPlayerData from .models_config import ModelsConfig from .models_config_list import ModelsConfigList from .models_config_req import ModelsConfigReq from .models_create_config_request import ModelsCreateConfigRequest from .models_create_config_response import ModelsCreateConfigResponse from .models_debug_profanity_filter_request import ModelsDebugProfanityFilterRequest from .models_get_all_player_blocked_by_users_response import ModelsGetAllPlayerBlockedByUsersResponse from .models_get_all_player_blocked_users_response import ModelsGetAllPlayerBlockedUsersResponse from .models_get_all_player_session_attribute_response import ModelsGetAllPlayerSessionAttributeResponse from .models_get_config_response import ModelsGetConfigResponse from .models_get_player_session_attribute_response import ModelsGetPlayerSessionAttributeResponse from .models_list_blocked_player_request import ModelsListBlockedPlayerRequest from .models_party_data import ModelsPartyData from .models_party_put_custom_attributes_request import ModelsPartyPUTCustomAttributesRequest from .models_profanity_filter import ModelsProfanityFilter from .models_profanity_rule import ModelsProfanityRule from .models_set_player_session_attribute_request import ModelsSetPlayerSessionAttributeRequest from .models_update_config_request import ModelsUpdateConfigRequest from .models_update_config_response import ModelsUpdateConfigResponse from .restapi_error_response_body import RestapiErrorResponseBody from .restapi_error_response_v1 import RestapiErrorResponseV1
accelbyte_py_sdk/api/lobby/models/__init__.py
__version__ = "1.33.0" __author__ = "AccelByte" __email__ = "<EMAIL>" # pylint: disable=line-too-long from .handlers_get_users_presence_response import HandlersGetUsersPresenceResponse from .handlers_user_presence import HandlersUserPresence from .log_app_message_declaration import LogAppMessageDeclaration from .model_bulk_add_friends_request import ModelBulkAddFriendsRequest from .model_bulk_users_free_form_notification_request_v1 import ModelBulkUsersFreeFormNotificationRequestV1 from .model_chat_message_response import ModelChatMessageResponse from .model_create_template_request import ModelCreateTemplateRequest from .model_create_topic_request import ModelCreateTopicRequest from .model_create_topic_request_v1 import ModelCreateTopicRequestV1 from .model_free_form_notification_request import ModelFreeFormNotificationRequest from .model_free_form_notification_request_v1 import ModelFreeFormNotificationRequestV1 from .model_get_all_notification_template_slug_resp import ModelGetAllNotificationTemplateSlugResp from .model_get_all_notification_topics_response import ModelGetAllNotificationTopicsResponse from .model_get_friends_response import ModelGetFriendsResponse from .model_get_stored_notification_resp import ModelGetStoredNotificationResp from .model_get_user_friends_response import ModelGetUserFriendsResponse from .model_get_user_incoming_friends_response import ModelGetUserIncomingFriendsResponse from .model_get_user_outgoing_friends_response import ModelGetUserOutgoingFriendsResponse from .model_localization import ModelLocalization from .model_notification_response import ModelNotificationResponse from .model_notification_template_response import ModelNotificationTemplateResponse from .model_notification_topic_response import ModelNotificationTopicResponse from .model_notification_topic_response_v1 import ModelNotificationTopicResponseV1 from .model_notification_with_template_request import ModelNotificationWithTemplateRequest from 
.model_notification_with_template_request_v1 import ModelNotificationWithTemplateRequestV1 from .model_pagination import ModelPagination from .model_request_friends_request import ModelRequestFriendsRequest from .model_template_content import ModelTemplateContent from .model_template_localization import ModelTemplateLocalization from .model_template_localization_response import ModelTemplateLocalizationResponse from .model_template_response import ModelTemplateResponse from .model_topic_by_namespaces_response import ModelTopicByNamespacesResponse from .model_update_template_request import ModelUpdateTemplateRequest from .model_update_topic_request import ModelUpdateTopicRequest from .model_user_accept_friend_request import ModelUserAcceptFriendRequest from .model_user_cancel_friend_request import ModelUserCancelFriendRequest from .model_user_get_friendship_status_response import ModelUserGetFriendshipStatusResponse from .model_user_reject_friend_request import ModelUserRejectFriendRequest from .model_user_unfriend_request import ModelUserUnfriendRequest from .models_admin_add_profanity_filter_into_list_request import ModelsAdminAddProfanityFilterIntoListRequest from .models_admin_add_profanity_filters_filter_request import ModelsAdminAddProfanityFiltersFilterRequest from .models_admin_add_profanity_filters_request import ModelsAdminAddProfanityFiltersRequest from .models_admin_create_profanity_list_request import ModelsAdminCreateProfanityListRequest from .models_admin_delete_profanity_filter_request import ModelsAdminDeleteProfanityFilterRequest from .models_admin_get_profanity_list_filters_v1_response import ModelsAdminGetProfanityListFiltersV1Response from .models_admin_get_profanity_lists_list_response import ModelsAdminGetProfanityListsListResponse from .models_admin_set_profanity_rule_for_namespace_request import ModelsAdminSetProfanityRuleForNamespaceRequest from .models_admin_update_profanity_list import ModelsAdminUpdateProfanityList from 
.models_admin_verify_message_profanity_request import ModelsAdminVerifyMessageProfanityRequest from .models_admin_verify_message_profanity_response import ModelsAdminVerifyMessageProfanityResponse from .models_blocked_by_player_data import ModelsBlockedByPlayerData from .models_blocked_player_data import ModelsBlockedPlayerData from .models_config import ModelsConfig from .models_config_list import ModelsConfigList from .models_config_req import ModelsConfigReq from .models_create_config_request import ModelsCreateConfigRequest from .models_create_config_response import ModelsCreateConfigResponse from .models_debug_profanity_filter_request import ModelsDebugProfanityFilterRequest from .models_get_all_player_blocked_by_users_response import ModelsGetAllPlayerBlockedByUsersResponse from .models_get_all_player_blocked_users_response import ModelsGetAllPlayerBlockedUsersResponse from .models_get_all_player_session_attribute_response import ModelsGetAllPlayerSessionAttributeResponse from .models_get_config_response import ModelsGetConfigResponse from .models_get_player_session_attribute_response import ModelsGetPlayerSessionAttributeResponse from .models_list_blocked_player_request import ModelsListBlockedPlayerRequest from .models_party_data import ModelsPartyData from .models_party_put_custom_attributes_request import ModelsPartyPUTCustomAttributesRequest from .models_profanity_filter import ModelsProfanityFilter from .models_profanity_rule import ModelsProfanityRule from .models_set_player_session_attribute_request import ModelsSetPlayerSessionAttributeRequest from .models_update_config_request import ModelsUpdateConfigRequest from .models_update_config_response import ModelsUpdateConfigResponse from .restapi_error_response_body import RestapiErrorResponseBody from .restapi_error_response_v1 import RestapiErrorResponseV1
0.360714
0.029987
import json
import os
import random
import sys
import time
from typing import Dict, Optional, Callable, Any

import numpy as np
import tensorflow as tf
from tensorflow.python.training.tracking import data_structures as tf_data_structures
from dpu_utils.utils import RichPath

from ..data import DataFold, GraphDataset
from ..layers import get_known_message_passing_classes
from ..models import GraphTaskModel
from .model_utils import save_model, load_weights_verbosely, get_model_and_dataset
from .task_utils import get_known_tasks


def make_run_id(model_name: str, task_name: str, run_name: Optional[str] = None) -> str:
    """Choose a run ID, based on the --run-name parameter and the current time."""
    if run_name is not None:
        return run_name
    else:
        return "%s_%s__%s" % (model_name, task_name, time.strftime("%Y-%m-%d_%H-%M-%S"))


def log_line(log_file: str, msg: str):
    """Append `msg` to `log_file` and echo it to stdout."""
    with open(log_file, "a") as log_fh:
        log_fh.write(msg + "\n")
    print(msg)


def train_loop(
    model: GraphTaskModel,
    train_data: tf.data.Dataset,
    valid_data: tf.data.Dataset,
    max_epochs: int,
    patience: int,
    log_fun: Callable[[str], None],
    save_model_fun: Callable[[GraphTaskModel], None],
    quiet: bool = False,
    aml_run=None,
) -> float:
    """Run the training loop with early stopping on the validation metric.

    The model is saved (via `save_model_fun`) whenever the validation metric improves
    (lower is better); training stops after `patience` epochs without improvement.

    Returns
    -------
    float
        The best validation metric observed.
    """
    _, _, initial_valid_results = model.run_one_epoch(valid_data, training=False, quiet=quiet)
    best_valid_metric, best_val_str = model.compute_epoch_metrics(initial_valid_results)
    log_fun(f"Initial valid metric: {best_val_str}.")
    save_model_fun(model)

    best_valid_epoch = 0
    train_time_start = time.time()
    for epoch in range(1, max_epochs + 1):
        log_fun(f"== Epoch {epoch}")
        train_loss, train_speed, train_results = model.run_one_epoch(
            train_data, training=True, quiet=quiet
        )
        train_metric, train_metric_string = model.compute_epoch_metrics(train_results)
        log_fun(
            f" Train: {train_loss:.4f} loss | {train_metric_string} | {train_speed:.2f} graphs/s",
        )
        valid_loss, valid_speed, valid_results = model.run_one_epoch(
            valid_data, training=False, quiet=quiet
        )
        valid_metric, valid_metric_string = model.compute_epoch_metrics(valid_results)
        log_fun(
            f" Valid: {valid_loss:.4f} loss | {valid_metric_string} | {valid_speed:.2f} graphs/s",
        )

        if aml_run is not None:
            aml_run.log("task_train_metric", float(train_metric))
            aml_run.log("train_speed", float(train_speed))
            aml_run.log("task_valid_metric", float(valid_metric))
            aml_run.log("valid_speed", float(valid_speed))

        # Save if good enough. (Lower target metric is better.)
        if valid_metric < best_valid_metric:
            log_fun(
                f" (Best epoch so far, target metric decreased to {valid_metric:.5f} from {best_valid_metric:.5f}.)",
            )
            save_model_fun(model)
            best_valid_metric = valid_metric
            best_valid_epoch = epoch
        elif epoch - best_valid_epoch >= patience:
            total_time = time.time() - train_time_start
            log_fun(
                f"Stopping training after {patience} epochs without "
                f"improvement on validation metric.",
            )
            log_fun(
                f"Training took {total_time}s. Best validation metric: {best_valid_metric}",
            )
            break
    return best_valid_metric


def train(
    model: GraphTaskModel,
    dataset: GraphDataset,
    log_fun: Callable[[str], None],
    run_id: str,
    max_epochs: int,
    patience: int,
    save_dir: str,
    quiet: bool = False,
    aml_run=None,
):
    """Train `model` on `dataset` according to the given parameters.

    Args:
        model: The model to train.
        dataset: The dataset providing TRAIN and VALIDATION folds.
        log_fun: Callable used to log progress messages.
        run_id: Identifier used to name the saved model file.
        max_epochs: Maximal number of training epochs.
        patience: Number of epochs without validation improvement before stopping.
        save_dir: Directory in which the best model is stored.
        quiet: If True, generate less output during training.
        aml_run: Optional AzureML run context for metric logging.

    Returns:
        Path of the file holding the best model.

    Note:
        tf.data.Dataset.prefetch() pre-loads a few batches into memory ahead of time
        to speed up processing.
    """
    train_data = dataset.get_tensorflow_dataset(DataFold.TRAIN).prefetch(3)
    valid_data = dataset.get_tensorflow_dataset(DataFold.VALIDATION).prefetch(3)

    save_file = os.path.join(save_dir, f"{run_id}_best.pkl")

    def save_model_fun(model: GraphTaskModel):
        save_model(save_file, model, dataset)

    train_loop(
        model,
        train_data,
        valid_data,
        max_epochs=max_epochs,
        patience=patience,
        log_fun=log_fun,
        save_model_fun=save_model_fun,
        quiet=quiet,
        aml_run=aml_run,
    )

    return save_file


def unwrap_tf_tracked_data(data: Any) -> Any:
    """Recursively convert TF-tracked list/dict wrappers into plain lists/dicts.

    Needed so that hyperparameter dictionaries tracked by TF checkpointing can be
    serialized with json.dumps.
    """
    if isinstance(data, (tf_data_structures.ListWrapper, list)):
        return [unwrap_tf_tracked_data(e) for e in data]
    elif isinstance(data, (tf_data_structures._DictWrapper, dict)):
        return {k: unwrap_tf_tracked_data(v) for k, v in data.items()}
    else:
        return data


def run_train_from_args(
    args, hyperdrive_hyperparameter_overrides: Optional[Dict[str, str]] = None
) -> None:
    """Set up logging/seeding, build model and dataset from CLI args, train, and
    optionally evaluate on the test fold.

    Args:
        args: Parsed CLI arguments (see get_train_cli_arg_parser).
        hyperdrive_hyperparameter_overrides: Optional hyperparameter overrides coming
            from an AzureML Hyperdrive run.
    """
    # Avoid a shared mutable default argument:
    if hyperdrive_hyperparameter_overrides is None:
        hyperdrive_hyperparameter_overrides = {}

    # Get the housekeeping going and start logging:
    os.makedirs(args.save_dir, exist_ok=True)
    run_id = make_run_id(args.model, args.task, args.run_name)
    log_file = os.path.join(args.save_dir, f"{run_id}.log")

    def log(msg):
        log_line(log_file, msg)

    log(f"Setting random seed {args.random_seed}.")
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    tf.random.set_seed(args.random_seed)

    data_path = RichPath.create(args.data_path, args.azure_info)
    dataset, model = get_model_and_dataset(
        msg_passing_implementation=args.model,
        task_name=args.task,
        data_path=data_path,
        trained_model_file=args.load_saved_model,
        cli_data_hyperparameter_overrides=args.data_param_override,
        cli_model_hyperparameter_overrides=args.model_param_override,
        hyperdrive_hyperparameter_overrides=hyperdrive_hyperparameter_overrides,
        folds_to_load={DataFold.TRAIN, DataFold.VALIDATION},
        load_weights_only=args.load_weights_only,
        disable_tf_function_build=args.disable_tf_func,
    )
    log(f"Dataset parameters: {json.dumps(unwrap_tf_tracked_data(dataset._params))}")
    log(f"Model parameters: {json.dumps(unwrap_tf_tracked_data(model._params))}")

    if args.azureml_logging:
        from azureml.core.run import Run

        aml_run = Run.get_context()
    else:
        aml_run = None

    trained_model_path = train(
        model,
        dataset,
        log_fun=log,
        run_id=run_id,
        max_epochs=args.max_epochs,
        patience=args.patience,
        save_dir=args.save_dir,
        quiet=args.quiet,
        aml_run=aml_run,
    )

    if args.run_test:
        data_path = RichPath.create(args.data_path, args.azure_info)
        log("== Running on test dataset")
        log(f"Loading data from {data_path}.")
        dataset.load_data(data_path, {DataFold.TEST})
        log(f"Restoring best model state from {trained_model_path}.")
        load_weights_verbosely(trained_model_path, model)

        # Test 1: Simply compute same metrics used during training/validation:
        test_data = dataset.get_tensorflow_dataset(DataFold.TEST)
        _, _, test_results = model.run_one_epoch(test_data, training=False, quiet=args.quiet)
        test_metric, test_metric_string = model.compute_epoch_metrics(test_results)
        log(test_metric_string)
        if aml_run is not None:
            aml_run.log("task_test_metric", float(test_metric))

        # Test 2: Try to compute fancier metrics, if implemented:
        try:
            eval_metrics = model.evaluate_model(test_data)
            for metric_name, metric_value in eval_metrics.items():
                log(f"{metric_name:<30}: {metric_value:8.4f}")
                if aml_run is not None:
                    aml_run.log(f"task_test_{metric_name}", metric_value)
        except NotImplementedError:
            pass  # ignore if there are no fancier metrics


def get_train_cli_arg_parser(default_model_type: Optional[str] = None):
    """Get an argparse argument parser object with common options for training GNN-based models.

    Args:
        default_model_type: If provided, the model type is downgraded from a positional
            parameter on the command line to an option with the given default value.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Train a GNN model.")
    # We use a somewhat horrible trick to support both
    #  train.py --model MODEL --task TASK --data_path DATA_PATH
    # as well as
    #  train.py model task data_path
    # The former is useful because of limitations in AzureML; the latter is nicer to type.
    if "--task" in sys.argv:
        model_param_name, task_param_name, data_path_param_name = "--model", "--task", "--data_path"
    else:
        model_param_name, task_param_name, data_path_param_name = "model", "task", "data_path"

    if default_model_type:
        model_param_name = "--model"

    parser.add_argument(
        model_param_name,
        type=str,
        choices=sorted(get_known_message_passing_classes()),
        default=default_model_type,
        help="GNN model type to train.",
    )
    parser.add_argument(
        task_param_name,
        type=str,
        choices=sorted(get_known_tasks()),
        help="Task to train model for.",
    )
    parser.add_argument(data_path_param_name, type=str, help="Directory containing the task data.")
    parser.add_argument(
        "--save-dir",
        dest="save_dir",
        type=str,
        default="trained_model",
        help="Path in which to store the trained model and log.",
    )
    parser.add_argument(
        "--model-params-override",
        dest="model_param_override",
        type=str,
        help="JSON dictionary overriding model hyperparameter values.",
    )
    parser.add_argument(
        "--data-params-override",
        dest="data_param_override",
        type=str,
        help="JSON dictionary overriding data hyperparameter values.",
    )
    parser.add_argument(
        "--max-epochs",
        dest="max_epochs",
        type=int,
        default=10000,
        help="Maximal number of epochs to train for.",
    )
    parser.add_argument(
        "--patience",
        dest="patience",
        type=int,
        default=25,
        help="Maximal number of epochs to continue training without improvement.",
    )
    parser.add_argument(
        "--seed",
        dest="random_seed",
        type=int,
        default=0,
        help="Random seed to use.",
    )
    parser.add_argument(
        "--run-name",
        dest="run_name",
        type=str,
        help="A human-readable name for this run.",
    )
    parser.add_argument(
        "--azure-info",
        dest="azure_info",
        type=str,
        default="azure_auth.json",
        help="Azure authentication information file (JSON).",
    )
    parser.add_argument(
        "--load-saved-model",
        dest="load_saved_model",
        help="Optional location to load initial model weights from. Should be model stored in earlier run.",
    )
    parser.add_argument(
        "--load-weights-only",
        dest="load_weights_only",
        action="store_true",
        help="Optional to only load the weights of the model rather than class and dataset for further training (used in fine-tuning on pretrained network). Should be model stored in earlier run.",
    )
    parser.add_argument(
        "--disable-tf-func",
        dest="disable_tf_func",
        action="store_true",
        help="Optional to disable the building of tf function graphs and run in eager mode.",
    )
    parser.add_argument(
        "--quiet",
        dest="quiet",
        action="store_true",
        help="Generate less output during training.",
    )
    parser.add_argument(
        "--run-test",
        dest="run_test",
        action="store_true",
        default=False,
        help="Run on testset after training.",
    )
    parser.add_argument(
        "--azureml_logging",
        dest="azureml_logging",
        action="store_true",
        help="Log task results using AML run context.",
    )
    parser.add_argument("--debug", dest="debug", action="store_true", help="Enable debug routines")
    parser.add_argument(
        "--hyperdrive-arg-parse",
        dest="hyperdrive_arg_parse",
        action="store_true",
        help='Enable hyperdrive argument parsing, in which unknown options "--key val" are interpreted as hyperparameter "key" with value "val".',
    )

    return parser
tf2_gnn/cli_utils/training_utils.py
import json
import os
import random
import sys
import time
from typing import Dict, Optional, Callable, Any

import numpy as np
import tensorflow as tf
from tensorflow.python.training.tracking import data_structures as tf_data_structures
from dpu_utils.utils import RichPath

from ..data import DataFold, GraphDataset
from ..layers import get_known_message_passing_classes
from ..models import GraphTaskModel
from .model_utils import save_model, load_weights_verbosely, get_model_and_dataset
from .task_utils import get_known_tasks


def make_run_id(model_name: str, task_name: str, run_name: Optional[str] = None) -> str:
    """Choose a run ID, based on the --run-name parameter and the current time."""
    if run_name is not None:
        return run_name
    return "%s_%s__%s" % (model_name, task_name, time.strftime("%Y-%m-%d_%H-%M-%S"))


def log_line(log_file: str, msg: str):
    """Append `msg` (plus a newline) to `log_file` and echo it to stdout."""
    with open(log_file, "a") as log_fh:
        log_fh.write(msg + "\n")
    print(msg)


def train_loop(
    model: GraphTaskModel,
    train_data: tf.data.Dataset,
    valid_data: tf.data.Dataset,
    max_epochs: int,
    patience: int,
    log_fun: Callable[[str], None],
    save_model_fun: Callable[[GraphTaskModel], None],
    quiet: bool = False,
    aml_run=None,
) -> float:
    """Run the training loop with early stopping.

    The model is saved (via `save_model_fun`) every time the validation
    metric improves; training stops after `patience` epochs without
    improvement, or after `max_epochs` epochs.

    Args:
        model: Model to train; must expose `run_one_epoch` and
            `compute_epoch_metrics`.
        train_data: Batched training dataset.
        valid_data: Batched validation dataset.
        max_epochs: Maximal number of epochs to train for.
        patience: Number of epochs without validation improvement before
            training stops.
        log_fun: Callback used for all log output.
        save_model_fun: Callback invoked to persist the current best model.
        quiet: If True, suppress per-batch progress output.
        aml_run: Optional AzureML run context for metric logging.

    Returns:
        The best (lowest) validation metric observed.
    """
    # Evaluate (and save) the untrained model first, so that "best so far"
    # is well-defined from epoch 1 onwards.
    _, _, initial_valid_results = model.run_one_epoch(valid_data, training=False, quiet=quiet)
    best_valid_metric, best_val_str = model.compute_epoch_metrics(initial_valid_results)
    log_fun(f"Initial valid metric: {best_val_str}.")
    save_model_fun(model)

    best_valid_epoch = 0
    train_time_start = time.time()
    for epoch in range(1, max_epochs + 1):
        log_fun(f"== Epoch {epoch}")
        train_loss, train_speed, train_results = model.run_one_epoch(
            train_data, training=True, quiet=quiet
        )
        train_metric, train_metric_string = model.compute_epoch_metrics(train_results)
        log_fun(
            f" Train: {train_loss:.4f} loss | {train_metric_string} | {train_speed:.2f} graphs/s",
        )
        valid_loss, valid_speed, valid_results = model.run_one_epoch(
            valid_data, training=False, quiet=quiet
        )
        valid_metric, valid_metric_string = model.compute_epoch_metrics(valid_results)
        log_fun(
            f" Valid: {valid_loss:.4f} loss | {valid_metric_string} | {valid_speed:.2f} graphs/s",
        )

        if aml_run is not None:
            aml_run.log("task_train_metric", float(train_metric))
            aml_run.log("train_speed", float(train_speed))
            aml_run.log("task_valid_metric", float(valid_metric))
            aml_run.log("valid_speed", float(valid_speed))

        # Save if good enough. Lower metric values are better.
        if valid_metric < best_valid_metric:
            log_fun(
                f" (Best epoch so far, target metric decreased to {valid_metric:.5f} from {best_valid_metric:.5f}.)",
            )
            save_model_fun(model)
            best_valid_metric = valid_metric
            best_valid_epoch = epoch
        elif epoch - best_valid_epoch >= patience:
            total_time = time.time() - train_time_start
            log_fun(
                f"Stopping training after {patience} epochs without "
                f"improvement on validation metric.",
            )
            log_fun(
                f"Training took {total_time}s. Best validation metric: {best_valid_metric}",
            )
            break
    return best_valid_metric


def train(
    model: GraphTaskModel,
    dataset: GraphDataset,
    log_fun: Callable[[str], None],
    run_id: str,
    max_epochs: int,
    patience: int,
    save_dir: str,
    quiet: bool = False,
    aml_run=None,
):
    """Train `model` on `dataset` and save the best checkpoint.

    Args:
        model: Model to train.
        dataset: Dataset providing TRAIN and VALIDATION folds.
        log_fun: Callback used for all log output.
        run_id: Identifier used in the checkpoint file name.
        max_epochs: Maximal number of epochs to train for.
        patience: Early-stopping patience, in epochs.
        save_dir: Directory in which the best model is stored.
        quiet: If True, suppress per-batch progress output.
        aml_run: Optional AzureML run context for metric logging.

    Returns:
        Path to the file the best model was saved to.
    """
    # .prefetch(3) keeps a few batches staged ahead of the consumer,
    # overlapping data preparation with model computation.
    train_data = dataset.get_tensorflow_dataset(DataFold.TRAIN).prefetch(3)
    valid_data = dataset.get_tensorflow_dataset(DataFold.VALIDATION).prefetch(3)

    save_file = os.path.join(save_dir, f"{run_id}_best.pkl")

    def save_model_fun(model: GraphTaskModel):
        save_model(save_file, model, dataset)

    train_loop(
        model,
        train_data,
        valid_data,
        max_epochs=max_epochs,
        patience=patience,
        log_fun=log_fun,
        save_model_fun=save_model_fun,
        quiet=quiet,
        aml_run=aml_run,
    )

    return save_file


def unwrap_tf_tracked_data(data: Any) -> Any:
    """Recursively convert TF's tracking wrappers back to plain lists/dicts.

    Needed so that hyperparameter dicts captured by TF's object tracking
    can be serialized with `json.dumps`.

    NOTE: relies on the private `tf_data_structures._DictWrapper` type.
    """
    if isinstance(data, (tf_data_structures.ListWrapper, list)):
        return [unwrap_tf_tracked_data(e) for e in data]
    elif isinstance(data, (tf_data_structures._DictWrapper, dict)):
        return {k: unwrap_tf_tracked_data(v) for k, v in data.items()}
    else:
        return data


def run_train_from_args(args, hyperdrive_hyperparameter_overrides: Optional[Dict[str, str]] = None) -> None:
    """Entry point for training from parsed CLI arguments.

    Sets up logging and random seeds, builds model and dataset, trains,
    and optionally evaluates on the test fold.

    Args:
        args: Parsed arguments from `get_train_cli_arg_parser`.
        hyperdrive_hyperparameter_overrides: Optional hyperparameter
            overrides collected from HyperDrive-style CLI options.
    """
    # Avoid the mutable-default-argument pitfall (the original used `= {}`,
    # which is shared across calls).
    if hyperdrive_hyperparameter_overrides is None:
        hyperdrive_hyperparameter_overrides = {}

    # Get the housekeeping going and start logging:
    os.makedirs(args.save_dir, exist_ok=True)
    run_id = make_run_id(args.model, args.task, args.run_name)
    log_file = os.path.join(args.save_dir, f"{run_id}.log")

    def log(msg):
        log_line(log_file, msg)

    log(f"Setting random seed {args.random_seed}.")
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    tf.random.set_seed(args.random_seed)

    data_path = RichPath.create(args.data_path, args.azure_info)
    dataset, model = get_model_and_dataset(
        msg_passing_implementation=args.model,
        task_name=args.task,
        data_path=data_path,
        trained_model_file=args.load_saved_model,
        cli_data_hyperparameter_overrides=args.data_param_override,
        cli_model_hyperparameter_overrides=args.model_param_override,
        hyperdrive_hyperparameter_overrides=hyperdrive_hyperparameter_overrides,
        folds_to_load={DataFold.TRAIN, DataFold.VALIDATION},
        load_weights_only=args.load_weights_only,
        disable_tf_function_build=args.disable_tf_func,
    )
    log(f"Dataset parameters: {json.dumps(unwrap_tf_tracked_data(dataset._params))}")
    log(f"Model parameters: {json.dumps(unwrap_tf_tracked_data(model._params))}")

    if args.azureml_logging:
        from azureml.core.run import Run

        aml_run = Run.get_context()
    else:
        aml_run = None

    trained_model_path = train(
        model,
        dataset,
        log_fun=log,
        run_id=run_id,
        max_epochs=args.max_epochs,
        patience=args.patience,
        save_dir=args.save_dir,
        quiet=args.quiet,
        aml_run=aml_run,
    )

    if args.run_test:
        data_path = RichPath.create(args.data_path, args.azure_info)
        log("== Running on test dataset")
        log(f"Loading data from {data_path}.")
        dataset.load_data(data_path, {DataFold.TEST})
        log(f"Restoring best model state from {trained_model_path}.")
        load_weights_verbosely(trained_model_path, model)

        # Test 1: Simply compute same metrics used during training/validation:
        test_data = dataset.get_tensorflow_dataset(DataFold.TEST)
        _, _, test_results = model.run_one_epoch(test_data, training=False, quiet=args.quiet)
        test_metric, test_metric_string = model.compute_epoch_metrics(test_results)
        log(test_metric_string)
        if aml_run is not None:
            aml_run.log("task_test_metric", float(test_metric))

        # Test 2: Try to compute fancier metrics, if implemented:
        try:
            eval_metrics = model.evaluate_model(test_data)
            for metric_name, metric_value in eval_metrics.items():
                log(f"{metric_name:<30}: {metric_value:8.4f}")
                if aml_run is not None:
                    aml_run.log(f"task_test_{metric_name}", metric_value)
        except NotImplementedError:
            pass  # ignore if there are no fancier metrics


def get_train_cli_arg_parser(default_model_type: Optional[str] = None):
    """
    Get an argparse argument parser object with common options for training
    GNN-based models.

    Args:
        default_model_type: If provided, the model type is downgraded from a
            positional parameter on the command line to an option with the
            given default value.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Train a GNN model.")
    # We use a somewhat horrible trick to support both
    #  train.py --model MODEL --task TASK --data_path DATA_PATH
    # as well as
    #  train.py model task data_path
    # The former is useful because of limitations in AzureML; the latter is nicer to type.
    if "--task" in sys.argv:
        model_param_name, task_param_name, data_path_param_name = "--model", "--task", "--data_path"
    else:
        model_param_name, task_param_name, data_path_param_name = "model", "task", "data_path"
    if default_model_type:
        model_param_name = "--model"

    parser.add_argument(
        model_param_name,
        type=str,
        choices=sorted(get_known_message_passing_classes()),
        default=default_model_type,
        help="GNN model type to train.",
    )
    parser.add_argument(
        task_param_name,
        type=str,
        choices=sorted(get_known_tasks()),
        help="Task to train model for.",
    )
    parser.add_argument(data_path_param_name, type=str, help="Directory containing the task data.")
    parser.add_argument(
        "--save-dir",
        dest="save_dir",
        type=str,
        default="trained_model",
        help="Path in which to store the trained model and log.",
    )
    parser.add_argument(
        "--model-params-override",
        dest="model_param_override",
        type=str,
        help="JSON dictionary overriding model hyperparameter values.",
    )
    parser.add_argument(
        "--data-params-override",
        dest="data_param_override",
        type=str,
        help="JSON dictionary overriding data hyperparameter values.",
    )
    parser.add_argument(
        "--max-epochs",
        dest="max_epochs",
        type=int,
        default=10000,
        help="Maximal number of epochs to train for.",
    )
    parser.add_argument(
        "--patience",
        dest="patience",
        type=int,
        default=25,
        help="Maximal number of epochs to continue training without improvement.",
    )
    parser.add_argument(
        "--seed",
        dest="random_seed",
        type=int,
        default=0,
        help="Random seed to use.",
    )
    parser.add_argument(
        "--run-name",
        dest="run_name",
        type=str,
        help="A human-readable name for this run.",
    )
    parser.add_argument(
        "--azure-info",
        dest="azure_info",
        type=str,
        default="azure_auth.json",
        help="Azure authentication information file (JSON).",
    )
    parser.add_argument(
        "--load-saved-model",
        dest="load_saved_model",
        help="Optional location to load initial model weights from. Should be model stored in earlier run.",
    )
    parser.add_argument(
        "--load-weights-only",
        dest="load_weights_only",
        action="store_true",
        help="Optional to only load the weights of the model rather than class and dataset for further training (used in fine-tuning on pretrained network). Should be model stored in earlier run.",
    )
    parser.add_argument(
        "--disable-tf-func",
        dest="disable_tf_func",
        action="store_true",
        help="Optional to disable the building of tf function graphs and run in eager mode.",
    )
    parser.add_argument(
        "--quiet",
        dest="quiet",
        action="store_true",
        help="Generate less output during training.",
    )
    parser.add_argument(
        "--run-test",
        dest="run_test",
        action="store_true",
        default=False,
        help="Run on testset after training.",
    )
    parser.add_argument(
        "--azureml_logging",
        dest="azureml_logging",
        action="store_true",
        help="Log task results using AML run context.",
    )
    parser.add_argument("--debug", dest="debug", action="store_true", help="Enable debug routines")
    parser.add_argument(
        "--hyperdrive-arg-parse",
        dest="hyperdrive_arg_parse",
        action="store_true",
        help='Enable hyperdrive argument parsing, in which unknown options "--key val" are interpreted as hyperparameter "key" with value "val".',
    )

    return parser
0.687525
0.198433
import json
import logging

from . import model
from . import utils


class Security(object):
    """Stop-loss / take-profit guard for open transactions.

    Holds the configured thresholds and decides when a transaction should
    be sold (loss too high, profit target reached, or value dropping below
    a rolling mean) and when conditions allow buying.
    """

    def __init__(self, config_dict):
        """Load thresholds from a JSON configuration file.

        Args:
            config_dict (str): Path to the JSON configuration file. May
                contain `maxLost` (with `percentage`, `percentage_update`,
                `mean`) and `takeProfit` (with `percentage`) sections.
        """
        logging.debug('')
        # Use a context manager so the config file handle is closed
        # deterministically (the original `json.load(open(...))` leaked it
        # until garbage collection).
        with open(config_dict, mode='r') as config_file:
            config = json.load(config_file)

        self.mean = None
        self.maxLost = None
        self.take_profit = None
        if config.get('maxLost') is not None:
            model.MaxLost.__PERCENTAGE__ = config.get('maxLost').get(
                'percentage')
            model.MaxLost.__UPDATE__ = config.get('maxLost').get(
                'percentage_update', 1)
            self.mean = config.get('maxLost').get('mean')
        if config.get('takeProfit') is not None:
            self.take_profit = config.get('takeProfit').get('percentage')

        if self.mean:
            model.create()

    def process(self, current_value, currency):
        """Update the rolling-mean table for `currency` (if a mean is configured)."""
        logging.debug('')
        if self.mean:
            real_values = model.pricing.get_last_values(
                count=self.mean,
                currency=currency)
            model.rolling_mean_pricing.insert_value(
                currency=currency,
                frequency=self.mean,
                values=real_values)

    def sell(self, current_value, transaction):
        """Decide whether `transaction` should be sold at `current_value`.

        Returns True if the stop-loss triggers, the take-profit target is
        reached, or the value drops to/below the configured rolling mean.
        """
        logging.debug('')

        # Get (or create) the MaxLost record tracking this transaction.
        if not self.maxLost or self.maxLost.transaction_id != transaction.id:
            result = model.MaxLost.select(
                model.MaxLost.q.transaction_id == transaction.id)
            self.maxLost = result[0] if result.count() else None
        if not self.maxLost:
            self.maxLost = model.MaxLost(
                buy_value=transaction.currency_buy_value,
                min_gain=100 - model.MaxLost.__PERCENTAGE__,
                min_value=(transaction.currency_buy_value
                           * (100 - model.MaxLost.__PERCENTAGE__)/100),
                transaction_id=transaction.id)
        if self.maxLost.process(current_value):
            return True

        # Take-profit: current value as a percentage of the buy value.
        percentage = current_value/transaction.currency_buy_value * 100
        if self.take_profit and percentage >= 100 + self.take_profit:
            logging.warning('Take Profit: value {}, gain:{}'.format(
                current_value, percentage))
            return True

        # Sell if value goes below this mean.
        if self.mean:
            avg = model.rolling_mean_pricing.get_last_values(
                transaction.currency, frequency=self.mean, count=1)[0]
            if avg and current_value <= avg:
                logging.error('SELL: Current Value: {} lower than mean {} '
                              'at frequency: {}'.format(
                                  current_value, avg, self.mean))
                return True

        return False

    def buy(self, current_value, currency):
        """Return True when buying looks safe.

        Buy if the last rolling-mean values are all known and increasing.
        """
        if self.mean:
            values = model.rolling_mean_pricing.get_last_values(
                currency, frequency=self.mean, count=10)
            if all(x is not None for x in values):
                return utils.is_increasing(values)

        return False
crypto_trading/algo/security.py
import json import logging from . import model from . import utils class Security(object): """Class to thread hold to sell when the lost in a transaction is too high.""" def __init__(self, config_dict): """Class Initialisation.""" logging.debug('') config = json.load(open(config_dict, mode='r')) self.mean = None self.maxLost = None self.take_profit = None if config.get('maxLost') is not None: model.MaxLost.__PERCENTAGE__ = config.get('maxLost').get( 'percentage') model.MaxLost.__UPDATE__ = config.get('maxLost').get( 'percentage_update', 1) self.mean = config.get('maxLost').get('mean') if config.get('takeProfit') is not None: self.take_profit = config.get('takeProfit').get('percentage') if self.mean: model.create() def process(self, current_value, currency): logging.debug('') if self.mean: real_values = model.pricing.get_last_values( count=self.mean, currency=currency) model.rolling_mean_pricing.insert_value( currency=currency, frequency=self.mean, values=real_values) def sell(self, current_value, transaction): """Process data, it returned 1 to buy and -1 to sell.""" logging.debug('') # Get last security if not self.maxLost or self.maxLost.transaction_id != transaction.id: result = model.MaxLost.select( model.MaxLost.q.transaction_id == transaction.id) self.maxLost = result[0] if result.count() else None if not self.maxLost: self.maxLost = model.MaxLost( buy_value=transaction.currency_buy_value, min_gain=100 - model.MaxLost.__PERCENTAGE__, min_value=(transaction.currency_buy_value * (100 - model.MaxLost.__PERCENTAGE__)/100), transaction_id=transaction.id) if self.maxLost.process(current_value): return True percentage = current_value/transaction.currency_buy_value * 100 if self.take_profit and percentage >= 100 + self.take_profit: logging.warning('Take Profit: value {}, gain:{}'.format( current_value, percentage)) return True # Sell if value goes below this mean. 
if self.mean: avg = model.rolling_mean_pricing.get_last_values( transaction.currency, frequency=self.mean, count=1)[0] if avg and current_value <= avg: logging.error('SELL: Current Value: {} lower than mean {} ' 'at frequency: {}'.format( current_value, avg, self.mean)) return True return False def buy(self, current_value, currency): # Buy if current_value is upper than mean and mean is increasing. if self.mean: values = model.rolling_mean_pricing.get_last_values( currency, frequency=self.mean, count=10) if all(x is not None for x in values): return utils.is_increasing(values) return False
0.804098
0.10732
from __future__ import unicode_literals

# `backport_collections` provides Counter on very old Python versions.
try:
    from collections import Counter
except ImportError:
    from backport_collections import Counter

import datetime
from operator import attrgetter

from django.db import models
from django.utils.timezone import now

from aldryn_apphooks_config.managers.base import ManagerMixin, QuerySetMixin
from parler.managers import TranslatableManager, TranslatableQuerySet

from .constants import SERVICES_ENABLE_PUBDATE, SERVICES_ENABLE_IMAGE, TRANSLATE_IS_PUBLISHED


class ServiceQuerySet(TranslatableQuerySet):
    """QuerySet for Service objects with publishing/namespace filters."""

    def published(self):
        """
        Returns Services that are published AND have a publishing_date that
        has actually passed.

        When TRANSLATE_IS_PUBLISHED is set, "published" is a per-translation
        flag checked via parler's `translated()`; otherwise it is a plain
        model field.
        """
        qs = self
        if SERVICES_ENABLE_PUBDATE:
            qs = self.filter(publishing_date__lte=now())
        if TRANSLATE_IS_PUBLISHED:
            return qs.translated(is_published_trans=True)
        return qs.filter(is_published=True)

    def published_one_of_trans(self):
        """Like `published()`, but accepts a Service if ANY translation is
        published (plain `translations__...` filter instead of parler's
        current-language `translated()`)."""
        qs = self
        if SERVICES_ENABLE_PUBDATE:
            qs = self.filter(publishing_date__lte=now())
        if TRANSLATE_IS_PUBLISHED:
            return qs.filter(translations__is_published_trans=True)
        return qs.filter(is_published=True)

    def namespace(self, namespace, to=None):
        """Restrict to Services attached to sections with the given
        apphook namespace. (`to` is accepted but unused — kept for
        interface compatibility; presumably mirrors a related API.)"""
        return self.filter(**{'sections__namespace': namespace})


class RelatedManager(ManagerMixin, TranslatableManager):
    """Default manager for Service, returning ServiceQuerySet instances."""

    def get_queryset(self):
        qs = ServiceQuerySet(self.model, using=self.db)
        # Only join the image table when the image feature is enabled.
        if SERVICES_ENABLE_IMAGE:
            return qs.select_related('featured_image')
        return qs

    def published(self):
        return self.get_queryset().published()

    def published_one_of_trans(self):
        return self.get_queryset().published_one_of_trans()

    def get_months(self, request, namespace):
        """
        Get months and years with Services count for given request and
        namespace string. This means how many Services there are in each
        month.

        The request is required, because logged-in content managers may get
        different counts.

        Return list of dictionaries ordered by Service publishing date of the
        following format:
        [
            {
                'date': date(YEAR, MONTH, ARBITRARY_DAY),
                'num_services': NUM_SERVICES
            },
            ...
        ]
        """
        # TODO: check if this limitation still exists in Django 1.6+
        # This is done in a naive way as Django is having tough time while
        # aggregating on date fields
        if (request and hasattr(request, 'toolbar') and
                request.toolbar and request.toolbar.edit_mode_active):
            # Editors in edit mode see unpublished Services too.
            services = self.namespace(namespace)
        else:
            services = self.published().namespace(namespace)
        dates = services.values_list('publishing_date', flat=True)
        # Count occurrences per (year, month), then emit unique months
        # newest-first.
        dates = [(x.year, x.month) for x in dates]
        date_counter = Counter(dates)
        dates = set(dates)
        dates = sorted(dates, reverse=True)
        months = [
            # Use day=3 to make sure timezone won't affect this hacks'
            # month value. There are UTC+14 and UTC-12 timezones!
            {'date': datetime.date(year=year, month=month, day=3),
             'num_services': date_counter[(year, month)]}
            for year, month in dates]
        return months
js_services/managers.py
from __future__ import unicode_literals try: from collections import Counter except ImportError: from backport_collections import Counter import datetime from operator import attrgetter from django.db import models from django.utils.timezone import now from aldryn_apphooks_config.managers.base import ManagerMixin, QuerySetMixin from parler.managers import TranslatableManager, TranslatableQuerySet from .constants import SERVICES_ENABLE_PUBDATE, SERVICES_ENABLE_IMAGE, TRANSLATE_IS_PUBLISHED class ServiceQuerySet(TranslatableQuerySet): def published(self): """ Returns Services that are published AND have a publishing_date that has actually passed. """ qs = self if SERVICES_ENABLE_PUBDATE: qs = self.filter(publishing_date__lte=now()) if TRANSLATE_IS_PUBLISHED: return qs.translated(is_published_trans=True) return qs.filter(is_published=True) def published_one_of_trans(self): qs = self if SERVICES_ENABLE_PUBDATE: qs = self.filter(publishing_date__lte=now()) if TRANSLATE_IS_PUBLISHED: return qs.filter(translations__is_published_trans=True) return qs.filter(is_published=True) def namespace(self, namespace, to=None): return self.filter(**{'sections__namespace': namespace}) class RelatedManager(ManagerMixin, TranslatableManager): def get_queryset(self): qs = ServiceQuerySet(self.model, using=self.db) if SERVICES_ENABLE_IMAGE: return qs.select_related('featured_image') return qs def published(self): return self.get_queryset().published() def published_one_of_trans(self): return self.get_queryset().published_one_of_trans() def get_months(self, request, namespace): """ Get months and years with Services count for given request and namespace string. This means how many Services there are in each month. The request is required, because logged-in content managers may get different counts. Return list of dictionaries ordered by Service publishing date of the following format: [ { 'date': date(YEAR, MONTH, ARBITRARY_DAY), 'num_services': NUM_SERVICES }, ... 
] """ # TODO: check if this limitation still exists in Django 1.6+ # This is done in a naive way as Django is having tough time while # aggregating on date fields if (request and hasattr(request, 'toolbar') and request.toolbar and request.toolbar.edit_mode_active): services = self.namespace(namespace) else: services = self.published().namespace(namespace) dates = services.values_list('publishing_date', flat=True) dates = [(x.year, x.month) for x in dates] date_counter = Counter(dates) dates = set(dates) dates = sorted(dates, reverse=True) months = [ # Use day=3 to make sure timezone won't affect this hacks' # month value. There are UTC+14 and UTC-12 timezones! {'date': datetime.date(year=year, month=month, day=3), 'num_services': date_counter[(year, month)]} for year, month in dates] return months
0.486819
0.161783
import logging

from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404

from dojo.utils import add_breadcrumb
from dojo.forms import ToolTypeForm
from dojo.models import Tool_Type

logger = logging.getLogger(__name__)


@user_passes_test(lambda u: u.is_superuser)
def new_tool_type(request):
    """Superuser-only view: create a new Tool_Type configuration.

    On valid POST, saves the form and redirects to the list view;
    otherwise renders the (bound or empty) form.
    """
    if request.method == 'POST':
        tform = ToolTypeForm(request.POST, instance=Tool_Type())
        if tform.is_valid():
            tform.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Tool Type Configuration Successfully Created.',
                                 extra_tags='alert-success')
            return HttpResponseRedirect(reverse('tool_type', ))
    else:
        tform = ToolTypeForm()
    add_breadcrumb(title="New Tool Type Configuration", top_level=False, request=request)
    return render(request, 'dojo/new_tool_type.html', {'tform': tform})


@user_passes_test(lambda u: u.is_superuser)
def edit_tool_type(request, ttid):
    """Superuser-only view: edit the Tool_Type with primary key `ttid`.

    Uses get_object_or_404 so an unknown pk yields a 404 instead of an
    unhandled DoesNotExist (HTTP 500), as in the original.
    """
    tool_type = get_object_or_404(Tool_Type, pk=ttid)
    if request.method == 'POST':
        tform = ToolTypeForm(request.POST, instance=tool_type)
        if tform.is_valid():
            tform.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Tool Type Configuration Successfully Updated.',
                                 extra_tags='alert-success')
            return HttpResponseRedirect(reverse('tool_type', ))
    else:
        tform = ToolTypeForm(instance=tool_type)
    add_breadcrumb(title="Edit Tool Type Configuration", top_level=False, request=request)
    return render(request, 'dojo/edit_tool_type.html', {
        'tform': tform,
    })


@user_passes_test(lambda u: u.is_superuser)
def tool_type(request):
    """Superuser-only view: list all Tool_Type configurations by name."""
    confs = Tool_Type.objects.all().order_by('name')
    add_breadcrumb(title="Tool Type List", top_level=not len(request.GET), request=request)
    return render(request, 'dojo/tool_type.html', {'confs': confs, })
dojo/tool_type/views.py
import logging from django.contrib import messages from django.contrib.auth.decorators import user_passes_test from django.urls import reverse from django.http import HttpResponseRedirect from django.shortcuts import render from dojo.utils import add_breadcrumb from dojo.forms import ToolTypeForm from dojo.models import Tool_Type logger = logging.getLogger(__name__) @user_passes_test(lambda u: u.is_superuser) def new_tool_type(request): if request.method == 'POST': tform = ToolTypeForm(request.POST, instance=Tool_Type()) if tform.is_valid(): tform.save() messages.add_message(request, messages.SUCCESS, 'Tool Type Configuration Successfully Created.', extra_tags='alert-success') return HttpResponseRedirect(reverse('tool_type', )) else: tform = ToolTypeForm() add_breadcrumb(title="New Tool Type Configuration", top_level=False, request=request) return render(request, 'dojo/new_tool_type.html', {'tform': tform}) @user_passes_test(lambda u: u.is_superuser) def edit_tool_type(request, ttid): tool_type = Tool_Type.objects.get(pk=ttid) if request.method == 'POST': tform = ToolTypeForm(request.POST, instance=tool_type) if tform.is_valid(): tform.save() messages.add_message(request, messages.SUCCESS, 'Tool Type Configuration Successfully Updated.', extra_tags='alert-success') return HttpResponseRedirect(reverse('tool_type', )) else: tform = ToolTypeForm(instance=tool_type) add_breadcrumb(title="Edit Tool Type Configuration", top_level=False, request=request) return render(request, 'dojo/edit_tool_type.html', { 'tform': tform, }) @user_passes_test(lambda u: u.is_superuser) def tool_type(request): confs = Tool_Type.objects.all().order_by('name') add_breadcrumb(title="Tool Type List", top_level=not len(request.GET), request=request) return render(request, 'dojo/tool_type.html', {'confs': confs, })
0.389198
0.064624
from collections import namedtuple import numpy as np import scipy.stats import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib.colors as cols from .utils import hollow_matrix from .utils import observations from .utils import rgb_is_dark class MarkovError(Exception): pass def regularize(sequence, strings_are_states=False) -> tuple: """ Turn a sequence or sequence of sequences into a tuple of the unique elements in the sequence(s), plus a sequence of sequences (sort of equivalent to `np.atleast_2d()`). Args sequence (list-like): A list-like container of either states, or of list-likes of states. strings_are_states (bool): True if the strings are themselves states (i.e. words or tokens) and not sequences of one-character states. For example, set to True if you provide something like: ['sst', 'mud', 'mud', 'sst', 'lst', 'lst'] Returns tuple. A tuple of the unique states, and a sequence of sequences. """ if strings_are_states: if isinstance(sequence[0], str): seq_of_seqs = [sequence] else: seq_of_seqs = sequence else: # Just try to iterate over the contents of the sequence. try: seq_of_seqs = [list(i) if len(i) > 1 else i for i in sequence] except TypeError: seq_of_seqs = [list(sequence)] # Annoyingly, still have to fix case of single sequence of # strings... this seems really hacky. if len(seq_of_seqs[0]) == 1: seq_of_seqs = [seq_of_seqs] # Now we know we have a sequence of sequences. uniques = set() for seq in seq_of_seqs: for i in seq: uniques.add(i) return np.array(sorted(uniques)), seq_of_seqs class Markov_chain(object): """ Markov_chain object. TODO - Pretty transition matrix printing with state names and row/col sums. - Allow self-transitions. See also this: https://stackoverflow.com/q/49340520/3381305 - Hidden Markov model? - 'Joint' Markov model... where you have lithology and bioturbation index (say). Not sure if this is really a thing, I just made it up. - More generally, explore other sequence models, eg LSTM. 
""" def __init__(self, observed_counts, states=None, step=1, include_self=None, ): """ Initialize the Markov chain instance. Args observed_counts (ndarray): A 2-D array representing the counts of change of state in the Markov Chain. states (array-like): An array-like representing the possible states of the Markov Chain. Must be in the same order as `observed counts`. step (int): The maximum step size, default 1. include_self (bool): Whether to include self-to-self transitions. """ self.step = step self.observed_counts = np.atleast_2d(observed_counts).astype(int) if include_self is not None: self.include_self = include_self else: self.include_self = np.any(np.diagonal(self.observed_counts)) if not self.include_self: self.observed_counts = hollow_matrix(self.observed_counts) if states is not None: self.states = np.asarray(states) else: self.states = np.arange(self.observed_counts.shape[0]) if self.step > 1: self.expected_counts = self._compute_expected_mc() else: self.expected_counts = self._compute_expected() return def __repr__(self): trans = f"Markov_chain({np.sum(self.observed_counts):.0f} transitions" states = '[{}]'.format(", ".join(s.__repr__() for s in self.states)) return f"{trans}, states={states}, step={self.step}, include_self={self.include_self})" @staticmethod def _compute_freqs(C): """ Compute frequencies from counts. 
""" epsilon = 1e-12 return (C.T / (epsilon+np.sum(C.T, axis=0))).T @staticmethod def _stop_iter(a, b, tol=0.01): a_small = np.all(np.abs(a[-1] - a[-2]) < tol*a[-1]) b_small = np.all(np.abs(b[-1] - b[-2]) < tol*b[-1]) return (a_small and b_small) @property def _index_dict(self): if self.states is None: return {} return {self.states[index]: index for index in range(len(self.states))} @property def _state_dict(self): if self.states is None: return {} return {index: self.states[index] for index in range(len(self.states))} @property def observed_freqs(self): return self._compute_freqs(self.observed_counts) @property def expected_freqs(self): return self._compute_freqs(self.expected_counts) @property def _state_counts(self): s = self.observed_counts.copy() # Deal with more than 2 dimensions. for _ in range(self.observed_counts.ndim - 2): s = np.sum(s, axis=0) a = np.sum(s, axis=0) b = np.sum(s, axis=1) return np.maximum(a, b) @property def _state_probs(self): return self._state_counts / np.sum(self._state_counts) @property def normalized_difference(self): O = self.observed_counts E = self.expected_counts epsilon = 1e-12 return (O - E) / np.sqrt(E + epsilon) @classmethod def from_sequence(cls, sequence, states=None, strings_are_states=False, include_self=False, step=1, ): """ Parse a sequence and make the transition matrix of the specified order. **Provide sequence(s) ordered in upwards direction.** Args sequence (list-like): A list-like, or list-like of list-likes. The inner list-likes represent sequences of states. For example, can be a string or list of strings, or a list or list of lists. states (list-like): A list or array of the names of the states. If not provided, it will be inferred from the data. strings_are_states (bool): rue if the strings are themselves states (i.e. words or tokens) and not sequences of one-character states. 
For example, set to True if you provide something like: ['sst', 'mud', 'mud', 'sst', 'lst', 'lst'] include_self (bool): Whether to include self-to-self transitions (default is `False`: do not include them). step (integer): The distance to step. Default is 1: use the previous state only. If 2, then the previous-but- one state is used as well as the previous state (and the matrix has one more dimension). return_states (bool): Whether to return the states. """ uniques, seq_of_seqs = regularize(sequence, strings_are_states=strings_are_states) if states is None: states = uniques else: states = np.asarray(list(states)) O = observations(seq_of_seqs, states=states, step=step, include_self=include_self) return cls(observed_counts=np.array(O), states=states, include_self=include_self, step=step, ) def _conditional_probs(self, state): """ Conditional probabilities of each state, given a current state. """ return self.observed_freqs[self._index_dict[state]] def _next_state(self, current_state: str) -> str: """ Returns the state of the random variable at the next time instance. Args current_state (str): The current state of the system. Returns str. One realization of the next state. """ return np.random.choice(self.states, p=self._conditional_probs(current_state) ) def generate_states(self, n:int=10, current_state:str=None) -> list: """ Generates the next states of the system. Args n (int): The number of future states to generate. current_state (str): The state of the current random variable. Returns list. The next n states. """ if current_state is None: current_state = np.random.choice(self.states, p=self._state_probs) future_states = [] for _ in range(n): next_state = self._next_state(current_state) future_states.append(next_state) current_state = next_state return future_states def _compute_expected(self): """ Try to use Powers & Easterling, fall back on Monte Carlo sampling based on the proportions of states in the data. 
""" try: E = self._compute_expected_pe() except: E = self._compute_expected_mc() return E def _compute_expected_mc(self, n=100000): """ If we can't use Powers & Easterling's method, and it's possible there's a way to extend it to higher dimensions (which we have for step > 1), the next best thing might be to use brute force and just compute a lot of random sequence transitions, given the observed proportions. This is what P & E's method tries to estimate iteratively. What to do about 'self transitions' is a bit of a problem here, since there are a lot of n-grams that include at least one self-transition. """ seq = np.random.choice(self.states, size=n, p=self._state_probs) E = observations(np.atleast_2d(seq), self.states, step=self.step, include_self=self.include_self) if not self.include_self: E = hollow_matrix(E) return np.sum(self.observed_counts) * E / np.sum(E) def _compute_expected_pe(self, max_iter=100, verbose=False): """ Compute the independent trials matrix, using method of Powers & Easterling 1982. """ m = len(self.states) M = self.observed_counts a, b = [], [] # Loop 1 a.append(np.sum(M, axis=1) / (m - 1)) b.append(np.sum(M, axis=0) / (np.sum(a[-1]) - a[-1])) i = 2 while i < max_iter: if verbose: print(f"iteration: {i-1}") print(f"a: {a[-1]}") print(f"b: {b[-1]}") print() a.append(np.sum(M, axis=1) / (np.sum(b[-1]) - b[-1])) b.append(np.sum(M, axis=0) / (np.sum(a[-1]) - a[-1])) # Check for stopping criterion. if self._stop_iter(a, b, tol=0.001): break i += 1 E = a[-1] * b[-1].reshape(-1, 1) if not self.include_self: return hollow_matrix(E) else: return E @property def degrees_of_freedom(self) -> int: m = len(self.states) return (m - 1)**2 - m def _chi_squared_critical(self, q=0.95, df=None): """ The chi-squared critical value for a confidence level q and degrees of freedom df. 
""" if df is None: df = self.degrees_of_freedom return scipy.stats.chi2.ppf(q=q, df=df) def _chi_squared_percentile(self, x, df=None): """ The chi-squared critical value for a confidence level q and degrees of freedom df. """ if df is None: df = self.degrees_of_freedom return scipy.stats.chi2.cdf(x, df=df) def chi_squared(self, q=0.95): """ The chi-squared statistic for the given transition frequencies. Also returns the critical statistic at the given confidence level q (default 95%). If the first number is bigger than the second number, then you can reject the hypothesis that the sequence is randomly ordered. Args: q (float): The confidence level, as a float in the range 0 to 1. Default: 0.95. Returns: float: The chi-squared statistic. """ # Observed and Expected matrices: O = self.observed_counts E = self.expected_counts # Adjustment for divide-by-zero epsilon = 1e-12 chi2 = np.sum((O - E)**2 / (E + epsilon)) crit = self._chi_squared_critical(q=q) perc = self._chi_squared_percentile(x=chi2) Chi2 = namedtuple('Chi2', ['chi2', 'crit', 'perc']) return Chi2(chi2, crit, perc) def as_graph(self, directed=True): if self.normalized_difference.ndim > 2: raise MarkovError("You can only graph one-step chains.") try: import networkx as nx except ImportError: nx = None if nx is None: print("Please install networkx with `pip install networkx`.") return if directed: alg = nx.DiGraph else: alg = nx.Graph G = nx.from_numpy_array(self.normalized_difference, create_using=alg) nx.set_node_attributes(G, self._state_dict, 'state') return G def plot_graph(self, ax=None, figsize=None, max_size=1000, directed=True, edge_labels=False, draw_neg=False ): if self.normalized_difference.ndim > 2: raise MarkovError("You can only graph one-step chains.") try: import networkx as nx except ImportError: nx = None if nx is None: print("Please install networkx with `pip install networkx`.") return G = self.as_graph(directed=directed) return_ax = True if ax is None: fig, ax = 
plt.subplots(figsize=figsize) return_ax = False e_neg = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if d['weight'] <= -1.0} e_small = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if -1.0 < d['weight'] <= 1.0} e_med = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if 1.0 < d['weight'] <= 2.0} e_large = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if d['weight'] > 2.0} pos = nx.spring_layout(G) sizes = max_size * (self._state_counts / max(self._state_counts)) nx.draw_networkx_nodes(G, pos, ax=ax, node_size=sizes, node_color='orange') nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_large, width=10, arrowsize=40, splines='curved') nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_med, width=4, arrowsize=20) nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_small, width=3, alpha=0.1, edge_color='k') if draw_neg: nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_neg, width=2, alpha=0.1, edge_color='k') if edge_labels: nx.draw_networkx_edge_labels(G,pos,edge_labels=e_large) nx.draw_networkx_edge_labels(G,pos,edge_labels=e_med) labels = nx.get_node_attributes(G, 'state') ax = nx.draw_networkx_labels(G, pos, labels=labels, font_size=20, font_family='sans-serif', font_color='blue') if return_ax: return ax else: plt.axis('off') plt.show() return def plot_norm_diff(self, ax=None, cmap='RdBu', center_zero=True, vminmax=None, rotation=0, annotate=False, ): """ A visualization of the normalized difference matrix. 
Args """ if self.normalized_difference.ndim > 2: raise MarkovError("You can only plot one-step chains.") return_ax = True if ax is None: fig, ax = plt.subplots(figsize=(1 + self.states.size/1.5, self.states.size/1.5)) return_ax = False if vminmax is None: ma = np.ceil(np.max(np.abs(self.normalized_difference))) vmin, vmax = -ma, ma else: vmin, vmax = vminmax im = ax.imshow(self.normalized_difference, cmap=cmap, vmin=vmin, vmax=vmax, interpolation='none') plt.colorbar(im) ax.tick_params(axis='x', which='both', bottom=False, labelbottom=False, top=False, labeltop=True, ) ax.tick_params(axis='y', which='both', left=False, labelleft=True, right=False, labelright=False, ) ticks = np.arange(self.states.size) ax.set_yticks(ticks) ax.set_xticks(ticks) labels = [str(s) for s in self.states] ax.set_yticklabels(labels) if rotation == 0: ax.set_xticklabels(labels) else: ha = 'right' if rotation < 0 else 'left' ax.set_xticklabels(labels, rotation=rotation, ha=ha, rotation_mode='anchor' ) if annotate: for i in range(self.states.size): for j in range(self.states.size): norm = cols.Normalize(vmin=vmin, vmax=vmax) val = self.normalized_difference[i, j] lookup = cm.get_cmap(cmap) col = 'w' if rgb_is_dark(lookup(norm(val))) else 'k' fmt = annotate if isinstance(annotate, str) else '0.1f' s = format(val, fmt) text = ax.text(j, i, s, ha="center", va="center", color=col) # Deal with probable bug in matplotlib 3.1.1 ax.set_ylim(reversed(ax.get_xlim())) if return_ax: return ax else: plt.show() return
striplog/markov.py
from collections import namedtuple import numpy as np import scipy.stats import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib.colors as cols from .utils import hollow_matrix from .utils import observations from .utils import rgb_is_dark class MarkovError(Exception): pass def regularize(sequence, strings_are_states=False) -> tuple: """ Turn a sequence or sequence of sequences into a tuple of the unique elements in the sequence(s), plus a sequence of sequences (sort of equivalent to `np.atleast_2d()`). Args sequence (list-like): A list-like container of either states, or of list-likes of states. strings_are_states (bool): True if the strings are themselves states (i.e. words or tokens) and not sequences of one-character states. For example, set to True if you provide something like: ['sst', 'mud', 'mud', 'sst', 'lst', 'lst'] Returns tuple. A tuple of the unique states, and a sequence of sequences. """ if strings_are_states: if isinstance(sequence[0], str): seq_of_seqs = [sequence] else: seq_of_seqs = sequence else: # Just try to iterate over the contents of the sequence. try: seq_of_seqs = [list(i) if len(i) > 1 else i for i in sequence] except TypeError: seq_of_seqs = [list(sequence)] # Annoyingly, still have to fix case of single sequence of # strings... this seems really hacky. if len(seq_of_seqs[0]) == 1: seq_of_seqs = [seq_of_seqs] # Now we know we have a sequence of sequences. uniques = set() for seq in seq_of_seqs: for i in seq: uniques.add(i) return np.array(sorted(uniques)), seq_of_seqs class Markov_chain(object): """ Markov_chain object. TODO - Pretty transition matrix printing with state names and row/col sums. - Allow self-transitions. See also this: https://stackoverflow.com/q/49340520/3381305 - Hidden Markov model? - 'Joint' Markov model... where you have lithology and bioturbation index (say). Not sure if this is really a thing, I just made it up. - More generally, explore other sequence models, eg LSTM. 
""" def __init__(self, observed_counts, states=None, step=1, include_self=None, ): """ Initialize the Markov chain instance. Args observed_counts (ndarray): A 2-D array representing the counts of change of state in the Markov Chain. states (array-like): An array-like representing the possible states of the Markov Chain. Must be in the same order as `observed counts`. step (int): The maximum step size, default 1. include_self (bool): Whether to include self-to-self transitions. """ self.step = step self.observed_counts = np.atleast_2d(observed_counts).astype(int) if include_self is not None: self.include_self = include_self else: self.include_self = np.any(np.diagonal(self.observed_counts)) if not self.include_self: self.observed_counts = hollow_matrix(self.observed_counts) if states is not None: self.states = np.asarray(states) else: self.states = np.arange(self.observed_counts.shape[0]) if self.step > 1: self.expected_counts = self._compute_expected_mc() else: self.expected_counts = self._compute_expected() return def __repr__(self): trans = f"Markov_chain({np.sum(self.observed_counts):.0f} transitions" states = '[{}]'.format(", ".join(s.__repr__() for s in self.states)) return f"{trans}, states={states}, step={self.step}, include_self={self.include_self})" @staticmethod def _compute_freqs(C): """ Compute frequencies from counts. 
""" epsilon = 1e-12 return (C.T / (epsilon+np.sum(C.T, axis=0))).T @staticmethod def _stop_iter(a, b, tol=0.01): a_small = np.all(np.abs(a[-1] - a[-2]) < tol*a[-1]) b_small = np.all(np.abs(b[-1] - b[-2]) < tol*b[-1]) return (a_small and b_small) @property def _index_dict(self): if self.states is None: return {} return {self.states[index]: index for index in range(len(self.states))} @property def _state_dict(self): if self.states is None: return {} return {index: self.states[index] for index in range(len(self.states))} @property def observed_freqs(self): return self._compute_freqs(self.observed_counts) @property def expected_freqs(self): return self._compute_freqs(self.expected_counts) @property def _state_counts(self): s = self.observed_counts.copy() # Deal with more than 2 dimensions. for _ in range(self.observed_counts.ndim - 2): s = np.sum(s, axis=0) a = np.sum(s, axis=0) b = np.sum(s, axis=1) return np.maximum(a, b) @property def _state_probs(self): return self._state_counts / np.sum(self._state_counts) @property def normalized_difference(self): O = self.observed_counts E = self.expected_counts epsilon = 1e-12 return (O - E) / np.sqrt(E + epsilon) @classmethod def from_sequence(cls, sequence, states=None, strings_are_states=False, include_self=False, step=1, ): """ Parse a sequence and make the transition matrix of the specified order. **Provide sequence(s) ordered in upwards direction.** Args sequence (list-like): A list-like, or list-like of list-likes. The inner list-likes represent sequences of states. For example, can be a string or list of strings, or a list or list of lists. states (list-like): A list or array of the names of the states. If not provided, it will be inferred from the data. strings_are_states (bool): rue if the strings are themselves states (i.e. words or tokens) and not sequences of one-character states. 
For example, set to True if you provide something like: ['sst', 'mud', 'mud', 'sst', 'lst', 'lst'] include_self (bool): Whether to include self-to-self transitions (default is `False`: do not include them). step (integer): The distance to step. Default is 1: use the previous state only. If 2, then the previous-but- one state is used as well as the previous state (and the matrix has one more dimension). return_states (bool): Whether to return the states. """ uniques, seq_of_seqs = regularize(sequence, strings_are_states=strings_are_states) if states is None: states = uniques else: states = np.asarray(list(states)) O = observations(seq_of_seqs, states=states, step=step, include_self=include_self) return cls(observed_counts=np.array(O), states=states, include_self=include_self, step=step, ) def _conditional_probs(self, state): """ Conditional probabilities of each state, given a current state. """ return self.observed_freqs[self._index_dict[state]] def _next_state(self, current_state: str) -> str: """ Returns the state of the random variable at the next time instance. Args current_state (str): The current state of the system. Returns str. One realization of the next state. """ return np.random.choice(self.states, p=self._conditional_probs(current_state) ) def generate_states(self, n:int=10, current_state:str=None) -> list: """ Generates the next states of the system. Args n (int): The number of future states to generate. current_state (str): The state of the current random variable. Returns list. The next n states. """ if current_state is None: current_state = np.random.choice(self.states, p=self._state_probs) future_states = [] for _ in range(n): next_state = self._next_state(current_state) future_states.append(next_state) current_state = next_state return future_states def _compute_expected(self): """ Try to use Powers & Easterling, fall back on Monte Carlo sampling based on the proportions of states in the data. 
""" try: E = self._compute_expected_pe() except: E = self._compute_expected_mc() return E def _compute_expected_mc(self, n=100000): """ If we can't use Powers & Easterling's method, and it's possible there's a way to extend it to higher dimensions (which we have for step > 1), the next best thing might be to use brute force and just compute a lot of random sequence transitions, given the observed proportions. This is what P & E's method tries to estimate iteratively. What to do about 'self transitions' is a bit of a problem here, since there are a lot of n-grams that include at least one self-transition. """ seq = np.random.choice(self.states, size=n, p=self._state_probs) E = observations(np.atleast_2d(seq), self.states, step=self.step, include_self=self.include_self) if not self.include_self: E = hollow_matrix(E) return np.sum(self.observed_counts) * E / np.sum(E) def _compute_expected_pe(self, max_iter=100, verbose=False): """ Compute the independent trials matrix, using method of Powers & Easterling 1982. """ m = len(self.states) M = self.observed_counts a, b = [], [] # Loop 1 a.append(np.sum(M, axis=1) / (m - 1)) b.append(np.sum(M, axis=0) / (np.sum(a[-1]) - a[-1])) i = 2 while i < max_iter: if verbose: print(f"iteration: {i-1}") print(f"a: {a[-1]}") print(f"b: {b[-1]}") print() a.append(np.sum(M, axis=1) / (np.sum(b[-1]) - b[-1])) b.append(np.sum(M, axis=0) / (np.sum(a[-1]) - a[-1])) # Check for stopping criterion. if self._stop_iter(a, b, tol=0.001): break i += 1 E = a[-1] * b[-1].reshape(-1, 1) if not self.include_self: return hollow_matrix(E) else: return E @property def degrees_of_freedom(self) -> int: m = len(self.states) return (m - 1)**2 - m def _chi_squared_critical(self, q=0.95, df=None): """ The chi-squared critical value for a confidence level q and degrees of freedom df. 
""" if df is None: df = self.degrees_of_freedom return scipy.stats.chi2.ppf(q=q, df=df) def _chi_squared_percentile(self, x, df=None): """ The chi-squared critical value for a confidence level q and degrees of freedom df. """ if df is None: df = self.degrees_of_freedom return scipy.stats.chi2.cdf(x, df=df) def chi_squared(self, q=0.95): """ The chi-squared statistic for the given transition frequencies. Also returns the critical statistic at the given confidence level q (default 95%). If the first number is bigger than the second number, then you can reject the hypothesis that the sequence is randomly ordered. Args: q (float): The confidence level, as a float in the range 0 to 1. Default: 0.95. Returns: float: The chi-squared statistic. """ # Observed and Expected matrices: O = self.observed_counts E = self.expected_counts # Adjustment for divide-by-zero epsilon = 1e-12 chi2 = np.sum((O - E)**2 / (E + epsilon)) crit = self._chi_squared_critical(q=q) perc = self._chi_squared_percentile(x=chi2) Chi2 = namedtuple('Chi2', ['chi2', 'crit', 'perc']) return Chi2(chi2, crit, perc) def as_graph(self, directed=True): if self.normalized_difference.ndim > 2: raise MarkovError("You can only graph one-step chains.") try: import networkx as nx except ImportError: nx = None if nx is None: print("Please install networkx with `pip install networkx`.") return if directed: alg = nx.DiGraph else: alg = nx.Graph G = nx.from_numpy_array(self.normalized_difference, create_using=alg) nx.set_node_attributes(G, self._state_dict, 'state') return G def plot_graph(self, ax=None, figsize=None, max_size=1000, directed=True, edge_labels=False, draw_neg=False ): if self.normalized_difference.ndim > 2: raise MarkovError("You can only graph one-step chains.") try: import networkx as nx except ImportError: nx = None if nx is None: print("Please install networkx with `pip install networkx`.") return G = self.as_graph(directed=directed) return_ax = True if ax is None: fig, ax = 
plt.subplots(figsize=figsize) return_ax = False e_neg = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if d['weight'] <= -1.0} e_small = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if -1.0 < d['weight'] <= 1.0} e_med = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if 1.0 < d['weight'] <= 2.0} e_large = {(u, v):round(d['weight'],1) for (u, v, d) in G.edges(data=True) if d['weight'] > 2.0} pos = nx.spring_layout(G) sizes = max_size * (self._state_counts / max(self._state_counts)) nx.draw_networkx_nodes(G, pos, ax=ax, node_size=sizes, node_color='orange') nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_large, width=10, arrowsize=40, splines='curved') nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_med, width=4, arrowsize=20) nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_small, width=3, alpha=0.1, edge_color='k') if draw_neg: nx.draw_networkx_edges(G, pos, ax=ax, edgelist=e_neg, width=2, alpha=0.1, edge_color='k') if edge_labels: nx.draw_networkx_edge_labels(G,pos,edge_labels=e_large) nx.draw_networkx_edge_labels(G,pos,edge_labels=e_med) labels = nx.get_node_attributes(G, 'state') ax = nx.draw_networkx_labels(G, pos, labels=labels, font_size=20, font_family='sans-serif', font_color='blue') if return_ax: return ax else: plt.axis('off') plt.show() return def plot_norm_diff(self, ax=None, cmap='RdBu', center_zero=True, vminmax=None, rotation=0, annotate=False, ): """ A visualization of the normalized difference matrix. 
Args """ if self.normalized_difference.ndim > 2: raise MarkovError("You can only plot one-step chains.") return_ax = True if ax is None: fig, ax = plt.subplots(figsize=(1 + self.states.size/1.5, self.states.size/1.5)) return_ax = False if vminmax is None: ma = np.ceil(np.max(np.abs(self.normalized_difference))) vmin, vmax = -ma, ma else: vmin, vmax = vminmax im = ax.imshow(self.normalized_difference, cmap=cmap, vmin=vmin, vmax=vmax, interpolation='none') plt.colorbar(im) ax.tick_params(axis='x', which='both', bottom=False, labelbottom=False, top=False, labeltop=True, ) ax.tick_params(axis='y', which='both', left=False, labelleft=True, right=False, labelright=False, ) ticks = np.arange(self.states.size) ax.set_yticks(ticks) ax.set_xticks(ticks) labels = [str(s) for s in self.states] ax.set_yticklabels(labels) if rotation == 0: ax.set_xticklabels(labels) else: ha = 'right' if rotation < 0 else 'left' ax.set_xticklabels(labels, rotation=rotation, ha=ha, rotation_mode='anchor' ) if annotate: for i in range(self.states.size): for j in range(self.states.size): norm = cols.Normalize(vmin=vmin, vmax=vmax) val = self.normalized_difference[i, j] lookup = cm.get_cmap(cmap) col = 'w' if rgb_is_dark(lookup(norm(val))) else 'k' fmt = annotate if isinstance(annotate, str) else '0.1f' s = format(val, fmt) text = ax.text(j, i, s, ha="center", va="center", color=col) # Deal with probable bug in matplotlib 3.1.1 ax.set_ylim(reversed(ax.get_xlim())) if return_ax: return ax else: plt.show() return
0.888813
0.572783
from django.conf.urls.defaults import patterns, url from django.core.exceptions import ObjectDoesNotExist import django.db.models from django.http import HttpResponse, HttpResponseNotFound from django.shortcuts import get_object_or_404 from django.template.response import TemplateResponse from ..core.app import SatchlessApp from . import models class ProductApp(SatchlessApp): app_name = 'product' namespace = 'product' Product = None Variant = None product_view_handlers_queue = None def __init__(self, *args, **kwargs): super(ProductApp, self).__init__(*args, **kwargs) self.product_view_handlers_queue = set() assert self.Product, ('You need to subclass ProductApp and provide' ' Product') assert self.Variant, ('You need to subclass ProductApp and provide' ' Variant') def get_product(self, request, **kwargs): raise NotImplementedError() def get_product_details_templates(self, product): return ['satchless/product/view.html'] def product_details(self, request, **kwargs): try: product = self.get_product(request, **kwargs) except ObjectDoesNotExist: return HttpResponseNotFound() context = self.on_product_view(instances=[product], request=request) if isinstance(context, HttpResponse): return context context['product'] = product context = self.get_context_data(request, **context) templates = self.get_product_details_templates(product) return TemplateResponse(request, templates, context) def register_product_view_handler(self, handler): self.product_view_handlers_queue.add(handler) def on_product_view(self, instances, request): context = {} for handler in self.product_view_handlers_queue: context = handler(instances, request=request, extra_context=context) if isinstance(context, HttpResponse): return context return context def get_urls(self): return patterns('', # '+' predeces product slug to prevent conflicts with categories # paths url(r'^\+(?P<product_pk>[0-9]+)-(?P<product_slug>[a-z0-9_-]+)/$', self.product_details, name='details'), ) class MagicProductApp(ProductApp): 
def __init__(self, **kwargs): self.Product = (self.Product or self.construct_product_class()) self.Variant = (self.Variant or self.construct_variant_class(self.Product)) super(MagicProductApp, self).__init__(**kwargs) def get_product(self, request, product_pk, product_slug): product = get_object_or_404(self.Product, pk=product_pk, slug=product_slug) return product.get_subtype_instance() def construct_product_class(self): class Product(models.Product): pass return Product def construct_variant_class(self, product_class): class Variant(models.Variant): product = django.db.models.ForeignKey(product_class, related_name='variants') return Variant
satchless/product/app.py
from django.conf.urls.defaults import patterns, url from django.core.exceptions import ObjectDoesNotExist import django.db.models from django.http import HttpResponse, HttpResponseNotFound from django.shortcuts import get_object_or_404 from django.template.response import TemplateResponse from ..core.app import SatchlessApp from . import models class ProductApp(SatchlessApp): app_name = 'product' namespace = 'product' Product = None Variant = None product_view_handlers_queue = None def __init__(self, *args, **kwargs): super(ProductApp, self).__init__(*args, **kwargs) self.product_view_handlers_queue = set() assert self.Product, ('You need to subclass ProductApp and provide' ' Product') assert self.Variant, ('You need to subclass ProductApp and provide' ' Variant') def get_product(self, request, **kwargs): raise NotImplementedError() def get_product_details_templates(self, product): return ['satchless/product/view.html'] def product_details(self, request, **kwargs): try: product = self.get_product(request, **kwargs) except ObjectDoesNotExist: return HttpResponseNotFound() context = self.on_product_view(instances=[product], request=request) if isinstance(context, HttpResponse): return context context['product'] = product context = self.get_context_data(request, **context) templates = self.get_product_details_templates(product) return TemplateResponse(request, templates, context) def register_product_view_handler(self, handler): self.product_view_handlers_queue.add(handler) def on_product_view(self, instances, request): context = {} for handler in self.product_view_handlers_queue: context = handler(instances, request=request, extra_context=context) if isinstance(context, HttpResponse): return context return context def get_urls(self): return patterns('', # '+' predeces product slug to prevent conflicts with categories # paths url(r'^\+(?P<product_pk>[0-9]+)-(?P<product_slug>[a-z0-9_-]+)/$', self.product_details, name='details'), ) class MagicProductApp(ProductApp): 
def __init__(self, **kwargs): self.Product = (self.Product or self.construct_product_class()) self.Variant = (self.Variant or self.construct_variant_class(self.Product)) super(MagicProductApp, self).__init__(**kwargs) def get_product(self, request, product_pk, product_slug): product = get_object_or_404(self.Product, pk=product_pk, slug=product_slug) return product.get_subtype_instance() def construct_product_class(self): class Product(models.Product): pass return Product def construct_variant_class(self, product_class): class Variant(models.Variant): product = django.db.models.ForeignKey(product_class, related_name='variants') return Variant
0.572006
0.072604
from typing import Dict, List, Union, Any import requests from e2e.Classes.Merit.Blockchain import Blockchain from e2e.Meros.RPC import RPC from e2e.Tests.Errors import TestError def request( rpc: RPC, req: Union[List[Any], Dict[str, Any]], headers: Dict[str, str] = {} ) -> Union[List[Any], Dict[str, Any]]: res: requests.Response = requests.post( "http://127.0.0.1:" + str(rpc.meros.rpc), headers=headers, json=req ) if res.status_code != 200: raise TestError("HTTP status isn't 200: " + str(res.status_code)) return res.json() def BatchTest( rpc: RPC ) -> None: #Most basic case; two valid requests. if request( rpc, [ {"jsonrpc": "2.0", "id": 1, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 0, "method": "merit_getDifficulty"} ] ) != [ {"jsonrpc": "2.0", "id": 1, "result": 1}, {"jsonrpc": "2.0", "id": 0, "result": Blockchain().difficulty()} ]: raise TestError("Meros didn't respond to a batch request properly.") #Test handling of empty batches. if request(rpc, []) != {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}: raise TestError("Empty batch wasn't handled correctly.") #Batches with invalid individual requests. if request(rpc, [1, 2, 3]) != [ {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}, {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}, {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None} ]: raise TestError("Batch with invalid individual entries wasn't handled correctly.") if request( rpc, [1, {"jsonrpc": "2.0", "id": 1, "method": "merit_getHeight"}, 2] ) != [ {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}, {"jsonrpc": "2.0", "id": 1, "result": 1}, {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None} ]: raise TestError("Batch with some invalid individual entries wasn't handled correctly.") #Test authorization. 
#If the token is passed, calling multiple methods requiring authorization should work. #If not passing a token, calling multiple methods not requiring authorization should work. This is tested implicitly by the first test case here. #If not passing a token, or passing an invalid token, calling any method requiring auth should cause the entire request to 401. multipleAuthed: Union[List[Any], Dict[str, Any]] = request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "personal_setWallet"}, {"jsonrpc": "2.0", "id": 1, "method": "personal_getMnemonic"}, ], {"Authorization": "Bearer TEST_TOKEN"} ) if isinstance(multipleAuthed, List): if multipleAuthed != [ {"jsonrpc": "2.0", "id": 0, "result": True}, #The Mnemonic will be random, hence this. {"jsonrpc": "2.0", "id": 1, "result": multipleAuthed[1]["result"]} ]: raise TestError("Batch request didn't work when it had multiple methods requiring authentication.") else: raise TestError("Response to a batch request wasn't a list.") #Not passing a token. try: request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 1, "method": "personal_setWallet"}, {"jsonrpc": "2.0", "id": 2, "method": "merit_getHeight"} ] ) raise Exception() except Exception as e: if str(e) != "HTTP status isn't 200: 401": raise TestError("Meros didn't respond to a batch request without authorization yet needing it as expected.") #Invalid token. try: request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 1, "method": "personal_setWallet"}, {"jsonrpc": "2.0", "id": 2, "method": "merit_getHeight"} ], {"Authorization": "Bearer INVALID_TOKEN"} ) raise Exception() except Exception as e: if str(e) != "HTTP status isn't 200: 401": raise TestError("Meros didn't respond to a batch request without authorization yet needing it as expected.") #Test batch requests containing quit. 
#Meros should return responses for all requests it handled before quit, yet still quit without further handling. if request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 1, "method": "system_quit"}, {"jsonrpc": "2.0", "id": 2, "method": "merit_getDifficulty"}, ], {"Authorization": "Bearer TEST_TOKEN"} ) != [ {"jsonrpc": "2.0", "id": 0, "result": 1}, {"jsonrpc": "2.0", "id": 1, "result": True} ]: raise TestError("Meros didn't respond to a batch request containing quit as expected.") #Mark Meros as having called quit so teardown works. rpc.meros.calledQuit = True
e2e/Tests/RPC/BatchTest.py
from typing import Dict, List, Union, Any import requests from e2e.Classes.Merit.Blockchain import Blockchain from e2e.Meros.RPC import RPC from e2e.Tests.Errors import TestError def request( rpc: RPC, req: Union[List[Any], Dict[str, Any]], headers: Dict[str, str] = {} ) -> Union[List[Any], Dict[str, Any]]: res: requests.Response = requests.post( "http://127.0.0.1:" + str(rpc.meros.rpc), headers=headers, json=req ) if res.status_code != 200: raise TestError("HTTP status isn't 200: " + str(res.status_code)) return res.json() def BatchTest( rpc: RPC ) -> None: #Most basic case; two valid requests. if request( rpc, [ {"jsonrpc": "2.0", "id": 1, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 0, "method": "merit_getDifficulty"} ] ) != [ {"jsonrpc": "2.0", "id": 1, "result": 1}, {"jsonrpc": "2.0", "id": 0, "result": Blockchain().difficulty()} ]: raise TestError("Meros didn't respond to a batch request properly.") #Test handling of empty batches. if request(rpc, []) != {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}: raise TestError("Empty batch wasn't handled correctly.") #Batches with invalid individual requests. if request(rpc, [1, 2, 3]) != [ {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}, {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}, {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None} ]: raise TestError("Batch with invalid individual entries wasn't handled correctly.") if request( rpc, [1, {"jsonrpc": "2.0", "id": 1, "method": "merit_getHeight"}, 2] ) != [ {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None}, {"jsonrpc": "2.0", "id": 1, "result": 1}, {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": None} ]: raise TestError("Batch with some invalid individual entries wasn't handled correctly.") #Test authorization. 
#If the token is passed, calling multiple methods requiring authorization should work. #If not passing a token, calling multiple methods not requiring authorization should work. This is tested implicitly by the first test case here. #If not passing a token, or passing an invalid token, calling any method requiring auth should cause the entire request to 401. multipleAuthed: Union[List[Any], Dict[str, Any]] = request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "personal_setWallet"}, {"jsonrpc": "2.0", "id": 1, "method": "personal_getMnemonic"}, ], {"Authorization": "Bearer TEST_TOKEN"} ) if isinstance(multipleAuthed, List): if multipleAuthed != [ {"jsonrpc": "2.0", "id": 0, "result": True}, #The Mnemonic will be random, hence this. {"jsonrpc": "2.0", "id": 1, "result": multipleAuthed[1]["result"]} ]: raise TestError("Batch request didn't work when it had multiple methods requiring authentication.") else: raise TestError("Response to a batch request wasn't a list.") #Not passing a token. try: request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 1, "method": "personal_setWallet"}, {"jsonrpc": "2.0", "id": 2, "method": "merit_getHeight"} ] ) raise Exception() except Exception as e: if str(e) != "HTTP status isn't 200: 401": raise TestError("Meros didn't respond to a batch request without authorization yet needing it as expected.") #Invalid token. try: request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 1, "method": "personal_setWallet"}, {"jsonrpc": "2.0", "id": 2, "method": "merit_getHeight"} ], {"Authorization": "Bearer INVALID_TOKEN"} ) raise Exception() except Exception as e: if str(e) != "HTTP status isn't 200: 401": raise TestError("Meros didn't respond to a batch request without authorization yet needing it as expected.") #Test batch requests containing quit. 
#Meros should return responses for all requests it handled before quit, yet still quit without further handling. if request( rpc, [ {"jsonrpc": "2.0", "id": 0, "method": "merit_getHeight"}, {"jsonrpc": "2.0", "id": 1, "method": "system_quit"}, {"jsonrpc": "2.0", "id": 2, "method": "merit_getDifficulty"}, ], {"Authorization": "Bearer TEST_TOKEN"} ) != [ {"jsonrpc": "2.0", "id": 0, "result": 1}, {"jsonrpc": "2.0", "id": 1, "result": True} ]: raise TestError("Meros didn't respond to a batch request containing quit as expected.") #Mark Meros as having called quit so teardown works. rpc.meros.calledQuit = True
0.718693
0.315525
import math import argparse import itertools import csv import time import numpy as np from astropy import units as u from astropy.coordinates import SkyCoord from astroquery.simbad import Simbad from os.path import splitext from utils import CatEntry, convertRADEC def generate_guide_catalog(catalog): """Generate a catalog with the angular distance of all stars combinations""" guide_catalog = [] FOV_h = 2 * 14.455 FOV_v = 2 * 10.94 FOV = math.sqrt(FOV_h**2 + FOV_v**2) for a, b in itertools.combinations(catalog, 2): a_car = convertRADEC(a.ra, a.dec + 90) b_car = convertRADEC(b.ra, b.dec + 90) dab = math.degrees(math.acos(a_car[0] * b_car[0] + a_car[1] * b_car[1] + a_car[2] * b_car[2])) if dab < FOV: guide_catalog.append([a.starnumber, b.starnumber, dab]) guide_catalog.sort(key=lambda x: x[2]) return guide_catalog def read_catalog(catalog_csv, Vmag): """Read the propagated catalog and filter out stars with Vmag greater or equal than Vmag""" catalog = [] with open(catalog_csv, 'r') as csv_file: csv_reader = csv.DictReader(csv_file) line_count = 0 for row in csv_reader: if line_count != 0: if float(row['vmag']) >= Vmag: continue star = CatEntry(row['HIP_number'], "HIP", int(row['HIP_number']), float(row['ra_degrees']), float(row['dec_degrees']), row['promora'], row['promodec'], row['parallax'], 0.0, float(row['vmag'])) catalog.append(star) line_count += 1 return catalog def label_guide_stars(filename, catalog, guide_stars): stars = np.array(guide_stars) # Get unique guide_stars unique = np.unique(stars[:,:-1]) hip_names = [] checked = 0 labeled = 0 # Need a tuple of [HIP_number, ra, dec] for all guide stars for hip_star in catalog: # Stop earlier if all guide stars are already labeled. 
if checked == len(unique): break if hip_star.starnumber in unique: # Query Simbad for its object, then name c = SkyCoord(ra=hip_star.ra, dec=hip_star.dec, unit=u.deg) print(c) while True: try: reg = Simbad.query_region(c, radius=0.01*u.deg) except: time.sleep(5) continue break name = "" if len(reg[0]) > 0: while True: try: objs = Simbad.query_objectids(reg[0][0]) except: time.sleep(5) continue break if len(objs) > 0: noname = True for name in objs: if "NAME" in name[0]: name = name[0].replace("NAME", "").strip().lower() noname = False labeled += 1 print(name) break if noname: name = str(hip_star.starnumber) time.sleep(0.2) hip_names.append([hip_star.starnumber, name]) checked += 1 print(f'{checked=} {labeled=}') with open(filename, mode='w') as csv_file: fieldnames = ['HIP_number', 'Name'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for star in hip_names: writer.writerow({'HIP_number': star[0], 'Name': star[1]}) def main(args): catalog = read_catalog(args.input, float(args.v_mag)) guide_stars = generate_guide_catalog(catalog) split_name = splitext(args.output) # Write guide stars pairs with their angular distance with open(args.output, mode='w') as csv_file: fieldnames = ['HIP_number_a', 'HIP_number_b', 'distance'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for row in guide_stars: writer.writerow({ 'HIP_number_a': row[0], 'HIP_number_b': row[1], 'distance': row[2] }) # Write a file with labels for each guide star labels_filename = split_name[0] + "_labels" + split_name[1] label_guide_stars(labels_filename, catalog, guide_stars) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', required=True, dest='input', help='Hipparcos input CSV path') parser.add_argument('-o', '--output', required=True, dest='output', help='Output CSV converted path') parser.add_argument('-m', '--magnitude', required=True, dest='v_mag', help='Vmag cutoff') 
main(parser.parse_args())
startrackerpy/server/startracker/catalogs/guide_stars_catalog.py
import math import argparse import itertools import csv import time import numpy as np from astropy import units as u from astropy.coordinates import SkyCoord from astroquery.simbad import Simbad from os.path import splitext from utils import CatEntry, convertRADEC def generate_guide_catalog(catalog): """Generate a catalog with the angular distance of all stars combinations""" guide_catalog = [] FOV_h = 2 * 14.455 FOV_v = 2 * 10.94 FOV = math.sqrt(FOV_h**2 + FOV_v**2) for a, b in itertools.combinations(catalog, 2): a_car = convertRADEC(a.ra, a.dec + 90) b_car = convertRADEC(b.ra, b.dec + 90) dab = math.degrees(math.acos(a_car[0] * b_car[0] + a_car[1] * b_car[1] + a_car[2] * b_car[2])) if dab < FOV: guide_catalog.append([a.starnumber, b.starnumber, dab]) guide_catalog.sort(key=lambda x: x[2]) return guide_catalog def read_catalog(catalog_csv, Vmag): """Read the propagated catalog and filter out stars with Vmag greater or equal than Vmag""" catalog = [] with open(catalog_csv, 'r') as csv_file: csv_reader = csv.DictReader(csv_file) line_count = 0 for row in csv_reader: if line_count != 0: if float(row['vmag']) >= Vmag: continue star = CatEntry(row['HIP_number'], "HIP", int(row['HIP_number']), float(row['ra_degrees']), float(row['dec_degrees']), row['promora'], row['promodec'], row['parallax'], 0.0, float(row['vmag'])) catalog.append(star) line_count += 1 return catalog def label_guide_stars(filename, catalog, guide_stars): stars = np.array(guide_stars) # Get unique guide_stars unique = np.unique(stars[:,:-1]) hip_names = [] checked = 0 labeled = 0 # Need a tuple of [HIP_number, ra, dec] for all guide stars for hip_star in catalog: # Stop earlier if all guide stars are already labeled. 
if checked == len(unique): break if hip_star.starnumber in unique: # Query Simbad for its object, then name c = SkyCoord(ra=hip_star.ra, dec=hip_star.dec, unit=u.deg) print(c) while True: try: reg = Simbad.query_region(c, radius=0.01*u.deg) except: time.sleep(5) continue break name = "" if len(reg[0]) > 0: while True: try: objs = Simbad.query_objectids(reg[0][0]) except: time.sleep(5) continue break if len(objs) > 0: noname = True for name in objs: if "NAME" in name[0]: name = name[0].replace("NAME", "").strip().lower() noname = False labeled += 1 print(name) break if noname: name = str(hip_star.starnumber) time.sleep(0.2) hip_names.append([hip_star.starnumber, name]) checked += 1 print(f'{checked=} {labeled=}') with open(filename, mode='w') as csv_file: fieldnames = ['HIP_number', 'Name'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for star in hip_names: writer.writerow({'HIP_number': star[0], 'Name': star[1]}) def main(args): catalog = read_catalog(args.input, float(args.v_mag)) guide_stars = generate_guide_catalog(catalog) split_name = splitext(args.output) # Write guide stars pairs with their angular distance with open(args.output, mode='w') as csv_file: fieldnames = ['HIP_number_a', 'HIP_number_b', 'distance'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for row in guide_stars: writer.writerow({ 'HIP_number_a': row[0], 'HIP_number_b': row[1], 'distance': row[2] }) # Write a file with labels for each guide star labels_filename = split_name[0] + "_labels" + split_name[1] label_guide_stars(labels_filename, catalog, guide_stars) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-i', '--input', required=True, dest='input', help='Hipparcos input CSV path') parser.add_argument('-o', '--output', required=True, dest='output', help='Output CSV converted path') parser.add_argument('-m', '--magnitude', required=True, dest='v_mag', help='Vmag cutoff') 
main(parser.parse_args())
0.381565
0.285328
from __future__ import print_function import amber_repo import boot_data import filecmp import logging import os import re import subprocess import sys import target import tempfile import time import uuid from common import SDK_ROOT, EnsurePathExists, GetHostToolPathFromPlatform # The maximum times to attempt mDNS resolution when connecting to a freshly # booted Fuchsia instance before aborting. BOOT_DISCOVERY_ATTEMPTS = 30 # Number of failed connection attempts before redirecting system logs to stdout. CONNECT_RETRY_COUNT_BEFORE_LOGGING = 10 TARGET_HASH_FILE_PATH = '/data/.hash' # Number of seconds to wait when querying a list of all devices over mDNS. _LIST_DEVICES_TIMEOUT_SECS = 3 # Time between a reboot command is issued and when connection attempts from the # host begin. _REBOOT_SLEEP_PERIOD = 20 def GetTargetType(): return DeviceTarget class DeviceTarget(target.Target): """Prepares a device to be used as a deployment target. Depending on the command line parameters, it automatically handling a number of preparatory steps relating to address resolution. If |_node_name| is unset: If there is one running device, use it for deployment and execution. If there are more than one running devices, then abort and instruct the user to re-run the command with |_node_name| If |_node_name| is set: If there is a running device with a matching nodename, then it is used for deployment and execution. If |_host| is set: Deploy to a device at the host IP address as-is.""" def __init__(self, out_dir, target_cpu, host=None, node_name=None, port=None, ssh_config=None, fuchsia_out_dir=None, os_check='update', system_log_file=None): """out_dir: The directory which will contain the files that are generated to support the deployment. target_cpu: The CPU architecture of the deployment target. Can be "x64" or "arm64". host: The address of the deployment target device. node_name: The node name of the deployment target device. 
port: The port of the SSH service on the deployment target device. ssh_config: The path to SSH configuration data. fuchsia_out_dir: The path to a Fuchsia build output directory, for deployments to devices paved with local Fuchsia builds. os_check: If 'check', the target's SDK version must match. If 'update', the target will be repaved if the SDK versions mismatch. If 'ignore', the target's SDK version is ignored.""" super(DeviceTarget, self).__init__(out_dir, target_cpu) self._system_log_file = system_log_file self._host = host self._port = port self._fuchsia_out_dir = None if fuchsia_out_dir: self._fuchsia_out_dir = os.path.expanduser(fuchsia_out_dir) self._node_name = node_name self._os_check = os_check self._amber_repo = None if self._host and self._node_name: raise Exception('Only one of "--host" or "--name" can be specified.') if self._fuchsia_out_dir: if ssh_config: raise Exception('Only one of "--fuchsia-out-dir" or "--ssh_config" can ' 'be specified.') self._fuchsia_out_dir = os.path.expanduser(fuchsia_out_dir) # Use SSH keys from the Fuchsia output directory. self._ssh_config_path = os.path.join(self._fuchsia_out_dir, 'ssh-keys', 'ssh_config') self._os_check = 'ignore' elif ssh_config: # Use the SSH config provided via the commandline. self._ssh_config_path = os.path.expanduser(ssh_config) else: # Default to using an automatically generated SSH config and keys. boot_data.ProvisionSSH(out_dir) self._ssh_config_path = boot_data.GetSSHConfigPath(out_dir) @staticmethod def RegisterArgs(arg_parser): target.Target.RegisterArgs(arg_parser) device_args = arg_parser.add_argument_group('device', 'Device Arguments') device_args.add_argument('--host', help='The IP of the target device. Optional.') device_args.add_argument('--node-name', help='The node-name of the device to boot or ' 'deploy to. 
Optional, will use the first ' 'discovered device if omitted.') device_args.add_argument('--port', '-p', type=int, default=None, help='The port of the SSH service running on the ' 'device. Optional.') device_args.add_argument('--ssh-config', '-F', help='The path to the SSH configuration used for ' 'connecting to the target device.') device_args.add_argument( '--os-check', choices=['check', 'update', 'ignore'], default='update', help="Sets the OS version enforcement policy. If 'check', then the " "deployment process will halt if the target\'s version doesn\'t " "match. If 'update', then the target device will automatically " "be repaved. If 'ignore', then the OS version won\'t be checked.") def _SDKHashMatches(self): """Checks if /data/.hash on the device matches SDK_ROOT/.hash. Returns True if the files are identical, or False otherwise. """ with tempfile.NamedTemporaryFile() as tmp: try: self.GetFile(TARGET_HASH_FILE_PATH, tmp.name) except subprocess.CalledProcessError: # If the file is unretrievable for whatever reason, assume mismatch. return False return filecmp.cmp(tmp.name, os.path.join(SDK_ROOT, '.hash'), False) def _ProvisionDeviceIfNecessary(self): if self._Discover(): self._WaitUntilReady() else: raise Exception('Could not find device. If the device is connected ' 'to the host remotely, make sure that --host flag is ' 'set and that remote serving is set up.') def _Discover(self): """Queries mDNS for the IP address of a booted Fuchsia instance whose name matches |_node_name| on the local area network. If |_node_name| isn't specified, and there is only one device on the network, then returns the IP address of that advice. Sets |_host_name| and returns True if the device was found, or waits up to |timeout| seconds and returns False if the device couldn't be found.""" dev_finder_path = GetHostToolPathFromPlatform('device-finder') if self._node_name: command = [dev_finder_path, 'resolve', '-device-limit', '1', # Exit early as soon as a host is found. 
self._node_name] else: command = [ dev_finder_path, 'list', '-full', '-timeout', "%ds" % _LIST_DEVICES_TIMEOUT_SECS ] proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')) output = set(proc.communicate()[0].strip().split('\n')) if proc.returncode != 0: return False if self._node_name: # Handle the result of "device-finder resolve". self._host = output.pop().strip() else: name_host_pairs = [x.strip().split(' ') for x in output] # Handle the output of "device-finder list". if len(name_host_pairs) > 1: print('More than one device was discovered on the network.') print('Use --node-name <name> to specify the device to use.') print('\nList of devices:') for pair in name_host_pairs: print(' ' + pair[1]) print() raise Exception('Ambiguous target device specification.') assert len(name_host_pairs) == 1 self._host, self._node_name = name_host_pairs[0] logging.info('Found device "%s" at address %s.' % (self._node_name, self._host)) return True def Start(self): if self._host: self._WaitUntilReady() else: self._ProvisionDeviceIfNecessary() def GetAmberRepo(self): if not self._amber_repo: if self._fuchsia_out_dir: # Deploy to an already-booted device running a local Fuchsia build. self._amber_repo = amber_repo.ExternalAmberRepo( os.path.join(self._fuchsia_out_dir, 'amber-files')) else: # Create an ephemeral Amber repo, then start both "pm serve" as well as # the bootserver. self._amber_repo = amber_repo.ManagedAmberRepo(self) return self._amber_repo def _ParseNodename(self, output): # Parse the nodename from bootserver stdout. m = re.search(r'.*Proceeding with nodename (?P<nodename>.*)$', output, re.MULTILINE) if not m: raise Exception('Couldn\'t parse nodename from bootserver output.') self._node_name = m.groupdict()['nodename'] logging.info('Booted device "%s".' % self._node_name) # Repeatdly query mDNS until we find the device, or we hit the timeout of # DISCOVERY_TIMEOUT_SECS. 
logging.info('Waiting for device to join network.') for _ in xrange(_BOOT_DISCOVERY_ATTEMPTS): if self.__Discover(): break if not self._host: raise Exception('Device %s couldn\'t be discovered via mDNS.' % self._node_name) self._WaitUntilReady(); # Update the target's hash to match the current tree's. self.PutFile(os.path.join(SDK_ROOT, '.hash'), TARGET_HASH_FILE_PATH) def _GetEndpoint(self): return (self._host, self._port) def _GetSshConfigPath(self): return self._ssh_config_path def Restart(self): """Restart the device.""" self.RunCommandPiped('dm reboot') time.sleep(_REBOOT_SLEEP_PERIOD) self.Start()
build/fuchsia/device_target.py
from __future__ import print_function import amber_repo import boot_data import filecmp import logging import os import re import subprocess import sys import target import tempfile import time import uuid from common import SDK_ROOT, EnsurePathExists, GetHostToolPathFromPlatform # The maximum times to attempt mDNS resolution when connecting to a freshly # booted Fuchsia instance before aborting. BOOT_DISCOVERY_ATTEMPTS = 30 # Number of failed connection attempts before redirecting system logs to stdout. CONNECT_RETRY_COUNT_BEFORE_LOGGING = 10 TARGET_HASH_FILE_PATH = '/data/.hash' # Number of seconds to wait when querying a list of all devices over mDNS. _LIST_DEVICES_TIMEOUT_SECS = 3 # Time between a reboot command is issued and when connection attempts from the # host begin. _REBOOT_SLEEP_PERIOD = 20 def GetTargetType(): return DeviceTarget class DeviceTarget(target.Target): """Prepares a device to be used as a deployment target. Depending on the command line parameters, it automatically handling a number of preparatory steps relating to address resolution. If |_node_name| is unset: If there is one running device, use it for deployment and execution. If there are more than one running devices, then abort and instruct the user to re-run the command with |_node_name| If |_node_name| is set: If there is a running device with a matching nodename, then it is used for deployment and execution. If |_host| is set: Deploy to a device at the host IP address as-is.""" def __init__(self, out_dir, target_cpu, host=None, node_name=None, port=None, ssh_config=None, fuchsia_out_dir=None, os_check='update', system_log_file=None): """out_dir: The directory which will contain the files that are generated to support the deployment. target_cpu: The CPU architecture of the deployment target. Can be "x64" or "arm64". host: The address of the deployment target device. node_name: The node name of the deployment target device. 
port: The port of the SSH service on the deployment target device. ssh_config: The path to SSH configuration data. fuchsia_out_dir: The path to a Fuchsia build output directory, for deployments to devices paved with local Fuchsia builds. os_check: If 'check', the target's SDK version must match. If 'update', the target will be repaved if the SDK versions mismatch. If 'ignore', the target's SDK version is ignored.""" super(DeviceTarget, self).__init__(out_dir, target_cpu) self._system_log_file = system_log_file self._host = host self._port = port self._fuchsia_out_dir = None if fuchsia_out_dir: self._fuchsia_out_dir = os.path.expanduser(fuchsia_out_dir) self._node_name = node_name self._os_check = os_check self._amber_repo = None if self._host and self._node_name: raise Exception('Only one of "--host" or "--name" can be specified.') if self._fuchsia_out_dir: if ssh_config: raise Exception('Only one of "--fuchsia-out-dir" or "--ssh_config" can ' 'be specified.') self._fuchsia_out_dir = os.path.expanduser(fuchsia_out_dir) # Use SSH keys from the Fuchsia output directory. self._ssh_config_path = os.path.join(self._fuchsia_out_dir, 'ssh-keys', 'ssh_config') self._os_check = 'ignore' elif ssh_config: # Use the SSH config provided via the commandline. self._ssh_config_path = os.path.expanduser(ssh_config) else: # Default to using an automatically generated SSH config and keys. boot_data.ProvisionSSH(out_dir) self._ssh_config_path = boot_data.GetSSHConfigPath(out_dir) @staticmethod def RegisterArgs(arg_parser): target.Target.RegisterArgs(arg_parser) device_args = arg_parser.add_argument_group('device', 'Device Arguments') device_args.add_argument('--host', help='The IP of the target device. Optional.') device_args.add_argument('--node-name', help='The node-name of the device to boot or ' 'deploy to. 
Optional, will use the first ' 'discovered device if omitted.') device_args.add_argument('--port', '-p', type=int, default=None, help='The port of the SSH service running on the ' 'device. Optional.') device_args.add_argument('--ssh-config', '-F', help='The path to the SSH configuration used for ' 'connecting to the target device.') device_args.add_argument( '--os-check', choices=['check', 'update', 'ignore'], default='update', help="Sets the OS version enforcement policy. If 'check', then the " "deployment process will halt if the target\'s version doesn\'t " "match. If 'update', then the target device will automatically " "be repaved. If 'ignore', then the OS version won\'t be checked.") def _SDKHashMatches(self): """Checks if /data/.hash on the device matches SDK_ROOT/.hash. Returns True if the files are identical, or False otherwise. """ with tempfile.NamedTemporaryFile() as tmp: try: self.GetFile(TARGET_HASH_FILE_PATH, tmp.name) except subprocess.CalledProcessError: # If the file is unretrievable for whatever reason, assume mismatch. return False return filecmp.cmp(tmp.name, os.path.join(SDK_ROOT, '.hash'), False) def _ProvisionDeviceIfNecessary(self): if self._Discover(): self._WaitUntilReady() else: raise Exception('Could not find device. If the device is connected ' 'to the host remotely, make sure that --host flag is ' 'set and that remote serving is set up.') def _Discover(self): """Queries mDNS for the IP address of a booted Fuchsia instance whose name matches |_node_name| on the local area network. If |_node_name| isn't specified, and there is only one device on the network, then returns the IP address of that advice. Sets |_host_name| and returns True if the device was found, or waits up to |timeout| seconds and returns False if the device couldn't be found.""" dev_finder_path = GetHostToolPathFromPlatform('device-finder') if self._node_name: command = [dev_finder_path, 'resolve', '-device-limit', '1', # Exit early as soon as a host is found. 
self._node_name] else: command = [ dev_finder_path, 'list', '-full', '-timeout', "%ds" % _LIST_DEVICES_TIMEOUT_SECS ] proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')) output = set(proc.communicate()[0].strip().split('\n')) if proc.returncode != 0: return False if self._node_name: # Handle the result of "device-finder resolve". self._host = output.pop().strip() else: name_host_pairs = [x.strip().split(' ') for x in output] # Handle the output of "device-finder list". if len(name_host_pairs) > 1: print('More than one device was discovered on the network.') print('Use --node-name <name> to specify the device to use.') print('\nList of devices:') for pair in name_host_pairs: print(' ' + pair[1]) print() raise Exception('Ambiguous target device specification.') assert len(name_host_pairs) == 1 self._host, self._node_name = name_host_pairs[0] logging.info('Found device "%s" at address %s.' % (self._node_name, self._host)) return True def Start(self): if self._host: self._WaitUntilReady() else: self._ProvisionDeviceIfNecessary() def GetAmberRepo(self): if not self._amber_repo: if self._fuchsia_out_dir: # Deploy to an already-booted device running a local Fuchsia build. self._amber_repo = amber_repo.ExternalAmberRepo( os.path.join(self._fuchsia_out_dir, 'amber-files')) else: # Create an ephemeral Amber repo, then start both "pm serve" as well as # the bootserver. self._amber_repo = amber_repo.ManagedAmberRepo(self) return self._amber_repo def _ParseNodename(self, output): # Parse the nodename from bootserver stdout. m = re.search(r'.*Proceeding with nodename (?P<nodename>.*)$', output, re.MULTILINE) if not m: raise Exception('Couldn\'t parse nodename from bootserver output.') self._node_name = m.groupdict()['nodename'] logging.info('Booted device "%s".' % self._node_name) # Repeatdly query mDNS until we find the device, or we hit the timeout of # DISCOVERY_TIMEOUT_SECS. 
logging.info('Waiting for device to join network.') for _ in xrange(_BOOT_DISCOVERY_ATTEMPTS): if self.__Discover(): break if not self._host: raise Exception('Device %s couldn\'t be discovered via mDNS.' % self._node_name) self._WaitUntilReady(); # Update the target's hash to match the current tree's. self.PutFile(os.path.join(SDK_ROOT, '.hash'), TARGET_HASH_FILE_PATH) def _GetEndpoint(self): return (self._host, self._port) def _GetSshConfigPath(self): return self._ssh_config_path def Restart(self): """Restart the device.""" self.RunCommandPiped('dm reboot') time.sleep(_REBOOT_SLEEP_PERIOD) self.Start()
0.637595
0.134634
__version__ = '3.2.4rc2' import time import sys from . import ca from . import dbr from . import pv from . import alarm from . import device from . import motor from . import multiproc PV = pv.PV Alarm = alarm.Alarm Motor = motor.Motor Device = device.Device poll = ca.poll get_pv = pv.get_pv CAProcess = multiproc.CAProcess CAPool = multiproc.CAPool # some constants NO_ALARM = 0 MINOR_ALARM = 1 MAJOR_ALARM = 2 INVALID_ALARM = 3 _PVmonitors_ = {} def caput(pvname, value, wait=False, timeout=60): """caput(pvname, value, wait=False, timeout=60) simple put to a pv's value. >>> caput('xx.VAL',3.0) to wait for pv to complete processing, use 'wait=True': >>> caput('xx.VAL',3.0,wait=True) """ thispv = get_pv(pvname, connect=True) if thispv.connected: return thispv.put(value, wait=wait, timeout=timeout) def caget(pvname, as_string=False, count=None, as_numpy=True, use_monitor=False, timeout=None): """caget(pvname, as_string=False) simple get of a pv's value.. >>> x = caget('xx.VAL') to get the character string representation (formatted double, enum string, etc): >>> x = caget('xx.VAL', as_string=True) to get a truncated amount of data from an array, you can specify the count with >>> x = caget('MyArray.VAL', count=1000) """ thispv = get_pv(pvname, connect=True) if thispv.connected: if as_string: thispv.get_ctrlvars() val = thispv.get(count=count, timeout=timeout, use_monitor=use_monitor, as_string=as_string, as_numpy=as_numpy) poll() return val def cainfo(pvname, print_out=True): """cainfo(pvname,print_out=True) return printable information about pv >>>cainfo('xx.VAL') will return a status report for the pv. If print_out=False, the status report will be printed, and not returned. 
""" thispv = get_pv(pvname, connect=True) if thispv.connected: thispv.get() thispv.get_ctrlvars() if print_out: ca.write(thispv.info) else: return thispv.info def camonitor_clear(pvname): """clear a monitor on a PV""" if pvname in _PVmonitors_: _PVmonitors_[pvname].remove_callback(index=-999) _PVmonitors_.pop(pvname) def camonitor(pvname, writer=None, callback=None): """ camonitor(pvname, writer=None, callback=None) sets a monitor on a PV. >>>camonitor('xx.VAL') This will write a message with the latest value for that PV each time the value changes and when ca.poll() is called. To write the result to a file, provide the writer option a write method to an open file or some other method that accepts a string. To completely control where the output goes, provide a callback method and you can do whatever you'd like with them. Your callback will be sent keyword arguments for pvname, value, and char_value Important: use **kwd!! """ if writer is None: writer = ca.write if callback is None: def callback(pvname=None, value=None, char_value=None, **kwds): "generic monitor callback" if char_value is None: char_value = repr(value) writer("%.32s %s %s" % (pvname, pv.fmt_time(), char_value)) thispv = get_pv(pvname, connect=True) if thispv.connected: thispv.get() thispv.add_callback(callback, index=-999, with_ctrlvars=True) _PVmonitors_[pvname] = thispv def caget_many(pvlist): """get values for a list of PVs This does not maintain PV objects, and works as fast as possible to fetch many values. """ chids, out = [], [] for name in pvlist: chids.append(ca.create_channel(name, auto_cb=False, connect=False)) for chid in chids: ca.connect_channel(chid) for chid in chids: ca.get(chid, wait=False) for chid in chids: out.append(ca.get_complete(chid)) return out
lib/__init__.py
__version__ = '3.2.4rc2' import time import sys from . import ca from . import dbr from . import pv from . import alarm from . import device from . import motor from . import multiproc PV = pv.PV Alarm = alarm.Alarm Motor = motor.Motor Device = device.Device poll = ca.poll get_pv = pv.get_pv CAProcess = multiproc.CAProcess CAPool = multiproc.CAPool # some constants NO_ALARM = 0 MINOR_ALARM = 1 MAJOR_ALARM = 2 INVALID_ALARM = 3 _PVmonitors_ = {} def caput(pvname, value, wait=False, timeout=60): """caput(pvname, value, wait=False, timeout=60) simple put to a pv's value. >>> caput('xx.VAL',3.0) to wait for pv to complete processing, use 'wait=True': >>> caput('xx.VAL',3.0,wait=True) """ thispv = get_pv(pvname, connect=True) if thispv.connected: return thispv.put(value, wait=wait, timeout=timeout) def caget(pvname, as_string=False, count=None, as_numpy=True, use_monitor=False, timeout=None): """caget(pvname, as_string=False) simple get of a pv's value.. >>> x = caget('xx.VAL') to get the character string representation (formatted double, enum string, etc): >>> x = caget('xx.VAL', as_string=True) to get a truncated amount of data from an array, you can specify the count with >>> x = caget('MyArray.VAL', count=1000) """ thispv = get_pv(pvname, connect=True) if thispv.connected: if as_string: thispv.get_ctrlvars() val = thispv.get(count=count, timeout=timeout, use_monitor=use_monitor, as_string=as_string, as_numpy=as_numpy) poll() return val def cainfo(pvname, print_out=True): """cainfo(pvname,print_out=True) return printable information about pv >>>cainfo('xx.VAL') will return a status report for the pv. If print_out=False, the status report will be printed, and not returned. 
""" thispv = get_pv(pvname, connect=True) if thispv.connected: thispv.get() thispv.get_ctrlvars() if print_out: ca.write(thispv.info) else: return thispv.info def camonitor_clear(pvname): """clear a monitor on a PV""" if pvname in _PVmonitors_: _PVmonitors_[pvname].remove_callback(index=-999) _PVmonitors_.pop(pvname) def camonitor(pvname, writer=None, callback=None): """ camonitor(pvname, writer=None, callback=None) sets a monitor on a PV. >>>camonitor('xx.VAL') This will write a message with the latest value for that PV each time the value changes and when ca.poll() is called. To write the result to a file, provide the writer option a write method to an open file or some other method that accepts a string. To completely control where the output goes, provide a callback method and you can do whatever you'd like with them. Your callback will be sent keyword arguments for pvname, value, and char_value Important: use **kwd!! """ if writer is None: writer = ca.write if callback is None: def callback(pvname=None, value=None, char_value=None, **kwds): "generic monitor callback" if char_value is None: char_value = repr(value) writer("%.32s %s %s" % (pvname, pv.fmt_time(), char_value)) thispv = get_pv(pvname, connect=True) if thispv.connected: thispv.get() thispv.add_callback(callback, index=-999, with_ctrlvars=True) _PVmonitors_[pvname] = thispv def caget_many(pvlist): """get values for a list of PVs This does not maintain PV objects, and works as fast as possible to fetch many values. """ chids, out = [], [] for name in pvlist: chids.append(ca.create_channel(name, auto_cb=False, connect=False)) for chid in chids: ca.connect_channel(chid) for chid in chids: ca.get(chid, wait=False) for chid in chids: out.append(ca.get_complete(chid)) return out
0.405684
0.116286
from telemetry.page import page as page_module
from telemetry import story

from page_sets import webgl_supported_shared_state


class ToughWebglCasesPage(page_module.Page):
  """One self-driven WebGL demo page from the tough-WebGL set."""

  def __init__(self, url, page_set):
    super(ToughWebglCasesPage, self).__init__(
        url=url,
        page_set=page_set,
        shared_page_state_class=(
            webgl_supported_shared_state.WebGLSupportedSharedState),
        make_javascript_deterministic=False)
    self.archive_data_file = 'data/tough_webgl_cases.json'

  @property
  def skipped_gpus(self):
    # crbug.com/462729
    return ['arm', 'broadcom', 'hisilicon', 'imagination', 'qualcomm',
            'vivante']

  def RunNavigateSteps(self, action_runner):
    super(ToughWebglCasesPage, self).RunNavigateSteps(action_runner)
    # Let the page finish loading, then give the animation time to start.
    action_runner.WaitForJavaScriptCondition(
        'document.readyState == "complete"')
    action_runner.Wait(2)

  def RunPageInteractions(self, action_runner):
    # The demos are self-driven; simply record the animation for a while.
    with action_runner.CreateInteraction('WebGLAnimation'):
      action_runner.Wait(5)


class ToughWebglCasesPageSet(story.StorySet):
  """Self-driven WebGL animation examples."""

  def __init__(self):
    super(ToughWebglCasesPageSet, self).__init__(
        archive_data_file='data/tough_webgl_cases.json',
        cloud_storage_bucket=story.PUBLIC_BUCKET)

    urls = [
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/google/nvidia-vertex-buffer-object/index.html',
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/google/san-angeles/index.html',
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/google/particles/index.html',
        'http://www.khronos.org/registry/webgl/sdk/demos/webkit/Earth.html',
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/webkit/ManyPlanetsDeep.html',
        'http://webglsamples.googlecode.com/hg/aquarium/aquarium.html',
        'http://webglsamples.googlecode.com/hg/blob/blob.html',
        # pylint: disable=line-too-long
        'http://webglsamples.googlecode.com/hg/dynamic-cubemap/dynamic-cubemap.html'
    ]
    for page_url in urls:
      self.AddStory(ToughWebglCasesPage(page_url, self))
src/tools/perf/page_sets/tough_webgl_cases.py
from telemetry.page import page as page_module
from telemetry import story

from page_sets import webgl_supported_shared_state


class ToughWebglCasesPage(page_module.Page):
  """One self-driven WebGL demo page from the tough-WebGL set."""

  def __init__(self, url, page_set):
    super(ToughWebglCasesPage, self).__init__(
        url=url,
        page_set=page_set,
        shared_page_state_class=(
            webgl_supported_shared_state.WebGLSupportedSharedState),
        make_javascript_deterministic=False)
    self.archive_data_file = 'data/tough_webgl_cases.json'

  @property
  def skipped_gpus(self):
    # crbug.com/462729
    return ['arm', 'broadcom', 'hisilicon', 'imagination', 'qualcomm',
            'vivante']

  def RunNavigateSteps(self, action_runner):
    super(ToughWebglCasesPage, self).RunNavigateSteps(action_runner)
    # Let the page finish loading, then give the animation time to start.
    action_runner.WaitForJavaScriptCondition(
        'document.readyState == "complete"')
    action_runner.Wait(2)

  def RunPageInteractions(self, action_runner):
    # The demos are self-driven; simply record the animation for a while.
    with action_runner.CreateInteraction('WebGLAnimation'):
      action_runner.Wait(5)


class ToughWebglCasesPageSet(story.StorySet):
  """Self-driven WebGL animation examples."""

  def __init__(self):
    super(ToughWebglCasesPageSet, self).__init__(
        archive_data_file='data/tough_webgl_cases.json',
        cloud_storage_bucket=story.PUBLIC_BUCKET)

    urls = [
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/google/nvidia-vertex-buffer-object/index.html',
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/google/san-angeles/index.html',
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/google/particles/index.html',
        'http://www.khronos.org/registry/webgl/sdk/demos/webkit/Earth.html',
        # pylint: disable=line-too-long
        'http://www.khronos.org/registry/webgl/sdk/demos/webkit/ManyPlanetsDeep.html',
        'http://webglsamples.googlecode.com/hg/aquarium/aquarium.html',
        'http://webglsamples.googlecode.com/hg/blob/blob.html',
        # pylint: disable=line-too-long
        'http://webglsamples.googlecode.com/hg/dynamic-cubemap/dynamic-cubemap.html'
    ]
    for page_url in urls:
      self.AddStory(ToughWebglCasesPage(page_url, self))
0.530723
0.059265
# NOTE: SWIG-generated proxy module for the C extension
# _SimController_ZoneControlTemperature_Thermostat.
# Do not hand-edit; regenerate with SWIG instead.  The bare `except:`
# clauses below are part of the generated boilerplate and are kept as-is.
from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_SimController_ZoneControlTemperature_Thermostat', [dirname(__file__)])
        except ImportError:
            import _SimController_ZoneControlTemperature_Thermostat
            return _SimController_ZoneControlTemperature_Thermostat
        if fp is not None:
            try:
                _mod = imp.load_module('_SimController_ZoneControlTemperature_Thermostat', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _SimController_ZoneControlTemperature_Thermostat = swig_import_helper()
    del swig_import_helper
else:
    import _SimController_ZoneControlTemperature_Thermostat
del version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.


def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # Route attribute writes through the SWIG setter table.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)


def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)


def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # Route attribute reads through the SWIG getter table.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)


def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)


def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)

try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0

try:
    import weakref
    weakref_proxy = weakref.proxy
except:
    weakref_proxy = lambda x: x

import base
import SimController_SupplyWater_Temperature


class SimController_ZoneControlTemperature(SimController_SupplyWater_Temperature.SimController):
    __swig_setmethods__ = {}
    for _s in [SimController_SupplyWater_Temperature.SimController]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimController_ZoneControlTemperature, name, value)
    __swig_getmethods__ = {}
    for _s in [SimController_SupplyWater_Temperature.SimController]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimController_ZoneControlTemperature, name)
    __repr__ = _swig_repr

    def SimCntrl_Name(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_SimCntrl_Name(self, *args)

    def SimCntrl_ZoneOrZoneListName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_SimCntrl_ZoneOrZoneListName(self, *args)

    def __init__(self, *args):
        this = _SimController_ZoneControlTemperature_Thermostat.new_SimController_ZoneControlTemperature(*args)
        try:
            self.this.append(this)
        except:
            self.this = this

    def _clone(self, f=0, c=None):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature__clone(self, f, c)
    __swig_destroy__ = _SimController_ZoneControlTemperature_Thermostat.delete_SimController_ZoneControlTemperature
    __del__ = lambda self: None
SimController_ZoneControlTemperature_swigregister = _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_swigregister
SimController_ZoneControlTemperature_swigregister(SimController_ZoneControlTemperature)


class SimController_ZoneControlTemperature_Thermostat(SimController_ZoneControlTemperature):
    __swig_setmethods__ = {}
    for _s in [SimController_ZoneControlTemperature]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimController_ZoneControlTemperature_Thermostat, name, value)
    __swig_getmethods__ = {}
    for _s in [SimController_ZoneControlTemperature]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimController_ZoneControlTemperature_Thermostat, name)
    __repr__ = _swig_repr

    def SimCntrl_ControlTypeScheduleName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_SimCntrl_ControlTypeScheduleName(self, *args)

    def SimCntrl_Control_1_4_ObjectType(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_SimCntrl_Control_1_4_ObjectType(self, *args)

    def SimCntrl_Control_1_4_Name(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_SimCntrl_Control_1_4_Name(self, *args)

    def ZoneCont_Tstat_OperativeTemp_ThermostatName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_ThermostatName(self, *args)

    def ZoneCont_Tstat_OperativeTemp_RadiativeFractionInputMode(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_RadiativeFractionInputMode(self, *args)

    def ZoneCont_Tstat_OperativeTemp_FixedRadiativeFraction(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_FixedRadiativeFraction(self, *args)

    def ZoneCont_Tstat_OperativeTemp_RadiativeFractionScheduleName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_RadiativeFractionScheduleName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_ThermostatName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_ThermostatName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_DehumidifyingRelativeHumiditySetpointScheduleName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_DehumidifyingRelativeHumiditySetpointScheduleName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_DehumidCntlType(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_DehumidCntlType(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolRangeInputMethod(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolRangeInputMethod(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolConstantRange(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolConstantRange(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolRangeSchedName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolRangeSchedName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolCtrlRatio(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolCtrlRatio(self, *args)

    def __init__(self, *args):
        this = _SimController_ZoneControlTemperature_Thermostat.new_SimController_ZoneControlTemperature_Thermostat(*args)
        try:
            self.this.append(this)
        except:
            self.this = this

    def _clone(self, f=0, c=None):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat__clone(self, f, c)
    __swig_destroy__ = _SimController_ZoneControlTemperature_Thermostat.delete_SimController_ZoneControlTemperature_Thermostat
    __del__ = lambda self: None
SimController_ZoneControlTemperature_Thermostat_swigregister = _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_swigregister
SimController_ZoneControlTemperature_Thermostat_swigregister(SimController_ZoneControlTemperature_Thermostat)


class SimController_ZoneControlTemperature_Thermostat_sequence(base.sequence_common):
    __swig_setmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimController_ZoneControlTemperature_Thermostat_sequence, name, value)
    __swig_getmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimController_ZoneControlTemperature_Thermostat_sequence, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _SimController_ZoneControlTemperature_Thermostat.new_SimController_ZoneControlTemperature_Thermostat_sequence(*args)
        try:
            self.this.append(this)
        except:
            self.this = this

    def assign(self, n, x):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_assign(self, n, x)

    def begin(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_begin(self, *args)

    def end(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_end(self, *args)

    def rbegin(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_rbegin(self, *args)

    def rend(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_rend(self, *args)

    def at(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_at(self, *args)

    def front(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_front(self, *args)

    def back(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_back(self, *args)

    def push_back(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_push_back(self, *args)

    def pop_back(self):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_pop_back(self)

    def detach_back(self, pop=True):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_detach_back(self, pop)

    def insert(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_insert(self, *args)

    def erase(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_erase(self, *args)

    def detach(self, position, r, erase=True):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_detach(self, position, r, erase)

    def swap(self, x):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_swap(self, x)
    __swig_destroy__ = _SimController_ZoneControlTemperature_Thermostat.delete_SimController_ZoneControlTemperature_Thermostat_sequence
    __del__ = lambda self: None
SimController_ZoneControlTemperature_Thermostat_sequence_swigregister = _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_swigregister
SimController_ZoneControlTemperature_Thermostat_sequence_swigregister(SimController_ZoneControlTemperature_Thermostat_sequence)

# This file is compatible with both classic and new-style classes.
SimModel_Python_API/simmodel_swig/Release/SimController_ZoneControlTemperature_Thermostat.py
# NOTE: SWIG-generated proxy module for the C extension
# _SimController_ZoneControlTemperature_Thermostat.
# Do not hand-edit; regenerate with SWIG instead.  The bare `except:`
# clauses below are part of the generated boilerplate and are kept as-is.
from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_SimController_ZoneControlTemperature_Thermostat', [dirname(__file__)])
        except ImportError:
            import _SimController_ZoneControlTemperature_Thermostat
            return _SimController_ZoneControlTemperature_Thermostat
        if fp is not None:
            try:
                _mod = imp.load_module('_SimController_ZoneControlTemperature_Thermostat', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _SimController_ZoneControlTemperature_Thermostat = swig_import_helper()
    del swig_import_helper
else:
    import _SimController_ZoneControlTemperature_Thermostat
del version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.


def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # Route attribute writes through the SWIG setter table.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)


def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)


def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # Route attribute reads through the SWIG getter table.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)


def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)


def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)

try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0

try:
    import weakref
    weakref_proxy = weakref.proxy
except:
    weakref_proxy = lambda x: x

import base
import SimController_SupplyWater_Temperature


class SimController_ZoneControlTemperature(SimController_SupplyWater_Temperature.SimController):
    __swig_setmethods__ = {}
    for _s in [SimController_SupplyWater_Temperature.SimController]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimController_ZoneControlTemperature, name, value)
    __swig_getmethods__ = {}
    for _s in [SimController_SupplyWater_Temperature.SimController]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimController_ZoneControlTemperature, name)
    __repr__ = _swig_repr

    def SimCntrl_Name(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_SimCntrl_Name(self, *args)

    def SimCntrl_ZoneOrZoneListName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_SimCntrl_ZoneOrZoneListName(self, *args)

    def __init__(self, *args):
        this = _SimController_ZoneControlTemperature_Thermostat.new_SimController_ZoneControlTemperature(*args)
        try:
            self.this.append(this)
        except:
            self.this = this

    def _clone(self, f=0, c=None):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature__clone(self, f, c)
    __swig_destroy__ = _SimController_ZoneControlTemperature_Thermostat.delete_SimController_ZoneControlTemperature
    __del__ = lambda self: None
SimController_ZoneControlTemperature_swigregister = _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_swigregister
SimController_ZoneControlTemperature_swigregister(SimController_ZoneControlTemperature)


class SimController_ZoneControlTemperature_Thermostat(SimController_ZoneControlTemperature):
    __swig_setmethods__ = {}
    for _s in [SimController_ZoneControlTemperature]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimController_ZoneControlTemperature_Thermostat, name, value)
    __swig_getmethods__ = {}
    for _s in [SimController_ZoneControlTemperature]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimController_ZoneControlTemperature_Thermostat, name)
    __repr__ = _swig_repr

    def SimCntrl_ControlTypeScheduleName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_SimCntrl_ControlTypeScheduleName(self, *args)

    def SimCntrl_Control_1_4_ObjectType(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_SimCntrl_Control_1_4_ObjectType(self, *args)

    def SimCntrl_Control_1_4_Name(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_SimCntrl_Control_1_4_Name(self, *args)

    def ZoneCont_Tstat_OperativeTemp_ThermostatName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_ThermostatName(self, *args)

    def ZoneCont_Tstat_OperativeTemp_RadiativeFractionInputMode(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_RadiativeFractionInputMode(self, *args)

    def ZoneCont_Tstat_OperativeTemp_FixedRadiativeFraction(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_FixedRadiativeFraction(self, *args)

    def ZoneCont_Tstat_OperativeTemp_RadiativeFractionScheduleName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_OperativeTemp_RadiativeFractionScheduleName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_ThermostatName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_ThermostatName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_DehumidifyingRelativeHumiditySetpointScheduleName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_DehumidifyingRelativeHumiditySetpointScheduleName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_DehumidCntlType(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_DehumidCntlType(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolRangeInputMethod(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolRangeInputMethod(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolConstantRange(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolConstantRange(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolRangeSchedName(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolRangeSchedName(self, *args)

    def ZoneCont_Tstat_TempAndHumid_OvercoolCtrlRatio(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_ZoneCont_Tstat_TempAndHumid_OvercoolCtrlRatio(self, *args)

    def __init__(self, *args):
        this = _SimController_ZoneControlTemperature_Thermostat.new_SimController_ZoneControlTemperature_Thermostat(*args)
        try:
            self.this.append(this)
        except:
            self.this = this

    def _clone(self, f=0, c=None):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat__clone(self, f, c)
    __swig_destroy__ = _SimController_ZoneControlTemperature_Thermostat.delete_SimController_ZoneControlTemperature_Thermostat
    __del__ = lambda self: None
SimController_ZoneControlTemperature_Thermostat_swigregister = _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_swigregister
SimController_ZoneControlTemperature_Thermostat_swigregister(SimController_ZoneControlTemperature_Thermostat)


class SimController_ZoneControlTemperature_Thermostat_sequence(base.sequence_common):
    __swig_setmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimController_ZoneControlTemperature_Thermostat_sequence, name, value)
    __swig_getmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimController_ZoneControlTemperature_Thermostat_sequence, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _SimController_ZoneControlTemperature_Thermostat.new_SimController_ZoneControlTemperature_Thermostat_sequence(*args)
        try:
            self.this.append(this)
        except:
            self.this = this

    def assign(self, n, x):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_assign(self, n, x)

    def begin(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_begin(self, *args)

    def end(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_end(self, *args)

    def rbegin(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_rbegin(self, *args)

    def rend(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_rend(self, *args)

    def at(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_at(self, *args)

    def front(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_front(self, *args)

    def back(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_back(self, *args)

    def push_back(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_push_back(self, *args)

    def pop_back(self):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_pop_back(self)

    def detach_back(self, pop=True):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_detach_back(self, pop)

    def insert(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_insert(self, *args)

    def erase(self, *args):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_erase(self, *args)

    def detach(self, position, r, erase=True):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_detach(self, position, r, erase)

    def swap(self, x):
        return _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_swap(self, x)
    __swig_destroy__ = _SimController_ZoneControlTemperature_Thermostat.delete_SimController_ZoneControlTemperature_Thermostat_sequence
    __del__ = lambda self: None
SimController_ZoneControlTemperature_Thermostat_sequence_swigregister = _SimController_ZoneControlTemperature_Thermostat.SimController_ZoneControlTemperature_Thermostat_sequence_swigregister
SimController_ZoneControlTemperature_Thermostat_sequence_swigregister(SimController_ZoneControlTemperature_Thermostat_sequence)

# This file is compatible with both classic and new-style classes.
0.258139
0.071916
import numpy as np from pymanopt.manifolds.manifold import Manifold class PoincareBall(Manifold): r"""The Poincare ball. The Poincare ball of dimension ``n``. Elements are represented as arrays of shape ``(n,)`` if ``k = 1``. For ``k > 1``, the class represents the product manifold of ``k`` Poincare balls of dimension ``n``, in which case points are represented as arrays of shape ``(k, n)``. Since the manifold is open, the tangent space at every point is a copy of :math:`\R^n`. The Poincare ball is embedded in :math:`\R^n` and is a Riemannian manifold, but it is not an embedded Riemannian submanifold since the metric is not inherited from the Euclidean inner product of its ambient space. Instead, the Riemannian metric is conformal to the Euclidean one (angles are preserved), and it is given at every point :math:`\vmx` by :math:`\inner{\vmu}{\vmv}_\vmx = \lambda_\vmx^2 \inner{\vmu}{\vmv}` where :math:`\lambda_\vmx = 2 / (1 - \norm{\vmx}^2)` is the conformal factor. This induces the following distance between two points :math:`\vmx` and :math:`\vmy` on the manifold: :math:`\dist_\manM(\vmx, \vmy) = \arccosh\parens{1 + 2 \frac{\norm{\vmx - \vmy}^2}{(1 - \norm{\vmx}^2) (1 - \norm{\vmy}^2)}}.` The norm here is understood as the Euclidean norm in the ambient space. Args: n: The dimension of the Poincare ball. k: The number of elements in the product of Poincare balls. """ def __init__(self, n: int, *, k: int = 1): self._n = n self._k = k if n < 1: raise ValueError(f"Need n >= 1. Value given was n = {n}") if k < 1: raise ValueError(f"Need k >= 1. 
Value given was k = {k}") if k == 1: name = f"Poincare ball B({n})" elif k >= 2: name = f"Poincare ball B({n})^{k}" dimension = k * n super().__init__(name, dimension) @property def typical_dist(self): return self.dim / 8 def inner_product(self, point, tangent_vector_a, tangent_vector_b): factor = self.conformal_factor(point) return np.tensordot( tangent_vector_a, tangent_vector_b * factor**2, axes=tangent_vector_a.ndim, ) def projection(self, point, vector): return vector to_tangent_space = projection def norm(self, point, tangent_vector): return np.sqrt( self.inner_product(point, tangent_vector, tangent_vector) ) def random_point(self): array = np.random.normal(size=(self._k, self._n)) norm = np.linalg.norm(array, axis=-1, keepdims=True) radius = np.random.uniform(size=(self._k, 1)) ** (1.0 / self._n) point = array / norm * radius if self._k == 1: return point[0] return point def random_tangent_vector(self, point): return np.random.normal(size=point.shape) def zero_vector(self, point): return np.zeros_like(point) def dist(self, point_a, point_b): norm_point_a = np.sum(point_a * point_a, axis=-1) norm_point_b = np.sum(point_b * point_b, axis=-1) difference = point_a - point_b norm_difference = np.sum(difference * difference, axis=-1) columns_dist = np.arccosh( 1 + 2 * norm_difference / ((1 - norm_point_a) * (1 - norm_point_b)) ) return np.linalg.norm(columns_dist) def euclidean_to_riemannian_gradient(self, point, euclidean_gradient): # The hyperbolic metric tensor is conformal to the Euclidean one, so # the Euclidean gradient is simply rescaled. factor = 1 / self.conformal_factor(point) ** 2 return euclidean_gradient * factor def euclidean_to_riemannian_hessian( self, point, euclidean_gradient, euclidean_hessian, tangent_vector ): # This expression is derived from the Koszul formula. 
factor = self.conformal_factor(point) return ( np.sum(euclidean_gradient * point, axis=-1, keepdims=True) * tangent_vector - np.sum(point * tangent_vector, axis=-1, keepdims=True) * euclidean_gradient - np.sum( euclidean_gradient * tangent_vector, axis=-1, keepdims=True ) * point + euclidean_hessian / factor ) / factor def exp(self, point, tangent_vector): norm_point = np.linalg.norm(tangent_vector, axis=-1, keepdims=True) # Handle the case where tangent_vector is 0. W = tangent_vector * np.divide( np.tanh( norm_point / (1 - np.sum(point * point, axis=-1, keepdims=True)) ), norm_point, out=np.zeros_like(tangent_vector), where=norm_point != 0, ) return self.mobius_addition(point, W) retraction = exp def log(self, point_a, point_b): W = self.mobius_addition(-point_a, point_b) norm_W = np.linalg.norm(W, axis=-1, keepdims=True) return ( (1 - np.sum(point_a * point_a, axis=-1, keepdims=True)) * np.arctanh(norm_W) * W / norm_W ) def pair_mean(self, point_a, point_b): return self.exp(point_a, self.log(point_a, point_b) / 2) def mobius_addition(self, point_a, point_b): """Möbius addition. Special non-associative and non-commutative operation which is closed in the Poincare ball. Args: point_a: The first point. point_b: The second point. Returns: The Möbius sum of ``point_a`` and ``point_b``. """ scalar_product = np.sum(point_a * point_b, axis=-1, keepdims=True) norm_point_a = np.sum(point_a * point_a, axis=-1, keepdims=True) norm_point_b = np.sum(point_b * point_b, axis=-1, keepdims=True) return ( point_a * (1 + 2 * scalar_product + norm_point_b) + point_b * (1 - norm_point_a) ) / (1 + 2 * scalar_product + norm_point_a * norm_point_b) def conformal_factor(self, point): """The conformal factor for a point. Args: point: The point for which to compute the conformal factor. Returns: The conformal factor. If ``point`` is a point on the product manifold of ``k`` Poincare balls, the return value will be an array of shape ``(k,1)``. 
The singleton dimension is explicitly kept to simplify multiplication of ``point`` by the conformal factor on product manifolds. """ return 2 / (1 - np.sum(point * point, axis=-1, keepdims=True))
pymanopt/manifolds/hyperbolic.py
import numpy as np from pymanopt.manifolds.manifold import Manifold class PoincareBall(Manifold): r"""The Poincare ball. The Poincare ball of dimension ``n``. Elements are represented as arrays of shape ``(n,)`` if ``k = 1``. For ``k > 1``, the class represents the product manifold of ``k`` Poincare balls of dimension ``n``, in which case points are represented as arrays of shape ``(k, n)``. Since the manifold is open, the tangent space at every point is a copy of :math:`\R^n`. The Poincare ball is embedded in :math:`\R^n` and is a Riemannian manifold, but it is not an embedded Riemannian submanifold since the metric is not inherited from the Euclidean inner product of its ambient space. Instead, the Riemannian metric is conformal to the Euclidean one (angles are preserved), and it is given at every point :math:`\vmx` by :math:`\inner{\vmu}{\vmv}_\vmx = \lambda_\vmx^2 \inner{\vmu}{\vmv}` where :math:`\lambda_\vmx = 2 / (1 - \norm{\vmx}^2)` is the conformal factor. This induces the following distance between two points :math:`\vmx` and :math:`\vmy` on the manifold: :math:`\dist_\manM(\vmx, \vmy) = \arccosh\parens{1 + 2 \frac{\norm{\vmx - \vmy}^2}{(1 - \norm{\vmx}^2) (1 - \norm{\vmy}^2)}}.` The norm here is understood as the Euclidean norm in the ambient space. Args: n: The dimension of the Poincare ball. k: The number of elements in the product of Poincare balls. """ def __init__(self, n: int, *, k: int = 1): self._n = n self._k = k if n < 1: raise ValueError(f"Need n >= 1. Value given was n = {n}") if k < 1: raise ValueError(f"Need k >= 1. 
Value given was k = {k}") if k == 1: name = f"Poincare ball B({n})" elif k >= 2: name = f"Poincare ball B({n})^{k}" dimension = k * n super().__init__(name, dimension) @property def typical_dist(self): return self.dim / 8 def inner_product(self, point, tangent_vector_a, tangent_vector_b): factor = self.conformal_factor(point) return np.tensordot( tangent_vector_a, tangent_vector_b * factor**2, axes=tangent_vector_a.ndim, ) def projection(self, point, vector): return vector to_tangent_space = projection def norm(self, point, tangent_vector): return np.sqrt( self.inner_product(point, tangent_vector, tangent_vector) ) def random_point(self): array = np.random.normal(size=(self._k, self._n)) norm = np.linalg.norm(array, axis=-1, keepdims=True) radius = np.random.uniform(size=(self._k, 1)) ** (1.0 / self._n) point = array / norm * radius if self._k == 1: return point[0] return point def random_tangent_vector(self, point): return np.random.normal(size=point.shape) def zero_vector(self, point): return np.zeros_like(point) def dist(self, point_a, point_b): norm_point_a = np.sum(point_a * point_a, axis=-1) norm_point_b = np.sum(point_b * point_b, axis=-1) difference = point_a - point_b norm_difference = np.sum(difference * difference, axis=-1) columns_dist = np.arccosh( 1 + 2 * norm_difference / ((1 - norm_point_a) * (1 - norm_point_b)) ) return np.linalg.norm(columns_dist) def euclidean_to_riemannian_gradient(self, point, euclidean_gradient): # The hyperbolic metric tensor is conformal to the Euclidean one, so # the Euclidean gradient is simply rescaled. factor = 1 / self.conformal_factor(point) ** 2 return euclidean_gradient * factor def euclidean_to_riemannian_hessian( self, point, euclidean_gradient, euclidean_hessian, tangent_vector ): # This expression is derived from the Koszul formula. 
factor = self.conformal_factor(point) return ( np.sum(euclidean_gradient * point, axis=-1, keepdims=True) * tangent_vector - np.sum(point * tangent_vector, axis=-1, keepdims=True) * euclidean_gradient - np.sum( euclidean_gradient * tangent_vector, axis=-1, keepdims=True ) * point + euclidean_hessian / factor ) / factor def exp(self, point, tangent_vector): norm_point = np.linalg.norm(tangent_vector, axis=-1, keepdims=True) # Handle the case where tangent_vector is 0. W = tangent_vector * np.divide( np.tanh( norm_point / (1 - np.sum(point * point, axis=-1, keepdims=True)) ), norm_point, out=np.zeros_like(tangent_vector), where=norm_point != 0, ) return self.mobius_addition(point, W) retraction = exp def log(self, point_a, point_b): W = self.mobius_addition(-point_a, point_b) norm_W = np.linalg.norm(W, axis=-1, keepdims=True) return ( (1 - np.sum(point_a * point_a, axis=-1, keepdims=True)) * np.arctanh(norm_W) * W / norm_W ) def pair_mean(self, point_a, point_b): return self.exp(point_a, self.log(point_a, point_b) / 2) def mobius_addition(self, point_a, point_b): """Möbius addition. Special non-associative and non-commutative operation which is closed in the Poincare ball. Args: point_a: The first point. point_b: The second point. Returns: The Möbius sum of ``point_a`` and ``point_b``. """ scalar_product = np.sum(point_a * point_b, axis=-1, keepdims=True) norm_point_a = np.sum(point_a * point_a, axis=-1, keepdims=True) norm_point_b = np.sum(point_b * point_b, axis=-1, keepdims=True) return ( point_a * (1 + 2 * scalar_product + norm_point_b) + point_b * (1 - norm_point_a) ) / (1 + 2 * scalar_product + norm_point_a * norm_point_b) def conformal_factor(self, point): """The conformal factor for a point. Args: point: The point for which to compute the conformal factor. Returns: The conformal factor. If ``point`` is a point on the product manifold of ``k`` Poincare balls, the return value will be an array of shape ``(k,1)``. 
The singleton dimension is explicitly kept to simplify multiplication of ``point`` by the conformal factor on product manifolds. """ return 2 / (1 - np.sum(point * point, axis=-1, keepdims=True))
0.949902
0.732089
import pytest from pandas import DataFrame from pandas.testing import assert_frame_equal from numpy.testing import assert_array_equal from tidybear import rename @pytest.fixture def df(): return DataFrame({"A": [1, 2],"B": [3, 4]}) def test_canary(): pass def test_rename_no_args(df): assert_frame_equal(df, rename(df)) def test_rename_with_list(df): renamed = rename(df, ["X", "Y"]) assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_with_args(df): renamed = rename(df, "X", "Y") assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_all_with_dict(df): renamed = rename(df, {"A": "X", "B": "Y"}) assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_some_with_dict(df): renamed = rename(df, {"B": "Y"}) assert renamed.columns.tolist() == ["A", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_no_cols_dict(df): renamed = rename(df, {"C": "Z"}) assert_frame_equal(df, renamed) def test_rename_all_with_kwargs(df): renamed = rename(df, A="X", B="Y") assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_some_with_kwargs(df): renamed = rename(df, B="Y") assert renamed.columns.tolist() == ["A", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_no_cols_kwargs(df): renamed = rename(df, C="Z") assert_frame_equal(df, renamed) def test_rename_fails_with_list(df): # too few with pytest.raises(AssertionError): rename(df, ["X"]) # too many with pytest.raises(AssertionError): rename(df, ["X", "Y", "Z"]) def test_rename_fails_with_args(df): # too few with pytest.raises(AssertionError): rename(df, "X") # too many with pytest.raises(AssertionError): rename(df, "X", "Y", "Z")
tests/test_df_functions/test_rename.py
import pytest from pandas import DataFrame from pandas.testing import assert_frame_equal from numpy.testing import assert_array_equal from tidybear import rename @pytest.fixture def df(): return DataFrame({"A": [1, 2],"B": [3, 4]}) def test_canary(): pass def test_rename_no_args(df): assert_frame_equal(df, rename(df)) def test_rename_with_list(df): renamed = rename(df, ["X", "Y"]) assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_with_args(df): renamed = rename(df, "X", "Y") assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_all_with_dict(df): renamed = rename(df, {"A": "X", "B": "Y"}) assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_some_with_dict(df): renamed = rename(df, {"B": "Y"}) assert renamed.columns.tolist() == ["A", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_no_cols_dict(df): renamed = rename(df, {"C": "Z"}) assert_frame_equal(df, renamed) def test_rename_all_with_kwargs(df): renamed = rename(df, A="X", B="Y") assert renamed.columns.tolist() == ["X", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_some_with_kwargs(df): renamed = rename(df, B="Y") assert renamed.columns.tolist() == ["A", "Y"] assert_array_equal(df.values, renamed.values) def test_rename_no_cols_kwargs(df): renamed = rename(df, C="Z") assert_frame_equal(df, renamed) def test_rename_fails_with_list(df): # too few with pytest.raises(AssertionError): rename(df, ["X"]) # too many with pytest.raises(AssertionError): rename(df, ["X", "Y", "Z"]) def test_rename_fails_with_args(df): # too few with pytest.raises(AssertionError): rename(df, "X") # too many with pytest.raises(AssertionError): rename(df, "X", "Y", "Z")
0.536313
0.784505
import requests import sys import logging logging.captureWarnings(True) import config """ Get all the users of the VOMS VO with their detailed information :return: user dictionary keyed by the user DN """ class vomsApi: def getUsers(self, hostname, port, vo_name): result = None url = "https://%s:%s/voms/%s/apiv2/users" % (hostname, port, vo_name) logging.debug("Processing '%s'" % (url)) rawUserList = [] startIndex = 0 result = None error = None urlDone = False while not urlDone: try: result = requests.get(url, headers={"X-VOMS-CSRF-GUARD": "y"}, cert=(config.voms['api']['cert_path'], config.voms['api']['key_path']), verify=config.voms['api']['ca_path'], params={"startIndex": str(startIndex), "pageSize": "100"}) except requests.ConnectionError as exc: error = "%s:%s" % (url, repr(exc)) urlDone = True continue if result.status_code != 200: error = "Failed to contact the VOMS server: %s" % result.text urlDone = True continue userList = result.json()['result'] rawUserList.extend(userList) if len(userList) < 100: urlDone = True startIndex += 100 if error: logging.debug("Failed to contact the VOMS server: %s" % error) return None # We have got the user info, reformat it resultDict = {} for user in rawUserList: if user.get('suspended'): logging.debug("Ignoring suspended user: %s" % user) continue for cert in user['certificates']: dn = cert['subjectString'] # resultDict[dn] = user resultDict[dn] = dict() resultDict[dn]['issuer'] = cert['issuerString'] resultDict[dn]['Roles'] = user['fqans'] attributes = user.get('attributes') if attributes: for attribute in user.get('attributes', []): if attribute.get('name') == 'nickname': resultDict[dn]['nickname'] = attribute.get('value') return resultDict
lib/vomsApi.py
import requests import sys import logging logging.captureWarnings(True) import config """ Get all the users of the VOMS VO with their detailed information :return: user dictionary keyed by the user DN """ class vomsApi: def getUsers(self, hostname, port, vo_name): result = None url = "https://%s:%s/voms/%s/apiv2/users" % (hostname, port, vo_name) logging.debug("Processing '%s'" % (url)) rawUserList = [] startIndex = 0 result = None error = None urlDone = False while not urlDone: try: result = requests.get(url, headers={"X-VOMS-CSRF-GUARD": "y"}, cert=(config.voms['api']['cert_path'], config.voms['api']['key_path']), verify=config.voms['api']['ca_path'], params={"startIndex": str(startIndex), "pageSize": "100"}) except requests.ConnectionError as exc: error = "%s:%s" % (url, repr(exc)) urlDone = True continue if result.status_code != 200: error = "Failed to contact the VOMS server: %s" % result.text urlDone = True continue userList = result.json()['result'] rawUserList.extend(userList) if len(userList) < 100: urlDone = True startIndex += 100 if error: logging.debug("Failed to contact the VOMS server: %s" % error) return None # We have got the user info, reformat it resultDict = {} for user in rawUserList: if user.get('suspended'): logging.debug("Ignoring suspended user: %s" % user) continue for cert in user['certificates']: dn = cert['subjectString'] # resultDict[dn] = user resultDict[dn] = dict() resultDict[dn]['issuer'] = cert['issuerString'] resultDict[dn]['Roles'] = user['fqans'] attributes = user.get('attributes') if attributes: for attribute in user.get('attributes', []): if attribute.get('name') == 'nickname': resultDict[dn]['nickname'] = attribute.get('value') return resultDict
0.16398
0.0771
import cv2 import numpy as np from common import HostNode, get_property_value from PyFlow.Core.Common import * from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper from config import DEBUG def hex_to_rgb(hex_string): r_hex = hex_string[1:3] g_hex = hex_string[3:5] b_hex = hex_string[5:7] return int(r_hex, 16), int(g_hex, 16), int(b_hex, 16) def frame_norm(frame, bbox): if len(bbox) != 4: raise ValueError("BBox is malformed, should have length of 4 - received {}".format(bbox)) return (np.array(bbox) * np.array([*frame.shape[:2], *frame.shape[:2]])[::-1]).astype(int) class BBoxOverlayNode(HostNode): def __init__(self, name): super(BBoxOverlayNode, self).__init__(name) self.data = self.createInputPin('frame', 'FramePin') self.width = self.createInputPin('bbox', 'BoundingBoxPin') self.height = self.createInputPin('color_hex', 'StringPin') self.frame = self.createOutputPin('result', 'FramePin') self.data.enableOptions(PinOptions.AllowMultipleConnections) self.frame.enableOptions(PinOptions.AllowMultipleConnections) @staticmethod def pinTypeHints(): helper = NodePinsSuggestionsHelper() helper.addInputDataType('FramePin') helper.addInputDataType('BoundingBoxPin') helper.addInputDataType('StringPin') helper.addInputStruct(StructureType.Multi) helper.addOutputDataType('FramePin') helper.addOutputStruct(StructureType.Multi) return helper @staticmethod def category(): return 'FrameOps' @staticmethod def keywords(): return [] @staticmethod def description(): return "Description in rst format." 
def run(self, device): color = hex_to_rgb(get_property_value(self, "color_hex")) bboxes = [] frame = None while self._running: in_data = self.queue.get() if in_data is None: continue if in_data['name'] == "frame": frame = in_data['data'] elif in_data['name'] == "bbox": bboxes = in_data['data'] if frame is None: continue frame = frame.copy() for raw_bbox in bboxes: bbox = frame_norm(frame, raw_bbox) cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2) self.send("result", frame)
PyFlow/Packages/DepthAI_Host/Nodes/FrameOps/BBoxOverlayNode.py
import cv2 import numpy as np from common import HostNode, get_property_value from PyFlow.Core.Common import * from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper from config import DEBUG def hex_to_rgb(hex_string): r_hex = hex_string[1:3] g_hex = hex_string[3:5] b_hex = hex_string[5:7] return int(r_hex, 16), int(g_hex, 16), int(b_hex, 16) def frame_norm(frame, bbox): if len(bbox) != 4: raise ValueError("BBox is malformed, should have length of 4 - received {}".format(bbox)) return (np.array(bbox) * np.array([*frame.shape[:2], *frame.shape[:2]])[::-1]).astype(int) class BBoxOverlayNode(HostNode): def __init__(self, name): super(BBoxOverlayNode, self).__init__(name) self.data = self.createInputPin('frame', 'FramePin') self.width = self.createInputPin('bbox', 'BoundingBoxPin') self.height = self.createInputPin('color_hex', 'StringPin') self.frame = self.createOutputPin('result', 'FramePin') self.data.enableOptions(PinOptions.AllowMultipleConnections) self.frame.enableOptions(PinOptions.AllowMultipleConnections) @staticmethod def pinTypeHints(): helper = NodePinsSuggestionsHelper() helper.addInputDataType('FramePin') helper.addInputDataType('BoundingBoxPin') helper.addInputDataType('StringPin') helper.addInputStruct(StructureType.Multi) helper.addOutputDataType('FramePin') helper.addOutputStruct(StructureType.Multi) return helper @staticmethod def category(): return 'FrameOps' @staticmethod def keywords(): return [] @staticmethod def description(): return "Description in rst format." 
def run(self, device): color = hex_to_rgb(get_property_value(self, "color_hex")) bboxes = [] frame = None while self._running: in_data = self.queue.get() if in_data is None: continue if in_data['name'] == "frame": frame = in_data['data'] elif in_data['name'] == "bbox": bboxes = in_data['data'] if frame is None: continue frame = frame.copy() for raw_bbox in bboxes: bbox = frame_norm(frame, raw_bbox) cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2) self.send("result", frame)
0.592313
0.287693
from .base import * # noqa from .base import env from datetime import timedelta # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = False # https://docs.djangoproject.com/en/dev/ref/settings/#secret-key SECRET_KEY = env("DJANGO_SECRET_KEY", default="<KEY>") # https://docs.djangoproject.com/en/dev/ref/settings/#test-runner TEST_RUNNER = "django.test.runner.DiscoverRunner" # CACHES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#caches CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "" } } # PASSWORDS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"] # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405 TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405 ( "django.template.loaders.cached.Loader", [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ], ) ] # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend" # https://docs.djangoproject.com/en/dev/ref/settings/#email-host EMAIL_HOST = "localhost" # https://docs.djangoproject.com/en/dev/ref/settings/#email-port EMAIL_PORT = 1025 # http://getblimp.github.io/django-rest-framework-jwt/#additional-settings JWT_AUTH = { 'JWT_ENCODE_HANDLER': 'rest_framework_jwt.utils.jwt_encode_handler', 'JWT_DECODE_HANDLER': 'rest_framework_jwt.utils.jwt_decode_handler', 
'JWT_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_payload_handler', 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler', 'JWT_RESPONSE_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_response_payload_handler', 'JWT_SECRET_KEY': SECRET_KEY, 'JWT_GET_USER_SECRET_KEY': None, 'JWT_PUBLIC_KEY': None, 'JWT_PRIVATE_KEY': None, 'JWT_ALGORITHM': 'HS256', 'JWT_VERIFY': True, 'JWT_VERIFY_EXPIRATION': True, 'JWT_LEEWAY': 0, 'JWT_EXPIRATION_DELTA': timedelta(seconds=300), 'JWT_AUDIENCE': None, 'JWT_ISSUER': None, 'JWT_ALLOW_REFRESH': False, 'JWT_REFRESH_EXPIRATION_DELTA': timedelta(days=7), 'JWT_AUTH_HEADER_PREFIX': 'JWT', 'JWT_AUTH_COOKIE': None, }
CritsAndCoffee.Instagram.API/config/settings/test.py
from .base import * # noqa from .base import env from datetime import timedelta # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = False # https://docs.djangoproject.com/en/dev/ref/settings/#secret-key SECRET_KEY = env("DJANGO_SECRET_KEY", default="<KEY>") # https://docs.djangoproject.com/en/dev/ref/settings/#test-runner TEST_RUNNER = "django.test.runner.DiscoverRunner" # CACHES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#caches CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "" } } # PASSWORDS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"] # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405 TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405 ( "django.template.loaders.cached.Loader", [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ], ) ] # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend" # https://docs.djangoproject.com/en/dev/ref/settings/#email-host EMAIL_HOST = "localhost" # https://docs.djangoproject.com/en/dev/ref/settings/#email-port EMAIL_PORT = 1025 # http://getblimp.github.io/django-rest-framework-jwt/#additional-settings JWT_AUTH = { 'JWT_ENCODE_HANDLER': 'rest_framework_jwt.utils.jwt_encode_handler', 'JWT_DECODE_HANDLER': 'rest_framework_jwt.utils.jwt_decode_handler', 
'JWT_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_payload_handler', 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler', 'JWT_RESPONSE_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_response_payload_handler', 'JWT_SECRET_KEY': SECRET_KEY, 'JWT_GET_USER_SECRET_KEY': None, 'JWT_PUBLIC_KEY': None, 'JWT_PRIVATE_KEY': None, 'JWT_ALGORITHM': 'HS256', 'JWT_VERIFY': True, 'JWT_VERIFY_EXPIRATION': True, 'JWT_LEEWAY': 0, 'JWT_EXPIRATION_DELTA': timedelta(seconds=300), 'JWT_AUDIENCE': None, 'JWT_ISSUER': None, 'JWT_ALLOW_REFRESH': False, 'JWT_REFRESH_EXPIRATION_DELTA': timedelta(days=7), 'JWT_AUTH_HEADER_PREFIX': 'JWT', 'JWT_AUTH_COOKIE': None, }
0.384912
0.071949
from mlmo.utils.constants.checkpoint import MODEL_PARAMS, OPTIMIZER_STATE from mlmo.interfaces import BaseIModel from torch.nn import Module from mlmo.utils.helpers.loading_and_saving import load_embeddings from torch.optim import Adam from logging import getLogger import torch as T from torch.nn.utils import clip_grad_norm_ from mlutils.tools.signature_scraper import repr_func from mlutils.helpers.general import select_matching_kwargs from mlmo.utils.helpers.pytorch.init import get_init_func import os logger = getLogger(os.path.basename(__file__)) class ITorchModel(BaseIModel): """This model interface is specific to models implemented in PyTorch.""" def __init__(self, model, learning_rate, device='cpu', optimizer=Adam, grads_clip=None, **kwargs): """ :param grads_clip: if float is passed will not allow gradients to exceed a certain threshold. Allows to prevent gradients explosion associated with RNNs. """ if not isinstance(model, Module): raise ValueError("Please provide a valid PyTorch model.") super(ITorchModel, self).__init__(model, **kwargs) self.optimizer = optimizer(self.model.parameters(), lr=learning_rate) self.device = device self.model.to(device) logger.info("Moved the model to: '%s'" % device) self.scraper.scrape_obj_vals = False self.grads_clip = grads_clip def train(self, batch, **kwrgs): """ Performs a training step on a single batch. Returns the internal metrics in a dict, such as negative log-likelihood or KL divergence. 
""" self.model.train() # setting the model to the train mode self.optimizer.zero_grad() # zero the gradients before backward pass kwargs = select_matching_kwargs(self.model.compute_loss, **batch.data) kwargs = _move_kwargs_to_device(device=self.device, **kwargs) _add_kwargs_to_dict(dct=kwargs, **kwrgs) loss, metrs = self.model.compute_loss(**kwargs) loss.backward() # clips the gradient if self.grads_clip is not None: clip_grad_norm_(self.model.parameters(), self.grads_clip) self.optimizer.step() return metrs def eval(self, batch, **kwrgs): """ Performs computation of loss and internal metrics on a single batch. Same as training, but the model is not updated. Returns the internal metrics in a dict. """ self.model.eval() # setting the model to the test mode kwargs = select_matching_kwargs(self.model.compute_loss, **batch.data) kwargs = _move_kwargs_to_device(device=self.device, **kwargs) _add_kwargs_to_dict(dct=kwargs, **kwrgs) with T.no_grad(): loss, metrs = self.model.compute_loss(**kwargs) return metrs def save_state(self, file_path, excl_model_params=None): model_params = self.model.state_dict() optimizer_params = self.optimizer.state_dict() if excl_model_params is not None: for p in excl_model_params: del model_params[p] T.save({MODEL_PARAMS: model_params, OPTIMIZER_STATE: optimizer_params}, file_path) logger.info("Saved the model's and optimizer's state to: '%s'." % file_path) def load_state(self, file_path, optimizer_state=False, strict=True): checkpoint = T.load(file_path, map_location=self.device) self.model.load_state_dict(checkpoint[MODEL_PARAMS], strict=strict) logger.info("Loaded the model's state from: '%s'." % file_path) if optimizer_state: self.optimizer.load_state_dict(checkpoint[OPTIMIZER_STATE]) logger.info("Loaded the optimizer's state from: '%s'." % file_path) def init_weights(self, multi_dim_init_func, single_dim_init_func): """Initializes weights using provided functions.""" logger.info("Initializing multi-dim weights with:" " %s." 
% repr_func(multi_dim_init_func)) logger.info("Initializing single-dim weights with:" " %s." % repr_func(single_dim_init_func)) init = get_init_func(multi_dim_init_func, single_dim_init_func) self.model.apply(init) def init_embeddings(self, file_path, embds_layer_name, vocab): """Sets input and output embedding tensors with pre-trained ones.""" embs = load_embeddings(file_path, vocab=vocab) embd_matr = T.tensor(embs).to(self.device) getattr(self.model, embds_layer_name).weight.data = embd_matr def __str__(self): return str(self.model) + "\n" + str(self.optimizer) def _move_kwargs_to_device(device, **kwargs): for k in kwargs: kwargs[k] = kwargs[k].to(device) return kwargs def _add_kwargs_to_dict(dct, **kwargs): """Adds new key-value pairs in-place to 'dct'.""" for k, v in kwargs.items(): if k in dct: raise ValueError("Key '%s' is already present in 'dct'.") dct[k] = v
mlmo/interfaces/i_torch_model.py
from mlmo.utils.constants.checkpoint import MODEL_PARAMS, OPTIMIZER_STATE from mlmo.interfaces import BaseIModel from torch.nn import Module from mlmo.utils.helpers.loading_and_saving import load_embeddings from torch.optim import Adam from logging import getLogger import torch as T from torch.nn.utils import clip_grad_norm_ from mlutils.tools.signature_scraper import repr_func from mlutils.helpers.general import select_matching_kwargs from mlmo.utils.helpers.pytorch.init import get_init_func import os logger = getLogger(os.path.basename(__file__)) class ITorchModel(BaseIModel): """This model interface is specific to models implemented in PyTorch.""" def __init__(self, model, learning_rate, device='cpu', optimizer=Adam, grads_clip=None, **kwargs): """ :param grads_clip: if float is passed will not allow gradients to exceed a certain threshold. Allows to prevent gradients explosion associated with RNNs. """ if not isinstance(model, Module): raise ValueError("Please provide a valid PyTorch model.") super(ITorchModel, self).__init__(model, **kwargs) self.optimizer = optimizer(self.model.parameters(), lr=learning_rate) self.device = device self.model.to(device) logger.info("Moved the model to: '%s'" % device) self.scraper.scrape_obj_vals = False self.grads_clip = grads_clip def train(self, batch, **kwrgs): """ Performs a training step on a single batch. Returns the internal metrics in a dict, such as negative log-likelihood or KL divergence. 
""" self.model.train() # setting the model to the train mode self.optimizer.zero_grad() # zero the gradients before backward pass kwargs = select_matching_kwargs(self.model.compute_loss, **batch.data) kwargs = _move_kwargs_to_device(device=self.device, **kwargs) _add_kwargs_to_dict(dct=kwargs, **kwrgs) loss, metrs = self.model.compute_loss(**kwargs) loss.backward() # clips the gradient if self.grads_clip is not None: clip_grad_norm_(self.model.parameters(), self.grads_clip) self.optimizer.step() return metrs def eval(self, batch, **kwrgs): """ Performs computation of loss and internal metrics on a single batch. Same as training, but the model is not updated. Returns the internal metrics in a dict. """ self.model.eval() # setting the model to the test mode kwargs = select_matching_kwargs(self.model.compute_loss, **batch.data) kwargs = _move_kwargs_to_device(device=self.device, **kwargs) _add_kwargs_to_dict(dct=kwargs, **kwrgs) with T.no_grad(): loss, metrs = self.model.compute_loss(**kwargs) return metrs def save_state(self, file_path, excl_model_params=None): model_params = self.model.state_dict() optimizer_params = self.optimizer.state_dict() if excl_model_params is not None: for p in excl_model_params: del model_params[p] T.save({MODEL_PARAMS: model_params, OPTIMIZER_STATE: optimizer_params}, file_path) logger.info("Saved the model's and optimizer's state to: '%s'." % file_path) def load_state(self, file_path, optimizer_state=False, strict=True): checkpoint = T.load(file_path, map_location=self.device) self.model.load_state_dict(checkpoint[MODEL_PARAMS], strict=strict) logger.info("Loaded the model's state from: '%s'." % file_path) if optimizer_state: self.optimizer.load_state_dict(checkpoint[OPTIMIZER_STATE]) logger.info("Loaded the optimizer's state from: '%s'." % file_path) def init_weights(self, multi_dim_init_func, single_dim_init_func): """Initializes weights using provided functions.""" logger.info("Initializing multi-dim weights with:" " %s." 
% repr_func(multi_dim_init_func)) logger.info("Initializing single-dim weights with:" " %s." % repr_func(single_dim_init_func)) init = get_init_func(multi_dim_init_func, single_dim_init_func) self.model.apply(init) def init_embeddings(self, file_path, embds_layer_name, vocab): """Sets input and output embedding tensors with pre-trained ones.""" embs = load_embeddings(file_path, vocab=vocab) embd_matr = T.tensor(embs).to(self.device) getattr(self.model, embds_layer_name).weight.data = embd_matr def __str__(self): return str(self.model) + "\n" + str(self.optimizer) def _move_kwargs_to_device(device, **kwargs): for k in kwargs: kwargs[k] = kwargs[k].to(device) return kwargs def _add_kwargs_to_dict(dct, **kwargs): """Adds new key-value pairs in-place to 'dct'.""" for k, v in kwargs.items(): if k in dct: raise ValueError("Key '%s' is already present in 'dct'.") dct[k] = v
0.894208
0.345547
from __future__ import division import datetime import logging import numpy as np from ...simple import load_result_file from utils.misc import display_progress from video.filters import FilterCrop, FilterDropFrames from video.io import VideoComposer from video.analysis.shapes import Rectangle def make_cropped_video(result_file, output_video=None, display='{time} [{frame}]', scale_bar=True, border_buffer_cm=0, frame_compression=1, time_duration=None, progress=True): """ function that crops a video to an antfarm. `result_file` is the file where the results from the video analysis are stored. This is usually a *.yaml file `output_video` denotes the filename where the result video should be written to. `display` determines what information is displayed. There are several variables that would be replaced by data: {time} the current time stamp {frame} the current frame number `scale_bar` determines whether a scale bar is shown `border_buffer_cm` sets the extra space (in units of cm) around the cropping rectangle that is included in the analysis `frame_compression` sets the compression factor that determines how many frames are dropped compared to the original video `time_duration` sets the maximal number of seconds the video is supposed to last. Additional frames will not be written. 
`progress` flag that determines whether the progress is displayed """ logging.info('Analyze video `%s`', result_file) # load the respective result file analyzer = load_result_file(result_file) # load the full original video video_info = analyzer.load_video(frames=(0, None)) # crop the video to the cage video_input = analyzer.video cropping_cage = analyzer.data['pass1/video/cropping_cage'] border_buffer_px = int(border_buffer_cm / analyzer.length_scale_mag) # change rectangle size if necessary if border_buffer_px != 0: cropping_rect = Rectangle.from_list(cropping_cage) video_rect = Rectangle(0, 0, video_input.width - 1, video_input.height - 1) cropping_rect.buffer(border_buffer_px) cropping_rect.intersect(video_rect) cropping_cage = cropping_rect.to_list() if cropping_cage: # size_alignment=2 makes sure that the width and height are even numbers video_input = FilterCrop(video_input, rect=cropping_cage, size_alignment=2) if frame_compression is not None and frame_compression != 1: video_input = FilterDropFrames(video_input, compression=frame_compression) if time_duration is not None: index_max = int(time_duration * video_input.fps) video_input = video_input[:index_max] # determine the filename of the output video if output_video is None: # determine the complete filename automatically movie_ext = analyzer.params['output/video/extension'] filename = 'cropped' output_video = analyzer.get_filename(filename + movie_ext, 'video') elif '.' 
not in output_video: # determine the extension automatically movie_ext = analyzer.params['output/video/extension'] output_video = output_video + movie_ext logging.info('Write output to `%s`', output_video) # create the video writer video_codec = analyzer.params['output/video/codec'] video_bitrate = analyzer.params['output/video/crop_bitrate'] if video_bitrate is None: video_bitrate = analyzer.params['output/video/bitrate'] fps = video_input.fps video_output = VideoComposer( output_video, size=video_input.size, fps=fps, is_color=video_input.is_color, codec=video_codec, bitrate=video_bitrate, ) # time label position label_pos = video_input.width // 2, 30 # calculate size of scale bar and the position of its label pixel_size_cm = analyzer.data['pass2/pixel_size_cm'] scale_bar_size_cm = 10 scale_bar_size_px = np.round(scale_bar_size_cm / pixel_size_cm) scale_bar_rect = Rectangle(30, 50, scale_bar_size_px, 5) scale_bar_pos = (30 + scale_bar_size_px//2, 30) if progress: video_input = display_progress(video_input) for frame_id, frame in enumerate(video_input): video_output.set_frame(frame, copy=True) if scale_bar: # show a scale bar video_output.add_rectangle(scale_bar_rect, width=-1) video_output.add_text(str('%g cm' % scale_bar_size_cm), scale_bar_pos, color='w', anchor='upper center') # gather data about this frame frame_data = {'frame': frame_id} # calculate time stamp time_secs, time_frac = divmod(frame_id, fps) time_msecs = int(1000 * time_frac / fps) dt = datetime.timedelta(seconds=time_secs, milliseconds=time_msecs) frame_data['time'] = str(dt) # output the display data if display: display_text = display.format(**frame_data) video_output.add_text(display_text, label_pos, color='w', anchor='upper center') # show summary frames_total = video_info['frames'][1] - video_info['frames'][0] frames_written = video_output.frames_written logging.info('%d (%d%%) of %d frames written', frames_written, 100 * frames_written // frames_total, frames_total) # close and finalize 
video try: video_output.close() except IOError: logging.exception('Error while writing out the debug video `%s`', video_output)
mouse_burrows/scripts/functions/cropped_movie.py
from __future__ import division import datetime import logging import numpy as np from ...simple import load_result_file from utils.misc import display_progress from video.filters import FilterCrop, FilterDropFrames from video.io import VideoComposer from video.analysis.shapes import Rectangle def make_cropped_video(result_file, output_video=None, display='{time} [{frame}]', scale_bar=True, border_buffer_cm=0, frame_compression=1, time_duration=None, progress=True): """ function that crops a video to an antfarm. `result_file` is the file where the results from the video analysis are stored. This is usually a *.yaml file `output_video` denotes the filename where the result video should be written to. `display` determines what information is displayed. There are several variables that would be replaced by data: {time} the current time stamp {frame} the current frame number `scale_bar` determines whether a scale bar is shown `border_buffer_cm` sets the extra space (in units of cm) around the cropping rectangle that is included in the analysis `frame_compression` sets the compression factor that determines how many frames are dropped compared to the original video `time_duration` sets the maximal number of seconds the video is supposed to last. Additional frames will not be written. 
`progress` flag that determines whether the progress is displayed """ logging.info('Analyze video `%s`', result_file) # load the respective result file analyzer = load_result_file(result_file) # load the full original video video_info = analyzer.load_video(frames=(0, None)) # crop the video to the cage video_input = analyzer.video cropping_cage = analyzer.data['pass1/video/cropping_cage'] border_buffer_px = int(border_buffer_cm / analyzer.length_scale_mag) # change rectangle size if necessary if border_buffer_px != 0: cropping_rect = Rectangle.from_list(cropping_cage) video_rect = Rectangle(0, 0, video_input.width - 1, video_input.height - 1) cropping_rect.buffer(border_buffer_px) cropping_rect.intersect(video_rect) cropping_cage = cropping_rect.to_list() if cropping_cage: # size_alignment=2 makes sure that the width and height are even numbers video_input = FilterCrop(video_input, rect=cropping_cage, size_alignment=2) if frame_compression is not None and frame_compression != 1: video_input = FilterDropFrames(video_input, compression=frame_compression) if time_duration is not None: index_max = int(time_duration * video_input.fps) video_input = video_input[:index_max] # determine the filename of the output video if output_video is None: # determine the complete filename automatically movie_ext = analyzer.params['output/video/extension'] filename = 'cropped' output_video = analyzer.get_filename(filename + movie_ext, 'video') elif '.' 
not in output_video: # determine the extension automatically movie_ext = analyzer.params['output/video/extension'] output_video = output_video + movie_ext logging.info('Write output to `%s`', output_video) # create the video writer video_codec = analyzer.params['output/video/codec'] video_bitrate = analyzer.params['output/video/crop_bitrate'] if video_bitrate is None: video_bitrate = analyzer.params['output/video/bitrate'] fps = video_input.fps video_output = VideoComposer( output_video, size=video_input.size, fps=fps, is_color=video_input.is_color, codec=video_codec, bitrate=video_bitrate, ) # time label position label_pos = video_input.width // 2, 30 # calculate size of scale bar and the position of its label pixel_size_cm = analyzer.data['pass2/pixel_size_cm'] scale_bar_size_cm = 10 scale_bar_size_px = np.round(scale_bar_size_cm / pixel_size_cm) scale_bar_rect = Rectangle(30, 50, scale_bar_size_px, 5) scale_bar_pos = (30 + scale_bar_size_px//2, 30) if progress: video_input = display_progress(video_input) for frame_id, frame in enumerate(video_input): video_output.set_frame(frame, copy=True) if scale_bar: # show a scale bar video_output.add_rectangle(scale_bar_rect, width=-1) video_output.add_text(str('%g cm' % scale_bar_size_cm), scale_bar_pos, color='w', anchor='upper center') # gather data about this frame frame_data = {'frame': frame_id} # calculate time stamp time_secs, time_frac = divmod(frame_id, fps) time_msecs = int(1000 * time_frac / fps) dt = datetime.timedelta(seconds=time_secs, milliseconds=time_msecs) frame_data['time'] = str(dt) # output the display data if display: display_text = display.format(**frame_data) video_output.add_text(display_text, label_pos, color='w', anchor='upper center') # show summary frames_total = video_info['frames'][1] - video_info['frames'][0] frames_written = video_output.frames_written logging.info('%d (%d%%) of %d frames written', frames_written, 100 * frames_written // frames_total, frames_total) # close and finalize 
video try: video_output.close() except IOError: logging.exception('Error while writing out the debug video `%s`', video_output)
0.627267
0.295471
import ddt from ggrc.models import all_models from integration.ggrc.access_control import rbac_factories from integration.ggrc.access_control.acl_propagation import base from integration.ggrc.utils import helpers @ddt.ddt class TestAuditorsPropagation(base.TestACLPropagation): """Test Audit Captains role permissions propagation.""" PERMISSIONS = { "Creator": { "Audit": { "read": True, "update": True, "delete": True, "clone": (True, "unimplemented"), "read_revisions": True, "map_control": True, "map_external_control": (False, "unimplemented"), "deprecate": True, "archive": False, "unarchive": (False, "unimplemented"), "summary": True, }, "Assessment": { "create": True, "generate": True, "read": True, "update": True, "delete": True, "read_revisions": True, "map_snapshot": True, "deprecate": True, "map_comment": True, "map_evidence": True, "related_assessments": True, "related_objects": True, "complete": True, "in_progress": True, "not_started": True, "decline": (False, "unimplemented"), "verify": (False, "unimplemented"), }, "AssessmentTemplate": { "create": True, "read": True, "update": True, "delete": True, "read_revisions": True, }, "Snapshot Assessment": { "read": True, "read_original": False, "update": True, "get_latest_version": (True, "unimplemented"), }, "Snapshot Audit": { "read": True, "read_original": False, "update": True, "get_latest_version": (True, "unimplemented"), }, "Issue Assessment": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "raise_issue": True, "unmap": True, }, "Issue Audit": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "unmap": True, }, "Evidence Audit": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True }, "Evidence Assessment": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True } }, 
"Reader": { "Audit": { "read": True, "update": True, "delete": True, "clone": (True, "unimplemented"), "read_revisions": True, "map_control": True, "map_external_control": (False, "unimplemented"), "deprecate": True, "archive": False, "unarchive": (False, "unimplemented"), "summary": True, }, "Assessment": { "create": True, "generate": True, "read": True, "update": True, "delete": True, "read_revisions": True, "map_snapshot": True, "deprecate": True, "map_comment": True, "map_evidence": True, "related_assessments": True, "related_objects": True, "complete": True, "in_progress": True, "not_started": True, "decline": (False, "unimplemented"), "verify": (False, "unimplemented"), }, "AssessmentTemplate": { "create": True, "read": True, "update": True, "delete": True, "read_revisions": True, }, "Snapshot Assessment": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Snapshot Audit": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Issue Assessment": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "raise_issue": True, "unmap": True, }, "Issue Audit": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "unmap": True, }, "Evidence Audit": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True }, "Evidence Assessment": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True } }, "Editor": { "Audit": { "read": True, "update": True, "delete": True, "clone": True, "read_revisions": True, "map_control": True, "map_external_control": True, "deprecate": True, "archive": False, "unarchive": (False, "unimplemented"), "summary": True, }, "Assessment": { "create": (False, "unimplemented"), "generate": (False, "unimplemented"), "read": True, "update": True, "delete": True, 
"read_revisions": True, "map_snapshot": True, "deprecate": True, "map_comment": True, "map_evidence": True, "related_assessments": True, "related_objects": True, "complete": True, "in_progress": True, "not_started": True, "decline": (False, "unimplemented"), "verify": (False, "unimplemented"), }, "AssessmentTemplate": { "create": True, "read": True, "update": True, "delete": True, "read_revisions": True, }, "Snapshot Assessment": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Snapshot Audit": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Issue Assessment": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": True, "create_and_map": True, "raise_issue": True, "unmap": True, }, "Issue Audit": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": True, "create_and_map": True, "unmap": True, }, "Evidence Audit": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True }, "Evidence Assessment": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True } }, } def init_factory(self, role, model, parent): """Initialize RBAC factory with propagated Audit Captains role. Args: role: Global Custom role that user have (Creator/Reader/Editor). model: Model name for which factory should be got. parent: Model name in scope of which objects should be installed. Returns: Initialized RBACFactory object. """ self.setup_people() captain_acr = all_models.AccessControlRole.query.filter_by( name="Audit Captains", object_type="Audit", ).first() rbac_factory = rbac_factories.TEST_FACTORIES_MAPPING[model] return rbac_factory(self.people[role].id, captain_acr, parent) @helpers.unwrap(PERMISSIONS) def test_access(self, role, model, action_name, expected_result): self.runtest(role, model, action_name, expected_result)
test/integration/ggrc/access_control/acl_propagation/test_audit_captains.py
import ddt from ggrc.models import all_models from integration.ggrc.access_control import rbac_factories from integration.ggrc.access_control.acl_propagation import base from integration.ggrc.utils import helpers @ddt.ddt class TestAuditorsPropagation(base.TestACLPropagation): """Test Audit Captains role permissions propagation.""" PERMISSIONS = { "Creator": { "Audit": { "read": True, "update": True, "delete": True, "clone": (True, "unimplemented"), "read_revisions": True, "map_control": True, "map_external_control": (False, "unimplemented"), "deprecate": True, "archive": False, "unarchive": (False, "unimplemented"), "summary": True, }, "Assessment": { "create": True, "generate": True, "read": True, "update": True, "delete": True, "read_revisions": True, "map_snapshot": True, "deprecate": True, "map_comment": True, "map_evidence": True, "related_assessments": True, "related_objects": True, "complete": True, "in_progress": True, "not_started": True, "decline": (False, "unimplemented"), "verify": (False, "unimplemented"), }, "AssessmentTemplate": { "create": True, "read": True, "update": True, "delete": True, "read_revisions": True, }, "Snapshot Assessment": { "read": True, "read_original": False, "update": True, "get_latest_version": (True, "unimplemented"), }, "Snapshot Audit": { "read": True, "read_original": False, "update": True, "get_latest_version": (True, "unimplemented"), }, "Issue Assessment": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "raise_issue": True, "unmap": True, }, "Issue Audit": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "unmap": True, }, "Evidence Audit": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True }, "Evidence Assessment": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True } }, 
"Reader": { "Audit": { "read": True, "update": True, "delete": True, "clone": (True, "unimplemented"), "read_revisions": True, "map_control": True, "map_external_control": (False, "unimplemented"), "deprecate": True, "archive": False, "unarchive": (False, "unimplemented"), "summary": True, }, "Assessment": { "create": True, "generate": True, "read": True, "update": True, "delete": True, "read_revisions": True, "map_snapshot": True, "deprecate": True, "map_comment": True, "map_evidence": True, "related_assessments": True, "related_objects": True, "complete": True, "in_progress": True, "not_started": True, "decline": (False, "unimplemented"), "verify": (False, "unimplemented"), }, "AssessmentTemplate": { "create": True, "read": True, "update": True, "delete": True, "read_revisions": True, }, "Snapshot Assessment": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Snapshot Audit": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Issue Assessment": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "raise_issue": True, "unmap": True, }, "Issue Audit": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": False, "create_and_map": True, "unmap": True, }, "Evidence Audit": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True }, "Evidence Assessment": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True } }, "Editor": { "Audit": { "read": True, "update": True, "delete": True, "clone": True, "read_revisions": True, "map_control": True, "map_external_control": True, "deprecate": True, "archive": False, "unarchive": (False, "unimplemented"), "summary": True, }, "Assessment": { "create": (False, "unimplemented"), "generate": (False, "unimplemented"), "read": True, "update": True, "delete": True, 
"read_revisions": True, "map_snapshot": True, "deprecate": True, "map_comment": True, "map_evidence": True, "related_assessments": True, "related_objects": True, "complete": True, "in_progress": True, "not_started": True, "decline": (False, "unimplemented"), "verify": (False, "unimplemented"), }, "AssessmentTemplate": { "create": True, "read": True, "update": True, "delete": True, "read_revisions": True, }, "Snapshot Assessment": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Snapshot Audit": { "read": True, "read_original": True, "update": True, "get_latest_version": True, }, "Issue Assessment": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": True, "create_and_map": True, "raise_issue": True, "unmap": True, }, "Issue Audit": { "read": True, "update": True, "delete": True, "read_revisions": True, "map": True, "create_and_map": True, "unmap": True, }, "Evidence Audit": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True }, "Evidence Assessment": { "create_and_map": True, "read": True, "update": True, "delete": False, "read_comments": True, "add_comment": True } }, } def init_factory(self, role, model, parent): """Initialize RBAC factory with propagated Audit Captains role. Args: role: Global Custom role that user have (Creator/Reader/Editor). model: Model name for which factory should be got. parent: Model name in scope of which objects should be installed. Returns: Initialized RBACFactory object. """ self.setup_people() captain_acr = all_models.AccessControlRole.query.filter_by( name="Audit Captains", object_type="Audit", ).first() rbac_factory = rbac_factories.TEST_FACTORIES_MAPPING[model] return rbac_factory(self.people[role].id, captain_acr, parent) @helpers.unwrap(PERMISSIONS) def test_access(self, role, model, action_name, expected_result): self.runtest(role, model, action_name, expected_result)
0.397354
0.278574
import os import torch import torchvision import cnn_models.conv_forward_model as convForwModel import cnn_models.help_fun as cnn_hf import datasets import model_manager from cnn_models.wide_resnet_imagenet import Wide_ResNet_imagenet import cnn_models.resnet_kfilters as resnet_kfilters import functools import quantization import helpers.functions as mhf cuda_devices = os.environ['CUDA_VISIBLE_DEVICES'].split(',') print('CUDA_VISIBLE_DEVICES: {} for a total of {} GPUs'.format(cuda_devices, len(cuda_devices))) print('Number of bits in training: {}'.format(4)) datasets.BASE_DATA_FOLDER = '...' SAVED_MODELS_FOLDER = '...' USE_CUDA = torch.cuda.is_available() NUM_GPUS = len(cuda_devices) try: os.mkdir(datasets.BASE_DATA_FOLDER) except:pass try: os.mkdir(SAVED_MODELS_FOLDER) except:pass epochsToTrainImageNet = 90 imageNet12modelsFolder = os.path.join(SAVED_MODELS_FOLDER, 'imagenet12_new') imagenet_manager = model_manager.ModelManager('model_manager_resnet34double.tst', 'model_manager', create_new_model_manager=False) for x in imagenet_manager.list_models(): if imagenet_manager.get_num_training_runs(x) >= 1: s = '{}; Last prediction acc: {}, Best prediction acc: {}'.format(x, imagenet_manager.load_metadata(x)[1]['predictionAccuracy'][-1], max(imagenet_manager.load_metadata(x)[1]['predictionAccuracy'])) print(s) try: os.mkdir(imageNet12modelsFolder) except:pass TRAIN_QUANTIZED_DISTILLED = True print('Batch size: {}'.format(batch_size)) if batch_size % NUM_GPUS != 0: raise ValueError('Batch size: {} must be a multiple of the number of gpus:{}'.format(batch_size, NUM_GPUS)) imageNet12 = datasets.ImageNet12('...', '...', type_of_data_augmentation='extended', already_scaled=False, pin_memory=True) train_loader = imageNet12.getTrainLoader(batch_size, shuffle=True) test_loader = imageNet12.getTestLoader(batch_size, shuffle=False) # # Teacher model resnet34 = torchvision.models.resnet34(True) #already trained if USE_CUDA: resnet34 = resnet34.cuda() if NUM_GPUS > 1: resnet34 = 
torch.nn.parallel.DataParallel(resnet34) #Train a wide-resNet with quantized distillation quant_distilled_model_name = 'resnet18_1.5xfilters_quant_distilled4bits' quantDistilledModelPath = os.path.join(imageNet12modelsFolder, quant_distilled_model_name) quantDistilledOptions = {} quant_distilled_model = resnet_kfilters.resnet18(k=1.5) if USE_CUDA: quant_distilled_model = quant_distilled_model.cuda() if NUM_GPUS > 1: quant_distilled_model = torch.nn.parallel.DataParallel(quant_distilled_model) if not quant_distilled_model_name in imagenet_manager.saved_models: imagenet_manager.add_new_model(quant_distilled_model_name, quantDistilledModelPath, arguments_creator_function=quantDistilledOptions) if TRAIN_QUANTIZED_DISTILLED: imagenet_manager.train_model(quant_distilled_model, model_name=quant_distilled_model_name, train_function=convForwModel.train_model, arguments_train_function={'epochs_to_train': epochsToTrainImageNet, 'learning_rate_style': 'imagenet', 'initial_learning_rate': 0.1, 'use_nesterov':True, 'initial_momentum':0.9, 'weight_decayL2':1e-4, 'start_epoch': 0, 'print_every':30, 'use_distillation_loss':True, 'teacher_model': resnet34, 'quantizeWeights':True, 'numBits':4, 'bucket_size':256, 'quantize_first_and_last_layer': False}, train_loader=train_loader, test_loader=test_loader) quant_distilled_model.load_state_dict(imagenet_manager.load_model_state_dict(quant_distilled_model_name)) # print(cnn_hf.evaluateModel(quant_distilled_model, test_loader, fastEvaluation=False)) # print(cnn_hf.evaluateModel(quant_distilled_model, test_loader, fastEvaluation=False, k=5)) # print(cnn_hf.evaluateModel(resnet34, test_loader, fastEvaluation=False)) # print(cnn_hf.evaluateModel(resnet34, test_loader, fastEvaluation=False, k=5)) # quant_fun = functools.partial(quantization.uniformQuantization, s=2**4, bucket_size=256) # size_mb = mhf.get_size_quantized_model(quant_distilled_model, 4, quant_fun, 256, # quantizeFirstLastLayer=False) # print(size_mb) # 
print(mhf.getNumberOfParameters(quant_distilled_model)/1000000) # print(mhf.getNumberOfParameters(resnet34) / 1000000)
resnet34_doublefilters.py
import os import torch import torchvision import cnn_models.conv_forward_model as convForwModel import cnn_models.help_fun as cnn_hf import datasets import model_manager from cnn_models.wide_resnet_imagenet import Wide_ResNet_imagenet import cnn_models.resnet_kfilters as resnet_kfilters import functools import quantization import helpers.functions as mhf cuda_devices = os.environ['CUDA_VISIBLE_DEVICES'].split(',') print('CUDA_VISIBLE_DEVICES: {} for a total of {} GPUs'.format(cuda_devices, len(cuda_devices))) print('Number of bits in training: {}'.format(4)) datasets.BASE_DATA_FOLDER = '...' SAVED_MODELS_FOLDER = '...' USE_CUDA = torch.cuda.is_available() NUM_GPUS = len(cuda_devices) try: os.mkdir(datasets.BASE_DATA_FOLDER) except:pass try: os.mkdir(SAVED_MODELS_FOLDER) except:pass epochsToTrainImageNet = 90 imageNet12modelsFolder = os.path.join(SAVED_MODELS_FOLDER, 'imagenet12_new') imagenet_manager = model_manager.ModelManager('model_manager_resnet34double.tst', 'model_manager', create_new_model_manager=False) for x in imagenet_manager.list_models(): if imagenet_manager.get_num_training_runs(x) >= 1: s = '{}; Last prediction acc: {}, Best prediction acc: {}'.format(x, imagenet_manager.load_metadata(x)[1]['predictionAccuracy'][-1], max(imagenet_manager.load_metadata(x)[1]['predictionAccuracy'])) print(s) try: os.mkdir(imageNet12modelsFolder) except:pass TRAIN_QUANTIZED_DISTILLED = True print('Batch size: {}'.format(batch_size)) if batch_size % NUM_GPUS != 0: raise ValueError('Batch size: {} must be a multiple of the number of gpus:{}'.format(batch_size, NUM_GPUS)) imageNet12 = datasets.ImageNet12('...', '...', type_of_data_augmentation='extended', already_scaled=False, pin_memory=True) train_loader = imageNet12.getTrainLoader(batch_size, shuffle=True) test_loader = imageNet12.getTestLoader(batch_size, shuffle=False) # # Teacher model resnet34 = torchvision.models.resnet34(True) #already trained if USE_CUDA: resnet34 = resnet34.cuda() if NUM_GPUS > 1: resnet34 = 
torch.nn.parallel.DataParallel(resnet34) #Train a wide-resNet with quantized distillation quant_distilled_model_name = 'resnet18_1.5xfilters_quant_distilled4bits' quantDistilledModelPath = os.path.join(imageNet12modelsFolder, quant_distilled_model_name) quantDistilledOptions = {} quant_distilled_model = resnet_kfilters.resnet18(k=1.5) if USE_CUDA: quant_distilled_model = quant_distilled_model.cuda() if NUM_GPUS > 1: quant_distilled_model = torch.nn.parallel.DataParallel(quant_distilled_model) if not quant_distilled_model_name in imagenet_manager.saved_models: imagenet_manager.add_new_model(quant_distilled_model_name, quantDistilledModelPath, arguments_creator_function=quantDistilledOptions) if TRAIN_QUANTIZED_DISTILLED: imagenet_manager.train_model(quant_distilled_model, model_name=quant_distilled_model_name, train_function=convForwModel.train_model, arguments_train_function={'epochs_to_train': epochsToTrainImageNet, 'learning_rate_style': 'imagenet', 'initial_learning_rate': 0.1, 'use_nesterov':True, 'initial_momentum':0.9, 'weight_decayL2':1e-4, 'start_epoch': 0, 'print_every':30, 'use_distillation_loss':True, 'teacher_model': resnet34, 'quantizeWeights':True, 'numBits':4, 'bucket_size':256, 'quantize_first_and_last_layer': False}, train_loader=train_loader, test_loader=test_loader) quant_distilled_model.load_state_dict(imagenet_manager.load_model_state_dict(quant_distilled_model_name)) # print(cnn_hf.evaluateModel(quant_distilled_model, test_loader, fastEvaluation=False)) # print(cnn_hf.evaluateModel(quant_distilled_model, test_loader, fastEvaluation=False, k=5)) # print(cnn_hf.evaluateModel(resnet34, test_loader, fastEvaluation=False)) # print(cnn_hf.evaluateModel(resnet34, test_loader, fastEvaluation=False, k=5)) # quant_fun = functools.partial(quantization.uniformQuantization, s=2**4, bucket_size=256) # size_mb = mhf.get_size_quantized_model(quant_distilled_model, 4, quant_fun, 256, # quantizeFirstLastLayer=False) # print(size_mb) # 
print(mhf.getNumberOfParameters(quant_distilled_model)/1000000) # print(mhf.getNumberOfParameters(resnet34) / 1000000)
0.346099
0.152253
import time from gaiatest import GaiaTestCase from gaiatest.apps.messages.app import Messages from gaiatest.mocks.mock_contact import MockContact from gaiatest.apps.contacts.regions.contact_form import NewContact from gaiatest.apps.contacts.app import Contacts class TestSmsCreateContact(GaiaTestCase): def setUp(self): GaiaTestCase.setUp(self) _text_message_content = "Automated Test %s" % str(time.time()) self.data_layer.send_sms(self.testvars['carrier']['phone_number'], _text_message_content) self.messages = Messages(self.marionette) self.messages.launch() def test_sms_create_new_contact(self): self.contact = MockContact() self.message_thread = self.messages.tap_first_received_message() self.message_thread.wait_for_received_messages() # Check that we received the correct message self.assertEqual(self.message_thread.header_text, self.testvars['carrier']['phone_number']) activities = self.message_thread.tap_header() # Create a new contact activities.tap_create_new_contact() # Populate new contact fields new_contact = NewContact(self.marionette) new_contact.type_given_name(self.contact['givenName']) new_contact.type_family_name(self.contact['familyName']) new_contact.type_email(self.contact['email']['value']) new_contact.type_street(self.contact['adr']['streetAddress']) new_contact.type_zip_code(self.contact['adr']['postalCode']) new_contact.type_city(self.contact['adr']['locality']) new_contact.type_country(self.contact['adr']['countryName']) new_contact.type_comment(self.contact['note']) new_contact.tap_done(return_contacts=False) self.messages.switch_to_messages_frame() self.wait_for_condition(lambda m: self.message_thread.header_text == self.contact['name']) contacts = Contacts(self.marionette) contacts.launch() contact_details = contacts.contacts[0].tap() self.assertEqual(contact_details.phone_numbers[0], self.testvars['carrier']['phone_number'])
tests/python/gaia-ui-tests/gaiatest/tests/functional/messages/test_add_to_new_contact_from_messages.py
import time from gaiatest import GaiaTestCase from gaiatest.apps.messages.app import Messages from gaiatest.mocks.mock_contact import MockContact from gaiatest.apps.contacts.regions.contact_form import NewContact from gaiatest.apps.contacts.app import Contacts class TestSmsCreateContact(GaiaTestCase): def setUp(self): GaiaTestCase.setUp(self) _text_message_content = "Automated Test %s" % str(time.time()) self.data_layer.send_sms(self.testvars['carrier']['phone_number'], _text_message_content) self.messages = Messages(self.marionette) self.messages.launch() def test_sms_create_new_contact(self): self.contact = MockContact() self.message_thread = self.messages.tap_first_received_message() self.message_thread.wait_for_received_messages() # Check that we received the correct message self.assertEqual(self.message_thread.header_text, self.testvars['carrier']['phone_number']) activities = self.message_thread.tap_header() # Create a new contact activities.tap_create_new_contact() # Populate new contact fields new_contact = NewContact(self.marionette) new_contact.type_given_name(self.contact['givenName']) new_contact.type_family_name(self.contact['familyName']) new_contact.type_email(self.contact['email']['value']) new_contact.type_street(self.contact['adr']['streetAddress']) new_contact.type_zip_code(self.contact['adr']['postalCode']) new_contact.type_city(self.contact['adr']['locality']) new_contact.type_country(self.contact['adr']['countryName']) new_contact.type_comment(self.contact['note']) new_contact.tap_done(return_contacts=False) self.messages.switch_to_messages_frame() self.wait_for_condition(lambda m: self.message_thread.header_text == self.contact['name']) contacts = Contacts(self.marionette) contacts.launch() contact_details = contacts.contacts[0].tap() self.assertEqual(contact_details.phone_numbers[0], self.testvars['carrier']['phone_number'])
0.558688
0.117016
# # s_shrink_cov_glasso [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_shrink_cov_glasso&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=Glasso_estimate). # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import networkx as nx import seaborn as sns from arpym.estimation import cov_2_corr, markov_network from arpym.tools import add_logo # - # ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_cov_glasso-parameters) i_ = 60 # number of invariants lambda_vec = np.arange(0, 0.6, 10**-2) # glasso penalty n_plot = 40 # number of stocks for plotting # ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_cov_glasso-implementation-step00): Load data # + path_temp = '../../../databases/temporary-databases/' # Invariants db_epsi = pd.read_csv(path_temp + 'db_fit_garch_stocks_epsi.csv', index_col=0, parse_dates=True) db_epsi = db_epsi.iloc[:, :i_] dates = db_epsi.index t_ = len(dates) stocks_names = db_epsi.columns epsi = db_epsi.values # Location-dispersion db_locdisp = pd.read_csv(path_temp + 'db_fit_garch_stocks_locdisp.csv') mu_hat = db_locdisp.loc[:, 'mu_hat'].values[:i_] sig2_hat = db_locdisp.loc[:, 'sig2_hat'].values i_tot = int(np.sqrt(len(sig2_hat))) sig2_hat = sig2_hat.reshape(i_tot, i_tot)[:i_, :i_] sig2_hat = cov_2_corr(sig2_hat)[0] phi2_hat = np.linalg.solve(sig2_hat, np.eye(i_)) # - # ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_cov_glasso-implementation-step01): Glasso shrinkage k = int(i_*(i_-1)) # shrink all covariances to 0 sig2_glasso, _, phi2_glasso, lam, conv, _ =\ markov_network(sig2_hat, k, lambda_vec) # ## Plots # + plt.style.use('arpm') # Graph nonzero = np.count_nonzero(phi2_glasso[:n_plot, :n_plot]) num_edge = (nonzero - i_) / 2 fig = plt.figure(figsize=(1280.0/72, 720.0/72), dpi=72) ax = plt.subplot2grid((2, 4), (0, 
1), colspan=2) bb = np.where(phi2_glasso[:n_plot, :n_plot] != 0, 1, 0) rows, cols = np.where(bb != 0) edges = list(zip(rows.tolist(), cols.tolist())) gr = nx.Graph() gr.add_edges_from(edges) nx.draw_circular(gr, node_shape='o', node_color='b', ax=ax) plt.axis([-1.05, 1.05, -1.05, 1.5]) text1 = 'Optimal penalty = %1.2e' % lam plt.text(-1, 1.25, text1, verticalalignment='bottom', horizontalalignment='left', fontsize=20) text2 = 'Num. edges = %3.0f' % num_edge plt.text(-1, 1.1, text2, verticalalignment='bottom', horizontalalignment='left', fontsize=20) plt.title('Markov network structure', fontweight='bold', fontsize=20) # Covariances minncov = np.min(np.c_[sig2_hat[:n_plot, :n_plot], sig2_glasso[:n_plot, :n_plot]]) maxxcov = np.max(np.c_[sig2_hat[:n_plot, :n_plot], sig2_glasso[:n_plot, :n_plot]]) minncorr = np.min(np.c_[phi2_hat[:n_plot, :n_plot], phi2_glasso[:n_plot, :n_plot]]) maxxcorr = np.max(np.c_[phi2_hat[:n_plot, :n_plot], phi2_glasso[:n_plot, :n_plot]]) ax1 = plt.subplot2grid((2, 4), (1, 0), colspan=1) ax1 = sns.heatmap(sig2_hat[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncov, vmax=maxxcov, square=True) plt.title('HFP corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) ax12 = plt.subplot2grid((2, 4), (1, 1), colspan=1) ax12 = sns.heatmap(phi2_hat[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncorr, vmax=maxxcorr, square=True) plt.title('HFP inv. 
corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) ax2 = plt.subplot2grid((2, 4), (1, 2), colspan=1) ax2 = sns.heatmap(sig2_glasso[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncov, vmax=maxxcov, square=True) plt.title('Glasso corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) ax22 = plt.subplot2grid((2, 4), (1, 3), colspan=1) ax22 = sns.heatmap(phi2_glasso[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncorr, vmax=maxxcorr, square=True) plt.title('Glasso inv. corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) add_logo(fig, axis=ax, set_fig_size=False) plt.tight_layout()
scripts/sources/s_shrink_cov_glasso.py
# # s_shrink_cov_glasso [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_shrink_cov_glasso&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=Glasso_estimate). # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import networkx as nx import seaborn as sns from arpym.estimation import cov_2_corr, markov_network from arpym.tools import add_logo # - # ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_cov_glasso-parameters) i_ = 60 # number of invariants lambda_vec = np.arange(0, 0.6, 10**-2) # glasso penalty n_plot = 40 # number of stocks for plotting # ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_cov_glasso-implementation-step00): Load data # + path_temp = '../../../databases/temporary-databases/' # Invariants db_epsi = pd.read_csv(path_temp + 'db_fit_garch_stocks_epsi.csv', index_col=0, parse_dates=True) db_epsi = db_epsi.iloc[:, :i_] dates = db_epsi.index t_ = len(dates) stocks_names = db_epsi.columns epsi = db_epsi.values # Location-dispersion db_locdisp = pd.read_csv(path_temp + 'db_fit_garch_stocks_locdisp.csv') mu_hat = db_locdisp.loc[:, 'mu_hat'].values[:i_] sig2_hat = db_locdisp.loc[:, 'sig2_hat'].values i_tot = int(np.sqrt(len(sig2_hat))) sig2_hat = sig2_hat.reshape(i_tot, i_tot)[:i_, :i_] sig2_hat = cov_2_corr(sig2_hat)[0] phi2_hat = np.linalg.solve(sig2_hat, np.eye(i_)) # - # ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_shrink_cov_glasso-implementation-step01): Glasso shrinkage k = int(i_*(i_-1)) # shrink all covariances to 0 sig2_glasso, _, phi2_glasso, lam, conv, _ =\ markov_network(sig2_hat, k, lambda_vec) # ## Plots # + plt.style.use('arpm') # Graph nonzero = np.count_nonzero(phi2_glasso[:n_plot, :n_plot]) num_edge = (nonzero - i_) / 2 fig = plt.figure(figsize=(1280.0/72, 720.0/72), dpi=72) ax = plt.subplot2grid((2, 4), (0, 
1), colspan=2) bb = np.where(phi2_glasso[:n_plot, :n_plot] != 0, 1, 0) rows, cols = np.where(bb != 0) edges = list(zip(rows.tolist(), cols.tolist())) gr = nx.Graph() gr.add_edges_from(edges) nx.draw_circular(gr, node_shape='o', node_color='b', ax=ax) plt.axis([-1.05, 1.05, -1.05, 1.5]) text1 = 'Optimal penalty = %1.2e' % lam plt.text(-1, 1.25, text1, verticalalignment='bottom', horizontalalignment='left', fontsize=20) text2 = 'Num. edges = %3.0f' % num_edge plt.text(-1, 1.1, text2, verticalalignment='bottom', horizontalalignment='left', fontsize=20) plt.title('Markov network structure', fontweight='bold', fontsize=20) # Covariances minncov = np.min(np.c_[sig2_hat[:n_plot, :n_plot], sig2_glasso[:n_plot, :n_plot]]) maxxcov = np.max(np.c_[sig2_hat[:n_plot, :n_plot], sig2_glasso[:n_plot, :n_plot]]) minncorr = np.min(np.c_[phi2_hat[:n_plot, :n_plot], phi2_glasso[:n_plot, :n_plot]]) maxxcorr = np.max(np.c_[phi2_hat[:n_plot, :n_plot], phi2_glasso[:n_plot, :n_plot]]) ax1 = plt.subplot2grid((2, 4), (1, 0), colspan=1) ax1 = sns.heatmap(sig2_hat[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncov, vmax=maxxcov, square=True) plt.title('HFP corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) ax12 = plt.subplot2grid((2, 4), (1, 1), colspan=1) ax12 = sns.heatmap(phi2_hat[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncorr, vmax=maxxcorr, square=True) plt.title('HFP inv. 
corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) ax2 = plt.subplot2grid((2, 4), (1, 2), colspan=1) ax2 = sns.heatmap(sig2_glasso[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncov, vmax=maxxcov, square=True) plt.title('Glasso corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) ax22 = plt.subplot2grid((2, 4), (1, 3), colspan=1) ax22 = sns.heatmap(phi2_glasso[:n_plot, :n_plot], cmap='BrBG', center=0, xticklabels=stocks_names[:n_plot], yticklabels=stocks_names[:n_plot], vmin=minncorr, vmax=maxxcorr, square=True) plt.title('Glasso inv. corr.', fontweight='bold', fontsize=20) plt.xticks(fontsize=9) plt.yticks(fontsize=9) add_logo(fig, axis=ax, set_fig_size=False) plt.tight_layout()
0.500244
0.567577
from numpy.linalg import svd from numpy import array, sqrt, sum, zeros, trace, dot, transpose,\ divide, square, subtract, shape, any, abs, mean from numpy import append as numpy_append __author__ = "<NAME>" __copyright__ = "Copyright 2007-2012, The Cogent Project" __credits__ = ["<NAME>"] __license__ = "GPL" __version__ = "1.5.3" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Production" def procrustes(data1, data2): """Procrustes analysis, a similarity test for two data sets. Each input matrix is a set of points or vectors (the rows of the matrix) The dimension of the space is the number of columns of each matrix. Given two identially sized matrices, procrustes standardizes both such that: - trace(AA') = 1 (A' is the transpose, and the product is a standard matrix product). - Both sets of points are centered around the origin Procrustes then applies the optimal transform to the second matrix (including scaling/dilation, rotations, and reflections) to minimize M^2 = sum(square(mtx1 - mtx2)), or the sum of the squares of the pointwise differences between the two input datasets If two data sets have different dimensionality (different number of columns), simply add columns of zeros the the smaller of the two. This function was not designed to handle datasets with different numbers of datapoints (rows) Arguments: - data1: matrix, n rows represent points in k (columns) space data1 is the reference data, after it is standardised, the data from data2 will be transformed to fit the pattern in data1 - data2: n rows of data in k space to be fit to data1. Must be the same shape (numrows, numcols) as data1 - both must have >1 unique points Returns: - mtx1: a standardized version of data1 - mtx2: the orientation of data2 that best fits data1. 
centered, but not necessarily trace(mtx2*mtx2') = 1 - disparity: a metric for the dissimilarity of the two datasets, disparity = M^2 defined above Notes: - The disparity should not depend on the order of the input matrices, but the output matrices will, as only the first output matrix is guaranteed to be scaled such that trace(AA') = 1. - duplicate datapoints are generally ok, duplicating a data point will increase it's effect on the procrustes fit. - the disparity scales as the number of points per input matrix """ SMALL_NUM = 1e-6 # used to check for zero values in added dimension # make local copies # mtx1 = array(data1.copy(),'d') # mtx2 = array(data2.copy(),'d') num_rows, num_cols = shape(data1) if (num_rows, num_cols) != shape(data2): raise ValueError("input matrices must be of same shape") if (num_rows == 0 or num_cols == 0): raise ValueError("input matrices must be >0 rows, >0 cols") # add a dimension to allow reflections (rotations in n + 1 dimensions) mtx1 = numpy_append(data1, zeros((num_rows, 1)), 1) mtx2 = numpy_append(data2, zeros((num_rows, 1)), 1) # standardize each matrix mtx1 = center(mtx1) mtx2 = center(mtx2) if ((not any(mtx1)) or (not any(mtx2))): raise ValueError("input matrices must contain >1 unique points") mtx1 = normalize(mtx1) mtx2 = normalize(mtx2) # transform mtx2 to minimize disparity (sum( (mtx1[i,j] - mtx2[i,j])^2) ) mtx2 = match_points(mtx1, mtx2) # WARNING: I haven't proven that after matching the matrices, no point has # a nonzero component in the added dimension. 
I believe it is true, # though, since the unchanged matrix has no points extending into # that dimension if any(abs(mtx2[:,-1]) > SMALL_NUM): raise StandardError("we have accidentially added a dimension to \ the matrix, and the vectors have nonzero components in that dimension") # strip extra dimension which was added to allow reflections mtx1 = mtx1[:,:-1] mtx2 = mtx2[:,:-1] disparity = get_disparity(mtx1, mtx2) return mtx1, mtx2, disparity def center(mtx): """translate all data (rows of the matrix) to center on the origin returns a shifted version of the input data. The new matrix is such that the center of mass of the row vectors is centered at the origin. Returns a numpy float ('d') array """ result = array(mtx, 'd') result -= mean(result, 0) # subtract each column's mean from each element in that column return result def normalize(mtx): """change scaling of data (in rows) such that trace(mtx*mtx') = 1 mtx' denotes the transpose of mtx """ result = array(mtx, 'd') num_pts, num_dims = shape(result) mag = trace(dot(result, transpose(result))) norm = sqrt(mag) result /= norm return result def match_points(mtx1, mtx2): """returns a transformed mtx2 that matches mtx1. returns a new matrix which is a transform of mtx2. Scales and rotates a copy of mtx 2. See procrustes docs for details. """ u,s,vh = svd(dot(transpose(mtx1), mtx2)) q = dot(transpose(vh), transpose(u)) new_mtx2 = dot(mtx2, q) new_mtx2 *= sum(s) return new_mtx2 def get_disparity(mtx1, mtx2): """ returns a measure of the dissimilarity between two data sets returns M^2 = sum(square(mtx1 - mtx2)), the pointwise sum of squared differences""" return(sum(square(mtx1 - mtx2)))
scripts/venv/lib/python2.7/site-packages/cogent/cluster/procrustes.py
from numpy.linalg import svd from numpy import array, sqrt, sum, zeros, trace, dot, transpose,\ divide, square, subtract, shape, any, abs, mean from numpy import append as numpy_append __author__ = "<NAME>" __copyright__ = "Copyright 2007-2012, The Cogent Project" __credits__ = ["<NAME>"] __license__ = "GPL" __version__ = "1.5.3" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Production" def procrustes(data1, data2): """Procrustes analysis, a similarity test for two data sets. Each input matrix is a set of points or vectors (the rows of the matrix) The dimension of the space is the number of columns of each matrix. Given two identially sized matrices, procrustes standardizes both such that: - trace(AA') = 1 (A' is the transpose, and the product is a standard matrix product). - Both sets of points are centered around the origin Procrustes then applies the optimal transform to the second matrix (including scaling/dilation, rotations, and reflections) to minimize M^2 = sum(square(mtx1 - mtx2)), or the sum of the squares of the pointwise differences between the two input datasets If two data sets have different dimensionality (different number of columns), simply add columns of zeros the the smaller of the two. This function was not designed to handle datasets with different numbers of datapoints (rows) Arguments: - data1: matrix, n rows represent points in k (columns) space data1 is the reference data, after it is standardised, the data from data2 will be transformed to fit the pattern in data1 - data2: n rows of data in k space to be fit to data1. Must be the same shape (numrows, numcols) as data1 - both must have >1 unique points Returns: - mtx1: a standardized version of data1 - mtx2: the orientation of data2 that best fits data1. 
centered, but not necessarily trace(mtx2*mtx2') = 1 - disparity: a metric for the dissimilarity of the two datasets, disparity = M^2 defined above Notes: - The disparity should not depend on the order of the input matrices, but the output matrices will, as only the first output matrix is guaranteed to be scaled such that trace(AA') = 1. - duplicate datapoints are generally ok, duplicating a data point will increase it's effect on the procrustes fit. - the disparity scales as the number of points per input matrix """ SMALL_NUM = 1e-6 # used to check for zero values in added dimension # make local copies # mtx1 = array(data1.copy(),'d') # mtx2 = array(data2.copy(),'d') num_rows, num_cols = shape(data1) if (num_rows, num_cols) != shape(data2): raise ValueError("input matrices must be of same shape") if (num_rows == 0 or num_cols == 0): raise ValueError("input matrices must be >0 rows, >0 cols") # add a dimension to allow reflections (rotations in n + 1 dimensions) mtx1 = numpy_append(data1, zeros((num_rows, 1)), 1) mtx2 = numpy_append(data2, zeros((num_rows, 1)), 1) # standardize each matrix mtx1 = center(mtx1) mtx2 = center(mtx2) if ((not any(mtx1)) or (not any(mtx2))): raise ValueError("input matrices must contain >1 unique points") mtx1 = normalize(mtx1) mtx2 = normalize(mtx2) # transform mtx2 to minimize disparity (sum( (mtx1[i,j] - mtx2[i,j])^2) ) mtx2 = match_points(mtx1, mtx2) # WARNING: I haven't proven that after matching the matrices, no point has # a nonzero component in the added dimension. 
I believe it is true, # though, since the unchanged matrix has no points extending into # that dimension if any(abs(mtx2[:,-1]) > SMALL_NUM): raise StandardError("we have accidentially added a dimension to \ the matrix, and the vectors have nonzero components in that dimension") # strip extra dimension which was added to allow reflections mtx1 = mtx1[:,:-1] mtx2 = mtx2[:,:-1] disparity = get_disparity(mtx1, mtx2) return mtx1, mtx2, disparity def center(mtx): """translate all data (rows of the matrix) to center on the origin returns a shifted version of the input data. The new matrix is such that the center of mass of the row vectors is centered at the origin. Returns a numpy float ('d') array """ result = array(mtx, 'd') result -= mean(result, 0) # subtract each column's mean from each element in that column return result def normalize(mtx): """change scaling of data (in rows) such that trace(mtx*mtx') = 1 mtx' denotes the transpose of mtx """ result = array(mtx, 'd') num_pts, num_dims = shape(result) mag = trace(dot(result, transpose(result))) norm = sqrt(mag) result /= norm return result def match_points(mtx1, mtx2): """returns a transformed mtx2 that matches mtx1. returns a new matrix which is a transform of mtx2. Scales and rotates a copy of mtx 2. See procrustes docs for details. """ u,s,vh = svd(dot(transpose(mtx1), mtx2)) q = dot(transpose(vh), transpose(u)) new_mtx2 = dot(mtx2, q) new_mtx2 *= sum(s) return new_mtx2 def get_disparity(mtx1, mtx2): """ returns a measure of the dissimilarity between two data sets returns M^2 = sum(square(mtx1 - mtx2)), the pointwise sum of squared differences""" return(sum(square(mtx1 - mtx2)))
0.871338
0.76238
# Lint as: python3 """TODO(tsitsulin): add headers, tests, and improve style.""" from absl import app from absl import flags import numpy as np from sklearn.metrics import accuracy_score from sklearn.metrics import normalized_mutual_info_score import tensorflow.compat.v2 as tf from graph_embedding.dmon.models.multilayer_gcn import multilayer_gcn from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph from graph_embedding.dmon.synthetic_data.overlapping_gaussians import line_gaussians tf.compat.v1.enable_v2_behavior() FLAGS = flags.FLAGS flags.DEFINE_integer( 'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0) flags.DEFINE_integer( 'n_clusters', 2, 'Number of clusters for the synthetic graph.', lower_bound=0) flags.DEFINE_float( 'train_size', 0.2, 'Training data proportion.', lower_bound=0) flags.DEFINE_integer( 'n_epochs', 200, 'Number of epochs to train.', lower_bound=0) flags.DEFINE_float( 'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') print('<NAME> i have some self-lööps') n_nodes = FLAGS.n_nodes n_clusters = FLAGS.n_clusters train_size = FLAGS.train_size data_clean, data_dirty, labels = line_gaussians(n_nodes, n_clusters) graph_clean = construct_knn_graph(data_clean).todense().A1.reshape( n_nodes, n_nodes) train_mask = np.zeros(n_nodes, dtype=np.bool) train_mask[np.random.choice( np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True test_mask = ~train_mask print(f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}') print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}') input_features = tf.keras.layers.Input(shape=(2,)) input_graph = tf.keras.layers.Input((n_nodes,)) output = multilayer_gcn([input_features, input_graph], [64, 32, n_clusters]) model = tf.keras.Model(inputs=[input_features, input_graph], outputs=output) model.compile( 
optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) for epoch in range(FLAGS.n_epochs): model.fit([data_dirty, graph_clean], labels, n_nodes, shuffle=False, sample_weight=train_mask) clusters = model([data_dirty, graph_clean]).numpy().argmax(axis=1)[test_mask] print( 'NMI:', normalized_mutual_info_score( labels[test_mask], clusters, average_method='arithmetic')) print('Accuracy:', accuracy_score(labels[test_mask], clusters)) if __name__ == '__main__': app.run(main)
graph_embedding/dmon/train_gcn.py
# Lint as: python3 """TODO(tsitsulin): add headers, tests, and improve style.""" from absl import app from absl import flags import numpy as np from sklearn.metrics import accuracy_score from sklearn.metrics import normalized_mutual_info_score import tensorflow.compat.v2 as tf from graph_embedding.dmon.models.multilayer_gcn import multilayer_gcn from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph from graph_embedding.dmon.synthetic_data.overlapping_gaussians import line_gaussians tf.compat.v1.enable_v2_behavior() FLAGS = flags.FLAGS flags.DEFINE_integer( 'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0) flags.DEFINE_integer( 'n_clusters', 2, 'Number of clusters for the synthetic graph.', lower_bound=0) flags.DEFINE_float( 'train_size', 0.2, 'Training data proportion.', lower_bound=0) flags.DEFINE_integer( 'n_epochs', 200, 'Number of epochs to train.', lower_bound=0) flags.DEFINE_float( 'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') print('<NAME> i have some self-lööps') n_nodes = FLAGS.n_nodes n_clusters = FLAGS.n_clusters train_size = FLAGS.train_size data_clean, data_dirty, labels = line_gaussians(n_nodes, n_clusters) graph_clean = construct_knn_graph(data_clean).todense().A1.reshape( n_nodes, n_nodes) train_mask = np.zeros(n_nodes, dtype=np.bool) train_mask[np.random.choice( np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True test_mask = ~train_mask print(f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}') print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}') input_features = tf.keras.layers.Input(shape=(2,)) input_graph = tf.keras.layers.Input((n_nodes,)) output = multilayer_gcn([input_features, input_graph], [64, 32, n_clusters]) model = tf.keras.Model(inputs=[input_features, input_graph], outputs=output) model.compile( 
optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) for epoch in range(FLAGS.n_epochs): model.fit([data_dirty, graph_clean], labels, n_nodes, shuffle=False, sample_weight=train_mask) clusters = model([data_dirty, graph_clean]).numpy().argmax(axis=1)[test_mask] print( 'NMI:', normalized_mutual_info_score( labels[test_mask], clusters, average_method='arithmetic')) print('Accuracy:', accuracy_score(labels[test_mask], clusters)) if __name__ == '__main__': app.run(main)
0.544801
0.424889
import logging import pytest from collections import namedtuple from streamsets.testframework.markers import sdc_min_version logger = logging.getLogger(__name__) # Port for SDC RPC stages to exchange error records SDC_RPC_LISTENING_PORT = 20000 SDC_RPC_ID = 'lifecycle' @pytest.fixture(scope='module') def sdc_common_hook(): def hook(data_collector): data_collector.add_stage_lib('streamsets-datacollector-jython_2_7-lib') return hook @pytest.fixture(scope='function') def generator_trash_builder(sdc_builder): builder = sdc_builder.get_pipeline_builder() dev_data_generator = builder.add_stage('Dev Data Generator') trash = builder.add_stage('Trash') dev_data_generator >> trash yield builder @pytest.fixture(scope='function') def generator_finisher_builder(sdc_builder): builder = sdc_builder.get_pipeline_builder() dev_data_generator = builder.add_stage('Dev Data Generator') finisher = builder.add_stage('Pipeline Finisher Executor') dev_data_generator >> finisher yield builder @pytest.fixture(scope='function') def generator_failure_builder(sdc_builder): builder = sdc_builder.get_pipeline_builder() dev_data_generator = builder.add_stage('Dev Data Generator') jython = builder.add_stage('Jython Evaluator') jython.script = '1 / 0' # ~ throw exception and stop the pipeline trash = builder.add_stage('Trash') dev_data_generator >> jython >> trash yield builder @pytest.fixture(scope='function') def successful_receiver_pipeline(sdc_builder): builder = sdc_builder.get_pipeline_builder() origin = builder.add_stage('SDC RPC', type='origin') origin.sdc_rpc_listening_port = SDC_RPC_LISTENING_PORT origin.sdc_rpc_id = SDC_RPC_ID wiretap = builder.add_wiretap() origin >> wiretap.destination pipeline = builder.build('Succeeding Lifecycle Receiver') yield namedtuple('Pipeline', ['pipeline', 'wiretap'])(pipeline, wiretap) @pytest.fixture(scope='function') def failing_receiver_pipeline(sdc_builder): builder = sdc_builder.get_pipeline_builder() origin = builder.add_stage('SDC RPC', 
type='origin') origin.sdc_rpc_listening_port = SDC_RPC_LISTENING_PORT origin.sdc_rpc_id = SDC_RPC_ID jython = builder.add_stage('Jython Evaluator') jython.script = '1 / 0' # ~ throw exception and stop the pipeline trash = builder.add_stage('Trash') origin >> jython >> trash pipeline = builder.build('Failing Lifecycle Receiver') pipeline.configuration['shouldRetry'] = False yield pipeline @sdc_min_version('2.7.0.0') def test_start_event(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event on pipeline start.""" start_stage = generator_trash_builder.add_start_event_stage('Write to Another Pipeline') start_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] start_stage.sdc_rpc_id = SDC_RPC_ID start_event_pipeline = generator_trash_builder.build('Start Event') sdc_executor.add_pipeline(start_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline) sdc_executor.start_pipeline(start_event_pipeline) # And validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-start' assert record.field['user'].value == 'admin' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) sdc_executor.stop_pipeline(start_event_pipeline) @sdc_min_version('3.17.0') def test_start_event_with_job_info(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ 
Validate that we properly generate jobId and jobName to pipeline start event""" start_stage = generator_trash_builder.add_start_event_stage('Write to Another Pipeline') start_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] start_stage.sdc_rpc_id = SDC_RPC_ID start_event_pipeline = generator_trash_builder.build('Start Event') start_event_pipeline.add_parameters(JOB_ID='stfJobId', JOB_NAME='stfJobName') sdc_executor.add_pipeline(start_event_pipeline, successful_receiver_pipeline.pipeline) try: sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(start_event_pipeline) # And validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-start' assert record.field['user'].value == 'admin' assert record.field['jobId'].value == 'stfJobId' assert record.field['jobName'].value == 'stfJobName' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) sdc_executor.stop_pipeline(start_event_pipeline) @sdc_min_version('2.7.0.0') def test_stop_event_user_action(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event when pipeline is stopped by user.""" stop_stage = generator_trash_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_trash_builder.build('Stop Event - User Action') sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make 
sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) sdc_executor.stop_pipeline(stop_event_pipeline) # And validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'USER_ACTION' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('3.17.0') def test_stop_event_with_job_info(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate jobId and jobName to pipeline stop event""" stop_stage = generator_trash_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_trash_builder.build('Stop Event - Job Info') stop_event_pipeline.add_parameters(JOB_ID='stfJobId', JOB_NAME='stfJobName') sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) sdc_executor.stop_pipeline(stop_event_pipeline) #Validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is 
not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'USER_ACTION' assert record.field['jobId'].value == 'stfJobId' assert record.field['jobName'].value == 'stfJobName' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('2.7.0.0') def test_stop_event_finished(generator_finisher_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event when pipeline finishes.""" stop_stage = generator_finisher_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_finisher_builder.build('Stop Event - Finished') sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) #Validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'FINISHED' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('2.7.0.0') def test_stop_event_failure(generator_failure_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event when pipeline crashes.""" stop_stage = 
generator_failure_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_failure_builder.build('Stop Event - Failure') stop_event_pipeline.configuration['shouldRetry'] = False sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) #Validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'FAILURE' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('2.7.0.0') def test_start_event_handler_failure(generator_trash_builder, failing_receiver_pipeline, sdc_executor): """ Validate that failure to process start event will terminate the pipeline.""" start_stage = generator_trash_builder.add_start_event_stage('Write to Another Pipeline') start_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] start_stage.sdc_rpc_id = SDC_RPC_ID start_event_pipeline = generator_trash_builder.build('Start Event: Handler Failure') start_event_pipeline.configuration['shouldRetry'] = False sdc_executor.add_pipeline(start_event_pipeline, failing_receiver_pipeline) # Start the event handling pipeline 
sdc_executor.start_pipeline(failing_receiver_pipeline, wait=False) # Start the actual event generating pipeline sdc_executor.start_pipeline(start_event_pipeline, wait=False) # Which should kill the receiver pipeline sdc_executor.get_pipeline_status(failing_receiver_pipeline).wait_for_status('RUN_ERROR', ignore_errors=True) # And that in turns will also kill the event generating pipeline sdc_executor.get_pipeline_status(start_event_pipeline).wait_for_status('START_ERROR', ignore_errors=True) # Validate history is as expected history = sdc_executor.get_pipeline_history(start_event_pipeline) entry = history.entries[0] assert entry['status'] == 'START_ERROR' @sdc_min_version('2.7.0.0') def test_stop_event_handler_failure(generator_trash_builder, failing_receiver_pipeline, sdc_executor): """ Validate that failure to process stop event will terminate the pipeline.""" stop_stage = generator_trash_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_trash_builder.build('Stop Event: Handler Failure') stop_event_pipeline.configuration['shouldRetry'] = False sdc_executor.add_pipeline(stop_event_pipeline, failing_receiver_pipeline) # Start the event handling pipeline sdc_executor.start_pipeline(failing_receiver_pipeline) # Start the actual event generating pipeline sdc_executor.start_pipeline(stop_event_pipeline) sdc_executor.stop_pipeline(stop_event_pipeline, wait=False) # Which should kill the receiver pipeline sdc_executor.get_pipeline_status(failing_receiver_pipeline).wait_for_status('RUNNING_ERROR', ignore_errors=True) # And that in turns will also kill the event generating pipeline sdc_executor.get_pipeline_status(stop_event_pipeline).wait_for_status('STOP_ERROR', ignore_errors=True) # Validate history is as expected history = sdc_executor.get_pipeline_history(stop_event_pipeline) entry = history.entries[0] 
assert entry['status'] == 'STOP_ERROR'
pipeline/test_lifecycle_events.py
import logging import pytest from collections import namedtuple from streamsets.testframework.markers import sdc_min_version logger = logging.getLogger(__name__) # Port for SDC RPC stages to exchange error records SDC_RPC_LISTENING_PORT = 20000 SDC_RPC_ID = 'lifecycle' @pytest.fixture(scope='module') def sdc_common_hook(): def hook(data_collector): data_collector.add_stage_lib('streamsets-datacollector-jython_2_7-lib') return hook @pytest.fixture(scope='function') def generator_trash_builder(sdc_builder): builder = sdc_builder.get_pipeline_builder() dev_data_generator = builder.add_stage('Dev Data Generator') trash = builder.add_stage('Trash') dev_data_generator >> trash yield builder @pytest.fixture(scope='function') def generator_finisher_builder(sdc_builder): builder = sdc_builder.get_pipeline_builder() dev_data_generator = builder.add_stage('Dev Data Generator') finisher = builder.add_stage('Pipeline Finisher Executor') dev_data_generator >> finisher yield builder @pytest.fixture(scope='function') def generator_failure_builder(sdc_builder): builder = sdc_builder.get_pipeline_builder() dev_data_generator = builder.add_stage('Dev Data Generator') jython = builder.add_stage('Jython Evaluator') jython.script = '1 / 0' # ~ throw exception and stop the pipeline trash = builder.add_stage('Trash') dev_data_generator >> jython >> trash yield builder @pytest.fixture(scope='function') def successful_receiver_pipeline(sdc_builder): builder = sdc_builder.get_pipeline_builder() origin = builder.add_stage('SDC RPC', type='origin') origin.sdc_rpc_listening_port = SDC_RPC_LISTENING_PORT origin.sdc_rpc_id = SDC_RPC_ID wiretap = builder.add_wiretap() origin >> wiretap.destination pipeline = builder.build('Succeeding Lifecycle Receiver') yield namedtuple('Pipeline', ['pipeline', 'wiretap'])(pipeline, wiretap) @pytest.fixture(scope='function') def failing_receiver_pipeline(sdc_builder): builder = sdc_builder.get_pipeline_builder() origin = builder.add_stage('SDC RPC', 
type='origin') origin.sdc_rpc_listening_port = SDC_RPC_LISTENING_PORT origin.sdc_rpc_id = SDC_RPC_ID jython = builder.add_stage('Jython Evaluator') jython.script = '1 / 0' # ~ throw exception and stop the pipeline trash = builder.add_stage('Trash') origin >> jython >> trash pipeline = builder.build('Failing Lifecycle Receiver') pipeline.configuration['shouldRetry'] = False yield pipeline @sdc_min_version('2.7.0.0') def test_start_event(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event on pipeline start.""" start_stage = generator_trash_builder.add_start_event_stage('Write to Another Pipeline') start_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] start_stage.sdc_rpc_id = SDC_RPC_ID start_event_pipeline = generator_trash_builder.build('Start Event') sdc_executor.add_pipeline(start_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline) sdc_executor.start_pipeline(start_event_pipeline) # And validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-start' assert record.field['user'].value == 'admin' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) sdc_executor.stop_pipeline(start_event_pipeline) @sdc_min_version('3.17.0') def test_start_event_with_job_info(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ 
Validate that we properly generate jobId and jobName to pipeline start event""" start_stage = generator_trash_builder.add_start_event_stage('Write to Another Pipeline') start_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] start_stage.sdc_rpc_id = SDC_RPC_ID start_event_pipeline = generator_trash_builder.build('Start Event') start_event_pipeline.add_parameters(JOB_ID='stfJobId', JOB_NAME='stfJobName') sdc_executor.add_pipeline(start_event_pipeline, successful_receiver_pipeline.pipeline) try: sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(start_event_pipeline) # And validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-start' assert record.field['user'].value == 'admin' assert record.field['jobId'].value == 'stfJobId' assert record.field['jobName'].value == 'stfJobName' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) sdc_executor.stop_pipeline(start_event_pipeline) @sdc_min_version('2.7.0.0') def test_stop_event_user_action(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event when pipeline is stopped by user.""" stop_stage = generator_trash_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_trash_builder.build('Stop Event - User Action') sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make 
sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) sdc_executor.stop_pipeline(stop_event_pipeline) # And validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'USER_ACTION' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('3.17.0') def test_stop_event_with_job_info(generator_trash_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate jobId and jobName to pipeline stop event""" stop_stage = generator_trash_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_trash_builder.build('Stop Event - Job Info') stop_event_pipeline.add_parameters(JOB_ID='stfJobId', JOB_NAME='stfJobName') sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) sdc_executor.stop_pipeline(stop_event_pipeline) #Validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is 
not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'USER_ACTION' assert record.field['jobId'].value == 'stfJobId' assert record.field['jobName'].value == 'stfJobName' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('2.7.0.0') def test_stop_event_finished(generator_finisher_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event when pipeline finishes.""" stop_stage = generator_finisher_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_finisher_builder.build('Stop Event - Finished') sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) #Validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'FINISHED' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('2.7.0.0') def test_stop_event_failure(generator_failure_builder, successful_receiver_pipeline, sdc_executor): """ Validate that we properly generate and process event when pipeline crashes.""" stop_stage = 
generator_failure_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_failure_builder.build('Stop Event - Failure') stop_event_pipeline.configuration['shouldRetry'] = False sdc_executor.add_pipeline(stop_event_pipeline, successful_receiver_pipeline.pipeline) try: # Since there will be exactly one event generated we need to make sure that: # * Wiretap output records has one record # * The receiver pipeline is 'RUNNING' otherwise event generating pipeline will fail to start sdc_executor.start_pipeline(successful_receiver_pipeline.pipeline, wait=False) sdc_executor.start_pipeline(stop_event_pipeline) #Validate that the event arrived to the receiver pipeline sdc_executor.wait_for_pipeline_metric(successful_receiver_pipeline.pipeline, 'input_record_count', 1) assert len(successful_receiver_pipeline.wiretap.output_records) == 1 record = successful_receiver_pipeline.wiretap.output_records[0] assert record is not None assert record.header['values']['sdc.event.type'] == 'pipeline-stop' assert record.field['reason'].value == 'FAILURE' finally: sdc_executor.stop_pipeline(successful_receiver_pipeline.pipeline) @sdc_min_version('2.7.0.0') def test_start_event_handler_failure(generator_trash_builder, failing_receiver_pipeline, sdc_executor): """ Validate that failure to process start event will terminate the pipeline.""" start_stage = generator_trash_builder.add_start_event_stage('Write to Another Pipeline') start_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] start_stage.sdc_rpc_id = SDC_RPC_ID start_event_pipeline = generator_trash_builder.build('Start Event: Handler Failure') start_event_pipeline.configuration['shouldRetry'] = False sdc_executor.add_pipeline(start_event_pipeline, failing_receiver_pipeline) # Start the event handling pipeline 
sdc_executor.start_pipeline(failing_receiver_pipeline, wait=False) # Start the actual event generating pipeline sdc_executor.start_pipeline(start_event_pipeline, wait=False) # Which should kill the receiver pipeline sdc_executor.get_pipeline_status(failing_receiver_pipeline).wait_for_status('RUN_ERROR', ignore_errors=True) # And that in turns will also kill the event generating pipeline sdc_executor.get_pipeline_status(start_event_pipeline).wait_for_status('START_ERROR', ignore_errors=True) # Validate history is as expected history = sdc_executor.get_pipeline_history(start_event_pipeline) entry = history.entries[0] assert entry['status'] == 'START_ERROR' @sdc_min_version('2.7.0.0') def test_stop_event_handler_failure(generator_trash_builder, failing_receiver_pipeline, sdc_executor): """ Validate that failure to process stop event will terminate the pipeline.""" stop_stage = generator_trash_builder.add_stop_event_stage('Write to Another Pipeline') stop_stage.sdc_rpc_connection = [f'{sdc_executor.server_host}:{SDC_RPC_LISTENING_PORT}'] stop_stage.sdc_rpc_id = SDC_RPC_ID stop_event_pipeline = generator_trash_builder.build('Stop Event: Handler Failure') stop_event_pipeline.configuration['shouldRetry'] = False sdc_executor.add_pipeline(stop_event_pipeline, failing_receiver_pipeline) # Start the event handling pipeline sdc_executor.start_pipeline(failing_receiver_pipeline) # Start the actual event generating pipeline sdc_executor.start_pipeline(stop_event_pipeline) sdc_executor.stop_pipeline(stop_event_pipeline, wait=False) # Which should kill the receiver pipeline sdc_executor.get_pipeline_status(failing_receiver_pipeline).wait_for_status('RUNNING_ERROR', ignore_errors=True) # And that in turns will also kill the event generating pipeline sdc_executor.get_pipeline_status(stop_event_pipeline).wait_for_status('STOP_ERROR', ignore_errors=True) # Validate history is as expected history = sdc_executor.get_pipeline_history(stop_event_pipeline) entry = history.entries[0] 
assert entry['status'] == 'STOP_ERROR'
0.650689
0.236373
import io import os import subprocess from setuptools import setup here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, "requirements.txt")) as f: requirements = f.read().splitlines() def readme(): with open("README.rst") as f: return f.read() git_version = "2.3.0" if __name__ == "__main__": setup( name="raredecay", version=git_version, description="A package with multivariate analysis and reweighting " "algorithms", long_description=readme(), classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: MacOS", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX :: Linux", "Operating System :: Unix", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Scientific/Engineering :: Physics", "Topic :: Scientific/Engineering :: Information Analysis", ], keywords="particle physics, analysis, machine learning, reweight, high energy physics", url="https://github.com/mayou36/raredecay", author="<NAME>", author_email="<EMAIL>", license="Apache-2.0 License", install_requires=requirements, packages=[ "raredecay", "raredecay.analysis", "raredecay.tools", ], include_package_data=True, python_requires=">2.7,!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", zip_safe=False, ) # build docs try: subprocess.Popen( "chmod u+x " + os.path.join(here, "docs/make_docs.sh"), shell=True ) subprocess.Popen("bash " + os.path.join(here, "docs/make_docs.sh"), shell=True) except Exception as err: print("Failed to build docs.") raise err
setup.py
import io import os import subprocess from setuptools import setup here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, "requirements.txt")) as f: requirements = f.read().splitlines() def readme(): with open("README.rst") as f: return f.read() git_version = "2.3.0" if __name__ == "__main__": setup( name="raredecay", version=git_version, description="A package with multivariate analysis and reweighting " "algorithms", long_description=readme(), classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: MacOS", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX :: Linux", "Operating System :: Unix", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Scientific/Engineering :: Physics", "Topic :: Scientific/Engineering :: Information Analysis", ], keywords="particle physics, analysis, machine learning, reweight, high energy physics", url="https://github.com/mayou36/raredecay", author="<NAME>", author_email="<EMAIL>", license="Apache-2.0 License", install_requires=requirements, packages=[ "raredecay", "raredecay.analysis", "raredecay.tools", ], include_package_data=True, python_requires=">2.7,!=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", zip_safe=False, ) # build docs try: subprocess.Popen( "chmod u+x " + os.path.join(here, "docs/make_docs.sh"), shell=True ) subprocess.Popen("bash " + os.path.join(here, "docs/make_docs.sh"), shell=True) except Exception as err: print("Failed to build docs.") raise err
0.359589
0.126434
import os import unittest from dart.client.python.dart_client import Dart from dart.engine.no_op.metadata import NoOpActionTypes from dart.model.action import ActionData, Action, ActionState from dart.model.dataset import Column, DatasetData, Dataset, DataFormat, DataType, FileFormat, RowFormat, LoadType from dart.model.datastore import Datastore, DatastoreData, DatastoreState from dart.model.subscription import Subscription, SubscriptionData, SubscriptionElementStats, SubscriptionElementState, \ SubscriptionState from dart.model.trigger import Trigger, TriggerState from dart.model.trigger import TriggerData from dart.model.workflow import WorkflowData, WorkflowState, WorkflowInstanceState from dart.model.workflow import Workflow class TestConsumeSubscription(unittest.TestCase): def setUp(self): dart = Dart(host='localhost', port=5000) """ :type dart: dart.client.python.dart_client.Dart """ self.dart = dart cs = [Column('c1', DataType.VARCHAR, 50), Column('c2', DataType.BIGINT)] df = DataFormat(FileFormat.TEXTFILE, RowFormat.DELIMITED) dataset_data = DatasetData(name='test-dataset', table_name='test_dataset_table', load_type=LoadType.INSERT, location=('s3://' + os.environ['DART_TEST_BUCKET'] + '/impala'), data_format=df, columns=cs, tags=[]) self.dataset = self.dart.save_dataset(Dataset(data=dataset_data)) start = 's3://' + os.environ['DART_TEST_BUCKET'] + '/impala/impala' end = 's3://' + os.environ['DART_TEST_BUCKET'] + '/impala/install' regex = '.*\\.rpm' ds = Subscription(data=SubscriptionData('test-subscription', self.dataset.id, start, end, regex)) self.subscription = self.dart.save_subscription(ds) dst_args = {'action_sleep_time_in_seconds': 0} dst = Datastore(data=DatastoreData('test-datastore', 'no_op_engine', args=dst_args, state=DatastoreState.TEMPLATE)) self.datastore = self.dart.save_datastore(dst) wf = Workflow(data=WorkflowData('test-workflow', self.datastore.id, state=WorkflowState.ACTIVE)) self.workflow = self.dart.save_workflow(wf, 
self.datastore.id) a_args = {'subscription_id': self.subscription.id} a0 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE)) a1 = Action(data=ActionData(NoOpActionTypes.consume_subscription.name, NoOpActionTypes.consume_subscription.name, a_args, state=ActionState.TEMPLATE)) self.action0, self.action1 = self.dart.save_actions([a0, a1], workflow_id=self.workflow.id) def tearDown(self): for a in self.dart.get_actions(workflow_id=self.workflow.id): self.dart.delete_action(a.id) for wfi in self.dart.get_workflow_instances(self.workflow.id): self.dart.delete_datastore(wfi.data.datastore_id) self.dart.delete_workflow_instances(self.workflow.id) self.dart.delete_workflow(self.workflow.id) self.dart.delete_datastore(self.datastore.id) self.dart.delete_subscription(self.subscription.id) self.dart.delete_dataset(self.dataset.id) def test_consume_subscription(self): subscription = self.dart.await_subscription_generation(self.subscription.id) self.assertEqual(subscription.data.state, SubscriptionState.ACTIVE) tr_args = {'subscription_id': self.subscription.id, 'unconsumed_data_size_in_bytes': 49524} tr = Trigger(data=TriggerData('test-trigger', 'subscription_batch', [self.workflow.id], tr_args, TriggerState.ACTIVE)) self.trigger = self.dart.save_trigger(tr) wf_instances = self.dart.await_workflow_completion(self.workflow.id, num_instances=3) for wfi in wf_instances: self.assertEqual(wfi.data.state, WorkflowInstanceState.COMPLETED) stats = self.dart.get_subscription_element_stats(self.subscription.id) ses = SubscriptionElementStats(SubscriptionElementState.CONSUMED, 3, 152875004 + 834620 + 49524) self.assertEqual([s.to_dict() for s in stats], [ses.to_dict()]) self.dart.delete_trigger(self.trigger.id) if __name__ == '__main__': unittest.main()
src/python/dart/test/full/test_consume_subscription.py
import os import unittest from dart.client.python.dart_client import Dart from dart.engine.no_op.metadata import NoOpActionTypes from dart.model.action import ActionData, Action, ActionState from dart.model.dataset import Column, DatasetData, Dataset, DataFormat, DataType, FileFormat, RowFormat, LoadType from dart.model.datastore import Datastore, DatastoreData, DatastoreState from dart.model.subscription import Subscription, SubscriptionData, SubscriptionElementStats, SubscriptionElementState, \ SubscriptionState from dart.model.trigger import Trigger, TriggerState from dart.model.trigger import TriggerData from dart.model.workflow import WorkflowData, WorkflowState, WorkflowInstanceState from dart.model.workflow import Workflow class TestConsumeSubscription(unittest.TestCase): def setUp(self): dart = Dart(host='localhost', port=5000) """ :type dart: dart.client.python.dart_client.Dart """ self.dart = dart cs = [Column('c1', DataType.VARCHAR, 50), Column('c2', DataType.BIGINT)] df = DataFormat(FileFormat.TEXTFILE, RowFormat.DELIMITED) dataset_data = DatasetData(name='test-dataset', table_name='test_dataset_table', load_type=LoadType.INSERT, location=('s3://' + os.environ['DART_TEST_BUCKET'] + '/impala'), data_format=df, columns=cs, tags=[]) self.dataset = self.dart.save_dataset(Dataset(data=dataset_data)) start = 's3://' + os.environ['DART_TEST_BUCKET'] + '/impala/impala' end = 's3://' + os.environ['DART_TEST_BUCKET'] + '/impala/install' regex = '.*\\.rpm' ds = Subscription(data=SubscriptionData('test-subscription', self.dataset.id, start, end, regex)) self.subscription = self.dart.save_subscription(ds) dst_args = {'action_sleep_time_in_seconds': 0} dst = Datastore(data=DatastoreData('test-datastore', 'no_op_engine', args=dst_args, state=DatastoreState.TEMPLATE)) self.datastore = self.dart.save_datastore(dst) wf = Workflow(data=WorkflowData('test-workflow', self.datastore.id, state=WorkflowState.ACTIVE)) self.workflow = self.dart.save_workflow(wf, 
self.datastore.id) a_args = {'subscription_id': self.subscription.id} a0 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE)) a1 = Action(data=ActionData(NoOpActionTypes.consume_subscription.name, NoOpActionTypes.consume_subscription.name, a_args, state=ActionState.TEMPLATE)) self.action0, self.action1 = self.dart.save_actions([a0, a1], workflow_id=self.workflow.id) def tearDown(self): for a in self.dart.get_actions(workflow_id=self.workflow.id): self.dart.delete_action(a.id) for wfi in self.dart.get_workflow_instances(self.workflow.id): self.dart.delete_datastore(wfi.data.datastore_id) self.dart.delete_workflow_instances(self.workflow.id) self.dart.delete_workflow(self.workflow.id) self.dart.delete_datastore(self.datastore.id) self.dart.delete_subscription(self.subscription.id) self.dart.delete_dataset(self.dataset.id) def test_consume_subscription(self): subscription = self.dart.await_subscription_generation(self.subscription.id) self.assertEqual(subscription.data.state, SubscriptionState.ACTIVE) tr_args = {'subscription_id': self.subscription.id, 'unconsumed_data_size_in_bytes': 49524} tr = Trigger(data=TriggerData('test-trigger', 'subscription_batch', [self.workflow.id], tr_args, TriggerState.ACTIVE)) self.trigger = self.dart.save_trigger(tr) wf_instances = self.dart.await_workflow_completion(self.workflow.id, num_instances=3) for wfi in wf_instances: self.assertEqual(wfi.data.state, WorkflowInstanceState.COMPLETED) stats = self.dart.get_subscription_element_stats(self.subscription.id) ses = SubscriptionElementStats(SubscriptionElementState.CONSUMED, 3, 152875004 + 834620 + 49524) self.assertEqual([s.to_dict() for s in stats], [ses.to_dict()]) self.dart.delete_trigger(self.trigger.id) if __name__ == '__main__': unittest.main()
0.394901
0.335514
"""A function to build an object detection box coder from configuration.""" from tfold.object_detection.box_coders import faster_rcnn_box_coder from tfold.object_detection.box_coders import keypoint_box_coder from tfold.object_detection.box_coders import mean_stddev_box_coder from tfold.object_detection.box_coders import square_box_coder from tfold.object_detection.protos import box_coder_pb2 def build(box_coder_config): """Builds a box coder object based on the box coder config. Args: box_coder_config: A box_coder.proto object containing the config for the desired box coder. Returns: BoxCoder based on the config. Raises: ValueError: On empty box coder proto. """ if not isinstance(box_coder_config, box_coder_pb2.BoxCoder): raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.') if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder': return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[ box_coder_config.faster_rcnn_box_coder.y_scale, box_coder_config.faster_rcnn_box_coder.x_scale, box_coder_config.faster_rcnn_box_coder.height_scale, box_coder_config.faster_rcnn_box_coder.width_scale ]) if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder': return keypoint_box_coder.KeypointBoxCoder( box_coder_config.keypoint_box_coder.num_keypoints, scale_factors=[ box_coder_config.keypoint_box_coder.y_scale, box_coder_config.keypoint_box_coder.x_scale, box_coder_config.keypoint_box_coder.height_scale, box_coder_config.keypoint_box_coder.width_scale ]) if (box_coder_config.WhichOneof('box_coder_oneof') == 'mean_stddev_box_coder'): return mean_stddev_box_coder.MeanStddevBoxCoder( stddev=box_coder_config.mean_stddev_box_coder.stddev) if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder': return square_box_coder.SquareBoxCoder(scale_factors=[ box_coder_config.square_box_coder.y_scale, box_coder_config.square_box_coder.x_scale, box_coder_config.square_box_coder.length_scale ]) raise ValueError('Empty 
box coder.')
tfold/object_detection/builders/box_coder_builder.py
"""A function to build an object detection box coder from configuration.""" from tfold.object_detection.box_coders import faster_rcnn_box_coder from tfold.object_detection.box_coders import keypoint_box_coder from tfold.object_detection.box_coders import mean_stddev_box_coder from tfold.object_detection.box_coders import square_box_coder from tfold.object_detection.protos import box_coder_pb2 def build(box_coder_config): """Builds a box coder object based on the box coder config. Args: box_coder_config: A box_coder.proto object containing the config for the desired box coder. Returns: BoxCoder based on the config. Raises: ValueError: On empty box coder proto. """ if not isinstance(box_coder_config, box_coder_pb2.BoxCoder): raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.') if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder': return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[ box_coder_config.faster_rcnn_box_coder.y_scale, box_coder_config.faster_rcnn_box_coder.x_scale, box_coder_config.faster_rcnn_box_coder.height_scale, box_coder_config.faster_rcnn_box_coder.width_scale ]) if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder': return keypoint_box_coder.KeypointBoxCoder( box_coder_config.keypoint_box_coder.num_keypoints, scale_factors=[ box_coder_config.keypoint_box_coder.y_scale, box_coder_config.keypoint_box_coder.x_scale, box_coder_config.keypoint_box_coder.height_scale, box_coder_config.keypoint_box_coder.width_scale ]) if (box_coder_config.WhichOneof('box_coder_oneof') == 'mean_stddev_box_coder'): return mean_stddev_box_coder.MeanStddevBoxCoder( stddev=box_coder_config.mean_stddev_box_coder.stddev) if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder': return square_box_coder.SquareBoxCoder(scale_factors=[ box_coder_config.square_box_coder.y_scale, box_coder_config.square_box_coder.x_scale, box_coder_config.square_box_coder.length_scale ]) raise ValueError('Empty 
box coder.')
0.930844
0.303474
from discord.ext import commands from config import REDDIT_APP_ID, REDDIT_APP_SECRET, REDDIT_ENABLED_MEME_SUBREDDITS, REDDIT_ADULT, REDDIT_ADULT_GIF, ADULT_NSFW_CHANNEL_ID import random import praw import aiohttp import discord class adult(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def prn(self, ctx): """Resim yada gif atar """ #channel = ctx.get_channel(791740048273440768) jacking topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: # ADULT_NSFW_CHANNEL_ID SORUN CIKARSA DİREKT CHANNEL_ID SİNİ BURAYA YAZIN async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit(topıc).hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prngif(self, ctx): """gif yollar """ #channel = ctx.get_channel(791740048273440768) gıfP = random.choice(REDDIT_ADULT_GIF) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit(gıfP).hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prnbj(self, ctx): """Blowjob""" #channel = ctx.get_channel(791740048273440768) anal #gıfV = random.choice(REDDIT_ADULT_VID) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, 
client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('BlowJob').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def pussy(self, ctx): """Amcıq atar """ #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('pussy').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prnanal(self, ctx): """Anal gif/resim atar """ #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('anal').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prncum(self, ctx): """Cumshot gif/resim atar""" #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = 
praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('cumshot').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def hentai(self, ctx): """Cumshot gif/resim atar""" #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('hentai').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') def setup(bot): bot.add_cog(adult(bot))
BOT-ALTYAPI-PYTHON/Dbot/cogs/nsfw.py
from discord.ext import commands from config import REDDIT_APP_ID, REDDIT_APP_SECRET, REDDIT_ENABLED_MEME_SUBREDDITS, REDDIT_ADULT, REDDIT_ADULT_GIF, ADULT_NSFW_CHANNEL_ID import random import praw import aiohttp import discord class adult(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def prn(self, ctx): """Resim yada gif atar """ #channel = ctx.get_channel(791740048273440768) jacking topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: # ADULT_NSFW_CHANNEL_ID SORUN CIKARSA DİREKT CHANNEL_ID SİNİ BURAYA YAZIN async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit(topıc).hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prngif(self, ctx): """gif yollar """ #channel = ctx.get_channel(791740048273440768) gıfP = random.choice(REDDIT_ADULT_GIF) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit(gıfP).hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prnbj(self, ctx): """Blowjob""" #channel = ctx.get_channel(791740048273440768) anal #gıfV = random.choice(REDDIT_ADULT_VID) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, 
client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('BlowJob').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def pussy(self, ctx): """Amcıq atar """ #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('pussy').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prnanal(self, ctx): """Anal gif/resim atar """ #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('anal').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def prncum(self, ctx): """Cumshot gif/resim atar""" #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = 
praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('cumshot').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') @commands.command() async def hentai(self, ctx): """Cumshot gif/resim atar""" #channel = ctx.get_channel(791740048273440768) #topıc = random.choice(REDDIT_ADULT) if ctx.channel.id == ADULT_NSFW_CHANNEL_ID: async with ctx.channel.typing(): reddit = praw.Reddit(client_id=REDDIT_APP_ID, client_secret=REDDIT_APP_SECRET, user_agent="Discord_Bot:%s:1.0" % REDDIT_APP_ID) memes_submissions = reddit.subreddit('hentai').hot() post_to_pick = random.randint(1, 10) for i in range(0, post_to_pick): submission = next(x for x in memes_submissions if not x.stickied) await ctx.send(submission.url) if ctx.channel.id != ADULT_NSFW_CHANNEL_ID: await ctx.send(f'Bu komut burda devre dışı!') def setup(bot): bot.add_cog(adult(bot))
0.121477
0.071494
import os import sys import json import urllib.parse import httplib2 import logging import re from colorama import Fore, Back, Style from lib.search.result import SearchResult from lib.doi import DOI from lib.fulltexturl import FullTextURL from lib.helper import Helper from lib.filter import Filters class Request(object): """ Manages requests to the background online service at `crossref.org` using the official API. """ URL_PROTOCOL = "http" URL_API_BASE = "api.crossref.org" URL_SERVICE_DOIS = "api.crossref.org/works" def __init__(self): self.colored_output = False def set_colored_output(self, value, doi=None, title=None, more=None): if type(value) != type(True): raise ValueError("set_colored_output() must be called with boolean \ value") self.colored_output = value self.color_doi = doi self.color_title = title self.color_more = more return True def prepare_search_query(self, string, sort='score', order='desc', \ year=None, type_=None, rows=20): valid_sort_methods = ('score', 'updated', 'deposited', 'indexed', 'published') if sort not in valid_sort_methods: raise ValueError("Sort method not supported. Valid values are: \ {}".format(", ".join(valid_sort_methods))) valid_order_methods = ('asc', 'desc') if order not in valid_order_methods: raise ValueError("Order method not supported. 
Valid values are: \ {}".format(", ".join(valid_order_methods))) filters = Filters() payload = {'query': string, 'sort': sort, 'order': order, 'rows': rows} if year is not None: filters.add('from-pub-date', year) if type_ is not None: filters.add('type', type_) # load all filter values payload['filter'] = filters.get_formatted_filters() return urllib.parse.urlencode(payload) def prepare_citation_query(self, doi_identifier): doi = DOI(doi_identifier) return doi.get_identifier() + '/transform' def search(self, query): url = "{}://{}?{}".format(self.URL_PROTOCOL, \ self.URL_SERVICE_DOIS, query) logging.debug("Search URL: {}".format(url)) response = self._request(url) return response def print_search_content(self, content, show_authors=False, show_type=False, show_publisher=False, show_url=False): base_template = "{score:.2f} - {year:4d} - {cfg_doi}{doi:40}{cfg_end} \ - {cfg_title}{title}{cfg_end}" template = base_template if show_authors: template += "\n {cfg_more}AUTHORS{cfg_end} : {authors}" if show_type: template += "\n {cfg_more}TYPE{cfg_end} : {type}" if show_publisher: template += "\n {cfg_more}PUBLISHER{cfg_end} : {publisher}" if show_url: template += "\n {cfg_more}URL{cfg_end} : {url}" for result in content.get('items', ()): sr = SearchResult(result) payload = { "score" : sr.get_score(), "year" : sr.get_year(), "doi" : sr.get_doi().get_identifier(), "title" : sr.get_title(), "authors" : sr.get_authors(), "type" : sr.get_type(), "publisher" : sr.get_publisher(), "url" : sr.get_url(), "cfg_more" : '', "cfg_end" : '', "cfg_doi" : '', "cfg_title" : '', } if self.colored_output: color_options = [ ("cfg_doi", self.color_doi), ("cfg_title", self.color_title), ("cfg_more", self.color_more), ("cfg_end", 'reset'), ] for key, value in color_options: payload[key] = Helper.get_fg_colorcode_by_identifier(value) print(template.format(**payload)) def citation(self, query, style='bibtex'): url = "{}://{}/{}".format(self.URL_PROTOCOL, self.URL_SERVICE_DOIS, query) 
headers={'Accept':'text/x-bibliography; style={}'.format(style)} logging.debug("Cite URL: {}".format(url)) logging.debug("Query headers: {}".format(headers)) logging.debug("Style: {}".format(style)) response = self._request(url, headers, json_message=False) return response.strip() def print_citation(self, content): print(self.__clean_html(content)) def get_download_links(self, identifier): url = "{}://{}/{}".format(self.URL_PROTOCOL, self.URL_SERVICE_DOIS, identifier) logging.debug("Query URL: {}".format(url)) response = self._request(url) links = [] for link in response.get('link', ()): content_version = link['content-version'] license = self._find_license(response, content_version) links.append(FullTextURL(link.get("URL", ""), license.get("URL", None))) return links def _find_license(self, response, content_version): for license in response.get('license', ()): if license.get("content-version", "") == content_version: return license return None def _request(self, url, headers={'content-type': 'application/json'}, method="GET", json_message=True): h = httplib2.Http(".cache") resp, content = h.request(url, method, headers=headers) request_status = int(resp['status']) if request_status != 200: raise RuntimeError("The server responded with code {:d}, which the \ script cannot deal with. Aborting.".format(request_status)) if json_message: return json.loads(content.decode('utf-8'))['message'] return content.decode('utf-8') def __clean_html(self, raw_html): regex = re.compile(r'<(.*?)>(.*?)</\1>') return re.sub(regex, r"\2", raw_html)
lib/search/request.py
import os import sys import json import urllib.parse import httplib2 import logging import re from colorama import Fore, Back, Style from lib.search.result import SearchResult from lib.doi import DOI from lib.fulltexturl import FullTextURL from lib.helper import Helper from lib.filter import Filters class Request(object): """ Manages requests to the background online service at `crossref.org` using the official API. """ URL_PROTOCOL = "http" URL_API_BASE = "api.crossref.org" URL_SERVICE_DOIS = "api.crossref.org/works" def __init__(self): self.colored_output = False def set_colored_output(self, value, doi=None, title=None, more=None): if type(value) != type(True): raise ValueError("set_colored_output() must be called with boolean \ value") self.colored_output = value self.color_doi = doi self.color_title = title self.color_more = more return True def prepare_search_query(self, string, sort='score', order='desc', \ year=None, type_=None, rows=20): valid_sort_methods = ('score', 'updated', 'deposited', 'indexed', 'published') if sort not in valid_sort_methods: raise ValueError("Sort method not supported. Valid values are: \ {}".format(", ".join(valid_sort_methods))) valid_order_methods = ('asc', 'desc') if order not in valid_order_methods: raise ValueError("Order method not supported. 
Valid values are: \ {}".format(", ".join(valid_order_methods))) filters = Filters() payload = {'query': string, 'sort': sort, 'order': order, 'rows': rows} if year is not None: filters.add('from-pub-date', year) if type_ is not None: filters.add('type', type_) # load all filter values payload['filter'] = filters.get_formatted_filters() return urllib.parse.urlencode(payload) def prepare_citation_query(self, doi_identifier): doi = DOI(doi_identifier) return doi.get_identifier() + '/transform' def search(self, query): url = "{}://{}?{}".format(self.URL_PROTOCOL, \ self.URL_SERVICE_DOIS, query) logging.debug("Search URL: {}".format(url)) response = self._request(url) return response def print_search_content(self, content, show_authors=False, show_type=False, show_publisher=False, show_url=False): base_template = "{score:.2f} - {year:4d} - {cfg_doi}{doi:40}{cfg_end} \ - {cfg_title}{title}{cfg_end}" template = base_template if show_authors: template += "\n {cfg_more}AUTHORS{cfg_end} : {authors}" if show_type: template += "\n {cfg_more}TYPE{cfg_end} : {type}" if show_publisher: template += "\n {cfg_more}PUBLISHER{cfg_end} : {publisher}" if show_url: template += "\n {cfg_more}URL{cfg_end} : {url}" for result in content.get('items', ()): sr = SearchResult(result) payload = { "score" : sr.get_score(), "year" : sr.get_year(), "doi" : sr.get_doi().get_identifier(), "title" : sr.get_title(), "authors" : sr.get_authors(), "type" : sr.get_type(), "publisher" : sr.get_publisher(), "url" : sr.get_url(), "cfg_more" : '', "cfg_end" : '', "cfg_doi" : '', "cfg_title" : '', } if self.colored_output: color_options = [ ("cfg_doi", self.color_doi), ("cfg_title", self.color_title), ("cfg_more", self.color_more), ("cfg_end", 'reset'), ] for key, value in color_options: payload[key] = Helper.get_fg_colorcode_by_identifier(value) print(template.format(**payload)) def citation(self, query, style='bibtex'): url = "{}://{}/{}".format(self.URL_PROTOCOL, self.URL_SERVICE_DOIS, query) 
headers={'Accept':'text/x-bibliography; style={}'.format(style)} logging.debug("Cite URL: {}".format(url)) logging.debug("Query headers: {}".format(headers)) logging.debug("Style: {}".format(style)) response = self._request(url, headers, json_message=False) return response.strip() def print_citation(self, content): print(self.__clean_html(content)) def get_download_links(self, identifier): url = "{}://{}/{}".format(self.URL_PROTOCOL, self.URL_SERVICE_DOIS, identifier) logging.debug("Query URL: {}".format(url)) response = self._request(url) links = [] for link in response.get('link', ()): content_version = link['content-version'] license = self._find_license(response, content_version) links.append(FullTextURL(link.get("URL", ""), license.get("URL", None))) return links def _find_license(self, response, content_version): for license in response.get('license', ()): if license.get("content-version", "") == content_version: return license return None def _request(self, url, headers={'content-type': 'application/json'}, method="GET", json_message=True): h = httplib2.Http(".cache") resp, content = h.request(url, method, headers=headers) request_status = int(resp['status']) if request_status != 200: raise RuntimeError("The server responded with code {:d}, which the \ script cannot deal with. Aborting.".format(request_status)) if json_message: return json.loads(content.decode('utf-8'))['message'] return content.decode('utf-8') def __clean_html(self, raw_html): regex = re.compile(r'<(.*?)>(.*?)</\1>') return re.sub(regex, r"\2", raw_html)
0.346431
0.12817
import os import pandas as pd from settings import * """ Takes as input the dataframe with the raw data collected from the .xar files and transforms it: - Keyframes to timestamps (keyframe number * 1/fps) - Joint values to radians from degrees (all joints except LHand, RHand) - Keyframes shifted forward by 20 - Init posture added at 1 = 0.04 sec """ # Original keyframes and destination file data_raw = 'df10_KF.csv' dest_x_set = 'df11_KF.csv' path_raw = os.path.join(ROOT_PATH, RAW_DATA, data_raw) dest = os.path.join(ROOT_PATH, DATA_X_PATH, dest_x_set) df_raw = pd.read_csv(path_raw, index_col=0) # Print some stuff for confirmation # First keyframe, last keyframe, #keyframes (all stored in data/external/plymouth_animations_descrptive.ods) print(df_raw.groupby('id', )['keyframe'].agg(['first', 'last', 'count'])) # Convert degrees to radians for every joint EXCEPT LHand, RHand (since their values are not in degrees) j_cols_deg = [0,1,2,3,4,5,6,8,9,10,11,12,14,15,16] df_raw.iloc[:, j_cols_deg] = df_raw.iloc[:, j_cols_deg].apply(lambda x: np.radians(x)) # # Shift keyframes forward by 20 df_raw['keyframe'] = df_raw['keyframe'].apply(lambda x: x + 20) # Some corrections due inconsistencies related to fps in .xar files # Convert keyframes to timestamp in secs for all animations except 'Happy_4' fps1 = 25 df_raw.loc[df_raw['id'] != 'Happy_4', 'keyframe'] = df_raw.loc[df_raw['id'] != 'Happy_4', 'keyframe'].apply(lambda x: np.round(x * (1 / fps1), 2)) # Convert keyframes to timestamp in secs only 'Happy_4' fps2 = 15 df_raw.loc[df_raw['id'] == 'Happy_4', 'keyframe'] = df_raw.loc[df_raw['id'] == 'Happy_4', 'keyframe'].apply(lambda x: np.round(x * (1 / fps2), 2)) # Add StandInit posture at the first keyframe (0.04 secs) # Get animations ids id = list(df_raw['id'].unique()) # Create init dataframe df_init = pd.DataFrame(columns=joints_names+['keyframe', 'id']) df_init['id'] = id df_init['keyframe'] = 0.04 df_init.loc[:, :-2] = standInit df_trans = df_raw.append(df_init) 
df_trans.sort_values(by=['id', 'keyframe'], inplace=True) df_trans.reset_index(drop=True, inplace=True) # Change column name 'keyframe' to 'time' df_trans.rename(columns={'keyframe': 'time'}, inplace=True) # Save transformed dataframe df_trans.to_csv(dest)
src/data/transform_raw.py
import os import pandas as pd from settings import * """ Takes as input the dataframe with the raw data collected from the .xar files and transforms it: - Keyframes to timestamps (keyframe number * 1/fps) - Joint values to radians from degrees (all joints except LHand, RHand) - Keyframes shifted forward by 20 - Init posture added at 1 = 0.04 sec """ # Original keyframes and destination file data_raw = 'df10_KF.csv' dest_x_set = 'df11_KF.csv' path_raw = os.path.join(ROOT_PATH, RAW_DATA, data_raw) dest = os.path.join(ROOT_PATH, DATA_X_PATH, dest_x_set) df_raw = pd.read_csv(path_raw, index_col=0) # Print some stuff for confirmation # First keyframe, last keyframe, #keyframes (all stored in data/external/plymouth_animations_descrptive.ods) print(df_raw.groupby('id', )['keyframe'].agg(['first', 'last', 'count'])) # Convert degrees to radians for every joint EXCEPT LHand, RHand (since their values are not in degrees) j_cols_deg = [0,1,2,3,4,5,6,8,9,10,11,12,14,15,16] df_raw.iloc[:, j_cols_deg] = df_raw.iloc[:, j_cols_deg].apply(lambda x: np.radians(x)) # # Shift keyframes forward by 20 df_raw['keyframe'] = df_raw['keyframe'].apply(lambda x: x + 20) # Some corrections due inconsistencies related to fps in .xar files # Convert keyframes to timestamp in secs for all animations except 'Happy_4' fps1 = 25 df_raw.loc[df_raw['id'] != 'Happy_4', 'keyframe'] = df_raw.loc[df_raw['id'] != 'Happy_4', 'keyframe'].apply(lambda x: np.round(x * (1 / fps1), 2)) # Convert keyframes to timestamp in secs only 'Happy_4' fps2 = 15 df_raw.loc[df_raw['id'] == 'Happy_4', 'keyframe'] = df_raw.loc[df_raw['id'] == 'Happy_4', 'keyframe'].apply(lambda x: np.round(x * (1 / fps2), 2)) # Add StandInit posture at the first keyframe (0.04 secs) # Get animations ids id = list(df_raw['id'].unique()) # Create init dataframe df_init = pd.DataFrame(columns=joints_names+['keyframe', 'id']) df_init['id'] = id df_init['keyframe'] = 0.04 df_init.loc[:, :-2] = standInit df_trans = df_raw.append(df_init) 
df_trans.sort_values(by=['id', 'keyframe'], inplace=True) df_trans.reset_index(drop=True, inplace=True) # Change column name 'keyframe' to 'time' df_trans.rename(columns={'keyframe': 'time'}, inplace=True) # Save transformed dataframe df_trans.to_csv(dest)
0.586641
0.299284
import urllib.request import ssl import json from settings import Settings from urllib.parse import urlparse class Openapis: api_listing = [] api_short_words = ["mine", "marble", "mellow", "futuristic", "zippy", "cap", "fragile", "torpid", "debt","exuberant", "lovely", "subsequent", "advertisement", "fence", "steady", "impulse", "alive", "back", "overrated", "romantic", "office", "entertain", "employ", "knowledgeable", "church", "follow", "amazing","watery", "embarrassed", "curved", "rely", "chilly", "domineering", "elastic", "influence", "appear", "squirrel", "breakable", "distance", "snow", "truthful", "wriggle", "merciful", "bustling", "wool", "stare", "tap", "sticky", "honey", "analyze", "load", "acidic", "happen", "hallowed", "humdrum", "glistening", "step", "advise", "chemical", "tomatoes", "spiders", "nice", "bulb", "memory", "suspend", "royal", "hill", "abashed", "suppose", "stereotyped", "wax", "surprise", "cup", "lazy", "astonishing", "crabby", "festive", "fasten", "calendar", "adorable", "country", "wistful", "tenuous", "sloppy", "jazzy", "sister", "horses", "towering", "illustrious", "fall", "spotless", "eye", "system", "thick"] @staticmethod def get_all_apis(): return Openapis.api_listing @staticmethod def get_base_url(): oapi_url = Settings.get_value_by_key("oapi_url") parsed_uri = urlparse(oapi_url) result = '{uri.scheme}://{uri.netloc}'.format(uri=parsed_uri) return result # todo: add try...except @staticmethod def populate_all_apis(url): print("easyApi Shell: getting the apis...please wait") try: ctx = ssl.create_default_context() if Settings.is_ssl_enabled(): ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE oapi_json = urllib.request.urlopen(url, context=ctx).read() oapi_config = json.loads(oapi_json) i = 1 for path in oapi_config["paths"]: api_details = oapi_config["paths"][path] if "get" in api_details.keys(): getter = api_details["get"] if getter is not None: Openapis.api_listing.append({ "id": str(i), "key": 
Openapis.api_short_words[i - 1], "url": path, "method": "get" }) i = i + 1 print("easyApi Shell: processed the apis") return True except: print("easyApi Shell: unable to read the api spec") return False @staticmethod def print_all_apis(): print("{0: >5} | {1: >15} | {2}".format("Id", "Key", "Url")) print("--------------------------------------------------------------------------") for api in Openapis.api_listing: print("{0: >5} | {1: >15} | {2}".format(api["id"],api["key"],api["url"])) @staticmethod def print_selected_apis(apis): print("{0: >5} | {1: >15} | {2}".format("Id", "Key", "Url")) print("--------------------------------------------------------------------------") for api in apis: print("{0: >5} | {1: >15} | {2}".format(api["id"],api["key"],api["url"]))
openapis.py
import urllib.request import ssl import json from settings import Settings from urllib.parse import urlparse class Openapis: api_listing = [] api_short_words = ["mine", "marble", "mellow", "futuristic", "zippy", "cap", "fragile", "torpid", "debt","exuberant", "lovely", "subsequent", "advertisement", "fence", "steady", "impulse", "alive", "back", "overrated", "romantic", "office", "entertain", "employ", "knowledgeable", "church", "follow", "amazing","watery", "embarrassed", "curved", "rely", "chilly", "domineering", "elastic", "influence", "appear", "squirrel", "breakable", "distance", "snow", "truthful", "wriggle", "merciful", "bustling", "wool", "stare", "tap", "sticky", "honey", "analyze", "load", "acidic", "happen", "hallowed", "humdrum", "glistening", "step", "advise", "chemical", "tomatoes", "spiders", "nice", "bulb", "memory", "suspend", "royal", "hill", "abashed", "suppose", "stereotyped", "wax", "surprise", "cup", "lazy", "astonishing", "crabby", "festive", "fasten", "calendar", "adorable", "country", "wistful", "tenuous", "sloppy", "jazzy", "sister", "horses", "towering", "illustrious", "fall", "spotless", "eye", "system", "thick"] @staticmethod def get_all_apis(): return Openapis.api_listing @staticmethod def get_base_url(): oapi_url = Settings.get_value_by_key("oapi_url") parsed_uri = urlparse(oapi_url) result = '{uri.scheme}://{uri.netloc}'.format(uri=parsed_uri) return result # todo: add try...except @staticmethod def populate_all_apis(url): print("easyApi Shell: getting the apis...please wait") try: ctx = ssl.create_default_context() if Settings.is_ssl_enabled(): ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE oapi_json = urllib.request.urlopen(url, context=ctx).read() oapi_config = json.loads(oapi_json) i = 1 for path in oapi_config["paths"]: api_details = oapi_config["paths"][path] if "get" in api_details.keys(): getter = api_details["get"] if getter is not None: Openapis.api_listing.append({ "id": str(i), "key": 
Openapis.api_short_words[i - 1], "url": path, "method": "get" }) i = i + 1 print("easyApi Shell: processed the apis") return True except: print("easyApi Shell: unable to read the api spec") return False @staticmethod def print_all_apis(): print("{0: >5} | {1: >15} | {2}".format("Id", "Key", "Url")) print("--------------------------------------------------------------------------") for api in Openapis.api_listing: print("{0: >5} | {1: >15} | {2}".format(api["id"],api["key"],api["url"])) @staticmethod def print_selected_apis(apis): print("{0: >5} | {1: >15} | {2}".format("Id", "Key", "Url")) print("--------------------------------------------------------------------------") for api in apis: print("{0: >5} | {1: >15} | {2}".format(api["id"],api["key"],api["url"]))
0.202286
0.182389
class Node(object): '''A sentinel node or data node of a circular doubly-linked list (CDLL).''' def __init__(self, sentinel=None, key=None, value=None): '''Constructs a node and places it in a new or existing CDLL.''' if sentinel is None: # This is a new CDLL's sentinel node. assert key is None and value is None self.key = self.value = None self.left = self.right = self else: # This is a new data node at the front of an existing CDLL. assert key is not None and value is not None self.key = key self.value = value sentinel._splice_detached_right(self) def splice_right(self, other): '''Inserts other right of self, after relinking other's neighbors.''' other.left.right = other.right other.right.left = other.left self._splice_detached_right(other) def _splice_detached_right(self, other): '''Inserts other right of self. Assumes other is not already linked.''' other.left = self other.right = self.right self.right.left = other self.right = other class LRUCache(object): '''A least-recently-used cache, storing keys and associated values.''' def __init__(self, capacity, default_value=-1): '''Initializes the cache.''' if capacity <= 0: raise ValueError('capacity must be positive') self._capacity = capacity self._length = 0 self._default_value = default_value self._queue = Node() self._table = {} def get(self, key): '''Bumps the node for the specified key, returning the stored value.''' try: node = self._table[key] self._queue.splice_right(node) return node.value except KeyError: return self._default_value def put(self, key, value): '''Inserts or updates (and bumps) the key with the specified value.''' try: node = self._table[key] node.value = value self._queue.splice_right(node) except KeyError: if self._length == self._capacity: # reuse the lowest-priority node node = self._queue.left del self._table[node.key] self._table[key] = node self._queue.splice_right(node) node.key = key node.value = value else: # add a new node assert self._length < self._capacity self._length += 1 
self._table[key] = Node(self._queue, key, value) # Your LRUCache object will be instantiated and called as such: # obj = LRUCache(capacity) # param_1 = obj.get(key) # obj.put(key,value)
main/lru-cache-lc/lru-cache-lc.py
class Node(object): '''A sentinel node or data node of a circular doubly-linked list (CDLL).''' def __init__(self, sentinel=None, key=None, value=None): '''Constructs a node and places it in a new or existing CDLL.''' if sentinel is None: # This is a new CDLL's sentinel node. assert key is None and value is None self.key = self.value = None self.left = self.right = self else: # This is a new data node at the front of an existing CDLL. assert key is not None and value is not None self.key = key self.value = value sentinel._splice_detached_right(self) def splice_right(self, other): '''Inserts other right of self, after relinking other's neighbors.''' other.left.right = other.right other.right.left = other.left self._splice_detached_right(other) def _splice_detached_right(self, other): '''Inserts other right of self. Assumes other is not already linked.''' other.left = self other.right = self.right self.right.left = other self.right = other class LRUCache(object): '''A least-recently-used cache, storing keys and associated values.''' def __init__(self, capacity, default_value=-1): '''Initializes the cache.''' if capacity <= 0: raise ValueError('capacity must be positive') self._capacity = capacity self._length = 0 self._default_value = default_value self._queue = Node() self._table = {} def get(self, key): '''Bumps the node for the specified key, returning the stored value.''' try: node = self._table[key] self._queue.splice_right(node) return node.value except KeyError: return self._default_value def put(self, key, value): '''Inserts or updates (and bumps) the key with the specified value.''' try: node = self._table[key] node.value = value self._queue.splice_right(node) except KeyError: if self._length == self._capacity: # reuse the lowest-priority node node = self._queue.left del self._table[node.key] self._table[key] = node self._queue.splice_right(node) node.key = key node.value = value else: # add a new node assert self._length < self._capacity self._length += 1 
self._table[key] = Node(self._queue, key, value) # Your LRUCache object will be instantiated and called as such: # obj = LRUCache(capacity) # param_1 = obj.get(key) # obj.put(key,value)
0.724481
0.356447
from .network import Network import tensorflow as tf class VisualContext(Network): """Visual context feature fusion.""" def _interpolate(self, xy1, xy2, points2): batch_size = tf.shape(xy1)[0] ndataset1 = tf.shape(xy1)[1] eps = 1e-6 dist_mat = tf.matmul(xy1, xy2, transpose_b=True) norm1 = tf.reduce_sum(xy1 * xy1, axis=-1, keepdims=True) norm2 = tf.reduce_sum(xy2 * xy2, axis=-1, keepdims=True) dist_mat = tf.sqrt(norm1 - 2 * dist_mat + tf.linalg.matrix_transpose(norm2) + eps) dist, idx = tf.math.top_k(tf.negative(dist_mat), k=3) dist = tf.maximum(dist, 1e-10) norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True) norm = tf.tile(norm, [1, 1, 3]) weight = (1.0 / dist) / norm idx = tf.reshape(idx, (batch_size, -1)) nn_points = tf.batch_gather(points2, idx) nn_points = tf.reshape(nn_points, (batch_size, ndataset1, 3, points2.get_shape()[-1].value)) interpolated_points = tf.reduce_sum(weight[..., tf.newaxis] * nn_points, axis=-2) return interpolated_points def _mlp(module, inputs, mlp, last_relu=True, reuse=False, name='conv_%d'): new_points = tf.expand_dims(inputs, axis=2) for i, num_out_channel in enumerate(mlp): if (i == len(mlp) - 1) and not last_relu: new_points = module.conv_bn(new_points, 1, num_out_channel, 1, padding='VALID', relu=False, reuse=reuse, name=name % (i)) else: new_points = module.conv_bn(new_points, 1, num_out_channel, 1, padding='VALID', reuse=reuse, name=name % (i)) new_points = tf.squeeze(new_points, axis=2) return new_points def setup(self): grid_pts = self.inputs['grid_pts'] local_feat = self.inputs['local_feat'] vis_context_feat = self.inputs['img_feat'] kpt_param = self.inputs['kpt_param'] batch_size = tf.shape(vis_context_feat)[0] img_feat_dim = vis_context_feat.get_shape()[-1].value out_vis_context = None (self.feed('img_feat') .reshape((batch_size, -1, 1, img_feat_dim)) .conv(1, 1024, 1, relu=False, name='conv1') .context_normalization(name='conv1_cn') .batch_normalization(relu=True, name='conv1_bn') .conv(1, 512, 1, relu=False, 
name='conv2') .context_normalization(name='conv2_cn') .batch_normalization(relu=True, name='conv2_bn') .squeeze(axis=2, name='squeeze')) trans_vis_context_feat = self.layers['squeeze'] inter_vis_context_feat = self._interpolate( kpt_param, tf.reshape(grid_pts, (batch_size, -1, 2)), trans_vis_context_feat) fused_feat = tf.concat((inter_vis_context_feat, local_feat), axis=-1) out_vis_context = self._mlp(fused_feat, [512, 256, 128], name='fuse_photo_context_%d') self.terminals.append(out_vis_context) class MatchabilityPrediction(Network): """Matchability prediction.""" def setup(self): (self.feed('data') .conv_bn(8, 128, 1, padding='VALID', name='kpt_m_conv0') .conv_bn(1, 32, 1, padding='VALID', name='kpt_m_conv1') .conv_bn(1, 32, 1, padding='VALID', name='kpt_m_conv2') .conv(1, 1, 1, biased=True, relu=False, padding='VALID', name='kpt_m') .fc(1, biased=True, relu=False, flatten=False) .tanh(name='kpt_m_rescale')) class LightContextNormalization(Network): """Context normalization definition.""" def setup(self): (self.feed('points') .conv(1, 128, 1, relu=False, name='dim_control') .context_normalization(name='cn1_cn1') .batch_normalization(relu=True, name='cn1_bn1') .conv(1, 128, 1, relu=False, name='cn1_conv1')) (self.feed('dim_control', 'cn1_conv1') .add(name='res1')) (self.feed('res1') .context_normalization(name='cn2_cn1') .batch_normalization(relu=True, name='cn2_bn1') .conv(1, 128, 1, relu=False, name='cn2_conv1')) (self.feed('res1', 'cn2_conv1') .add(name='res2')) (self.feed('res2') .context_normalization(name='cn3_cn1') .batch_normalization(relu=True, name='cn3_bn1') .conv(1, 128, 1, relu=False, name='cn3_conv1')) (self.feed('res2', 'cn3_conv1') .add(name='res3')) (self.feed('res3') .context_normalization(name='cn4_cn1') .batch_normalization(relu=True, name='cn4_bn1') .conv(1, 128, 1, relu=False, name='cn4_conv1')) (self.feed('res3', 'cn4_conv1') .add(name='res4') .conv(1, 128, 1, relu=False, name='context_trans') .squeeze(axis=2, name='context_feat'))
pyslam/thirdparty/contextdesc/models/cnn_wrapper/augdesc.py
from .network import Network import tensorflow as tf class VisualContext(Network): """Visual context feature fusion.""" def _interpolate(self, xy1, xy2, points2): batch_size = tf.shape(xy1)[0] ndataset1 = tf.shape(xy1)[1] eps = 1e-6 dist_mat = tf.matmul(xy1, xy2, transpose_b=True) norm1 = tf.reduce_sum(xy1 * xy1, axis=-1, keepdims=True) norm2 = tf.reduce_sum(xy2 * xy2, axis=-1, keepdims=True) dist_mat = tf.sqrt(norm1 - 2 * dist_mat + tf.linalg.matrix_transpose(norm2) + eps) dist, idx = tf.math.top_k(tf.negative(dist_mat), k=3) dist = tf.maximum(dist, 1e-10) norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True) norm = tf.tile(norm, [1, 1, 3]) weight = (1.0 / dist) / norm idx = tf.reshape(idx, (batch_size, -1)) nn_points = tf.batch_gather(points2, idx) nn_points = tf.reshape(nn_points, (batch_size, ndataset1, 3, points2.get_shape()[-1].value)) interpolated_points = tf.reduce_sum(weight[..., tf.newaxis] * nn_points, axis=-2) return interpolated_points def _mlp(module, inputs, mlp, last_relu=True, reuse=False, name='conv_%d'): new_points = tf.expand_dims(inputs, axis=2) for i, num_out_channel in enumerate(mlp): if (i == len(mlp) - 1) and not last_relu: new_points = module.conv_bn(new_points, 1, num_out_channel, 1, padding='VALID', relu=False, reuse=reuse, name=name % (i)) else: new_points = module.conv_bn(new_points, 1, num_out_channel, 1, padding='VALID', reuse=reuse, name=name % (i)) new_points = tf.squeeze(new_points, axis=2) return new_points def setup(self): grid_pts = self.inputs['grid_pts'] local_feat = self.inputs['local_feat'] vis_context_feat = self.inputs['img_feat'] kpt_param = self.inputs['kpt_param'] batch_size = tf.shape(vis_context_feat)[0] img_feat_dim = vis_context_feat.get_shape()[-1].value out_vis_context = None (self.feed('img_feat') .reshape((batch_size, -1, 1, img_feat_dim)) .conv(1, 1024, 1, relu=False, name='conv1') .context_normalization(name='conv1_cn') .batch_normalization(relu=True, name='conv1_bn') .conv(1, 512, 1, relu=False, 
name='conv2') .context_normalization(name='conv2_cn') .batch_normalization(relu=True, name='conv2_bn') .squeeze(axis=2, name='squeeze')) trans_vis_context_feat = self.layers['squeeze'] inter_vis_context_feat = self._interpolate( kpt_param, tf.reshape(grid_pts, (batch_size, -1, 2)), trans_vis_context_feat) fused_feat = tf.concat((inter_vis_context_feat, local_feat), axis=-1) out_vis_context = self._mlp(fused_feat, [512, 256, 128], name='fuse_photo_context_%d') self.terminals.append(out_vis_context) class MatchabilityPrediction(Network): """Matchability prediction.""" def setup(self): (self.feed('data') .conv_bn(8, 128, 1, padding='VALID', name='kpt_m_conv0') .conv_bn(1, 32, 1, padding='VALID', name='kpt_m_conv1') .conv_bn(1, 32, 1, padding='VALID', name='kpt_m_conv2') .conv(1, 1, 1, biased=True, relu=False, padding='VALID', name='kpt_m') .fc(1, biased=True, relu=False, flatten=False) .tanh(name='kpt_m_rescale')) class LightContextNormalization(Network): """Context normalization definition.""" def setup(self): (self.feed('points') .conv(1, 128, 1, relu=False, name='dim_control') .context_normalization(name='cn1_cn1') .batch_normalization(relu=True, name='cn1_bn1') .conv(1, 128, 1, relu=False, name='cn1_conv1')) (self.feed('dim_control', 'cn1_conv1') .add(name='res1')) (self.feed('res1') .context_normalization(name='cn2_cn1') .batch_normalization(relu=True, name='cn2_bn1') .conv(1, 128, 1, relu=False, name='cn2_conv1')) (self.feed('res1', 'cn2_conv1') .add(name='res2')) (self.feed('res2') .context_normalization(name='cn3_cn1') .batch_normalization(relu=True, name='cn3_bn1') .conv(1, 128, 1, relu=False, name='cn3_conv1')) (self.feed('res2', 'cn3_conv1') .add(name='res3')) (self.feed('res3') .context_normalization(name='cn4_cn1') .batch_normalization(relu=True, name='cn4_bn1') .conv(1, 128, 1, relu=False, name='cn4_conv1')) (self.feed('res3', 'cn4_conv1') .add(name='res4') .conv(1, 128, 1, relu=False, name='context_trans') .squeeze(axis=2, name='context_feat'))
0.829871
0.376165
from parameterized import parameterized import unittest from lib import coordinate_calculator as cc class TransformTest(unittest.TestCase): def test_transform(self): origin_val = (1999, 30) origin_pos = (40, 200) scale = (10, 3) data_list = [(1999.1, 33), (2000.4, 44), (2001.8, 56), (2002.8, 79), (2003.9, 100.2), (2005.2, 82.1)] expected_transformed_list = [] for data, expected in zip(data_list, expected_transformed_list): self.assertEqual( cc.transform(data, origin_val, origin_pos, scale), expected) class ComputeTest(unittest.TestCase): def test_calculate_coordinate_one_line(self): input_data = [[('1999', 21), ('2000', 31), ('2001', 45), ('2002', 11), ('2003', 52), ('2004', 78), ('2005', 132), ('2006', 69), ('2007', 89), ('2008', 98), ('2009', 100), ('2010', 121)]] expected_output_data = [[(125.0, 176.2), (145.0, 164.9), (165.0, 149.0), (185.0, 187.5), (205.0, 141.1), (225.0, 111.6), (245.0, 50.4), (265.0, 121.8), (285.0, 99.13), (305.0, 88.93), (325.0, 86.67), (345.0, 62.87)]] expected_x_ticks = [(45.0, 200.0, '1995'), (145.0, 200.0, '2000'), (245.0, 200.0, '2005'), (345.0, 200.0, '2010')] expected_y_ticks = [(30.0, 200.0, '0'), (30.0, 143.3, '50'), (30.0, 86.67, '100'), (30.0, 30.0, '150')] data, xticks, yticks = cc.compute(input_data, (30, 360), (30, 200)) self.assertEqual(data, expected_output_data) self.assertEqual(xticks, expected_x_ticks) self.assertEqual(yticks, expected_y_ticks) def test_calculate_coordinate_two_lines(self): input_data = [[('1999', 21), ('2000', 31), ('2001', 45), ('2002', 11), ('2003', 52)], [('2002', 31), ('2003', 98), ('2004', 78), ('2009', 100), ('2010', 121)]] expected_output_data = [[(125.0, 176.2), (145.0, 164.9), (165.0, 149.0), (185.0, 187.5), (205.0, 141.1)], [(185.0, 164.9), (205.0, 88.93), (225.0, 111.6), (325.0, 86.67), (345.0, 62.87)]] expected_x_ticks = [(45.0, 200.0, '1995'), (145.0, 200.0, '2000'), (245.0, 200.0, '2005'), (345.0, 200.0, '2010')] expected_y_ticks = [(30.0, 200.0, '0'), (30.0, 143.3, '50'), (30.0, 86.67, 
'100'), (30.0, 30.0, '150')] data, xticks, yticks = cc.compute(input_data, (30, 360), (30, 200)) self.assertEqual(data, expected_output_data) self.assertEqual(xticks, expected_x_ticks) self.assertEqual(yticks, expected_y_ticks) class ComputeYTickTest(unittest.TestCase): @parameterized.expand([ ('1e-4', 0.0004, [0, 1e-4, 2e-4, 3e-4, 4e-4, 5e-4], ['0', '100e-6', '200e-6', '300e-6', '400e-6', '500e-6']), ('1e-3', 0.004, [0, 1e-3, 2e-3, 3e-3, 4e-3, 5e-3 ], ['0', '0.001', '0.002', '0.003', '0.004', '0.005']), ('1e-2', 0.04, [0, 0.01, 0.02, 0.03, 0.04, 0.05 ], ['0', '0.01', '0.02', '0.03', '0.04', '0.05']), ('1e-1', 0.4, [0, 0.1, 0.2, 0.3, 0.4, 0.5 ], ['0', '0.1', '0.2', '0.3', '0.4', '0.5']), ('single', 4, [0, 1, 2, 3, 4, 5], ['0', '1', '2', '3', '4', '5']), ('thousand_1', 2010, [0, 500, 1000, 1500, 2000, 2500 ], ['0', '0.5K', '1.0K', '1.5K', '2.0K', '2.5K']), ('thousand_2', 3203, [0, 1000, 2000, 3000, 4000 ], ['0', '1K', '2K', '3K', '4K']), ('thousand_3', 5203, [0, 2000, 4000, 6000], ['0', '2K', '4K', '6K']), ('thousand_4', 15001, [0, 5000, 10000, 15000, 20000 ], ['0', '5K', '10K', '15K', '20K']), ('thousand_5', 62032, [0, 20000, 40000, 60000, 80000 ], ['0', '20K', '40K', '60K', '80K']), ('million_1', 1.8e6, [0, 0.5e6, 1e6, 1.5e6, 2e6 ], ['0', '0.5M', '1.0M', '1.5M', '2.0M']), ('billion_1', 1.8e9, [0, 0.5e9, 1e9, 1.5e9, 2e9 ], ['0', '0.5B', '1.0B', '1.5B', '2.0B']), ('trillion_1', 1.8e12, [0, 0.5e12, 1e12, 1.5e12, 2e12 ], ['0', '0.5T', '1.0T', '1.5T', '2.0T']), ('quadrillion_1', 1.8e15, [0, 0.5e15, 1e15, 1.5e15, 2e15 ], ['0', '0.5Q', '1.0Q', '1.5Q', '2.0Q']) ]) def test_positive_y_tick(self, name, y, expected_value, expected_text): res = cc._compute_y_ticks(y / 2.0, y) res_val = [x[0] for x in res] res_text = [x[1] for x in res] for a, b in zip(res_val, expected_value): self.assertAlmostEqual(a, b) self.assertEqual(res_text, expected_text) @parameterized.expand([ ('cross', -4, 3, [-4, -2, 0, 2, 4], ['-4', '-2', '0', '2', '4']), ('single', -4, -2, [-4, -3, -2, -1, 0], 
['-4', '-3', '-2', '-1', '0']), ('1e-1', -0.39, -0.02, [-0.4, -0.3, -0.2, -0.1, 0 ], ['-0.4', '-0.3', '-0.2', '-0.1', '0']) ]) def test_negative_y_tick(self, name, y_min, y_max, expected_value, expected_text): res = cc._compute_y_ticks(y_min, y_max) res_val = [x[0] for x in res] res_text = [x[1] for x in res] for a, b in zip(res_val, expected_value): self.assertAlmostEqual(a, b) self.assertEqual(res_text, expected_text) def test_invalid_y(self): with self.assertRaises(ValueError): cc._compute_y_ticks(0, 1e20) class ComputeXTickTest(unittest.TestCase): @parameterized.expand([ ('fraction', (2013.1, 2015.1, 2017.7), [(2013, '2013'), (2015, '2015'), (2017, '2017'), (2018, '2018')]), ('test1', range(2011, 2017), [(2011, '2011'), (2013, '2013'), (2015, '2015'), (2016, '2016')]), ('test2', range(1998, 2015), [(1995, '1995'), (2000, '2000'), (2005, '2005'), (2010, '2010'), (2014, '2014')]), ('test3', range(1998, 2017), [(1995, '1995'), (2000, '2000'), (2005, '2005'), (2010, '2010'), (2015, '2015'), (2016, '2016')]), ('test3_day', [1997.313, 2016], [(1995, '1995'), (2000, '2000'), (2005, '2005'), (2010, '2010'), (2015, '2015'), (2016, '2016')]), # Very much under 5/12 threshold ('test_m1', [2002.1, 2002.12, 2002.13], [(2002.0876712328768, 'Feb 2002'), (2002.164383561644, 'Mar 2002')] ), # Just under 5/12 threshold ('test_m2', [2002.1, 2002.12, 2002.5], [(2002.0876712328768, 'Feb 2002'), (2002.164383561644, 'Mar 2002'), (2002.2493150684932, 'Apr 2002'), (2002.331506849315, 'May 2002'), (2002.4164383561645, 'Jun 2002'), (2002.4986301369863, 'Jul 2002'), (2002.5835616438355, 'Aug 2002')] ), # Just over 5/12 threshold ('test_b1', [2000.01, 2000.4, 2000.45], [(2000.0027322404371, 'Jan 2000'), (2000.1666666666667, 'Mar 2000'), (2000.3333333333333, 'May 2000'), (2000.5, 'Jul 2000')]), # Just under 11/12 threshold ('test_b2', [2000.01, 2000.4, 2000.9], [(2000.0027322404371, 'Jan 2000'), (2000.1666666666667, 'Mar 2000'), (2000.3333333333333, 'May 2000'), (2000.5, 'Jul 2000'), 
(2000.6693989071039, 'Sep 2000'), (2000.8360655737704, 'Nov 2000'), (2001.0027397260274, 'Jan 2001')] ), # Just over 11/12 threshold ('test_q1', [2000.1, 2001.02], [(2000.0874316939892, 'Feb 2000'), (2000.3333333333333, 'May 2000'), (2000.5846994535518, 'Aug 2000'), (2000.8360655737704, 'Nov 2000'), (2001.0876712328768, 'Feb 2001')]), # Just under 17/12 threshold ('test_q2', [2000.1, 2000.7, 2001.5], [(2000.0874316939892, 'Feb 2000'), (2000.3333333333333, 'May 2000'), (2000.5846994535518, 'Aug 2000'), (2000.8360655737704, 'Nov 2000'), (2001.0876712328768, 'Feb 2001'), (2001.331506849315, 'May 2001'), (2001.5835616438355, 'Aug 2001')]), # Just over 17/12 threshold ('test_day_large', [2000.1, 2000.7, 2001.6], [(2000, '2000'), (2002, '2002')]) ]) def test_x_tick(self, name, x, expected): res = cc._compute_x_ticks(x) self.assertEqual(res, expected) if __name__ == '__main__': unittest.main()
server/tests/coordinate_calculator_test.py
from parameterized import parameterized import unittest from lib import coordinate_calculator as cc class TransformTest(unittest.TestCase): def test_transform(self): origin_val = (1999, 30) origin_pos = (40, 200) scale = (10, 3) data_list = [(1999.1, 33), (2000.4, 44), (2001.8, 56), (2002.8, 79), (2003.9, 100.2), (2005.2, 82.1)] expected_transformed_list = [] for data, expected in zip(data_list, expected_transformed_list): self.assertEqual( cc.transform(data, origin_val, origin_pos, scale), expected) class ComputeTest(unittest.TestCase): def test_calculate_coordinate_one_line(self): input_data = [[('1999', 21), ('2000', 31), ('2001', 45), ('2002', 11), ('2003', 52), ('2004', 78), ('2005', 132), ('2006', 69), ('2007', 89), ('2008', 98), ('2009', 100), ('2010', 121)]] expected_output_data = [[(125.0, 176.2), (145.0, 164.9), (165.0, 149.0), (185.0, 187.5), (205.0, 141.1), (225.0, 111.6), (245.0, 50.4), (265.0, 121.8), (285.0, 99.13), (305.0, 88.93), (325.0, 86.67), (345.0, 62.87)]] expected_x_ticks = [(45.0, 200.0, '1995'), (145.0, 200.0, '2000'), (245.0, 200.0, '2005'), (345.0, 200.0, '2010')] expected_y_ticks = [(30.0, 200.0, '0'), (30.0, 143.3, '50'), (30.0, 86.67, '100'), (30.0, 30.0, '150')] data, xticks, yticks = cc.compute(input_data, (30, 360), (30, 200)) self.assertEqual(data, expected_output_data) self.assertEqual(xticks, expected_x_ticks) self.assertEqual(yticks, expected_y_ticks) def test_calculate_coordinate_two_lines(self): input_data = [[('1999', 21), ('2000', 31), ('2001', 45), ('2002', 11), ('2003', 52)], [('2002', 31), ('2003', 98), ('2004', 78), ('2009', 100), ('2010', 121)]] expected_output_data = [[(125.0, 176.2), (145.0, 164.9), (165.0, 149.0), (185.0, 187.5), (205.0, 141.1)], [(185.0, 164.9), (205.0, 88.93), (225.0, 111.6), (325.0, 86.67), (345.0, 62.87)]] expected_x_ticks = [(45.0, 200.0, '1995'), (145.0, 200.0, '2000'), (245.0, 200.0, '2005'), (345.0, 200.0, '2010')] expected_y_ticks = [(30.0, 200.0, '0'), (30.0, 143.3, '50'), (30.0, 86.67, 
'100'), (30.0, 30.0, '150')] data, xticks, yticks = cc.compute(input_data, (30, 360), (30, 200)) self.assertEqual(data, expected_output_data) self.assertEqual(xticks, expected_x_ticks) self.assertEqual(yticks, expected_y_ticks) class ComputeYTickTest(unittest.TestCase): @parameterized.expand([ ('1e-4', 0.0004, [0, 1e-4, 2e-4, 3e-4, 4e-4, 5e-4], ['0', '100e-6', '200e-6', '300e-6', '400e-6', '500e-6']), ('1e-3', 0.004, [0, 1e-3, 2e-3, 3e-3, 4e-3, 5e-3 ], ['0', '0.001', '0.002', '0.003', '0.004', '0.005']), ('1e-2', 0.04, [0, 0.01, 0.02, 0.03, 0.04, 0.05 ], ['0', '0.01', '0.02', '0.03', '0.04', '0.05']), ('1e-1', 0.4, [0, 0.1, 0.2, 0.3, 0.4, 0.5 ], ['0', '0.1', '0.2', '0.3', '0.4', '0.5']), ('single', 4, [0, 1, 2, 3, 4, 5], ['0', '1', '2', '3', '4', '5']), ('thousand_1', 2010, [0, 500, 1000, 1500, 2000, 2500 ], ['0', '0.5K', '1.0K', '1.5K', '2.0K', '2.5K']), ('thousand_2', 3203, [0, 1000, 2000, 3000, 4000 ], ['0', '1K', '2K', '3K', '4K']), ('thousand_3', 5203, [0, 2000, 4000, 6000], ['0', '2K', '4K', '6K']), ('thousand_4', 15001, [0, 5000, 10000, 15000, 20000 ], ['0', '5K', '10K', '15K', '20K']), ('thousand_5', 62032, [0, 20000, 40000, 60000, 80000 ], ['0', '20K', '40K', '60K', '80K']), ('million_1', 1.8e6, [0, 0.5e6, 1e6, 1.5e6, 2e6 ], ['0', '0.5M', '1.0M', '1.5M', '2.0M']), ('billion_1', 1.8e9, [0, 0.5e9, 1e9, 1.5e9, 2e9 ], ['0', '0.5B', '1.0B', '1.5B', '2.0B']), ('trillion_1', 1.8e12, [0, 0.5e12, 1e12, 1.5e12, 2e12 ], ['0', '0.5T', '1.0T', '1.5T', '2.0T']), ('quadrillion_1', 1.8e15, [0, 0.5e15, 1e15, 1.5e15, 2e15 ], ['0', '0.5Q', '1.0Q', '1.5Q', '2.0Q']) ]) def test_positive_y_tick(self, name, y, expected_value, expected_text): res = cc._compute_y_ticks(y / 2.0, y) res_val = [x[0] for x in res] res_text = [x[1] for x in res] for a, b in zip(res_val, expected_value): self.assertAlmostEqual(a, b) self.assertEqual(res_text, expected_text) @parameterized.expand([ ('cross', -4, 3, [-4, -2, 0, 2, 4], ['-4', '-2', '0', '2', '4']), ('single', -4, -2, [-4, -3, -2, -1, 0], 
['-4', '-3', '-2', '-1', '0']), ('1e-1', -0.39, -0.02, [-0.4, -0.3, -0.2, -0.1, 0 ], ['-0.4', '-0.3', '-0.2', '-0.1', '0']) ]) def test_negative_y_tick(self, name, y_min, y_max, expected_value, expected_text): res = cc._compute_y_ticks(y_min, y_max) res_val = [x[0] for x in res] res_text = [x[1] for x in res] for a, b in zip(res_val, expected_value): self.assertAlmostEqual(a, b) self.assertEqual(res_text, expected_text) def test_invalid_y(self): with self.assertRaises(ValueError): cc._compute_y_ticks(0, 1e20) class ComputeXTickTest(unittest.TestCase): @parameterized.expand([ ('fraction', (2013.1, 2015.1, 2017.7), [(2013, '2013'), (2015, '2015'), (2017, '2017'), (2018, '2018')]), ('test1', range(2011, 2017), [(2011, '2011'), (2013, '2013'), (2015, '2015'), (2016, '2016')]), ('test2', range(1998, 2015), [(1995, '1995'), (2000, '2000'), (2005, '2005'), (2010, '2010'), (2014, '2014')]), ('test3', range(1998, 2017), [(1995, '1995'), (2000, '2000'), (2005, '2005'), (2010, '2010'), (2015, '2015'), (2016, '2016')]), ('test3_day', [1997.313, 2016], [(1995, '1995'), (2000, '2000'), (2005, '2005'), (2010, '2010'), (2015, '2015'), (2016, '2016')]), # Very much under 5/12 threshold ('test_m1', [2002.1, 2002.12, 2002.13], [(2002.0876712328768, 'Feb 2002'), (2002.164383561644, 'Mar 2002')] ), # Just under 5/12 threshold ('test_m2', [2002.1, 2002.12, 2002.5], [(2002.0876712328768, 'Feb 2002'), (2002.164383561644, 'Mar 2002'), (2002.2493150684932, 'Apr 2002'), (2002.331506849315, 'May 2002'), (2002.4164383561645, 'Jun 2002'), (2002.4986301369863, 'Jul 2002'), (2002.5835616438355, 'Aug 2002')] ), # Just over 5/12 threshold ('test_b1', [2000.01, 2000.4, 2000.45], [(2000.0027322404371, 'Jan 2000'), (2000.1666666666667, 'Mar 2000'), (2000.3333333333333, 'May 2000'), (2000.5, 'Jul 2000')]), # Just under 11/12 threshold ('test_b2', [2000.01, 2000.4, 2000.9], [(2000.0027322404371, 'Jan 2000'), (2000.1666666666667, 'Mar 2000'), (2000.3333333333333, 'May 2000'), (2000.5, 'Jul 2000'), 
(2000.6693989071039, 'Sep 2000'), (2000.8360655737704, 'Nov 2000'), (2001.0027397260274, 'Jan 2001')] ), # Just over 11/12 threshold ('test_q1', [2000.1, 2001.02], [(2000.0874316939892, 'Feb 2000'), (2000.3333333333333, 'May 2000'), (2000.5846994535518, 'Aug 2000'), (2000.8360655737704, 'Nov 2000'), (2001.0876712328768, 'Feb 2001')]), # Just under 17/12 threshold ('test_q2', [2000.1, 2000.7, 2001.5], [(2000.0874316939892, 'Feb 2000'), (2000.3333333333333, 'May 2000'), (2000.5846994535518, 'Aug 2000'), (2000.8360655737704, 'Nov 2000'), (2001.0876712328768, 'Feb 2001'), (2001.331506849315, 'May 2001'), (2001.5835616438355, 'Aug 2001')]), # Just over 17/12 threshold ('test_day_large', [2000.1, 2000.7, 2001.6], [(2000, '2000'), (2002, '2002')]) ]) def test_x_tick(self, name, x, expected): res = cc._compute_x_ticks(x) self.assertEqual(res, expected) if __name__ == '__main__': unittest.main()
0.658527
0.608391
import sys import re from optparse import OptionParser import subprocess import os import time #modify /Lustre02/data/hic/highres_human/allsamples/'''+mytissue+'''_forsort/allcisandtrans.hic for your specific case #modify alltissues=['A1','A2','A3','A4'] for your specific case class generate: def __init__ (self,path='/Lustre01/tangqianzi/forJinOrth/expresion_level_coreAtlas/',mytype='lncRNA',mysample='BC1'): self.path = path self.input1 = open(path+'/TSS_'+mytype+'.annotate.txt','r') self.input2 = open('/Lustre01/tangqianzi/forJinOrth/expresion_level_coreAtlas/ULB_TADfiles/'+mysample+'/total.combined.domaincalls','r') self.input3 = open(path+'/TSS_'+mytype+'.txt','r') self.output = open(path+'/forplot_'+mytype+'_'+mysample+'.txt','w') ## self.input2 = open(path+'/spearman1_'+mytype+'.txt','r') self.maxchr = '18' self.mytype = mytype self.cutoff = '0.5' def generate (self): gene2TSS={} mychroms=[] allTADs={} for i in range(1,int(self.maxchr)+1): mychroms.append(str(i)) allTADs[str(i)]=[] gene2TSS[str(i)]={} for line in self.input3: line=line.rstrip() parts=line.split('\t') if parts[0] in mychroms: gene2TSS[parts[0]][parts[2]]=int(parts[1]) for line in self.input2: line=line.rstrip() parts=line.split('\t') if parts[0] in mychroms: allTADs[parts[0]].append([int(parts[1]),int(parts[2])]) allpairs_res={} allpairs={} for i in range(0,6): #all>0.5, all, all>0.5 withinTAD, all withinTAD allpairs_res[str(i)]=[0,0,0,0] ## allpairs[str(i)]={} for mychr in mychroms: allpairs[mychr]={} for line in self.input1: line=line.rstrip() parts=line.split('\t') pairnames=[parts[0],parts[1]] pairnames.sort() allpairs[parts[3]][pairnames[0]+'-'+pairnames[1]]=parts[2] for mychr in mychroms: handle=open(self.path+'/spearman1_'+self.mytype+'_'+mychr+'.txt','r') m=0 ## allkeep={} for line in handle: line=line.rstrip() parts=line.split('\t') if m==0: allnames=parts else: for n in range(1,len(parts)): name1=parts[0] name2=allnames[n-1] if name1==name2: continue pairnames=[name1,name2] 
pairnames.sort() name1=pairnames[0] name2=pairnames[1] if pairnames[0]+'-'+pairnames[1] not in allpairs[mychr]: continue mytype=allpairs[mychr][pairnames[0]+'-'+pairnames[1]] if float(parts[n])>float(self.cutoff): allpairs_res[mytype][0]+=1 allpairs_res[mytype][1]+=1 gene1pos=gene2TSS[mychr][name1] gene2pos=gene2TSS[mychr][name2] sigsameTAD=0 for eachparts in allTADs[mychr]: start=eachparts[0] end=eachparts[1] if gene1pos>start and gene1pos<end and gene2pos>start and gene2pos<end: sigsameTAD=1 break if sigsameTAD==1: if float(parts[n])>float(self.cutoff): allpairs_res[mytype][2]+=1 allpairs_res[mytype][3]+=1 m+=1 handle.close() for i in range(0,6): ratio1=float(allpairs_res[str(i)][2])/allpairs_res[str(i)][0] ratio2=float(allpairs_res[str(i)][3])/allpairs_res[str(i)][1] print>>self.output,str(i)+'\t'+str(allpairs_res[str(i)][0])+'\t'+str(allpairs_res[str(i)][1])+'\t'+str(allpairs_res[str(i)][2])+'\t'+str(allpairs_res[str(i)][3])+'\t'+str(ratio1)+'\t'+str(ratio2) self.input1.close() self.input2.close() self.input3.close() self.output.close() def main(): usage = "usage: %prog [options] <pathandfiles>" description = "Generate jobs." optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False) optparser.add_option("-h","--help",action="help",help="Show this help message and exit.") (options,pathandfiles) = optparser.parse_args() generate(mytype=pathandfiles[0],mysample=pathandfiles[1]).generate() if __name__ == '__main__': try: main() except KeyboardInterrupt: sys.stderr.write("User interrupt me! ;-) See you!\n") sys.exit(0)
code/06.prepare_forfinalplot.py
import sys import re from optparse import OptionParser import subprocess import os import time #modify /Lustre02/data/hic/highres_human/allsamples/'''+mytissue+'''_forsort/allcisandtrans.hic for your specific case #modify alltissues=['A1','A2','A3','A4'] for your specific case class generate: def __init__ (self,path='/Lustre01/tangqianzi/forJinOrth/expresion_level_coreAtlas/',mytype='lncRNA',mysample='BC1'): self.path = path self.input1 = open(path+'/TSS_'+mytype+'.annotate.txt','r') self.input2 = open('/Lustre01/tangqianzi/forJinOrth/expresion_level_coreAtlas/ULB_TADfiles/'+mysample+'/total.combined.domaincalls','r') self.input3 = open(path+'/TSS_'+mytype+'.txt','r') self.output = open(path+'/forplot_'+mytype+'_'+mysample+'.txt','w') ## self.input2 = open(path+'/spearman1_'+mytype+'.txt','r') self.maxchr = '18' self.mytype = mytype self.cutoff = '0.5' def generate (self): gene2TSS={} mychroms=[] allTADs={} for i in range(1,int(self.maxchr)+1): mychroms.append(str(i)) allTADs[str(i)]=[] gene2TSS[str(i)]={} for line in self.input3: line=line.rstrip() parts=line.split('\t') if parts[0] in mychroms: gene2TSS[parts[0]][parts[2]]=int(parts[1]) for line in self.input2: line=line.rstrip() parts=line.split('\t') if parts[0] in mychroms: allTADs[parts[0]].append([int(parts[1]),int(parts[2])]) allpairs_res={} allpairs={} for i in range(0,6): #all>0.5, all, all>0.5 withinTAD, all withinTAD allpairs_res[str(i)]=[0,0,0,0] ## allpairs[str(i)]={} for mychr in mychroms: allpairs[mychr]={} for line in self.input1: line=line.rstrip() parts=line.split('\t') pairnames=[parts[0],parts[1]] pairnames.sort() allpairs[parts[3]][pairnames[0]+'-'+pairnames[1]]=parts[2] for mychr in mychroms: handle=open(self.path+'/spearman1_'+self.mytype+'_'+mychr+'.txt','r') m=0 ## allkeep={} for line in handle: line=line.rstrip() parts=line.split('\t') if m==0: allnames=parts else: for n in range(1,len(parts)): name1=parts[0] name2=allnames[n-1] if name1==name2: continue pairnames=[name1,name2] 
pairnames.sort() name1=pairnames[0] name2=pairnames[1] if pairnames[0]+'-'+pairnames[1] not in allpairs[mychr]: continue mytype=allpairs[mychr][pairnames[0]+'-'+pairnames[1]] if float(parts[n])>float(self.cutoff): allpairs_res[mytype][0]+=1 allpairs_res[mytype][1]+=1 gene1pos=gene2TSS[mychr][name1] gene2pos=gene2TSS[mychr][name2] sigsameTAD=0 for eachparts in allTADs[mychr]: start=eachparts[0] end=eachparts[1] if gene1pos>start and gene1pos<end and gene2pos>start and gene2pos<end: sigsameTAD=1 break if sigsameTAD==1: if float(parts[n])>float(self.cutoff): allpairs_res[mytype][2]+=1 allpairs_res[mytype][3]+=1 m+=1 handle.close() for i in range(0,6): ratio1=float(allpairs_res[str(i)][2])/allpairs_res[str(i)][0] ratio2=float(allpairs_res[str(i)][3])/allpairs_res[str(i)][1] print>>self.output,str(i)+'\t'+str(allpairs_res[str(i)][0])+'\t'+str(allpairs_res[str(i)][1])+'\t'+str(allpairs_res[str(i)][2])+'\t'+str(allpairs_res[str(i)][3])+'\t'+str(ratio1)+'\t'+str(ratio2) self.input1.close() self.input2.close() self.input3.close() self.output.close() def main(): usage = "usage: %prog [options] <pathandfiles>" description = "Generate jobs." optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False) optparser.add_option("-h","--help",action="help",help="Show this help message and exit.") (options,pathandfiles) = optparser.parse_args() generate(mytype=pathandfiles[0],mysample=pathandfiles[1]).generate() if __name__ == '__main__': try: main() except KeyboardInterrupt: sys.stderr.write("User interrupt me! ;-) See you!\n") sys.exit(0)
0.05572
0.161122
import openmdao.api as om import numpy as np class TireODE(om.ExplicitComponent): def initialize(self): self.options.declare('num_nodes', types=int) def setup(self): nn = self.options['num_nodes'] #constants self.add_input('M', val=0.0, desc='mass', units='kg') self.add_input('g', val=9.8, desc='mass', units='m/s**2') #N self.add_input('a', val=1.8, desc='cg to front distance', units='m') self.add_input('b', val=1.6, desc='cg to rear distance', units='m') self.add_input('tw', val=0.73, desc='half track width', units='m') self.add_input('beta', val=0.0, desc='braking bias', units=None) #val = 0.62 self.add_input('k_lambda', val=44.0, desc='tire lateral stiffness', units=None) #states self.add_input('V', val=np.zeros(nn), desc='speed', units='m/s') self.add_input('lambda', val=np.zeros(nn), desc='body slip angle', units='rad') self.add_input('omega', val=np.zeros(nn), desc='yaw rate', units='rad/s') #normal load inputs self.add_input('N_rr', val=np.zeros(nn), desc='normal load rr', units='N') self.add_input('N_fr', val=np.zeros(nn), desc='normal load fr', units='N') self.add_input('N_rl', val=np.zeros(nn), desc='normal load rl', units='N') self.add_input('N_fl', val=np.zeros(nn), desc='normal load fl', units='N') #tire load outputs self.add_output('S_fl', val=np.zeros(nn), desc='longitudinal force fl', units='N') self.add_output('S_fr', val=np.zeros(nn), desc='longitudinal force fr', units='N') self.add_output('S_rl', val=np.zeros(nn), desc='longitudinal force rl', units='N') self.add_output('S_rr', val=np.zeros(nn), desc='longitudinal force rr', units='N') self.add_output('F_fl', val=np.zeros(nn), desc='lateral force fl', units='N') self.add_output('F_fr', val=np.zeros(nn), desc='lateral force fr', units='N') self.add_output('F_rl', val=np.zeros(nn), desc='lateral force rl', units='N') self.add_output('F_rr', val=np.zeros(nn), desc='lateral force rr', units='N') #controls self.add_input('thrust', val=np.zeros(nn), desc='thrust', units=None) self.add_input('delta', 
val=np.zeros(nn), desc='steering angle', units='rad') # Setup partials arange = np.arange(self.options['num_nodes'], dtype=int) self.declare_partials(of='S_fl', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='S_fr', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='S_rl', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='S_rr', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='F_rr',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_rr',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_rr',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_rr',wrt='N_rr',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='N_fr',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='N_rl',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='N_fl',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='delta',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='delta',rows=arange,cols=arange) def compute(self, inputs, outputs): omega = inputs['omega'] V = inputs['V'] lamb = inputs['lambda'] M = inputs['M'] g = inputs['g'] a = inputs['a'] b = inputs['b'] tw = inputs['tw'] N_rr = inputs['N_rr'] N_rl = inputs['N_rl'] N_fr = inputs['N_fr'] N_fl = inputs['N_fl'] delta = inputs['delta'] beta = inputs['beta'] k_lambda = inputs['k_lambda'] thrust 
= inputs['thrust'] #split thrust signal into a throttle and brake signal signs = np.sign(thrust) signs2 = np.where(signs<1,0,1) brake = (signs2-1)*thrust #postive throttle = signs2*thrust outputs['S_fl'] = -(M*g/2)*brake*beta outputs['S_fr'] = -(M*g/2)*brake*beta outputs['S_rl'] = (M*g/2)*(throttle-brake*(1-beta)) outputs['S_rr'] = (M*g/2)*(throttle-brake*(1-beta)) outputs['F_rr'] = N_rr*k_lambda*(lamb+(omega*(b+lamb*tw))/V) outputs['F_rl'] = N_rl*k_lambda*(lamb+(omega*(b-lamb*tw))/V) outputs['F_fr'] = N_fr*k_lambda*(lamb+delta-(omega*(a-lamb*tw))/V) outputs['F_fl'] = N_fl*k_lambda*(lamb+delta-(omega*(a+lamb*tw))/V) def compute_partials(self, inputs, jacobian): omega = inputs['omega'] V = inputs['V'] lamb = inputs['lambda'] M = inputs['M'] g = inputs['g'] a = inputs['a'] b = inputs['b'] tw = inputs['tw'] N_rr = inputs['N_rr'] N_rl = inputs['N_rl'] N_fr = inputs['N_fr'] N_fl = inputs['N_fl'] delta = inputs['delta'] beta = inputs['beta'] k_lambda = inputs['k_lambda'] thrust = inputs['thrust'] signs = np.sign(thrust) signs2 = np.where(signs<1,0,1) #-1 where brake is active brake = (signs2-1) #positive throttle = signs2 jacobian['S_fl', 'thrust'] = -(M*g/2)*beta*brake jacobian['S_fr', 'thrust'] = -(M*g/2)*beta*brake jacobian['S_rl', 'thrust'] = -(M*g/2)*(1-beta)*brake+(M*g/2)*throttle jacobian['S_rr', 'thrust'] = -(M*g/2)*(1-beta)*brake+(M*g/2)*throttle jacobian['F_rr','N_rr'] = k_lambda*(lamb+(omega*(b+lamb*tw))/V) jacobian['F_rl','N_rl'] = k_lambda*(lamb+(omega*(b-lamb*tw))/V) jacobian['F_fr','N_fr'] = k_lambda*(lamb+delta-(omega*(a-lamb*tw))/V) jacobian['F_fl','N_fl'] = k_lambda*(lamb+delta-(omega*(a+lamb*tw))/V) jacobian['F_rr','lambda'] = N_rr*k_lambda*(1+(omega*tw/V)) jacobian['F_fr','lambda'] = N_fr*k_lambda*(1+(omega*tw/V)) jacobian['F_rl','lambda'] = N_rl*k_lambda*(1-(omega*tw/V)) jacobian['F_fl','lambda'] = N_fl*k_lambda*(1-(omega*tw/V)) jacobian['F_rr','omega'] = N_rr*k_lambda*((b+lamb*tw)/V) jacobian['F_fr','omega'] = N_fr*k_lambda*(-(a-lamb*tw)/V) 
jacobian['F_rl','omega'] = N_rl*k_lambda*((b-lamb*tw)/V) jacobian['F_fl','omega'] = N_fl*k_lambda*(-(a+lamb*tw)/V) jacobian['F_fr','delta'] = N_fr*k_lambda jacobian['F_fl','delta'] = N_fl*k_lambda jacobian['F_rr','V'] = N_rr*k_lambda*-(omega*(b+lamb*tw))/V**2 jacobian['F_fr','V'] = N_fr*k_lambda*(omega*(a-lamb*tw))/V**2 jacobian['F_rl','V'] = N_rl*k_lambda*-(omega*(b-lamb*tw))/V**2 jacobian['F_fl','V'] = N_fl*k_lambda*(omega*(a+lamb*tw))/V**2
dymos/examples/racecar/tireODE.py
import openmdao.api as om import numpy as np class TireODE(om.ExplicitComponent): def initialize(self): self.options.declare('num_nodes', types=int) def setup(self): nn = self.options['num_nodes'] #constants self.add_input('M', val=0.0, desc='mass', units='kg') self.add_input('g', val=9.8, desc='mass', units='m/s**2') #N self.add_input('a', val=1.8, desc='cg to front distance', units='m') self.add_input('b', val=1.6, desc='cg to rear distance', units='m') self.add_input('tw', val=0.73, desc='half track width', units='m') self.add_input('beta', val=0.0, desc='braking bias', units=None) #val = 0.62 self.add_input('k_lambda', val=44.0, desc='tire lateral stiffness', units=None) #states self.add_input('V', val=np.zeros(nn), desc='speed', units='m/s') self.add_input('lambda', val=np.zeros(nn), desc='body slip angle', units='rad') self.add_input('omega', val=np.zeros(nn), desc='yaw rate', units='rad/s') #normal load inputs self.add_input('N_rr', val=np.zeros(nn), desc='normal load rr', units='N') self.add_input('N_fr', val=np.zeros(nn), desc='normal load fr', units='N') self.add_input('N_rl', val=np.zeros(nn), desc='normal load rl', units='N') self.add_input('N_fl', val=np.zeros(nn), desc='normal load fl', units='N') #tire load outputs self.add_output('S_fl', val=np.zeros(nn), desc='longitudinal force fl', units='N') self.add_output('S_fr', val=np.zeros(nn), desc='longitudinal force fr', units='N') self.add_output('S_rl', val=np.zeros(nn), desc='longitudinal force rl', units='N') self.add_output('S_rr', val=np.zeros(nn), desc='longitudinal force rr', units='N') self.add_output('F_fl', val=np.zeros(nn), desc='lateral force fl', units='N') self.add_output('F_fr', val=np.zeros(nn), desc='lateral force fr', units='N') self.add_output('F_rl', val=np.zeros(nn), desc='lateral force rl', units='N') self.add_output('F_rr', val=np.zeros(nn), desc='lateral force rr', units='N') #controls self.add_input('thrust', val=np.zeros(nn), desc='thrust', units=None) self.add_input('delta', 
val=np.zeros(nn), desc='steering angle', units='rad') # Setup partials arange = np.arange(self.options['num_nodes'], dtype=int) self.declare_partials(of='S_fl', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='S_fr', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='S_rl', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='S_rr', wrt='thrust', rows=arange, cols=arange) self.declare_partials(of='F_rr',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_rr',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_rr',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_rr',wrt='N_rr',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='N_fr',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_rl',wrt='N_rl',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='V',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='lambda',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='omega',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='N_fl',rows=arange,cols=arange) self.declare_partials(of='F_fr',wrt='delta',rows=arange,cols=arange) self.declare_partials(of='F_fl',wrt='delta',rows=arange,cols=arange) def compute(self, inputs, outputs): omega = inputs['omega'] V = inputs['V'] lamb = inputs['lambda'] M = inputs['M'] g = inputs['g'] a = inputs['a'] b = inputs['b'] tw = inputs['tw'] N_rr = inputs['N_rr'] N_rl = inputs['N_rl'] N_fr = inputs['N_fr'] N_fl = inputs['N_fl'] delta = inputs['delta'] beta = inputs['beta'] k_lambda = inputs['k_lambda'] thrust 
= inputs['thrust'] #split thrust signal into a throttle and brake signal signs = np.sign(thrust) signs2 = np.where(signs<1,0,1) brake = (signs2-1)*thrust #postive throttle = signs2*thrust outputs['S_fl'] = -(M*g/2)*brake*beta outputs['S_fr'] = -(M*g/2)*brake*beta outputs['S_rl'] = (M*g/2)*(throttle-brake*(1-beta)) outputs['S_rr'] = (M*g/2)*(throttle-brake*(1-beta)) outputs['F_rr'] = N_rr*k_lambda*(lamb+(omega*(b+lamb*tw))/V) outputs['F_rl'] = N_rl*k_lambda*(lamb+(omega*(b-lamb*tw))/V) outputs['F_fr'] = N_fr*k_lambda*(lamb+delta-(omega*(a-lamb*tw))/V) outputs['F_fl'] = N_fl*k_lambda*(lamb+delta-(omega*(a+lamb*tw))/V) def compute_partials(self, inputs, jacobian): omega = inputs['omega'] V = inputs['V'] lamb = inputs['lambda'] M = inputs['M'] g = inputs['g'] a = inputs['a'] b = inputs['b'] tw = inputs['tw'] N_rr = inputs['N_rr'] N_rl = inputs['N_rl'] N_fr = inputs['N_fr'] N_fl = inputs['N_fl'] delta = inputs['delta'] beta = inputs['beta'] k_lambda = inputs['k_lambda'] thrust = inputs['thrust'] signs = np.sign(thrust) signs2 = np.where(signs<1,0,1) #-1 where brake is active brake = (signs2-1) #positive throttle = signs2 jacobian['S_fl', 'thrust'] = -(M*g/2)*beta*brake jacobian['S_fr', 'thrust'] = -(M*g/2)*beta*brake jacobian['S_rl', 'thrust'] = -(M*g/2)*(1-beta)*brake+(M*g/2)*throttle jacobian['S_rr', 'thrust'] = -(M*g/2)*(1-beta)*brake+(M*g/2)*throttle jacobian['F_rr','N_rr'] = k_lambda*(lamb+(omega*(b+lamb*tw))/V) jacobian['F_rl','N_rl'] = k_lambda*(lamb+(omega*(b-lamb*tw))/V) jacobian['F_fr','N_fr'] = k_lambda*(lamb+delta-(omega*(a-lamb*tw))/V) jacobian['F_fl','N_fl'] = k_lambda*(lamb+delta-(omega*(a+lamb*tw))/V) jacobian['F_rr','lambda'] = N_rr*k_lambda*(1+(omega*tw/V)) jacobian['F_fr','lambda'] = N_fr*k_lambda*(1+(omega*tw/V)) jacobian['F_rl','lambda'] = N_rl*k_lambda*(1-(omega*tw/V)) jacobian['F_fl','lambda'] = N_fl*k_lambda*(1-(omega*tw/V)) jacobian['F_rr','omega'] = N_rr*k_lambda*((b+lamb*tw)/V) jacobian['F_fr','omega'] = N_fr*k_lambda*(-(a-lamb*tw)/V) 
jacobian['F_rl','omega'] = N_rl*k_lambda*((b-lamb*tw)/V) jacobian['F_fl','omega'] = N_fl*k_lambda*(-(a+lamb*tw)/V) jacobian['F_fr','delta'] = N_fr*k_lambda jacobian['F_fl','delta'] = N_fl*k_lambda jacobian['F_rr','V'] = N_rr*k_lambda*-(omega*(b+lamb*tw))/V**2 jacobian['F_fr','V'] = N_fr*k_lambda*(omega*(a-lamb*tw))/V**2 jacobian['F_rl','V'] = N_rl*k_lambda*-(omega*(b-lamb*tw))/V**2 jacobian['F_fl','V'] = N_fl*k_lambda*(omega*(a+lamb*tw))/V**2
0.409575
0.164382
from flask import Flask, render_template import pandas as pd import matplotlib import matplotlib.pyplot as plt from io import BytesIO import base64 from bs4 import BeautifulSoup import requests #don't change this matplotlib.use('Agg') app = Flask(__name__) #do not change this #insert the scrapping here url_get = requests.get('https://www.coingecko.com/en/coins/ethereum/historical_data/usd?start_date=2020-01-01&end_date=2021-06-30#panel') soup = BeautifulSoup(url_get.content,"html.parser") #find your right key here table = soup.find('table',attrs={'class':'table table-striped text-sm text-lg-normal'}) temp = soup.find_all('th',attrs={'class':'font-semibold text-center'}) row_length = len(temp) temp = [] #initiating a list for i in range(0, row_length): #insert the scrapping process here # get period period = soup.find_all('th',attrs={'class':'font-semibold text-center'})[i].text period = period.strip('\n') # get Market Cap MarketCap = soup.find_all('td',attrs={'class':'text-center'})[i*4].text MarketCap = MarketCap.strip('\n') # get Volume Volume = soup.find_all('td',attrs={'class':'text-center'})[i*4+1].text Volume = Volume.strip('\n') # get Open Open = soup.find_all('td',attrs={'class':'text-center'})[i*4+2].text Open = Open.strip('\n') # get Close Close = soup.find_all('td',attrs={'class':'text-center'})[i*4+3].text Close = Close.strip('\n') #scrapping process temp.append((period,Volume)) temp = temp[::-1] #change into dataframe #df = pd.DataFrame(temp,columns = ('period','MarketCap','Volume','Open','Close')) df = pd.DataFrame(temp,columns = ('period','Volume')) #insert data wrangling here df['period'] = df['period'].astype('datetime64') #df['MarketCap'] = df['MarketCap'].str.replace("$","") #df['MarketCap'] = df['MarketCap'].str.replace(",","") #df['MarketCap'] = df['MarketCap'].astype('float64') df['Volume'] = df['Volume'].str.replace("$","") df['Volume'] = df['Volume'].str.replace(",","") df['Volume'] = df['Volume'].astype('float64') #df['Open'] = 
df['Open'].str.replace("$","") #df['Open'] = df['Open'].str.replace(",","") #df['Open'] = df['Open'].astype('float64') #df['Close'] = df['Close'].str.replace("$","") #df['Close'] = df['Close'].str.replace(",","") #df['Close'] = df['Close'].str.replace("N/A","2169.40") #df['Close'] = df['Close'].astype('float64') df=df.set_index('period') #end of data wranggling @app.route("/") def index(): card_data = f'{df["Volume"].mean().round(2)}' #be careful with the " and ' # generate plot ax = df.plot(figsize = (20,9)) # Rendering plot # Do not change this figfile = BytesIO() plt.savefig(figfile, format='png', transparent=True) figfile.seek(0) figdata_png = base64.b64encode(figfile.getvalue()) plot_result = str(figdata_png)[2:-1] # render to html return render_template('index.html', card_data = card_data, plot_result=plot_result ) if __name__ == "__main__": app.run(debug=True)
app.py
from flask import Flask, render_template import pandas as pd import matplotlib import matplotlib.pyplot as plt from io import BytesIO import base64 from bs4 import BeautifulSoup import requests #don't change this matplotlib.use('Agg') app = Flask(__name__) #do not change this #insert the scrapping here url_get = requests.get('https://www.coingecko.com/en/coins/ethereum/historical_data/usd?start_date=2020-01-01&end_date=2021-06-30#panel') soup = BeautifulSoup(url_get.content,"html.parser") #find your right key here table = soup.find('table',attrs={'class':'table table-striped text-sm text-lg-normal'}) temp = soup.find_all('th',attrs={'class':'font-semibold text-center'}) row_length = len(temp) temp = [] #initiating a list for i in range(0, row_length): #insert the scrapping process here # get period period = soup.find_all('th',attrs={'class':'font-semibold text-center'})[i].text period = period.strip('\n') # get Market Cap MarketCap = soup.find_all('td',attrs={'class':'text-center'})[i*4].text MarketCap = MarketCap.strip('\n') # get Volume Volume = soup.find_all('td',attrs={'class':'text-center'})[i*4+1].text Volume = Volume.strip('\n') # get Open Open = soup.find_all('td',attrs={'class':'text-center'})[i*4+2].text Open = Open.strip('\n') # get Close Close = soup.find_all('td',attrs={'class':'text-center'})[i*4+3].text Close = Close.strip('\n') #scrapping process temp.append((period,Volume)) temp = temp[::-1] #change into dataframe #df = pd.DataFrame(temp,columns = ('period','MarketCap','Volume','Open','Close')) df = pd.DataFrame(temp,columns = ('period','Volume')) #insert data wrangling here df['period'] = df['period'].astype('datetime64') #df['MarketCap'] = df['MarketCap'].str.replace("$","") #df['MarketCap'] = df['MarketCap'].str.replace(",","") #df['MarketCap'] = df['MarketCap'].astype('float64') df['Volume'] = df['Volume'].str.replace("$","") df['Volume'] = df['Volume'].str.replace(",","") df['Volume'] = df['Volume'].astype('float64') #df['Open'] = 
df['Open'].str.replace("$","") #df['Open'] = df['Open'].str.replace(",","") #df['Open'] = df['Open'].astype('float64') #df['Close'] = df['Close'].str.replace("$","") #df['Close'] = df['Close'].str.replace(",","") #df['Close'] = df['Close'].str.replace("N/A","2169.40") #df['Close'] = df['Close'].astype('float64') df=df.set_index('period') #end of data wranggling @app.route("/") def index(): card_data = f'{df["Volume"].mean().round(2)}' #be careful with the " and ' # generate plot ax = df.plot(figsize = (20,9)) # Rendering plot # Do not change this figfile = BytesIO() plt.savefig(figfile, format='png', transparent=True) figfile.seek(0) figdata_png = base64.b64encode(figfile.getvalue()) plot_result = str(figdata_png)[2:-1] # render to html return render_template('index.html', card_data = card_data, plot_result=plot_result ) if __name__ == "__main__": app.run(debug=True)
0.264074
0.116362
r""" Analytic Acquisition Functions that evaluate the posterior without performing Monte-Carlo sampling. """ from __future__ import annotations from abc import ABC from copy import deepcopy from typing import Dict, Optional, Tuple, Union import torch from botorch.acquisition.acquisition import AcquisitionFunction from botorch.acquisition.objective import ScalarizedObjective from botorch.exceptions import UnsupportedError from botorch.models.gp_regression import FixedNoiseGP from botorch.models.gpytorch import GPyTorchModel from botorch.models.model import Model from botorch.posteriors.posterior import Posterior from botorch.sampling.samplers import SobolQMCNormalSampler from botorch.utils.transforms import convert_to_target_pre_hook, t_batch_mode_transform from torch import Tensor from torch.distributions import Normal class AnalyticAcquisitionFunction(AcquisitionFunction, ABC): r"""Base class for analytic acquisition functions.""" def __init__( self, model: Model, objective: Optional[ScalarizedObjective] = None ) -> None: r"""Base constructor for analytic acquisition functions. Args: model: A fitted single-outcome model. objective: A ScalarizedObjective (optional). """ super().__init__(model=model) if objective is None: if model.num_outputs != 1: raise UnsupportedError( "Must specify an objective when using a multi-output model." ) elif not isinstance(objective, ScalarizedObjective): raise UnsupportedError( "Only objectives of type ScalarizedObjective are supported for " "analytic acquisition functions." ) self.objective = objective def _get_posterior(self, X: Tensor) -> Posterior: r"""Compute the posterior at the input candidate set X. Applies the objective if provided. Args: X: The input candidate set. Returns: The posterior at X. If a ScalarizedObjective is defined, this posterior can be single-output even if the underlying model is a multi-output model. 
""" posterior = self.model.posterior(X) if self.objective is not None: # Unlike MCAcquisitionObjective (which transform samples), this # transforms the posterior posterior = self.objective(posterior) return posterior def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None: raise UnsupportedError( "Analytic acquisition functions do not account for X_pending yet." ) class ExpectedImprovement(AnalyticAcquisitionFunction): r"""Single-outcome Expected Improvement (analytic). Computes classic Expected Improvement over the current best observed value, using the analytic formula for a Normal posterior distribution. Unlike the MC-based acquisition functions, this relies on the posterior at single test point being Gaussian (and require the posterior to implement `mean` and `variance` properties). Only supports the case of `q=1`. The model must be single-outcome. `EI(x) = E(max(y - best_f, 0)), y ~ f(x)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> EI = ExpectedImprovement(model, best_f=0.2) >>> ei = EI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], objective: Optional[ScalarizedObjective] = None, maximize: bool = True, ) -> None: r"""Single-outcome Expected Improvement (analytic). Args: model: A fitted single-outcome model. best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing the best function value observed so far (assumed noiseless). objective: A ScalarizedObjective (optional). maximize: If True, consider the problem a maximization problem. """ super().__init__(model=model, objective=objective) self.maximize = maximize if not torch.is_tensor(best_f): best_f = torch.tensor(best_f) self.register_buffer("best_f", best_f) @t_batch_mode_transform(expected_q=1, assert_output_shape=False) def forward(self, X: Tensor) -> Tensor: r"""Evaluate Expected Improvement on the candidate set X. Args: X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points. 
Expected Improvement is computed for each point individually, i.e., what is considered are the marginal posteriors, not the joint. Returns: A `b1 x ... bk`-dim tensor of Expected Improvement values at the given design points `X`. """ self.best_f = self.best_f.to(X) posterior = self._get_posterior(X=X) mean = posterior.mean # deal with batch evaluation and broadcasting view_shape = mean.shape[:-2] if mean.dim() >= X.dim() else X.shape[:-2] mean = mean.view(view_shape) sigma = posterior.variance.clamp_min(1e-9).sqrt().view(view_shape) u = (mean - self.best_f.expand_as(mean)) / sigma if not self.maximize: u = -u normal = Normal(torch.zeros_like(u), torch.ones_like(u)) ucdf = normal.cdf(u) updf = torch.exp(normal.log_prob(u)) ei = sigma * (updf + u * ucdf) return ei class PosteriorMean(AnalyticAcquisitionFunction): r"""Single-outcome Posterior Mean. Only supports the case of q=1. Requires the model's posterior to have a `mean` property. The model must be single-outcome. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> PM = PosteriorMean(model) >>> pm = PM(test_X) """ @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate the posterior mean on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. Returns: A `(b)`-dim Tensor of Posterior Mean values at the given design points `X`. """ posterior = self._get_posterior(X=X) return posterior.mean.view(X.shape[:-2]) class ProbabilityOfImprovement(AnalyticAcquisitionFunction): r"""Single-outcome Probability of Improvement. Probability of improvment over the current best observed value, computed using the analytic formula under a Normal posterior distribution. Only supports the case of q=1. Requires the posterior to be Gaussian. The model must be single-outcome. 
`PI(x) = P(y >= best_f), y ~ f(x)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> PI = ProbabilityOfImprovement(model, best_f=0.2) >>> pi = PI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], objective: Optional[ScalarizedObjective] = None, maximize: bool = True, ) -> None: r"""Single-outcome analytic Probability of Improvement. Args: model: A fitted single-outcome model. best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing the best function value observed so far (assumed noiseless). objective: A ScalarizedObjective (optional). maximize: If True, consider the problem a maximization problem. """ super().__init__(model=model, objective=objective) self.maximize = maximize if not torch.is_tensor(best_f): best_f = torch.tensor(best_f) self.register_buffer("best_f", best_f) @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate the Probability of Improvement on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. Returns: A `(b)`-dim tensor of Probability of Improvement values at the given design points `X`. """ self.best_f = self.best_f.to(X) posterior = self._get_posterior(X=X) mean, sigma = posterior.mean, posterior.variance.sqrt() batch_shape = X.shape[:-2] mean = posterior.mean.view(batch_shape) sigma = posterior.variance.sqrt().clamp_min(1e-9).view(batch_shape) u = (mean - self.best_f.expand_as(mean)) / sigma if not self.maximize: u = -u normal = Normal(torch.zeros_like(u), torch.ones_like(u)) return normal.cdf(u) class UpperConfidenceBound(AnalyticAcquisitionFunction): r"""Single-outcome Upper Confidence Bound (UCB). Analytic upper confidence bound that comprises of the posterior mean plus an additional term: the posterior standard deviation weighted by a trade-off parameter, `beta`. Only supports the case of `q=1` (i.e. greedy, non-batch selection of design points). The model must be single-outcome. 
`UCB(x) = mu(x) + sqrt(beta) * sigma(x)`, where `mu` and `sigma` are the posterior mean and standard deviation, respectively. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> UCB = UpperConfidenceBound(model, beta=0.2) >>> ucb = UCB(test_X) """ def __init__( self, model: Model, beta: Union[float, Tensor], objective: Optional[ScalarizedObjective] = None, maximize: bool = True, ) -> None: r"""Single-outcome Upper Confidence Bound. Args: model: A fitted single-outcome GP model (must be in batch mode if candidate sets X will be) beta: Either a scalar or a one-dim tensor with `b` elements (batch mode) representing the trade-off parameter between mean and covariance objective: A ScalarizedObjective (optional). maximize: If True, consider the problem a maximization problem. """ super().__init__(model=model, objective=objective) self.maximize = maximize if not torch.is_tensor(beta): beta = torch.tensor(beta) self.register_buffer("beta", beta) @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate the Upper Confidence Bound on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ self.beta = self.beta.to(X) posterior = self._get_posterior(X=X) batch_shape = X.shape[:-2] mean = posterior.mean.view(batch_shape) variance = posterior.variance.view(batch_shape) delta = (self.beta.expand_as(mean) * variance).sqrt() if self.maximize: return mean + delta else: return -mean + delta class ConstrainedExpectedImprovement(AnalyticAcquisitionFunction): r"""Constrained Expected Improvement (feasibility-weighted). Computes the analytic expected improvement for a Normal posterior distribution, weighted by a probability of feasibility. The objective and constraints are assumed to be independent and have Gaussian posterior distributions. Only supports the case `q=1`. 
The model should be multi-outcome, with the index of the objective and constraints passed to the constructor. `Constrained_EI(x) = EI(x) * Product_i P(y_i \in [lower_i, upper_i])`, where `y_i ~ constraint_i(x)` and `lower_i`, `upper_i` are the lower and upper bounds for the i-th constraint, respectively. Example: >>> # example where 0th output has a non-negativity constraint and ... # 1st output is the objective >>> model = SingleTaskGP(train_X, train_Y) >>> constraints = {0: (0.0, None)} >>> cEI = ConstrainedExpectedImprovement(model, 0.2, 1, constraints) >>> cei = cEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], objective_index: int, constraints: Dict[int, Tuple[Optional[float], Optional[float]]], maximize: bool = True, ) -> None: r"""Analytic Constrained Expected Improvement. Args: model: A fitted single-outcome model. best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing the best function value observed so far (assumed noiseless). objective_index: The index of the objective. constraints: A dictionary of the form `{i: [lower, upper]}`, where `i` is the output index, and `lower` and `upper` are lower and upper bounds on that output (resp. interpreted as -Inf / Inf if None) maximize: If True, consider the problem a maximization problem. """ # use AcquisitionFunction constructor to avoid check for objective super(AnalyticAcquisitionFunction, self).__init__(model=model) self.objective = None self.maximize = maximize self.objective_index = objective_index self.constraints = constraints self.register_buffer("best_f", torch.as_tensor(best_f)) self._preprocess_constraint_bounds(constraints=constraints) self.register_forward_pre_hook(convert_to_target_pre_hook) @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate Constrained Expected Improvement on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. 
Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ self.best_f = self.best_f.to(X) posterior = self._get_posterior(X=X) means = posterior.mean.squeeze(dim=-2) # (b) x m sigmas = posterior.variance.squeeze(dim=-2).sqrt().clamp_min(1e-9) # (b) x m # (b) x 1 oi = self.objective_index mean_obj = means[..., oi : oi + 1] sigma_obj = sigmas[..., oi : oi + 1] u = (mean_obj - self.best_f.expand_as(mean_obj)) / sigma_obj if not self.maximize: u = -u normal = Normal( torch.zeros(1, device=u.device, dtype=u.dtype), torch.ones(1, device=u.device, dtype=u.dtype), ) ei_pdf = torch.exp(normal.log_prob(u)) # (b) x 1 ei_cdf = normal.cdf(u) ei = sigma_obj * (ei_pdf + u * ei_cdf) prob_feas = self._compute_prob_feas(X=X, means=means, sigmas=sigmas) ei = ei.mul(prob_feas) return ei.squeeze(dim=-1) def _preprocess_constraint_bounds( self, constraints: Dict[int, Tuple[Optional[float], Optional[float]]] ) -> None: r"""Set up constraint bounds. Args: constraints: A dictionary of the form `{i: [lower, upper]}`, where `i` is the output index, and `lower` and `upper` are lower and upper bounds on that output (resp. interpreted as -Inf / Inf if None) """ con_lower, con_lower_inds = [], [] con_upper, con_upper_inds = [], [] con_both, con_both_inds = [], [] con_indices = list(constraints.keys()) if len(con_indices) == 0: raise ValueError("There must be at least one constraint.") if self.objective_index in con_indices: raise ValueError( "Output corresponding to objective should not be a constraint." 
) for k in con_indices: if constraints[k][0] is not None and constraints[k][1] is not None: if constraints[k][1] <= constraints[k][0]: raise ValueError("Upper bound is less than the lower bound.") con_both_inds.append(k) con_both.append([constraints[k][0], constraints[k][1]]) elif constraints[k][0] is not None: con_lower_inds.append(k) con_lower.append(constraints[k][0]) elif constraints[k][1] is not None: con_upper_inds.append(k) con_upper.append(constraints[k][1]) # tensor-based indexing is much faster than list-based advanced indexing self.register_buffer("con_lower_inds", torch.tensor(con_lower_inds)) self.register_buffer("con_upper_inds", torch.tensor(con_upper_inds)) self.register_buffer("con_both_inds", torch.tensor(con_both_inds)) # tensor indexing self.register_buffer("con_both", torch.tensor(con_both, dtype=torch.float)) self.register_buffer("con_lower", torch.tensor(con_lower, dtype=torch.float)) self.register_buffer("con_upper", torch.tensor(con_upper, dtype=torch.float)) def _compute_prob_feas(self, X: Tensor, means: Tensor, sigmas: Tensor) -> Tensor: r"""Compute feasibility probability for each batch of X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. means: A `(b) x m`-dim Tensor of means. sigmas: A `(b) x m`-dim Tensor of standard deviations. Returns: A `(b) x 1`-dim tensor of feasibility probabilities Note: This function does case-work for upper bound, lower bound, and both-sided bounds. Another way to do it would be to use 'inf' and -'inf' for the one-sided bounds and use the logic for the both-sided case. But this causes an issue with autograd since we get 0 * inf. TODO: Investigate further. 
""" output_shape = X.shape[:-2] + torch.Size([1]) prob_feas = torch.ones(output_shape, device=X.device, dtype=X.dtype) if len(self.con_lower_inds) > 0: self.con_lower_inds = self.con_lower_inds.to(device=X.device) normal_lower = _construct_dist(means, sigmas, self.con_lower_inds) prob_l = 1 - normal_lower.cdf(self.con_lower) prob_feas = prob_feas.mul(torch.prod(prob_l, dim=-1, keepdim=True)) if len(self.con_upper_inds) > 0: self.con_upper_inds = self.con_upper_inds.to(device=X.device) normal_upper = _construct_dist(means, sigmas, self.con_upper_inds) prob_u = normal_upper.cdf(self.con_upper) prob_feas = prob_feas.mul(torch.prod(prob_u, dim=-1, keepdim=True)) if len(self.con_both_inds) > 0: self.con_both_inds = self.con_both_inds.to(device=X.device) normal_both = _construct_dist(means, sigmas, self.con_both_inds) prob_u = normal_both.cdf(self.con_both[:, 1]) prob_l = normal_both.cdf(self.con_both[:, 0]) prob_feas = prob_feas.mul(torch.prod(prob_u - prob_l, dim=-1, keepdim=True)) return prob_feas class NoisyExpectedImprovement(ExpectedImprovement): r"""Single-outcome Noisy Expected Improvement (via fantasies). This computes Noisy Expected Improvement by averaging over the Expected Improvemnt values of a number of fantasy models. Only supports the case `q=1`. Assumes that the posterior distribution of the model is Gaussian. The model must be single-outcome. `NEI(x) = E(max(y - max Y_baseline), 0)), (y, Y_baseline) ~ f((x, X_baseline))`, where `X_baseline` are previously observed points. Note: This acquisition function currently relies on using a FixedNoiseGP (required for noiseless fantasies). Example: >>> model = FixedNoiseGP(train_X, train_Y, train_Yvar=train_Yvar) >>> NEI = NoisyExpectedImprovement(model, train_X) >>> nei = NEI(test_X) """ def __init__( self, model: GPyTorchModel, X_observed: Tensor, num_fantasies: int = 20, maximize: bool = True, ) -> None: r"""Single-outcome Noisy Expected Improvement (via fantasies). Args: model: A fitted single-outcome model. 
X_observed: A `n x d` Tensor of observed points that are likely to be the best observed points so far. num_fantasies: The number of fantasies to generate. The higher this number the more accurate the model (at the expense of model complexity and performance). maximize: If True, consider the problem a maximization problem. """ if not isinstance(model, FixedNoiseGP): raise UnsupportedError( "Only FixedNoiseGPs are currently supported for fantasy NEI" ) # sample fantasies with torch.no_grad(): posterior = model.posterior(X=X_observed) sampler = SobolQMCNormalSampler(num_fantasies) Y_fantasized = sampler(posterior).squeeze(-1) batch_X_observed = X_observed.expand(num_fantasies, *X_observed.shape) # The fantasy model will operate in batch mode fantasy_model = _get_noiseless_fantasy_model( model=model, batch_X_observed=batch_X_observed, Y_fantasized=Y_fantasized ) if maximize: best_f = Y_fantasized.max(dim=-1)[0] else: best_f = Y_fantasized.min(dim=-1)[0] super().__init__(model=fantasy_model, best_f=best_f, maximize=maximize) def forward(self, X: Tensor) -> Tensor: r"""Evaluate Expected Improvement on the candidate set X. Args: X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points. Returns: A `b1 x ... bk`-dim tensor of Noisy Expected Improvement values at the given design points `X`. """ # add batch dimension for broadcasting to fantasy models return super().forward(X.unsqueeze(-3)).mean(dim=-1) def _construct_dist(means: Tensor, sigmas: Tensor, inds: Tensor) -> Normal: mean = means.index_select(dim=-1, index=inds) sigma = sigmas.index_select(dim=-1, index=inds) return Normal(loc=mean, scale=sigma) def _get_noiseless_fantasy_model( model: FixedNoiseGP, batch_X_observed: Tensor, Y_fantasized: Tensor ) -> FixedNoiseGP: r"""Construct a fantasy model from a fitted model and provided fantasies. The fantasy model uses the hyperparameters from the original fitted model and assumes the fantasies are noiseless. 
Args: model: a fitted FixedNoiseGP batch_X_observed: A `b x n x d` tensor of inputs where `b` is the number of fantasies. Y_fantasized: A `b x n` tensor of fantasized targets where `b` is the number of fantasies. Returns: The fantasy model. """ # initialize a copy of FixedNoiseGP on the original training inputs # this makes FixedNoiseGP a non-batch GP, so that the same hyperparameters # are used across all batches (by default, a GP with batched training data # uses independent hyperparameters for each batch). fantasy_model = FixedNoiseGP( train_X=model.train_inputs[0], train_Y=model.train_targets.unsqueeze(-1), train_Yvar=model.likelihood.noise_covar.noise.unsqueeze(-1), ) # update training inputs/targets to be batch mode fantasies fantasy_model.set_train_data( inputs=batch_X_observed, targets=Y_fantasized, strict=False ) # use noiseless fantasies fantasy_model.likelihood.noise_covar.noise = torch.full_like(Y_fantasized, 1e-7) # load hyperparameters from original model state_dict = deepcopy(model.state_dict()) fantasy_model.load_state_dict(state_dict) return fantasy_model class ScalarizedPosteriorMean(AnalyticAcquisitionFunction): r"""Scalarized Posterior Mean. This acquisition function returns a scalarized (across the q-batch) posterior mean given a vector of weights. """ def __init__( self, model: Model, weights: Tensor, objective: Optional[ScalarizedObjective] = None, ) -> None: r"""Scalarized Posterior Mean. Args: model: A fitted single-outcome model. weights: A tensor of shape `q` for scalarization. objective: A ScalarizedObjective. Required for multi-output models. """ super().__init__(model=model, objective=objective) self.register_buffer("weights", weights.unsqueeze(dim=0)) @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate the scalarized posterior mean on the candidate set X. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. 
Returns: A `(b)`-dim Tensor of Posterior Mean values at the given design points `X`. """ posterior = self._get_posterior(X=X) weighted_means = posterior.mean.squeeze(dim=-1) * self.weights return weighted_means.sum(dim=-1)
botorch/acquisition/analytic.py
r""" Analytic Acquisition Functions that evaluate the posterior without performing Monte-Carlo sampling. """ from __future__ import annotations from abc import ABC from copy import deepcopy from typing import Dict, Optional, Tuple, Union import torch from botorch.acquisition.acquisition import AcquisitionFunction from botorch.acquisition.objective import ScalarizedObjective from botorch.exceptions import UnsupportedError from botorch.models.gp_regression import FixedNoiseGP from botorch.models.gpytorch import GPyTorchModel from botorch.models.model import Model from botorch.posteriors.posterior import Posterior from botorch.sampling.samplers import SobolQMCNormalSampler from botorch.utils.transforms import convert_to_target_pre_hook, t_batch_mode_transform from torch import Tensor from torch.distributions import Normal class AnalyticAcquisitionFunction(AcquisitionFunction, ABC): r"""Base class for analytic acquisition functions.""" def __init__( self, model: Model, objective: Optional[ScalarizedObjective] = None ) -> None: r"""Base constructor for analytic acquisition functions. Args: model: A fitted single-outcome model. objective: A ScalarizedObjective (optional). """ super().__init__(model=model) if objective is None: if model.num_outputs != 1: raise UnsupportedError( "Must specify an objective when using a multi-output model." ) elif not isinstance(objective, ScalarizedObjective): raise UnsupportedError( "Only objectives of type ScalarizedObjective are supported for " "analytic acquisition functions." ) self.objective = objective def _get_posterior(self, X: Tensor) -> Posterior: r"""Compute the posterior at the input candidate set X. Applies the objective if provided. Args: X: The input candidate set. Returns: The posterior at X. If a ScalarizedObjective is defined, this posterior can be single-output even if the underlying model is a multi-output model. 
""" posterior = self.model.posterior(X) if self.objective is not None: # Unlike MCAcquisitionObjective (which transform samples), this # transforms the posterior posterior = self.objective(posterior) return posterior def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None: raise UnsupportedError( "Analytic acquisition functions do not account for X_pending yet." ) class ExpectedImprovement(AnalyticAcquisitionFunction): r"""Single-outcome Expected Improvement (analytic). Computes classic Expected Improvement over the current best observed value, using the analytic formula for a Normal posterior distribution. Unlike the MC-based acquisition functions, this relies on the posterior at single test point being Gaussian (and require the posterior to implement `mean` and `variance` properties). Only supports the case of `q=1`. The model must be single-outcome. `EI(x) = E(max(y - best_f, 0)), y ~ f(x)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> EI = ExpectedImprovement(model, best_f=0.2) >>> ei = EI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], objective: Optional[ScalarizedObjective] = None, maximize: bool = True, ) -> None: r"""Single-outcome Expected Improvement (analytic). Args: model: A fitted single-outcome model. best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing the best function value observed so far (assumed noiseless). objective: A ScalarizedObjective (optional). maximize: If True, consider the problem a maximization problem. """ super().__init__(model=model, objective=objective) self.maximize = maximize if not torch.is_tensor(best_f): best_f = torch.tensor(best_f) self.register_buffer("best_f", best_f) @t_batch_mode_transform(expected_q=1, assert_output_shape=False) def forward(self, X: Tensor) -> Tensor: r"""Evaluate Expected Improvement on the candidate set X. Args: X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points. 
Expected Improvement is computed for each point individually, i.e., what is considered are the marginal posteriors, not the joint. Returns: A `b1 x ... bk`-dim tensor of Expected Improvement values at the given design points `X`. """ self.best_f = self.best_f.to(X) posterior = self._get_posterior(X=X) mean = posterior.mean # deal with batch evaluation and broadcasting view_shape = mean.shape[:-2] if mean.dim() >= X.dim() else X.shape[:-2] mean = mean.view(view_shape) sigma = posterior.variance.clamp_min(1e-9).sqrt().view(view_shape) u = (mean - self.best_f.expand_as(mean)) / sigma if not self.maximize: u = -u normal = Normal(torch.zeros_like(u), torch.ones_like(u)) ucdf = normal.cdf(u) updf = torch.exp(normal.log_prob(u)) ei = sigma * (updf + u * ucdf) return ei class PosteriorMean(AnalyticAcquisitionFunction): r"""Single-outcome Posterior Mean. Only supports the case of q=1. Requires the model's posterior to have a `mean` property. The model must be single-outcome. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> PM = PosteriorMean(model) >>> pm = PM(test_X) """ @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate the posterior mean on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. Returns: A `(b)`-dim Tensor of Posterior Mean values at the given design points `X`. """ posterior = self._get_posterior(X=X) return posterior.mean.view(X.shape[:-2]) class ProbabilityOfImprovement(AnalyticAcquisitionFunction): r"""Single-outcome Probability of Improvement. Probability of improvment over the current best observed value, computed using the analytic formula under a Normal posterior distribution. Only supports the case of q=1. Requires the posterior to be Gaussian. The model must be single-outcome. 
`PI(x) = P(y >= best_f), y ~ f(x)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> PI = ProbabilityOfImprovement(model, best_f=0.2) >>> pi = PI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], objective: Optional[ScalarizedObjective] = None, maximize: bool = True, ) -> None: r"""Single-outcome analytic Probability of Improvement. Args: model: A fitted single-outcome model. best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing the best function value observed so far (assumed noiseless). objective: A ScalarizedObjective (optional). maximize: If True, consider the problem a maximization problem. """ super().__init__(model=model, objective=objective) self.maximize = maximize if not torch.is_tensor(best_f): best_f = torch.tensor(best_f) self.register_buffer("best_f", best_f) @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate the Probability of Improvement on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. Returns: A `(b)`-dim tensor of Probability of Improvement values at the given design points `X`. """ self.best_f = self.best_f.to(X) posterior = self._get_posterior(X=X) mean, sigma = posterior.mean, posterior.variance.sqrt() batch_shape = X.shape[:-2] mean = posterior.mean.view(batch_shape) sigma = posterior.variance.sqrt().clamp_min(1e-9).view(batch_shape) u = (mean - self.best_f.expand_as(mean)) / sigma if not self.maximize: u = -u normal = Normal(torch.zeros_like(u), torch.ones_like(u)) return normal.cdf(u) class UpperConfidenceBound(AnalyticAcquisitionFunction): r"""Single-outcome Upper Confidence Bound (UCB). Analytic upper confidence bound that comprises of the posterior mean plus an additional term: the posterior standard deviation weighted by a trade-off parameter, `beta`. Only supports the case of `q=1` (i.e. greedy, non-batch selection of design points). The model must be single-outcome. 
`UCB(x) = mu(x) + sqrt(beta) * sigma(x)`, where `mu` and `sigma` are the posterior mean and standard deviation, respectively. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> UCB = UpperConfidenceBound(model, beta=0.2) >>> ucb = UCB(test_X) """ def __init__( self, model: Model, beta: Union[float, Tensor], objective: Optional[ScalarizedObjective] = None, maximize: bool = True, ) -> None: r"""Single-outcome Upper Confidence Bound. Args: model: A fitted single-outcome GP model (must be in batch mode if candidate sets X will be) beta: Either a scalar or a one-dim tensor with `b` elements (batch mode) representing the trade-off parameter between mean and covariance objective: A ScalarizedObjective (optional). maximize: If True, consider the problem a maximization problem. """ super().__init__(model=model, objective=objective) self.maximize = maximize if not torch.is_tensor(beta): beta = torch.tensor(beta) self.register_buffer("beta", beta) @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate the Upper Confidence Bound on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ self.beta = self.beta.to(X) posterior = self._get_posterior(X=X) batch_shape = X.shape[:-2] mean = posterior.mean.view(batch_shape) variance = posterior.variance.view(batch_shape) delta = (self.beta.expand_as(mean) * variance).sqrt() if self.maximize: return mean + delta else: return -mean + delta class ConstrainedExpectedImprovement(AnalyticAcquisitionFunction): r"""Constrained Expected Improvement (feasibility-weighted). Computes the analytic expected improvement for a Normal posterior distribution, weighted by a probability of feasibility. The objective and constraints are assumed to be independent and have Gaussian posterior distributions. Only supports the case `q=1`. 
The model should be multi-outcome, with the index of the objective and constraints passed to the constructor. `Constrained_EI(x) = EI(x) * Product_i P(y_i \in [lower_i, upper_i])`, where `y_i ~ constraint_i(x)` and `lower_i`, `upper_i` are the lower and upper bounds for the i-th constraint, respectively. Example: >>> # example where 0th output has a non-negativity constraint and ... # 1st output is the objective >>> model = SingleTaskGP(train_X, train_Y) >>> constraints = {0: (0.0, None)} >>> cEI = ConstrainedExpectedImprovement(model, 0.2, 1, constraints) >>> cei = cEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], objective_index: int, constraints: Dict[int, Tuple[Optional[float], Optional[float]]], maximize: bool = True, ) -> None: r"""Analytic Constrained Expected Improvement. Args: model: A fitted single-outcome model. best_f: Either a scalar or a `b`-dim Tensor (batch mode) representing the best function value observed so far (assumed noiseless). objective_index: The index of the objective. constraints: A dictionary of the form `{i: [lower, upper]}`, where `i` is the output index, and `lower` and `upper` are lower and upper bounds on that output (resp. interpreted as -Inf / Inf if None) maximize: If True, consider the problem a maximization problem. """ # use AcquisitionFunction constructor to avoid check for objective super(AnalyticAcquisitionFunction, self).__init__(model=model) self.objective = None self.maximize = maximize self.objective_index = objective_index self.constraints = constraints self.register_buffer("best_f", torch.as_tensor(best_f)) self._preprocess_constraint_bounds(constraints=constraints) self.register_forward_pre_hook(convert_to_target_pre_hook) @t_batch_mode_transform(expected_q=1) def forward(self, X: Tensor) -> Tensor: r"""Evaluate Constrained Expected Improvement on the candidate set X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. 
Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ self.best_f = self.best_f.to(X) posterior = self._get_posterior(X=X) means = posterior.mean.squeeze(dim=-2) # (b) x m sigmas = posterior.variance.squeeze(dim=-2).sqrt().clamp_min(1e-9) # (b) x m # (b) x 1 oi = self.objective_index mean_obj = means[..., oi : oi + 1] sigma_obj = sigmas[..., oi : oi + 1] u = (mean_obj - self.best_f.expand_as(mean_obj)) / sigma_obj if not self.maximize: u = -u normal = Normal( torch.zeros(1, device=u.device, dtype=u.dtype), torch.ones(1, device=u.device, dtype=u.dtype), ) ei_pdf = torch.exp(normal.log_prob(u)) # (b) x 1 ei_cdf = normal.cdf(u) ei = sigma_obj * (ei_pdf + u * ei_cdf) prob_feas = self._compute_prob_feas(X=X, means=means, sigmas=sigmas) ei = ei.mul(prob_feas) return ei.squeeze(dim=-1) def _preprocess_constraint_bounds( self, constraints: Dict[int, Tuple[Optional[float], Optional[float]]] ) -> None: r"""Set up constraint bounds. Args: constraints: A dictionary of the form `{i: [lower, upper]}`, where `i` is the output index, and `lower` and `upper` are lower and upper bounds on that output (resp. interpreted as -Inf / Inf if None) """ con_lower, con_lower_inds = [], [] con_upper, con_upper_inds = [], [] con_both, con_both_inds = [], [] con_indices = list(constraints.keys()) if len(con_indices) == 0: raise ValueError("There must be at least one constraint.") if self.objective_index in con_indices: raise ValueError( "Output corresponding to objective should not be a constraint." 
) for k in con_indices: if constraints[k][0] is not None and constraints[k][1] is not None: if constraints[k][1] <= constraints[k][0]: raise ValueError("Upper bound is less than the lower bound.") con_both_inds.append(k) con_both.append([constraints[k][0], constraints[k][1]]) elif constraints[k][0] is not None: con_lower_inds.append(k) con_lower.append(constraints[k][0]) elif constraints[k][1] is not None: con_upper_inds.append(k) con_upper.append(constraints[k][1]) # tensor-based indexing is much faster than list-based advanced indexing self.register_buffer("con_lower_inds", torch.tensor(con_lower_inds)) self.register_buffer("con_upper_inds", torch.tensor(con_upper_inds)) self.register_buffer("con_both_inds", torch.tensor(con_both_inds)) # tensor indexing self.register_buffer("con_both", torch.tensor(con_both, dtype=torch.float)) self.register_buffer("con_lower", torch.tensor(con_lower, dtype=torch.float)) self.register_buffer("con_upper", torch.tensor(con_upper, dtype=torch.float)) def _compute_prob_feas(self, X: Tensor, means: Tensor, sigmas: Tensor) -> Tensor: r"""Compute feasibility probability for each batch of X. Args: X: A `(b) x 1 x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. means: A `(b) x m`-dim Tensor of means. sigmas: A `(b) x m`-dim Tensor of standard deviations. Returns: A `(b) x 1`-dim tensor of feasibility probabilities Note: This function does case-work for upper bound, lower bound, and both-sided bounds. Another way to do it would be to use 'inf' and -'inf' for the one-sided bounds and use the logic for the both-sided case. But this causes an issue with autograd since we get 0 * inf. TODO: Investigate further. 
""" output_shape = X.shape[:-2] + torch.Size([1]) prob_feas = torch.ones(output_shape, device=X.device, dtype=X.dtype) if len(self.con_lower_inds) > 0: self.con_lower_inds = self.con_lower_inds.to(device=X.device) normal_lower = _construct_dist(means, sigmas, self.con_lower_inds) prob_l = 1 - normal_lower.cdf(self.con_lower) prob_feas = prob_feas.mul(torch.prod(prob_l, dim=-1, keepdim=True)) if len(self.con_upper_inds) > 0: self.con_upper_inds = self.con_upper_inds.to(device=X.device) normal_upper = _construct_dist(means, sigmas, self.con_upper_inds) prob_u = normal_upper.cdf(self.con_upper) prob_feas = prob_feas.mul(torch.prod(prob_u, dim=-1, keepdim=True)) if len(self.con_both_inds) > 0: self.con_both_inds = self.con_both_inds.to(device=X.device) normal_both = _construct_dist(means, sigmas, self.con_both_inds) prob_u = normal_both.cdf(self.con_both[:, 1]) prob_l = normal_both.cdf(self.con_both[:, 0]) prob_feas = prob_feas.mul(torch.prod(prob_u - prob_l, dim=-1, keepdim=True)) return prob_feas class NoisyExpectedImprovement(ExpectedImprovement): r"""Single-outcome Noisy Expected Improvement (via fantasies). This computes Noisy Expected Improvement by averaging over the Expected Improvemnt values of a number of fantasy models. Only supports the case `q=1`. Assumes that the posterior distribution of the model is Gaussian. The model must be single-outcome. `NEI(x) = E(max(y - max Y_baseline), 0)), (y, Y_baseline) ~ f((x, X_baseline))`, where `X_baseline` are previously observed points. Note: This acquisition function currently relies on using a FixedNoiseGP (required for noiseless fantasies). Example: >>> model = FixedNoiseGP(train_X, train_Y, train_Yvar=train_Yvar) >>> NEI = NoisyExpectedImprovement(model, train_X) >>> nei = NEI(test_X) """ def __init__( self, model: GPyTorchModel, X_observed: Tensor, num_fantasies: int = 20, maximize: bool = True, ) -> None: r"""Single-outcome Noisy Expected Improvement (via fantasies). Args: model: A fitted single-outcome model. 
X_observed: A `n x d` Tensor of observed points that are likely to be the best observed points so far. num_fantasies: The number of fantasies to generate. The higher this number the more accurate the model (at the expense of model complexity and performance). maximize: If True, consider the problem a maximization problem. """ if not isinstance(model, FixedNoiseGP): raise UnsupportedError( "Only FixedNoiseGPs are currently supported for fantasy NEI" ) # sample fantasies with torch.no_grad(): posterior = model.posterior(X=X_observed) sampler = SobolQMCNormalSampler(num_fantasies) Y_fantasized = sampler(posterior).squeeze(-1) batch_X_observed = X_observed.expand(num_fantasies, *X_observed.shape) # The fantasy model will operate in batch mode fantasy_model = _get_noiseless_fantasy_model( model=model, batch_X_observed=batch_X_observed, Y_fantasized=Y_fantasized ) if maximize: best_f = Y_fantasized.max(dim=-1)[0] else: best_f = Y_fantasized.min(dim=-1)[0] super().__init__(model=fantasy_model, best_f=best_f, maximize=maximize) def forward(self, X: Tensor) -> Tensor: r"""Evaluate Expected Improvement on the candidate set X. Args: X: A `b1 x ... bk x 1 x d`-dim batched tensor of `d`-dim design points. Returns: A `b1 x ... bk`-dim tensor of Noisy Expected Improvement values at the given design points `X`. """ # add batch dimension for broadcasting to fantasy models return super().forward(X.unsqueeze(-3)).mean(dim=-1) def _construct_dist(means: Tensor, sigmas: Tensor, inds: Tensor) -> Normal: mean = means.index_select(dim=-1, index=inds) sigma = sigmas.index_select(dim=-1, index=inds) return Normal(loc=mean, scale=sigma) def _get_noiseless_fantasy_model( model: FixedNoiseGP, batch_X_observed: Tensor, Y_fantasized: Tensor ) -> FixedNoiseGP: r"""Construct a fantasy model from a fitted model and provided fantasies. The fantasy model uses the hyperparameters from the original fitted model and assumes the fantasies are noiseless. 
Args: model: a fitted FixedNoiseGP batch_X_observed: A `b x n x d` tensor of inputs where `b` is the number of fantasies. Y_fantasized: A `b x n` tensor of fantasized targets where `b` is the number of fantasies. Returns: The fantasy model. """ # initialize a copy of FixedNoiseGP on the original training inputs # this makes FixedNoiseGP a non-batch GP, so that the same hyperparameters # are used across all batches (by default, a GP with batched training data # uses independent hyperparameters for each batch). fantasy_model = FixedNoiseGP( train_X=model.train_inputs[0], train_Y=model.train_targets.unsqueeze(-1), train_Yvar=model.likelihood.noise_covar.noise.unsqueeze(-1), ) # update training inputs/targets to be batch mode fantasies fantasy_model.set_train_data( inputs=batch_X_observed, targets=Y_fantasized, strict=False ) # use noiseless fantasies fantasy_model.likelihood.noise_covar.noise = torch.full_like(Y_fantasized, 1e-7) # load hyperparameters from original model state_dict = deepcopy(model.state_dict()) fantasy_model.load_state_dict(state_dict) return fantasy_model class ScalarizedPosteriorMean(AnalyticAcquisitionFunction): r"""Scalarized Posterior Mean. This acquisition function returns a scalarized (across the q-batch) posterior mean given a vector of weights. """ def __init__( self, model: Model, weights: Tensor, objective: Optional[ScalarizedObjective] = None, ) -> None: r"""Scalarized Posterior Mean. Args: model: A fitted single-outcome model. weights: A tensor of shape `q` for scalarization. objective: A ScalarizedObjective. Required for multi-output models. """ super().__init__(model=model, objective=objective) self.register_buffer("weights", weights.unsqueeze(dim=0)) @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate the scalarized posterior mean on the candidate set X. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches of `d`-dim design points each. 
Returns: A `(b)`-dim Tensor of Posterior Mean values at the given design points `X`. """ posterior = self._get_posterior(X=X) weighted_means = posterior.mean.squeeze(dim=-1) * self.weights return weighted_means.sum(dim=-1)
0.9842
0.702098
from ..debian import DebianReleaseSpec from ..debian import get_spec_from_release_file from ..debian import parse_dpkgquery_line from niceman.tests.utils import eq_, assert_is_subset_recur def test_get_spec_from_release_file(f=None): content = """\ -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 Origin: NeuroDebian Label: NeuroDebian2 Suite: stretch Codename: stretch2 Date: Thu, 15 Sep 2016 01:30:57 UTC Architectures: i386 amd64 sparc Components: main non-free contrib Description: NeuroDebian repository with perspective, inofficial and backported packages -- mostly neuroscience-related MD5Sum: d9650396c56a6f9521d0bbd9f719efbe 482669 main/binary-i386/Packages 34134c9a64b847d33eeeb3cc7291f855ab9f0969e8ad7c92cd2a0c1aebc19d1e 14199 contrib/Contents-sparc.gz -----BEGIN PGP SIGNATURE----- Version: GnuPG v2 iEYEAREIAAYFAlfZ+dEACgkQpdMvASZJpamBowCfXOPQimiIy2wnVY5U9sLs1jSn JZ0An0Uoocusvjco1t6RAwxt/y3lQoWV =a3Nn -----END PGP SIGNATURE----- """ eq_(get_spec_from_release_file(content), DebianReleaseSpec( origin='NeuroDebian', label='NeuroDebian2', codename='stretch2', version=None, suite='stretch', date='Thu, 15 Sep 2016 01:30:57 UTC', components='main non-free contrib', architectures='i386 amd64 sparc', )) def test_parse_apt_cache_show_pkgs_output(): from ..debian import parse_apt_cache_show_pkgs_output txt1 = """\ Package: openssl Status: install ok installed Priority: optional Section: utils Installed-Size: 934 Maintainer: Ubuntu Developers <<EMAIL>> Architecture: amd64 Version: 1.0.2g-1ubuntu4.5 Depends: libc6 (>= 2.15), libssl1.0.0 (>= 1.0.2g) Suggests: ca-certificates Conffiles: /etc/ssl/openssl.cnf 7df26c55291b33344dc15e3935dabaf3 Description-en: Secure Sockets Layer toolkit - cryptographic utility This package is part of the OpenSSL project's implementation of the SSL and TLS cryptographic protocols for secure communication over the Internet. . 
It contains the general-purpose command line binary /usr/bin/openssl, useful for cryptographic operations such as: * creating RSA, DH, and DSA key parameters; * creating X.509 certificates, CSRs, and CRLs; * calculating message digests; * encrypting and decrypting with ciphers; * testing SSL/TLS clients and servers; * handling S/MIME signed or encrypted mail. Description-md5: 9b6de2bb6e1d9016aeb0f00bcf6617bd Original-Maintainer: Debian OpenSSL Team <<EMAIL>> Package: openssl Priority: standard Section: utils Installed-Size: 934 Maintainer: <NAME> <<EMAIL>> Original-Maintainer: Debian OpenSSL Team <<EMAIL>> Architecture: amd64 Source: openssl-src (1.0.2g) Version: 1.0.2g-1ubuntu4 Depends: libc6 (>= 2.15), libssl1.0.0 (>= 1.0.2g) Suggests: ca-certificates Filename: pool/main/o/openssl/openssl_1.0.2g-1ubuntu4_amd64.deb Size: 492190 MD5sum: 8280148dc2991da94be5810ad4d91552 SHA1: b5326f27aae83c303ff934121dede47d9fce7c76 SHA256: e897ffc8d84b0d436baca5dbd684a85146ffa78d3f2d15093779d3f5a8189690 Description-en: Secure Sockets Layer toolkit - cryptographic utility This package is part of the OpenSSL project's implementation of the SSL and TLS cryptographic protocols for secure communication over the Internet. . It contains the general-purpose command line binary /usr/bin/openssl, useful for cryptographic operations such as: * creating RSA, DH, and DSA key parameters; * creating X.509 certificates, CSRs, and CRLs; * calculating message digests; * encrypting and decrypting with ciphers; * testing SSL/TLS clients and servers; * handling S/MIME signed or encrypted mail. 
Description-md5: 9b6de2bb6e1d9016aeb0f00bcf6617bd Bugs: https://bugs.launchpad.net/ubuntu/+filebug Origin: Ubuntu Supported: 5y Task: standard, ubuntu-core, ubuntu-core, mythbuntu-frontend, mythbuntu-backend-slave, mythbuntu-backend-master, ubuntu-touch-core, ubuntu-touch, ubuntu-sdk-libs-tools, ubuntu-sdk Package: alienblaster Priority: extra Section: universe/games Installed-Size: 668 Maintainer: <NAME> <<EMAIL>> Original-Maintainer: Debian Games Team <<EMAIL>> Architecture: amd64 Source: alienblaster-src Version: 1.1.0-9 Depends: alienblaster-data, libc6 (>= 2.14), libgcc1 (>= 1:3.0), libsdl-mixer1.2, libsdl1.2debian (>= 1.2.11), libstdc++6 (>= 5.2) Filename: pool/universe/a/alienblaster/alienblaster_1.1.0-9_amd64.deb Size: 180278 MD5sum: e53379fd0d60e0af6304af78aa8ef2b7 SHA1: ca405056cf66a1c2ae3ae1674c22b7d24cda4986 SHA256: ff25bd843420801e9adea4f5ec1ca9656b2aeb327d8102107bf5ebbdb3046c38 Description-en: Classic 2D shoot 'em up Your mission is simple: Stop the invasion of the aliens and blast them! . Alien Blaster is a classic 2D shoot 'em up featuring lots of different weapons, special items, aliens to blast and a big bad boss. . It supports both a single player mode and a cooperative two player mode for two persons playing on one computer. 
Description-md5: da1f8f1a6453d62874036331e075d65f Homepage: http://www.schwardtnet.de/alienblaster/ Bugs: https://bugs.launchpad.net/ubuntu/+filebug Origin: Ubuntu """ out1 = [{'architecture': 'amd64', 'package': 'openssl', 'status': 'install ok installed', 'version': '1.0.2g-1ubuntu4.5'}, {'architecture': 'amd64', 'source_name': 'openssl-src', 'source_version': '1.0.2g', 'package': 'openssl', 'version': '1.0.2g-1ubuntu4'}, {'architecture': 'amd64', 'source_name': 'alienblaster-src', 'package': 'alienblaster', 'md5': 'e53379fd0d60e0af6304af78aa8ef2b7', 'version': '1.1.0-9'}, ] out = parse_apt_cache_show_pkgs_output(txt1) assert_is_subset_recur(out1, out, [dict, list]) def test_parse_apt_cache_policy_pkgs_output(): from ..debian import parse_apt_cache_policy_pkgs_output txt1 = """\ afni: Installed: 16.2.07~dfsg.1-2~nd90+1 Candidate: 16.2.07~dfsg.1-2~nd90+1 Version table: *** 16.2.07~dfsg.1-2~nd90+1 500 500 http://neuro.debian.net/debian stretch/contrib amd64 Packages 100 /var/lib/dpkg/status openssl: Installed: 1.0.2g-1ubuntu4.5 Candidate: 1.0.2g-1ubuntu4.8 Version table: 1.0.2g-1ubuntu4.8 500 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages 1.0.2g-1ubuntu4.6 500 500 http://security.ubuntu.com/ubuntu xenial-security/main amd64 Packages *** 1.0.2g-1ubuntu4.5 100 100 /var/lib/dpkg/status 1.0.2g-1ubuntu4 500 500 http://us.archive.ubuntu.com/ubuntu xenial/main amd64 Packages python-nibabel: Installed: 2.1.0-1 Candidate: 2.1.0-1 Version table: *** 2.1.0-1 900 900 http://http.debian.net/debian stretch/main amd64 Packages 900 http://http.debian.net/debian stretch/main i386 Packages 600 http://http.debian.net/debian sid/main amd64 Packages 600 http://http.debian.net/debian sid/main i386 Packages 100 /var/lib/dpkg/status 2.1.0-1~nd90+1 500 500 http://neuro.debian.net/debian stretch/main amd64 Packages 500 http://neuro.debian.net/debian stretch/main i386 Packages python-biotools: Installed: (none) Candidate: 1.2.12-2 Version table: 1.2.12-2 600 600 
http://http.debian.net/debian sid/main amd64 Packages 600 http://http.debian.net/debian sid/main i386 Packages alienblaster: Installed: 1.1.0-9 Candidate: 1.1.0-9 Version table: *** 1.1.0-9 500 500 http://us.archive.ubuntu.com/ubuntu xenial/universe amd64 Packages 500 file:/my/repo ./ Packages 500 file:/my/repo2 ubuntu/ Packages 100 /var/lib/dpkg/status skype:i386: Installed: (none) Candidate: (none) Version table: 4.3.0.37-1 -1 100 /var/lib/dpkg/status """ out1 = {'openssl': {'architecture': None, 'candidate': '1.0.2g-1ubuntu4.8', 'installed': '1.0.2g-1ubuntu4.5', 'versions': [{'installed': None, 'priority': '500', 'sources': [{'priority': '500', 'source': 'http://us.archive.ubuntu.com/ubuntu ' 'xenial-updates/main amd64 ' 'Packages'}], 'version': '1.0.2g-1ubuntu4.8'}, {'installed': None, 'priority': '500', 'sources': [{'priority': '500', 'source': 'http://security.ubuntu.com/ubuntu ' 'xenial-security/main amd64 ' 'Packages'}], 'version': '1.0.2g-1ubuntu4.6'}, {'installed': '***', 'priority': '100', 'sources': [{'priority': '100', 'source': '/var/lib/dpkg/status'}], 'version': '1.0.2g-1ubuntu4.5'}, {'installed': None, 'priority': '500', 'sources': [{'priority': '500', 'source': 'http://us.archive.ubuntu.com/ubuntu ' 'xenial/main amd64 ' 'Packages'}], 'version': '1.0.2g-1ubuntu4'}]}} out = parse_apt_cache_policy_pkgs_output(txt1) assert_is_subset_recur(out1, out, [dict]) def test_parse_apt_cache_policy_source_info(): from ..debian import parse_apt_cache_policy_source_info txt = """\ Package files: 100 /var/lib/dpkg/status release a=now 500 http://neuro.debian.net/debian xenial/non-free i386 Packages release o=NeuroDebian,a=xenial,n=xenial,l=NeuroDebian,c=non-free,b=i386 origin neuro.debian.net 500 http://neuro.debian.net/debian xenial/non-free amd64 Packages release o=NeuroDebian,a=xenial,n=xenial,l=NeuroDebian,c=non-free,b=amd64 origin neuro.debian.net 500 http://neuro.debian.net/debian data/non-free i386 Packages release 
o=NeuroDebian,a=data,n=data,l=NeuroDebian,c=non-free,b=i386 origin neuro.debian.net 500 http://neuro.debian.net/debian data/non-free amd64 Packages release o=NeuroDebian,a=data,n=data,l=NeuroDebian,c=non-free,b=amd64 origin neuro.debian.net 500 file:/my/repo2 ubuntu/ Packages release c= 500 file:/my/repo ./ Packages release c= 500 http://dl.google.com/linux/chrome/deb stable/main amd64 Packages release v=1.0,o=Google, Inc.,a=stable,n=stable,l=Google,c=main,b=amd64 origin dl.google.com 500 http://security.ubuntu.com/ubuntu xenial-security/restricted i386 Packages release v=16.04,o=Ubuntu,a=xenial-security,n=xenial,l=Ubuntu,c=restricted,b=i386 origin security.ubuntu.com 500 http://security.ubuntu.com/ubuntu xenial-security/restricted amd64 Packages release v=16.04,o=Ubuntu,a=xenial-security,n=xenial,l=Ubuntu,c=restricted,b=amd64 origin security.ubuntu.com 500 http://debproxy:9999/debian/ jessie-backports/contrib Translation-en 100 http://debproxy:9999/debian/ jessie-backports/non-free amd64 Packages release o=Debian Backports,a=jessie-backports,n=jessie-backports,l=Debian Backports,c=non-free origin debproxy 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/universe amd64 Packages release v=16.04,o=Ubuntu,a=xenial-updates,n=xenial,l=Ubuntu,c=universe,b=amd64 origin us.archive.ubuntu.com 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/multiverse i386 Packages release v=16.04,o=Ubuntu,a=xenial-updates,n=xenial,l=Ubuntu,c=multiverse,b=i386 origin us.archive.ubuntu.com Pinned packages: """ out1 = {'http://neuro.debian.net/debian xenial/non-free i386 Packages': {'architecture': 'i386', 'archive': 'xenial', 'archive_uri': 'http://neuro.debian.net/debian', 'uri_suite': 'xenial', 'codename': 'xenial', 'component': 'non-free', 'label': 'NeuroDebian', 'origin': 'NeuroDebian', 'site': 'neuro.debian.net' }, 'http://security.ubuntu.com/ubuntu xenial-security/restricted amd64 Packages': {'architecture': 'amd64', 'archive': 'xenial-security', 'archive_uri': 
'http://security.ubuntu.com/ubuntu', 'uri_suite': 'xenial-security', 'codename': 'xenial', 'component': 'restricted', 'label': 'Ubuntu', 'origin': 'Ubuntu', 'site': 'security.ubuntu.com' }, 'http://debproxy:9999/debian/ jessie-backports/contrib Translation-en': {'archive_uri': 'http://debproxy:9999/debian/', 'uri_suite': 'jessie-backports' }, 'http://debproxy:9999/debian/ jessie-backports/non-free amd64 Packages': {'archive': 'jessie-backports', 'archive_uri': 'http://debproxy:9999/debian/', 'codename': 'jessie-backports', 'component': 'non-free', 'label': 'Debian Backports', 'origin': 'Debian Backports', 'site': 'debproxy', 'uri_suite': 'jessie-backports' }, } out = parse_apt_cache_policy_source_info(txt) assert_is_subset_recur(out1, out, [dict]) def test_get_apt_release_file_names(): from ..debian import get_apt_release_file_names fn = get_apt_release_file_names('http://us.archive.ubuntu.com/ubuntu', 'xenial-backports') assert "/var/lib/apt/lists/us.archive.ubuntu.com_ubuntu_dists_xenial-backports_InRelease" in fn assert "/var/lib/apt/lists/us.archive.ubuntu.com_ubuntu_dists_xenial-backports_Release" in fn fn = get_apt_release_file_names('file:/my/repo2/ubuntu',None) assert "/var/lib/apt/lists/_my_repo2_ubuntu_InRelease" in fn assert "/var/lib/apt/lists/_my_repo2_ubuntu_Release" in fn def test_parse_dpkgquery_line(): for line, expected in [ ('zlib1g:i386: /lib/i386-linux-gnu/libz.so.1.2.8', {'name': 'zlib1g', 'architecture': 'i386', 'path': '/lib/i386-linux-gnu/libz.so.1.2.8', 'pkgs_rest': None}), ('fail2ban: /usr/bin/fail2ban-client', {'name': 'fail2ban', 'path': '/usr/bin/fail2ban-client', 'pkgs_rest': None}), ('fsl-5.0-eddy-nonfree, fsl-5.0-core: /usr/lib/fsl/5.0', {'name': 'fsl-5.0-eddy-nonfree', 'path': '/usr/lib/fsl/5.0', 'pkgs_rest': ', fsl-5.0-core'}), ('pkg: path,with,commas', {'name': 'pkg', 'path': 'path,with,commas', 'pkgs_rest': None}), ('diversion by dash from: /bin/sh', None) ]: assert parse_dpkgquery_line(line) == expected
niceman/support/distributions/tests/test_debian.py
from ..debian import DebianReleaseSpec from ..debian import get_spec_from_release_file from ..debian import parse_dpkgquery_line from niceman.tests.utils import eq_, assert_is_subset_recur def test_get_spec_from_release_file(f=None): content = """\ -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 Origin: NeuroDebian Label: NeuroDebian2 Suite: stretch Codename: stretch2 Date: Thu, 15 Sep 2016 01:30:57 UTC Architectures: i386 amd64 sparc Components: main non-free contrib Description: NeuroDebian repository with perspective, inofficial and backported packages -- mostly neuroscience-related MD5Sum: d9650396c56a6f9521d0bbd9f719efbe 482669 main/binary-i386/Packages 34134c9a64b847d33eeeb3cc7291f855ab9f0969e8ad7c92cd2a0c1aebc19d1e 14199 contrib/Contents-sparc.gz -----BEGIN PGP SIGNATURE----- Version: GnuPG v2 iEYEAREIAAYFAlfZ+dEACgkQpdMvASZJpamBowCfXOPQimiIy2wnVY5U9sLs1jSn JZ0An0Uoocusvjco1t6RAwxt/y3lQoWV =a3Nn -----END PGP SIGNATURE----- """ eq_(get_spec_from_release_file(content), DebianReleaseSpec( origin='NeuroDebian', label='NeuroDebian2', codename='stretch2', version=None, suite='stretch', date='Thu, 15 Sep 2016 01:30:57 UTC', components='main non-free contrib', architectures='i386 amd64 sparc', )) def test_parse_apt_cache_show_pkgs_output(): from ..debian import parse_apt_cache_show_pkgs_output txt1 = """\ Package: openssl Status: install ok installed Priority: optional Section: utils Installed-Size: 934 Maintainer: Ubuntu Developers <<EMAIL>> Architecture: amd64 Version: 1.0.2g-1ubuntu4.5 Depends: libc6 (>= 2.15), libssl1.0.0 (>= 1.0.2g) Suggests: ca-certificates Conffiles: /etc/ssl/openssl.cnf 7df26c55291b33344dc15e3935dabaf3 Description-en: Secure Sockets Layer toolkit - cryptographic utility This package is part of the OpenSSL project's implementation of the SSL and TLS cryptographic protocols for secure communication over the Internet. . 
It contains the general-purpose command line binary /usr/bin/openssl, useful for cryptographic operations such as: * creating RSA, DH, and DSA key parameters; * creating X.509 certificates, CSRs, and CRLs; * calculating message digests; * encrypting and decrypting with ciphers; * testing SSL/TLS clients and servers; * handling S/MIME signed or encrypted mail. Description-md5: 9b6de2bb6e1d9016aeb0f00bcf6617bd Original-Maintainer: Debian OpenSSL Team <<EMAIL>> Package: openssl Priority: standard Section: utils Installed-Size: 934 Maintainer: <NAME> <<EMAIL>> Original-Maintainer: Debian OpenSSL Team <<EMAIL>> Architecture: amd64 Source: openssl-src (1.0.2g) Version: 1.0.2g-1ubuntu4 Depends: libc6 (>= 2.15), libssl1.0.0 (>= 1.0.2g) Suggests: ca-certificates Filename: pool/main/o/openssl/openssl_1.0.2g-1ubuntu4_amd64.deb Size: 492190 MD5sum: 8280148dc2991da94be5810ad4d91552 SHA1: b5326f27aae83c303ff934121dede47d9fce7c76 SHA256: e897ffc8d84b0d436baca5dbd684a85146ffa78d3f2d15093779d3f5a8189690 Description-en: Secure Sockets Layer toolkit - cryptographic utility This package is part of the OpenSSL project's implementation of the SSL and TLS cryptographic protocols for secure communication over the Internet. . It contains the general-purpose command line binary /usr/bin/openssl, useful for cryptographic operations such as: * creating RSA, DH, and DSA key parameters; * creating X.509 certificates, CSRs, and CRLs; * calculating message digests; * encrypting and decrypting with ciphers; * testing SSL/TLS clients and servers; * handling S/MIME signed or encrypted mail. 
Description-md5: 9b6de2bb6e1d9016aeb0f00bcf6617bd Bugs: https://bugs.launchpad.net/ubuntu/+filebug Origin: Ubuntu Supported: 5y Task: standard, ubuntu-core, ubuntu-core, mythbuntu-frontend, mythbuntu-backend-slave, mythbuntu-backend-master, ubuntu-touch-core, ubuntu-touch, ubuntu-sdk-libs-tools, ubuntu-sdk Package: alienblaster Priority: extra Section: universe/games Installed-Size: 668 Maintainer: <NAME> <<EMAIL>> Original-Maintainer: Debian Games Team <<EMAIL>> Architecture: amd64 Source: alienblaster-src Version: 1.1.0-9 Depends: alienblaster-data, libc6 (>= 2.14), libgcc1 (>= 1:3.0), libsdl-mixer1.2, libsdl1.2debian (>= 1.2.11), libstdc++6 (>= 5.2) Filename: pool/universe/a/alienblaster/alienblaster_1.1.0-9_amd64.deb Size: 180278 MD5sum: e53379fd0d60e0af6304af78aa8ef2b7 SHA1: ca405056cf66a1c2ae3ae1674c22b7d24cda4986 SHA256: ff25bd843420801e9adea4f5ec1ca9656b2aeb327d8102107bf5ebbdb3046c38 Description-en: Classic 2D shoot 'em up Your mission is simple: Stop the invasion of the aliens and blast them! . Alien Blaster is a classic 2D shoot 'em up featuring lots of different weapons, special items, aliens to blast and a big bad boss. . It supports both a single player mode and a cooperative two player mode for two persons playing on one computer. 
Description-md5: da1f8f1a6453d62874036331e075d65f Homepage: http://www.schwardtnet.de/alienblaster/ Bugs: https://bugs.launchpad.net/ubuntu/+filebug Origin: Ubuntu """ out1 = [{'architecture': 'amd64', 'package': 'openssl', 'status': 'install ok installed', 'version': '1.0.2g-1ubuntu4.5'}, {'architecture': 'amd64', 'source_name': 'openssl-src', 'source_version': '1.0.2g', 'package': 'openssl', 'version': '1.0.2g-1ubuntu4'}, {'architecture': 'amd64', 'source_name': 'alienblaster-src', 'package': 'alienblaster', 'md5': 'e53379fd0d60e0af6304af78aa8ef2b7', 'version': '1.1.0-9'}, ] out = parse_apt_cache_show_pkgs_output(txt1) assert_is_subset_recur(out1, out, [dict, list]) def test_parse_apt_cache_policy_pkgs_output(): from ..debian import parse_apt_cache_policy_pkgs_output txt1 = """\ afni: Installed: 16.2.07~dfsg.1-2~nd90+1 Candidate: 16.2.07~dfsg.1-2~nd90+1 Version table: *** 16.2.07~dfsg.1-2~nd90+1 500 500 http://neuro.debian.net/debian stretch/contrib amd64 Packages 100 /var/lib/dpkg/status openssl: Installed: 1.0.2g-1ubuntu4.5 Candidate: 1.0.2g-1ubuntu4.8 Version table: 1.0.2g-1ubuntu4.8 500 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/main amd64 Packages 1.0.2g-1ubuntu4.6 500 500 http://security.ubuntu.com/ubuntu xenial-security/main amd64 Packages *** 1.0.2g-1ubuntu4.5 100 100 /var/lib/dpkg/status 1.0.2g-1ubuntu4 500 500 http://us.archive.ubuntu.com/ubuntu xenial/main amd64 Packages python-nibabel: Installed: 2.1.0-1 Candidate: 2.1.0-1 Version table: *** 2.1.0-1 900 900 http://http.debian.net/debian stretch/main amd64 Packages 900 http://http.debian.net/debian stretch/main i386 Packages 600 http://http.debian.net/debian sid/main amd64 Packages 600 http://http.debian.net/debian sid/main i386 Packages 100 /var/lib/dpkg/status 2.1.0-1~nd90+1 500 500 http://neuro.debian.net/debian stretch/main amd64 Packages 500 http://neuro.debian.net/debian stretch/main i386 Packages python-biotools: Installed: (none) Candidate: 1.2.12-2 Version table: 1.2.12-2 600 600 
http://http.debian.net/debian sid/main amd64 Packages 600 http://http.debian.net/debian sid/main i386 Packages alienblaster: Installed: 1.1.0-9 Candidate: 1.1.0-9 Version table: *** 1.1.0-9 500 500 http://us.archive.ubuntu.com/ubuntu xenial/universe amd64 Packages 500 file:/my/repo ./ Packages 500 file:/my/repo2 ubuntu/ Packages 100 /var/lib/dpkg/status skype:i386: Installed: (none) Candidate: (none) Version table: 4.3.0.37-1 -1 100 /var/lib/dpkg/status """ out1 = {'openssl': {'architecture': None, 'candidate': '1.0.2g-1ubuntu4.8', 'installed': '1.0.2g-1ubuntu4.5', 'versions': [{'installed': None, 'priority': '500', 'sources': [{'priority': '500', 'source': 'http://us.archive.ubuntu.com/ubuntu ' 'xenial-updates/main amd64 ' 'Packages'}], 'version': '1.0.2g-1ubuntu4.8'}, {'installed': None, 'priority': '500', 'sources': [{'priority': '500', 'source': 'http://security.ubuntu.com/ubuntu ' 'xenial-security/main amd64 ' 'Packages'}], 'version': '1.0.2g-1ubuntu4.6'}, {'installed': '***', 'priority': '100', 'sources': [{'priority': '100', 'source': '/var/lib/dpkg/status'}], 'version': '1.0.2g-1ubuntu4.5'}, {'installed': None, 'priority': '500', 'sources': [{'priority': '500', 'source': 'http://us.archive.ubuntu.com/ubuntu ' 'xenial/main amd64 ' 'Packages'}], 'version': '1.0.2g-1ubuntu4'}]}} out = parse_apt_cache_policy_pkgs_output(txt1) assert_is_subset_recur(out1, out, [dict]) def test_parse_apt_cache_policy_source_info(): from ..debian import parse_apt_cache_policy_source_info txt = """\ Package files: 100 /var/lib/dpkg/status release a=now 500 http://neuro.debian.net/debian xenial/non-free i386 Packages release o=NeuroDebian,a=xenial,n=xenial,l=NeuroDebian,c=non-free,b=i386 origin neuro.debian.net 500 http://neuro.debian.net/debian xenial/non-free amd64 Packages release o=NeuroDebian,a=xenial,n=xenial,l=NeuroDebian,c=non-free,b=amd64 origin neuro.debian.net 500 http://neuro.debian.net/debian data/non-free i386 Packages release 
o=NeuroDebian,a=data,n=data,l=NeuroDebian,c=non-free,b=i386 origin neuro.debian.net 500 http://neuro.debian.net/debian data/non-free amd64 Packages release o=NeuroDebian,a=data,n=data,l=NeuroDebian,c=non-free,b=amd64 origin neuro.debian.net 500 file:/my/repo2 ubuntu/ Packages release c= 500 file:/my/repo ./ Packages release c= 500 http://dl.google.com/linux/chrome/deb stable/main amd64 Packages release v=1.0,o=Google, Inc.,a=stable,n=stable,l=Google,c=main,b=amd64 origin dl.google.com 500 http://security.ubuntu.com/ubuntu xenial-security/restricted i386 Packages release v=16.04,o=Ubuntu,a=xenial-security,n=xenial,l=Ubuntu,c=restricted,b=i386 origin security.ubuntu.com 500 http://security.ubuntu.com/ubuntu xenial-security/restricted amd64 Packages release v=16.04,o=Ubuntu,a=xenial-security,n=xenial,l=Ubuntu,c=restricted,b=amd64 origin security.ubuntu.com 500 http://debproxy:9999/debian/ jessie-backports/contrib Translation-en 100 http://debproxy:9999/debian/ jessie-backports/non-free amd64 Packages release o=Debian Backports,a=jessie-backports,n=jessie-backports,l=Debian Backports,c=non-free origin debproxy 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/universe amd64 Packages release v=16.04,o=Ubuntu,a=xenial-updates,n=xenial,l=Ubuntu,c=universe,b=amd64 origin us.archive.ubuntu.com 500 http://us.archive.ubuntu.com/ubuntu xenial-updates/multiverse i386 Packages release v=16.04,o=Ubuntu,a=xenial-updates,n=xenial,l=Ubuntu,c=multiverse,b=i386 origin us.archive.ubuntu.com Pinned packages: """ out1 = {'http://neuro.debian.net/debian xenial/non-free i386 Packages': {'architecture': 'i386', 'archive': 'xenial', 'archive_uri': 'http://neuro.debian.net/debian', 'uri_suite': 'xenial', 'codename': 'xenial', 'component': 'non-free', 'label': 'NeuroDebian', 'origin': 'NeuroDebian', 'site': 'neuro.debian.net' }, 'http://security.ubuntu.com/ubuntu xenial-security/restricted amd64 Packages': {'architecture': 'amd64', 'archive': 'xenial-security', 'archive_uri': 
'http://security.ubuntu.com/ubuntu', 'uri_suite': 'xenial-security', 'codename': 'xenial', 'component': 'restricted', 'label': 'Ubuntu', 'origin': 'Ubuntu', 'site': 'security.ubuntu.com' }, 'http://debproxy:9999/debian/ jessie-backports/contrib Translation-en': {'archive_uri': 'http://debproxy:9999/debian/', 'uri_suite': 'jessie-backports' }, 'http://debproxy:9999/debian/ jessie-backports/non-free amd64 Packages': {'archive': 'jessie-backports', 'archive_uri': 'http://debproxy:9999/debian/', 'codename': 'jessie-backports', 'component': 'non-free', 'label': 'Debian Backports', 'origin': 'Debian Backports', 'site': 'debproxy', 'uri_suite': 'jessie-backports' }, } out = parse_apt_cache_policy_source_info(txt) assert_is_subset_recur(out1, out, [dict]) def test_get_apt_release_file_names(): from ..debian import get_apt_release_file_names fn = get_apt_release_file_names('http://us.archive.ubuntu.com/ubuntu', 'xenial-backports') assert "/var/lib/apt/lists/us.archive.ubuntu.com_ubuntu_dists_xenial-backports_InRelease" in fn assert "/var/lib/apt/lists/us.archive.ubuntu.com_ubuntu_dists_xenial-backports_Release" in fn fn = get_apt_release_file_names('file:/my/repo2/ubuntu',None) assert "/var/lib/apt/lists/_my_repo2_ubuntu_InRelease" in fn assert "/var/lib/apt/lists/_my_repo2_ubuntu_Release" in fn def test_parse_dpkgquery_line(): for line, expected in [ ('zlib1g:i386: /lib/i386-linux-gnu/libz.so.1.2.8', {'name': 'zlib1g', 'architecture': 'i386', 'path': '/lib/i386-linux-gnu/libz.so.1.2.8', 'pkgs_rest': None}), ('fail2ban: /usr/bin/fail2ban-client', {'name': 'fail2ban', 'path': '/usr/bin/fail2ban-client', 'pkgs_rest': None}), ('fsl-5.0-eddy-nonfree, fsl-5.0-core: /usr/lib/fsl/5.0', {'name': 'fsl-5.0-eddy-nonfree', 'path': '/usr/lib/fsl/5.0', 'pkgs_rest': ', fsl-5.0-core'}), ('pkg: path,with,commas', {'name': 'pkg', 'path': 'path,with,commas', 'pkgs_rest': None}), ('diversion by dash from: /bin/sh', None) ]: assert parse_dpkgquery_line(line) == expected
0.368065
0.150653
import os import re from mmpython import mediainfo import mmpython from discinfo import DiscInfo LSDVD_EXE='lsdvd' class DVDAudio(mediainfo.AudioInfo): def __init__(self, data): mediainfo.AudioInfo.__init__(self) self.number = int(data[1]) if data[3] != 'xx': self.language = data[3] try: # some DVDs have a very bad language setting self.language.encode() except UnicodeError: self.language = '' try: self.codec = data[7] try: self.samplerate = int(data[9]) except ValueError, e: if data[9].lower().find('khz') > 0: pos = data[9].lower().find('khz') self.samplerate = int(data[9][:pos]) * 1000 else: raise e self.channels = data[13] except Exception, e: # WTF, strange DVD, now try to find the bug (may not work) self.codec = data[data.index('Format:') + 1] try: freq = data[data.index('Frequency:') + 1] self.samplerate = int(freq) except ValueError: if freq.lower().find('khz') > 0: self.samplerate = int(freq[:freq.lower().find('khz')])*1000 self.channels = int(data[data.index('Channels:') + 1]) class DVDVideo(mediainfo.VideoInfo): def __init__(self, data): mediainfo.VideoInfo.__init__(self) self.width = int(data[12]) self.height = int(data[14]) self.fps = float(data[5]) self.aspect = data[10] class DVDTitle(mediainfo.AVInfo): def __init__(self, data): mediainfo.AVInfo.__init__(self) self.number = int(data[1]) self.keys.append('subtitles') self.keys.append('chapters') self.mime = 'video/mpeg' l = re.split('[:.]', data[3]) self.length = (int(l[0])*60+int(l[1]))*60+int(l[2]) self.trackno = int(data[1]) self.chapters = int(data[5]) class DVDInfo(DiscInfo): def __init__(self, device): DiscInfo.__init__(self) self.context = 'video' self.offset = 0 if mediainfo.DEBUG > 1: print 'trying lsdvd for scanning the disc' if os.path.isdir(device): self.valid = self.isDVDdir(device) else: self.valid = self.isDisc(device) if self.valid and self.tracks: self.keys.append('length') self.length = 0 first = 0 for t in self.tracks: self.length += t.length if not first: first = t.length if 
self.length/len(self.tracks) == first: # badly mastered dvd self.length = first if mediainfo.DEBUG > 1: print 'lsdvd detection ok' self.mime = 'video/dvd' self.type = 'DVD' self.subtype = 'video' def lsdvd(self, path): """ use lsdvd to get informations about this disc """ import popen2 child = popen2.Popen3('%s -v -n -a -s "%s"' % \ (LSDVD_EXE, path), 1, 100) for line in child.fromchild.readlines(): data = line.replace(',', '').replace('\t', '').\ replace('\n', '').lstrip(' ').split(' ') if len(data) > 2: if data[0] == 'Title:': ti = DVDTitle(data) self.appendtrack(ti) elif data[0] == 'Audio:': self.tracks[-1].audio.append(DVDAudio(data)) elif data[0] == 'Subtitle:': self.tracks[-1].subtitles.append(data[3]) elif data[0] == 'VTS:': self.tracks[-1].video.append(DVDVideo(data)) self.tracks[-1].video[-1].length = self.tracks[-1].length elif data[:3] == ['Number', 'of', 'Angles:']: self.tracks[-1].angles = int(data[3]) self.tracks[-1].keys.append('angles') child.wait() child.fromchild.close() child.childerr.close() child.tochild.close() if len(self.tracks) > 0: for ti in self.tracks: ti.trackof = len(self.tracks) return 1 return 0 def isDVDdir(self, dirname): if os.path.isdir(dirname+'/VIDEO_TS') or \ os.path.isdir(dirname+'/video_ts') or \ os.path.isdir(dirname+'/Video_ts'): return self.lsdvd(dirname) return 0 def isDisc(self, device): if DiscInfo.isDisc(self, device) != 2: return 0 # brute force reading of the device to find out if it is a DVD f = open(device,'rb') f.seek(32768, 0) buffer = f.read(60000) if buffer.find('UDF') == -1: f.close() return 0 # seems to be a DVD, read a little bit more buffer += f.read(550000) f.close() if buffer.find('VIDEO_TS') == -1 and \ buffer.find('VIDEO_TS.IFO') == -1 and \ buffer.find('OSTA UDF Compliant') == -1: return 0 ret = self.lsdvd(device) if not ret: # we are very sure this is a DVD, maybe the drive was not # ready, let's try again return self.lsdvd(device) return 1 if os.environ.has_key('LSDVD') and os.environ['LSDVD']: 
LSDVD_EXE = os.environ['LSDVD'] else: for path in os.environ['PATH'].split(':'): if os.path.isfile(os.path.join(path, 'lsdvd')): LSDVD_EXE = os.path.join(path, 'lsdvd') break else: if mediainfo.DEBUG: print 'ImportError: lsdvd not found' raise ImportError mmpython.registertype( 'video/dvd', mediainfo.EXTENSION_DEVICE, mediainfo.TYPE_AV, DVDInfo ) mmpython.registertype( 'video/dvd', mediainfo.EXTENSION_DIRECTORY, mediainfo.TYPE_AV, DVDInfo )
app/FileViewer/FileServer/misc/mmpython/disc/lsdvd.py
import os import re from mmpython import mediainfo import mmpython from discinfo import DiscInfo LSDVD_EXE='lsdvd' class DVDAudio(mediainfo.AudioInfo): def __init__(self, data): mediainfo.AudioInfo.__init__(self) self.number = int(data[1]) if data[3] != 'xx': self.language = data[3] try: # some DVDs have a very bad language setting self.language.encode() except UnicodeError: self.language = '' try: self.codec = data[7] try: self.samplerate = int(data[9]) except ValueError, e: if data[9].lower().find('khz') > 0: pos = data[9].lower().find('khz') self.samplerate = int(data[9][:pos]) * 1000 else: raise e self.channels = data[13] except Exception, e: # WTF, strange DVD, now try to find the bug (may not work) self.codec = data[data.index('Format:') + 1] try: freq = data[data.index('Frequency:') + 1] self.samplerate = int(freq) except ValueError: if freq.lower().find('khz') > 0: self.samplerate = int(freq[:freq.lower().find('khz')])*1000 self.channels = int(data[data.index('Channels:') + 1]) class DVDVideo(mediainfo.VideoInfo): def __init__(self, data): mediainfo.VideoInfo.__init__(self) self.width = int(data[12]) self.height = int(data[14]) self.fps = float(data[5]) self.aspect = data[10] class DVDTitle(mediainfo.AVInfo): def __init__(self, data): mediainfo.AVInfo.__init__(self) self.number = int(data[1]) self.keys.append('subtitles') self.keys.append('chapters') self.mime = 'video/mpeg' l = re.split('[:.]', data[3]) self.length = (int(l[0])*60+int(l[1]))*60+int(l[2]) self.trackno = int(data[1]) self.chapters = int(data[5]) class DVDInfo(DiscInfo): def __init__(self, device): DiscInfo.__init__(self) self.context = 'video' self.offset = 0 if mediainfo.DEBUG > 1: print 'trying lsdvd for scanning the disc' if os.path.isdir(device): self.valid = self.isDVDdir(device) else: self.valid = self.isDisc(device) if self.valid and self.tracks: self.keys.append('length') self.length = 0 first = 0 for t in self.tracks: self.length += t.length if not first: first = t.length if 
self.length/len(self.tracks) == first: # badly mastered dvd self.length = first if mediainfo.DEBUG > 1: print 'lsdvd detection ok' self.mime = 'video/dvd' self.type = 'DVD' self.subtype = 'video' def lsdvd(self, path): """ use lsdvd to get informations about this disc """ import popen2 child = popen2.Popen3('%s -v -n -a -s "%s"' % \ (LSDVD_EXE, path), 1, 100) for line in child.fromchild.readlines(): data = line.replace(',', '').replace('\t', '').\ replace('\n', '').lstrip(' ').split(' ') if len(data) > 2: if data[0] == 'Title:': ti = DVDTitle(data) self.appendtrack(ti) elif data[0] == 'Audio:': self.tracks[-1].audio.append(DVDAudio(data)) elif data[0] == 'Subtitle:': self.tracks[-1].subtitles.append(data[3]) elif data[0] == 'VTS:': self.tracks[-1].video.append(DVDVideo(data)) self.tracks[-1].video[-1].length = self.tracks[-1].length elif data[:3] == ['Number', 'of', 'Angles:']: self.tracks[-1].angles = int(data[3]) self.tracks[-1].keys.append('angles') child.wait() child.fromchild.close() child.childerr.close() child.tochild.close() if len(self.tracks) > 0: for ti in self.tracks: ti.trackof = len(self.tracks) return 1 return 0 def isDVDdir(self, dirname): if os.path.isdir(dirname+'/VIDEO_TS') or \ os.path.isdir(dirname+'/video_ts') or \ os.path.isdir(dirname+'/Video_ts'): return self.lsdvd(dirname) return 0 def isDisc(self, device): if DiscInfo.isDisc(self, device) != 2: return 0 # brute force reading of the device to find out if it is a DVD f = open(device,'rb') f.seek(32768, 0) buffer = f.read(60000) if buffer.find('UDF') == -1: f.close() return 0 # seems to be a DVD, read a little bit more buffer += f.read(550000) f.close() if buffer.find('VIDEO_TS') == -1 and \ buffer.find('VIDEO_TS.IFO') == -1 and \ buffer.find('OSTA UDF Compliant') == -1: return 0 ret = self.lsdvd(device) if not ret: # we are very sure this is a DVD, maybe the drive was not # ready, let's try again return self.lsdvd(device) return 1 if os.environ.has_key('LSDVD') and os.environ['LSDVD']: 
LSDVD_EXE = os.environ['LSDVD'] else: for path in os.environ['PATH'].split(':'): if os.path.isfile(os.path.join(path, 'lsdvd')): LSDVD_EXE = os.path.join(path, 'lsdvd') break else: if mediainfo.DEBUG: print 'ImportError: lsdvd not found' raise ImportError mmpython.registertype( 'video/dvd', mediainfo.EXTENSION_DEVICE, mediainfo.TYPE_AV, DVDInfo ) mmpython.registertype( 'video/dvd', mediainfo.EXTENSION_DIRECTORY, mediainfo.TYPE_AV, DVDInfo )
0.26341
0.157266
import sys print(sys.version) from mpi4py import MPI comm = MPI.COMM_WORLD num_procs = comm.Get_size() rank = comm.Get_rank() run_test=False rank_i = rank//3 rank_j = rank%3 import gc import pandas as pd import numpy as np import os from sklearn.model_selection import GridSearchCV, cross_val_score, cross_validate, KFold import pickle import re df = pd.read_csv('0_labelled_documents.csv') df = (df .query('driver_coded==1') .query('relevant==1') .sort_values('id') .sample(frac=1, random_state=1) .reset_index(drop=True) ) df.loc[df['representative_relevant_sample']==1,'random_sample'] = 1 if run_test: df = df.head(200) print(df.shape) def KFoldRandom(n_splits, X, no_test, shuffle=False, discard=True): kf = KFold(n_splits=n_splits, shuffle=shuffle) for train, test in kf.split(X): if not discard: train = list(train) + [x for x in test if x in no_test] test = [x for x in test if x not in no_test] yield (train, test) from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification import tensorflow as tf import tensorflow_addons as tfa tf.config.threading.set_intra_op_parallelism_threads(8) tf.config.threading.set_inter_op_parallelism_threads(8) MODEL_NAME = 'distilbert-base-uncased' tokenizer = DistilBertTokenizer.from_pretrained(MODEL_NAME) def create_train_val(x,y,train,val): train_encodings = tokenizer(list(x[train].values), truncation=True, padding=True) val_encodings = tokenizer(list(x[val].values), truncation=True, padding=True) train_dataset = tf.data.Dataset.from_tensor_slices(( dict(train_encodings), list(y[train].values) )) val_dataset = tf.data.Dataset.from_tensor_slices(( dict(val_encodings), list(y[val].values) )) MAX_LEN = train_dataset._structure[0]['input_ids'].shape[0] return train_dataset, val_dataset, MAX_LEN def init_model(MODEL_NAME, num_labels, params): model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=num_labels) optimizer = 
tfa.optimizers.AdamW(learning_rate=params['learning_rate'], weight_decay=params['weight_decay']) loss = tf.keras.losses.BinaryCrossentropy(from_logits=True) metrics = tf.metrics.BinaryAccuracy() model.compile( optimizer=optimizer, loss=loss, metrics=metrics ) return model from sklearn.metrics import roc_curve, accuracy_score, roc_auc_score, precision_recall_curve, f1_score from sklearn.metrics import precision_score, recall_score def evaluate_preds(y_true, y_pred, targets): res = {} for average in ["micro","macro","weighted", "samples"]: try: res[f'ROC AUC {average}'] = roc_auc_score(y_true, y_pred, average=average) except: res[f'ROC AUC {average}'] = np.NaN res[f'F1 {average}'] = f1_score(y_true, y_pred.round(), average=average) res[f'precision {average}'] = precision_score(y_true, y_pred.round(), average=average) res[f'recall {average}'] = recall_score(y_true, y_pred.round(), average=average) for i, target in enumerate(targets): try: res[f'ROC AUC - {target}'] = roc_auc_score(y_true[:,i], y_pred[:,i]) except: res[f'ROC AUC - {target}'] = np.NaN res[f'precision - {target}'] = precision_score(y_true[:,i], y_pred[:,i].round()) res[f'recall - {target}'] = recall_score(y_true[:,i], y_pred[:,i].round()) res[f'F1 - {target}'] = f1_score(y_true[:,i], y_pred[:,i].round()) res[f'accuracy - {target}'] = accuracy_score(y_true[:,i], y_pred[:,i].round()) res[f'n_target - {target}'] = y_true[:,i].sum() return res #targets = [x for x in df.columns if "12 - " in x and "Physical systems" not in x] targets = ['6 - Temperature','6 - Precipitation','6 - Other'] df['labels'] = list(df[targets].values) class_weight = {} for i, t in enumerate(targets): cw = df[(df['random_sample']==1) & (df[t]==0)].shape[0] / df[(df['random_sample']==1) & (df[t]==1)].shape[0] class_weight[i] = cw class_weight bert_params = { "class_weight": [None,class_weight], "batch_size": [16, 32], "weight_decay": (0, 0.3), "learning_rate": (1e-5, 5e-5), "num_epochs": [2, 3, 4] } import itertools def 
product_dict(**kwargs): keys = kwargs.keys() vals = kwargs.values() for instance in itertools.product(*vals): yield dict(zip(keys, instance)) param_space = list(product_dict(**bert_params)) outer_cv = KFoldRandom(3, df.index, df[df['random_sample']!=1].index) outer_scores = [] clfs = [] def train_eval_bert(params, df, train, test): train_dataset, val_dataset, MAX_LEN = create_train_val(df['content'], df['labels'], train, test) print("training bert with these params") print(params) model = init_model('distilbert-base-uncased', len(targets), params) model.fit(train_dataset.shuffle(100).batch(params['batch_size']), epochs=params['num_epochs'], batch_size=params['batch_size'], class_weight=params['class_weight'] ) preds = model.predict(val_dataset.batch(1)).logits y_pred = tf.keras.activations.sigmoid(tf.convert_to_tensor(preds)).numpy() ai = np.expand_dims(np.argmax(y_pred, axis=1), axis=1) maximums = np.maximum(y_pred.max(1),0.51) np.put_along_axis(y_pred, ai, maximums.reshape(ai.shape), axis=1) eps = evaluate_preds(np.array(df.loc[test,targets]), y_pred, targets) print(eps) for key, value in params.items(): eps[key] = value return eps restart = True for k, (train, test) in enumerate(outer_cv): if k!=rank_i: continue inner_cv = KFoldRandom(3, train, df[df['random_sample']!=1].index, discard=False) inner_scores = [] for l, (l_train, l_test) in enumerate(inner_cv): if l!=rank_j: continue cv_results = [] params_tested = [] if not restart: try: pr = param_space[0] cv_results=pd.read_csv(f'cv_3/cv_results_multi_driver_{rank_i}_{rank_j}.csv').to_dict('records') params_tested=pd.read_csv(f'cv_3/cv_results_multi_driver_{rank_i}_{rank_j}.csv')[list(pr.keys())].to_dict('records') except: pass for pr in param_space: if pr in params_tested: continue cv_results.append(train_eval_bert(pr, df=df, train=l_train, test=l_test)) pd.DataFrame.from_dict(cv_results).to_csv(f'cv_3/cv_results_multi_driver_{rank_i}_{rank_j}.csv',index=False) gc.collect()
analysis/2-cluster-ml-scripts/cv_bert_drivers.py
import sys print(sys.version) from mpi4py import MPI comm = MPI.COMM_WORLD num_procs = comm.Get_size() rank = comm.Get_rank() run_test=False rank_i = rank//3 rank_j = rank%3 import gc import pandas as pd import numpy as np import os from sklearn.model_selection import GridSearchCV, cross_val_score, cross_validate, KFold import pickle import re df = pd.read_csv('0_labelled_documents.csv') df = (df .query('driver_coded==1') .query('relevant==1') .sort_values('id') .sample(frac=1, random_state=1) .reset_index(drop=True) ) df.loc[df['representative_relevant_sample']==1,'random_sample'] = 1 if run_test: df = df.head(200) print(df.shape) def KFoldRandom(n_splits, X, no_test, shuffle=False, discard=True): kf = KFold(n_splits=n_splits, shuffle=shuffle) for train, test in kf.split(X): if not discard: train = list(train) + [x for x in test if x in no_test] test = [x for x in test if x not in no_test] yield (train, test) from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification import tensorflow as tf import tensorflow_addons as tfa tf.config.threading.set_intra_op_parallelism_threads(8) tf.config.threading.set_inter_op_parallelism_threads(8) MODEL_NAME = 'distilbert-base-uncased' tokenizer = DistilBertTokenizer.from_pretrained(MODEL_NAME) def create_train_val(x,y,train,val): train_encodings = tokenizer(list(x[train].values), truncation=True, padding=True) val_encodings = tokenizer(list(x[val].values), truncation=True, padding=True) train_dataset = tf.data.Dataset.from_tensor_slices(( dict(train_encodings), list(y[train].values) )) val_dataset = tf.data.Dataset.from_tensor_slices(( dict(val_encodings), list(y[val].values) )) MAX_LEN = train_dataset._structure[0]['input_ids'].shape[0] return train_dataset, val_dataset, MAX_LEN def init_model(MODEL_NAME, num_labels, params): model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=num_labels) optimizer = 
tfa.optimizers.AdamW(learning_rate=params['learning_rate'], weight_decay=params['weight_decay']) loss = tf.keras.losses.BinaryCrossentropy(from_logits=True) metrics = tf.metrics.BinaryAccuracy() model.compile( optimizer=optimizer, loss=loss, metrics=metrics ) return model from sklearn.metrics import roc_curve, accuracy_score, roc_auc_score, precision_recall_curve, f1_score from sklearn.metrics import precision_score, recall_score def evaluate_preds(y_true, y_pred, targets): res = {} for average in ["micro","macro","weighted", "samples"]: try: res[f'ROC AUC {average}'] = roc_auc_score(y_true, y_pred, average=average) except: res[f'ROC AUC {average}'] = np.NaN res[f'F1 {average}'] = f1_score(y_true, y_pred.round(), average=average) res[f'precision {average}'] = precision_score(y_true, y_pred.round(), average=average) res[f'recall {average}'] = recall_score(y_true, y_pred.round(), average=average) for i, target in enumerate(targets): try: res[f'ROC AUC - {target}'] = roc_auc_score(y_true[:,i], y_pred[:,i]) except: res[f'ROC AUC - {target}'] = np.NaN res[f'precision - {target}'] = precision_score(y_true[:,i], y_pred[:,i].round()) res[f'recall - {target}'] = recall_score(y_true[:,i], y_pred[:,i].round()) res[f'F1 - {target}'] = f1_score(y_true[:,i], y_pred[:,i].round()) res[f'accuracy - {target}'] = accuracy_score(y_true[:,i], y_pred[:,i].round()) res[f'n_target - {target}'] = y_true[:,i].sum() return res #targets = [x for x in df.columns if "12 - " in x and "Physical systems" not in x] targets = ['6 - Temperature','6 - Precipitation','6 - Other'] df['labels'] = list(df[targets].values) class_weight = {} for i, t in enumerate(targets): cw = df[(df['random_sample']==1) & (df[t]==0)].shape[0] / df[(df['random_sample']==1) & (df[t]==1)].shape[0] class_weight[i] = cw class_weight bert_params = { "class_weight": [None,class_weight], "batch_size": [16, 32], "weight_decay": (0, 0.3), "learning_rate": (1e-5, 5e-5), "num_epochs": [2, 3, 4] } import itertools def 
product_dict(**kwargs): keys = kwargs.keys() vals = kwargs.values() for instance in itertools.product(*vals): yield dict(zip(keys, instance)) param_space = list(product_dict(**bert_params)) outer_cv = KFoldRandom(3, df.index, df[df['random_sample']!=1].index) outer_scores = [] clfs = [] def train_eval_bert(params, df, train, test): train_dataset, val_dataset, MAX_LEN = create_train_val(df['content'], df['labels'], train, test) print("training bert with these params") print(params) model = init_model('distilbert-base-uncased', len(targets), params) model.fit(train_dataset.shuffle(100).batch(params['batch_size']), epochs=params['num_epochs'], batch_size=params['batch_size'], class_weight=params['class_weight'] ) preds = model.predict(val_dataset.batch(1)).logits y_pred = tf.keras.activations.sigmoid(tf.convert_to_tensor(preds)).numpy() ai = np.expand_dims(np.argmax(y_pred, axis=1), axis=1) maximums = np.maximum(y_pred.max(1),0.51) np.put_along_axis(y_pred, ai, maximums.reshape(ai.shape), axis=1) eps = evaluate_preds(np.array(df.loc[test,targets]), y_pred, targets) print(eps) for key, value in params.items(): eps[key] = value return eps restart = True for k, (train, test) in enumerate(outer_cv): if k!=rank_i: continue inner_cv = KFoldRandom(3, train, df[df['random_sample']!=1].index, discard=False) inner_scores = [] for l, (l_train, l_test) in enumerate(inner_cv): if l!=rank_j: continue cv_results = [] params_tested = [] if not restart: try: pr = param_space[0] cv_results=pd.read_csv(f'cv_3/cv_results_multi_driver_{rank_i}_{rank_j}.csv').to_dict('records') params_tested=pd.read_csv(f'cv_3/cv_results_multi_driver_{rank_i}_{rank_j}.csv')[list(pr.keys())].to_dict('records') except: pass for pr in param_space: if pr in params_tested: continue cv_results.append(train_eval_bert(pr, df=df, train=l_train, test=l_test)) pd.DataFrame.from_dict(cv_results).to_csv(f'cv_3/cv_results_multi_driver_{rank_i}_{rank_j}.csv',index=False) gc.collect()
0.389547
0.252747
from pandac.PandaModules import * from toontown.toonbase.ToonBaseGlobal import * from direct.interval.IntervalGlobal import * from toontown.toonbase.ToontownGlobals import * from toontown.distributed.DelayDelete import DelayDelete from direct.directnotify import DirectNotifyGlobal from direct.fsm import StateData from direct.fsm import ClassicFSM, State from direct.fsm import State from . import CatchGameGlobals from direct.task.Task import Task class CatchGameToonSD(StateData.StateData): notify = DirectNotifyGlobal.directNotify.newCategory('CatchGameToonSD') FallBackAnim = 'slip-backward' FallFwdAnim = 'slip-forward' CatchNeutralAnim = 'catch-neutral' CatchRunAnim = 'catch-run' EatNeutralAnim = 'catch-eatneutral' EatNRunAnim = 'catch-eatnrun' animList = [FallBackAnim, FallFwdAnim, CatchNeutralAnim, CatchRunAnim, EatNeutralAnim, EatNRunAnim] def __init__(self, avId, game): self.avId = avId self.game = game self.isLocal = avId == base.localAvatar.doId self.toon = self.game.getAvatar(self.avId) self._delayDelete = DelayDelete(self.toon, 'CatchGameToonSD') self.unexpectedExit = False self.fsm = ClassicFSM.ClassicFSM('CatchGameAnimFSM-%s' % self.avId, [State.State('init', self.enterInit, self.exitInit, ['normal']), State.State('normal', self.enterNormal, self.exitNormal, ['eatFruit', 'fallBack', 'fallForward']), State.State('eatFruit', self.enterEatFruit, self.exitEatFruit, ['normal', 'fallBack', 'fallForward', 'eatFruit']), State.State('fallBack', self.enterFallBack, self.exitFallBack, ['normal']), State.State('fallForward', self.enterFallForward, self.exitFallForward, ['normal']), State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'init', 'cleanup') def load(self): self.setAnimState('off', 1.0) for anim in self.animList: self.toon.pose(anim, 0) def unload(self): self._delayDelete.destroy() del self.fsm def enter(self): self.fsm.enterInitialState() self._exiting = False def exit(self, unexpectedExit = False): if self._exiting: return self._exiting = 
True self.unexpectedExit = unexpectedExit self.fsm.requestFinalState() del self._exiting def enterInit(self): self.notify.debug('enterInit') self.toon.startBlink() self.toon.stopLookAround() if self.isLocal: self.game.initOrthoWalk() self.toon.useLOD(1000) self.dropShadow = self.toon.dropShadow self.origDropShadowColor = self.dropShadow.getColor() c = self.origDropShadowColor alpha = 0.35 self.dropShadow.setColor(c[0], c[1], c[2], alpha) def exitInit(self): pass def enterNormal(self): self.notify.debug('enterNormal') self.setAnimState('Catching', 1.0) if self.isLocal: self.game.orthoWalk.start() self.toon.lerpLookAt(Vec3.forward() + Vec3.up(), time=0.2, blink=0) def exitNormal(self): self.setAnimState('off', 1.0) if self.isLocal: self.game.orthoWalk.stop() self.toon.lerpLookAt(Vec3.forward(), time=0.2, blink=0) def eatFruit(self, fruitModel, handNode): if self.fsm.getCurrentState().getName() == 'eatFruit': self.fsm.request('normal') self.fsm.request('eatFruit', [fruitModel, handNode]) def enterEatFruit(self, fruitModel, handNode): self.notify.debug('enterEatFruit') self.setAnimState('CatchEating', 1.0) if self.isLocal: self.game.orthoWalk.start() self.fruitModel = fruitModel renderScale = fruitModel.getScale(render) fruitModel.reparentTo(handNode) fruitModel.setScale(render, renderScale) def finishedEating(self = self, fruitModel = fruitModel): self.fsm.request('normal') return Task.done duration = self.toon.getDuration('catch-eatneutral') self.eatIval = Sequence(Parallel(WaitInterval(duration), Sequence(LerpScaleInterval(fruitModel, duration / 2.0, fruitModel.getScale() * 0.5, blendType='easeInOut'), Func(fruitModel.hide))), Func(finishedEating), name=self.toon.uniqueName('eatingIval')) self.eatIval.start() def exitEatFruit(self): self.eatIval.pause() del self.eatIval self.fruitModel.reparentTo(hidden) self.fruitModel.removeNode() del self.fruitModel self.setAnimState('off', 1.0) if self.isLocal: self.game.orthoWalk.stop() def enterFallBack(self): 
self.notify.debug('enterFallBack') if self.isLocal: base.playSfx(self.game.sndOof) duration = 1.0 animName = self.FallBackAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame frameRate = self.toon.getFrameRate(animName) newRate = frames / duration playRate = newRate / frameRate def resume(self = self): self.fsm.request('normal') self.fallBackIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume)) self.fallBackIval.start() def exitFallBack(self): self.fallBackIval.pause() del self.fallBackIval def enterFallForward(self): self.notify.debug('enterFallForward') if self.isLocal: base.playSfx(self.game.sndOof) duration = 1.0 animName = self.FallFwdAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame frameRate = self.toon.getFrameRate(animName) newRate = frames / duration playRate = newRate / frameRate def resume(self = self): self.fsm.request('normal') self.fallFwdIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume)) self.fallFwdIval.start() def exitFallForward(self): self.fallFwdIval.pause() del self.fallFwdIval def enterCleanup(self): self.notify.debug('enterCleanup') self.toon.stopBlink() self.toon.startLookAround() if self.isLocal: self.game.orthoWalk.stop() self.game.destroyOrthoWalk() self.toon.resetLOD() self.dropShadow.setColor(self.origDropShadowColor) def exitCleanup(self): pass def setAnimState(self, newState, playRate): if not self.unexpectedExit: self.toon.setAnimState(newState, playRate)
toontown/minigame/CatchGameToonSD.py
from pandac.PandaModules import * from toontown.toonbase.ToonBaseGlobal import * from direct.interval.IntervalGlobal import * from toontown.toonbase.ToontownGlobals import * from toontown.distributed.DelayDelete import DelayDelete from direct.directnotify import DirectNotifyGlobal from direct.fsm import StateData from direct.fsm import ClassicFSM, State from direct.fsm import State from . import CatchGameGlobals from direct.task.Task import Task class CatchGameToonSD(StateData.StateData): notify = DirectNotifyGlobal.directNotify.newCategory('CatchGameToonSD') FallBackAnim = 'slip-backward' FallFwdAnim = 'slip-forward' CatchNeutralAnim = 'catch-neutral' CatchRunAnim = 'catch-run' EatNeutralAnim = 'catch-eatneutral' EatNRunAnim = 'catch-eatnrun' animList = [FallBackAnim, FallFwdAnim, CatchNeutralAnim, CatchRunAnim, EatNeutralAnim, EatNRunAnim] def __init__(self, avId, game): self.avId = avId self.game = game self.isLocal = avId == base.localAvatar.doId self.toon = self.game.getAvatar(self.avId) self._delayDelete = DelayDelete(self.toon, 'CatchGameToonSD') self.unexpectedExit = False self.fsm = ClassicFSM.ClassicFSM('CatchGameAnimFSM-%s' % self.avId, [State.State('init', self.enterInit, self.exitInit, ['normal']), State.State('normal', self.enterNormal, self.exitNormal, ['eatFruit', 'fallBack', 'fallForward']), State.State('eatFruit', self.enterEatFruit, self.exitEatFruit, ['normal', 'fallBack', 'fallForward', 'eatFruit']), State.State('fallBack', self.enterFallBack, self.exitFallBack, ['normal']), State.State('fallForward', self.enterFallForward, self.exitFallForward, ['normal']), State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'init', 'cleanup') def load(self): self.setAnimState('off', 1.0) for anim in self.animList: self.toon.pose(anim, 0) def unload(self): self._delayDelete.destroy() del self.fsm def enter(self): self.fsm.enterInitialState() self._exiting = False def exit(self, unexpectedExit = False): if self._exiting: return self._exiting = 
True self.unexpectedExit = unexpectedExit self.fsm.requestFinalState() del self._exiting def enterInit(self): self.notify.debug('enterInit') self.toon.startBlink() self.toon.stopLookAround() if self.isLocal: self.game.initOrthoWalk() self.toon.useLOD(1000) self.dropShadow = self.toon.dropShadow self.origDropShadowColor = self.dropShadow.getColor() c = self.origDropShadowColor alpha = 0.35 self.dropShadow.setColor(c[0], c[1], c[2], alpha) def exitInit(self): pass def enterNormal(self): self.notify.debug('enterNormal') self.setAnimState('Catching', 1.0) if self.isLocal: self.game.orthoWalk.start() self.toon.lerpLookAt(Vec3.forward() + Vec3.up(), time=0.2, blink=0) def exitNormal(self): self.setAnimState('off', 1.0) if self.isLocal: self.game.orthoWalk.stop() self.toon.lerpLookAt(Vec3.forward(), time=0.2, blink=0) def eatFruit(self, fruitModel, handNode): if self.fsm.getCurrentState().getName() == 'eatFruit': self.fsm.request('normal') self.fsm.request('eatFruit', [fruitModel, handNode]) def enterEatFruit(self, fruitModel, handNode): self.notify.debug('enterEatFruit') self.setAnimState('CatchEating', 1.0) if self.isLocal: self.game.orthoWalk.start() self.fruitModel = fruitModel renderScale = fruitModel.getScale(render) fruitModel.reparentTo(handNode) fruitModel.setScale(render, renderScale) def finishedEating(self = self, fruitModel = fruitModel): self.fsm.request('normal') return Task.done duration = self.toon.getDuration('catch-eatneutral') self.eatIval = Sequence(Parallel(WaitInterval(duration), Sequence(LerpScaleInterval(fruitModel, duration / 2.0, fruitModel.getScale() * 0.5, blendType='easeInOut'), Func(fruitModel.hide))), Func(finishedEating), name=self.toon.uniqueName('eatingIval')) self.eatIval.start() def exitEatFruit(self): self.eatIval.pause() del self.eatIval self.fruitModel.reparentTo(hidden) self.fruitModel.removeNode() del self.fruitModel self.setAnimState('off', 1.0) if self.isLocal: self.game.orthoWalk.stop() def enterFallBack(self): 
self.notify.debug('enterFallBack') if self.isLocal: base.playSfx(self.game.sndOof) duration = 1.0 animName = self.FallBackAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame frameRate = self.toon.getFrameRate(animName) newRate = frames / duration playRate = newRate / frameRate def resume(self = self): self.fsm.request('normal') self.fallBackIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume)) self.fallBackIval.start() def exitFallBack(self): self.fallBackIval.pause() del self.fallBackIval def enterFallForward(self): self.notify.debug('enterFallForward') if self.isLocal: base.playSfx(self.game.sndOof) duration = 1.0 animName = self.FallFwdAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame frameRate = self.toon.getFrameRate(animName) newRate = frames / duration playRate = newRate / frameRate def resume(self = self): self.fsm.request('normal') self.fallFwdIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume)) self.fallFwdIval.start() def exitFallForward(self): self.fallFwdIval.pause() del self.fallFwdIval def enterCleanup(self): self.notify.debug('enterCleanup') self.toon.stopBlink() self.toon.startLookAround() if self.isLocal: self.game.orthoWalk.stop() self.game.destroyOrthoWalk() self.toon.resetLOD() self.dropShadow.setColor(self.origDropShadowColor) def exitCleanup(self): pass def setAnimState(self, newState, playRate): if not self.unexpectedExit: self.toon.setAnimState(newState, playRate)
0.439747
0.092155
__author__ = '<EMAIL> (<NAME>)' import document CUSTOM_SERIALIZE_METHOD_NAME = 'Serialize' def IsListOrDict(inst): """Returns whether or not this is a list, tuple, set or dict .""" return hasattr(inst, '__iter__') def IsDict(inst): """Returns whether or not the specified instance is a dict.""" return hasattr(inst, 'iteritems') def IsInstance(obj): """Returns whether or not the specified instance is a user-defined type.""" # NOTE(davidbyttow): This seems like a reasonably safe hack for now... # I'm not exactly sure how to test if something is a subclass of object. # And no, "is InstanceType" does not work here. :( return str(type(obj)).startswith('<class ') def CollapseJavaCollections(data): """Collapses the unnecessary extra data structures in the wire format. Currently the wire format is built from marshalling of Java objects. This introduces overhead of extra key/value pairs with respect to collections and superfluous fields. As such, this method attempts to collapse those structures out of the data format by collapsing the collection objects and removing the java class fields. This preserves the data that is passed in and only removes the collection types. Args: data: Some arbitrary dict, list or primitive type. Returns: The same data structure with the collapsed and unnecessary objects removed. """ if IsDict(data): java_class = data.get('javaClass') if java_class: del data['javaClass'] if java_class == 'java.util.HashMap': return CollapseJavaCollections(data['map']) elif java_class == 'java.util.ArrayList': return CollapseJavaCollections(data['list']) for key, val in data.iteritems(): data[key] = CollapseJavaCollections(val) elif IsListOrDict(data): for index in range(len(data)): data[index] = CollapseJavaCollections(data[index]) return data return data def ToLowerCamelCase(s): """Converts a string to lower camel case. Examples: foo => foo foo_bar => fooBar foo__bar => fooBar foo_bar_baz => fooBarBaz Args: s: The string to convert to lower camel case. 
Returns: The lower camel cased string. """ return reduce(lambda a, b: a + (a and b.capitalize() or b), s.split('_')) def ToUpperCamelCase(s): """Converts a string to upper camel case. Examples: foo => Foo foo_bar => FooBar foo__bar => FooBar foo_bar_baz => FooBarBaz Args: s: The string to convert to upper camel case. Returns: The upper camel cased string. """ return ''.join(fragment.capitalize() for fragment in s.split('_')) def DefaultKeyWriter(key_name): """This key writer rewrites keys as lower camel case. Expects that the input is formed by '_' delimited words. Args: key_name: Name of the key to serialize. Returns: Key name in lower camel-cased form. """ return ToLowerCamelCase(key_name) def _SerializeAttributes(obj, key_writer=DefaultKeyWriter): """Serializes attributes of an instance. Iterates all attributes of an object and invokes serialize if they are public and not callable. Args: obj: The instance to serialize. key_writer: Optional function that takes a string key and optionally mutates it before serialization. For example: def randomize(key_name): return key_name += str(random.random()) Returns: The serialized object. """ data = {} for attr_name in dir(obj): if attr_name.startswith('_'): continue attr = getattr(obj, attr_name) if callable(attr): continue if attr is None: continue # Looks okay, serialize it. data[key_writer(attr_name)] = Serialize(attr) return data def _SerializeList(l): """Invokes Serialize on all of its elements. Args: l: The list object to serialize. Returns: The serialized list. """ data = [Serialize(v) for v in l] return { 'javaClass': 'java.util.ArrayList', 'list': data } def _SerializeDict(d, key_writer=DefaultKeyWriter): """Invokes serialize on all of its key/value pairs. Args: d: The dict instance to serialize. key_writer: Optional key writer function. Returns: The serialized dict. 
""" data = {} for k, v in d.iteritems(): data[key_writer(k)] = Serialize(v) return { 'javaClass': 'java.util.HashMap', 'map': data } def Serialize(obj, key_writer=DefaultKeyWriter): """Serializes any instance. If this is a user-defined instance type, it will first check for a custom Serialize() function and use that if it exists. Otherwise, it will invoke serialize all of its public attributes. Lists and dicts are serialized trivially. Args: obj: The instance to serialize. key_writer: Optional key writer function. Returns: The serialized object. """ if IsInstance(obj): if obj and hasattr(obj, CUSTOM_SERIALIZE_METHOD_NAME): method = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME) if callable(method): return method() return _SerializeAttributes(obj, key_writer) elif IsDict(obj): return _SerializeDict(obj, key_writer) elif IsListOrDict(obj): return _SerializeList(obj) return obj def ClipRange(r, clip_range): """Clips one range to another. Given a range to be clipped and a clipping range, will result in a list of 0-2 new ranges. If the range is completely inside of the clipping range then an empty list will be returned. If it is completely outside, then a list with only the same range will be returned. Otherwise, other permutations may result in a single clipped range or two ranges that were the result of a split. Args: r: The range to be clipped. clip_range: The range that is clipping the other. Returns: A list of 0-2 ranges as a result of performing the clip. """ # Check if completely outside the clipping range. if r.end <= clip_range.start or r.start >= clip_range.end: return [r] # Check if completely clipped. if r.start >= clip_range.start and r.end <= clip_range.end: return [] # Check if split. if clip_range.start >= r.start and clip_range.end <= r.end: splits = [] if r.start < clip_range.start: splits.append(document.Range(r.start, clip_range.start)) if clip_range.end < r.end: splits.append(document.Range(clip_range.end, r.end)) return splits # Just a trim. 
if clip_range.start < r.start: return [document.Range(clip_range.end, r.end)] return [document.Range(r.start, clip_range.start)]
app/waveapi/util.py
__author__ = '<EMAIL> (<NAME>)' import document CUSTOM_SERIALIZE_METHOD_NAME = 'Serialize' def IsListOrDict(inst): """Returns whether or not this is a list, tuple, set or dict .""" return hasattr(inst, '__iter__') def IsDict(inst): """Returns whether or not the specified instance is a dict.""" return hasattr(inst, 'iteritems') def IsInstance(obj): """Returns whether or not the specified instance is a user-defined type.""" # NOTE(davidbyttow): This seems like a reasonably safe hack for now... # I'm not exactly sure how to test if something is a subclass of object. # And no, "is InstanceType" does not work here. :( return str(type(obj)).startswith('<class ') def CollapseJavaCollections(data): """Collapses the unnecessary extra data structures in the wire format. Currently the wire format is built from marshalling of Java objects. This introduces overhead of extra key/value pairs with respect to collections and superfluous fields. As such, this method attempts to collapse those structures out of the data format by collapsing the collection objects and removing the java class fields. This preserves the data that is passed in and only removes the collection types. Args: data: Some arbitrary dict, list or primitive type. Returns: The same data structure with the collapsed and unnecessary objects removed. """ if IsDict(data): java_class = data.get('javaClass') if java_class: del data['javaClass'] if java_class == 'java.util.HashMap': return CollapseJavaCollections(data['map']) elif java_class == 'java.util.ArrayList': return CollapseJavaCollections(data['list']) for key, val in data.iteritems(): data[key] = CollapseJavaCollections(val) elif IsListOrDict(data): for index in range(len(data)): data[index] = CollapseJavaCollections(data[index]) return data return data def ToLowerCamelCase(s): """Converts a string to lower camel case. Examples: foo => foo foo_bar => fooBar foo__bar => fooBar foo_bar_baz => fooBarBaz Args: s: The string to convert to lower camel case. 
Returns: The lower camel cased string. """ return reduce(lambda a, b: a + (a and b.capitalize() or b), s.split('_')) def ToUpperCamelCase(s): """Converts a string to upper camel case. Examples: foo => Foo foo_bar => FooBar foo__bar => FooBar foo_bar_baz => FooBarBaz Args: s: The string to convert to upper camel case. Returns: The upper camel cased string. """ return ''.join(fragment.capitalize() for fragment in s.split('_')) def DefaultKeyWriter(key_name): """This key writer rewrites keys as lower camel case. Expects that the input is formed by '_' delimited words. Args: key_name: Name of the key to serialize. Returns: Key name in lower camel-cased form. """ return ToLowerCamelCase(key_name) def _SerializeAttributes(obj, key_writer=DefaultKeyWriter): """Serializes attributes of an instance. Iterates all attributes of an object and invokes serialize if they are public and not callable. Args: obj: The instance to serialize. key_writer: Optional function that takes a string key and optionally mutates it before serialization. For example: def randomize(key_name): return key_name += str(random.random()) Returns: The serialized object. """ data = {} for attr_name in dir(obj): if attr_name.startswith('_'): continue attr = getattr(obj, attr_name) if callable(attr): continue if attr is None: continue # Looks okay, serialize it. data[key_writer(attr_name)] = Serialize(attr) return data def _SerializeList(l): """Invokes Serialize on all of its elements. Args: l: The list object to serialize. Returns: The serialized list. """ data = [Serialize(v) for v in l] return { 'javaClass': 'java.util.ArrayList', 'list': data } def _SerializeDict(d, key_writer=DefaultKeyWriter): """Invokes serialize on all of its key/value pairs. Args: d: The dict instance to serialize. key_writer: Optional key writer function. Returns: The serialized dict. 
""" data = {} for k, v in d.iteritems(): data[key_writer(k)] = Serialize(v) return { 'javaClass': 'java.util.HashMap', 'map': data } def Serialize(obj, key_writer=DefaultKeyWriter): """Serializes any instance. If this is a user-defined instance type, it will first check for a custom Serialize() function and use that if it exists. Otherwise, it will invoke serialize all of its public attributes. Lists and dicts are serialized trivially. Args: obj: The instance to serialize. key_writer: Optional key writer function. Returns: The serialized object. """ if IsInstance(obj): if obj and hasattr(obj, CUSTOM_SERIALIZE_METHOD_NAME): method = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME) if callable(method): return method() return _SerializeAttributes(obj, key_writer) elif IsDict(obj): return _SerializeDict(obj, key_writer) elif IsListOrDict(obj): return _SerializeList(obj) return obj def ClipRange(r, clip_range): """Clips one range to another. Given a range to be clipped and a clipping range, will result in a list of 0-2 new ranges. If the range is completely inside of the clipping range then an empty list will be returned. If it is completely outside, then a list with only the same range will be returned. Otherwise, other permutations may result in a single clipped range or two ranges that were the result of a split. Args: r: The range to be clipped. clip_range: The range that is clipping the other. Returns: A list of 0-2 ranges as a result of performing the clip. """ # Check if completely outside the clipping range. if r.end <= clip_range.start or r.start >= clip_range.end: return [r] # Check if completely clipped. if r.start >= clip_range.start and r.end <= clip_range.end: return [] # Check if split. if clip_range.start >= r.start and clip_range.end <= r.end: splits = [] if r.start < clip_range.start: splits.append(document.Range(r.start, clip_range.start)) if clip_range.end < r.end: splits.append(document.Range(clip_range.end, r.end)) return splits # Just a trim. 
if clip_range.start < r.start: return [document.Range(clip_range.end, r.end)] return [document.Range(r.start, clip_range.start)]
0.865849
0.343975
import re import os from subprocess import check_call from setuptools import setup, find_packages, Command from setuptools.command.sdist import sdist cmdclass = {} try: from pyqt_distutils.build_ui import build_ui has_build_ui = True except ImportError: has_build_ui = False try: from sphinx.setup_command import BuildDoc cmdclass["build_docs"] = BuildDoc except ImportError: pass with open("app/__init__.py") as f: _version = re.search(r"__version__\s+=\s+\'(.*)\'", f.read()).group(1) if has_build_ui: class build_res(build_ui): """Build UI, resources and translations.""" def run(self): # build translations check_call(["pyside2-lupdate", "app.pro"]) lrelease = os.environ.get("LRELEASE_BIN") if not lrelease: lrelease = "lrelease" check_call([lrelease, "app.pro"]) # build UI & resources build_ui.run(self) # create __init__ file for compiled ui open("app/_ui/__init__.py", "a").close() cmdclass["build_res"] = build_res class custom_sdist(sdist): """Custom sdist command.""" def run(self): self.run_command("build_res") sdist.run(self) cmdclass["sdist"] = custom_sdist class bdist_app(Command): """Custom command to build the application. """ description = "Build the application" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): self.run_command("build_res") check_call(["pyinstaller", "-y", "app.spec"]) cmdclass["bdist_app"] = bdist_app CURDIR = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(CURDIR, "requirements.txt")) as requirements: REQUIREMENTS = requirements.read().splitlines() setup( name="app", version=_version, packages=find_packages(), description="PySide2 Boilerplate", author="<NAME>", author_email="<EMAIL>", license="MIT", url="http://www.teslabs.com", entry_points={"gui_scripts": ["app=app.__main__:main"]}, install_requires=REQUIREMENTS, cmdclass=cmdclass, )
setup.py
import re import os from subprocess import check_call from setuptools import setup, find_packages, Command from setuptools.command.sdist import sdist cmdclass = {} try: from pyqt_distutils.build_ui import build_ui has_build_ui = True except ImportError: has_build_ui = False try: from sphinx.setup_command import BuildDoc cmdclass["build_docs"] = BuildDoc except ImportError: pass with open("app/__init__.py") as f: _version = re.search(r"__version__\s+=\s+\'(.*)\'", f.read()).group(1) if has_build_ui: class build_res(build_ui): """Build UI, resources and translations.""" def run(self): # build translations check_call(["pyside2-lupdate", "app.pro"]) lrelease = os.environ.get("LRELEASE_BIN") if not lrelease: lrelease = "lrelease" check_call([lrelease, "app.pro"]) # build UI & resources build_ui.run(self) # create __init__ file for compiled ui open("app/_ui/__init__.py", "a").close() cmdclass["build_res"] = build_res class custom_sdist(sdist): """Custom sdist command.""" def run(self): self.run_command("build_res") sdist.run(self) cmdclass["sdist"] = custom_sdist class bdist_app(Command): """Custom command to build the application. """ description = "Build the application" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): self.run_command("build_res") check_call(["pyinstaller", "-y", "app.spec"]) cmdclass["bdist_app"] = bdist_app CURDIR = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(CURDIR, "requirements.txt")) as requirements: REQUIREMENTS = requirements.read().splitlines() setup( name="app", version=_version, packages=find_packages(), description="PySide2 Boilerplate", author="<NAME>", author_email="<EMAIL>", license="MIT", url="http://www.teslabs.com", entry_points={"gui_scripts": ["app=app.__main__:main"]}, install_requires=REQUIREMENTS, cmdclass=cmdclass, )
0.352648
0.076098
import logging import traceback import sublime import sublime_plugin from ..anaconda_lib.worker import Worker from ..anaconda_lib._typing import Dict, Any from ..anaconda_lib.progress_bar import ProgressBar from ..anaconda_lib.helpers import get_settings, is_python, get_window_view from ..anaconda_lib.jsonclient import Callback class AnacondaAutoFormat(sublime_plugin.TextCommand): """Execute autopep8 formating """ data = None def run(self, edit: sublime.Edit) -> None: if self.data is not None: self.replace(edit) return aggresive_level = get_settings(self.view, 'aggressive', 0) if aggresive_level > 0: if not sublime.ok_cancel_dialog( 'You have an aggressive level of {} this may cause ' 'anaconda to change things that you don\'t really want to ' 'change.\n\nAre you sure do you want to continue?'.format( aggresive_level ) ): return self.code = self.view.substr(sublime.Region(0, self.view.size())) settings = { 'aggressive': aggresive_level, 'list-fixes': get_settings(self.view, 'list-fixes', False), 'autoformat_ignore': get_settings( self.view, 'autoformat_ignore', [] ), 'autoformat_select': get_settings( self.view, 'autoformat_select', [] ), 'pep8_max_line_length': get_settings( self.view, 'pep8_max_line_length', 79 ), 'tab_size': get_settings(self.view, 'tab_size', 4) } try: messages = { 'start': 'Autoformatting please wait... 
', 'end': 'Autoformatting done!', 'fail': 'Autoformatting failed, buffer not changed.', 'timeout': 'Autoformatting failed, buffer not changed.', } self.pbar = ProgressBar(messages) self.pbar.start() self.view.set_read_only(True) data = { 'vid': self.view.id(), 'code': self.code, 'method': 'pep8', 'settings': settings, 'handler': 'autoformat' } timeout = get_settings(self.view, 'auto_formatting_timeout', 1) callback = Callback(timeout=timeout) callback.on(success=self.get_data) callback.on(error=self.on_failure) callback.on(timeout=self.on_failure) Worker().execute(callback, **data) except: logging.error(traceback.format_exc()) def on_failure(self, *args: Any, **kwargs: Any) -> None: self.pbar.terminate(status=self.pbar.Status.FAILURE) self.view.set_read_only(False) def is_enabled(self) -> bool: """Determine if this command is enabled or not """ return is_python(self.view, True) def get_data(self, data: Dict[str, Any]) -> None: """Collect the returned data from autopep8 """ self.data = data self.pbar.terminate() self.view.set_read_only(False) self.view.run_command('anaconda_auto_format') def replace(self, edit: sublime.Edit) -> None: """Replace the old code with what autopep8 gave to us """ view = get_window_view(self.data['vid']) if self.code != self.data.get('buffer'): region = sublime.Region(0, view.size()) view.replace(edit, region, self.data.get('buffer')) if get_settings(view, 'auto_formatting'): sublime.set_timeout(lambda: view.run_command("save"), 0) self.code = None self.data = None
sublime/Packages/Anaconda/commands/autoformat.py
import logging import traceback import sublime import sublime_plugin from ..anaconda_lib.worker import Worker from ..anaconda_lib._typing import Dict, Any from ..anaconda_lib.progress_bar import ProgressBar from ..anaconda_lib.helpers import get_settings, is_python, get_window_view from ..anaconda_lib.jsonclient import Callback class AnacondaAutoFormat(sublime_plugin.TextCommand): """Execute autopep8 formating """ data = None def run(self, edit: sublime.Edit) -> None: if self.data is not None: self.replace(edit) return aggresive_level = get_settings(self.view, 'aggressive', 0) if aggresive_level > 0: if not sublime.ok_cancel_dialog( 'You have an aggressive level of {} this may cause ' 'anaconda to change things that you don\'t really want to ' 'change.\n\nAre you sure do you want to continue?'.format( aggresive_level ) ): return self.code = self.view.substr(sublime.Region(0, self.view.size())) settings = { 'aggressive': aggresive_level, 'list-fixes': get_settings(self.view, 'list-fixes', False), 'autoformat_ignore': get_settings( self.view, 'autoformat_ignore', [] ), 'autoformat_select': get_settings( self.view, 'autoformat_select', [] ), 'pep8_max_line_length': get_settings( self.view, 'pep8_max_line_length', 79 ), 'tab_size': get_settings(self.view, 'tab_size', 4) } try: messages = { 'start': 'Autoformatting please wait... 
', 'end': 'Autoformatting done!', 'fail': 'Autoformatting failed, buffer not changed.', 'timeout': 'Autoformatting failed, buffer not changed.', } self.pbar = ProgressBar(messages) self.pbar.start() self.view.set_read_only(True) data = { 'vid': self.view.id(), 'code': self.code, 'method': 'pep8', 'settings': settings, 'handler': 'autoformat' } timeout = get_settings(self.view, 'auto_formatting_timeout', 1) callback = Callback(timeout=timeout) callback.on(success=self.get_data) callback.on(error=self.on_failure) callback.on(timeout=self.on_failure) Worker().execute(callback, **data) except: logging.error(traceback.format_exc()) def on_failure(self, *args: Any, **kwargs: Any) -> None: self.pbar.terminate(status=self.pbar.Status.FAILURE) self.view.set_read_only(False) def is_enabled(self) -> bool: """Determine if this command is enabled or not """ return is_python(self.view, True) def get_data(self, data: Dict[str, Any]) -> None: """Collect the returned data from autopep8 """ self.data = data self.pbar.terminate() self.view.set_read_only(False) self.view.run_command('anaconda_auto_format') def replace(self, edit: sublime.Edit) -> None: """Replace the old code with what autopep8 gave to us """ view = get_window_view(self.data['vid']) if self.code != self.data.get('buffer'): region = sublime.Region(0, view.size()) view.replace(edit, region, self.data.get('buffer')) if get_settings(view, 'auto_formatting'): sublime.set_timeout(lambda: view.run_command("save"), 0) self.code = None self.data = None
0.394318
0.069827
# Copyright (c) 2021 <NAME> # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ PyTkAnim is extension for tkinter that provides animator and simple usage. # animate Y when button is clicked import tkinter as tk from pytkanim import normalAnims root = tk.Tk() #Widget Name can be also 'up' Label = normalAnims.NormalAnimY(tk.Label(bg="Black"),"down") Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() # animate X when button is clicked import tkinter as tk from pytkanim import normalAnims root = tk.Tk() #Widget Name can be also 'backwards' Label = normalAnims.NormalAnimX(tk.Label(bg="Black"),"forward") Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() # You can add starter X and Y positions too! 
by simple doing this import tkinter as tk from pytkanim import normalAnims root = tk.Tk() Label = normalAnims.NormalAnimX(tk.Label(bg="Black"),"backwards",startAX=0.5,startAY=0.5) Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() # and also how speed to animate is. import tkinter as tk from pytkanim import normalAnims root = tk.Tk() Label = normalAnims.NormalAnimX(tk.Label(bg="Black"),"forward",startAX=0.5,startAY=0.5,speed=10) #Higher amount of speed the more it goes slower.r Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() """ import os __author__ = "<NAME>" __version__ = "1.0.1" printIntro = True if printIntro: print(f"Using PyTkAnim Version {__version__} Author: {__author__}") print(f"You can turn off this message by turning 'printIntro' to False in __init__ file.") else: pass
pytkanim/__init__.py
# Copyright (c) 2021 <NAME> # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ PyTkAnim is extension for tkinter that provides animator and simple usage. # animate Y when button is clicked import tkinter as tk from pytkanim import normalAnims root = tk.Tk() #Widget Name can be also 'up' Label = normalAnims.NormalAnimY(tk.Label(bg="Black"),"down") Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() # animate X when button is clicked import tkinter as tk from pytkanim import normalAnims root = tk.Tk() #Widget Name can be also 'backwards' Label = normalAnims.NormalAnimX(tk.Label(bg="Black"),"forward") Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() # You can add starter X and Y positions too! 
by simple doing this import tkinter as tk from pytkanim import normalAnims root = tk.Tk() Label = normalAnims.NormalAnimX(tk.Label(bg="Black"),"backwards",startAX=0.5,startAY=0.5) Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() # and also how speed to animate is. import tkinter as tk from pytkanim import normalAnims root = tk.Tk() Label = normalAnims.NormalAnimX(tk.Label(bg="Black"),"forward",startAX=0.5,startAY=0.5,speed=10) #Higher amount of speed the more it goes slower.r Button = tk.Button(text="Click Me",command=Label.run) Button.pack() root.geometry("800x600") root.mainloop() """ import os __author__ = "<NAME>" __version__ = "1.0.1" printIntro = True if printIntro: print(f"Using PyTkAnim Version {__version__} Author: {__author__}") print(f"You can turn off this message by turning 'printIntro' to False in __init__ file.") else: pass
0.577257
0.093678
"""A set of simple unit tests for protecting against regressions in train.py"""
import lightgbm
import numpy as np
import pandas as pd

# Functions under test live in train.py.
from train import split_data, train_model, get_model_metrics


def _build_test_datasets():
    """Helper building a tiny (train, validation) lightgbm Dataset pair."""
    X_train = np.arange(1, 7).reshape(-1, 1)
    y_train = np.array([1, 1, 0, 1, 0, 1])
    X_test = np.array([7, 8]).reshape(-1, 1)
    y_test = np.array([0, 1])

    return (lightgbm.Dataset(X_train, y_train),
            lightgbm.Dataset(X_test, y_test))


def test_split_data():
    """split_data must drop id/target columns and produce a 4/1 split."""
    frame = pd.DataFrame({
        'id': [0, 1, 2, 3, 4],
        'target': [0, 0, 1, 0, 1],
        'col1': [1, 2, 3, 4, 5],
        'col2': [2, 1, 1, 2, 1],
    })

    datasets = split_data(frame)
    train_set, valid_set = datasets[0], datasets[1]

    # Label and id columns must not be part of the feature matrix.
    assert "target" not in train_set.data.columns
    assert "id" not in train_set.data.columns
    assert "col1" in train_set.data.columns

    # Data split as desired: 4 training rows, 1 validation row.
    assert train_set.data.shape == (4, 2)
    assert valid_set.data.shape == (1, 2)

    # The valid_data set's raw data is used for metric calculation, so
    # free_raw_data should be False.
    assert not valid_set.free_raw_data


def test_train_model():
    """Hyperparameters must be forwarded verbatim to the trained model."""
    datasets = _build_test_datasets()

    hyperparams = {
        "learning_rate": 0.05,
        "metric": "auc",
        "min_data": 1,
    }
    model = train_model(datasets, hyperparams)

    # Every requested parameter must appear, unchanged, in the model.
    for name, value in hyperparams.items():
        assert name in model.params
        assert model.params[name] == value


def test_get_model_metrics():
    """get_model_metrics must return a dict with the computed AUC."""
    class StubModel:
        @staticmethod
        def predict(data):
            # Always predict the negative class.
            return np.zeros(2, dtype=int)

    metrics = get_model_metrics(StubModel(), _build_test_datasets())

    # Metrics is a dictionary containing the auc value.
    assert "auc" in metrics
    np.testing.assert_almost_equal(metrics["auc"], 0.5)
openhackmlops/training/test_train.py
"""A set of simple unit tests for protecting against regressions in train.py"""
import lightgbm
import numpy as np
import pandas as pd

# Functions under test live in train.py.
from train import split_data, train_model, get_model_metrics


def _build_test_datasets():
    """Helper building a tiny (train, validation) lightgbm Dataset pair."""
    X_train = np.arange(1, 7).reshape(-1, 1)
    y_train = np.array([1, 1, 0, 1, 0, 1])
    X_test = np.array([7, 8]).reshape(-1, 1)
    y_test = np.array([0, 1])

    return (lightgbm.Dataset(X_train, y_train),
            lightgbm.Dataset(X_test, y_test))


def test_split_data():
    """split_data must drop id/target columns and produce a 4/1 split."""
    frame = pd.DataFrame({
        'id': [0, 1, 2, 3, 4],
        'target': [0, 0, 1, 0, 1],
        'col1': [1, 2, 3, 4, 5],
        'col2': [2, 1, 1, 2, 1],
    })

    datasets = split_data(frame)
    train_set, valid_set = datasets[0], datasets[1]

    # Label and id columns must not be part of the feature matrix.
    assert "target" not in train_set.data.columns
    assert "id" not in train_set.data.columns
    assert "col1" in train_set.data.columns

    # Data split as desired: 4 training rows, 1 validation row.
    assert train_set.data.shape == (4, 2)
    assert valid_set.data.shape == (1, 2)

    # The valid_data set's raw data is used for metric calculation, so
    # free_raw_data should be False.
    assert not valid_set.free_raw_data


def test_train_model():
    """Hyperparameters must be forwarded verbatim to the trained model."""
    datasets = _build_test_datasets()

    hyperparams = {
        "learning_rate": 0.05,
        "metric": "auc",
        "min_data": 1,
    }
    model = train_model(datasets, hyperparams)

    # Every requested parameter must appear, unchanged, in the model.
    for name, value in hyperparams.items():
        assert name in model.params
        assert model.params[name] == value


def test_get_model_metrics():
    """get_model_metrics must return a dict with the computed AUC."""
    class StubModel:
        @staticmethod
        def predict(data):
            # Always predict the negative class.
            return np.zeros(2, dtype=int)

    metrics = get_model_metrics(StubModel(), _build_test_datasets())

    # Metrics is a dictionary containing the auc value.
    assert "auc" in metrics
    np.testing.assert_almost_equal(metrics["auc"], 0.5)
0.814422
0.751238
"""Unit tests for the predict methods of trained LP and NC pipeline models."""
import pytest

from graphdatascience.graph.graph_object import Graph
from graphdatascience.graph_data_science import GraphDataScience
from graphdatascience.model.link_prediction_model import LPModel
from graphdatascience.model.model import Model
from graphdatascience.model.node_classification_model import NCModel

from .conftest import CollectingQueryRunner

# Name of the pipeline created by the model fixtures below.
PIPE_NAME = "pipe"


@pytest.fixture(scope="module")
def G(gds: GraphDataScience) -> Graph:
    """Project a small graph once for the whole test module."""
    G_, _ = gds.graph.project("g", "Node", "REL")
    return G_


@pytest.fixture
def lp_model(gds: GraphDataScience, G: Graph) -> Model:
    """Create and train a link prediction pipeline model."""
    # Fix: use the PIPE_NAME constant instead of repeating the literal "pipe".
    pipe, _ = gds.beta.pipeline.linkPrediction.create(PIPE_NAME)
    trainedPipe, _ = pipe.train(G, modelName="m", concurrency=2)
    return trainedPipe


@pytest.fixture
def nc_model(gds: GraphDataScience, G: Graph) -> Model:
    """Create and train a node classification pipeline model."""
    # Fix: use the PIPE_NAME constant instead of repeating the literal "pipe".
    pipe, _ = gds.beta.pipeline.nodeClassification.create(PIPE_NAME)
    trainedPipe, _ = pipe.train(G, modelName="m", concurrency=2)
    return trainedPipe


def test_predict_stream_lp_model(
    runner: CollectingQueryRunner, lp_model: LPModel, G: Graph
) -> None:
    """predict_stream on an LP model issues the expected Cypher call."""
    lp_model.predict_stream(G, topN=2)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.linkPrediction.predict.stream($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": lp_model.name(), "topN": 2},
    }


def test_predict_mutate_lp_model(
    runner: CollectingQueryRunner, lp_model: LPModel, G: Graph
) -> None:
    """predict_mutate on an LP model issues the expected Cypher call."""
    lp_model.predict_mutate(G, topN=2, mutateRelationshipType="HELO")

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.linkPrediction.predict.mutate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {
            "modelName": lp_model.name(),
            "topN": 2,
            "mutateRelationshipType": "HELO",
        },
    }


def test_estimate_predict_stream_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_stream_estimate on an NC model issues the expected Cypher call."""
    nc_model.predict_stream_estimate(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.stream.estimate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_predict_stream_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_stream on an NC model issues the expected Cypher call."""
    nc_model.predict_stream(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.stream($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_estimate_predict_write_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_write_estimate on an NC model issues the expected Cypher call."""
    nc_model.predict_write_estimate(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.write.estimate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_predict_mutate_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_mutate on an NC model issues the expected Cypher call."""
    nc_model.predict_mutate(G, mutateProperty="helo")

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.mutate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {
            "modelName": nc_model.name(),
            "mutateProperty": "helo",
        },
    }


def test_estimate_predict_mutate_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_mutate_estimate on an NC model issues the expected Cypher call."""
    nc_model.predict_mutate_estimate(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.mutate.estimate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_predict_write_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_write on an NC model issues the expected Cypher call."""
    nc_model.predict_write(G, writeProperty="helo")

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.write($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {
            "modelName": nc_model.name(),
            "writeProperty": "helo",
        },
    }
graphdatascience/tests/unit/test_prediction_models.py
"""Unit tests for the predict methods of trained LP and NC pipeline models."""
import pytest

from graphdatascience.graph.graph_object import Graph
from graphdatascience.graph_data_science import GraphDataScience
from graphdatascience.model.link_prediction_model import LPModel
from graphdatascience.model.model import Model
from graphdatascience.model.node_classification_model import NCModel

from .conftest import CollectingQueryRunner

# Name of the pipeline created by the model fixtures below.
PIPE_NAME = "pipe"


@pytest.fixture(scope="module")
def G(gds: GraphDataScience) -> Graph:
    """Project a small graph once for the whole test module."""
    G_, _ = gds.graph.project("g", "Node", "REL")
    return G_


@pytest.fixture
def lp_model(gds: GraphDataScience, G: Graph) -> Model:
    """Create and train a link prediction pipeline model."""
    # Fix: use the PIPE_NAME constant instead of repeating the literal "pipe".
    pipe, _ = gds.beta.pipeline.linkPrediction.create(PIPE_NAME)
    trainedPipe, _ = pipe.train(G, modelName="m", concurrency=2)
    return trainedPipe


@pytest.fixture
def nc_model(gds: GraphDataScience, G: Graph) -> Model:
    """Create and train a node classification pipeline model."""
    # Fix: use the PIPE_NAME constant instead of repeating the literal "pipe".
    pipe, _ = gds.beta.pipeline.nodeClassification.create(PIPE_NAME)
    trainedPipe, _ = pipe.train(G, modelName="m", concurrency=2)
    return trainedPipe


def test_predict_stream_lp_model(
    runner: CollectingQueryRunner, lp_model: LPModel, G: Graph
) -> None:
    """predict_stream on an LP model issues the expected Cypher call."""
    lp_model.predict_stream(G, topN=2)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.linkPrediction.predict.stream($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": lp_model.name(), "topN": 2},
    }


def test_predict_mutate_lp_model(
    runner: CollectingQueryRunner, lp_model: LPModel, G: Graph
) -> None:
    """predict_mutate on an LP model issues the expected Cypher call."""
    lp_model.predict_mutate(G, topN=2, mutateRelationshipType="HELO")

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.linkPrediction.predict.mutate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {
            "modelName": lp_model.name(),
            "topN": 2,
            "mutateRelationshipType": "HELO",
        },
    }


def test_estimate_predict_stream_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_stream_estimate on an NC model issues the expected Cypher call."""
    nc_model.predict_stream_estimate(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.stream.estimate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_predict_stream_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_stream on an NC model issues the expected Cypher call."""
    nc_model.predict_stream(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.stream($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_estimate_predict_write_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_write_estimate on an NC model issues the expected Cypher call."""
    nc_model.predict_write_estimate(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.write.estimate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_predict_mutate_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_mutate on an NC model issues the expected Cypher call."""
    nc_model.predict_mutate(G, mutateProperty="helo")

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.mutate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {
            "modelName": nc_model.name(),
            "mutateProperty": "helo",
        },
    }


def test_estimate_predict_mutate_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_mutate_estimate on an NC model issues the expected Cypher call."""
    nc_model.predict_mutate_estimate(G)

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.mutate.estimate($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {"modelName": nc_model.name()},
    }


def test_predict_write_nc_model(
    runner: CollectingQueryRunner, nc_model: NCModel, G: Graph
) -> None:
    """predict_write on an NC model issues the expected Cypher call."""
    nc_model.predict_write(G, writeProperty="helo")

    assert (
        runner.last_query()
        == "CALL gds.beta.pipeline.nodeClassification.predict.write($graph_name, $config)"
    )
    assert runner.last_params() == {
        "graph_name": G.name(),
        "config": {
            "modelName": nc_model.name(),
            "writeProperty": "helo",
        },
    }
0.625552
0.515559
"""Base harness for Selenium/Percy integration tests of a Dash app."""
import multiprocessing
import sys
import time
import unittest

import percy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Default number of seconds to wait for elements/text before failing.
TIMEOUT = 20


class IntegrationTests(unittest.TestCase):
    """Shared setup/teardown plus helpers for browser-driven tests."""

    def percy_snapshot(self, name=''):
        """Take a Percy snapshot, tagging it with the running Python version."""
        # Fix: first parameter was misnamed `cls`; this is an instance method,
        # so per PEP 8 it must be `self` (callers pass it positionally, so
        # behavior is unchanged).
        snapshot_name = '{} - py{}.{}'.format(name, sys.version_info.major, sys.version_info.minor)
        print(snapshot_name)
        self.percy_runner.snapshot(
            name=snapshot_name
        )

    def wait_for_element_by_css_selector(self, selector, timeout=TIMEOUT):
        """Return the first element matching `selector`, waiting up to `timeout` seconds."""
        return WebDriverWait(self.driver, timeout).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, selector)),
            'Could not find element with selector "{}"'.format(selector)
        )

    def wait_for_text_to_equal(self, selector, assertion_text, timeout=TIMEOUT):
        """Wait until the element's text (or its `value` attribute) equals `assertion_text`."""
        el = self.wait_for_element_by_css_selector(selector)
        WebDriverWait(self.driver, timeout).until(
            lambda *args: (
                (str(el.text) == assertion_text) or
                (str(el.get_attribute('value')) == assertion_text)
            ),
            "Element '{}' text was supposed to equal '{}' but it didn't".format(
                selector, assertion_text
            )
        )

    @classmethod
    def setUpClass(cls):
        """Start one Chrome driver and one Percy build for the whole class."""
        super(IntegrationTests, cls).setUpClass()
        cls.driver = webdriver.Chrome()

        loader = percy.ResourceLoader(
            webdriver=cls.driver,
            base_url='/assets',
            root_dir='tests/assets'
        )
        cls.percy_runner = percy.Runner(loader=loader)
        cls.percy_runner.initialize_build()

    @classmethod
    def tearDownClass(cls):
        """Shut down the browser and finalize the Percy build."""
        super(IntegrationTests, cls).tearDownClass()
        cls.driver.quit()
        cls.percy_runner.finalize_build()

    def setUp(self):
        # Fix: parameter was named `s`; instance methods use `self` (PEP 8).
        pass

    def tearDown(self):
        # Fix: parameter was named `s`; instance methods use `self` (PEP 8).
        if hasattr(self, 'server_process'):
            time.sleep(2)
            self.server_process.terminate()
        time.sleep(2)

    def startServer(self, dash):
        """Launch `dash` in a child process, open it in the browser, and
        install a JS console recorder for later assertions."""
        def run():
            dash.scripts.config.serve_locally = True
            dash.run_server(
                port=8050,
                debug=False,
                processes=4,
                threaded=False
            )

        # Run on a separate process so that it doesn't block
        self.server_process = multiprocessing.Process(target=run)
        self.server_process.start()
        time.sleep(0.5)

        # Visit the dash page
        self.driver.get('http://localhost:8050')
        time.sleep(0.5)

        # Inject an error and warning logger
        logger = '''
        window.tests = {};
        window.tests.console = {error: [], warn: [], log: []};
        var _log = console.log;
        var _warn = console.warn;
        var _error = console.error;
        console.log = function() {
            window.tests.console.log.push({method: 'log', arguments: arguments});
            return _log.apply(console, arguments);
        };
        console.warn = function() {
            window.tests.console.warn.push({method: 'warn', arguments: arguments});
            return _warn.apply(console, arguments);
        };
        console.error = function() {
            window.tests.console.error.push({method: 'error', arguments: arguments});
            return _error.apply(console, arguments);
        };
        '''
        self.driver.execute_script(logger)
tests/IntegrationTests.py
"""Base harness for Selenium/Percy integration tests of a Dash app."""
import multiprocessing
import sys
import time
import unittest

import percy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Default number of seconds to wait for elements/text before failing.
TIMEOUT = 20


class IntegrationTests(unittest.TestCase):
    """Shared setup/teardown plus helpers for browser-driven tests."""

    def percy_snapshot(self, name=''):
        """Take a Percy snapshot, tagging it with the running Python version."""
        # Fix: first parameter was misnamed `cls`; this is an instance method,
        # so per PEP 8 it must be `self` (callers pass it positionally, so
        # behavior is unchanged).
        snapshot_name = '{} - py{}.{}'.format(name, sys.version_info.major, sys.version_info.minor)
        print(snapshot_name)
        self.percy_runner.snapshot(
            name=snapshot_name
        )

    def wait_for_element_by_css_selector(self, selector, timeout=TIMEOUT):
        """Return the first element matching `selector`, waiting up to `timeout` seconds."""
        return WebDriverWait(self.driver, timeout).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, selector)),
            'Could not find element with selector "{}"'.format(selector)
        )

    def wait_for_text_to_equal(self, selector, assertion_text, timeout=TIMEOUT):
        """Wait until the element's text (or its `value` attribute) equals `assertion_text`."""
        el = self.wait_for_element_by_css_selector(selector)
        WebDriverWait(self.driver, timeout).until(
            lambda *args: (
                (str(el.text) == assertion_text) or
                (str(el.get_attribute('value')) == assertion_text)
            ),
            "Element '{}' text was supposed to equal '{}' but it didn't".format(
                selector, assertion_text
            )
        )

    @classmethod
    def setUpClass(cls):
        """Start one Chrome driver and one Percy build for the whole class."""
        super(IntegrationTests, cls).setUpClass()
        cls.driver = webdriver.Chrome()

        loader = percy.ResourceLoader(
            webdriver=cls.driver,
            base_url='/assets',
            root_dir='tests/assets'
        )
        cls.percy_runner = percy.Runner(loader=loader)
        cls.percy_runner.initialize_build()

    @classmethod
    def tearDownClass(cls):
        """Shut down the browser and finalize the Percy build."""
        super(IntegrationTests, cls).tearDownClass()
        cls.driver.quit()
        cls.percy_runner.finalize_build()

    def setUp(self):
        # Fix: parameter was named `s`; instance methods use `self` (PEP 8).
        pass

    def tearDown(self):
        # Fix: parameter was named `s`; instance methods use `self` (PEP 8).
        if hasattr(self, 'server_process'):
            time.sleep(2)
            self.server_process.terminate()
        time.sleep(2)

    def startServer(self, dash):
        """Launch `dash` in a child process, open it in the browser, and
        install a JS console recorder for later assertions."""
        def run():
            dash.scripts.config.serve_locally = True
            dash.run_server(
                port=8050,
                debug=False,
                processes=4,
                threaded=False
            )

        # Run on a separate process so that it doesn't block
        self.server_process = multiprocessing.Process(target=run)
        self.server_process.start()
        time.sleep(0.5)

        # Visit the dash page
        self.driver.get('http://localhost:8050')
        time.sleep(0.5)

        # Inject an error and warning logger
        logger = '''
        window.tests = {};
        window.tests.console = {error: [], warn: [], log: []};
        var _log = console.log;
        var _warn = console.warn;
        var _error = console.error;
        console.log = function() {
            window.tests.console.log.push({method: 'log', arguments: arguments});
            return _log.apply(console, arguments);
        };
        console.warn = function() {
            window.tests.console.warn.push({method: 'warn', arguments: arguments});
            return _warn.apply(console, arguments);
        };
        console.error = function() {
            window.tests.console.error.push({method: 'error', arguments: arguments});
            return _error.apply(console, arguments);
        };
        '''
        self.driver.execute_script(logger)
0.276691
0.094887
"""Unit tests for :mod:`colour.models.hunter_lab`."""

import numpy as np
import unittest
from itertools import permutations

from colour.colorimetry import TVS_ILLUMINANTS_HUNTERLAB
from colour.models import (
    XYZ_to_K_ab_HunterLab1966,
    XYZ_to_Hunter_Lab,
    Hunter_Lab_to_XYZ,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors

__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"

__all__ = [
    "TestXYZ_to_K_ab_HunterLab1966",
    "TestXYZ_to_Hunter_Lab",
    "TestHunter_Lab_to_XYZ",
]

# Observer key shared by every illuminant lookup in this module.
_OBSERVER = "CIE 1931 2 Degree Standard Observer"


class TestXYZ_to_K_ab_HunterLab1966(unittest.TestCase):
    """
    Define :func:`colour.models.hunter_lab.XYZ_to_K_ab_HunterLab1966`
    definition unit tests methods.
    """

    def test_XYZ_to_K_ab_HunterLab1966(self):
        """Check reference values of XYZ_to_K_ab_HunterLab1966."""
        vectors = [
            ([0.20654008, 0.12197225, 0.05136952], [80.32152090, 14.59816495]),
            ([0.14222010, 0.23042768, 0.10495772], [66.65154834, 20.86664881]),
            ([0.07818780, 0.06157201, 0.28099326], [49.41960269, 34.14235426]),
        ]
        for XYZ, K_ab in vectors:
            np.testing.assert_almost_equal(
                XYZ_to_K_ab_HunterLab1966(np.array(XYZ) * 100),
                np.array(K_ab),
                decimal=7,
            )

    def test_n_dimensional_XYZ_to_K_ab_HunterLab1966(self):
        """Check n-dimensional support of XYZ_to_K_ab_HunterLab1966."""
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
        K_ab = XYZ_to_K_ab_HunterLab1966(XYZ)

        # Tiled leading dimension.
        XYZ = np.tile(XYZ, (6, 1))
        K_ab = np.tile(K_ab, (6, 1))
        np.testing.assert_almost_equal(
            XYZ_to_K_ab_HunterLab1966(XYZ), K_ab, decimal=7
        )

        # Arbitrary leading shape.
        XYZ = np.reshape(XYZ, (2, 3, 3))
        K_ab = np.reshape(K_ab, (2, 3, 2))
        np.testing.assert_almost_equal(
            XYZ_to_K_ab_HunterLab1966(XYZ), K_ab, decimal=7
        )

    @ignore_numpy_errors
    def test_nan_XYZ_to_K_ab_HunterLab1966(self):
        """Check nan support of XYZ_to_K_ab_HunterLab1966."""
        seeds = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(seeds * 3, r=3)):
            XYZ_to_K_ab_HunterLab1966(np.array(case))


class TestXYZ_to_Hunter_Lab(unittest.TestCase):
    """
    Define :func:`colour.models.hunter_lab.XYZ_to_Hunter_Lab` definition unit
    tests methods.
    """

    def test_XYZ_to_Hunter_Lab(self):
        """Check reference values of XYZ_to_Hunter_Lab."""
        vectors = [
            ([0.20654008, 0.12197225, 0.05136952],
             [34.92452577, 47.06189858, 14.38615107]),
            ([0.14222010, 0.23042768, 0.10495772],
             [48.00288325, -28.98551622, 18.75564181]),
            ([0.07818780, 0.06157201, 0.28099326],
             [24.81370791, 14.38300039, -53.25539126]),
        ]
        for XYZ, Lab in vectors:
            np.testing.assert_almost_equal(
                XYZ_to_Hunter_Lab(np.array(XYZ) * 100),
                np.array(Lab),
                decimal=7,
            )

        h_i = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100

        A = h_i["A"]
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, A.XYZ_n, A.K_ab),
            np.array([34.92452577, 35.04243086, -2.47688619]),
            decimal=7,
        )

        D65 = h_i["D65"]
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, D65.XYZ_n, D65.K_ab),
            np.array([34.92452577, 47.06189858, 14.38615107]),
            decimal=7,
        )

        # Omitting K_ab exercises the internal computation path.
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, D65.XYZ_n, K_ab=None),
            np.array([34.92452577, 47.05669614, 14.38385238]),
            decimal=7,
        )

    def test_n_dimensional_XYZ_to_Hunter_Lab(self):
        """Check n-dimensional support of XYZ_to_Hunter_Lab."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        Lab = XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab)

        # Tiled samples, scalar reference white.
        XYZ = np.tile(XYZ, (6, 1))
        Lab = np.tile(Lab, (6, 1))
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab), Lab, decimal=7
        )

        # Tiled reference white and coefficients too.
        XYZ_n = np.tile(XYZ_n, (6, 1))
        K_ab = np.tile(K_ab, (6, 1))
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab), Lab, decimal=7
        )

        # Arbitrary leading shape.
        XYZ = np.reshape(XYZ, (2, 3, 3))
        XYZ_n = np.reshape(XYZ_n, (2, 3, 3))
        K_ab = np.reshape(K_ab, (2, 3, 2))
        Lab = np.reshape(Lab, (2, 3, 3))
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab), Lab, decimal=7
        )

    def test_domain_range_scale_XYZ_to_Hunter_Lab(self):
        """Check domain and range scale support of XYZ_to_Hunter_Lab."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        Lab = XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab)

        for scale, factor in (("reference", 1), ("1", 0.01), ("100", 1)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    XYZ_to_Hunter_Lab(XYZ * factor, XYZ_n * factor, K_ab),
                    Lab * factor,
                    decimal=7,
                )

    @ignore_numpy_errors
    def test_nan_XYZ_to_Hunter_Lab(self):
        """Check nan support of XYZ_to_Hunter_Lab."""
        seeds = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(seeds * 3, r=3)):
            XYZ_to_Hunter_Lab(
                np.array(case), np.array(case[0:3]), np.array(case[0:2])
            )


class TestHunter_Lab_to_XYZ(unittest.TestCase):
    """
    Define :func:`colour.models.hunter_lab.Hunter_Lab_to_XYZ` definition unit
    tests methods.
    """

    def test_Hunter_Lab_to_XYZ(self):
        """Check reference values of Hunter_Lab_to_XYZ."""
        vectors = [
            ([34.92452577, 47.06189858, 14.38615107],
             [20.65400800, 12.19722500, 5.13695200]),
            ([48.00288325, -28.98551622, 18.75564181],
             [14.22201000, 23.04276800, 10.49577200]),
            ([24.81370791, 14.38300039, -53.25539126],
             [7.81878000, 6.15720100, 28.09932601]),
        ]
        for Lab, XYZ in vectors:
            np.testing.assert_almost_equal(
                Hunter_Lab_to_XYZ(np.array(Lab)),
                np.array(XYZ),
                decimal=7,
            )

        h_i = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]

        A = h_i["A"]
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(
                np.array([34.92452577, 35.04243086, -2.47688619]),
                A.XYZ_n,
                A.K_ab,
            ),
            np.array([20.65400800, 12.19722500, 5.13695200]),
            decimal=7,
        )

        D65 = h_i["D65"]
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(
                np.array([34.92452577, 47.06189858, 14.38615107]),
                D65.XYZ_n,
                D65.K_ab,
            ),
            np.array([20.65400800, 12.19722500, 5.13695200]),
            decimal=7,
        )

        # Omitting K_ab exercises the internal computation path.
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(
                np.array([34.92452577, 47.05669614, 14.38385238]),
                D65.XYZ_n,
                K_ab=None,
            ),
            np.array([20.65400800, 12.19722500, 5.13695200]),
            decimal=7,
        )

    def test_n_dimensional_Hunter_Lab_to_XYZ(self):
        """Check n-dimensional support of Hunter_Lab_to_XYZ."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        Lab = np.array([34.92452577, 47.06189858, 14.38615107])
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        XYZ = Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab)

        # Tiled samples, scalar reference white.
        Lab = np.tile(Lab, (6, 1))
        XYZ = np.tile(XYZ, (6, 1))
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab), XYZ, decimal=7
        )

        # Tiled reference white and coefficients too.
        K_ab = np.tile(K_ab, (6, 1))
        XYZ_n = np.tile(XYZ_n, (6, 1))
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab), XYZ, decimal=7
        )

        # Arbitrary leading shape.
        Lab = np.reshape(Lab, (2, 3, 3))
        XYZ_n = np.reshape(XYZ_n, (2, 3, 3))
        K_ab = np.reshape(K_ab, (2, 3, 2))
        XYZ = np.reshape(XYZ, (2, 3, 3))
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab), XYZ, decimal=7
        )

    def test_domain_range_scale_Hunter_Lab_to_XYZ(self):
        """Check domain and range scale support of Hunter_Lab_to_XYZ."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        Lab = np.array([34.92452577, 47.06189858, 14.38615107])
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        XYZ = Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab)

        for scale, factor in (("reference", 1), ("1", 0.01), ("100", 1)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    Hunter_Lab_to_XYZ(Lab * factor, XYZ_n * factor, K_ab),
                    XYZ * factor,
                    decimal=7,
                )

    @ignore_numpy_errors
    def test_nan_Hunter_Lab_to_XYZ(self):
        """Check nan support of Hunter_Lab_to_XYZ."""
        seeds = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(seeds * 3, r=3)):
            Hunter_Lab_to_XYZ(
                np.array(case), np.array(case[0:3]), np.array(case[0:2])
            )


if __name__ == "__main__":
    unittest.main()
colour/models/tests/test_hunter_lab.py
"""Unit tests for :mod:`colour.models.hunter_lab`."""

import numpy as np
import unittest
from itertools import permutations

from colour.colorimetry import TVS_ILLUMINANTS_HUNTERLAB
from colour.models import (
    XYZ_to_K_ab_HunterLab1966,
    XYZ_to_Hunter_Lab,
    Hunter_Lab_to_XYZ,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors

__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"

__all__ = [
    "TestXYZ_to_K_ab_HunterLab1966",
    "TestXYZ_to_Hunter_Lab",
    "TestHunter_Lab_to_XYZ",
]

# Observer key shared by every illuminant lookup in this module.
_OBSERVER = "CIE 1931 2 Degree Standard Observer"


class TestXYZ_to_K_ab_HunterLab1966(unittest.TestCase):
    """
    Define :func:`colour.models.hunter_lab.XYZ_to_K_ab_HunterLab1966`
    definition unit tests methods.
    """

    def test_XYZ_to_K_ab_HunterLab1966(self):
        """Check reference values of XYZ_to_K_ab_HunterLab1966."""
        vectors = [
            ([0.20654008, 0.12197225, 0.05136952], [80.32152090, 14.59816495]),
            ([0.14222010, 0.23042768, 0.10495772], [66.65154834, 20.86664881]),
            ([0.07818780, 0.06157201, 0.28099326], [49.41960269, 34.14235426]),
        ]
        for XYZ, K_ab in vectors:
            np.testing.assert_almost_equal(
                XYZ_to_K_ab_HunterLab1966(np.array(XYZ) * 100),
                np.array(K_ab),
                decimal=7,
            )

    def test_n_dimensional_XYZ_to_K_ab_HunterLab1966(self):
        """Check n-dimensional support of XYZ_to_K_ab_HunterLab1966."""
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
        K_ab = XYZ_to_K_ab_HunterLab1966(XYZ)

        # Tiled leading dimension.
        XYZ = np.tile(XYZ, (6, 1))
        K_ab = np.tile(K_ab, (6, 1))
        np.testing.assert_almost_equal(
            XYZ_to_K_ab_HunterLab1966(XYZ), K_ab, decimal=7
        )

        # Arbitrary leading shape.
        XYZ = np.reshape(XYZ, (2, 3, 3))
        K_ab = np.reshape(K_ab, (2, 3, 2))
        np.testing.assert_almost_equal(
            XYZ_to_K_ab_HunterLab1966(XYZ), K_ab, decimal=7
        )

    @ignore_numpy_errors
    def test_nan_XYZ_to_K_ab_HunterLab1966(self):
        """Check nan support of XYZ_to_K_ab_HunterLab1966."""
        seeds = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(seeds * 3, r=3)):
            XYZ_to_K_ab_HunterLab1966(np.array(case))


class TestXYZ_to_Hunter_Lab(unittest.TestCase):
    """
    Define :func:`colour.models.hunter_lab.XYZ_to_Hunter_Lab` definition unit
    tests methods.
    """

    def test_XYZ_to_Hunter_Lab(self):
        """Check reference values of XYZ_to_Hunter_Lab."""
        vectors = [
            ([0.20654008, 0.12197225, 0.05136952],
             [34.92452577, 47.06189858, 14.38615107]),
            ([0.14222010, 0.23042768, 0.10495772],
             [48.00288325, -28.98551622, 18.75564181]),
            ([0.07818780, 0.06157201, 0.28099326],
             [24.81370791, 14.38300039, -53.25539126]),
        ]
        for XYZ, Lab in vectors:
            np.testing.assert_almost_equal(
                XYZ_to_Hunter_Lab(np.array(XYZ) * 100),
                np.array(Lab),
                decimal=7,
            )

        h_i = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100

        A = h_i["A"]
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, A.XYZ_n, A.K_ab),
            np.array([34.92452577, 35.04243086, -2.47688619]),
            decimal=7,
        )

        D65 = h_i["D65"]
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, D65.XYZ_n, D65.K_ab),
            np.array([34.92452577, 47.06189858, 14.38615107]),
            decimal=7,
        )

        # Omitting K_ab exercises the internal computation path.
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, D65.XYZ_n, K_ab=None),
            np.array([34.92452577, 47.05669614, 14.38385238]),
            decimal=7,
        )

    def test_n_dimensional_XYZ_to_Hunter_Lab(self):
        """Check n-dimensional support of XYZ_to_Hunter_Lab."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        Lab = XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab)

        # Tiled samples, scalar reference white.
        XYZ = np.tile(XYZ, (6, 1))
        Lab = np.tile(Lab, (6, 1))
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab), Lab, decimal=7
        )

        # Tiled reference white and coefficients too.
        XYZ_n = np.tile(XYZ_n, (6, 1))
        K_ab = np.tile(K_ab, (6, 1))
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab), Lab, decimal=7
        )

        # Arbitrary leading shape.
        XYZ = np.reshape(XYZ, (2, 3, 3))
        XYZ_n = np.reshape(XYZ_n, (2, 3, 3))
        K_ab = np.reshape(K_ab, (2, 3, 2))
        Lab = np.reshape(Lab, (2, 3, 3))
        np.testing.assert_almost_equal(
            XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab), Lab, decimal=7
        )

    def test_domain_range_scale_XYZ_to_Hunter_Lab(self):
        """Check domain and range scale support of XYZ_to_Hunter_Lab."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) * 100
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        Lab = XYZ_to_Hunter_Lab(XYZ, XYZ_n, K_ab)

        for scale, factor in (("reference", 1), ("1", 0.01), ("100", 1)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    XYZ_to_Hunter_Lab(XYZ * factor, XYZ_n * factor, K_ab),
                    Lab * factor,
                    decimal=7,
                )

    @ignore_numpy_errors
    def test_nan_XYZ_to_Hunter_Lab(self):
        """Check nan support of XYZ_to_Hunter_Lab."""
        seeds = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(seeds * 3, r=3)):
            XYZ_to_Hunter_Lab(
                np.array(case), np.array(case[0:3]), np.array(case[0:2])
            )


class TestHunter_Lab_to_XYZ(unittest.TestCase):
    """
    Define :func:`colour.models.hunter_lab.Hunter_Lab_to_XYZ` definition unit
    tests methods.
    """

    def test_Hunter_Lab_to_XYZ(self):
        """Check reference values of Hunter_Lab_to_XYZ."""
        vectors = [
            ([34.92452577, 47.06189858, 14.38615107],
             [20.65400800, 12.19722500, 5.13695200]),
            ([48.00288325, -28.98551622, 18.75564181],
             [14.22201000, 23.04276800, 10.49577200]),
            ([24.81370791, 14.38300039, -53.25539126],
             [7.81878000, 6.15720100, 28.09932601]),
        ]
        for Lab, XYZ in vectors:
            np.testing.assert_almost_equal(
                Hunter_Lab_to_XYZ(np.array(Lab)),
                np.array(XYZ),
                decimal=7,
            )

        h_i = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]

        A = h_i["A"]
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(
                np.array([34.92452577, 35.04243086, -2.47688619]),
                A.XYZ_n,
                A.K_ab,
            ),
            np.array([20.65400800, 12.19722500, 5.13695200]),
            decimal=7,
        )

        D65 = h_i["D65"]
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(
                np.array([34.92452577, 47.06189858, 14.38615107]),
                D65.XYZ_n,
                D65.K_ab,
            ),
            np.array([20.65400800, 12.19722500, 5.13695200]),
            decimal=7,
        )

        # Omitting K_ab exercises the internal computation path.
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(
                np.array([34.92452577, 47.05669614, 14.38385238]),
                D65.XYZ_n,
                K_ab=None,
            ),
            np.array([20.65400800, 12.19722500, 5.13695200]),
            decimal=7,
        )

    def test_n_dimensional_Hunter_Lab_to_XYZ(self):
        """Check n-dimensional support of Hunter_Lab_to_XYZ."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        Lab = np.array([34.92452577, 47.06189858, 14.38615107])
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        XYZ = Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab)

        # Tiled samples, scalar reference white.
        Lab = np.tile(Lab, (6, 1))
        XYZ = np.tile(XYZ, (6, 1))
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab), XYZ, decimal=7
        )

        # Tiled reference white and coefficients too.
        K_ab = np.tile(K_ab, (6, 1))
        XYZ_n = np.tile(XYZ_n, (6, 1))
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab), XYZ, decimal=7
        )

        # Arbitrary leading shape.
        Lab = np.reshape(Lab, (2, 3, 3))
        XYZ_n = np.reshape(XYZ_n, (2, 3, 3))
        K_ab = np.reshape(K_ab, (2, 3, 2))
        XYZ = np.reshape(XYZ, (2, 3, 3))
        np.testing.assert_almost_equal(
            Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab), XYZ, decimal=7
        )

    def test_domain_range_scale_Hunter_Lab_to_XYZ(self):
        """Check domain and range scale support of Hunter_Lab_to_XYZ."""
        D65 = TVS_ILLUMINANTS_HUNTERLAB[_OBSERVER]["D65"]
        Lab = np.array([34.92452577, 47.06189858, 14.38615107])
        XYZ_n = D65.XYZ_n
        K_ab = D65.K_ab
        XYZ = Hunter_Lab_to_XYZ(Lab, XYZ_n, K_ab)

        for scale, factor in (("reference", 1), ("1", 0.01), ("100", 1)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    Hunter_Lab_to_XYZ(Lab * factor, XYZ_n * factor, K_ab),
                    XYZ * factor,
                    decimal=7,
                )

    @ignore_numpy_errors
    def test_nan_Hunter_Lab_to_XYZ(self):
        """Check nan support of Hunter_Lab_to_XYZ."""
        seeds = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        for case in set(permutations(seeds * 3, r=3)):
            Hunter_Lab_to_XYZ(
                np.array(case), np.array(case[0:3]), np.array(case[0:2])
            )


if __name__ == "__main__":
    unittest.main()
0.730866
0.411288
"""Clearine - a simple fullscreen logout/shutdown dialog for X11.

Usage: clearine [-h|--help]
"""
import os
import sys
import time
import getopt
import signal
import logging
import subprocess
import configparser

# Hard GUI dependencies: fail early with a readable hint, not a traceback.
try:
    import gi
except ImportError:
    print("no modules named 'gi', please install python-gobject")
    sys.exit()
try:
    import cairo
except ImportError:
    print("no modules named 'cairo', please install python-cairo")
    sys.exit()

gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, Gdk, Pango, GLib
from gi.repository.GdkPixbuf import Pixbuf
from Clearine.helper import SignalHandler
from Clearine.helper import Helper

# Global configuration, populated once in Clearine.setcontent() and read by
# every drawing/action method afterwards.
config = {}
# Maps a Gdk key name (e.g. "Escape") to an action name (e.g. "cancel").
shortcuts = {}
root_module = os.path.dirname(os.path.abspath(__file__))


class Clearine(Gtk.Window):
    """Fullscreen, semi-transparent window holding the action buttons."""

    def __init__(self):
        super(Clearine, self).__init__()
        # Build content first so widgets exist when window properties are set.
        self.setcontent()
        self.setprops()

    def setprops(self):
        """Apply window properties: RGBA visual, fullscreen, signal wiring."""
        self.set_visual(self.get_screen().get_rgba_visual())
        self.set_app_paintable(True)
        self.fullscreen()
        self.set_skip_pager_hint(True)
        self.set_keep_above(True)
        self.realize()
        self.connect('destroy', Gtk.main_quit)
        self.connect('draw', self.draw_background)
        self.connect('delete-event', Gtk.main_quit)
        self.connect('key-press-event', self.on_keypressed)
        self.connect('window-state-event', self.on_state_changed)

    def setcontent(self):
        """Load clearine.conf (first match wins) and build the widget tree."""
        global config
        global shortcuts
        status = logging.getLogger(self.__class__.__name__)
        dotcat = configparser.ConfigParser()
        file_home = "%s/.config/clearine.conf" % os.environ['HOME']
        file_etc = "/etc/clearine.conf"
        file_share = "/usr/share/clearine/clearine.conf"
        file_dot_config = "%s/.config/clearine/clearine.conf" % os.environ['HOME']
        file_default = "%s/data/clearine.conf" % root_module

        # FIX: the original wrapped an if/elif chain of os.path.exists() in
        # try/except, but exists() never raises for a missing file, so the
        # "failed to find configuration" branch was unreachable.  Scan the
        # candidates in priority order and handle "none found" explicitly.
        for candidate in (file_home, file_dot_config, file_etc,
                          file_share, file_default):
            if os.path.exists(candidate):
                status.info("load config from: %s" % (candidate))
                try:
                    dotcat.read(candidate)
                except configparser.Error:
                    status.error("failed to find configuration file. exiting.")
                    sys.exit()
                break
        else:
            status.error("failed to find configuration file. exiting.")
            sys.exit()

        def find_key(data, section, key, default):
            """Fetch one typed value from the parsed config.

            data    -- type selector: "arr" (comma list), "str", "int",
                       "flo" (float) or "clr" (color, literal or {xrdb-name}).
            default -- returned when the key is missing or malformed.
            """
            helper = Helper()
            try:
                if data == "arr":
                    return dotcat.get(section, key).split(",")
                if data == "str":
                    return dotcat.get(section, key, raw=True)
                if data == "int":
                    return dotcat.getint(section, key)
                if data == "flo":
                    return dotcat.getfloat(section, key)
                if data == "clr":
                    data = dotcat.get(section, key, raw=True)
                    if data.startswith("#") or data.startswith("rgba("):
                        return data
                    elif data.startswith("{") and data.endswith("}"):
                        # "{name}" refers to an xrdb resource; resolve it.
                        data = data.lstrip('{').rstrip('}')
                        return helper.xrdb(data)
                    # FIX: unrecognized color syntax used to fall through and
                    # return None; fall back to the default instead.
                    return default
            except Exception:
                # Missing key/section, bad int/float, or xrdb failure: log
                # and use the fallback (was a bare except: before).
                status.info("failed to find key named '%s' in section '[%s]'. use fallback value insteads." % (key, section))
                return default

        config["button-label-font"] = find_key("str", "button", "label-font", "DejaVu Sans Book")
        config["button-label-size"] = find_key("int", "button", "label-size", 9)
        config["button-label-color"] = find_key("clr", "button", "label-color", "#101314")
        config["button-height"] = find_key("int", "button", "height", 70)
        config["button-icon-height"] = find_key("int", "button", "icon-height", 32)
        config["button-icon-width"] = find_key("int", "button", "icon-width", 32)
        config["button-items"] = find_key("arr", "button", "items", "suspend, logout, lock, hibernate, restart, shutdown, cancel")
        config["button-margin-bottom"] = find_key("int", "button", "margin-bottom", 30)
        config["button-margin-left"] = find_key("int", "button", "margin-left", 10)
        config["button-margin-right"] = find_key("int", "button", "margin-right", 10)
        config["button-margin-top"] = find_key("int", "button", "margin-top", 30)
        config["button-spacing"] = find_key("int", "button", "spacing", 10)
        config["button-theme"] = find_key("str", "button", "theme", "Clearine-Fallback")
        config["button-width"] = find_key("int", "button", "width", 100)
        config["button-opacity-normal"] = find_key("flo", "button", "opacity-normal", 0.7)
        config["button-opacity-focus"] = find_key("flo", "button", "opacity-focus", 1.0)
        config["card-background-color"] = find_key("clr", "card", "background-color", "#e1e5e8")
        config["card-border-radius"] = find_key("int", "card", "border-radius", 20)
        config["card-padding-bottom"] = find_key("int", "card", "padding-bottom", 10)
        config["card-padding-left"] = find_key("int", "card", "padding-left", 10)
        config["card-padding-right"] = find_key("int", "card", "padding-right", 10)
        config["card-padding-top"] = find_key("int", "card", "padding-top", 10)
        config["main-mode"] = find_key("str", "main", "mode", "V")
        config["main-spacing"] = find_key("int", "main", "spacing", 10)
        config["main-gap-left"] = find_key("int", "main", "gap-left", 50)
        config["main-gap-right"] = find_key("int", "main", "gap-right", 50)
        config["main-gap-top"] = find_key("int", "main", "gap-top", 50)
        config["main-gap-bottom"] = find_key("int", "main", "gap-bottom", 50)
        config["main-opacity"] = find_key("flo", "main", "opacity", 0.8)
        config["widget-firstline-font"] = find_key("str", "widget", "firstline-font", "DejaVu Sans ExtraLight")
        config["widget-firstline-size"] = find_key("int", "widget", "firstline-size", 90)
        config["widget-firstline-color"] = find_key("clr", "widget", "firstline-color", "#e1e5e8")
        config["widget-firstline-format"] = find_key("str", "widget", "firstline-format", "%H.%M")
        config["widget-secondline-font"] = find_key("str", "widget", "secondline-font", "DejaVu Sans Book")
        config["widget-secondline-size"] = find_key("int", "widget", "secondline-size", 14)
        config["widget-secondline-color"] = find_key("clr", "widget", "secondline-color", "#e1e5e8")
        config["widget-secondline-format"] = find_key("str", "widget", "secondline-format", "%A, %d %B %Y")
        config["command-logout"] = find_key("str", "command", "logout", "pkexec pkill X")
        config["command-restart"] = find_key("str", "command", "restart", "pkexec reboot -h now")
        config["command-shutdown"] = find_key("str", "command", "shutdown", "pkexec shutdown -h now")
        config["command-lock"] = find_key("str", "command", "lock", "i3lock")
        config["command-suspend"] = find_key("str", "command", "suspend", "systemctl suspend")
        config["command-hibernate"] = find_key("str", "command", "hibernate", "systemctl hibernate")

        shortcuts = {
            find_key("str", "shortcuts", "cancel", "Escape"): "cancel",
            find_key("str", "shortcuts", "lock", "K"): "lock",
            find_key("str", "shortcuts", "suspend", "P"): "suspend",
            find_key("str", "shortcuts", "hibernate", "H"): "hibernate",
            find_key("str", "shortcuts", "logout", "L"): "logout",
            find_key("str", "shortcuts", "restart", "R"): "restart",
            find_key("str", "shortcuts", "shutdown", "S"): "shutdown"
        }

        # --- Build the widget tree ------------------------------------------
        # "horizontal" mode puts the clock beside the card; anything else
        # (the "V" default) stacks them vertically.
        if config["main-mode"] == "horizontal":
            button_group = Gtk.VBox()
            content = Gtk.HBox()
        else:
            button_group = Gtk.HBox()
            content = Gtk.VBox()

        button_group.set_margin_top(config["button-margin-top"])
        button_group.set_margin_start(config["button-margin-left"])
        button_group.set_margin_bottom(config["button-margin-bottom"])
        button_group.set_margin_end(config["button-margin-right"])
        button_group.set_spacing(config["button-spacing"])

        card_container = Gtk.Box()
        card_container.set_margin_top(config["card-padding-top"])
        card_container.set_margin_start(config["card-padding-left"])
        card_container.set_margin_bottom(config["card-padding-bottom"])
        card_container.set_margin_end(config["card-padding-right"])
        card_container.pack_start(button_group, False, False, False)

        card = Gtk.Box()
        card.pack_start(card_container, False, False, False)

        container = Gtk.Grid()
        if config["main-mode"] == "horizontal":
            container.set_halign(Gtk.Align.END)
            container.set_valign(Gtk.Align.CENTER)
        else:
            container.set_halign(Gtk.Align.CENTER)
            container.set_valign(Gtk.Align.START)
        container.attach(card, 1, 1, 1, 1)

        # Clock (first line) and date (second line) labels.
        self.first_widget = Gtk.Label()
        self.first_widget.set_label(time.strftime(config["widget-firstline-format"]))
        self.second_widget = Gtk.Label()
        self.second_widget.set_label(time.strftime(config["widget-secondline-format"]))

        widgets = Gtk.Grid()
        if config["main-mode"] == "horizontal":
            widgets.set_halign(Gtk.Align.START)
            widgets.set_valign(Gtk.Align.CENTER)
        else:
            widgets.set_halign(Gtk.Align.CENTER)
            widgets.set_valign(Gtk.Align.END)
        widgets.attach(self.first_widget, 1, 1, 1, 1)
        widgets.attach(self.second_widget, 1, 2, 1, 1)

        content.set_margin_start(config["main-gap-left"])
        content.set_margin_end(config["main-gap-right"])
        content.set_margin_top(config["main-gap-top"])
        content.set_margin_bottom(config["main-gap-bottom"])
        content.set_spacing(config["main-spacing"])
        content.pack_start(widgets, True, True, 0)
        content.pack_end(container, True, True, 0)

        # Refresh the clock/date labels 5x per second.
        GLib.timeout_add(200, self.update_widgets)

        self.card_style = Gtk.CssProvider()
        # Double braces are literal braces; single-brace names are filled
        # from the configuration via str.format below.
        self.card_css = """
        .clearine-button {{
            background: {_cbg};
            color: {_bcol};
            font-family: '{_bfont}';
            font-size: {_bsize}px;
            box-shadow: none;
            opacity: {_bopa};
            border-width: 0;
        }}
        .clearine-button:focus {{
            opacity: {_bopaf};
            border-width: 0;
        }}
        .clearine-card {{
            background: {_cbg};
            border-width: 0;
            border-radius:{_crad}px;
        }}
        .clearine-widget-first {{
            color: {_w1col};
            font-family:'{_w1font}';
            font-size: {_w1size}px;
        }}
        .clearine-widget-second {{
            color: {_w2col};
            font-family:'{_w2font}';
            font-size: {_w2size}px;
        }}
        """.format(
            _bsize=str(config["button-label-size"]),
            _bcol=str(config["button-label-color"]),
            _bfont=str(config["button-label-font"]),
            _bopa=str(config["button-opacity-normal"]),
            _bopaf=str(config["button-opacity-focus"]),
            _cbg=str(config["card-background-color"]),
            _crad=str(config["card-border-radius"]),
            _w1col=str(config["widget-firstline-color"]),
            _w2col=str(config["widget-secondline-color"]),
            _w1font=str(config["widget-firstline-font"]),
            _w2font=str(config["widget-secondline-font"]),
            _w1size=str(config["widget-firstline-size"]),
            _w2size=str(config["widget-secondline-size"]),
        )
        self.card_style.load_from_data(self.card_css.encode())

        Gtk.StyleContext.add_class(card.get_style_context(), "clearine-card")
        Gtk.StyleContext.add_class(self.first_widget.get_style_context(), "clearine-widget-first")
        Gtk.StyleContext.add_class(self.second_widget.get_style_context(), "clearine-widget-second")
        Gtk.StyleContext.add_provider(card.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
        Gtk.StyleContext.add_provider(self.first_widget.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
        Gtk.StyleContext.add_provider(self.second_widget.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)

        # FIX: the original maintained `count` with a try/except NameError
        # counter; enumerate(..., start=1) does the same thing cleanly.
        for count, button in enumerate(config["button-items"], start=1):
            self.draw_button(button, button_group, count)

        # Clicking anywhere outside a button dismisses the dialog.
        body = Gtk.EventBox()
        body.add(content)
        body.connect('button-press-event', Gtk.main_quit)
        self.add(body)

    def update_widgets(self):
        """Refresh the clock and date labels; runs from a GLib timeout."""
        self.first_widget.set_label(time.strftime(config["widget-firstline-format"]))
        self.second_widget.set_label(time.strftime(config["widget-secondline-format"]))
        # FIX: a GLib timeout source is removed when its callback returns a
        # falsy value; the original returned None so the clock updated once
        # and then froze.  Return True to keep the timeout alive.
        return True

    def draw_button(self, name, widget, index):
        """Create one action button (icon + label) and pack it into `widget`.

        The icon is searched in the user theme, the system theme, the
        Clearine-Fallback theme and finally the bundled data dir (PNG
        preferred over SVG within each theme location).
        """
        status = logging.getLogger(self.__class__.__name__)
        button_name = name.strip()

        dir_ic_home = "%s/.themes/%s/clearine" % (os.environ['HOME'], config["button-theme"])
        dir_ic_share = "%s/%s/clearine" % ("%s/share/themes" % sys.prefix, config["button-theme"])
        dir_ic_share_fb = "%s/%s/clearine" % ("%s/share/themes" % sys.prefix, 'Clearine-Fallback')
        dir_ic_default = "%s/data" % root_module
        # FIX: removed stray debug `print(dir_ic_share)` left in the original.
        ic_png_home = "%s/%s.png" % (dir_ic_home, button_name)
        ic_png_share = "%s/%s.png" % (dir_ic_share, button_name)
        ic_svg_home = "%s/%s.svg" % (dir_ic_home, button_name)
        ic_svg_share = "%s/%s.svg" % (dir_ic_share, button_name)
        ic_svg_share_fb = "%s/%s.svg" % (dir_ic_share_fb, button_name)
        ic_svg_default = "%s/%s.svg" % (dir_ic_default, button_name)

        if os.path.exists(ic_png_home):
            iconfile = ic_png_home
        elif os.path.exists(ic_svg_home):
            iconfile = ic_svg_home
        elif os.path.exists(ic_png_share):
            iconfile = ic_png_share
        elif os.path.exists(ic_svg_share):
            iconfile = ic_svg_share
        elif os.path.exists(ic_svg_share_fb):
            iconfile = ic_svg_share_fb
        elif os.path.exists(ic_svg_default):
            iconfile = ic_svg_default
        else:
            status.info("No Clearine theme available, exiting")
            sys.exit()

        icon_buffer = Pixbuf.new_from_file_at_size(iconfile, config["button-icon-width"], config["button-icon-height"])
        icon = Gtk.Image()
        icon.set_from_pixbuf(icon_buffer)
        icon.set_margin_bottom(10)
        icon.set_margin_top(10)

        button = Gtk.Button()
        button.set_always_show_image(True)
        button.set_image_position(2)  # Gtk.PositionType.TOP: icon above label
        button.set_label(button_name.capitalize())
        button.set_image(icon)
        button.connect("clicked", self.do, button_name)
        button.set_size_request(config["button-width"], config["button-height"])
        button.set_can_focus(True)
        Gtk.StyleContext.add_class(button.get_style_context(), "clearine-button")
        Gtk.StyleContext.add_provider(button.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
        widget.pack_start(button, False, False, False)

    def draw_background(self, widget, context):
        """Paint a semi-transparent black backdrop behind the content."""
        context.set_source_rgba(0, 0, 0, config["main-opacity"])
        context.set_operator(cairo.OPERATOR_SOURCE)
        context.paint()
        context.set_operator(cairo.OPERATOR_OVER)

    def do(self, widget, button):
        """Run the configured shell command for a clicked button."""
        if button == 'cancel':
            sys.exit()
        else:
            os.system(config["command-%s" % button])

    def on_keypressed(self, widget, event):
        """Dispatch keyboard shortcuts to the matching configured command."""
        key = Gdk.keyval_name(event.keyval)
        if key in shortcuts.keys():
            command = shortcuts[key]
            if command == "cancel":
                sys.exit()
            else:
                os.system(config["command-%s" % command])

    def on_state_changed(self, widget, event):
        """Re-assert fullscreen whenever the window state changes."""
        if event.new_window_state:
            self.fullscreen()


def main():
    """Parse CLI options, install SIGINT handling and run the GTK loop."""
    status_format = logging.StreamHandler(sys.stdout)
    status_format.setFormatter(logging.Formatter("Clearine: %(message)s"))
    status = logging.getLogger()
    status.addHandler(status_format)
    status.setLevel(logging.INFO)
    try:
        if len(sys.argv) > 1:
            options, arguments = getopt.getopt(sys.argv[1:], "h", ["help"])
            for option, argument in options:
                if option in ("-h", "--help"):
                    print(__doc__)
                    return 0
    except getopt.GetoptError:
        # Narrowed from a bare except: only option-parsing errors belong here.
        status.error("unused options '%s'. see -h (or --help) for more information" % sys.argv[1])
        return 2

    handle = SignalHandler()
    signal.signal(signal.SIGINT, handle.SIGINT)
    w = Clearine()
    w.show_all()
    Gtk.main()


if __name__ == "__main__":
    sys.exit(main())
src/clearine.py
import os import sys import time import getopt import signal import logging import subprocess import configparser try: import gi except: print("no modules named 'gi', please install python-gobject") sys.exit() try: import cairo except: print("no modules named 'cairo', please install python-cairo") sys.exit() gi.require_version('Gtk', '3.0') gi.require_version('Gdk', '3.0') from gi.repository import Gtk, Gdk, Pango, GLib from gi.repository.GdkPixbuf import Pixbuf from Clearine.helper import SignalHandler from Clearine.helper import Helper config = {} shortcuts = {} root_module = os.path.dirname(os.path.abspath(__file__)) class Clearine(Gtk.Window): def __init__(self): super(Clearine, self).__init__() # initialize a fullscreen window self.setcontent() self.setprops() def setprops(self): self.set_visual(self.get_screen().get_rgba_visual()) self.set_app_paintable(True) self.fullscreen() self.set_skip_pager_hint(True) self.set_keep_above(True) self.realize() self.connect('destroy', Gtk.main_quit) self.connect('draw', self.draw_background) self.connect('delete-event', Gtk.main_quit) self.connect('key-press-event', self.on_keypressed) self.connect('window-state-event', self.on_state_changed) def setcontent(self): # fetch a plain-text clearine.conf configuration global config global shortcuts status = logging.getLogger(self.__class__.__name__) dotcat = configparser.ConfigParser() file_home = "%s/.config/clearine.conf" % os.environ['HOME'] file_etc = "/etc/clearine.conf" file_share = "/usr/share/clearine/clearine.conf" file_dot_config = "%s/.config/clearine/clearine.conf" % os.environ['HOME'] file_default = "%s/data/clearine.conf" % root_module try: if os.path.exists(file_home): status.info("load config from: %s"% (file_home)) dotcat.read(file_home) elif os.path.exists(file_dot_config): status.info("load config from: %s"% (file_dot_config)) dotcat.read(file_dot_config) elif os.path.exists(file_etc): status.info("load config from: %s"% (file_etc)) dotcat.read(file_etc) elif 
os.path.exists(file_share): status.info("load config from: %s"% (file_share)) dotcat.read(file_share) elif os.path.exists(file_default): status.info("load config from: %s"% (file_default)) dotcat.read(file_default) except: status.error("failed to find configuration file. exiting.") sys.exit() def find_key(data, section, key, default): helper = Helper() try: if data=="arr": return dotcat.get(section, key).split(",") if data=="str": return dotcat.get(section, key, raw=True) if data=="int": return dotcat.getint(section, key) if data=="flo": return dotcat.getfloat(section, key) if data=="clr": data = dotcat.get(section, key, raw=True) if data.startswith("#") or data.startswith("rgba("): return data elif data.startswith("{") and data.endswith("}"): data = data.lstrip('{').rstrip('}') return helper.xrdb(data) except: status.info("failed to find key named '%s' in section '[%s]'. use fallback value insteads." % (key, section)) return default config["button-label-font"] = find_key("str", "button", "label-font", "DejaVu Sans Book") config["button-label-size"] = find_key("int", "button", "label-size", 9) config["button-label-color"] = find_key("clr", "button", "label-color", "#101314") config["button-height"] = find_key("int", "button", "height", 70) config["button-icon-height"] = find_key("int", "button", "icon-height", 32) config["button-icon-width"] = find_key("int", "button", "icon-width", 32) config["button-items"] = find_key("arr", "button", "items", "suspend, logout, lock, hibernate, restart, shutdown, cancel") config["button-margin-bottom"] = find_key("int", "button", "margin-bottom", 30) config["button-margin-left"] = find_key("int", "button", "margin-left", 10) config["button-margin-right"] = find_key("int", "button", "margin-right", 10) config["button-margin-top"] = find_key("int", "button", "margin-top", 30) config["button-spacing"] = find_key("int", "button", "spacing", 10) config["button-theme"] = find_key("str", "button", "theme", "Clearine-Fallback") 
config["button-width"] = find_key("int", "button", "width", 100) config["button-opacity-normal"] = find_key("flo", "button", "opacity-normal", 0.7) config["button-opacity-focus"] = find_key("flo", "button", "opacity-focus", 1.0) config["card-background-color"] = find_key("clr", "card", "background-color", "#e1e5e8") config["card-border-radius"] = find_key("int", "card", "border-radius", 20) config["card-padding-bottom"] = find_key("int", "card", "padding-bottom", 10) config["card-padding-left"] = find_key("int", "card", "padding-left", 10) config["card-padding-right"] = find_key("int", "card", "padding-right", 10) config["card-padding-top"] = find_key("int", "card", "padding-top", 10) config["main-mode"] = find_key("str", "main", "mode", "V") config["main-spacing"] = find_key("int", "main", "spacing", 10) config["main-gap-left"] = find_key("int", "main", "gap-left", 50) config["main-gap-right"] = find_key("int", "main", "gap-right", 50) config["main-gap-top"] = find_key("int", "main", "gap-top", 50) config["main-gap-bottom"] = find_key("int", "main", "gap-bottom", 50) config["main-opacity"] = find_key("flo", "main", "opacity", 0.8) config["widget-firstline-font"] = find_key("str", "widget", "firstline-font", "DejaVu Sans ExtraLight") config["widget-firstline-size"] = find_key("int", "widget", "firstline-size", 90) config["widget-firstline-color"] = find_key("clr", "widget", "firstline-color", "#e1e5e8") config["widget-firstline-format"] = find_key("str", "widget", "firstline-format", "%H.%M") config["widget-secondline-font"] = find_key("str", "widget", "secondline-font", "DejaVu Sans Book") config["widget-secondline-size"] = find_key("int", "widget", "secondline-size", 14) config["widget-secondline-color"] = find_key("clr", "widget", "secondline-color", "#e1e5e8") config["widget-secondline-format"] = find_key("str", "widget", "secondline-format", "%A, %d %B %Y") config["command-logout"] = find_key("str", "command", "logout", "pkexec pkill X") 
config["command-restart"] = find_key("str", "command", "restart", "pkexec reboot -h now") config["command-shutdown"] = find_key("str", "command", "shutdown", "pkexec shutdown -h now") config["command-lock"] = find_key("str", "command", "lock", "i3lock") config["command-suspend"] = find_key("str", "command", "suspend", "systemctl suspend") config["command-hibernate"] = find_key("str", "command", "hibernate", "systemctl hibernate") shortcuts = { find_key("str", "shortcuts", "cancel", "Escape"): "cancel", find_key("str", "shortcuts", "lock", "K"): "lock", find_key("str", "shortcuts", "suspend", "P"): "suspend", find_key("str", "shortcuts", "hibernate", "H"): "hibernate", find_key("str", "shortcuts", "logout", "L"): "logout", find_key("str", "shortcuts", "restart", "R"): "restart", find_key("str", "shortcuts", "shutdown", "S"): "shutdown" } # Setup all content inside Gtk.Window if config["main-mode"] == "horizontal": button_group = Gtk.VBox() content = Gtk.HBox() else: button_group = Gtk.HBox() content = Gtk.VBox() button_group.set_margin_top(config["button-margin-top"]) button_group.set_margin_start(config["button-margin-left"]) button_group.set_margin_bottom(config["button-margin-bottom"]) button_group.set_margin_end(config["button-margin-right"]) button_group.set_spacing(config["button-spacing"]) card_container = Gtk.Box() card_container.set_margin_top(config["card-padding-top"]) card_container.set_margin_start(config["card-padding-left"]) card_container.set_margin_bottom(config["card-padding-bottom"]) card_container.set_margin_end(config["card-padding-right"]) card_container.pack_start(button_group, False, False, False) card = Gtk.Box() card.pack_start(card_container, False, False, False) container = Gtk.Grid() if config["main-mode"] == "horizontal": container.set_halign(Gtk.Align.END) container.set_valign(Gtk.Align.CENTER) else: container.set_halign(Gtk.Align.CENTER) container.set_valign(Gtk.Align.START) container.attach(card, 1, 1, 1, 1) self.first_widget = 
Gtk.Label() self.first_widget.set_label(time.strftime(config["widget-firstline-format"])) self.second_widget = Gtk.Label() self.second_widget.set_label(time.strftime(config["widget-secondline-format"])) widgets = Gtk.Grid() if config["main-mode"] == "horizontal": widgets.set_halign(Gtk.Align.START) widgets.set_valign(Gtk.Align.CENTER) else: widgets.set_halign(Gtk.Align.CENTER) widgets.set_valign(Gtk.Align.END) widgets.attach(self.first_widget, 1, 1, 1, 1) widgets.attach(self.second_widget, 1, 2, 1, 1) content.set_margin_start(config["main-gap-left"]) content.set_margin_end(config["main-gap-right"]) content.set_margin_top(config["main-gap-top"]) content.set_margin_bottom(config["main-gap-bottom"]) content.set_spacing(config["main-spacing"]) content.pack_start(widgets, True, True, 0) content.pack_end(container, True, True, 0) GLib.timeout_add(200, self.update_widgets) self.card_style = Gtk.CssProvider() self.card_css = """ .clearine-button {{ background: {_cbg}; color: {_bcol}; font-family: '{_bfont}'; font-size: {_bsize}px; box-shadow: none; opacity: {_bopa}; border-width: 0; }} .clearine-button:focus {{ opacity: {_bopaf}; border-width: 0; }} .clearine-card {{ background: {_cbg}; border-width: 0; border-radius:{_crad}px; }} .clearine-widget-first {{ color: {_w1col}; font-family:'{_w1font}'; font-size: {_w1size}px; }} .clearine-widget-second {{ color: {_w2col}; font-family:'{_w2font}'; font-size: {_w2size}px; }} """.format( _bsize=str(config["button-label-size"]), _bcol=str(config["button-label-color"]), _bfont=str(config["button-label-font"]), _bopa=str(config["button-opacity-normal"]), _bopaf=str(config["button-opacity-focus"]), _cbg=str(config["card-background-color"]), _crad=str(config["card-border-radius"]), _w1col=str(config["widget-firstline-color"]), _w2col=str(config["widget-secondline-color"]), _w1font=str(config["widget-firstline-font"]), _w2font=str(config["widget-secondline-font"]), _w1size=str(config["widget-firstline-size"]), 
_w2size=str(config["widget-secondline-size"]), ) self.card_style.load_from_data(self.card_css.encode()) Gtk.StyleContext.add_class(card.get_style_context(), "clearine-card") Gtk.StyleContext.add_class(self.first_widget.get_style_context(), "clearine-widget-first") Gtk.StyleContext.add_class(self.second_widget.get_style_context(), "clearine-widget-second") Gtk.StyleContext.add_provider(card.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) Gtk.StyleContext.add_provider(self.first_widget.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) Gtk.StyleContext.add_provider(self.second_widget.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) for button in config["button-items"]: try: count += 1 except NameError: count = 1 self.draw_button(button, button_group, count) body = Gtk.EventBox() body.add(content) body.connect('button-press-event', Gtk.main_quit) self.add(body) def update_widgets(self): # update first and second-line widget self.first_widget.set_label(time.strftime(config["widget-firstline-format"])) self.second_widget.set_label(time.strftime(config["widget-secondline-format"])) def draw_button(self, name, widget, index): # setup a buttons inside card status = logging.getLogger(self.__class__.__name__) button_name = name.strip() dir_ic_home = "%s/.themes/%s/clearine" % (os.environ['HOME'], config["button-theme"]) dir_ic_share = "%s/%s/clearine" % ("%s/share/themes" % sys.prefix, config["button-theme"]) dir_ic_share_fb = "%s/%s/clearine" % ("%s/share/themes" % sys.prefix, 'Clearine-Fallback') dir_ic_default = "%s/data" % root_module print(dir_ic_share) ic_png_home = "%s/%s.png" % (dir_ic_home, button_name) ic_png_share = "%s/%s.png" % (dir_ic_share, button_name) ic_svg_home = "%s/%s.svg" % (dir_ic_home, button_name) ic_svg_share = "%s/%s.svg" % (dir_ic_share, button_name) ic_svg_share_fb = "%s/%s.svg" % (dir_ic_share_fb, button_name) ic_svg_default = "%s/%s.svg" % (dir_ic_default, 
button_name) if os.path.exists(ic_png_home): iconfile = ic_png_home elif os.path.exists(ic_svg_home): iconfile = ic_svg_home elif os.path.exists(ic_png_share): iconfile = ic_png_share elif os.path.exists(ic_svg_share): iconfile = ic_svg_share elif os.path.exists(ic_svg_share_fb): iconfile = ic_svg_share_fb elif os.path.exists(ic_svg_default): iconfile = ic_svg_default else: status.info("No Clearine theme available, exiting") sys.exit() icon_buffer = Pixbuf.new_from_file_at_size(iconfile, config["button-icon-width"], config["button-icon-height"]) icon = Gtk.Image() icon.set_from_pixbuf(icon_buffer) icon.set_margin_bottom(10) icon.set_margin_top(10) button = Gtk.Button() button.set_always_show_image(True) button.set_image_position(2) button.set_label(button_name.capitalize()) button.set_image(icon) button.connect("clicked", self.do, button_name) button.set_size_request(config["button-width"], config["button-height"]) button.set_can_focus(True) Gtk.StyleContext.add_class(button.get_style_context(), "clearine-button") Gtk.StyleContext.add_provider(button.get_style_context(), self.card_style, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION) widget.pack_start(button, False, False, False) def draw_background(self, widget, context): # setup a semi-transparent background context.set_source_rgba(0, 0, 0, config["main-opacity"]) context.set_operator(cairo.OPERATOR_SOURCE) context.paint() context.set_operator(cairo.OPERATOR_OVER) def do(self, widget, button): # handle every button action if button == 'cancel': sys.exit() else: os.system(config["command-%s" % button]) def on_keypressed (self, widget, event): # handling an event when user press some key key = Gdk.keyval_name(event.keyval) if key in shortcuts.keys(): command = shortcuts[key] if command == "cancel": sys.exit() else: os.system(config["command-%s" % command]) def on_state_changed(self, widget, event): if event.new_window_state: self.fullscreen() def main(): status_format = logging.StreamHandler(sys.stdout) 
status_format.setFormatter(logging.Formatter("Clearine: %(message)s")) status = logging.getLogger() status.addHandler(status_format) status.setLevel(logging.INFO) try: if len(sys.argv) > 1: options, arguments = getopt.getopt(sys.argv[1:], "h", ["help"]) for option, argument in options: if option in ("-h", "--help"): print (__doc__) return 0 except: status.error("unused options '%s'. see -h (or --help) for more information" % sys.argv[1]) return 2 handle = SignalHandler() signal.signal(signal.SIGINT, handle.SIGINT) w = Clearine() w.show_all() Gtk.main() if __name__ == "__main__": sys.exit(main())
0.150996
0.071429
import argparse import os import sys import errno import precision_recall_per_genome import precision_recall_average import precision_recall_by_bpcount import rand_index import genome_recovery import plot_by_genome import matplotlib.pyplot as plt from utils import exclude_genomes from utils import load_data from utils import argparse_parents from utils import labels def make_sure_path_exists(path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise def evaluate_all(gold_standard_file, fasta_file, query_files, labels, filter_tail_percentage, genomes_file, keyword, output_dir): gold_standard = load_data.get_genome_mapping(gold_standard_file, fasta_file) labels_iterator = iter(labels) summary_per_query = [] for query_file in query_files: tool_id = query_file.split('/')[-1] binning_label = next(labels_iterator) if len(labels) > 0 else tool_id path = os.path.join(output_dir, tool_id) make_sure_path_exists(path) query = load_data.open_query(query_file) # PRECISION RECALL PER GENOME bin_metrics = precision_recall_per_genome.compute_metrics(query, gold_standard) if genomes_file: bin_metrics = exclude_genomes.filter_data(bin_metrics, genomes_file, keyword) f = open(path + "/precision_recall.tsv", 'w') precision_recall_per_genome.print_metrics(bin_metrics, f) plot_by_genome.plot_by_genome(bin_metrics, path + '/genomes_sorted_by_recall', 'recall') plot_by_genome.plot_by_genome(bin_metrics, path + '/genomes_sorted_by_precision', 'precision') f.close() # AVG PRECISION RECALL avg_precision, avg_recall, std_deviation_precision, std_deviation_recall, std_error_precision, std_error_recall = \ precision_recall_average.compute_precision_and_recall(bin_metrics, filter_tail_percentage) f = open(path + "/precision_recall_avg.tsv", 'w') precision_recall_average.print_precision_recall_table_header(f) precision_recall_average.print_precision_recall(binning_label, avg_precision, avg_recall, std_deviation_precision, std_deviation_recall, 
std_error_precision, std_error_recall, f) f.close() # PRECISION RECALL BY BP COUNTS precision, recall = precision_recall_by_bpcount.compute_metrics(query, gold_standard) f = open(path + "/precision_recall_by_bpcount.tsv", 'w') precision_recall_by_bpcount.print_precision_recall_by_bpcount(precision, recall, f) f.close() # (ADJUSTED) RAND INDEX ri_by_seq, ri_by_bp, ari_by_bp, ari_by_seq, percentage_of_assigned_bps = rand_index.compute_metrics(query, gold_standard) f = open(path + "/rand_index.tsv", 'w') rand_index.print_rand_indices(ri_by_seq, ri_by_bp, ari_by_bp, ari_by_seq, percentage_of_assigned_bps, f) f.close() # GENOME RECOVERY genome_recovery_val = genome_recovery.calc_table(bin_metrics) summary_per_query.append(({'binning_label': binning_label, 'avg_precision': avg_precision, 'std_deviation_precision': std_deviation_precision, 'std_error_precision': std_error_precision, 'avg_recall': avg_recall, 'std_deviation_recall': std_deviation_recall, 'std_error_recall': std_error_recall, 'precision': precision, 'recall': recall, 'ri_by_bp': ri_by_bp, 'ri_by_seq': ri_by_seq, 'ari_by_bp': ari_by_bp, 'ari_by_seq': ari_by_seq, 'percentage_of_assigned_bps': percentage_of_assigned_bps, '_05compl_01cont': genome_recovery_val[5], '_07compl_01cont': genome_recovery_val[3], '_09compl_01cont': genome_recovery_val[1], '_05compl_005cont': genome_recovery_val[4], '_07compl_005cont': genome_recovery_val[2], '_09compl_005cont': genome_recovery_val[0]}, bin_metrics)) return summary_per_query def convert_summary_to_tuples_of_strings(summary_per_query): tuples = [] for summary in summary_per_query: tuples.append(((summary['binning_label']), format(summary['avg_precision'], '.3f'), format(summary['std_deviation_precision'], '.3f'), format(summary['std_error_precision'], '.3f'), format(summary['avg_recall'], '.3f'), format(summary['std_deviation_recall'], '.3f'), format(summary['std_error_recall'], '.3f'), format(summary['precision'], '.3f'), format(summary['recall'], '.3f'), 
format(summary['ri_by_bp'], '.3f'), format(summary['ri_by_seq'], '.3f'), format(summary['ari_by_bp'], '.3f'), format(summary['ari_by_seq'], '.3f'), format(summary['percentage_of_assigned_bps'], '.3f'), str(summary['_05compl_01cont']), str(summary['_07compl_01cont']), str(summary['_09compl_01cont']), str(summary['_05compl_005cont']), str(summary['_07compl_005cont']), str(summary['_09compl_005cont']))) return tuples def plot_summary(summary_per_query, output_dir, plot_type, file_name, xlabel, ylabel): colors_list = plot_by_genome.create_colors_list() if len(summary_per_query) > len(colors_list): raise RuntimeError("Plot only supports 29 colors") fig, axs = plt.subplots(figsize=(6, 5)) # force axis to be from 0 to 100% axs.set_xlim([0.0, 1.0]) axs.set_ylim([0.0, 1.0]) i = 0 plot_labels = [] if plot_type == 'e': for summary in summary_per_query: axs.errorbar(summary['avg_precision'], summary['avg_recall'], xerr=summary['std_error_precision'], yerr=summary['std_error_recall'], fmt='o', ecolor=colors_list[i], mec=colors_list[i], mfc=colors_list[i], capsize=3) plot_labels.append(summary['binning_label']) i += 1 elif plot_type == 'p': for summary in summary_per_query: axs.plot(summary['ari_by_bp'], summary['percentage_of_assigned_bps'], marker='o', color=colors_list[i]) plot_labels.append(summary['binning_label']) i += 1 # turn on grid axs.minorticks_on() axs.grid(which='major', linestyle='-', linewidth='0.5') axs.grid(which='minor', linestyle=':', linewidth='0.5') # transform plot_labels to percentages vals = axs.get_xticks() axs.set_xticklabels(['{:3.0f}%'.format(x * 100) for x in vals]) vals = axs.get_yticks() axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals]) lgd = plt.legend(plot_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.tight_layout() fig.savefig(os.path.normpath(output_dir + '/' + file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), 
bbox_inches='tight') fig.savefig(os.path.normpath(output_dir + '/' + file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight') def plot_avg_precision_recall(summary_per_query, output_dir): plot_summary(summary_per_query, output_dir, 'e', 'avg_precision_recall', 'Precision', 'Recall') def plot_adjusted_rand_index_vs_assigned_bps(summary_per_query, output_dir): plot_summary(summary_per_query, output_dir, 'p', 'ari_vs_assigned_bps', 'Adjusted Rand index', 'Percentage of assigned base pairs') def print_summary(summary_per_query, stream=sys.stdout): stream.write("%s\n" % "\t".join((labels.TOOL, labels.AVG_PRECISION, labels.STD_DEV_PRECISION, labels.SEM_PRECISION, labels.AVG_RECALL, labels.STD_DEV_RECALL, labels.SEM_RECALL, labels.PRECISION, labels.RECALL, labels.RI_BY_BP, labels.RI_BY_SEQ, labels.ARI_BY_BP, labels.ARI_BY_SEQ, labels.PERCENTAGE_ASSIGNED_BPS, ">0.5compl<0.1cont", ">0.7compl<0.1cont", ">0.9compl<0.1cont", ">0.5compl<0.05cont", ">0.7compl<0.05cont", ">0.9compl<0.05cont"))) for summary in summary_per_query: stream.write("%s\n" % "\t".join(summary)) def compute_rankings(summary_per_query, output_dir): f = open(os.path.normpath(output_dir + '/rankings.txt'), 'w') f.write("Average precision\n") sorted_by = sorted(summary_per_query, key=lambda x: x['avg_precision'], reverse=True) for summary in sorted_by: f.write("%s \t %1.3f\n" % (summary['binning_label'], summary['avg_precision'])) sorted_by = sorted(summary_per_query, key=lambda x: x['avg_recall'], reverse=True) f.write("\nAverage recall\n") for summary in sorted_by: f.write("%s \t %1.3f\n" % (summary['binning_label'], summary['avg_recall'])) sorted_by = sorted(summary_per_query, key=lambda x: x['avg_precision'] + x['avg_recall'], reverse=True) f.write("\nAverage precision + average recall\n") for summary in sorted_by: f.write("%s \t %1.3f\n" % (summary['binning_label'], summary['avg_precision'] + summary['avg_recall'])) f.close() def main(): parser = 
argparse.ArgumentParser(description="Compute all metrics and figures for one or more binning files; output summary to screen and results per binning file to chosen directory", parents=[argparse_parents.PARSER_MULTI2]) parser.add_argument('-o', '--output_dir', help="Directory to write the results to", required=True) args = parser.parse_args() binning_labels = [] if args.labels: binning_labels = [x.strip() for x in args.labels.split(',')] if len(binning_labels) != len(args.bin_files): parser.error('number of labels does not match the number of binning files') summary_per_query = evaluate_all(args.gold_standard_file, args.fasta_file, args.bin_files, binning_labels, args.filter, args.genomes_file, args.keyword, args.output_dir) summary_dict = [x[0] for x in summary_per_query] print_summary(convert_summary_to_tuples_of_strings(summary_dict)) plot_avg_precision_recall(summary_dict, args.output_dir) plot_adjusted_rand_index_vs_assigned_bps(summary_dict, args.output_dir) plot_by_genome.plot_by_genome2(summary_per_query, args.output_dir) compute_rankings(summary_dict, args.output_dir) if __name__ == "__main__": main()
evaluate.py
import argparse import os import sys import errno import precision_recall_per_genome import precision_recall_average import precision_recall_by_bpcount import rand_index import genome_recovery import plot_by_genome import matplotlib.pyplot as plt from utils import exclude_genomes from utils import load_data from utils import argparse_parents from utils import labels def make_sure_path_exists(path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise def evaluate_all(gold_standard_file, fasta_file, query_files, labels, filter_tail_percentage, genomes_file, keyword, output_dir): gold_standard = load_data.get_genome_mapping(gold_standard_file, fasta_file) labels_iterator = iter(labels) summary_per_query = [] for query_file in query_files: tool_id = query_file.split('/')[-1] binning_label = next(labels_iterator) if len(labels) > 0 else tool_id path = os.path.join(output_dir, tool_id) make_sure_path_exists(path) query = load_data.open_query(query_file) # PRECISION RECALL PER GENOME bin_metrics = precision_recall_per_genome.compute_metrics(query, gold_standard) if genomes_file: bin_metrics = exclude_genomes.filter_data(bin_metrics, genomes_file, keyword) f = open(path + "/precision_recall.tsv", 'w') precision_recall_per_genome.print_metrics(bin_metrics, f) plot_by_genome.plot_by_genome(bin_metrics, path + '/genomes_sorted_by_recall', 'recall') plot_by_genome.plot_by_genome(bin_metrics, path + '/genomes_sorted_by_precision', 'precision') f.close() # AVG PRECISION RECALL avg_precision, avg_recall, std_deviation_precision, std_deviation_recall, std_error_precision, std_error_recall = \ precision_recall_average.compute_precision_and_recall(bin_metrics, filter_tail_percentage) f = open(path + "/precision_recall_avg.tsv", 'w') precision_recall_average.print_precision_recall_table_header(f) precision_recall_average.print_precision_recall(binning_label, avg_precision, avg_recall, std_deviation_precision, std_deviation_recall, 
std_error_precision, std_error_recall, f) f.close() # PRECISION RECALL BY BP COUNTS precision, recall = precision_recall_by_bpcount.compute_metrics(query, gold_standard) f = open(path + "/precision_recall_by_bpcount.tsv", 'w') precision_recall_by_bpcount.print_precision_recall_by_bpcount(precision, recall, f) f.close() # (ADJUSTED) RAND INDEX ri_by_seq, ri_by_bp, ari_by_bp, ari_by_seq, percentage_of_assigned_bps = rand_index.compute_metrics(query, gold_standard) f = open(path + "/rand_index.tsv", 'w') rand_index.print_rand_indices(ri_by_seq, ri_by_bp, ari_by_bp, ari_by_seq, percentage_of_assigned_bps, f) f.close() # GENOME RECOVERY genome_recovery_val = genome_recovery.calc_table(bin_metrics) summary_per_query.append(({'binning_label': binning_label, 'avg_precision': avg_precision, 'std_deviation_precision': std_deviation_precision, 'std_error_precision': std_error_precision, 'avg_recall': avg_recall, 'std_deviation_recall': std_deviation_recall, 'std_error_recall': std_error_recall, 'precision': precision, 'recall': recall, 'ri_by_bp': ri_by_bp, 'ri_by_seq': ri_by_seq, 'ari_by_bp': ari_by_bp, 'ari_by_seq': ari_by_seq, 'percentage_of_assigned_bps': percentage_of_assigned_bps, '_05compl_01cont': genome_recovery_val[5], '_07compl_01cont': genome_recovery_val[3], '_09compl_01cont': genome_recovery_val[1], '_05compl_005cont': genome_recovery_val[4], '_07compl_005cont': genome_recovery_val[2], '_09compl_005cont': genome_recovery_val[0]}, bin_metrics)) return summary_per_query def convert_summary_to_tuples_of_strings(summary_per_query): tuples = [] for summary in summary_per_query: tuples.append(((summary['binning_label']), format(summary['avg_precision'], '.3f'), format(summary['std_deviation_precision'], '.3f'), format(summary['std_error_precision'], '.3f'), format(summary['avg_recall'], '.3f'), format(summary['std_deviation_recall'], '.3f'), format(summary['std_error_recall'], '.3f'), format(summary['precision'], '.3f'), format(summary['recall'], '.3f'), 
format(summary['ri_by_bp'], '.3f'), format(summary['ri_by_seq'], '.3f'), format(summary['ari_by_bp'], '.3f'), format(summary['ari_by_seq'], '.3f'), format(summary['percentage_of_assigned_bps'], '.3f'), str(summary['_05compl_01cont']), str(summary['_07compl_01cont']), str(summary['_09compl_01cont']), str(summary['_05compl_005cont']), str(summary['_07compl_005cont']), str(summary['_09compl_005cont']))) return tuples def plot_summary(summary_per_query, output_dir, plot_type, file_name, xlabel, ylabel): colors_list = plot_by_genome.create_colors_list() if len(summary_per_query) > len(colors_list): raise RuntimeError("Plot only supports 29 colors") fig, axs = plt.subplots(figsize=(6, 5)) # force axis to be from 0 to 100% axs.set_xlim([0.0, 1.0]) axs.set_ylim([0.0, 1.0]) i = 0 plot_labels = [] if plot_type == 'e': for summary in summary_per_query: axs.errorbar(summary['avg_precision'], summary['avg_recall'], xerr=summary['std_error_precision'], yerr=summary['std_error_recall'], fmt='o', ecolor=colors_list[i], mec=colors_list[i], mfc=colors_list[i], capsize=3) plot_labels.append(summary['binning_label']) i += 1 elif plot_type == 'p': for summary in summary_per_query: axs.plot(summary['ari_by_bp'], summary['percentage_of_assigned_bps'], marker='o', color=colors_list[i]) plot_labels.append(summary['binning_label']) i += 1 # turn on grid axs.minorticks_on() axs.grid(which='major', linestyle='-', linewidth='0.5') axs.grid(which='minor', linestyle=':', linewidth='0.5') # transform plot_labels to percentages vals = axs.get_xticks() axs.set_xticklabels(['{:3.0f}%'.format(x * 100) for x in vals]) vals = axs.get_yticks() axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals]) lgd = plt.legend(plot_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.tight_layout() fig.savefig(os.path.normpath(output_dir + '/' + file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), 
bbox_inches='tight') fig.savefig(os.path.normpath(output_dir + '/' + file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight') def plot_avg_precision_recall(summary_per_query, output_dir): plot_summary(summary_per_query, output_dir, 'e', 'avg_precision_recall', 'Precision', 'Recall') def plot_adjusted_rand_index_vs_assigned_bps(summary_per_query, output_dir): plot_summary(summary_per_query, output_dir, 'p', 'ari_vs_assigned_bps', 'Adjusted Rand index', 'Percentage of assigned base pairs') def print_summary(summary_per_query, stream=sys.stdout): stream.write("%s\n" % "\t".join((labels.TOOL, labels.AVG_PRECISION, labels.STD_DEV_PRECISION, labels.SEM_PRECISION, labels.AVG_RECALL, labels.STD_DEV_RECALL, labels.SEM_RECALL, labels.PRECISION, labels.RECALL, labels.RI_BY_BP, labels.RI_BY_SEQ, labels.ARI_BY_BP, labels.ARI_BY_SEQ, labels.PERCENTAGE_ASSIGNED_BPS, ">0.5compl<0.1cont", ">0.7compl<0.1cont", ">0.9compl<0.1cont", ">0.5compl<0.05cont", ">0.7compl<0.05cont", ">0.9compl<0.05cont"))) for summary in summary_per_query: stream.write("%s\n" % "\t".join(summary)) def compute_rankings(summary_per_query, output_dir): f = open(os.path.normpath(output_dir + '/rankings.txt'), 'w') f.write("Average precision\n") sorted_by = sorted(summary_per_query, key=lambda x: x['avg_precision'], reverse=True) for summary in sorted_by: f.write("%s \t %1.3f\n" % (summary['binning_label'], summary['avg_precision'])) sorted_by = sorted(summary_per_query, key=lambda x: x['avg_recall'], reverse=True) f.write("\nAverage recall\n") for summary in sorted_by: f.write("%s \t %1.3f\n" % (summary['binning_label'], summary['avg_recall'])) sorted_by = sorted(summary_per_query, key=lambda x: x['avg_precision'] + x['avg_recall'], reverse=True) f.write("\nAverage precision + average recall\n") for summary in sorted_by: f.write("%s \t %1.3f\n" % (summary['binning_label'], summary['avg_precision'] + summary['avg_recall'])) f.close() def main(): parser = 
argparse.ArgumentParser(description="Compute all metrics and figures for one or more binning files; output summary to screen and results per binning file to chosen directory", parents=[argparse_parents.PARSER_MULTI2]) parser.add_argument('-o', '--output_dir', help="Directory to write the results to", required=True) args = parser.parse_args() binning_labels = [] if args.labels: binning_labels = [x.strip() for x in args.labels.split(',')] if len(binning_labels) != len(args.bin_files): parser.error('number of labels does not match the number of binning files') summary_per_query = evaluate_all(args.gold_standard_file, args.fasta_file, args.bin_files, binning_labels, args.filter, args.genomes_file, args.keyword, args.output_dir) summary_dict = [x[0] for x in summary_per_query] print_summary(convert_summary_to_tuples_of_strings(summary_dict)) plot_avg_precision_recall(summary_dict, args.output_dir) plot_adjusted_rand_index_vs_assigned_bps(summary_dict, args.output_dir) plot_by_genome.plot_by_genome2(summary_per_query, args.output_dir) compute_rankings(summary_dict, args.output_dir) if __name__ == "__main__": main()
0.385259
0.128991
import json import sys import matplotlib matplotlib.use('Agg') import mpl_toolkits.mplot3d.axes3d as p3 import pylab as p import numpy as np from matplotlib import cm import matplotlib.pyplot as plt from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas import requests import cgi import cgitb import re import tempfile cgitb.enable() message = 'Content-Type:text/html' + '\n\n' + '<h1>Endowment Graph</h1>\n<br /><br />' wwwDir = '/var/www/html' tempDir = wwwDir + '/images/' # Other defaults Duration=100 debug=False verbose=True Defaults = '{' + '"DriveCost": "250.0",' + '"SlotCost": "150.0",' + '"SlotTeraByte": "7.2",' + '"SlotCostPerYear": "100.0",' + '"DriveLife": "4",' + '"DriveFailRate": "2",' + '"SlotLife": "8",' + '"DiscountRate": "2",' + '"KryderRate": "10",' + '"SlotRate": "0.0",' + '"LaborPowerRate": "4",' + '"ReplicationFactor": "2"'+ '}' inputData = json.loads(Defaults) # Convert POST data to dictionary def parsePostData(postData): global message ret = {} for p in postData: ret[p] = postData[p].value if(verbose): message = message + "{0} = {1}<br />\n".format(p, ret[p]) return ret # Compute endowment for a given DiscountRate and KryderRate def EndowmentPerTB(x,y): # Real interest rate per year DiscountRate = x # Disk capacity increment per year KryderRate = y # Initial conditions Year=0 Endowment=0.0 DiscountFactor=1.0 if(debug): print("inputData: {}<br />\n".format(inputData)) params = inputData # Cost per drive (assumed constant, capacity increases) DriveCost = float(params["DriveCost"]) # Per-slot cost of rack/server/etc SlotCost = float(params["SlotCost"]) # Disk capacity in TB SlotTeraByte = float(params["SlotTeraByte"]) # Annual labor + power cost of a drive slot SlotCostPerYear = float(params["SlotCostPerYear"]) # Service life of drive in years DriveLife = int(params["DriveLife"]) # Annual proportion of drives that fail DriveFailRate = float(params["DriveFailRate"])/100.0 # Service life of rack in years SlotLife = 
int(params["SlotLife"]) # Real interest rate per year #DiscountRate = float(params["DiscountRate"]) # Disk capacity increment per year #KryderRate = float(params["KryderRate"]) # Rack + server cost decrease per year SlotRate = float(params["SlotRate"]) if(SlotRate >= 1.0): SlotRate = SlotRate/100.0 # Labor + power cost increment per year LaborPowerRate = float(params["LaborPowerRate"]) if(LaborPowerRate >= 1.0): LaborPowerRate = LaborPowerRate/100.0 # Replication factor ReplicationFactor = float(params["ReplicationFactor"]) # Follow the history of a TB while(Year<Duration): # Incur this year's costs Cost = (SlotCostPerYear/SlotTeraByte) if(debug): print("cost: " + str(Cost) + '<br />') if((Year % DriveLife) == 0): # Time to replace drive and buy enough spares to # cover the (DriveFailRate*DriveLife) of each drive # that will fail in service. SpareFactor = DriveFailRate*DriveLife if(debug): print("Spare factor: " + str(SpareFactor) + '<br />') Cost += (DriveCost*(1 + SpareFactor)/SlotTeraByte) if(debug): print("buy disk: " + str(Cost) + '<br />') if((Year % SlotLife) == 0): # Time to replace rack Cost += (SlotCost/SlotTeraByte) if(debug): print("buy rack: " + str(Cost) + '<br />') # Deflate the cost by the discount rate Cost *= DiscountFactor if(debug): print("Discounted: " + str(Cost) + '<br />') # Account for replication Cost *= ReplicationFactor if(debug): print("Replicated: " + str(Cost) + '<br />') # Add to the endowment Endowment += Cost if(debug): print("Endowment: " + str(Endowment) + '<br />') # Adjust costs by the parameters SlotTeraByte *= (1.0+KryderRate) SlotCost *= (1.0-SlotRate) SlotCostPerYear *= (1.0+LaborPowerRate) DiscountFactor *= (1.0-DiscountRate) Year += 1 return Endowment try: if(len(sys.argv) > 1): inputData = json.loads(sys.argv[1]) tempDir = '/tmp/' wwwDir = tempDir else: postData = cgi.FieldStorage() inputData = parsePostData(postData) if(len(inputData) == 0): inputData = json.loads(Defaults) tempDir = '/tmp/' wwwDir = tempDir # Discount 
rate range x = np.arange(0.01, 0.11, 0.005) # Kryder rate range y = np.arange(0.05, 0.25, 0.01) X, Y = p.meshgrid(x, y) Z = EndowmentPerTB(X, Y) fig=p.figure() ax = p3.Axes3D(fig) surf = ax.plot_surface(X,Y,Z, rstride=1, cstride=1, cmap=cm.coolwarm) m = cm.ScalarMappable(cmap=cm.coolwarm) m.set_array(Z) cbar = plt.colorbar(m, shrink=0.5, aspect=5) #fig.colorbar(surf, shrink=0.5, aspect=5) ax.set_xlabel('DiscountRate') ax.set_ylabel('KryderRate') ax.set_zlabel('Endowment$') ax.invert_yaxis() f = tempfile.NamedTemporaryFile(dir=tempDir, suffix='.png', delete=False) fn = f.name[len(wwwDir):len(f.name)] plt.savefig(f.name) message = message + '<img src="' + fn + '">' print(message) except: e = sys.exc_info() try: message = message + cgitb.text(e) except AttributeError: message = message + "Got AttributeError: {}\n".format(e) print(message)
cgi-bin/graph-endowment.py
import json import sys import matplotlib matplotlib.use('Agg') import mpl_toolkits.mplot3d.axes3d as p3 import pylab as p import numpy as np from matplotlib import cm import matplotlib.pyplot as plt from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas import requests import cgi import cgitb import re import tempfile cgitb.enable() message = 'Content-Type:text/html' + '\n\n' + '<h1>Endowment Graph</h1>\n<br /><br />' wwwDir = '/var/www/html' tempDir = wwwDir + '/images/' # Other defaults Duration=100 debug=False verbose=True Defaults = '{' + '"DriveCost": "250.0",' + '"SlotCost": "150.0",' + '"SlotTeraByte": "7.2",' + '"SlotCostPerYear": "100.0",' + '"DriveLife": "4",' + '"DriveFailRate": "2",' + '"SlotLife": "8",' + '"DiscountRate": "2",' + '"KryderRate": "10",' + '"SlotRate": "0.0",' + '"LaborPowerRate": "4",' + '"ReplicationFactor": "2"'+ '}' inputData = json.loads(Defaults) # Convert POST data to dictionary def parsePostData(postData): global message ret = {} for p in postData: ret[p] = postData[p].value if(verbose): message = message + "{0} = {1}<br />\n".format(p, ret[p]) return ret # Compute endowment for a given DiscountRate and KryderRate def EndowmentPerTB(x,y): # Real interest rate per year DiscountRate = x # Disk capacity increment per year KryderRate = y # Initial conditions Year=0 Endowment=0.0 DiscountFactor=1.0 if(debug): print("inputData: {}<br />\n".format(inputData)) params = inputData # Cost per drive (assumed constant, capacity increases) DriveCost = float(params["DriveCost"]) # Per-slot cost of rack/server/etc SlotCost = float(params["SlotCost"]) # Disk capacity in TB SlotTeraByte = float(params["SlotTeraByte"]) # Annual labor + power cost of a drive slot SlotCostPerYear = float(params["SlotCostPerYear"]) # Service life of drive in years DriveLife = int(params["DriveLife"]) # Annual proportion of drives that fail DriveFailRate = float(params["DriveFailRate"])/100.0 # Service life of rack in years SlotLife = 
int(params["SlotLife"]) # Real interest rate per year #DiscountRate = float(params["DiscountRate"]) # Disk capacity increment per year #KryderRate = float(params["KryderRate"]) # Rack + server cost decrease per year SlotRate = float(params["SlotRate"]) if(SlotRate >= 1.0): SlotRate = SlotRate/100.0 # Labor + power cost increment per year LaborPowerRate = float(params["LaborPowerRate"]) if(LaborPowerRate >= 1.0): LaborPowerRate = LaborPowerRate/100.0 # Replication factor ReplicationFactor = float(params["ReplicationFactor"]) # Follow the history of a TB while(Year<Duration): # Incur this year's costs Cost = (SlotCostPerYear/SlotTeraByte) if(debug): print("cost: " + str(Cost) + '<br />') if((Year % DriveLife) == 0): # Time to replace drive and buy enough spares to # cover the (DriveFailRate*DriveLife) of each drive # that will fail in service. SpareFactor = DriveFailRate*DriveLife if(debug): print("Spare factor: " + str(SpareFactor) + '<br />') Cost += (DriveCost*(1 + SpareFactor)/SlotTeraByte) if(debug): print("buy disk: " + str(Cost) + '<br />') if((Year % SlotLife) == 0): # Time to replace rack Cost += (SlotCost/SlotTeraByte) if(debug): print("buy rack: " + str(Cost) + '<br />') # Deflate the cost by the discount rate Cost *= DiscountFactor if(debug): print("Discounted: " + str(Cost) + '<br />') # Account for replication Cost *= ReplicationFactor if(debug): print("Replicated: " + str(Cost) + '<br />') # Add to the endowment Endowment += Cost if(debug): print("Endowment: " + str(Endowment) + '<br />') # Adjust costs by the parameters SlotTeraByte *= (1.0+KryderRate) SlotCost *= (1.0-SlotRate) SlotCostPerYear *= (1.0+LaborPowerRate) DiscountFactor *= (1.0-DiscountRate) Year += 1 return Endowment try: if(len(sys.argv) > 1): inputData = json.loads(sys.argv[1]) tempDir = '/tmp/' wwwDir = tempDir else: postData = cgi.FieldStorage() inputData = parsePostData(postData) if(len(inputData) == 0): inputData = json.loads(Defaults) tempDir = '/tmp/' wwwDir = tempDir # Discount 
rate range x = np.arange(0.01, 0.11, 0.005) # Kryder rate range y = np.arange(0.05, 0.25, 0.01) X, Y = p.meshgrid(x, y) Z = EndowmentPerTB(X, Y) fig=p.figure() ax = p3.Axes3D(fig) surf = ax.plot_surface(X,Y,Z, rstride=1, cstride=1, cmap=cm.coolwarm) m = cm.ScalarMappable(cmap=cm.coolwarm) m.set_array(Z) cbar = plt.colorbar(m, shrink=0.5, aspect=5) #fig.colorbar(surf, shrink=0.5, aspect=5) ax.set_xlabel('DiscountRate') ax.set_ylabel('KryderRate') ax.set_zlabel('Endowment$') ax.invert_yaxis() f = tempfile.NamedTemporaryFile(dir=tempDir, suffix='.png', delete=False) fn = f.name[len(wwwDir):len(f.name)] plt.savefig(f.name) message = message + '<img src="' + fn + '">' print(message) except: e = sys.exc_info() try: message = message + cgitb.text(e) except AttributeError: message = message + "Got AttributeError: {}\n".format(e) print(message)
0.263126
0.251475
import unittest from unittest import TestCase from math import isnan from pymatgen.core.periodic_table import Specie from matminer.utils.data import DemlData, MagpieData, PymatgenData, \ MixingEnthalpy, MatscholarElementData, MEGNetElementData, IUCrBondValenceData from pymatgen import Element class TestDemlData(TestCase): """Tests for the DemlData Class""" def setUp(self): self.data_source = DemlData() def test_get_property(self): self.assertAlmostEqual(-4.3853, self.data_source.get_elemental_property(Element("Bi"), "mus_fere"), 4) self.assertEqual(59600, self.data_source.get_elemental_property(Element("Li"), "electron_affin")) self.assertAlmostEqual(2372300, self.data_source.get_elemental_property(Element("He"), "first_ioniz")) self.assertAlmostEqual(sum([2372300,5250500]), self.data_source.get_charge_dependent_property_from_specie(Specie("He", 2), "total_ioniz")) self.assertAlmostEqual(18.6, self.data_source.get_charge_dependent_property_from_specie(Specie("V", 3), "xtal_field_split")) def test_get_oxidation(self): self.assertEqual([1], self.data_source.get_oxidation_states(Element("Li"))) class TestMagpieData(TestCase): def setUp(self): self.data_source = MagpieData() def test_get_property(self): self.assertAlmostEqual(9.012182, self.data_source.get_elemental_property(Element("Be"), "AtomicWeight")) def test_get_oxidation(self): self.assertEqual([-4, 2, 4], self.data_source.get_oxidation_states(Element("C"))) class TestPymatgenData(TestCase): def setUp(self): self.data_source = PymatgenData() def test_get_property(self): self.assertAlmostEqual(9.012182, self.data_source.get_elemental_property(Element("Be"), "atomic_mass")) self.assertAlmostEqual(1.26, self.data_source.get_charge_dependent_property(Element("Ac"), 3, "ionic_radii")) def test_get_oxidation(self): self.assertEqual((3,), self.data_source.get_oxidation_states(Element("Nd"))) self.data_source.use_common_oxi_states = False self.assertEqual((2, 3), self.data_source.get_oxidation_states(Element("Nd"))) 
class TestMatScholarData(TestCase): def setUp(self): self.data_source = MatscholarElementData() def test_get_property(self): embedding_cu = self.data_source.get_elemental_property(Element("Cu"), "embedding 3") self.assertAlmostEqual(0.028666902333498, embedding_cu) with self.assertRaises(ValueError): self.data_source.get_elemental_property(Element("Db"), "embedding 9") class TestMEGNetData(TestCase): def setUp(self): self.data_source= MEGNetElementData() def test_get_property(self): embedding_cu = self.data_source.get_elemental_property(Element("Cu"), "embedding 1") self.assertAlmostEqual(0.18259364366531372, embedding_cu) # MEGNet embeddings have element data for elements 1-94, plus 0 for # "dummy" atoms. embedding_md = self.data_source.get_elemental_property(Element("Md"), "embedding 1") self.assertAlmostEqual(-0.044910576194524765, embedding_md) embedding_dummy = self.data_source.all_element_data["Dummy"]["embedding 1"] self.assertAlmostEqual(-0.044910576194524765, embedding_dummy) class TestMixingEnthalpy(TestCase): def setUp(self): self.data = MixingEnthalpy() def test_get_data(self): self.assertEqual(-27, self.data.get_mixing_enthalpy(Element('H'), Element('Pd'))) self.assertEqual(-27, self.data.get_mixing_enthalpy(Element('Pd'), Element('H'))) self.assertTrue(isnan(self.data.get_mixing_enthalpy(Element('He'), Element('H')))) class TestIUCrBondValenceData(TestCase): def setUp(self): self.data = IUCrBondValenceData() def test_get_data(self): nacl = self.data.get_bv_params("Na", "Cl", 1, -1) self.assertAlmostEqual(nacl['Ro'], 2.15) if __name__ == "__main__": unittest.main()
matminer/utils/tests/test_data.py
import unittest from unittest import TestCase from math import isnan from pymatgen.core.periodic_table import Specie from matminer.utils.data import DemlData, MagpieData, PymatgenData, \ MixingEnthalpy, MatscholarElementData, MEGNetElementData, IUCrBondValenceData from pymatgen import Element class TestDemlData(TestCase): """Tests for the DemlData Class""" def setUp(self): self.data_source = DemlData() def test_get_property(self): self.assertAlmostEqual(-4.3853, self.data_source.get_elemental_property(Element("Bi"), "mus_fere"), 4) self.assertEqual(59600, self.data_source.get_elemental_property(Element("Li"), "electron_affin")) self.assertAlmostEqual(2372300, self.data_source.get_elemental_property(Element("He"), "first_ioniz")) self.assertAlmostEqual(sum([2372300,5250500]), self.data_source.get_charge_dependent_property_from_specie(Specie("He", 2), "total_ioniz")) self.assertAlmostEqual(18.6, self.data_source.get_charge_dependent_property_from_specie(Specie("V", 3), "xtal_field_split")) def test_get_oxidation(self): self.assertEqual([1], self.data_source.get_oxidation_states(Element("Li"))) class TestMagpieData(TestCase): def setUp(self): self.data_source = MagpieData() def test_get_property(self): self.assertAlmostEqual(9.012182, self.data_source.get_elemental_property(Element("Be"), "AtomicWeight")) def test_get_oxidation(self): self.assertEqual([-4, 2, 4], self.data_source.get_oxidation_states(Element("C"))) class TestPymatgenData(TestCase): def setUp(self): self.data_source = PymatgenData() def test_get_property(self): self.assertAlmostEqual(9.012182, self.data_source.get_elemental_property(Element("Be"), "atomic_mass")) self.assertAlmostEqual(1.26, self.data_source.get_charge_dependent_property(Element("Ac"), 3, "ionic_radii")) def test_get_oxidation(self): self.assertEqual((3,), self.data_source.get_oxidation_states(Element("Nd"))) self.data_source.use_common_oxi_states = False self.assertEqual((2, 3), self.data_source.get_oxidation_states(Element("Nd"))) 
class TestMatScholarData(TestCase): def setUp(self): self.data_source = MatscholarElementData() def test_get_property(self): embedding_cu = self.data_source.get_elemental_property(Element("Cu"), "embedding 3") self.assertAlmostEqual(0.028666902333498, embedding_cu) with self.assertRaises(ValueError): self.data_source.get_elemental_property(Element("Db"), "embedding 9") class TestMEGNetData(TestCase): def setUp(self): self.data_source= MEGNetElementData() def test_get_property(self): embedding_cu = self.data_source.get_elemental_property(Element("Cu"), "embedding 1") self.assertAlmostEqual(0.18259364366531372, embedding_cu) # MEGNet embeddings have element data for elements 1-94, plus 0 for # "dummy" atoms. embedding_md = self.data_source.get_elemental_property(Element("Md"), "embedding 1") self.assertAlmostEqual(-0.044910576194524765, embedding_md) embedding_dummy = self.data_source.all_element_data["Dummy"]["embedding 1"] self.assertAlmostEqual(-0.044910576194524765, embedding_dummy) class TestMixingEnthalpy(TestCase): def setUp(self): self.data = MixingEnthalpy() def test_get_data(self): self.assertEqual(-27, self.data.get_mixing_enthalpy(Element('H'), Element('Pd'))) self.assertEqual(-27, self.data.get_mixing_enthalpy(Element('Pd'), Element('H'))) self.assertTrue(isnan(self.data.get_mixing_enthalpy(Element('He'), Element('H')))) class TestIUCrBondValenceData(TestCase): def setUp(self): self.data = IUCrBondValenceData() def test_get_data(self): nacl = self.data.get_bv_params("Na", "Cl", 1, -1) self.assertAlmostEqual(nacl['Ro'], 2.15) if __name__ == "__main__": unittest.main()
0.74055
0.678653
import os import csv from sklearn.datasets import fetch_mldata from sklearn.cross_validation import train_test_split from sklearn.naive_bayes import MultinomialNB from DenoisingAutoencoder import DenoisingAutoencoder from StackedDenoisingAutoencoders import StackedDenoisingAutoencoders custom_data_home = os.path.join(os.path.split(__file__)[0], "data") mnist = fetch_mldata('MNIST original', data_home=custom_data_home) X, y = mnist.data / 255., mnist.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42) # Stacked AE test hidden_layer_one = [200,300,400] hidden_layer_two = [50 ,100,150,200] num_epochs = [1,2,3,4,5,6,7] num_noise = [0.1, 0.2, 0.3,0.4] for epoch in num_epochs: for h_one in hidden_layer_one: for h_two in (x for x in hidden_layer_two if x < h_one): for noise_level in num_noise: print "EPOCHS: %d " % epoch print "LAYER_1: %d " % h_one print "LAYER_2: %d " % h_two stacked_ae = StackedDenoisingAutoencoders(hidden_layers_sizes=[h_one, h_two], corruption_level=noise_level,verbose=True, training_epochs=epoch) stacked_ae.fit(X_train) X_train_latent = stacked_ae.transform_latent_representation(X_train) X_test_latent = stacked_ae.transform_latent_representation(X_test) clf = MultinomialNB() # Fit the model clf.fit(X_train_latent, y_train) # Perform the predictions y_predicted = clf.predict(X_test_latent) from sklearn.metrics import accuracy_score print "Accuracy = {} %".format(accuracy_score(y_test, y_predicted)*100) from sklearn.metrics import classification_report print "Classification Report \n {}".format(classification_report(y_test, y_predicted, labels=range(0,10))) with open('stackedNoiseAEs.csv', 'a') as csvfile: outputFile = csv.writer(csvfile, delimiter=',') outputFile.writerow([epoch, h_one, h_two, noise_level, accuracy_score(y_test, y_predicted)*100])
run_mnist_stacked_ae.py
import os import csv from sklearn.datasets import fetch_mldata from sklearn.cross_validation import train_test_split from sklearn.naive_bayes import MultinomialNB from DenoisingAutoencoder import DenoisingAutoencoder from StackedDenoisingAutoencoders import StackedDenoisingAutoencoders custom_data_home = os.path.join(os.path.split(__file__)[0], "data") mnist = fetch_mldata('MNIST original', data_home=custom_data_home) X, y = mnist.data / 255., mnist.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42) # Stacked AE test hidden_layer_one = [200,300,400] hidden_layer_two = [50 ,100,150,200] num_epochs = [1,2,3,4,5,6,7] num_noise = [0.1, 0.2, 0.3,0.4] for epoch in num_epochs: for h_one in hidden_layer_one: for h_two in (x for x in hidden_layer_two if x < h_one): for noise_level in num_noise: print "EPOCHS: %d " % epoch print "LAYER_1: %d " % h_one print "LAYER_2: %d " % h_two stacked_ae = StackedDenoisingAutoencoders(hidden_layers_sizes=[h_one, h_two], corruption_level=noise_level,verbose=True, training_epochs=epoch) stacked_ae.fit(X_train) X_train_latent = stacked_ae.transform_latent_representation(X_train) X_test_latent = stacked_ae.transform_latent_representation(X_test) clf = MultinomialNB() # Fit the model clf.fit(X_train_latent, y_train) # Perform the predictions y_predicted = clf.predict(X_test_latent) from sklearn.metrics import accuracy_score print "Accuracy = {} %".format(accuracy_score(y_test, y_predicted)*100) from sklearn.metrics import classification_report print "Classification Report \n {}".format(classification_report(y_test, y_predicted, labels=range(0,10))) with open('stackedNoiseAEs.csv', 'a') as csvfile: outputFile = csv.writer(csvfile, delimiter=',') outputFile.writerow([epoch, h_one, h_two, noise_level, accuracy_score(y_test, y_predicted)*100])
0.458349
0.385375
import mock """ result module - Unit tests for Result class """ import unittest from cloudant.error import ResultException from cloudant.result import Result, ResultByKey from cloudant.view import View from nose.plugins.attrib import attr from requests.exceptions import HTTPError from .unit_t_db_base import UnitTestDbBase class ResultExceptionTests(unittest.TestCase): """ Ensure ResultException functions as expected. """ def test_raise_without_code(self): """ Ensure that a default exception/code is used if none is provided. """ with self.assertRaises(ResultException) as cm: raise ResultException() self.assertEqual(cm.exception.status_code, 100) def test_raise_using_invalid_code(self): """ Ensure that a default exception/code is used if invalid code is provided. """ with self.assertRaises(ResultException) as cm: raise ResultException('foo') self.assertEqual(cm.exception.status_code, 100) def test_raise_without_args(self): """ Ensure that a default exception/code is used if the message requested by the code provided requires an argument list and none is provided. """ with self.assertRaises(ResultException) as cm: raise ResultException(101) self.assertEqual(cm.exception.status_code, 100) def test_raise_without_insufficient_args(self): """ Ensure that a default exception/code is used if the message requested by the code provided requires an argument list but the one provided does not contain the correct amount of arguments. """ with self.assertRaises(ResultException) as cm: raise ResultException(102, 'foo') self.assertEqual(cm.exception.status_code, 100) def test_raise_with_proper_code_and_args(self): """ Ensure that the requested exception is raised. 
""" with self.assertRaises(ResultException) as cm: raise ResultException(102, 'foo', 'bar') self.assertEqual(cm.exception.status_code, 102) @attr(db=['cloudant','couch']) class ResultTests(UnitTestDbBase): """ Result unit tests """ def setUp(self): """ Set up test attributes """ super(ResultTests, self).setUp() self.db_set_up() self.populate_db_with_documents() self.create_views() def tearDown(self): """ Reset test attributes """ self.db_tear_down() super(ResultTests, self).tearDown() def test_constructor(self): """ Test instantiating a Result """ result = Result( self.ddoc.get_view('view001'), startkey='1', endkey='9', page_size=1000 ) self.assertIsInstance(result, Result) self.assertDictEqual(result.options, {'startkey': '1', 'endkey': '9'}) def test_get_item_by_index(self): """ Test retrieving a result using a value that refers to an index of the result. """ result = Result(self.view001) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result[10], expected) expected = [{'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[99], expected) self.assertEqual(result[100], []) self.assertEqual(result[110], []) def test_get_item_by_index_using_skip_limit(self): """ Test retrieving a result using a value that refers to an index of the result when the result uses skip and limit. 
""" result = Result(self.view001, skip=10, limit=10) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia015', 'id': 'julia015', 'value': 1}] self.assertEqual(result[5], expected) expected = [{'key': 'julia019', 'id': 'julia019', 'value': 1}] self.assertEqual(result[9], expected) self.assertEqual(result[10], []) self.assertEqual(result[20], []) def test_get_item_by_index_using_limit(self): """ Test retrieving a result using a value that refers to an index of the result when the result uses limit. """ result = Result(self.view001, limit=10) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia005', 'id': 'julia005', 'value': 1}] self.assertEqual(result[5], expected) expected = [{'key': 'julia009', 'id': 'julia009', 'value': 1}] self.assertEqual(result[9], expected) self.assertEqual(result[10], []) self.assertEqual(result[20], []) def test_get_item_by_index_using_skip(self): """ Test retrieving a result using a value that refers to an index of the result when the result uses limit. """ result = Result(self.view001, skip=10) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia015', 'id': 'julia015', 'value': 1}] self.assertEqual(result[5], expected) expected = [{'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[89], expected) self.assertEqual(result[90], []) self.assertEqual(result[100], []) def test_get_item_by_negative_index(self): """ Test retrieving a result raises an exception when using a negative index. 
""" result = Result(self.view001) with self.assertRaises(ResultException) as cm: invalid_result = result[-1] self.assertEqual(cm.exception.status_code, 101) def test_get_item_by_key_using_invalid_options(self): """ Since the __getitem__ method uses the 'key' parameter to retrieve the specified data using a Result, any Result that uses any of 'key', 'keys', 'startkey' or 'endkey' as arguments would yield unexpected results. For this reason a check was added to ensure that these options are not used in this case. This test verifies that check. """ options = ('key', 'keys', 'startkey', 'endkey') for option in options: result = Result(self.view001, **{option: 'julia010'}) with self.assertRaises(ResultException) as cm: invalid_result = result['julia000'] self.assertEqual(cm.exception.status_code, 102) def test_get_item_by_key(self): """ Test retrieving a result using value that refers to a key of the result. """ result = Result(self.view001) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result['julia010'], expected) self.assertEqual(result[ResultByKey('julia010')], expected) def test_get_item_by_missing_key(self): """ Test retrieving a result using value that refers to a key that does not exist in the result. """ result = Result(self.view001) self.assertEqual(result['ruby010'], []) self.assertEqual(result[ResultByKey('ruby010')], []) def test_get_item_by_complex_key(self): """ Test retrieving a result using value that refers to a complex key of the result. """ result = Result(self.view005) expected = [{'key': ['julia', 10], 'id': 'julia010', 'value': 1}] self.assertEqual(result[['julia', 10]], expected) self.assertEqual(result[ResultByKey(['julia', 10])], expected) def test_get_item_by_integer_key(self): """ Test retrieving a result using an integer value that refers to a key of the result. 
""" result = Result(self.view003) expected = [{'key': 10, 'id': 'julia020', 'value': 1}, {'key': 10, 'id': 'julia021', 'value': 1}] self.assertEqual(result[ResultByKey(10)], expected) def test_get_item_by_missing_integer_key(self): """ Test retrieving a result using an integer value that refers to a key that does not exist in the result. """ result = Result(self.view003) self.assertEqual(result[ResultByKey(99)], []) def test_get_item_slice_no_start_no_stop(self): """ Test that by not providing a start and a stop slice value, the entire result is returned. """ result = Result(self.view001, limit=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual(result[:], expected) def test_get_all_items(self): """ Test that all results can be retrieved. """ result = Result(self.view001, limit=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual(result.all(), expected) def test_get_item_invalid_index_slice(self): """ Test that when invalid start and stop values are provided in a slice an exception is raised. 
""" result = Result(self.view001) with self.assertRaises(ResultException) as cm: invalid_result = result[-1: 10] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[1: -10] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[-1: -10] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[2: 2] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[5: 2] self.assertEqual(cm.exception.status_code, 101) def test_get_item_index_slice_using_start_stop(self): """ Test getting an index slice by using start and stop slice values. """ result = Result(self.view001) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[98:100], expected) self.assertEqual(result[98:102], expected) self.assertEqual(result[100:102], []) result = Result(self.view001, limit=20) expected = [{'key': 'julia018', 'id': 'julia018', 'value': 1}, {'key': 'julia019', 'id': 'julia019', 'value': 1}] self.assertEqual(result[18:20], expected) self.assertEqual(result[18:22], expected) self.assertEqual(result[20:22], []) result = Result(self.view001, skip=98) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[0:2], expected) self.assertEqual(result[0:4], expected) self.assertEqual(result[2:4], []) result = Result(self.view001, limit=20, skip=20) expected = [{'key': 'julia038', 'id': 'julia038', 'value': 1}, {'key': 'julia039', 'id': 'julia039', 'value': 1}] self.assertEqual(result[18:20], expected) self.assertEqual(result[18:22], expected) self.assertEqual(result[20:22], []) def test_get_item_index_slice_using_start_only(self): """ Test getting an index slice by using start slice value 
only. """ result = Result(self.view001) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[98:], expected) self.assertEqual(result[100:], []) result = Result(self.view001, limit=20) expected = [{'key': 'julia018', 'id': 'julia018', 'value': 1}, {'key': 'julia019', 'id': 'julia019', 'value': 1}] self.assertEqual(result[18:], expected) self.assertEqual(result[20:], []) result = Result(self.view001, skip=98) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[0:], expected) self.assertEqual(result[2:], []) result = Result(self.view001, limit=20, skip=20) expected = [{'key': 'julia038', 'id': 'julia038', 'value': 1}, {'key': 'julia039', 'id': 'julia039', 'value': 1}] self.assertEqual(result[18:], expected) self.assertEqual(result[20:], []) def test_get_item_index_slice_using_stop_only(self): """ Test getting an index slice by using stop slice value only. 
""" result = Result(self.view001) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}] self.assertEqual(result[:2], expected) expected = [{'key': 'julia{0:03d}'.format(x), 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] self.assertEqual(result[:102], expected) result = Result(self.view001, limit=20) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}] self.assertEqual(result[:2], expected) expected = [{'key': 'julia{0:03d}'.format(x), 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(20)] self.assertEqual(result[:22], expected) result = Result(self.view001, skip=98) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[:2], expected) self.assertEqual(result[:4], expected) result = Result(self.view001, limit=2, skip=20) expected = [{'key': 'julia020', 'id': 'julia020', 'value': 1}, {'key': 'julia021', 'id': 'julia021', 'value': 1}] self.assertEqual(result[:2], expected) self.assertEqual(result[:4], expected) def test_get_item_key_slice_using_invalid_options(self): """ Test that when "key" and/or "keys" are used in the result an exception is raised. 
""" result = Result(self.view001, key='foo') with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) result = Result(self.view001, keys=['foo', 'bar']) with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) result = Result(self.view001, startkey='foo') with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) result = Result(self.view001, endkey='foo') with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) def test_get_item_invalid_key_slice(self): """ Test that when invalid start and stop values are provided in a slice an exception is raised. Specifically this happens when the slice start and stop are different types. """ result = Result(self.view001) with self.assertRaises(ResultException) as cm: invalid_result = result['foo': ['bar', 'baz']] self.assertEqual(cm.exception.status_code, 101) ten = ResultByKey(10) with self.assertRaises(ResultException) as cm: invalid_result = result['foo': ten] self.assertEqual(cm.exception.status_code, 101) def test_get_item_key_slice_using_start_stop(self): """ Test getting a key slice by using start and stop slice values. 
""" result = Result(self.view001) expected = [{'key': 'julia097', 'id': 'julia097', 'value': 1}, {'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result['julia097': 'julia099'], expected) self.assertEqual( result[ResultByKey('julia097'): ResultByKey('julia099')], expected ) self.assertEqual(result['julia097': 'ruby'], expected) self.assertEqual( result['julia098': 'julia098'], [{'key': 'julia098', 'id': 'julia098', 'value': 1}] ) self.assertEqual(result['bar': 'foo'], []) result = Result(self.view003) expected = [{'key': 47, 'id': 'julia094', 'value': 1}, {'key': 47, 'id': 'julia095', 'value': 1}, {'key': 48, 'id': 'julia096', 'value': 1}, {'key': 48, 'id': 'julia097', 'value': 1}, {'key': 49, 'id': 'julia098', 'value': 1}, {'key': 49, 'id': 'julia099', 'value': 1}] self.assertEqual(result[ResultByKey(47): ResultByKey(49)], expected) self.assertEqual(result[ResultByKey(47): ResultByKey(52)], expected) self.assertEqual( result[ResultByKey(48): ResultByKey(48)], [{'key': 48, 'id': 'julia096', 'value': 1}, {'key': 48, 'id': 'julia097', 'value': 1}] ) self.assertEqual(result[ResultByKey(52): ResultByKey(54)], []) result = Result(self.view005) expected = [{'key': ['julia', 97], 'id': 'julia097', 'value': 1}, {'key': ['julia', 98], 'id': 'julia098', 'value': 1}, {'key': ['julia', 99], 'id': 'julia099', 'value': 1}] self.assertEqual(result[['julia', 97]: ['julia', 99]], expected) self.assertEqual( result[ResultByKey(['julia', 97]): ResultByKey(['julia', 99])], expected ) self.assertEqual(result[['julia', 97]: ['ruby', 97]], expected) self.assertEqual( result[['julia', 98]: ['julia', 98]], [{'key': ['julia', 98], 'id': 'julia098', 'value': 1}] ) self.assertEqual(result[['ruby', 'bar']: ['ruby', 'foo']], []) def test_get_item_key_slice_start_greater_than_stop(self): """ Test getting a key slice by using start value greater than stop value. 
The behavior when using CouchDB and newer versions of Cloudant is to return an HTTP 400 Bad Request. """ result = Result(self.view001) with self.assertRaises(HTTPError) as cm: invalid_result = result['foo': 'bar'] self.assertTrue( str(cm.exception).startswith('400 Client Error: Bad Request')) def test_get_item_key_slice_using_start_only(self): """ Test getting a key slice by using the start slice value only. """ result = Result(self.view001) expected = [{'key': 'julia097', 'id': 'julia097', 'value': 1}, {'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result['julia097':], expected) self.assertEqual(result[ResultByKey('julia097'):], expected) self.assertEqual(result['ruby':], []) result = Result(self.view003) expected = [{'key': 47, 'id': 'julia094', 'value': 1}, {'key': 47, 'id': 'julia095', 'value': 1}, {'key': 48, 'id': 'julia096', 'value': 1}, {'key': 48, 'id': 'julia097', 'value': 1}, {'key': 49, 'id': 'julia098', 'value': 1}, {'key': 49, 'id': 'julia099', 'value': 1}] self.assertEqual(result[ResultByKey(47):], expected) self.assertEqual(result[ResultByKey(52):], []) result = Result(self.view005) expected = [{'key': ['julia', 97], 'id': 'julia097', 'value': 1}, {'key': ['julia', 98], 'id': 'julia098', 'value': 1}, {'key': ['julia', 99], 'id': 'julia099', 'value': 1}] self.assertEqual(result[['julia', 97]:], expected) self.assertEqual(result[ResultByKey(['julia', 97]):], expected) self.assertEqual(result[ResultByKey(['ruby', 'foo']):], []) def test_get_item_key_slice_using_stop_only(self): """ Test getting a key slice by using the stop slice value only. 
""" result = Result(self.view001) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual(result[:'julia002'], expected) self.assertEqual(result[:ResultByKey('julia002')], expected) self.assertEqual( result[:'ruby'], [{'key': 'julia{0:03d}'.format(x), 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] ) self.assertEqual(result[:'foo'], []) result = Result(self.view003) expected = [{'key': 0, 'id': 'julia000', 'value': 1}, {'key': 0, 'id': 'julia001', 'value': 1}, {'key': 1, 'id': 'julia002', 'value': 1}, {'key': 1, 'id': 'julia003', 'value': 1}, {'key': 2, 'id': 'julia004', 'value': 1}, {'key': 2, 'id': 'julia005', 'value': 1}] self.assertEqual(result[:ResultByKey(2)], expected) self.assertEqual( result[:ResultByKey(51)], [{'key': x // 2, 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] ) self.assertEqual(result[:ResultByKey(-10)], []) result = Result(self.view005) expected = [{'key': ['julia', 0], 'id': 'julia000', 'value': 1}, {'key': ['julia', 1], 'id': 'julia001', 'value': 1}, {'key': ['julia', 2], 'id': 'julia002', 'value': 1}] self.assertEqual(result[:['julia', 2]], expected) self.assertEqual(result[:ResultByKey(['julia', 2])], expected) self.assertEqual( result[:['julia', 102]], [{'key': ['julia', x], 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] ) self.assertEqual(result[:ResultByKey(['foo', 'bar'])], []) def test_iteration_with_invalid_options(self): """ Test that iteration raises an exception when "limit" is used as option for the result. """ result = Result(self.view001, limit=10) with self.assertRaises(ResultException) as cm: invalid_result = [row for row in result] self.assertEqual(cm.exception.status_code, 103) def test_iteration_invalid_page_size(self): """ Test that iteration raises an exception when and invalid "page_size" is is used as an option for the result. 
""" result = Result(self.view001, page_size=-1) with self.assertRaises(ResultException) as cm: invalid_result = [row for row in result] self.assertEqual(cm.exception.status_code, 104) result = Result(self.view001, page_size='foo') with self.assertRaises(ResultException) as cm: invalid_result = [row for row in result] self.assertEqual(cm.exception.status_code, 104) def test_iteration_using_valid_page_size(self): """ Test that iteration works as expected when "page_size" is provided as an option for the result. """ result = Result(self.view001, endkey='julia004', page_size=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}, {'key': 'julia003', 'id': 'julia003', 'value': 1}, {'key': 'julia004', 'id': 'julia004', 'value': 1}] self.assertEqual([x for x in result], expected) result = Result(self.view001, endkey='julia004', page_size='3') self.assertEqual([x for x in result], expected) result = Result(self.view001, endkey='julia002', page_size=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual([x for x in result], expected) result = Result(self.view001, endkey='julia001', page_size=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}] self.assertEqual([x for x in result], expected) def test_iteration_using_default_page_size(self): """ Test that iteration works as expected when "page_size" is not provided as an option for the result. 
""" result = Result(self.view001, endkey='julia004') expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}, {'key': 'julia003', 'id': 'julia003', 'value': 1}, {'key': 'julia004', 'id': 'julia004', 'value': 1}] self.assertEqual([x for x in result], expected) def test_iteration_no_data(self): """ Test that iteration works as expected when no data matches the result. """ result = Result(self.view001, startkey='ruby') self.assertEqual([x for x in result], []) def test_iteration_integer_keys(self): """ Test that iteration works as expected when keys are integer. """ result = Result(self.view007, page_size=10) self.assertEqual(len([x for x in result]), 100) def test_iteration_pagination(self): """ Test that iteration pagination works as expected. """ class CallMock: expected_calls = [ {'limit': 28}, {'limit': 28, 'startkey': 1, 'startkey_docid': 'julia027'}, {'limit': 28, 'startkey': 1, 'startkey_docid': 'julia054'}, {'limit': 28, 'startkey': 1, 'startkey_docid': 'julia081'}, ] def __init__(self, outer): self.outer = outer self.expected_calls.reverse() def call(self, *args, **kwargs): self.outer.assertEqual(dict(kwargs), self.expected_calls.pop(), 'pagination error') return View.__call__(self.outer.view007, *args, **kwargs) with mock.patch.object(self, 'view007', CallMock(self).call) as _: result = Result(self.view007, page_size=27) expected = [ {'id': 'julia{0:03d}'.format(i), 'key': 1, 'value': 'julia'} for i in range(100) ] self.assertEqual([x for x in result], expected) if __name__ == '__main__': unittest.main()
tests/unit/result_tests.py
import mock """ result module - Unit tests for Result class """ import unittest from cloudant.error import ResultException from cloudant.result import Result, ResultByKey from cloudant.view import View from nose.plugins.attrib import attr from requests.exceptions import HTTPError from .unit_t_db_base import UnitTestDbBase class ResultExceptionTests(unittest.TestCase): """ Ensure ResultException functions as expected. """ def test_raise_without_code(self): """ Ensure that a default exception/code is used if none is provided. """ with self.assertRaises(ResultException) as cm: raise ResultException() self.assertEqual(cm.exception.status_code, 100) def test_raise_using_invalid_code(self): """ Ensure that a default exception/code is used if invalid code is provided. """ with self.assertRaises(ResultException) as cm: raise ResultException('foo') self.assertEqual(cm.exception.status_code, 100) def test_raise_without_args(self): """ Ensure that a default exception/code is used if the message requested by the code provided requires an argument list and none is provided. """ with self.assertRaises(ResultException) as cm: raise ResultException(101) self.assertEqual(cm.exception.status_code, 100) def test_raise_without_insufficient_args(self): """ Ensure that a default exception/code is used if the message requested by the code provided requires an argument list but the one provided does not contain the correct amount of arguments. """ with self.assertRaises(ResultException) as cm: raise ResultException(102, 'foo') self.assertEqual(cm.exception.status_code, 100) def test_raise_with_proper_code_and_args(self): """ Ensure that the requested exception is raised. 
""" with self.assertRaises(ResultException) as cm: raise ResultException(102, 'foo', 'bar') self.assertEqual(cm.exception.status_code, 102) @attr(db=['cloudant','couch']) class ResultTests(UnitTestDbBase): """ Result unit tests """ def setUp(self): """ Set up test attributes """ super(ResultTests, self).setUp() self.db_set_up() self.populate_db_with_documents() self.create_views() def tearDown(self): """ Reset test attributes """ self.db_tear_down() super(ResultTests, self).tearDown() def test_constructor(self): """ Test instantiating a Result """ result = Result( self.ddoc.get_view('view001'), startkey='1', endkey='9', page_size=1000 ) self.assertIsInstance(result, Result) self.assertDictEqual(result.options, {'startkey': '1', 'endkey': '9'}) def test_get_item_by_index(self): """ Test retrieving a result using a value that refers to an index of the result. """ result = Result(self.view001) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result[10], expected) expected = [{'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[99], expected) self.assertEqual(result[100], []) self.assertEqual(result[110], []) def test_get_item_by_index_using_skip_limit(self): """ Test retrieving a result using a value that refers to an index of the result when the result uses skip and limit. 
""" result = Result(self.view001, skip=10, limit=10) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia015', 'id': 'julia015', 'value': 1}] self.assertEqual(result[5], expected) expected = [{'key': 'julia019', 'id': 'julia019', 'value': 1}] self.assertEqual(result[9], expected) self.assertEqual(result[10], []) self.assertEqual(result[20], []) def test_get_item_by_index_using_limit(self): """ Test retrieving a result using a value that refers to an index of the result when the result uses limit. """ result = Result(self.view001, limit=10) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia005', 'id': 'julia005', 'value': 1}] self.assertEqual(result[5], expected) expected = [{'key': 'julia009', 'id': 'julia009', 'value': 1}] self.assertEqual(result[9], expected) self.assertEqual(result[10], []) self.assertEqual(result[20], []) def test_get_item_by_index_using_skip(self): """ Test retrieving a result using a value that refers to an index of the result when the result uses limit. """ result = Result(self.view001, skip=10) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result[0], expected) expected = [{'key': 'julia015', 'id': 'julia015', 'value': 1}] self.assertEqual(result[5], expected) expected = [{'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[89], expected) self.assertEqual(result[90], []) self.assertEqual(result[100], []) def test_get_item_by_negative_index(self): """ Test retrieving a result raises an exception when using a negative index. 
""" result = Result(self.view001) with self.assertRaises(ResultException) as cm: invalid_result = result[-1] self.assertEqual(cm.exception.status_code, 101) def test_get_item_by_key_using_invalid_options(self): """ Since the __getitem__ method uses the 'key' parameter to retrieve the specified data using a Result, any Result that uses any of 'key', 'keys', 'startkey' or 'endkey' as arguments would yield unexpected results. For this reason a check was added to ensure that these options are not used in this case. This test verifies that check. """ options = ('key', 'keys', 'startkey', 'endkey') for option in options: result = Result(self.view001, **{option: 'julia010'}) with self.assertRaises(ResultException) as cm: invalid_result = result['julia000'] self.assertEqual(cm.exception.status_code, 102) def test_get_item_by_key(self): """ Test retrieving a result using value that refers to a key of the result. """ result = Result(self.view001) expected = [{'key': 'julia010', 'id': 'julia010', 'value': 1}] self.assertEqual(result['julia010'], expected) self.assertEqual(result[ResultByKey('julia010')], expected) def test_get_item_by_missing_key(self): """ Test retrieving a result using value that refers to a key that does not exist in the result. """ result = Result(self.view001) self.assertEqual(result['ruby010'], []) self.assertEqual(result[ResultByKey('ruby010')], []) def test_get_item_by_complex_key(self): """ Test retrieving a result using value that refers to a complex key of the result. """ result = Result(self.view005) expected = [{'key': ['julia', 10], 'id': 'julia010', 'value': 1}] self.assertEqual(result[['julia', 10]], expected) self.assertEqual(result[ResultByKey(['julia', 10])], expected) def test_get_item_by_integer_key(self): """ Test retrieving a result using an integer value that refers to a key of the result. 
""" result = Result(self.view003) expected = [{'key': 10, 'id': 'julia020', 'value': 1}, {'key': 10, 'id': 'julia021', 'value': 1}] self.assertEqual(result[ResultByKey(10)], expected) def test_get_item_by_missing_integer_key(self): """ Test retrieving a result using an integer value that refers to a key that does not exist in the result. """ result = Result(self.view003) self.assertEqual(result[ResultByKey(99)], []) def test_get_item_slice_no_start_no_stop(self): """ Test that by not providing a start and a stop slice value, the entire result is returned. """ result = Result(self.view001, limit=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual(result[:], expected) def test_get_all_items(self): """ Test that all results can be retrieved. """ result = Result(self.view001, limit=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual(result.all(), expected) def test_get_item_invalid_index_slice(self): """ Test that when invalid start and stop values are provided in a slice an exception is raised. 
""" result = Result(self.view001) with self.assertRaises(ResultException) as cm: invalid_result = result[-1: 10] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[1: -10] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[-1: -10] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[2: 2] self.assertEqual(cm.exception.status_code, 101) with self.assertRaises(ResultException) as cm: invalid_result = result[5: 2] self.assertEqual(cm.exception.status_code, 101) def test_get_item_index_slice_using_start_stop(self): """ Test getting an index slice by using start and stop slice values. """ result = Result(self.view001) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[98:100], expected) self.assertEqual(result[98:102], expected) self.assertEqual(result[100:102], []) result = Result(self.view001, limit=20) expected = [{'key': 'julia018', 'id': 'julia018', 'value': 1}, {'key': 'julia019', 'id': 'julia019', 'value': 1}] self.assertEqual(result[18:20], expected) self.assertEqual(result[18:22], expected) self.assertEqual(result[20:22], []) result = Result(self.view001, skip=98) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[0:2], expected) self.assertEqual(result[0:4], expected) self.assertEqual(result[2:4], []) result = Result(self.view001, limit=20, skip=20) expected = [{'key': 'julia038', 'id': 'julia038', 'value': 1}, {'key': 'julia039', 'id': 'julia039', 'value': 1}] self.assertEqual(result[18:20], expected) self.assertEqual(result[18:22], expected) self.assertEqual(result[20:22], []) def test_get_item_index_slice_using_start_only(self): """ Test getting an index slice by using start slice value 
only. """ result = Result(self.view001) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[98:], expected) self.assertEqual(result[100:], []) result = Result(self.view001, limit=20) expected = [{'key': 'julia018', 'id': 'julia018', 'value': 1}, {'key': 'julia019', 'id': 'julia019', 'value': 1}] self.assertEqual(result[18:], expected) self.assertEqual(result[20:], []) result = Result(self.view001, skip=98) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[0:], expected) self.assertEqual(result[2:], []) result = Result(self.view001, limit=20, skip=20) expected = [{'key': 'julia038', 'id': 'julia038', 'value': 1}, {'key': 'julia039', 'id': 'julia039', 'value': 1}] self.assertEqual(result[18:], expected) self.assertEqual(result[20:], []) def test_get_item_index_slice_using_stop_only(self): """ Test getting an index slice by using stop slice value only. 
""" result = Result(self.view001) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}] self.assertEqual(result[:2], expected) expected = [{'key': 'julia{0:03d}'.format(x), 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] self.assertEqual(result[:102], expected) result = Result(self.view001, limit=20) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}] self.assertEqual(result[:2], expected) expected = [{'key': 'julia{0:03d}'.format(x), 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(20)] self.assertEqual(result[:22], expected) result = Result(self.view001, skip=98) expected = [{'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result[:2], expected) self.assertEqual(result[:4], expected) result = Result(self.view001, limit=2, skip=20) expected = [{'key': 'julia020', 'id': 'julia020', 'value': 1}, {'key': 'julia021', 'id': 'julia021', 'value': 1}] self.assertEqual(result[:2], expected) self.assertEqual(result[:4], expected) def test_get_item_key_slice_using_invalid_options(self): """ Test that when "key" and/or "keys" are used in the result an exception is raised. 
""" result = Result(self.view001, key='foo') with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) result = Result(self.view001, keys=['foo', 'bar']) with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) result = Result(self.view001, startkey='foo') with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) result = Result(self.view001, endkey='foo') with self.assertRaises(ResultException) as cm: invalid_result = result['foo':] self.assertEqual(cm.exception.status_code, 102) def test_get_item_invalid_key_slice(self): """ Test that when invalid start and stop values are provided in a slice an exception is raised. Specifically this happens when the slice start and stop are different types. """ result = Result(self.view001) with self.assertRaises(ResultException) as cm: invalid_result = result['foo': ['bar', 'baz']] self.assertEqual(cm.exception.status_code, 101) ten = ResultByKey(10) with self.assertRaises(ResultException) as cm: invalid_result = result['foo': ten] self.assertEqual(cm.exception.status_code, 101) def test_get_item_key_slice_using_start_stop(self): """ Test getting a key slice by using start and stop slice values. 
""" result = Result(self.view001) expected = [{'key': 'julia097', 'id': 'julia097', 'value': 1}, {'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result['julia097': 'julia099'], expected) self.assertEqual( result[ResultByKey('julia097'): ResultByKey('julia099')], expected ) self.assertEqual(result['julia097': 'ruby'], expected) self.assertEqual( result['julia098': 'julia098'], [{'key': 'julia098', 'id': 'julia098', 'value': 1}] ) self.assertEqual(result['bar': 'foo'], []) result = Result(self.view003) expected = [{'key': 47, 'id': 'julia094', 'value': 1}, {'key': 47, 'id': 'julia095', 'value': 1}, {'key': 48, 'id': 'julia096', 'value': 1}, {'key': 48, 'id': 'julia097', 'value': 1}, {'key': 49, 'id': 'julia098', 'value': 1}, {'key': 49, 'id': 'julia099', 'value': 1}] self.assertEqual(result[ResultByKey(47): ResultByKey(49)], expected) self.assertEqual(result[ResultByKey(47): ResultByKey(52)], expected) self.assertEqual( result[ResultByKey(48): ResultByKey(48)], [{'key': 48, 'id': 'julia096', 'value': 1}, {'key': 48, 'id': 'julia097', 'value': 1}] ) self.assertEqual(result[ResultByKey(52): ResultByKey(54)], []) result = Result(self.view005) expected = [{'key': ['julia', 97], 'id': 'julia097', 'value': 1}, {'key': ['julia', 98], 'id': 'julia098', 'value': 1}, {'key': ['julia', 99], 'id': 'julia099', 'value': 1}] self.assertEqual(result[['julia', 97]: ['julia', 99]], expected) self.assertEqual( result[ResultByKey(['julia', 97]): ResultByKey(['julia', 99])], expected ) self.assertEqual(result[['julia', 97]: ['ruby', 97]], expected) self.assertEqual( result[['julia', 98]: ['julia', 98]], [{'key': ['julia', 98], 'id': 'julia098', 'value': 1}] ) self.assertEqual(result[['ruby', 'bar']: ['ruby', 'foo']], []) def test_get_item_key_slice_start_greater_than_stop(self): """ Test getting a key slice by using start value greater than stop value. 
The behavior when using CouchDB and newer versions of Cloudant is to return an HTTP 400 Bad Request. """ result = Result(self.view001) with self.assertRaises(HTTPError) as cm: invalid_result = result['foo': 'bar'] self.assertTrue( str(cm.exception).startswith('400 Client Error: Bad Request')) def test_get_item_key_slice_using_start_only(self): """ Test getting a key slice by using the start slice value only. """ result = Result(self.view001) expected = [{'key': 'julia097', 'id': 'julia097', 'value': 1}, {'key': 'julia098', 'id': 'julia098', 'value': 1}, {'key': 'julia099', 'id': 'julia099', 'value': 1}] self.assertEqual(result['julia097':], expected) self.assertEqual(result[ResultByKey('julia097'):], expected) self.assertEqual(result['ruby':], []) result = Result(self.view003) expected = [{'key': 47, 'id': 'julia094', 'value': 1}, {'key': 47, 'id': 'julia095', 'value': 1}, {'key': 48, 'id': 'julia096', 'value': 1}, {'key': 48, 'id': 'julia097', 'value': 1}, {'key': 49, 'id': 'julia098', 'value': 1}, {'key': 49, 'id': 'julia099', 'value': 1}] self.assertEqual(result[ResultByKey(47):], expected) self.assertEqual(result[ResultByKey(52):], []) result = Result(self.view005) expected = [{'key': ['julia', 97], 'id': 'julia097', 'value': 1}, {'key': ['julia', 98], 'id': 'julia098', 'value': 1}, {'key': ['julia', 99], 'id': 'julia099', 'value': 1}] self.assertEqual(result[['julia', 97]:], expected) self.assertEqual(result[ResultByKey(['julia', 97]):], expected) self.assertEqual(result[ResultByKey(['ruby', 'foo']):], []) def test_get_item_key_slice_using_stop_only(self): """ Test getting a key slice by using the stop slice value only. 
""" result = Result(self.view001) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual(result[:'julia002'], expected) self.assertEqual(result[:ResultByKey('julia002')], expected) self.assertEqual( result[:'ruby'], [{'key': 'julia{0:03d}'.format(x), 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] ) self.assertEqual(result[:'foo'], []) result = Result(self.view003) expected = [{'key': 0, 'id': 'julia000', 'value': 1}, {'key': 0, 'id': 'julia001', 'value': 1}, {'key': 1, 'id': 'julia002', 'value': 1}, {'key': 1, 'id': 'julia003', 'value': 1}, {'key': 2, 'id': 'julia004', 'value': 1}, {'key': 2, 'id': 'julia005', 'value': 1}] self.assertEqual(result[:ResultByKey(2)], expected) self.assertEqual( result[:ResultByKey(51)], [{'key': x // 2, 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] ) self.assertEqual(result[:ResultByKey(-10)], []) result = Result(self.view005) expected = [{'key': ['julia', 0], 'id': 'julia000', 'value': 1}, {'key': ['julia', 1], 'id': 'julia001', 'value': 1}, {'key': ['julia', 2], 'id': 'julia002', 'value': 1}] self.assertEqual(result[:['julia', 2]], expected) self.assertEqual(result[:ResultByKey(['julia', 2])], expected) self.assertEqual( result[:['julia', 102]], [{'key': ['julia', x], 'id': 'julia{0:03d}'.format(x), 'value': 1} for x in range(100)] ) self.assertEqual(result[:ResultByKey(['foo', 'bar'])], []) def test_iteration_with_invalid_options(self): """ Test that iteration raises an exception when "limit" is used as option for the result. """ result = Result(self.view001, limit=10) with self.assertRaises(ResultException) as cm: invalid_result = [row for row in result] self.assertEqual(cm.exception.status_code, 103) def test_iteration_invalid_page_size(self): """ Test that iteration raises an exception when and invalid "page_size" is is used as an option for the result. 
""" result = Result(self.view001, page_size=-1) with self.assertRaises(ResultException) as cm: invalid_result = [row for row in result] self.assertEqual(cm.exception.status_code, 104) result = Result(self.view001, page_size='foo') with self.assertRaises(ResultException) as cm: invalid_result = [row for row in result] self.assertEqual(cm.exception.status_code, 104) def test_iteration_using_valid_page_size(self): """ Test that iteration works as expected when "page_size" is provided as an option for the result. """ result = Result(self.view001, endkey='julia004', page_size=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}, {'key': 'julia003', 'id': 'julia003', 'value': 1}, {'key': 'julia004', 'id': 'julia004', 'value': 1}] self.assertEqual([x for x in result], expected) result = Result(self.view001, endkey='julia004', page_size='3') self.assertEqual([x for x in result], expected) result = Result(self.view001, endkey='julia002', page_size=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}] self.assertEqual([x for x in result], expected) result = Result(self.view001, endkey='julia001', page_size=3) expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}] self.assertEqual([x for x in result], expected) def test_iteration_using_default_page_size(self): """ Test that iteration works as expected when "page_size" is not provided as an option for the result. 
""" result = Result(self.view001, endkey='julia004') expected = [{'key': 'julia000', 'id': 'julia000', 'value': 1}, {'key': 'julia001', 'id': 'julia001', 'value': 1}, {'key': 'julia002', 'id': 'julia002', 'value': 1}, {'key': 'julia003', 'id': 'julia003', 'value': 1}, {'key': 'julia004', 'id': 'julia004', 'value': 1}] self.assertEqual([x for x in result], expected) def test_iteration_no_data(self): """ Test that iteration works as expected when no data matches the result. """ result = Result(self.view001, startkey='ruby') self.assertEqual([x for x in result], []) def test_iteration_integer_keys(self): """ Test that iteration works as expected when keys are integer. """ result = Result(self.view007, page_size=10) self.assertEqual(len([x for x in result]), 100) def test_iteration_pagination(self): """ Test that iteration pagination works as expected. """ class CallMock: expected_calls = [ {'limit': 28}, {'limit': 28, 'startkey': 1, 'startkey_docid': 'julia027'}, {'limit': 28, 'startkey': 1, 'startkey_docid': 'julia054'}, {'limit': 28, 'startkey': 1, 'startkey_docid': 'julia081'}, ] def __init__(self, outer): self.outer = outer self.expected_calls.reverse() def call(self, *args, **kwargs): self.outer.assertEqual(dict(kwargs), self.expected_calls.pop(), 'pagination error') return View.__call__(self.outer.view007, *args, **kwargs) with mock.patch.object(self, 'view007', CallMock(self).call) as _: result = Result(self.view007, page_size=27) expected = [ {'id': 'julia{0:03d}'.format(i), 'key': 1, 'value': 'julia'} for i in range(100) ] self.assertEqual([x for x in result], expected) if __name__ == '__main__': unittest.main()
0.832169
0.545104
import os import sys import json from PyQt5 import QtCore, QtWidgets from PyQt5.QtWidgets import * from PyQt5.QtGui import * class WindowClassificationTrainUpdateTransformParam(QtWidgets.QWidget): forward_model_param = QtCore.pyqtSignal(); backward_data_param = QtCore.pyqtSignal(); def __init__(self): super().__init__() self.cfg_setup() self.title = 'Experiment {} - Update Transform Params'.format(self.system["experiment"]) self.left = 10 self.top = 10 self.width = 900 self.height = 600 self.transform_ui_mxnet = []; self.transform_ui_keras = []; self.transform_ui_pytorch = []; self.current_transform = {}; self.current_transform["name"] = ""; self.current_transform["params"] = {}; self.initUI() def cfg_setup(self): with open('base_classification.json') as json_file: self.system = json.load(json_file) def initUI(self): self.setWindowTitle(self.title) self.setGeometry(self.left, self.top, self.width, self.height); # Backward self.b1 = QPushButton('Back', self) self.b1.move(600,550) self.b1.clicked.connect(self.backward) # Backward self.b2 = QPushButton('Next', self) self.b2.move(700,550) self.b2.clicked.connect(self.forward) # Quit self.b3 = QPushButton('Quit', self) self.b3.move(800,550) self.b3.clicked.connect(self.close) self.cb1 = QComboBox(self); self.cb1.move(20, 20); self.cb1.activated.connect(self.select_transform); self.cb2 = QComboBox(self); self.cb2.move(20, 20); self.cb2.activated.connect(self.select_transform); self.cb3 = QComboBox(self); self.cb3.move(20, 20); self.cb3.activated.connect(self.select_transform); self.mxnet_transforms_list = ["select", "apply_random_resized_crop", "apply_center_crop", "apply_color_jitter", "apply_random_horizontal_flip", "apply_random_vertical_flip", "apply_random_lighting", "apply_resize", "apply_normalize"]; self.keras_transforms_list = ["select", "apply_color_jitter", "apply_random_affine", "apply_random_horizontal_flip", "apply_random_vertical_flip", "apply_random_rotation", "apply_mean_subtraction", "apply_normalize"]; 
self.pytorch_transforms_list = ["select", "apply_center_crop", "apply_color_jitter", "apply_random_affine", "apply_random_crop", "apply_random_horizontal_flip", "apply_random_perspective", "apply_random_resized_crop", "apply_random_rotation", "apply_random_vertical_flip", "apply_resize", "apply_normalize"]; if(self.system["backend"] == "Mxnet-1.5.1"): self.cb1.addItems(self.mxnet_transforms_list); self.cb1.show(); self.cb2.hide(); self.cb3.hide(); elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"): self.cb2.addItems(self.keras_transforms_list); self.cb2.show(); self.cb1.hide(); self.cb3.hide(); elif(self.system["backend"] == "Pytorch-1.3.1"): self.cb3.addItems(self.pytorch_transforms_list); self.cb3.show(); self.cb1.hide(); self.cb2.hide(); tmp = []; self.mx_tf1_l1 = QLabel(self); self.mx_tf1_l1.setText("1. Crop Size:"); self.mx_tf1_l1.move(20, 100); tmp.append(self.mx_tf1_l1); self.mx_tf1_e1 = QLineEdit(self) self.mx_tf1_e1.move(150, 100); self.mx_tf1_e1.setText("224"); tmp.append(self.mx_tf1_e1); self.mx_tf1_l2 = QLabel(self); self.mx_tf1_l2.setText("2. Scale limits"); self.mx_tf1_l2.move(20, 150); tmp.append(self.mx_tf1_l2); self.mx_tf1_e2_1 = QLineEdit(self) self.mx_tf1_e2_1.move(150, 150); self.mx_tf1_e2_1.setText("0.08"); tmp.append(self.mx_tf1_e2_1); self.mx_tf1_e2_2 = QLineEdit(self) self.mx_tf1_e2_2.move(300, 150); self.mx_tf1_e2_2.setText("1.0"); tmp.append(self.mx_tf1_e2_2); self.mx_tf1_l3 = QLabel(self); self.mx_tf1_l3.setText("3. Aspect ratio limits"); self.mx_tf1_l3.move(20, 200); tmp.append(self.mx_tf1_l3); self.mx_tf1_e3_1 = QLineEdit(self) self.mx_tf1_e3_1.move(180, 200); self.mx_tf1_e3_1.setText("0.75"); tmp.append(self.mx_tf1_e3_1); self.mx_tf1_e3_2 = QLineEdit(self) self.mx_tf1_e3_2.move(330, 200); self.mx_tf1_e3_2.setText("1.33"); tmp.append(self.mx_tf1_e3_2); self.mx_tf1_l4 = QLabel(self); self.mx_tf1_l4.setText("4. 
Apply at"); self.mx_tf1_l4.move(20, 250); tmp.append(self.mx_tf1_l4); self.mx_tf1_cbox1 = QCheckBox("Training", self) self.mx_tf1_cbox1.setChecked(True) self.mx_tf1_cbox1.move(110, 250); tmp.append(self.mx_tf1_cbox1); self.mx_tf1_cbox2 = QCheckBox("Validation", self) self.mx_tf1_cbox2.setChecked(True) self.mx_tf1_cbox2.move(210, 250); tmp.append(self.mx_tf1_cbox2); self.mx_tf1_cbox3 = QCheckBox("Testing", self) self.mx_tf1_cbox3.setChecked(False) self.mx_tf1_cbox3.move(310, 250); tmp.append(self.mx_tf1_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf2_l1 = QLabel(self); self.mx_tf2_l1.setText("1. Crop Size:"); self.mx_tf2_l1.move(20, 100); tmp.append(self.mx_tf2_l1); self.mx_tf2_e1 = QLineEdit(self) self.mx_tf2_e1.move(150, 100); self.mx_tf2_e1.setText("224"); tmp.append(self.mx_tf2_e1); self.mx_tf2_l2 = QLabel(self); self.mx_tf2_l2.setText("2. Apply at"); self.mx_tf2_l2.move(20, 150); tmp.append(self.mx_tf2_l2); self.mx_tf2_cbox1 = QCheckBox("Training", self) self.mx_tf2_cbox1.setChecked(True) self.mx_tf2_cbox1.move(110, 150); tmp.append(self.mx_tf2_cbox1); self.mx_tf2_cbox2 = QCheckBox("Validation", self) self.mx_tf2_cbox2.setChecked(True) self.mx_tf2_cbox2.move(210, 150); tmp.append(self.mx_tf2_cbox2); self.mx_tf2_cbox3 = QCheckBox("Testing", self) self.mx_tf2_cbox3.setChecked(False) self.mx_tf2_cbox3.move(310, 150); tmp.append(self.mx_tf2_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf3_l1 = QLabel(self); self.mx_tf3_l1.setText("1. Brightness (0-1):"); self.mx_tf3_l1.move(20, 100); tmp.append(self.mx_tf3_l1); self.mx_tf3_e1 = QLineEdit(self) self.mx_tf3_e1.move(150, 100); self.mx_tf3_e1.setText("0.0"); tmp.append(self.mx_tf3_e1); self.mx_tf3_l2 = QLabel(self); self.mx_tf3_l2.setText("2. 
Contrast (0-1):"); self.mx_tf3_l2.move(20, 150); tmp.append(self.mx_tf3_l2); self.mx_tf3_e2 = QLineEdit(self) self.mx_tf3_e2.move(150, 150); self.mx_tf3_e2.setText("0.0"); tmp.append(self.mx_tf3_e2); self.mx_tf3_l3 = QLabel(self); self.mx_tf3_l3.setText("3. Saturation (0-1):"); self.mx_tf3_l3.move(20, 200); tmp.append(self.mx_tf3_l3); self.mx_tf3_e3 = QLineEdit(self) self.mx_tf3_e3.move(150, 200); self.mx_tf3_e3.setText("0.0"); tmp.append(self.mx_tf3_e3); self.mx_tf3_l4 = QLabel(self); self.mx_tf3_l4.setText("4. Hue (0-1):"); self.mx_tf3_l4.move(20, 250); tmp.append(self.mx_tf3_l4); self.mx_tf3_e4 = QLineEdit(self) self.mx_tf3_e4.move(150, 250); self.mx_tf3_e4.setText("0.0"); tmp.append(self.mx_tf3_e4); self.mx_tf3_l5 = QLabel(self); self.mx_tf3_l5.setText("5. Apply at"); self.mx_tf3_l5.move(20, 300); tmp.append(self.mx_tf3_l5); self.mx_tf3_cbox1 = QCheckBox("Training", self) self.mx_tf3_cbox1.setChecked(True) self.mx_tf3_cbox1.move(110, 300); tmp.append(self.mx_tf3_cbox1); self.mx_tf3_cbox2 = QCheckBox("Validation", self) self.mx_tf3_cbox2.setChecked(True) self.mx_tf3_cbox2.move(210, 300); tmp.append(self.mx_tf3_cbox2); self.mx_tf3_cbox3 = QCheckBox("Testing", self) self.mx_tf3_cbox3.setChecked(False) self.mx_tf3_cbox3.move(310, 300); tmp.append(self.mx_tf3_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf4_l1 = QLabel(self); self.mx_tf4_l1.setText("1. Flip probability (0-1):"); self.mx_tf4_l1.move(20, 100); tmp.append(self.mx_tf4_l1); self.mx_tf4_e1 = QLineEdit(self) self.mx_tf4_e1.move(180, 100); self.mx_tf4_e1.setText("0.5"); tmp.append(self.mx_tf4_e1); self.mx_tf4_l2 = QLabel(self); self.mx_tf4_l2.setText("2. 
Apply at"); self.mx_tf4_l2.move(20, 150); tmp.append(self.mx_tf4_l2); self.mx_tf4_cbox1 = QCheckBox("Training", self) self.mx_tf4_cbox1.setChecked(True) self.mx_tf4_cbox1.move(110, 150); tmp.append(self.mx_tf4_cbox1); self.mx_tf4_cbox2 = QCheckBox("Validation", self) self.mx_tf4_cbox2.setChecked(True) self.mx_tf4_cbox2.move(210, 150); tmp.append(self.mx_tf4_cbox2); self.mx_tf4_cbox3 = QCheckBox("Testing", self) self.mx_tf4_cbox3.setChecked(False) self.mx_tf4_cbox3.move(310, 150); tmp.append(self.mx_tf4_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf5_l1 = QLabel(self); self.mx_tf5_l1.setText("1. Flip probability (0-1):"); self.mx_tf5_l1.move(20, 100); tmp.append(self.mx_tf5_l1); self.mx_tf5_e1 = QLineEdit(self) self.mx_tf5_e1.move(180, 100); self.mx_tf5_e1.setText("0.5"); tmp.append(self.mx_tf5_e1); self.mx_tf5_l2 = QLabel(self); self.mx_tf5_l2.setText("2. Apply at"); self.mx_tf5_l2.move(20, 150); tmp.append(self.mx_tf5_l2); self.mx_tf5_cbox1 = QCheckBox("Training", self) self.mx_tf5_cbox1.setChecked(True) self.mx_tf5_cbox1.move(110, 150); tmp.append(self.mx_tf5_cbox1); self.mx_tf5_cbox2 = QCheckBox("Validation", self) self.mx_tf5_cbox2.setChecked(True) self.mx_tf5_cbox2.move(210, 150); tmp.append(self.mx_tf5_cbox2); self.mx_tf5_cbox3 = QCheckBox("Testing", self) self.mx_tf5_cbox3.setChecked(False) self.mx_tf5_cbox3.move(310, 150); tmp.append(self.mx_tf5_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.mx_tf6_l1 = QLabel(self); self.mx_tf6_l1.setText("1. Alpha:"); self.mx_tf6_l1.move(20, 100); tmp.append(self.mx_tf6_l1); self.mx_tf6_e1 = QLineEdit(self) self.mx_tf6_e1.move(120, 100); self.mx_tf6_e1.setText("1.0"); tmp.append(self.mx_tf6_e1); self.mx_tf6_l2 = QLabel(self); self.mx_tf6_l2.setText("2. 
Apply at"); self.mx_tf6_l2.move(20, 150); tmp.append(self.mx_tf6_l2); self.mx_tf6_cbox1 = QCheckBox("Training", self) self.mx_tf6_cbox1.setChecked(True) self.mx_tf6_cbox1.move(110, 150); tmp.append(self.mx_tf6_cbox1); self.mx_tf6_cbox2 = QCheckBox("Validation", self) self.mx_tf6_cbox2.setChecked(True) self.mx_tf6_cbox2.move(210, 150); tmp.append(self.mx_tf6_cbox2); self.mx_tf6_cbox3 = QCheckBox("Testing", self) self.mx_tf6_cbox3.setChecked(False) self.mx_tf6_cbox3.move(310, 150); tmp.append(self.mx_tf6_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.mx_tf7_l1 = QLabel(self); self.mx_tf7_l1.setText("1. New size:"); self.mx_tf7_l1.move(20, 100); tmp.append(self.mx_tf7_l1); self.mx_tf7_e1 = QLineEdit(self) self.mx_tf7_e1.move(120, 100); self.mx_tf7_e1.setText("224"); tmp.append(self.mx_tf7_e1); self.mx_tf7_l2 = QLabel(self); self.mx_tf7_l2.setText("2. Apply at"); self.mx_tf7_l2.move(20, 150); tmp.append(self.mx_tf7_l2); self.mx_tf7_cbox1 = QCheckBox("Training", self) self.mx_tf7_cbox1.setChecked(True) self.mx_tf7_cbox1.move(110, 150); tmp.append(self.mx_tf7_cbox1); self.mx_tf7_cbox2 = QCheckBox("Validation", self) self.mx_tf7_cbox2.setChecked(True) self.mx_tf7_cbox2.move(210, 150); tmp.append(self.mx_tf7_cbox2); self.mx_tf7_cbox3 = QCheckBox("Testing", self) self.mx_tf7_cbox3.setChecked(False) self.mx_tf7_cbox3.move(310, 150); tmp.append(self.mx_tf7_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.mx_tf8_l1 = QLabel(self); self.mx_tf8_l1.setText("1. 
Mean:"); self.mx_tf8_l1.move(20, 100); tmp.append(self.mx_tf8_l1); self.mx_tf8_e1_1 = QLineEdit(self) self.mx_tf8_e1_1.move(120, 100); self.mx_tf8_e1_1.setText("0.485"); self.mx_tf8_e1_1.resize(70, 25); tmp.append(self.mx_tf8_e1_1); self.mx_tf8_e1_2 = QLineEdit(self) self.mx_tf8_e1_2.move(220, 100); self.mx_tf8_e1_2.setText("0.456"); self.mx_tf8_e1_2.resize(70, 25); tmp.append(self.mx_tf8_e1_2); self.mx_tf8_e1_3 = QLineEdit(self) self.mx_tf8_e1_3.move(320, 100); self.mx_tf8_e1_3.setText("0.406"); self.mx_tf8_e1_3.resize(70, 25); tmp.append(self.mx_tf8_e1_3); self.mx_tf8_l2 = QLabel(self); self.mx_tf8_l2.setText("2. Standard deviation:"); self.mx_tf8_l2.move(20, 150); tmp.append(self.mx_tf8_l2); self.mx_tf8_e2_1 = QLineEdit(self) self.mx_tf8_e2_1.move(180, 150); self.mx_tf8_e2_1.setText("0.229"); self.mx_tf8_e2_1.resize(70, 25); tmp.append(self.mx_tf8_e2_1); self.mx_tf8_e2_2 = QLineEdit(self) self.mx_tf8_e2_2.move(280, 150); self.mx_tf8_e2_2.setText("0.224"); self.mx_tf8_e2_2.resize(70, 25); tmp.append(self.mx_tf8_e2_2); self.mx_tf8_e2_3 = QLineEdit(self) self.mx_tf8_e2_3.move(380, 150); self.mx_tf8_e2_3.setText("0.225"); self.mx_tf8_e2_3.resize(70, 25); tmp.append(self.mx_tf8_e2_3); self.mx_tf8_l3 = QLabel(self); self.mx_tf8_l3.setText("3. Apply at"); self.mx_tf8_l3.move(20, 200); tmp.append(self.mx_tf8_l3); self.mx_tf8_cbox1 = QCheckBox("Training", self) self.mx_tf8_cbox1.setChecked(True) self.mx_tf8_cbox1.move(110, 200); tmp.append(self.mx_tf8_cbox1); self.mx_tf8_cbox2 = QCheckBox("Validation", self) self.mx_tf8_cbox2.setChecked(True) self.mx_tf8_cbox2.move(210, 200); tmp.append(self.mx_tf8_cbox2); self.mx_tf8_cbox3 = QCheckBox("Testing", self) self.mx_tf8_cbox3.setChecked(False) self.mx_tf8_cbox3.move(310, 200); tmp.append(self.mx_tf8_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.py_tf1_l1 = QLabel(self); self.py_tf1_l1.setText("1. 
Crop Size:"); self.py_tf1_l1.move(20, 100); tmp.append(self.py_tf1_l1); self.py_tf1_e1 = QLineEdit(self) self.py_tf1_e1.move(150, 100); self.py_tf1_e1.setText("224"); tmp.append(self.py_tf1_e1); self.py_tf1_l2 = QLabel(self); self.py_tf1_l2.setText("2. Apply at"); self.py_tf1_l2.move(20, 150); tmp.append(self.py_tf1_l2); self.py_tf1_cbox1 = QCheckBox("Training", self) self.py_tf1_cbox1.setChecked(True) self.py_tf1_cbox1.move(110, 150); tmp.append(self.py_tf1_cbox1); self.py_tf1_cbox2 = QCheckBox("Validation", self) self.py_tf1_cbox2.setChecked(True) self.py_tf1_cbox2.move(210, 150); tmp.append(self.py_tf1_cbox2); self.py_tf1_cbox3 = QCheckBox("Testing", self) self.py_tf1_cbox3.setChecked(False) self.py_tf1_cbox3.move(310, 150); tmp.append(self.py_tf1_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf2_l1 = QLabel(self); self.py_tf2_l1.setText("1. Brightness (0-1):"); self.py_tf2_l1.move(20, 100); tmp.append(self.py_tf2_l1); self.py_tf2_e1 = QLineEdit(self) self.py_tf2_e1.move(150, 100); self.py_tf2_e1.setText("0.0"); tmp.append(self.py_tf2_e1); self.py_tf2_l2 = QLabel(self); self.py_tf2_l2.setText("2. Contrast (0-1):"); self.py_tf2_l2.move(20, 150); tmp.append(self.py_tf2_l2); self.py_tf2_e2 = QLineEdit(self) self.py_tf2_e2.move(150, 150); self.py_tf2_e2.setText("0.0"); tmp.append(self.py_tf2_e2); self.py_tf2_l3 = QLabel(self); self.py_tf2_l3.setText("3. Saturation (0-1):"); self.py_tf2_l3.move(20, 200); tmp.append(self.py_tf2_l3); self.py_tf2_e3 = QLineEdit(self) self.py_tf2_e3.move(150, 200); self.py_tf2_e3.setText("0.0"); tmp.append(self.py_tf2_e3); self.py_tf2_l4 = QLabel(self); self.py_tf2_l4.setText("4. Hue (0-1):"); self.py_tf2_l4.move(20, 250); tmp.append(self.py_tf2_l4); self.py_tf2_e4 = QLineEdit(self) self.py_tf2_e4.move(150, 250); self.py_tf2_e4.setText("0.0"); tmp.append(self.py_tf2_e4); self.py_tf2_l5 = QLabel(self); self.py_tf2_l5.setText("5. 
Apply at"); self.py_tf2_l5.move(20, 300); tmp.append(self.py_tf2_l5); self.py_tf2_cbox1 = QCheckBox("Training", self) self.py_tf2_cbox1.setChecked(True) self.py_tf2_cbox1.move(110, 300); tmp.append(self.py_tf2_cbox1); self.py_tf2_cbox2 = QCheckBox("Validation", self) self.py_tf2_cbox2.setChecked(True) self.py_tf2_cbox2.move(210, 300); tmp.append(self.py_tf2_cbox2); self.py_tf2_cbox3 = QCheckBox("Testing", self) self.py_tf2_cbox3.setChecked(False) self.py_tf2_cbox3.move(310, 300); tmp.append(self.py_tf2_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf3_l1 = QLabel(self); self.py_tf3_l1.setText("1. Rotation (angle):"); self.py_tf3_l1.move(20, 100); tmp.append(self.py_tf3_l1); self.py_tf3_e1 = QLineEdit(self) self.py_tf3_e1.move(150, 100); self.py_tf3_e1.setText("0.0"); self.py_tf3_e1.resize(50, 25); tmp.append(self.py_tf3_e1); self.py_tf3_l2 = QLabel(self); self.py_tf3_l2.setText("2. Translation (ratio):"); self.py_tf3_l2.move(20, 150); tmp.append(self.py_tf3_l2); self.py_tf3_e2_1 = QLineEdit(self) self.py_tf3_e2_1.move(200, 150); self.py_tf3_e2_1.setText("None"); self.py_tf3_e2_1.resize(50, 25); tmp.append(self.py_tf3_e2_1); self.py_tf3_e2_2 = QLineEdit(self) self.py_tf3_e2_2.move(300, 150); self.py_tf3_e2_2.setText("None"); self.py_tf3_e2_2.resize(50, 25); tmp.append(self.py_tf3_e2_2); self.py_tf3_l3 = QLabel(self); self.py_tf3_l3.setText("3. Scale (ratio):"); self.py_tf3_l3.move(20, 200); tmp.append(self.py_tf3_l3); self.py_tf3_e3_1 = QLineEdit(self) self.py_tf3_e3_1.move(200, 200); self.py_tf3_e3_1.setText("None"); self.py_tf3_e3_1.resize(50, 25); tmp.append(self.py_tf3_e3_1); self.py_tf3_e3_2 = QLineEdit(self) self.py_tf3_e3_2.move(300, 200); self.py_tf3_e3_2.setText("None"); self.py_tf3_e3_2.resize(50, 25); tmp.append(self.py_tf3_e3_2); self.py_tf3_l4 = QLabel(self); self.py_tf3_l4.setText("4. 
Sheer (ratio):"); self.py_tf3_l4.move(20, 250); tmp.append(self.py_tf3_l4); self.py_tf3_e4_1 = QLineEdit(self) self.py_tf3_e4_1.move(200, 250); self.py_tf3_e4_1.setText("None"); self.py_tf3_e4_1.resize(50, 25); tmp.append(self.py_tf3_e4_1); self.py_tf3_e4_2 = QLineEdit(self) self.py_tf3_e4_2.move(300, 250); self.py_tf3_e4_2.setText("None"); self.py_tf3_e4_2.resize(50, 25); tmp.append(self.py_tf3_e4_2); self.py_tf3_l5 = QLabel(self); self.py_tf3_l5.setText("5. Apply at"); self.py_tf3_l5.move(20, 300); tmp.append(self.py_tf3_l5); self.py_tf3_cbox1 = QCheckBox("Training", self) self.py_tf3_cbox1.setChecked(True) self.py_tf3_cbox1.move(110, 300); tmp.append(self.py_tf3_cbox1); self.py_tf3_cbox2 = QCheckBox("Validation", self) self.py_tf3_cbox2.setChecked(True) self.py_tf3_cbox2.move(210, 300); tmp.append(self.py_tf3_cbox2); self.py_tf3_cbox3 = QCheckBox("Testing", self) self.py_tf3_cbox3.setChecked(False) self.py_tf3_cbox3.move(310, 300); tmp.append(self.py_tf3_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf4_l1 = QLabel(self); self.py_tf4_l1.setText("1. Crop Size:"); self.py_tf4_l1.move(20, 100); tmp.append(self.py_tf4_l1); self.py_tf4_e1 = QLineEdit(self) self.py_tf4_e1.move(150, 100); self.py_tf4_e1.setText("224"); tmp.append(self.py_tf4_e1); self.py_tf4_l2 = QLabel(self); self.py_tf4_l2.setText("2. Apply at"); self.py_tf4_l2.move(20, 150); tmp.append(self.py_tf4_l2); self.py_tf4_cbox1 = QCheckBox("Training", self) self.py_tf4_cbox1.setChecked(True) self.py_tf4_cbox1.move(110, 150); tmp.append(self.py_tf4_cbox1); self.py_tf4_cbox2 = QCheckBox("Validation", self) self.py_tf4_cbox2.setChecked(True) self.py_tf4_cbox2.move(210, 150); tmp.append(self.py_tf4_cbox2); self.py_tf4_cbox3 = QCheckBox("Testing", self) self.py_tf4_cbox3.setChecked(False) self.py_tf4_cbox3.move(310, 150); tmp.append(self.py_tf4_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf5_l1 = QLabel(self); self.py_tf5_l1.setText("1. 
Probability (0-1):"); self.py_tf5_l1.move(20, 100); tmp.append(self.py_tf5_l1); self.py_tf5_e1 = QLineEdit(self) self.py_tf5_e1.move(200, 100); self.py_tf5_e1.setText("0.5"); tmp.append(self.py_tf5_e1); self.py_tf5_l2 = QLabel(self); self.py_tf5_l2.setText("2. Apply at"); self.py_tf5_l2.move(20, 150); tmp.append(self.py_tf5_l2); self.py_tf5_cbox1 = QCheckBox("Training", self) self.py_tf5_cbox1.setChecked(True) self.py_tf5_cbox1.move(110, 150); tmp.append(self.py_tf5_cbox1); self.py_tf5_cbox2 = QCheckBox("Validation", self) self.py_tf5_cbox2.setChecked(True) self.py_tf5_cbox2.move(210, 150); tmp.append(self.py_tf5_cbox2); self.py_tf5_cbox3 = QCheckBox("Testing", self) self.py_tf5_cbox3.setChecked(False) self.py_tf5_cbox3.move(310, 150); tmp.append(self.py_tf5_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf6_l1 = QLabel(self); self.py_tf6_l1.setText("1. Distrotion (0-1):"); self.py_tf6_l1.move(20, 100); tmp.append(self.py_tf6_l1); self.py_tf6_e1 = QLineEdit(self) self.py_tf6_e1.move(200, 100); self.py_tf6_e1.setText("0.5"); tmp.append(self.py_tf6_e1); self.py_tf6_l2 = QLabel(self); self.py_tf6_l2.setText("2. Probability (0-1):"); self.py_tf6_l2.move(20, 150); tmp.append(self.py_tf6_l2); self.py_tf6_e2 = QLineEdit(self) self.py_tf6_e2.move(200, 150); self.py_tf6_e2.setText("0.5"); tmp.append(self.py_tf6_e2); self.py_tf6_l3 = QLabel(self); self.py_tf6_l3.setText("3. 
Apply at"); self.py_tf6_l3.move(20, 200); tmp.append(self.py_tf6_l3); self.py_tf6_cbox1 = QCheckBox("Training", self) self.py_tf6_cbox1.setChecked(True) self.py_tf6_cbox1.move(110, 200); tmp.append(self.py_tf6_cbox1); self.py_tf6_cbox2 = QCheckBox("Validation", self) self.py_tf6_cbox2.setChecked(True) self.py_tf6_cbox2.move(210, 200); tmp.append(self.py_tf6_cbox2); self.py_tf6_cbox3 = QCheckBox("Testing", self) self.py_tf6_cbox3.setChecked(False) self.py_tf6_cbox3.move(310, 200); tmp.append(self.py_tf6_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf7_l1 = QLabel(self); self.py_tf7_l1.setText("1. Crop Size:"); self.py_tf7_l1.move(20, 100); tmp.append(self.py_tf7_l1); self.py_tf7_e1 = QLineEdit(self) self.py_tf7_e1.move(150, 100); self.py_tf7_e1.setText("224"); tmp.append(self.py_tf7_e1); self.py_tf7_l2 = QLabel(self); self.py_tf7_l2.setText("2. Scale:"); self.py_tf7_l2.move(20, 150); tmp.append(self.py_tf7_l2); self.py_tf7_e2_1 = QLineEdit(self) self.py_tf7_e2_1.move(120, 150); self.py_tf7_e2_1.setText("0.08"); self.py_tf7_e2_1.resize(50, 25); tmp.append(self.py_tf7_e2_1); self.py_tf7_e2_2 = QLineEdit(self) self.py_tf7_e2_2.move(220, 150); self.py_tf7_e2_2.setText("1.0"); self.py_tf7_e2_2.resize(50, 25); tmp.append(self.py_tf7_e2_2); self.py_tf7_l3 = QLabel(self); self.py_tf7_l3.setText("3. Ratio:"); self.py_tf7_l3.move(20, 200); tmp.append(self.py_tf7_l3); self.py_tf7_e3_1 = QLineEdit(self) self.py_tf7_e3_1.move(120, 200); self.py_tf7_e3_1.setText("0.75"); self.py_tf7_e3_1.resize(50, 25); tmp.append(self.py_tf7_e3_1); self.py_tf7_e3_2 = QLineEdit(self) self.py_tf7_e3_2.move(220, 200); self.py_tf7_e3_2.setText("1.33"); self.py_tf7_e3_2.resize(50, 25); tmp.append(self.py_tf7_e3_2); self.py_tf7_l4 = QLabel(self); self.py_tf7_l4.setText("4. 
Apply at"); self.py_tf7_l4.move(20, 300); tmp.append(self.py_tf7_l4); self.py_tf7_cbox1 = QCheckBox("Training", self) self.py_tf7_cbox1.setChecked(True) self.py_tf7_cbox1.move(110, 300); tmp.append(self.py_tf7_cbox1); self.py_tf7_cbox2 = QCheckBox("Validation", self) self.py_tf7_cbox2.setChecked(True) self.py_tf7_cbox2.move(210, 300); tmp.append(self.py_tf7_cbox2); self.py_tf7_cbox3 = QCheckBox("Testing", self) self.py_tf7_cbox3.setChecked(False) self.py_tf7_cbox3.move(310, 300); tmp.append(self.py_tf7_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf8_l1 = QLabel(self); self.py_tf8_l1.setText("1. Degrees:"); self.py_tf8_l1.move(20, 100); tmp.append(self.py_tf8_l1); self.py_tf8_e1 = QLineEdit(self) self.py_tf8_e1.move(200, 100); self.py_tf8_e1.setText("10"); tmp.append(self.py_tf8_e1); self.py_tf8_l2 = QLabel(self); self.py_tf8_l2.setText("2. Apply at"); self.py_tf8_l2.move(20, 200); tmp.append(self.py_tf8_l2); self.py_tf8_cbox1 = QCheckBox("Training", self) self.py_tf8_cbox1.setChecked(True) self.py_tf8_cbox1.move(110, 200); tmp.append(self.py_tf8_cbox1); self.py_tf8_cbox2 = QCheckBox("Validation", self) self.py_tf8_cbox2.setChecked(True) self.py_tf8_cbox2.move(210, 200); tmp.append(self.py_tf8_cbox2); self.py_tf8_cbox3 = QCheckBox("Testing", self) self.py_tf8_cbox3.setChecked(False) self.py_tf8_cbox3.move(310, 200); tmp.append(self.py_tf8_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf9_l1 = QLabel(self); self.py_tf9_l1.setText("1. Probability (0-1):"); self.py_tf9_l1.move(20, 100); tmp.append(self.py_tf9_l1); self.py_tf9_e1 = QLineEdit(self) self.py_tf9_e1.move(200, 100); self.py_tf9_e1.setText("0.5"); tmp.append(self.py_tf9_e1); self.py_tf9_l2 = QLabel(self); self.py_tf9_l2.setText("2. 
Apply at"); self.py_tf9_l2.move(20, 200); tmp.append(self.py_tf9_l2); self.py_tf9_cbox1 = QCheckBox("Training", self) self.py_tf9_cbox1.setChecked(True) self.py_tf9_cbox1.move(110, 200); tmp.append(self.py_tf9_cbox1); self.py_tf9_cbox2 = QCheckBox("Validation", self) self.py_tf9_cbox2.setChecked(True) self.py_tf9_cbox2.move(210, 200); tmp.append(self.py_tf9_cbox2); self.py_tf9_cbox3 = QCheckBox("Testing", self) self.py_tf9_cbox3.setChecked(False) self.py_tf9_cbox3.move(310, 200); tmp.append(self.py_tf9_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf10_l1 = QLabel(self); self.py_tf10_l1.setText("1. New size:"); self.py_tf10_l1.move(20, 100); tmp.append(self.py_tf10_l1); self.py_tf10_e1 = QLineEdit(self) self.py_tf10_e1.move(200, 100); self.py_tf10_e1.setText("224"); tmp.append(self.py_tf10_e1); self.py_tf10_l2 = QLabel(self); self.py_tf10_l2.setText("2. Apply at"); self.py_tf10_l2.move(20, 200); tmp.append(self.py_tf10_l2); self.py_tf10_cbox1 = QCheckBox("Training", self) self.py_tf10_cbox1.setChecked(True) self.py_tf10_cbox1.move(110, 200); tmp.append(self.py_tf10_cbox1); self.py_tf10_cbox2 = QCheckBox("Validation", self) self.py_tf10_cbox2.setChecked(True) self.py_tf10_cbox2.move(210, 200); tmp.append(self.py_tf10_cbox2); self.py_tf10_cbox3 = QCheckBox("Testing", self) self.py_tf10_cbox3.setChecked(False) self.py_tf10_cbox3.move(310, 200); tmp.append(self.py_tf10_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf11_l1 = QLabel(self); self.py_tf11_l1.setText("1. 
Mean:"); self.py_tf11_l1.move(20, 100); tmp.append(self.py_tf11_l1); self.py_tf11_e1_1 = QLineEdit(self) self.py_tf11_e1_1.move(120, 100); self.py_tf11_e1_1.setText("0.485"); self.py_tf11_e1_1.resize(70, 25); tmp.append(self.py_tf11_e1_1); self.py_tf11_e1_2 = QLineEdit(self) self.py_tf11_e1_2.move(220, 100); self.py_tf11_e1_2.setText("0.456"); self.py_tf11_e1_2.resize(70, 25); tmp.append(self.py_tf11_e1_2); self.py_tf11_e1_3 = QLineEdit(self) self.py_tf11_e1_3.move(320, 100); self.py_tf11_e1_3.setText("0.406"); self.py_tf11_e1_3.resize(70, 25); tmp.append(self.py_tf11_e1_3); self.py_tf11_l2 = QLabel(self); self.py_tf11_l2.setText("2. Standard deviation:"); self.py_tf11_l2.move(20, 150); tmp.append(self.py_tf11_l2); self.py_tf11_e2_1 = QLineEdit(self) self.py_tf11_e2_1.move(180, 150); self.py_tf11_e2_1.setText("0.229"); self.py_tf11_e2_1.resize(70, 25); tmp.append(self.py_tf11_e2_1); self.py_tf11_e2_2 = QLineEdit(self) self.py_tf11_e2_2.move(280, 150); self.py_tf11_e2_2.setText("0.224"); self.py_tf11_e2_2.resize(70, 25); tmp.append(self.py_tf11_e2_2); self.py_tf11_e2_3 = QLineEdit(self) self.py_tf11_e2_3.move(380, 150); self.py_tf11_e2_3.setText("0.225"); self.py_tf11_e2_3.resize(70, 25); tmp.append(self.py_tf11_e2_3); self.py_tf11_l3 = QLabel(self); self.py_tf11_l3.setText("3. Apply at"); self.py_tf11_l3.move(20, 200); tmp.append(self.py_tf11_l3); self.py_tf11_cbox1 = QCheckBox("Training", self) self.py_tf11_cbox1.setChecked(True) self.py_tf11_cbox1.move(110, 200); tmp.append(self.py_tf11_cbox1); self.py_tf11_cbox2 = QCheckBox("Validation", self) self.py_tf11_cbox2.setChecked(True) self.py_tf11_cbox2.move(210, 200); tmp.append(self.py_tf11_cbox2); self.py_tf11_cbox3 = QCheckBox("Testing", self) self.py_tf11_cbox3.setChecked(False) self.py_tf11_cbox3.move(310, 200); tmp.append(self.py_tf11_cbox3); self.transform_ui_pytorch.append(tmp); tmp = []; self.ke_tf1_l1 = QLabel(self); self.ke_tf1_l1.setText("1. 
Brightness (0-1):"); self.ke_tf1_l1.move(20, 100); tmp.append(self.ke_tf1_l1); self.ke_tf1_e1 = QLineEdit(self) self.ke_tf1_e1.move(150, 100); self.ke_tf1_e1.setText("0.0"); tmp.append(self.ke_tf1_e1); self.ke_tf1_l2 = QLabel(self); self.ke_tf1_l2.setText("2. Contrast (0-1):"); self.ke_tf1_l2.move(20, 150); tmp.append(self.ke_tf1_l2); self.ke_tf1_e2 = QLineEdit(self) self.ke_tf1_e2.move(150, 150); self.ke_tf1_e2.setText("0.0"); tmp.append(self.ke_tf1_e2); self.ke_tf1_l3 = QLabel(self); self.ke_tf1_l3.setText("3. Saturation (0-1):"); self.ke_tf1_l3.move(20, 200); tmp.append(self.ke_tf1_l3); self.ke_tf1_e3 = QLineEdit(self) self.ke_tf1_e3.move(150, 200); self.ke_tf1_e3.setText("0.0"); tmp.append(self.ke_tf1_e3); self.ke_tf1_l4 = QLabel(self); self.ke_tf1_l4.setText("4. Hue (0-1):"); self.ke_tf1_l4.move(20, 250); tmp.append(self.ke_tf1_l4); self.ke_tf1_e4 = QLineEdit(self) self.ke_tf1_e4.move(150, 250); self.ke_tf1_e4.setText("0.0"); tmp.append(self.ke_tf1_e4); self.ke_tf1_l5 = QLabel(self); self.ke_tf1_l5.setText("5. Apply at"); self.ke_tf1_l5.move(20, 300); tmp.append(self.ke_tf1_l5); self.ke_tf1_cbox1 = QCheckBox("Training", self) self.ke_tf1_cbox1.setChecked(True) self.ke_tf1_cbox1.move(110, 300); tmp.append(self.ke_tf1_cbox1); self.ke_tf1_cbox2 = QCheckBox("Validation", self) self.ke_tf1_cbox2.setChecked(True) self.ke_tf1_cbox2.move(210, 300); tmp.append(self.ke_tf1_cbox2); self.ke_tf1_cbox3 = QCheckBox("Testing", self) self.ke_tf1_cbox3.setChecked(False) self.ke_tf1_cbox3.move(310, 300); tmp.append(self.ke_tf1_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf2_l1 = QLabel(self); self.ke_tf2_l1.setText("1. Crop Size:"); self.ke_tf2_l1.move(20, 100); tmp.append(self.ke_tf2_l1); self.ke_tf2_e1 = QLineEdit(self) self.ke_tf2_e1.move(150, 100); self.ke_tf2_e1.setText("224"); tmp.append(self.ke_tf2_e1); self.ke_tf2_l2 = QLabel(self); self.ke_tf2_l2.setText("2. 
Scale:"); self.ke_tf2_l2.move(20, 150); tmp.append(self.ke_tf2_l2); self.ke_tf2_e2_1 = QLineEdit(self) self.ke_tf2_e2_1.move(120, 150); self.ke_tf2_e2_1.setText("0.08"); self.ke_tf2_e2_1.resize(50, 25); tmp.append(self.ke_tf2_e2_1); self.ke_tf2_e2_2 = QLineEdit(self) self.ke_tf2_e2_2.move(220, 150); self.ke_tf2_e2_2.setText("1.0"); self.ke_tf2_e2_2.resize(50, 25); tmp.append(self.ke_tf2_e2_2); self.ke_tf2_l3 = QLabel(self); self.ke_tf2_l3.setText("3. Ratio:"); self.ke_tf2_l3.move(20, 200); tmp.append(self.ke_tf2_l3); self.ke_tf2_e3_1 = QLineEdit(self) self.ke_tf2_e3_1.move(120, 200); self.ke_tf2_e3_1.setText("0.75"); self.ke_tf2_e3_1.resize(50, 25); tmp.append(self.ke_tf2_e3_1); self.ke_tf2_e3_2 = QLineEdit(self) self.ke_tf2_e3_2.move(220, 200); self.ke_tf2_e3_2.setText("1.33"); self.ke_tf2_e3_2.resize(50, 25); tmp.append(self.ke_tf2_e3_2); self.ke_tf2_l4 = QLabel(self); self.ke_tf2_l4.setText("4. Apply at"); self.ke_tf2_l4.move(20, 300); tmp.append(self.ke_tf2_l4); self.ke_tf2_cbox1 = QCheckBox("Training", self) self.ke_tf2_cbox1.setChecked(True) self.ke_tf2_cbox1.move(110, 300); tmp.append(self.ke_tf2_cbox1); self.ke_tf2_cbox2 = QCheckBox("Validation", self) self.ke_tf2_cbox2.setChecked(True) self.ke_tf2_cbox2.move(210, 300); tmp.append(self.ke_tf2_cbox2); self.ke_tf2_cbox3 = QCheckBox("Testing", self) self.ke_tf2_cbox3.setChecked(False) self.ke_tf2_cbox3.move(310, 300); tmp.append(self.ke_tf2_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf3_l1 = QLabel(self); self.ke_tf3_l1.setText("1. Probability (0-1):"); self.ke_tf3_l1.move(20, 100); tmp.append(self.ke_tf3_l1); self.ke_tf3_e1 = QLineEdit(self) self.ke_tf3_e1.move(150, 100); self.ke_tf3_e1.setText("0.5"); tmp.append(self.ke_tf3_e1); self.ke_tf3_l2 = QLabel(self); self.ke_tf3_l2.setText("2. 
Apply at"); self.ke_tf3_l2.move(20, 150); tmp.append(self.ke_tf3_l2); self.ke_tf3_cbox1 = QCheckBox("Training", self) self.ke_tf3_cbox1.setChecked(True) self.ke_tf3_cbox1.move(110, 150); tmp.append(self.ke_tf3_cbox1); self.ke_tf3_cbox2 = QCheckBox("Validation", self) self.ke_tf3_cbox2.setChecked(True) self.ke_tf3_cbox2.move(210, 150); tmp.append(self.ke_tf3_cbox2); self.ke_tf3_cbox3 = QCheckBox("Testing", self) self.ke_tf3_cbox3.setChecked(False) self.ke_tf3_cbox3.move(310, 150); tmp.append(self.ke_tf3_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf4_l1 = QLabel(self); self.ke_tf4_l1.setText("1. Probability (0-1):"); self.ke_tf4_l1.move(20, 100); tmp.append(self.ke_tf4_l1); self.ke_tf4_e1 = QLineEdit(self) self.ke_tf4_e1.move(150, 100); self.ke_tf4_e1.setText("0.5"); tmp.append(self.ke_tf4_e1); self.ke_tf4_l2 = QLabel(self); self.ke_tf4_l2.setText("2. Apply at"); self.ke_tf4_l2.move(20, 150); tmp.append(self.ke_tf4_l2); self.ke_tf4_cbox1 = QCheckBox("Training", self) self.ke_tf4_cbox1.setChecked(True) self.ke_tf4_cbox1.move(110, 150); tmp.append(self.ke_tf4_cbox1); self.ke_tf4_cbox2 = QCheckBox("Validation", self) self.ke_tf4_cbox2.setChecked(True) self.ke_tf4_cbox2.move(210, 150); tmp.append(self.ke_tf4_cbox2); self.ke_tf4_cbox3 = QCheckBox("Testing", self) self.ke_tf4_cbox3.setChecked(False) self.ke_tf4_cbox3.move(310, 150); tmp.append(self.ke_tf4_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf5_l1 = QLabel(self); self.ke_tf5_l1.setText("1. Degrees:"); self.ke_tf5_l1.move(20, 100); tmp.append(self.ke_tf5_l1); self.ke_tf5_e1 = QLineEdit(self) self.ke_tf5_e1.move(150, 100); self.ke_tf5_e1.setText("0.5"); tmp.append(self.ke_tf5_e1); self.ke_tf5_l2 = QLabel(self); self.ke_tf5_l2.setText("2. 
Apply at"); self.ke_tf5_l2.move(20, 150); tmp.append(self.ke_tf5_l2); self.ke_tf5_cbox1 = QCheckBox("Training", self) self.ke_tf5_cbox1.setChecked(True) self.ke_tf5_cbox1.move(110, 150); tmp.append(self.ke_tf5_cbox1); self.ke_tf5_cbox2 = QCheckBox("Validation", self) self.ke_tf5_cbox2.setChecked(True) self.ke_tf5_cbox2.move(210, 150); tmp.append(self.ke_tf5_cbox2); self.ke_tf5_cbox3 = QCheckBox("Testing", self) self.ke_tf5_cbox3.setChecked(False) self.ke_tf5_cbox3.move(310, 150); tmp.append(self.ke_tf5_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf6_l1 = QLabel(self); self.ke_tf6_l1.setText("1. Mean:"); self.ke_tf6_l1.move(20, 100); tmp.append(self.ke_tf6_l1); self.ke_tf6_e1_1 = QLineEdit(self) self.ke_tf6_e1_1.move(120, 100); self.ke_tf6_e1_1.setText("0.485"); self.ke_tf6_e1_1.resize(70, 25); tmp.append(self.ke_tf6_e1_1); self.ke_tf6_e1_2 = QLineEdit(self) self.ke_tf6_e1_2.move(220, 100); self.ke_tf6_e1_2.setText("0.456"); self.ke_tf6_e1_2.resize(70, 25); tmp.append(self.ke_tf6_e1_2); self.ke_tf6_e1_3 = QLineEdit(self) self.ke_tf6_e1_3.move(320, 100); self.ke_tf6_e1_3.setText("0.406"); self.ke_tf6_e1_3.resize(70, 25); tmp.append(self.ke_tf6_e1_3); self.ke_tf6_l2 = QLabel(self); self.ke_tf6_l2.setText("2. Apply at"); self.ke_tf6_l2.move(20, 150); tmp.append(self.ke_tf6_l2); self.ke_tf6_cbox1 = QCheckBox("Training", self) self.ke_tf6_cbox1.setChecked(True) self.ke_tf6_cbox1.move(110, 150); tmp.append(self.ke_tf6_cbox1); self.ke_tf6_cbox2 = QCheckBox("Validation", self) self.ke_tf6_cbox2.setChecked(True) self.ke_tf6_cbox2.move(210, 150); tmp.append(self.ke_tf6_cbox2); self.ke_tf6_cbox3 = QCheckBox("Testing", self) self.ke_tf6_cbox3.setChecked(False) self.ke_tf6_cbox3.move(310, 150); tmp.append(self.ke_tf6_cbox3); self.transform_ui_keras.append(tmp); tmp = []; self.ke_tf7_l1 = QLabel(self); self.ke_tf7_l1.setText("1. 
Mean:"); self.ke_tf7_l1.move(20, 100); tmp.append(self.ke_tf7_l1); self.ke_tf7_e1_1 = QLineEdit(self) self.ke_tf7_e1_1.move(120, 100); self.ke_tf7_e1_1.setText("0.485"); self.ke_tf7_e1_1.resize(70, 25); tmp.append(self.ke_tf7_e1_1); self.ke_tf7_e1_2 = QLineEdit(self) self.ke_tf7_e1_2.move(220, 100); self.ke_tf7_e1_2.setText("0.456"); self.ke_tf7_e1_2.resize(70, 25); tmp.append(self.ke_tf7_e1_2); self.ke_tf7_e1_3 = QLineEdit(self) self.ke_tf7_e1_3.move(320, 100); self.ke_tf7_e1_3.setText("0.406"); self.ke_tf7_e1_3.resize(70, 25); tmp.append(self.ke_tf7_e1_3); self.ke_tf7_l2 = QLabel(self); self.ke_tf7_l2.setText("2. Standard deviation:"); self.ke_tf7_l2.move(20, 150); tmp.append(self.ke_tf7_l2); self.ke_tf7_e2_1 = QLineEdit(self) self.ke_tf7_e2_1.move(180, 150); self.ke_tf7_e2_1.setText("0.229"); self.ke_tf7_e2_1.resize(70, 25); tmp.append(self.ke_tf7_e2_1); self.ke_tf7_e2_2 = QLineEdit(self) self.ke_tf7_e2_2.move(280, 150); self.ke_tf7_e2_2.setText("0.224"); self.ke_tf7_e2_2.resize(70, 25); tmp.append(self.ke_tf7_e2_2); self.ke_tf7_e2_3 = QLineEdit(self) self.ke_tf7_e2_3.move(380, 150); self.ke_tf7_e2_3.setText("0.225"); self.ke_tf7_e2_3.resize(70, 25); tmp.append(self.ke_tf7_e2_3); self.ke_tf7_l2 = QLabel(self); self.ke_tf7_l2.setText("3. 
Apply at"); self.ke_tf7_l2.move(20, 200); tmp.append(self.ke_tf7_l2); self.ke_tf7_cbox1 = QCheckBox("Training", self) self.ke_tf7_cbox1.setChecked(True) self.ke_tf7_cbox1.move(110, 200); tmp.append(self.ke_tf7_cbox1); self.ke_tf7_cbox2 = QCheckBox("Validation", self) self.ke_tf7_cbox2.setChecked(True) self.ke_tf7_cbox2.move(210, 200); tmp.append(self.ke_tf7_cbox2); self.ke_tf7_cbox3 = QCheckBox("Testing", self) self.ke_tf7_cbox3.setChecked(False) self.ke_tf7_cbox3.move(310, 200); tmp.append(self.ke_tf7_cbox3); self.transform_ui_keras.append(tmp); self.select_transform(); self.tb1 = QTextEdit(self) self.tb1.move(550, 20) self.tb1.resize(300, 500) wr = ""; for i in range(len(self.system["update"]["transforms"]["value"])): tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4) wr += "{}\n".format(tmp); self.tb1.setText(wr); self.b4 = QPushButton('Add Transform', self) self.b4.move(400,400) self.b4.clicked.connect(self.add_transform) self.b5 = QPushButton('Remove last transform', self) self.b5.move(370,450) self.b5.clicked.connect(self.remove_transform) self.b6 = QPushButton('Clear transforms', self) self.b6.move(400,500) self.b6.clicked.connect(self.clear_transform) def select_transform(self): self.current_transform = {}; self.current_transform["name"] = ""; self.current_transform["params"] = {}; if(self.system["backend"] == "Mxnet-1.5.1"): self.current_transform["name"] = self.cb1.currentText(); index = self.mxnet_transforms_list.index(self.cb1.currentText()); for i in range(len(self.transform_ui_mxnet)): for j in range(len(self.transform_ui_mxnet[i])): if((index-1)==i): self.transform_ui_mxnet[i][j].show(); else: self.transform_ui_mxnet[i][j].hide(); for i in range(len(self.transform_ui_keras)): for j in range(len(self.transform_ui_keras[i])): self.transform_ui_keras[i][j].hide(); for i in range(len(self.transform_ui_pytorch)): for j in range(len(self.transform_ui_pytorch[i])): self.transform_ui_pytorch[i][j].hide(); elif(self.system["backend"] 
== "Keras-2.2.5_Tensorflow-1"): self.current_transform["name"] = self.cb2.currentText(); index = self.keras_transforms_list.index(self.cb2.currentText()); for i in range(len(self.transform_ui_keras)): for j in range(len(self.transform_ui_keras[i])): if((index-1)==i): self.transform_ui_keras[i][j].show(); else: self.transform_ui_keras[i][j].hide(); for i in range(len(self.transform_ui_mxnet)): for j in range(len(self.transform_ui_mxnet[i])): self.transform_ui_mxnet[i][j].hide(); for i in range(len(self.transform_ui_pytorch)): for j in range(len(self.transform_ui_pytorch[i])): self.transform_ui_pytorch[i][j].hide(); elif(self.system["backend"] == "Pytorch-1.3.1"): self.current_transform["name"] = self.cb3.currentText(); index = self.pytorch_transforms_list.index(self.cb3.currentText()); for i in range(len(self.transform_ui_pytorch)): for j in range(len(self.transform_ui_pytorch[i])): if((index-1)==i): self.transform_ui_pytorch[i][j].show(); else: self.transform_ui_pytorch[i][j].hide(); for i in range(len(self.transform_ui_keras)): for j in range(len(self.transform_ui_keras[i])): self.transform_ui_keras[i][j].hide(); for i in range(len(self.transform_ui_mxnet)): for j in range(len(self.transform_ui_mxnet[i])): self.transform_ui_mxnet[i][j].hide(); def add_transform(self): self.system["update"]["transforms"]["active"] = True; if(self.system["backend"] == "Mxnet-1.5.1"): if(self.current_transform["name"] == self.mxnet_transforms_list[1]): self.current_transform["params"]["input_size"] = self.mx_tf1_e1.text(); self.current_transform["params"]["scale"] = [self.mx_tf1_e2_1.text(), self.mx_tf1_e2_2.text()]; self.current_transform["params"]["ratio"] = [self.mx_tf1_e3_1.text(), self.mx_tf1_e3_2.text()]; if(self.mx_tf1_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf1_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] 
= "False"; if(self.mx_tf1_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[2]): self.current_transform["params"]["input_size"] = self.mx_tf2_e1.text(); if(self.mx_tf2_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf2_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf2_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[3]): self.current_transform["params"]["brightness"] = self.mx_tf3_e1.text(); self.current_transform["params"]["contrast"] = self.mx_tf3_e2.text(); self.current_transform["params"]["saturation"] = self.mx_tf3_e3.text(); self.current_transform["params"]["hue"] = self.mx_tf3_e4.text(); if(self.mx_tf3_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf3_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf3_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[4]): self.current_transform["params"]["probability"] = self.mx_tf4_e1.text(); if(self.mx_tf4_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: 
self.current_transform["params"]["train"] = "False"; if(self.mx_tf4_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf4_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[5]): self.current_transform["params"]["probability"] = self.mx_tf5_e1.text(); if(self.mx_tf5_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf5_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf5_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[6]): self.current_transform["params"]["alpha"] = self.mx_tf6_e1.text(); if(self.mx_tf6_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf6_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf6_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[7]): self.current_transform["params"]["input_size"] = self.mx_tf7_e1.text(); if(self.mx_tf7_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = 
"False"; if(self.mx_tf7_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf7_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[8]): self.current_transform["params"]["mean"] = [self.mx_tf8_e1_1.text(), self.mx_tf8_e1_2.text(), self.mx_tf8_e1_3.text()]; self.current_transform["params"]["std"] = [self.mx_tf8_e2_1.text(), self.mx_tf8_e2_2.text(), self.mx_tf8_e2_3.text()]; if(self.mx_tf8_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf8_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf8_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"): if(self.current_transform["name"] == self.keras_transforms_list[1]): self.current_transform["params"]["brightness"] = self.ke_tf1_e1.text(); self.current_transform["params"]["contrast"] = self.ke_tf1_e2.text(); self.current_transform["params"]["saturation"] = self.ke_tf1_e3.text(); self.current_transform["params"]["hue"] = self.ke_tf1_e4.text(); if(self.ke_tf1_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf1_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf1_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: 
self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[2]): self.current_transform["params"]["input_size"] = self.ke_tf2_e1.text(); self.current_transform["params"]["scale"] = [self.ke_tf2_e2_1.text(), self.ke_tf2_e2_2.text()]; self.current_transform["params"]["ratio"] = [self.ke_tf2_e3_1.text(), self.ke_tf2_e3_2.text()]; if(self.ke_tf2_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf2_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf2_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[3]): self.current_transform["params"]["probability"] = self.ke_tf3_e1.text(); if(self.ke_tf3_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf3_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf3_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[4]): self.current_transform["params"]["probability"] = self.ke_tf4_e1.text(); if(self.ke_tf4_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf4_cbox2.isChecked()): self.current_transform["params"]["val"] = 
"True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf4_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[5]): self.current_transform["params"]["degrees"] = self.ke_tf5_e1.text(); if(self.ke_tf5_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf5_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf5_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[6]): self.current_transform["params"]["mean"] = [self.ke_tf6_e1_1.text(), self.ke_tf6_e1_2.text(), self.ke_tf6_e1_3.text()]; if(self.ke_tf6_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf6_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf6_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[7]): self.current_transform["params"]["mean"] = [self.ke_tf7_e1_1.text(), self.ke_tf7_e1_2.text(), self.ke_tf7_e1_3.text()]; self.current_transform["params"]["std"] = [self.ke_tf7_e2_1.text(), self.ke_tf7_e2_2.text(), self.ke_tf7_e2_3.text()]; if(self.ke_tf7_cbox1.isChecked()): 
self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf7_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf7_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.system["backend"] == "Pytorch-1.3.1"): if(self.current_transform["name"] == self.pytorch_transforms_list[1]): self.current_transform["params"]["input_size"] = self.py_tf1_e1.text(); if(self.py_tf1_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf1_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf1_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[2]): self.current_transform["params"]["brightness"] = self.py_tf2_e1.text(); self.current_transform["params"]["contrast"] = self.py_tf2_e2.text(); self.current_transform["params"]["saturation"] = self.py_tf2_e3.text(); self.current_transform["params"]["hue"] = self.py_tf2_e4.text(); if(self.py_tf2_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf2_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf2_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; 
self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[3]): self.current_transform["params"]["degrees"] = self.py_tf3_e1.text(); self.current_transform["params"]["translate"] = [self.py_tf3_e2_1.text(), self.py_tf3_e2_2.text()]; self.current_transform["params"]["scale"] = [self.py_tf3_e3_1.text(), self.py_tf3_e3_2.text()]; self.current_transform["params"]["sheer"] = [self.py_tf3_e4_1.text(), self.py_tf3_e4_2.text()]; if(self.py_tf3_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf3_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf3_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[4]): self.current_transform["params"]["input_size"] = self.py_tf4_e1.text(); if(self.py_tf4_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf4_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf4_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[5]): self.current_transform["params"]["probability"] = self.py_tf5_e1.text(); if(self.py_tf5_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; 
if(self.py_tf5_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf5_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[6]): self.current_transform["params"]["distortion_scale"] = self.py_tf6_e1.text(); self.current_transform["params"]["probability"] = self.py_tf6_e2.text(); if(self.py_tf6_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf6_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf6_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[7]): self.current_transform["params"]["input_size"] = self.py_tf7_e1.text(); self.current_transform["params"]["scale"] = [self.py_tf7_e2_1.text(), self.py_tf7_e2_2.text()]; self.current_transform["params"]["ratio"] = [self.py_tf7_e3_1.text(), self.py_tf7_e3_2.text()]; if(self.py_tf7_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf7_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf7_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == 
self.pytorch_transforms_list[8]): self.current_transform["params"]["degrees"] = self.py_tf8_e1.text(); if(self.py_tf8_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf8_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf8_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[9]): self.current_transform["params"]["probability"] = self.py_tf9_e1.text(); if(self.py_tf9_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf9_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf9_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[10]): self.current_transform["params"]["input_size"] = self.py_tf10_e1.text(); if(self.py_tf10_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf10_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf10_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[11]): 
                # Tail of the backend-specific elif chain (the method's def is above
                # this chunk): PyTorch "apply_normalize" — collect per-channel
                # mean/std from the three line-edit triplets.
                self.current_transform["params"]["mean"] = [self.py_tf11_e1_1.text(), self.py_tf11_e1_2.text(), self.py_tf11_e1_3.text()];
                self.current_transform["params"]["std"] = [self.py_tf11_e2_1.text(), self.py_tf11_e2_2.text(), self.py_tf11_e2_3.text()];
                # Checkbox states are stored as the strings "True"/"False" rather
                # than booleans (the config is JSON-dumped in forward()/backward()).
                if(self.py_tf11_cbox1.isChecked()):
                    self.current_transform["params"]["train"] = "True";
                else:
                    self.current_transform["params"]["train"] = "False";
                if(self.py_tf11_cbox2.isChecked()):
                    self.current_transform["params"]["val"] = "True";
                else:
                    self.current_transform["params"]["val"] = "False";
                if(self.py_tf11_cbox3.isChecked()):
                    self.current_transform["params"]["test"] = "True";
                else:
                    self.current_transform["params"]["test"] = "False";
                self.system["update"]["transforms"]["value"].append(self.current_transform);
        # Refresh the preview text box with the whole transform queue,
        # pretty-printed one JSON object per queued transform.
        wr = "";
        for i in range(len(self.system["update"]["transforms"]["value"])):
            tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4)
            wr += "{}\n".format(tmp);
        self.tb1.setText(wr);

    def remove_transform(self):
        # Remove the most recently queued transform; if the queue is already
        # empty, mark the transforms section inactive instead.
        if(len(self.system["update"]["transforms"]["value"]) > 0):
            del self.system["update"]["transforms"]["value"][-1]
        else:
            self.system["update"]["transforms"]["active"] = False;
        # Re-render the (possibly shortened) queue in the preview box.
        wr = "";
        for i in range(len(self.system["update"]["transforms"]["value"])):
            tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4)
            wr += "{}\n".format(tmp);
        self.tb1.setText(wr);
        # Deactivate the section once the last transform has been removed.
        if(len(self.system["update"]["transforms"]["value"]) == 0):
            self.system["update"]["transforms"]["active"] = False;

    def clear_transform(self):
        # Drop every queued transform and deactivate the transforms section.
        self.system["update"]["transforms"]["value"] = [];
        self.system["update"]["transforms"]["active"] = False;
        # The loop below iterates the now-empty list, so this just blanks
        # the preview box.
        wr = "";
        for i in range(len(self.system["update"]["transforms"]["value"])):
            tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4)
            wr += "{}\n".format(tmp);
        self.tb1.setText(wr);

    def forward(self):
        # Persist the shared config, then signal the wizard to advance to the
        # model-parameter screen.
        with open('base_classification.json', 'w') as outfile:
            json.dump(self.system, outfile)
        self.forward_model_param.emit();

    def backward(self):
        # Persist the shared config, then signal the wizard to go back to the
        # data-parameter screen.
        with open('base_classification.json', 'w') as outfile:
            json.dump(self.system, outfile)
        self.backward_data_param.emit();

'''
app = QApplication(sys.argv)
screen = WindowClassificationTrainUpdateTransformParam()
screen.show()
sys.exit(app.exec_())
'''
# classification/training/update/WindowClassificationTrainUpdateTransformParam.py
import os
import sys
import json

from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *


class WindowClassificationTrainUpdateTransformParam(QtWidgets.QWidget):
    # Qt signals used by the parent wizard to navigate between screens.
    forward_model_param = QtCore.pyqtSignal();
    backward_data_param = QtCore.pyqtSignal();

    def __init__(self):
        """Load the experiment config and build the transform-parameter UI."""
        super().__init__()
        self.cfg_setup()
        self.title = 'Experiment {} - Update Transform Params'.format(self.system["experiment"])
        # Window geometry.
        self.left = 10
        self.top = 10
        self.width = 900
        self.height = 600
        # One widget-group list per backend; each entry holds the widgets of a
        # single transform's parameter form.
        self.transform_ui_mxnet = [];
        self.transform_ui_keras = [];
        self.transform_ui_pytorch = [];
        # Transform currently being edited: {"name": ..., "params": {...}}.
        self.current_transform = {};
        self.current_transform["name"] = "";
        self.current_transform["params"] = {};
        self.initUI()

    def cfg_setup(self):
        """Read the shared experiment state from base_classification.json."""
        with open('base_classification.json') as json_file:
            self.system = json.load(json_file)

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height);
        # Back button
        self.b1 = QPushButton('Back', self)
        self.b1.move(600,550)
        self.b1.clicked.connect(self.backward)
        # Next button
        self.b2 = QPushButton('Next', self)
        self.b2.move(700,550)
        self.b2.clicked.connect(self.forward)
        # Quit button
        self.b3 = QPushButton('Quit', self)
        self.b3.move(800,550)
        self.b3.clicked.connect(self.close)
        # One transform-selector combo box per backend; all occupy the same
        # position and only the active backend's box is shown (see the
        # backend checks further below).
        self.cb1 = QComboBox(self);
        self.cb1.move(20, 20);
        self.cb1.activated.connect(self.select_transform);
        self.cb2 = QComboBox(self);
        self.cb2.move(20, 20);
        self.cb2.activated.connect(self.select_transform);
        self.cb3 = QComboBox(self);
        self.cb3.move(20, 20);
        self.cb3.activated.connect(self.select_transform);
        # Per-backend transform menus; index 0 ("select") is the placeholder.
        self.mxnet_transforms_list = ["select", "apply_random_resized_crop", "apply_center_crop", "apply_color_jitter", "apply_random_horizontal_flip", "apply_random_vertical_flip", "apply_random_lighting", "apply_resize", "apply_normalize"];
        self.keras_transforms_list = ["select", "apply_color_jitter", "apply_random_affine", "apply_random_horizontal_flip", "apply_random_vertical_flip", "apply_random_rotation", "apply_mean_subtraction", "apply_normalize"];
self.pytorch_transforms_list = ["select", "apply_center_crop", "apply_color_jitter", "apply_random_affine", "apply_random_crop", "apply_random_horizontal_flip", "apply_random_perspective", "apply_random_resized_crop", "apply_random_rotation", "apply_random_vertical_flip", "apply_resize", "apply_normalize"]; if(self.system["backend"] == "Mxnet-1.5.1"): self.cb1.addItems(self.mxnet_transforms_list); self.cb1.show(); self.cb2.hide(); self.cb3.hide(); elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"): self.cb2.addItems(self.keras_transforms_list); self.cb2.show(); self.cb1.hide(); self.cb3.hide(); elif(self.system["backend"] == "Pytorch-1.3.1"): self.cb3.addItems(self.pytorch_transforms_list); self.cb3.show(); self.cb1.hide(); self.cb2.hide(); tmp = []; self.mx_tf1_l1 = QLabel(self); self.mx_tf1_l1.setText("1. Crop Size:"); self.mx_tf1_l1.move(20, 100); tmp.append(self.mx_tf1_l1); self.mx_tf1_e1 = QLineEdit(self) self.mx_tf1_e1.move(150, 100); self.mx_tf1_e1.setText("224"); tmp.append(self.mx_tf1_e1); self.mx_tf1_l2 = QLabel(self); self.mx_tf1_l2.setText("2. Scale limits"); self.mx_tf1_l2.move(20, 150); tmp.append(self.mx_tf1_l2); self.mx_tf1_e2_1 = QLineEdit(self) self.mx_tf1_e2_1.move(150, 150); self.mx_tf1_e2_1.setText("0.08"); tmp.append(self.mx_tf1_e2_1); self.mx_tf1_e2_2 = QLineEdit(self) self.mx_tf1_e2_2.move(300, 150); self.mx_tf1_e2_2.setText("1.0"); tmp.append(self.mx_tf1_e2_2); self.mx_tf1_l3 = QLabel(self); self.mx_tf1_l3.setText("3. Aspect ratio limits"); self.mx_tf1_l3.move(20, 200); tmp.append(self.mx_tf1_l3); self.mx_tf1_e3_1 = QLineEdit(self) self.mx_tf1_e3_1.move(180, 200); self.mx_tf1_e3_1.setText("0.75"); tmp.append(self.mx_tf1_e3_1); self.mx_tf1_e3_2 = QLineEdit(self) self.mx_tf1_e3_2.move(330, 200); self.mx_tf1_e3_2.setText("1.33"); tmp.append(self.mx_tf1_e3_2); self.mx_tf1_l4 = QLabel(self); self.mx_tf1_l4.setText("4. 
Apply at"); self.mx_tf1_l4.move(20, 250); tmp.append(self.mx_tf1_l4); self.mx_tf1_cbox1 = QCheckBox("Training", self) self.mx_tf1_cbox1.setChecked(True) self.mx_tf1_cbox1.move(110, 250); tmp.append(self.mx_tf1_cbox1); self.mx_tf1_cbox2 = QCheckBox("Validation", self) self.mx_tf1_cbox2.setChecked(True) self.mx_tf1_cbox2.move(210, 250); tmp.append(self.mx_tf1_cbox2); self.mx_tf1_cbox3 = QCheckBox("Testing", self) self.mx_tf1_cbox3.setChecked(False) self.mx_tf1_cbox3.move(310, 250); tmp.append(self.mx_tf1_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf2_l1 = QLabel(self); self.mx_tf2_l1.setText("1. Crop Size:"); self.mx_tf2_l1.move(20, 100); tmp.append(self.mx_tf2_l1); self.mx_tf2_e1 = QLineEdit(self) self.mx_tf2_e1.move(150, 100); self.mx_tf2_e1.setText("224"); tmp.append(self.mx_tf2_e1); self.mx_tf2_l2 = QLabel(self); self.mx_tf2_l2.setText("2. Apply at"); self.mx_tf2_l2.move(20, 150); tmp.append(self.mx_tf2_l2); self.mx_tf2_cbox1 = QCheckBox("Training", self) self.mx_tf2_cbox1.setChecked(True) self.mx_tf2_cbox1.move(110, 150); tmp.append(self.mx_tf2_cbox1); self.mx_tf2_cbox2 = QCheckBox("Validation", self) self.mx_tf2_cbox2.setChecked(True) self.mx_tf2_cbox2.move(210, 150); tmp.append(self.mx_tf2_cbox2); self.mx_tf2_cbox3 = QCheckBox("Testing", self) self.mx_tf2_cbox3.setChecked(False) self.mx_tf2_cbox3.move(310, 150); tmp.append(self.mx_tf2_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf3_l1 = QLabel(self); self.mx_tf3_l1.setText("1. Brightness (0-1):"); self.mx_tf3_l1.move(20, 100); tmp.append(self.mx_tf3_l1); self.mx_tf3_e1 = QLineEdit(self) self.mx_tf3_e1.move(150, 100); self.mx_tf3_e1.setText("0.0"); tmp.append(self.mx_tf3_e1); self.mx_tf3_l2 = QLabel(self); self.mx_tf3_l2.setText("2. 
Contrast (0-1):"); self.mx_tf3_l2.move(20, 150); tmp.append(self.mx_tf3_l2); self.mx_tf3_e2 = QLineEdit(self) self.mx_tf3_e2.move(150, 150); self.mx_tf3_e2.setText("0.0"); tmp.append(self.mx_tf3_e2); self.mx_tf3_l3 = QLabel(self); self.mx_tf3_l3.setText("3. Saturation (0-1):"); self.mx_tf3_l3.move(20, 200); tmp.append(self.mx_tf3_l3); self.mx_tf3_e3 = QLineEdit(self) self.mx_tf3_e3.move(150, 200); self.mx_tf3_e3.setText("0.0"); tmp.append(self.mx_tf3_e3); self.mx_tf3_l4 = QLabel(self); self.mx_tf3_l4.setText("4. Hue (0-1):"); self.mx_tf3_l4.move(20, 250); tmp.append(self.mx_tf3_l4); self.mx_tf3_e4 = QLineEdit(self) self.mx_tf3_e4.move(150, 250); self.mx_tf3_e4.setText("0.0"); tmp.append(self.mx_tf3_e4); self.mx_tf3_l5 = QLabel(self); self.mx_tf3_l5.setText("5. Apply at"); self.mx_tf3_l5.move(20, 300); tmp.append(self.mx_tf3_l5); self.mx_tf3_cbox1 = QCheckBox("Training", self) self.mx_tf3_cbox1.setChecked(True) self.mx_tf3_cbox1.move(110, 300); tmp.append(self.mx_tf3_cbox1); self.mx_tf3_cbox2 = QCheckBox("Validation", self) self.mx_tf3_cbox2.setChecked(True) self.mx_tf3_cbox2.move(210, 300); tmp.append(self.mx_tf3_cbox2); self.mx_tf3_cbox3 = QCheckBox("Testing", self) self.mx_tf3_cbox3.setChecked(False) self.mx_tf3_cbox3.move(310, 300); tmp.append(self.mx_tf3_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf4_l1 = QLabel(self); self.mx_tf4_l1.setText("1. Flip probability (0-1):"); self.mx_tf4_l1.move(20, 100); tmp.append(self.mx_tf4_l1); self.mx_tf4_e1 = QLineEdit(self) self.mx_tf4_e1.move(180, 100); self.mx_tf4_e1.setText("0.5"); tmp.append(self.mx_tf4_e1); self.mx_tf4_l2 = QLabel(self); self.mx_tf4_l2.setText("2. 
Apply at"); self.mx_tf4_l2.move(20, 150); tmp.append(self.mx_tf4_l2); self.mx_tf4_cbox1 = QCheckBox("Training", self) self.mx_tf4_cbox1.setChecked(True) self.mx_tf4_cbox1.move(110, 150); tmp.append(self.mx_tf4_cbox1); self.mx_tf4_cbox2 = QCheckBox("Validation", self) self.mx_tf4_cbox2.setChecked(True) self.mx_tf4_cbox2.move(210, 150); tmp.append(self.mx_tf4_cbox2); self.mx_tf4_cbox3 = QCheckBox("Testing", self) self.mx_tf4_cbox3.setChecked(False) self.mx_tf4_cbox3.move(310, 150); tmp.append(self.mx_tf4_cbox3); self.transform_ui_mxnet.append(tmp) tmp = []; self.mx_tf5_l1 = QLabel(self); self.mx_tf5_l1.setText("1. Flip probability (0-1):"); self.mx_tf5_l1.move(20, 100); tmp.append(self.mx_tf5_l1); self.mx_tf5_e1 = QLineEdit(self) self.mx_tf5_e1.move(180, 100); self.mx_tf5_e1.setText("0.5"); tmp.append(self.mx_tf5_e1); self.mx_tf5_l2 = QLabel(self); self.mx_tf5_l2.setText("2. Apply at"); self.mx_tf5_l2.move(20, 150); tmp.append(self.mx_tf5_l2); self.mx_tf5_cbox1 = QCheckBox("Training", self) self.mx_tf5_cbox1.setChecked(True) self.mx_tf5_cbox1.move(110, 150); tmp.append(self.mx_tf5_cbox1); self.mx_tf5_cbox2 = QCheckBox("Validation", self) self.mx_tf5_cbox2.setChecked(True) self.mx_tf5_cbox2.move(210, 150); tmp.append(self.mx_tf5_cbox2); self.mx_tf5_cbox3 = QCheckBox("Testing", self) self.mx_tf5_cbox3.setChecked(False) self.mx_tf5_cbox3.move(310, 150); tmp.append(self.mx_tf5_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.mx_tf6_l1 = QLabel(self); self.mx_tf6_l1.setText("1. Alpha:"); self.mx_tf6_l1.move(20, 100); tmp.append(self.mx_tf6_l1); self.mx_tf6_e1 = QLineEdit(self) self.mx_tf6_e1.move(120, 100); self.mx_tf6_e1.setText("1.0"); tmp.append(self.mx_tf6_e1); self.mx_tf6_l2 = QLabel(self); self.mx_tf6_l2.setText("2. 
Apply at"); self.mx_tf6_l2.move(20, 150); tmp.append(self.mx_tf6_l2); self.mx_tf6_cbox1 = QCheckBox("Training", self) self.mx_tf6_cbox1.setChecked(True) self.mx_tf6_cbox1.move(110, 150); tmp.append(self.mx_tf6_cbox1); self.mx_tf6_cbox2 = QCheckBox("Validation", self) self.mx_tf6_cbox2.setChecked(True) self.mx_tf6_cbox2.move(210, 150); tmp.append(self.mx_tf6_cbox2); self.mx_tf6_cbox3 = QCheckBox("Testing", self) self.mx_tf6_cbox3.setChecked(False) self.mx_tf6_cbox3.move(310, 150); tmp.append(self.mx_tf6_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.mx_tf7_l1 = QLabel(self); self.mx_tf7_l1.setText("1. New size:"); self.mx_tf7_l1.move(20, 100); tmp.append(self.mx_tf7_l1); self.mx_tf7_e1 = QLineEdit(self) self.mx_tf7_e1.move(120, 100); self.mx_tf7_e1.setText("224"); tmp.append(self.mx_tf7_e1); self.mx_tf7_l2 = QLabel(self); self.mx_tf7_l2.setText("2. Apply at"); self.mx_tf7_l2.move(20, 150); tmp.append(self.mx_tf7_l2); self.mx_tf7_cbox1 = QCheckBox("Training", self) self.mx_tf7_cbox1.setChecked(True) self.mx_tf7_cbox1.move(110, 150); tmp.append(self.mx_tf7_cbox1); self.mx_tf7_cbox2 = QCheckBox("Validation", self) self.mx_tf7_cbox2.setChecked(True) self.mx_tf7_cbox2.move(210, 150); tmp.append(self.mx_tf7_cbox2); self.mx_tf7_cbox3 = QCheckBox("Testing", self) self.mx_tf7_cbox3.setChecked(False) self.mx_tf7_cbox3.move(310, 150); tmp.append(self.mx_tf7_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.mx_tf8_l1 = QLabel(self); self.mx_tf8_l1.setText("1. 
Mean:"); self.mx_tf8_l1.move(20, 100); tmp.append(self.mx_tf8_l1); self.mx_tf8_e1_1 = QLineEdit(self) self.mx_tf8_e1_1.move(120, 100); self.mx_tf8_e1_1.setText("0.485"); self.mx_tf8_e1_1.resize(70, 25); tmp.append(self.mx_tf8_e1_1); self.mx_tf8_e1_2 = QLineEdit(self) self.mx_tf8_e1_2.move(220, 100); self.mx_tf8_e1_2.setText("0.456"); self.mx_tf8_e1_2.resize(70, 25); tmp.append(self.mx_tf8_e1_2); self.mx_tf8_e1_3 = QLineEdit(self) self.mx_tf8_e1_3.move(320, 100); self.mx_tf8_e1_3.setText("0.406"); self.mx_tf8_e1_3.resize(70, 25); tmp.append(self.mx_tf8_e1_3); self.mx_tf8_l2 = QLabel(self); self.mx_tf8_l2.setText("2. Standard deviation:"); self.mx_tf8_l2.move(20, 150); tmp.append(self.mx_tf8_l2); self.mx_tf8_e2_1 = QLineEdit(self) self.mx_tf8_e2_1.move(180, 150); self.mx_tf8_e2_1.setText("0.229"); self.mx_tf8_e2_1.resize(70, 25); tmp.append(self.mx_tf8_e2_1); self.mx_tf8_e2_2 = QLineEdit(self) self.mx_tf8_e2_2.move(280, 150); self.mx_tf8_e2_2.setText("0.224"); self.mx_tf8_e2_2.resize(70, 25); tmp.append(self.mx_tf8_e2_2); self.mx_tf8_e2_3 = QLineEdit(self) self.mx_tf8_e2_3.move(380, 150); self.mx_tf8_e2_3.setText("0.225"); self.mx_tf8_e2_3.resize(70, 25); tmp.append(self.mx_tf8_e2_3); self.mx_tf8_l3 = QLabel(self); self.mx_tf8_l3.setText("3. Apply at"); self.mx_tf8_l3.move(20, 200); tmp.append(self.mx_tf8_l3); self.mx_tf8_cbox1 = QCheckBox("Training", self) self.mx_tf8_cbox1.setChecked(True) self.mx_tf8_cbox1.move(110, 200); tmp.append(self.mx_tf8_cbox1); self.mx_tf8_cbox2 = QCheckBox("Validation", self) self.mx_tf8_cbox2.setChecked(True) self.mx_tf8_cbox2.move(210, 200); tmp.append(self.mx_tf8_cbox2); self.mx_tf8_cbox3 = QCheckBox("Testing", self) self.mx_tf8_cbox3.setChecked(False) self.mx_tf8_cbox3.move(310, 200); tmp.append(self.mx_tf8_cbox3); self.transform_ui_mxnet.append(tmp); tmp = []; self.py_tf1_l1 = QLabel(self); self.py_tf1_l1.setText("1. 
Crop Size:"); self.py_tf1_l1.move(20, 100); tmp.append(self.py_tf1_l1); self.py_tf1_e1 = QLineEdit(self) self.py_tf1_e1.move(150, 100); self.py_tf1_e1.setText("224"); tmp.append(self.py_tf1_e1); self.py_tf1_l2 = QLabel(self); self.py_tf1_l2.setText("2. Apply at"); self.py_tf1_l2.move(20, 150); tmp.append(self.py_tf1_l2); self.py_tf1_cbox1 = QCheckBox("Training", self) self.py_tf1_cbox1.setChecked(True) self.py_tf1_cbox1.move(110, 150); tmp.append(self.py_tf1_cbox1); self.py_tf1_cbox2 = QCheckBox("Validation", self) self.py_tf1_cbox2.setChecked(True) self.py_tf1_cbox2.move(210, 150); tmp.append(self.py_tf1_cbox2); self.py_tf1_cbox3 = QCheckBox("Testing", self) self.py_tf1_cbox3.setChecked(False) self.py_tf1_cbox3.move(310, 150); tmp.append(self.py_tf1_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf2_l1 = QLabel(self); self.py_tf2_l1.setText("1. Brightness (0-1):"); self.py_tf2_l1.move(20, 100); tmp.append(self.py_tf2_l1); self.py_tf2_e1 = QLineEdit(self) self.py_tf2_e1.move(150, 100); self.py_tf2_e1.setText("0.0"); tmp.append(self.py_tf2_e1); self.py_tf2_l2 = QLabel(self); self.py_tf2_l2.setText("2. Contrast (0-1):"); self.py_tf2_l2.move(20, 150); tmp.append(self.py_tf2_l2); self.py_tf2_e2 = QLineEdit(self) self.py_tf2_e2.move(150, 150); self.py_tf2_e2.setText("0.0"); tmp.append(self.py_tf2_e2); self.py_tf2_l3 = QLabel(self); self.py_tf2_l3.setText("3. Saturation (0-1):"); self.py_tf2_l3.move(20, 200); tmp.append(self.py_tf2_l3); self.py_tf2_e3 = QLineEdit(self) self.py_tf2_e3.move(150, 200); self.py_tf2_e3.setText("0.0"); tmp.append(self.py_tf2_e3); self.py_tf2_l4 = QLabel(self); self.py_tf2_l4.setText("4. Hue (0-1):"); self.py_tf2_l4.move(20, 250); tmp.append(self.py_tf2_l4); self.py_tf2_e4 = QLineEdit(self) self.py_tf2_e4.move(150, 250); self.py_tf2_e4.setText("0.0"); tmp.append(self.py_tf2_e4); self.py_tf2_l5 = QLabel(self); self.py_tf2_l5.setText("5. 
Apply at"); self.py_tf2_l5.move(20, 300); tmp.append(self.py_tf2_l5); self.py_tf2_cbox1 = QCheckBox("Training", self) self.py_tf2_cbox1.setChecked(True) self.py_tf2_cbox1.move(110, 300); tmp.append(self.py_tf2_cbox1); self.py_tf2_cbox2 = QCheckBox("Validation", self) self.py_tf2_cbox2.setChecked(True) self.py_tf2_cbox2.move(210, 300); tmp.append(self.py_tf2_cbox2); self.py_tf2_cbox3 = QCheckBox("Testing", self) self.py_tf2_cbox3.setChecked(False) self.py_tf2_cbox3.move(310, 300); tmp.append(self.py_tf2_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf3_l1 = QLabel(self); self.py_tf3_l1.setText("1. Rotation (angle):"); self.py_tf3_l1.move(20, 100); tmp.append(self.py_tf3_l1); self.py_tf3_e1 = QLineEdit(self) self.py_tf3_e1.move(150, 100); self.py_tf3_e1.setText("0.0"); self.py_tf3_e1.resize(50, 25); tmp.append(self.py_tf3_e1); self.py_tf3_l2 = QLabel(self); self.py_tf3_l2.setText("2. Translation (ratio):"); self.py_tf3_l2.move(20, 150); tmp.append(self.py_tf3_l2); self.py_tf3_e2_1 = QLineEdit(self) self.py_tf3_e2_1.move(200, 150); self.py_tf3_e2_1.setText("None"); self.py_tf3_e2_1.resize(50, 25); tmp.append(self.py_tf3_e2_1); self.py_tf3_e2_2 = QLineEdit(self) self.py_tf3_e2_2.move(300, 150); self.py_tf3_e2_2.setText("None"); self.py_tf3_e2_2.resize(50, 25); tmp.append(self.py_tf3_e2_2); self.py_tf3_l3 = QLabel(self); self.py_tf3_l3.setText("3. Scale (ratio):"); self.py_tf3_l3.move(20, 200); tmp.append(self.py_tf3_l3); self.py_tf3_e3_1 = QLineEdit(self) self.py_tf3_e3_1.move(200, 200); self.py_tf3_e3_1.setText("None"); self.py_tf3_e3_1.resize(50, 25); tmp.append(self.py_tf3_e3_1); self.py_tf3_e3_2 = QLineEdit(self) self.py_tf3_e3_2.move(300, 200); self.py_tf3_e3_2.setText("None"); self.py_tf3_e3_2.resize(50, 25); tmp.append(self.py_tf3_e3_2); self.py_tf3_l4 = QLabel(self); self.py_tf3_l4.setText("4. 
Sheer (ratio):"); self.py_tf3_l4.move(20, 250); tmp.append(self.py_tf3_l4); self.py_tf3_e4_1 = QLineEdit(self) self.py_tf3_e4_1.move(200, 250); self.py_tf3_e4_1.setText("None"); self.py_tf3_e4_1.resize(50, 25); tmp.append(self.py_tf3_e4_1); self.py_tf3_e4_2 = QLineEdit(self) self.py_tf3_e4_2.move(300, 250); self.py_tf3_e4_2.setText("None"); self.py_tf3_e4_2.resize(50, 25); tmp.append(self.py_tf3_e4_2); self.py_tf3_l5 = QLabel(self); self.py_tf3_l5.setText("5. Apply at"); self.py_tf3_l5.move(20, 300); tmp.append(self.py_tf3_l5); self.py_tf3_cbox1 = QCheckBox("Training", self) self.py_tf3_cbox1.setChecked(True) self.py_tf3_cbox1.move(110, 300); tmp.append(self.py_tf3_cbox1); self.py_tf3_cbox2 = QCheckBox("Validation", self) self.py_tf3_cbox2.setChecked(True) self.py_tf3_cbox2.move(210, 300); tmp.append(self.py_tf3_cbox2); self.py_tf3_cbox3 = QCheckBox("Testing", self) self.py_tf3_cbox3.setChecked(False) self.py_tf3_cbox3.move(310, 300); tmp.append(self.py_tf3_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf4_l1 = QLabel(self); self.py_tf4_l1.setText("1. Crop Size:"); self.py_tf4_l1.move(20, 100); tmp.append(self.py_tf4_l1); self.py_tf4_e1 = QLineEdit(self) self.py_tf4_e1.move(150, 100); self.py_tf4_e1.setText("224"); tmp.append(self.py_tf4_e1); self.py_tf4_l2 = QLabel(self); self.py_tf4_l2.setText("2. Apply at"); self.py_tf4_l2.move(20, 150); tmp.append(self.py_tf4_l2); self.py_tf4_cbox1 = QCheckBox("Training", self) self.py_tf4_cbox1.setChecked(True) self.py_tf4_cbox1.move(110, 150); tmp.append(self.py_tf4_cbox1); self.py_tf4_cbox2 = QCheckBox("Validation", self) self.py_tf4_cbox2.setChecked(True) self.py_tf4_cbox2.move(210, 150); tmp.append(self.py_tf4_cbox2); self.py_tf4_cbox3 = QCheckBox("Testing", self) self.py_tf4_cbox3.setChecked(False) self.py_tf4_cbox3.move(310, 150); tmp.append(self.py_tf4_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf5_l1 = QLabel(self); self.py_tf5_l1.setText("1. 
Probability (0-1):"); self.py_tf5_l1.move(20, 100); tmp.append(self.py_tf5_l1); self.py_tf5_e1 = QLineEdit(self) self.py_tf5_e1.move(200, 100); self.py_tf5_e1.setText("0.5"); tmp.append(self.py_tf5_e1); self.py_tf5_l2 = QLabel(self); self.py_tf5_l2.setText("2. Apply at"); self.py_tf5_l2.move(20, 150); tmp.append(self.py_tf5_l2); self.py_tf5_cbox1 = QCheckBox("Training", self) self.py_tf5_cbox1.setChecked(True) self.py_tf5_cbox1.move(110, 150); tmp.append(self.py_tf5_cbox1); self.py_tf5_cbox2 = QCheckBox("Validation", self) self.py_tf5_cbox2.setChecked(True) self.py_tf5_cbox2.move(210, 150); tmp.append(self.py_tf5_cbox2); self.py_tf5_cbox3 = QCheckBox("Testing", self) self.py_tf5_cbox3.setChecked(False) self.py_tf5_cbox3.move(310, 150); tmp.append(self.py_tf5_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf6_l1 = QLabel(self); self.py_tf6_l1.setText("1. Distrotion (0-1):"); self.py_tf6_l1.move(20, 100); tmp.append(self.py_tf6_l1); self.py_tf6_e1 = QLineEdit(self) self.py_tf6_e1.move(200, 100); self.py_tf6_e1.setText("0.5"); tmp.append(self.py_tf6_e1); self.py_tf6_l2 = QLabel(self); self.py_tf6_l2.setText("2. Probability (0-1):"); self.py_tf6_l2.move(20, 150); tmp.append(self.py_tf6_l2); self.py_tf6_e2 = QLineEdit(self) self.py_tf6_e2.move(200, 150); self.py_tf6_e2.setText("0.5"); tmp.append(self.py_tf6_e2); self.py_tf6_l3 = QLabel(self); self.py_tf6_l3.setText("3. 
Apply at"); self.py_tf6_l3.move(20, 200); tmp.append(self.py_tf6_l3); self.py_tf6_cbox1 = QCheckBox("Training", self) self.py_tf6_cbox1.setChecked(True) self.py_tf6_cbox1.move(110, 200); tmp.append(self.py_tf6_cbox1); self.py_tf6_cbox2 = QCheckBox("Validation", self) self.py_tf6_cbox2.setChecked(True) self.py_tf6_cbox2.move(210, 200); tmp.append(self.py_tf6_cbox2); self.py_tf6_cbox3 = QCheckBox("Testing", self) self.py_tf6_cbox3.setChecked(False) self.py_tf6_cbox3.move(310, 200); tmp.append(self.py_tf6_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf7_l1 = QLabel(self); self.py_tf7_l1.setText("1. Crop Size:"); self.py_tf7_l1.move(20, 100); tmp.append(self.py_tf7_l1); self.py_tf7_e1 = QLineEdit(self) self.py_tf7_e1.move(150, 100); self.py_tf7_e1.setText("224"); tmp.append(self.py_tf7_e1); self.py_tf7_l2 = QLabel(self); self.py_tf7_l2.setText("2. Scale:"); self.py_tf7_l2.move(20, 150); tmp.append(self.py_tf7_l2); self.py_tf7_e2_1 = QLineEdit(self) self.py_tf7_e2_1.move(120, 150); self.py_tf7_e2_1.setText("0.08"); self.py_tf7_e2_1.resize(50, 25); tmp.append(self.py_tf7_e2_1); self.py_tf7_e2_2 = QLineEdit(self) self.py_tf7_e2_2.move(220, 150); self.py_tf7_e2_2.setText("1.0"); self.py_tf7_e2_2.resize(50, 25); tmp.append(self.py_tf7_e2_2); self.py_tf7_l3 = QLabel(self); self.py_tf7_l3.setText("3. Ratio:"); self.py_tf7_l3.move(20, 200); tmp.append(self.py_tf7_l3); self.py_tf7_e3_1 = QLineEdit(self) self.py_tf7_e3_1.move(120, 200); self.py_tf7_e3_1.setText("0.75"); self.py_tf7_e3_1.resize(50, 25); tmp.append(self.py_tf7_e3_1); self.py_tf7_e3_2 = QLineEdit(self) self.py_tf7_e3_2.move(220, 200); self.py_tf7_e3_2.setText("1.33"); self.py_tf7_e3_2.resize(50, 25); tmp.append(self.py_tf7_e3_2); self.py_tf7_l4 = QLabel(self); self.py_tf7_l4.setText("4. 
Apply at"); self.py_tf7_l4.move(20, 300); tmp.append(self.py_tf7_l4); self.py_tf7_cbox1 = QCheckBox("Training", self) self.py_tf7_cbox1.setChecked(True) self.py_tf7_cbox1.move(110, 300); tmp.append(self.py_tf7_cbox1); self.py_tf7_cbox2 = QCheckBox("Validation", self) self.py_tf7_cbox2.setChecked(True) self.py_tf7_cbox2.move(210, 300); tmp.append(self.py_tf7_cbox2); self.py_tf7_cbox3 = QCheckBox("Testing", self) self.py_tf7_cbox3.setChecked(False) self.py_tf7_cbox3.move(310, 300); tmp.append(self.py_tf7_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf8_l1 = QLabel(self); self.py_tf8_l1.setText("1. Degrees:"); self.py_tf8_l1.move(20, 100); tmp.append(self.py_tf8_l1); self.py_tf8_e1 = QLineEdit(self) self.py_tf8_e1.move(200, 100); self.py_tf8_e1.setText("10"); tmp.append(self.py_tf8_e1); self.py_tf8_l2 = QLabel(self); self.py_tf8_l2.setText("2. Apply at"); self.py_tf8_l2.move(20, 200); tmp.append(self.py_tf8_l2); self.py_tf8_cbox1 = QCheckBox("Training", self) self.py_tf8_cbox1.setChecked(True) self.py_tf8_cbox1.move(110, 200); tmp.append(self.py_tf8_cbox1); self.py_tf8_cbox2 = QCheckBox("Validation", self) self.py_tf8_cbox2.setChecked(True) self.py_tf8_cbox2.move(210, 200); tmp.append(self.py_tf8_cbox2); self.py_tf8_cbox3 = QCheckBox("Testing", self) self.py_tf8_cbox3.setChecked(False) self.py_tf8_cbox3.move(310, 200); tmp.append(self.py_tf8_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf9_l1 = QLabel(self); self.py_tf9_l1.setText("1. Probability (0-1):"); self.py_tf9_l1.move(20, 100); tmp.append(self.py_tf9_l1); self.py_tf9_e1 = QLineEdit(self) self.py_tf9_e1.move(200, 100); self.py_tf9_e1.setText("0.5"); tmp.append(self.py_tf9_e1); self.py_tf9_l2 = QLabel(self); self.py_tf9_l2.setText("2. 
Apply at"); self.py_tf9_l2.move(20, 200); tmp.append(self.py_tf9_l2); self.py_tf9_cbox1 = QCheckBox("Training", self) self.py_tf9_cbox1.setChecked(True) self.py_tf9_cbox1.move(110, 200); tmp.append(self.py_tf9_cbox1); self.py_tf9_cbox2 = QCheckBox("Validation", self) self.py_tf9_cbox2.setChecked(True) self.py_tf9_cbox2.move(210, 200); tmp.append(self.py_tf9_cbox2); self.py_tf9_cbox3 = QCheckBox("Testing", self) self.py_tf9_cbox3.setChecked(False) self.py_tf9_cbox3.move(310, 200); tmp.append(self.py_tf9_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf10_l1 = QLabel(self); self.py_tf10_l1.setText("1. New size:"); self.py_tf10_l1.move(20, 100); tmp.append(self.py_tf10_l1); self.py_tf10_e1 = QLineEdit(self) self.py_tf10_e1.move(200, 100); self.py_tf10_e1.setText("224"); tmp.append(self.py_tf10_e1); self.py_tf10_l2 = QLabel(self); self.py_tf10_l2.setText("2. Apply at"); self.py_tf10_l2.move(20, 200); tmp.append(self.py_tf10_l2); self.py_tf10_cbox1 = QCheckBox("Training", self) self.py_tf10_cbox1.setChecked(True) self.py_tf10_cbox1.move(110, 200); tmp.append(self.py_tf10_cbox1); self.py_tf10_cbox2 = QCheckBox("Validation", self) self.py_tf10_cbox2.setChecked(True) self.py_tf10_cbox2.move(210, 200); tmp.append(self.py_tf10_cbox2); self.py_tf10_cbox3 = QCheckBox("Testing", self) self.py_tf10_cbox3.setChecked(False) self.py_tf10_cbox3.move(310, 200); tmp.append(self.py_tf10_cbox3); self.transform_ui_pytorch.append(tmp) tmp = []; self.py_tf11_l1 = QLabel(self); self.py_tf11_l1.setText("1. 
Mean:"); self.py_tf11_l1.move(20, 100); tmp.append(self.py_tf11_l1); self.py_tf11_e1_1 = QLineEdit(self) self.py_tf11_e1_1.move(120, 100); self.py_tf11_e1_1.setText("0.485"); self.py_tf11_e1_1.resize(70, 25); tmp.append(self.py_tf11_e1_1); self.py_tf11_e1_2 = QLineEdit(self) self.py_tf11_e1_2.move(220, 100); self.py_tf11_e1_2.setText("0.456"); self.py_tf11_e1_2.resize(70, 25); tmp.append(self.py_tf11_e1_2); self.py_tf11_e1_3 = QLineEdit(self) self.py_tf11_e1_3.move(320, 100); self.py_tf11_e1_3.setText("0.406"); self.py_tf11_e1_3.resize(70, 25); tmp.append(self.py_tf11_e1_3); self.py_tf11_l2 = QLabel(self); self.py_tf11_l2.setText("2. Standard deviation:"); self.py_tf11_l2.move(20, 150); tmp.append(self.py_tf11_l2); self.py_tf11_e2_1 = QLineEdit(self) self.py_tf11_e2_1.move(180, 150); self.py_tf11_e2_1.setText("0.229"); self.py_tf11_e2_1.resize(70, 25); tmp.append(self.py_tf11_e2_1); self.py_tf11_e2_2 = QLineEdit(self) self.py_tf11_e2_2.move(280, 150); self.py_tf11_e2_2.setText("0.224"); self.py_tf11_e2_2.resize(70, 25); tmp.append(self.py_tf11_e2_2); self.py_tf11_e2_3 = QLineEdit(self) self.py_tf11_e2_3.move(380, 150); self.py_tf11_e2_3.setText("0.225"); self.py_tf11_e2_3.resize(70, 25); tmp.append(self.py_tf11_e2_3); self.py_tf11_l3 = QLabel(self); self.py_tf11_l3.setText("3. Apply at"); self.py_tf11_l3.move(20, 200); tmp.append(self.py_tf11_l3); self.py_tf11_cbox1 = QCheckBox("Training", self) self.py_tf11_cbox1.setChecked(True) self.py_tf11_cbox1.move(110, 200); tmp.append(self.py_tf11_cbox1); self.py_tf11_cbox2 = QCheckBox("Validation", self) self.py_tf11_cbox2.setChecked(True) self.py_tf11_cbox2.move(210, 200); tmp.append(self.py_tf11_cbox2); self.py_tf11_cbox3 = QCheckBox("Testing", self) self.py_tf11_cbox3.setChecked(False) self.py_tf11_cbox3.move(310, 200); tmp.append(self.py_tf11_cbox3); self.transform_ui_pytorch.append(tmp); tmp = []; self.ke_tf1_l1 = QLabel(self); self.ke_tf1_l1.setText("1. 
Brightness (0-1):"); self.ke_tf1_l1.move(20, 100); tmp.append(self.ke_tf1_l1); self.ke_tf1_e1 = QLineEdit(self) self.ke_tf1_e1.move(150, 100); self.ke_tf1_e1.setText("0.0"); tmp.append(self.ke_tf1_e1); self.ke_tf1_l2 = QLabel(self); self.ke_tf1_l2.setText("2. Contrast (0-1):"); self.ke_tf1_l2.move(20, 150); tmp.append(self.ke_tf1_l2); self.ke_tf1_e2 = QLineEdit(self) self.ke_tf1_e2.move(150, 150); self.ke_tf1_e2.setText("0.0"); tmp.append(self.ke_tf1_e2); self.ke_tf1_l3 = QLabel(self); self.ke_tf1_l3.setText("3. Saturation (0-1):"); self.ke_tf1_l3.move(20, 200); tmp.append(self.ke_tf1_l3); self.ke_tf1_e3 = QLineEdit(self) self.ke_tf1_e3.move(150, 200); self.ke_tf1_e3.setText("0.0"); tmp.append(self.ke_tf1_e3); self.ke_tf1_l4 = QLabel(self); self.ke_tf1_l4.setText("4. Hue (0-1):"); self.ke_tf1_l4.move(20, 250); tmp.append(self.ke_tf1_l4); self.ke_tf1_e4 = QLineEdit(self) self.ke_tf1_e4.move(150, 250); self.ke_tf1_e4.setText("0.0"); tmp.append(self.ke_tf1_e4); self.ke_tf1_l5 = QLabel(self); self.ke_tf1_l5.setText("5. Apply at"); self.ke_tf1_l5.move(20, 300); tmp.append(self.ke_tf1_l5); self.ke_tf1_cbox1 = QCheckBox("Training", self) self.ke_tf1_cbox1.setChecked(True) self.ke_tf1_cbox1.move(110, 300); tmp.append(self.ke_tf1_cbox1); self.ke_tf1_cbox2 = QCheckBox("Validation", self) self.ke_tf1_cbox2.setChecked(True) self.ke_tf1_cbox2.move(210, 300); tmp.append(self.ke_tf1_cbox2); self.ke_tf1_cbox3 = QCheckBox("Testing", self) self.ke_tf1_cbox3.setChecked(False) self.ke_tf1_cbox3.move(310, 300); tmp.append(self.ke_tf1_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf2_l1 = QLabel(self); self.ke_tf2_l1.setText("1. Crop Size:"); self.ke_tf2_l1.move(20, 100); tmp.append(self.ke_tf2_l1); self.ke_tf2_e1 = QLineEdit(self) self.ke_tf2_e1.move(150, 100); self.ke_tf2_e1.setText("224"); tmp.append(self.ke_tf2_e1); self.ke_tf2_l2 = QLabel(self); self.ke_tf2_l2.setText("2. 
Scale:"); self.ke_tf2_l2.move(20, 150); tmp.append(self.ke_tf2_l2); self.ke_tf2_e2_1 = QLineEdit(self) self.ke_tf2_e2_1.move(120, 150); self.ke_tf2_e2_1.setText("0.08"); self.ke_tf2_e2_1.resize(50, 25); tmp.append(self.ke_tf2_e2_1); self.ke_tf2_e2_2 = QLineEdit(self) self.ke_tf2_e2_2.move(220, 150); self.ke_tf2_e2_2.setText("1.0"); self.ke_tf2_e2_2.resize(50, 25); tmp.append(self.ke_tf2_e2_2); self.ke_tf2_l3 = QLabel(self); self.ke_tf2_l3.setText("3. Ratio:"); self.ke_tf2_l3.move(20, 200); tmp.append(self.ke_tf2_l3); self.ke_tf2_e3_1 = QLineEdit(self) self.ke_tf2_e3_1.move(120, 200); self.ke_tf2_e3_1.setText("0.75"); self.ke_tf2_e3_1.resize(50, 25); tmp.append(self.ke_tf2_e3_1); self.ke_tf2_e3_2 = QLineEdit(self) self.ke_tf2_e3_2.move(220, 200); self.ke_tf2_e3_2.setText("1.33"); self.ke_tf2_e3_2.resize(50, 25); tmp.append(self.ke_tf2_e3_2); self.ke_tf2_l4 = QLabel(self); self.ke_tf2_l4.setText("4. Apply at"); self.ke_tf2_l4.move(20, 300); tmp.append(self.ke_tf2_l4); self.ke_tf2_cbox1 = QCheckBox("Training", self) self.ke_tf2_cbox1.setChecked(True) self.ke_tf2_cbox1.move(110, 300); tmp.append(self.ke_tf2_cbox1); self.ke_tf2_cbox2 = QCheckBox("Validation", self) self.ke_tf2_cbox2.setChecked(True) self.ke_tf2_cbox2.move(210, 300); tmp.append(self.ke_tf2_cbox2); self.ke_tf2_cbox3 = QCheckBox("Testing", self) self.ke_tf2_cbox3.setChecked(False) self.ke_tf2_cbox3.move(310, 300); tmp.append(self.ke_tf2_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf3_l1 = QLabel(self); self.ke_tf3_l1.setText("1. Probability (0-1):"); self.ke_tf3_l1.move(20, 100); tmp.append(self.ke_tf3_l1); self.ke_tf3_e1 = QLineEdit(self) self.ke_tf3_e1.move(150, 100); self.ke_tf3_e1.setText("0.5"); tmp.append(self.ke_tf3_e1); self.ke_tf3_l2 = QLabel(self); self.ke_tf3_l2.setText("2. 
Apply at"); self.ke_tf3_l2.move(20, 150); tmp.append(self.ke_tf3_l2); self.ke_tf3_cbox1 = QCheckBox("Training", self) self.ke_tf3_cbox1.setChecked(True) self.ke_tf3_cbox1.move(110, 150); tmp.append(self.ke_tf3_cbox1); self.ke_tf3_cbox2 = QCheckBox("Validation", self) self.ke_tf3_cbox2.setChecked(True) self.ke_tf3_cbox2.move(210, 150); tmp.append(self.ke_tf3_cbox2); self.ke_tf3_cbox3 = QCheckBox("Testing", self) self.ke_tf3_cbox3.setChecked(False) self.ke_tf3_cbox3.move(310, 150); tmp.append(self.ke_tf3_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf4_l1 = QLabel(self); self.ke_tf4_l1.setText("1. Probability (0-1):"); self.ke_tf4_l1.move(20, 100); tmp.append(self.ke_tf4_l1); self.ke_tf4_e1 = QLineEdit(self) self.ke_tf4_e1.move(150, 100); self.ke_tf4_e1.setText("0.5"); tmp.append(self.ke_tf4_e1); self.ke_tf4_l2 = QLabel(self); self.ke_tf4_l2.setText("2. Apply at"); self.ke_tf4_l2.move(20, 150); tmp.append(self.ke_tf4_l2); self.ke_tf4_cbox1 = QCheckBox("Training", self) self.ke_tf4_cbox1.setChecked(True) self.ke_tf4_cbox1.move(110, 150); tmp.append(self.ke_tf4_cbox1); self.ke_tf4_cbox2 = QCheckBox("Validation", self) self.ke_tf4_cbox2.setChecked(True) self.ke_tf4_cbox2.move(210, 150); tmp.append(self.ke_tf4_cbox2); self.ke_tf4_cbox3 = QCheckBox("Testing", self) self.ke_tf4_cbox3.setChecked(False) self.ke_tf4_cbox3.move(310, 150); tmp.append(self.ke_tf4_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf5_l1 = QLabel(self); self.ke_tf5_l1.setText("1. Degrees:"); self.ke_tf5_l1.move(20, 100); tmp.append(self.ke_tf5_l1); self.ke_tf5_e1 = QLineEdit(self) self.ke_tf5_e1.move(150, 100); self.ke_tf5_e1.setText("0.5"); tmp.append(self.ke_tf5_e1); self.ke_tf5_l2 = QLabel(self); self.ke_tf5_l2.setText("2. 
Apply at"); self.ke_tf5_l2.move(20, 150); tmp.append(self.ke_tf5_l2); self.ke_tf5_cbox1 = QCheckBox("Training", self) self.ke_tf5_cbox1.setChecked(True) self.ke_tf5_cbox1.move(110, 150); tmp.append(self.ke_tf5_cbox1); self.ke_tf5_cbox2 = QCheckBox("Validation", self) self.ke_tf5_cbox2.setChecked(True) self.ke_tf5_cbox2.move(210, 150); tmp.append(self.ke_tf5_cbox2); self.ke_tf5_cbox3 = QCheckBox("Testing", self) self.ke_tf5_cbox3.setChecked(False) self.ke_tf5_cbox3.move(310, 150); tmp.append(self.ke_tf5_cbox3); self.transform_ui_keras.append(tmp) tmp = []; self.ke_tf6_l1 = QLabel(self); self.ke_tf6_l1.setText("1. Mean:"); self.ke_tf6_l1.move(20, 100); tmp.append(self.ke_tf6_l1); self.ke_tf6_e1_1 = QLineEdit(self) self.ke_tf6_e1_1.move(120, 100); self.ke_tf6_e1_1.setText("0.485"); self.ke_tf6_e1_1.resize(70, 25); tmp.append(self.ke_tf6_e1_1); self.ke_tf6_e1_2 = QLineEdit(self) self.ke_tf6_e1_2.move(220, 100); self.ke_tf6_e1_2.setText("0.456"); self.ke_tf6_e1_2.resize(70, 25); tmp.append(self.ke_tf6_e1_2); self.ke_tf6_e1_3 = QLineEdit(self) self.ke_tf6_e1_3.move(320, 100); self.ke_tf6_e1_3.setText("0.406"); self.ke_tf6_e1_3.resize(70, 25); tmp.append(self.ke_tf6_e1_3); self.ke_tf6_l2 = QLabel(self); self.ke_tf6_l2.setText("2. Apply at"); self.ke_tf6_l2.move(20, 150); tmp.append(self.ke_tf6_l2); self.ke_tf6_cbox1 = QCheckBox("Training", self) self.ke_tf6_cbox1.setChecked(True) self.ke_tf6_cbox1.move(110, 150); tmp.append(self.ke_tf6_cbox1); self.ke_tf6_cbox2 = QCheckBox("Validation", self) self.ke_tf6_cbox2.setChecked(True) self.ke_tf6_cbox2.move(210, 150); tmp.append(self.ke_tf6_cbox2); self.ke_tf6_cbox3 = QCheckBox("Testing", self) self.ke_tf6_cbox3.setChecked(False) self.ke_tf6_cbox3.move(310, 150); tmp.append(self.ke_tf6_cbox3); self.transform_ui_keras.append(tmp); tmp = []; self.ke_tf7_l1 = QLabel(self); self.ke_tf7_l1.setText("1. 
Mean:"); self.ke_tf7_l1.move(20, 100); tmp.append(self.ke_tf7_l1); self.ke_tf7_e1_1 = QLineEdit(self) self.ke_tf7_e1_1.move(120, 100); self.ke_tf7_e1_1.setText("0.485"); self.ke_tf7_e1_1.resize(70, 25); tmp.append(self.ke_tf7_e1_1); self.ke_tf7_e1_2 = QLineEdit(self) self.ke_tf7_e1_2.move(220, 100); self.ke_tf7_e1_2.setText("0.456"); self.ke_tf7_e1_2.resize(70, 25); tmp.append(self.ke_tf7_e1_2); self.ke_tf7_e1_3 = QLineEdit(self) self.ke_tf7_e1_3.move(320, 100); self.ke_tf7_e1_3.setText("0.406"); self.ke_tf7_e1_3.resize(70, 25); tmp.append(self.ke_tf7_e1_3); self.ke_tf7_l2 = QLabel(self); self.ke_tf7_l2.setText("2. Standard deviation:"); self.ke_tf7_l2.move(20, 150); tmp.append(self.ke_tf7_l2); self.ke_tf7_e2_1 = QLineEdit(self) self.ke_tf7_e2_1.move(180, 150); self.ke_tf7_e2_1.setText("0.229"); self.ke_tf7_e2_1.resize(70, 25); tmp.append(self.ke_tf7_e2_1); self.ke_tf7_e2_2 = QLineEdit(self) self.ke_tf7_e2_2.move(280, 150); self.ke_tf7_e2_2.setText("0.224"); self.ke_tf7_e2_2.resize(70, 25); tmp.append(self.ke_tf7_e2_2); self.ke_tf7_e2_3 = QLineEdit(self) self.ke_tf7_e2_3.move(380, 150); self.ke_tf7_e2_3.setText("0.225"); self.ke_tf7_e2_3.resize(70, 25); tmp.append(self.ke_tf7_e2_3); self.ke_tf7_l2 = QLabel(self); self.ke_tf7_l2.setText("3. 
Apply at"); self.ke_tf7_l2.move(20, 200); tmp.append(self.ke_tf7_l2); self.ke_tf7_cbox1 = QCheckBox("Training", self) self.ke_tf7_cbox1.setChecked(True) self.ke_tf7_cbox1.move(110, 200); tmp.append(self.ke_tf7_cbox1); self.ke_tf7_cbox2 = QCheckBox("Validation", self) self.ke_tf7_cbox2.setChecked(True) self.ke_tf7_cbox2.move(210, 200); tmp.append(self.ke_tf7_cbox2); self.ke_tf7_cbox3 = QCheckBox("Testing", self) self.ke_tf7_cbox3.setChecked(False) self.ke_tf7_cbox3.move(310, 200); tmp.append(self.ke_tf7_cbox3); self.transform_ui_keras.append(tmp); self.select_transform(); self.tb1 = QTextEdit(self) self.tb1.move(550, 20) self.tb1.resize(300, 500) wr = ""; for i in range(len(self.system["update"]["transforms"]["value"])): tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4) wr += "{}\n".format(tmp); self.tb1.setText(wr); self.b4 = QPushButton('Add Transform', self) self.b4.move(400,400) self.b4.clicked.connect(self.add_transform) self.b5 = QPushButton('Remove last transform', self) self.b5.move(370,450) self.b5.clicked.connect(self.remove_transform) self.b6 = QPushButton('Clear transforms', self) self.b6.move(400,500) self.b6.clicked.connect(self.clear_transform) def select_transform(self): self.current_transform = {}; self.current_transform["name"] = ""; self.current_transform["params"] = {}; if(self.system["backend"] == "Mxnet-1.5.1"): self.current_transform["name"] = self.cb1.currentText(); index = self.mxnet_transforms_list.index(self.cb1.currentText()); for i in range(len(self.transform_ui_mxnet)): for j in range(len(self.transform_ui_mxnet[i])): if((index-1)==i): self.transform_ui_mxnet[i][j].show(); else: self.transform_ui_mxnet[i][j].hide(); for i in range(len(self.transform_ui_keras)): for j in range(len(self.transform_ui_keras[i])): self.transform_ui_keras[i][j].hide(); for i in range(len(self.transform_ui_pytorch)): for j in range(len(self.transform_ui_pytorch[i])): self.transform_ui_pytorch[i][j].hide(); elif(self.system["backend"] 
== "Keras-2.2.5_Tensorflow-1"): self.current_transform["name"] = self.cb2.currentText(); index = self.keras_transforms_list.index(self.cb2.currentText()); for i in range(len(self.transform_ui_keras)): for j in range(len(self.transform_ui_keras[i])): if((index-1)==i): self.transform_ui_keras[i][j].show(); else: self.transform_ui_keras[i][j].hide(); for i in range(len(self.transform_ui_mxnet)): for j in range(len(self.transform_ui_mxnet[i])): self.transform_ui_mxnet[i][j].hide(); for i in range(len(self.transform_ui_pytorch)): for j in range(len(self.transform_ui_pytorch[i])): self.transform_ui_pytorch[i][j].hide(); elif(self.system["backend"] == "Pytorch-1.3.1"): self.current_transform["name"] = self.cb3.currentText(); index = self.pytorch_transforms_list.index(self.cb3.currentText()); for i in range(len(self.transform_ui_pytorch)): for j in range(len(self.transform_ui_pytorch[i])): if((index-1)==i): self.transform_ui_pytorch[i][j].show(); else: self.transform_ui_pytorch[i][j].hide(); for i in range(len(self.transform_ui_keras)): for j in range(len(self.transform_ui_keras[i])): self.transform_ui_keras[i][j].hide(); for i in range(len(self.transform_ui_mxnet)): for j in range(len(self.transform_ui_mxnet[i])): self.transform_ui_mxnet[i][j].hide(); def add_transform(self): self.system["update"]["transforms"]["active"] = True; if(self.system["backend"] == "Mxnet-1.5.1"): if(self.current_transform["name"] == self.mxnet_transforms_list[1]): self.current_transform["params"]["input_size"] = self.mx_tf1_e1.text(); self.current_transform["params"]["scale"] = [self.mx_tf1_e2_1.text(), self.mx_tf1_e2_2.text()]; self.current_transform["params"]["ratio"] = [self.mx_tf1_e3_1.text(), self.mx_tf1_e3_2.text()]; if(self.mx_tf1_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf1_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] 
= "False"; if(self.mx_tf1_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[2]): self.current_transform["params"]["input_size"] = self.mx_tf2_e1.text(); if(self.mx_tf2_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf2_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf2_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[3]): self.current_transform["params"]["brightness"] = self.mx_tf3_e1.text(); self.current_transform["params"]["contrast"] = self.mx_tf3_e2.text(); self.current_transform["params"]["saturation"] = self.mx_tf3_e3.text(); self.current_transform["params"]["hue"] = self.mx_tf3_e4.text(); if(self.mx_tf3_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf3_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf3_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[4]): self.current_transform["params"]["probability"] = self.mx_tf4_e1.text(); if(self.mx_tf4_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: 
self.current_transform["params"]["train"] = "False"; if(self.mx_tf4_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf4_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[5]): self.current_transform["params"]["probability"] = self.mx_tf5_e1.text(); if(self.mx_tf5_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf5_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf5_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[6]): self.current_transform["params"]["alpha"] = self.mx_tf6_e1.text(); if(self.mx_tf6_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf6_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf6_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[7]): self.current_transform["params"]["input_size"] = self.mx_tf7_e1.text(); if(self.mx_tf7_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = 
"False"; if(self.mx_tf7_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf7_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.mxnet_transforms_list[8]): self.current_transform["params"]["mean"] = [self.mx_tf8_e1_1.text(), self.mx_tf8_e1_2.text(), self.mx_tf8_e1_3.text()]; self.current_transform["params"]["std"] = [self.mx_tf8_e2_1.text(), self.mx_tf8_e2_2.text(), self.mx_tf8_e2_3.text()]; if(self.mx_tf8_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.mx_tf8_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.mx_tf8_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"): if(self.current_transform["name"] == self.keras_transforms_list[1]): self.current_transform["params"]["brightness"] = self.ke_tf1_e1.text(); self.current_transform["params"]["contrast"] = self.ke_tf1_e2.text(); self.current_transform["params"]["saturation"] = self.ke_tf1_e3.text(); self.current_transform["params"]["hue"] = self.ke_tf1_e4.text(); if(self.ke_tf1_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf1_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf1_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: 
self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[2]): self.current_transform["params"]["input_size"] = self.ke_tf2_e1.text(); self.current_transform["params"]["scale"] = [self.ke_tf2_e2_1.text(), self.ke_tf2_e2_2.text()]; self.current_transform["params"]["ratio"] = [self.ke_tf2_e3_1.text(), self.ke_tf2_e3_2.text()]; if(self.ke_tf2_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf2_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf2_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[3]): self.current_transform["params"]["probability"] = self.ke_tf3_e1.text(); if(self.ke_tf3_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf3_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf3_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[4]): self.current_transform["params"]["probability"] = self.ke_tf4_e1.text(); if(self.ke_tf4_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf4_cbox2.isChecked()): self.current_transform["params"]["val"] = 
"True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf4_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[5]): self.current_transform["params"]["degrees"] = self.ke_tf5_e1.text(); if(self.ke_tf5_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf5_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf5_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[6]): self.current_transform["params"]["mean"] = [self.ke_tf6_e1_1.text(), self.ke_tf6_e1_2.text(), self.ke_tf6_e1_3.text()]; if(self.ke_tf6_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf6_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf6_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.keras_transforms_list[7]): self.current_transform["params"]["mean"] = [self.ke_tf7_e1_1.text(), self.ke_tf7_e1_2.text(), self.ke_tf7_e1_3.text()]; self.current_transform["params"]["std"] = [self.ke_tf7_e2_1.text(), self.ke_tf7_e2_2.text(), self.ke_tf7_e2_3.text()]; if(self.ke_tf7_cbox1.isChecked()): 
self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.ke_tf7_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.ke_tf7_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.system["backend"] == "Pytorch-1.3.1"): if(self.current_transform["name"] == self.pytorch_transforms_list[1]): self.current_transform["params"]["input_size"] = self.py_tf1_e1.text(); if(self.py_tf1_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf1_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf1_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[2]): self.current_transform["params"]["brightness"] = self.py_tf2_e1.text(); self.current_transform["params"]["contrast"] = self.py_tf2_e2.text(); self.current_transform["params"]["saturation"] = self.py_tf2_e3.text(); self.current_transform["params"]["hue"] = self.py_tf2_e4.text(); if(self.py_tf2_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf2_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf2_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; 
self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[3]): self.current_transform["params"]["degrees"] = self.py_tf3_e1.text(); self.current_transform["params"]["translate"] = [self.py_tf3_e2_1.text(), self.py_tf3_e2_2.text()]; self.current_transform["params"]["scale"] = [self.py_tf3_e3_1.text(), self.py_tf3_e3_2.text()]; self.current_transform["params"]["sheer"] = [self.py_tf3_e4_1.text(), self.py_tf3_e4_2.text()]; if(self.py_tf3_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf3_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf3_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[4]): self.current_transform["params"]["input_size"] = self.py_tf4_e1.text(); if(self.py_tf4_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf4_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf4_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[5]): self.current_transform["params"]["probability"] = self.py_tf5_e1.text(); if(self.py_tf5_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; 
if(self.py_tf5_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf5_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[6]): self.current_transform["params"]["distortion_scale"] = self.py_tf6_e1.text(); self.current_transform["params"]["probability"] = self.py_tf6_e2.text(); if(self.py_tf6_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf6_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf6_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[7]): self.current_transform["params"]["input_size"] = self.py_tf7_e1.text(); self.current_transform["params"]["scale"] = [self.py_tf7_e2_1.text(), self.py_tf7_e2_2.text()]; self.current_transform["params"]["ratio"] = [self.py_tf7_e3_1.text(), self.py_tf7_e3_2.text()]; if(self.py_tf7_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf7_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf7_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == 
self.pytorch_transforms_list[8]): self.current_transform["params"]["degrees"] = self.py_tf8_e1.text(); if(self.py_tf8_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf8_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf8_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[9]): self.current_transform["params"]["probability"] = self.py_tf9_e1.text(); if(self.py_tf9_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf9_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf9_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[10]): self.current_transform["params"]["input_size"] = self.py_tf10_e1.text(); if(self.py_tf10_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf10_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf10_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); elif(self.current_transform["name"] == self.pytorch_transforms_list[11]): 
self.current_transform["params"]["mean"] = [self.py_tf11_e1_1.text(), self.py_tf11_e1_2.text(), self.py_tf11_e1_3.text()]; self.current_transform["params"]["std"] = [self.py_tf11_e2_1.text(), self.py_tf11_e2_2.text(), self.py_tf11_e2_3.text()]; if(self.py_tf11_cbox1.isChecked()): self.current_transform["params"]["train"] = "True"; else: self.current_transform["params"]["train"] = "False"; if(self.py_tf11_cbox2.isChecked()): self.current_transform["params"]["val"] = "True"; else: self.current_transform["params"]["val"] = "False"; if(self.py_tf11_cbox3.isChecked()): self.current_transform["params"]["test"] = "True"; else: self.current_transform["params"]["test"] = "False"; self.system["update"]["transforms"]["value"].append(self.current_transform); wr = ""; for i in range(len(self.system["update"]["transforms"]["value"])): tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4) wr += "{}\n".format(tmp); self.tb1.setText(wr); def remove_transform(self): if(len(self.system["update"]["transforms"]["value"]) > 0): del self.system["update"]["transforms"]["value"][-1] else: self.system["update"]["transforms"]["active"] = False; wr = ""; for i in range(len(self.system["update"]["transforms"]["value"])): tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4) wr += "{}\n".format(tmp); self.tb1.setText(wr); if(len(self.system["update"]["transforms"]["value"]) == 0): self.system["update"]["transforms"]["active"] = False; def clear_transform(self): self.system["update"]["transforms"]["value"] = []; self.system["update"]["transforms"]["active"] = False; wr = ""; for i in range(len(self.system["update"]["transforms"]["value"])): tmp = json.dumps(self.system["update"]["transforms"]["value"][i], indent=4) wr += "{}\n".format(tmp); self.tb1.setText(wr); def forward(self): with open('base_classification.json', 'w') as outfile: json.dump(self.system, outfile) self.forward_model_param.emit(); def backward(self): with 
open('base_classification.json', 'w') as outfile: json.dump(self.system, outfile) self.backward_data_param.emit(); ''' app = QApplication(sys.argv) screen = WindowClassificationTrainUpdateTransformParam() screen.show() sys.exit(app.exec_()) '''
0.107965
0.095856
import asyncio
import discord
import inspect
import traceback
from typing import Any, Dict, List, Union
from inspect import getfullargspec
from discord.ext import commands

# Maps Python annotation types to Discord application-command option type codes.
slash_cmd_option_types = {
    str: 3,
    int: 4,
    bool: 5,
    discord.Member: 6,
    discord.TextChannel: 7,
    discord.CategoryChannel: 7,
    discord.VoiceChannel: 7,
    discord.Thread: 7,
    discord.StageChannel: 7,
    discord.Role: 8,
    # TODO: 9 is MENTIONABLE, add that somehow
    float: 10,
}

# Maps Discord option type codes to converters applied to received raw values.
# The discord.py converter methods (6/7/8) are coroutines taking (ctx, value);
# the plain builtins take just the value. slash_command_handler handles both.
slash_cmd_option_converters = {
    3: str,
    4: int,
    5: bool,
    6: commands.MemberConverter().convert,
    7: commands.GuildChannelConverter().convert,
    8: commands.RoleConverter().convert,
    # TODO: 9 is MENTIONABLE; no converter wired up yet
    10: float,
}


class InteractionContext(discord.Interaction):
    """An Interaction enriched with the bot, the matched command object and,
    for context-menu commands, the targeted message/member/user."""

    def __init__(self, interaction: discord.Interaction, bot: commands.AutoShardedBot):
        super().__init__(data=interaction._raw_data, state=interaction._state)
        self.bot = bot
        self.command: Union[SlashCommand, ContextMenuCommand] = self.bot.app_cmds[interaction.data.get('name')]
        # Populated by the user/message command handlers before dispatch.
        self.target: Union[discord.Message, discord.Member, discord.User] = None


class SlashCommandChoice:
    """A single fixed choice offered for a slash-command option."""

    def __init__(self, name: str, value: Union[str, int, float]):
        self.name = name
        self.value = value

    def to_dict(self) -> dict:
        """Serialize to the payload shape the Discord API expects."""
        return {"name": self.name, "value": self.value}

    def __repr__(self) -> str:
        return f"SlashCommandChoice(name={self.name} value={self.value})"


class SlashCommandOption:
    """One option (argument) of a slash command."""

    def __init__(
        self,
        name: str,
        type: int,
        description: str,
        required: bool = True,
        choices: List[SlashCommandChoice] = None
    ):
        self.name = name
        self.type = type
        self.description = description
        self.required = required
        self.choices = choices or []

    def to_dict(self) -> dict:
        """Serialize to the payload shape the Discord API expects."""
        final = {
            "name": self.name,
            "type": self.type,
            "description": self.description,
            "required": self.required
        }
        if self.choices:
            final.update({"choices": [choice.to_dict() for choice in self.choices]})
        return final

    def __repr__(self) -> str:
        return f"SlashCommandOption(name='{self.name}' type={self.type} description='{self.description}' required={self.required} choices={self.choices})"


class SlashCommand:
    """Wraps a coroutine callback as a slash command, deriving its options
    from the callback's signature annotations (or an explicit options kwarg)."""

    def __init__(self, func, **kwargs) -> None:
        self.callback = func
        self.name = kwargs.get('name', func.__name__)
        self.desc = kwargs.get('help')
        self.guild_ids = kwargs.get('guild_ids', [])
        self._is_global = not self.guild_ids
        _spec = getfullargspec(func)
        # BUGFIX: getfullargspec returns func.__annotations__ itself; the old
        # code mutated it (del), corrupting the decorated function. Copy it.
        _raw_args = dict(_spec.annotations)
        _defaults = _spec.defaults or ()
        self._cog = 'self' in _spec.args
        # The return annotation is not a command option.
        _raw_args.pop('return', None)
        # BUGFIX: drop the 'ctx' parameter by name (first arg, second for cog
        # methods) instead of blindly deleting the first annotation key, which
        # removed the wrong entry whenever ctx carried no annotation.
        _ctx_index = 1 if self._cog else 0
        if len(_spec.args) > _ctx_index:
            _raw_args.pop(_spec.args[_ctx_index], None)
        _raw_options = self._parse_raw_args(_raw_args, _defaults)
        self.options = kwargs.get('options') or self._parse_options(_raw_options)

    def __repr__(self) -> str:
        return f"SlashCommand(name={self.name} callback={self.callback} desc={self.desc} guild_ids={self.guild_ids} options={self.options})"

    def __str__(self) -> str:
        return self.name

    def _parse_options(self, options: List[Union[dict, SlashCommandOption]]) -> List[SlashCommandOption]:
        """Normalize raw option dicts into SlashCommandOption instances.

        Raises TypeError for an unknown option type or more than 25 options,
        ValueError for an option missing a name.
        """
        final = []
        for option in options:
            if isinstance(option, SlashCommandOption):
                final.append(option)
                continue
            if option.get('type', str) not in slash_cmd_option_types:
                raise TypeError(f'Unknown option type {option.get("type")}')
            if 'name' not in option:
                raise ValueError('Missing option name')
            if 'choices' in option:
                choices = [SlashCommandChoice(c['name'], c['value']) for c in option['choices']]
            else:
                choices = []
            final.append(SlashCommandOption(
                name=option['name'],
                type=slash_cmd_option_types[option.get('type', str)],
                description=option.get('help', f"Please enter a {option['name']}"),
                required=option.get('required', True),
                choices=choices
            ))
        if len(final) > 25:
            raise TypeError('Max 25 options allowed.')
        return final

    def _parse_raw_args(self, raw_args: Dict[str, Any], defaults: tuple) -> List[Union[dict, SlashCommandOption]]:
        """Turn {param_name: annotation} into raw option dicts; parameters
        that have a default value become optional."""
        # The last len(defaults) parameters are the ones carrying defaults.
        optional = set(list(raw_args)[-len(defaults):]) if defaults else set()
        final = []
        for arg, type_ in raw_args.items():
            if isinstance(type_, SlashCommandOption):
                # The author supplied a fully-built option as the annotation.
                final.append(type_)
            else:
                final.append({
                    'name': arg,
                    'type': type_,
                    'required': arg not in optional,
                })
        return final


class ContextMenuCommand:
    """Wraps a coroutine callback as a user (type 2) or message (type 3)
    context-menu command."""

    def __init__(self, func, **kwargs) -> None:
        self.callback = func
        self.name: str = kwargs.get('name', func.__name__)
        self.guild_ids: List[int] = kwargs.get('guild_ids', [])
        self._is_global: bool = not self.guild_ids
        command_type = kwargs.get('type')
        if command_type is None:
            raise TypeError('Missing command type')
        self.type: int = command_type
        _spec = getfullargspec(func)
        self._cog = 'self' in _spec.args

    def __repr__(self) -> str:
        return f"ContextMenuCommand(name='{self.name}' callback={self.callback} type={self.type})"

    def __str__(self) -> str:
        return self.name


# Global registry of every decorated application command, keyed by name.
app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = {}


def slash_command(**kwargs):
    """Decorator registering the function as a slash command."""
    def decorator(func):
        slash_cmd = SlashCommand(func, **kwargs)
        app_commands[slash_cmd.name] = slash_cmd
        return func
    return decorator


def user_command(**kwargs):
    """Decorator registering the function as a user context-menu command."""
    def decorator(func):
        user_cmd = ContextMenuCommand(func, type=2, **kwargs)
        app_commands[user_cmd.name] = user_cmd
        return func
    return decorator


def message_command(**kwargs):
    """Decorator registering the function as a message context-menu command."""
    def decorator(func):
        message_cmd = ContextMenuCommand(func, type=3, **kwargs)
        app_commands[message_cmd.name] = message_cmd
        return func
    return decorator


def get_option(name: str, options: List[SlashCommandOption]):
    """Return the option called `name`, or raise ValueError if absent."""
    for option in options:
        if option.name == name:
            return option
    raise ValueError(f'Option {name} not found')


class FakeCog:
    """Minimal stand-in passed as `self` when a command callback was declared
    inside a cog-style class."""

    def __init__(self, bot: commands.AutoShardedBot):
        self.bot = bot
        self.client = bot


async def app_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Entry point: route an interaction to the matching handler by data type
    (1 = slash command, 2 = user menu, 3 = message menu)."""
    inter_type = interaction.data.get('type')
    if inter_type is None:
        return
    inter_type = int(inter_type)
    if inter_type == 1:
        return await slash_command_handler(interaction, bot)
    elif inter_type == 2:
        return await user_command_handler(interaction, bot)
    elif inter_type == 3:
        return await message_command_handler(interaction, bot)


async def user_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Resolve the targeted member/user, build the context and invoke the
    matching user context-menu command."""
    data = interaction.data
    all_app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = bot.app_cmds
    command = all_app_commands.get(data.get('name'))
    if not isinstance(command, ContextMenuCommand):
        return
    if not command._is_global and interaction.guild_id not in command.guild_ids:
        return
    ctx = InteractionContext(interaction, bot)
    resolved = data.get("resolved", {})
    member_data = resolved.get("members", {}).get(data.get("target_id"))
    user_data = resolved.get("users", {}).get(data.get("target_id"))
    if member_data is not None:
        # Guild invocation: merge the user payload into the partial member
        # payload so discord.Member can be constructed from it.
        member_data.update({"user": user_data})
        ctx.target = discord.Member(data=member_data, guild=interaction.guild, state=ctx._state)
    else:
        # No member payload (e.g. DM invocation): fall back to a bare user.
        ctx.target = discord.User(data=user_data, state=ctx._state)
    try:
        bot.dispatch('app_command', ctx)
        if command._cog:
            await command.callback(FakeCog(bot), ctx)
        else:
            await command.callback(ctx)
        bot.dispatch('app_command_completion', ctx)
    except Exception as e:
        bot.dispatch('app_command_error', ctx, e)


async def message_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Resolve the targeted message, build the context and invoke the
    matching message context-menu command."""
    data = interaction.data
    all_app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = bot.app_cmds
    command = all_app_commands.get(data.get('name'))
    if not isinstance(command, ContextMenuCommand):
        return
    if not command._is_global and interaction.guild_id not in command.guild_ids:
        return
    ctx = InteractionContext(interaction, bot)
    message_data = data.get("resolved", {}).get("messages", {}).get(data.get("target_id"))
    ctx.target = discord.Message(data=message_data, channel=interaction.channel, state=ctx._state)
    try:
        bot.dispatch('app_command', ctx)
        if command._cog:
            await command.callback(FakeCog(bot), ctx)
        else:
            await command.callback(ctx)
        bot.dispatch('app_command_completion', ctx)
    except Exception as e:
        bot.dispatch('app_command_error', ctx, e)


async def slash_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Convert each received option value, build the context and invoke the
    matching slash command with the converted values as keyword arguments."""
    data = interaction.data
    all_app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = bot.app_cmds
    slash_cmd = all_app_commands.get(data.get('name'))
    if not isinstance(slash_cmd, SlashCommand):
        return
    if not slash_cmd._is_global and interaction.guild_id not in slash_cmd.guild_ids:
        return
    ctx = InteractionContext(interaction, bot)
    kwargs = {}
    for option in data.get('options', []):
        _opt = get_option(option['name'], slash_cmd.options)
        if _opt.type not in slash_cmd_option_converters:
            raise TypeError(f'Not known option type {_opt.type}')
        converter = slash_cmd_option_converters[_opt.type]
        if inspect.iscoroutinefunction(converter):
            # discord.py converters need the context and must be awaited.
            kwargs[_opt.name] = await converter(ctx, option['value'])
        else:
            kwargs[_opt.name] = converter(option['value'])
    try:
        bot.dispatch('app_command', ctx)
        if slash_cmd._cog:
            await slash_cmd.callback(FakeCog(bot), ctx, **kwargs)
        else:
            await slash_cmd.callback(ctx, **kwargs)
        bot.dispatch('app_command_completion', ctx)
    except Exception as e:
        bot.dispatch('app_command_error', ctx, e)


async def update_app_commands(bot: commands.AutoShardedBot):
    """Bulk-upsert every registered command with Discord: global commands in
    one call, then per-guild commands with a small delay between guilds."""
    bot.app_cmds = app_commands
    global_slash_cmds = []
    guild_slash_cmds: Dict[int, list] = {}
    for cmd_name, cmd in app_commands.items():
        cmd_payload = {
            "name": cmd_name,
            "type": 1 if isinstance(cmd, SlashCommand) else cmd.type,
        }
        if isinstance(cmd, SlashCommand):
            cmd_payload.update({
                "description": cmd.desc,
                "options": [option.to_dict() for option in cmd.options],
            })
        if not cmd.guild_ids:
            global_slash_cmds.append(cmd_payload)
        else:
            for guild_id in cmd.guild_ids:
                guild_slash_cmds.setdefault(guild_id, []).append(cmd_payload)
    await bot.http.bulk_upsert_global_commands(bot.user.id, global_slash_cmds)
    bot.dispatch('global_commands_update', global_slash_cmds)
    for guild_id, guild_commands in guild_slash_cmds.items():
        try:
            await bot.http.bulk_upsert_guild_commands(bot.user.id, guild_id, guild_commands)
            bot.dispatch('guild_commands_update', guild_commands, guild_id)
            # Light throttle to stay clear of HTTP rate limits.
            await asyncio.sleep(0.5)
        except discord.Forbidden:
            print(f"Unable to update guild commands for guild ID: {guild_id}\nPlease re-add the bot to that guild using the application.commands scope.")
        except Exception as e:
            traceback.print_exception(type(e), e, e.__traceback__)
handler/app_commands.py
import asyncio
import discord
import inspect
import traceback
from typing import Any, Dict, List, Union
from inspect import getfullargspec
from discord.ext import commands

# Maps Python annotation types to Discord application-command option type codes.
slash_cmd_option_types = {
    str: 3,
    int: 4,
    bool: 5,
    discord.Member: 6,
    discord.TextChannel: 7,
    discord.CategoryChannel: 7,
    discord.VoiceChannel: 7,
    discord.Thread: 7,
    discord.StageChannel: 7,
    discord.Role: 8,
    # TODO: 9 is MENTIONABLE, add that somehow
    float: 10,
}

# Maps Discord option type codes to converters applied to received raw values.
# The discord.py converter methods (6/7/8) are coroutines taking (ctx, value);
# the plain builtins take just the value. slash_command_handler handles both.
slash_cmd_option_converters = {
    3: str,
    4: int,
    5: bool,
    6: commands.MemberConverter().convert,
    7: commands.GuildChannelConverter().convert,
    8: commands.RoleConverter().convert,
    # TODO: 9 is MENTIONABLE; no converter wired up yet
    10: float,
}


class InteractionContext(discord.Interaction):
    """An Interaction enriched with the bot, the matched command object and,
    for context-menu commands, the targeted message/member/user."""

    def __init__(self, interaction: discord.Interaction, bot: commands.AutoShardedBot):
        super().__init__(data=interaction._raw_data, state=interaction._state)
        self.bot = bot
        self.command: Union[SlashCommand, ContextMenuCommand] = self.bot.app_cmds[interaction.data.get('name')]
        # Populated by the user/message command handlers before dispatch.
        self.target: Union[discord.Message, discord.Member, discord.User] = None


class SlashCommandChoice:
    """A single fixed choice offered for a slash-command option."""

    def __init__(self, name: str, value: Union[str, int, float]):
        self.name = name
        self.value = value

    def to_dict(self) -> dict:
        """Serialize to the payload shape the Discord API expects."""
        return {"name": self.name, "value": self.value}

    def __repr__(self) -> str:
        return f"SlashCommandChoice(name={self.name} value={self.value})"


class SlashCommandOption:
    """One option (argument) of a slash command."""

    def __init__(
        self,
        name: str,
        type: int,
        description: str,
        required: bool = True,
        choices: List[SlashCommandChoice] = None
    ):
        self.name = name
        self.type = type
        self.description = description
        self.required = required
        self.choices = choices or []

    def to_dict(self) -> dict:
        """Serialize to the payload shape the Discord API expects."""
        final = {
            "name": self.name,
            "type": self.type,
            "description": self.description,
            "required": self.required
        }
        if self.choices:
            final.update({"choices": [choice.to_dict() for choice in self.choices]})
        return final

    def __repr__(self) -> str:
        return f"SlashCommandOption(name='{self.name}' type={self.type} description='{self.description}' required={self.required} choices={self.choices})"


class SlashCommand:
    """Wraps a coroutine callback as a slash command, deriving its options
    from the callback's signature annotations (or an explicit options kwarg)."""

    def __init__(self, func, **kwargs) -> None:
        self.callback = func
        self.name = kwargs.get('name', func.__name__)
        self.desc = kwargs.get('help')
        self.guild_ids = kwargs.get('guild_ids', [])
        self._is_global = not self.guild_ids
        _spec = getfullargspec(func)
        # BUGFIX: getfullargspec returns func.__annotations__ itself; the old
        # code mutated it (del), corrupting the decorated function. Copy it.
        _raw_args = dict(_spec.annotations)
        _defaults = _spec.defaults or ()
        self._cog = 'self' in _spec.args
        # The return annotation is not a command option.
        _raw_args.pop('return', None)
        # BUGFIX: drop the 'ctx' parameter by name (first arg, second for cog
        # methods) instead of blindly deleting the first annotation key, which
        # removed the wrong entry whenever ctx carried no annotation.
        _ctx_index = 1 if self._cog else 0
        if len(_spec.args) > _ctx_index:
            _raw_args.pop(_spec.args[_ctx_index], None)
        _raw_options = self._parse_raw_args(_raw_args, _defaults)
        self.options = kwargs.get('options') or self._parse_options(_raw_options)

    def __repr__(self) -> str:
        return f"SlashCommand(name={self.name} callback={self.callback} desc={self.desc} guild_ids={self.guild_ids} options={self.options})"

    def __str__(self) -> str:
        return self.name

    def _parse_options(self, options: List[Union[dict, SlashCommandOption]]) -> List[SlashCommandOption]:
        """Normalize raw option dicts into SlashCommandOption instances.

        Raises TypeError for an unknown option type or more than 25 options,
        ValueError for an option missing a name.
        """
        final = []
        for option in options:
            if isinstance(option, SlashCommandOption):
                final.append(option)
                continue
            if option.get('type', str) not in slash_cmd_option_types:
                raise TypeError(f'Unknown option type {option.get("type")}')
            if 'name' not in option:
                raise ValueError('Missing option name')
            if 'choices' in option:
                choices = [SlashCommandChoice(c['name'], c['value']) for c in option['choices']]
            else:
                choices = []
            final.append(SlashCommandOption(
                name=option['name'],
                type=slash_cmd_option_types[option.get('type', str)],
                description=option.get('help', f"Please enter a {option['name']}"),
                required=option.get('required', True),
                choices=choices
            ))
        if len(final) > 25:
            raise TypeError('Max 25 options allowed.')
        return final

    def _parse_raw_args(self, raw_args: Dict[str, Any], defaults: tuple) -> List[Union[dict, SlashCommandOption]]:
        """Turn {param_name: annotation} into raw option dicts; parameters
        that have a default value become optional."""
        # The last len(defaults) parameters are the ones carrying defaults.
        optional = set(list(raw_args)[-len(defaults):]) if defaults else set()
        final = []
        for arg, type_ in raw_args.items():
            if isinstance(type_, SlashCommandOption):
                # The author supplied a fully-built option as the annotation.
                final.append(type_)
            else:
                final.append({
                    'name': arg,
                    'type': type_,
                    'required': arg not in optional,
                })
        return final


class ContextMenuCommand:
    """Wraps a coroutine callback as a user (type 2) or message (type 3)
    context-menu command."""

    def __init__(self, func, **kwargs) -> None:
        self.callback = func
        self.name: str = kwargs.get('name', func.__name__)
        self.guild_ids: List[int] = kwargs.get('guild_ids', [])
        self._is_global: bool = not self.guild_ids
        command_type = kwargs.get('type')
        if command_type is None:
            raise TypeError('Missing command type')
        self.type: int = command_type
        _spec = getfullargspec(func)
        self._cog = 'self' in _spec.args

    def __repr__(self) -> str:
        return f"ContextMenuCommand(name='{self.name}' callback={self.callback} type={self.type})"

    def __str__(self) -> str:
        return self.name


# Global registry of every decorated application command, keyed by name.
app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = {}


def slash_command(**kwargs):
    """Decorator registering the function as a slash command."""
    def decorator(func):
        slash_cmd = SlashCommand(func, **kwargs)
        app_commands[slash_cmd.name] = slash_cmd
        return func
    return decorator


def user_command(**kwargs):
    """Decorator registering the function as a user context-menu command."""
    def decorator(func):
        user_cmd = ContextMenuCommand(func, type=2, **kwargs)
        app_commands[user_cmd.name] = user_cmd
        return func
    return decorator


def message_command(**kwargs):
    """Decorator registering the function as a message context-menu command."""
    def decorator(func):
        message_cmd = ContextMenuCommand(func, type=3, **kwargs)
        app_commands[message_cmd.name] = message_cmd
        return func
    return decorator


def get_option(name: str, options: List[SlashCommandOption]):
    """Return the option called `name`, or raise ValueError if absent."""
    for option in options:
        if option.name == name:
            return option
    raise ValueError(f'Option {name} not found')


class FakeCog:
    """Minimal stand-in passed as `self` when a command callback was declared
    inside a cog-style class."""

    def __init__(self, bot: commands.AutoShardedBot):
        self.bot = bot
        self.client = bot


async def app_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Entry point: route an interaction to the matching handler by data type
    (1 = slash command, 2 = user menu, 3 = message menu)."""
    inter_type = interaction.data.get('type')
    if inter_type is None:
        return
    inter_type = int(inter_type)
    if inter_type == 1:
        return await slash_command_handler(interaction, bot)
    elif inter_type == 2:
        return await user_command_handler(interaction, bot)
    elif inter_type == 3:
        return await message_command_handler(interaction, bot)


async def user_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Resolve the targeted member/user, build the context and invoke the
    matching user context-menu command."""
    data = interaction.data
    all_app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = bot.app_cmds
    command = all_app_commands.get(data.get('name'))
    if not isinstance(command, ContextMenuCommand):
        return
    if not command._is_global and interaction.guild_id not in command.guild_ids:
        return
    ctx = InteractionContext(interaction, bot)
    resolved = data.get("resolved", {})
    member_data = resolved.get("members", {}).get(data.get("target_id"))
    user_data = resolved.get("users", {}).get(data.get("target_id"))
    if member_data is not None:
        # Guild invocation: merge the user payload into the partial member
        # payload so discord.Member can be constructed from it.
        member_data.update({"user": user_data})
        ctx.target = discord.Member(data=member_data, guild=interaction.guild, state=ctx._state)
    else:
        # No member payload (e.g. DM invocation): fall back to a bare user.
        ctx.target = discord.User(data=user_data, state=ctx._state)
    try:
        bot.dispatch('app_command', ctx)
        if command._cog:
            await command.callback(FakeCog(bot), ctx)
        else:
            await command.callback(ctx)
        bot.dispatch('app_command_completion', ctx)
    except Exception as e:
        bot.dispatch('app_command_error', ctx, e)


async def message_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Resolve the targeted message, build the context and invoke the
    matching message context-menu command."""
    data = interaction.data
    all_app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = bot.app_cmds
    command = all_app_commands.get(data.get('name'))
    if not isinstance(command, ContextMenuCommand):
        return
    if not command._is_global and interaction.guild_id not in command.guild_ids:
        return
    ctx = InteractionContext(interaction, bot)
    message_data = data.get("resolved", {}).get("messages", {}).get(data.get("target_id"))
    ctx.target = discord.Message(data=message_data, channel=interaction.channel, state=ctx._state)
    try:
        bot.dispatch('app_command', ctx)
        if command._cog:
            await command.callback(FakeCog(bot), ctx)
        else:
            await command.callback(ctx)
        bot.dispatch('app_command_completion', ctx)
    except Exception as e:
        bot.dispatch('app_command_error', ctx, e)


async def slash_command_handler(interaction: discord.Interaction, bot: commands.AutoShardedBot):
    """Convert each received option value, build the context and invoke the
    matching slash command with the converted values as keyword arguments."""
    data = interaction.data
    all_app_commands: Dict[str, Union[SlashCommand, ContextMenuCommand]] = bot.app_cmds
    slash_cmd = all_app_commands.get(data.get('name'))
    if not isinstance(slash_cmd, SlashCommand):
        return
    if not slash_cmd._is_global and interaction.guild_id not in slash_cmd.guild_ids:
        return
    ctx = InteractionContext(interaction, bot)
    kwargs = {}
    for option in data.get('options', []):
        _opt = get_option(option['name'], slash_cmd.options)
        if _opt.type not in slash_cmd_option_converters:
            raise TypeError(f'Not known option type {_opt.type}')
        converter = slash_cmd_option_converters[_opt.type]
        if inspect.iscoroutinefunction(converter):
            # discord.py converters need the context and must be awaited.
            kwargs[_opt.name] = await converter(ctx, option['value'])
        else:
            kwargs[_opt.name] = converter(option['value'])
    try:
        bot.dispatch('app_command', ctx)
        if slash_cmd._cog:
            await slash_cmd.callback(FakeCog(bot), ctx, **kwargs)
        else:
            await slash_cmd.callback(ctx, **kwargs)
        bot.dispatch('app_command_completion', ctx)
    except Exception as e:
        bot.dispatch('app_command_error', ctx, e)


async def update_app_commands(bot: commands.AutoShardedBot):
    """Bulk-upsert every registered command with Discord: global commands in
    one call, then per-guild commands with a small delay between guilds."""
    bot.app_cmds = app_commands
    global_slash_cmds = []
    guild_slash_cmds: Dict[int, list] = {}
    for cmd_name, cmd in app_commands.items():
        cmd_payload = {
            "name": cmd_name,
            "type": 1 if isinstance(cmd, SlashCommand) else cmd.type,
        }
        if isinstance(cmd, SlashCommand):
            cmd_payload.update({
                "description": cmd.desc,
                "options": [option.to_dict() for option in cmd.options],
            })
        if not cmd.guild_ids:
            global_slash_cmds.append(cmd_payload)
        else:
            for guild_id in cmd.guild_ids:
                guild_slash_cmds.setdefault(guild_id, []).append(cmd_payload)
    await bot.http.bulk_upsert_global_commands(bot.user.id, global_slash_cmds)
    bot.dispatch('global_commands_update', global_slash_cmds)
    for guild_id, guild_commands in guild_slash_cmds.items():
        try:
            await bot.http.bulk_upsert_guild_commands(bot.user.id, guild_id, guild_commands)
            bot.dispatch('guild_commands_update', guild_commands, guild_id)
            # Light throttle to stay clear of HTTP rate limits.
            await asyncio.sleep(0.5)
        except discord.Forbidden:
            print(f"Unable to update guild commands for guild ID: {guild_id}\nPlease re-add the bot to that guild using the application.commands scope.")
        except Exception as e:
            traceback.print_exception(type(e), e, e.__traceback__)
0.417153
0.122025
import enum from typing import Mapping, MutableSequence, Union import enumtables from sqlalchemy import Column, DateTime, ForeignKey, Integer, JSON, Table from sqlalchemy.orm import backref, relationship from aspen.database.models.base import base, idbase from aspen.database.models.entity import _WORKFLOW_TABLENAME, Entity from aspen.database.models.enum import Enum from aspen.database.models.usergroup import User class WorkflowType(enum.Enum): PROCESS_GISAID_DUMP = "PROCESS_GISAID_DUMP" ALIGN_GISAID_DUMP = "ALIGN_GISAID_DUMP" PHYLO_RUN = "PHYLO_RUN" # Create the enumeration table # Pass your enum class and the SQLAlchemy declarative base to enumtables.EnumTable _WorkflowTypeTable = enumtables.EnumTable( WorkflowType, base, tablename="workflow_types", ) class WorkflowStatusType(enum.Enum): STARTED = "STARTED" FAILED = "FAILED" COMPLETED = "COMPLETED" # Create the enumeration table # Pass your enum class and the SQLAlchemy declarative base to enumtables.EnumTable _WorkflowStatusTypeTable = enumtables.EnumTable( WorkflowStatusType, base, tablename="workflow_status_types", ) WorkflowInputs = Table( "workflow_inputs", base.metadata, # type: ignore Column("entity_id", ForeignKey(Entity.id), primary_key=True), Column("workflow_id", ForeignKey(f"{_WORKFLOW_TABLENAME}.id"), primary_key=True), ) class Workflow(idbase): # type: ignore """A workflow describes some computational process undertaken on this system that takes one or more entities as inputs and produces one or more entities as outputs.""" __tablename__ = _WORKFLOW_TABLENAME workflow_type = Column( Enum(WorkflowType), ForeignKey(_WorkflowTypeTable.item_id), nullable=False, ) __mapper_args__: Mapping[str, Union[WorkflowType, Column]] = { "polymorphic_on": workflow_type } start_datetime = Column( DateTime, nullable=True, comment="datetime when the workflow is started." ) end_datetime = Column( DateTime, nullable=True, comment="datetime when the workflow is ended. 
this is only valid when the workflow's status is COMPLETED.", ) workflow_status = Column( Enum(WorkflowStatusType), ForeignKey(_WorkflowStatusTypeTable.item_id), nullable=False, ) software_versions = Column( JSON, nullable=False, comment=( "A mapping between all the tools used in this workflow and the version " "used." ), ) inputs = relationship( # type: ignore Entity, secondary=WorkflowInputs, backref="consuming_workflows", uselist=True, ) outputs: MutableSequence[Entity] user_id = Column( Integer, ForeignKey(User.id), nullable=True, ) user = relationship(User, backref=backref("workflows", uselist=True)) # type: ignore class SoftwareNames(str, enum.Enum): """Keys to be used in the software versions map (see Workflow.software_versions). Naming the versions consistently helps us ensure we're always using the same name to identify a particular piece of software.""" ASPEN_WORKFLOW = "aspen_workflow" """This is the version of the aspen codebase from which the workflow is retrieved. Generally speaking, the workflow revision is going to be the same as the creation revision, but it can be different. For instance, if the DB schema is updated between the time the workflow starts and when it finishes, we may update the tag. This way, the code reflects the state the database.""" ASPEN_CREATION = "aspen_creation" """This is the version of the aspen codebase that is used to save the objects.""" NCOV_INGEST = "ncov_ingest" """This is the version of the ncov-ingest repo (https://github.com/nextstrain/ncov-ingest/) for transforming the raw gisaid dumps into the sequence and metadata files.""" NCOV = "ncov" """This is the version of the ncov repo (https://github.com/nextstrain/ncov/) for aligning gisaid dumps and for nextstrain runs.""" ASPEN_DOCKER_IMAGE = "aspen_docker_image" """This is the version of the aspen docker image. This includes the nextstrain tools used for gisaid alignment."""
src/backend/aspen/database/models/workflow.py
import enum from typing import Mapping, MutableSequence, Union import enumtables from sqlalchemy import Column, DateTime, ForeignKey, Integer, JSON, Table from sqlalchemy.orm import backref, relationship from aspen.database.models.base import base, idbase from aspen.database.models.entity import _WORKFLOW_TABLENAME, Entity from aspen.database.models.enum import Enum from aspen.database.models.usergroup import User class WorkflowType(enum.Enum): PROCESS_GISAID_DUMP = "PROCESS_GISAID_DUMP" ALIGN_GISAID_DUMP = "ALIGN_GISAID_DUMP" PHYLO_RUN = "PHYLO_RUN" # Create the enumeration table # Pass your enum class and the SQLAlchemy declarative base to enumtables.EnumTable _WorkflowTypeTable = enumtables.EnumTable( WorkflowType, base, tablename="workflow_types", ) class WorkflowStatusType(enum.Enum): STARTED = "STARTED" FAILED = "FAILED" COMPLETED = "COMPLETED" # Create the enumeration table # Pass your enum class and the SQLAlchemy declarative base to enumtables.EnumTable _WorkflowStatusTypeTable = enumtables.EnumTable( WorkflowStatusType, base, tablename="workflow_status_types", ) WorkflowInputs = Table( "workflow_inputs", base.metadata, # type: ignore Column("entity_id", ForeignKey(Entity.id), primary_key=True), Column("workflow_id", ForeignKey(f"{_WORKFLOW_TABLENAME}.id"), primary_key=True), ) class Workflow(idbase): # type: ignore """A workflow describes some computational process undertaken on this system that takes one or more entities as inputs and produces one or more entities as outputs.""" __tablename__ = _WORKFLOW_TABLENAME workflow_type = Column( Enum(WorkflowType), ForeignKey(_WorkflowTypeTable.item_id), nullable=False, ) __mapper_args__: Mapping[str, Union[WorkflowType, Column]] = { "polymorphic_on": workflow_type } start_datetime = Column( DateTime, nullable=True, comment="datetime when the workflow is started." ) end_datetime = Column( DateTime, nullable=True, comment="datetime when the workflow is ended. 
this is only valid when the workflow's status is COMPLETED.", ) workflow_status = Column( Enum(WorkflowStatusType), ForeignKey(_WorkflowStatusTypeTable.item_id), nullable=False, ) software_versions = Column( JSON, nullable=False, comment=( "A mapping between all the tools used in this workflow and the version " "used." ), ) inputs = relationship( # type: ignore Entity, secondary=WorkflowInputs, backref="consuming_workflows", uselist=True, ) outputs: MutableSequence[Entity] user_id = Column( Integer, ForeignKey(User.id), nullable=True, ) user = relationship(User, backref=backref("workflows", uselist=True)) # type: ignore class SoftwareNames(str, enum.Enum): """Keys to be used in the software versions map (see Workflow.software_versions). Naming the versions consistently helps us ensure we're always using the same name to identify a particular piece of software.""" ASPEN_WORKFLOW = "aspen_workflow" """This is the version of the aspen codebase from which the workflow is retrieved. Generally speaking, the workflow revision is going to be the same as the creation revision, but it can be different. For instance, if the DB schema is updated between the time the workflow starts and when it finishes, we may update the tag. This way, the code reflects the state the database.""" ASPEN_CREATION = "aspen_creation" """This is the version of the aspen codebase that is used to save the objects.""" NCOV_INGEST = "ncov_ingest" """This is the version of the ncov-ingest repo (https://github.com/nextstrain/ncov-ingest/) for transforming the raw gisaid dumps into the sequence and metadata files.""" NCOV = "ncov" """This is the version of the ncov repo (https://github.com/nextstrain/ncov/) for aligning gisaid dumps and for nextstrain runs.""" ASPEN_DOCKER_IMAGE = "aspen_docker_image" """This is the version of the aspen docker image. This includes the nextstrain tools used for gisaid alignment."""
0.794225
0.242996
import os import numpy as np from OpenGL.GL import * import lib.easy_shaders as es import lib.object_handler as oh import lib.transformations as tr class Floor(): def __init__(self): self.GPU = es.toGPUShape(oh.readOBJ2(os.path.join('mod','tex','ground.obj'), os.path.join('mod','tex','ground.png')), GL_REPEAT, GL_NEAREST) self.s = 100 self.x,self.y, self.z = 0, 0, -self.s self.transform = np.matmul(tr.translate(self.x,self.y,self.z),tr.uniformScale(self.s)) def draw(self, pipeline, projection, view, food): if food.status == 'pikachu': glUseProgram(pipeline.shaderProgram) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "La"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ld"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ls"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ka"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Kd"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ks"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "lightPosition"), food.x, food.y, food.z) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "viewPosition"), self.x, self.y ,self.z) glUniform1ui(glGetUniformLocation(pipeline.shaderProgram, "shininess"), 10000) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "constantAttenuation"), 0.1) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "linearAttenuation"), 0.000001) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "quadraticAttenuation"), 0.0000001) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "model"), 1, GL_TRUE, self.transform) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "projection"), 1, GL_TRUE, projection) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "view"), 1, GL_TRUE, view) pipeline.drawShape(self.GPU) else: glUseProgram(pipeline.shaderProgram) 
glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "La"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ld"), 1.0, 1.0, 1.0) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ls"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ka"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Kd"), 1.0, 1.0, 1.0) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ks"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "lightPosition"), 0, 0, 50) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "viewPosition"), self.x, self.y ,self.z) glUniform1ui(glGetUniformLocation(pipeline.shaderProgram, "shininess"), 10000) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "constantAttenuation"), 0.0001) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "linearAttenuation"), 0.0001) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "quadraticAttenuation"), 0.001) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "model"), 1, GL_TRUE, self.transform) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "projection"), 1, GL_TRUE, projection) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "view"), 1, GL_TRUE, view) pipeline.drawShape(self.GPU)
tarea2c/mod/floor.py
import os import numpy as np from OpenGL.GL import * import lib.easy_shaders as es import lib.object_handler as oh import lib.transformations as tr class Floor(): def __init__(self): self.GPU = es.toGPUShape(oh.readOBJ2(os.path.join('mod','tex','ground.obj'), os.path.join('mod','tex','ground.png')), GL_REPEAT, GL_NEAREST) self.s = 100 self.x,self.y, self.z = 0, 0, -self.s self.transform = np.matmul(tr.translate(self.x,self.y,self.z),tr.uniformScale(self.s)) def draw(self, pipeline, projection, view, food): if food.status == 'pikachu': glUseProgram(pipeline.shaderProgram) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "La"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ld"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ls"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ka"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Kd"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ks"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "lightPosition"), food.x, food.y, food.z) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "viewPosition"), self.x, self.y ,self.z) glUniform1ui(glGetUniformLocation(pipeline.shaderProgram, "shininess"), 10000) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "constantAttenuation"), 0.1) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "linearAttenuation"), 0.000001) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "quadraticAttenuation"), 0.0000001) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "model"), 1, GL_TRUE, self.transform) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "projection"), 1, GL_TRUE, projection) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "view"), 1, GL_TRUE, view) pipeline.drawShape(self.GPU) else: glUseProgram(pipeline.shaderProgram) 
glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "La"), 0.9, 0.9, 0.9) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ld"), 1.0, 1.0, 1.0) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ls"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ka"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Kd"), 1.0, 1.0, 1.0) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "Ks"), 0.2, 0.2, 0.2) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "lightPosition"), 0, 0, 50) glUniform3f(glGetUniformLocation(pipeline.shaderProgram, "viewPosition"), self.x, self.y ,self.z) glUniform1ui(glGetUniformLocation(pipeline.shaderProgram, "shininess"), 10000) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "constantAttenuation"), 0.0001) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "linearAttenuation"), 0.0001) glUniform1f(glGetUniformLocation(pipeline.shaderProgram, "quadraticAttenuation"), 0.001) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "model"), 1, GL_TRUE, self.transform) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "projection"), 1, GL_TRUE, projection) glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, "view"), 1, GL_TRUE, view) pipeline.drawShape(self.GPU)
0.371137
0.430866
import os import pyudev import psutil import logging from arm.ui import db from arm.config.config import cfg from flask_login import LoginManager, current_user, login_user, UserMixin # noqa: F401 from prettytable import PrettyTable hidden_attribs = ("OMDB_API_KEY", "EMBY_USERID", "EMBY_PASSWORD", "EMBY_API_KEY", "PB_KEY", "IFTTT_KEY", "PO_KEY", "PO_USER_KEY", "PO_APP_KEY", "ARM_API_KEY", "TMDB_API_KEY") HIDDEN_VALUE = "<hidden>" class Job(db.Model): job_id = db.Column(db.Integer, primary_key=True) arm_version = db.Column(db.String(20)) crc_id = db.Column(db.String(63)) logfile = db.Column(db.String(256)) start_time = db.Column(db.DateTime) stop_time = db.Column(db.DateTime) job_length = db.Column(db.String(12)) status = db.Column(db.String(32)) no_of_titles = db.Column(db.Integer) title = db.Column(db.String(256)) title_auto = db.Column(db.String(256)) title_manual = db.Column(db.String(256)) year = db.Column(db.String(4)) year_auto = db.Column(db.String(4)) year_manual = db.Column(db.String(4)) video_type = db.Column(db.String(20)) video_type_auto = db.Column(db.String(20)) video_type_manual = db.Column(db.String(20)) imdb_id = db.Column(db.String(15)) imdb_id_auto = db.Column(db.String(15)) imdb_id_manual = db.Column(db.String(15)) poster_url = db.Column(db.String(256)) poster_url_auto = db.Column(db.String(256)) poster_url_manual = db.Column(db.String(256)) devpath = db.Column(db.String(15)) mountpoint = db.Column(db.String(20)) hasnicetitle = db.Column(db.Boolean) errors = db.Column(db.Text) disctype = db.Column(db.String(20)) # dvd/bluray/data/music/unknown label = db.Column(db.String(256)) path = db.Column(db.String(256)) ejected = db.Column(db.Boolean) updated = db.Column(db.Boolean) pid = db.Column(db.Integer) pid_hash = db.Column(db.Integer) tracks = db.relationship('Track', backref='job', lazy='dynamic') config = db.relationship('Config', uselist=False, backref="job") def __init__(self, devpath): """Return a disc object""" self.devpath = devpath 
self.mountpoint = "/mnt" + devpath self.hasnicetitle = False self.video_type = "unknown" self.ejected = False self.updated = False if cfg['VIDEOTYPE'] != "auto": self.video_type = cfg['VIDEOTYPE'] self.parse_udev() self.get_pid() def parse_udev(self): """Parse udev for properties of current disc""" context = pyudev.Context() device = pyudev.Devices.from_device_file(context, self.devpath) self.disctype = "unknown" for key, value in device.items(): if key == "ID_FS_LABEL": self.label = value if value == "iso9660": self.disctype = "data" elif key == "ID_CDROM_MEDIA_BD": self.disctype = "bluray" elif key == "ID_CDROM_MEDIA_DVD": self.disctype = "dvd" elif key == "ID_CDROM_MEDIA_TRACK_COUNT_AUDIO": self.disctype = "music" else: pass def get_pid(self): pid = os.getpid() p = psutil.Process(pid) self.pid = pid self.pid_hash = hash(p) def __str__(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): s = s + "(" + str(attr) + "=" + str(value) + ") " return s def pretty_table(self): """Returns a string of the prettytable""" x = PrettyTable() x.field_names = ["Config", "Value"] x._max_width = {"Config": 50, "Value": 60} for attr, value in self.__dict__.items(): if attr == "config": x.add_row([str(attr), str(value.pretty_table())]) else: x.add_row([str(attr), str(value)]) return str(x.get_string()) def get_d(self): r = {} for key, value in self.__dict__.items(): if '_sa_instance_state' not in key: r[str(key)] = str(value) return r def __repr__(self): return '<Job {}>'.format(self.label) def eject(self): """Eject disc if it hasn't previously been ejected""" try: if os.system("umount " + self.devpath): logging.debug("we unmounted disc" + self.devpath) if os.system("eject " + self.devpath): logging.debug("we ejected disc" + self.devpath) self.ejected = True else: logging.debug("failed to eject" + self.devpath) except Exception as e: self.ejected = False logging.debug(self.devpath + " couldn't be ejected " + str(e)) 
class Track(db.Model): track_id = db.Column(db.Integer, primary_key=True) job_id = db.Column(db.Integer, db.ForeignKey('job.job_id')) track_number = db.Column(db.String(4)) length = db.Column(db.Integer) aspect_ratio = db.Column(db.String(20)) fps = db.Column(db.Float) main_feature = db.Column(db.Boolean) basename = db.Column(db.String(256)) filename = db.Column(db.String(256)) orig_filename = db.Column(db.String(256)) new_filename = db.Column(db.String(256)) ripped = db.Column(db.Boolean) status = db.Column(db.String(32)) error = db.Column(db.Text) source = db.Column(db.String(32)) def __init__(self, job_id, track_number, length, aspect_ratio, fps, main_feature, source, basename, filename): """Return a track object""" self.job_id = job_id self.track_number = track_number self.length = length self.aspect_ratio = aspect_ratio self.fps = fps self.main_feature = main_feature self.source = source self.basename = basename self.filename = filename self.ripped = False def __repr__(self): return '<Post {}>'.format(self.track_number) class Config(db.Model): CONFIG_ID = db.Column(db.Integer, primary_key=True) job_id = db.Column(db.Integer, db.ForeignKey('job.job_id')) ARM_CHECK_UDF = db.Column(db.Boolean) GET_VIDEO_TITLE = db.Column(db.Boolean) SKIP_TRANSCODE = db.Column(db.Boolean) VIDEOTYPE = db.Column(db.String(25)) MINLENGTH = db.Column(db.String(6)) MAXLENGTH = db.Column(db.String(6)) MANUAL_WAIT = db.Column(db.Boolean) MANUAL_WAIT_TIME = db.Column(db.Integer) RAW_PATH = db.Column(db.String(255)) TRANSCODE_PATH = db.Column(db.String(255)) COMPLETED_PATH = db.Column(db.String(255)) EXTRAS_SUB = db.Column(db.String(255)) INSTALLPATH = db.Column(db.String(255)) LOGPATH = db.Column(db.String(255)) LOGLEVEL = db.Column(db.String(255)) LOGLIFE = db.Column(db.Integer) DBFILE = db.Column(db.String(255)) WEBSERVER_IP = db.Column(db.String(25)) WEBSERVER_PORT = db.Column(db.Integer) SET_MEDIA_PERMISSIONS = db.Column(db.Boolean) CHMOD_VALUE = db.Column(db.Integer) SET_MEDIA_OWNER 
= db.Column(db.Boolean) CHOWN_USER = db.Column(db.String(50)) CHOWN_GROUP = db.Column(db.String(50)) RIPMETHOD = db.Column(db.String(25)) MKV_ARGS = db.Column(db.String(25)) DELRAWFILES = db.Column(db.Boolean) HASHEDKEYS = db.Column(db.Boolean) HB_PRESET_DVD = db.Column(db.String(256)) HB_PRESET_BD = db.Column(db.String(256)) DEST_EXT = db.Column(db.String(10)) HANDBRAKE_CLI = db.Column(db.String(25)) MAINFEATURE = db.Column(db.Boolean) HB_ARGS_DVD = db.Column(db.String(256)) HB_ARGS_BD = db.Column(db.String(256)) EMBY_REFRESH = db.Column(db.Boolean) EMBY_SERVER = db.Column(db.String(25)) EMBY_PORT = db.Column(db.String(6)) EMBY_CLIENT = db.Column(db.String(25)) EMBY_DEVICE = db.Column(db.String(50)) EMBY_DEVICEID = db.Column(db.String(128)) EMBY_USERNAME = db.Column(db.String(50)) EMBY_USERID = db.Column(db.String(128)) EMBY_PASSWORD = db.Column(db.String(128)) EMBY_API_KEY = db.Column(db.String(64)) NOTIFY_RIP = db.Column(db.Boolean) NOTIFY_TRANSCODE = db.Column(db.Boolean) PB_KEY = db.Column(db.String(64)) IFTTT_KEY = db.Column(db.String(64)) IFTTT_EVENT = db.Column(db.String(25)) PO_USER_KEY = db.Column(db.String(64)) PO_APP_KEY = db.Column(db.String(64)) OMDB_API_KEY = db.Column(db.String(64)) def __init__(self, c, job_id): self.__dict__.update(c) self.job_id = job_id def list_params(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): if s: s = s + "\n" if str(attr) in hidden_attribs and value: value = HIDDEN_VALUE s = s + str(attr) + ":" + str(value) return s def __str__(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): if str(attr) in hidden_attribs and value: value = HIDDEN_VALUE s = s + "(" + str(attr) + "=" + str(value) + ") " return s def pretty_table(self): """Returns a string of the prettytable""" x = PrettyTable() x.field_names = ["Config", "Value"] x._max_width = {"Config": 20, "Value": 30} for attr, value in 
self.__dict__.items(): if str(attr) in hidden_attribs and value: value = HIDDEN_VALUE x.add_row([str(attr), str(value)]) return str(x.get_string()) def get_d(self): r = {} for key, value in self.__dict__.items(): if str(key) not in hidden_attribs: r[str(key)] = str(value) return r class User(db.Model, UserMixin): user_id = db.Column(db.Integer, index=True, primary_key=True) email = db.Column(db.String(64)) password = db.Column(db.String(128)) hash = db.Column(db.String(256)) def __init__(self, email=None, password=<PASSWORD>, hashed=None): self.email = email self.password = password self.hash = hashed def __repr__(self): return '<User %r>' % (self.email) def get_id(self): return self.user_id class Alembic_version(db.Model): version_num = db.Column(db.String(36), autoincrement=False, primary_key=True) def __init__(self, version=None): self.version_num = version class UISettings(db.Model): id = db.Column(db.Integer, autoincrement=True, primary_key=True) use_icons = db.Column(db.Boolean) save_remote_images = db.Column(db.Boolean) bootstrap_skin = db.Column(db.String(64)) language = db.Column(db.String(4)) index_refresh = db.Column(db.Integer) database_limit = db.Column(db.Integer) def __init__(self, use_icons=None, save_remote_images=None, bootstrap_skin=None, language=None, index_refresh=None, database_limit=None): self.use_icons = use_icons self.save_remote_images = save_remote_images self.bootstrap_skin = bootstrap_skin self.language = language self.index_refresh = index_refresh self.database_limit = database_limit def __repr__(self): return '<UISettings %r>' % self.id def __str__(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): s = s + "(" + str(attr) + "=" + str(value) + ") " return s def get_d(self): r = {} for key, value in self.__dict__.items(): if '_sa_instance_state' not in key: r[str(key)] = str(value) return r
arm/models/models.py
import os import pyudev import psutil import logging from arm.ui import db from arm.config.config import cfg from flask_login import LoginManager, current_user, login_user, UserMixin # noqa: F401 from prettytable import PrettyTable hidden_attribs = ("OMDB_API_KEY", "EMBY_USERID", "EMBY_PASSWORD", "EMBY_API_KEY", "PB_KEY", "IFTTT_KEY", "PO_KEY", "PO_USER_KEY", "PO_APP_KEY", "ARM_API_KEY", "TMDB_API_KEY") HIDDEN_VALUE = "<hidden>" class Job(db.Model): job_id = db.Column(db.Integer, primary_key=True) arm_version = db.Column(db.String(20)) crc_id = db.Column(db.String(63)) logfile = db.Column(db.String(256)) start_time = db.Column(db.DateTime) stop_time = db.Column(db.DateTime) job_length = db.Column(db.String(12)) status = db.Column(db.String(32)) no_of_titles = db.Column(db.Integer) title = db.Column(db.String(256)) title_auto = db.Column(db.String(256)) title_manual = db.Column(db.String(256)) year = db.Column(db.String(4)) year_auto = db.Column(db.String(4)) year_manual = db.Column(db.String(4)) video_type = db.Column(db.String(20)) video_type_auto = db.Column(db.String(20)) video_type_manual = db.Column(db.String(20)) imdb_id = db.Column(db.String(15)) imdb_id_auto = db.Column(db.String(15)) imdb_id_manual = db.Column(db.String(15)) poster_url = db.Column(db.String(256)) poster_url_auto = db.Column(db.String(256)) poster_url_manual = db.Column(db.String(256)) devpath = db.Column(db.String(15)) mountpoint = db.Column(db.String(20)) hasnicetitle = db.Column(db.Boolean) errors = db.Column(db.Text) disctype = db.Column(db.String(20)) # dvd/bluray/data/music/unknown label = db.Column(db.String(256)) path = db.Column(db.String(256)) ejected = db.Column(db.Boolean) updated = db.Column(db.Boolean) pid = db.Column(db.Integer) pid_hash = db.Column(db.Integer) tracks = db.relationship('Track', backref='job', lazy='dynamic') config = db.relationship('Config', uselist=False, backref="job") def __init__(self, devpath): """Return a disc object""" self.devpath = devpath 
self.mountpoint = "/mnt" + devpath self.hasnicetitle = False self.video_type = "unknown" self.ejected = False self.updated = False if cfg['VIDEOTYPE'] != "auto": self.video_type = cfg['VIDEOTYPE'] self.parse_udev() self.get_pid() def parse_udev(self): """Parse udev for properties of current disc""" context = pyudev.Context() device = pyudev.Devices.from_device_file(context, self.devpath) self.disctype = "unknown" for key, value in device.items(): if key == "ID_FS_LABEL": self.label = value if value == "iso9660": self.disctype = "data" elif key == "ID_CDROM_MEDIA_BD": self.disctype = "bluray" elif key == "ID_CDROM_MEDIA_DVD": self.disctype = "dvd" elif key == "ID_CDROM_MEDIA_TRACK_COUNT_AUDIO": self.disctype = "music" else: pass def get_pid(self): pid = os.getpid() p = psutil.Process(pid) self.pid = pid self.pid_hash = hash(p) def __str__(self): """Returns a string of the object""" s = self.__class__.__name__ + ": " for attr, value in self.__dict__.items(): s = s + "(" + str(attr) + "=" + str(value) + ") " return s def pretty_table(self): """Returns a string of the prettytable""" x = PrettyTable() x.field_names = ["Config", "Value"] x._max_width = {"Config": 50, "Value": 60} for attr, value in self.__dict__.items(): if attr == "config": x.add_row([str(attr), str(value.pretty_table())]) else: x.add_row([str(attr), str(value)]) return str(x.get_string()) def get_d(self): r = {} for key, value in self.__dict__.items(): if '_sa_instance_state' not in key: r[str(key)] = str(value) return r def __repr__(self): return '<Job {}>'.format(self.label) def eject(self): """Eject disc if it hasn't previously been ejected""" try: if os.system("umount " + self.devpath): logging.debug("we unmounted disc" + self.devpath) if os.system("eject " + self.devpath): logging.debug("we ejected disc" + self.devpath) self.ejected = True else: logging.debug("failed to eject" + self.devpath) except Exception as e: self.ejected = False logging.debug(self.devpath + " couldn't be ejected " + str(e)) 
class Track(db.Model):
    """A single video track ripped (or to be ripped) from a Job's disc."""
    track_id = db.Column(db.Integer, primary_key=True)
    job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))
    track_number = db.Column(db.String(4))
    length = db.Column(db.Integer)
    aspect_ratio = db.Column(db.String(20))
    fps = db.Column(db.Float)
    main_feature = db.Column(db.Boolean)
    basename = db.Column(db.String(256))
    filename = db.Column(db.String(256))
    orig_filename = db.Column(db.String(256))
    new_filename = db.Column(db.String(256))
    ripped = db.Column(db.Boolean)
    status = db.Column(db.String(32))
    error = db.Column(db.Text)
    source = db.Column(db.String(32))

    def __init__(self, job_id, track_number, length, aspect_ratio, fps,
                 main_feature, source, basename, filename):
        """Return a track object"""
        self.job_id = job_id
        self.track_number = track_number
        self.length = length
        self.aspect_ratio = aspect_ratio
        self.fps = fps
        self.main_feature = main_feature
        self.source = source
        self.basename = basename
        self.filename = filename
        self.ripped = False

    def __repr__(self):
        return '<Post {}>'.format(self.track_number)


class Config(db.Model):
    """Snapshot of the ARM configuration associated with a single job."""
    CONFIG_ID = db.Column(db.Integer, primary_key=True)
    job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))
    ARM_CHECK_UDF = db.Column(db.Boolean)
    GET_VIDEO_TITLE = db.Column(db.Boolean)
    SKIP_TRANSCODE = db.Column(db.Boolean)
    VIDEOTYPE = db.Column(db.String(25))
    MINLENGTH = db.Column(db.String(6))
    MAXLENGTH = db.Column(db.String(6))
    MANUAL_WAIT = db.Column(db.Boolean)
    MANUAL_WAIT_TIME = db.Column(db.Integer)
    RAW_PATH = db.Column(db.String(255))
    TRANSCODE_PATH = db.Column(db.String(255))
    COMPLETED_PATH = db.Column(db.String(255))
    EXTRAS_SUB = db.Column(db.String(255))
    INSTALLPATH = db.Column(db.String(255))
    LOGPATH = db.Column(db.String(255))
    LOGLEVEL = db.Column(db.String(255))
    LOGLIFE = db.Column(db.Integer)
    DBFILE = db.Column(db.String(255))
    WEBSERVER_IP = db.Column(db.String(25))
    WEBSERVER_PORT = db.Column(db.Integer)
    SET_MEDIA_PERMISSIONS = db.Column(db.Boolean)
    CHMOD_VALUE = db.Column(db.Integer)
    SET_MEDIA_OWNER = db.Column(db.Boolean)
    CHOWN_USER = db.Column(db.String(50))
    CHOWN_GROUP = db.Column(db.String(50))
    RIPMETHOD = db.Column(db.String(25))
    MKV_ARGS = db.Column(db.String(25))
    DELRAWFILES = db.Column(db.Boolean)
    HASHEDKEYS = db.Column(db.Boolean)
    HB_PRESET_DVD = db.Column(db.String(256))
    HB_PRESET_BD = db.Column(db.String(256))
    DEST_EXT = db.Column(db.String(10))
    HANDBRAKE_CLI = db.Column(db.String(25))
    MAINFEATURE = db.Column(db.Boolean)
    HB_ARGS_DVD = db.Column(db.String(256))
    HB_ARGS_BD = db.Column(db.String(256))
    EMBY_REFRESH = db.Column(db.Boolean)
    EMBY_SERVER = db.Column(db.String(25))
    EMBY_PORT = db.Column(db.String(6))
    EMBY_CLIENT = db.Column(db.String(25))
    EMBY_DEVICE = db.Column(db.String(50))
    EMBY_DEVICEID = db.Column(db.String(128))
    EMBY_USERNAME = db.Column(db.String(50))
    EMBY_USERID = db.Column(db.String(128))
    EMBY_PASSWORD = db.Column(db.String(128))
    EMBY_API_KEY = db.Column(db.String(64))
    NOTIFY_RIP = db.Column(db.Boolean)
    NOTIFY_TRANSCODE = db.Column(db.Boolean)
    PB_KEY = db.Column(db.String(64))
    IFTTT_KEY = db.Column(db.String(64))
    IFTTT_EVENT = db.Column(db.String(25))
    PO_USER_KEY = db.Column(db.String(64))
    PO_APP_KEY = db.Column(db.String(64))
    OMDB_API_KEY = db.Column(db.String(64))

    def __init__(self, c, job_id):
        # Bulk-load config values from the dict `c` straight onto the
        # instance, then attach the owning job.
        self.__dict__.update(c)
        self.job_id = job_id

    def list_params(self):
        """Returns a string of the object"""
        s = self.__class__.__name__ + ": "
        for attr, value in self.__dict__.items():
            # NOTE(review): `if s` is always truthy here (s starts
            # non-empty), so every entry is newline-separated.
            if s:
                s = s + "\n"
            # Mask secrets (API keys, passwords) before display.
            if str(attr) in hidden_attribs and value:
                value = HIDDEN_VALUE
            s = s + str(attr) + ":" + str(value)
        return s

    def __str__(self):
        """Returns a string of the object"""
        s = self.__class__.__name__ + ": "
        for attr, value in self.__dict__.items():
            if str(attr) in hidden_attribs and value:
                value = HIDDEN_VALUE
            s = s + "(" + str(attr) + "=" + str(value) + ") "
        return s

    def pretty_table(self):
        """Returns a string of the prettytable"""
        x = PrettyTable()
        x.field_names = ["Config", "Value"]
        x._max_width = {"Config": 20, "Value": 30}
        for attr, value in self.__dict__.items():
            if str(attr) in hidden_attribs and value:
                value = HIDDEN_VALUE
            x.add_row([str(attr), str(value)])
        return str(x.get_string())

    def get_d(self):
        """Return config attributes as a string dict, omitting secrets."""
        r = {}
        for key, value in self.__dict__.items():
            if str(key) not in hidden_attribs:
                r[str(key)] = str(value)
        return r


class User(db.Model, UserMixin):
    """Web-UI login account."""
    user_id = db.Column(db.Integer, index=True, primary_key=True)
    email = db.Column(db.String(64))
    password = db.Column(db.String(128))
    hash = db.Column(db.String(256))

    # BUGFIX: the source contained the redaction artifact
    # `password=<PASSWORD>`, which is not valid Python; restored to the
    # `None` default used by the sibling parameters.
    def __init__(self, email=None, password=None, hashed=None):
        self.email = email
        self.password = password
        self.hash = hashed

    def __repr__(self):
        return '<User %r>' % (self.email)

    def get_id(self):
        # Flask-Login identity hook.
        return self.user_id


class Alembic_version(db.Model):
    """Mirror of Alembic's migration-version bookkeeping table."""
    version_num = db.Column(db.String(36), autoincrement=False,
                            primary_key=True)

    def __init__(self, version=None):
        self.version_num = version


class UISettings(db.Model):
    """User-tunable settings for the web interface."""
    id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    use_icons = db.Column(db.Boolean)
    save_remote_images = db.Column(db.Boolean)
    bootstrap_skin = db.Column(db.String(64))
    language = db.Column(db.String(4))
    index_refresh = db.Column(db.Integer)
    database_limit = db.Column(db.Integer)

    def __init__(self, use_icons=None, save_remote_images=None,
                 bootstrap_skin=None, language=None, index_refresh=None,
                 database_limit=None):
        self.use_icons = use_icons
        self.save_remote_images = save_remote_images
        self.bootstrap_skin = bootstrap_skin
        self.language = language
        self.index_refresh = index_refresh
        self.database_limit = database_limit

    def __repr__(self):
        return '<UISettings %r>' % self.id

    def __str__(self):
        """Returns a string of the object"""
        s = self.__class__.__name__ + ": "
        for attr, value in self.__dict__.items():
            s = s + "(" + str(attr) + "=" + str(value) + ") "
        return s

    def get_d(self):
        """Return attributes as a string dict, excluding SQLAlchemy's
        internal instance state."""
        r = {}
        for key, value in self.__dict__.items():
            if '_sa_instance_state' not in key:
                r[str(key)] = str(value)
        return r
0.399226
0.053453