hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f702ff27609b10993b2ffa9f4f5f3c2f75bb82b0
1,270
py
Python
pyvisdk/do/host_file_system_volume.py
Infinidat/pyvisdk
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
[ "MIT" ]
null
null
null
pyvisdk/do/host_file_system_volume.py
Infinidat/pyvisdk
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
[ "MIT" ]
null
null
null
pyvisdk/do/host_file_system_volume.py
Infinidat/pyvisdk
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
[ "MIT" ]
null
null
null
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def HostFileSystemVolume(vim, *args, **kwargs): '''Detailed information about a file system. This is a base type for derived types that have more specific details about specific filesystem types.Typically a FileSystem is exposed as a datatoreSee DatastoreInfoSee HostVmfsVolumeSee HostNasVolumeSee HostLocalFileSystemVolumeSee HostVfatVolume''' obj = vim.client.factory.create('{urn:vim25}HostFileSystemVolume') # do some validation checking... if (len(args) + len(kwargs)) < 3: raise IndexError('Expected at least 4 arguments got: %d' % len(args)) required = [ 'capacity', 'name', 'type' ] optional = [ 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
35.277778
124
0.649606
import logging from pyvisdk.exceptions import InvalidArgumentError log = logging.getLogger(__name__) def HostFileSystemVolume(vim, *args, **kwargs): obj = vim.client.factory.create('{urn:vim25}HostFileSystemVolume') if (len(args) + len(kwargs)) < 3: raise IndexError('Expected at least 4 arguments got: %d' % len(args)) required = [ 'capacity', 'name', 'type' ] optional = [ 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
true
true
f702ffed102d32e3915d2369681024fbb76b53f6
1,994
py
Python
backend/builder/build_db_node_mode.py
blast-eu-com/blast.eu.com
90f14c694c0cc523949b553623a631bc0b202bd0
[ "Apache-2.0" ]
null
null
null
backend/builder/build_db_node_mode.py
blast-eu-com/blast.eu.com
90f14c694c0cc523949b553623a631bc0b202bd0
[ "Apache-2.0" ]
null
null
null
backend/builder/build_db_node_mode.py
blast-eu-com/blast.eu.com
90f14c694c0cc523949b553623a631bc0b202bd0
[ "Apache-2.0" ]
null
null
null
#!../bin/python3 # -*- coding:utf-8 -*- """ Copyright 2021 Jerome DE LUCCHI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import sys import json from env import _SERVER_DIR sys.path.insert(0, _SERVER_DIR) from api import db __DATAMODEL_DIR = os.path.join(os.path.abspath('..'), 'datamodel') __DATAMODEL_NODE_MODE_FILE = os.path.join(__DATAMODEL_DIR, 'node_mode.template.mapping') __ES_ADDR = db.ES_PROTOCOL + """://""" + str(db.ES_HOSTNAME) + """:""" + str(db.ES_PORT) __CREATE_INDEX_TEMPLATE = """curl -s -XPUT -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/_template/blast_node_mode -d@""" + __DATAMODEL_NODE_MODE_FILE __NODE_MODES = [ {"name": "maintenance"}, {"name": "pause"}, {"name": "running"} ] def defineIndexTemplate(): try: if json.load(os.popen(__CREATE_INDEX_TEMPLATE))["acknowledged"]: return True except KeyError: return False def provisionDefault(): try: for mode in __NODE_MODES: __ES_PROVISION_DEFAULT = """curl -s -XPOST -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/blast_node_mode/_doc -d \'""" + json.dumps(mode) + """\'""" if not json.load(os.popen(__ES_PROVISION_DEFAULT))["result"] == "created": return False return True except KeyError: return False def main(): if defineIndexTemplate(): if provisionDefault(): sys.exit(0) if __name__ == "__main__": main()
30.212121
174
0.664493
import os import sys import json from env import _SERVER_DIR sys.path.insert(0, _SERVER_DIR) from api import db __DATAMODEL_DIR = os.path.join(os.path.abspath('..'), 'datamodel') __DATAMODEL_NODE_MODE_FILE = os.path.join(__DATAMODEL_DIR, 'node_mode.template.mapping') __ES_ADDR = db.ES_PROTOCOL + """://""" + str(db.ES_HOSTNAME) + """:""" + str(db.ES_PORT) __CREATE_INDEX_TEMPLATE = """curl -s -XPUT -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/_template/blast_node_mode -d@""" + __DATAMODEL_NODE_MODE_FILE __NODE_MODES = [ {"name": "maintenance"}, {"name": "pause"}, {"name": "running"} ] def defineIndexTemplate(): try: if json.load(os.popen(__CREATE_INDEX_TEMPLATE))["acknowledged"]: return True except KeyError: return False def provisionDefault(): try: for mode in __NODE_MODES: __ES_PROVISION_DEFAULT = """curl -s -XPOST -H \"Content-Type: Application/Json\" """ + __ES_ADDR + """/blast_node_mode/_doc -d \'""" + json.dumps(mode) + """\'""" if not json.load(os.popen(__ES_PROVISION_DEFAULT))["result"] == "created": return False return True except KeyError: return False def main(): if defineIndexTemplate(): if provisionDefault(): sys.exit(0) if __name__ == "__main__": main()
true
true
f703000bbea298bd87b433179d83258c0772e508
756
py
Python
flask/lib/python3.4/site-packages/sqlalchemy/ext/declarative/__init__.py
ddayguerrero/blogme
e6ee6a47310c382648eefd96634630c3bceb864f
[ "MIT" ]
2
2016-04-03T06:30:45.000Z
2017-05-22T08:36:54.000Z
flask/lib/python3.4/site-packages/sqlalchemy/ext/declarative/__init__.py
ddayguerrero/blogme
e6ee6a47310c382648eefd96634630c3bceb864f
[ "MIT" ]
null
null
null
flask/lib/python3.4/site-packages/sqlalchemy/ext/declarative/__init__.py
ddayguerrero/blogme
e6ee6a47310c382648eefd96634630c3bceb864f
[ "MIT" ]
1
2021-11-04T22:07:43.000Z
2021-11-04T22:07:43.000Z
# ext/declarative/__init__.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .api import declarative_base, synonym_for, comparable_using, \ instrument_declarative, ConcreteBase, AbstractConcreteBase, \ DeclarativeMeta, DeferredReflection, has_inherited_table,\ declared_attr, as_declarative __all__ = ['declarative_base', 'synonym_for', 'has_inherited_table', 'comparable_using', 'instrument_declarative', 'declared_attr', 'as_declarative', 'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta', 'DeferredReflection']
39.789474
73
0.744709
from .api import declarative_base, synonym_for, comparable_using, \ instrument_declarative, ConcreteBase, AbstractConcreteBase, \ DeclarativeMeta, DeferredReflection, has_inherited_table,\ declared_attr, as_declarative __all__ = ['declarative_base', 'synonym_for', 'has_inherited_table', 'comparable_using', 'instrument_declarative', 'declared_attr', 'as_declarative', 'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta', 'DeferredReflection']
true
true
f70300779170cca42d3422649546d22ba7c9f52f
1,594
py
Python
eelbrain/_stats/tests/test_spm.py
reddigari/Eelbrain
6c02b99955d4b5dc7e3054042c182e1a4629b13c
[ "BSD-3-Clause" ]
null
null
null
eelbrain/_stats/tests/test_spm.py
reddigari/Eelbrain
6c02b99955d4b5dc7e3054042c182e1a4629b13c
[ "BSD-3-Clause" ]
null
null
null
eelbrain/_stats/tests/test_spm.py
reddigari/Eelbrain
6c02b99955d4b5dc7e3054042c182e1a4629b13c
[ "BSD-3-Clause" ]
null
null
null
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu> import pickle from nose.tools import eq_ import numpy as np from numpy.testing import assert_array_equal from eelbrain import datasets from eelbrain._stats.spm import LM, LMGroup def test_lm(): ds = datasets.get_uts() model = ds.eval("A*B*Y") coeffs = ds['uts'].ols(model) lm = LM('uts', 'A*B*Y', ds, 'effect') eq_(repr(lm), "<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>") for i, effect in enumerate(model.effects): assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i]) def test_random_lm(): # dummy coding ds = datasets.get_uts() lms = [] for i in range(5): ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape) lms.append(LM('uts', 'A*B*Y', ds)) rlm = LMGroup(lms) eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>') # coefficients ds = rlm.coefficients_dataset(('A', 'A x B')) eq_(ds['term'].cells, ('A', 'A x B')) # tests res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025) eq_(res.clusters.n_cases, 1) # effect coding ds = datasets.get_uts() lms = [] for i in range(5): ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape) lms.append(LM('uts', 'A*B*Y', ds, 'effect')) rlm = LMGroup(lms) res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025) eq_(res.clusters.n_cases, 6) # persistence rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL)) eq_(rlm_p.dims, rlm.dims)
30.075472
89
0.598494
import pickle from nose.tools import eq_ import numpy as np from numpy.testing import assert_array_equal from eelbrain import datasets from eelbrain._stats.spm import LM, LMGroup def test_lm(): ds = datasets.get_uts() model = ds.eval("A*B*Y") coeffs = ds['uts'].ols(model) lm = LM('uts', 'A*B*Y', ds, 'effect') eq_(repr(lm), "<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>") for i, effect in enumerate(model.effects): assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i]) def test_random_lm(): ds = datasets.get_uts() lms = [] for i in range(5): ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape) lms.append(LM('uts', 'A*B*Y', ds)) rlm = LMGroup(lms) eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>') ds = rlm.coefficients_dataset(('A', 'A x B')) eq_(ds['term'].cells, ('A', 'A x B')) res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025) eq_(res.clusters.n_cases, 1) ds = datasets.get_uts() lms = [] for i in range(5): ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape) lms.append(LM('uts', 'A*B*Y', ds, 'effect')) rlm = LMGroup(lms) res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025) eq_(res.clusters.n_cases, 6) rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL)) eq_(rlm_p.dims, rlm.dims)
true
true
f70301afabd0286d6dde0df8be28927b57a173c6
1,728
py
Python
armada/exceptions/lint_exceptions.py
sktelecom-oslab/armada
ebc71ff8eca7ecf0560493d5cdafc14e34c783c9
[ "Apache-2.0" ]
null
null
null
armada/exceptions/lint_exceptions.py
sktelecom-oslab/armada
ebc71ff8eca7ecf0560493d5cdafc14e34c783c9
[ "Apache-2.0" ]
null
null
null
armada/exceptions/lint_exceptions.py
sktelecom-oslab/armada
ebc71ff8eca7ecf0560493d5cdafc14e34c783c9
[ "Apache-2.0" ]
2
2018-05-28T13:00:42.000Z
2021-09-02T07:28:59.000Z
# Copyright 2017 The Armada Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from armada.exceptions import base_exception class LintException(base_exception.ArmadaBaseException): '''Base class for linting exceptions and errors.''' message = 'An unknown linting error occurred.' class InvalidManifestException(LintException): ''' Exception for invalid manifests. **Troubleshoot:** *Coming Soon* ''' message = 'Armada manifest invalid.' class InvalidChartNameException(LintException): '''Exception that occurs when an invalid filename is encountered.''' message = 'Chart name must be a string.' class InvalidChartDefinitionException(LintException): '''Exception when invalid chart definition is encountered.''' message = 'Invalid chart definition. Chart definition must be array.' class InvalidReleaseException(LintException): '''Exception that occurs when a release is invalid.''' message = 'Release needs to be a string.' class InvalidArmadaObjectException(LintException): ''' Exception that occurs when an Armada object is not declared. **Troubleshoot:** *Coming Soon* ''' message = 'An Armada object was not declared.'
27.870968
74
0.735532
from armada.exceptions import base_exception class LintException(base_exception.ArmadaBaseException): message = 'An unknown linting error occurred.' class InvalidManifestException(LintException): message = 'Armada manifest invalid.' class InvalidChartNameException(LintException): message = 'Chart name must be a string.' class InvalidChartDefinitionException(LintException): message = 'Invalid chart definition. Chart definition must be array.' class InvalidReleaseException(LintException): message = 'Release needs to be a string.' class InvalidArmadaObjectException(LintException): message = 'An Armada object was not declared.'
true
true
f70301cabf33a749c00bb53322ef295333499300
4,467
py
Python
code-tf2/encoders/message_gcns/gcn_basis.py
manas96/RelationPrediction
06be62a55554971d1b523dc555f4c8616c21c664
[ "MIT" ]
null
null
null
code-tf2/encoders/message_gcns/gcn_basis.py
manas96/RelationPrediction
06be62a55554971d1b523dc555f4c8616c21c664
[ "MIT" ]
null
null
null
code-tf2/encoders/message_gcns/gcn_basis.py
manas96/RelationPrediction
06be62a55554971d1b523dc555f4c8616c21c664
[ "MIT" ]
null
null
null
import numpy as np import tensorflow as tf from common.shared_functions import dot_or_lookup, glorot_variance, make_tf_variable, make_tf_bias from encoders.message_gcns.message_gcn import MessageGcn class BasisGcn(MessageGcn): def parse_settings(self): self.dropout_keep_probability = float(self.settings['DropoutKeepProbability']) self.n_coefficients = int(self.settings['NumberOfBasisFunctions']) def local_initialize_train(self): vertex_feature_dimension = self.entity_count if self.onehot_input else self.shape[0] type_matrix_shape = (self.relation_count, self.n_coefficients) vertex_matrix_shape = (vertex_feature_dimension, self.n_coefficients, self.shape[1]) self_matrix_shape = (vertex_feature_dimension, self.shape[1]) glorot_var_combined = glorot_variance([vertex_matrix_shape[0], vertex_matrix_shape[2]]) self.W_forward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape) self.W_backward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape) self.W_self = make_tf_variable(0, glorot_var_combined, self_matrix_shape) type_init_var = 1 self.C_forward = make_tf_variable(0, type_init_var, type_matrix_shape) self.C_backward = make_tf_variable(0, type_init_var, type_matrix_shape) self.b = make_tf_bias(self.shape[1]) def local_get_weights(self): return [self.W_forward, self.W_backward, self.C_forward, self.C_backward, self.W_self, self.b] def compute_messages(self, sender_features, receiver_features): backward_type_scaling, forward_type_scaling = self.compute_coefficients() receiver_terms, sender_terms = self.compute_basis_functions(receiver_features, sender_features) forward_messages = tf.reduce_sum(input_tensor=sender_terms * tf.expand_dims(forward_type_scaling,-1), axis=1) backward_messages = tf.reduce_sum(input_tensor=receiver_terms * tf.expand_dims(backward_type_scaling, -1), axis=1) return forward_messages, backward_messages def compute_coefficients(self): message_types = self.get_graph().get_type_indices() forward_type_scaling = 
tf.nn.embedding_lookup(params=self.C_forward, ids=message_types) backward_type_scaling = tf.nn.embedding_lookup(params=self.C_backward, ids=message_types) return backward_type_scaling, forward_type_scaling def compute_basis_functions(self, receiver_features, sender_features): sender_terms = self.dot_or_tensor_mul(sender_features, self.W_forward) receiver_terms = self.dot_or_tensor_mul(receiver_features, self.W_backward) return receiver_terms, sender_terms def dot_or_tensor_mul(self, features, tensor): tensor_shape = tf.shape(input=tensor) flat_shape = [tensor_shape[0], tensor_shape[1] * tensor_shape[2]] flattened_tensor = tf.reshape(tensor, flat_shape) result_tensor = dot_or_lookup(features, flattened_tensor, onehot_input=self.onehot_input) result_tensor = tf.reshape(result_tensor, [-1, tensor_shape[1], tensor_shape[2]]) return result_tensor def compute_self_loop_messages(self, vertex_features): return dot_or_lookup(vertex_features, self.W_self, onehot_input=self.onehot_input) def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'): mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated')) mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated')) collected_messages_f = tf.sparse.sparse_dense_matmul(mtr_f, forward_messages) collected_messages_b = tf.sparse.sparse_dense_matmul(mtr_b, backward_messages) updated_vertex_embeddings = collected_messages_f + collected_messages_b if self.use_nonlinearity: activated = tf.nn.relu(updated_vertex_embeddings + self_loop_messages) else: activated = updated_vertex_embeddings + self_loop_messages return activated def local_get_regularization(self): regularization = tf.reduce_mean(input_tensor=tf.square(self.W_forward)) regularization += tf.reduce_mean(input_tensor=tf.square(self.W_backward)) regularization += tf.reduce_mean(input_tensor=tf.square(self.W_self)) return 0.0 * regularization
47.021053
122
0.74681
import numpy as np import tensorflow as tf from common.shared_functions import dot_or_lookup, glorot_variance, make_tf_variable, make_tf_bias from encoders.message_gcns.message_gcn import MessageGcn class BasisGcn(MessageGcn): def parse_settings(self): self.dropout_keep_probability = float(self.settings['DropoutKeepProbability']) self.n_coefficients = int(self.settings['NumberOfBasisFunctions']) def local_initialize_train(self): vertex_feature_dimension = self.entity_count if self.onehot_input else self.shape[0] type_matrix_shape = (self.relation_count, self.n_coefficients) vertex_matrix_shape = (vertex_feature_dimension, self.n_coefficients, self.shape[1]) self_matrix_shape = (vertex_feature_dimension, self.shape[1]) glorot_var_combined = glorot_variance([vertex_matrix_shape[0], vertex_matrix_shape[2]]) self.W_forward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape) self.W_backward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape) self.W_self = make_tf_variable(0, glorot_var_combined, self_matrix_shape) type_init_var = 1 self.C_forward = make_tf_variable(0, type_init_var, type_matrix_shape) self.C_backward = make_tf_variable(0, type_init_var, type_matrix_shape) self.b = make_tf_bias(self.shape[1]) def local_get_weights(self): return [self.W_forward, self.W_backward, self.C_forward, self.C_backward, self.W_self, self.b] def compute_messages(self, sender_features, receiver_features): backward_type_scaling, forward_type_scaling = self.compute_coefficients() receiver_terms, sender_terms = self.compute_basis_functions(receiver_features, sender_features) forward_messages = tf.reduce_sum(input_tensor=sender_terms * tf.expand_dims(forward_type_scaling,-1), axis=1) backward_messages = tf.reduce_sum(input_tensor=receiver_terms * tf.expand_dims(backward_type_scaling, -1), axis=1) return forward_messages, backward_messages def compute_coefficients(self): message_types = self.get_graph().get_type_indices() forward_type_scaling = 
tf.nn.embedding_lookup(params=self.C_forward, ids=message_types) backward_type_scaling = tf.nn.embedding_lookup(params=self.C_backward, ids=message_types) return backward_type_scaling, forward_type_scaling def compute_basis_functions(self, receiver_features, sender_features): sender_terms = self.dot_or_tensor_mul(sender_features, self.W_forward) receiver_terms = self.dot_or_tensor_mul(receiver_features, self.W_backward) return receiver_terms, sender_terms def dot_or_tensor_mul(self, features, tensor): tensor_shape = tf.shape(input=tensor) flat_shape = [tensor_shape[0], tensor_shape[1] * tensor_shape[2]] flattened_tensor = tf.reshape(tensor, flat_shape) result_tensor = dot_or_lookup(features, flattened_tensor, onehot_input=self.onehot_input) result_tensor = tf.reshape(result_tensor, [-1, tensor_shape[1], tensor_shape[2]]) return result_tensor def compute_self_loop_messages(self, vertex_features): return dot_or_lookup(vertex_features, self.W_self, onehot_input=self.onehot_input) def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'): mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated')) mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated')) collected_messages_f = tf.sparse.sparse_dense_matmul(mtr_f, forward_messages) collected_messages_b = tf.sparse.sparse_dense_matmul(mtr_b, backward_messages) updated_vertex_embeddings = collected_messages_f + collected_messages_b if self.use_nonlinearity: activated = tf.nn.relu(updated_vertex_embeddings + self_loop_messages) else: activated = updated_vertex_embeddings + self_loop_messages return activated def local_get_regularization(self): regularization = tf.reduce_mean(input_tensor=tf.square(self.W_forward)) regularization += tf.reduce_mean(input_tensor=tf.square(self.W_backward)) regularization += tf.reduce_mean(input_tensor=tf.square(self.W_self)) return 0.0 * regularization
true
true
f70303ae169cee1d118069e7b9e0609229c086e8
3,699
py
Python
cli.py
bormiopoli/mix_blockchain_network
bfa9a2b5f6d954883ffb6c3542989bfc272c5c20
[ "MIT" ]
1
2021-03-11T16:57:42.000Z
2021-03-11T16:57:42.000Z
cli.py
bormiopoli/mix_blockchain_network
bfa9a2b5f6d954883ffb6c3542989bfc272c5c20
[ "MIT" ]
null
null
null
cli.py
bormiopoli/mix_blockchain_network
bfa9a2b5f6d954883ffb6c3542989bfc272c5c20
[ "MIT" ]
null
null
null
import hashlib import datetime import json import uuid from hashlib import sha256 from sys import version_info as pyVersion from binascii import hexlify, unhexlify from wallet import * from func.send_message import send_message from func.send_coin import send_coin from func.node_connection import * from lib.mixlib import * import pickle from blockchain.blockchain_main import get_blockchain , create_blockchain, sendme_full_chain from lib.settings import the_settings def show_menu(): print(banner_maker(sc_name="Mix Blockchain Network",description="This is an open source blockchain network project. It exists for people to build and use their own blockchain networks. Or to join the network created by others.",author="Onur Atakan ULUSOY",email="atadogan06@gmail.com") + \ menu_space() + \ menu_maker(menu_number="cbc",menu_text="Create Blockchain")+ \ menu_maker(menu_number="cw",menu_text="Create Wallet")+ \ menu_space() + \ menu_maker(menu_number="sm",menu_text="Send Message")+ \ menu_maker(menu_number="sc",menu_text="Send Coin")+ \ menu_space() + \ menu_maker(menu_number="gb",menu_text="Get Balance")+ \ menu_space() + \ menu_maker(menu_number="ndstart",menu_text="Node Start")+ \ menu_maker(menu_number="ndstop",menu_text="Node Stop")+ \ menu_maker(menu_number="ndconnect",menu_text="Node Connect")+ \ menu_maker(menu_number="ndconnectmix_blockchain_network",menu_text="Node Connect from mix_blockchain_network-DB")+ \ menu_space() + \ menu_maker(menu_number="testmodeon",menu_text="Test mode ON")+ \ menu_maker(menu_number="testmodeoff",menu_text="Test mode OF")+ \ menu_maker(menu_number="debugmodeon",menu_text="Debug mode ON")+ \ menu_maker(menu_number="debugmodeoff",menu_text="Debug mode OF")+ \ menu_space() + \ menu_maker(menu_number="getfullnodelist",menu_text="Get Full Node List")+ \ menu_maker(menu_number="getfullchain",menu_text="Get Full Chain")+ \ quit_menu_maker(mode="main") ) def menu(): while True: show_menu() choices_input = question_maker(mode="main") if 
choices_input == "cbc": create_blockchain() if choices_input == "cw": Wallet_Create() if choices_input == "sm": send_message(input("Message: "),input("Please write receiver adress: ")) if choices_input == "sc": send_coin(input("Coin Amount: "),input("Please write receiver adress: ")) if choices_input == "gb": print(get_blockchain().getBalance(Wallet_Import(0,0))) if choices_input == "help": show_menu() if choices_input == "ndstart": ndstart(int(input("port: "))) if choices_input == "ndstop": ndstop() if choices_input == "ndconnect": ndconnect(str(input("node ip: ")),int(input("node port: "))) if choices_input == "ndconnectmix_blockchain_network": ndconnectmix_blockchain_network() if choices_input == "testmodeon": the_settings().test_mode(True) if choices_input == "testmodeoff": the_settings().test_mode(False) if choices_input == "debugmodeon": the_settings().debug_mode(True) if choices_input == "debugmodeoff": the_settings().debug_mode(False) if choices_input == "getfullnodelist": sendme_full_node_list() if choices_input == "getfullchain": sendme_full_chain() if choices_input == "0": exit() def start(): menu() if __name__ == '__main__': start()
29.592
290
0.656394
import hashlib import datetime import json import uuid from hashlib import sha256 from sys import version_info as pyVersion from binascii import hexlify, unhexlify from wallet import * from func.send_message import send_message from func.send_coin import send_coin from func.node_connection import * from lib.mixlib import * import pickle from blockchain.blockchain_main import get_blockchain , create_blockchain, sendme_full_chain from lib.settings import the_settings def show_menu(): print(banner_maker(sc_name="Mix Blockchain Network",description="This is an open source blockchain network project. It exists for people to build and use their own blockchain networks. Or to join the network created by others.",author="Onur Atakan ULUSOY",email="atadogan06@gmail.com") + \ menu_space() + \ menu_maker(menu_number="cbc",menu_text="Create Blockchain")+ \ menu_maker(menu_number="cw",menu_text="Create Wallet")+ \ menu_space() + \ menu_maker(menu_number="sm",menu_text="Send Message")+ \ menu_maker(menu_number="sc",menu_text="Send Coin")+ \ menu_space() + \ menu_maker(menu_number="gb",menu_text="Get Balance")+ \ menu_space() + \ menu_maker(menu_number="ndstart",menu_text="Node Start")+ \ menu_maker(menu_number="ndstop",menu_text="Node Stop")+ \ menu_maker(menu_number="ndconnect",menu_text="Node Connect")+ \ menu_maker(menu_number="ndconnectmix_blockchain_network",menu_text="Node Connect from mix_blockchain_network-DB")+ \ menu_space() + \ menu_maker(menu_number="testmodeon",menu_text="Test mode ON")+ \ menu_maker(menu_number="testmodeoff",menu_text="Test mode OF")+ \ menu_maker(menu_number="debugmodeon",menu_text="Debug mode ON")+ \ menu_maker(menu_number="debugmodeoff",menu_text="Debug mode OF")+ \ menu_space() + \ menu_maker(menu_number="getfullnodelist",menu_text="Get Full Node List")+ \ menu_maker(menu_number="getfullchain",menu_text="Get Full Chain")+ \ quit_menu_maker(mode="main") ) def menu(): while True: show_menu() choices_input = question_maker(mode="main") if 
choices_input == "cbc": create_blockchain() if choices_input == "cw": Wallet_Create() if choices_input == "sm": send_message(input("Message: "),input("Please write receiver adress: ")) if choices_input == "sc": send_coin(input("Coin Amount: "),input("Please write receiver adress: ")) if choices_input == "gb": print(get_blockchain().getBalance(Wallet_Import(0,0))) if choices_input == "help": show_menu() if choices_input == "ndstart": ndstart(int(input("port: "))) if choices_input == "ndstop": ndstop() if choices_input == "ndconnect": ndconnect(str(input("node ip: ")),int(input("node port: "))) if choices_input == "ndconnectmix_blockchain_network": ndconnectmix_blockchain_network() if choices_input == "testmodeon": the_settings().test_mode(True) if choices_input == "testmodeoff": the_settings().test_mode(False) if choices_input == "debugmodeon": the_settings().debug_mode(True) if choices_input == "debugmodeoff": the_settings().debug_mode(False) if choices_input == "getfullnodelist": sendme_full_node_list() if choices_input == "getfullchain": sendme_full_chain() if choices_input == "0": exit() def start(): menu() if __name__ == '__main__': start()
true
true
f70303d3f564d36a16f59efccc0cc2b001f36690
5,839
py
Python
utils/flow.py
Leo-xxx/Deep-Flow-Guided-Video-Inpainting
fe58b6d0eaa3f9d59e9aca197d166b677a6a6c6a
[ "MIT" ]
2,072
2019-05-10T01:39:24.000Z
2022-03-31T05:44:34.000Z
utils/flow.py
RexBarker/Deep-Flow
6310007009d2bfe150f1e4b29c7588f720c4bba2
[ "MIT" ]
94
2019-05-31T08:54:32.000Z
2022-03-11T23:53:00.000Z
utils/flow.py
RexBarker/Deep-Flow
6310007009d2bfe150f1e4b29c7588f720c4bba2
[ "MIT" ]
412
2019-05-10T05:47:48.000Z
2022-03-21T07:19:01.000Z
import numpy as np import cv2 def make_colorwheel(): ''' Generates a color wheel for optical flow visualization as presented in: Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf According to the C++ source code of Daniel Scharstein According to the Matlab source code of Deqing Sun ''' RY = 15 YG = 6 GC = 4 CB = 11 BM = 13 MR = 6 ncols = RY + YG + GC + CB + BM + MR colorwheel = np.zeros((ncols, 3)) col = 0 # RY colorwheel[0:RY, 0] = 255 colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY) col = col + RY # YG colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG) colorwheel[col:col + YG, 1] = 255 col = col + YG # GC colorwheel[col:col + GC, 1] = 255 colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC) col = col + GC # CB colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB) colorwheel[col:col + CB, 2] = 255 col = col + CB # BM colorwheel[col:col + BM, 2] = 255 colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM) col = col + BM # MR colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR) colorwheel[col:col + MR, 0] = 255 return colorwheel def flow_compute_color(u, v, convert_to_bgr=False): ''' Applies the flow color wheel to (possibly clipped) flow components u and v. 
According to the C++ source code of Daniel Scharstein According to the Matlab source code of Deqing Sun :param u: np.ndarray, input horizontal flow :param v: np.ndarray, input vertical flow :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB :return: ''' flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) colorwheel = make_colorwheel() # shape [55x3] ncols = colorwheel.shape[0] rad = np.sqrt(np.square(u) + np.square(v)) a = np.arctan2(-v, -u) / np.pi fk = (a + 1) / 2 * (ncols - 1) + 1 k0 = np.floor(fk).astype(np.int32) k0[k0 > 53] = 53 k1 = k0 + 1 k1[k1 == ncols] = 1 f = fk - k0 for i in range(colorwheel.shape[1]): tmp = colorwheel[:, i] col0 = tmp[k0] / 255.0 col1 = tmp[k1] / 255.0 col = (1 - f) * col0 + f * col1 idx = (rad <= 1) col[idx] = 1 - rad[idx] * (1 - col[idx]) col[~idx] = col[~idx] * 0.75 # out of range? # Note the 2-i => BGR instead of RGB ch_idx = 2 - i if convert_to_bgr else i flow_image[:, :, ch_idx] = np.floor(255 * col) return flow_image def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False): ''' Expects a two dimensional flow image of shape [H,W,2] According to the C++ source code of Daniel Scharstein According to the Matlab source code of Deqing Sun :param flow_uv: np.ndarray of shape [H,W,2] :param clip_flow: float, maximum clipping value for flow :return: ''' assert flow_uv.ndim == 3, 'input flow must have three dimensions' assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' if clip_flow is not None: flow_uv = np.clip(flow_uv, 0, clip_flow) u = flow_uv[:, :, 0] v = flow_uv[:, :, 1] rad = np.sqrt(np.square(u) + np.square(v)) rad_max = np.max(rad) epsilon = 1e-5 u = u / (rad_max + epsilon) v = v / (rad_max + epsilon) return flow_compute_color(u, v, convert_to_bgr) def readFlow(name): f = open(name, 'rb') header = f.read(4) if header.decode("utf-8") != 'PIEH': raise Exception('Flow file header does not contain PIEH') width = np.fromfile(f, np.int32, 1).squeeze() height = 
np.fromfile(f, np.int32, 1).squeeze() flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2)) f.close() return flow.astype(np.float32) def get_warp_label(flow1, flow2, label1, th=50, value=0): label2 = np.ones_like(label1, dtype=label1.dtype) * value height = flow1.shape[0] width = flow1.shape[1] flow_t = np.zeros_like(flow1, dtype=flow1.dtype) grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2) dx = grid[:, :, 0] + flow2[:, :, 1] dy = grid[:, :, 1] + flow2[:, :, 0] sx = np.floor(dx).astype(int) sy = np.floor(dy).astype(int) valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1) sx_mat = np.dstack((sx, sx + 1, sx, sx + 1)).clip(0, height - 1) sy_mat = np.dstack((sy, sy, sy + 1, sy + 1)).clip(0, width - 1) sxsy_mat = np.abs((1 - np.abs(sx_mat - dx[:, :, np.newaxis])) * (1 - np.abs(sy_mat - dy[:, :, np.newaxis]))) for i in range(4): flow_t = flow_t + sxsy_mat[:, :, i][:, :, np. newaxis] * flow1[sx_mat[:, :, i], sy_mat[:, :, i], :] valid = valid & (np.linalg.norm( flow_t[:, :, [1, 0]] + np.dstack((dx, dy)) - grid, axis=2) < th) flow_t = (flow2 - flow_t) / 2.0 dx = grid[:, :, 0] + flow_t[:, :, 1] dy = grid[:, :, 1] + flow_t[:, :, 0] valid = valid & (dx >= 0) & (dx < height - 1) & (dy >= 0) & (dy < width - 1) label2[valid, :] = label1[dx[valid].round().astype(int), dy[valid].round() .astype(int), :] return label2 def flow_tf(flow, size): flow_shape = flow.shape flow_resized = cv2.resize(flow, (size[1], size[0])) flow_resized[:, :, 0] *= (float(size[1]) / float(flow_shape[1])) flow_resized[:, :, 1] *= (float(size[0]) / float(flow_shape[0])) return flow_resized
31.224599
90
0.554033
import numpy as np import cv2 def make_colorwheel(): RY = 15 YG = 6 GC = 4 CB = 11 BM = 13 MR = 6 ncols = RY + YG + GC + CB + BM + MR colorwheel = np.zeros((ncols, 3)) col = 0 colorwheel[0:RY, 0] = 255 colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY) col = col + RY colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG) colorwheel[col:col + YG, 1] = 255 col = col + YG colorwheel[col:col + GC, 1] = 255 colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC) col = col + GC colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB) colorwheel[col:col + CB, 2] = 255 col = col + CB colorwheel[col:col + BM, 2] = 255 colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM) col = col + BM colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR) colorwheel[col:col + MR, 0] = 255 return colorwheel def flow_compute_color(u, v, convert_to_bgr=False): flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) colorwheel = make_colorwheel() ncols = colorwheel.shape[0] rad = np.sqrt(np.square(u) + np.square(v)) a = np.arctan2(-v, -u) / np.pi fk = (a + 1) / 2 * (ncols - 1) + 1 k0 = np.floor(fk).astype(np.int32) k0[k0 > 53] = 53 k1 = k0 + 1 k1[k1 == ncols] = 1 f = fk - k0 for i in range(colorwheel.shape[1]): tmp = colorwheel[:, i] col0 = tmp[k0] / 255.0 col1 = tmp[k1] / 255.0 col = (1 - f) * col0 + f * col1 idx = (rad <= 1) col[idx] = 1 - rad[idx] * (1 - col[idx]) col[~idx] = col[~idx] * 0.75 ch_idx = 2 - i if convert_to_bgr else i flow_image[:, :, ch_idx] = np.floor(255 * col) return flow_image def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False): assert flow_uv.ndim == 3, 'input flow must have three dimensions' assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' if clip_flow is not None: flow_uv = np.clip(flow_uv, 0, clip_flow) u = flow_uv[:, :, 0] v = flow_uv[:, :, 1] rad = np.sqrt(np.square(u) + np.square(v)) rad_max = np.max(rad) epsilon = 1e-5 u = u / (rad_max + epsilon) 
v = v / (rad_max + epsilon) return flow_compute_color(u, v, convert_to_bgr) def readFlow(name): f = open(name, 'rb') header = f.read(4) if header.decode("utf-8") != 'PIEH': raise Exception('Flow file header does not contain PIEH') width = np.fromfile(f, np.int32, 1).squeeze() height = np.fromfile(f, np.int32, 1).squeeze() flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2)) f.close() return flow.astype(np.float32) def get_warp_label(flow1, flow2, label1, th=50, value=0): label2 = np.ones_like(label1, dtype=label1.dtype) * value height = flow1.shape[0] width = flow1.shape[1] flow_t = np.zeros_like(flow1, dtype=flow1.dtype) grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2) dx = grid[:, :, 0] + flow2[:, :, 1] dy = grid[:, :, 1] + flow2[:, :, 0] sx = np.floor(dx).astype(int) sy = np.floor(dy).astype(int) valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1) sx_mat = np.dstack((sx, sx + 1, sx, sx + 1)).clip(0, height - 1) sy_mat = np.dstack((sy, sy, sy + 1, sy + 1)).clip(0, width - 1) sxsy_mat = np.abs((1 - np.abs(sx_mat - dx[:, :, np.newaxis])) * (1 - np.abs(sy_mat - dy[:, :, np.newaxis]))) for i in range(4): flow_t = flow_t + sxsy_mat[:, :, i][:, :, np. newaxis] * flow1[sx_mat[:, :, i], sy_mat[:, :, i], :] valid = valid & (np.linalg.norm( flow_t[:, :, [1, 0]] + np.dstack((dx, dy)) - grid, axis=2) < th) flow_t = (flow2 - flow_t) / 2.0 dx = grid[:, :, 0] + flow_t[:, :, 1] dy = grid[:, :, 1] + flow_t[:, :, 0] valid = valid & (dx >= 0) & (dx < height - 1) & (dy >= 0) & (dy < width - 1) label2[valid, :] = label1[dx[valid].round().astype(int), dy[valid].round() .astype(int), :] return label2 def flow_tf(flow, size): flow_shape = flow.shape flow_resized = cv2.resize(flow, (size[1], size[0])) flow_resized[:, :, 0] *= (float(size[1]) / float(flow_shape[1])) flow_resized[:, :, 1] *= (float(size[0]) / float(flow_shape[0])) return flow_resized
true
true
f70309b3062804015197c248bb55f8798addb560
9,360
py
Python
pytorch/torch/distributed/launch.py
raghavnauhria/whatmt
c20483a437c82936cb0fb8080925e37b9c4bba87
[ "MIT" ]
null
null
null
pytorch/torch/distributed/launch.py
raghavnauhria/whatmt
c20483a437c82936cb0fb8080925e37b9c4bba87
[ "MIT" ]
1
2019-07-22T09:48:46.000Z
2019-07-22T09:48:46.000Z
pytorch/torch/distributed/launch.py
raghavnauhria/whatmt
c20483a437c82936cb0fb8080925e37b9c4bba87
[ "MIT" ]
null
null
null
r""" `torch.distributed.launch` is a module that spawns up multiple distributed training processes on each of the training nodes. The utility can be used for single-node distributed training, in which one or more processes per node will be spawned. The utility can be used for either CPU training or GPU training. If the utility is used for GPU training, each distributed process will be operating on a single GPU. This can achieve well-improved single-node training performance. It can also be used in multi-node distributed training, by spawning up multiple processes on each node for well-improved multi-node distributed training performance as well. This will especially be benefitial for systems with multiple Infiniband interfaces that have direct-GPU support, since all of them can be utilized for aggregated communication bandwidth. In both cases of single-node distributed training or multi-node distributed training, this utility will launch the given number of processes per node (``--nproc_per_node``). If used for GPU training, this number needs to be less or euqal to the number of GPUs on the current system (``nproc_per_node``), and each process will be operating on a single GPU from *GPU 0 to GPU (nproc_per_node - 1)*. **How to use this module:** 1. Single-Node multi-process distributed training :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) 2. Multi-Node multi-process distributed training: (e.g. 
two nodes) Node 1: *(IP: 192.168.1.1, and has a free port: 1234)* :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE --nnodes=2 --node_rank=0 --master_addr="192.168.1.1" --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) Node 2: :: >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE --nnodes=2 --node_rank=1 --master_addr="192.168.1.1" --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) 3. To look up what optional arguments this module offers: :: >>> python -m torch.distributed.launch --help **Important Notices:** 1. This utilty and multi-process distributed (single-node or multi-node) GPU training currently only achieves the best performance using the NCCL distributed backend. Thus NCCL backend is the recommended backend to use for GPU training. 2. In your training program, you must parse the command-line argument: ``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module. If your training program uses GPUs, you should ensure that your code only runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by: Parsing the local_rank argument :: >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--local_rank", type=int) >>> args = parser.parse_args() Set your device to local rank using either :: >>> torch.cuda.set_device(arg.local_rank) # before your code runs or :: >>> with torch.cuda.device(arg.local_rank): >>> # your code to run 3. In your training program, you are supposed to call the following function at the beginning to start the distributed backend. You need to make sure that the init_method uses ``env://``, which is the only supported ``init_method`` by this module. :: torch.distributed.init_process_group(backend='YOUR BACKEND', init_method='env://') 4. 
In your training program, you can either use regular distributed functions or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your training program uses GPUs for training and you would like to use :func:`torch.nn.parallel.DistributedDataParallel` module, here is how to configure it. :: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[arg.local_rank], output_device=arg.local_rank) Please ensure that ``device_ids`` argument is set to be the only GPU device id that your code will be operating on. This is generally the local rank of the process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``, and ``output_device`` needs to be ``args.local_rank`` in order to use this utility 5. Another way to pass ``local_rank`` to the subprocesses via environment variable ``LOCAL_RANK``. This behavior is enabled when you launch the script with ``--use_env=True``. You must adjust the subprocess example above to replace ``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher will not pass ``--local_rank`` when you specify this flag. .. warning:: ``local_rank`` is NOT globally unique: it is only unique per process on a machine. Thus, don't use it to decide if you should, e.g., write to a networked filesystem. See https://github.com/pytorch/pytorch/issues/12042 for an example of how things can go wrong if you don't do this correctly. 
""" import sys import subprocess import os from argparse import ArgumentParser, REMAINDER def parse_args(): """ Helper function parsing the command line options @retval ArgumentParser """ parser = ArgumentParser(description="PyTorch distributed training launch " "helper utilty that will spawn up " "multiple distributed processes") # Optional arguments for the launch helper parser.add_argument("--nnodes", type=int, default=1, help="The number of nodes to use for distributed " "training") parser.add_argument("--node_rank", type=int, default=0, help="The rank of the node for multi-node distributed " "training") parser.add_argument("--nproc_per_node", type=int, default=1, help="The number of processes to launch on each node, " "for GPU training, this is recommended to be set " "to the number of GPUs in your system so that " "each process can be bound to a single GPU.") parser.add_argument("--master_addr", default="127.0.0.1", type=str, help="Master node (rank 0)'s address, should be either " "the IP address or the hostname of node 0, for " "single node multi-proc training, the " "--master_addr can simply be 127.0.0.1") parser.add_argument("--master_port", default=29500, type=int, help="Master node (rank 0)'s free port that needs to " "be used for communciation during distributed " "training") parser.add_argument("--use_env", default=False, action="store_true", help="Use environment variable to pass " "'local rank'. For legacy reasons, the default value is False. 
" "If set to True, the script will not pass " "--local_rank as argument, and will instead set LOCAL_RANK.") # positional parser.add_argument("training_script", type=str, help="The full path to the single GPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script") # rest from the training program parser.add_argument('training_script_args', nargs=REMAINDER) return parser.parse_args() def main(): args = parse_args() # world size in terms of number of processes dist_world_size = args.nproc_per_node * args.nnodes # set PyTorch distributed related environmental variables current_env = os.environ.copy() current_env["MASTER_ADDR"] = args.master_addr current_env["MASTER_PORT"] = str(args.master_port) current_env["WORLD_SIZE"] = str(dist_world_size) processes = [] for local_rank in range(0, args.nproc_per_node): # each process's rank dist_rank = args.nproc_per_node * args.node_rank + local_rank current_env["RANK"] = str(dist_rank) current_env["LOCAL_RANK"] = str(local_rank) # spawn the processes if args.use_env: cmd = [sys.executable, "-u", args.training_script] + args.training_script_args else: cmd = [sys.executable, "-u", args.training_script, "--local_rank={}".format(local_rank)] + args.training_script_args process = subprocess.Popen(cmd, env=current_env) processes.append(process) for process in processes: process.wait() if process.returncode != 0: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) if __name__ == "__main__": main()
39.661017
93
0.65
import sys import subprocess import os from argparse import ArgumentParser, REMAINDER def parse_args(): parser = ArgumentParser(description="PyTorch distributed training launch " "helper utilty that will spawn up " "multiple distributed processes") parser.add_argument("--nnodes", type=int, default=1, help="The number of nodes to use for distributed " "training") parser.add_argument("--node_rank", type=int, default=0, help="The rank of the node for multi-node distributed " "training") parser.add_argument("--nproc_per_node", type=int, default=1, help="The number of processes to launch on each node, " "for GPU training, this is recommended to be set " "to the number of GPUs in your system so that " "each process can be bound to a single GPU.") parser.add_argument("--master_addr", default="127.0.0.1", type=str, help="Master node (rank 0)'s address, should be either " "the IP address or the hostname of node 0, for " "single node multi-proc training, the " "--master_addr can simply be 127.0.0.1") parser.add_argument("--master_port", default=29500, type=int, help="Master node (rank 0)'s free port that needs to " "be used for communciation during distributed " "training") parser.add_argument("--use_env", default=False, action="store_true", help="Use environment variable to pass " "'local rank'. For legacy reasons, the default value is False. 
" "If set to True, the script will not pass " "--local_rank as argument, and will instead set LOCAL_RANK.") parser.add_argument("training_script", type=str, help="The full path to the single GPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script") parser.add_argument('training_script_args', nargs=REMAINDER) return parser.parse_args() def main(): args = parse_args() dist_world_size = args.nproc_per_node * args.nnodes current_env = os.environ.copy() current_env["MASTER_ADDR"] = args.master_addr current_env["MASTER_PORT"] = str(args.master_port) current_env["WORLD_SIZE"] = str(dist_world_size) processes = [] for local_rank in range(0, args.nproc_per_node): dist_rank = args.nproc_per_node * args.node_rank + local_rank current_env["RANK"] = str(dist_rank) current_env["LOCAL_RANK"] = str(local_rank) # spawn the processes if args.use_env: cmd = [sys.executable, "-u", args.training_script] + args.training_script_args else: cmd = [sys.executable, "-u", args.training_script, "--local_rank={}".format(local_rank)] + args.training_script_args process = subprocess.Popen(cmd, env=current_env) processes.append(process) for process in processes: process.wait() if process.returncode != 0: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) if __name__ == "__main__": main()
true
true
f7030a2548771cfe1fc406bf93ce331585a26470
1,615
py
Python
runtime/bamboo-pipeline/test/eri_imp_test_use/tests/control/test_retry_subprocess.py
DomineCore/bamboo-engine
fb4583e70f9e1e87d9d48c2393db8d8104306f37
[ "MIT" ]
1
2022-01-06T15:44:43.000Z
2022-01-06T15:44:43.000Z
runtime/bamboo-pipeline/test/eri_imp_test_use/tests/control/test_retry_subprocess.py
DomineCore/bamboo-engine
fb4583e70f9e1e87d9d48c2393db8d8104306f37
[ "MIT" ]
null
null
null
runtime/bamboo-pipeline/test/eri_imp_test_use/tests/control/test_retry_subprocess.py
DomineCore/bamboo-engine
fb4583e70f9e1e87d9d48c2393db8d8104306f37
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from bamboo_engine.builder import * # noqa from bamboo_engine.engine import Engine from pipeline.eri.runtime import BambooDjangoRuntime from ..utils import * # noqa def test_retry_subprocess(): subproc_start = EmptyStartEvent() subproc_act = ServiceActivity(component_code="debug_node") subproc_end = EmptyEndEvent() subproc_start.extend(subproc_act).extend(subproc_end) params = Params({"${raise_var}": Var(type=Var.LAZY, custom_type="raise_variable", value="")}) start = EmptyStartEvent() subproc = SubProcess(start=subproc_start, params=params) end = EmptyEndEvent() start.extend(subproc).extend(end) pipeline = build_tree(start) engine = Engine(BambooDjangoRuntime()) engine.run_pipeline(pipeline=pipeline, root_pipeline_data={}) sleep(1) old_state = runtime.get_state(subproc.id) assert old_state.name == states.FAILED engine.retry_subprocess(subproc.id) sleep(1) state = runtime.get_state(subproc.id) assert state.name == states.FAILED assert state.version != old_state.version histories = runtime.get_histories(subproc.id) assert len(histories) == 1 assert histories[0].node_id == subproc.id assert histories[0].loop == 1 assert histories[0].retry == 0 assert histories[0].skip is False assert histories[0].started_time is not None assert histories[0].archived_time is not None assert histories[0].inputs == {} assert len(histories[0].outputs) == 1 assert "ex_data" in histories[0].outputs assert histories[0].version == old_state.version
29.907407
97
0.713313
from bamboo_engine.builder import * from bamboo_engine.engine import Engine from pipeline.eri.runtime import BambooDjangoRuntime from ..utils import * def test_retry_subprocess(): subproc_start = EmptyStartEvent() subproc_act = ServiceActivity(component_code="debug_node") subproc_end = EmptyEndEvent() subproc_start.extend(subproc_act).extend(subproc_end) params = Params({"${raise_var}": Var(type=Var.LAZY, custom_type="raise_variable", value="")}) start = EmptyStartEvent() subproc = SubProcess(start=subproc_start, params=params) end = EmptyEndEvent() start.extend(subproc).extend(end) pipeline = build_tree(start) engine = Engine(BambooDjangoRuntime()) engine.run_pipeline(pipeline=pipeline, root_pipeline_data={}) sleep(1) old_state = runtime.get_state(subproc.id) assert old_state.name == states.FAILED engine.retry_subprocess(subproc.id) sleep(1) state = runtime.get_state(subproc.id) assert state.name == states.FAILED assert state.version != old_state.version histories = runtime.get_histories(subproc.id) assert len(histories) == 1 assert histories[0].node_id == subproc.id assert histories[0].loop == 1 assert histories[0].retry == 0 assert histories[0].skip is False assert histories[0].started_time is not None assert histories[0].archived_time is not None assert histories[0].inputs == {} assert len(histories[0].outputs) == 1 assert "ex_data" in histories[0].outputs assert histories[0].version == old_state.version
true
true
f7030a2f06200602ce7f131372fa021f6a707ddc
2,066
py
Python
pylearn2/cross_validation/blocks.py
BouchardLab/pylearn2
4cab785b870d22cd9e85a5f536d4cac234b6bf60
[ "BSD-3-Clause" ]
3
2018-04-05T21:24:54.000Z
2021-09-14T01:48:36.000Z
pylearn2/cross_validation/blocks.py
BouchardLab/pylearn2
4cab785b870d22cd9e85a5f536d4cac234b6bf60
[ "BSD-3-Clause" ]
null
null
null
pylearn2/cross_validation/blocks.py
BouchardLab/pylearn2
4cab785b870d22cd9e85a5f536d4cac234b6bf60
[ "BSD-3-Clause" ]
2
2018-02-18T14:46:57.000Z
2019-05-03T11:51:45.000Z
""" Cross-validation with blocks. """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "3-clause BSD" __maintainer__ = "Steven Kearnes" from theano.compat.six.moves import xrange from pylearn2.blocks import StackedBlocks class StackedBlocksCV(object): """ Multi-layer transforms using cross-validation models. Parameters ---------- layers : iterable (list of lists) Cross-validation models for each layer. Should be a list of lists, where the first index is for the layer and the second index is for the cross-validation fold. """ def __init__(self, layers): stacked_blocks = [] n_folds = len(layers[0]) assert all([len(layer) == n_folds for layer in layers]) # stack the k-th block from each layer for k in xrange(n_folds): this_blocks = [] for i, layer in enumerate(layers): this_blocks.append(layer[k]) this_stacked_blocks = StackedBlocks(this_blocks) stacked_blocks.append(this_stacked_blocks) # _folds contains a StackedBlocks instance for each CV fold self._folds = stacked_blocks def select_fold(self, k): """ Choose a single cross-validation fold to represent. Parameters ---------- k : int Index of selected fold. """ return self._folds[k] def get_input_space(self): """Get input space.""" return self._folds[0][0].get_input_space() def get_output_space(self): """Get output space.""" return self._folds[0][-1].get_output_space() def set_input_space(self, space): """ Set input space. Parameters ---------- space : WRITEME Input space. """ for fold in self._folds: this_space = space for layer in fold._layers: layer.set_input_space(this_space) this_space = layer.get_output_space()
27.918919
74
0.601162
__author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "3-clause BSD" __maintainer__ = "Steven Kearnes" from theano.compat.six.moves import xrange from pylearn2.blocks import StackedBlocks class StackedBlocksCV(object): def __init__(self, layers): stacked_blocks = [] n_folds = len(layers[0]) assert all([len(layer) == n_folds for layer in layers]) for k in xrange(n_folds): this_blocks = [] for i, layer in enumerate(layers): this_blocks.append(layer[k]) this_stacked_blocks = StackedBlocks(this_blocks) stacked_blocks.append(this_stacked_blocks) self._folds = stacked_blocks def select_fold(self, k): return self._folds[k] def get_input_space(self): return self._folds[0][0].get_input_space() def get_output_space(self): return self._folds[0][-1].get_output_space() def set_input_space(self, space): for fold in self._folds: this_space = space for layer in fold._layers: layer.set_input_space(this_space) this_space = layer.get_output_space()
true
true
f7030bd71130c2a93c0694be9d3f5c477bbc3082
560
py
Python
boilerplate/leads/migrations/0005_auto_20190512_1153.py
imciflam/electronic-document-management-system
f4db6be1e93204a3d09f0ebc09b7ba8bbdd90f77
[ "MIT" ]
1
2019-08-11T21:23:59.000Z
2019-08-11T21:23:59.000Z
boilerplate/leads/migrations/0005_auto_20190512_1153.py
imciflam/electronic-document-management-system
f4db6be1e93204a3d09f0ebc09b7ba8bbdd90f77
[ "MIT" ]
5
2020-06-05T21:28:14.000Z
2021-06-10T18:21:18.000Z
boilerplate/leads/migrations/0005_auto_20190512_1153.py
imciflam/electronic-document-management-system
f4db6be1e93204a3d09f0ebc09b7ba8bbdd90f77
[ "MIT" ]
null
null
null
# Generated by Django 2.2.1 on 2019-05-12 08:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('leads', '0004_lead_documentstagecode'), ] operations = [ migrations.AlterField( model_name='lead', name='email', field=models.CharField(blank=True, max_length=100), ), migrations.AlterField( model_name='lead', name='name', field=models.CharField(blank=True, max_length=100), ), ]
23.333333
63
0.578571
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('leads', '0004_lead_documentstagecode'), ] operations = [ migrations.AlterField( model_name='lead', name='email', field=models.CharField(blank=True, max_length=100), ), migrations.AlterField( model_name='lead', name='name', field=models.CharField(blank=True, max_length=100), ), ]
true
true
f7030c00749497a837fcb1e598f6a89804856653
46,146
py
Python
chia/wallet/wallet_node.py
loueradun/silicoin-blockchain
a199dd86ffb84f214ccd6192ad42d150badc05e1
[ "Apache-2.0" ]
null
null
null
chia/wallet/wallet_node.py
loueradun/silicoin-blockchain
a199dd86ffb84f214ccd6192ad42d150badc05e1
[ "Apache-2.0" ]
4
2021-11-16T08:18:39.000Z
2022-03-29T08:12:41.000Z
chia/wallet/wallet_node.py
Devh4ox4d/silishitcoin
4372d06aa4a54220f2bde29c8081410503679a82
[ "Apache-2.0" ]
null
null
null
import asyncio import json import logging import socket import time import traceback from pathlib import Path from typing import Callable, Dict, List, Optional, Set, Tuple, Union from blspy import PrivateKey from chia.consensus.block_record import BlockRecord from chia.consensus.blockchain_interface import BlockchainInterface from chia.consensus.constants import ConsensusConstants from chia.consensus.multiprocess_validation import PreValidationResult from chia.daemon.keychain_proxy import ( KeychainProxy, KeychainProxyConnectionFailure, KeyringIsEmpty, KeyringIsLocked, connect_to_keychain_and_validate, wrap_local_keychain, ) from chia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH from chia.protocols import wallet_protocol from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.protocols.wallet_protocol import ( RejectAdditionsRequest, RejectRemovalsRequest, RequestAdditions, RequestHeaderBlocks, RespondAdditions, RespondBlockHeader, RespondHeaderBlocks, RespondRemovals, ) from chia.server.node_discovery import WalletPeers from chia.server.outbound_message import Message, NodeType, make_msg from chia.server.server import ChiaServer from chia.server.ws_connection import WSChiaConnection from chia.types.blockchain_format.coin import Coin, hash_coin_list from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.coin_spend import CoinSpend from chia.types.header_block import HeaderBlock from chia.types.mempool_inclusion_status import MempoolInclusionStatus from chia.types.peer_info import PeerInfo from chia.util.byte_types import hexstr_to_bytes from chia.util.check_fork_next_block import check_fork_next_block from chia.util.errors import Err, ValidationError from chia.util.ints import uint32, uint128 from chia.util.keychain import Keychain from chia.util.lru_cache import LRUCache from chia.util.merkle_set import MerkleSet, 
confirm_included_already_hashed, confirm_not_included_already_hashed from chia.util.path import mkdir, path_from_root from chia.wallet.block_record import HeaderBlockRecord from chia.wallet.derivation_record import DerivationRecord from chia.wallet.settings.settings_objects import BackupInitialized from chia.wallet.transaction_record import TransactionRecord from chia.wallet.util.backup_utils import open_backup_file from chia.wallet.util.wallet_types import WalletType from chia.wallet.wallet_action import WalletAction from chia.wallet.wallet_blockchain import ReceiveBlockResult from chia.wallet.wallet_state_manager import WalletStateManager from chia.util.profiler import profile_task class WalletNode: key_config: Dict config: Dict constants: ConsensusConstants keychain_proxy: Optional[KeychainProxy] local_keychain: Optional[Keychain] # For testing only. KeychainProxy is used in normal cases server: Optional[ChiaServer] log: logging.Logger wallet_peers: WalletPeers # Maintains the state of the wallet (blockchain and transactions), handles DB connections wallet_state_manager: Optional[WalletStateManager] # How far away from LCA we must be to perform a full sync. 
Before then, do a short sync, # which is consecutive requests for the previous block short_sync_threshold: int _shut_down: bool root_path: Path state_changed_callback: Optional[Callable] syncing: bool full_node_peer: Optional[PeerInfo] peer_task: Optional[asyncio.Task] logged_in: bool wallet_peers_initialized: bool def __init__( self, config: Dict, root_path: Path, consensus_constants: ConsensusConstants, name: str = None, local_keychain: Optional[Keychain] = None, ): self.config = config self.constants = consensus_constants self.keychain_proxy = None self.local_keychain = local_keychain self.root_path = root_path self.log = logging.getLogger(name if name else __name__) # Normal operation data self.cached_blocks: Dict = {} self.future_block_hashes: Dict = {} # Sync data self._shut_down = False self.proof_hashes: List = [] self.header_hashes: List = [] self.header_hashes_error = False self.short_sync_threshold = 15 # Change the test when changing this self.potential_blocks_received: Dict = {} self.potential_header_hashes: Dict = {} self.state_changed_callback = None self.wallet_state_manager = None self.backup_initialized = False # Delay first launch sync after user imports backup info or decides to skip self.server = None self.wsm_close_task = None self.sync_task: Optional[asyncio.Task] = None self.logged_in_fingerprint: Optional[int] = None self.peer_task = None self.logged_in = False self.wallet_peers_initialized = False self.last_new_peak_messages = LRUCache(5) async def ensure_keychain_proxy(self) -> KeychainProxy: if not self.keychain_proxy: if self.local_keychain: self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log) else: self.keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log) if not self.keychain_proxy: raise KeychainProxyConnectionFailure("Failed to connect to keychain service") return self.keychain_proxy async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]: key: 
PrivateKey = None try: keychain_proxy = await self.ensure_keychain_proxy() key = await keychain_proxy.get_key_for_fingerprint(fingerprint) except KeyringIsEmpty: self.log.warning("No keys present. Create keys with the UI, or with the 'sit keys' program.") return None except KeyringIsLocked: self.log.warning("Keyring is locked") return None except KeychainProxyConnectionFailure as e: tb = traceback.format_exc() self.log.error(f"Missing keychain_proxy: {e} {tb}") raise e # Re-raise so that the caller can decide whether to continue or abort return key async def _start( self, fingerprint: Optional[int] = None, new_wallet: bool = False, backup_file: Optional[Path] = None, skip_backup_import: bool = False, ) -> bool: try: private_key = await self.get_key_for_fingerprint(fingerprint) except KeychainProxyConnectionFailure: self.log.error("Failed to connect to keychain service") return False if private_key is None: self.logged_in = False return False if self.config.get("enable_profiler", False): asyncio.create_task(profile_task(self.root_path, "wallet", self.log)) db_path_key_suffix = str(private_key.get_g1().get_fingerprint()) db_path_replaced: str = ( self.config["database_path"] .replace("CHALLENGE", self.config["selected_network"]) .replace("KEY", db_path_key_suffix) ) path = path_from_root(self.root_path, db_path_replaced) mkdir(path.parent) self.new_peak_lock = asyncio.Lock() assert self.server is not None self.wallet_state_manager = await WalletStateManager.create( private_key, self.config, path, self.constants, self.server, self.root_path ) self.wsm_close_task = None assert self.wallet_state_manager is not None backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings() if backup_settings.user_initialized is False: if new_wallet is True: await self.wallet_state_manager.user_settings.user_created_new_wallet() self.wallet_state_manager.new_wallet = True elif skip_backup_import is True: await 
self.wallet_state_manager.user_settings.user_skipped_backup_import() elif backup_file is not None: await self.wallet_state_manager.import_backup_info(backup_file) else: self.backup_initialized = False await self.wallet_state_manager.close_all_stores() self.wallet_state_manager = None self.logged_in = False return False self.backup_initialized = True # Start peers here after the backup initialization has finished # We only want to do this once per instantiation # However, doing it earlier before backup initialization causes # the wallet to spam the introducer if self.wallet_peers_initialized is False: asyncio.create_task(self.wallet_peers.start()) self.wallet_peers_initialized = True if backup_file is not None: json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key) if "start_height" in json_dict["data"]: start_height = json_dict["data"]["start_height"] self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"]) else: self.config["starting_height"] = 0 else: self.config["starting_height"] = 0 if self.state_changed_callback is not None: self.wallet_state_manager.set_callback(self.state_changed_callback) self.wallet_state_manager.set_pending_callback(self._pending_tx_handler) self._shut_down = False self.peer_task = asyncio.create_task(self._periodically_check_full_node()) self.sync_event = asyncio.Event() self.sync_task = asyncio.create_task(self.sync_job()) self.logged_in_fingerprint = fingerprint self.logged_in = True return True def _close(self): self.log.info("self._close") self.logged_in_fingerprint = None self._shut_down = True async def _await_closed(self): self.log.info("self._await_closed") await self.server.close_all_connections() asyncio.create_task(self.wallet_peers.ensure_is_closed()) if self.wallet_state_manager is not None: await self.wallet_state_manager.close_all_stores() self.wallet_state_manager = None if self.sync_task is not None: self.sync_task.cancel() self.sync_task = None if 
self.peer_task is not None: self.peer_task.cancel() self.peer_task = None self.logged_in = False def _set_state_changed_callback(self, callback: Callable): self.state_changed_callback = callback if self.wallet_state_manager is not None: self.wallet_state_manager.set_callback(self.state_changed_callback) self.wallet_state_manager.set_pending_callback(self._pending_tx_handler) def _pending_tx_handler(self): if self.wallet_state_manager is None or self.backup_initialized is False: return None asyncio.create_task(self._resend_queue()) async def _action_messages(self) -> List[Message]: if self.wallet_state_manager is None or self.backup_initialized is False: return [] actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions() result: List[Message] = [] for action in actions: data = json.loads(action.data) action_data = data["data"]["action_data"] if action.name == "request_puzzle_solution": coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"])) height = uint32(action_data["height"]) msg = make_msg( ProtocolMessageTypes.request_puzzle_solution, wallet_protocol.RequestPuzzleSolution(coin_name, height), ) result.append(msg) return result async def _resend_queue(self): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None for msg, sent_peers in await self._messages_to_resend(): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None full_nodes = self.server.get_full_node_connections() for peer in full_nodes: if peer.peer_node_id in sent_peers: continue await peer.send_message(msg) for msg in await self._action_messages(): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None await self.server.send_to_all([msg], NodeType.FULL_NODE) async def _messages_to_resend(self) -> List[Tuple[Message, 
Set[bytes32]]]: if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down: return [] messages: List[Tuple[Message, Set[bytes32]]] = [] records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent() for record in records: if record.spend_bundle is None: continue msg = make_msg( ProtocolMessageTypes.send_transaction, wallet_protocol.SendTransaction(record.spend_bundle), ) already_sent = set() for peer, status, _ in record.sent_to: if status == MempoolInclusionStatus.SUCCESS.value: already_sent.add(hexstr_to_bytes(peer)) messages.append((msg, already_sent)) return messages def set_server(self, server: ChiaServer): self.server = server DNS_SERVERS_EMPTY: list = [] # TODO: Perhaps use a different set of DNS seeders for wallets, to split the traffic. self.wallet_peers = WalletPeers( self.server, self.root_path, self.config["target_peer_count"], self.config["wallet_peers_path"], self.config["introducer_peer"], DNS_SERVERS_EMPTY, self.config["peer_connect_interval"], self.config["selected_network"], None, self.log, ) async def on_connect(self, peer: WSChiaConnection): if self.wallet_state_manager is None or self.backup_initialized is False: return None messages_peer_ids = await self._messages_to_resend() self.wallet_state_manager.state_changed("add_connection") for msg, peer_ids in messages_peer_ids: if peer.peer_node_id in peer_ids: continue await peer.send_message(msg) if not self.has_full_node() and self.wallet_peers is not None: asyncio.create_task(self.wallet_peers.on_connect(peer)) async def _periodically_check_full_node(self) -> None: tries = 0 while not self._shut_down and tries < 5: if self.has_full_node(): await self.wallet_peers.ensure_is_closed() if self.wallet_state_manager is not None: self.wallet_state_manager.state_changed("add_connection") break tries += 1 await asyncio.sleep(self.config["peer_connect_interval"]) def has_full_node(self) -> bool: if self.server is None: return False if 
"full_node_peer" in self.config: full_node_peer = PeerInfo( self.config["full_node_peer"]["host"], self.config["full_node_peer"]["port"], ) peers = [c.get_peer_info() for c in self.server.get_full_node_connections()] full_node_resolved = PeerInfo(socket.gethostbyname(full_node_peer.host), full_node_peer.port) if full_node_peer in peers or full_node_resolved in peers: self.log.info(f"Will not attempt to connect to other nodes, already connected to {full_node_peer}") for connection in self.server.get_full_node_connections(): if ( connection.get_peer_info() != full_node_peer and connection.get_peer_info() != full_node_resolved ): self.log.info(f"Closing unnecessary connection to {connection.get_peer_logging()}.") asyncio.create_task(connection.close()) return True return False async def complete_blocks(self, header_blocks: List[HeaderBlock], peer: WSChiaConnection): if self.wallet_state_manager is None: return None header_block_records: List[HeaderBlockRecord] = [] assert self.server trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"]) async with self.wallet_state_manager.blockchain.lock: for block in header_blocks: if block.is_transaction_block: # Find additions and removals (additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals( block, block.transactions_filter, None ) # Get Additions added_coins = await self.get_additions(peer, block, additions) if added_coins is None: raise ValueError("Failed to fetch additions") # Get removals removed_coins = await self.get_removals(peer, block, added_coins, removals) if removed_coins is None: raise ValueError("Failed to fetch removals") # If there is a launcher created, or we have a singleton spent, fetches the required solutions additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends( peer, block, added_coins, removed_coins ) hbr = HeaderBlockRecord(block, added_coins, removed_coins) else: hbr = HeaderBlockRecord(block, [], []) 
header_block_records.append(hbr) additional_coin_spends = [] (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( hbr, trusted=trusted, additional_coin_spends=additional_coin_spends ) if result == ReceiveBlockResult.NEW_PEAK: if not self.wallet_state_manager.sync_mode: self.wallet_state_manager.blockchain.clean_block_records() self.wallet_state_manager.state_changed("new_block") self.wallet_state_manager.state_changed("sync_changed") await self.wallet_state_manager.new_peak() elif result == ReceiveBlockResult.INVALID_BLOCK: self.log.info(f"Invalid block from peer: {peer.get_peer_logging()} {error}") await peer.close() return else: self.log.debug(f"Result: {result}") async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection): if self.wallet_state_manager is None: return if self.wallet_state_manager.blockchain.contains_block(peak.header_hash): self.log.debug(f"known peak {peak.header_hash}") return if self.wallet_state_manager.sync_mode: self.last_new_peak_messages.put(peer, peak) return async with self.new_peak_lock: curr_peak = self.wallet_state_manager.blockchain.get_peak() if curr_peak is not None and curr_peak.weight >= peak.weight: return request = wallet_protocol.RequestBlockHeader(peak.height) response: Optional[RespondBlockHeader] = await peer.request_block_header(request) if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None: self.log.warning(f"bad peak response from peer {response}") return header_block = response.header_block curr_peak_height = 0 if curr_peak is None else curr_peak.height if (curr_peak_height == 0 and peak.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or ( curr_peak_height > peak.height - 200 ): if peak.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]: await self.wallet_short_sync_backtrack(header_block, peer) else: await self.batch_sync_to_peak(curr_peak_height, peak) elif peak.height >= 
self.constants.WEIGHT_PROOF_RECENT_BLOCKS: # Request weight proof # Sync if PoW validates weight_request = RequestProofOfWeight(peak.height, peak.header_hash) weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight( weight_request, timeout=360 ) if weight_proof_response is None: return weight_proof = weight_proof_response.wp if self.wallet_state_manager is None: return if self.server is not None and self.server.is_trusted_peer(peer, self.config["trusted_peers"]): valid, fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point_no_validations( weight_proof ) else: valid, fork_point, _ = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof( weight_proof ) if not valid: self.log.error( f"invalid weight proof, num of epochs {len(weight_proof.sub_epochs)}" f" recent blocks num ,{len(weight_proof.recent_chain_data)}" ) self.log.debug(f"{weight_proof}") return self.log.info(f"Validated, fork point is {fork_point}") self.wallet_state_manager.sync_store.add_potential_fork_point( header_block.header_hash, uint32(fork_point) ) self.wallet_state_manager.sync_store.add_potential_peak(header_block) self.start_sync() async def wallet_short_sync_backtrack(self, header_block, peer): top = header_block blocks = [top] # Fetch blocks backwards until we hit the one that we have, # then complete them with additions / removals going forward while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0: request_prev = wallet_protocol.RequestBlockHeader(top.height - 1) response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev) if response_prev is None or not isinstance(response_prev, RespondBlockHeader): raise RuntimeError("bad block header response from peer while syncing") prev_head = response_prev.header_block blocks.append(prev_head) top = prev_head blocks.reverse() await self.complete_blocks(blocks, peer) await 
self.wallet_state_manager.create_more_puzzle_hashes() async def batch_sync_to_peak(self, fork_height, peak): advanced_peak = False batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS for i in range(max(0, fork_height - 1), peak.height, batch_size): start_height = i end_height = min(peak.height, start_height + batch_size) peers = self.server.get_full_node_connections() added = False for peer in peers: try: added, advanced_peak = await self.fetch_blocks_and_validate( peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height ) if added: break except Exception as e: await peer.close() exc = traceback.format_exc() self.log.error(f"Error while trying to fetch from peer:{e} {exc}") if not added: raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}") curr_peak = self.wallet_state_manager.blockchain.get_peak() assert peak is not None self.wallet_state_manager.blockchain.clean_block_record( min(end_height, curr_peak.height) - self.constants.BLOCKS_CACHE_SIZE ) def start_sync(self) -> None: self.log.info("self.sync_event.set()") self.sync_event.set() async def check_new_peak(self) -> None: if self.wallet_state_manager is None: return None current_peak: Optional[BlockRecord] = self.wallet_state_manager.blockchain.get_peak() if current_peak is None: return None potential_peaks: List[ Tuple[bytes32, HeaderBlock] ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples() for _, block in potential_peaks: if current_peak.weight < block.weight: await asyncio.sleep(5) self.start_sync() return None async def sync_job(self) -> None: while True: self.log.info("Loop start in sync job") if self._shut_down is True: break asyncio.create_task(self.check_new_peak()) await self.sync_event.wait() self.last_new_peak_messages = LRUCache(5) self.sync_event.clear() if self._shut_down is True: break try: assert self.wallet_state_manager is not None self.wallet_state_manager.set_sync_mode(True) await self._sync() except Exception as 
e: tb = traceback.format_exc() self.log.error(f"Loop exception in sync {e}. {tb}") finally: if self.wallet_state_manager is not None: self.wallet_state_manager.set_sync_mode(False) for peer, peak in self.last_new_peak_messages.cache.items(): asyncio.create_task(self.new_peak_wallet(peak, peer)) self.log.info("Loop end in sync job") async def _sync(self) -> None: """ Wallet has fallen far behind (or is starting up for the first time), and must be synced up to the LCA of the blockchain. """ if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None: return None highest_weight: uint128 = uint128(0) peak_height: uint32 = uint32(0) peak: Optional[HeaderBlock] = None potential_peaks: List[ Tuple[bytes32, HeaderBlock] ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples() self.log.info(f"Have collected {len(potential_peaks)} potential peaks") for header_hash, potential_peak_block in potential_peaks: if potential_peak_block.weight > highest_weight: highest_weight = potential_peak_block.weight peak_height = potential_peak_block.height peak = potential_peak_block if peak_height is None or peak_height == 0: return None if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight: self.log.info("Not performing sync, already caught up.") return None peers: List[WSChiaConnection] = self.server.get_full_node_connections() if len(peers) == 0: self.log.info("No peers to sync to") return None async with self.wallet_state_manager.blockchain.lock: fork_height = None if peak is not None: fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash) assert fork_height is not None # This is the fork point in SES in the case where no fork was detected peers = self.server.get_full_node_connections() fork_height = await check_fork_next_block( self.wallet_state_manager.blockchain, fork_height, peers, wallet_next_block_check ) if fork_height is None: fork_height = 
uint32(0) await self.wallet_state_manager.blockchain.warmup(fork_height) await self.batch_sync_to_peak(fork_height, peak) async def fetch_blocks_and_validate( self, peer: WSChiaConnection, height_start: uint32, height_end: uint32, fork_point_with_peak: Optional[uint32], ) -> Tuple[bool, bool]: """ Returns whether the blocks validated, and whether the peak was advanced """ if self.wallet_state_manager is None: return False, False self.log.info(f"Requesting blocks {height_start}-{height_end}") request = RequestHeaderBlocks(uint32(height_start), uint32(height_end)) res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request) if res is None or not isinstance(res, RespondHeaderBlocks): raise ValueError("Peer returned no response") header_blocks: List[HeaderBlock] = res.header_blocks advanced_peak = False if header_blocks is None: raise ValueError(f"No response from peer {peer}") assert self.server trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"]) pre_validation_results: Optional[List[PreValidationResult]] = None if not trusted: pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing( header_blocks ) if pre_validation_results is None: return False, advanced_peak assert len(header_blocks) == len(pre_validation_results) for i in range(len(header_blocks)): header_block = header_blocks[i] if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None: raise ValidationError(Err(pre_validation_results[i].error)) fork_point_with_old_peak = None if advanced_peak else fork_point_with_peak if header_block.is_transaction_block: # Find additions and removals (additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals( header_block, header_block.transactions_filter, fork_point_with_old_peak ) # Get Additions added_coins = await self.get_additions(peer, header_block, additions) if added_coins is None: raise ValueError("Failed to 
fetch additions") # Get removals removed_coins = await self.get_removals(peer, header_block, added_coins, removals) if removed_coins is None: raise ValueError("Failed to fetch removals") # If there is a launcher created, or we have a singleton spent, fetches the required solutions additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends( peer, header_block, added_coins, removed_coins ) header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins) else: header_block_record = HeaderBlockRecord(header_block, [], []) additional_coin_spends = [] start_t = time.time() if trusted: (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( header_block_record, None, trusted, fork_point_with_old_peak, additional_coin_spends=additional_coin_spends, ) else: assert pre_validation_results is not None (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( header_block_record, pre_validation_results[i], trusted, fork_point_with_old_peak, additional_coin_spends=additional_coin_spends, ) self.log.debug( f"Time taken to validate {header_block.height} with fork " f"{fork_point_with_old_peak}: {time.time() - start_t}" ) if result == ReceiveBlockResult.NEW_PEAK: advanced_peak = True self.wallet_state_manager.state_changed("new_block") elif result == ReceiveBlockResult.INVALID_BLOCK: raise ValueError("Value error peer sent us invalid block") if advanced_peak: await self.wallet_state_manager.create_more_puzzle_hashes() return True, advanced_peak def validate_additions( self, coins: List[Tuple[bytes32, List[Coin]]], proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]], root, ): if proofs is None: # Verify root additions_merkle_set = MerkleSet() # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash for puzzle_hash, coins_l in coins: additions_merkle_set.add_already_hashed(puzzle_hash) additions_merkle_set.add_already_hashed(hash_coin_list(coins_l)) 
additions_root = additions_merkle_set.get_root() if root != additions_root: return False else: for i in range(len(coins)): assert coins[i][0] == proofs[i][0] coin_list_1: List[Coin] = coins[i][1] puzzle_hash_proof: bytes32 = proofs[i][1] coin_list_proof: Optional[bytes32] = proofs[i][2] if len(coin_list_1) == 0: # Verify exclusion proof for puzzle hash not_included = confirm_not_included_already_hashed( root, coins[i][0], puzzle_hash_proof, ) if not_included is False: return False else: try: # Verify inclusion proof for coin list included = confirm_included_already_hashed( root, hash_coin_list(coin_list_1), coin_list_proof, ) if included is False: return False except AssertionError: return False try: # Verify inclusion proof for puzzle hash included = confirm_included_already_hashed( root, coins[i][0], puzzle_hash_proof, ) if included is False: return False except AssertionError: return False return True def validate_removals(self, coins, proofs, root): if proofs is None: # If there are no proofs, it means all removals were returned in the response. # we must find the ones relevant to our wallets. # Verify removals root removals_merkle_set = MerkleSet() for name_coin in coins: # TODO review all verification name, coin = name_coin if coin is not None: removals_merkle_set.add_already_hashed(coin.name()) removals_root = removals_merkle_set.get_root() if root != removals_root: return False else: # This means the full node has responded only with the relevant removals # for our wallet. Each merkle proof must be verified. 
if len(coins) != len(proofs): return False for i in range(len(coins)): # Coins are in the same order as proofs if coins[i][0] != proofs[i][0]: return False coin = coins[i][1] if coin is None: # Verifies merkle proof of exclusion not_included = confirm_not_included_already_hashed( root, coins[i][0], proofs[i][1], ) if not_included is False: return False else: # Verifies merkle proof of inclusion of coin name if coins[i][0] != coin.name(): return False included = confirm_included_already_hashed( root, coin.name(), proofs[i][1], ) if included is False: return False return True async def fetch_puzzle_solution(self, peer, height: uint32, coin: Coin) -> CoinSpend: solution_response = await peer.request_puzzle_solution( wallet_protocol.RequestPuzzleSolution(coin.name(), height) ) if solution_response is None or not isinstance(solution_response, wallet_protocol.RespondPuzzleSolution): raise ValueError(f"Was not able to obtain solution {solution_response}") return CoinSpend(coin, solution_response.response.puzzle, solution_response.response.solution) async def get_additional_coin_spends( self, peer, block, added_coins: List[Coin], removed_coins: List[Coin] ) -> List[CoinSpend]: assert self.wallet_state_manager is not None additional_coin_spends: List[CoinSpend] = [] if len(removed_coins) > 0: removed_coin_ids = set([coin.name() for coin in removed_coins]) all_added_coins = await self.get_additions(peer, block, [], get_all_additions=True) assert all_added_coins is not None if all_added_coins is not None: for coin in all_added_coins: # This searches specifically for a launcher being created, and adds the solution of the launcher if coin.puzzle_hash == SINGLETON_LAUNCHER_HASH and coin.parent_coin_info in removed_coin_ids: cs: CoinSpend = await self.fetch_puzzle_solution(peer, block.height, coin) additional_coin_spends.append(cs) # Apply this coin solution, which might add things to interested list await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False) 
all_removed_coins: Optional[List[Coin]] = await self.get_removals( peer, block, added_coins, removed_coins, request_all_removals=True ) assert all_removed_coins is not None all_removed_coins_dict: Dict[bytes32, Coin] = {coin.name(): coin for coin in all_removed_coins} keep_searching = True while keep_searching: # This keeps fetching solutions for coins we are interested list, in this block, until # there are no more interested things to fetch keep_searching = False interested_ids: List[ bytes32 ] = await self.wallet_state_manager.interested_store.get_interested_coin_ids() for coin_id in interested_ids: if coin_id in all_removed_coins_dict: coin = all_removed_coins_dict[coin_id] cs = await self.fetch_puzzle_solution(peer, block.height, coin) # Apply this coin solution, which might add things to interested list await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False) additional_coin_spends.append(cs) keep_searching = True all_removed_coins_dict.pop(coin_id) break return additional_coin_spends async def get_additions( self, peer: WSChiaConnection, block_i, additions: Optional[List[bytes32]], get_all_additions: bool = False ) -> Optional[List[Coin]]: if (additions is not None and len(additions) > 0) or get_all_additions: if get_all_additions: additions = None additions_request = RequestAdditions(block_i.height, block_i.header_hash, additions) additions_res: Optional[Union[RespondAdditions, RejectAdditionsRequest]] = await peer.request_additions( additions_request ) if additions_res is None: await peer.close() return None elif isinstance(additions_res, RespondAdditions): validated = self.validate_additions( additions_res.coins, additions_res.proofs, block_i.foliage_transaction_block.additions_root, ) if not validated: await peer.close() return None added_coins = [] for ph_coins in additions_res.coins: ph, coins = ph_coins added_coins.extend(coins) return added_coins elif isinstance(additions_res, RejectRemovalsRequest): await peer.close() return None 
return None else: return [] # No added coins async def get_removals( self, peer: WSChiaConnection, block_i, additions, removals, request_all_removals=False ) -> Optional[List[Coin]]: assert self.wallet_state_manager is not None # Check if we need all removals for coin in additions: puzzle_store = self.wallet_state_manager.puzzle_store record_info: Optional[DerivationRecord] = await puzzle_store.get_derivation_record_for_puzzle_hash( coin.puzzle_hash.hex() ) if record_info is not None and record_info.wallet_type == WalletType.COLOURED_COIN: # TODO why ? request_all_removals = True break if record_info is not None and record_info.wallet_type == WalletType.DISTRIBUTED_ID: request_all_removals = True break if len(removals) > 0 or request_all_removals: if request_all_removals: removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, None) else: removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, removals) removals_res: Optional[Union[RespondRemovals, RejectRemovalsRequest]] = await peer.request_removals( removals_request ) if removals_res is None: return None elif isinstance(removals_res, RespondRemovals): validated = self.validate_removals( removals_res.coins, removals_res.proofs, block_i.foliage_transaction_block.removals_root, ) if validated is False: await peer.close() return None removed_coins = [] for _, coins_l in removals_res.coins: if coins_l is not None: removed_coins.append(coins_l) return removed_coins elif isinstance(removals_res, RejectRemovalsRequest): return None else: return None else: return [] async def wallet_next_block_check( peer: WSChiaConnection, potential_peek: uint32, blockchain: BlockchainInterface ) -> bool: block_response = await peer.request_header_blocks( wallet_protocol.RequestHeaderBlocks(potential_peek, potential_peek) ) if block_response is not None and isinstance(block_response, wallet_protocol.RespondHeaderBlocks): our_peak = blockchain.get_peak() if our_peak is not 
None and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash: return True return False
45.241176
118
0.616543
import asyncio import json import logging import socket import time import traceback from pathlib import Path from typing import Callable, Dict, List, Optional, Set, Tuple, Union from blspy import PrivateKey from chia.consensus.block_record import BlockRecord from chia.consensus.blockchain_interface import BlockchainInterface from chia.consensus.constants import ConsensusConstants from chia.consensus.multiprocess_validation import PreValidationResult from chia.daemon.keychain_proxy import ( KeychainProxy, KeychainProxyConnectionFailure, KeyringIsEmpty, KeyringIsLocked, connect_to_keychain_and_validate, wrap_local_keychain, ) from chia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH from chia.protocols import wallet_protocol from chia.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight from chia.protocols.protocol_message_types import ProtocolMessageTypes from chia.protocols.wallet_protocol import ( RejectAdditionsRequest, RejectRemovalsRequest, RequestAdditions, RequestHeaderBlocks, RespondAdditions, RespondBlockHeader, RespondHeaderBlocks, RespondRemovals, ) from chia.server.node_discovery import WalletPeers from chia.server.outbound_message import Message, NodeType, make_msg from chia.server.server import ChiaServer from chia.server.ws_connection import WSChiaConnection from chia.types.blockchain_format.coin import Coin, hash_coin_list from chia.types.blockchain_format.sized_bytes import bytes32 from chia.types.coin_spend import CoinSpend from chia.types.header_block import HeaderBlock from chia.types.mempool_inclusion_status import MempoolInclusionStatus from chia.types.peer_info import PeerInfo from chia.util.byte_types import hexstr_to_bytes from chia.util.check_fork_next_block import check_fork_next_block from chia.util.errors import Err, ValidationError from chia.util.ints import uint32, uint128 from chia.util.keychain import Keychain from chia.util.lru_cache import LRUCache from chia.util.merkle_set import MerkleSet, 
confirm_included_already_hashed, confirm_not_included_already_hashed from chia.util.path import mkdir, path_from_root from chia.wallet.block_record import HeaderBlockRecord from chia.wallet.derivation_record import DerivationRecord from chia.wallet.settings.settings_objects import BackupInitialized from chia.wallet.transaction_record import TransactionRecord from chia.wallet.util.backup_utils import open_backup_file from chia.wallet.util.wallet_types import WalletType from chia.wallet.wallet_action import WalletAction from chia.wallet.wallet_blockchain import ReceiveBlockResult from chia.wallet.wallet_state_manager import WalletStateManager from chia.util.profiler import profile_task class WalletNode: key_config: Dict config: Dict constants: ConsensusConstants keychain_proxy: Optional[KeychainProxy] local_keychain: Optional[Keychain] server: Optional[ChiaServer] log: logging.Logger wallet_peers: WalletPeers wallet_state_manager: Optional[WalletStateManager] short_sync_threshold: int _shut_down: bool root_path: Path state_changed_callback: Optional[Callable] syncing: bool full_node_peer: Optional[PeerInfo] peer_task: Optional[asyncio.Task] logged_in: bool wallet_peers_initialized: bool def __init__( self, config: Dict, root_path: Path, consensus_constants: ConsensusConstants, name: str = None, local_keychain: Optional[Keychain] = None, ): self.config = config self.constants = consensus_constants self.keychain_proxy = None self.local_keychain = local_keychain self.root_path = root_path self.log = logging.getLogger(name if name else __name__) self.cached_blocks: Dict = {} self.future_block_hashes: Dict = {} self._shut_down = False self.proof_hashes: List = [] self.header_hashes: List = [] self.header_hashes_error = False self.short_sync_threshold = 15 self.potential_blocks_received: Dict = {} self.potential_header_hashes: Dict = {} self.state_changed_callback = None self.wallet_state_manager = None self.backup_initialized = False self.server = None self.wsm_close_task 
= None self.sync_task: Optional[asyncio.Task] = None self.logged_in_fingerprint: Optional[int] = None self.peer_task = None self.logged_in = False self.wallet_peers_initialized = False self.last_new_peak_messages = LRUCache(5) async def ensure_keychain_proxy(self) -> KeychainProxy: if not self.keychain_proxy: if self.local_keychain: self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log) else: self.keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log) if not self.keychain_proxy: raise KeychainProxyConnectionFailure("Failed to connect to keychain service") return self.keychain_proxy async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]: key: PrivateKey = None try: keychain_proxy = await self.ensure_keychain_proxy() key = await keychain_proxy.get_key_for_fingerprint(fingerprint) except KeyringIsEmpty: self.log.warning("No keys present. Create keys with the UI, or with the 'sit keys' program.") return None except KeyringIsLocked: self.log.warning("Keyring is locked") return None except KeychainProxyConnectionFailure as e: tb = traceback.format_exc() self.log.error(f"Missing keychain_proxy: {e} {tb}") raise e return key async def _start( self, fingerprint: Optional[int] = None, new_wallet: bool = False, backup_file: Optional[Path] = None, skip_backup_import: bool = False, ) -> bool: try: private_key = await self.get_key_for_fingerprint(fingerprint) except KeychainProxyConnectionFailure: self.log.error("Failed to connect to keychain service") return False if private_key is None: self.logged_in = False return False if self.config.get("enable_profiler", False): asyncio.create_task(profile_task(self.root_path, "wallet", self.log)) db_path_key_suffix = str(private_key.get_g1().get_fingerprint()) db_path_replaced: str = ( self.config["database_path"] .replace("CHALLENGE", self.config["selected_network"]) .replace("KEY", db_path_key_suffix) ) path = path_from_root(self.root_path, 
db_path_replaced) mkdir(path.parent) self.new_peak_lock = asyncio.Lock() assert self.server is not None self.wallet_state_manager = await WalletStateManager.create( private_key, self.config, path, self.constants, self.server, self.root_path ) self.wsm_close_task = None assert self.wallet_state_manager is not None backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings() if backup_settings.user_initialized is False: if new_wallet is True: await self.wallet_state_manager.user_settings.user_created_new_wallet() self.wallet_state_manager.new_wallet = True elif skip_backup_import is True: await self.wallet_state_manager.user_settings.user_skipped_backup_import() elif backup_file is not None: await self.wallet_state_manager.import_backup_info(backup_file) else: self.backup_initialized = False await self.wallet_state_manager.close_all_stores() self.wallet_state_manager = None self.logged_in = False return False self.backup_initialized = True if self.wallet_peers_initialized is False: asyncio.create_task(self.wallet_peers.start()) self.wallet_peers_initialized = True if backup_file is not None: json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key) if "start_height" in json_dict["data"]: start_height = json_dict["data"]["start_height"] self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"]) else: self.config["starting_height"] = 0 else: self.config["starting_height"] = 0 if self.state_changed_callback is not None: self.wallet_state_manager.set_callback(self.state_changed_callback) self.wallet_state_manager.set_pending_callback(self._pending_tx_handler) self._shut_down = False self.peer_task = asyncio.create_task(self._periodically_check_full_node()) self.sync_event = asyncio.Event() self.sync_task = asyncio.create_task(self.sync_job()) self.logged_in_fingerprint = fingerprint self.logged_in = True return True def _close(self): self.log.info("self._close") 
self.logged_in_fingerprint = None self._shut_down = True async def _await_closed(self): self.log.info("self._await_closed") await self.server.close_all_connections() asyncio.create_task(self.wallet_peers.ensure_is_closed()) if self.wallet_state_manager is not None: await self.wallet_state_manager.close_all_stores() self.wallet_state_manager = None if self.sync_task is not None: self.sync_task.cancel() self.sync_task = None if self.peer_task is not None: self.peer_task.cancel() self.peer_task = None self.logged_in = False def _set_state_changed_callback(self, callback: Callable): self.state_changed_callback = callback if self.wallet_state_manager is not None: self.wallet_state_manager.set_callback(self.state_changed_callback) self.wallet_state_manager.set_pending_callback(self._pending_tx_handler) def _pending_tx_handler(self): if self.wallet_state_manager is None or self.backup_initialized is False: return None asyncio.create_task(self._resend_queue()) async def _action_messages(self) -> List[Message]: if self.wallet_state_manager is None or self.backup_initialized is False: return [] actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions() result: List[Message] = [] for action in actions: data = json.loads(action.data) action_data = data["data"]["action_data"] if action.name == "request_puzzle_solution": coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"])) height = uint32(action_data["height"]) msg = make_msg( ProtocolMessageTypes.request_puzzle_solution, wallet_protocol.RequestPuzzleSolution(coin_name, height), ) result.append(msg) return result async def _resend_queue(self): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None for msg, sent_peers in await self._messages_to_resend(): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None full_nodes = 
self.server.get_full_node_connections() for peer in full_nodes: if peer.peer_node_id in sent_peers: continue await peer.send_message(msg) for msg in await self._action_messages(): if ( self._shut_down or self.server is None or self.wallet_state_manager is None or self.backup_initialized is None ): return None await self.server.send_to_all([msg], NodeType.FULL_NODE) async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]: if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down: return [] messages: List[Tuple[Message, Set[bytes32]]] = [] records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent() for record in records: if record.spend_bundle is None: continue msg = make_msg( ProtocolMessageTypes.send_transaction, wallet_protocol.SendTransaction(record.spend_bundle), ) already_sent = set() for peer, status, _ in record.sent_to: if status == MempoolInclusionStatus.SUCCESS.value: already_sent.add(hexstr_to_bytes(peer)) messages.append((msg, already_sent)) return messages def set_server(self, server: ChiaServer): self.server = server DNS_SERVERS_EMPTY: list = [] self.wallet_peers = WalletPeers( self.server, self.root_path, self.config["target_peer_count"], self.config["wallet_peers_path"], self.config["introducer_peer"], DNS_SERVERS_EMPTY, self.config["peer_connect_interval"], self.config["selected_network"], None, self.log, ) async def on_connect(self, peer: WSChiaConnection): if self.wallet_state_manager is None or self.backup_initialized is False: return None messages_peer_ids = await self._messages_to_resend() self.wallet_state_manager.state_changed("add_connection") for msg, peer_ids in messages_peer_ids: if peer.peer_node_id in peer_ids: continue await peer.send_message(msg) if not self.has_full_node() and self.wallet_peers is not None: asyncio.create_task(self.wallet_peers.on_connect(peer)) async def _periodically_check_full_node(self) -> None: tries = 0 while not self._shut_down 
and tries < 5: if self.has_full_node(): await self.wallet_peers.ensure_is_closed() if self.wallet_state_manager is not None: self.wallet_state_manager.state_changed("add_connection") break tries += 1 await asyncio.sleep(self.config["peer_connect_interval"]) def has_full_node(self) -> bool: if self.server is None: return False if "full_node_peer" in self.config: full_node_peer = PeerInfo( self.config["full_node_peer"]["host"], self.config["full_node_peer"]["port"], ) peers = [c.get_peer_info() for c in self.server.get_full_node_connections()] full_node_resolved = PeerInfo(socket.gethostbyname(full_node_peer.host), full_node_peer.port) if full_node_peer in peers or full_node_resolved in peers: self.log.info(f"Will not attempt to connect to other nodes, already connected to {full_node_peer}") for connection in self.server.get_full_node_connections(): if ( connection.get_peer_info() != full_node_peer and connection.get_peer_info() != full_node_resolved ): self.log.info(f"Closing unnecessary connection to {connection.get_peer_logging()}.") asyncio.create_task(connection.close()) return True return False async def complete_blocks(self, header_blocks: List[HeaderBlock], peer: WSChiaConnection): if self.wallet_state_manager is None: return None header_block_records: List[HeaderBlockRecord] = [] assert self.server trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"]) async with self.wallet_state_manager.blockchain.lock: for block in header_blocks: if block.is_transaction_block: (additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals( block, block.transactions_filter, None ) added_coins = await self.get_additions(peer, block, additions) if added_coins is None: raise ValueError("Failed to fetch additions") removed_coins = await self.get_removals(peer, block, added_coins, removals) if removed_coins is None: raise ValueError("Failed to fetch removals") additional_coin_spends: List[CoinSpend] = await 
self.get_additional_coin_spends( peer, block, added_coins, removed_coins ) hbr = HeaderBlockRecord(block, added_coins, removed_coins) else: hbr = HeaderBlockRecord(block, [], []) header_block_records.append(hbr) additional_coin_spends = [] (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( hbr, trusted=trusted, additional_coin_spends=additional_coin_spends ) if result == ReceiveBlockResult.NEW_PEAK: if not self.wallet_state_manager.sync_mode: self.wallet_state_manager.blockchain.clean_block_records() self.wallet_state_manager.state_changed("new_block") self.wallet_state_manager.state_changed("sync_changed") await self.wallet_state_manager.new_peak() elif result == ReceiveBlockResult.INVALID_BLOCK: self.log.info(f"Invalid block from peer: {peer.get_peer_logging()} {error}") await peer.close() return else: self.log.debug(f"Result: {result}") async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChiaConnection): if self.wallet_state_manager is None: return if self.wallet_state_manager.blockchain.contains_block(peak.header_hash): self.log.debug(f"known peak {peak.header_hash}") return if self.wallet_state_manager.sync_mode: self.last_new_peak_messages.put(peer, peak) return async with self.new_peak_lock: curr_peak = self.wallet_state_manager.blockchain.get_peak() if curr_peak is not None and curr_peak.weight >= peak.weight: return request = wallet_protocol.RequestBlockHeader(peak.height) response: Optional[RespondBlockHeader] = await peer.request_block_header(request) if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None: self.log.warning(f"bad peak response from peer {response}") return header_block = response.header_block curr_peak_height = 0 if curr_peak is None else curr_peak.height if (curr_peak_height == 0 and peak.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or ( curr_peak_height > peak.height - 200 ): if peak.height <= curr_peak_height + 
self.config["short_sync_blocks_behind_threshold"]: await self.wallet_short_sync_backtrack(header_block, peer) else: await self.batch_sync_to_peak(curr_peak_height, peak) elif peak.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS: weight_request = RequestProofOfWeight(peak.height, peak.header_hash) weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight( weight_request, timeout=360 ) if weight_proof_response is None: return weight_proof = weight_proof_response.wp if self.wallet_state_manager is None: return if self.server is not None and self.server.is_trusted_peer(peer, self.config["trusted_peers"]): valid, fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point_no_validations( weight_proof ) else: valid, fork_point, _ = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof( weight_proof ) if not valid: self.log.error( f"invalid weight proof, num of epochs {len(weight_proof.sub_epochs)}" f" recent blocks num ,{len(weight_proof.recent_chain_data)}" ) self.log.debug(f"{weight_proof}") return self.log.info(f"Validated, fork point is {fork_point}") self.wallet_state_manager.sync_store.add_potential_fork_point( header_block.header_hash, uint32(fork_point) ) self.wallet_state_manager.sync_store.add_potential_peak(header_block) self.start_sync() async def wallet_short_sync_backtrack(self, header_block, peer): top = header_block blocks = [top] while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0: request_prev = wallet_protocol.RequestBlockHeader(top.height - 1) response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev) if response_prev is None or not isinstance(response_prev, RespondBlockHeader): raise RuntimeError("bad block header response from peer while syncing") prev_head = response_prev.header_block blocks.append(prev_head) top = prev_head blocks.reverse() await self.complete_blocks(blocks, peer) await 
self.wallet_state_manager.create_more_puzzle_hashes() async def batch_sync_to_peak(self, fork_height, peak): advanced_peak = False batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS for i in range(max(0, fork_height - 1), peak.height, batch_size): start_height = i end_height = min(peak.height, start_height + batch_size) peers = self.server.get_full_node_connections() added = False for peer in peers: try: added, advanced_peak = await self.fetch_blocks_and_validate( peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height ) if added: break except Exception as e: await peer.close() exc = traceback.format_exc() self.log.error(f"Error while trying to fetch from peer:{e} {exc}") if not added: raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}") curr_peak = self.wallet_state_manager.blockchain.get_peak() assert peak is not None self.wallet_state_manager.blockchain.clean_block_record( min(end_height, curr_peak.height) - self.constants.BLOCKS_CACHE_SIZE ) def start_sync(self) -> None: self.log.info("self.sync_event.set()") self.sync_event.set() async def check_new_peak(self) -> None: if self.wallet_state_manager is None: return None current_peak: Optional[BlockRecord] = self.wallet_state_manager.blockchain.get_peak() if current_peak is None: return None potential_peaks: List[ Tuple[bytes32, HeaderBlock] ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples() for _, block in potential_peaks: if current_peak.weight < block.weight: await asyncio.sleep(5) self.start_sync() return None async def sync_job(self) -> None: while True: self.log.info("Loop start in sync job") if self._shut_down is True: break asyncio.create_task(self.check_new_peak()) await self.sync_event.wait() self.last_new_peak_messages = LRUCache(5) self.sync_event.clear() if self._shut_down is True: break try: assert self.wallet_state_manager is not None self.wallet_state_manager.set_sync_mode(True) await self._sync() except Exception as 
e: tb = traceback.format_exc() self.log.error(f"Loop exception in sync {e}. {tb}") finally: if self.wallet_state_manager is not None: self.wallet_state_manager.set_sync_mode(False) for peer, peak in self.last_new_peak_messages.cache.items(): asyncio.create_task(self.new_peak_wallet(peak, peer)) self.log.info("Loop end in sync job") async def _sync(self) -> None: if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None: return None highest_weight: uint128 = uint128(0) peak_height: uint32 = uint32(0) peak: Optional[HeaderBlock] = None potential_peaks: List[ Tuple[bytes32, HeaderBlock] ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples() self.log.info(f"Have collected {len(potential_peaks)} potential peaks") for header_hash, potential_peak_block in potential_peaks: if potential_peak_block.weight > highest_weight: highest_weight = potential_peak_block.weight peak_height = potential_peak_block.height peak = potential_peak_block if peak_height is None or peak_height == 0: return None if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight: self.log.info("Not performing sync, already caught up.") return None peers: List[WSChiaConnection] = self.server.get_full_node_connections() if len(peers) == 0: self.log.info("No peers to sync to") return None async with self.wallet_state_manager.blockchain.lock: fork_height = None if peak is not None: fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash) assert fork_height is not None peers = self.server.get_full_node_connections() fork_height = await check_fork_next_block( self.wallet_state_manager.blockchain, fork_height, peers, wallet_next_block_check ) if fork_height is None: fork_height = uint32(0) await self.wallet_state_manager.blockchain.warmup(fork_height) await self.batch_sync_to_peak(fork_height, peak) async def fetch_blocks_and_validate( self, peer: WSChiaConnection, 
height_start: uint32, height_end: uint32, fork_point_with_peak: Optional[uint32], ) -> Tuple[bool, bool]: if self.wallet_state_manager is None: return False, False self.log.info(f"Requesting blocks {height_start}-{height_end}") request = RequestHeaderBlocks(uint32(height_start), uint32(height_end)) res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request) if res is None or not isinstance(res, RespondHeaderBlocks): raise ValueError("Peer returned no response") header_blocks: List[HeaderBlock] = res.header_blocks advanced_peak = False if header_blocks is None: raise ValueError(f"No response from peer {peer}") assert self.server trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"]) pre_validation_results: Optional[List[PreValidationResult]] = None if not trusted: pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing( header_blocks ) if pre_validation_results is None: return False, advanced_peak assert len(header_blocks) == len(pre_validation_results) for i in range(len(header_blocks)): header_block = header_blocks[i] if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None: raise ValidationError(Err(pre_validation_results[i].error)) fork_point_with_old_peak = None if advanced_peak else fork_point_with_peak if header_block.is_transaction_block: (additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals( header_block, header_block.transactions_filter, fork_point_with_old_peak ) added_coins = await self.get_additions(peer, header_block, additions) if added_coins is None: raise ValueError("Failed to fetch additions") removed_coins = await self.get_removals(peer, header_block, added_coins, removals) if removed_coins is None: raise ValueError("Failed to fetch removals") additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends( peer, header_block, added_coins, removed_coins ) 
header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins) else: header_block_record = HeaderBlockRecord(header_block, [], []) additional_coin_spends = [] start_t = time.time() if trusted: (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( header_block_record, None, trusted, fork_point_with_old_peak, additional_coin_spends=additional_coin_spends, ) else: assert pre_validation_results is not None (result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block( header_block_record, pre_validation_results[i], trusted, fork_point_with_old_peak, additional_coin_spends=additional_coin_spends, ) self.log.debug( f"Time taken to validate {header_block.height} with fork " f"{fork_point_with_old_peak}: {time.time() - start_t}" ) if result == ReceiveBlockResult.NEW_PEAK: advanced_peak = True self.wallet_state_manager.state_changed("new_block") elif result == ReceiveBlockResult.INVALID_BLOCK: raise ValueError("Value error peer sent us invalid block") if advanced_peak: await self.wallet_state_manager.create_more_puzzle_hashes() return True, advanced_peak def validate_additions( self, coins: List[Tuple[bytes32, List[Coin]]], proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]], root, ): if proofs is None: additions_merkle_set = MerkleSet() for puzzle_hash, coins_l in coins: additions_merkle_set.add_already_hashed(puzzle_hash) additions_merkle_set.add_already_hashed(hash_coin_list(coins_l)) additions_root = additions_merkle_set.get_root() if root != additions_root: return False else: for i in range(len(coins)): assert coins[i][0] == proofs[i][0] coin_list_1: List[Coin] = coins[i][1] puzzle_hash_proof: bytes32 = proofs[i][1] coin_list_proof: Optional[bytes32] = proofs[i][2] if len(coin_list_1) == 0: not_included = confirm_not_included_already_hashed( root, coins[i][0], puzzle_hash_proof, ) if not_included is False: return False else: try: included = confirm_included_already_hashed( root, 
hash_coin_list(coin_list_1), coin_list_proof, ) if included is False: return False except AssertionError: return False try: included = confirm_included_already_hashed( root, coins[i][0], puzzle_hash_proof, ) if included is False: return False except AssertionError: return False return True def validate_removals(self, coins, proofs, root): if proofs is None: removals_merkle_set = MerkleSet() for name_coin in coins: name, coin = name_coin if coin is not None: removals_merkle_set.add_already_hashed(coin.name()) removals_root = removals_merkle_set.get_root() if root != removals_root: return False else: if len(coins) != len(proofs): return False for i in range(len(coins)): if coins[i][0] != proofs[i][0]: return False coin = coins[i][1] if coin is None: not_included = confirm_not_included_already_hashed( root, coins[i][0], proofs[i][1], ) if not_included is False: return False else: if coins[i][0] != coin.name(): return False included = confirm_included_already_hashed( root, coin.name(), proofs[i][1], ) if included is False: return False return True async def fetch_puzzle_solution(self, peer, height: uint32, coin: Coin) -> CoinSpend: solution_response = await peer.request_puzzle_solution( wallet_protocol.RequestPuzzleSolution(coin.name(), height) ) if solution_response is None or not isinstance(solution_response, wallet_protocol.RespondPuzzleSolution): raise ValueError(f"Was not able to obtain solution {solution_response}") return CoinSpend(coin, solution_response.response.puzzle, solution_response.response.solution) async def get_additional_coin_spends( self, peer, block, added_coins: List[Coin], removed_coins: List[Coin] ) -> List[CoinSpend]: assert self.wallet_state_manager is not None additional_coin_spends: List[CoinSpend] = [] if len(removed_coins) > 0: removed_coin_ids = set([coin.name() for coin in removed_coins]) all_added_coins = await self.get_additions(peer, block, [], get_all_additions=True) assert all_added_coins is not None if all_added_coins is not None: 
for coin in all_added_coins: if coin.puzzle_hash == SINGLETON_LAUNCHER_HASH and coin.parent_coin_info in removed_coin_ids: cs: CoinSpend = await self.fetch_puzzle_solution(peer, block.height, coin) additional_coin_spends.append(cs) await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False) all_removed_coins: Optional[List[Coin]] = await self.get_removals( peer, block, added_coins, removed_coins, request_all_removals=True ) assert all_removed_coins is not None all_removed_coins_dict: Dict[bytes32, Coin] = {coin.name(): coin for coin in all_removed_coins} keep_searching = True while keep_searching: keep_searching = False interested_ids: List[ bytes32 ] = await self.wallet_state_manager.interested_store.get_interested_coin_ids() for coin_id in interested_ids: if coin_id in all_removed_coins_dict: coin = all_removed_coins_dict[coin_id] cs = await self.fetch_puzzle_solution(peer, block.height, coin) await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False) additional_coin_spends.append(cs) keep_searching = True all_removed_coins_dict.pop(coin_id) break return additional_coin_spends async def get_additions( self, peer: WSChiaConnection, block_i, additions: Optional[List[bytes32]], get_all_additions: bool = False ) -> Optional[List[Coin]]: if (additions is not None and len(additions) > 0) or get_all_additions: if get_all_additions: additions = None additions_request = RequestAdditions(block_i.height, block_i.header_hash, additions) additions_res: Optional[Union[RespondAdditions, RejectAdditionsRequest]] = await peer.request_additions( additions_request ) if additions_res is None: await peer.close() return None elif isinstance(additions_res, RespondAdditions): validated = self.validate_additions( additions_res.coins, additions_res.proofs, block_i.foliage_transaction_block.additions_root, ) if not validated: await peer.close() return None added_coins = [] for ph_coins in additions_res.coins: ph, coins = ph_coins added_coins.extend(coins) return 
added_coins elif isinstance(additions_res, RejectRemovalsRequest): await peer.close() return None return None else: return [] async def get_removals( self, peer: WSChiaConnection, block_i, additions, removals, request_all_removals=False ) -> Optional[List[Coin]]: assert self.wallet_state_manager is not None for coin in additions: puzzle_store = self.wallet_state_manager.puzzle_store record_info: Optional[DerivationRecord] = await puzzle_store.get_derivation_record_for_puzzle_hash( coin.puzzle_hash.hex() ) if record_info is not None and record_info.wallet_type == WalletType.COLOURED_COIN: request_all_removals = True break if record_info is not None and record_info.wallet_type == WalletType.DISTRIBUTED_ID: request_all_removals = True break if len(removals) > 0 or request_all_removals: if request_all_removals: removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, None) else: removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, removals) removals_res: Optional[Union[RespondRemovals, RejectRemovalsRequest]] = await peer.request_removals( removals_request ) if removals_res is None: return None elif isinstance(removals_res, RespondRemovals): validated = self.validate_removals( removals_res.coins, removals_res.proofs, block_i.foliage_transaction_block.removals_root, ) if validated is False: await peer.close() return None removed_coins = [] for _, coins_l in removals_res.coins: if coins_l is not None: removed_coins.append(coins_l) return removed_coins elif isinstance(removals_res, RejectRemovalsRequest): return None else: return None else: return [] async def wallet_next_block_check( peer: WSChiaConnection, potential_peek: uint32, blockchain: BlockchainInterface ) -> bool: block_response = await peer.request_header_blocks( wallet_protocol.RequestHeaderBlocks(potential_peek, potential_peek) ) if block_response is not None and isinstance(block_response, wallet_protocol.RespondHeaderBlocks): our_peak = 
blockchain.get_peak() if our_peak is not None and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash: return True return False
true
true
f7030ca21baff3a44a1489d617f958c1a15717d5
1,175
py
Python
src/phiqnet/bin/train_resnet50_mos_fpn_attention_imageaug_koniq_small_finetune.py
junyongyou/lagt_vqa
11aeda111ec4d97980db1e60f7b66b481266d1f3
[ "MIT" ]
9
2021-11-01T06:06:33.000Z
2022-02-07T12:21:18.000Z
src/phiqnet/bin/train_resnet50_mos_fpn_attention_imageaug_koniq_small_finetune.py
junyongyou/lagt_vqa
11aeda111ec4d97980db1e60f7b66b481266d1f3
[ "MIT" ]
null
null
null
src/phiqnet/bin/train_resnet50_mos_fpn_attention_imageaug_koniq_small_finetune.py
junyongyou/lagt_vqa
11aeda111ec4d97980db1e60f7b66b481266d1f3
[ "MIT" ]
1
2022-03-06T07:38:32.000Z
2022-03-06T07:38:32.000Z
from phiqnet.train.train import train_main if __name__ == '__main__': args = {} args['multi_gpu'] = 0 args['gpu'] = 0 args['result_folder'] = r'..\databases\experiments\koniq_small' args['n_quality_levels'] = 1 args['train_folders'] = [#r'..\databases\train\koniq_normal', r'..\databases\train\koniq_small',] # r'..\databases\train\live'] args['val_folders'] = [#r'..\databases\val\koniq_normal', r'..\databases\val\koniq_small',] # r'..\databases\val\live'] args['koniq_mos_file'] = r'..\databases\koniq10k_images_scores.csv' args['live_mos_file'] = r'..\databases\live_mos.csv' args['naive_backbone'] = False args['backbone'] = 'resnet50' args['model_weights'] = r'..\databases\experiments\koniq_small\resnet50_mos_attention_fpn\44_0.0094_0.0473.h5' args['initial_epoch'] = 0 args['lr_base'] = 1e-6 args['lr_schedule'] = True args['batch_size'] = 8 args['epochs'] = 120 args['fpn_type'] = 'fpn' args['attention_module'] = True args['image_aug'] = True train_main(args)
32.638889
114
0.591489
from phiqnet.train.train import train_main if __name__ == '__main__': args = {} args['multi_gpu'] = 0 args['gpu'] = 0 args['result_folder'] = r'..\databases\experiments\koniq_small' args['n_quality_levels'] = 1 args['train_folders'] = [ r'..\databases\train\koniq_small',] args['val_folders'] = [ r'..\databases\val\koniq_small',] args['koniq_mos_file'] = r'..\databases\koniq10k_images_scores.csv' args['live_mos_file'] = r'..\databases\live_mos.csv' args['naive_backbone'] = False args['backbone'] = 'resnet50' args['model_weights'] = r'..\databases\experiments\koniq_small\resnet50_mos_attention_fpn\44_0.0094_0.0473.h5' args['initial_epoch'] = 0 args['lr_base'] = 1e-6 args['lr_schedule'] = True args['batch_size'] = 8 args['epochs'] = 120 args['fpn_type'] = 'fpn' args['attention_module'] = True args['image_aug'] = True train_main(args)
true
true
f7030d07bc7c0ce56ea130285e2eff935e0bf461
30
py
Python
anchor/__init__.py
forest-snow/anchor-topic
ad947f2ff6aefc28394531fa74ba3e94e5a01fc2
[ "MIT" ]
13
2019-02-14T15:55:55.000Z
2022-03-03T01:01:28.000Z
anchor/__init__.py
gkaramanolakis/anchor-topic
e1637fa3965bfe14d8a5241b070c675bcdf4df18
[ "MIT" ]
1
2020-11-12T22:49:16.000Z
2020-11-12T22:49:16.000Z
anchor/__init__.py
gkaramanolakis/anchor-topic
e1637fa3965bfe14d8a5241b070c675bcdf4df18
[ "MIT" ]
2
2019-02-14T15:26:56.000Z
2021-08-21T02:37:53.000Z
import anchor name = 'anchor'
10
15
0.733333
import anchor name = 'anchor'
true
true
f7030d0c7cff4eb5186658c4505f06314d86506a
964
py
Python
web_app/__init__.py
medamer/twitoff-class-practice
c02bcd81d420233b8db12daf168353a7660f0030
[ "MIT" ]
null
null
null
web_app/__init__.py
medamer/twitoff-class-practice
c02bcd81d420233b8db12daf168353a7660f0030
[ "MIT" ]
null
null
null
web_app/__init__.py
medamer/twitoff-class-practice
c02bcd81d420233b8db12daf168353a7660f0030
[ "MIT" ]
null
null
null
# web_app/__init__.py from flask import Flask from web_app.models import db, migrate from web_app.routes.home_routes import home_routes from web_app.routes.book_routes import book_routes DATABASE_URI = "sqlite:///twitoff_class.db" # using relative filepath #DATABASE_URI = "sqlite:////Users/Username/Desktop/your-repo-name/web_app_99.db" # using absolute filepath on Mac (recommended) #DATABASE_URI = "sqlite:///C:\\Users\\Username\\Desktop\\your-repo-name\\web_app_99.db" # using absolute filepath on Windows (recommended) h/t: https://stackoverflow.com/a/19262231/670433 def create_app(): app = Flask(__name__) app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False db.init_app(app) migrate.init_app(app, db) app.register_blueprint(home_routes) app.register_blueprint(book_routes) return app if __name__ == "__main__": my_app = create_app() my_app.run(debug=True)
34.428571
187
0.756224
from flask import Flask from web_app.models import db, migrate from web_app.routes.home_routes import home_routes from web_app.routes.book_routes import book_routes DATABASE_URI = "sqlite:///twitoff_class.db" def create_app(): app = Flask(__name__) app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False db.init_app(app) migrate.init_app(app, db) app.register_blueprint(home_routes) app.register_blueprint(book_routes) return app if __name__ == "__main__": my_app = create_app() my_app.run(debug=True)
true
true
f7030d1fc7c80f90ced33769b5b823d7d16ebf1a
2,958
py
Python
app/api/users.py
s-titoo/bloggger
9d25e4421ffdcf2c616d7948746bc544ee77b3bc
[ "MIT" ]
null
null
null
app/api/users.py
s-titoo/bloggger
9d25e4421ffdcf2c616d7948746bc544ee77b3bc
[ "MIT" ]
16
2020-12-31T20:02:12.000Z
2021-01-03T08:32:56.000Z
app/api/users.py
s-titoo/bloggger
9d25e4421ffdcf2c616d7948746bc544ee77b3bc
[ "MIT" ]
null
null
null
from flask import jsonify, request, url_for, abort from app import db from app.api import bp from app.api.auth import token_auth from app.api.errors import bad_request from app.models import User @bp.route('/users/<int:id>', methods=['GET']) @token_auth.login_required def get_user(id): return jsonify(User.query.get_or_404(id).to_dict()) @bp.route('/users', methods=['GET']) @token_auth.login_required def get_users(): page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', 10, type=int), 100) data = User.to_collection_dict(User.query, page, per_page, 'api.get_users') return jsonify(data) @bp.route('/users/<int:id>/followers', methods=['GET']) @token_auth.login_required def get_followers(id): user = User.query.get_or_404(id) page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', 10, type=int), 100) data = User.to_collection_dict(user.followers, page, per_page, 'api.get_followers', id=id) return jsonify(data) @bp.route('/users/<int:id>/followed', methods=['GET']) @token_auth.login_required def get_followed(id): user = User.query.get_or_404(id) page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', 10, type=int), 100) data = User.to_collection_dict(user.followed, page, per_page, 'api.get_followed', id=id) return jsonify(data) @bp.route('/users', methods=['POST']) def create_user(): data = request.get_json() or {} if 'username' not in data or 'email' not in data or 'password' not in data: return bad_request('Request must include username, email and password') if User.query.filter_by(username=data['username']).first(): return bad_request('Please use a different username') if User.query.filter_by(email=data['email']).first(): return bad_request('Please use a different email') user = User() user.from_dict(data, new_user=True) db.session.add(user) db.session.commit() response = jsonify(user.to_dict()) response.status_code = 201 response.headers['Location'] = 
url_for('api.get_user', id=user.id) return response @bp.route('/users/<int:id>', methods=['PUT']) @token_auth.login_required def update_user(id): if token_auth.current_user().id != id: abort(403) user = User.query.get_or_404(id) data = request.get_json() or {} if 'username' in data and data['username'] != user.username and \ User.query.filter_by(username=data['username']).first(): return bad_request('Please use a different username') if 'email' in data and data['email'] != user.email and \ User.query.filter_by(email=data['email']).first(): return bad_request('Please use a different email') user.from_dict(data, new_user=False) db.session.commit() return jsonify(user.to_dict())
38.921053
79
0.671738
from flask import jsonify, request, url_for, abort from app import db from app.api import bp from app.api.auth import token_auth from app.api.errors import bad_request from app.models import User @bp.route('/users/<int:id>', methods=['GET']) @token_auth.login_required def get_user(id): return jsonify(User.query.get_or_404(id).to_dict()) @bp.route('/users', methods=['GET']) @token_auth.login_required def get_users(): page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', 10, type=int), 100) data = User.to_collection_dict(User.query, page, per_page, 'api.get_users') return jsonify(data) @bp.route('/users/<int:id>/followers', methods=['GET']) @token_auth.login_required def get_followers(id): user = User.query.get_or_404(id) page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', 10, type=int), 100) data = User.to_collection_dict(user.followers, page, per_page, 'api.get_followers', id=id) return jsonify(data) @bp.route('/users/<int:id>/followed', methods=['GET']) @token_auth.login_required def get_followed(id): user = User.query.get_or_404(id) page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', 10, type=int), 100) data = User.to_collection_dict(user.followed, page, per_page, 'api.get_followed', id=id) return jsonify(data) @bp.route('/users', methods=['POST']) def create_user(): data = request.get_json() or {} if 'username' not in data or 'email' not in data or 'password' not in data: return bad_request('Request must include username, email and password') if User.query.filter_by(username=data['username']).first(): return bad_request('Please use a different username') if User.query.filter_by(email=data['email']).first(): return bad_request('Please use a different email') user = User() user.from_dict(data, new_user=True) db.session.add(user) db.session.commit() response = jsonify(user.to_dict()) response.status_code = 201 response.headers['Location'] = 
url_for('api.get_user', id=user.id) return response @bp.route('/users/<int:id>', methods=['PUT']) @token_auth.login_required def update_user(id): if token_auth.current_user().id != id: abort(403) user = User.query.get_or_404(id) data = request.get_json() or {} if 'username' in data and data['username'] != user.username and \ User.query.filter_by(username=data['username']).first(): return bad_request('Please use a different username') if 'email' in data and data['email'] != user.email and \ User.query.filter_by(email=data['email']).first(): return bad_request('Please use a different email') user.from_dict(data, new_user=False) db.session.commit() return jsonify(user.to_dict())
true
true
f7030d57b6f7f41fc5ad8fdacb642095d4dd9445
3,242
py
Python
src/py/breakpoint_splitter.py
marbl/VALET
d3a433690b70288f42e47d096dfcd5370ddf04ce
[ "MIT" ]
12
2016-01-29T10:58:52.000Z
2020-11-02T00:39:15.000Z
src/py/breakpoint_splitter.py
marbl/VALET
d3a433690b70288f42e47d096dfcd5370ddf04ce
[ "MIT" ]
9
2016-01-29T12:04:00.000Z
2021-12-06T12:30:12.000Z
src/py/breakpoint_splitter.py
marbl/VALET
d3a433690b70288f42e47d096dfcd5370ddf04ce
[ "MIT" ]
5
2017-07-20T21:47:47.000Z
2022-01-24T09:07:04.000Z
#!/usr/bin/env python from __future__ import print_function from optparse import OptionParser import os import sys class ReadsSplitter: def __init__(self): self.options = None self.files_to_split = [] self.getOptions() def go(self): for fn in self.files_to_split: self.splitFile(fn) def getOptions(self): parser = OptionParser() parser.add_option("-u", "--unaligned", dest="unaligned_dir", \ help="Unaligned read directory", metavar="DIR") parser.add_option("-o", "--output", dest="output_dir",\ help="Directory for output", metavar="DIR",\ default="data/output/breakpoints/reads") (options, args) = parser.parse_args() self.options = options if options.unaligned_dir: for file_name in os.listdir(options.unaligned_dir): if 'unaligned' in file_name: self.files_to_split.append(options.unaligned_dir + file_name) def splitFile(self, fn): if not os.path.isfile(fn): warning("%s DOES NOT EXIST" %(fn)) exit(1) read_split_output_dir = self.options.output_dir ensure_dir(read_split_output_dir) read_split_output_1 = read_split_output_dir + os.path.split(fn)[1] + ".1" read_split_output_2 = read_split_output_dir + os.path.split(fn)[1] + ".2" read_file = open(fn, 'r') r_o_1 = open(read_split_output_1, 'w') r_o_2 = open(read_split_output_2, 'w') for read in self.read_read(read_file): h1 = read[0].strip() read_contents = read[1].strip() h2 = read[2].strip() read_quality = read[3].strip() # l = len(read_contents) l_1 = int(l / 3) l_2 = int(l - l_1) # left h1_1 = h1 + "/1\n" read_contents_1 = read_contents[0:l_1] + "\n" h2_1 = h2 + "/1\n" read_quality_1 = read_quality[0:l_1] + "\n" # right h1_2 = h1 + "/2\n" read_contents_2 = read_contents[l_2:]+ "\n" h2_2 = h2 + "/2\n" read_quality_2 = read_quality[l_2:] + "\n" r_o_1.write(h1_1) r_o_1.write(read_contents_1) r_o_1.write(h2_1) r_o_1.write(read_quality_1) r_o_2.write(h1_2) r_o_2.write(read_contents_2) r_o_2.write(h2_2) r_o_2.write(read_quality_2) r_o_1.close() r_o_2.close() read_file.close() def read_read(self, fp): while True: read_bundle = [] 
for i in range(4): read_bundle.append(fp.readline()) if not read_bundle[0]: break else: yield read_bundle def ensure_dir(f): d = os.path.dirname(f) if not os.path.exists(d): os.makedirs(d) def warning(*objs): print("\tINFO: ",*objs, file=sys.stderr) def main(): ''' splits read files for breakpoint ''' splitter = ReadsSplitter() splitter.go() if __name__=='__main__': main()
27.474576
82
0.537631
from __future__ import print_function from optparse import OptionParser import os import sys class ReadsSplitter: def __init__(self): self.options = None self.files_to_split = [] self.getOptions() def go(self): for fn in self.files_to_split: self.splitFile(fn) def getOptions(self): parser = OptionParser() parser.add_option("-u", "--unaligned", dest="unaligned_dir", \ help="Unaligned read directory", metavar="DIR") parser.add_option("-o", "--output", dest="output_dir",\ help="Directory for output", metavar="DIR",\ default="data/output/breakpoints/reads") (options, args) = parser.parse_args() self.options = options if options.unaligned_dir: for file_name in os.listdir(options.unaligned_dir): if 'unaligned' in file_name: self.files_to_split.append(options.unaligned_dir + file_name) def splitFile(self, fn): if not os.path.isfile(fn): warning("%s DOES NOT EXIST" %(fn)) exit(1) read_split_output_dir = self.options.output_dir ensure_dir(read_split_output_dir) read_split_output_1 = read_split_output_dir + os.path.split(fn)[1] + ".1" read_split_output_2 = read_split_output_dir + os.path.split(fn)[1] + ".2" read_file = open(fn, 'r') r_o_1 = open(read_split_output_1, 'w') r_o_2 = open(read_split_output_2, 'w') for read in self.read_read(read_file): h1 = read[0].strip() read_contents = read[1].strip() h2 = read[2].strip() read_quality = read[3].strip() l = len(read_contents) l_1 = int(l / 3) l_2 = int(l - l_1) h1_1 = h1 + "/1\n" read_contents_1 = read_contents[0:l_1] + "\n" h2_1 = h2 + "/1\n" read_quality_1 = read_quality[0:l_1] + "\n" h1_2 = h1 + "/2\n" read_contents_2 = read_contents[l_2:]+ "\n" h2_2 = h2 + "/2\n" read_quality_2 = read_quality[l_2:] + "\n" r_o_1.write(h1_1) r_o_1.write(read_contents_1) r_o_1.write(h2_1) r_o_1.write(read_quality_1) r_o_2.write(h1_2) r_o_2.write(read_contents_2) r_o_2.write(h2_2) r_o_2.write(read_quality_2) r_o_1.close() r_o_2.close() read_file.close() def read_read(self, fp): while True: read_bundle = [] for i in range(4): 
read_bundle.append(fp.readline()) if not read_bundle[0]: break else: yield read_bundle def ensure_dir(f): d = os.path.dirname(f) if not os.path.exists(d): os.makedirs(d) def warning(*objs): print("\tINFO: ",*objs, file=sys.stderr) def main(): splitter = ReadsSplitter() splitter.go() if __name__=='__main__': main()
true
true
f7030ff12b6ef50e1672d30b3059987601d2c469
6,435
py
Python
qualcoder/GUI/information.py
qwertygc/QualCoder
2a4820f4de7a2ddf1a629336c74e534c20bdb5d9
[ "MIT" ]
null
null
null
qualcoder/GUI/information.py
qwertygc/QualCoder
2a4820f4de7a2ddf1a629336c74e534c20bdb5d9
[ "MIT" ]
null
null
null
qualcoder/GUI/information.py
qwertygc/QualCoder
2a4820f4de7a2ddf1a629336c74e534c20bdb5d9
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- ''' Copyright (c) 2019 Colin Curtain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Author: Colin Curtain (ccbogel) https://github.com/ccbogel/QualCoder https://qualcoder.wordpress.com/ ''' from PyQt5 import QtWidgets, QtCore import os import sys import logging import traceback from GUI.ui_dialog_information import Ui_Dialog_information path = os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) def exception_handler(exception_type, value, tb_obj): """ Global exception handler useful in GUIs. tb_obj: exception.__traceback__ """ tb = '\n'.join(traceback.format_tb(tb_obj)) text = 'Traceback (most recent call last):\n' + tb + '\n' + exception_type.__name__ + ': ' + str(value) print(text) logger.error(_("Uncaught exception: ") + text) QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text) class DialogInformation(QtWidgets.QDialog): """ Dialog to display about information from html and text files for PyQDA development, version and license. 
The html is coded below because it avoids potential data file import errors with pyinstaller. Called from: qualcoder.MainWindow.about view_graph_original.ViewGraphOriginal.list_graph.TextGraphicsItem view_graph_original.ViewGraphOriginal.circular_graph.TextGraphicsItem """ title = "" text = "" def __init__(self, app, title, html="", parent=None): """Display information text in dialog. If no html is given, fill with About html. """ sys.excepthook = exception_handler QtWidgets.QDialog.__init__(self) self.ui = Ui_Dialog_information() self.ui.setupUi(self) self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint) font = 'font: ' + str(app.settings['fontsize']) + 'pt ' font += '"' + app.settings['font'] + '";' self.setStyleSheet(font) self.setWindowTitle(title) if html == "": self.setHtml(a) else: self.setHtml(html) def setHtml(self, html): """This method is used to populate the textEdit. Usually called from a View_graph TextGraphicsItem via a context menu. """ self.text = html self.ui.textEdit.setHtml(self.text) def accepted(self): """ Accepted button overridden method """ self.information = self.ui.textEdit.toPlainText() self.ui.Dialog_information.accept() a = '<h1 class="western">About QualCoder</h1>\ <h2 class="western">Version:</h2>\ <p>QualCoder 1.9 2020 March 11</p>\ <p>Depends on python 3.x, pyqt5 lxml Pillow ebooklib ply chardet pdfminer.six openpyxl</p>\ <p>VLC should also be installed.</p>\ <p>Tested on: Linux Mint 18.04, Ubuntu 19.04, Lubuntu 18.04, mostly tested on Windows 10, partly tested on Mac OS.</p>\ <p></p>\ <h2 class="western">Acknowledgements</h2>\ <p>Ronggui Huang and Zhang Gehao for creating RQDA, which inspired this software.</p>\ <p>Mike MacCana for the source code for the docx module.</p>\ <p>User: bit4 on stackoverflow who presented the source code to convert html to text.</p>\ <p>ebooklib: Aleksandar Erkalović (<a href="https://github.com/aerkalov">https://github.com/aerkalov</a>)</p>\ <p>The VideoLAN team for the 
bindings to VLC</p>\ <p>To various members on github for supporting this project.</p>\ <h2 class="western">Other details</h2\ <p>The qda data folder contains folders for imported documents, \ images, audio and video. It also contains the sqlite database, named data.qda, to store coding data.</p>\ <p>QualCoder creates a .qualcoder folder inside your home directory. \ This contains QualCoder.log, config.ini (for settings) and \ recent_project.txt. The config file contains the name of the current coder, \ default working directory and selected font.</p>\ <p>QualCoder is written in python 3 using Qt5 for the graphical interface.</p>\ <p>The REFI-QDA Project import and export are experimental and should not be relied upon. </p>\ <h2 class="western">License</h2>\ <p>MIT License</p>\ <p>Copyright (c) 2020 Colin Curtain</p>\ <p>Permission is hereby granted, free of charge, to any person<br />\ obtaining a copy of this software and associated documentation files<br />\ (the &quot;Software&quot;), to deal in the Software without<br />\ restriction, including without limitation the rights to use, copy,<br />\ modify, merge, publish, distribute, sublicense, and/or sell copies of<br />\ the Software, and to permit persons to whom the Software is furnished<br />\ to do so, subject to the following conditions:</p>\ <p>The above copyright notice and this permission notice shall be <br />\ included in all copies or substantial portions of the Software.</p>\ <p>THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF<br />\ ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE<br />\ WARRANTIES OF MERCHANTABILITY,</p>\ <p>FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT<br />\ SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,<br />\ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR<br />\ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR<br />\ THE USE OR OTHER DEALINGS IN THE SOFTWARE.</p>' if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) ui = DialogInformation(None, "a title", "") ui.show() sys.exit(app.exec_())
45.316901
119
0.724476
from PyQt5 import QtWidgets, QtCore import os import sys import logging import traceback from GUI.ui_dialog_information import Ui_Dialog_information path = os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) def exception_handler(exception_type, value, tb_obj): tb = '\n'.join(traceback.format_tb(tb_obj)) text = 'Traceback (most recent call last):\n' + tb + '\n' + exception_type.__name__ + ': ' + str(value) print(text) logger.error(_("Uncaught exception: ") + text) QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text) class DialogInformation(QtWidgets.QDialog): title = "" text = "" def __init__(self, app, title, html="", parent=None): sys.excepthook = exception_handler QtWidgets.QDialog.__init__(self) self.ui = Ui_Dialog_information() self.ui.setupUi(self) self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint) font = 'font: ' + str(app.settings['fontsize']) + 'pt ' font += '"' + app.settings['font'] + '";' self.setStyleSheet(font) self.setWindowTitle(title) if html == "": self.setHtml(a) else: self.setHtml(html) def setHtml(self, html): self.text = html self.ui.textEdit.setHtml(self.text) def accepted(self): self.information = self.ui.textEdit.toPlainText() self.ui.Dialog_information.accept() a = '<h1 class="western">About QualCoder</h1>\ <h2 class="western">Version:</h2>\ <p>QualCoder 1.9 2020 March 11</p>\ <p>Depends on python 3.x, pyqt5 lxml Pillow ebooklib ply chardet pdfminer.six openpyxl</p>\ <p>VLC should also be installed.</p>\ <p>Tested on: Linux Mint 18.04, Ubuntu 19.04, Lubuntu 18.04, mostly tested on Windows 10, partly tested on Mac OS.</p>\ <p></p>\ <h2 class="western">Acknowledgements</h2>\ <p>Ronggui Huang and Zhang Gehao for creating RQDA, which inspired this software.</p>\ <p>Mike MacCana for the source code for the docx module.</p>\ <p>User: bit4 on stackoverflow who presented the source code to convert html to text.</p>\ <p>ebooklib: Aleksandar Erkalović (<a 
href="https://github.com/aerkalov">https://github.com/aerkalov</a>)</p>\ <p>The VideoLAN team for the bindings to VLC</p>\ <p>To various members on github for supporting this project.</p>\ <h2 class="western">Other details</h2\ <p>The qda data folder contains folders for imported documents, \ images, audio and video. It also contains the sqlite database, named data.qda, to store coding data.</p>\ <p>QualCoder creates a .qualcoder folder inside your home directory. \ This contains QualCoder.log, config.ini (for settings) and \ recent_project.txt. The config file contains the name of the current coder, \ default working directory and selected font.</p>\ <p>QualCoder is written in python 3 using Qt5 for the graphical interface.</p>\ <p>The REFI-QDA Project import and export are experimental and should not be relied upon. </p>\ <h2 class="western">License</h2>\ <p>MIT License</p>\ <p>Copyright (c) 2020 Colin Curtain</p>\ <p>Permission is hereby granted, free of charge, to any person<br />\ obtaining a copy of this software and associated documentation files<br />\ (the &quot;Software&quot;), to deal in the Software without<br />\ restriction, including without limitation the rights to use, copy,<br />\ modify, merge, publish, distribute, sublicense, and/or sell copies of<br />\ the Software, and to permit persons to whom the Software is furnished<br />\ to do so, subject to the following conditions:</p>\ <p>The above copyright notice and this permission notice shall be <br />\ included in all copies or substantial portions of the Software.</p>\ <p>THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF<br />\ ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE<br />\ WARRANTIES OF MERCHANTABILITY,</p>\ <p>FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT<br />\ SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,<br />\ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR<br />\ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR<br />\ THE USE OR OTHER DEALINGS IN THE SOFTWARE.</p>' if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) ui = DialogInformation(None, "a title", "") ui.show() sys.exit(app.exec_())
true
true
f7030ff8aada85e06d45e7775f87f399f8143230
4,774
py
Python
sdk/lusid/models/resource_list_of_portfolio.py
inwaves/lusid-sdk-python
9ad2cfc71c998928bf23f54ce0821a8ea2db556b
[ "MIT" ]
null
null
null
sdk/lusid/models/resource_list_of_portfolio.py
inwaves/lusid-sdk-python
9ad2cfc71c998928bf23f54ce0821a8ea2db556b
[ "MIT" ]
null
null
null
sdk/lusid/models/resource_list_of_portfolio.py
inwaves/lusid-sdk-python
9ad2cfc71c998928bf23f54ce0821a8ea2db556b
[ "MIT" ]
null
null
null
# coding: utf-8 """ LUSID API FINBOURNE Technology # noqa: E501 The version of the OpenAPI document: 0.11.2342 Contact: info@finbourne.com Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six class ResourceListOfPortfolio(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. required_map (dict): The key is attribute name and the value is whether it is 'required' or 'optional'. """ openapi_types = { 'values': 'list[Portfolio]', 'href': 'str', 'links': 'list[Link]' } attribute_map = { 'values': 'values', 'href': 'href', 'links': 'links' } required_map = { 'values': 'required', 'href': 'optional', 'links': 'optional' } def __init__(self, values=None, href=None, links=None): # noqa: E501 """ ResourceListOfPortfolio - a model defined in OpenAPI :param values: (required) :type values: list[lusid.Portfolio] :param href: :type href: str :param links: :type links: list[lusid.Link] """ # noqa: E501 self._values = None self._href = None self._links = None self.discriminator = None self.values = values self.href = href self.links = links @property def values(self): """Gets the values of this ResourceListOfPortfolio. # noqa: E501 :return: The values of this ResourceListOfPortfolio. # noqa: E501 :rtype: list[Portfolio] """ return self._values @values.setter def values(self, values): """Sets the values of this ResourceListOfPortfolio. :param values: The values of this ResourceListOfPortfolio. # noqa: E501 :type: list[Portfolio] """ if values is None: raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501 self._values = values @property def href(self): """Gets the href of this ResourceListOfPortfolio. 
# noqa: E501 :return: The href of this ResourceListOfPortfolio. # noqa: E501 :rtype: str """ return self._href @href.setter def href(self, href): """Sets the href of this ResourceListOfPortfolio. :param href: The href of this ResourceListOfPortfolio. # noqa: E501 :type: str """ self._href = href @property def links(self): """Gets the links of this ResourceListOfPortfolio. # noqa: E501 :return: The links of this ResourceListOfPortfolio. # noqa: E501 :rtype: list[Link] """ return self._links @links.setter def links(self, links): """Sets the links of this ResourceListOfPortfolio. :param links: The links of this ResourceListOfPortfolio. # noqa: E501 :type: list[Link] """ self._links = links def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResourceListOfPortfolio): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
26.230769
92
0.55886
import pprint import re import six class ResourceListOfPortfolio(object): openapi_types = { 'values': 'list[Portfolio]', 'href': 'str', 'links': 'list[Link]' } attribute_map = { 'values': 'values', 'href': 'href', 'links': 'links' } required_map = { 'values': 'required', 'href': 'optional', 'links': 'optional' } def __init__(self, values=None, href=None, links=None): self._values = None self._href = None self._links = None self.discriminator = None self.values = values self.href = href self.links = links @property def values(self): return self._values @values.setter def values(self, values): if values is None: raise ValueError("Invalid value for `values`, must not be `None`") self._values = values @property def href(self): return self._href @href.setter def href(self, href): self._href = href @property def links(self): return self._links @links.setter def links(self, links): self._links = links def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, ResourceListOfPortfolio): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f703111c2592fb6a5ff0565da71ea5e703eed890
2,449
py
Python
env/lib/python3.5/site-packages/cartopy/tests/crs/test_utm.py
project-pantheon/pantheon_glob_planner
c0d50a53b36c4678192ec75ad7a4cd68c570daef
[ "BSD-3-Clause" ]
null
null
null
env/lib/python3.5/site-packages/cartopy/tests/crs/test_utm.py
project-pantheon/pantheon_glob_planner
c0d50a53b36c4678192ec75ad7a4cd68c570daef
[ "BSD-3-Clause" ]
null
null
null
env/lib/python3.5/site-packages/cartopy/tests/crs/test_utm.py
project-pantheon/pantheon_glob_planner
c0d50a53b36c4678192ec75ad7a4cd68c570daef
[ "BSD-3-Clause" ]
null
null
null
# (C) British Crown Copyright 2018, Met Office # # This file is part of cartopy. # # cartopy is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # cartopy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with cartopy. If not, see <https://www.gnu.org/licenses/>. """ Tests for the UTM coordinate system. """ from __future__ import (absolute_import, division, print_function) import numpy as np from numpy.testing import assert_almost_equal import pytest import cartopy.crs as ccrs def check_proj4_params(crs, other_args): expected = other_args | {'proj=utm', 'no_defs', 'units=m'} pro4_params = set(crs.proj4_init.lstrip('+').split(' +')) assert expected == pro4_params @pytest.mark.parametrize('south', [False, True]) def test_default(south): zone = 1 # Limits are fixed, so don't bother checking other zones. 
utm = ccrs.UTM(zone, southern_hemisphere=south) other_args = {'ellps=WGS84', 'zone={}'.format(zone)} if south: other_args |= {'south'} check_proj4_params(utm, other_args) assert_almost_equal(np.array(utm.x_limits), [-250000, 1250000]) assert_almost_equal(np.array(utm.y_limits), [-10000000, 25000000]) def test_ellipsoid_transform(): # USGS Professional Paper 1395, pp 269 - 271 globe = ccrs.Globe(ellipse='clrk66') utm = ccrs.UTM(zone=18, globe=globe) geodetic = utm.as_geodetic() other_args = {'ellps=clrk66', 'zone=18'} check_proj4_params(utm, other_args) assert_almost_equal(np.array(utm.x_limits), [-250000, 1250000]) assert_almost_equal(np.array(utm.y_limits), [-10000000, 25000000]) result = utm.transform_point(-73.5, 40.5, geodetic) assert_almost_equal(result, np.array([127106.5 + 500000, 4484124.4]), decimal=1) inverse_result = geodetic.transform_point(result[0], result[1], utm) assert_almost_equal(inverse_result, [-73.5, 40.5])
34.013889
74
0.687219
from __future__ import (absolute_import, division, print_function) import numpy as np from numpy.testing import assert_almost_equal import pytest import cartopy.crs as ccrs def check_proj4_params(crs, other_args): expected = other_args | {'proj=utm', 'no_defs', 'units=m'} pro4_params = set(crs.proj4_init.lstrip('+').split(' +')) assert expected == pro4_params @pytest.mark.parametrize('south', [False, True]) def test_default(south): zone = 1 utm = ccrs.UTM(zone, southern_hemisphere=south) other_args = {'ellps=WGS84', 'zone={}'.format(zone)} if south: other_args |= {'south'} check_proj4_params(utm, other_args) assert_almost_equal(np.array(utm.x_limits), [-250000, 1250000]) assert_almost_equal(np.array(utm.y_limits), [-10000000, 25000000]) def test_ellipsoid_transform(): # USGS Professional Paper 1395, pp 269 - 271 globe = ccrs.Globe(ellipse='clrk66') utm = ccrs.UTM(zone=18, globe=globe) geodetic = utm.as_geodetic() other_args = {'ellps=clrk66', 'zone=18'} check_proj4_params(utm, other_args) assert_almost_equal(np.array(utm.x_limits), [-250000, 1250000]) assert_almost_equal(np.array(utm.y_limits), [-10000000, 25000000]) result = utm.transform_point(-73.5, 40.5, geodetic) assert_almost_equal(result, np.array([127106.5 + 500000, 4484124.4]), decimal=1) inverse_result = geodetic.transform_point(result[0], result[1], utm) assert_almost_equal(inverse_result, [-73.5, 40.5])
true
true
f70311ade7985cc89c566ef969d4f9af213d0e28
35,786
py
Python
vnpy/app/cta_backtester/ui/widget.py
IcyCC/vnpy
04f6ec013daddde2df36590625e0533e260b4bc1
[ "MIT" ]
2
2020-04-20T15:02:11.000Z
2022-01-21T03:29:01.000Z
vnpy/app/cta_backtester/ui/widget.py
IcyCC/vnpy
04f6ec013daddde2df36590625e0533e260b4bc1
[ "MIT" ]
null
null
null
vnpy/app/cta_backtester/ui/widget.py
IcyCC/vnpy
04f6ec013daddde2df36590625e0533e260b4bc1
[ "MIT" ]
5
2019-10-26T06:03:26.000Z
2020-02-28T13:31:42.000Z
import numpy as np import pyqtgraph as pg from datetime import datetime, timedelta from vnpy.trader.constant import Interval, Direction, Offset from vnpy.trader.engine import MainEngine from vnpy.trader.ui import QtCore, QtWidgets, QtGui from vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell from vnpy.trader.ui.editor import CodeEditor from vnpy.event import Event, EventEngine from vnpy.chart import ChartWidget, CandleItem, VolumeItem from vnpy.trader.utility import load_json, save_json from ..engine import ( APP_NAME, EVENT_BACKTESTER_LOG, EVENT_BACKTESTER_BACKTESTING_FINISHED, EVENT_BACKTESTER_OPTIMIZATION_FINISHED, OptimizationSetting ) class BacktesterManager(QtWidgets.QWidget): """""" setting_filename = "cta_backtester_setting.json" signal_log = QtCore.pyqtSignal(Event) signal_backtesting_finished = QtCore.pyqtSignal(Event) signal_optimization_finished = QtCore.pyqtSignal(Event) def __init__(self, main_engine: MainEngine, event_engine: EventEngine): """""" super().__init__() self.main_engine = main_engine self.event_engine = event_engine self.backtester_engine = main_engine.get_engine(APP_NAME) self.class_names = [] self.settings = {} self.target_display = "" self.init_ui() self.register_event() self.backtester_engine.init_engine() self.init_strategy_settings() def init_strategy_settings(self): """""" self.class_names = self.backtester_engine.get_strategy_class_names() for class_name in self.class_names: setting = self.backtester_engine.get_default_setting(class_name) self.settings[class_name] = setting self.class_combo.addItems(self.class_names) def init_ui(self): """""" self.setWindowTitle("CTA回测") # Setting Part self.class_combo = QtWidgets.QComboBox() self.symbol_line = QtWidgets.QLineEdit("IF88.CFFEX") self.interval_combo = QtWidgets.QComboBox() for inteval in Interval: self.interval_combo.addItem(inteval.value) end_dt = datetime.now() start_dt = end_dt - timedelta(days=3 * 365) self.start_date_edit = QtWidgets.QDateEdit( 
QtCore.QDate( start_dt.year, start_dt.month, start_dt.day ) ) self.end_date_edit = QtWidgets.QDateEdit( QtCore.QDate.currentDate() ) self.rate_line = QtWidgets.QLineEdit("0.000025") self.slippage_line = QtWidgets.QLineEdit("0.2") self.size_line = QtWidgets.QLineEdit("300") self.pricetick_line = QtWidgets.QLineEdit("0.2") self.capital_line = QtWidgets.QLineEdit("1000000") self.inverse_combo = QtWidgets.QComboBox() self.inverse_combo.addItems(["正向", "反向"]) backtesting_button = QtWidgets.QPushButton("开始回测") backtesting_button.clicked.connect(self.start_backtesting) optimization_button = QtWidgets.QPushButton("参数优化") optimization_button.clicked.connect(self.start_optimization) self.result_button = QtWidgets.QPushButton("优化结果") self.result_button.clicked.connect(self.show_optimization_result) self.result_button.setEnabled(False) downloading_button = QtWidgets.QPushButton("下载数据") downloading_button.clicked.connect(self.start_downloading) self.order_button = QtWidgets.QPushButton("委托记录") self.order_button.clicked.connect(self.show_backtesting_orders) self.order_button.setEnabled(False) self.trade_button = QtWidgets.QPushButton("成交记录") self.trade_button.clicked.connect(self.show_backtesting_trades) self.trade_button.setEnabled(False) self.daily_button = QtWidgets.QPushButton("每日盈亏") self.daily_button.clicked.connect(self.show_daily_results) self.daily_button.setEnabled(False) self.candle_button = QtWidgets.QPushButton("K线图表") self.candle_button.clicked.connect(self.show_candle_chart) self.candle_button.setEnabled(False) edit_button = QtWidgets.QPushButton("代码编辑") edit_button.clicked.connect(self.edit_strategy_code) reload_button = QtWidgets.QPushButton("策略重载") reload_button.clicked.connect(self.reload_strategy_class) for button in [ backtesting_button, optimization_button, downloading_button, self.result_button, self.order_button, self.trade_button, self.daily_button, self.candle_button, edit_button, reload_button ]: button.setFixedHeight(button.sizeHint().height() * 2) 
form = QtWidgets.QFormLayout() form.addRow("交易策略", self.class_combo) form.addRow("本地代码", self.symbol_line) form.addRow("K线周期", self.interval_combo) form.addRow("开始日期", self.start_date_edit) form.addRow("结束日期", self.end_date_edit) form.addRow("手续费率", self.rate_line) form.addRow("交易滑点", self.slippage_line) form.addRow("合约乘数", self.size_line) form.addRow("价格跳动", self.pricetick_line) form.addRow("回测资金", self.capital_line) form.addRow("合约模式", self.inverse_combo) result_grid = QtWidgets.QGridLayout() result_grid.addWidget(self.trade_button, 0, 0) result_grid.addWidget(self.order_button, 0, 1) result_grid.addWidget(self.daily_button, 1, 0) result_grid.addWidget(self.candle_button, 1, 1) left_vbox = QtWidgets.QVBoxLayout() left_vbox.addLayout(form) left_vbox.addWidget(backtesting_button) left_vbox.addWidget(downloading_button) left_vbox.addStretch() left_vbox.addLayout(result_grid) left_vbox.addStretch() left_vbox.addWidget(optimization_button) left_vbox.addWidget(self.result_button) left_vbox.addStretch() left_vbox.addWidget(edit_button) left_vbox.addWidget(reload_button) # Result part self.statistics_monitor = StatisticsMonitor() self.log_monitor = QtWidgets.QTextEdit() self.log_monitor.setMaximumHeight(400) self.chart = BacktesterChart() self.chart.setMinimumWidth(1000) self.trade_dialog = BacktestingResultDialog( self.main_engine, self.event_engine, "回测成交记录", BacktestingTradeMonitor ) self.order_dialog = BacktestingResultDialog( self.main_engine, self.event_engine, "回测委托记录", BacktestingOrderMonitor ) self.daily_dialog = BacktestingResultDialog( self.main_engine, self.event_engine, "回测每日盈亏", DailyResultMonitor ) # Candle Chart self.candle_dialog = CandleChartDialog() # Layout vbox = QtWidgets.QVBoxLayout() vbox.addWidget(self.statistics_monitor) vbox.addWidget(self.log_monitor) hbox = QtWidgets.QHBoxLayout() hbox.addLayout(left_vbox) hbox.addLayout(vbox) hbox.addWidget(self.chart) self.setLayout(hbox) # Code Editor self.editor = CodeEditor(self.main_engine, 
self.event_engine) # Load setting setting = load_json(self.setting_filename) if not setting: return self.class_combo.setCurrentIndex( self.class_combo.findText(setting["class_name"]) ) self.symbol_line.setText(setting["vt_symbol"]) self.interval_combo.setCurrentIndex( self.interval_combo.findText(setting["interval"]) ) self.rate_line.setText(str(setting["rate"])) self.slippage_line.setText(str(setting["slippage"])) self.size_line.setText(str(setting["size"])) self.pricetick_line.setText(str(setting["pricetick"])) self.capital_line.setText(str(setting["capital"])) if not setting["inverse"]: self.inverse_combo.setCurrentIndex(0) else: self.inverse_combo.setCurrentIndex(1) def register_event(self): """""" self.signal_log.connect(self.process_log_event) self.signal_backtesting_finished.connect( self.process_backtesting_finished_event) self.signal_optimization_finished.connect( self.process_optimization_finished_event) self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit) self.event_engine.register( EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit) self.event_engine.register( EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit) def process_log_event(self, event: Event): """""" msg = event.data self.write_log(msg) def write_log(self, msg): """""" timestamp = datetime.now().strftime("%H:%M:%S") msg = f"{timestamp}\t{msg}" self.log_monitor.append(msg) def process_backtesting_finished_event(self, event: Event): """""" statistics = self.backtester_engine.get_result_statistics() self.statistics_monitor.set_data(statistics) df = self.backtester_engine.get_result_df() self.chart.set_data(df) self.trade_button.setEnabled(True) self.order_button.setEnabled(True) self.daily_button.setEnabled(True) self.candle_button.setEnabled(True) def process_optimization_finished_event(self, event: Event): """""" self.write_log("请点击[优化结果]按钮查看") self.result_button.setEnabled(True) def start_backtesting(self): """""" 
class_name = self.class_combo.currentText() vt_symbol = self.symbol_line.text() interval = self.interval_combo.currentText() start = self.start_date_edit.date().toPyDate() end = self.end_date_edit.date().toPyDate() rate = float(self.rate_line.text()) slippage = float(self.slippage_line.text()) size = float(self.size_line.text()) pricetick = float(self.pricetick_line.text()) capital = float(self.capital_line.text()) if self.inverse_combo.currentText() == "正向": inverse = False else: inverse = True # Save backtesting parameters backtesting_setting = { "class_name": class_name, "vt_symbol": vt_symbol, "interval": interval, "rate": rate, "slippage": slippage, "size": size, "pricetick": pricetick, "capital": capital, "inverse": inverse, } save_json(self.setting_filename, backtesting_setting) # Get strategy setting old_setting = self.settings[class_name] dialog = BacktestingSettingEditor(class_name, old_setting) i = dialog.exec() if i != dialog.Accepted: return new_setting = dialog.get_setting() self.settings[class_name] = new_setting result = self.backtester_engine.start_backtesting( class_name, vt_symbol, interval, start, end, rate, slippage, size, pricetick, capital, inverse, new_setting ) if result: self.statistics_monitor.clear_data() self.chart.clear_data() self.trade_button.setEnabled(False) self.order_button.setEnabled(False) self.daily_button.setEnabled(False) self.candle_button.setEnabled(False) self.trade_dialog.clear_data() self.order_dialog.clear_data() self.daily_dialog.clear_data() self.candle_dialog.clear_data() def start_optimization(self): """""" class_name = self.class_combo.currentText() vt_symbol = self.symbol_line.text() interval = self.interval_combo.currentText() start = self.start_date_edit.date().toPyDate() end = self.end_date_edit.date().toPyDate() rate = float(self.rate_line.text()) slippage = float(self.slippage_line.text()) size = float(self.size_line.text()) pricetick = float(self.pricetick_line.text()) capital = 
float(self.capital_line.text()) if self.inverse_combo.currentText() == "正向": inverse = False else: inverse = True parameters = self.settings[class_name] dialog = OptimizationSettingEditor(class_name, parameters) i = dialog.exec() if i != dialog.Accepted: return optimization_setting, use_ga = dialog.get_setting() self.target_display = dialog.target_display self.backtester_engine.start_optimization( class_name, vt_symbol, interval, start, end, rate, slippage, size, pricetick, capital, inverse, optimization_setting, use_ga ) self.result_button.setEnabled(False) def start_downloading(self): """""" vt_symbol = self.symbol_line.text() interval = self.interval_combo.currentText() start_date = self.start_date_edit.date() end_date = self.end_date_edit.date() start = datetime(start_date.year(), start_date.month(), start_date.day()) end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59) self.backtester_engine.start_downloading( vt_symbol, interval, start, end ) def show_optimization_result(self): """""" result_values = self.backtester_engine.get_result_values() dialog = OptimizationResultMonitor( result_values, self.target_display ) dialog.exec_() def show_backtesting_trades(self): """""" if not self.trade_dialog.is_updated(): trades = self.backtester_engine.get_all_trades() self.trade_dialog.update_data(trades) self.trade_dialog.exec_() def show_backtesting_orders(self): """""" if not self.order_dialog.is_updated(): orders = self.backtester_engine.get_all_orders() self.order_dialog.update_data(orders) self.order_dialog.exec_() def show_daily_results(self): """""" if not self.daily_dialog.is_updated(): results = self.backtester_engine.get_all_daily_results() self.daily_dialog.update_data(results) self.daily_dialog.exec_() def show_candle_chart(self): """""" if not self.candle_dialog.is_updated(): history = self.backtester_engine.get_history_data() self.candle_dialog.update_history(history) trades = self.backtester_engine.get_all_trades() 
self.candle_dialog.update_trades(trades) self.candle_dialog.exec_() def edit_strategy_code(self): """""" class_name = self.class_combo.currentText() file_path = self.backtester_engine.get_strategy_class_file(class_name) self.editor.open_editor(file_path) self.editor.show() def reload_strategy_class(self): """""" self.backtester_engine.reload_strategy_class() self.class_combo.clear() self.init_strategy_settings() def show(self): """""" self.showMaximized() class StatisticsMonitor(QtWidgets.QTableWidget): """""" KEY_NAME_MAP = { "start_date": "首个交易日", "end_date": "最后交易日", "total_days": "总交易日", "profit_days": "盈利交易日", "loss_days": "亏损交易日", "capital": "起始资金", "end_balance": "结束资金", "total_return": "总收益率", "annual_return": "年化收益", "max_drawdown": "最大回撤", "max_ddpercent": "百分比最大回撤", "total_net_pnl": "总盈亏", "total_commission": "总手续费", "total_slippage": "总滑点", "total_turnover": "总成交额", "total_trade_count": "总成交笔数", "daily_net_pnl": "日均盈亏", "daily_commission": "日均手续费", "daily_slippage": "日均滑点", "daily_turnover": "日均成交额", "daily_trade_count": "日均成交笔数", "daily_return": "日均收益率", "return_std": "收益标准差", "sharpe_ratio": "夏普比率", "return_drawdown_ratio": "收益回撤比" } def __init__(self): """""" super().__init__() self.cells = {} self.init_ui() def init_ui(self): """""" self.setRowCount(len(self.KEY_NAME_MAP)) self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values())) self.setColumnCount(1) self.horizontalHeader().setVisible(False) self.horizontalHeader().setSectionResizeMode( QtWidgets.QHeaderView.Stretch ) self.setEditTriggers(self.NoEditTriggers) for row, key in enumerate(self.KEY_NAME_MAP.keys()): cell = QtWidgets.QTableWidgetItem() self.setItem(row, 0, cell) self.cells[key] = cell def clear_data(self): """""" for cell in self.cells.values(): cell.setText("") def set_data(self, data: dict): """""" data["capital"] = f"{data['capital']:,.2f}" data["end_balance"] = f"{data['end_balance']:,.2f}" data["total_return"] = f"{data['total_return']:,.2f}%" data["annual_return"] = 
f"{data['annual_return']:,.2f}%" data["max_drawdown"] = f"{data['max_drawdown']:,.2f}" data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%" data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}" data["total_commission"] = f"{data['total_commission']:,.2f}" data["total_slippage"] = f"{data['total_slippage']:,.2f}" data["total_turnover"] = f"{data['total_turnover']:,.2f}" data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}" data["daily_commission"] = f"{data['daily_commission']:,.2f}" data["daily_slippage"] = f"{data['daily_slippage']:,.2f}" data["daily_turnover"] = f"{data['daily_turnover']:,.2f}" data["daily_return"] = f"{data['daily_return']:,.2f}%" data["return_std"] = f"{data['return_std']:,.2f}%" data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}" data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}" for key, cell in self.cells.items(): value = data.get(key, "") cell.setText(str(value)) class BacktestingSettingEditor(QtWidgets.QDialog): """ For creating new strategy and editing strategy parameters. 
""" def __init__( self, class_name: str, parameters: dict ): """""" super(BacktestingSettingEditor, self).__init__() self.class_name = class_name self.parameters = parameters self.edits = {} self.init_ui() def init_ui(self): """""" form = QtWidgets.QFormLayout() # Add vt_symbol and name edit if add new strategy self.setWindowTitle(f"策略参数配置:{self.class_name}") button_text = "确定" parameters = self.parameters for name, value in parameters.items(): type_ = type(value) edit = QtWidgets.QLineEdit(str(value)) if type_ is int: validator = QtGui.QIntValidator() edit.setValidator(validator) elif type_ is float: validator = QtGui.QDoubleValidator() edit.setValidator(validator) form.addRow(f"{name} {type_}", edit) self.edits[name] = (edit, type_) button = QtWidgets.QPushButton(button_text) button.clicked.connect(self.accept) form.addRow(button) self.setLayout(form) def get_setting(self): """""" setting = {} for name, tp in self.edits.items(): edit, type_ = tp value_text = edit.text() if type_ == bool: if value_text == "True": value = True else: value = False else: value = type_(value_text) setting[name] = value return setting class BacktesterChart(pg.GraphicsWindow): """""" def __init__(self): """""" super().__init__(title="Backtester Chart") self.dates = {} self.init_ui() def init_ui(self): """""" pg.setConfigOptions(antialias=True) # Create plot widgets self.balance_plot = self.addPlot( title="账户净值", axisItems={"bottom": DateAxis(self.dates, orientation="bottom")} ) self.nextRow() self.drawdown_plot = self.addPlot( title="净值回撤", axisItems={"bottom": DateAxis(self.dates, orientation="bottom")} ) self.nextRow() self.pnl_plot = self.addPlot( title="每日盈亏", axisItems={"bottom": DateAxis(self.dates, orientation="bottom")} ) self.nextRow() self.distribution_plot = self.addPlot(title="盈亏分布") # Add curves and bars on plot widgets self.balance_curve = self.balance_plot.plot( pen=pg.mkPen("#ffc107", width=3) ) dd_color = "#303f9f" self.drawdown_curve = self.drawdown_plot.plot( 
fillLevel=-0.3, brush=dd_color, pen=dd_color ) profit_color = 'r' loss_color = 'g' self.profit_pnl_bar = pg.BarGraphItem( x=[], height=[], width=0.3, brush=profit_color, pen=profit_color ) self.loss_pnl_bar = pg.BarGraphItem( x=[], height=[], width=0.3, brush=loss_color, pen=loss_color ) self.pnl_plot.addItem(self.profit_pnl_bar) self.pnl_plot.addItem(self.loss_pnl_bar) distribution_color = "#6d4c41" self.distribution_curve = self.distribution_plot.plot( fillLevel=-0.3, brush=distribution_color, pen=distribution_color ) def clear_data(self): """""" self.balance_curve.setData([], []) self.drawdown_curve.setData([], []) self.profit_pnl_bar.setOpts(x=[], height=[]) self.loss_pnl_bar.setOpts(x=[], height=[]) self.distribution_curve.setData([], []) def set_data(self, df): """""" if df is None: return count = len(df) self.dates.clear() for n, date in enumerate(df.index): self.dates[n] = date # Set data for curve of balance and drawdown self.balance_curve.setData(df["balance"]) self.drawdown_curve.setData(df["drawdown"]) # Set data for daily pnl bar profit_pnl_x = [] profit_pnl_height = [] loss_pnl_x = [] loss_pnl_height = [] for count, pnl in enumerate(df["net_pnl"]): if pnl >= 0: profit_pnl_height.append(pnl) profit_pnl_x.append(count) else: loss_pnl_height.append(pnl) loss_pnl_x.append(count) self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height) self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height) # Set data for pnl distribution hist, x = np.histogram(df["net_pnl"], bins="auto") x = x[:-1] self.distribution_curve.setData(x, hist) class DateAxis(pg.AxisItem): """Axis for showing date data""" def __init__(self, dates: dict, *args, **kwargs): """""" super().__init__(*args, **kwargs) self.dates = dates def tickStrings(self, values, scale, spacing): """""" strings = [] for v in values: dt = self.dates.get(v, "") strings.append(str(dt)) return strings class OptimizationSettingEditor(QtWidgets.QDialog): """ For setting up parameters for 
optimization. """ DISPLAY_NAME_MAP = { "总收益率": "total_return", "夏普比率": "sharpe_ratio", "收益回撤比": "return_drawdown_ratio", "日均盈亏": "daily_net_pnl" } def __init__( self, class_name: str, parameters: dict ): """""" super().__init__() self.class_name = class_name self.parameters = parameters self.edits = {} self.optimization_setting = None self.use_ga = False self.init_ui() def init_ui(self): """""" QLabel = QtWidgets.QLabel self.target_combo = QtWidgets.QComboBox() self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys())) grid = QtWidgets.QGridLayout() grid.addWidget(QLabel("目标"), 0, 0) grid.addWidget(self.target_combo, 0, 1, 1, 3) grid.addWidget(QLabel("参数"), 1, 0) grid.addWidget(QLabel("开始"), 1, 1) grid.addWidget(QLabel("步进"), 1, 2) grid.addWidget(QLabel("结束"), 1, 3) # Add vt_symbol and name edit if add new strategy self.setWindowTitle(f"优化参数配置:{self.class_name}") validator = QtGui.QDoubleValidator() row = 2 for name, value in self.parameters.items(): type_ = type(value) if type_ not in [int, float]: continue start_edit = QtWidgets.QLineEdit(str(value)) step_edit = QtWidgets.QLineEdit(str(1)) end_edit = QtWidgets.QLineEdit(str(value)) for edit in [start_edit, step_edit, end_edit]: edit.setValidator(validator) grid.addWidget(QLabel(name), row, 0) grid.addWidget(start_edit, row, 1) grid.addWidget(step_edit, row, 2) grid.addWidget(end_edit, row, 3) self.edits[name] = { "type": type_, "start": start_edit, "step": step_edit, "end": end_edit } row += 1 parallel_button = QtWidgets.QPushButton("多进程优化") parallel_button.clicked.connect(self.generate_parallel_setting) grid.addWidget(parallel_button, row, 0, 1, 4) row += 1 ga_button = QtWidgets.QPushButton("遗传算法优化") ga_button.clicked.connect(self.generate_ga_setting) grid.addWidget(ga_button, row, 0, 1, 4) self.setLayout(grid) def generate_ga_setting(self): """""" self.use_ga = True self.generate_setting() def generate_parallel_setting(self): """""" self.use_ga = False self.generate_setting() def generate_setting(self): 
"""""" self.optimization_setting = OptimizationSetting() self.target_display = self.target_combo.currentText() target_name = self.DISPLAY_NAME_MAP[self.target_display] self.optimization_setting.set_target(target_name) for name, d in self.edits.items(): type_ = d["type"] start_value = type_(d["start"].text()) step_value = type_(d["step"].text()) end_value = type_(d["end"].text()) if start_value == end_value: self.optimization_setting.add_parameter(name, start_value) else: self.optimization_setting.add_parameter( name, start_value, end_value, step_value ) self.accept() def get_setting(self): """""" return self.optimization_setting, self.use_ga class OptimizationResultMonitor(QtWidgets.QDialog): """ For viewing optimization result. """ def __init__( self, result_values: list, target_display: str ): """""" super().__init__() self.result_values = result_values self.target_display = target_display self.init_ui() def init_ui(self): """""" self.setWindowTitle("参数优化结果") self.resize(1100, 500) table = QtWidgets.QTableWidget() table.setColumnCount(2) table.setRowCount(len(self.result_values)) table.setHorizontalHeaderLabels(["参数", self.target_display]) table.setEditTriggers(table.NoEditTriggers) table.verticalHeader().setVisible(False) table.horizontalHeader().setSectionResizeMode( 0, QtWidgets.QHeaderView.ResizeToContents ) table.horizontalHeader().setSectionResizeMode( 1, QtWidgets.QHeaderView.Stretch ) for n, tp in enumerate(self.result_values): setting, target_value, _ = tp setting_cell = QtWidgets.QTableWidgetItem(str(setting)) target_cell = QtWidgets.QTableWidgetItem(str(target_value)) setting_cell.setTextAlignment(QtCore.Qt.AlignCenter) target_cell.setTextAlignment(QtCore.Qt.AlignCenter) table.setItem(n, 0, setting_cell) table.setItem(n, 1, target_cell) vbox = QtWidgets.QVBoxLayout() vbox.addWidget(table) self.setLayout(vbox) class BacktestingTradeMonitor(BaseMonitor): """ Monitor for backtesting trade data. 
""" headers = { "tradeid": {"display": "成交号 ", "cell": BaseCell, "update": False}, "orderid": {"display": "委托号", "cell": BaseCell, "update": False}, "symbol": {"display": "代码", "cell": BaseCell, "update": False}, "exchange": {"display": "交易所", "cell": EnumCell, "update": False}, "direction": {"display": "方向", "cell": DirectionCell, "update": False}, "offset": {"display": "开平", "cell": EnumCell, "update": False}, "price": {"display": "价格", "cell": BaseCell, "update": False}, "volume": {"display": "数量", "cell": BaseCell, "update": False}, "datetime": {"display": "时间", "cell": BaseCell, "update": False}, "gateway_name": {"display": "接口", "cell": BaseCell, "update": False}, } class BacktestingOrderMonitor(BaseMonitor): """ Monitor for backtesting order data. """ headers = { "orderid": {"display": "委托号", "cell": BaseCell, "update": False}, "symbol": {"display": "代码", "cell": BaseCell, "update": False}, "exchange": {"display": "交易所", "cell": EnumCell, "update": False}, "type": {"display": "类型", "cell": EnumCell, "update": False}, "direction": {"display": "方向", "cell": DirectionCell, "update": False}, "offset": {"display": "开平", "cell": EnumCell, "update": False}, "price": {"display": "价格", "cell": BaseCell, "update": False}, "volume": {"display": "总数量", "cell": BaseCell, "update": False}, "traded": {"display": "已成交", "cell": BaseCell, "update": False}, "status": {"display": "状态", "cell": EnumCell, "update": False}, "datetime": {"display": "时间", "cell": BaseCell, "update": False}, "gateway_name": {"display": "接口", "cell": BaseCell, "update": False}, } class DailyResultMonitor(BaseMonitor): """ Monitor for backtesting daily result. 
""" headers = { "date": {"display": "日期", "cell": BaseCell, "update": False}, "trade_count": {"display": "成交笔数", "cell": BaseCell, "update": False}, "start_pos": {"display": "开盘持仓", "cell": BaseCell, "update": False}, "end_pos": {"display": "收盘持仓", "cell": BaseCell, "update": False}, "turnover": {"display": "成交额", "cell": BaseCell, "update": False}, "commission": {"display": "手续费", "cell": BaseCell, "update": False}, "slippage": {"display": "滑点", "cell": BaseCell, "update": False}, "trading_pnl": {"display": "交易盈亏", "cell": BaseCell, "update": False}, "holding_pnl": {"display": "持仓盈亏", "cell": BaseCell, "update": False}, "total_pnl": {"display": "总盈亏", "cell": BaseCell, "update": False}, "net_pnl": {"display": "净盈亏", "cell": BaseCell, "update": False}, } class BacktestingResultDialog(QtWidgets.QDialog): """ """ def __init__( self, main_engine: MainEngine, event_engine: EventEngine, title: str, table_class: QtWidgets.QTableWidget ): """""" super().__init__() self.main_engine = main_engine self.event_engine = event_engine self.title = title self.table_class = table_class self.updated = False self.init_ui() def init_ui(self): """""" self.setWindowTitle(self.title) self.resize(1100, 600) self.table = self.table_class(self.main_engine, self.event_engine) vbox = QtWidgets.QVBoxLayout() vbox.addWidget(self.table) self.setLayout(vbox) def clear_data(self): """""" self.updated = False self.table.setRowCount(0) def update_data(self, data: list): """""" self.updated = True data.reverse() for obj in data: self.table.insert_new_row(obj) def is_updated(self): """""" return self.updated class CandleChartDialog(QtWidgets.QDialog): """ """ def __init__(self): """""" super().__init__() self.dt_ix_map = {} self.updated = False self.init_ui() def init_ui(self): """""" self.setWindowTitle("回测K线图表") self.resize(1400, 800) # Create chart widget self.chart = ChartWidget() self.chart.add_plot("candle", hide_x_axis=True) self.chart.add_plot("volume", maximum_height=200) 
self.chart.add_item(CandleItem, "candle", "candle") self.chart.add_item(VolumeItem, "volume", "volume") self.chart.add_cursor() # Add scatter item for showing tradings self.trade_scatter = pg.ScatterPlotItem() candle_plot = self.chart.get_plot("candle") candle_plot.addItem(self.trade_scatter) # Set layout vbox = QtWidgets.QVBoxLayout() vbox.addWidget(self.chart) self.setLayout(vbox) def update_history(self, history: list): """""" self.updated = True self.chart.update_history(history) for ix, bar in enumerate(history): self.dt_ix_map[bar.datetime] = ix def update_trades(self, trades: list): """""" trade_data = [] for trade in trades: ix = self.dt_ix_map[trade.datetime] scatter = { "pos": (ix, trade.price), "data": 1, "size": 14, "pen": pg.mkPen((255, 255, 255)) } if trade.direction == Direction.LONG: scatter_symbol = "t1" # Up arrow else: scatter_symbol = "t" # Down arrow if trade.offset == Offset.OPEN: scatter_brush = pg.mkBrush((255, 255, 0)) # Yellow else: scatter_brush = pg.mkBrush((0, 0, 255)) # Blue scatter["symbol"] = scatter_symbol scatter["brush"] = scatter_brush trade_data.append(scatter) self.trade_scatter.setData(trade_data) def clear_data(self): """""" self.updated = False self.chart.clear_all() self.dt_ix_map.clear() self.trade_scatter.clear() def is_updated(self): """""" return self.updated
31.118261
91
0.592494
import numpy as np import pyqtgraph as pg from datetime import datetime, timedelta from vnpy.trader.constant import Interval, Direction, Offset from vnpy.trader.engine import MainEngine from vnpy.trader.ui import QtCore, QtWidgets, QtGui from vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell from vnpy.trader.ui.editor import CodeEditor from vnpy.event import Event, EventEngine from vnpy.chart import ChartWidget, CandleItem, VolumeItem from vnpy.trader.utility import load_json, save_json from ..engine import ( APP_NAME, EVENT_BACKTESTER_LOG, EVENT_BACKTESTER_BACKTESTING_FINISHED, EVENT_BACKTESTER_OPTIMIZATION_FINISHED, OptimizationSetting ) class BacktesterManager(QtWidgets.QWidget): setting_filename = "cta_backtester_setting.json" signal_log = QtCore.pyqtSignal(Event) signal_backtesting_finished = QtCore.pyqtSignal(Event) signal_optimization_finished = QtCore.pyqtSignal(Event) def __init__(self, main_engine: MainEngine, event_engine: EventEngine): super().__init__() self.main_engine = main_engine self.event_engine = event_engine self.backtester_engine = main_engine.get_engine(APP_NAME) self.class_names = [] self.settings = {} self.target_display = "" self.init_ui() self.register_event() self.backtester_engine.init_engine() self.init_strategy_settings() def init_strategy_settings(self): self.class_names = self.backtester_engine.get_strategy_class_names() for class_name in self.class_names: setting = self.backtester_engine.get_default_setting(class_name) self.settings[class_name] = setting self.class_combo.addItems(self.class_names) def init_ui(self): self.setWindowTitle("CTA回测") self.class_combo = QtWidgets.QComboBox() self.symbol_line = QtWidgets.QLineEdit("IF88.CFFEX") self.interval_combo = QtWidgets.QComboBox() for inteval in Interval: self.interval_combo.addItem(inteval.value) end_dt = datetime.now() start_dt = end_dt - timedelta(days=3 * 365) self.start_date_edit = QtWidgets.QDateEdit( QtCore.QDate( start_dt.year, start_dt.month, 
start_dt.day ) ) self.end_date_edit = QtWidgets.QDateEdit( QtCore.QDate.currentDate() ) self.rate_line = QtWidgets.QLineEdit("0.000025") self.slippage_line = QtWidgets.QLineEdit("0.2") self.size_line = QtWidgets.QLineEdit("300") self.pricetick_line = QtWidgets.QLineEdit("0.2") self.capital_line = QtWidgets.QLineEdit("1000000") self.inverse_combo = QtWidgets.QComboBox() self.inverse_combo.addItems(["正向", "反向"]) backtesting_button = QtWidgets.QPushButton("开始回测") backtesting_button.clicked.connect(self.start_backtesting) optimization_button = QtWidgets.QPushButton("参数优化") optimization_button.clicked.connect(self.start_optimization) self.result_button = QtWidgets.QPushButton("优化结果") self.result_button.clicked.connect(self.show_optimization_result) self.result_button.setEnabled(False) downloading_button = QtWidgets.QPushButton("下载数据") downloading_button.clicked.connect(self.start_downloading) self.order_button = QtWidgets.QPushButton("委托记录") self.order_button.clicked.connect(self.show_backtesting_orders) self.order_button.setEnabled(False) self.trade_button = QtWidgets.QPushButton("成交记录") self.trade_button.clicked.connect(self.show_backtesting_trades) self.trade_button.setEnabled(False) self.daily_button = QtWidgets.QPushButton("每日盈亏") self.daily_button.clicked.connect(self.show_daily_results) self.daily_button.setEnabled(False) self.candle_button = QtWidgets.QPushButton("K线图表") self.candle_button.clicked.connect(self.show_candle_chart) self.candle_button.setEnabled(False) edit_button = QtWidgets.QPushButton("代码编辑") edit_button.clicked.connect(self.edit_strategy_code) reload_button = QtWidgets.QPushButton("策略重载") reload_button.clicked.connect(self.reload_strategy_class) for button in [ backtesting_button, optimization_button, downloading_button, self.result_button, self.order_button, self.trade_button, self.daily_button, self.candle_button, edit_button, reload_button ]: button.setFixedHeight(button.sizeHint().height() * 2) form = QtWidgets.QFormLayout() 
form.addRow("交易策略", self.class_combo) form.addRow("本地代码", self.symbol_line) form.addRow("K线周期", self.interval_combo) form.addRow("开始日期", self.start_date_edit) form.addRow("结束日期", self.end_date_edit) form.addRow("手续费率", self.rate_line) form.addRow("交易滑点", self.slippage_line) form.addRow("合约乘数", self.size_line) form.addRow("价格跳动", self.pricetick_line) form.addRow("回测资金", self.capital_line) form.addRow("合约模式", self.inverse_combo) result_grid = QtWidgets.QGridLayout() result_grid.addWidget(self.trade_button, 0, 0) result_grid.addWidget(self.order_button, 0, 1) result_grid.addWidget(self.daily_button, 1, 0) result_grid.addWidget(self.candle_button, 1, 1) left_vbox = QtWidgets.QVBoxLayout() left_vbox.addLayout(form) left_vbox.addWidget(backtesting_button) left_vbox.addWidget(downloading_button) left_vbox.addStretch() left_vbox.addLayout(result_grid) left_vbox.addStretch() left_vbox.addWidget(optimization_button) left_vbox.addWidget(self.result_button) left_vbox.addStretch() left_vbox.addWidget(edit_button) left_vbox.addWidget(reload_button) self.statistics_monitor = StatisticsMonitor() self.log_monitor = QtWidgets.QTextEdit() self.log_monitor.setMaximumHeight(400) self.chart = BacktesterChart() self.chart.setMinimumWidth(1000) self.trade_dialog = BacktestingResultDialog( self.main_engine, self.event_engine, "回测成交记录", BacktestingTradeMonitor ) self.order_dialog = BacktestingResultDialog( self.main_engine, self.event_engine, "回测委托记录", BacktestingOrderMonitor ) self.daily_dialog = BacktestingResultDialog( self.main_engine, self.event_engine, "回测每日盈亏", DailyResultMonitor ) self.candle_dialog = CandleChartDialog() vbox = QtWidgets.QVBoxLayout() vbox.addWidget(self.statistics_monitor) vbox.addWidget(self.log_monitor) hbox = QtWidgets.QHBoxLayout() hbox.addLayout(left_vbox) hbox.addLayout(vbox) hbox.addWidget(self.chart) self.setLayout(hbox) self.editor = CodeEditor(self.main_engine, self.event_engine) setting = load_json(self.setting_filename) if not setting: return 
self.class_combo.setCurrentIndex( self.class_combo.findText(setting["class_name"]) ) self.symbol_line.setText(setting["vt_symbol"]) self.interval_combo.setCurrentIndex( self.interval_combo.findText(setting["interval"]) ) self.rate_line.setText(str(setting["rate"])) self.slippage_line.setText(str(setting["slippage"])) self.size_line.setText(str(setting["size"])) self.pricetick_line.setText(str(setting["pricetick"])) self.capital_line.setText(str(setting["capital"])) if not setting["inverse"]: self.inverse_combo.setCurrentIndex(0) else: self.inverse_combo.setCurrentIndex(1) def register_event(self): self.signal_log.connect(self.process_log_event) self.signal_backtesting_finished.connect( self.process_backtesting_finished_event) self.signal_optimization_finished.connect( self.process_optimization_finished_event) self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit) self.event_engine.register( EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit) self.event_engine.register( EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit) def process_log_event(self, event: Event): msg = event.data self.write_log(msg) def write_log(self, msg): timestamp = datetime.now().strftime("%H:%M:%S") msg = f"{timestamp}\t{msg}" self.log_monitor.append(msg) def process_backtesting_finished_event(self, event: Event): statistics = self.backtester_engine.get_result_statistics() self.statistics_monitor.set_data(statistics) df = self.backtester_engine.get_result_df() self.chart.set_data(df) self.trade_button.setEnabled(True) self.order_button.setEnabled(True) self.daily_button.setEnabled(True) self.candle_button.setEnabled(True) def process_optimization_finished_event(self, event: Event): self.write_log("请点击[优化结果]按钮查看") self.result_button.setEnabled(True) def start_backtesting(self): class_name = self.class_combo.currentText() vt_symbol = self.symbol_line.text() interval = self.interval_combo.currentText() start = 
self.start_date_edit.date().toPyDate() end = self.end_date_edit.date().toPyDate() rate = float(self.rate_line.text()) slippage = float(self.slippage_line.text()) size = float(self.size_line.text()) pricetick = float(self.pricetick_line.text()) capital = float(self.capital_line.text()) if self.inverse_combo.currentText() == "正向": inverse = False else: inverse = True backtesting_setting = { "class_name": class_name, "vt_symbol": vt_symbol, "interval": interval, "rate": rate, "slippage": slippage, "size": size, "pricetick": pricetick, "capital": capital, "inverse": inverse, } save_json(self.setting_filename, backtesting_setting) old_setting = self.settings[class_name] dialog = BacktestingSettingEditor(class_name, old_setting) i = dialog.exec() if i != dialog.Accepted: return new_setting = dialog.get_setting() self.settings[class_name] = new_setting result = self.backtester_engine.start_backtesting( class_name, vt_symbol, interval, start, end, rate, slippage, size, pricetick, capital, inverse, new_setting ) if result: self.statistics_monitor.clear_data() self.chart.clear_data() self.trade_button.setEnabled(False) self.order_button.setEnabled(False) self.daily_button.setEnabled(False) self.candle_button.setEnabled(False) self.trade_dialog.clear_data() self.order_dialog.clear_data() self.daily_dialog.clear_data() self.candle_dialog.clear_data() def start_optimization(self): class_name = self.class_combo.currentText() vt_symbol = self.symbol_line.text() interval = self.interval_combo.currentText() start = self.start_date_edit.date().toPyDate() end = self.end_date_edit.date().toPyDate() rate = float(self.rate_line.text()) slippage = float(self.slippage_line.text()) size = float(self.size_line.text()) pricetick = float(self.pricetick_line.text()) capital = float(self.capital_line.text()) if self.inverse_combo.currentText() == "正向": inverse = False else: inverse = True parameters = self.settings[class_name] dialog = OptimizationSettingEditor(class_name, parameters) i = 
dialog.exec() if i != dialog.Accepted: return optimization_setting, use_ga = dialog.get_setting() self.target_display = dialog.target_display self.backtester_engine.start_optimization( class_name, vt_symbol, interval, start, end, rate, slippage, size, pricetick, capital, inverse, optimization_setting, use_ga ) self.result_button.setEnabled(False) def start_downloading(self): vt_symbol = self.symbol_line.text() interval = self.interval_combo.currentText() start_date = self.start_date_edit.date() end_date = self.end_date_edit.date() start = datetime(start_date.year(), start_date.month(), start_date.day()) end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59) self.backtester_engine.start_downloading( vt_symbol, interval, start, end ) def show_optimization_result(self): result_values = self.backtester_engine.get_result_values() dialog = OptimizationResultMonitor( result_values, self.target_display ) dialog.exec_() def show_backtesting_trades(self): if not self.trade_dialog.is_updated(): trades = self.backtester_engine.get_all_trades() self.trade_dialog.update_data(trades) self.trade_dialog.exec_() def show_backtesting_orders(self): if not self.order_dialog.is_updated(): orders = self.backtester_engine.get_all_orders() self.order_dialog.update_data(orders) self.order_dialog.exec_() def show_daily_results(self): if not self.daily_dialog.is_updated(): results = self.backtester_engine.get_all_daily_results() self.daily_dialog.update_data(results) self.daily_dialog.exec_() def show_candle_chart(self): if not self.candle_dialog.is_updated(): history = self.backtester_engine.get_history_data() self.candle_dialog.update_history(history) trades = self.backtester_engine.get_all_trades() self.candle_dialog.update_trades(trades) self.candle_dialog.exec_() def edit_strategy_code(self): class_name = self.class_combo.currentText() file_path = self.backtester_engine.get_strategy_class_file(class_name) self.editor.open_editor(file_path) self.editor.show() def 
reload_strategy_class(self): self.backtester_engine.reload_strategy_class() self.class_combo.clear() self.init_strategy_settings() def show(self): self.showMaximized() class StatisticsMonitor(QtWidgets.QTableWidget): KEY_NAME_MAP = { "start_date": "首个交易日", "end_date": "最后交易日", "total_days": "总交易日", "profit_days": "盈利交易日", "loss_days": "亏损交易日", "capital": "起始资金", "end_balance": "结束资金", "total_return": "总收益率", "annual_return": "年化收益", "max_drawdown": "最大回撤", "max_ddpercent": "百分比最大回撤", "total_net_pnl": "总盈亏", "total_commission": "总手续费", "total_slippage": "总滑点", "total_turnover": "总成交额", "total_trade_count": "总成交笔数", "daily_net_pnl": "日均盈亏", "daily_commission": "日均手续费", "daily_slippage": "日均滑点", "daily_turnover": "日均成交额", "daily_trade_count": "日均成交笔数", "daily_return": "日均收益率", "return_std": "收益标准差", "sharpe_ratio": "夏普比率", "return_drawdown_ratio": "收益回撤比" } def __init__(self): super().__init__() self.cells = {} self.init_ui() def init_ui(self): self.setRowCount(len(self.KEY_NAME_MAP)) self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values())) self.setColumnCount(1) self.horizontalHeader().setVisible(False) self.horizontalHeader().setSectionResizeMode( QtWidgets.QHeaderView.Stretch ) self.setEditTriggers(self.NoEditTriggers) for row, key in enumerate(self.KEY_NAME_MAP.keys()): cell = QtWidgets.QTableWidgetItem() self.setItem(row, 0, cell) self.cells[key] = cell def clear_data(self): for cell in self.cells.values(): cell.setText("") def set_data(self, data: dict): data["capital"] = f"{data['capital']:,.2f}" data["end_balance"] = f"{data['end_balance']:,.2f}" data["total_return"] = f"{data['total_return']:,.2f}%" data["annual_return"] = f"{data['annual_return']:,.2f}%" data["max_drawdown"] = f"{data['max_drawdown']:,.2f}" data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%" data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}" data["total_commission"] = f"{data['total_commission']:,.2f}" data["total_slippage"] = f"{data['total_slippage']:,.2f}" 
data["total_turnover"] = f"{data['total_turnover']:,.2f}" data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}" data["daily_commission"] = f"{data['daily_commission']:,.2f}" data["daily_slippage"] = f"{data['daily_slippage']:,.2f}" data["daily_turnover"] = f"{data['daily_turnover']:,.2f}" data["daily_return"] = f"{data['daily_return']:,.2f}%" data["return_std"] = f"{data['return_std']:,.2f}%" data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}" data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}" for key, cell in self.cells.items(): value = data.get(key, "") cell.setText(str(value)) class BacktestingSettingEditor(QtWidgets.QDialog): def __init__( self, class_name: str, parameters: dict ): super(BacktestingSettingEditor, self).__init__() self.class_name = class_name self.parameters = parameters self.edits = {} self.init_ui() def init_ui(self): form = QtWidgets.QFormLayout() self.setWindowTitle(f"策略参数配置:{self.class_name}") button_text = "确定" parameters = self.parameters for name, value in parameters.items(): type_ = type(value) edit = QtWidgets.QLineEdit(str(value)) if type_ is int: validator = QtGui.QIntValidator() edit.setValidator(validator) elif type_ is float: validator = QtGui.QDoubleValidator() edit.setValidator(validator) form.addRow(f"{name} {type_}", edit) self.edits[name] = (edit, type_) button = QtWidgets.QPushButton(button_text) button.clicked.connect(self.accept) form.addRow(button) self.setLayout(form) def get_setting(self): setting = {} for name, tp in self.edits.items(): edit, type_ = tp value_text = edit.text() if type_ == bool: if value_text == "True": value = True else: value = False else: value = type_(value_text) setting[name] = value return setting class BacktesterChart(pg.GraphicsWindow): def __init__(self): super().__init__(title="Backtester Chart") self.dates = {} self.init_ui() def init_ui(self): pg.setConfigOptions(antialias=True) self.balance_plot = self.addPlot( title="账户净值", axisItems={"bottom": 
DateAxis(self.dates, orientation="bottom")} ) self.nextRow() self.drawdown_plot = self.addPlot( title="净值回撤", axisItems={"bottom": DateAxis(self.dates, orientation="bottom")} ) self.nextRow() self.pnl_plot = self.addPlot( title="每日盈亏", axisItems={"bottom": DateAxis(self.dates, orientation="bottom")} ) self.nextRow() self.distribution_plot = self.addPlot(title="盈亏分布") self.balance_curve = self.balance_plot.plot( pen=pg.mkPen("#ffc107", width=3) ) dd_color = "#303f9f" self.drawdown_curve = self.drawdown_plot.plot( fillLevel=-0.3, brush=dd_color, pen=dd_color ) profit_color = 'r' loss_color = 'g' self.profit_pnl_bar = pg.BarGraphItem( x=[], height=[], width=0.3, brush=profit_color, pen=profit_color ) self.loss_pnl_bar = pg.BarGraphItem( x=[], height=[], width=0.3, brush=loss_color, pen=loss_color ) self.pnl_plot.addItem(self.profit_pnl_bar) self.pnl_plot.addItem(self.loss_pnl_bar) distribution_color = "#6d4c41" self.distribution_curve = self.distribution_plot.plot( fillLevel=-0.3, brush=distribution_color, pen=distribution_color ) def clear_data(self): self.balance_curve.setData([], []) self.drawdown_curve.setData([], []) self.profit_pnl_bar.setOpts(x=[], height=[]) self.loss_pnl_bar.setOpts(x=[], height=[]) self.distribution_curve.setData([], []) def set_data(self, df): if df is None: return count = len(df) self.dates.clear() for n, date in enumerate(df.index): self.dates[n] = date self.balance_curve.setData(df["balance"]) self.drawdown_curve.setData(df["drawdown"]) profit_pnl_x = [] profit_pnl_height = [] loss_pnl_x = [] loss_pnl_height = [] for count, pnl in enumerate(df["net_pnl"]): if pnl >= 0: profit_pnl_height.append(pnl) profit_pnl_x.append(count) else: loss_pnl_height.append(pnl) loss_pnl_x.append(count) self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height) self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height) hist, x = np.histogram(df["net_pnl"], bins="auto") x = x[:-1] self.distribution_curve.setData(x, hist) class 
DateAxis(pg.AxisItem): def __init__(self, dates: dict, *args, **kwargs): super().__init__(*args, **kwargs) self.dates = dates def tickStrings(self, values, scale, spacing): strings = [] for v in values: dt = self.dates.get(v, "") strings.append(str(dt)) return strings class OptimizationSettingEditor(QtWidgets.QDialog): DISPLAY_NAME_MAP = { "总收益率": "total_return", "夏普比率": "sharpe_ratio", "收益回撤比": "return_drawdown_ratio", "日均盈亏": "daily_net_pnl" } def __init__( self, class_name: str, parameters: dict ): super().__init__() self.class_name = class_name self.parameters = parameters self.edits = {} self.optimization_setting = None self.use_ga = False self.init_ui() def init_ui(self): QLabel = QtWidgets.QLabel self.target_combo = QtWidgets.QComboBox() self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys())) grid = QtWidgets.QGridLayout() grid.addWidget(QLabel("目标"), 0, 0) grid.addWidget(self.target_combo, 0, 1, 1, 3) grid.addWidget(QLabel("参数"), 1, 0) grid.addWidget(QLabel("开始"), 1, 1) grid.addWidget(QLabel("步进"), 1, 2) grid.addWidget(QLabel("结束"), 1, 3) self.setWindowTitle(f"优化参数配置:{self.class_name}") validator = QtGui.QDoubleValidator() row = 2 for name, value in self.parameters.items(): type_ = type(value) if type_ not in [int, float]: continue start_edit = QtWidgets.QLineEdit(str(value)) step_edit = QtWidgets.QLineEdit(str(1)) end_edit = QtWidgets.QLineEdit(str(value)) for edit in [start_edit, step_edit, end_edit]: edit.setValidator(validator) grid.addWidget(QLabel(name), row, 0) grid.addWidget(start_edit, row, 1) grid.addWidget(step_edit, row, 2) grid.addWidget(end_edit, row, 3) self.edits[name] = { "type": type_, "start": start_edit, "step": step_edit, "end": end_edit } row += 1 parallel_button = QtWidgets.QPushButton("多进程优化") parallel_button.clicked.connect(self.generate_parallel_setting) grid.addWidget(parallel_button, row, 0, 1, 4) row += 1 ga_button = QtWidgets.QPushButton("遗传算法优化") ga_button.clicked.connect(self.generate_ga_setting) 
grid.addWidget(ga_button, row, 0, 1, 4) self.setLayout(grid) def generate_ga_setting(self): self.use_ga = True self.generate_setting() def generate_parallel_setting(self): self.use_ga = False self.generate_setting() def generate_setting(self): self.optimization_setting = OptimizationSetting() self.target_display = self.target_combo.currentText() target_name = self.DISPLAY_NAME_MAP[self.target_display] self.optimization_setting.set_target(target_name) for name, d in self.edits.items(): type_ = d["type"] start_value = type_(d["start"].text()) step_value = type_(d["step"].text()) end_value = type_(d["end"].text()) if start_value == end_value: self.optimization_setting.add_parameter(name, start_value) else: self.optimization_setting.add_parameter( name, start_value, end_value, step_value ) self.accept() def get_setting(self): return self.optimization_setting, self.use_ga class OptimizationResultMonitor(QtWidgets.QDialog): def __init__( self, result_values: list, target_display: str ): super().__init__() self.result_values = result_values self.target_display = target_display self.init_ui() def init_ui(self): self.setWindowTitle("参数优化结果") self.resize(1100, 500) table = QtWidgets.QTableWidget() table.setColumnCount(2) table.setRowCount(len(self.result_values)) table.setHorizontalHeaderLabels(["参数", self.target_display]) table.setEditTriggers(table.NoEditTriggers) table.verticalHeader().setVisible(False) table.horizontalHeader().setSectionResizeMode( 0, QtWidgets.QHeaderView.ResizeToContents ) table.horizontalHeader().setSectionResizeMode( 1, QtWidgets.QHeaderView.Stretch ) for n, tp in enumerate(self.result_values): setting, target_value, _ = tp setting_cell = QtWidgets.QTableWidgetItem(str(setting)) target_cell = QtWidgets.QTableWidgetItem(str(target_value)) setting_cell.setTextAlignment(QtCore.Qt.AlignCenter) target_cell.setTextAlignment(QtCore.Qt.AlignCenter) table.setItem(n, 0, setting_cell) table.setItem(n, 1, target_cell) vbox = QtWidgets.QVBoxLayout() 
vbox.addWidget(table) self.setLayout(vbox) class BacktestingTradeMonitor(BaseMonitor): headers = { "tradeid": {"display": "成交号 ", "cell": BaseCell, "update": False}, "orderid": {"display": "委托号", "cell": BaseCell, "update": False}, "symbol": {"display": "代码", "cell": BaseCell, "update": False}, "exchange": {"display": "交易所", "cell": EnumCell, "update": False}, "direction": {"display": "方向", "cell": DirectionCell, "update": False}, "offset": {"display": "开平", "cell": EnumCell, "update": False}, "price": {"display": "价格", "cell": BaseCell, "update": False}, "volume": {"display": "数量", "cell": BaseCell, "update": False}, "datetime": {"display": "时间", "cell": BaseCell, "update": False}, "gateway_name": {"display": "接口", "cell": BaseCell, "update": False}, } class BacktestingOrderMonitor(BaseMonitor): headers = { "orderid": {"display": "委托号", "cell": BaseCell, "update": False}, "symbol": {"display": "代码", "cell": BaseCell, "update": False}, "exchange": {"display": "交易所", "cell": EnumCell, "update": False}, "type": {"display": "类型", "cell": EnumCell, "update": False}, "direction": {"display": "方向", "cell": DirectionCell, "update": False}, "offset": {"display": "开平", "cell": EnumCell, "update": False}, "price": {"display": "价格", "cell": BaseCell, "update": False}, "volume": {"display": "总数量", "cell": BaseCell, "update": False}, "traded": {"display": "已成交", "cell": BaseCell, "update": False}, "status": {"display": "状态", "cell": EnumCell, "update": False}, "datetime": {"display": "时间", "cell": BaseCell, "update": False}, "gateway_name": {"display": "接口", "cell": BaseCell, "update": False}, } class DailyResultMonitor(BaseMonitor): headers = { "date": {"display": "日期", "cell": BaseCell, "update": False}, "trade_count": {"display": "成交笔数", "cell": BaseCell, "update": False}, "start_pos": {"display": "开盘持仓", "cell": BaseCell, "update": False}, "end_pos": {"display": "收盘持仓", "cell": BaseCell, "update": False}, "turnover": {"display": "成交额", "cell": BaseCell, "update": False}, 
"commission": {"display": "手续费", "cell": BaseCell, "update": False}, "slippage": {"display": "滑点", "cell": BaseCell, "update": False}, "trading_pnl": {"display": "交易盈亏", "cell": BaseCell, "update": False}, "holding_pnl": {"display": "持仓盈亏", "cell": BaseCell, "update": False}, "total_pnl": {"display": "总盈亏", "cell": BaseCell, "update": False}, "net_pnl": {"display": "净盈亏", "cell": BaseCell, "update": False}, } class BacktestingResultDialog(QtWidgets.QDialog): def __init__( self, main_engine: MainEngine, event_engine: EventEngine, title: str, table_class: QtWidgets.QTableWidget ): super().__init__() self.main_engine = main_engine self.event_engine = event_engine self.title = title self.table_class = table_class self.updated = False self.init_ui() def init_ui(self): self.setWindowTitle(self.title) self.resize(1100, 600) self.table = self.table_class(self.main_engine, self.event_engine) vbox = QtWidgets.QVBoxLayout() vbox.addWidget(self.table) self.setLayout(vbox) def clear_data(self): self.updated = False self.table.setRowCount(0) def update_data(self, data: list): self.updated = True data.reverse() for obj in data: self.table.insert_new_row(obj) def is_updated(self): return self.updated class CandleChartDialog(QtWidgets.QDialog): def __init__(self): super().__init__() self.dt_ix_map = {} self.updated = False self.init_ui() def init_ui(self): self.setWindowTitle("回测K线图表") self.resize(1400, 800) self.chart = ChartWidget() self.chart.add_plot("candle", hide_x_axis=True) self.chart.add_plot("volume", maximum_height=200) self.chart.add_item(CandleItem, "candle", "candle") self.chart.add_item(VolumeItem, "volume", "volume") self.chart.add_cursor() self.trade_scatter = pg.ScatterPlotItem() candle_plot = self.chart.get_plot("candle") candle_plot.addItem(self.trade_scatter) vbox = QtWidgets.QVBoxLayout() vbox.addWidget(self.chart) self.setLayout(vbox) def update_history(self, history: list): self.updated = True self.chart.update_history(history) for ix, bar in 
enumerate(history): self.dt_ix_map[bar.datetime] = ix def update_trades(self, trades: list): trade_data = [] for trade in trades: ix = self.dt_ix_map[trade.datetime] scatter = { "pos": (ix, trade.price), "data": 1, "size": 14, "pen": pg.mkPen((255, 255, 255)) } if trade.direction == Direction.LONG: scatter_symbol = "t1" else: scatter_symbol = "t" if trade.offset == Offset.OPEN: scatter_brush = pg.mkBrush((255, 255, 0)) else: scatter_brush = pg.mkBrush((0, 0, 255)) scatter["symbol"] = scatter_symbol scatter["brush"] = scatter_brush trade_data.append(scatter) self.trade_scatter.setData(trade_data) def clear_data(self): self.updated = False self.chart.clear_all() self.dt_ix_map.clear() self.trade_scatter.clear() def is_updated(self): return self.updated
true
true
f70311ba62056f0a9cdf1f81dd1ad123f6427c5a
1,992
py
Python
677.map-sum-pairs.py
y1zhou/leetcode
4c24952b0fa228027f81fdd28fad0e6e662193d2
[ "MIT" ]
null
null
null
677.map-sum-pairs.py
y1zhou/leetcode
4c24952b0fa228027f81fdd28fad0e6e662193d2
[ "MIT" ]
null
null
null
677.map-sum-pairs.py
y1zhou/leetcode
4c24952b0fa228027f81fdd28fad0e6e662193d2
[ "MIT" ]
null
null
null
# # @lc app=leetcode id=677 lang=python3 # # [677] Map Sum Pairs # https://leetcode.com/problems/map-sum-pairs/ # This problem is about the trie data structure. Each node keeps track of the sum of its children. # A new key overrides the original values. # import unittest from typing import Dict # @lc code=start class Node: def __init__(self, val: int = 0): self.value = val self.children: Dict[str, Node] = {} class MapSum: def __init__(self) -> None: """ Initialize your data structure here. """ self.root_node = Node() self.keys: Dict[str, int] = {} def insert(self, key: str, val: int) -> None: # override if key already exists val_diff = val - self.keys.get(key, 0) self.keys[key] = val # track count of prefix characters node = self.root_node for c in key: if c not in node.children: node.children[c] = Node() node = node.children[c] node.value += val_diff def sum(self, prefix: str) -> int: node = self.root_node for c in prefix: # return 0 if prefix doesn't exist if c not in node.children: return 0 node = node.children[c] return node.value # Your MapSum object will be instantiated and called as such: # obj = MapSum() # obj.insert(key,val) # param_2 = obj.sum(prefix) # @lc code=end class TestSolution(unittest.TestCase): def test_given(self) -> None: x = MapSum() x.insert("apple", 3) self.assertEqual(x.sum("ap"), 3) x.insert("app", 2) self.assertEqual(x.sum("ap"), 5) def test_override(self) -> None: x = MapSum() x.insert("apple", 3) x.insert("app", 2) x.insert("apple", 8) self.assertEqual(x.sum("ap"), 10) if __name__ == "__main__": unittest.main()
26.918919
99
0.553213
import unittest from typing import Dict class Node: def __init__(self, val: int = 0): self.value = val self.children: Dict[str, Node] = {} class MapSum: def __init__(self) -> None: self.root_node = Node() self.keys: Dict[str, int] = {} def insert(self, key: str, val: int) -> None: val_diff = val - self.keys.get(key, 0) self.keys[key] = val node = self.root_node for c in key: if c not in node.children: node.children[c] = Node() node = node.children[c] node.value += val_diff def sum(self, prefix: str) -> int: node = self.root_node for c in prefix: if c not in node.children: return 0 node = node.children[c] return node.value # Your MapSum object will be instantiated and called as such: # obj = MapSum() # obj.insert(key,val) # param_2 = obj.sum(prefix) # @lc code=end class TestSolution(unittest.TestCase): def test_given(self) -> None: x = MapSum() x.insert("apple", 3) self.assertEqual(x.sum("ap"), 3) x.insert("app", 2) self.assertEqual(x.sum("ap"), 5) def test_override(self) -> None: x = MapSum() x.insert("apple", 3) x.insert("app", 2) x.insert("apple", 8) self.assertEqual(x.sum("ap"), 10) if __name__ == "__main__": unittest.main()
true
true
f70311d0ab6c7fe8c230f261fb16c43b3d1544f1
4,658
py
Python
sdk/python/pulumi_azure_nextgen/security/v20170801preview/get_security_contact.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
31
2020-09-21T09:41:01.000Z
2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_nextgen/security/v20170801preview/get_security_contact.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
231
2020-09-21T09:38:45.000Z
2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_nextgen/security/v20170801preview/get_security_contact.py
pulumi/pulumi-azure-nextgen
452736b0a1cf584c2d4c04666e017af6e9b2c15c
[ "Apache-2.0" ]
4
2020-09-29T14:14:59.000Z
2021-02-10T20:38:16.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = [ 'GetSecurityContactResult', 'AwaitableGetSecurityContactResult', 'get_security_contact', ] @pulumi.output_type class GetSecurityContactResult: """ Contact details for security issues """ def __init__(__self__, alert_notifications=None, alerts_to_admins=None, email=None, id=None, name=None, phone=None, type=None): if alert_notifications and not isinstance(alert_notifications, str): raise TypeError("Expected argument 'alert_notifications' to be a str") pulumi.set(__self__, "alert_notifications", alert_notifications) if alerts_to_admins and not isinstance(alerts_to_admins, str): raise TypeError("Expected argument 'alerts_to_admins' to be a str") pulumi.set(__self__, "alerts_to_admins", alerts_to_admins) if email and not isinstance(email, str): raise TypeError("Expected argument 'email' to be a str") pulumi.set(__self__, "email", email) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if phone and not isinstance(phone, str): raise TypeError("Expected argument 'phone' to be a str") pulumi.set(__self__, "phone", phone) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter(name="alertNotifications") def alert_notifications(self) -> str: """ Whether to send security alerts notifications to the security contact """ return pulumi.get(self, "alert_notifications") @property @pulumi.getter(name="alertsToAdmins") def alerts_to_admins(self) -> str: """ Whether to 
send security alerts notifications to subscription admins """ return pulumi.get(self, "alerts_to_admins") @property @pulumi.getter def email(self) -> str: """ The email of this security contact """ return pulumi.get(self, "email") @property @pulumi.getter def id(self) -> str: """ Resource Id """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter def phone(self) -> Optional[str]: """ The phone number of this security contact """ return pulumi.get(self, "phone") @property @pulumi.getter def type(self) -> str: """ Resource type """ return pulumi.get(self, "type") class AwaitableGetSecurityContactResult(GetSecurityContactResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSecurityContactResult( alert_notifications=self.alert_notifications, alerts_to_admins=self.alerts_to_admins, email=self.email, id=self.id, name=self.name, phone=self.phone, type=self.type) def get_security_contact(security_contact_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityContactResult: """ Contact details for security issues :param str security_contact_name: Name of the security contact object """ __args__ = dict() __args__['securityContactName'] = security_contact_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:security/v20170801preview:getSecurityContact', __args__, opts=opts, typ=GetSecurityContactResult).value return AwaitableGetSecurityContactResult( alert_notifications=__ret__.alert_notifications, alerts_to_admins=__ret__.alerts_to_admins, email=__ret__.email, id=__ret__.id, name=__ret__.name, phone=__ret__.phone, type=__ret__.type)
33.035461
154
0.644912
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = [ 'GetSecurityContactResult', 'AwaitableGetSecurityContactResult', 'get_security_contact', ] @pulumi.output_type class GetSecurityContactResult: def __init__(__self__, alert_notifications=None, alerts_to_admins=None, email=None, id=None, name=None, phone=None, type=None): if alert_notifications and not isinstance(alert_notifications, str): raise TypeError("Expected argument 'alert_notifications' to be a str") pulumi.set(__self__, "alert_notifications", alert_notifications) if alerts_to_admins and not isinstance(alerts_to_admins, str): raise TypeError("Expected argument 'alerts_to_admins' to be a str") pulumi.set(__self__, "alerts_to_admins", alerts_to_admins) if email and not isinstance(email, str): raise TypeError("Expected argument 'email' to be a str") pulumi.set(__self__, "email", email) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if phone and not isinstance(phone, str): raise TypeError("Expected argument 'phone' to be a str") pulumi.set(__self__, "phone", phone) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter(name="alertNotifications") def alert_notifications(self) -> str: return pulumi.get(self, "alert_notifications") @property @pulumi.getter(name="alertsToAdmins") def alerts_to_admins(self) -> str: return pulumi.get(self, "alerts_to_admins") @property @pulumi.getter def email(self) -> str: return pulumi.get(self, "email") @property @pulumi.getter def id(self) -> str: return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property 
@pulumi.getter def phone(self) -> Optional[str]: return pulumi.get(self, "phone") @property @pulumi.getter def type(self) -> str: return pulumi.get(self, "type") class AwaitableGetSecurityContactResult(GetSecurityContactResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSecurityContactResult( alert_notifications=self.alert_notifications, alerts_to_admins=self.alerts_to_admins, email=self.email, id=self.id, name=self.name, phone=self.phone, type=self.type) def get_security_contact(security_contact_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityContactResult: __args__ = dict() __args__['securityContactName'] = security_contact_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:security/v20170801preview:getSecurityContact', __args__, opts=opts, typ=GetSecurityContactResult).value return AwaitableGetSecurityContactResult( alert_notifications=__ret__.alert_notifications, alerts_to_admins=__ret__.alerts_to_admins, email=__ret__.email, id=__ret__.id, name=__ret__.name, phone=__ret__.phone, type=__ret__.type)
true
true
f70311fcd044270860c9185e5d92b8e64abc2b05
11,544
py
Python
src/localimport/__init__.py
NiklasRosenstein/python-localimport
a51722387cf03fa4c5ff859cc17419a25e280e35
[ "MIT" ]
5
2015-04-11T19:34:12.000Z
2016-07-27T20:08:38.000Z
src/localimport/__init__.py
nrosenstein-stuff/py-localimport
a51722387cf03fa4c5ff859cc17419a25e280e35
[ "MIT" ]
15
2015-07-03T11:31:08.000Z
2016-01-22T16:20:40.000Z
src/localimport/__init__.py
nrosenstein-stuff/py-localimport
a51722387cf03fa4c5ff859cc17419a25e280e35
[ "MIT" ]
null
null
null
__author__ = 'Niklas Rosenstein <rosensteinniklas@gmail.com>' __version__ = '1.7.6' import copy import glob import os import pkgutil import sys import traceback import typing as t import zipfile if t.TYPE_CHECKING: from sys import _MetaPathFinder def is_local(filename: str, pathlist: t.List[str]) -> bool: ''' Returns True if *filename* is a subpath of any of the paths in *pathlist*. ''' filename = os.path.abspath(filename) for path_name in pathlist: path_name = os.path.abspath(path_name) if is_subpath(filename, path_name): return True return False def is_subpath(path: str, parent: str) -> bool: ''' Returns True if *path* points to the same or a subpath of *parent*. ''' try: relpath = os.path.relpath(path, parent) except ValueError: return False # happens on Windows if drive letters don't match return relpath == os.curdir or not relpath.startswith(os.pardir) def eval_pth( filename: str, sitedir: str, dest: t.Optional[t.List[str]] = None, imports: t.Optional[t.List[t.Tuple[str, int, str]]] = None, ) -> t.List[str]: ''' Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list *dest*. If *dest* is #None, it will fall back to `sys.path`. If *imports* is specified, it must be a list. `import` statements will not executed but instead appended to that list in tuples of (*filename*, *line*, *stmt*). 
''' if dest is None: dest = sys.path if not os.path.isfile(filename): return [] with open(filename, 'r') as fp: for index, line in enumerate(fp): if line.startswith('import'): if imports is None: exec_pth_import(filename, index+1, line) else: imports.append((filename, index+1, line)) else: index = line.find('#') if index > 0: line = line[:index] line = line.strip() if not os.path.isabs(line): line = os.path.join(os.path.dirname(filename), line) line = os.path.normpath(line) if line and line not in dest: dest.insert(0, line) return dest def exec_pth_import(filename: str, lineno: int, line: str) -> None: line = '\n' * (lineno - 1) + line.strip() try: exec(compile(line, filename, 'exec')) except BaseException: traceback.print_exc() def extend_path(pth: t.List[str], name: str) -> t.List[str]: ''' Better implementation of #pkgutil.extend_path() which adds support for zipped Python eggs. The original #pkgutil.extend_path() gets mocked by this function inside the #localimport context. ''' def zip_isfile(z, name): name.rstrip('/') return name in z.namelist() pname = os.path.join(*name.split('.')) zname = '/'.join(name.split('.')) init_py = '__init__' + os.extsep + 'py' init_pyc = '__init__' + os.extsep + 'pyc' init_pyo = '__init__' + os.extsep + 'pyo' mod_path = list(pth) for path in sys.path: if zipfile.is_zipfile(path): try: egg = zipfile.ZipFile(path, 'r') addpath = ( zip_isfile(egg, zname + '/__init__.py') or zip_isfile(egg, zname + '/__init__.pyc') or zip_isfile(egg, zname + '/__init__.pyo')) fpath = os.path.join(path, path, zname) if addpath and fpath not in mod_path: mod_path.append(fpath) except (zipfile.BadZipfile, zipfile.LargeZipFile): pass # xxx: Show a warning at least? 
else: path = os.path.join(path, pname) if os.path.isdir(path) and path not in mod_path: addpath = ( os.path.isfile(os.path.join(path, init_py)) or os.path.isfile(os.path.join(path, init_pyc)) or os.path.isfile(os.path.join(path, init_pyo))) if addpath and path not in mod_path: mod_path.append(path) return [os.path.normpath(x) for x in mod_path] class localimport: def __init__( self, path: t.Union[t.List[str], str], parent_dir: t.Optional[str] = None, do_eggs: bool = True, do_pth: bool = True, do_autodisable: bool = True, ) -> None: if not parent_dir: frame = sys._getframe(1).f_globals if '__file__' in frame: parent_dir = os.path.dirname(os.path.abspath(frame['__file__'])) # Convert relative paths to absolute paths with parent_dir and # evaluate .egg files in the specified directories. self.path = [] if isinstance(path, str): path = [path] for path_name in path: if not os.path.isabs(path_name): if not parent_dir: raise ValueError('relative path but no parent_dir') path_name = os.path.join(parent_dir, path_name) path_name = os.path.normpath(path_name) self.path.append(path_name) if do_eggs: self.path.extend(glob.glob(os.path.join(path_name, '*.egg'))) self.meta_path: t.List[_MetaPathFinder] = [] self.modules: t.Dict[str, t.Any] = {} self.do_pth = do_pth self.in_context = False self.do_autodisable = do_autodisable self.pth_imports: t.List[t.Tuple[str, int, str]] = [] if self.do_pth: seen = set() for path_name in self.path: for fn in glob.glob(os.path.join(path_name, '*.pth')): if fn in seen: continue seen.add(fn) eval_pth(fn, path_name, dest=self.path, imports=self.pth_imports) def __enter__(self) -> 'localimport': # pkg_resources comes with setuptools. try: import pkg_resources nsdict = copy.deepcopy(pkg_resources._namespace_packages) # type: ignore declare_namespace = pkg_resources.declare_namespace pkg_resources.declare_namespace = self._declare_namespace # type: ignore except ImportError: nsdict = None declare_namespace = None # Save the global importer state. 
self.state = { 'nsdict': nsdict, 'declare_namespace': declare_namespace, 'nspaths': {}, 'path': sys.path[:], 'meta_path': sys.meta_path[:], 'disables': {}, 'pkgutil.extend_path': pkgutil.extend_path, } # Update the systems meta path and apply function mocks. sys.path[:] = self.path sys.meta_path[:] = self.meta_path + sys.meta_path pkgutil.extend_path = extend_path # type: ignore # If this function is called not the first time, we need to # restore the modules that have been imported with it and # temporarily disable the ones that would be shadowed. for key, mod in list(self.modules.items()): try: self.state['disables'][key] = sys.modules.pop(key) except KeyError: pass sys.modules[key] = mod # Evaluate imports from the .pth files, if any. for fn, lineno, stmt in self.pth_imports: exec_pth_import(fn, lineno, stmt) # Add the original path to sys.path. sys.path += self.state['path'] # Update the __path__ of all namespace modules. for key, mod in list(sys.modules.items()): if mod is None: # Relative imports could have lead to None-entries in # sys.modules. Get rid of them so they can be re-evaluated. prefix = key.rpartition('.')[0] if hasattr(sys.modules.get(prefix), '__path__'): del sys.modules[key] elif hasattr(mod, '__path__'): self.state['nspaths'][key] = copy.copy(mod.__path__) mod.__path__ = pkgutil.extend_path(mod.__path__, mod.__name__) self.in_context = True if self.do_autodisable: self.autodisable() return self def __exit__(self, *__) -> None: if not self.in_context: raise RuntimeError('context not entered') # Figure the difference of the original sys.path and the # current path. The list of paths will be used to determine # what modules are local and what not. local_paths = [] for path in sys.path: if path not in self.state['path']: local_paths.append(path) for path in self.path: if path not in local_paths: local_paths.append(path) # Move all meta path objects to self.meta_path that have not # been there before and have not been in the list before. 
for meta in sys.meta_path: if meta is not self and meta not in self.state['meta_path']: if meta not in self.meta_path: self.meta_path.append(meta) # Move all modules that shadow modules of the original system # state or modules that are from any of the localimport context # paths away. modules = sys.modules.copy() for key, mod in modules.items(): force_pop = False filename = getattr(mod, '__file__', None) if not filename and key not in sys.builtin_module_names: parent = key.rsplit('.', 1)[0] if parent in modules: filename = getattr(modules[parent], '__file__', None) else: force_pop = True if force_pop or (filename and is_local(filename, local_paths)): self.modules[key] = sys.modules.pop(key) # Restore the disabled modules. sys.modules.update(self.state['disables']) for key, mod in self.state['disables'].items(): try: parent_name = key.split('.')[-2] except IndexError: parent_name = None if parent_name and parent_name in sys.modules: parent_module = sys.modules[parent_name] setattr(parent_module, key.split('.')[-1], mod) # Restore the original __path__ value of namespace packages. for key, path_list in self.state['nspaths'].items(): try: sys.modules[key].__path__ = path_list except KeyError: pass # Restore the original state of the global importer. sys.path[:] = self.state['path'] sys.meta_path[:] = self.state['meta_path'] pkgutil.extend_path = self.state['pkgutil.extend_path'] try: import pkg_resources pkg_resources.declare_namespace = self.state['declare_namespace'] pkg_resources._namespace_packages.clear() # type: ignore pkg_resources._namespace_packages.update(self.state['nsdict']) # type: ignore except ImportError: pass self.in_context = False del self.state def _declare_namespace(self, package_name: str) -> None: ''' Mock for #pkg_resources.declare_namespace() which calls #pkgutil.extend_path() afterwards as the original implementation doesn't seem to properly find all available namespace paths. 
''' self.state['declare_namespace'](package_name) mod = sys.modules[package_name] mod.__path__ = pkgutil.extend_path(mod.__path__, package_name) # type: ignore def discover(self) -> t.Iterable[pkgutil.ModuleInfo]: return pkgutil.iter_modules(self.path) def disable(self, module: t.Union[t.List[str], str]) -> None: if not isinstance(module, str): for module_name in module: self.disable(module_name) return sub_prefix = module + '.' modules = {} for key, mod in sys.modules.items(): if key == module or key.startswith(sub_prefix): try: parent_name = '.'.join(key.split('.')[:-1]) except IndexError: parent_name = None # Delete the child module reference from the parent module. modules[key] = mod if parent_name and parent_name in sys.modules: parent = sys.modules[parent_name] try: delattr(parent, key.split('.')[-1]) except AttributeError: pass # Pop all the modules we found from sys.modules for key, mod in modules.items(): del sys.modules[key] self.state['disables'][key] = mod def autodisable(self) -> None: for loader, name, ispkg in self.discover(): self.disable(name)
33.754386
110
0.649948
__author__ = 'Niklas Rosenstein <rosensteinniklas@gmail.com>' __version__ = '1.7.6' import copy import glob import os import pkgutil import sys import traceback import typing as t import zipfile if t.TYPE_CHECKING: from sys import _MetaPathFinder def is_local(filename: str, pathlist: t.List[str]) -> bool: filename = os.path.abspath(filename) for path_name in pathlist: path_name = os.path.abspath(path_name) if is_subpath(filename, path_name): return True return False def is_subpath(path: str, parent: str) -> bool: try: relpath = os.path.relpath(path, parent) except ValueError: return False return relpath == os.curdir or not relpath.startswith(os.pardir) def eval_pth( filename: str, sitedir: str, dest: t.Optional[t.List[str]] = None, imports: t.Optional[t.List[t.Tuple[str, int, str]]] = None, ) -> t.List[str]: if dest is None: dest = sys.path if not os.path.isfile(filename): return [] with open(filename, 'r') as fp: for index, line in enumerate(fp): if line.startswith('import'): if imports is None: exec_pth_import(filename, index+1, line) else: imports.append((filename, index+1, line)) else: index = line.find(' if index > 0: line = line[:index] line = line.strip() if not os.path.isabs(line): line = os.path.join(os.path.dirname(filename), line) line = os.path.normpath(line) if line and line not in dest: dest.insert(0, line) return dest def exec_pth_import(filename: str, lineno: int, line: str) -> None: line = '\n' * (lineno - 1) + line.strip() try: exec(compile(line, filename, 'exec')) except BaseException: traceback.print_exc() def extend_path(pth: t.List[str], name: str) -> t.List[str]: def zip_isfile(z, name): name.rstrip('/') return name in z.namelist() pname = os.path.join(*name.split('.')) zname = '/'.join(name.split('.')) init_py = '__init__' + os.extsep + 'py' init_pyc = '__init__' + os.extsep + 'pyc' init_pyo = '__init__' + os.extsep + 'pyo' mod_path = list(pth) for path in sys.path: if zipfile.is_zipfile(path): try: egg = zipfile.ZipFile(path, 'r') addpath = 
( zip_isfile(egg, zname + '/__init__.py') or zip_isfile(egg, zname + '/__init__.pyc') or zip_isfile(egg, zname + '/__init__.pyo')) fpath = os.path.join(path, path, zname) if addpath and fpath not in mod_path: mod_path.append(fpath) except (zipfile.BadZipfile, zipfile.LargeZipFile): pass # xxx: Show a warning at least? else: path = os.path.join(path, pname) if os.path.isdir(path) and path not in mod_path: addpath = ( os.path.isfile(os.path.join(path, init_py)) or os.path.isfile(os.path.join(path, init_pyc)) or os.path.isfile(os.path.join(path, init_pyo))) if addpath and path not in mod_path: mod_path.append(path) return [os.path.normpath(x) for x in mod_path] class localimport: def __init__( self, path: t.Union[t.List[str], str], parent_dir: t.Optional[str] = None, do_eggs: bool = True, do_pth: bool = True, do_autodisable: bool = True, ) -> None: if not parent_dir: frame = sys._getframe(1).f_globals if '__file__' in frame: parent_dir = os.path.dirname(os.path.abspath(frame['__file__'])) # Convert relative paths to absolute paths with parent_dir and # evaluate .egg files in the specified directories. self.path = [] if isinstance(path, str): path = [path] for path_name in path: if not os.path.isabs(path_name): if not parent_dir: raise ValueError('relative path but no parent_dir') path_name = os.path.join(parent_dir, path_name) path_name = os.path.normpath(path_name) self.path.append(path_name) if do_eggs: self.path.extend(glob.glob(os.path.join(path_name, '*.egg'))) self.meta_path: t.List[_MetaPathFinder] = [] self.modules: t.Dict[str, t.Any] = {} self.do_pth = do_pth self.in_context = False self.do_autodisable = do_autodisable self.pth_imports: t.List[t.Tuple[str, int, str]] = [] if self.do_pth: seen = set() for path_name in self.path: for fn in glob.glob(os.path.join(path_name, '*.pth')): if fn in seen: continue seen.add(fn) eval_pth(fn, path_name, dest=self.path, imports=self.pth_imports) def __enter__(self) -> 'localimport': # pkg_resources comes with setuptools. 
try: import pkg_resources nsdict = copy.deepcopy(pkg_resources._namespace_packages) # type: ignore declare_namespace = pkg_resources.declare_namespace pkg_resources.declare_namespace = self._declare_namespace # type: ignore except ImportError: nsdict = None declare_namespace = None # Save the global importer state. self.state = { 'nsdict': nsdict, 'declare_namespace': declare_namespace, 'nspaths': {}, 'path': sys.path[:], 'meta_path': sys.meta_path[:], 'disables': {}, 'pkgutil.extend_path': pkgutil.extend_path, } # Update the systems meta path and apply function mocks. sys.path[:] = self.path sys.meta_path[:] = self.meta_path + sys.meta_path pkgutil.extend_path = extend_path # type: ignore # If this function is called not the first time, we need to # restore the modules that have been imported with it and # temporarily disable the ones that would be shadowed. for key, mod in list(self.modules.items()): try: self.state['disables'][key] = sys.modules.pop(key) except KeyError: pass sys.modules[key] = mod # Evaluate imports from the .pth files, if any. for fn, lineno, stmt in self.pth_imports: exec_pth_import(fn, lineno, stmt) # Add the original path to sys.path. sys.path += self.state['path'] # Update the __path__ of all namespace modules. for key, mod in list(sys.modules.items()): if mod is None: # Relative imports could have lead to None-entries in # sys.modules. Get rid of them so they can be re-evaluated. prefix = key.rpartition('.')[0] if hasattr(sys.modules.get(prefix), '__path__'): del sys.modules[key] elif hasattr(mod, '__path__'): self.state['nspaths'][key] = copy.copy(mod.__path__) mod.__path__ = pkgutil.extend_path(mod.__path__, mod.__name__) self.in_context = True if self.do_autodisable: self.autodisable() return self def __exit__(self, *__) -> None: if not self.in_context: raise RuntimeError('context not entered') # Figure the difference of the original sys.path and the # current path. 
The list of paths will be used to determine # what modules are local and what not. local_paths = [] for path in sys.path: if path not in self.state['path']: local_paths.append(path) for path in self.path: if path not in local_paths: local_paths.append(path) # Move all meta path objects to self.meta_path that have not # been there before and have not been in the list before. for meta in sys.meta_path: if meta is not self and meta not in self.state['meta_path']: if meta not in self.meta_path: self.meta_path.append(meta) # Move all modules that shadow modules of the original system # state or modules that are from any of the localimport context # paths away. modules = sys.modules.copy() for key, mod in modules.items(): force_pop = False filename = getattr(mod, '__file__', None) if not filename and key not in sys.builtin_module_names: parent = key.rsplit('.', 1)[0] if parent in modules: filename = getattr(modules[parent], '__file__', None) else: force_pop = True if force_pop or (filename and is_local(filename, local_paths)): self.modules[key] = sys.modules.pop(key) # Restore the disabled modules. sys.modules.update(self.state['disables']) for key, mod in self.state['disables'].items(): try: parent_name = key.split('.')[-2] except IndexError: parent_name = None if parent_name and parent_name in sys.modules: parent_module = sys.modules[parent_name] setattr(parent_module, key.split('.')[-1], mod) # Restore the original __path__ value of namespace packages. for key, path_list in self.state['nspaths'].items(): try: sys.modules[key].__path__ = path_list except KeyError: pass # Restore the original state of the global importer. 
sys.path[:] = self.state['path'] sys.meta_path[:] = self.state['meta_path'] pkgutil.extend_path = self.state['pkgutil.extend_path'] try: import pkg_resources pkg_resources.declare_namespace = self.state['declare_namespace'] pkg_resources._namespace_packages.clear() # type: ignore pkg_resources._namespace_packages.update(self.state['nsdict']) # type: ignore except ImportError: pass self.in_context = False del self.state def _declare_namespace(self, package_name: str) -> None: self.state['declare_namespace'](package_name) mod = sys.modules[package_name] mod.__path__ = pkgutil.extend_path(mod.__path__, package_name) # type: ignore def discover(self) -> t.Iterable[pkgutil.ModuleInfo]: return pkgutil.iter_modules(self.path) def disable(self, module: t.Union[t.List[str], str]) -> None: if not isinstance(module, str): for module_name in module: self.disable(module_name) return sub_prefix = module + '.' modules = {} for key, mod in sys.modules.items(): if key == module or key.startswith(sub_prefix): try: parent_name = '.'.join(key.split('.')[:-1]) except IndexError: parent_name = None # Delete the child module reference from the parent module. modules[key] = mod if parent_name and parent_name in sys.modules: parent = sys.modules[parent_name] try: delattr(parent, key.split('.')[-1]) except AttributeError: pass # Pop all the modules we found from sys.modules for key, mod in modules.items(): del sys.modules[key] self.state['disables'][key] = mod def autodisable(self) -> None: for loader, name, ispkg in self.discover(): self.disable(name)
true
true
f7031222207e58ab6b118aabf23323dbda72761a
1,391
py
Python
svhn/evaluate_calibration.py
mvaldenegro/paper-subensembles-image-classification
cc3a6567b1de82b9bfb1612ad8d0e73cdd7ae09b
[ "BSD-3-Clause" ]
5
2020-06-04T19:54:29.000Z
2021-12-13T06:19:48.000Z
svhn/evaluate_calibration.py
mvaldenegro/paper-subensembles-image-classification
cc3a6567b1de82b9bfb1612ad8d0e73cdd7ae09b
[ "BSD-3-Clause" ]
null
null
null
svhn/evaluate_calibration.py
mvaldenegro/paper-subensembles-image-classification
cc3a6567b1de82b9bfb1612ad8d0e73cdd7ae09b
[ "BSD-3-Clause" ]
null
null
null
import numpy as np import h5py import pandas as pd from svhn_io import load_svhn from keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error EPSILON = 1e-10 def load_hdf5_data(filename): inp = h5py.File(filename, "r") preds = inp["preds"][...] inp.close() return preds NUM_ENSEMBLES = 15 NUM_BINS=7 #IOD_FILE_PATTERN = "cnn_svhn-num_ens-{}-preds.hdf5" #OUTPUT_PATTERN = "svhn-calibration-sub-deepensembles_1_num-ens-{}_cnn_svhn.csv" IOD_FILE_PATTERN = "deepensembles-cnn_svhn-num_ens-{}-preds.hdf5" OUTPUT_PATTERN = "svhn-calibration-deepensembles-num-ens-{}_cnn_svhn.csv" if __name__ == "__main__": for num_ens in range(1, NUM_ENSEMBLES + 1): (_, __), (___, y_true) = load_svhn() y_true = y_true.flatten() y_probs = load_hdf5_data(IOD_FILE_PATTERN.format(num_ens)) y_confs = np.max(y_probs, axis=1) y_pred = np.argmax(y_probs, axis=1) curve_conf, curve_acc = classifier_calibration_curve(y_pred, y_true, y_confs, num_bins=NUM_BINS) error = classifier_calibration_error(y_pred, y_true, y_confs, num_bins=NUM_BINS) print("Processing calibration curve for {} ensembles. Error: {}".format(num_ens, error)) output_df = pd.DataFrame(data={"conf": curve_conf, "acc": curve_acc}) output_df.to_csv(OUTPUT_PATTERN.format(num_ens), sep=';', index=False)
33.119048
104
0.716751
import numpy as np import h5py import pandas as pd from svhn_io import load_svhn from keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error EPSILON = 1e-10 def load_hdf5_data(filename): inp = h5py.File(filename, "r") preds = inp["preds"][...] inp.close() return preds NUM_ENSEMBLES = 15 NUM_BINS=7 IOD_FILE_PATTERN = "deepensembles-cnn_svhn-num_ens-{}-preds.hdf5" OUTPUT_PATTERN = "svhn-calibration-deepensembles-num-ens-{}_cnn_svhn.csv" if __name__ == "__main__": for num_ens in range(1, NUM_ENSEMBLES + 1): (_, __), (___, y_true) = load_svhn() y_true = y_true.flatten() y_probs = load_hdf5_data(IOD_FILE_PATTERN.format(num_ens)) y_confs = np.max(y_probs, axis=1) y_pred = np.argmax(y_probs, axis=1) curve_conf, curve_acc = classifier_calibration_curve(y_pred, y_true, y_confs, num_bins=NUM_BINS) error = classifier_calibration_error(y_pred, y_true, y_confs, num_bins=NUM_BINS) print("Processing calibration curve for {} ensembles. Error: {}".format(num_ens, error)) output_df = pd.DataFrame(data={"conf": curve_conf, "acc": curve_acc}) output_df.to_csv(OUTPUT_PATTERN.format(num_ens), sep=';', index=False)
true
true
f70312775afc4e4d6e8b475e09fdec40312d7ceb
846
py
Python
test/test_tracks.py
dpitch40/rockawayplayer
534b5dc5ca709b6f46696d55e18631d4ffaed903
[ "MIT" ]
null
null
null
test/test_tracks.py
dpitch40/rockawayplayer
534b5dc5ca709b6f46696d55e18631d4ffaed903
[ "MIT" ]
null
null
null
test/test_tracks.py
dpitch40/rockawayplayer
534b5dc5ca709b6f46696d55e18631d4ffaed903
[ "MIT" ]
null
null
null
from unittest import TestCase from rockaway.models import Track class TestTrackBasics(TestCase): def test_track_create_no_args(self): track = Track() self.assertFalse(track.hasDbEntry()) self.assertFalse(track.hasFile()) def test_track_create(self): args = {"Title": "Rockaway Beach", "Artist": "The Ramones", # FIXME--This and album will not just be strings "Album": "Rocket to Russia", "Year": 1977, "Genre": "Punk Rock", "Time": 126000} track = Track(**args) self.assertEqual(track.Title, args["Title"]) self.assertEqual(track.Year, 1977) # Alternate ways of looking up attributes self.assertEqual(track.genre, track.Genre) self.assertEqual(track.Time, track["Time"])
30.214286
89
0.601655
from unittest import TestCase from rockaway.models import Track class TestTrackBasics(TestCase): def test_track_create_no_args(self): track = Track() self.assertFalse(track.hasDbEntry()) self.assertFalse(track.hasFile()) def test_track_create(self): args = {"Title": "Rockaway Beach", "Artist": "The Ramones", "Album": "Rocket to Russia", "Year": 1977, "Genre": "Punk Rock", "Time": 126000} track = Track(**args) self.assertEqual(track.Title, args["Title"]) self.assertEqual(track.Year, 1977) self.assertEqual(track.genre, track.Genre) self.assertEqual(track.Time, track["Time"])
true
true
f70312c9886f2d7f39781658427518a8d2976831
24,253
py
Python
numba/targets/npyimpl.py
tolysz/numba
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
[ "Apache-2.0", "BSD-2-Clause" ]
null
null
null
numba/targets/npyimpl.py
tolysz/numba
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
[ "Apache-2.0", "BSD-2-Clause" ]
1
2019-02-11T13:46:30.000Z
2019-02-11T13:46:30.000Z
numba/targets/npyimpl.py
asodeur/numba
d7953a18dbf5ea231dc16e967ce8e9b754578ea6
[ "Apache-2.0", "BSD-2-Clause" ]
null
null
null
""" Implementation of functions in the Numpy package. """ import math import sys import itertools from collections import namedtuple from llvmlite.llvmpy import core as lc import numpy as np import operator from . import builtins, callconv, ufunc_db, arrayobj from .imputils import Registry, impl_ret_new_ref, force_error_model from .. import typing, types, cgutils, numpy_support, utils from ..numpy_support import ufunc_find_matching_loop, select_array_wrapper, from_dtype from ..typing import npydecl from ..extending import overload, intrinsic from .. import errors registry = Registry() lower = registry.lower ######################################################################## # In the way we generate code, ufuncs work with scalar as well as # with array arguments. The following helper classes help dealing # with scalar and array arguments in a regular way. # # In short, the classes provide a uniform interface. The interface # handles the indexing of as many dimensions as the array may have. # For scalars, all indexing is ignored and when the value is read, # the scalar is returned. For arrays code for actual indexing is # generated and reading performs the appropriate indirection. class _ScalarIndexingHelper(object): def update_indices(self, loop_indices, name): pass def as_values(self): pass class _ScalarHelper(object): """Helper class to handle scalar arguments (and result). Note that store_data is only used when generating code for a scalar ufunc and to write the output value. For loading, the value is directly used without having any kind of indexing nor memory backing it up. This is the use for input arguments. For storing, a variable is created in the stack where the value will be written. Note that it is not supported (as it is unneeded for our current use-cases) reading back a stored value. This class will always "load" the original value it got at its creation. 
""" def __init__(self, ctxt, bld, val, ty): self.context = ctxt self.builder = bld self.val = val self.base_type = ty intpty = ctxt.get_value_type(types.intp) self.shape = [lc.Constant.int(intpty, 1)] lty = ctxt.get_data_type(ty) if ty != types.boolean else lc.Type.int(1) self._ptr = cgutils.alloca_once(bld, lty) def create_iter_indices(self): return _ScalarIndexingHelper() def load_data(self, indices): return self.val def store_data(self, indices, val): self.builder.store(val, self._ptr) @property def return_val(self): return self.builder.load(self._ptr) class _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper', ('array', 'indices'))): def update_indices(self, loop_indices, name): bld = self.array.builder intpty = self.array.context.get_value_type(types.intp) ONE = lc.Constant.int(lc.Type.int(intpty.width), 1) # we are only interested in as many inner dimensions as dimensions # the indexed array has (the outer dimensions are broadcast, so # ignoring the outer indices produces the desired result. indices = loop_indices[len(loop_indices) - len(self.indices):] for src, dst, dim in zip(indices, self.indices, self.array.shape): cond = bld.icmp(lc.ICMP_UGT, dim, ONE) with bld.if_then(cond): bld.store(src, dst) def as_values(self): """ The indexing helper is built using alloca for each value, so it actually contains pointers to the actual indices to load. Note that update_indices assumes the same. This method returns the indices as values """ bld = self.array.builder return [bld.load(index) for index in self.indices] class _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder', 'shape', 'strides', 'data', 'layout', 'base_type', 'ndim', 'return_val'))): """Helper class to handle array arguments/result. It provides methods to generate code loading/storing specific items as well as support code for handling indices. 
""" def create_iter_indices(self): intpty = self.context.get_value_type(types.intp) ZERO = lc.Constant.int(lc.Type.int(intpty.width), 0) indices = [] for i in range(self.ndim): x = cgutils.alloca_once(self.builder, lc.Type.int(intpty.width)) self.builder.store(ZERO, x) indices.append(x) return _ArrayIndexingHelper(self, indices) def _load_effective_address(self, indices): return cgutils.get_item_pointer2(self.context, self.builder, data=self.data, shape=self.shape, strides=self.strides, layout=self.layout, inds=indices) def load_data(self, indices): model = self.context.data_model_manager[self.base_type] ptr = self._load_effective_address(indices) return model.load_from_data_pointer(self.builder, ptr) def store_data(self, indices, value): ctx = self.context bld = self.builder store_value = ctx.get_value_as_data(bld, self.base_type, value) assert ctx.get_data_type(self.base_type) == store_value.type bld.store(store_value, self._load_effective_address(indices)) def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'): """returns an instance of the appropriate Helper (either _ScalarHelper or _ArrayHelper) class to handle the argument. 
using the polymorphic interface of the Helper classes, scalar and array cases can be handled with the same code""" # first un-Optional Optionals if isinstance(tyinp, types.Optional): oty = tyinp tyinp = tyinp.type inp = ctxt.cast(bld, inp, oty, tyinp) # then prepare the arg for a concrete instance if isinstance(tyinp, types.ArrayCompatible): ary = ctxt.make_array(tyinp)(ctxt, bld, inp) shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim) strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim) return _ArrayHelper(ctxt, bld, shape, strides, ary.data, tyinp.layout, tyinp.dtype, tyinp.ndim, inp) elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]): return _ScalarHelper(ctxt, bld, inp, tyinp) else: raise NotImplementedError('unsupported type for {0}: {1}'.format(where, str(tyinp))) _broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp), types.intp, types.CPointer(types.intp)) def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape): '''Low-level utility function used in calculating a shape for an implicit output array. This function assumes that the destination shape is an LLVM pointer to a C-style array that was already initialized to a size of one along all axes. Returns an integer value: >= 1 : Succeeded. Return value should equal the number of dimensions in the destination shape. 0 : Failed to broadcast because source shape is larger than the destination shape (this case should be weeded out at type checking). < 0 : Failed to broadcast onto destination axis, at axis number == -(return_value + 1). ''' if src_ndim > dest_ndim: # This check should have been done during type checking, but # let's be defensive anyway... return 0 else: src_index = 0 dest_index = dest_ndim - src_ndim while src_index < src_ndim: src_dim_size = src_shape[src_index] dest_dim_size = dest_shape[dest_index] # Check to see if we've already mutated the destination # shape along this axis. 
if dest_dim_size != 1: # If we have mutated the destination shape already, # then the source axis size must either be one, # or the destination axis size. if src_dim_size != dest_dim_size and src_dim_size != 1: return -(dest_index + 1) elif src_dim_size != 1: # If the destination size is still its initial dest_shape[dest_index] = src_dim_size src_index += 1 dest_index += 1 return dest_index def _build_array(context, builder, array_ty, input_types, inputs): """Utility function to handle allocation of an implicit output array given the target context, builder, output array type, and a list of _ArrayHelper instances. """ intp_ty = context.get_value_type(types.intp) def make_intp_const(val): return context.get_constant(types.intp, val) ZERO = make_intp_const(0) ONE = make_intp_const(1) src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "src_shape") dest_ndim = make_intp_const(array_ty.ndim) dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "dest_shape") dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index) for index in range(array_ty.ndim)) # Initialize the destination shape with all ones. for dest_shape_addr in dest_shape_addrs: builder.store(ONE, dest_shape_addr) # For each argument, try to broadcast onto the destination shape, # mutating along any axis where the argument shape is not one and # the destination shape is one. 
for arg_number, arg in enumerate(inputs): if not hasattr(arg, "ndim"): # Skip scalar arguments continue arg_ndim = make_intp_const(arg.ndim) for index in range(arg.ndim): builder.store(arg.shape[index], cgutils.gep_inbounds(builder, src_shape, index)) arg_result = context.compile_internal( builder, _broadcast_onto, _broadcast_onto_sig, [arg_ndim, src_shape, dest_ndim, dest_shape]) with cgutils.if_unlikely(builder, builder.icmp(lc.ICMP_SLT, arg_result, ONE)): msg = "unable to broadcast argument %d to output array" % ( arg_number,) loc = errors.loc_info.get('loc', None) if loc is not None: msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line) context.call_conv.return_user_exc(builder, ValueError, (msg,)) real_array_ty = array_ty.as_array dest_shape_tup = tuple(builder.load(dest_shape_addr) for dest_shape_addr in dest_shape_addrs) array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty, dest_shape_tup) # Get the best argument to call __array_wrap__ on array_wrapper_index = select_array_wrapper(input_types) array_wrapper_ty = input_types[array_wrapper_index] try: # __array_wrap__(source wrapped array, out array) -> out wrapped array array_wrap = context.get_function('__array_wrap__', array_ty(array_wrapper_ty, real_array_ty)) except NotImplementedError: # If it's the same priority as a regular array, assume we # should use the allocated array unchanged. 
if array_wrapper_ty.array_priority != types.Array.array_priority: raise out_val = array_val._getvalue() else: wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue()) out_val = array_wrap(builder, wrap_args) ndim = array_ty.ndim shape = cgutils.unpack_tuple(builder, array_val.shape, ndim) strides = cgutils.unpack_tuple(builder, array_val.strides, ndim) return _ArrayHelper(context, builder, shape, strides, array_val.data, array_ty.layout, array_ty.dtype, ndim, out_val) def numpy_ufunc_kernel(context, builder, sig, args, kernel_class, explicit_output=True): # This is the code generator that builds all the looping needed # to execute a numpy functions over several dimensions (including # scalar cases). # # context - the code generation context # builder - the code emitter # sig - signature of the ufunc # args - the args to the ufunc # kernel_class - a code generating subclass of _Kernel that provides # explicit_output - if the output was explicit in the call # (ie: np.add(x,y,r)) arguments = [_prepare_argument(context, builder, arg, tyarg) for arg, tyarg in zip(args, sig.args)] if not explicit_output: ret_ty = sig.return_type if isinstance(ret_ty, types.ArrayCompatible): output = _build_array(context, builder, ret_ty, sig.args, arguments) else: output = _prepare_argument( context, builder, lc.Constant.null(context.get_value_type(ret_ty)), ret_ty) arguments.append(output) elif context.enable_nrt: # Incref the output context.nrt.incref(builder, sig.return_type, args[-1]) inputs = arguments[0:-1] output = arguments[-1] outer_sig = [a.base_type for a in arguments] #signature expects return type first, while we have it last: outer_sig = outer_sig[-1:] + outer_sig[:-1] outer_sig = typing.signature(*outer_sig) kernel = kernel_class(context, builder, outer_sig) intpty = context.get_value_type(types.intp) indices = [inp.create_iter_indices() for inp in inputs] loopshape = output.shape with cgutils.loop_nest(builder, loopshape, intp=intpty) as loop_indices: 
vals_in = [] for i, (index, arg) in enumerate(zip(indices, inputs)): index.update_indices(loop_indices, i) vals_in.append(arg.load_data(index.as_values())) val_out = kernel.generate(*vals_in) output.store_data(loop_indices, val_out) out = arguments[-1].return_val return impl_ret_new_ref(context, builder, sig.return_type, out) # Kernels are the code to be executed inside the multidimensional loop. class _Kernel(object): def __init__(self, context, builder, outer_sig): self.context = context self.builder = builder self.outer_sig = outer_sig def cast(self, val, fromty, toty): """Numpy uses cast semantics that are different from standard Python (for example, it does allow casting from complex to float). This method acts as a patch to context.cast so that it allows complex to real/int casts. """ if (isinstance(fromty, types.Complex) and not isinstance(toty, types.Complex)): # attempt conversion of the real part to the specified type. # note that NumPy issues a warning in this kind of conversions newty = fromty.underlying_float attr = self.context.get_getattr(fromty, 'real') val = attr(self.context, self.builder, fromty, val, 'real') fromty = newty # let the regular cast do the rest... return self.context.cast(self.builder, val, fromty, toty) def _ufunc_db_function(ufunc): """Use the ufunc loop type information to select the code generation function from the table provided by the dict_of_kernels. The dict of kernels maps the loop identifier to a function with the following signature: (context, builder, signature, args). The loop type information has the form 'AB->C'. The letters to the left of '->' are the input types (specified as NumPy letter types). The letters to the right of '->' are the output types. There must be 'ufunc.nin' letters to the left of '->', and 'ufunc.nout' letters to the right. For example, a binary float loop resulting in a float, will have the following signature: 'ff->f'. A given ufunc implements many loops. 
The list of loops implemented for a given ufunc can be accessed using the 'types' attribute in the ufunc object. The NumPy machinery selects the first loop that fits a given calling signature (in our case, what we call the outer_sig). This logic is mimicked by 'ufunc_find_matching_loop'. """ class _KernelImpl(_Kernel): def __init__(self, context, builder, outer_sig): super(_KernelImpl, self).__init__(context, builder, outer_sig) loop = ufunc_find_matching_loop( ufunc, outer_sig.args + (outer_sig.return_type,)) self.fn = ufunc_db.get_ufunc_info(ufunc).get(loop.ufunc_sig) self.inner_sig = typing.signature( *(loop.outputs + loop.inputs)) if self.fn is None: msg = "Don't know how to lower ufunc '{0}' for loop '{1}'" raise NotImplementedError(msg.format(ufunc.__name__, loop)) def generate(self, *args): isig = self.inner_sig osig = self.outer_sig cast_args = [self.cast(val, inty, outty) for val, inty, outty in zip(args, osig.args, isig.args)] with force_error_model(self.context, 'numpy'): res = self.fn(self.context, self.builder, isig, cast_args) dmm = self.context.data_model_manager res = dmm[isig.return_type].from_return(self.builder, res) return self.cast(res, isig.return_type, osig.return_type) return _KernelImpl ################################################################################ # Helper functions that register the ufuncs _kernels = {} # Temporary map from ufunc's to their kernel implementation class def register_unary_ufunc_kernel(ufunc, kernel): def unary_ufunc(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel) def unary_ufunc_no_explicit_output(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) _any = types.Any # (array or scalar, out=array) lower(ufunc, _any, types.Array)(unary_ufunc) # (array or scalar) lower(ufunc, _any)(unary_ufunc_no_explicit_output) _kernels[ufunc] = kernel def register_binary_ufunc_kernel(ufunc, kernel): def 
binary_ufunc(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel) def binary_ufunc_no_explicit_output(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) _any = types.Any # (array or scalar, array o scalar, out=array) lower(ufunc, _any, _any, types.Array)(binary_ufunc) # (scalar, scalar) lower(ufunc, _any, _any)(binary_ufunc_no_explicit_output) _kernels[ufunc] = kernel def register_unary_operator_kernel(operator, kernel, inplace=False): assert not inplace # are there any inplace unary operators? def lower_unary_operator(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) _arr_kind = types.Array lower(operator, _arr_kind)(lower_unary_operator) def register_binary_operator_kernel(op, kernel, inplace=False): def lower_binary_operator(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) def lower_inplace_operator(context, builder, sig, args): # The visible signature is (A, B) -> A # The implementation's signature (with explicit output) # is (A, B, A) -> A args = tuple(args) + (args[0],) sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],)) return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=True) _any = types.Any _arr_kind = types.Array formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)] for sig in formal_sigs: if not inplace: lower(op, *sig)(lower_binary_operator) else: lower(op, *sig)(lower_inplace_operator) ################################################################################ # Use the contents of ufunc_db to initialize the supported ufuncs for ufunc in ufunc_db.get_ufuncs(): if ufunc.nin == 1: register_unary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc)) elif ufunc.nin == 2: register_binary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc)) else: raise 
RuntimeError("Don't know how to register ufuncs from ufunc_db with arity > 2") @lower(operator.pos, types.Array) def array_positive_impl(context, builder, sig, args): '''Lowering function for +(array) expressions. Defined here (numba.targets.npyimpl) since the remaining array-operator lowering functions are also registered in this module. ''' class _UnaryPositiveKernel(_Kernel): def generate(self, *args): [val] = args return val return numpy_ufunc_kernel(context, builder, sig, args, _UnaryPositiveKernel, explicit_output=False) for _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map, npydecl.NumpyRulesArrayOperator._op_map, ): for operator, ufunc_name in _op_map.items(): ufunc = getattr(np, ufunc_name) kernel = _kernels[ufunc] if ufunc.nin == 1: register_unary_operator_kernel(operator, kernel) elif ufunc.nin == 2: register_binary_operator_kernel(operator, kernel) else: raise RuntimeError("There shouldn't be any non-unary or binary operators") for _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map, ): for operator, ufunc_name in _op_map.items(): ufunc = getattr(np, ufunc_name) kernel = _kernels[ufunc] if ufunc.nin == 1: register_unary_operator_kernel(operator, kernel, inplace=True) elif ufunc.nin == 2: register_binary_operator_kernel(operator, kernel, inplace=True) else: raise RuntimeError("There shouldn't be any non-unary or binary operators") del _kernels @intrinsic def _make_dtype_object(typingctx, desc): """Given a string or NumberClass description *desc*, returns the dtype object. """ def from_nb_type(nb_type): return_type = types.DType(nb_type) sig = return_type(desc) def codegen(context, builder, signature, args): # All dtype objects are dummy values in LLVM. # They only exist in the type level. return context.get_dummy_value() return sig, codegen if isinstance(desc, types.Literal): # Convert the str description into np.dtype then to numba type. 
nb_type = from_dtype(np.dtype(desc.literal_value)) return from_nb_type(nb_type) elif isinstance(desc, types.functions.NumberClass): thestr = str(desc.dtype) # Convert the str description into np.dtype then to numba type. nb_type = from_dtype(np.dtype(thestr)) return from_nb_type(nb_type) @overload(np.dtype) def numpy_dtype(desc): """Provide an implementation so that numpy.dtype function can be lowered. """ if isinstance(desc, (types.Literal, types.functions.NumberClass)): def imp(desc): return _make_dtype_object(desc) return imp else: raise TypeError('unknown dtype descriptor: {}'.format(desc))
40.153974
92
0.641364
import math import sys import itertools from collections import namedtuple from llvmlite.llvmpy import core as lc import numpy as np import operator from . import builtins, callconv, ufunc_db, arrayobj from .imputils import Registry, impl_ret_new_ref, force_error_model from .. import typing, types, cgutils, numpy_support, utils from ..numpy_support import ufunc_find_matching_loop, select_array_wrapper, from_dtype from ..typing import npydecl from ..extending import overload, intrinsic from .. import errors registry = Registry() lower = registry.lower class _ScalarIndexingHelper(object): def update_indices(self, loop_indices, name): pass def as_values(self): pass class _ScalarHelper(object): def __init__(self, ctxt, bld, val, ty): self.context = ctxt self.builder = bld self.val = val self.base_type = ty intpty = ctxt.get_value_type(types.intp) self.shape = [lc.Constant.int(intpty, 1)] lty = ctxt.get_data_type(ty) if ty != types.boolean else lc.Type.int(1) self._ptr = cgutils.alloca_once(bld, lty) def create_iter_indices(self): return _ScalarIndexingHelper() def load_data(self, indices): return self.val def store_data(self, indices, val): self.builder.store(val, self._ptr) @property def return_val(self): return self.builder.load(self._ptr) class _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper', ('array', 'indices'))): def update_indices(self, loop_indices, name): bld = self.array.builder intpty = self.array.context.get_value_type(types.intp) ONE = lc.Constant.int(lc.Type.int(intpty.width), 1) indices = loop_indices[len(loop_indices) - len(self.indices):] for src, dst, dim in zip(indices, self.indices, self.array.shape): cond = bld.icmp(lc.ICMP_UGT, dim, ONE) with bld.if_then(cond): bld.store(src, dst) def as_values(self): bld = self.array.builder return [bld.load(index) for index in self.indices] class _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder', 'shape', 'strides', 'data', 'layout', 'base_type', 'ndim', 'return_val'))): def 
create_iter_indices(self): intpty = self.context.get_value_type(types.intp) ZERO = lc.Constant.int(lc.Type.int(intpty.width), 0) indices = [] for i in range(self.ndim): x = cgutils.alloca_once(self.builder, lc.Type.int(intpty.width)) self.builder.store(ZERO, x) indices.append(x) return _ArrayIndexingHelper(self, indices) def _load_effective_address(self, indices): return cgutils.get_item_pointer2(self.context, self.builder, data=self.data, shape=self.shape, strides=self.strides, layout=self.layout, inds=indices) def load_data(self, indices): model = self.context.data_model_manager[self.base_type] ptr = self._load_effective_address(indices) return model.load_from_data_pointer(self.builder, ptr) def store_data(self, indices, value): ctx = self.context bld = self.builder store_value = ctx.get_value_as_data(bld, self.base_type, value) assert ctx.get_data_type(self.base_type) == store_value.type bld.store(store_value, self._load_effective_address(indices)) def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'): if isinstance(tyinp, types.Optional): oty = tyinp tyinp = tyinp.type inp = ctxt.cast(bld, inp, oty, tyinp) if isinstance(tyinp, types.ArrayCompatible): ary = ctxt.make_array(tyinp)(ctxt, bld, inp) shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim) strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim) return _ArrayHelper(ctxt, bld, shape, strides, ary.data, tyinp.layout, tyinp.dtype, tyinp.ndim, inp) elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]): return _ScalarHelper(ctxt, bld, inp, tyinp) else: raise NotImplementedError('unsupported type for {0}: {1}'.format(where, str(tyinp))) _broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp), types.intp, types.CPointer(types.intp)) def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape): if src_ndim > dest_ndim: return 0 else: src_index = 0 dest_index = dest_ndim - src_ndim while src_index < src_ndim: src_dim_size = src_shape[src_index] 
dest_dim_size = dest_shape[dest_index] # Check to see if we've already mutated the destination if dest_dim_size != 1: if src_dim_size != dest_dim_size and src_dim_size != 1: return -(dest_index + 1) elif src_dim_size != 1: dest_shape[dest_index] = src_dim_size src_index += 1 dest_index += 1 return dest_index def _build_array(context, builder, array_ty, input_types, inputs): intp_ty = context.get_value_type(types.intp) def make_intp_const(val): return context.get_constant(types.intp, val) ZERO = make_intp_const(0) ONE = make_intp_const(1) src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "src_shape") dest_ndim = make_intp_const(array_ty.ndim) dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim, "dest_shape") dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index) for index in range(array_ty.ndim)) for dest_shape_addr in dest_shape_addrs: builder.store(ONE, dest_shape_addr) for arg_number, arg in enumerate(inputs): if not hasattr(arg, "ndim"): continue arg_ndim = make_intp_const(arg.ndim) for index in range(arg.ndim): builder.store(arg.shape[index], cgutils.gep_inbounds(builder, src_shape, index)) arg_result = context.compile_internal( builder, _broadcast_onto, _broadcast_onto_sig, [arg_ndim, src_shape, dest_ndim, dest_shape]) with cgutils.if_unlikely(builder, builder.icmp(lc.ICMP_SLT, arg_result, ONE)): msg = "unable to broadcast argument %d to output array" % ( arg_number,) loc = errors.loc_info.get('loc', None) if loc is not None: msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line) context.call_conv.return_user_exc(builder, ValueError, (msg,)) real_array_ty = array_ty.as_array dest_shape_tup = tuple(builder.load(dest_shape_addr) for dest_shape_addr in dest_shape_addrs) array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty, dest_shape_tup) array_wrapper_index = select_array_wrapper(input_types) array_wrapper_ty = input_types[array_wrapper_index] try: array_wrap = 
context.get_function('__array_wrap__', array_ty(array_wrapper_ty, real_array_ty)) except NotImplementedError: # should use the allocated array unchanged. if array_wrapper_ty.array_priority != types.Array.array_priority: raise out_val = array_val._getvalue() else: wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue()) out_val = array_wrap(builder, wrap_args) ndim = array_ty.ndim shape = cgutils.unpack_tuple(builder, array_val.shape, ndim) strides = cgutils.unpack_tuple(builder, array_val.strides, ndim) return _ArrayHelper(context, builder, shape, strides, array_val.data, array_ty.layout, array_ty.dtype, ndim, out_val) def numpy_ufunc_kernel(context, builder, sig, args, kernel_class, explicit_output=True): # This is the code generator that builds all the looping needed # to execute a numpy functions over several dimensions (including # scalar cases). # # context - the code generation context # builder - the code emitter # sig - signature of the ufunc # args - the args to the ufunc # kernel_class - a code generating subclass of _Kernel that provides # explicit_output - if the output was explicit in the call # (ie: np.add(x,y,r)) arguments = [_prepare_argument(context, builder, arg, tyarg) for arg, tyarg in zip(args, sig.args)] if not explicit_output: ret_ty = sig.return_type if isinstance(ret_ty, types.ArrayCompatible): output = _build_array(context, builder, ret_ty, sig.args, arguments) else: output = _prepare_argument( context, builder, lc.Constant.null(context.get_value_type(ret_ty)), ret_ty) arguments.append(output) elif context.enable_nrt: # Incref the output context.nrt.incref(builder, sig.return_type, args[-1]) inputs = arguments[0:-1] output = arguments[-1] outer_sig = [a.base_type for a in arguments] #signature expects return type first, while we have it last: outer_sig = outer_sig[-1:] + outer_sig[:-1] outer_sig = typing.signature(*outer_sig) kernel = kernel_class(context, builder, outer_sig) intpty = context.get_value_type(types.intp) 
indices = [inp.create_iter_indices() for inp in inputs] loopshape = output.shape with cgutils.loop_nest(builder, loopshape, intp=intpty) as loop_indices: vals_in = [] for i, (index, arg) in enumerate(zip(indices, inputs)): index.update_indices(loop_indices, i) vals_in.append(arg.load_data(index.as_values())) val_out = kernel.generate(*vals_in) output.store_data(loop_indices, val_out) out = arguments[-1].return_val return impl_ret_new_ref(context, builder, sig.return_type, out) # Kernels are the code to be executed inside the multidimensional loop. class _Kernel(object): def __init__(self, context, builder, outer_sig): self.context = context self.builder = builder self.outer_sig = outer_sig def cast(self, val, fromty, toty): if (isinstance(fromty, types.Complex) and not isinstance(toty, types.Complex)): # attempt conversion of the real part to the specified type. # note that NumPy issues a warning in this kind of conversions newty = fromty.underlying_float attr = self.context.get_getattr(fromty, 'real') val = attr(self.context, self.builder, fromty, val, 'real') fromty = newty # let the regular cast do the rest... 
return self.context.cast(self.builder, val, fromty, toty) def _ufunc_db_function(ufunc): class _KernelImpl(_Kernel): def __init__(self, context, builder, outer_sig): super(_KernelImpl, self).__init__(context, builder, outer_sig) loop = ufunc_find_matching_loop( ufunc, outer_sig.args + (outer_sig.return_type,)) self.fn = ufunc_db.get_ufunc_info(ufunc).get(loop.ufunc_sig) self.inner_sig = typing.signature( *(loop.outputs + loop.inputs)) if self.fn is None: msg = "Don't know how to lower ufunc '{0}' for loop '{1}'" raise NotImplementedError(msg.format(ufunc.__name__, loop)) def generate(self, *args): isig = self.inner_sig osig = self.outer_sig cast_args = [self.cast(val, inty, outty) for val, inty, outty in zip(args, osig.args, isig.args)] with force_error_model(self.context, 'numpy'): res = self.fn(self.context, self.builder, isig, cast_args) dmm = self.context.data_model_manager res = dmm[isig.return_type].from_return(self.builder, res) return self.cast(res, isig.return_type, osig.return_type) return _KernelImpl _kernels = {} def register_unary_ufunc_kernel(ufunc, kernel): def unary_ufunc(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel) def unary_ufunc_no_explicit_output(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) _any = types.Any # (array or scalar, out=array) lower(ufunc, _any, types.Array)(unary_ufunc) # (array or scalar) lower(ufunc, _any)(unary_ufunc_no_explicit_output) _kernels[ufunc] = kernel def register_binary_ufunc_kernel(ufunc, kernel): def binary_ufunc(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel) def binary_ufunc_no_explicit_output(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) _any = types.Any # (array or scalar, array o scalar, out=array) lower(ufunc, _any, _any, types.Array)(binary_ufunc) # (scalar, scalar) lower(ufunc, 
_any, _any)(binary_ufunc_no_explicit_output) _kernels[ufunc] = kernel def register_unary_operator_kernel(operator, kernel, inplace=False): assert not inplace # are there any inplace unary operators? def lower_unary_operator(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) _arr_kind = types.Array lower(operator, _arr_kind)(lower_unary_operator) def register_binary_operator_kernel(op, kernel, inplace=False): def lower_binary_operator(context, builder, sig, args): return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=False) def lower_inplace_operator(context, builder, sig, args): # The visible signature is (A, B) -> A # The implementation's signature (with explicit output) args = tuple(args) + (args[0],) sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],)) return numpy_ufunc_kernel(context, builder, sig, args, kernel, explicit_output=True) _any = types.Any _arr_kind = types.Array formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)] for sig in formal_sigs: if not inplace: lower(op, *sig)(lower_binary_operator) else: lower(op, *sig)(lower_inplace_operator) for ufunc in ufunc_db.get_ufuncs(): if ufunc.nin == 1: register_unary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc)) elif ufunc.nin == 2: register_binary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc)) else: raise RuntimeError("Don't know how to register ufuncs from ufunc_db with arity > 2") @lower(operator.pos, types.Array) def array_positive_impl(context, builder, sig, args): class _UnaryPositiveKernel(_Kernel): def generate(self, *args): [val] = args return val return numpy_ufunc_kernel(context, builder, sig, args, _UnaryPositiveKernel, explicit_output=False) for _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map, npydecl.NumpyRulesArrayOperator._op_map, ): for operator, ufunc_name in _op_map.items(): ufunc = getattr(np, ufunc_name) kernel = _kernels[ufunc] if ufunc.nin == 1: 
register_unary_operator_kernel(operator, kernel) elif ufunc.nin == 2: register_binary_operator_kernel(operator, kernel) else: raise RuntimeError("There shouldn't be any non-unary or binary operators") for _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map, ): for operator, ufunc_name in _op_map.items(): ufunc = getattr(np, ufunc_name) kernel = _kernels[ufunc] if ufunc.nin == 1: register_unary_operator_kernel(operator, kernel, inplace=True) elif ufunc.nin == 2: register_binary_operator_kernel(operator, kernel, inplace=True) else: raise RuntimeError("There shouldn't be any non-unary or binary operators") del _kernels @intrinsic def _make_dtype_object(typingctx, desc): def from_nb_type(nb_type): return_type = types.DType(nb_type) sig = return_type(desc) def codegen(context, builder, signature, args): # All dtype objects are dummy values in LLVM. # They only exist in the type level. return context.get_dummy_value() return sig, codegen if isinstance(desc, types.Literal): # Convert the str description into np.dtype then to numba type. nb_type = from_dtype(np.dtype(desc.literal_value)) return from_nb_type(nb_type) elif isinstance(desc, types.functions.NumberClass): thestr = str(desc.dtype) # Convert the str description into np.dtype then to numba type. nb_type = from_dtype(np.dtype(thestr)) return from_nb_type(nb_type) @overload(np.dtype) def numpy_dtype(desc): if isinstance(desc, (types.Literal, types.functions.NumberClass)): def imp(desc): return _make_dtype_object(desc) return imp else: raise TypeError('unknown dtype descriptor: {}'.format(desc))
true
true
f703131bb20bf6a376a2a9e06ef04e947ce3981c
1,321
py
Python
setup.py
anobi/django-oauth-api
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
[ "BSD-2-Clause" ]
null
null
null
setup.py
anobi/django-oauth-api
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
[ "BSD-2-Clause" ]
null
null
null
setup.py
anobi/django-oauth-api
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup, find_packages import os import re def get_version(package): """ Return package version as listed in `__version__` in `init.py`. """ init_py = open(os.path.join(package, '__init__.py')).read() return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) version = get_version('oauth_api') setup( name="django-oauth-api", version=version, description="OAuth API for Django using Django Rest Framework", classifiers=[ "Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: Django", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='django djangorestframework oauth oauth2 oauthlib', author='Tomi Pajunen', author_email='tomi@madlab.fi', url='https://github.com/eofs/django-oauth-api', license='BSD', packages=find_packages(), include_package_data=True, test_suite='runtests', install_requires=[ 'django>=1.11', 'oauthlib==2.0.7', ], zip_safe=False, )
27.520833
74
0.623013
from setuptools import setup, find_packages import os import re def get_version(package): init_py = open(os.path.join(package, '__init__.py')).read() return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) version = get_version('oauth_api') setup( name="django-oauth-api", version=version, description="OAuth API for Django using Django Rest Framework", classifiers=[ "Development Status :: 4 - Beta", "Environment :: Web Environment", "Framework :: Django", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='django djangorestframework oauth oauth2 oauthlib', author='Tomi Pajunen', author_email='tomi@madlab.fi', url='https://github.com/eofs/django-oauth-api', license='BSD', packages=find_packages(), include_package_data=True, test_suite='runtests', install_requires=[ 'django>=1.11', 'oauthlib==2.0.7', ], zip_safe=False, )
true
true
f70313ad1800d1fbedaa2338bc9ced0d636179d5
537
py
Python
events/admin.py
the-mandarine/mypanamsquad
b34c1c6169a3b7496e171b9536472a1ede0bdc84
[ "Beerware" ]
null
null
null
events/admin.py
the-mandarine/mypanamsquad
b34c1c6169a3b7496e171b9536472a1ede0bdc84
[ "Beerware" ]
null
null
null
events/admin.py
the-mandarine/mypanamsquad
b34c1c6169a3b7496e171b9536472a1ede0bdc84
[ "Beerware" ]
null
null
null
from django.contrib import admin from events.models import Place, Event, Attendance # Register your models here. class EventAdmin(admin.ModelAdmin): filter_horizontal = ('expected_members', ) class AttendanceAdmin(admin.ModelAdmin): list_display = ('event__name', 'member', 'attendance', 'proxy_to', 'accepted',) list_filter = ('event__name',) def event__name(self, obj): return str(obj.event) admin.site.register(Place) admin.site.register(Attendance, AttendanceAdmin) admin.site.register(Event, EventAdmin)
29.833333
83
0.746741
from django.contrib import admin from events.models import Place, Event, Attendance class EventAdmin(admin.ModelAdmin): filter_horizontal = ('expected_members', ) class AttendanceAdmin(admin.ModelAdmin): list_display = ('event__name', 'member', 'attendance', 'proxy_to', 'accepted',) list_filter = ('event__name',) def event__name(self, obj): return str(obj.event) admin.site.register(Place) admin.site.register(Attendance, AttendanceAdmin) admin.site.register(Event, EventAdmin)
true
true
f703143bb336a96fa5c3b2d5b48b35b68a8fb16a
969
py
Python
test_factorial_example.py
gaoshanyu/web_ui_test_sample
8a6cc9b54b5f728af7ef0725dea42d759bd115d0
[ "MIT" ]
null
null
null
test_factorial_example.py
gaoshanyu/web_ui_test_sample
8a6cc9b54b5f728af7ef0725dea42d759bd115d0
[ "MIT" ]
null
null
null
test_factorial_example.py
gaoshanyu/web_ui_test_sample
8a6cc9b54b5f728af7ef0725dea42d759bd115d0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Created at 03/09/2020 __author__ = 'raniys' import math import pytest from factorial_example import factorial_function @pytest.mark.sample def test_factorial_functionality(): print("Inside test_factorial_functionality") assert factorial_function(0) == 1 assert factorial_function(4) == 24 @pytest.mark.sample def test_standard_library(): print("Inside test_standard_library") for i in range(5): # verify whether factorial is calculated correctly # by checking against result against standard # library - math.factorial() assert math.factorial(i) == factorial_function(i) @pytest.mark.sample def test_negative_number(): print("Inside test_negative_number") # This test case would pass if Assertion Error # is raised. In this case, the input number is negative # hence, the test case passes with pytest.raises(AssertionError): factorial_function(-10)
23.634146
59
0.716202
__author__ = 'raniys' import math import pytest from factorial_example import factorial_function @pytest.mark.sample def test_factorial_functionality(): print("Inside test_factorial_functionality") assert factorial_function(0) == 1 assert factorial_function(4) == 24 @pytest.mark.sample def test_standard_library(): print("Inside test_standard_library") for i in range(5): assert math.factorial(i) == factorial_function(i) @pytest.mark.sample def test_negative_number(): print("Inside test_negative_number") with pytest.raises(AssertionError): factorial_function(-10)
true
true
f70314d2b61bace541c7746ae96937b488fdab30
1,434
py
Python
FlaskAPI/PullLoLDataAssets.py
ItsViridae/LoLApiWrapper
f84c2bbb8f3e2e42e7b0e17c137c561727f045a3
[ "MIT" ]
null
null
null
FlaskAPI/PullLoLDataAssets.py
ItsViridae/LoLApiWrapper
f84c2bbb8f3e2e42e7b0e17c137c561727f045a3
[ "MIT" ]
3
2021-10-06T18:40:51.000Z
2022-02-27T06:30:39.000Z
FlaskAPI/PullLoLDataAssets.py
ItsViridae/LoLApiWrapper
f84c2bbb8f3e2e42e7b0e17c137c561727f045a3
[ "MIT" ]
null
null
null
import requests import json # Get Current Patch def getCurrentVersion(): versionResponse = requests.get("https://ddragon.leagueoflegends.com/api/versions.json") version_patch_RawData = versionResponse.json() currentVersion = version_patch_RawData[0] print(currentVersion) return currentVersion #champions, items, summoner_spells, spells def GetDDragonData_Champions(): version = getCurrentVersion() #Champions Data response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/champion.json") allChampionRawData = json.loads(response.text) ChampionIdToName = {} for key,champion in allChampionRawData['data'].items(): ChampionIdToName[int(champion['key'])] = champion['name'] print(ChampionIdToName) return ChampionIdToName def GetDDragonData_Items(): version = getCurrentVersion() response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/item.json") allItemsRawData = json.loads(response.text) QuickPrinter(allItemsRawData) #Items Data ItemIdToName = {} for key,item in allItemsRawData['data'].items(): ItemIdToName[int(key)] = item['name'] print(ItemToToName) return ItemIdToName def QuickPrinter(String_to_Print): print(json.dumps(String_to_Print, indent=4, sort_keys=True)) #main() version = getCurrentVersion() GetDDragonData_Champions() GetDDragonData_Items()
33.348837
106
0.739191
import requests import json def getCurrentVersion(): versionResponse = requests.get("https://ddragon.leagueoflegends.com/api/versions.json") version_patch_RawData = versionResponse.json() currentVersion = version_patch_RawData[0] print(currentVersion) return currentVersion def GetDDragonData_Champions(): version = getCurrentVersion() response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/champion.json") allChampionRawData = json.loads(response.text) ChampionIdToName = {} for key,champion in allChampionRawData['data'].items(): ChampionIdToName[int(champion['key'])] = champion['name'] print(ChampionIdToName) return ChampionIdToName def GetDDragonData_Items(): version = getCurrentVersion() response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/item.json") allItemsRawData = json.loads(response.text) QuickPrinter(allItemsRawData) ItemIdToName = {} for key,item in allItemsRawData['data'].items(): ItemIdToName[int(key)] = item['name'] print(ItemToToName) return ItemIdToName def QuickPrinter(String_to_Print): print(json.dumps(String_to_Print, indent=4, sort_keys=True)) version = getCurrentVersion() GetDDragonData_Champions() GetDDragonData_Items()
true
true
f7031633e36d301952c1bcfad443c0b4e0b34d97
2,839
py
Python
neuro_scripts/manual_rigid_body/manual_rigid_body.py
NicoleEic/projects
028a4bb4b49539fc98b442f0a2f9434e95c94561
[ "MIT" ]
9
2019-09-10T19:41:29.000Z
2022-03-03T21:29:11.000Z
neuro_scripts/manual_rigid_body/manual_rigid_body.py
NicoleEic/projects
028a4bb4b49539fc98b442f0a2f9434e95c94561
[ "MIT" ]
null
null
null
neuro_scripts/manual_rigid_body/manual_rigid_body.py
NicoleEic/projects
028a4bb4b49539fc98b442f0a2f9434e95c94561
[ "MIT" ]
4
2019-09-10T19:41:37.000Z
2020-11-26T13:51:55.000Z
import numpy as np import nibabel as nib import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from my_functions.matrix_stuff import * def manual_rigid_body(fname = 'example_brain.nii.gz', outmat = 'transformation.mat', outimg = 'example_brain_transformed.nii.gz', theta = np.radians([0,0,0]), translation_vec = [0,0,0], type = 'rotation', flip_coordinates = [True, False, False]): """ Function to perform a rigid body transformation based on manually determined parameters. Args: - fname (str): filepath to input nifti image (.nii.gz) - outmat (str): filepath of output 4x4 transformation matrix (.mat) - outimg (str): filepath of transformed output image (.nii.gz) - theta (np.array): vector of rotation angles in x,y,z dimension (in radians) - translation_vec (np.array): vector for translation in x,y,z (in image coordinates) - type (str): can be 'rotation' or 'translation' or 'rotation_translation' - flip_coordinates (boolean vector): indicates for which axis the sign of the offset needs to be flipped Returns: - M (np.array): output 4x4 transformation matrix - M is written to outmat - the output image (outimg) is written out Note on flip_coordinates: Voxel coordinates in the image are expected to increase in the following directions (it's similar to determining the reorient-command): - first dimension: left -> right - second dimension: posterir -> anterior - third dimension: inferior -> superior if they go the other way, change input variable accordingly, e.g.: flip_coordinates = [True, False, False] """ # get sform from image to determine offset of coordinate-system img = nib.load(fname) aff = img.get_affine() offset = aff[0:3,3] # which type of manipulation is requested if type == 'rotation': print('do rotation only') M = rotation(theta, offset, flip_coordinates) elif type == 'translation': print('do translation only') M = vector_to_translation_matrix(translation_vec) elif type == 'rotation_translation': print('do combined rotation 
and translation') M = rotation_translation(theta, translation_vec, offset, flip_coordinates) # save output matrix print('output matrix: ', M) print('save in: ', outmat) save_matrix4x4(M, outmat) # apply transformation to input image applywarp_command = "applywarp -i " + fname + " -r " + fname + " --premat=" + outmat + " --interp=nn -o " + outimg print('run flirt: ', applywarp_command) os.system(applywarp_command) return M
39.985915
118
0.641071
import numpy as np import nibabel as nib import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from my_functions.matrix_stuff import * def manual_rigid_body(fname = 'example_brain.nii.gz', outmat = 'transformation.mat', outimg = 'example_brain_transformed.nii.gz', theta = np.radians([0,0,0]), translation_vec = [0,0,0], type = 'rotation', flip_coordinates = [True, False, False]): img = nib.load(fname) aff = img.get_affine() offset = aff[0:3,3] if type == 'rotation': print('do rotation only') M = rotation(theta, offset, flip_coordinates) elif type == 'translation': print('do translation only') M = vector_to_translation_matrix(translation_vec) elif type == 'rotation_translation': print('do combined rotation and translation') M = rotation_translation(theta, translation_vec, offset, flip_coordinates) print('output matrix: ', M) print('save in: ', outmat) save_matrix4x4(M, outmat) applywarp_command = "applywarp -i " + fname + " -r " + fname + " --premat=" + outmat + " --interp=nn -o " + outimg print('run flirt: ', applywarp_command) os.system(applywarp_command) return M
true
true
f7031875b5b2ac17eb7490db6305e5cad29a4261
2,636
py
Python
model/contact.py
agakax/qa-courses-python-training
d523d5543c947ed449cd2d1109cac2eeac390f7b
[ "Apache-2.0" ]
null
null
null
model/contact.py
agakax/qa-courses-python-training
d523d5543c947ed449cd2d1109cac2eeac390f7b
[ "Apache-2.0" ]
null
null
null
model/contact.py
agakax/qa-courses-python-training
d523d5543c947ed449cd2d1109cac2eeac390f7b
[ "Apache-2.0" ]
null
null
null
from sys import maxsize class Contact: def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, photo_path=None, photo_delete=False, title=None, company=None, address=None, telephones_all=None, telephone_home=None, telephone_mobile=None, telephone_work=None, telephone_fax=None, emails_all=None, email=None, email2=None, email3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, group=None, secondary_address=None, secondary_telephone_home=None, secondary_notes=None, id_contact=None): self.first_name = first_name self.middle_name = middle_name self.last_name = last_name self.nickname = nickname self.photo_path = photo_path self.photo_delete = photo_delete self.title = title self.company = company self.address = address self.telephones_all = telephones_all self.telephone_home = telephone_home self.telephone_mobile = telephone_mobile self.telephone_work = telephone_work self.telephone_fax = telephone_fax self.emails_all = emails_all self.email = email self.email2 = email2 self.email3 = email3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.group = group self.secondary_address = secondary_address self.secondary_telephone_home = secondary_telephone_home self.secondary_notes = secondary_notes self.id = id_contact def __repr__(self): return "%s: %s %s, %s" % (self.id, self.first_name, self.last_name, self.address) def __eq__(self, other): return (self.id is None or other.id is None or self.id == other.id) and \ self.check_for_none(self.first_name, other.first_name) and \ self.check_for_none(self.last_name, other.last_name) and \ self.check_for_none(self.address, other.address) def id_or_max(self): if self.id: return int(self.id) else: return 
maxsize def check_for_none(self, first, second): return first == second or (first is None and second == "") or (first == "" and second is None)
42.516129
113
0.658194
from sys import maxsize class Contact: def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, photo_path=None, photo_delete=False, title=None, company=None, address=None, telephones_all=None, telephone_home=None, telephone_mobile=None, telephone_work=None, telephone_fax=None, emails_all=None, email=None, email2=None, email3=None, homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None, anniversary_month=None, anniversary_year=None, group=None, secondary_address=None, secondary_telephone_home=None, secondary_notes=None, id_contact=None): self.first_name = first_name self.middle_name = middle_name self.last_name = last_name self.nickname = nickname self.photo_path = photo_path self.photo_delete = photo_delete self.title = title self.company = company self.address = address self.telephones_all = telephones_all self.telephone_home = telephone_home self.telephone_mobile = telephone_mobile self.telephone_work = telephone_work self.telephone_fax = telephone_fax self.emails_all = emails_all self.email = email self.email2 = email2 self.email3 = email3 self.homepage = homepage self.birthday_day = birthday_day self.birthday_month = birthday_month self.birthday_year = birthday_year self.anniversary_day = anniversary_day self.anniversary_month = anniversary_month self.anniversary_year = anniversary_year self.group = group self.secondary_address = secondary_address self.secondary_telephone_home = secondary_telephone_home self.secondary_notes = secondary_notes self.id = id_contact def __repr__(self): return "%s: %s %s, %s" % (self.id, self.first_name, self.last_name, self.address) def __eq__(self, other): return (self.id is None or other.id is None or self.id == other.id) and \ self.check_for_none(self.first_name, other.first_name) and \ self.check_for_none(self.last_name, other.last_name) and \ self.check_for_none(self.address, other.address) def id_or_max(self): if self.id: return int(self.id) else: return 
maxsize def check_for_none(self, first, second): return first == second or (first is None and second == "") or (first == "" and second is None)
true
true
f703195775c624f40961822a40021754ea2a5f14
1,037
py
Python
api/insights/insights/infrastructure/mysql/orm/mapper_base.py
manisharmagarg/qymatix
0dc240970359429ae5105db79f9aebf1a99ba6fd
[ "Apache-2.0" ]
null
null
null
api/insights/insights/infrastructure/mysql/orm/mapper_base.py
manisharmagarg/qymatix
0dc240970359429ae5105db79f9aebf1a99ba6fd
[ "Apache-2.0" ]
null
null
null
api/insights/insights/infrastructure/mysql/orm/mapper_base.py
manisharmagarg/qymatix
0dc240970359429ae5105db79f9aebf1a99ba6fd
[ "Apache-2.0" ]
null
null
null
import os from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from .base import Base class MapperBase(): user = os.getenv("MYSQL_USER") key = os.getenv("MYSQL_KEY") host = os.getenv("MYSQL_HOST") port = os.getenv("MYSQL_PORT") def __init__(self, database): self.db = database if database == 'test': self.url = 'sqlite:///:memory:' else: self.url = \ 'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format( self.user, self.key, self.host, self.port, self.db, ) self.engine = create_engine( self.url, connect_args={'use_pure': True} ) self.session = sessionmaker(bind=self.engine) self.base = Base def get_base(self): return self.base def get_engine(self): return self.engine def get_session(self): return self.session()
23.568182
63
0.518804
import os from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from .base import Base class MapperBase(): user = os.getenv("MYSQL_USER") key = os.getenv("MYSQL_KEY") host = os.getenv("MYSQL_HOST") port = os.getenv("MYSQL_PORT") def __init__(self, database): self.db = database if database == 'test': self.url = 'sqlite:///:memory:' else: self.url = \ 'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format( self.user, self.key, self.host, self.port, self.db, ) self.engine = create_engine( self.url, connect_args={'use_pure': True} ) self.session = sessionmaker(bind=self.engine) self.base = Base def get_base(self): return self.base def get_engine(self): return self.engine def get_session(self): return self.session()
true
true
f7031a7684f35f0454e3dca92a763da2ae157341
1,774
py
Python
homophily_structural_balance/plotting/plot_positive_edge_density.py
robertjankowski/reproducing-dl-papers
01ad85eac333b87358b3d2e2276292333cacf0e0
[ "Apache-2.0" ]
2
2021-06-06T09:45:33.000Z
2021-06-07T20:00:33.000Z
homophily_structural_balance/plotting/plot_positive_edge_density.py
robertjankowski/reproducing-dl-papers
01ad85eac333b87358b3d2e2276292333cacf0e0
[ "Apache-2.0" ]
null
null
null
homophily_structural_balance/plotting/plot_positive_edge_density.py
robertjankowski/reproducing-dl-papers
01ad85eac333b87358b3d2e2276292333cacf0e0
[ "Apache-2.0" ]
2
2021-06-03T01:40:28.000Z
2021-06-07T06:56:18.000Z
import numpy as np import matplotlib.pyplot as plt import argparse def extract_name(word: str): return word.split('=')[-1] def extract_info(filename: str): filename_splitted = filename.split('_') assert len(filename_splitted) == 7 p = float(extract_name(filename_splitted[1])) iterations = int(extract_name(filename_splitted[2])) size = int(extract_name(filename_splitted[3])) G = int(extract_name(filename_splitted[4])) return p, iterations, size, G def load_metrics(filename: str) -> list: with open(filename, 'r') as f: return [float(line.strip()) for line in f] def plot_metrics(filename: str, metrics: list, output_path: str = None): p, iterations, size, G = extract_info(filename) x = np.linspace(0, iterations, len(metrics)) plt.rc('text', usetex=True) plt.rc('font', family='serif') plt.figure(figsize=(8, 5)) plt.grid(True, alpha=0.3) plt.plot(x, metrics, label=f'p = {p}, N = {size}, G = {G}') plt.ylabel(r'$\rho$', fontsize=14) plt.xlabel('$t$', fontsize=14) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.legend(fontsize=13) if output_path is not None: plt.savefig(output_path, bbox_inches='tight') else: plt.show() def main(): parser = argparse.ArgumentParser(description='Plot positive edge density (rho)') parser.add_argument('--metrics-file', type=str, required=True, help='Path to calculated positive edge density') parser.add_argument('--output-figure', type=str, required=False, default=None, help='Where to save output figure') args = parser.parse_args() metrics = load_metrics(args.metrics_file) plot_metrics(args.metrics_file, metrics, args.output_figure) if __name__ == '__main__': main()
31.678571
118
0.678692
import numpy as np import matplotlib.pyplot as plt import argparse def extract_name(word: str): return word.split('=')[-1] def extract_info(filename: str): filename_splitted = filename.split('_') assert len(filename_splitted) == 7 p = float(extract_name(filename_splitted[1])) iterations = int(extract_name(filename_splitted[2])) size = int(extract_name(filename_splitted[3])) G = int(extract_name(filename_splitted[4])) return p, iterations, size, G def load_metrics(filename: str) -> list: with open(filename, 'r') as f: return [float(line.strip()) for line in f] def plot_metrics(filename: str, metrics: list, output_path: str = None): p, iterations, size, G = extract_info(filename) x = np.linspace(0, iterations, len(metrics)) plt.rc('text', usetex=True) plt.rc('font', family='serif') plt.figure(figsize=(8, 5)) plt.grid(True, alpha=0.3) plt.plot(x, metrics, label=f'p = {p}, N = {size}, G = {G}') plt.ylabel(r'$\rho$', fontsize=14) plt.xlabel('$t$', fontsize=14) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.legend(fontsize=13) if output_path is not None: plt.savefig(output_path, bbox_inches='tight') else: plt.show() def main(): parser = argparse.ArgumentParser(description='Plot positive edge density (rho)') parser.add_argument('--metrics-file', type=str, required=True, help='Path to calculated positive edge density') parser.add_argument('--output-figure', type=str, required=False, default=None, help='Where to save output figure') args = parser.parse_args() metrics = load_metrics(args.metrics_file) plot_metrics(args.metrics_file, metrics, args.output_figure) if __name__ == '__main__': main()
true
true
f7031adca49813ee2cc217720e95574a56a10092
12,014
py
Python
src/pretix/control/forms/organizer.py
inwwin/pretix
c3bfb57d3e46577f54c1a32c7a8b61df570c272e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/pretix/control/forms/organizer.py
inwwin/pretix
c3bfb57d3e46577f54c1a32c7a8b61df570c272e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/pretix/control/forms/organizer.py
inwwin/pretix
c3bfb57d3e46577f54c1a32c7a8b61df570c272e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
from decimal import Decimal from urllib.parse import urlparse from django import forms from django.conf import settings from django.core.exceptions import ValidationError from django.db.models import Q from django.utils.safestring import mark_safe from django.utils.translation import gettext_lazy as _, pgettext_lazy from django_scopes.forms import SafeModelMultipleChoiceField from pretix.api.models import WebHook from pretix.api.webhooks import get_all_webhook_events from pretix.base.forms import I18nModelForm, SettingsForm from pretix.base.forms.widgets import SplitDateTimePickerWidget from pretix.base.models import ( Device, EventMetaProperty, Gate, GiftCard, Organizer, Team, ) from pretix.control.forms import ExtFileField, SplitDateTimeField from pretix.control.forms.event import SafeEventMultipleChoiceField from pretix.multidomain.models import KnownDomain class OrganizerForm(I18nModelForm): error_messages = { 'duplicate_slug': _("This slug is already in use. Please choose a different one."), } class Meta: model = Organizer fields = ['name', 'slug'] def clean_slug(self): slug = self.cleaned_data['slug'] if Organizer.objects.filter(slug__iexact=slug).exists(): raise forms.ValidationError( self.error_messages['duplicate_slug'], code='duplicate_slug', ) return slug class OrganizerDeleteForm(forms.Form): error_messages = { 'slug_wrong': _("The slug you entered was not correct."), } slug = forms.CharField( max_length=255, label=_("Event slug"), ) def __init__(self, *args, **kwargs): self.organizer = kwargs.pop('organizer') super().__init__(*args, **kwargs) def clean_slug(self): slug = self.cleaned_data.get('slug') if slug != self.organizer.slug: raise forms.ValidationError( self.error_messages['slug_wrong'], code='slug_wrong', ) return slug class OrganizerUpdateForm(OrganizerForm): def __init__(self, *args, **kwargs): self.domain = kwargs.pop('domain', False) self.change_slug = kwargs.pop('change_slug', False) kwargs.setdefault('initial', {}) self.instance = 
kwargs['instance'] if self.domain and self.instance: initial_domain = self.instance.domains.first() if initial_domain: kwargs['initial'].setdefault('domain', initial_domain.domainname) super().__init__(*args, **kwargs) if not self.change_slug: self.fields['slug'].widget.attrs['readonly'] = 'readonly' if self.domain: self.fields['domain'] = forms.CharField( max_length=255, label=_('Custom domain'), required=False, help_text=_('You need to configure the custom domain in the webserver beforehand.') ) def clean_domain(self): d = self.cleaned_data['domain'] if d: if d == urlparse(settings.SITE_URL).hostname: raise ValidationError( _('You cannot choose the base domain of this installation.') ) if KnownDomain.objects.filter(domainname=d).exclude(organizer=self.instance.pk, event__isnull=True).exists(): raise ValidationError( _('This domain is already in use for a different event or organizer.') ) return d def clean_slug(self): if self.change_slug: return self.cleaned_data['slug'] return self.instance.slug def save(self, commit=True): instance = super().save(commit) if self.domain: current_domain = instance.domains.first() if self.cleaned_data['domain']: if current_domain and current_domain.domainname != self.cleaned_data['domain']: current_domain.delete() KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain']) elif not current_domain: KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain']) elif current_domain: current_domain.delete() instance.cache.clear() for ev in instance.events.all(): ev.cache.clear() return instance class EventMetaPropertyForm(forms.ModelForm): class Meta: model = EventMetaProperty fields = ['name', 'default', 'required', 'protected', 'allowed_values'] widgets = { 'default': forms.TextInput() } class TeamForm(forms.ModelForm): def __init__(self, *args, **kwargs): organizer = kwargs.pop('organizer') super().__init__(*args, **kwargs) self.fields['limit_events'].queryset = 
organizer.events.all().order_by( '-has_subevents', '-date_from' ) class Meta: model = Team fields = ['name', 'all_events', 'limit_events', 'can_create_events', 'can_change_teams', 'can_change_organizer_settings', 'can_manage_gift_cards', 'can_change_event_settings', 'can_change_items', 'can_view_orders', 'can_change_orders', 'can_view_vouchers', 'can_change_vouchers'] widgets = { 'limit_events': forms.CheckboxSelectMultiple(attrs={ 'data-inverse-dependency': '#id_all_events', 'class': 'scrolling-multiple-choice scrolling-multiple-choice-large', }), } field_classes = { 'limit_events': SafeEventMultipleChoiceField } def clean(self): data = super().clean() if self.instance.pk and not data['can_change_teams']: if not self.instance.organizer.teams.exclude(pk=self.instance.pk).filter( can_change_teams=True, members__isnull=False ).exists(): raise ValidationError(_('The changes could not be saved because there would be no remaining team with ' 'the permission to change teams and permissions.')) return data class GateForm(forms.ModelForm): def __init__(self, *args, **kwargs): kwargs.pop('organizer') super().__init__(*args, **kwargs) class Meta: model = Gate fields = ['name', 'identifier'] class DeviceForm(forms.ModelForm): def __init__(self, *args, **kwargs): organizer = kwargs.pop('organizer') super().__init__(*args, **kwargs) self.fields['limit_events'].queryset = organizer.events.all().order_by( '-has_subevents', '-date_from' ) self.fields['gate'].queryset = organizer.gates.all() def clean(self): d = super().clean() if not d['all_events'] and not d['limit_events']: raise ValidationError(_('Your device will not have access to anything, please select some events.')) return d class Meta: model = Device fields = ['name', 'all_events', 'limit_events', 'security_profile', 'gate'] widgets = { 'limit_events': forms.CheckboxSelectMultiple(attrs={ 'data-inverse-dependency': '#id_all_events', 'class': 'scrolling-multiple-choice scrolling-multiple-choice-large', }), } field_classes 
= { 'limit_events': SafeEventMultipleChoiceField } class OrganizerSettingsForm(SettingsForm): auto_fields = [ 'contact_mail', 'imprint_url', 'organizer_info_text', 'event_list_type', 'event_list_availability', 'organizer_homepage_text', 'organizer_link_back', 'organizer_logo_image_large', 'giftcard_length', 'giftcard_expiry_years', 'locales', 'region', 'event_team_provisioning', 'primary_color', 'theme_color_success', 'theme_color_danger', 'theme_color_background', 'theme_round_borders', 'primary_font' ] organizer_logo_image = ExtFileField( label=_('Header image'), ext_whitelist=(".png", ".jpg", ".gif", ".jpeg"), max_size=10 * 1024 * 1024, required=False, help_text=_('If you provide a logo image, we will by default not show your organization name ' 'in the page header. By default, we show your logo with a size of up to 1140x120 pixels. You ' 'can increase the size with the setting below. We recommend not using small details on the picture ' 'as it will be resized on smaller screens.') ) favicon = ExtFileField( label=_('Favicon'), ext_whitelist=(".ico", ".png", ".jpg", ".gif", ".jpeg"), required=False, max_size=1 * 1024 * 1024, help_text=_('If you provide a favicon, we will show it instead of the default pretix icon. 
' 'We recommend a size of at least 200x200px to accommodate most devices.') ) class WebHookForm(forms.ModelForm): events = forms.MultipleChoiceField( widget=forms.CheckboxSelectMultiple, label=pgettext_lazy('webhooks', 'Event types') ) def __init__(self, *args, **kwargs): organizer = kwargs.pop('organizer') super().__init__(*args, **kwargs) self.fields['limit_events'].queryset = organizer.events.all() self.fields['events'].choices = [ ( a.action_type, mark_safe('{} – <code>{}</code>'.format(a.verbose_name, a.action_type)) ) for a in get_all_webhook_events().values() ] if self.instance: self.fields['events'].initial = list(self.instance.listeners.values_list('action_type', flat=True)) class Meta: model = WebHook fields = ['target_url', 'enabled', 'all_events', 'limit_events'] widgets = { 'limit_events': forms.CheckboxSelectMultiple(attrs={ 'data-inverse-dependency': '#id_all_events' }), } field_classes = { 'limit_events': SafeModelMultipleChoiceField } class GiftCardCreateForm(forms.ModelForm): value = forms.DecimalField( label=_('Gift card value'), min_value=Decimal('0.00') ) def __init__(self, *args, **kwargs): self.organizer = kwargs.pop('organizer') initial = kwargs.pop('initial', {}) initial['expires'] = self.organizer.default_gift_card_expiry kwargs['initial'] = initial super().__init__(*args, **kwargs) def clean_secret(self): s = self.cleaned_data['secret'] if GiftCard.objects.filter( secret__iexact=s ).filter( Q(issuer=self.organizer) | Q(issuer__gift_card_collector_acceptance__collector=self.organizer) ).exists(): raise ValidationError( _('A gift card with the same secret already exists in your or an affiliated organizer account.') ) return s class Meta: model = GiftCard fields = ['secret', 'currency', 'testmode', 'expires', 'conditions'] field_classes = { 'expires': SplitDateTimeField } widgets = { 'expires': SplitDateTimePickerWidget, 'conditions': forms.Textarea(attrs={"rows": 2}) } class GiftCardUpdateForm(forms.ModelForm): class Meta: model = GiftCard 
fields = ['expires', 'conditions'] field_classes = { 'expires': SplitDateTimeField } widgets = { 'expires': SplitDateTimePickerWidget, 'conditions': forms.Textarea(attrs={"rows": 2}) }
35.128655
120
0.601049
"""Organizer-level admin forms for the pretix control panel.

Covers organizer CRUD, custom domains, meta properties, teams, gates,
devices, organizer settings, webhooks and gift cards.
"""
from decimal import Decimal
from urllib.parse import urlparse

from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django_scopes.forms import SafeModelMultipleChoiceField

from pretix.api.models import WebHook
from pretix.api.webhooks import get_all_webhook_events
from pretix.base.forms import I18nModelForm, SettingsForm
from pretix.base.forms.widgets import SplitDateTimePickerWidget
from pretix.base.models import (
    Device, EventMetaProperty, Gate, GiftCard, Organizer, Team,
)
from pretix.control.forms import ExtFileField, SplitDateTimeField
from pretix.control.forms.event import SafeEventMultipleChoiceField
from pretix.multidomain.models import KnownDomain


class OrganizerForm(I18nModelForm):
    """Create/edit form for an :class:`Organizer`.

    Rejects slugs that are already taken (case-insensitive).
    """
    error_messages = {
        'duplicate_slug': _("This slug is already in use. Please choose a different one."),
    }

    class Meta:
        model = Organizer
        fields = ['name', 'slug']

    def clean_slug(self):
        """Ensure the slug is globally unique among organizers (case-insensitive)."""
        slug = self.cleaned_data['slug']
        if Organizer.objects.filter(slug__iexact=slug).exists():
            raise forms.ValidationError(
                self.error_messages['duplicate_slug'],
                code='duplicate_slug',
            )
        return slug


class OrganizerDeleteForm(forms.Form):
    """Deletion confirmation: the user must retype the organizer's slug."""
    error_messages = {
        'slug_wrong': _("The slug you entered was not correct."),
    }
    slug = forms.CharField(
        max_length=255,
        label=_("Event slug"),
    )

    def __init__(self, *args, **kwargs):
        # The organizer to be deleted is injected by the view.
        self.organizer = kwargs.pop('organizer')
        super().__init__(*args, **kwargs)

    def clean_slug(self):
        """Reject the form unless the typed slug matches the organizer exactly."""
        slug = self.cleaned_data.get('slug')
        if slug != self.organizer.slug:
            raise forms.ValidationError(
                self.error_messages['slug_wrong'],
                code='slug_wrong',
            )
        return slug


class OrganizerUpdateForm(OrganizerForm):
    """Update form that can additionally manage a custom domain.

    ``domain=True`` adds a ``domain`` field backed by :class:`KnownDomain`;
    ``change_slug=True`` unlocks the otherwise read-only slug field.
    """

    def __init__(self, *args, **kwargs):
        self.domain = kwargs.pop('domain', False)
        self.change_slug = kwargs.pop('change_slug', False)
        kwargs.setdefault('initial', {})
        self.instance = kwargs['instance']
        if self.domain and self.instance:
            # Pre-fill the domain field from the organizer's first known domain.
            initial_domain = self.instance.domains.first()
            if initial_domain:
                kwargs['initial'].setdefault('domain', initial_domain.domainname)

        super().__init__(*args, **kwargs)
        if not self.change_slug:
            # Slug changes break URLs, so the field is read-only by default.
            self.fields['slug'].widget.attrs['readonly'] = 'readonly'
        if self.domain:
            self.fields['domain'] = forms.CharField(
                max_length=255,
                label=_('Custom domain'),
                required=False,
                help_text=_('You need to configure the custom domain in the webserver beforehand.')
            )

    def clean_domain(self):
        """Disallow the installation's base domain and domains used elsewhere."""
        d = self.cleaned_data['domain']
        if d:
            if d == urlparse(settings.SITE_URL).hostname:
                raise ValidationError(
                    _('You cannot choose the base domain of this installation.')
                )
            # An organizer-level domain of this organizer itself is fine;
            # anything else (other organizer, or event-level) is a conflict.
            if KnownDomain.objects.filter(domainname=d).exclude(organizer=self.instance.pk, event__isnull=True).exists():
                raise ValidationError(
                    _('This domain is already in use for a different event or organizer.')
                )
        return d

    def clean_slug(self):
        """Keep the stored slug unless slug changes were explicitly allowed."""
        if self.change_slug:
            return self.cleaned_data['slug']
        return self.instance.slug

    def save(self, commit=True):
        """Save the organizer and reconcile its KnownDomain with the input.

        Replaces/creates/deletes the domain record to match the submitted
        value, then clears organizer and event caches so stale domain
        routing is not served.
        """
        instance = super().save(commit)

        if self.domain:
            current_domain = instance.domains.first()
            if self.cleaned_data['domain']:
                if current_domain and current_domain.domainname != self.cleaned_data['domain']:
                    # Domain changed: drop the old record before adding the new one.
                    current_domain.delete()
                    KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
                elif not current_domain:
                    KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
            elif current_domain:
                # Field cleared: remove the custom domain entirely.
                current_domain.delete()
            instance.cache.clear()
            for ev in instance.events.all():
                ev.cache.clear()

        return instance


class EventMetaPropertyForm(forms.ModelForm):
    """Edit form for a single event meta property definition."""
    class Meta:
        model = EventMetaProperty
        fields = ['name', 'default', 'required', 'protected', 'allowed_values']
        widgets = {
            'default': forms.TextInput()
        }


class TeamForm(forms.ModelForm):
    """Team permissions form, scoped to one organizer's events."""
    def __init__(self, *args, **kwargs):
        organizer = kwargs.pop('organizer')
        super().__init__(*args, **kwargs)
        # Only this organizer's events may be selected as the team's scope.
        self.fields['limit_events'].queryset = organizer.events.all().order_by(
            '-has_subevents', '-date_from'
        )

    class Meta:
        model = Team
        fields = ['name', 'all_events', 'limit_events', 'can_create_events',
                  'can_change_teams', 'can_change_organizer_settings',
                  'can_manage_gift_cards',
                  'can_change_event_settings', 'can_change_items',
                  'can_view_orders', 'can_change_orders',
                  'can_view_vouchers', 'can_change_vouchers']
        widgets = {
            'limit_events': forms.CheckboxSelectMultiple(attrs={
                'data-inverse-dependency': '#id_all_events',
                'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
            }),
        }
        field_classes = {
            'limit_events': SafeEventMultipleChoiceField
        }

    def clean(self):
        """Prevent lock-out: at least one non-empty team must keep ``can_change_teams``."""
        data = super().clean()

        if self.instance.pk and not data['can_change_teams']:
            if not self.instance.organizer.teams.exclude(pk=self.instance.pk).filter(
                    can_change_teams=True, members__isnull=False
            ).exists():
                raise ValidationError(_('The changes could not be saved because there would be no remaining team with '
                                        'the permission to change teams and permissions.'))

        return data


class GateForm(forms.ModelForm):
    """Edit form for a check-in gate."""
    def __init__(self, *args, **kwargs):
        # 'organizer' is accepted for a uniform view interface but unused here
        # (presumably scoping happens elsewhere — verify against callers).
        kwargs.pop('organizer')
        super().__init__(*args, **kwargs)

    class Meta:
        model = Gate
        fields = ['name', 'identifier']


class DeviceForm(forms.ModelForm):
    """Edit form for a device, scoped to one organizer's events and gates."""
    def __init__(self, *args, **kwargs):
        organizer = kwargs.pop('organizer')
        super().__init__(*args, **kwargs)
        self.fields['limit_events'].queryset = organizer.events.all().order_by(
            '-has_subevents', '-date_from'
        )
        self.fields['gate'].queryset = organizer.gates.all()

    def clean(self):
        """Require either all-events access or at least one selected event."""
        d = super().clean()
        if not d['all_events'] and not d['limit_events']:
            raise ValidationError(_('Your device will not have access to anything, please select some events.'))

        return d

    class Meta:
        model = Device
        fields = ['name', 'all_events', 'limit_events', 'security_profile', 'gate']
        widgets = {
            'limit_events': forms.CheckboxSelectMultiple(attrs={
                'data-inverse-dependency': '#id_all_events',
                'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
            }),
        }
        field_classes = {
            'limit_events': SafeEventMultipleChoiceField
        }


class OrganizerSettingsForm(SettingsForm):
    """Organizer-wide settings; most fields are generated from ``auto_fields``."""
    auto_fields = [
        'contact_mail', 'imprint_url', 'organizer_info_text', 'event_list_type',
        'event_list_availability', 'organizer_homepage_text', 'organizer_link_back',
        'organizer_logo_image_large', 'giftcard_length', 'giftcard_expiry_years',
        'locales', 'region', 'event_team_provisioning', 'primary_color',
        'theme_color_success', 'theme_color_danger', 'theme_color_background',
        'theme_round_borders', 'primary_font'
    ]

    # Uploaded header logo, capped at 10 MiB.
    organizer_logo_image = ExtFileField(
        label=_('Header image'),
        ext_whitelist=(".png", ".jpg", ".gif", ".jpeg"),
        max_size=10 * 1024 * 1024,
        required=False,
        help_text=_('If you provide a logo image, we will by default not show your organization name in the page header. By default, we show your logo with a size of up to 1140x120 pixels. You can increase the size with the setting below. We recommend not using small details on the picture as it will be resized on smaller screens.')
    )
    # Site favicon, capped at 1 MiB.
    favicon = ExtFileField(
        label=_('Favicon'),
        ext_whitelist=(".ico", ".png", ".jpg", ".gif", ".jpeg"),
        required=False,
        max_size=1 * 1024 * 1024,
        help_text=_('If you provide a favicon, we will show it instead of the default pretix icon. We recommend a size of at least 200x200px to accommodate most devices.')
    )


class WebHookForm(forms.ModelForm):
    """Webhook configuration: target URL, event scope and subscribed event types."""
    events = forms.MultipleChoiceField(
        widget=forms.CheckboxSelectMultiple,
        label=pgettext_lazy('webhooks', 'Event types')
    )

    def __init__(self, *args, **kwargs):
        organizer = kwargs.pop('organizer')
        super().__init__(*args, **kwargs)
        self.fields['limit_events'].queryset = organizer.events.all()
        # Build the choice list from all registered webhook event types,
        # showing the action type code alongside the human-readable name.
        self.fields['events'].choices = [
            (
                a.action_type,
                mark_safe('{} – <code>{}</code>'.format(a.verbose_name, a.action_type))
            ) for a in get_all_webhook_events().values()
        ]
        if self.instance:
            # Pre-select the event types this webhook already listens to.
            self.fields['events'].initial = list(self.instance.listeners.values_list('action_type', flat=True))

    class Meta:
        model = WebHook
        fields = ['target_url', 'enabled', 'all_events', 'limit_events']
        widgets = {
            'limit_events': forms.CheckboxSelectMultiple(attrs={
                'data-inverse-dependency': '#id_all_events'
            }),
        }
        field_classes = {
            'limit_events': SafeModelMultipleChoiceField
        }


class GiftCardCreateForm(forms.ModelForm):
    """Creation form for a gift card, including its initial monetary value."""
    value = forms.DecimalField(
        label=_('Gift card value'),
        min_value=Decimal('0.00')
    )

    def __init__(self, *args, **kwargs):
        self.organizer = kwargs.pop('organizer')
        initial = kwargs.pop('initial', {})
        # Default expiry date comes from the organizer's settings.
        initial['expires'] = self.organizer.default_gift_card_expiry
        kwargs['initial'] = initial
        super().__init__(*args, **kwargs)

    def clean_secret(self):
        """Reject secrets already used by this organizer or an affiliated one.

        Affiliated means any organizer that accepts this organizer as a
        gift card collector.
        """
        s = self.cleaned_data['secret']
        if GiftCard.objects.filter(
                secret__iexact=s
        ).filter(
            Q(issuer=self.organizer) | Q(issuer__gift_card_collector_acceptance__collector=self.organizer)
        ).exists():
            raise ValidationError(
                _('A gift card with the same secret already exists in your or an affiliated organizer account.')
            )
        return s

    class Meta:
        model = GiftCard
        fields = ['secret', 'currency', 'testmode', 'expires', 'conditions']
        field_classes = {
            'expires': SplitDateTimeField
        }
        widgets = {
            'expires': SplitDateTimePickerWidget,
            'conditions': forms.Textarea(attrs={"rows": 2})
        }


class GiftCardUpdateForm(forms.ModelForm):
    """Update form for an existing gift card (expiry and conditions only)."""
    class Meta:
        model = GiftCard
        fields = ['expires', 'conditions']
        field_classes = {
            'expires': SplitDateTimeField
        }
        widgets = {
            'expires': SplitDateTimePickerWidget,
            'conditions': forms.Textarea(attrs={"rows": 2})
        }
true
true
f7031ae29ebda06156f8cc75f97ef136dd445efa
2,109
py
Python
setup.py
alphamodel/yagmail
e9efc4a68eee00f76929e72c55fb00dda0c1a57f
[ "MIT" ]
null
null
null
setup.py
alphamodel/yagmail
e9efc4a68eee00f76929e72c55fb00dda0c1a57f
[ "MIT" ]
null
null
null
setup.py
alphamodel/yagmail
e9efc4a68eee00f76929e72c55fb00dda0c1a57f
[ "MIT" ]
null
null
null
"""Packaging script for yagmail (Yet Another GMAIL client)."""
from setuptools import setup
from setuptools import find_packages

# Read the long description from the README. The explicit encoding avoids a
# UnicodeDecodeError on platforms whose locale default encoding is not UTF-8
# (see PEP 597); without it the build is locale-dependent.
with open('README.rst', encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

# Version components, joined into a single "MAJOR.MINOR.MICRO" string.
MAJOR_VERSION = '0'
MINOR_VERSION = '11'
MICRO_VERSION = '214'
VERSION = "{}.{}.{}".format(MAJOR_VERSION, MINOR_VERSION, MICRO_VERSION)

setup(name='yagmail',
      version=VERSION,
      description='Yet Another GMAIL client',
      long_description=LONG_DESCRIPTION,
      url='https://github.com/kootenpv/yagmail',
      author='Pascal van Kooten',
      author_email='kootenpv@gmail.com',
      license='MIT',
      # Optional keyring-based credential storage: `pip install yagmail[all]`.
      extras_require={
          "all": ["keyring"]
      },
      keywords='email mime automatic html attachment',
      # Installs a `yagmail` console command.
      entry_points={
          'console_scripts': ['yagmail = yagmail.__main__:main']
      },
      classifiers=[
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: Customer Service',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: MIT License',
          'Operating System :: Microsoft',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Unix',
          'Operating System :: POSIX',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Topic :: Communications :: Email',
          'Topic :: Communications :: Email :: Email Clients (MUA)',
          'Topic :: Software Development',
          'Topic :: Software Development :: Build Tools',
          'Topic :: Software Development :: Debuggers',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: System :: Software Distribution',
          'Topic :: System :: Systems Administration',
          'Topic :: Utilities'
      ],
      packages=find_packages(),
      zip_safe=False,
      platforms='any')
37.660714
73
0.593646
from setuptools import setup from setuptools import find_packages with open('README.rst') as f: LONG_DESCRIPTION = f.read() MAJOR_VERSION = '0' MINOR_VERSION = '11' MICRO_VERSION = '214' VERSION = "{}.{}.{}".format(MAJOR_VERSION, MINOR_VERSION, MICRO_VERSION) setup(name='yagmail', version=VERSION, description='Yet Another GMAIL client', long_description=LONG_DESCRIPTION, url='https://github.com/kootenpv/yagmail', author='Pascal van Kooten', author_email='kootenpv@gmail.com', license='MIT', extras_require={ "all": ["keyring"] }, keywords='email mime automatic html attachment', entry_points={ 'console_scripts': ['yagmail = yagmail.__main__:main'] }, classifiers=[ 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Customer Service', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: Microsoft', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Unix', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Communications :: Email', 'Topic :: Communications :: Email :: Email Clients (MUA)', 'Topic :: Software Development', 'Topic :: Software Development :: Build Tools', 'Topic :: Software Development :: Debuggers', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Software Distribution', 'Topic :: System :: Systems Administration', 'Topic :: Utilities' ], packages=find_packages(), zip_safe=False, platforms='any')
true
true
f7031c33be2f4d13a9a63447e0ba6d1507b76692
1,334
py
Python
examples/csrf-protection/app.py
TheNcar/starlette-wtf
f96ab6f33a7ffcb576663f996dbb401ab44f3d78
[ "MIT" ]
48
2020-02-03T14:16:34.000Z
2022-02-24T08:27:06.000Z
examples/csrf-protection/app.py
TheNcar/starlette-wtf
f96ab6f33a7ffcb576663f996dbb401ab44f3d78
[ "MIT" ]
10
2020-04-20T21:20:02.000Z
2022-01-29T16:58:44.000Z
examples/csrf-protection/app.py
TheNcar/starlette-wtf
f96ab6f33a7ffcb576663f996dbb401ab44f3d78
[ "MIT" ]
5
2020-04-23T12:39:32.000Z
2022-03-20T08:14:19.000Z
"""Minimal Starlette app demonstrating CSRF protection with starlette-wtf."""
from jinja2 import Template
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import PlainTextResponse, HTMLResponse
from starlette_wtf import StarletteForm, CSRFProtectMiddleware, csrf_protect
from wtforms import StringField
from wtforms.validators import DataRequired


class MyForm(StarletteForm):
    """Demo form with a single required ``name`` field."""

    name = StringField('name', validators=[DataRequired()])


# Jinja2 page; {{ form.csrf_token }} renders the hidden anti-CSRF field.
template = Template('''
<html>
<body>
<form method="post" novalidate>
{{ form.csrf_token }}
<div>
{{ form.name(placeholder='Name') }}
{% if form.name.errors -%}
<span>{{ form.name.errors[0] }}</span>
{%- endif %}
</div>
<button type="submit">Submit</button>
</form>
</body>
</html>
''')


# Session middleware stores the CSRF token; CSRFProtectMiddleware signs it.
app = Starlette(middleware=[
    Middleware(SessionMiddleware, secret_key='***REPLACEME1***'),
    Middleware(CSRFProtectMiddleware, csrf_secret='***REPLACEME2***')
])


@app.route('/', methods=['GET', 'POST'])
@csrf_protect
async def index(request):
    """GET|POST /: render the form page, or reply SUCCESS on a valid submit."""
    form = await MyForm.from_formdata(request)

    if not form.validate_on_submit():
        # Initial GET, validation failure, or re-render after errors.
        return HTMLResponse(template.render(form=form))

    return PlainTextResponse('SUCCESS')
26.156863
76
0.689655
from jinja2 import Template from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.sessions import SessionMiddleware from starlette.responses import PlainTextResponse, HTMLResponse from starlette_wtf import StarletteForm, CSRFProtectMiddleware, csrf_protect from wtforms import StringField from wtforms.validators import DataRequired class MyForm(StarletteForm): name = StringField('name', validators=[DataRequired()]) template = Template(''' <html> <body> <form method="post" novalidate> {{ form.csrf_token }} <div> {{ form.name(placeholder='Name') }} {% if form.name.errors -%} <span>{{ form.name.errors[0] }}</span> {%- endif %} </div> <button type="submit">Submit</button> </form> </body> </html> ''') app = Starlette(middleware=[ Middleware(SessionMiddleware, secret_key='***REPLACEME1***'), Middleware(CSRFProtectMiddleware, csrf_secret='***REPLACEME2***') ]) @app.route('/', methods=['GET', 'POST']) @csrf_protect async def index(request): form = await MyForm.from_formdata(request) if form.validate_on_submit(): return PlainTextResponse('SUCCESS') html = template.render(form=form) return HTMLResponse(html)
true
true
f7031cafd7d51d240c36a866bfbb5dc4718e84d1
1,341
py
Python
youtube_contest.py
all0ws/cryze-peple
efd2bd2bcfc9c22bac78234e9dc191a17ca9e19d
[ "CC0-1.0" ]
null
null
null
youtube_contest.py
all0ws/cryze-peple
efd2bd2bcfc9c22bac78234e9dc191a17ca9e19d
[ "CC0-1.0" ]
null
null
null
youtube_contest.py
all0ws/cryze-peple
efd2bd2bcfc9c22bac78234e9dc191a17ca9e19d
[ "CC0-1.0" ]
null
null
null
#подключение библиотек from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel,QVBoxLayout,QHBoxLayout, QMessageBox, QRadioButton #создание приложения и главного окна app=QApplication([]) main_win =QWidget() main_win.setWindowTitle('Конкурс от Crazy People') question =QLabel("В каком году канал получил золотую кнопку от YouTube?") btn_answer1 =QRadioButton('2005') btn_answer2 =QRadioButton('2010') btn_answer3 =QRadioButton('2015') btn_answer4 =QRadioButton('2020') layout_main=QVBoxLayout() h1=QHBoxLayout() h2=QHBoxLayout() h3=QHBoxLayout() h1.addWidget(question,alignment =Qt.AlignCenter) h2.addWidget(btn_answer1,alignment =Qt.AlignCenter) h2.addWidget(btn_answer2,alignment =Qt.AlignCenter) h3.addWidget(btn_answer3,alignment =Qt.AlignCenter) h3.addWidget(btn_answer4,alignment =Qt.AlignCenter) layout_main.addLayout(h1) layout_main.addLayout(h2) layout_main.addLayout(h3) main_win.setLayout(layout_main) def win (): win =QMessageBox() win.setText('Верно!') win.exec_() def lose(): lose =QMessageBox() lose.setText('«Нет, в 2015 году. Вы выиграли фирменный плакат') lose.exec_() btn_answer1.clicked.connect(lose) btn_answer2.clicked.connect(lose) btn_answer3.clicked.connect(win) btn_answer4.clicked.connect(lose) main_win.show() app.exec_()
26.82
121
0.780015
from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel,QVBoxLayout,QHBoxLayout, QMessageBox, QRadioButton app=QApplication([]) main_win =QWidget() main_win.setWindowTitle('Конкурс от Crazy People') question =QLabel("В каком году канал получил золотую кнопку от YouTube?") btn_answer1 =QRadioButton('2005') btn_answer2 =QRadioButton('2010') btn_answer3 =QRadioButton('2015') btn_answer4 =QRadioButton('2020') layout_main=QVBoxLayout() h1=QHBoxLayout() h2=QHBoxLayout() h3=QHBoxLayout() h1.addWidget(question,alignment =Qt.AlignCenter) h2.addWidget(btn_answer1,alignment =Qt.AlignCenter) h2.addWidget(btn_answer2,alignment =Qt.AlignCenter) h3.addWidget(btn_answer3,alignment =Qt.AlignCenter) h3.addWidget(btn_answer4,alignment =Qt.AlignCenter) layout_main.addLayout(h1) layout_main.addLayout(h2) layout_main.addLayout(h3) main_win.setLayout(layout_main) def win (): win =QMessageBox() win.setText('Верно!') win.exec_() def lose(): lose =QMessageBox() lose.setText('«Нет, в 2015 году. Вы выиграли фирменный плакат') lose.exec_() btn_answer1.clicked.connect(lose) btn_answer2.clicked.connect(lose) btn_answer3.clicked.connect(win) btn_answer4.clicked.connect(lose) main_win.show() app.exec_()
true
true
f7031d0fdca75854293606c9e859fc97fd60f20a
2,764
py
Python
Autoplay/action_scripts/collection_scripts/ouch_script.py
Randy-Hodges/BTD6-Autoplay
6d2ac9685b5f55848decc285918f8ede465c3a63
[ "MIT" ]
null
null
null
Autoplay/action_scripts/collection_scripts/ouch_script.py
Randy-Hodges/BTD6-Autoplay
6d2ac9685b5f55848decc285918f8ede465c3a63
[ "MIT" ]
null
null
null
Autoplay/action_scripts/collection_scripts/ouch_script.py
Randy-Hodges/BTD6-Autoplay
6d2ac9685b5f55848decc285918f8ede465c3a63
[ "MIT" ]
null
null
null
from action_class import Action

# Shorthand aliases for Action command kinds.
place = 'place'
upgrade = 'upgrade'
target = 'target'
# Aliases for the three tower upgrade paths.
top = 'upgrade 1'
middle = 'upgrade 2'
bottom = 'upgrade 3'

# Scripted build order for the "Ouch" map: place towers, start the game,
# then upgrade in a fixed sequence. The inline x-y-z comments track each
# tower's upgrade state after the step (top-middle-bottom tiers).
ouch_script = [
    Action(place, name='sub1', action='sub', position=(708, 540)),    # Sub
    Action(place, name='sub2', action='sub', position=(984, 545)),    # Sub2
    Action('start', action='start', cost=0),
    Action(place, name='dart1', action='dart', position=(303, 671)),  # Dart
    Action(place, name='Psi', action='Hero', position=(546, 309)),    # Psi
    Action(target, name = 'Psi', action='Strong'),   # Psi targets Strong
    Action(upgrade, name='sub1', action=bottom),     # 001
    Action(upgrade, name='sub2', action=bottom),     # 001
    Action(upgrade, name='sub1', action=middle),     # 011
    Action(upgrade, name='sub2', action=top),        # 101
    Action(upgrade, name='sub1', action=middle),     # 021
    Action(upgrade, name='sub2', action=top),        # 201
    Action(upgrade, name='sub1', action=bottom),     # 022
    Action(upgrade, name='sub2', action=bottom),     # 202
    Action(place, name='alch1', action='alch', position=(1009, 411)),  # Alchemist
    Action(target, name = 'alch1', action='Strong'),  # Strong
    Action(upgrade, name='alch1', action=top),       # 100
    Action(upgrade, name='alch1', action=top),       # 200
    Action(upgrade, name='sub2', action=bottom),     # 203 Sub2
    Action(place, name='ace1', action='ace', position=(845, 310)),  # Ace
    Action(upgrade, name='ace1', action= bottom),    # 001
    Action(upgrade, name='ace1', action=bottom),     # 002
    Action(upgrade, name='ace1', action=bottom),     # 003
    Action(place, name='village1', action='Village', position=(990, 295)),  # Village
    Action(upgrade, name='village1', action= middle),  # 010
    Action(upgrade, name='village1', action= middle),  # 020
    Action(upgrade, name='ace1', action=top),        # 103 Ace
    Action(upgrade, name='ace1', action=top),        # 203
    Action(upgrade, name='sub2', action=bottom),     # 204 Sub2
    Action(upgrade, name='sub1', action=middle),     # 023 Sub2
    Action(upgrade, name='alch1', action=top),       # 300 Alch
    Action(upgrade, name='alch1', action=top),       # 400
    Action(upgrade, name='alch1', action=bottom),    # 401
    Action(upgrade, name='ace1', action=bottom),     # 204 Ace
    Action(place, name='sniper1', action='sniper', position=(85, 676)),  # Sniper
    Action(upgrade, name='sniper1', action= top),    # 100
    Action(target, name = 'sniper1', action='Strong'),
    Action(upgrade, name='sniper1', action=top),     # 200
    Action(upgrade, name='sniper1', action=top),     # 300
    Action(upgrade, name='sniper1', action=top),     # 400
    Action(upgrade, name='sniper1', action=bottom),  # 401
    Action(upgrade, name='sniper1', action=bottom),  # 402
    Action('finish', action='finish', cost=0)
]
37.351351
84
0.641823
from action_class import Action place = 'place' upgrade = 'upgrade' target = 'target' top = 'upgrade 1' middle = 'upgrade 2' bottom = 'upgrade 3' ouch_script = [ Action(place, name='sub1', action='sub', position=(708, 540)), Action(place, name='sub2', action='sub', position=(984, 545)), Action('start', action='start', cost=0), Action(place, name='dart1', action='dart', position=(303, 671)), Action(place, name='Psi', action='Hero', position=(546, 309)), Action(target, name = 'Psi', action='Strong'), Action(upgrade, name='sub1', action=bottom), Action(upgrade, name='sub2', action=bottom), Action(upgrade, name='sub1', action=middle), Action(upgrade, name='sub2', action=top), Action(upgrade, name='sub1', action=middle), Action(upgrade, name='sub2', action=top), Action(upgrade, name='sub1', action=bottom), Action(upgrade, name='sub2', action=bottom), Action(place, name='alch1', action='alch', position=(1009, 411)), Action(target, name = 'alch1', action='Strong'), Action(upgrade, name='alch1', action=top), Action(upgrade, name='alch1', action=top), Action(upgrade, name='sub2', action=bottom), Action(place, name='ace1', action='ace', position=(845, 310)), Action(upgrade, name='ace1', action= bottom), Action(upgrade, name='ace1', action=bottom), Action(upgrade, name='ace1', action=bottom), Action(place, name='village1', action='Village', position=(990, 295)), Action(upgrade, name='village1', action= middle), Action(upgrade, name='village1', action= middle), Action(upgrade, name='ace1', action=top), Action(upgrade, name='ace1', action=top), Action(upgrade, name='sub2', action=bottom), Action(upgrade, name='sub1', action=middle), Action(upgrade, name='alch1', action=top), Action(upgrade, name='alch1', action=top), Action(upgrade, name='alch1', action=bottom), Action(upgrade, name='ace1', action=bottom), Action(place, name='sniper1', action='sniper', position=(85, 676)), Action(upgrade, name='sniper1', action= top), Action(target, name = 'sniper1', action='Strong'), 
Action(upgrade, name='sniper1', action=top), Action(upgrade, name='sniper1', action=top), Action(upgrade, name='sniper1', action=top), Action(upgrade, name='sniper1', action=bottom), Action(upgrade, name='sniper1', action=bottom), Action('finish', action='finish', cost=0) ]
true
true
f7031df14237fdbfb39aadf27251e92115266ab0
737
py
Python
Leccion 8 Listas y diccionarios anidados/listAndDicts.py
Meluiscruz/Notas-de-Curso-Intermedio-de-Python
1def9edffd63f7283d133c393a80e61abb12b25d
[ "MIT" ]
1
2021-06-06T01:40:16.000Z
2021-06-06T01:40:16.000Z
Leccion 8 Listas y diccionarios anidados/listAndDicts.py
Meluiscruz/Notas-de-Curso-Intermedio-de-Python
1def9edffd63f7283d133c393a80e61abb12b25d
[ "MIT" ]
null
null
null
Leccion 8 Listas y diccionarios anidados/listAndDicts.py
Meluiscruz/Notas-de-Curso-Intermedio-de-Python
1def9edffd63f7283d133c393a80e61abb12b25d
[ "MIT" ]
null
null
null
def run():
    """Demonstrate nested data structures by printing their contents.

    Prints every key/value pair of a dict-of-lists, then every pair of
    each dict inside a list-of-dicts, in the format ``key - value``.
    """
    # Simple heterogeneous examples (illustration only; never printed).
    sample_list = [1, "Hello", True, 4.5]
    sample_dict = {"firstname": "Facundo", "lastname": "Garcia"}

    # A list whose elements are dictionaries.
    people = [
        {"firstname": "Facundo", "lastname": "Garcia"},
        {"firstname": "Miguel", "lastname": "Torres"},
        {"firstname": "José", "lastname": "Rodelo"},
        {"firstname": "Susana", "lastname": "Martinez"},
        {"firstname": "Luis", "lastname": "Cruz"},
    ]

    # A dictionary whose values are lists.
    numbers = {
        "naturalNums": [1, 2, 3, 4, 5],
        "integerNums": [-1, -2, 0, 1, 2],
        "floatingNums": [1.1, 4.5, 6.43],
    }

    for key, value in numbers.items():
        print(key, "-", value)

    for person in people:
        for key, value in person.items():
            print(key, "-", value)


if __name__ == '__main__':
    run()
27.296296
58
0.506106
def run(): my_list = [1, "Hello", True, 4.5] my_dict = {"firstname":"Facundo", "lastname":"Garcia"} superList = [ {"firstname":"Facundo", "lastname":"Garcia"}, {"firstname":"Miguel", "lastname":"Torres"}, {"firstname":"José", "lastname":"Rodelo"}, {"firstname":"Susana", "lastname":"Martinez"}, {"firstname":"Luis", "lastname":"Cruz"} ] superDict = { "naturalNums": [1,2,3,4,5], "integerNums": [-1,-2,0,1,2], "floatingNums": [1.1, 4.5, 6.43] } for k, v in superDict.items(): print(k, "-", v) for innerDict in superList: for k, v in innerDict.items(): print(k, "-", v) if __name__ == '__main__': run()
true
true
f7031e134039488bf08fe651cc6d095e677304a2
5,279
py
Python
code/faciesplot.py
elephantscale/facies
ea78a4917ebb5dbbe478b9fc27200c67b6e5576f
[ "MIT" ]
null
null
null
code/faciesplot.py
elephantscale/facies
ea78a4917ebb5dbbe478b9fc27200c67b6e5576f
[ "MIT" ]
3
2019-12-05T20:26:52.000Z
2019-12-05T20:30:13.000Z
code/faciesplot.py
elephantscale/facies
ea78a4917ebb5dbbe478b9fc27200c67b6e5576f
[ "MIT" ]
3
2019-12-09T04:36:47.000Z
2019-12-09T15:26:04.000Z
"""Plotting helpers for facies classification of well-log data.

`logs` arguments are assumed to be pandas DataFrames with columns
Depth, GR, ILD_log10, DeltaPHI, PHIND, PE, Facies and WellName —
inferred from the attribute access below; verify against callers.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable

#Key:
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D','PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']

#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
    facies_color_map[label] = facies_colors[ind]


def label_facies(row, labels):
    """Return the label for a row's 1-based integer 'Facies' code."""
    return labels[ row['Facies'] -1]


def make_facies_log_plot(logs, facies_colors):
    """Plot five log curves plus a facies strip versus depth for one well.

    Columns left to right: GR, ILD_log10, DeltaPHI, PHIND, PE, and the
    facies column rendered as a color-mapped image with a labeled colorbar.
    """
    #make sure logs are sorted by depth
    logs = logs.sort_values(by='Depth')
    cmap_facies = colors.ListedColormap(
        facies_colors[0:len(facies_colors)], 'indexed')

    ztop=logs.Depth.min(); zbot=logs.Depth.max()

    # Repeat the 1-D facies vector 100 times horizontally so imshow draws
    # a visible strip rather than a 1-pixel-wide column.
    cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)

    f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
    ax[0].plot(logs.GR, logs.Depth, '-g')
    ax[1].plot(logs.ILD_log10, logs.Depth, '-')
    ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
    ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
    ax[4].plot(logs.PE, logs.Depth, '-', color='black')
    # vmin/vmax pin the colormap to the 9 facies codes (1..9).
    im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
                    cmap=cmap_facies,vmin=1,vmax=9)

    # Attach the colorbar in a dedicated axis to the right of the strip.
    divider = make_axes_locatable(ax[5])
    cax = divider.append_axes("right", size="20%", pad=0.05)
    cbar=plt.colorbar(im, cax=cax)
    # The 17-space join spreads the 9 labels along the colorbar height.
    cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', 'SiSh', ' MS ', ' WS ', ' D ', ' PS ', ' BS ']))
    cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')

    # Shared y-range, inverted so depth increases downward.
    for i in range(len(ax)-1):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)

    ax[0].set_xlabel("GR")
    ax[0].set_xlim(logs.GR.min(),logs.GR.max())
    ax[1].set_xlabel("ILD_log10")
    ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
    ax[2].set_xlabel("DeltaPHI")
    ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
    ax[3].set_xlabel("PHIND")
    ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
    ax[4].set_xlabel("PE")
    ax[4].set_xlim(logs.PE.min(),logs.PE.max())
    ax[5].set_xlabel('Facies')

    # Hide redundant tick labels on all but the leftmost axis.
    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
    ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])
    f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)


def compare_facies_plot(logs, compadre, facies_colors):
    """plot the facies plot as a function of depth for both the
    prediction and the actual lithofacies labels.

    Identical layout to make_facies_log_plot, plus a seventh column that
    shows the facies stored in the `compadre` column (e.g. a prediction)
    next to the ground-truth 'Facies' strip.
    """
    #make sure logs are sorted by depth
    logs = logs.sort_values(by='Depth')
    cmap_facies = colors.ListedColormap(
        facies_colors[0:len(facies_colors)], 'indexed')

    ztop=logs.Depth.min(); zbot=logs.Depth.max()

    # Widen both 1-D facies vectors into drawable strips (see above).
    cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
    cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)

    f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))
    ax[0].plot(logs.GR, logs.Depth, '-g')
    ax[1].plot(logs.ILD_log10, logs.Depth, '-')
    ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
    ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
    ax[4].plot(logs.PE, logs.Depth, '-', color='black')
    im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto',
                       cmap=cmap_facies,vmin=1,vmax=9)
    im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',
                       cmap=cmap_facies,vmin=1,vmax=9)

    divider = make_axes_locatable(ax[6])
    cax = divider.append_axes("right", size="20%", pad=0.05)
    cbar=plt.colorbar(im2, cax=cax)
    cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', 'SiSh', ' MS ', ' WS ', ' D ', ' PS ', ' BS ']))
    cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')

    # Only the five curve axes get the shared grid/limits treatment; the
    # two facies strips keep imshow's own extent.
    for i in range(len(ax)-2):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)

    ax[0].set_xlabel("GR")
    ax[0].set_xlim(logs.GR.min(),logs.GR.max())
    ax[1].set_xlabel("ILD_log10")
    ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
    ax[2].set_xlabel("DeltaPHI")
    ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
    ax[3].set_xlabel("PHIND")
    ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
    ax[4].set_xlabel("PE")
    ax[4].set_xlim(logs.PE.min(),logs.PE.max())
    ax[5].set_xlabel('Facies')
    ax[6].set_xlabel(compadre)

    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
    ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])
    ax[6].set_xticklabels([])
    f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)
37.978417
112
0.603523
import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors from mpl_toolkits.axes_grid1 import make_axes_locatable facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D','PS', 'BS'] facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] facies_color_map = {} for ind, label in enumerate(facies_labels): facies_color_map[label] = facies_colors[ind] def label_facies(row, labels): return labels[ row['Facies'] -1] def make_facies_log_plot(logs, facies_colors): logs = logs.sort_values(by='Depth') cmap_facies = colors.ListedColormap( facies_colors[0:len(facies_colors)], 'indexed') ztop=logs.Depth.min(); zbot=logs.Depth.max() cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1) f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12)) ax[0].plot(logs.GR, logs.Depth, '-g') ax[1].plot(logs.ILD_log10, logs.Depth, '-') ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5') ax[3].plot(logs.PHIND, logs.Depth, '-', color='r') ax[4].plot(logs.PE, logs.Depth, '-', color='black') im=ax[5].imshow(cluster, interpolation='none', aspect='auto', cmap=cmap_facies,vmin=1,vmax=9) divider = make_axes_locatable(ax[5]) cax = divider.append_axes("right", size="20%", pad=0.05) cbar=plt.colorbar(im, cax=cax) cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', 'SiSh', ' MS ', ' WS ', ' D ', ' PS ', ' BS '])) cbar.set_ticks(range(0,1)); cbar.set_ticklabels('') for i in range(len(ax)-1): ax[i].set_ylim(ztop,zbot) ax[i].invert_yaxis() ax[i].grid() ax[i].locator_params(axis='x', nbins=3) ax[0].set_xlabel("GR") ax[0].set_xlim(logs.GR.min(),logs.GR.max()) ax[1].set_xlabel("ILD_log10") ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max()) ax[2].set_xlabel("DeltaPHI") ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max()) ax[3].set_xlabel("PHIND") ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max()) ax[4].set_xlabel("PE") ax[4].set_xlim(logs.PE.min(),logs.PE.max()) ax[5].set_xlabel('Facies') 
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([]) ax[4].set_yticklabels([]); ax[5].set_yticklabels([]) ax[5].set_xticklabels([]) f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94) def compare_facies_plot(logs, compadre, facies_colors): logs = logs.sort_values(by='Depth') cmap_facies = colors.ListedColormap( facies_colors[0:len(facies_colors)], 'indexed') ztop=logs.Depth.min(); zbot=logs.Depth.max() cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1) cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1) f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12)) ax[0].plot(logs.GR, logs.Depth, '-g') ax[1].plot(logs.ILD_log10, logs.Depth, '-') ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5') ax[3].plot(logs.PHIND, logs.Depth, '-', color='r') ax[4].plot(logs.PE, logs.Depth, '-', color='black') im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto', cmap=cmap_facies,vmin=1,vmax=9) im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto', cmap=cmap_facies,vmin=1,vmax=9) divider = make_axes_locatable(ax[6]) cax = divider.append_axes("right", size="20%", pad=0.05) cbar=plt.colorbar(im2, cax=cax) cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS', 'SiSh', ' MS ', ' WS ', ' D ', ' PS ', ' BS '])) cbar.set_ticks(range(0,1)); cbar.set_ticklabels('') for i in range(len(ax)-2): ax[i].set_ylim(ztop,zbot) ax[i].invert_yaxis() ax[i].grid() ax[i].locator_params(axis='x', nbins=3) ax[0].set_xlabel("GR") ax[0].set_xlim(logs.GR.min(),logs.GR.max()) ax[1].set_xlabel("ILD_log10") ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max()) ax[2].set_xlabel("DeltaPHI") ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max()) ax[3].set_xlabel("PHIND") ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max()) ax[4].set_xlabel("PE") ax[4].set_xlim(logs.PE.min(),logs.PE.max()) ax[5].set_xlabel('Facies') ax[6].set_xlabel(compadre) ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); 
ax[3].set_yticklabels([]) ax[4].set_yticklabels([]); ax[5].set_yticklabels([]) ax[5].set_xticklabels([]) ax[6].set_xticklabels([]) f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)
true
true
f70320a4c6f7f1a47efc6a6cec4230e45d3d82d5
5,597
py
Python
imagepy/tools/Transform/scale_tol.py
siyemuxu888/imagepy
a933526483a15da282bacac54608d44d2173beb4
[ "BSD-4-Clause" ]
null
null
null
imagepy/tools/Transform/scale_tol.py
siyemuxu888/imagepy
a933526483a15da282bacac54608d44d2173beb4
[ "BSD-4-Clause" ]
null
null
null
imagepy/tools/Transform/scale_tol.py
siyemuxu888/imagepy
a933526483a15da282bacac54608d44d2173beb4
[ "BSD-4-Clause" ]
null
null
null
import wx import numpy as np from imagepy.core.engine import Tool, Filter import scipy.ndimage as nimg class ScaleTool(Tool): def __init__(self, plg): self.plg = plg self.para = plg.para self.moving = False def snap(self, x, y, lim): plg = self.plg if abs(x-plg.lt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'l' if abs(x-plg.rt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'r' if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.tp)<lim:return 't' if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.bm)<lim:return 'b' if abs(x-plg.lt)<lim and abs(y-plg.tp)<lim:return 'lt' if abs(x-plg.rt)<lim and abs(y-plg.bm)<lim:return 'rb' if abs(x-plg.rt)<lim and abs(y-plg.tp)<lim:return 'rt' if abs(x-plg.lt)<lim and abs(y-plg.bm)<lim:return 'lb' if (x-plg.lt)*(x-plg.rt)<0 and (y-plg.tp)*(y-plg.bm)<0: self.ox, self.oy = x, y return True return False def mouse_down(self, ips, x, y, btn, **key): lim = 5.0/key['canvas'].get_scale() self.moving = self.snap(x, y, lim) print(self.moving) def mouse_up(self, ips, x, y, btn, **key): if self.moving : self.plg.preview(ips, self.para) def mouse_move(self, ips, x, y, btn, **key): lim = 5.0/key['canvas'].get_scale() if btn==None: self.cursor = wx.CURSOR_CROSS if isinstance(self.snap(x, y, lim), str): self.cursor = wx.CURSOR_HAND elif self.moving==True: self.plg.lt+=x-self.ox self.plg.rt+=x-self.ox self.plg.bm+=y-self.oy self.plg.tp+=y-self.oy self.ox, self.oy = x, y self.plg.count() self.plg.dialog.reset() ips.update = True elif self.moving != False: print("scale_tol.ScaleTool.mouse_move") if 'l' in self.moving:self.plg.lt = x if 'r' in self.moving:self.plg.rt = x if 't' in self.moving:self.plg.tp = y if 'b' in self.moving:self.plg.bm = y self.plg.count() self.plg.dialog.reset() ips.update = True class Plugin(Filter): modal = False title = 'Scale' note = ['all', 'auto_msk', 'auto_snap', 'preview'] para = {'kx': 1, 'ky':1, 'ox':0, 'oy':0, 'img':True, 'msk':False} view = [(float, (-100,100), 3, 'KX', 'kx', ''), (float, (-100,100), 3, 'KY', 'ky', ''), (int, 
(-10000,10000), 0, 'OffX', 'ox', 'pix'), (int, (-10000,10000), 0, 'OffY', 'oy', 'pix'), (bool, 'scale image', 'img'), (bool, 'scale mask', 'msk')] def draw(self, dc, f, **key): body = [(self.lt,self.bm),(self.rt,self.bm), (self.rt,self.tp),(self.lt,self.tp),(self.lt,self.bm)] dc.SetPen(wx.Pen((0,255,0), width=1, style=wx.SOLID)) dc.DrawLines([f(*i) for i in body]) for i in body:dc.DrawCircle(f(*i),2) dc.DrawCircle(f(self.lt, (self.tp+self.bm)/2),2) dc.DrawCircle(f(self.rt, (self.tp+self.bm)/2),2) dc.DrawCircle(f((self.lt+self.rt)/2, self.tp),2) dc.DrawCircle(f((self.lt+self.rt)/2, self.bm),2) def load(self, ips): self.bufroi = ips.roi self.lt, self.tp, self.rt, self.bm = 0, 0, ips.size[1], ips.size[0] if ips.roi!=None: box = ips.roi.get_box() if box[0]!=box[2] and box[1]!=box[3]: self.lt, self.tp, self.rt, self.bm = box self.orio = ((self.lt+self.rt)/2,(self.tp+self.bm)/2) self.oriw, self.orih = self.rt - self.lt, self.tp - self.bm self.para['ox'] = (self.lt+self.rt)/2 self.para['oy'] = (self.tp+self.bm)/2 self.para['kx'] = self.para['ky'] = 1 ips.mark = self ips.update = True ips.tool = ScaleTool(self) return True def count(self, dir=True): if dir: self.para['ox'] = int((self.lt+self.rt)/2) self.para['oy'] = int((self.tp+self.bm)/2) self.para['kx'] = (self.rt-self.lt)*1.0/self.oriw self.para['ky'] = (self.tp-self.bm)*1.0/self.orih else: self.lt = self.para['ox']-self.oriw*self.para['kx']/2 self.rt = self.para['ox']+self.oriw*self.para['kx']/2 self.bm = self.para['oy']-self.orih*self.para['ky']/2 self.tp = self.para['oy']+self.orih*self.para['ky']/2 def ok(self, ips, para=None): Filter.ok(self, ips, para) ips.mark = None ips.tool = None def cancel(self, ips): Filter.cancel(self, ips) ips.roi = self.bufroi ips.mark = None ips.tool = None ips.update = 'pix' def run(self, ips, img, buf, para = None): if para == None: para = self.para self.count(False) trans = np.array([[1/self.para['ky'],0],[0,1/self.para['kx']]]) o = np.array([self.para['oy'], self.para['ox']]) 
offset = self.orio[::-1]-trans.dot(o) if self.para['img']: nimg.affine_transform(img, trans, output=buf, offset=offset) trans = np.array([[self.para['kx'],0],[0, self.para['ky']]]) offset = o[::-1]-trans.dot(self.orio) if self.para['msk'] and self.bufroi!=None:ips.roi = self.bufroi.affine(trans, offset) if self.para['img'] and not ips.get_msk('out') is None: buf[ips.get_msk('out')] = img[ips.get_msk('out')] ips.update = True
39.13986
93
0.516348
import wx import numpy as np from imagepy.core.engine import Tool, Filter import scipy.ndimage as nimg class ScaleTool(Tool): def __init__(self, plg): self.plg = plg self.para = plg.para self.moving = False def snap(self, x, y, lim): plg = self.plg if abs(x-plg.lt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'l' if abs(x-plg.rt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'r' if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.tp)<lim:return 't' if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.bm)<lim:return 'b' if abs(x-plg.lt)<lim and abs(y-plg.tp)<lim:return 'lt' if abs(x-plg.rt)<lim and abs(y-plg.bm)<lim:return 'rb' if abs(x-plg.rt)<lim and abs(y-plg.tp)<lim:return 'rt' if abs(x-plg.lt)<lim and abs(y-plg.bm)<lim:return 'lb' if (x-plg.lt)*(x-plg.rt)<0 and (y-plg.tp)*(y-plg.bm)<0: self.ox, self.oy = x, y return True return False def mouse_down(self, ips, x, y, btn, **key): lim = 5.0/key['canvas'].get_scale() self.moving = self.snap(x, y, lim) print(self.moving) def mouse_up(self, ips, x, y, btn, **key): if self.moving : self.plg.preview(ips, self.para) def mouse_move(self, ips, x, y, btn, **key): lim = 5.0/key['canvas'].get_scale() if btn==None: self.cursor = wx.CURSOR_CROSS if isinstance(self.snap(x, y, lim), str): self.cursor = wx.CURSOR_HAND elif self.moving==True: self.plg.lt+=x-self.ox self.plg.rt+=x-self.ox self.plg.bm+=y-self.oy self.plg.tp+=y-self.oy self.ox, self.oy = x, y self.plg.count() self.plg.dialog.reset() ips.update = True elif self.moving != False: print("scale_tol.ScaleTool.mouse_move") if 'l' in self.moving:self.plg.lt = x if 'r' in self.moving:self.plg.rt = x if 't' in self.moving:self.plg.tp = y if 'b' in self.moving:self.plg.bm = y self.plg.count() self.plg.dialog.reset() ips.update = True class Plugin(Filter): modal = False title = 'Scale' note = ['all', 'auto_msk', 'auto_snap', 'preview'] para = {'kx': 1, 'ky':1, 'ox':0, 'oy':0, 'img':True, 'msk':False} view = [(float, (-100,100), 3, 'KX', 'kx', ''), (float, (-100,100), 3, 'KY', 'ky', ''), (int, 
(-10000,10000), 0, 'OffX', 'ox', 'pix'), (int, (-10000,10000), 0, 'OffY', 'oy', 'pix'), (bool, 'scale image', 'img'), (bool, 'scale mask', 'msk')] def draw(self, dc, f, **key): body = [(self.lt,self.bm),(self.rt,self.bm), (self.rt,self.tp),(self.lt,self.tp),(self.lt,self.bm)] dc.SetPen(wx.Pen((0,255,0), width=1, style=wx.SOLID)) dc.DrawLines([f(*i) for i in body]) for i in body:dc.DrawCircle(f(*i),2) dc.DrawCircle(f(self.lt, (self.tp+self.bm)/2),2) dc.DrawCircle(f(self.rt, (self.tp+self.bm)/2),2) dc.DrawCircle(f((self.lt+self.rt)/2, self.tp),2) dc.DrawCircle(f((self.lt+self.rt)/2, self.bm),2) def load(self, ips): self.bufroi = ips.roi self.lt, self.tp, self.rt, self.bm = 0, 0, ips.size[1], ips.size[0] if ips.roi!=None: box = ips.roi.get_box() if box[0]!=box[2] and box[1]!=box[3]: self.lt, self.tp, self.rt, self.bm = box self.orio = ((self.lt+self.rt)/2,(self.tp+self.bm)/2) self.oriw, self.orih = self.rt - self.lt, self.tp - self.bm self.para['ox'] = (self.lt+self.rt)/2 self.para['oy'] = (self.tp+self.bm)/2 self.para['kx'] = self.para['ky'] = 1 ips.mark = self ips.update = True ips.tool = ScaleTool(self) return True def count(self, dir=True): if dir: self.para['ox'] = int((self.lt+self.rt)/2) self.para['oy'] = int((self.tp+self.bm)/2) self.para['kx'] = (self.rt-self.lt)*1.0/self.oriw self.para['ky'] = (self.tp-self.bm)*1.0/self.orih else: self.lt = self.para['ox']-self.oriw*self.para['kx']/2 self.rt = self.para['ox']+self.oriw*self.para['kx']/2 self.bm = self.para['oy']-self.orih*self.para['ky']/2 self.tp = self.para['oy']+self.orih*self.para['ky']/2 def ok(self, ips, para=None): Filter.ok(self, ips, para) ips.mark = None ips.tool = None def cancel(self, ips): Filter.cancel(self, ips) ips.roi = self.bufroi ips.mark = None ips.tool = None ips.update = 'pix' def run(self, ips, img, buf, para = None): if para == None: para = self.para self.count(False) trans = np.array([[1/self.para['ky'],0],[0,1/self.para['kx']]]) o = np.array([self.para['oy'], self.para['ox']]) 
offset = self.orio[::-1]-trans.dot(o) if self.para['img']: nimg.affine_transform(img, trans, output=buf, offset=offset) trans = np.array([[self.para['kx'],0],[0, self.para['ky']]]) offset = o[::-1]-trans.dot(self.orio) if self.para['msk'] and self.bufroi!=None:ips.roi = self.bufroi.affine(trans, offset) if self.para['img'] and not ips.get_msk('out') is None: buf[ips.get_msk('out')] = img[ips.get_msk('out')] ips.update = True
true
true
f7032136be940769cd29b8db7635b5bf9b375e79
10,735
py
Python
py_std_logic_1164/std_logic.py
krcb197/py_std_logic_1164
66ae8b644db4d96b222132e92768d52ee1b98f7b
[ "MIT" ]
null
null
null
py_std_logic_1164/std_logic.py
krcb197/py_std_logic_1164
66ae8b644db4d96b222132e92768d52ee1b98f7b
[ "MIT" ]
null
null
null
py_std_logic_1164/std_logic.py
krcb197/py_std_logic_1164
66ae8b644db4d96b222132e92768d52ee1b98f7b
[ "MIT" ]
null
null
null
class std_logic(): """ class to represent a digital bit allowing for the same 9 values of a bit supported by IEEE 1164. ====== =============== Value Interpreatation ------ --------------- U Unitialized X Unknown 0 Strong 0 1 Strong 1 Z High Impedance W Weak unknown logic L Weak logic 0 H Weak logic 1 - Don't care ====== =============== Refer to https://en.wikipedia.org/wiki/IEEE_1164 for more details """ def __init__(self,initialvalue='U'): """ :param initialvalue: value to be loaded into the bit :type initialvalue: int, bool, str """ self._value = 'U' self.set(value=initialvalue) def __str__(self): return self._value def __repr__(self): base_repr = super().__repr__() return base_repr[:-2] + ':%s>'%self._value def __eq__(self, other): if issubclass(other.__class__,std_logic): return self._value == other._value else: raise NotImplementedError def __and__(self,other): return_value = NotImplemented if issubclass(other.__class__,std_logic): """ truth table from std_logic_1164-body.vhdl ---------------------------------------------------- | U X 0 1 Z W L H - | | ---------------------------------------------------- ( 'U', 'U', '0', 'U', 'U', 'U', '0', 'U', 'U' ), -- | U | ( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | X | ( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | 0 | ( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | 1 | ( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | Z | ( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | W | ( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | L | ( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | H | ( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ) -- | - | """ if self == std_logic('U'): if other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) else: return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or 
other == std_logic('L'): return_value = std_logic(0) else: return_value = std_logic('X') elif self == std_logic('0') or self == std_logic('L'): return_value = std_logic(0) elif self == std_logic('1') or self == std_logic('H'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('X') else: raise TypeError('can not perform operation on classes') return return_value def __xor__(self, other): """ perfroms a bitwise xor operation :param other: :return: self ^ other """ return_value = NotImplemented if issubclass(other.__class__,std_logic): """ truth table from std_logic_1164-body.vhdl ---------------------------------------------------- | U X 0 1 Z W L H - | | ---------------------------------------------------- ('U', 'U', 'U', 'U', 'U', 'U', 'U', 'U', 'U'), -- | U | ('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | X | ('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 | ('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | 1 | ('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | Z | ('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | W | ('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L | ('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | H | ('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X') -- | - | ); """ if self == std_logic('U'): return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): if other == std_logic('U'): return_value = std_logic('U') else: return_value = std_logic('X') elif self == std_logic('1') or self == std_logic('H'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(1) elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(0) else: return_value = 
std_logic('X') elif self == std_logic('0') or self == std_logic('L'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('X') else: raise TypeError('can not perform operation on classes') return return_value def __or__(self,other): return_value = NotImplemented if issubclass(other.__class__,std_logic): """ truth table from std_logic_1164-body.vhdl ---------------------------------------------------- | U X 0 1 Z W L H - | | ---------------------------------------------------- ('U', 'U', 'U', '1', 'U', 'U', 'U', '1', 'U'), -- | U | ('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | X | ('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 | ('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | 1 | ('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | Z | ('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | W | ('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L | ('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | H | ('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X') -- | - | ) """ if self == std_logic('U'): if other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('X') elif self == std_logic('1') or self == std_logic('H'): return_value = std_logic(1) elif self == std_logic('0') or self == std_logic('L'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) 
else: return_value = std_logic('X') else: raise TypeError('can not perform operation on classes') return return_value def __invert__(self): """ truth table from std_logic_1164-body.vhdl ------------------------------------------------- | U X 0 1 Z W L H - | ------------------------------------------------- ('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X') """ if self == std_logic('U'): return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): return_value = std_logic('X') elif self == std_logic('0') or self == std_logic('L'): return_value = std_logic(1) elif self == std_logic('1') or self == std_logic('H'): return_value = std_logic(0) return return_value def set(self,value): """ in place value set :param value: value to be loaded into the bit :type value: int, bool, str """ if isinstance(value,str): if len(value) != 1: raise ValueError('length is not 1') if ((value == 'U') or (value == 'X') or (value == '0') or (value == '1') or (value == 'Z') or (value == 'W') or (value == 'L') or (value == 'H') or (value == '-')): self._value = value else: raise ValueError('Unsupported value, only U,X,0,1,Z,W,L,H or - is permitted') elif isinstance(value,bool): if value is False: self._value = '0' elif value is True: self._value = '1' else: raise ValueError('Illegal boolean value') elif isinstance(value,int): if (value == 0) or (value == 1): self._value = str(value) assert (self._value == '1') or (self._value == '0') else: raise ValueError('Unsupported integer value, only 0 or 1 is permitted') else: raise ValueError('Unsupported type')
39.036364
118
0.382301
class std_logic(): def __init__(self,initialvalue='U'): self._value = 'U' self.set(value=initialvalue) def __str__(self): return self._value def __repr__(self): base_repr = super().__repr__() return base_repr[:-2] + ':%s>'%self._value def __eq__(self, other): if issubclass(other.__class__,std_logic): return self._value == other._value else: raise NotImplementedError def __and__(self,other): return_value = NotImplemented if issubclass(other.__class__,std_logic): if self == std_logic('U'): if other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) else: return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) else: return_value = std_logic('X') elif self == std_logic('0') or self == std_logic('L'): return_value = std_logic(0) elif self == std_logic('1') or self == std_logic('H'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('X') else: raise TypeError('can not perform operation on classes') return return_value def __xor__(self, other): return_value = NotImplemented if issubclass(other.__class__,std_logic): if self == std_logic('U'): return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): if other == std_logic('U'): return_value = std_logic('U') else: return_value = std_logic('X') elif self == std_logic('1') or self == std_logic('H'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(1) elif other == std_logic('1') or other == std_logic('H'): 
return_value = std_logic(0) else: return_value = std_logic('X') elif self == std_logic('0') or self == std_logic('L'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('X') else: raise TypeError('can not perform operation on classes') return return_value def __or__(self,other): return_value = NotImplemented if issubclass(other.__class__,std_logic): if self == std_logic('U'): if other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('X') elif self == std_logic('1') or self == std_logic('H'): return_value = std_logic(1) elif self == std_logic('0') or self == std_logic('L'): if other == std_logic('U'): return_value = std_logic('U') elif other == std_logic('0') or other == std_logic('L'): return_value = std_logic(0) elif other == std_logic('1') or other == std_logic('H'): return_value = std_logic(1) else: return_value = std_logic('X') else: raise TypeError('can not perform operation on classes') return return_value def __invert__(self): if self == std_logic('U'): return_value = std_logic('U') elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'): return_value = std_logic('X') elif self == std_logic('0') or self == std_logic('L'): return_value = std_logic(1) elif self == std_logic('1') or self == std_logic('H'): return_value = std_logic(0) return return_value def set(self,value): if isinstance(value,str): if len(value) != 1: raise ValueError('length is not 1') if ((value == 
'U') or (value == 'X') or (value == '0') or (value == '1') or (value == 'Z') or (value == 'W') or (value == 'L') or (value == 'H') or (value == '-')): self._value = value else: raise ValueError('Unsupported value, only U,X,0,1,Z,W,L,H or - is permitted') elif isinstance(value,bool): if value is False: self._value = '0' elif value is True: self._value = '1' else: raise ValueError('Illegal boolean value') elif isinstance(value,int): if (value == 0) or (value == 1): self._value = str(value) assert (self._value == '1') or (self._value == '0') else: raise ValueError('Unsupported integer value, only 0 or 1 is permitted') else: raise ValueError('Unsupported type')
true
true
f7032158e00462f1a6bfc96d5858e3f893dcb171
8,515
py
Python
seisidd/tomo_plans.py
KedoKudo/jupyter-ht-hedm
b447202fb9800e7b2916b38470db1b9a83357130
[ "MIT" ]
null
null
null
seisidd/tomo_plans.py
KedoKudo/jupyter-ht-hedm
b447202fb9800e7b2916b38470db1b9a83357130
[ "MIT" ]
null
null
null
seisidd/tomo_plans.py
KedoKudo/jupyter-ht-hedm
b447202fb9800e7b2916b38470db1b9a83357130
[ "MIT" ]
1
2019-07-16T22:13:02.000Z
2019-07-16T22:13:02.000Z
#!/usr/bin/env python """ Predefined bluesky scan plans """ import numpy as np import bluesky.plans as bp import bluesky.preprocessors as bpp import bluesky.plan_stubs as bps from .utility import load_config #@bpp.run_decorator() def collect_white_field(experiment, cfg_tomo, atfront=True): """ Collect white/flat field images by moving the sample out of the FOV """ # unpack devices det = experiment.det tomostage = experiment.tomostage # move sample out of the way _x = cfg_tomo['fronte_white_ksamX'] if atfront else cfg_tomo['back_white_ksamX'] _z = cfg_tomo['fronte_white_ksamZ'] if atfront else cfg_tomo['back_white_ksamZ'] yield from bps.mv(tomostage.ksamX, _x) yield from bps.mv(tomostage.ksamZ, _z) # setup detector yield from bps.mv(det.hdf1.nd_array_port, 'PROC1') yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') yield from bps.mv(det.proc1.enable, 1) yield from bps.mv(det.proc1.reset_filter, 1) yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames']) yield from bps.mv(det.cam.trigger_mode, "Internal") yield from bps.mv(det.cam.image_mode, "Multiple") yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white']) yield from bps.trigger_and_read([det]) # move sample back to FOV # NOTE: # not sure is this will work or not... 
yield from bps.mv(tomostage.ksamX, cfg_tomo['initial_ksamX']) yield from bps.mv(tomostage.ksamZ, cfg_tomo['initial_ksamZ']) #@bpp.run_decorator() def collect_dark_field(experiment, cfg_tomo): """ Collect dark field images by close the shutter """ det = experiment.det yield from bps.mv(det.hdf1.nd_array_port, 'PROC1') yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') yield from bps.mv(det.proc1.enable, 1) yield from bps.mv(det.proc1.reset_filter, 1) yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames']) yield from bps.mv(det.cam.trigger_mode, "Internal") yield from bps.mv(det.cam.image_mode, "Multiple") yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark']) yield from bps.trigger_and_read([det]) #@bpp.run_decorator() def step_scan(experiment, cfg_tomo): """ Collect projects with step motion """ # unpack devices det = experiment.det tomostage = experiment.tomostage yield from bps.mv(det.hdf1.nd_array_port, 'PROC1') yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') yield from bps.mv(det.proc1.enable, 1) yield from bps.mv(det.proc1.reset_filter, 1) yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames']) angs = np.arange( cfg_tomo['omega_start'], cfg_tomo['omega_end']+cfg_tomo['omega_step']/2, cfg_tomo['omega_step'], ) for ang in angs: yield from bps.checkpoint() yield from bps.mv(tomostage.preci, ang) yield from bps.trigger_and_read([det]) #@bpp.run_decorator() def fly_scan(experiment, cfg_tomo): """ Collect projections with fly motion """ det = experiment.det psofly = experiment.psofly yield from bps.mv(det.hdf1.nd_array_port, 'PG1') yield from bps.mv(det.tiff1.nd_array_port, 'PG1') # we are assuming that the global psofly is available yield from bps.mv( psofly.start, cfg_tomo['omega_start'], psofly.end, cfg_tomo['omega_end'], psofly.scan_delta, abs(cfg_tomo['omega_step']), psofly.slew_speed, cfg_tomo['slew_speed'], ) # taxi yield from bps.mv(psofly.taxi, "Taxi") yield from bps.mv( det.cam.num_images, cfg_tomo['n_projections'], 
det.cam.trigger_mode, "Overlapped", ) # start the fly scan yield from bps.trigger(det, group='fly') yield from bps.abs_set(psofly.fly, "Fly", group='fly') yield from bps.wait(group='fly') def tomo_scan(experiment, cfg): """ Tomography scan plan based on given configuration """ # unpack devices det = experiment.det tomostage = experiment.tomostage shutter = experiment.shutter shutter_suspender = experiment.suspend_shutter cfg = load_config(cfg) if type(cfg) != dict else cfg # update the cached motor position in the dict in case exp goes wrong _cahed_position = experiment.cache_motor_position() # step 0: preparation acquire_time = cfg['tomo']['acquire_time'] n_white = cfg['tomo']['n_white'] n_dark = cfg['tomo']['n_dark'] angs = np.arange( cfg['tomo']['omega_start'], cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2, cfg['tomo']['omega_step'], ) n_projections = len(angs) cfg['tomo']['n_projections'] = n_projections total_images = n_white + n_projections + n_white + n_dark fp = cfg['output']['filepath'] fn = cfg['output']['fileprefix'] # calculate slew speed for fly scan # https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py # TODO: considering blue pixels, use 2BM code as ref if cfg['tomo']['type'].lower() == 'fly': scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections slew_speed = (angs.max() - angs.min())/scan_time cfg['tomo']['slew_speed'] = slew_speed # need to make sure that the sample out position is the same for both front and back x0, z0 = tomostage.ksamX.position, tomostage.ksamZ.position dfx, dfz = cfg['tomo']['sample_out_position']['samX'], cfg['tomo']['sample_out_position']['samZ'] rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start']) rotm = np.array([[ np.cos(rotang), np.sin(rotang)], [-np.sin(rotang), np.cos(rotang)]]) dbxz = np.dot(rotm, np.array([dfx, dfz])) dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0 dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0 # now put the value to dict 
cfg['tomo']['initial_ksamX'] = x0 cfg['tomo']['initial_ksamZ'] = z0 cfg['tomo']['fronte_white_ksamX'] = x0 + dfx cfg['tomo']['fronte_white_ksamZ'] = z0 + dfz cfg['tomo']['back_white_ksamX'] = x0 + dbx cfg['tomo']['back_white_ksamZ'] = z0 + dbz @bpp.run_decorator() @bpp.stage_decorator([det]) def scan_closure(): # open shutter for beam yield from bps.mv(shutter, 'open') yield from bps.install_suspender(shutter_suspender) # config output for me in [det.tiff1, det.hdf1]: yield from bps.mv(me.file_path, fp) yield from bps.mv(me.file_name, fn) yield from bps.mv(me.file_write_mode, 2) yield from bps.mv(me.num_capture, total_images) yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()])) if cfg['output']['type'] in ['tif', 'tiff']: yield from bps.mv(det.tiff1.enable, 1) yield from bps.mv(det.tiff1.capture, 1) yield from bps.mv(det.hdf1.enable, 0) elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']: yield from bps.mv(det.tiff1.enable, 0) yield from bps.mv(det.hdf1.enable, 1) yield from bps.mv(det.hdf1.capture, 1) else: raise ValueError(f"Unsupported output type {cfg['output']['type']}") # collect front white field yield from bps.mv(det.cam.frame_type, 0) # for HDF5 dxchange data structure yield from collect_white_field(experiment, cfg['tomo'], atfront=True) # collect projections yield from bps.mv(det.cam.frame_type, 1) # for HDF5 dxchange data structure if cfg['tomo']['type'].lower() == 'step': yield from step_scan(experiment, cfg['tomo']) elif cfg['tomo']['type'].lower() == 'fly': yield from fly_scan(experiment, cfg['tomo']) else: raise ValueError(f"Unsupported scan type: {cfg['tomo']['type']}") # collect back white field yield from bps.mv(det.cam.frame_type, 2) # for HDF5 dxchange data structure yield from collect_white_field(experiment, cfg['tomo'], atfront=False) # collect back dark field yield from bps.mv(det.cam.frame_type, 3) # for HDF5 dxchange data structure yield from bps.remove_suspender(shutter_suspender) yield from 
bps.mv(shutter, "close") yield from collect_dark_field(experiment, cfg['tomo']) return (yield from scan_closure())
37.676991
107
0.640752
import numpy as np import bluesky.plans as bp import bluesky.preprocessors as bpp import bluesky.plan_stubs as bps from .utility import load_config def collect_white_field(experiment, cfg_tomo, atfront=True): det = experiment.det tomostage = experiment.tomostage _x = cfg_tomo['fronte_white_ksamX'] if atfront else cfg_tomo['back_white_ksamX'] _z = cfg_tomo['fronte_white_ksamZ'] if atfront else cfg_tomo['back_white_ksamZ'] yield from bps.mv(tomostage.ksamX, _x) yield from bps.mv(tomostage.ksamZ, _z) yield from bps.mv(det.hdf1.nd_array_port, 'PROC1') yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') yield from bps.mv(det.proc1.enable, 1) yield from bps.mv(det.proc1.reset_filter, 1) yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames']) yield from bps.mv(det.cam.trigger_mode, "Internal") yield from bps.mv(det.cam.image_mode, "Multiple") yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white']) yield from bps.trigger_and_read([det]) yield from bps.mv(tomostage.ksamX, cfg_tomo['initial_ksamX']) yield from bps.mv(tomostage.ksamZ, cfg_tomo['initial_ksamZ']) def collect_dark_field(experiment, cfg_tomo): det = experiment.det yield from bps.mv(det.hdf1.nd_array_port, 'PROC1') yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') yield from bps.mv(det.proc1.enable, 1) yield from bps.mv(det.proc1.reset_filter, 1) yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames']) yield from bps.mv(det.cam.trigger_mode, "Internal") yield from bps.mv(det.cam.image_mode, "Multiple") yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark']) yield from bps.trigger_and_read([det]) def step_scan(experiment, cfg_tomo): det = experiment.det tomostage = experiment.tomostage yield from bps.mv(det.hdf1.nd_array_port, 'PROC1') yield from bps.mv(det.tiff1.nd_array_port, 'PROC1') yield from bps.mv(det.proc1.enable, 1) yield from bps.mv(det.proc1.reset_filter, 1) yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames']) angs = np.arange( 
cfg_tomo['omega_start'], cfg_tomo['omega_end']+cfg_tomo['omega_step']/2, cfg_tomo['omega_step'], ) for ang in angs: yield from bps.checkpoint() yield from bps.mv(tomostage.preci, ang) yield from bps.trigger_and_read([det]) def fly_scan(experiment, cfg_tomo): det = experiment.det psofly = experiment.psofly yield from bps.mv(det.hdf1.nd_array_port, 'PG1') yield from bps.mv(det.tiff1.nd_array_port, 'PG1') yield from bps.mv( psofly.start, cfg_tomo['omega_start'], psofly.end, cfg_tomo['omega_end'], psofly.scan_delta, abs(cfg_tomo['omega_step']), psofly.slew_speed, cfg_tomo['slew_speed'], ) yield from bps.mv(psofly.taxi, "Taxi") yield from bps.mv( det.cam.num_images, cfg_tomo['n_projections'], det.cam.trigger_mode, "Overlapped", ) yield from bps.trigger(det, group='fly') yield from bps.abs_set(psofly.fly, "Fly", group='fly') yield from bps.wait(group='fly') def tomo_scan(experiment, cfg): det = experiment.det tomostage = experiment.tomostage shutter = experiment.shutter shutter_suspender = experiment.suspend_shutter cfg = load_config(cfg) if type(cfg) != dict else cfg _cahed_position = experiment.cache_motor_position() acquire_time = cfg['tomo']['acquire_time'] n_white = cfg['tomo']['n_white'] n_dark = cfg['tomo']['n_dark'] angs = np.arange( cfg['tomo']['omega_start'], cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2, cfg['tomo']['omega_step'], ) n_projections = len(angs) cfg['tomo']['n_projections'] = n_projections total_images = n_white + n_projections + n_white + n_dark fp = cfg['output']['filepath'] fn = cfg['output']['fileprefix'] if cfg['tomo']['type'].lower() == 'fly': scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections slew_speed = (angs.max() - angs.min())/scan_time cfg['tomo']['slew_speed'] = slew_speed x0, z0 = tomostage.ksamX.position, tomostage.ksamZ.position dfx, dfz = cfg['tomo']['sample_out_position']['samX'], cfg['tomo']['sample_out_position']['samZ'] rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start']) rotm = 
np.array([[ np.cos(rotang), np.sin(rotang)], [-np.sin(rotang), np.cos(rotang)]]) dbxz = np.dot(rotm, np.array([dfx, dfz])) dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0 dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0 cfg['tomo']['initial_ksamX'] = x0 cfg['tomo']['initial_ksamZ'] = z0 cfg['tomo']['fronte_white_ksamX'] = x0 + dfx cfg['tomo']['fronte_white_ksamZ'] = z0 + dfz cfg['tomo']['back_white_ksamX'] = x0 + dbx cfg['tomo']['back_white_ksamZ'] = z0 + dbz @bpp.run_decorator() @bpp.stage_decorator([det]) def scan_closure(): yield from bps.mv(shutter, 'open') yield from bps.install_suspender(shutter_suspender) for me in [det.tiff1, det.hdf1]: yield from bps.mv(me.file_path, fp) yield from bps.mv(me.file_name, fn) yield from bps.mv(me.file_write_mode, 2) yield from bps.mv(me.num_capture, total_images) yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()])) if cfg['output']['type'] in ['tif', 'tiff']: yield from bps.mv(det.tiff1.enable, 1) yield from bps.mv(det.tiff1.capture, 1) yield from bps.mv(det.hdf1.enable, 0) elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']: yield from bps.mv(det.tiff1.enable, 0) yield from bps.mv(det.hdf1.enable, 1) yield from bps.mv(det.hdf1.capture, 1) else: raise ValueError(f"Unsupported output type {cfg['output']['type']}") yield from bps.mv(det.cam.frame_type, 0) yield from collect_white_field(experiment, cfg['tomo'], atfront=True) yield from bps.mv(det.cam.frame_type, 1) if cfg['tomo']['type'].lower() == 'step': yield from step_scan(experiment, cfg['tomo']) elif cfg['tomo']['type'].lower() == 'fly': yield from fly_scan(experiment, cfg['tomo']) else: raise ValueError(f"Unsupported scan type: {cfg['tomo']['type']}") yield from bps.mv(det.cam.frame_type, 2) yield from collect_white_field(experiment, cfg['tomo'], atfront=False) yield from bps.mv(det.cam.frame_type, 3) yield from bps.remove_suspender(shutter_suspender) yield from bps.mv(shutter, "close") yield from collect_dark_field(experiment, 
cfg['tomo']) return (yield from scan_closure())
true
true
f70321ab9d1e17a3074d6de10d8dd3f2b2a4c4d5
502
py
Python
server/__init__.py
Programmer-RD-AI/Chat-App
b9ff5f2d5f59cc37d4ef7040b9d59ae7c1ace9e2
[ "Apache-2.0" ]
null
null
null
server/__init__.py
Programmer-RD-AI/Chat-App
b9ff5f2d5f59cc37d4ef7040b9d59ae7c1ace9e2
[ "Apache-2.0" ]
5
2021-09-16T17:42:31.000Z
2021-10-30T13:57:12.000Z
server/__init__.py
Programmer-RD-AI/Chat-App
b9ff5f2d5f59cc37d4ef7040b9d59ae7c1ace9e2
[ "Apache-2.0" ]
null
null
null
from pymongo import * from flask import * from flask_restful import * import datetime mongodb_url = "mongodb://Ranuga:ranuga2008@cluster0-shard-00-00.6n3dg.mongodb.net:27017,cluster0-shard-00-01.6n3dg.mongodb.net:27017,cluster0-shard-00-02.6n3dg.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-uo9rgq-shard-0&authSource=admin&retryWrites=true&w=majority" app = Flask(__name__) app.debug = True app.secret_key = "development" cluster = MongoClient(mongodb_url) from server.routes import *
38.615385
280
0.806773
from pymongo import * from flask import * from flask_restful import * import datetime mongodb_url = "mongodb://Ranuga:ranuga2008@cluster0-shard-00-00.6n3dg.mongodb.net:27017,cluster0-shard-00-01.6n3dg.mongodb.net:27017,cluster0-shard-00-02.6n3dg.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-uo9rgq-shard-0&authSource=admin&retryWrites=true&w=majority" app = Flask(__name__) app.debug = True app.secret_key = "development" cluster = MongoClient(mongodb_url) from server.routes import *
true
true
f70321d163f2bb54c285dd61fcf2d529daa5b17b
6,435
py
Python
tuun/probo/models/gp_stan_transfer.py
petuum/tuun
8eec472dbf0e5e695449b0fa2d98985469fd5b30
[ "Apache-2.0" ]
33
2020-08-30T16:22:35.000Z
2022-02-26T13:48:32.000Z
tuun/probo/models/gp_stan_transfer.py
petuum/tuun
8eec472dbf0e5e695449b0fa2d98985469fd5b30
[ "Apache-2.0" ]
2
2021-01-18T19:46:43.000Z
2021-03-24T09:59:14.000Z
tuun/probo/models/gp_stan_transfer.py
petuum/tuun
8eec472dbf0e5e695449b0fa2d98985469fd5b30
[ "Apache-2.0" ]
2
2020-08-25T17:02:15.000Z
2021-04-21T16:40:44.000Z
""" Classes for GP models with Stan that perform transfer optimization. """ from argparse import Namespace import numpy as np import copy from .gp_stan import StanGp from .regression.transfer_regression import TransferRegression from ..util.misc_util import dict_to_namespace class StanTransferGp(StanGp): """ GP model with transferred prior mean based on a regression model. """ def __init__(self, params=None, data=None, verbose=None): self.set_params(params) self.set_verbose(verbose) self.set_model(data) def set_params(self, params): """Set self.params, the parameters for this model.""" super().set_params(params) params = dict_to_namespace(params) assert hasattr(params, 'transfer_config') self.params.transfer_config = params.transfer_config def set_model(self, data): """Set GP Stan model and regression model.""" self.model = self.get_model() self.regressor = self.get_regressor(data) #self.regressor = self.get_proxy_regressor(data) # TODO def get_regressor(self, data): """Return transfer (prior mean) regressor.""" # Define regressor regressor = TransferRegression(self.params.transfer_config) if len(data.x) < 1: regressor = None else: mean_errors = [] # TODO: remove extra files such as .DS_STORE (or ignore files that break) for i, reg in enumerate(regressor.model_fnames): try: val_acc = regressor.evaluate_model(reg, data.x) error = np.mean((data.y - val_acc) ** 2) mean_errors.append((error, i)) except: print(f'Transfer model file in tarball did not load: {reg}') mean_errors.sort() if mean_errors[0][0] > self.params.transfer_config.get('metric_threshold', 0.6): regressor.set_best_model(-1) else: regressor.set_best_model(mean_errors[0][1]) return regressor def get_proxy_regressor(self, data): if not data: regressor = None else: def regressor(x): return np.linalg.norm(x) return regressor def transform_data_y(self): """Transform data.y using PriorMeanDataTransformer.""" self.dt = PriorMeanDataTransformer(self.data, self.regressor, False) y_trans = 
self.dt.transform_y_data() self.data = Namespace(x=self.data.x, y=y_trans) def gen_list(self, x_list, z, s, nsamp): """ Draw nsamp samples from generative process, given list of inputs x_list, posterior sample z, and seed s. Parameters ---------- x_list : list List of numpy ndarrays each with shape=(self.params.ndimx,) z : Namespace Namespace of GP hyperparameters. s : int The seed, a positive integer. nsamp : int The number of samples to draw from generative process. Returns ------- list A list with len=len(x_list) of numpy ndarrays, each with shape=(nsamp,). """ x_list = self.transform_xin_list(x_list) pred_list = self.sample_gp_pred(nsamp, x_list) pred_list = [ self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list) ] return pred_list def postgen_list(self, x_list, s, nsamp): """ Draw nsamp samples from posterior predictive distribution, given list of inputs x_list and seed s. Parameters ---------- x_list : list List of numpy ndarrays each with shape=(self.params.ndimx,). s : int The seed, a positive integer. nsamp : int The number of samples to draw from the posterior predictive distribution. Returns ------- list A list with len=len(x_list) of numpy ndarrays, each with shape=(nsamp,). """ x_list = self.transform_xin_list(x_list) pred_list = self.sample_gp_post_pred( nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp]) ) pred_list = [ self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list) ] return pred_list def __str__(self): return f'StanTransferGp with params={self.params}' class PriorMeanDataTransformer: """ A class to transform (and inverse transform) data, based on a prior mean regression. """ def __init__(self, data, prior_mean_f, verbose=True): """ Parameters ---------- data : Namespace Namespace containing data. prior_mean_f : function Prior mean function. verbose : bool If True, print description string. 
""" self._set_data(data) self._set_prior_mean_f(prior_mean_f) self._set_verbose(verbose) def _set_data(self, data): """Set self.data""" self.data = data def _set_prior_mean_f(self, prior_mean_f): """Set self.prior_mean_f.""" if prior_mean_f is None: # Default prior mean function is constant 0 function def prior_mean_f(x): return 0. self.prior_mean_f = prior_mean_f def _set_verbose(self, verbose): """Set verbose options.""" self.verbose = verbose if self.verbose: self._print_str() def transform_y_data(self, y_data=None, x_data=None): """Transform and return self.data.y""" # Transform self.data.y into new list y_trans = [y - self.prior_mean_f(x) for x, y in zip(self.data.x, self.data.y)] return y_trans def inv_transform_y_data(self, y_arr, x_single_arr): """Return inverse transform of y_arr.""" # Compute prior mean val for the single input prior_mean_val = self.prior_mean_f(x_single_arr) # Inverse transform y_arr into list y_inv_trans_list = [y + prior_mean_val for y in list(y_arr)] # Transform back to array and return y_inv_trans = np.array(y_inv_trans_list).reshape(-1) return y_inv_trans def _print_str(self): """Print a description string.""" print('*PriorMeanDataTransformer')
31.856436
92
0.601399
from argparse import Namespace import numpy as np import copy from .gp_stan import StanGp from .regression.transfer_regression import TransferRegression from ..util.misc_util import dict_to_namespace class StanTransferGp(StanGp): def __init__(self, params=None, data=None, verbose=None): self.set_params(params) self.set_verbose(verbose) self.set_model(data) def set_params(self, params): super().set_params(params) params = dict_to_namespace(params) assert hasattr(params, 'transfer_config') self.params.transfer_config = params.transfer_config def set_model(self, data): self.model = self.get_model() self.regressor = self.get_regressor(data) def get_regressor(self, data): regressor = TransferRegression(self.params.transfer_config) if len(data.x) < 1: regressor = None else: mean_errors = [] for i, reg in enumerate(regressor.model_fnames): try: val_acc = regressor.evaluate_model(reg, data.x) error = np.mean((data.y - val_acc) ** 2) mean_errors.append((error, i)) except: print(f'Transfer model file in tarball did not load: {reg}') mean_errors.sort() if mean_errors[0][0] > self.params.transfer_config.get('metric_threshold', 0.6): regressor.set_best_model(-1) else: regressor.set_best_model(mean_errors[0][1]) return regressor def get_proxy_regressor(self, data): if not data: regressor = None else: def regressor(x): return np.linalg.norm(x) return regressor def transform_data_y(self): self.dt = PriorMeanDataTransformer(self.data, self.regressor, False) y_trans = self.dt.transform_y_data() self.data = Namespace(x=self.data.x, y=y_trans) def gen_list(self, x_list, z, s, nsamp): x_list = self.transform_xin_list(x_list) pred_list = self.sample_gp_pred(nsamp, x_list) pred_list = [ self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list) ] return pred_list def postgen_list(self, x_list, s, nsamp): x_list = self.transform_xin_list(x_list) pred_list = self.sample_gp_post_pred( nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp]) ) pred_list = [ 
self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list) ] return pred_list def __str__(self): return f'StanTransferGp with params={self.params}' class PriorMeanDataTransformer: def __init__(self, data, prior_mean_f, verbose=True): self._set_data(data) self._set_prior_mean_f(prior_mean_f) self._set_verbose(verbose) def _set_data(self, data): self.data = data def _set_prior_mean_f(self, prior_mean_f): if prior_mean_f is None: def prior_mean_f(x): return 0. self.prior_mean_f = prior_mean_f def _set_verbose(self, verbose): self.verbose = verbose if self.verbose: self._print_str() def transform_y_data(self, y_data=None, x_data=None): y_trans = [y - self.prior_mean_f(x) for x, y in zip(self.data.x, self.data.y)] return y_trans def inv_transform_y_data(self, y_arr, x_single_arr): prior_mean_val = self.prior_mean_f(x_single_arr) y_inv_trans_list = [y + prior_mean_val for y in list(y_arr)] y_inv_trans = np.array(y_inv_trans_list).reshape(-1) return y_inv_trans def _print_str(self): print('*PriorMeanDataTransformer')
true
true
f703223b3ac044df519437e64abc05bc1bd9973c
5,865
py
Python
sdk/python/pulumi_azure_native/authorization/latest/get_management_lock_at_resource_level.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/authorization/latest/get_management_lock_at_resource_level.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/authorization/latest/get_management_lock_at_resource_level.py
pulumi-bot/pulumi-azure-native
f7b9490b5211544318e455e5cceafe47b628e12c
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'GetManagementLockAtResourceLevelResult', 'AwaitableGetManagementLockAtResourceLevelResult', 'get_management_lock_at_resource_level', ] warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""", DeprecationWarning) @pulumi.output_type class GetManagementLockAtResourceLevelResult: """ The lock information. """ def __init__(__self__, id=None, level=None, name=None, notes=None, owners=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if level and not isinstance(level, str): raise TypeError("Expected argument 'level' to be a str") pulumi.set(__self__, "level", level) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if notes and not isinstance(notes, str): raise TypeError("Expected argument 'notes' to be a str") pulumi.set(__self__, "notes", notes) if owners and not isinstance(owners, list): raise TypeError("Expected argument 'owners' to be a list") pulumi.set(__self__, "owners", owners) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ The resource ID of the lock. """ return pulumi.get(self, "id") @property @pulumi.getter def level(self) -> str: """ The level of the lock. Possible values are: NotSpecified, CanNotDelete, ReadOnly. 
CanNotDelete means authorized users are able to read and modify the resources, but not delete. ReadOnly means authorized users can only read from a resource, but they can't modify or delete it. """ return pulumi.get(self, "level") @property @pulumi.getter def name(self) -> str: """ The name of the lock. """ return pulumi.get(self, "name") @property @pulumi.getter def notes(self) -> Optional[str]: """ Notes about the lock. Maximum of 512 characters. """ return pulumi.get(self, "notes") @property @pulumi.getter def owners(self) -> Optional[Sequence['outputs.ManagementLockOwnerResponse']]: """ The owners of the lock. """ return pulumi.get(self, "owners") @property @pulumi.getter def type(self) -> str: """ The resource type of the lock - Microsoft.Authorization/locks. """ return pulumi.get(self, "type") class AwaitableGetManagementLockAtResourceLevelResult(GetManagementLockAtResourceLevelResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetManagementLockAtResourceLevelResult( id=self.id, level=self.level, name=self.name, notes=self.notes, owners=self.owners, type=self.type) def get_management_lock_at_resource_level(lock_name: Optional[str] = None, parent_resource_path: Optional[str] = None, resource_group_name: Optional[str] = None, resource_name: Optional[str] = None, resource_provider_namespace: Optional[str] = None, resource_type: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagementLockAtResourceLevelResult: """ The lock information. Latest API Version: 2016-09-01. :param str lock_name: The name of lock. :param str parent_resource_path: An extra path parameter needed in some services, like SQL Databases. :param str resource_group_name: The name of the resource group. :param str resource_name: The name of the resource. :param str resource_provider_namespace: The namespace of the resource provider. :param str resource_type: The type of the resource. 
""" pulumi.log.warn("""get_management_lock_at_resource_level is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""") __args__ = dict() __args__['lockName'] = lock_name __args__['parentResourcePath'] = parent_resource_path __args__['resourceGroupName'] = resource_group_name __args__['resourceName'] = resource_name __args__['resourceProviderNamespace'] = resource_provider_namespace __args__['resourceType'] = resource_type if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:authorization/latest:getManagementLockAtResourceLevel', __args__, opts=opts, typ=GetManagementLockAtResourceLevelResult).value return AwaitableGetManagementLockAtResourceLevelResult( id=__ret__.id, level=__ret__.level, name=__ret__.name, notes=__ret__.notes, owners=__ret__.owners, type=__ret__.type)
39.628378
283
0.650298
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs __all__ = [ 'GetManagementLockAtResourceLevelResult', 'AwaitableGetManagementLockAtResourceLevelResult', 'get_management_lock_at_resource_level', ] warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""", DeprecationWarning) @pulumi.output_type class GetManagementLockAtResourceLevelResult: def __init__(__self__, id=None, level=None, name=None, notes=None, owners=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if level and not isinstance(level, str): raise TypeError("Expected argument 'level' to be a str") pulumi.set(__self__, "level", level) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if notes and not isinstance(notes, str): raise TypeError("Expected argument 'notes' to be a str") pulumi.set(__self__, "notes", notes) if owners and not isinstance(owners, list): raise TypeError("Expected argument 'owners' to be a list") pulumi.set(__self__, "owners", owners) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: return pulumi.get(self, "id") @property @pulumi.getter def level(self) -> str: return pulumi.get(self, "level") @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter def notes(self) -> Optional[str]: return pulumi.get(self, "notes") @property @pulumi.getter def owners(self) -> Optional[Sequence['outputs.ManagementLockOwnerResponse']]: return pulumi.get(self, "owners") @property @pulumi.getter def type(self) -> str: return 
pulumi.get(self, "type") class AwaitableGetManagementLockAtResourceLevelResult(GetManagementLockAtResourceLevelResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetManagementLockAtResourceLevelResult( id=self.id, level=self.level, name=self.name, notes=self.notes, owners=self.owners, type=self.type) def get_management_lock_at_resource_level(lock_name: Optional[str] = None, parent_resource_path: Optional[str] = None, resource_group_name: Optional[str] = None, resource_name: Optional[str] = None, resource_provider_namespace: Optional[str] = None, resource_type: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagementLockAtResourceLevelResult: pulumi.log.warn("""get_management_lock_at_resource_level is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""") __args__ = dict() __args__['lockName'] = lock_name __args__['parentResourcePath'] = parent_resource_path __args__['resourceGroupName'] = resource_group_name __args__['resourceName'] = resource_name __args__['resourceProviderNamespace'] = resource_provider_namespace __args__['resourceType'] = resource_type if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:authorization/latest:getManagementLockAtResourceLevel', __args__, opts=opts, typ=GetManagementLockAtResourceLevelResult).value return AwaitableGetManagementLockAtResourceLevelResult( id=__ret__.id, level=__ret__.level, name=__ret__.name, notes=__ret__.notes, owners=__ret__.owners, type=__ret__.type)
true
true
f703224172490713b6ea5921d8e35e234816b5d6
8,418
py
Python
mvn/utils/op.py
K4S4B4/learnable-triangulation-pytorch
94f5121919785bf7c89dd973521a21c01104dbd5
[ "MIT" ]
null
null
null
mvn/utils/op.py
K4S4B4/learnable-triangulation-pytorch
94f5121919785bf7c89dd973521a21c01104dbd5
[ "MIT" ]
null
null
null
mvn/utils/op.py
K4S4B4/learnable-triangulation-pytorch
94f5121919785bf7c89dd973521a21c01104dbd5
[ "MIT" ]
null
null
null
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mvn.utils.img import to_numpy, to_torch from mvn.utils import multiview def integrate_tensor_2d(heatmaps, softmax=True): """Applies softmax to heatmaps and integrates them to get their's "center of masses" Args: heatmaps torch tensor of shape (batch_size, n_heatmaps, h, w): input heatmaps Returns: coordinates torch tensor of shape (batch_size, n_heatmaps, 2): coordinates of center of masses of all heatmaps """ batch_size, n_heatmaps, h, w = heatmaps.shape heatmaps = heatmaps.reshape((batch_size, n_heatmaps, -1)) if softmax: heatmaps = nn.functional.softmax(heatmaps, dim=2) else: heatmaps = nn.functional.relu(heatmaps) heatmaps = heatmaps.reshape((batch_size, n_heatmaps, h, w)) mass_x = heatmaps.sum(dim=2) mass_y = heatmaps.sum(dim=3) mass_times_coord_x = mass_x * torch.arange(w).type(torch.float).to(mass_x.device) mass_times_coord_y = mass_y * torch.arange(h).type(torch.float).to(mass_y.device) x = mass_times_coord_x.sum(dim=2, keepdim=True) y = mass_times_coord_y.sum(dim=2, keepdim=True) if not softmax: x = x / mass_x.sum(dim=2, keepdim=True) y = y / mass_y.sum(dim=2, keepdim=True) coordinates = torch.cat((x, y), dim=2) coordinates = coordinates.reshape((batch_size, n_heatmaps, 2)) return coordinates def integrate_tensor_3d(volumes, softmax=True): batch_size, n_volumes, x_size, y_size, z_size = volumes.shape volumes = volumes.reshape((batch_size, n_volumes, -1)) if softmax: volumes = nn.functional.softmax(volumes, dim=2) else: volumes = nn.functional.relu(volumes) volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size)) mass_x = volumes.sum(dim=3).sum(dim=3) mass_y = volumes.sum(dim=2).sum(dim=3) mass_z = volumes.sum(dim=2).sum(dim=2) mass_times_coord_x = mass_x * torch.arange(x_size).type(torch.float).to(mass_x.device) mass_times_coord_y = mass_y * torch.arange(y_size).type(torch.float).to(mass_y.device) mass_times_coord_z = mass_z * 
torch.arange(z_size).type(torch.float).to(mass_z.device) x = mass_times_coord_x.sum(dim=2, keepdim=True) y = mass_times_coord_y.sum(dim=2, keepdim=True) z = mass_times_coord_z.sum(dim=2, keepdim=True) if not softmax: x = x / mass_x.sum(dim=2, keepdim=True) y = y / mass_y.sum(dim=2, keepdim=True) z = z / mass_z.sum(dim=2, keepdim=True) coordinates = torch.cat((x, y, z), dim=2) coordinates = coordinates.reshape((batch_size, n_volumes, 3)) return coordinates, volumes def integrate_tensor_3d_with_coordinates(volumes, coord_volumes, softmax=True): batch_size, n_volumes, x_size, y_size, z_size = volumes.shape volumes = volumes.reshape((batch_size, n_volumes, -1)) if softmax: volumes = nn.functional.softmax(volumes, dim=2) else: volumes = nn.functional.relu(volumes) volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size)) coordinates = torch.einsum("bnxyz, bxyzc -> bnc", volumes, coord_volumes) return coordinates #, volumes def unproject_heatmaps(heatmaps, proj_matricies, coord_volumes, volume_aggregation_method='sum', vol_confidences=None): device = heatmaps.device batch_size, n_views, n_joints, heatmap_shape = heatmaps.shape[0], heatmaps.shape[1], heatmaps.shape[2], tuple(heatmaps.shape[3:]) # 1,4,32,96x96 volume_shape = coord_volumes.shape[1:4] #64x64x64 volume_batch = torch.zeros(batch_size, n_joints, *volume_shape, device=device) # 1x32x64x64x64のTensor # TODO: speed up this this loop for batch_i in range(batch_size): coord_volume = coord_volumes[batch_i] # Bx64x64x64x3 -> 64x64x64x3 grid_coord = coord_volume.reshape((-1, 3)) # 262144x3 volume_batch_to_aggregate = torch.zeros(n_views, n_joints, *volume_shape, device=device) # 4x32x64x64x64 for view_i in range(n_views): heatmap = heatmaps[batch_i, view_i] # 1x4x32x96x96 -> 32x96x96 heatmap = heatmap.unsqueeze(0) # 1x32x96x96 (一番初めに次元を追加) grid_coord_proj = multiview.project_3d_points_to_image_plane_without_distortion( # 262144x3 proj_matricies[batch_i, view_i], grid_coord, 
convert_back_to_euclidean=False ) invalid_mask = grid_coord_proj[:, 2] <= 0.0 # depth must be larger than 0.0 #人がカメラに近づきすぎた場合に起こる?? grid_coord_proj[grid_coord_proj[:, 2] == 0.0, 2] = 1.0 # not to divide by zero grid_coord_proj = multiview.homogeneous_to_euclidean(grid_coord_proj) # transform to [-1.0, 1.0] range grid_coord_proj_transformed = torch.zeros_like(grid_coord_proj) # 262144x2 grid_coord_proj_transformed[:, 0] = 2 * (grid_coord_proj[:, 0] / heatmap_shape[0] - 0.5) # (0,0)->(96,96)の座標を、中心を(0,0)、左上を(-1,-1)、右下を(1,1)とする相対的な座標に変換 grid_coord_proj_transformed[:, 1] = 2 * (grid_coord_proj[:, 1] / heatmap_shape[1] - 0.5) grid_coord_proj = grid_coord_proj_transformed # prepare to F.grid_sample grid_coord_proj = grid_coord_proj.unsqueeze(1).unsqueeze(0) # 引数で指定された場所に一つ次元を足すらしい 1x262144x1x2。heatmapが1x32x96x96 try: current_volume = F.grid_sample(heatmap, grid_coord_proj, align_corners=True) # 1x32x262144x1 = Heatmap(1x32x96x96), grid_coord_proj(1x262144x1x2) except TypeError: # old PyTorch current_volume = F.grid_sample(heatmap, grid_coord_proj) # zero out non-valid points current_volume = current_volume.view(n_joints, -1) #32x262144 current_volume[:, invalid_mask] = 0.0 # reshape back to volume current_volume = current_volume.view(n_joints, *volume_shape) #32x64x64x64 # collect volume_batch_to_aggregate[view_i] = current_volume # agregate resulting volume if volume_aggregation_method.startswith('conf'): volume_batch[batch_i] = (volume_batch_to_aggregate * vol_confidences[batch_i].view(n_views, n_joints, 1, 1, 1)).sum(0) elif volume_aggregation_method == 'sum': volume_batch[batch_i] = volume_batch_to_aggregate.sum(0) elif volume_aggregation_method == 'max': volume_batch[batch_i] = volume_batch_to_aggregate.max(0)[0] elif volume_aggregation_method == 'softmax': volume_batch_to_aggregate_softmin = volume_batch_to_aggregate.clone() # 2x32x64x64x64(n_views, n_joints, *volume_shape) volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, -1) # 
reshape volume_batch_to_aggregate_softmin = nn.functional.softmax(volume_batch_to_aggregate_softmin, dim=0) volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, n_joints, *volume_shape) #reshape back volume_batch[batch_i] = (volume_batch_to_aggregate * volume_batch_to_aggregate_softmin).sum(0) else: raise ValueError("Unknown volume_aggregation_method: {}".format(volume_aggregation_method)) return volume_batch def gaussian_2d_pdf(coords, means, sigmas, normalize=True): normalization = 1.0 if normalize: normalization = (2 * np.pi * sigmas[:, 0] * sigmas[:, 0]) exp = torch.exp(-((coords[:, 0] - means[:, 0]) ** 2 / sigmas[:, 0] ** 2 + (coords[:, 1] - means[:, 1]) ** 2 / sigmas[:, 1] ** 2) / 2) return exp / normalization def render_points_as_2d_gaussians(points, sigmas, image_shape, normalize=True): device = points.device n_points = points.shape[0] yy, xx = torch.meshgrid(torch.arange(image_shape[0]).to(device), torch.arange(image_shape[1]).to(device)) grid = torch.stack([xx, yy], dim=-1).type(torch.float32) grid = grid.unsqueeze(0).repeat(n_points, 1, 1, 1) # (n_points, h, w, 2) grid = grid.reshape((-1, 2)) points = points.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1) points = points.reshape(-1, 2) sigmas = sigmas.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1) sigmas = sigmas.reshape(-1, 2) images = gaussian_2d_pdf(grid, points, sigmas, normalize=normalize) images = images.reshape(n_points, *image_shape) return images
42.730964
162
0.68698
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mvn.utils.img import to_numpy, to_torch from mvn.utils import multiview def integrate_tensor_2d(heatmaps, softmax=True): batch_size, n_heatmaps, h, w = heatmaps.shape heatmaps = heatmaps.reshape((batch_size, n_heatmaps, -1)) if softmax: heatmaps = nn.functional.softmax(heatmaps, dim=2) else: heatmaps = nn.functional.relu(heatmaps) heatmaps = heatmaps.reshape((batch_size, n_heatmaps, h, w)) mass_x = heatmaps.sum(dim=2) mass_y = heatmaps.sum(dim=3) mass_times_coord_x = mass_x * torch.arange(w).type(torch.float).to(mass_x.device) mass_times_coord_y = mass_y * torch.arange(h).type(torch.float).to(mass_y.device) x = mass_times_coord_x.sum(dim=2, keepdim=True) y = mass_times_coord_y.sum(dim=2, keepdim=True) if not softmax: x = x / mass_x.sum(dim=2, keepdim=True) y = y / mass_y.sum(dim=2, keepdim=True) coordinates = torch.cat((x, y), dim=2) coordinates = coordinates.reshape((batch_size, n_heatmaps, 2)) return coordinates def integrate_tensor_3d(volumes, softmax=True): batch_size, n_volumes, x_size, y_size, z_size = volumes.shape volumes = volumes.reshape((batch_size, n_volumes, -1)) if softmax: volumes = nn.functional.softmax(volumes, dim=2) else: volumes = nn.functional.relu(volumes) volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size)) mass_x = volumes.sum(dim=3).sum(dim=3) mass_y = volumes.sum(dim=2).sum(dim=3) mass_z = volumes.sum(dim=2).sum(dim=2) mass_times_coord_x = mass_x * torch.arange(x_size).type(torch.float).to(mass_x.device) mass_times_coord_y = mass_y * torch.arange(y_size).type(torch.float).to(mass_y.device) mass_times_coord_z = mass_z * torch.arange(z_size).type(torch.float).to(mass_z.device) x = mass_times_coord_x.sum(dim=2, keepdim=True) y = mass_times_coord_y.sum(dim=2, keepdim=True) z = mass_times_coord_z.sum(dim=2, keepdim=True) if not softmax: x = x / mass_x.sum(dim=2, keepdim=True) y = y / mass_y.sum(dim=2, keepdim=True) z = z / 
mass_z.sum(dim=2, keepdim=True) coordinates = torch.cat((x, y, z), dim=2) coordinates = coordinates.reshape((batch_size, n_volumes, 3)) return coordinates, volumes def integrate_tensor_3d_with_coordinates(volumes, coord_volumes, softmax=True): batch_size, n_volumes, x_size, y_size, z_size = volumes.shape volumes = volumes.reshape((batch_size, n_volumes, -1)) if softmax: volumes = nn.functional.softmax(volumes, dim=2) else: volumes = nn.functional.relu(volumes) volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size)) coordinates = torch.einsum("bnxyz, bxyzc -> bnc", volumes, coord_volumes) return coordinates def unproject_heatmaps(heatmaps, proj_matricies, coord_volumes, volume_aggregation_method='sum', vol_confidences=None): device = heatmaps.device batch_size, n_views, n_joints, heatmap_shape = heatmaps.shape[0], heatmaps.shape[1], heatmaps.shape[2], tuple(heatmaps.shape[3:]) volume_shape = coord_volumes.shape[1:4] volume_batch = torch.zeros(batch_size, n_joints, *volume_shape, device=device) for batch_i in range(batch_size): coord_volume = coord_volumes[batch_i] grid_coord = coord_volume.reshape((-1, 3)) volume_batch_to_aggregate = torch.zeros(n_views, n_joints, *volume_shape, device=device) for view_i in range(n_views): heatmap = heatmaps[batch_i, view_i] heatmap = heatmap.unsqueeze(0) grid_coord_proj = multiview.project_3d_points_to_image_plane_without_distortion( proj_matricies[batch_i, view_i], grid_coord, convert_back_to_euclidean=False ) invalid_mask = grid_coord_proj[:, 2] <= 0.0 grid_coord_proj[grid_coord_proj[:, 2] == 0.0, 2] = 1.0 grid_coord_proj = multiview.homogeneous_to_euclidean(grid_coord_proj) grid_coord_proj_transformed = torch.zeros_like(grid_coord_proj) grid_coord_proj_transformed[:, 0] = 2 * (grid_coord_proj[:, 0] / heatmap_shape[0] - 0.5) grid_coord_proj_transformed[:, 1] = 2 * (grid_coord_proj[:, 1] / heatmap_shape[1] - 0.5) grid_coord_proj = grid_coord_proj_transformed grid_coord_proj = 
grid_coord_proj.unsqueeze(1).unsqueeze(0) try: current_volume = F.grid_sample(heatmap, grid_coord_proj, align_corners=True) except TypeError: current_volume = F.grid_sample(heatmap, grid_coord_proj) current_volume = current_volume.view(n_joints, -1) current_volume[:, invalid_mask] = 0.0 current_volume = current_volume.view(n_joints, *volume_shape) volume_batch_to_aggregate[view_i] = current_volume if volume_aggregation_method.startswith('conf'): volume_batch[batch_i] = (volume_batch_to_aggregate * vol_confidences[batch_i].view(n_views, n_joints, 1, 1, 1)).sum(0) elif volume_aggregation_method == 'sum': volume_batch[batch_i] = volume_batch_to_aggregate.sum(0) elif volume_aggregation_method == 'max': volume_batch[batch_i] = volume_batch_to_aggregate.max(0)[0] elif volume_aggregation_method == 'softmax': volume_batch_to_aggregate_softmin = volume_batch_to_aggregate.clone() volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, -1) volume_batch_to_aggregate_softmin = nn.functional.softmax(volume_batch_to_aggregate_softmin, dim=0) volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, n_joints, *volume_shape) volume_batch[batch_i] = (volume_batch_to_aggregate * volume_batch_to_aggregate_softmin).sum(0) else: raise ValueError("Unknown volume_aggregation_method: {}".format(volume_aggregation_method)) return volume_batch def gaussian_2d_pdf(coords, means, sigmas, normalize=True): normalization = 1.0 if normalize: normalization = (2 * np.pi * sigmas[:, 0] * sigmas[:, 0]) exp = torch.exp(-((coords[:, 0] - means[:, 0]) ** 2 / sigmas[:, 0] ** 2 + (coords[:, 1] - means[:, 1]) ** 2 / sigmas[:, 1] ** 2) / 2) return exp / normalization def render_points_as_2d_gaussians(points, sigmas, image_shape, normalize=True): device = points.device n_points = points.shape[0] yy, xx = torch.meshgrid(torch.arange(image_shape[0]).to(device), torch.arange(image_shape[1]).to(device)) grid = torch.stack([xx, yy], dim=-1).type(torch.float32) 
grid = grid.unsqueeze(0).repeat(n_points, 1, 1, 1) grid = grid.reshape((-1, 2)) points = points.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1) points = points.reshape(-1, 2) sigmas = sigmas.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1) sigmas = sigmas.reshape(-1, 2) images = gaussian_2d_pdf(grid, points, sigmas, normalize=normalize) images = images.reshape(n_points, *image_shape) return images
true
true
f703225523b2675260fc0159bf5fae1de004c8d2
2,319
py
Python
src/sas2sqlite3/adapters.py
ericgj/sas2sqlite
f404dc862886f5a052bfc2dc492b4d0ec0773b31
[ "MIT" ]
null
null
null
src/sas2sqlite3/adapters.py
ericgj/sas2sqlite
f404dc862886f5a052bfc2dc492b4d0ec0773b31
[ "MIT" ]
null
null
null
src/sas2sqlite3/adapters.py
ericgj/sas2sqlite
f404dc862886f5a052bfc2dc492b4d0ec0773b31
[ "MIT" ]
null
null
null
from calendar import timegm from datetime import date, datetime, time import sqlite3 from typing import Callable import julian # type: ignore def store_time(time_type: str, time_format: str = "") -> None: if time_type == "seconds": sqlite3.register_adapter(time, time_to_seconds) elif time_type == "text": sqlite3.register_adapter(time, time_to_text(time_format)) else: raise ValueError(f"Unknown time adapter: '{time_type}'") def store_date(date_type: str, date_format: str = "") -> None: if date_type == "julian": sqlite3.register_adapter(date, date_to_julian) elif date_type == "posix": sqlite3.register_adapter(date, date_to_posix) elif date_type == "text": sqlite3.register_adapter(date, date_to_text(date_format)) else: raise ValueError(f"Unknown date adapter: '{date_type}'") def store_datetime(datetime_type: str, datetime_format: str = "") -> None: if datetime_type == "julian": sqlite3.register_adapter(datetime, datetime_to_julian) elif datetime_type == "posix": sqlite3.register_adapter(datetime, datetime_to_posix) elif datetime_type == "text": sqlite3.register_adapter(datetime, datetime_to_text(datetime_format)) else: raise ValueError(f"Unknown datetime adapter: '{datetime_type}'") def time_to_seconds(t: time) -> float: return (60 * 60 * t.hour) + (60 * t.minute) + t.second + t.microsecond def time_to_text(format: str) -> Callable[[time], str]: def _time_to_text(t: time) -> str: return t.strftime(format) return _time_to_text def date_to_posix(d: date) -> int: return datetime_to_posix(datetime(d.year, d.month, d.day)) def date_to_julian(d: date) -> float: return datetime_to_julian(datetime(d.year, d.month, d.day)) def date_to_text(format: str) -> Callable[[date], str]: def _date_to_text(d: date) -> str: return d.strftime(format) return _date_to_text def datetime_to_posix(dt: datetime) -> int: return timegm(dt.utctimetuple()) def datetime_to_julian(dt: datetime) -> float: return float(julian.to_jd(dt)) def datetime_to_text(format: str) -> Callable[[datetime], str]: def 
_datetime_to_text(dt: datetime) -> str: return dt.strftime(format) return _datetime_to_text
29.35443
77
0.692109
from calendar import timegm from datetime import date, datetime, time import sqlite3 from typing import Callable import julian def store_time(time_type: str, time_format: str = "") -> None: if time_type == "seconds": sqlite3.register_adapter(time, time_to_seconds) elif time_type == "text": sqlite3.register_adapter(time, time_to_text(time_format)) else: raise ValueError(f"Unknown time adapter: '{time_type}'") def store_date(date_type: str, date_format: str = "") -> None: if date_type == "julian": sqlite3.register_adapter(date, date_to_julian) elif date_type == "posix": sqlite3.register_adapter(date, date_to_posix) elif date_type == "text": sqlite3.register_adapter(date, date_to_text(date_format)) else: raise ValueError(f"Unknown date adapter: '{date_type}'") def store_datetime(datetime_type: str, datetime_format: str = "") -> None: if datetime_type == "julian": sqlite3.register_adapter(datetime, datetime_to_julian) elif datetime_type == "posix": sqlite3.register_adapter(datetime, datetime_to_posix) elif datetime_type == "text": sqlite3.register_adapter(datetime, datetime_to_text(datetime_format)) else: raise ValueError(f"Unknown datetime adapter: '{datetime_type}'") def time_to_seconds(t: time) -> float: return (60 * 60 * t.hour) + (60 * t.minute) + t.second + t.microsecond def time_to_text(format: str) -> Callable[[time], str]: def _time_to_text(t: time) -> str: return t.strftime(format) return _time_to_text def date_to_posix(d: date) -> int: return datetime_to_posix(datetime(d.year, d.month, d.day)) def date_to_julian(d: date) -> float: return datetime_to_julian(datetime(d.year, d.month, d.day)) def date_to_text(format: str) -> Callable[[date], str]: def _date_to_text(d: date) -> str: return d.strftime(format) return _date_to_text def datetime_to_posix(dt: datetime) -> int: return timegm(dt.utctimetuple()) def datetime_to_julian(dt: datetime) -> float: return float(julian.to_jd(dt)) def datetime_to_text(format: str) -> Callable[[datetime], str]: def 
_datetime_to_text(dt: datetime) -> str: return dt.strftime(format) return _datetime_to_text
true
true
f7032289beb6682222976cb9462e8cd05bc9a850
506
py
Python
origamid/main.py
Cloud-CV/origami-daemon
c4179352c8f8ed86193d1a04d340661a9f24ff6d
[ "BSD-3-Clause" ]
4
2018-06-16T20:07:33.000Z
2019-02-14T06:35:18.000Z
origamid/main.py
Cloud-CV/origami-daemon
c4179352c8f8ed86193d1a04d340661a9f24ff6d
[ "BSD-3-Clause" ]
20
2018-06-13T13:35:50.000Z
2019-06-01T22:12:53.000Z
origamid/main.py
Cloud-CV/origami-daemon
c4179352c8f8ed86193d1a04d340661a9f24ff6d
[ "BSD-3-Clause" ]
7
2018-09-05T10:37:30.000Z
2021-10-04T03:46:18.000Z
import click import logging from .constants import WELCOME_TEXT from .api import run_server from .logger import OrigamiLogger logger = OrigamiLogger( file_log_level=logging.DEBUG, console_log_level=logging.DEBUG) @click.group(invoke_without_command=True) @click.pass_context def main(ctx): """ Origami daemon is an application which deploys and manages demos on CloudCV servers. """ if not ctx.invoked_subcommand: click.echo(WELCOME_TEXT) main.add_command(run_server)
21.083333
71
0.764822
import click import logging from .constants import WELCOME_TEXT from .api import run_server from .logger import OrigamiLogger logger = OrigamiLogger( file_log_level=logging.DEBUG, console_log_level=logging.DEBUG) @click.group(invoke_without_command=True) @click.pass_context def main(ctx): if not ctx.invoked_subcommand: click.echo(WELCOME_TEXT) main.add_command(run_server)
true
true
f7032363fc14f699405a1f015afdc99f3377b608
11,524
py
Python
python/day18.py
simmsb/advent-of-code-2021
6ba2a2a9a2ccf6f0ae0328eb1b1add00a655590a
[ "MIT" ]
null
null
null
python/day18.py
simmsb/advent-of-code-2021
6ba2a2a9a2ccf6f0ae0328eb1b1add00a655590a
[ "MIT" ]
null
null
null
python/day18.py
simmsb/advent-of-code-2021
6ba2a2a9a2ccf6f0ae0328eb1b1add00a655590a
[ "MIT" ]
null
null
null
from __future__ import annotations import itertools import math from dataclasses import dataclass from typing import Any @dataclass class TreeZipper: inner: Any path: list[int] def up(self): if self.path: return TreeZipper(self.inner, self.path[:-1]), self.path[-1] return None def get(self): v = self.inner for p in self.path: v = v[p] return v def set(self, x): v = self.inner for p in self.path[:-1]: v = v[p] v[self.path[-1]] = x def try_left(self): v = self.get() if isinstance(v, list): return TreeZipper(self.inner, self.path + [0]) return None def try_right(self): v = self.get() if isinstance(v, list): return TreeZipper(self.inner, self.path + [1]) return None class Whoop(Exception): pass def do_reduce_exp(v: TreeZipper, depth): if depth == 4 and isinstance(v.get(), list): # print("exploding") l, r = v.get() v.set(0) l_v = v came_from_left = False dont_go = False while True: # print("left", l_v, l_v.get()) if (l_v_n := l_v.try_left()) != None and not came_from_left: l_v = l_v_n break elif (l_v_n_v := l_v.up()) != None: # if we can up and didn't go left, do so l_v = l_v_n_v[0] came_from_left = l_v_n_v[1] == 0 else: dont_go = True # if we did nothing, we have to have reached the top and we were already from the left break if not dont_go: while True: if (l_v_n := l_v.try_right()) != None: l_v = l_v_n # try to go down and to the left if isinstance(l_v.get(), int): # if it's an int, add and quit l_v.set(l_v.get() + l) break l_v = v came_from_right = False dont_go = False while True: # print("right", l_v, l_v.get()) if (l_v_n := l_v.try_right()) != None and not came_from_right: l_v = l_v_n break elif (l_v_n_v := l_v.up()) != None: # if we can up and didn't go left, do so l_v = l_v_n_v[0] came_from_right = l_v_n_v[1] == 1 else: # if we did nothing, we have to have reached the top, bail dont_go = True break if not dont_go: while True: if (l_v_n := l_v.try_left()) != None: l_v = l_v_n # try to go down and to the left if isinstance(l_v.get(), int): # if it's an int, add 
and quit l_v.set(l_v.get() + r) break raise Whoop() if (l_v := v.try_left()) != None: do_reduce_exp(l_v, depth + 1) if (r_v := v.try_right()) != None: do_reduce_exp(r_v, depth + 1) def do_reduce_splt(v: TreeZipper): n_v = v.get() if isinstance(n_v, int): if n_v >= 10: # print("splitting") l_v = math.floor(n_v / 2) r_v = math.ceil(n_v / 2) v.set([l_v, r_v]) raise Whoop() # otherwise, go and reduce both sides if (l_v := v.try_left()) != None: do_reduce_splt(l_v) if (r_v := v.try_right()) != None: do_reduce_splt(r_v) def iter_red(l): # print("doing", l) while True: t = TreeZipper(l, []) try: # print(l) do_reduce_exp(t, 0) do_reduce_splt(t) except Whoop: pass else: print("did nothing") return def do_mag(v: TreeZipper): if isinstance(v.get(), int): return v.get() return 3 * do_mag(v.try_left()) + 2 * do_mag(v.try_right()) inp = [ [[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]], [[[3,6],[9,4]],[[[5,9],5],[8,0]]], [[[2,2],2],[1,[[1,6],7]]], [[[[0,9],7],[[3,2],8]],[6,[7,9]]], [[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]], [[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]], [[4,[8,2]],[1,[0,5]]], [8,[8,7]], [[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]], [[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]], [0,7], [[[[5,1],3],[8,[5,3]]],7], [[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]], [[[[3,4],2],0],4], [[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]], [[[3,[2,5]],[3,3]],7], [[[[5,1],1],[4,8]],[[5,[8,3]],2]], [[4,[[8,1],[8,5]]],[[[4,1],0],6]], [[[5,5],[5,9]],[0,[[6,8],[0,1]]]], [4,[[[7,9],4],0]], [[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]], [[[7,7],[8,0]],[6,[8,[7,9]]]], [[[9,2],1],6], [[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]], [[2,[[4,7],5]],1], [[8,7],[[[2,0],7],[1,[0,3]]]], [[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]], [[3,4],[[9,4],5]], [[5,[[8,3],5]],1], [[0,[[9,0],[3,2]]],[2,[7,[5,1]]]], [[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]], [[[1,[5,2]],9],[[4,6],[3,[8,0]]]], [[1,7],[[1,7],9]], [[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]], [[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]], [[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]], 
[[[8,9],0],3], [[[1,7],[1,[3,9]]],[6,[0,[8,5]]]], [[0,5],[6,5]], [[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]], [[8,[[0,9],8]],[9,[7,[7,9]]]], [0,[[[7,1],2],[[0,4],4]]], [[0,[[9,1],5]],[1,4]], [3,4], [[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]], [[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]], [8,[[1,[3,0]],[[7,9],4]]], [[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]], [[3,[[9,6],6]],2], [[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]], [[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]], [[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]], [[3,[7,1]],[9,[[1,8],7]]], [[9,1],[0,[[0,7],[7,1]]]], [[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]], [8,[[[2,1],[6,9]],[[3,3],[4,6]]]], [0,[7,[3,0]]], [[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]], [[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]], [[[[5,1],4],[[1,2],1]],7], [[[3,[7,5]],7],3], [[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]], [2,7], [[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]], [[[0,[6,4]],2],[4,[7,[7,5]]]], [[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]], [[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]], [[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]], [[[3,[0,2]],3],8], [[[4,[4,9]],9],[[[4,4],5],9]], [[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]], [[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]], [[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]], [[9,[[9,1],5]],[[4,[1,1]],2]], [[[[7,4],[0,3]],7],[8,[6,[3,3]]]], [5,5], [[6,7],[1,[7,[8,1]]]], [[1,[0,4]],7], [[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]], [[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]], [[[[8,3],7],5],[9,[[5,1],8]]], [[[[4,0],[5,2]],[[0,0],7]],2], [[[[0,1],6],2],[[8,2],6]], [[[[2,4],1],[[6,7],9]],[[[1,6],9],3]], [[5,5],[[8,[7,7]],[5,8]]], [[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]], [[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]], [[7,[6,2]],[[9,[5,2]],[1,4]]], [[[7,[5,9]],[[3,9],[4,5]]],[0,6]], [[9,[8,[2,2]]],[[9,7],[1,1]]], [[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]], [[0,[[9,3],0]],[8,8]], [[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]], [[9,[[8,3],[5,8]]],[[7,[3,0]],3]], [[[4,[4,2]],0],1], [[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]], [[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]], 
[[3,[5,[0,3]]],[5,4]], [[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]], [[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]] ] inp = [ [[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]], [[[5,[2,8]],4],[5,[[9,9],0]]], [6,[[[6,2],[5,6]],[[7,6],[4,7]]]], [[[6,[0,7]],[0,9]],[4,[9,[9,0]]]], [[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]], [[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]], [[[[5,4],[7,7]],8],[[8,3],8]], [[9,3],[[9,9],[6,[4,9]]]], [[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]], [[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]] ] # inp = [ # [[[[[7,0],[7,7]],[[7,7],[7,8]]],[[[7,7],[8,8]],[[7,7],[8,7]]]],[7,[5,[[3,8],[1,4]]]]] # ] def do_add(l): it = iter(l) x = next(it) iter_red(x) for y in it: x = [x, y] iter_red(x) return x out = do_add(inp) print(out) print(do_mag(TreeZipper(out, []))) import copy inp = [ [[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]], [[[3,6],[9,4]],[[[5,9],5],[8,0]]], [[[2,2],2],[1,[[1,6],7]]], [[[[0,9],7],[[3,2],8]],[6,[7,9]]], [[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]], [[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]], [[4,[8,2]],[1,[0,5]]], [8,[8,7]], [[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]], [[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]], [0,7], [[[[5,1],3],[8,[5,3]]],7], [[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]], [[[[3,4],2],0],4], [[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]], [[[3,[2,5]],[3,3]],7], [[[[5,1],1],[4,8]],[[5,[8,3]],2]], [[4,[[8,1],[8,5]]],[[[4,1],0],6]], [[[5,5],[5,9]],[0,[[6,8],[0,1]]]], [4,[[[7,9],4],0]], [[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]], [[[7,7],[8,0]],[6,[8,[7,9]]]], [[[9,2],1],6], [[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]], [[2,[[4,7],5]],1], [[8,7],[[[2,0],7],[1,[0,3]]]], [[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]], [[3,4],[[9,4],5]], [[5,[[8,3],5]],1], [[0,[[9,0],[3,2]]],[2,[7,[5,1]]]], [[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]], [[[1,[5,2]],9],[[4,6],[3,[8,0]]]], [[1,7],[[1,7],9]], [[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]], [[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]], [[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]], [[[8,9],0],3], [[[1,7],[1,[3,9]]],[6,[0,[8,5]]]], [[0,5],[6,5]], 
[[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]], [[8,[[0,9],8]],[9,[7,[7,9]]]], [0,[[[7,1],2],[[0,4],4]]], [[0,[[9,1],5]],[1,4]], [3,4], [[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]], [[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]], [8,[[1,[3,0]],[[7,9],4]]], [[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]], [[3,[[9,6],6]],2], [[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]], [[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]], [[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]], [[3,[7,1]],[9,[[1,8],7]]], [[9,1],[0,[[0,7],[7,1]]]], [[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]], [8,[[[2,1],[6,9]],[[3,3],[4,6]]]], [0,[7,[3,0]]], [[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]], [[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]], [[[[5,1],4],[[1,2],1]],7], [[[3,[7,5]],7],3], [[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]], [2,7], [[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]], [[[0,[6,4]],2],[4,[7,[7,5]]]], [[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]], [[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]], [[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]], [[[3,[0,2]],3],8], [[[4,[4,9]],9],[[[4,4],5],9]], [[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]], [[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]], [[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]], [[9,[[9,1],5]],[[4,[1,1]],2]], [[[[7,4],[0,3]],7],[8,[6,[3,3]]]], [5,5], [[6,7],[1,[7,[8,1]]]], [[1,[0,4]],7], [[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]], [[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]], [[[[8,3],7],5],[9,[[5,1],8]]], [[[[4,0],[5,2]],[[0,0],7]],2], [[[[0,1],6],2],[[8,2],6]], [[[[2,4],1],[[6,7],9]],[[[1,6],9],3]], [[5,5],[[8,[7,7]],[5,8]]], [[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]], [[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]], [[7,[6,2]],[[9,[5,2]],[1,4]]], [[[7,[5,9]],[[3,9],[4,5]]],[0,6]], [[9,[8,[2,2]]],[[9,7],[1,1]]], [[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]], [[0,[[9,3],0]],[8,8]], [[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]], [[9,[[8,3],[5,8]]],[[7,[3,0]],3]], [[[4,[4,2]],0],1], [[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]], [[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]], [[3,[5,[0,3]]],[5,4]], [[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]], 
[[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]] ] m_v = 0 for l, r in itertools.permutations(inp, 2): l = copy.deepcopy(l) r = copy.deepcopy(r) v = [l, r] print(f"{l=} {r=}") do_add(v) m_v = max(do_mag(TreeZipper(v, [])), m_v) print(m_v)
28.245098
102
0.345106
from __future__ import annotations import itertools import math from dataclasses import dataclass from typing import Any @dataclass class TreeZipper: inner: Any path: list[int] def up(self): if self.path: return TreeZipper(self.inner, self.path[:-1]), self.path[-1] return None def get(self): v = self.inner for p in self.path: v = v[p] return v def set(self, x): v = self.inner for p in self.path[:-1]: v = v[p] v[self.path[-1]] = x def try_left(self): v = self.get() if isinstance(v, list): return TreeZipper(self.inner, self.path + [0]) return None def try_right(self): v = self.get() if isinstance(v, list): return TreeZipper(self.inner, self.path + [1]) return None class Whoop(Exception): pass def do_reduce_exp(v: TreeZipper, depth): if depth == 4 and isinstance(v.get(), list): l, r = v.get() v.set(0) l_v = v came_from_left = False dont_go = False while True: if (l_v_n := l_v.try_left()) != None and not came_from_left: l_v = l_v_n break elif (l_v_n_v := l_v.up()) != None: l_v = l_v_n_v[0] came_from_left = l_v_n_v[1] == 0 else: dont_go = True # if we did nothing, we have to have reached the top and we were already from the left break if not dont_go: while True: if (l_v_n := l_v.try_right()) != None: l_v = l_v_n # try to go down and to the left if isinstance(l_v.get(), int): # if it's an int, add and quit l_v.set(l_v.get() + l) break l_v = v came_from_right = False dont_go = False while True: if (l_v_n := l_v.try_right()) != None and not came_from_right: l_v = l_v_n break elif (l_v_n_v := l_v.up()) != None: l_v = l_v_n_v[0] came_from_right = l_v_n_v[1] == 1 else: # if we did nothing, we have to have reached the top, bail dont_go = True break if not dont_go: while True: if (l_v_n := l_v.try_left()) != None: l_v = l_v_n # try to go down and to the left if isinstance(l_v.get(), int): # if it's an int, add and quit l_v.set(l_v.get() + r) break raise Whoop() if (l_v := v.try_left()) != None: do_reduce_exp(l_v, depth + 1) if (r_v := v.try_right()) != None: do_reduce_exp(r_v, 
depth + 1) def do_reduce_splt(v: TreeZipper): n_v = v.get() if isinstance(n_v, int): if n_v >= 10: l_v = math.floor(n_v / 2) r_v = math.ceil(n_v / 2) v.set([l_v, r_v]) raise Whoop() if (l_v := v.try_left()) != None: do_reduce_splt(l_v) if (r_v := v.try_right()) != None: do_reduce_splt(r_v) def iter_red(l): while True: t = TreeZipper(l, []) try: do_reduce_exp(t, 0) do_reduce_splt(t) except Whoop: pass else: print("did nothing") return def do_mag(v: TreeZipper): if isinstance(v.get(), int): return v.get() return 3 * do_mag(v.try_left()) + 2 * do_mag(v.try_right()) inp = [ [[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]], [[[3,6],[9,4]],[[[5,9],5],[8,0]]], [[[2,2],2],[1,[[1,6],7]]], [[[[0,9],7],[[3,2],8]],[6,[7,9]]], [[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]], [[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]], [[4,[8,2]],[1,[0,5]]], [8,[8,7]], [[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]], [[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]], [0,7], [[[[5,1],3],[8,[5,3]]],7], [[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]], [[[[3,4],2],0],4], [[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]], [[[3,[2,5]],[3,3]],7], [[[[5,1],1],[4,8]],[[5,[8,3]],2]], [[4,[[8,1],[8,5]]],[[[4,1],0],6]], [[[5,5],[5,9]],[0,[[6,8],[0,1]]]], [4,[[[7,9],4],0]], [[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]], [[[7,7],[8,0]],[6,[8,[7,9]]]], [[[9,2],1],6], [[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]], [[2,[[4,7],5]],1], [[8,7],[[[2,0],7],[1,[0,3]]]], [[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]], [[3,4],[[9,4],5]], [[5,[[8,3],5]],1], [[0,[[9,0],[3,2]]],[2,[7,[5,1]]]], [[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]], [[[1,[5,2]],9],[[4,6],[3,[8,0]]]], [[1,7],[[1,7],9]], [[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]], [[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]], [[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]], [[[8,9],0],3], [[[1,7],[1,[3,9]]],[6,[0,[8,5]]]], [[0,5],[6,5]], [[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]], [[8,[[0,9],8]],[9,[7,[7,9]]]], [0,[[[7,1],2],[[0,4],4]]], [[0,[[9,1],5]],[1,4]], [3,4], [[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]], 
[[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]], [8,[[1,[3,0]],[[7,9],4]]], [[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]], [[3,[[9,6],6]],2], [[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]], [[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]], [[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]], [[3,[7,1]],[9,[[1,8],7]]], [[9,1],[0,[[0,7],[7,1]]]], [[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]], [8,[[[2,1],[6,9]],[[3,3],[4,6]]]], [0,[7,[3,0]]], [[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]], [[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]], [[[[5,1],4],[[1,2],1]],7], [[[3,[7,5]],7],3], [[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]], [2,7], [[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]], [[[0,[6,4]],2],[4,[7,[7,5]]]], [[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]], [[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]], [[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]], [[[3,[0,2]],3],8], [[[4,[4,9]],9],[[[4,4],5],9]], [[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]], [[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]], [[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]], [[9,[[9,1],5]],[[4,[1,1]],2]], [[[[7,4],[0,3]],7],[8,[6,[3,3]]]], [5,5], [[6,7],[1,[7,[8,1]]]], [[1,[0,4]],7], [[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]], [[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]], [[[[8,3],7],5],[9,[[5,1],8]]], [[[[4,0],[5,2]],[[0,0],7]],2], [[[[0,1],6],2],[[8,2],6]], [[[[2,4],1],[[6,7],9]],[[[1,6],9],3]], [[5,5],[[8,[7,7]],[5,8]]], [[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]], [[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]], [[7,[6,2]],[[9,[5,2]],[1,4]]], [[[7,[5,9]],[[3,9],[4,5]]],[0,6]], [[9,[8,[2,2]]],[[9,7],[1,1]]], [[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]], [[0,[[9,3],0]],[8,8]], [[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]], [[9,[[8,3],[5,8]]],[[7,[3,0]],3]], [[[4,[4,2]],0],1], [[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]], [[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]], [[3,[5,[0,3]]],[5,4]], [[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]], [[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]] ] inp = [ [[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]], [[[5,[2,8]],4],[5,[[9,9],0]]], [6,[[[6,2],[5,6]],[[7,6],[4,7]]]], 
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]], [[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]], [[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]], [[[[5,4],[7,7]],8],[[8,3],8]], [[9,3],[[9,9],[6,[4,9]]]], [[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]], [[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]] ] def do_add(l): it = iter(l) x = next(it) iter_red(x) for y in it: x = [x, y] iter_red(x) return x out = do_add(inp) print(out) print(do_mag(TreeZipper(out, []))) import copy inp = [ [[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]], [[[3,6],[9,4]],[[[5,9],5],[8,0]]], [[[2,2],2],[1,[[1,6],7]]], [[[[0,9],7],[[3,2],8]],[6,[7,9]]], [[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]], [[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]], [[4,[8,2]],[1,[0,5]]], [8,[8,7]], [[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]], [[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]], [0,7], [[[[5,1],3],[8,[5,3]]],7], [[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]], [[[[3,4],2],0],4], [[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]], [[[3,[2,5]],[3,3]],7], [[[[5,1],1],[4,8]],[[5,[8,3]],2]], [[4,[[8,1],[8,5]]],[[[4,1],0],6]], [[[5,5],[5,9]],[0,[[6,8],[0,1]]]], [4,[[[7,9],4],0]], [[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]], [[[7,7],[8,0]],[6,[8,[7,9]]]], [[[9,2],1],6], [[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]], [[2,[[4,7],5]],1], [[8,7],[[[2,0],7],[1,[0,3]]]], [[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]], [[3,4],[[9,4],5]], [[5,[[8,3],5]],1], [[0,[[9,0],[3,2]]],[2,[7,[5,1]]]], [[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]], [[[1,[5,2]],9],[[4,6],[3,[8,0]]]], [[1,7],[[1,7],9]], [[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]], [[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]], [[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]], [[[8,9],0],3], [[[1,7],[1,[3,9]]],[6,[0,[8,5]]]], [[0,5],[6,5]], [[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]], [[8,[[0,9],8]],[9,[7,[7,9]]]], [0,[[[7,1],2],[[0,4],4]]], [[0,[[9,1],5]],[1,4]], [3,4], [[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]], [[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]], [8,[[1,[3,0]],[[7,9],4]]], [[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]], [[3,[[9,6],6]],2], 
[[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]], [[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]], [[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]], [[3,[7,1]],[9,[[1,8],7]]], [[9,1],[0,[[0,7],[7,1]]]], [[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]], [8,[[[2,1],[6,9]],[[3,3],[4,6]]]], [0,[7,[3,0]]], [[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]], [[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]], [[[[5,1],4],[[1,2],1]],7], [[[3,[7,5]],7],3], [[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]], [2,7], [[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]], [[[0,[6,4]],2],[4,[7,[7,5]]]], [[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]], [[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]], [[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]], [[[3,[0,2]],3],8], [[[4,[4,9]],9],[[[4,4],5],9]], [[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]], [[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]], [[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]], [[9,[[9,1],5]],[[4,[1,1]],2]], [[[[7,4],[0,3]],7],[8,[6,[3,3]]]], [5,5], [[6,7],[1,[7,[8,1]]]], [[1,[0,4]],7], [[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]], [[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]], [[[[8,3],7],5],[9,[[5,1],8]]], [[[[4,0],[5,2]],[[0,0],7]],2], [[[[0,1],6],2],[[8,2],6]], [[[[2,4],1],[[6,7],9]],[[[1,6],9],3]], [[5,5],[[8,[7,7]],[5,8]]], [[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]], [[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]], [[7,[6,2]],[[9,[5,2]],[1,4]]], [[[7,[5,9]],[[3,9],[4,5]]],[0,6]], [[9,[8,[2,2]]],[[9,7],[1,1]]], [[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]], [[0,[[9,3],0]],[8,8]], [[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]], [[9,[[8,3],[5,8]]],[[7,[3,0]],3]], [[[4,[4,2]],0],1], [[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]], [[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]], [[3,[5,[0,3]]],[5,4]], [[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]], [[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]] ] m_v = 0 for l, r in itertools.permutations(inp, 2): l = copy.deepcopy(l) r = copy.deepcopy(r) v = [l, r] print(f"{l=} {r=}") do_add(v) m_v = max(do_mag(TreeZipper(v, [])), m_v) print(m_v)
true
true
f703237c7b21dfe2701fe1fbdb4022c1eba0e043
4,805
py
Python
tests/unit/test_protocols.py
ebi-ait/ingest-archiver
de0676fc3f750bc5abd3d42f140a142c3b972910
[ "Apache-2.0" ]
1
2020-09-15T10:29:18.000Z
2020-09-15T10:29:18.000Z
tests/unit/test_protocols.py
ebi-ait/ingest-archiver
de0676fc3f750bc5abd3d42f140a142c3b972910
[ "Apache-2.0" ]
28
2020-01-17T15:37:21.000Z
2022-02-21T15:18:46.000Z
tests/unit/test_protocols.py
ebi-ait/ingest-archiver
de0676fc3f750bc5abd3d42f140a142c3b972910
[ "Apache-2.0" ]
4
2020-05-31T11:49:56.000Z
2020-11-01T06:05:12.000Z
import json from unittest import TestCase from unittest.mock import Mock from utils import protocols from api.ontology import OntologyAPI from utils.protocols import ONTOLOGY_3PRIME_PARENT, ONTOLOGY_5PRIME_PARENT, ONTOLOGY_CITESEQ class TestProtocols(TestCase): def setUp(self) -> None: self.ontology_api = Mock() def test_is_10x__when_equal_3prime_parent__returns_true(self): # given lib_prep_protocol = { 'content': { 'library_construction_method': { 'ontology': ONTOLOGY_3PRIME_PARENT } } } # when is10x = protocols.is_10x(OntologyAPI(), lib_prep_protocol) # then self.assertTrue(is10x) def test_is_10x__when_equal_5prime_parent__returns_true(self): # given lib_prep_protocol = { 'content': { 'library_construction_method': { 'ontology': ONTOLOGY_5PRIME_PARENT } } } # when is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol) # then self.assertTrue(is10x) def test_is_10x__when_equal_citeseq__returns_true(self): # given lib_prep_protocol = { 'content': { 'library_construction_method': { 'ontology': ONTOLOGY_CITESEQ } } } # when is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol) # then self.assertTrue(is10x) def test_is_10x__when_not_descendant__returns_false(self): lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0000000", } } } self.ontology_api.is_equal_or_descendant = Mock(return_value=False) is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol) self.assertFalse(is10x) def test_map_bam_schema__when_equals_citeseq__returns_10xV2(self): # given lib_prep_protocol = { "content": { "library_construction_method": { "ontology": ONTOLOGY_CITESEQ, } } } # when bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol) # then self.assertEqual(bam_schema, '10xV2') def test_map_bam_schema__when_not_leaf_term__returns_none(self): # given lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0000000", } } } self.ontology_api.get_descendants = 
Mock(return_value=['descendant']) # not leaf term self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 'label': "10x 5' v2"}) # when bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol) # then self.assertEqual(bam_schema, None) def test_map_bam_schema__when_leaf_term__returns_correct_bam_schema(self): # given lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0000000", } } } self.ontology_api.get_descendants = Mock(return_value=[]) # leaf term self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 'label': "10x 5' v2"}) # when bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol) # then self.assertEqual(bam_schema, '10xV2') def test_version_10x_by_label__given_label__return_version(self): # given lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0009294", } } } self.ontology_api.search = Mock(return_value={'label': "10x 5' v2"}) # when bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol) # then self.assertEqual(bam_schema, 'V2') def test_version_10x_by_label__given_label__return_version(self): # given lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0009294", } } } self.ontology_api.search = Mock(return_value={'label': "10x 3' v3"}) # when bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol) # then self.assertEqual(bam_schema, 'V3')
29.478528
115
0.57565
import json from unittest import TestCase from unittest.mock import Mock from utils import protocols from api.ontology import OntologyAPI from utils.protocols import ONTOLOGY_3PRIME_PARENT, ONTOLOGY_5PRIME_PARENT, ONTOLOGY_CITESEQ class TestProtocols(TestCase): def setUp(self) -> None: self.ontology_api = Mock() def test_is_10x__when_equal_3prime_parent__returns_true(self): lib_prep_protocol = { 'content': { 'library_construction_method': { 'ontology': ONTOLOGY_3PRIME_PARENT } } } is10x = protocols.is_10x(OntologyAPI(), lib_prep_protocol) self.assertTrue(is10x) def test_is_10x__when_equal_5prime_parent__returns_true(self): lib_prep_protocol = { 'content': { 'library_construction_method': { 'ontology': ONTOLOGY_5PRIME_PARENT } } } is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol) self.assertTrue(is10x) def test_is_10x__when_equal_citeseq__returns_true(self): lib_prep_protocol = { 'content': { 'library_construction_method': { 'ontology': ONTOLOGY_CITESEQ } } } is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol) self.assertTrue(is10x) def test_is_10x__when_not_descendant__returns_false(self): lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0000000", } } } self.ontology_api.is_equal_or_descendant = Mock(return_value=False) is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol) self.assertFalse(is10x) def test_map_bam_schema__when_equals_citeseq__returns_10xV2(self): lib_prep_protocol = { "content": { "library_construction_method": { "ontology": ONTOLOGY_CITESEQ, } } } bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol) self.assertEqual(bam_schema, '10xV2') def test_map_bam_schema__when_not_leaf_term__returns_none(self): lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0000000", } } } self.ontology_api.get_descendants = Mock(return_value=['descendant']) self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 
'label': "10x 5' v2"}) # when bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol) # then self.assertEqual(bam_schema, None) def test_map_bam_schema__when_leaf_term__returns_correct_bam_schema(self): # given lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0000000", } } } self.ontology_api.get_descendants = Mock(return_value=[]) # leaf term self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 'label': "10x 5' v2"}) bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol) self.assertEqual(bam_schema, '10xV2') def test_version_10x_by_label__given_label__return_version(self): lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0009294", } } } self.ontology_api.search = Mock(return_value={'label': "10x 5' v2"}) # when bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol) # then self.assertEqual(bam_schema, 'V2') def test_version_10x_by_label__given_label__return_version(self): # given lib_prep_protocol = { "content": { "library_construction_method": { "ontology": "EFO:0009294", } } } self.ontology_api.search = Mock(return_value={'label': "10x 3' v3"}) bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol) self.assertEqual(bam_schema, 'V3')
true
true
f703239ebe11327a7f5a7741cddd1a6aaacbbe07
711
py
Python
homepage/summarizer.py
prats1997/Euphorum
16bfee9c71ea5b1332c6263233c79a633ddfdd83
[ "MIT" ]
1
2020-03-01T17:39:04.000Z
2020-03-01T17:39:04.000Z
homepage/summarizer.py
prats1997/Euphorum
16bfee9c71ea5b1332c6263233c79a633ddfdd83
[ "MIT" ]
null
null
null
homepage/summarizer.py
prats1997/Euphorum
16bfee9c71ea5b1332c6263233c79a633ddfdd83
[ "MIT" ]
null
null
null
from sumy.parsers.plaintext import PlaintextParser #We're choosing a plaintext parser here, other parsers available for HTML etc. from sumy.nlp.tokenizers import Tokenizer from sumy.summarizers.lex_rank import LexRankSummarizer #We're choosing Lexrank, other algorithms are also built in def get_summary(text): # file = "plain_text.txt" #name of the plain-text file # parser = PlaintextParser.from_file(file, Tokenizer("english")) parser=PlaintextParser.from_string(text,Tokenizer("English")) summarizer = LexRankSummarizer() summary = summarizer(parser.document, 5) #Summarize the document with 5 sentences # for sentence in summary: # print(sentence) return summary
35.55
129
0.759494
from sumy.parsers.plaintext import PlaintextParser from sumy.nlp.tokenizers import Tokenizer from sumy.summarizers.lex_rank import LexRankSummarizer #We're choosing Lexrank, other algorithms are also built in def get_summary(text): parser=PlaintextParser.from_string(text,Tokenizer("English")) summarizer = LexRankSummarizer() summary = summarizer(parser.document, 5) return summary
true
true
f703249eb0707d1f33a73eb8c4811cfac1b4bc40
863
py
Python
load_and_process.py
ctiger34/BASIC-EMOTION-DETECTION
1c2be519c70408159ea6e1093d5f139c99ea6e27
[ "MIT" ]
18
2019-10-08T10:15:27.000Z
2022-02-21T06:36:57.000Z
load_and_process.py
ctiger34/BASIC-EMOTION-DETECTION
1c2be519c70408159ea6e1093d5f139c99ea6e27
[ "MIT" ]
3
2020-04-17T20:43:58.000Z
2022-02-10T00:21:01.000Z
load_and_process.py
ctiger34/BASIC-EMOTION-DETECTION
1c2be519c70408159ea6e1093d5f139c99ea6e27
[ "MIT" ]
8
2020-03-07T10:17:11.000Z
2021-07-05T05:22:04.000Z
import pandas as pd import cv2 import numpy as np dataset_path = 'fer2013/fer2013/fer2013.csv' image_size=(48,48) def load_fer2013(): data = pd.read_csv(dataset_path) pixels = data['pixels'].tolist() width, height = 48, 48 faces = [] for pixel_sequence in pixels: face = [int(pixel) for pixel in pixel_sequence.split(' ')] face = np.asarray(face).reshape(width, height) face = cv2.resize(face.astype('uint8'),image_size) faces.append(face.astype('float32')) faces = np.asarray(faces) faces = np.expand_dims(faces, -1) emotions = pd.get_dummies(data['emotion']).as_matrix() return faces, emotions def preprocess_input(x, v2=True): x = x.astype('float32') x = x / 255.0 if v2: x = x - 0.5 x = x * 2.0 return x
28.766667
70
0.589803
import pandas as pd import cv2 import numpy as np dataset_path = 'fer2013/fer2013/fer2013.csv' image_size=(48,48) def load_fer2013(): data = pd.read_csv(dataset_path) pixels = data['pixels'].tolist() width, height = 48, 48 faces = [] for pixel_sequence in pixels: face = [int(pixel) for pixel in pixel_sequence.split(' ')] face = np.asarray(face).reshape(width, height) face = cv2.resize(face.astype('uint8'),image_size) faces.append(face.astype('float32')) faces = np.asarray(faces) faces = np.expand_dims(faces, -1) emotions = pd.get_dummies(data['emotion']).as_matrix() return faces, emotions def preprocess_input(x, v2=True): x = x.astype('float32') x = x / 255.0 if v2: x = x - 0.5 x = x * 2.0 return x
true
true
f70326ca696e1c05be0940e1d778d0eb50c76934
10,074
py
Python
spreadsheet.py
rohanuttamsingh/ScoutingSheetInitializer
acf1c01617796450fd58f29fbb0e67ae2a977002
[ "MIT" ]
null
null
null
spreadsheet.py
rohanuttamsingh/ScoutingSheetInitializer
acf1c01617796450fd58f29fbb0e67ae2a977002
[ "MIT" ]
null
null
null
spreadsheet.py
rohanuttamsingh/ScoutingSheetInitializer
acf1c01617796450fd58f29fbb0e67ae2a977002
[ "MIT" ]
null
null
null
import json from time import sleep import gspread import requests from gspread_formatting import * from oauth2client.service_account import ServiceAccountCredentials class Spreadsheet: # comment out all but one of these depending on which spreadsheet being used # URL = 'https://docs.google.com/spreadsheets/d/1WhExw_ReHnyPQYXl0p-kT6jYXpZW5w8-cq2ffK7niOs' # Sample Deep Space Scouting Sheet Machine # URL = 'https://docs.google.om/spreadsheets/d/1lOTML4TgNqv5OKUJU32keWu62__T9cFT3IL52kmPbKk' # Bethesda Week 2 Scouting Sheet Machine # URL = 'https://docs.google.com/spreadsheets/d/1C8hjCqMZmacyUe3SlRgW4o4HGqTRFozviK4WZ6Mu4yc' # Week 0 Scouting Sheet Machine # URL = 'https://docs.google.com/spreadsheets/d/1uYb9n_2IaGSRvOPZcuE59eUQjinaTSIN1SKqTQ6z2lQ' # Dickinson Center Week 0 Scouting Sheet Machine # URL = 'https://docs.google.com/spreadsheets/d/1_8tFjgxjGVA0__1BLkMV-ookfPLrnGDE8gZj6pQc1_k' # Centurion-KnightKrawler Week 0 Scouting Sheet Machine # URL = 'https://docs.google.com/spreadsheets/d/1Ftzcn5u5axYUkob1MXI8wV1KAD-8qjGkywqQjP4_AMo' # Haymarket Week 1 Scouting Sheet Machine # URL = 'https://docs.google.com/spreadsheets/d/1fRm4nZIT457zIpW5cyZrIvR0gSGt6oEcphVYiaH6eK8' # Owings Mills Week 3 Scouting Sheet Machine URL = 'https://docs.google.com/spreadsheets/d/1y8xtKJftg1mDbhfcmISWkyi4MgmSauveD9BY2bPNUCo/edit#gid=168604214' # CHCMP Scouting Sheet Machine # google sheets setup scope = ['https://spreadsheets.google.com/feeds'] creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret_gsheets.json', scope) client = gspread.authorize(creds) # google sheets document sheet = client.open_by_url(URL) # individual worksheets of google sheets document key_worksheet = sheet.worksheet('Key') teams_worksheet = sheet.worksheet('Teams') sample_team_worksheet = sheet.worksheet('Sample Team') schedule_worksheet = sheet.worksheet('Schedule') team_data_worksheet = sheet.worksheet('Team Data') # setting event key to value in A1 of Key worksheet event_key = key_worksheet.cell(1, 
1).value # 2537 cell format format_2537 = CellFormat(backgroundColor=Color(.148, .98, .216)) # 25fa37 converted to rgb out of 1 # tba setup tba_session = requests.Session() BASE_URL = 'https://www.thebluealliance.com/api/v3' # tba credentials setup with open('client_secret_tba.json') as json_file: data = json.load(json_file) tba_auth_key = data['tba_auth_key'] def __init__(self): """All TBA requests will have authentication key in header""" self.tba_session.headers.update({'X-TBA-Auth-Key': self.tba_auth_key}) def get_teams_from_event(self, event): """Returns all team keys from event in a list event: event key of intended competition (e.g. 2018vahay) """ teams_raw = self.tba_session.get(self.BASE_URL + '/event/%s/teams/keys' % event).json() teams = [] for team_raw in teams_raw: teams.append(team_raw[3:]) return teams def fill_teams(self, sheet, event): """Fills first column of specified sheet with all teams from specified sheet sheet: intended google sheet event: event key of intended competition (e.g. 2018vahay) """ column = [] for team in self.get_teams_from_event(event): column.append(team) for index in range(0, len(column)): sheet.update_cell(index + 1, 1, column[index]) def create_team_sheets(self): """Creates a scouting sheet for each team in competition event: event key of intended competition (e.g. 
2018 vahay) """ teams = self.teams_worksheet.col_values(1) for team in teams: self.sheet.add_worksheet(team, self.sample_team_worksheet.row_count, self.sample_team_worksheet.col_count) def delete_team_sheets(self): """Deletes all individual team worksheets Used for testing """ teams = self.teams_worksheet.col_values(1) for team in teams: self.sheet.del_worksheet(self.sheet.worksheet(team)) def get_sample_sheet(self): """Returns the sample team sheet in 2D list format [row][column]""" sample_sheet = [] for row in range(1, self.sample_team_worksheet.row_count + 1): sample_sheet.append(self.sample_team_worksheet.row_values(row, value_render_option='FORMULA')) return sample_sheet def copy_sheet(self, copy_from, copy_to, team_num): """Copies every element from a list of values to a specified sheet copy_from: list from which values are copied copy_to: sheet to which values are copied """ i, j = 1, 1 for row in copy_from: for col in row: if col == 'Team #': copy_to.update_cell(i, j, team_num) sleep(1.01) elif col != '': copy_to.update_cell(i, j, col) sleep(1.01) # Quota is 100 requests per 100s, this does 100 requests per 101s j += 1 i += 1 j = 1 def copy_sample_to_team_sheets(self): """Copies sample sheet format to every team sheet""" sample_sheet = self.get_sample_sheet() for team in self.teams_worksheet.col_values(1): self.copy_sheet(sample_sheet, self.sheet.worksheet(team), team) def get_color_schedule(self, event, color): """Returns match schedule of specified color alliance in list event: event key of intended competition (e.g. 2018vahay) color: color of desired alliance schedule (e.g. 
red or blue) """ # event schedules get updated to elims event schedules once elims are reached # only elims schedule accessible in finished events schedule = [] event_list = self.tba_session.get(self.BASE_URL + '/event/%s/matches/simple' % event).json() # list of dicts for match in event_list: schedule.append(match['alliances'][color]['team_keys']) for alliance in schedule: for i in range(len(alliance)): alliance[i] = alliance[i][3:] # trims 'frc' from beginning of every team number return schedule def fill_schedule(self, event): """Auto fills Schedule worksheet with schedule event: event key of intended competition (e.g. 2018vahay) """ red_schedule = self.get_color_schedule(event, 'red') blue_schedule = self.get_color_schedule(event, 'blue') # updates num_matches to the correct number of matches and fill column 1 of spreadsheet with match number num_matches = 1 for match in range(len(red_schedule)): self.schedule_worksheet.update_cell(match + 1, 1, match + 1) num_matches += 1 sleep(1.01) for i in range(num_matches): for j in range(3): self.schedule_worksheet.update_cell(i + 1, j + 2, red_schedule[i][j]) sleep(1.01) self.schedule_worksheet.update_cell(i + 1, j + 5, blue_schedule[i][j]) sleep(1.01) def get_team_metrics_from_event(self, event): """Returns OPRs, DPRs, and CCWMs of all teams at event in dictionary of dictionaries event: event key of intended competition (e.g. 2018vahay) """ return self.tba_session.get(self.BASE_URL + '/event/%s/oprs' % event).json() def fill_team_data(self, event): """Auto fills Team Data worksheet with teams and their corresponding OPR, DPR, and CCWM event: event key if intended competition (e.g. 
2018vahay) """ teams = self.get_teams_from_event(event) metrics = self.get_team_metrics_from_event(event) row = 2 team_col, opr_col, dpr_col, ccwm_col = 1, 2, 3, 4 for team in teams: self.team_data_worksheet.update_cell(row, team_col, team) sleep(1.01) self.team_data_worksheet.update_cell(row, opr_col, metrics['oprs']['frc' + team]) sleep(1.01) self.team_data_worksheet.update_cell(row, dpr_col, metrics['dprs']['frc' + team]) sleep(1.01) self.team_data_worksheet.update_cell(row, ccwm_col, metrics['ccwms']['frc' + team]) sleep(1.01) row += 1 def get_predictions_from_event(self, event): return self.tba_session.get(self.BASE_URL + '/event/%s/predictions' % event).json() def format_cells_in_schedule(self): cells_2537_raw = self.schedule_worksheet.findall('2537') cells_2537 = [] for cell in cells_2537_raw: cells_2537.append([cell.col + 64, cell.row]) # add 64 to column to match ascii character decimals for cell in cells_2537: b = bytes(str(cell[0]), 'utf8') ascii_char = b.decode('ascii') cell[0] = chr(int(ascii_char)) for i in range(len(cells_2537)): format_cell_range(self.schedule_worksheet, '%s%i:%s%i' % (cells_2537[i][0], cells_2537[i][1], cells_2537[i][0], cells_2537[i][1]), self.format_2537) def main(self): self.fill_teams(self.teams_worksheet, self.event_key) self.create_team_sheets() # self.delete_team_sheets() # print(self.get_sample_sheet()) # self.copy_sheet(self.get_sample_sheet(), self.sheet.worksheet('1086'), 1086) # testing on single sheet # print(len(self.get_sample_sheet())) self.copy_sample_to_team_sheets() # print(self.get_color_schedule(self.event_key, 'red')) self.fill_schedule(self.event_key) self.fill_team_data(self.event_key) # print(self.get_team_metrics_from_event(self.event_key)) # print(self.get_predictions_from_event(self.event_key)) self.format_cells_in_schedule() if __name__ == '__main__': spreadsheet = Spreadsheet() spreadsheet.main()
43.8
160
0.655847
import json from time import sleep import gspread import requests from gspread_formatting import * from oauth2client.service_account import ServiceAccountCredentials class Spreadsheet: URL = 'https://docs.google.com/spreadsheets/d/1y8xtKJftg1mDbhfcmISWkyi4MgmSauveD9BY2bPNUCo/edit#gid=168604214' # CHCMP Scouting Sheet Machine scope = ['https://spreadsheets.google.com/feeds'] creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret_gsheets.json', scope) client = gspread.authorize(creds) sheet = client.open_by_url(URL) key_worksheet = sheet.worksheet('Key') teams_worksheet = sheet.worksheet('Teams') sample_team_worksheet = sheet.worksheet('Sample Team') schedule_worksheet = sheet.worksheet('Schedule') team_data_worksheet = sheet.worksheet('Team Data') event_key = key_worksheet.cell(1, 1).value format_2537 = CellFormat(backgroundColor=Color(.148, .98, .216)) tba_session = requests.Session() BASE_URL = 'https://www.thebluealliance.com/api/v3' with open('client_secret_tba.json') as json_file: data = json.load(json_file) tba_auth_key = data['tba_auth_key'] def __init__(self): self.tba_session.headers.update({'X-TBA-Auth-Key': self.tba_auth_key}) def get_teams_from_event(self, event): teams_raw = self.tba_session.get(self.BASE_URL + '/event/%s/teams/keys' % event).json() teams = [] for team_raw in teams_raw: teams.append(team_raw[3:]) return teams def fill_teams(self, sheet, event): column = [] for team in self.get_teams_from_event(event): column.append(team) for index in range(0, len(column)): sheet.update_cell(index + 1, 1, column[index]) def create_team_sheets(self): teams = self.teams_worksheet.col_values(1) for team in teams: self.sheet.add_worksheet(team, self.sample_team_worksheet.row_count, self.sample_team_worksheet.col_count) def delete_team_sheets(self): teams = self.teams_worksheet.col_values(1) for team in teams: self.sheet.del_worksheet(self.sheet.worksheet(team)) def get_sample_sheet(self): sample_sheet = [] for row in range(1, 
self.sample_team_worksheet.row_count + 1): sample_sheet.append(self.sample_team_worksheet.row_values(row, value_render_option='FORMULA')) return sample_sheet def copy_sheet(self, copy_from, copy_to, team_num): i, j = 1, 1 for row in copy_from: for col in row: if col == 'Team #': copy_to.update_cell(i, j, team_num) sleep(1.01) elif col != '': copy_to.update_cell(i, j, col) sleep(1.01) j += 1 i += 1 j = 1 def copy_sample_to_team_sheets(self): sample_sheet = self.get_sample_sheet() for team in self.teams_worksheet.col_values(1): self.copy_sheet(sample_sheet, self.sheet.worksheet(team), team) def get_color_schedule(self, event, color): schedule = [] event_list = self.tba_session.get(self.BASE_URL + '/event/%s/matches/simple' % event).json() for match in event_list: schedule.append(match['alliances'][color]['team_keys']) for alliance in schedule: for i in range(len(alliance)): alliance[i] = alliance[i][3:] return schedule def fill_schedule(self, event): red_schedule = self.get_color_schedule(event, 'red') blue_schedule = self.get_color_schedule(event, 'blue') num_matches = 1 for match in range(len(red_schedule)): self.schedule_worksheet.update_cell(match + 1, 1, match + 1) num_matches += 1 sleep(1.01) for i in range(num_matches): for j in range(3): self.schedule_worksheet.update_cell(i + 1, j + 2, red_schedule[i][j]) sleep(1.01) self.schedule_worksheet.update_cell(i + 1, j + 5, blue_schedule[i][j]) sleep(1.01) def get_team_metrics_from_event(self, event): return self.tba_session.get(self.BASE_URL + '/event/%s/oprs' % event).json() def fill_team_data(self, event): teams = self.get_teams_from_event(event) metrics = self.get_team_metrics_from_event(event) row = 2 team_col, opr_col, dpr_col, ccwm_col = 1, 2, 3, 4 for team in teams: self.team_data_worksheet.update_cell(row, team_col, team) sleep(1.01) self.team_data_worksheet.update_cell(row, opr_col, metrics['oprs']['frc' + team]) sleep(1.01) self.team_data_worksheet.update_cell(row, dpr_col, metrics['dprs']['frc' + team]) 
sleep(1.01) self.team_data_worksheet.update_cell(row, ccwm_col, metrics['ccwms']['frc' + team]) sleep(1.01) row += 1 def get_predictions_from_event(self, event): return self.tba_session.get(self.BASE_URL + '/event/%s/predictions' % event).json() def format_cells_in_schedule(self): cells_2537_raw = self.schedule_worksheet.findall('2537') cells_2537 = [] for cell in cells_2537_raw: cells_2537.append([cell.col + 64, cell.row]) for cell in cells_2537: b = bytes(str(cell[0]), 'utf8') ascii_char = b.decode('ascii') cell[0] = chr(int(ascii_char)) for i in range(len(cells_2537)): format_cell_range(self.schedule_worksheet, '%s%i:%s%i' % (cells_2537[i][0], cells_2537[i][1], cells_2537[i][0], cells_2537[i][1]), self.format_2537) def main(self): self.fill_teams(self.teams_worksheet, self.event_key) self.create_team_sheets() self.copy_sample_to_team_sheets() self.fill_schedule(self.event_key) self.fill_team_data(self.event_key) self.format_cells_in_schedule() if __name__ == '__main__': spreadsheet = Spreadsheet() spreadsheet.main()
true
true
f7032731d7e241881607e9839492fa621aefb0c9
10,129
py
Python
app/fedcv/medical_chest_xray_image_clf/data/chexpert/data_loader.py
ray-ruisun/FedML
24ff30d636bb70f64e94e9ca205375033597d3dd
[ "Apache-2.0" ]
null
null
null
app/fedcv/medical_chest_xray_image_clf/data/chexpert/data_loader.py
ray-ruisun/FedML
24ff30d636bb70f64e94e9ca205375033597d3dd
[ "Apache-2.0" ]
null
null
null
app/fedcv/medical_chest_xray_image_clf/data/chexpert/data_loader.py
ray-ruisun/FedML
24ff30d636bb70f64e94e9ca205375033597d3dd
[ "Apache-2.0" ]
null
null
null
import logging import os import numpy as np import torch from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from torch.utils.data.distributed import DistributedSampler from .dataset import CheXpert def _get_mean_and_std(dataset: Dataset): """Compute the mean and std of dataset.""" data_loader = DataLoader(dataset, batch_size=1, shuffle=False) mean = torch.zeros(3) std = torch.zeros(3) for i, (img, _) in enumerate(data_loader): if i % 1000 == 0: print(i) mean += img.mean(dim=(0, 2, 3)) std += img.std(dim=(0, 2, 3)) mean /= len(data_loader) std /= len(data_loader) return mean, std class Cutout(object): def __init__(self, length): self.length = length def __call__(self, img): h, w = img.size(1), img.size(2) mask = np.ones((h, w), np.float32) y = np.random.randint(h) x = np.random.randint(w) y1 = np.clip(y - self.length // 2, 0, h) y2 = np.clip(y + self.length // 2, 0, h) x1 = np.clip(x - self.length // 2, 0, w) x2 = np.clip(x + self.length // 2, 0, w) mask[y1:y2, x1:x2] = 0.0 mask = torch.from_numpy(mask) mask = mask.expand_as(img) img *= mask return img def _data_transforms_chexpert(): CHEXPERT_MEAN = [0.503, 0.503, 0.503] CHEXPERT_STD = [0.291, 0.291, 0.291] image_size = 256 train_transform = transforms.Compose( [ # transforms.ToPILImage(), transforms.RandomResizedCrop(image_size), # transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD), ] ) # train_transform.transforms.append(Cutout(16)) test_transform = transforms.Compose( [ transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD), ] ) return train_transform, test_transform # for centralized training def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, policy="zeros"): return get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs, policy=policy) # for local devices def get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, 
dataidxs_test, policy="zeros"): return get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy=policy) def get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs=None, policy="zeros"): dl_obj = CheXpert transform_train, transform_test = _data_transforms_chexpert() train_ds = dl_obj( datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=False, policy=policy, ) test_ds = dl_obj( datadir, dataidxs=None, train=False, transform=transform_test, download=False, policy=policy, ) train_dl = DataLoader( dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False, pin_memory=True, num_workers=4, ) test_dl = DataLoader( dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False, pin_memory=True, num_workers=4, ) return train_dl, test_dl def get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None, policy="zeros"): dl_obj = CheXpert transform_train, transform_test = _data_transforms_chexpert() train_ds = dl_obj( datadir, dataidxs=dataidxs_train, train=True, transform=transform_train, download=True, policy=policy, ) test_ds = dl_obj( datadir, dataidxs=dataidxs_test, train=False, transform=transform_test, download=True, policy=policy, ) train_dl = DataLoader( dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False, pin_memory=True, num_workers=4, ) test_dl = DataLoader( dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False, pin_memory=True, num_workers=4, ) return train_dl, test_dl def distributed_centralized_chexpert_loader(dataset, data_dir, world_size, rank, batch_size): """ Used for generating distributed dataloader for accelerating centralized training """ train_bs = batch_size test_bs = batch_size transform_train, transform_test = _data_transforms_chexpert() train_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=True, transform=transform_train) test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, 
transform=transform_test) train_sam = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank) test_sam = DistributedSampler(test_dataset, num_replicas=world_size, rank=rank) train_dl = data.DataLoader( train_dataset, batch_size=train_bs, sampler=train_sam, pin_memory=True, num_workers=4, ) test_dl = data.DataLoader( test_dataset, batch_size=test_bs, sampler=test_sam, pin_memory=True, num_workers=4, ) class_num = 1000 train_data_num = len(train_dataset) test_data_num = len(test_dataset) return train_data_num, test_data_num, train_dl, test_dl, None, None, None, class_num def load_partition_data_chexpert( data_dir, partition_method="random", partition_alpha=None, client_number=100, batch_size=10, policy="zeros", ): transform_train, transform_test = _data_transforms_chexpert() train_dataset = CheXpert( data_dir=data_dir, dataidxs=None, train=True, transform=transform_train, policy=policy, ) test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test, policy=policy) # get local dataset if partition_method == "random": num_train_items = int(len(train_dataset) / client_number) num_test_items = int(len(test_dataset) / client_number) dict_client = {} all_train_idxs = list(range(len(train_dataset))) all_test_idxs = list(range(len(test_dataset))) for client_idx in range(client_number): dict_client[client_idx] = {} dict_client[client_idx]["train"] = set(np.random.choice(all_train_idxs, num_train_items, replace=False)) dict_client[client_idx]["test"] = set(np.random.choice(all_test_idxs, num_test_items, replace=False)) all_train_idxs = list(set(all_train_idxs) - dict_client[client_idx]["train"]) all_test_idxs = list(set(all_test_idxs) - dict_client[client_idx]["test"]) if len(all_train_idxs) > 0: all_client_idxs = list(range(client_number)) np.random.shuffle(all_client_idxs) choiced_client_idxs = all_client_idxs[: len(all_train_idxs)] for idx, client_idx in enumerate(choiced_client_idxs): 
dict_client[client_idx]["train"].add(all_train_idxs[idx]) if len(all_test_idxs) > 0: all_client_idxs = list(range(client_number)) np.random.shuffle(all_client_idxs) choiced_client_idxs = all_client_idxs[: len(all_test_idxs)] for idx, client_idx in enumerate(choiced_client_idxs): dict_client[client_idx]["test"].add(all_test_idxs[idx]) else: raise NotImplementedError # build dataloader train_dl = [] test_dl = [] for client_idx in range(client_number): train_data_idxs = list(dict_client[client_idx]["train"]) test_data_idxs = list(dict_client[client_idx]["test"]) train_dl_, test_dl_ = get_dataloader_test_chexpert( datadir=data_dir, dataidxs_train=train_data_idxs, dataidxs_test=test_data_idxs, train_bs=batch_size, test_bs=batch_size, policy=policy, ) train_dl.append(train_dl_) test_dl.append(test_dl_) logging.info(f"Client {client_idx} train data num: {len(train_dl_)} test data num: {len(test_dl_)}") logging.info("Partition data done") # logging.info("Partition data for each client: {}".format(dict_client)) train_data_num = len(train_dataset) test_data_num = len(test_dataset) train_data_global = train_dataset test_data_global = test_dataset data_local_num_dict = { client_idx: len(dict_client[client_idx]["train"]) + len(dict_client[client_idx]["test"]) for client_idx in range(client_number) } train_data_local_dict = {client_idx: train_dl_ for client_idx, train_dl_ in enumerate(train_dl)} test_data_local_dict = {client_idx: test_dl_ for client_idx, test_dl_ in enumerate(test_dl)} class_num = train_dataset.num_classes return ( train_data_num, test_data_num, train_data_global, test_data_global, data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num, ) if __name__ == "__main__": data_path = os.path.join("D:\\", "dataset", "CheXpert", "CheXpert-v1.0-small") data = CheXpert(data_dir=data_path, transform=transforms.ToTensor()) print(len(data)) print(data[0][0]) print(data[0][1]) # mean, std = _get_mean_and_std(data) # print(mean, std) # train_transform, 
valid_transform = _data_transforms_chexpert() # print(train_transform) # print(valid_transform) ( train_data_num, test_data_num, train_data_global, test_data_global, data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num, ) = load_partition_data_chexpert(data_dir=data_path, client_number=10, batch_size=10, policy="zeros") print(train_data_num, test_data_num, class_num)
30.601208
118
0.654852
import logging import os import numpy as np import torch from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from torch.utils.data.distributed import DistributedSampler from .dataset import CheXpert def _get_mean_and_std(dataset: Dataset): data_loader = DataLoader(dataset, batch_size=1, shuffle=False) mean = torch.zeros(3) std = torch.zeros(3) for i, (img, _) in enumerate(data_loader): if i % 1000 == 0: print(i) mean += img.mean(dim=(0, 2, 3)) std += img.std(dim=(0, 2, 3)) mean /= len(data_loader) std /= len(data_loader) return mean, std class Cutout(object): def __init__(self, length): self.length = length def __call__(self, img): h, w = img.size(1), img.size(2) mask = np.ones((h, w), np.float32) y = np.random.randint(h) x = np.random.randint(w) y1 = np.clip(y - self.length // 2, 0, h) y2 = np.clip(y + self.length // 2, 0, h) x1 = np.clip(x - self.length // 2, 0, w) x2 = np.clip(x + self.length // 2, 0, w) mask[y1:y2, x1:x2] = 0.0 mask = torch.from_numpy(mask) mask = mask.expand_as(img) img *= mask return img def _data_transforms_chexpert(): CHEXPERT_MEAN = [0.503, 0.503, 0.503] CHEXPERT_STD = [0.291, 0.291, 0.291] image_size = 256 train_transform = transforms.Compose( [ transforms.RandomResizedCrop(image_size), transforms.ToTensor(), transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD), ] ) test_transform = transforms.Compose( [ transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD), ] ) return train_transform, test_transform def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, policy="zeros"): return get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs, policy=policy) def get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy="zeros"): return get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy=policy) def get_dataloader_chexpert(datadir, train_bs, test_bs, 
dataidxs=None, policy="zeros"): dl_obj = CheXpert transform_train, transform_test = _data_transforms_chexpert() train_ds = dl_obj( datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=False, policy=policy, ) test_ds = dl_obj( datadir, dataidxs=None, train=False, transform=transform_test, download=False, policy=policy, ) train_dl = DataLoader( dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False, pin_memory=True, num_workers=4, ) test_dl = DataLoader( dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False, pin_memory=True, num_workers=4, ) return train_dl, test_dl def get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None, policy="zeros"): dl_obj = CheXpert transform_train, transform_test = _data_transforms_chexpert() train_ds = dl_obj( datadir, dataidxs=dataidxs_train, train=True, transform=transform_train, download=True, policy=policy, ) test_ds = dl_obj( datadir, dataidxs=dataidxs_test, train=False, transform=transform_test, download=True, policy=policy, ) train_dl = DataLoader( dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False, pin_memory=True, num_workers=4, ) test_dl = DataLoader( dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False, pin_memory=True, num_workers=4, ) return train_dl, test_dl def distributed_centralized_chexpert_loader(dataset, data_dir, world_size, rank, batch_size): train_bs = batch_size test_bs = batch_size transform_train, transform_test = _data_transforms_chexpert() train_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=True, transform=transform_train) test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test) train_sam = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank) test_sam = DistributedSampler(test_dataset, num_replicas=world_size, rank=rank) train_dl = data.DataLoader( train_dataset, batch_size=train_bs, sampler=train_sam, pin_memory=True, 
num_workers=4, ) test_dl = data.DataLoader( test_dataset, batch_size=test_bs, sampler=test_sam, pin_memory=True, num_workers=4, ) class_num = 1000 train_data_num = len(train_dataset) test_data_num = len(test_dataset) return train_data_num, test_data_num, train_dl, test_dl, None, None, None, class_num def load_partition_data_chexpert( data_dir, partition_method="random", partition_alpha=None, client_number=100, batch_size=10, policy="zeros", ): transform_train, transform_test = _data_transforms_chexpert() train_dataset = CheXpert( data_dir=data_dir, dataidxs=None, train=True, transform=transform_train, policy=policy, ) test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test, policy=policy) if partition_method == "random": num_train_items = int(len(train_dataset) / client_number) num_test_items = int(len(test_dataset) / client_number) dict_client = {} all_train_idxs = list(range(len(train_dataset))) all_test_idxs = list(range(len(test_dataset))) for client_idx in range(client_number): dict_client[client_idx] = {} dict_client[client_idx]["train"] = set(np.random.choice(all_train_idxs, num_train_items, replace=False)) dict_client[client_idx]["test"] = set(np.random.choice(all_test_idxs, num_test_items, replace=False)) all_train_idxs = list(set(all_train_idxs) - dict_client[client_idx]["train"]) all_test_idxs = list(set(all_test_idxs) - dict_client[client_idx]["test"]) if len(all_train_idxs) > 0: all_client_idxs = list(range(client_number)) np.random.shuffle(all_client_idxs) choiced_client_idxs = all_client_idxs[: len(all_train_idxs)] for idx, client_idx in enumerate(choiced_client_idxs): dict_client[client_idx]["train"].add(all_train_idxs[idx]) if len(all_test_idxs) > 0: all_client_idxs = list(range(client_number)) np.random.shuffle(all_client_idxs) choiced_client_idxs = all_client_idxs[: len(all_test_idxs)] for idx, client_idx in enumerate(choiced_client_idxs): dict_client[client_idx]["test"].add(all_test_idxs[idx]) else: raise 
NotImplementedError train_dl = [] test_dl = [] for client_idx in range(client_number): train_data_idxs = list(dict_client[client_idx]["train"]) test_data_idxs = list(dict_client[client_idx]["test"]) train_dl_, test_dl_ = get_dataloader_test_chexpert( datadir=data_dir, dataidxs_train=train_data_idxs, dataidxs_test=test_data_idxs, train_bs=batch_size, test_bs=batch_size, policy=policy, ) train_dl.append(train_dl_) test_dl.append(test_dl_) logging.info(f"Client {client_idx} train data num: {len(train_dl_)} test data num: {len(test_dl_)}") logging.info("Partition data done") train_data_num = len(train_dataset) test_data_num = len(test_dataset) train_data_global = train_dataset test_data_global = test_dataset data_local_num_dict = { client_idx: len(dict_client[client_idx]["train"]) + len(dict_client[client_idx]["test"]) for client_idx in range(client_number) } train_data_local_dict = {client_idx: train_dl_ for client_idx, train_dl_ in enumerate(train_dl)} test_data_local_dict = {client_idx: test_dl_ for client_idx, test_dl_ in enumerate(test_dl)} class_num = train_dataset.num_classes return ( train_data_num, test_data_num, train_data_global, test_data_global, data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num, ) if __name__ == "__main__": data_path = os.path.join("D:\\", "dataset", "CheXpert", "CheXpert-v1.0-small") data = CheXpert(data_dir=data_path, transform=transforms.ToTensor()) print(len(data)) print(data[0][0]) print(data[0][1]) ( train_data_num, test_data_num, train_data_global, test_data_global, data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num, ) = load_partition_data_chexpert(data_dir=data_path, client_number=10, batch_size=10, policy="zeros") print(train_data_num, test_data_num, class_num)
true
true
f70327b5c57e7952effeec7cc8716fd76dd00bd0
22,049
py
Python
paramak/reactor.py
openmcworkshop/paramak
c41dc4c2e68183869556544ee7a72deb1d16a8dc
[ "MIT" ]
1
2020-11-25T10:46:32.000Z
2020-11-25T10:46:32.000Z
paramak/reactor.py
openmcworkshop/paramak
c41dc4c2e68183869556544ee7a72deb1d16a8dc
[ "MIT" ]
null
null
null
paramak/reactor.py
openmcworkshop/paramak
c41dc4c2e68183869556544ee7a72deb1d16a8dc
[ "MIT" ]
null
null
null
import json from collections import Iterable from pathlib import Path import cadquery as cq import matplotlib.pyplot as plt import plotly.graph_objects as go from cadquery import exporters import paramak from paramak.neutronics_utils import (add_stl_to_moab_core, define_moab_core_and_tags) from paramak.utils import get_hash class Reactor: """The Reactor object allows shapes and components to be added and then collective operations to be performed on them. Combining all the shapes is required for creating images of the whole reactor and creating a Graveyard (bounding box) that is needed for neutronics simulations. Args: shapes_and_components (list): list of paramak.Shape """ def __init__(self, shapes_and_components): self.material_tags = [] self.stp_filenames = [] self.stl_filenames = [] self.tet_meshes = [] self.graveyard = None self.solid = None self.shapes_and_components = shapes_and_components self.reactor_hash_value = None self.graveyard_offset = None # set by the make_graveyard method @property def stp_filenames(self): values = [] for shape_or_component in self.shapes_and_components: values.append(shape_or_component.stp_filename) return values @stp_filenames.setter def stp_filenames(self, value): self._stp_filenames = value @property def stl_filenames(self): values = [] for shape_or_component in self.shapes_and_components: values.append(shape_or_component.stl_filename) return values @stl_filenames.setter def stl_filenames(self, value): self._stl_filenames = value @property def largest_dimension(self): """Calculates a bounding box for the Reactor and returns the largest absolute value of the largest dimension of the bounding box""" largest_dimension = 0 for component in self.shapes_and_components: largest_dimension = max( largest_dimension, component.largest_dimension) self._largest_dimension = largest_dimension return largest_dimension @largest_dimension.setter def largest_dimension(self, value): self._largest_dimension = value @property def 
material_tags(self): """Returns a set of all the materials_tags used in the Reactor (excluding the plasma)""" values = [] for shape_or_component in self.shapes_and_components: if isinstance( shape_or_component, (paramak.Plasma, paramak.PlasmaFromPoints, paramak.PlasmaBoundaries)) is False: values.append(shape_or_component.material_tag) return values @material_tags.setter def material_tags(self, value): self._material_tags = value @property def tet_meshes(self): values = [] for shape_or_componet in self.shapes_and_components: values.append(shape_or_componet.tet_mesh) return values @tet_meshes.setter def tet_meshes(self, value): self._tet_meshes = value @property def shapes_and_components(self): """Adds a list of parametric shape(s) and or parametric component(s) to the Reactor object. This allows collective operations to be performed on all the shapes in the reactor. When adding a shape or component the stp_filename of the shape or component should be unique""" if hasattr(self, "create_solids"): ignored_keys = ["reactor_hash_value"] if get_hash(self, ignored_keys) != self.reactor_hash_value: self.create_solids() self.reactor_hash_value = get_hash(self, ignored_keys) return self._shapes_and_components @shapes_and_components.setter def shapes_and_components(self, value): if not isinstance(value, Iterable): raise ValueError("shapes_and_components must be a list") self._shapes_and_components = value @property def graveyard_offset(self): return self._graveyard_offset @graveyard_offset.setter def graveyard_offset(self, value): if value is None: self._graveyard_offset = None elif not isinstance(value, (float, int)): raise ValueError("graveyard_offset must be a number") elif value < 0: raise ValueError("graveyard_offset must be positive") self._graveyard_offset = value @property def solid(self): """This combines all the parametric shapes and compents in the reactor object and rotates the viewing angle so that .solid operations in jupyter notebook. 
""" list_of_cq_vals = [] for shape_or_compound in self.shapes_and_components: if isinstance( shape_or_compound.solid, cq.occ_impl.shapes.Compound): for solid in shape_or_compound.solid.Solids(): list_of_cq_vals.append(solid) else: list_of_cq_vals.append(shape_or_compound.solid.val()) compound = cq.Compound.makeCompound(list_of_cq_vals) compound = compound.rotate( startVector=(0, 1, 0), endVector=(0, 0, 1), angleDegrees=180 ) return compound @solid.setter def solid(self, value): self._solid = value def neutronics_description(self, include_plasma=False, include_graveyard=True ): """A description of the reactor containing material tags, stp filenames, and tet mesh instructions. This is used for neutronics simulations which require linkage between volumes, materials and identification of which volumes to tet mesh. The plasma geometry is not included by default as it is typically not included in neutronics simulations. The reason for this is that the low number density results in minimal interaction with neutrons. However, it can be added if the include_plasma argument is set to True. Returns: dictionary: a dictionary of materials and filenames for the reactor """ neutronics_description = [] for entry in self.shapes_and_components: if include_plasma is False and isinstance( entry, (paramak.Plasma, paramak.PlasmaFromPoints, paramak.PlasmaBoundaries)) is True: continue if entry.stp_filename is None: raise ValueError( "Set Shape.stp_filename for all the \ Reactor entries before using this method" ) if entry.material_tag is None: raise ValueError( "set Shape.material_tag for all the \ Reactor entries before using this method" ) neutronics_description.append(entry.neutronics_description()) # This add the neutronics description for the graveyard which is unique # as it is automatically calculated instead of being added by the user. 
# Also the graveyard must have 'Graveyard' as the material name if include_graveyard is True: self.make_graveyard() neutronics_description.append( self.graveyard.neutronics_description()) return neutronics_description def export_neutronics_description( self, filename="manifest.json", include_plasma=False, include_graveyard=True): """ Saves Reactor.neutronics_description to a json file. The resulting json file contains a list of dictionaries. Each dictionary entry comprises of a material and a filename and optionally a tet_mesh instruction. The json file can then be used with the neutronics workflows to create a neutronics model. Creating of the neutronics model requires linkage between volumes, materials and identification of which volumes to tet_mesh. If the filename does not end with .json then .json will be added. The plasma geometry is not included by default as it is typically not included in neutronics simulations. The reason for this is that the low number density results in minimal interactions with neutrons. However, the plasma can be added if the include_plasma argument is set to True. Args: filename (str, optional): the filename used to save the neutronics description include_plasma (Boolean, optional): should the plasma be included. Defaults to False as the plasma volume and material has very little impact on the neutronics results due to the low density. Including the plasma does however slow down the simulation. include_graveyard (Boolean, optional): should the graveyard be included. Defaults to True as this is needed for DAGMC models. 
""" path_filename = Path(filename) if path_filename.suffix != ".json": path_filename = path_filename.with_suffix(".json") path_filename.parents[0].mkdir(parents=True, exist_ok=True) with open(path_filename, "w") as outfile: json.dump( self.neutronics_description( include_plasma=include_plasma, include_graveyard=include_graveyard, ), outfile, indent=4, ) print("saved geometry description to ", path_filename) return str(path_filename) def export_stp(self, output_folder="", graveyard_offset=100, mode='solid'): """Writes stp files (CAD geometry) for each Shape object in the reactor and the graveyard. Args: output_folder (str): the folder for saving the stp files to graveyard_offset (float, optional): the offset between the largest edge of the geometry and inner bounding shell created. Defaults to 100. mode (str, optional): the object to export can be either 'solid' which exports 3D solid shapes or the 'wire' which exports the wire edges of the shape. Defaults to 'solid'. Returns: list: a list of stp filenames created """ if len(self.stp_filenames) != len(set(self.stp_filenames)): raise ValueError( "Set Reactor already contains a shape or component \ with this stp_filename", self.stp_filenames, ) filenames = [] for entry in self.shapes_and_components: if entry.stp_filename is None: raise ValueError( "set .stp_filename property for \ Shapes before using the export_stp method" ) filenames.append( str(Path(output_folder) / Path(entry.stp_filename))) entry.export_stp( filename=Path(output_folder) / Path(entry.stp_filename), mode=mode ) # creates a graveyard (bounding shell volume) which is needed for # nuetronics simulations self.make_graveyard(graveyard_offset=graveyard_offset) filenames.append( str(Path(output_folder) / Path(self.graveyard.stp_filename))) self.graveyard.export_stp( Path(output_folder) / Path(self.graveyard.stp_filename) ) return filenames def export_stl(self, output_folder="", tolerance=0.001): """Writes stl files (CAD geometry) for each Shape object in 
the reactor :param output_folder: the folder for saving the stp files to :type output_folder: str :param tolerance: the precision of the faceting :type tolerance: float :return: a list of stl filenames created :rtype: list """ if len(self.stl_filenames) != len(set(self.stl_filenames)): raise ValueError( "Set Reactor already contains a shape or component \ with this stl_filename", self.stl_filenames, ) filenames = [] for entry in self.shapes_and_components: print("entry.stl_filename", entry.stl_filename) if entry.stl_filename is None: raise ValueError( "set .stl_filename property for \ Shapes before using the export_stl method" ) filenames.append( str(Path(output_folder) / Path(entry.stl_filename))) entry.export_stl( Path(output_folder) / Path( entry.stl_filename), tolerance) # creates a graveyard (bounding shell volume) which is needed for # nuetronics simulations self.make_graveyard() filenames.append( str(Path(output_folder) / Path(self.graveyard.stl_filename))) self.graveyard.export_stl( Path(output_folder) / Path(self.graveyard.stl_filename) ) print("exported stl files ", filenames) return filenames def export_h5m( self, filename='dagmc.h5m', skip_graveyard=False, tolerance=0.001, graveyard_offset=100): """Converts stl files into DAGMC compatible h5m file using PyMOAB. The DAGMC file produced has not been imprinted and merged unlike the other supported method which uses Trelis to produce an imprinted and merged DAGMC geometry. If the provided filename doesn't end with .h5m it will be added Args: filename (str, optional): filename of h5m outputfile Defaults to "dagmc.h5m". skip_graveyard (boolean, optional): filename of h5m outputfile Defaults to False. tolerance (float, optional): the precision of the faceting Defaults to 0.001. graveyard_offset (float, optional): the offset between the largest edge of the geometry and inner bounding shell created. Defaults to 100. 
Returns: filename: output h5m filename """ path_filename = Path(filename) if path_filename.suffix != ".h5m": path_filename = path_filename.with_suffix(".h5m") path_filename.parents[0].mkdir(parents=True, exist_ok=True) moab_core, moab_tags = define_moab_core_and_tags() surface_id = 1 volume_id = 1 for item in self.shapes_and_components: item.export_stl(item.stl_filename, tolerance=tolerance) moab_core = add_stl_to_moab_core( moab_core, surface_id, volume_id, item.material_tag, moab_tags, item.stl_filename) volume_id += 1 surface_id += 1 if skip_graveyard is False: self.make_graveyard(graveyard_offset=graveyard_offset) self.graveyard.export_stl(self.graveyard.stl_filename) volume_id = 2 surface_id = 2 moab_core = add_stl_to_moab_core( moab_core, surface_id, volume_id, self.graveyard.material_tag, moab_tags, self.graveyard.stl_filename ) all_sets = moab_core.get_entities_by_handle(0) file_set = moab_core.create_meshset() moab_core.add_entities(file_set, all_sets) moab_core.write_file(str(path_filename)) return filename def export_physical_groups(self, output_folder=""): """Exports several JSON files containing a look up table which is useful for identifying faces and volumes. The output file names are generated from .stp_filename properties. Args: output_folder (str, optional): directory of outputfiles. Defaults to "". Raises: ValueError: if one .stp_filename property is set to None Returns: list: list of output file names """ filenames = [] for entry in self.shapes_and_components: if entry.stp_filename is None: raise ValueError( "set .stp_filename property for \ Shapes before using the export_stp method" ) filenames.append( str(Path(output_folder) / Path(entry.stp_filename))) entry.export_physical_groups( Path(output_folder) / Path(entry.stp_filename)) return filenames def export_svg(self, filename): """Exports an svg file for the Reactor.solid. If the filename provided doesn't end with .svg it will be added. 
Args: filename (str): the filename of the svg file to be exported """ path_filename = Path(filename) if path_filename.suffix != ".svg": path_filename = path_filename.with_suffix(".svg") path_filename.parents[0].mkdir(parents=True, exist_ok=True) with open(path_filename, "w") as out_file: exporters.exportShape(self.solid, "SVG", out_file) print("Saved file as ", path_filename) def export_graveyard( self, graveyard_offset=100, filename="Graveyard.stp"): """Writes an stp file (CAD geometry) for the reactor graveyard. This is needed for DAGMC simulations. This method also calls Reactor.make_graveyard with the offset. Args: filename (str): the filename for saving the stp file graveyard_offset (float): the offset between the largest edge of the geometry and inner bounding shell created. Defaults to Reactor.graveyard_offset Returns: str: the stp filename created """ self.make_graveyard(graveyard_offset=graveyard_offset) self.graveyard.export_stp(Path(filename)) return filename def make_graveyard(self, graveyard_offset=100): """Creates a graveyard volume (bounding box) that encapsulates all volumes. This is required by DAGMC when performing neutronics simulations. Args: graveyard_offset (float): the offset between the largest edge of the geometry and inner bounding shell created. Defaults to Reactor.graveyard_offset Returns: CadQuery solid: a shell volume that bounds the geometry, referred to as a graveyard in DAGMC """ self.graveyard_offset = graveyard_offset for component in self.shapes_and_components: if component.solid is None: component.create_solid() graveyard_shape = paramak.HollowCube( length=self.largest_dimension * 2 + graveyard_offset * 2, name="Graveyard", material_tag="Graveyard", stp_filename="Graveyard.stp", stl_filename="Graveyard.stl", ) self.graveyard = graveyard_shape return graveyard_shape def export_2d_image( self, filename="2d_slice.png", xmin=0.0, xmax=900.0, ymin=-600.0, ymax=600.0): """Creates a 2D slice image (png) of the reactor. 
Args: filename (str): output filename of the image created Returns: str: png filename created """ path_filename = Path(filename) if path_filename.suffix != ".png": path_filename = path_filename.with_suffix(".png") path_filename.parents[0].mkdir(parents=True, exist_ok=True) fig, ax = plt.subplots() # creates indvidual patches for each Shape which are combined together for entry in self.shapes_and_components: patch = entry._create_patch() ax.add_collection(patch) ax.axis("equal") ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax)) ax.set_aspect("equal", "box") Path(filename).parent.mkdir(parents=True, exist_ok=True) plt.savefig(filename, dpi=100) plt.close() print("\n saved 2d image to ", str(path_filename)) return str(path_filename) def export_html(self, filename="reactor.html"): """Creates a html graph representation of the points for the Shape objects that make up the reactor. Note, If filename provided doesn't end with .html then it will be appended. Args: filename (str): the filename to save the html graph Returns: plotly figure: figure object """ path_filename = Path(filename) if path_filename.suffix != ".html": path_filename = path_filename.with_suffix(".html") path_filename.parents[0].mkdir(parents=True, exist_ok=True) fig = go.Figure() fig.update_layout( {"title": "coordinates of components", "hovermode": "closest"} ) # accesses the Shape traces for each Shape and adds them to the figure for entry in self.shapes_and_components: fig.add_trace(entry._trace()) fig.write_html(str(path_filename)) print("Exported html graph to ", str(path_filename)) return fig
35.448553
80
0.612318
import json from collections import Iterable from pathlib import Path import cadquery as cq import matplotlib.pyplot as plt import plotly.graph_objects as go from cadquery import exporters import paramak from paramak.neutronics_utils import (add_stl_to_moab_core, define_moab_core_and_tags) from paramak.utils import get_hash class Reactor: def __init__(self, shapes_and_components): self.material_tags = [] self.stp_filenames = [] self.stl_filenames = [] self.tet_meshes = [] self.graveyard = None self.solid = None self.shapes_and_components = shapes_and_components self.reactor_hash_value = None self.graveyard_offset = None @property def stp_filenames(self): values = [] for shape_or_component in self.shapes_and_components: values.append(shape_or_component.stp_filename) return values @stp_filenames.setter def stp_filenames(self, value): self._stp_filenames = value @property def stl_filenames(self): values = [] for shape_or_component in self.shapes_and_components: values.append(shape_or_component.stl_filename) return values @stl_filenames.setter def stl_filenames(self, value): self._stl_filenames = value @property def largest_dimension(self): largest_dimension = 0 for component in self.shapes_and_components: largest_dimension = max( largest_dimension, component.largest_dimension) self._largest_dimension = largest_dimension return largest_dimension @largest_dimension.setter def largest_dimension(self, value): self._largest_dimension = value @property def material_tags(self): values = [] for shape_or_component in self.shapes_and_components: if isinstance( shape_or_component, (paramak.Plasma, paramak.PlasmaFromPoints, paramak.PlasmaBoundaries)) is False: values.append(shape_or_component.material_tag) return values @material_tags.setter def material_tags(self, value): self._material_tags = value @property def tet_meshes(self): values = [] for shape_or_componet in self.shapes_and_components: values.append(shape_or_componet.tet_mesh) return values @tet_meshes.setter def 
tet_meshes(self, value): self._tet_meshes = value @property def shapes_and_components(self): if hasattr(self, "create_solids"): ignored_keys = ["reactor_hash_value"] if get_hash(self, ignored_keys) != self.reactor_hash_value: self.create_solids() self.reactor_hash_value = get_hash(self, ignored_keys) return self._shapes_and_components @shapes_and_components.setter def shapes_and_components(self, value): if not isinstance(value, Iterable): raise ValueError("shapes_and_components must be a list") self._shapes_and_components = value @property def graveyard_offset(self): return self._graveyard_offset @graveyard_offset.setter def graveyard_offset(self, value): if value is None: self._graveyard_offset = None elif not isinstance(value, (float, int)): raise ValueError("graveyard_offset must be a number") elif value < 0: raise ValueError("graveyard_offset must be positive") self._graveyard_offset = value @property def solid(self): list_of_cq_vals = [] for shape_or_compound in self.shapes_and_components: if isinstance( shape_or_compound.solid, cq.occ_impl.shapes.Compound): for solid in shape_or_compound.solid.Solids(): list_of_cq_vals.append(solid) else: list_of_cq_vals.append(shape_or_compound.solid.val()) compound = cq.Compound.makeCompound(list_of_cq_vals) compound = compound.rotate( startVector=(0, 1, 0), endVector=(0, 0, 1), angleDegrees=180 ) return compound @solid.setter def solid(self, value): self._solid = value def neutronics_description(self, include_plasma=False, include_graveyard=True ): neutronics_description = [] for entry in self.shapes_and_components: if include_plasma is False and isinstance( entry, (paramak.Plasma, paramak.PlasmaFromPoints, paramak.PlasmaBoundaries)) is True: continue if entry.stp_filename is None: raise ValueError( "Set Shape.stp_filename for all the \ Reactor entries before using this method" ) if entry.material_tag is None: raise ValueError( "set Shape.material_tag for all the \ Reactor entries before using this method" ) 
neutronics_description.append(entry.neutronics_description()) if include_graveyard is True: self.make_graveyard() neutronics_description.append( self.graveyard.neutronics_description()) return neutronics_description def export_neutronics_description( self, filename="manifest.json", include_plasma=False, include_graveyard=True): path_filename = Path(filename) if path_filename.suffix != ".json": path_filename = path_filename.with_suffix(".json") path_filename.parents[0].mkdir(parents=True, exist_ok=True) with open(path_filename, "w") as outfile: json.dump( self.neutronics_description( include_plasma=include_plasma, include_graveyard=include_graveyard, ), outfile, indent=4, ) print("saved geometry description to ", path_filename) return str(path_filename) def export_stp(self, output_folder="", graveyard_offset=100, mode='solid'): if len(self.stp_filenames) != len(set(self.stp_filenames)): raise ValueError( "Set Reactor already contains a shape or component \ with this stp_filename", self.stp_filenames, ) filenames = [] for entry in self.shapes_and_components: if entry.stp_filename is None: raise ValueError( "set .stp_filename property for \ Shapes before using the export_stp method" ) filenames.append( str(Path(output_folder) / Path(entry.stp_filename))) entry.export_stp( filename=Path(output_folder) / Path(entry.stp_filename), mode=mode ) self.make_graveyard(graveyard_offset=graveyard_offset) filenames.append( str(Path(output_folder) / Path(self.graveyard.stp_filename))) self.graveyard.export_stp( Path(output_folder) / Path(self.graveyard.stp_filename) ) return filenames def export_stl(self, output_folder="", tolerance=0.001): if len(self.stl_filenames) != len(set(self.stl_filenames)): raise ValueError( "Set Reactor already contains a shape or component \ with this stl_filename", self.stl_filenames, ) filenames = [] for entry in self.shapes_and_components: print("entry.stl_filename", entry.stl_filename) if entry.stl_filename is None: raise ValueError( "set 
.stl_filename property for \ Shapes before using the export_stl method" ) filenames.append( str(Path(output_folder) / Path(entry.stl_filename))) entry.export_stl( Path(output_folder) / Path( entry.stl_filename), tolerance) self.make_graveyard() filenames.append( str(Path(output_folder) / Path(self.graveyard.stl_filename))) self.graveyard.export_stl( Path(output_folder) / Path(self.graveyard.stl_filename) ) print("exported stl files ", filenames) return filenames def export_h5m( self, filename='dagmc.h5m', skip_graveyard=False, tolerance=0.001, graveyard_offset=100): path_filename = Path(filename) if path_filename.suffix != ".h5m": path_filename = path_filename.with_suffix(".h5m") path_filename.parents[0].mkdir(parents=True, exist_ok=True) moab_core, moab_tags = define_moab_core_and_tags() surface_id = 1 volume_id = 1 for item in self.shapes_and_components: item.export_stl(item.stl_filename, tolerance=tolerance) moab_core = add_stl_to_moab_core( moab_core, surface_id, volume_id, item.material_tag, moab_tags, item.stl_filename) volume_id += 1 surface_id += 1 if skip_graveyard is False: self.make_graveyard(graveyard_offset=graveyard_offset) self.graveyard.export_stl(self.graveyard.stl_filename) volume_id = 2 surface_id = 2 moab_core = add_stl_to_moab_core( moab_core, surface_id, volume_id, self.graveyard.material_tag, moab_tags, self.graveyard.stl_filename ) all_sets = moab_core.get_entities_by_handle(0) file_set = moab_core.create_meshset() moab_core.add_entities(file_set, all_sets) moab_core.write_file(str(path_filename)) return filename def export_physical_groups(self, output_folder=""): filenames = [] for entry in self.shapes_and_components: if entry.stp_filename is None: raise ValueError( "set .stp_filename property for \ Shapes before using the export_stp method" ) filenames.append( str(Path(output_folder) / Path(entry.stp_filename))) entry.export_physical_groups( Path(output_folder) / Path(entry.stp_filename)) return filenames def export_svg(self, filename): 
path_filename = Path(filename) if path_filename.suffix != ".svg": path_filename = path_filename.with_suffix(".svg") path_filename.parents[0].mkdir(parents=True, exist_ok=True) with open(path_filename, "w") as out_file: exporters.exportShape(self.solid, "SVG", out_file) print("Saved file as ", path_filename) def export_graveyard( self, graveyard_offset=100, filename="Graveyard.stp"): self.make_graveyard(graveyard_offset=graveyard_offset) self.graveyard.export_stp(Path(filename)) return filename def make_graveyard(self, graveyard_offset=100): self.graveyard_offset = graveyard_offset for component in self.shapes_and_components: if component.solid is None: component.create_solid() graveyard_shape = paramak.HollowCube( length=self.largest_dimension * 2 + graveyard_offset * 2, name="Graveyard", material_tag="Graveyard", stp_filename="Graveyard.stp", stl_filename="Graveyard.stl", ) self.graveyard = graveyard_shape return graveyard_shape def export_2d_image( self, filename="2d_slice.png", xmin=0.0, xmax=900.0, ymin=-600.0, ymax=600.0): path_filename = Path(filename) if path_filename.suffix != ".png": path_filename = path_filename.with_suffix(".png") path_filename.parents[0].mkdir(parents=True, exist_ok=True) fig, ax = plt.subplots() for entry in self.shapes_and_components: patch = entry._create_patch() ax.add_collection(patch) ax.axis("equal") ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax)) ax.set_aspect("equal", "box") Path(filename).parent.mkdir(parents=True, exist_ok=True) plt.savefig(filename, dpi=100) plt.close() print("\n saved 2d image to ", str(path_filename)) return str(path_filename) def export_html(self, filename="reactor.html"): path_filename = Path(filename) if path_filename.suffix != ".html": path_filename = path_filename.with_suffix(".html") path_filename.parents[0].mkdir(parents=True, exist_ok=True) fig = go.Figure() fig.update_layout( {"title": "coordinates of components", "hovermode": "closest"} ) for entry in self.shapes_and_components: 
fig.add_trace(entry._trace()) fig.write_html(str(path_filename)) print("Exported html graph to ", str(path_filename)) return fig
true
true
f70327c1e525eeac2a08ebfff0787c591e507497
2,327
py
Python
ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_service.py
zyclove/ambari
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
[ "Apache-2.0" ]
null
null
null
ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_service.py
zyclove/ambari
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
[ "Apache-2.0" ]
null
null
null
ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_service.py
zyclove/ambari
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python2 """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from resource_management import * def hive_service( name, action='start'): import params if name == 'metastore': pid_file = format("{hive_pid_dir}/{hive_metastore_pid}") cmd = format( "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir}") elif name == 'hiveserver2': pid_file = format("{hive_pid_dir}/{hive_pid}") cmd = format( "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.err {pid_file} {hive_server_conf_dir}") if action == 'start': demon_cmd = format("{cmd}") no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1") Execute(demon_cmd, user=params.hive_user, not_if=no_op_test ) if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver": db_connection_check_command = format( "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}") 
Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin') elif action == 'stop': demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}") Execute(demon_cmd)
40.824561
247
0.728406
from resource_management import * def hive_service( name, action='start'): import params if name == 'metastore': pid_file = format("{hive_pid_dir}/{hive_metastore_pid}") cmd = format( "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir}") elif name == 'hiveserver2': pid_file = format("{hive_pid_dir}/{hive_pid}") cmd = format( "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.err {pid_file} {hive_server_conf_dir}") if action == 'start': demon_cmd = format("{cmd}") no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1") Execute(demon_cmd, user=params.hive_user, not_if=no_op_test ) if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver": db_connection_check_command = format( "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}") Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin') elif action == 'stop': demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}") Execute(demon_cmd)
true
true
f70327e26d7f220656e09d04aed22ba77da630b5
23,857
py
Python
pwnlib/log.py
Magic-King/pwntools
4e7355202df6d83edc2d388b9a936c3c38ea1e8a
[ "MIT" ]
10
2021-02-04T15:09:22.000Z
2021-03-02T19:50:58.000Z
pwnlib/log.py
Magic-King/pwntools
4e7355202df6d83edc2d388b9a936c3c38ea1e8a
[ "MIT" ]
null
null
null
pwnlib/log.py
Magic-King/pwntools
4e7355202df6d83edc2d388b9a936c3c38ea1e8a
[ "MIT" ]
2
2015-05-28T09:13:23.000Z
2016-08-31T06:43:30.000Z
""" Logging module for printing status during an exploit, and internally within ``pwntools``. Exploit Developers ------------------ By using the standard ``from pwn import *``, an object named ``log`` will be inserted into the global namespace. You can use this to print out status messages during exploitation. For example,:: log.info('Hello, world!') prints:: [*] Hello, world! Additionally, there are some nifty mechanisms for performing status updates on a running job (e.g. when brute-forcing).:: p = log.progress('Working') p.status('Reticulating splines') time.sleep(1) p.success('Got a shell!') The verbosity of logging can be most easily controlled by setting ``log_level`` on the global ``context`` object.:: log.info("No you see me") context.log_level = 'error' log.info("Now you don't") The purpose of this attribute is to control what gets printed to the screen, not what gets emitted. This means that you can put all logging events into a log file, while only wanting to see a small subset of them on your screen. Pwnlib Developers ----------------- A module-specific logger can be imported into the module via:: from pwnlib.log import getLogger log = getLogger(__name__) This provides an easy way to filter logging programmatically or via a configuration file for debugging. When using ``progress``, you should use the ``with`` keyword to manage scoping, to ensure the spinner stops if an exception is thrown. Technical details ----------------- Familiarity with the :mod:`logging` module is assumed. A pwnlib root logger named 'pwnlib' is created and a custom handler and formatter is installed for it. The handler determines its logging level from :data:`context.log_level`. Ideally :data:`context.log_level` should only affect which records will be emitted by the handler such that e.g. logging to a file will not be changed by it. But for performance reasons it is not feasible log everything in the normal case. 
In particular there are tight loops inside :mod:`pwnlib.tubes.tube`, which we would like to be able to debug, but if we are not debugging them, they should not spit out messages (even to a log file). For this reason there are a few places inside pwnlib, that will not even emit a record without :data:`context.log_level` being set to `logging.DEBUG` or below. Log records created by ``Progress`` and ``Logger`` objects will set ``'pwnlib_msgtype'`` on the ``extra`` field to signal which kind of message was generated. This information is used by the formatter to prepend a symbol to the message, e.g. ``'[+] '`` in ``'[+] got a shell!'`` This field is ignored when using the ``logging`` module's standard formatters. All status updates (which are not dropped due to throttling) on progress loggers result in a log record being created. The ``extra`` field then carries a reference to the ``Progress`` logger as ``'pwnlib_progress'``. If the custom handler determines that :data:`term.term_mode` is enabled, log records that have a ``'pwnlib_progess'`` in their ``extra`` field will not result in a message being emitted but rather an animated progress line (with a spinner!) being created. Note that other handlers will still see a meaningful log record. The custom handler will only handle log records whith a level of at least :data:`context.log_level`. Thus if e.g. the level for the ``'pwnlib.tubes.ssh'`` is set to ``'DEBUG'`` no additional output will show up unless :data:`context.log_level` is also set to ``'DEBUG'``. Other handlers will however see the extra log records generated by the ``'pwnlib.tubes.ssh'`` logger. 
""" from __future__ import absolute_import from __future__ import division import logging import os import random import re import six import sys import threading import time from pwnlib import term from pwnlib.config import register_config from pwnlib.context import Thread from pwnlib.context import context from pwnlib.exception import PwnlibException from pwnlib.term import spinners from pwnlib.term import text __all__ = [ 'getLogger', 'install_default_handler', 'rootlogger' ] # list of prefixes to use for the different message types. note that the `text` # module won't add any escape codes if `pwnlib.context.log_console.isatty()` is `False` _msgtype_prefixes = { 'status' : [text.magenta, 'x'], 'success' : [text.bold_green, '+'], 'failure' : [text.bold_red, '-'], 'debug' : [text.bold_red, 'DEBUG'], 'info' : [text.bold_blue, '*'], 'warning' : [text.bold_yellow, '!'], 'error' : [text.on_red, 'ERROR'], 'exception' : [text.on_red, 'ERROR'], 'critical' : [text.on_red, 'CRITICAL'], 'info_once' : [text.bold_blue, '*'], 'warning_once' : [text.bold_yellow, '!'], } def read_log_config(settings): log = getLogger(__name__) for key, value in settings.items(): if '.' not in key: log.warn("Invalid configuration option %r in section %r" % (key, 'log')) continue msgtype, key = key.split('.', 1) if key == 'color': current = _msgtype_prefixes[msgtype][0] _msgtype_prefixes[msgtype][0] = getattr(text, value, current) elif key == 'symbol': _msgtype_prefixes[msgtype][1] = value else: log.warn("Unknown configuration option %r in section %r" % (key, 'log')) register_config('log', read_log_config) # the text decoration to use for spinners. the spinners themselves can be found # in the `pwnlib.term.spinners` module _spinner_style = text.bold_blue class Progress(object): """ Progress logger used to generate log records associated with some running job. 
Instances can be used as context managers which will automatically declare the running job a success upon exit or a failure upon a thrown exception. After :meth:`success` or :meth:`failure` is called the status can no longer be updated. This class is intended for internal use. Progress loggers should be created using :meth:`Logger.progress`. """ def __init__(self, logger, msg, status, level, args, kwargs): self._logger = logger self._msg = msg self._status = status self._level = level self._stopped = False self.last_status = 0 self.rate = kwargs.pop('rate', 0) self._log(status, args, kwargs, 'status') # it is a common use case to create a logger and then immediately update # its status line, so we reset `last_status` to accommodate this pattern self.last_status = 0 def _log(self, status, args, kwargs, msgtype): # Logs are strings, not bytes. Handle Python3 bytes() objects. status = six.ensure_text(status) # this progress logger is stopped, so don't generate any more records if self._stopped: return msg = self._msg if msg and status: msg += ': ' msg += status self._logger._log(self._level, msg, args, kwargs, msgtype, self) def status(self, status, *args, **kwargs): """status(status, *args, **kwargs) Logs a status update for the running job. If the progress logger is animated the status line will be updated in place. Status updates are throttled at one update per 100ms. """ now = time.time() if (now - self.last_status) > self.rate: self.last_status = now self._log(status, args, kwargs, 'status') def success(self, status = 'Done', *args, **kwargs): """success(status = 'Done', *args, **kwargs) Logs that the running job succeeded. No further status updates are allowed. If the Logger is animated, the animation is stopped. """ self._log(status, args, kwargs, 'success') self._stopped = True def failure(self, status = 'Failed', *args, **kwargs): """failure(message) Logs that the running job failed. No further status updates are allowed. 
If the Logger is animated, the animation is stopped. """ self._log(status, args, kwargs, 'failure') self._stopped = True def __enter__(self): return self def __exit__(self, exc_typ, exc_val, exc_tb): # if the progress logger is already stopped these are no-ops if exc_typ is None: self.success() else: self.failure() class Logger(object): """ A class akin to the :class:`logging.LoggerAdapter` class. All public methods defined on :class:`logging.Logger` instances are defined on this class. Also adds some ``pwnlib`` flavor: * :meth:`progress` (alias :meth:`waitfor`) * :meth:`success` * :meth:`failure` * :meth:`indented` * :meth:`info_once` * :meth:`warning_once` (alias :meth:`warn_once`) Adds ``pwnlib``-specific information for coloring, indentation and progress logging via log records ``extra`` field. Loggers instantiated with :func:`getLogger` will be of this class. """ _one_time_infos = set() _one_time_warnings = set() def __init__(self, logger=None): if logger is None: # This is a minor hack to permit user-defined classes which inherit # from a tube (which do not actually reside in the pwnlib library) # to receive logging abilities that behave as they would expect from # the rest of the library module = self.__module__ if not module.startswith('pwnlib'): module = 'pwnlib.' + module # - end hack - logger_name = '%s.%s.%s' % (module, self.__class__.__name__, id(self)) logger = logging.getLogger(logger_name) self._logger = logger def _getlevel(self, levelString): if isinstance(levelString, six.integer_types): return levelString return logging._levelNames[levelString.upper()] def _log(self, level, msg, args, kwargs, msgtype, progress = None): # Logs are strings, not bytes. Handle Python3 bytes() objects. 
msg = six.ensure_text(msg) extra = kwargs.get('extra', {}) extra.setdefault('pwnlib_msgtype', msgtype) extra.setdefault('pwnlib_progress', progress) kwargs['extra'] = extra self._logger.log(level, msg, *args, **kwargs) def progress(self, message, status = '', *args, **kwargs): """progress(message, status = '', *args, level = logging.INFO, **kwargs) -> Progress Creates a new progress logger which creates log records with log level `level`. Progress status can be updated using :meth:`Progress.status` and stopped using :meth:`Progress.success` or :meth:`Progress.failure`. If `term.term_mode` is enabled the progress logger will be animated. The progress manager also functions as a context manager. Using context managers ensures that animations stop even if an exception is raised. .. code-block:: python with log.progress('Trying something...') as p: for i in range(10): p.status("At %i" % i) time.sleep(0.5) x = 1/0 """ level = self._getlevel(kwargs.pop('level', logging.INFO)) return Progress(self, message, status, level, args, kwargs) def waitfor(self, *args, **kwargs): """Alias for :meth:`progress`.""" return self.progress(*args, **kwargs) def indented(self, message, *args, **kwargs): """indented(message, *args, level = logging.INFO, **kwargs) Log a message but don't put a line prefix on it. Arguments: level(int): Alternate log level at which to set the indented message. Defaults to :const:`logging.INFO`. """ level = self._getlevel(kwargs.pop('level', logging.INFO)) self._log(level, message, args, kwargs, 'indented') def success(self, message, *args, **kwargs): """success(message, *args, **kwargs) Logs a success message. """ self._log(logging.INFO, message, args, kwargs, 'success') def failure(self, message, *args, **kwargs): """failure(message, *args, **kwargs) Logs a failure message. """ self._log(logging.INFO, message, args, kwargs, 'failure') def info_once(self, message, *args, **kwargs): """info_once(message, *args, **kwargs) Logs an info message. 
The same message is never printed again. """ m = message % args if m not in self._one_time_infos: if self.isEnabledFor(logging.INFO): self._one_time_infos.add(m) self._log(logging.INFO, message, args, kwargs, 'info_once') def warning_once(self, message, *args, **kwargs): """warning_once(message, *args, **kwargs) Logs a warning message. The same message is never printed again. """ m = message % args if m not in self._one_time_warnings: if self.isEnabledFor(logging.WARNING): self._one_time_warnings.add(m) self._log(logging.WARNING, message, args, kwargs, 'warning_once') def warn_once(self, *args, **kwargs): """Alias for :meth:`warning_once`.""" return self.warning_once(*args, **kwargs) # logging functions also exposed by `logging.Logger` def debug(self, message, *args, **kwargs): """debug(message, *args, **kwargs) Logs a debug message. """ self._log(logging.DEBUG, message, args, kwargs, 'debug') def info(self, message, *args, **kwargs): """info(message, *args, **kwargs) Logs an info message. """ self._log(logging.INFO, message, args, kwargs, 'info') def hexdump(self, message, *args, **kwargs): # cyclic dependencies FTW! # TODO: Move pwnlib.util.fiddling.hexdump into a new module. import pwnlib.util.fiddling self.info(pwnlib.util.fiddling.hexdump(message, *args, **kwargs)) def warning(self, message, *args, **kwargs): """warning(message, *args, **kwargs) Logs a warning message. """ self._log(logging.WARNING, message, args, kwargs, 'warning') def warn(self, *args, **kwargs): """Alias for :meth:`warning`.""" return self.warning(*args, **kwargs) def error(self, message, *args, **kwargs): """error(message, *args, **kwargs) To be called outside an exception handler. Logs an error message, then raises a ``PwnlibException``. """ self._log(logging.ERROR, message, args, kwargs, 'error') raise PwnlibException(message % args) def exception(self, message, *args, **kwargs): """exception(message, *args, **kwargs) To be called from an exception handler. 
Logs a error message, then re-raises the current exception. """ kwargs["exc_info"] = 1 self._log(logging.ERROR, message, args, kwargs, 'exception') raise def critical(self, message, *args, **kwargs): """critical(message, *args, **kwargs) Logs a critical message. """ self._log(logging.CRITICAL, message, args, kwargs, 'critical') def log(self, level, message, *args, **kwargs): """log(level, message, *args, **kwargs) Logs a message with log level `level`. The ``pwnlib`` formatter will use the default :mod:`logging` formater to format this message. """ self._log(level, message, args, kwargs, None) def isEnabledFor(self, level): """isEnabledFor(level) -> bool See if the underlying logger is enabled for the specified level. """ effectiveLevel = self._logger.getEffectiveLevel() if effectiveLevel == 1: effectiveLevel = context.log_level return effectiveLevel <= level def setLevel(self, level): """setLevel(level) Set the logging level for the underlying logger. """ with context.local(log_level=level): self._logger.setLevel(context.log_level) def addHandler(self, handler): """addHandler(handler) Add the specified handler to the underlying logger. """ self._logger.addHandler(handler) def removeHandler(self, handler): """removeHandler(handler) Remove the specified handler from the underlying logger. """ self._logger.removeHandler(handler) @property def level(self): return self._logger.level @level.setter def level(self, value): with context.local(log_level=value): self._logger.level = context.log_level class Handler(logging.StreamHandler): """ A custom handler class. This class will report whatever :data:`context.log_level` is currently set to as its log level. If :data:`term.term_mode` is enabled log records originating from a progress logger will not be emitted but rather an animated progress line will be created. An instance of this handler is added to the ``'pwnlib'`` logger. 
""" @property def stream(self): return context.log_console @stream.setter def stream(self, value): pass def emit(self, record): """ Emit a log record or create/update an animated progress logger depending on whether :data:`term.term_mode` is enabled. """ # We have set the root 'pwnlib' logger to have a logLevel of 1, # when logging has been enabled via install_default_handler. # # If the level is 1, we should only process the record if # context.log_level is less than the record's log level. # # If the level is not 1, somebody else expressly set the log # level somewhere on the tree, and we should use that value. level = logging.getLogger(record.name).getEffectiveLevel() if level == 1: level = context.log_level if level > record.levelno: return progress = getattr(record, 'pwnlib_progress', None) # if the record originates from a `Progress` object and term handling # is enabled we can have animated spinners! so check that if progress is None or not term.term_mode: super(Handler, self).emit(record) return # yay, spinners! 
# since we want to be able to update the spinner we overwrite the # message type so that the formatter doesn't output a prefix symbol msgtype = record.pwnlib_msgtype record.pwnlib_msgtype = 'animated' msg = "%s\n" % self.format(record) # we enrich the `Progress` object to keep track of the spinner if not hasattr(progress, '_spinner_handle'): spinner_handle = term.output('') msg_handle = term.output(msg) stop = threading.Event() def spin(): '''Wheeeee!''' state = 0 states = random.choice(spinners.spinners) while True: prefix = '[%s] ' % _spinner_style(states[state]) spinner_handle.update(prefix) state = (state + 1) % len(states) if stop.wait(0.1): break t = Thread(target = spin) t.daemon = True t.start() progress._spinner_handle = spinner_handle progress._msg_handle = msg_handle progress._stop_event = stop progress._spinner_thread = t else: progress._msg_handle.update(msg) # if the message type was not a status message update, then we should # stop the spinner if msgtype != 'status': progress._stop_event.set() progress._spinner_thread.join() style, symb = _msgtype_prefixes[msgtype] prefix = '[%s] ' % style(symb) progress._spinner_handle.update(prefix) class Formatter(logging.Formatter): """ Logging formatter which performs custom formatting for log records containing the ``'pwnlib_msgtype'`` attribute. Other records are formatted using the `logging` modules default formatter. If ``'pwnlib_msgtype'`` is set, it performs the following actions: * A prefix looked up in `_msgtype_prefixes` is prepended to the message. * The message is prefixed such that it starts on column four. * If the message spans multiple lines they are split, and all subsequent lines are indented. This formatter is used by the handler installed on the ``'pwnlib'`` logger. """ # Indentation from the left side of the terminal. # All log messages will be indented at list this far. indent = ' ' # Newline, followed by an indent. Used to wrap multiple lines. 
nlindent = '\n' + indent def format(self, record): # use the default formatter to actually format the record msg = super(Formatter, self).format(record) # then put on a prefix symbol according to the message type msgtype = getattr(record, 'pwnlib_msgtype', None) # if 'pwnlib_msgtype' is not set (or set to `None`) we just return the # message as it is if msgtype is None: return msg if msgtype in _msgtype_prefixes: style, symb = _msgtype_prefixes[msgtype] prefix = '[%s] ' % style(symb) elif msgtype == 'indented': prefix = self.indent elif msgtype == 'animated': # the handler will take care of updating the spinner, so we will # not include it here prefix = '' else: # this should never happen prefix = '[?] ' msg = prefix + msg msg = self.nlindent.join(msg.splitlines()) return msg # we keep a dictionary of loggers such that multiple calls to `getLogger` with # the same name will return the same logger def getLogger(name): return Logger(logging.getLogger(name)) class LogfileHandler(logging.FileHandler): def __init__(self): super(LogfileHandler, self).__init__('', delay=1) @property def stream(self): return context.log_file @stream.setter def stream(self, value): pass def handle(self, *a, **kw): if self.stream.name is not None: super(LogfileHandler, self).handle(*a, **kw) iso_8601 = '%Y-%m-%dT%H:%M:%S' fmt = '%(asctime)s:%(levelname)s:%(name)s:%(message)s' log_file = LogfileHandler() log_file.setFormatter(logging.Formatter(fmt, iso_8601)) # # The root 'pwnlib' logger is declared here. To change the target of all # 'pwntools'-specific logging, only this logger needs to be changed. # # Logging cascades upward through the hierarchy, # so the only point that should ever need to be # modified is the root 'pwnlib' logger. 
# # For example: # map(rootlogger.removeHandler, rootlogger.handlers) # logger.addHandler(myCoolPitchingHandler) # rootlogger = getLogger('pwnlib') console = Handler() formatter = Formatter() console.setFormatter(formatter) def install_default_handler(): '''install_default_handler() Instantiates a :class:`Handler` and :class:`Formatter` and installs them for the ``pwnlib`` root logger. This function is automatically called from when importing :mod:`pwn`. ''' logger = logging.getLogger('pwnlib') if console not in logger.handlers: logger.addHandler(console) logger.addHandler(log_file) logger.setLevel(1)
34.726346
92
0.635746
from __future__ import absolute_import from __future__ import division import logging import os import random import re import six import sys import threading import time from pwnlib import term from pwnlib.config import register_config from pwnlib.context import Thread from pwnlib.context import context from pwnlib.exception import PwnlibException from pwnlib.term import spinners from pwnlib.term import text __all__ = [ 'getLogger', 'install_default_handler', 'rootlogger' ] _msgtype_prefixes = { 'status' : [text.magenta, 'x'], 'success' : [text.bold_green, '+'], 'failure' : [text.bold_red, '-'], 'debug' : [text.bold_red, 'DEBUG'], 'info' : [text.bold_blue, '*'], 'warning' : [text.bold_yellow, '!'], 'error' : [text.on_red, 'ERROR'], 'exception' : [text.on_red, 'ERROR'], 'critical' : [text.on_red, 'CRITICAL'], 'info_once' : [text.bold_blue, '*'], 'warning_once' : [text.bold_yellow, '!'], } def read_log_config(settings): log = getLogger(__name__) for key, value in settings.items(): if '.' not in key: log.warn("Invalid configuration option %r in section %r" % (key, 'log')) continue msgtype, key = key.split('.', 1) if key == 'color': current = _msgtype_prefixes[msgtype][0] _msgtype_prefixes[msgtype][0] = getattr(text, value, current) elif key == 'symbol': _msgtype_prefixes[msgtype][1] = value else: log.warn("Unknown configuration option %r in section %r" % (key, 'log')) register_config('log', read_log_config) # the text decoration to use for spinners. 
the spinners themselves can be found # in the `pwnlib.term.spinners` module _spinner_style = text.bold_blue class Progress(object): def __init__(self, logger, msg, status, level, args, kwargs): self._logger = logger self._msg = msg self._status = status self._level = level self._stopped = False self.last_status = 0 self.rate = kwargs.pop('rate', 0) self._log(status, args, kwargs, 'status') # it is a common use case to create a logger and then immediately update # its status line, so we reset `last_status` to accommodate this pattern self.last_status = 0 def _log(self, status, args, kwargs, msgtype): # Logs are strings, not bytes. Handle Python3 bytes() objects. status = six.ensure_text(status) # this progress logger is stopped, so don't generate any more records if self._stopped: return msg = self._msg if msg and status: msg += ': ' msg += status self._logger._log(self._level, msg, args, kwargs, msgtype, self) def status(self, status, *args, **kwargs): now = time.time() if (now - self.last_status) > self.rate: self.last_status = now self._log(status, args, kwargs, 'status') def success(self, status = 'Done', *args, **kwargs): self._log(status, args, kwargs, 'success') self._stopped = True def failure(self, status = 'Failed', *args, **kwargs): self._log(status, args, kwargs, 'failure') self._stopped = True def __enter__(self): return self def __exit__(self, exc_typ, exc_val, exc_tb): if exc_typ is None: self.success() else: self.failure() class Logger(object): _one_time_infos = set() _one_time_warnings = set() def __init__(self, logger=None): if logger is None: module = self.__module__ if not module.startswith('pwnlib'): module = 'pwnlib.' 
+ module logger_name = '%s.%s.%s' % (module, self.__class__.__name__, id(self)) logger = logging.getLogger(logger_name) self._logger = logger def _getlevel(self, levelString): if isinstance(levelString, six.integer_types): return levelString return logging._levelNames[levelString.upper()] def _log(self, level, msg, args, kwargs, msgtype, progress = None): msg = six.ensure_text(msg) extra = kwargs.get('extra', {}) extra.setdefault('pwnlib_msgtype', msgtype) extra.setdefault('pwnlib_progress', progress) kwargs['extra'] = extra self._logger.log(level, msg, *args, **kwargs) def progress(self, message, status = '', *args, **kwargs): level = self._getlevel(kwargs.pop('level', logging.INFO)) return Progress(self, message, status, level, args, kwargs) def waitfor(self, *args, **kwargs): return self.progress(*args, **kwargs) def indented(self, message, *args, **kwargs): level = self._getlevel(kwargs.pop('level', logging.INFO)) self._log(level, message, args, kwargs, 'indented') def success(self, message, *args, **kwargs): self._log(logging.INFO, message, args, kwargs, 'success') def failure(self, message, *args, **kwargs): self._log(logging.INFO, message, args, kwargs, 'failure') def info_once(self, message, *args, **kwargs): m = message % args if m not in self._one_time_infos: if self.isEnabledFor(logging.INFO): self._one_time_infos.add(m) self._log(logging.INFO, message, args, kwargs, 'info_once') def warning_once(self, message, *args, **kwargs): m = message % args if m not in self._one_time_warnings: if self.isEnabledFor(logging.WARNING): self._one_time_warnings.add(m) self._log(logging.WARNING, message, args, kwargs, 'warning_once') def warn_once(self, *args, **kwargs): return self.warning_once(*args, **kwargs) def debug(self, message, *args, **kwargs): self._log(logging.DEBUG, message, args, kwargs, 'debug') def info(self, message, *args, **kwargs): self._log(logging.INFO, message, args, kwargs, 'info') def hexdump(self, message, *args, **kwargs): import 
pwnlib.util.fiddling self.info(pwnlib.util.fiddling.hexdump(message, *args, **kwargs)) def warning(self, message, *args, **kwargs): self._log(logging.WARNING, message, args, kwargs, 'warning') def warn(self, *args, **kwargs): return self.warning(*args, **kwargs) def error(self, message, *args, **kwargs): self._log(logging.ERROR, message, args, kwargs, 'error') raise PwnlibException(message % args) def exception(self, message, *args, **kwargs): kwargs["exc_info"] = 1 self._log(logging.ERROR, message, args, kwargs, 'exception') raise def critical(self, message, *args, **kwargs): self._log(logging.CRITICAL, message, args, kwargs, 'critical') def log(self, level, message, *args, **kwargs): self._log(level, message, args, kwargs, None) def isEnabledFor(self, level): effectiveLevel = self._logger.getEffectiveLevel() if effectiveLevel == 1: effectiveLevel = context.log_level return effectiveLevel <= level def setLevel(self, level): with context.local(log_level=level): self._logger.setLevel(context.log_level) def addHandler(self, handler): self._logger.addHandler(handler) def removeHandler(self, handler): self._logger.removeHandler(handler) @property def level(self): return self._logger.level @level.setter def level(self, value): with context.local(log_level=value): self._logger.level = context.log_level class Handler(logging.StreamHandler): @property def stream(self): return context.log_console @stream.setter def stream(self, value): pass def emit(self, record): # # If the level is not 1, somebody else expressly set the log # level somewhere on the tree, and we should use that value. level = logging.getLogger(record.name).getEffectiveLevel() if level == 1: level = context.log_level if level > record.levelno: return progress = getattr(record, 'pwnlib_progress', None) # if the record originates from a `Progress` object and term handling # is enabled we can have animated spinners! 
so check that if progress is None or not term.term_mode: super(Handler, self).emit(record) return # yay, spinners! # since we want to be able to update the spinner we overwrite the # message type so that the formatter doesn't output a prefix symbol msgtype = record.pwnlib_msgtype record.pwnlib_msgtype = 'animated' msg = "%s\n" % self.format(record) if not hasattr(progress, '_spinner_handle'): spinner_handle = term.output('') msg_handle = term.output(msg) stop = threading.Event() def spin(): state = 0 states = random.choice(spinners.spinners) while True: prefix = '[%s] ' % _spinner_style(states[state]) spinner_handle.update(prefix) state = (state + 1) % len(states) if stop.wait(0.1): break t = Thread(target = spin) t.daemon = True t.start() progress._spinner_handle = spinner_handle progress._msg_handle = msg_handle progress._stop_event = stop progress._spinner_thread = t else: progress._msg_handle.update(msg) if msgtype != 'status': progress._stop_event.set() progress._spinner_thread.join() style, symb = _msgtype_prefixes[msgtype] prefix = '[%s] ' % style(symb) progress._spinner_handle.update(prefix) class Formatter(logging.Formatter): indent = ' ' nlindent = '\n' + indent def format(self, record): msg = super(Formatter, self).format(record) msgtype = getattr(record, 'pwnlib_msgtype', None) if msgtype is None: return msg if msgtype in _msgtype_prefixes: style, symb = _msgtype_prefixes[msgtype] prefix = '[%s] ' % style(symb) elif msgtype == 'indented': prefix = self.indent elif msgtype == 'animated': prefix = '' else: prefix = '[?] 
' msg = prefix + msg msg = self.nlindent.join(msg.splitlines()) return msg def getLogger(name): return Logger(logging.getLogger(name)) class LogfileHandler(logging.FileHandler): def __init__(self): super(LogfileHandler, self).__init__('', delay=1) @property def stream(self): return context.log_file @stream.setter def stream(self, value): pass def handle(self, *a, **kw): if self.stream.name is not None: super(LogfileHandler, self).handle(*a, **kw) iso_8601 = '%Y-%m-%dT%H:%M:%S' fmt = '%(asctime)s:%(levelname)s:%(name)s:%(message)s' log_file = LogfileHandler() log_file.setFormatter(logging.Formatter(fmt, iso_8601)) rootlogger = getLogger('pwnlib') console = Handler() formatter = Formatter() console.setFormatter(formatter) def install_default_handler(): logger = logging.getLogger('pwnlib') if console not in logger.handlers: logger.addHandler(console) logger.addHandler(log_file) logger.setLevel(1)
true
true
f703287577524da6d03be806c4b05b4d0149cfad
2,602
py
Python
src/tools.py
JoaoCarabetta/brasilio-package
5004a87e6d98b7f07b31d358ef44346b3570c27b
[ "MIT" ]
null
null
null
src/tools.py
JoaoCarabetta/brasilio-package
5004a87e6d98b7f07b31d358ef44346b3570c27b
[ "MIT" ]
4
2018-06-22T23:15:47.000Z
2018-06-25T04:59:10.000Z
src/tools.py
JoaoCarabetta/brasilio-package
5004a87e6d98b7f07b31d358ef44346b3570c27b
[ "MIT" ]
1
2018-06-22T23:04:51.000Z
2018-06-22T23:04:51.000Z
import rows import os from timeit import default_timer import json output_path = '../package/data/' class Brasilio(object): def __init__(self, output_path='../package/data/', verbose=False): self.verbose = verbose self.output_path = output_path self.timer = default_timer def __enter__(self): # Cria diretório package if not os.path.exists(self.output_path): os.makedirs(self.output_path) # Cria resouces.py vazio json.dump([], open("resources.json", "w"), indent=2) # Start Timer self.start = self.timer() return self def __exit__(self, *args): # Cria datapackage create_datapackage(self.output_path, verbose=False) # End Timer end = self.timer() self.elapsed_secs = end - self.start self.elapsed = self.elapsed_secs # millisecs if self.verbose: print('Sucesso!\n Sua captura demorou: {0:.2f} s'.format(self.elapsed)) def generate_resources(filename, verbose=False): data_path = os.path.join(output_path, filename) if verbose: print('Reading Data') data = rows.import_from_csv(data_path) translate = {int: 'integer', str: 'string'} resource = {'format': "csv", "url": "http://brasil.io/dataset/{}?format=csv".format(filename.split('.')[0]), "path": data_path, "profile": "tabular-data-resource", 'schema': { 'fields': []} } for i, field in enumerate(data.field_names): resource['schema']['fields'].append({'name': field, 'type': translate[data.field_types[i].TYPE[0]]}) if verbose: print('Writing resources.json') # print(type(resources)) # print(json.dumps(resources)) resources = json.load(open("resources.json", "r")) resources.append(resource) json.dump(resources, open("resources.json", "w"), indent=2) def create_datapackage(output_path, verbose=False): # Criar o datapackage.json if verbose: print("Criando datapackage.json") with open("metadata.json", "r") as mfd: output = json.load(mfd) with open("resources.json", "r") as rfd: output['resources'] = json.load(rfd) with open("../package/datapackage.json", "w") as datapackage: json.dump(output, datapackage, indent=2) if __name__ == 
'__main__': pass
27.978495
95
0.568793
import rows import os from timeit import default_timer import json output_path = '../package/data/' class Brasilio(object): def __init__(self, output_path='../package/data/', verbose=False): self.verbose = verbose self.output_path = output_path self.timer = default_timer def __enter__(self): if not os.path.exists(self.output_path): os.makedirs(self.output_path) json.dump([], open("resources.json", "w"), indent=2) self.start = self.timer() return self def __exit__(self, *args): create_datapackage(self.output_path, verbose=False) end = self.timer() self.elapsed_secs = end - self.start self.elapsed = self.elapsed_secs if self.verbose: print('Sucesso!\n Sua captura demorou: {0:.2f} s'.format(self.elapsed)) def generate_resources(filename, verbose=False): data_path = os.path.join(output_path, filename) if verbose: print('Reading Data') data = rows.import_from_csv(data_path) translate = {int: 'integer', str: 'string'} resource = {'format': "csv", "url": "http://brasil.io/dataset/{}?format=csv".format(filename.split('.')[0]), "path": data_path, "profile": "tabular-data-resource", 'schema': { 'fields': []} } for i, field in enumerate(data.field_names): resource['schema']['fields'].append({'name': field, 'type': translate[data.field_types[i].TYPE[0]]}) if verbose: print('Writing resources.json') resources = json.load(open("resources.json", "r")) resources.append(resource) json.dump(resources, open("resources.json", "w"), indent=2) def create_datapackage(output_path, verbose=False): if verbose: print("Criando datapackage.json") with open("metadata.json", "r") as mfd: output = json.load(mfd) with open("resources.json", "r") as rfd: output['resources'] = json.load(rfd) with open("../package/datapackage.json", "w") as datapackage: json.dump(output, datapackage, indent=2) if __name__ == '__main__': pass
true
true
f70328c20a055ff6504e882b8c3d6f98fe0c019f
1,301
py
Python
scripts/git-precommit-hook.py
AWSNB/relay
3d7b69204fdfc34e1020f2b072961061b91a691a
[ "BSL-1.0" ]
null
null
null
scripts/git-precommit-hook.py
AWSNB/relay
3d7b69204fdfc34e1020f2b072961061b91a691a
[ "BSL-1.0" ]
null
null
null
scripts/git-precommit-hook.py
AWSNB/relay
3d7b69204fdfc34e1020f2b072961061b91a691a
[ "BSL-1.0" ]
null
null
null
#!/usr/bin/env python3 import os import pathlib import sys import subprocess def has_cargo_fmt(): """Runs a quick check to see if cargo fmt is installed.""" try: c = subprocess.run(["cargo", "fmt", "--", "--help"], capture_output=True) except OSError: return False else: return c.returncode == 0 def get_modified_files(): """Returns a list of all modified files.""" c = subprocess.run( ["git", "diff-index", "--cached", "--name-only", "HEAD"], capture_output=True ) return [pathlib.Path(os.fsdecode(p)) for p in c.stdout.splitlines()] def run_format_check(files): rust_files = [x for x in files if x.suffix == "rs" and x.isfile()] if not rust_files: return 0 ret = subprocess.run( ["cargo", "fmt", "--", "--check", "--color=always"] + rust_files ) if ret.returncode != 0: print("", file=sys.stderr) print( "\033[1m\033[2minfo: to fix this run `cargo fmt --all` and " "commit again\033[0m", file=sys.stderr, ) return ret.returncode def main(): if not has_cargo_fmt(): print("warning: cargo fmt not installed") return sys.exit(run_format_check(get_modified_files())) if __name__ == "__main__": main()
25.019231
85
0.590315
import os import pathlib import sys import subprocess def has_cargo_fmt(): try: c = subprocess.run(["cargo", "fmt", "--", "--help"], capture_output=True) except OSError: return False else: return c.returncode == 0 def get_modified_files(): c = subprocess.run( ["git", "diff-index", "--cached", "--name-only", "HEAD"], capture_output=True ) return [pathlib.Path(os.fsdecode(p)) for p in c.stdout.splitlines()] def run_format_check(files): rust_files = [x for x in files if x.suffix == "rs" and x.isfile()] if not rust_files: return 0 ret = subprocess.run( ["cargo", "fmt", "--", "--check", "--color=always"] + rust_files ) if ret.returncode != 0: print("", file=sys.stderr) print( "\033[1m\033[2minfo: to fix this run `cargo fmt --all` and " "commit again\033[0m", file=sys.stderr, ) return ret.returncode def main(): if not has_cargo_fmt(): print("warning: cargo fmt not installed") return sys.exit(run_format_check(get_modified_files())) if __name__ == "__main__": main()
true
true
f7032a385a92af39ff4e03fa94030ff64c7a226d
1,146
py
Python
check_mpi.py
jjmaldonis/mpi-parallelization
4cc2ab1e6929352073cafb83b1cb0ea990acff15
[ "MIT" ]
16
2017-06-07T07:14:58.000Z
2021-11-15T06:35:58.000Z
check_mpi.py
jjmaldonis/mpi-parallelization
4cc2ab1e6929352073cafb83b1cb0ea990acff15
[ "MIT" ]
1
2021-03-21T07:06:24.000Z
2021-03-21T15:33:24.000Z
check_mpi.py
jjmaldonis/mpi-parallelization
4cc2ab1e6929352073cafb83b1cb0ea990acff15
[ "MIT" ]
1
2020-09-14T04:29:31.000Z
2020-09-14T04:29:31.000Z
import os import distutils.spawn import mpi4py from mpi4py import MPI def check_mpi(): mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec")) for executable, path in mpi4py.get_config().items(): if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']: continue if mpiexec_path not in path: raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config())) if 'Open MPI' not in MPI.get_vendor(): raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.") vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]]) if vendor_number not in mpiexec_path: raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path)) check_mpi()
57.3
277
0.719023
import os import distutils.spawn import mpi4py from mpi4py import MPI def check_mpi(): mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec")) for executable, path in mpi4py.get_config().items(): if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']: continue if mpiexec_path not in path: raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config())) if 'Open MPI' not in MPI.get_vendor(): raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.") vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]]) if vendor_number not in mpiexec_path: raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path)) check_mpi()
true
true
f7032b5bb12dc99e7462909296f8c4084c40396f
903
py
Python
streaming_helpers.py
jaycosaur/spynet
535841bcea761463d27f7f3eb745ffe186d9f763
[ "MIT" ]
2
2020-08-13T05:51:15.000Z
2020-09-14T08:08:59.000Z
streaming_helpers.py
jaycosaur/distributed-vision-networking
9534202b5f0748dc181d6716ba96abd73aa9727d
[ "MIT" ]
5
2021-03-10T12:50:12.000Z
2022-02-27T02:18:33.000Z
streaming_helpers.py
jaycosaur/spynet
535841bcea761463d27f7f3eb745ffe186d9f763
[ "MIT" ]
null
null
null
import queue import time import numpy as np class CameraInformation: def __init__(self, cam_id: str): self._frame_queue: queue.Queue = queue.Queue(maxsize=1) self._frame_shape = None self._last_frame_time = None self.is_online = True self.node_id = cam_id def write_frame(self, frame): try: self._frame_queue.get_nowait() except queue.Empty: pass self._frame_shape = frame.shape self._last_frame_time = time.time() self._frame_queue.put_nowait(frame) def read_frame(self,): try: frame = self._frame_queue.get(timeout=2) if not self.is_online: self.is_online = True return frame except queue.Empty: if self.is_online: self.is_online = False return np.zeros(self._frame_shape)
27.363636
63
0.594684
import queue import time import numpy as np class CameraInformation: def __init__(self, cam_id: str): self._frame_queue: queue.Queue = queue.Queue(maxsize=1) self._frame_shape = None self._last_frame_time = None self.is_online = True self.node_id = cam_id def write_frame(self, frame): try: self._frame_queue.get_nowait() except queue.Empty: pass self._frame_shape = frame.shape self._last_frame_time = time.time() self._frame_queue.put_nowait(frame) def read_frame(self,): try: frame = self._frame_queue.get(timeout=2) if not self.is_online: self.is_online = True return frame except queue.Empty: if self.is_online: self.is_online = False return np.zeros(self._frame_shape)
true
true
f7032cb6248d072f5e1d20fee17d63cd87896599
1,733
py
Python
nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py
vferat/nipype
536c57da150d157dcb5c121af43aaeab71cdbd5f
[ "Apache-2.0" ]
null
null
null
nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py
vferat/nipype
536c57da150d157dcb5c121af43aaeab71cdbd5f
[ "Apache-2.0" ]
null
null
null
nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py
vferat/nipype
536c57da150d157dcb5c121af43aaeab71cdbd5f
[ "Apache-2.0" ]
null
null
null
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals from ..utils import Generate5tt def test_Generate5tt_inputs(): input_map = dict( algorithm=dict( argstr='%s', mandatory=True, position=-3, ), args=dict(argstr='%s', ), bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict( nohash=True, usedefault=True, ), grad_file=dict( argstr='-grad %s', extensions=None, xor=['grad_fsl'], ), grad_fsl=dict( argstr='-fslgrad %s %s', xor=['grad_file'], ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', extensions=None, ), in_file=dict( argstr='%s', extensions=None, mandatory=True, position=-2, ), nthreads=dict( argstr='-nthreads %d', nohash=True, ), out_file=dict( argstr='%s', extensions=None, mandatory=True, position=-1, ), ) inputs = Generate5tt.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Generate5tt_outputs(): output_map = dict(out_file=dict(extensions=None, ), ) outputs = Generate5tt.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): assert getattr(outputs.traits()[key], metakey) == value
27.951613
67
0.524524
from __future__ import unicode_literals from ..utils import Generate5tt def test_Generate5tt_inputs(): input_map = dict( algorithm=dict( argstr='%s', mandatory=True, position=-3, ), args=dict(argstr='%s', ), bval_scale=dict(argstr='-bvalue_scaling %s', ), environ=dict( nohash=True, usedefault=True, ), grad_file=dict( argstr='-grad %s', extensions=None, xor=['grad_fsl'], ), grad_fsl=dict( argstr='-fslgrad %s %s', xor=['grad_file'], ), in_bval=dict(extensions=None, ), in_bvec=dict( argstr='-fslgrad %s %s', extensions=None, ), in_file=dict( argstr='%s', extensions=None, mandatory=True, position=-2, ), nthreads=dict( argstr='-nthreads %d', nohash=True, ), out_file=dict( argstr='%s', extensions=None, mandatory=True, position=-1, ), ) inputs = Generate5tt.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value def test_Generate5tt_outputs(): output_map = dict(out_file=dict(extensions=None, ), ) outputs = Generate5tt.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): assert getattr(outputs.traits()[key], metakey) == value
true
true
f7032cee21098d6fc2a0ddeeff88cf6cdfc6fbf1
958
py
Python
setup.py
SaladDais/Impasse
3f8971ac533c59a3dce7802ecf673daeeec4e0fd
[ "BSD-3-Clause" ]
2
2021-07-07T08:22:46.000Z
2021-12-27T23:51:44.000Z
setup.py
SaladDais/Impasse
3f8971ac533c59a3dce7802ecf673daeeec4e0fd
[ "BSD-3-Clause" ]
6
2021-07-06T05:47:00.000Z
2021-07-11T15:04:14.000Z
setup.py
SaladDais/Impasse
3f8971ac533c59a3dce7802ecf673daeeec4e0fd
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python from setuptools import setup def readme(): with open('README.md') as f: return f.read() setup( name='impasse', # Version chosen for parity with Assimp since we need ABI compatibility version='5.0.6', license='BSD', description='Alternate Python bindings for the Open Asset Import Library (ASSIMP)', long_description=readme(), long_description_content_type="text/markdown", url='https://github.com/SaladDais/Impasse', author='Salad Dais', author_email='SaladDais@users.noreply.github.com', packages=['impasse'], data_files=[ ('share/impasse', ['README.md']), # TODO: Make these proper console scripts # ('share/examples/impasse', ['scripts/' + f for f in os.listdir('scripts/')]), ], install_requires=['numpy', 'cffi'], python_requires='>=3.7', zip_safe=False, tests_require=[ "pytest", ], test_suite='tests', )
26.611111
87
0.638831
from setuptools import setup def readme(): with open('README.md') as f: return f.read() setup( name='impasse', version='5.0.6', license='BSD', description='Alternate Python bindings for the Open Asset Import Library (ASSIMP)', long_description=readme(), long_description_content_type="text/markdown", url='https://github.com/SaladDais/Impasse', author='Salad Dais', author_email='SaladDais@users.noreply.github.com', packages=['impasse'], data_files=[ ('share/impasse', ['README.md']), ], install_requires=['numpy', 'cffi'], python_requires='>=3.7', zip_safe=False, tests_require=[ "pytest", ], test_suite='tests', )
true
true
f7032dda39d019d042d83b0dcb8d9588c22ca8d2
7,060
py
Python
tests/extension/thread_/stream_graph_substream/thread_stream_graph_substream.py
jesseclin/veriloggen
a645f2c53f04e5b88213eef17779d212192ea2b5
[ "Apache-2.0" ]
null
null
null
tests/extension/thread_/stream_graph_substream/thread_stream_graph_substream.py
jesseclin/veriloggen
a645f2c53f04e5b88213eef17779d212192ea2b5
[ "Apache-2.0" ]
null
null
null
tests/extension/thread_/stream_graph_substream/thread_stream_graph_substream.py
jesseclin/veriloggen
a645f2c53f04e5b88213eef17779d212192ea2b5
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import from __future__ import print_function import sys import os # the next line can be removed after installation sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))) from veriloggen import * import veriloggen.thread as vthread import veriloggen.types.axi as axi def mkLed(): m = Module('blinkled') clk = m.Input('CLK') rst = m.Input('RST') datawidth = 32 addrwidth = 10 myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth) ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth) ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth) ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth) mulstrm = vthread.Stream(m, 'mul_stream', clk, rst) mulx = mulstrm.source('x') muly = mulstrm.source('y') mulz = mulx * muly mulstrm.sink(mulz, 'z') macstrm = vthread.Stream(m, 'mac_stream', clk, rst) a = macstrm.source('a') b = macstrm.source('b') a = a + 1 b = b + 1 sub = macstrm.substream(mulstrm) sub.to_source('x', a) sub.to_source('y', b) c = sub.from_sink('z') size = macstrm.parameter('size') sum, sum_valid = macstrm.ReduceAddValid(c, size) macstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid') actstrm = vthread.Stream(m, 'act_stream', clk, rst) a = actstrm.source('a') b = actstrm.source('b') a = a + 1 b = b + 1 a = a + 1 b = b + 1 sub = actstrm.substream(mulstrm) sub.to_source('x', a) sub.to_source('y', b) c = sub.from_sink('z') size = actstrm.parameter('size') sum, sum_valid = actstrm.ReduceAddValid(c, size) sum = actstrm.Mux(sum > 0, sum, 0) actstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid') all_ok = m.TmpReg(initval=0) def comp_stream_mul(size, offset): mulstrm.set_source('x', ram_a, offset, size) mulstrm.set_source('y', ram_b, offset, size) mulstrm.set_sink('z', ram_c, offset, size) mulstrm.run() mulstrm.join() def comp_stream_mac(size, offset): macstrm.set_source('a', ram_a, offset, size) macstrm.set_source('b', 
ram_b, offset, size) macstrm.set_parameter('size', size) macstrm.set_sink('sum', ram_c, offset, 1) macstrm.run() macstrm.join() def comp_stream_act(size, offset): actstrm.set_source('a', ram_a, offset, size) actstrm.set_source('b', ram_b, offset, size) actstrm.set_parameter('size', size) actstrm.set_sink('sum', ram_c, offset, 1) actstrm.run() actstrm.join() def comp_sequential_mul(size, offset): sum = 0 for i in range(size): a = ram_a.read(i + offset) b = ram_b.read(i + offset) sum = a * b ram_c.write(i + offset, sum) def comp_sequential_mac(size, offset): sum = 0 for i in range(size): a = ram_a.read(i + offset) + 1 b = ram_b.read(i + offset) + 1 sum += a * b ram_c.write(offset, sum) def comp_sequential_act(size, offset): sum = 0 for i in range(size): a = ram_a.read(i + offset) + 1 + 1 b = ram_b.read(i + offset) + 1 + 1 sum += a * b if sum <= 0: sum = 0 ram_c.write(offset, sum) def check(size, offset_stream, offset_seq): for i in range(size): st = ram_c.read(i + offset_stream) sq = ram_c.read(i + offset_seq) if vthread.verilog.NotEql(st, sq): all_ok.value = False if all_ok: print('# verify: PASSED') else: print('# verify: FAILED') def comp(size): all_ok.value = True # mul # stream offset = 0 myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_stream_mul(size, offset) myaxi.dma_write(ram_c, offset, 1024, size) # sequential offset = size myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_sequential_mul(size, offset) myaxi.dma_write(ram_c, offset, 1024 * 2, size) # verification print('# MUL') check(size, 0, offset) # mac # stream offset = 0 myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_stream_mac(size, offset) myaxi.dma_write(ram_c, offset, 1024, 1) # sequential offset = size myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_sequential_mac(size, offset) myaxi.dma_write(ram_c, offset, 1024 * 2, 1) # verification print('# MAC') 
check(1, 0, offset) # act # stream offset = 0 myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_stream_act(size, offset) myaxi.dma_write(ram_c, offset, 1024, 1) # sequential offset = size myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_sequential_act(size, offset) myaxi.dma_write(ram_c, offset, 1024 * 2, 1) # verification print('# ACT') check(1, 0, offset) vthread.finish() th = vthread.Thread(m, 'th_comp', clk, rst, comp) fsm = th.start(32) try: actstrm.draw_graph() except: pass return m def mkTest(memimg_name=None): m = Module('test') # target instance led = mkLed() # copy paras and ports params = m.copy_params(led) ports = m.copy_sim_ports(led) clk = ports['CLK'] rst = ports['RST'] memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name) memory.connect(ports, 'myaxi') uut = m.Instance(led, 'uut', params=m.connect_params(led), ports=m.connect_ports(led)) #simulation.setup_waveform(m, uut) simulation.setup_clock(m, clk, hperiod=5) init = simulation.setup_reset(m, rst, m.make_reset(), period=100) init.add( Delay(1000000), Systask('finish'), ) return m def run(filename='tmp.v', simtype='iverilog', outputfile=None): if outputfile is None: outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out' memimg_name = 'memimg_' + outputfile test = mkTest(memimg_name=memimg_name) if filename is not None: test.to_verilog(filename) sim = simulation.Simulator(test, sim=simtype) rslt = sim.run(outputfile=outputfile) lines = rslt.splitlines() if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')): rslt = '\n'.join(lines[:-1]) return rslt if __name__ == '__main__': rslt = run(filename='tmp.v') print(rslt)
28.015873
87
0.583711
from __future__ import absolute_import from __future__ import print_function import sys import os sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))) from veriloggen import * import veriloggen.thread as vthread import veriloggen.types.axi as axi def mkLed(): m = Module('blinkled') clk = m.Input('CLK') rst = m.Input('RST') datawidth = 32 addrwidth = 10 myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth) ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth) ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth) ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth) mulstrm = vthread.Stream(m, 'mul_stream', clk, rst) mulx = mulstrm.source('x') muly = mulstrm.source('y') mulz = mulx * muly mulstrm.sink(mulz, 'z') macstrm = vthread.Stream(m, 'mac_stream', clk, rst) a = macstrm.source('a') b = macstrm.source('b') a = a + 1 b = b + 1 sub = macstrm.substream(mulstrm) sub.to_source('x', a) sub.to_source('y', b) c = sub.from_sink('z') size = macstrm.parameter('size') sum, sum_valid = macstrm.ReduceAddValid(c, size) macstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid') actstrm = vthread.Stream(m, 'act_stream', clk, rst) a = actstrm.source('a') b = actstrm.source('b') a = a + 1 b = b + 1 a = a + 1 b = b + 1 sub = actstrm.substream(mulstrm) sub.to_source('x', a) sub.to_source('y', b) c = sub.from_sink('z') size = actstrm.parameter('size') sum, sum_valid = actstrm.ReduceAddValid(c, size) sum = actstrm.Mux(sum > 0, sum, 0) actstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid') all_ok = m.TmpReg(initval=0) def comp_stream_mul(size, offset): mulstrm.set_source('x', ram_a, offset, size) mulstrm.set_source('y', ram_b, offset, size) mulstrm.set_sink('z', ram_c, offset, size) mulstrm.run() mulstrm.join() def comp_stream_mac(size, offset): macstrm.set_source('a', ram_a, offset, size) macstrm.set_source('b', ram_b, offset, size) macstrm.set_parameter('size', 
size) macstrm.set_sink('sum', ram_c, offset, 1) macstrm.run() macstrm.join() def comp_stream_act(size, offset): actstrm.set_source('a', ram_a, offset, size) actstrm.set_source('b', ram_b, offset, size) actstrm.set_parameter('size', size) actstrm.set_sink('sum', ram_c, offset, 1) actstrm.run() actstrm.join() def comp_sequential_mul(size, offset): sum = 0 for i in range(size): a = ram_a.read(i + offset) b = ram_b.read(i + offset) sum = a * b ram_c.write(i + offset, sum) def comp_sequential_mac(size, offset): sum = 0 for i in range(size): a = ram_a.read(i + offset) + 1 b = ram_b.read(i + offset) + 1 sum += a * b ram_c.write(offset, sum) def comp_sequential_act(size, offset): sum = 0 for i in range(size): a = ram_a.read(i + offset) + 1 + 1 b = ram_b.read(i + offset) + 1 + 1 sum += a * b if sum <= 0: sum = 0 ram_c.write(offset, sum) def check(size, offset_stream, offset_seq): for i in range(size): st = ram_c.read(i + offset_stream) sq = ram_c.read(i + offset_seq) if vthread.verilog.NotEql(st, sq): all_ok.value = False if all_ok: print('# verify: PASSED') else: print('# verify: FAILED') def comp(size): all_ok.value = True offset = 0 myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_stream_mul(size, offset) myaxi.dma_write(ram_c, offset, 1024, size) offset = size myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_sequential_mul(size, offset) myaxi.dma_write(ram_c, offset, 1024 * 2, size) print('# MUL') check(size, 0, offset) offset = 0 myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_stream_mac(size, offset) myaxi.dma_write(ram_c, offset, 1024, 1) offset = size myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_sequential_mac(size, offset) myaxi.dma_write(ram_c, offset, 1024 * 2, 1) print('# MAC') check(1, 0, offset) offset = 0 myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_stream_act(size, offset) 
myaxi.dma_write(ram_c, offset, 1024, 1) offset = size myaxi.dma_read(ram_a, offset, 0, size) myaxi.dma_read(ram_b, offset, 512, size) comp_sequential_act(size, offset) myaxi.dma_write(ram_c, offset, 1024 * 2, 1) print('# ACT') check(1, 0, offset) vthread.finish() th = vthread.Thread(m, 'th_comp', clk, rst, comp) fsm = th.start(32) try: actstrm.draw_graph() except: pass return m def mkTest(memimg_name=None): m = Module('test') led = mkLed() params = m.copy_params(led) ports = m.copy_sim_ports(led) clk = ports['CLK'] rst = ports['RST'] memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name) memory.connect(ports, 'myaxi') uut = m.Instance(led, 'uut', params=m.connect_params(led), ports=m.connect_ports(led)) simulation.setup_clock(m, clk, hperiod=5) init = simulation.setup_reset(m, rst, m.make_reset(), period=100) init.add( Delay(1000000), Systask('finish'), ) return m def run(filename='tmp.v', simtype='iverilog', outputfile=None): if outputfile is None: outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out' memimg_name = 'memimg_' + outputfile test = mkTest(memimg_name=memimg_name) if filename is not None: test.to_verilog(filename) sim = simulation.Simulator(test, sim=simtype) rslt = sim.run(outputfile=outputfile) lines = rslt.splitlines() if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')): rslt = '\n'.join(lines[:-1]) return rslt if __name__ == '__main__': rslt = run(filename='tmp.v') print(rslt)
true
true
f7032de78fdbf6c462f5e261e42160b48beac9e7
5,556
py
Python
tests/test_numbits.py
WEBZCC/coveragepy
e4f0f9ee71a1ade66b51ec53d0061f462e3838cb
[ "Apache-2.0" ]
2,254
2015-01-05T01:28:03.000Z
2022-03-29T10:37:10.000Z
tests/test_numbits.py
mgorny/coveragepy
73ca4596fc8eed9c76714e7a5c80dd61d71fe1b1
[ "Apache-2.0" ]
707
2015-02-07T01:32:02.000Z
2022-03-31T18:00:14.000Z
tests/test_numbits.py
sitedata/coveragepy
e4f0f9ee71a1ade66b51ec53d0061f462e3838cb
[ "Apache-2.0" ]
439
2015-01-16T15:06:08.000Z
2022-03-30T06:19:12.000Z
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt """Tests for coverage.numbits""" import json import sqlite3 from hypothesis import example, given, settings from hypothesis.strategies import sets, integers from coverage import env from coverage.numbits import ( nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection, numbits_any_intersection, num_in_numbits, register_sqlite_functions, ) from tests.coveragetest import CoverageTest # Hypothesis-generated line number data line_numbers = integers(min_value=1, max_value=9999) line_number_sets = sets(line_numbers) # When coverage-testing ourselves, hypothesis complains about a test being # flaky because the first run exceeds the deadline (and fails), and the second # run succeeds. Disable the deadline if we are coverage-testing. default_settings = settings() if env.METACOV: default_settings = settings(default_settings, deadline=None) def good_numbits(numbits): """Assert that numbits is good.""" # It shouldn't end with a zero byte, that should have been trimmed off. 
assert (not numbits) or (numbits[-1] != 0) class NumbitsOpTest(CoverageTest): """Tests of the numbits operations in numbits.py.""" run_in_temp_dir = False @given(line_number_sets) @settings(default_settings) def test_conversion(self, nums): numbits = nums_to_numbits(nums) good_numbits(numbits) nums2 = numbits_to_nums(numbits) assert nums == set(nums2) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_union(self, nums1, nums2): nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) nbu = numbits_union(nb1, nb2) good_numbits(nbu) union = numbits_to_nums(nbu) assert nums1 | nums2 == set(union) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_intersection(self, nums1, nums2): nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) nbi = numbits_intersection(nb1, nb2) good_numbits(nbi) intersection = numbits_to_nums(nbi) assert nums1 & nums2 == set(intersection) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_any_intersection(self, nums1, nums2): nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) inter = numbits_any_intersection(nb1, nb2) expect = bool(nums1 & nums2) assert expect == bool(inter) @given(line_numbers, line_number_sets) @settings(default_settings) @example(152, {144}) def test_num_in_numbits(self, num, nums): numbits = nums_to_numbits(nums) good_numbits(numbits) is_in = num_in_numbits(num, numbits) assert (num in nums) == is_in class NumbitsSqliteFunctionTest(CoverageTest): """Tests of the SQLite integration for numbits functions.""" run_in_temp_dir = False def setup_test(self): super().setup_test() conn = sqlite3.connect(":memory:") register_sqlite_functions(conn) self.cursor = conn.cursor() self.cursor.execute("create table data (id int, numbits blob)") self.cursor.executemany( "insert into data (id, numbits) values (?, ?)", [ (i, 
nums_to_numbits(range(i, 100, i))) for i in range(1, 11) ] ) self.addCleanup(self.cursor.close) def test_numbits_union(self): res = self.cursor.execute( "select numbits_union(" + "(select numbits from data where id = 7)," + "(select numbits from data where id = 9)" + ")" ) expected = [ 7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49, 54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99, ] answer = numbits_to_nums(list(res)[0][0]) assert expected == answer def test_numbits_intersection(self): res = self.cursor.execute( "select numbits_intersection(" + "(select numbits from data where id = 7)," + "(select numbits from data where id = 9)" + ")" ) answer = numbits_to_nums(list(res)[0][0]) assert [63] == answer def test_numbits_any_intersection(self): res = self.cursor.execute( "select numbits_any_intersection(?, ?)", (nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5])) ) answer = [any_inter for (any_inter,) in res] assert [1] == answer res = self.cursor.execute( "select numbits_any_intersection(?, ?)", (nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9])) ) answer = [any_inter for (any_inter,) in res] assert [0] == answer def test_num_in_numbits(self): res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id") answer = [is_in for (id, is_in) in res] assert [1, 1, 1, 1, 0, 1, 0, 0, 0, 0] == answer def test_numbits_to_nums(self): res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])]) assert [1, 2, 3] == json.loads(res.fetchone()[0])
34.08589
97
0.633369
import json import sqlite3 from hypothesis import example, given, settings from hypothesis.strategies import sets, integers from coverage import env from coverage.numbits import ( nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection, numbits_any_intersection, num_in_numbits, register_sqlite_functions, ) from tests.coveragetest import CoverageTest line_numbers = integers(min_value=1, max_value=9999) line_number_sets = sets(line_numbers) default_settings = settings() if env.METACOV: default_settings = settings(default_settings, deadline=None) def good_numbits(numbits): assert (not numbits) or (numbits[-1] != 0) class NumbitsOpTest(CoverageTest): run_in_temp_dir = False @given(line_number_sets) @settings(default_settings) def test_conversion(self, nums): numbits = nums_to_numbits(nums) good_numbits(numbits) nums2 = numbits_to_nums(numbits) assert nums == set(nums2) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_union(self, nums1, nums2): nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) nbu = numbits_union(nb1, nb2) good_numbits(nbu) union = numbits_to_nums(nbu) assert nums1 | nums2 == set(union) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_intersection(self, nums1, nums2): nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) nbi = numbits_intersection(nb1, nb2) good_numbits(nbi) intersection = numbits_to_nums(nbi) assert nums1 & nums2 == set(intersection) @given(line_number_sets, line_number_sets) @settings(default_settings) def test_any_intersection(self, nums1, nums2): nb1 = nums_to_numbits(nums1) good_numbits(nb1) nb2 = nums_to_numbits(nums2) good_numbits(nb2) inter = numbits_any_intersection(nb1, nb2) expect = bool(nums1 & nums2) assert expect == bool(inter) @given(line_numbers, line_number_sets) @settings(default_settings) @example(152, {144}) def test_num_in_numbits(self, num, nums): numbits = 
nums_to_numbits(nums) good_numbits(numbits) is_in = num_in_numbits(num, numbits) assert (num in nums) == is_in class NumbitsSqliteFunctionTest(CoverageTest): run_in_temp_dir = False def setup_test(self): super().setup_test() conn = sqlite3.connect(":memory:") register_sqlite_functions(conn) self.cursor = conn.cursor() self.cursor.execute("create table data (id int, numbits blob)") self.cursor.executemany( "insert into data (id, numbits) values (?, ?)", [ (i, nums_to_numbits(range(i, 100, i))) for i in range(1, 11) ] ) self.addCleanup(self.cursor.close) def test_numbits_union(self): res = self.cursor.execute( "select numbits_union(" + "(select numbits from data where id = 7)," + "(select numbits from data where id = 9)" + ")" ) expected = [ 7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49, 54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99, ] answer = numbits_to_nums(list(res)[0][0]) assert expected == answer def test_numbits_intersection(self): res = self.cursor.execute( "select numbits_intersection(" + "(select numbits from data where id = 7)," + "(select numbits from data where id = 9)" + ")" ) answer = numbits_to_nums(list(res)[0][0]) assert [63] == answer def test_numbits_any_intersection(self): res = self.cursor.execute( "select numbits_any_intersection(?, ?)", (nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5])) ) answer = [any_inter for (any_inter,) in res] assert [1] == answer res = self.cursor.execute( "select numbits_any_intersection(?, ?)", (nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9])) ) answer = [any_inter for (any_inter,) in res] assert [0] == answer def test_num_in_numbits(self): res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id") answer = [is_in for (id, is_in) in res] assert [1, 1, 1, 1, 0, 1, 0, 0, 0, 0] == answer def test_numbits_to_nums(self): res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])]) assert [1, 2, 3] == json.loads(res.fetchone()[0])
true
true
f7032e464938cec9d9e23b8417b9949012af2160
29,619
py
Python
selfdrive/controls/controlsd.py
sky84ky/forEQ
ecad25c3e8a7fda378e867d067d49eb06ecd2092
[ "MIT" ]
null
null
null
selfdrive/controls/controlsd.py
sky84ky/forEQ
ecad25c3e8a7fda378e867d067d49eb06ecd2092
[ "MIT" ]
null
null
null
selfdrive/controls/controlsd.py
sky84ky/forEQ
ecad25c3e8a7fda378e867d067d49eb06ecd2092
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import os import math from cereal import car, log from common.numpy_fast import clip, interp from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL from common.profiler import Profiler from common.params import Params, put_nonblocking import cereal.messaging as messaging from selfdrive.config import Conversions as CV from selfdrive.swaglog import cloudlog from selfdrive.boardd.boardd import can_list_to_can_capnp from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED from selfdrive.controls.lib.latcontrol_pid import LatControlPID from selfdrive.controls.lib.latcontrol_indi import LatControlINDI from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR from selfdrive.controls.lib.latcontrol_angle import LatControlAngle from selfdrive.controls.lib.events import Events, ET from selfdrive.controls.lib.alertmanager import AlertManager from selfdrive.controls.lib.vehicle_model import VehicleModel from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP from selfdrive.locationd.calibrationd import Calibration from selfdrive.hardware import HARDWARE, TICI from selfdrive.car.hyundai.scc_smoother import SccSmoother from selfdrive.ntune import ntune_get, ntune_isEnabled LDW_MIN_SPEED = 31 * CV.MPH_TO_MS LANE_DEPARTURE_THRESHOLD = 0.1 STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees SIMULATION = "SIMULATION" in os.environ NOSENSOR = "NOSENSOR" in os.environ IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"]) ThermalStatus = log.DeviceState.ThermalStatus State = 
log.ControlsState.OpenpilotState PandaType = log.PandaState.PandaType Desire = log.LateralPlan.Desire LaneChangeState = log.LateralPlan.LaneChangeState LaneChangeDirection = log.LateralPlan.LaneChangeDirection EventName = car.CarEvent.EventName class Controls: def __init__(self, sm=None, pm=None, can_sock=None): config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH) # Setup sockets self.pm = pm if self.pm is None: self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState', 'carControl', 'carEvents', 'carParams']) self.camera_packets = ["roadCameraState", "driverCameraState"] if TICI: self.camera_packets.append("wideRoadCameraState") self.sm = sm if self.sm is None: ignore = ['driverCameraState', 'managerState'] if SIMULATION else None self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration', 'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman', 'managerState', 'liveParameters', 'radarState'] + self.camera_packets, ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan']) self.can_sock = can_sock if can_sock is None: can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100 self.can_sock = messaging.sub_sock('can', timeout=can_timeout) if TICI: self.log_sock = messaging.sub_sock('androidLog') # wait for one pandaState and one CAN packet hw_type = messaging.recv_one(self.sm.sock['pandaState']).pandaState.pandaType has_relay = hw_type in [PandaType.blackPanda, PandaType.uno, PandaType.dos] print("Waiting for CAN messages...") get_one_can(self.can_sock) self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay) # read params params = Params() self.is_metric = params.get_bool("IsMetric") self.is_ldw_enabled = params.get_bool("IsLdwEnabled") self.enable_lte_onroad = params.get_bool("EnableLteOnroad") community_feature_toggle = params.get_bool("CommunityFeaturesToggle") openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle") passive = 
params.get_bool("Passive") or not openpilot_enabled_toggle # detect sound card presence and ensure successful init sounds_available = HARDWARE.get_sound_card_online() car_recognized = self.CP.carName != 'mock' fuzzy_fingerprint = self.CP.fuzzyFingerprint # If stock camera is disconnected, we loaded car controls and it's not dashcam mode controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly community_feature = self.CP.communityFeature or fuzzy_fingerprint community_feature_disallowed = community_feature and (not community_feature_toggle) self.read_only = not car_recognized or not controller_available or \ self.CP.dashcamOnly or community_feature_disallowed if self.read_only: self.CP.safetyModel = car.CarParams.SafetyModel.noOutput # Write CarParams for radard cp_bytes = self.CP.to_bytes() params.put("CarParams", cp_bytes) put_nonblocking("CarParamsCache", cp_bytes) self.CC = car.CarControl.new_message() self.AM = AlertManager() self.events = Events() self.LoC = LongControl(self.CP, self.CI.compute_gb) self.VM = VehicleModel(self.CP) if self.CP.steerControlType == car.CarParams.SteerControlType.angle: self.LaC = LatControlAngle(self.CP) elif self.CP.lateralTuning.which() == 'pid': self.LaC = LatControlPID(self.CP) elif self.CP.lateralTuning.which() == 'indi': self.LaC = LatControlINDI(self.CP) elif self.CP.lateralTuning.which() == 'lqr': self.LaC = LatControlLQR(self.CP) self.initialized = False self.state = State.disabled self.enabled = False self.active = False self.can_rcv_error = False self.soft_disable_timer = 0 self.v_cruise_kph = 255 self.v_cruise_kph_last = 0 self.mismatch_counter = 0 self.can_error_counter = 0 self.last_blinker_frame = 0 self.saturated_count = 0 self.distance_traveled = 0 self.last_functional_fan_frame = 0 self.events_prev = [] self.current_alert_types = [ET.PERMANENT] self.logged_comm_issue = False # scc smoother self.is_cruise_enabled = False self.cruiseVirtualMaxSpeed = 0 
self.clu_speed_ms = 0. self.apply_accel = 0. self.fused_accel = 0. self.lead_drel = 0. self.aReqValue = 0. self.aReqValueMin = 0. self.aReqValueMax = 0. self.angle_steers_des = 0. # TODO: no longer necessary, aside from process replay self.sm['liveParameters'].valid = True self.startup_event = get_startup_event(car_recognized, controller_available, fuzzy_fingerprint) if not sounds_available: self.events.add(EventName.soundsUnavailable, static=True) if community_feature_disallowed: self.events.add(EventName.communityFeatureDisallowed, static=True) if not car_recognized: self.events.add(EventName.carUnrecognized, static=True) elif self.read_only: self.events.add(EventName.dashcamMode, static=True) # controlsd is driven by can recv, expected at 100Hz self.rk = Ratekeeper(100, print_delay_threshold=None) self.prof = Profiler(False) # off by default def update_events(self, CS): """Compute carEvents from carState""" self.events.clear() self.events.add_from_msg(CS.events) self.events.add_from_msg(self.sm['driverMonitoringState'].events) # Handle startup event if self.startup_event is not None: self.events.add(self.startup_event) self.startup_event = None # Don't add any more events if not initialized if not self.initialized: self.events.add(EventName.controlsInitializing) return # Create events for battery, temperature, disk space, and memory if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError: # at zero percent battery, while discharging, OP should not allowed self.events.add(EventName.lowBattery) if self.sm['deviceState'].thermalStatus >= ThermalStatus.red: self.events.add(EventName.overheat) if self.sm['deviceState'].freeSpacePercent < 7: # under 7% of space free no enable allowed self.events.add(EventName.outOfSpace) if self.sm['deviceState'].memoryUsagePercent > 90: self.events.add(EventName.lowMemory) # Alert if fan isn't spinning for 5 seconds if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]: if 
self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50: if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0: self.events.add(EventName.fanMalfunction) else: self.last_functional_fan_frame = self.sm.frame # Handle calibration status cal_status = self.sm['liveCalibration'].calStatus if cal_status != Calibration.CALIBRATED: if cal_status == Calibration.UNCALIBRATED: self.events.add(EventName.calibrationIncomplete) else: self.events.add(EventName.calibrationInvalid) # Handle lane change if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange: direction = self.sm['lateralPlan'].laneChangeDirection if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \ (CS.rightBlindspot and direction == LaneChangeDirection.right): self.events.add(EventName.laneChangeBlocked) elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0: self.events.add(EventName.autoLaneChange) else: if direction == LaneChangeDirection.left: self.events.add(EventName.preLaneChangeLeft) else: self.events.add(EventName.preLaneChangeRight) elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting, LaneChangeState.laneChangeFinishing]: self.events.add(EventName.laneChange) if self.can_rcv_error or not CS.canValid: self.events.add(EventName.canError) safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel or self.sm['pandaState'].safetyParam != self.CP.safetyParam if safety_mismatch or self.mismatch_counter >= 200: self.events.add(EventName.controlsMismatch) if not self.sm['liveParameters'].valid: self.events.add(EventName.vehicleModelInvalid) if len(self.sm['radarState'].radarErrors): self.events.add(EventName.radarFault) elif not self.sm.valid["pandaState"]: self.events.add(EventName.usbError) elif not self.sm.all_alive_and_valid(): self.events.add(EventName.commIssue) if not self.logged_comm_issue: cloudlog.error(f"commIssue - valid: 
{self.sm.valid} - alive: {self.sm.alive}") self.logged_comm_issue = True else: self.logged_comm_issue = False if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.turningIndicatorOn in self.events.names): self.events.add(EventName.plannerError) if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR: if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs self.events.add(EventName.sensorDataInvalid) if not self.sm['liveLocationKalman'].posenetOK: self.events.add(EventName.posenetInvalid) if not self.sm['liveLocationKalman'].deviceStable: self.events.add(EventName.deviceFalling) if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults: self.events.add(EventName.relayMalfunction) if self.sm['longitudinalPlan'].fcw or (self.enabled and self.sm['modelV2'].meta.hardBrakePredicted): self.events.add(EventName.fcw) if TICI and self.enable_lte_onroad: logs = messaging.drain_sock(self.log_sock, wait_for_one=False) messages = [] for m in logs: try: messages.append(m.androidLog.message) except UnicodeDecodeError: pass for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]: for m in messages: if err not in m: continue csid = m.split("CSID:")[-1].split(" ")[0] evt = {"0": EventName.wideRoadCameraError, "1": EventName.roadCameraError, "2": EventName.driverCameraError}.get(csid, None) if evt is not None: self.events.add(evt) # TODO: fix simulator if not SIMULATION: #if not NOSENSOR: # if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and \ # (not TICI or self.enable_lte_onroad): # # Not show in first 1 km to allow for driving out of garage. 
This event shows after 5 minutes # self.events.add(EventName.noGps) if not self.sm.all_alive(self.camera_packets): self.events.add(EventName.cameraMalfunction) if self.sm['modelV2'].frameDropPerc > 20: self.events.add(EventName.modeldLagging) # Check if all manager processes are running not_running = set(p.name for p in self.sm['managerState'].processes if not p.running) if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES): self.events.add(EventName.processNotRunning) # Only allow engagement with brake pressed when stopped behind another stopped car #if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \ #and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3: #self.events.add(EventName.noTarget) def data_sample(self): """Receive data from sockets and update carState""" # Update carState from CAN can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True) CS = self.CI.update(self.CC, can_strs) self.sm.update(0) all_valid = CS.canValid and self.sm.all_alive_and_valid() if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 2.0): self.initialized = True Params().put_bool("ControlsReady", True) # Check for CAN timeout if not can_strs: self.can_error_counter += 1 self.can_rcv_error = True else: self.can_rcv_error = False # When the panda and controlsd do not agree on controls_allowed # we want to disengage openpilot. However the status from the panda goes through # another socket other than the CAN messages and one can arrive earlier than the other. # Therefore we allow a mismatch for two samples, then we trigger the disengagement. 
if not self.enabled: self.mismatch_counter = 0 if not self.sm['pandaState'].controlsAllowed and self.enabled: self.mismatch_counter += 1 self.distance_traveled += CS.vEgo * DT_CTRL return CS def state_transition(self, CS): """Compute conditional state transitions and execute actions on state transitions""" self.v_cruise_kph_last = self.v_cruise_kph # if stock cruise is completely disabled, then we can use our own set speed logic self.CP.enableCruise = self.CI.CP.enableCruise #if not self.CP.enableCruise: # self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled, self.is_metric) #elif self.CP.enableCruise and CS.cruiseState.enabled: # self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH SccSmoother.update_cruise_buttons(self, CS, self.CP.openpilotLongitudinalControl) # decrease the soft disable timer at every step, as it's reset on # entrance in SOFT_DISABLING state self.soft_disable_timer = max(0, self.soft_disable_timer - 1) self.current_alert_types = [ET.PERMANENT] # ENABLED, PRE ENABLING, SOFT DISABLING if self.state != State.disabled: # user and immediate disable always have priority in a non-disabled state if self.events.any(ET.USER_DISABLE): self.state = State.disabled self.current_alert_types.append(ET.USER_DISABLE) elif self.events.any(ET.IMMEDIATE_DISABLE): self.state = State.disabled self.current_alert_types.append(ET.IMMEDIATE_DISABLE) else: # ENABLED if self.state == State.enabled: if self.events.any(ET.SOFT_DISABLE): self.state = State.softDisabling self.soft_disable_timer = 50 # 0.5s self.current_alert_types.append(ET.SOFT_DISABLE) # SOFT DISABLING elif self.state == State.softDisabling: if not self.events.any(ET.SOFT_DISABLE): # no more soft disabling condition, so go back to ENABLED self.state = State.enabled elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0: self.current_alert_types.append(ET.SOFT_DISABLE) elif self.soft_disable_timer <= 0: self.state = State.disabled # PRE ENABLING elif 
self.state == State.preEnabled: if not self.events.any(ET.PRE_ENABLE): self.state = State.enabled else: self.current_alert_types.append(ET.PRE_ENABLE) # DISABLED elif self.state == State.disabled: if self.events.any(ET.ENABLE): if self.events.any(ET.NO_ENTRY): self.current_alert_types.append(ET.NO_ENTRY) else: if self.events.any(ET.PRE_ENABLE): self.state = State.preEnabled else: self.state = State.enabled self.current_alert_types.append(ET.ENABLE) self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last) # Check if actuators are enabled self.active = self.state == State.enabled or self.state == State.softDisabling if self.active: self.current_alert_types.append(ET.WARNING) # Check if openpilot is engaged self.enabled = self.active or self.state == State.preEnabled def state_control(self, CS): """Given the state, this function returns an actuators packet""" # Update VehicleModel params = self.sm['liveParameters'] x = max(params.stiffnessFactor, 0.1) #sr = max(params.steerRatio, 0.1) if ntune_isEnabled('useLiveSteerRatio'): sr = max(params.steerRatio, 0.1) else: sr = max(ntune_get('steerRatio'), 0.1) self.VM.update_params(x, sr) lat_plan = self.sm['lateralPlan'] long_plan = self.sm['longitudinalPlan'] actuators = car.CarControl.Actuators.new_message() if CS.leftBlinker or CS.rightBlinker: self.last_blinker_frame = self.sm.frame # State specific actions if not self.active: self.LaC.reset() self.LoC.reset(v_pid=CS.vEgo) long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan']) # no greater than dt mpc + dt, to prevent too high extraps dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart) v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0 # Gas/Brake PID loop #actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP) # scc smoother 
actuators.gas, actuators.brake = self.LoC.update(self.active and CS.cruiseState.speed > 1., CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP, self.sm['radarState']) # Steering PID loop and lateral MPC actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params, lat_plan) # Check for difference between desired angle and angle for angle based control angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \ abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD if angle_control_saturated and not CS.steeringPressed and self.active: self.saturated_count += 1 else: self.saturated_count = 0 # Send a "steering required alert" if saturation count has reached the limit if (lac_log.saturated and not CS.steeringPressed) or \ (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT): if len(lat_plan.dPathPoints): # Check if we deviated from the path left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1 right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1 # if left_deviation or right_deviation: # self.events.add(EventName.steerSaturated) return actuators, v_acc_sol, a_acc_sol, lac_log def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log): """Send actuators and hud commands to the car, send controlsstate and MPC logging""" CC = car.CarControl.new_message() CC.enabled = self.enabled CC.actuators = actuators CC.cruiseControl.override = True CC.cruiseControl.cancel = self.CP.enableCruise and not self.enabled and CS.cruiseState.enabled # Some override values for Honda # brake discount removes a sharp nonlinearity brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0)) speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount) CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0) CC.cruiseControl.accelOverride = 
self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget) CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS) CC.hudControl.speedVisible = self.enabled CC.hudControl.lanesVisible = self.enabled CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead right_lane_visible = self.sm['lateralPlan'].rProb > 0.5 left_lane_visible = self.sm['lateralPlan'].lProb > 0.5 CC.hudControl.rightLaneVisible = bool(right_lane_visible) CC.hudControl.leftLaneVisible = bool(left_lane_visible) recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \ and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED meta = self.sm['modelV2'].meta if len(meta.desirePrediction) and ldw_allowed: l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1] r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1] cameraOffset = ntune_get("cameraOffset") l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + cameraOffset)) r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - cameraOffset)) CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close) CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close) if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart: self.events.add(EventName.ldw) clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric]) self.AM.add_many(self.sm.frame, alerts, self.enabled) self.AM.process_alerts(self.sm.frame, clear_event) CC.hudControl.visualAlert = self.AM.visual_alert if not self.read_only and self.initialized: # send car controls over can 
can_sends = self.CI.apply(CC, self) self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid)) force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \ (self.state == State.softDisabling) # Curvature & Steering angle params = self.sm['liveParameters'] lat_plan = self.sm['lateralPlan'] steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg) curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo) self.angle_steers_des = math.degrees(self.VM.get_steer_from_curvature(-lat_plan.curvature, CS.vEgo)) self.angle_steers_des += params.angleOffsetDeg # controlsState dat = messaging.new_message('controlsState') dat.valid = CS.canValid controlsState = dat.controlsState controlsState.alertText1 = self.AM.alert_text_1 controlsState.alertText2 = self.AM.alert_text_2 controlsState.alertSize = self.AM.alert_size controlsState.alertStatus = self.AM.alert_status controlsState.alertBlinkingRate = self.AM.alert_rate controlsState.alertType = self.AM.alert_type controlsState.alertSound = self.AM.audible_alert controlsState.canMonoTimes = list(CS.canMonoTimes) controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan'] controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan'] controlsState.enabled = self.enabled controlsState.active = self.active controlsState.curvature = curvature controlsState.steeringAngleDesiredDeg = self.angle_steers_des controlsState.state = self.state controlsState.engageable = not self.events.any(ET.NO_ENTRY) controlsState.longControlState = self.LoC.long_control_state controlsState.vPid = float(self.LoC.v_pid) controlsState.vCruise = float(self.cruiseVirtualMaxSpeed if self.CP.openpilotLongitudinalControl else self.v_cruise_kph) controlsState.upAccelCmd = float(self.LoC.pid.p) controlsState.uiAccelCmd = float(self.LoC.pid.i) controlsState.ufAccelCmd = float(self.LoC.pid.f) controlsState.vTargetLead = float(v_acc) 
controlsState.aTarget = float(a_acc) controlsState.cumLagMs = -self.rk.remaining * 1000. controlsState.startMonoTime = int(start_time * 1e9) controlsState.forceDecel = bool(force_decel) controlsState.canErrorCounter = self.can_error_counter controlsState.angleSteers = steer_angle_without_offset * CV.RAD_TO_DEG controlsState.cluSpeedMs = self.clu_speed_ms controlsState.applyAccel = self.apply_accel controlsState.fusedAccel = self.fused_accel controlsState.leadDist = self.lead_drel controlsState.aReqValue = self.aReqValue controlsState.aReqValueMin = self.aReqValueMin controlsState.aReqValueMax = self.aReqValueMax controlsState.steerRatio = self.VM.sR controlsState.steerRateCost = ntune_get('steerRateCost') controlsState.steerActuatorDelay = ntune_get('steerActuatorDelay') if self.CP.steerControlType == car.CarParams.SteerControlType.angle: controlsState.lateralControlState.angleState = lac_log elif self.CP.lateralTuning.which() == 'pid': controlsState.lateralControlState.pidState = lac_log elif self.CP.lateralTuning.which() == 'lqr': controlsState.lateralControlState.lqrState = lac_log elif self.CP.lateralTuning.which() == 'indi': controlsState.lateralControlState.indiState = lac_log self.pm.send('controlsState', dat) # carState car_events = self.events.to_msg() cs_send = messaging.new_message('carState') cs_send.valid = CS.canValid cs_send.carState = CS cs_send.carState.events = car_events self.pm.send('carState', cs_send) # carEvents - logged every second or on change if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev): ce_send = messaging.new_message('carEvents', len(self.events)) ce_send.carEvents = car_events self.pm.send('carEvents', ce_send) self.events_prev = self.events.names.copy() # carParams - logged every 50 seconds (> 1 per segment) if (self.sm.frame % int(50. 
/ DT_CTRL) == 0): cp_send = messaging.new_message('carParams') cp_send.carParams = self.CP self.pm.send('carParams', cp_send) # carControl cc_send = messaging.new_message('carControl') cc_send.valid = CS.canValid cc_send.carControl = CC self.pm.send('carControl', cc_send) # copy CarControl to pass to CarInterface on the next iteration self.CC = CC def step(self): start_time = sec_since_boot() self.prof.checkpoint("Ratekeeper", ignore=True) # Sample data from sockets and get a carState CS = self.data_sample() self.prof.checkpoint("Sample") self.update_events(CS) if not self.read_only and self.initialized: # Update control state self.state_transition(CS) self.prof.checkpoint("State transition") # Compute actuators (runs PID loops and lateral MPC) actuators, v_acc, a_acc, lac_log = self.state_control(CS) self.prof.checkpoint("State Control") # Publish data self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log) self.prof.checkpoint("Sent") def controlsd_thread(self): while True: self.step() self.rk.monitor_time() self.prof.display() def main(sm=None, pm=None, logcan=None): controls = Controls(sm, pm, logcan) controls.controlsd_thread() if __name__ == "__main__": main()
42.252496
175
0.710389
import os import math from cereal import car, log from common.numpy_fast import clip, interp from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL from common.profiler import Profiler from common.params import Params, put_nonblocking import cereal.messaging as messaging from selfdrive.config import Conversions as CV from selfdrive.swaglog import cloudlog from selfdrive.boardd.boardd import can_list_to_can_capnp from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED from selfdrive.controls.lib.latcontrol_pid import LatControlPID from selfdrive.controls.lib.latcontrol_indi import LatControlINDI from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR from selfdrive.controls.lib.latcontrol_angle import LatControlAngle from selfdrive.controls.lib.events import Events, ET from selfdrive.controls.lib.alertmanager import AlertManager from selfdrive.controls.lib.vehicle_model import VehicleModel from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP from selfdrive.locationd.calibrationd import Calibration from selfdrive.hardware import HARDWARE, TICI from selfdrive.car.hyundai.scc_smoother import SccSmoother from selfdrive.ntune import ntune_get, ntune_isEnabled LDW_MIN_SPEED = 31 * CV.MPH_TO_MS LANE_DEPARTURE_THRESHOLD = 0.1 STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL STEER_ANGLE_SATURATION_THRESHOLD = 2.5 SIMULATION = "SIMULATION" in os.environ NOSENSOR = "NOSENSOR" in os.environ IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"]) ThermalStatus = log.DeviceState.ThermalStatus State = log.ControlsState.OpenpilotState PandaType = 
log.PandaState.PandaType Desire = log.LateralPlan.Desire LaneChangeState = log.LateralPlan.LaneChangeState LaneChangeDirection = log.LateralPlan.LaneChangeDirection EventName = car.CarEvent.EventName class Controls: def __init__(self, sm=None, pm=None, can_sock=None): config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH) self.pm = pm if self.pm is None: self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState', 'carControl', 'carEvents', 'carParams']) self.camera_packets = ["roadCameraState", "driverCameraState"] if TICI: self.camera_packets.append("wideRoadCameraState") self.sm = sm if self.sm is None: ignore = ['driverCameraState', 'managerState'] if SIMULATION else None self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration', 'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman', 'managerState', 'liveParameters', 'radarState'] + self.camera_packets, ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan']) self.can_sock = can_sock if can_sock is None: can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100 self.can_sock = messaging.sub_sock('can', timeout=can_timeout) if TICI: self.log_sock = messaging.sub_sock('androidLog') hw_type = messaging.recv_one(self.sm.sock['pandaState']).pandaState.pandaType has_relay = hw_type in [PandaType.blackPanda, PandaType.uno, PandaType.dos] print("Waiting for CAN messages...") get_one_can(self.can_sock) self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay) params = Params() self.is_metric = params.get_bool("IsMetric") self.is_ldw_enabled = params.get_bool("IsLdwEnabled") self.enable_lte_onroad = params.get_bool("EnableLteOnroad") community_feature_toggle = params.get_bool("CommunityFeaturesToggle") openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle") passive = params.get_bool("Passive") or not openpilot_enabled_toggle sounds_available = HARDWARE.get_sound_card_online() 
car_recognized = self.CP.carName != 'mock' fuzzy_fingerprint = self.CP.fuzzyFingerprint controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly community_feature = self.CP.communityFeature or fuzzy_fingerprint community_feature_disallowed = community_feature and (not community_feature_toggle) self.read_only = not car_recognized or not controller_available or \ self.CP.dashcamOnly or community_feature_disallowed if self.read_only: self.CP.safetyModel = car.CarParams.SafetyModel.noOutput # Write CarParams for radard cp_bytes = self.CP.to_bytes() params.put("CarParams", cp_bytes) put_nonblocking("CarParamsCache", cp_bytes) self.CC = car.CarControl.new_message() self.AM = AlertManager() self.events = Events() self.LoC = LongControl(self.CP, self.CI.compute_gb) self.VM = VehicleModel(self.CP) if self.CP.steerControlType == car.CarParams.SteerControlType.angle: self.LaC = LatControlAngle(self.CP) elif self.CP.lateralTuning.which() == 'pid': self.LaC = LatControlPID(self.CP) elif self.CP.lateralTuning.which() == 'indi': self.LaC = LatControlINDI(self.CP) elif self.CP.lateralTuning.which() == 'lqr': self.LaC = LatControlLQR(self.CP) self.initialized = False self.state = State.disabled self.enabled = False self.active = False self.can_rcv_error = False self.soft_disable_timer = 0 self.v_cruise_kph = 255 self.v_cruise_kph_last = 0 self.mismatch_counter = 0 self.can_error_counter = 0 self.last_blinker_frame = 0 self.saturated_count = 0 self.distance_traveled = 0 self.last_functional_fan_frame = 0 self.events_prev = [] self.current_alert_types = [ET.PERMANENT] self.logged_comm_issue = False # scc smoother self.is_cruise_enabled = False self.cruiseVirtualMaxSpeed = 0 self.clu_speed_ms = 0. self.apply_accel = 0. self.fused_accel = 0. self.lead_drel = 0. self.aReqValue = 0. self.aReqValueMin = 0. self.aReqValueMax = 0. self.angle_steers_des = 0. 
# TODO: no longer necessary, aside from process replay self.sm['liveParameters'].valid = True self.startup_event = get_startup_event(car_recognized, controller_available, fuzzy_fingerprint) if not sounds_available: self.events.add(EventName.soundsUnavailable, static=True) if community_feature_disallowed: self.events.add(EventName.communityFeatureDisallowed, static=True) if not car_recognized: self.events.add(EventName.carUnrecognized, static=True) elif self.read_only: self.events.add(EventName.dashcamMode, static=True) # controlsd is driven by can recv, expected at 100Hz self.rk = Ratekeeper(100, print_delay_threshold=None) self.prof = Profiler(False) # off by default def update_events(self, CS): self.events.clear() self.events.add_from_msg(CS.events) self.events.add_from_msg(self.sm['driverMonitoringState'].events) # Handle startup event if self.startup_event is not None: self.events.add(self.startup_event) self.startup_event = None # Don't add any more events if not initialized if not self.initialized: self.events.add(EventName.controlsInitializing) return if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError: self.events.add(EventName.lowBattery) if self.sm['deviceState'].thermalStatus >= ThermalStatus.red: self.events.add(EventName.overheat) if self.sm['deviceState'].freeSpacePercent < 7: self.events.add(EventName.outOfSpace) if self.sm['deviceState'].memoryUsagePercent > 90: self.events.add(EventName.lowMemory) if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]: if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50: if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0: self.events.add(EventName.fanMalfunction) else: self.last_functional_fan_frame = self.sm.frame # Handle calibration status cal_status = self.sm['liveCalibration'].calStatus if cal_status != Calibration.CALIBRATED: if cal_status == Calibration.UNCALIBRATED: 
self.events.add(EventName.calibrationIncomplete) else: self.events.add(EventName.calibrationInvalid) # Handle lane change if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange: direction = self.sm['lateralPlan'].laneChangeDirection if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \ (CS.rightBlindspot and direction == LaneChangeDirection.right): self.events.add(EventName.laneChangeBlocked) elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0: self.events.add(EventName.autoLaneChange) else: if direction == LaneChangeDirection.left: self.events.add(EventName.preLaneChangeLeft) else: self.events.add(EventName.preLaneChangeRight) elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting, LaneChangeState.laneChangeFinishing]: self.events.add(EventName.laneChange) if self.can_rcv_error or not CS.canValid: self.events.add(EventName.canError) safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel or self.sm['pandaState'].safetyParam != self.CP.safetyParam if safety_mismatch or self.mismatch_counter >= 200: self.events.add(EventName.controlsMismatch) if not self.sm['liveParameters'].valid: self.events.add(EventName.vehicleModelInvalid) if len(self.sm['radarState'].radarErrors): self.events.add(EventName.radarFault) elif not self.sm.valid["pandaState"]: self.events.add(EventName.usbError) elif not self.sm.all_alive_and_valid(): self.events.add(EventName.commIssue) if not self.logged_comm_issue: cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}") self.logged_comm_issue = True else: self.logged_comm_issue = False if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.turningIndicatorOn in self.events.names): self.events.add(EventName.plannerError) if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR: if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs 
self.events.add(EventName.sensorDataInvalid) if not self.sm['liveLocationKalman'].posenetOK: self.events.add(EventName.posenetInvalid) if not self.sm['liveLocationKalman'].deviceStable: self.events.add(EventName.deviceFalling) if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults: self.events.add(EventName.relayMalfunction) if self.sm['longitudinalPlan'].fcw or (self.enabled and self.sm['modelV2'].meta.hardBrakePredicted): self.events.add(EventName.fcw) if TICI and self.enable_lte_onroad: logs = messaging.drain_sock(self.log_sock, wait_for_one=False) messages = [] for m in logs: try: messages.append(m.androidLog.message) except UnicodeDecodeError: pass for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]: for m in messages: if err not in m: continue csid = m.split("CSID:")[-1].split(" ")[0] evt = {"0": EventName.wideRoadCameraError, "1": EventName.roadCameraError, "2": EventName.driverCameraError}.get(csid, None) if evt is not None: self.events.add(evt) # TODO: fix simulator if not SIMULATION: #if not NOSENSOR: # if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and \ # (not TICI or self.enable_lte_onroad): # # Not show in first 1 km to allow for driving out of garage. 
This event shows after 5 minutes # self.events.add(EventName.noGps) if not self.sm.all_alive(self.camera_packets): self.events.add(EventName.cameraMalfunction) if self.sm['modelV2'].frameDropPerc > 20: self.events.add(EventName.modeldLagging) # Check if all manager processes are running not_running = set(p.name for p in self.sm['managerState'].processes if not p.running) if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES): self.events.add(EventName.processNotRunning) # Only allow engagement with brake pressed when stopped behind another stopped car #if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \ #and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3: #self.events.add(EventName.noTarget) def data_sample(self): # Update carState from CAN can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True) CS = self.CI.update(self.CC, can_strs) self.sm.update(0) all_valid = CS.canValid and self.sm.all_alive_and_valid() if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 2.0): self.initialized = True Params().put_bool("ControlsReady", True) # Check for CAN timeout if not can_strs: self.can_error_counter += 1 self.can_rcv_error = True else: self.can_rcv_error = False # When the panda and controlsd do not agree on controls_allowed # we want to disengage openpilot. However the status from the panda goes through # another socket other than the CAN messages and one can arrive earlier than the other. # Therefore we allow a mismatch for two samples, then we trigger the disengagement. 
if not self.enabled: self.mismatch_counter = 0 if not self.sm['pandaState'].controlsAllowed and self.enabled: self.mismatch_counter += 1 self.distance_traveled += CS.vEgo * DT_CTRL return CS def state_transition(self, CS): self.v_cruise_kph_last = self.v_cruise_kph # if stock cruise is completely disabled, then we can use our own set speed logic self.CP.enableCruise = self.CI.CP.enableCruise #if not self.CP.enableCruise: # self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled, self.is_metric) #elif self.CP.enableCruise and CS.cruiseState.enabled: # self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH SccSmoother.update_cruise_buttons(self, CS, self.CP.openpilotLongitudinalControl) # decrease the soft disable timer at every step, as it's reset on self.soft_disable_timer = max(0, self.soft_disable_timer - 1) self.current_alert_types = [ET.PERMANENT] if self.state != State.disabled: if self.events.any(ET.USER_DISABLE): self.state = State.disabled self.current_alert_types.append(ET.USER_DISABLE) elif self.events.any(ET.IMMEDIATE_DISABLE): self.state = State.disabled self.current_alert_types.append(ET.IMMEDIATE_DISABLE) else: if self.state == State.enabled: if self.events.any(ET.SOFT_DISABLE): self.state = State.softDisabling self.soft_disable_timer = 50 self.current_alert_types.append(ET.SOFT_DISABLE) elif self.state == State.softDisabling: if not self.events.any(ET.SOFT_DISABLE): self.state = State.enabled elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0: self.current_alert_types.append(ET.SOFT_DISABLE) elif self.soft_disable_timer <= 0: self.state = State.disabled elif self.state == State.preEnabled: if not self.events.any(ET.PRE_ENABLE): self.state = State.enabled else: self.current_alert_types.append(ET.PRE_ENABLE) elif self.state == State.disabled: if self.events.any(ET.ENABLE): if self.events.any(ET.NO_ENTRY): self.current_alert_types.append(ET.NO_ENTRY) else: if self.events.any(ET.PRE_ENABLE): self.state = 
State.preEnabled else: self.state = State.enabled self.current_alert_types.append(ET.ENABLE) self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last) self.active = self.state == State.enabled or self.state == State.softDisabling if self.active: self.current_alert_types.append(ET.WARNING) self.enabled = self.active or self.state == State.preEnabled def state_control(self, CS): params = self.sm['liveParameters'] x = max(params.stiffnessFactor, 0.1) if ntune_isEnabled('useLiveSteerRatio'): sr = max(params.steerRatio, 0.1) else: sr = max(ntune_get('steerRatio'), 0.1) self.VM.update_params(x, sr) lat_plan = self.sm['lateralPlan'] long_plan = self.sm['longitudinalPlan'] actuators = car.CarControl.Actuators.new_message() if CS.leftBlinker or CS.rightBlinker: self.last_blinker_frame = self.sm.frame if not self.active: self.LaC.reset() self.LoC.reset(v_pid=CS.vEgo) long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan']) dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart) v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0 actuators.gas, actuators.brake = self.LoC.update(self.active and CS.cruiseState.speed > 1., CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP, self.sm['radarState']) actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params, lat_plan) angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \ abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD if angle_control_saturated and not CS.steeringPressed and self.active: self.saturated_count += 1 else: self.saturated_count = 0 if (lac_log.saturated and not CS.steeringPressed) or \ (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT): if len(lat_plan.dPathPoints): left_deviation = actuators.steer > 0 
and lat_plan.dPathPoints[0] < -0.1 right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1 return actuators, v_acc_sol, a_acc_sol, lac_log def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log): CC = car.CarControl.new_message() CC.enabled = self.enabled CC.actuators = actuators CC.cruiseControl.override = True CC.cruiseControl.cancel = self.CP.enableCruise and not self.enabled and CS.cruiseState.enabled brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0)) speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount) CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0) CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget) CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS) CC.hudControl.speedVisible = self.enabled CC.hudControl.lanesVisible = self.enabled CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead right_lane_visible = self.sm['lateralPlan'].rProb > 0.5 left_lane_visible = self.sm['lateralPlan'].lProb > 0.5 CC.hudControl.rightLaneVisible = bool(right_lane_visible) CC.hudControl.leftLaneVisible = bool(left_lane_visible) recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \ and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED meta = self.sm['modelV2'].meta if len(meta.desirePrediction) and ldw_allowed: l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1] r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1] cameraOffset = ntune_get("cameraOffset") l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + cameraOffset)) r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - cameraOffset)) 
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close) CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close) if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart: self.events.add(EventName.ldw) clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric]) self.AM.add_many(self.sm.frame, alerts, self.enabled) self.AM.process_alerts(self.sm.frame, clear_event) CC.hudControl.visualAlert = self.AM.visual_alert if not self.read_only and self.initialized: can_sends = self.CI.apply(CC, self) self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid)) force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \ (self.state == State.softDisabling) params = self.sm['liveParameters'] lat_plan = self.sm['lateralPlan'] steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg) curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo) self.angle_steers_des = math.degrees(self.VM.get_steer_from_curvature(-lat_plan.curvature, CS.vEgo)) self.angle_steers_des += params.angleOffsetDeg dat = messaging.new_message('controlsState') dat.valid = CS.canValid controlsState = dat.controlsState controlsState.alertText1 = self.AM.alert_text_1 controlsState.alertText2 = self.AM.alert_text_2 controlsState.alertSize = self.AM.alert_size controlsState.alertStatus = self.AM.alert_status controlsState.alertBlinkingRate = self.AM.alert_rate controlsState.alertType = self.AM.alert_type controlsState.alertSound = self.AM.audible_alert controlsState.canMonoTimes = list(CS.canMonoTimes) controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan'] controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan'] controlsState.enabled = self.enabled 
controlsState.active = self.active controlsState.curvature = curvature controlsState.steeringAngleDesiredDeg = self.angle_steers_des controlsState.state = self.state controlsState.engageable = not self.events.any(ET.NO_ENTRY) controlsState.longControlState = self.LoC.long_control_state controlsState.vPid = float(self.LoC.v_pid) controlsState.vCruise = float(self.cruiseVirtualMaxSpeed if self.CP.openpilotLongitudinalControl else self.v_cruise_kph) controlsState.upAccelCmd = float(self.LoC.pid.p) controlsState.uiAccelCmd = float(self.LoC.pid.i) controlsState.ufAccelCmd = float(self.LoC.pid.f) controlsState.vTargetLead = float(v_acc) controlsState.aTarget = float(a_acc) controlsState.cumLagMs = -self.rk.remaining * 1000. controlsState.startMonoTime = int(start_time * 1e9) controlsState.forceDecel = bool(force_decel) controlsState.canErrorCounter = self.can_error_counter controlsState.angleSteers = steer_angle_without_offset * CV.RAD_TO_DEG controlsState.cluSpeedMs = self.clu_speed_ms controlsState.applyAccel = self.apply_accel controlsState.fusedAccel = self.fused_accel controlsState.leadDist = self.lead_drel controlsState.aReqValue = self.aReqValue controlsState.aReqValueMin = self.aReqValueMin controlsState.aReqValueMax = self.aReqValueMax controlsState.steerRatio = self.VM.sR controlsState.steerRateCost = ntune_get('steerRateCost') controlsState.steerActuatorDelay = ntune_get('steerActuatorDelay') if self.CP.steerControlType == car.CarParams.SteerControlType.angle: controlsState.lateralControlState.angleState = lac_log elif self.CP.lateralTuning.which() == 'pid': controlsState.lateralControlState.pidState = lac_log elif self.CP.lateralTuning.which() == 'lqr': controlsState.lateralControlState.lqrState = lac_log elif self.CP.lateralTuning.which() == 'indi': controlsState.lateralControlState.indiState = lac_log self.pm.send('controlsState', dat) car_events = self.events.to_msg() cs_send = messaging.new_message('carState') cs_send.valid = CS.canValid cs_send.carState 
= CS cs_send.carState.events = car_events self.pm.send('carState', cs_send) if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev): ce_send = messaging.new_message('carEvents', len(self.events)) ce_send.carEvents = car_events self.pm.send('carEvents', ce_send) self.events_prev = self.events.names.copy() if (self.sm.frame % int(50. / DT_CTRL) == 0): cp_send = messaging.new_message('carParams') cp_send.carParams = self.CP self.pm.send('carParams', cp_send) cc_send = messaging.new_message('carControl') cc_send.valid = CS.canValid cc_send.carControl = CC self.pm.send('carControl', cc_send) self.CC = CC def step(self): start_time = sec_since_boot() self.prof.checkpoint("Ratekeeper", ignore=True) CS = self.data_sample() self.prof.checkpoint("Sample") self.update_events(CS) if not self.read_only and self.initialized: self.state_transition(CS) self.prof.checkpoint("State transition") actuators, v_acc, a_acc, lac_log = self.state_control(CS) self.prof.checkpoint("State Control") self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log) self.prof.checkpoint("Sent") def controlsd_thread(self): while True: self.step() self.rk.monitor_time() self.prof.display() def main(sm=None, pm=None, logcan=None): controls = Controls(sm, pm, logcan) controls.controlsd_thread() if __name__ == "__main__": main()
true
true
f70332380c19749fdaaa89b3db41948706f18fba
228
py
Python
main.py
ConnorDoesDev/cozmo
dc2e4574d056b6e61e1e7042c3b32d7e0d00f055
[ "CC0-1.0" ]
null
null
null
main.py
ConnorDoesDev/cozmo
dc2e4574d056b6e61e1e7042c3b32d7e0d00f055
[ "CC0-1.0" ]
null
null
null
main.py
ConnorDoesDev/cozmo
dc2e4574d056b6e61e1e7042c3b32d7e0d00f055
[ "CC0-1.0" ]
null
null
null
import cozmo name = input("What is your name? ") def cozmo_program(robot: cozmo.robot.Robot): robot.say_text( f"Hi! My name is Cozmo. How are you, {name}?").wait_for_completed() cozmo.run_program(cozmo_program)
19
75
0.697368
import cozmo name = input("What is your name? ") def cozmo_program(robot: cozmo.robot.Robot): robot.say_text( f"Hi! My name is Cozmo. How are you, {name}?").wait_for_completed() cozmo.run_program(cozmo_program)
true
true
f7033349b9962b4cac2915283c209a909c7aede1
2,212
py
Python
tests/test_buffer.py
recht/thriftrw-python
aad5ee4e9ca21fe59c9bea479465615ef3825dec
[ "MIT" ]
40
2016-02-18T18:01:23.000Z
2022-03-31T10:34:33.000Z
tests/test_buffer.py
recht/thriftrw-python
aad5ee4e9ca21fe59c9bea479465615ef3825dec
[ "MIT" ]
56
2016-02-10T16:51:07.000Z
2020-02-07T05:28:49.000Z
tests/test_buffer.py
recht/thriftrw-python
aad5ee4e9ca21fe59c9bea479465615ef3825dec
[ "MIT" ]
12
2016-03-29T17:29:28.000Z
2021-10-30T14:36:39.000Z
# Copyright (c) 2016 Uber Technologies, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
from __future__ import absolute_import, unicode_literals, print_function import pytest from thriftrw._buffer import ReadBuffer from thriftrw._buffer import WriteBuffer from thriftrw.errors import EndOfInputError def test_empty_write_buffer(): buff = WriteBuffer(10) assert buff.length == 0 assert buff.capacity == 10 assert buff.value == b'' def test_empty_read_buffer(): buff = ReadBuffer(b'') assert buff.take(0) == b'' with pytest.raises(EndOfInputError): buff.take(1) def test_simple_write(): buff = WriteBuffer(10) buff.write_bytes(b'hello ') buff.write_bytes(b'world') assert buff.value == b'hello world' assert buff.length == 11 def test_simple_read(): buff = ReadBuffer(b'abcd') assert buff.take(1) == b'a' assert buff.take(2) == b'bc' with pytest.raises(EndOfInputError): buff.take(2) assert buff.take(1) == b'd' def test_write_clear(): buff = WriteBuffer(10) buff.write_bytes(b'foo') buff.clear() assert buff.value == b'' assert buff.capacity == 10 assert buff.length == 0
29.891892
79
0.726944
from __future__ import absolute_import, unicode_literals, print_function import pytest from thriftrw._buffer import ReadBuffer from thriftrw._buffer import WriteBuffer from thriftrw.errors import EndOfInputError def test_empty_write_buffer(): buff = WriteBuffer(10) assert buff.length == 0 assert buff.capacity == 10 assert buff.value == b'' def test_empty_read_buffer(): buff = ReadBuffer(b'') assert buff.take(0) == b'' with pytest.raises(EndOfInputError): buff.take(1) def test_simple_write(): buff = WriteBuffer(10) buff.write_bytes(b'hello ') buff.write_bytes(b'world') assert buff.value == b'hello world' assert buff.length == 11 def test_simple_read(): buff = ReadBuffer(b'abcd') assert buff.take(1) == b'a' assert buff.take(2) == b'bc' with pytest.raises(EndOfInputError): buff.take(2) assert buff.take(1) == b'd' def test_write_clear(): buff = WriteBuffer(10) buff.write_bytes(b'foo') buff.clear() assert buff.value == b'' assert buff.capacity == 10 assert buff.length == 0
true
true
f703347dfba74e588f8315118b141a54de310acd
44,993
py
Python
tests/integration/test_packagewagl.py
joestasks/eo-datasets
6df2fffb5a773a8c65980a8c75d22fd5bb937edc
[ "Apache-2.0" ]
null
null
null
tests/integration/test_packagewagl.py
joestasks/eo-datasets
6df2fffb5a773a8c65980a8c75d22fd5bb937edc
[ "Apache-2.0" ]
null
null
null
tests/integration/test_packagewagl.py
joestasks/eo-datasets
6df2fffb5a773a8c65980a8c75d22fd5bb937edc
[ "Apache-2.0" ]
1
2021-05-21T03:05:54.000Z
2021-05-21T03:05:54.000Z
from binascii import crc32 from contextlib import contextmanager from datetime import datetime, timedelta, timezone from pathlib import Path from osgeo import gdal import pytest import rasterio from click.testing import CliRunner from rasterio import DatasetReader from rasterio.enums import Compression from rio_cogeo import cogeo import eodatasets3 from eodatasets3.model import DatasetDoc from tests import assert_file_structure from tests.common import assert_same_as_file from . import assert_image h5py = pytest.importorskip( "h5py", reason="Extra dependencies needed to run wagl package test. " "Try pip install eodatasets3[wagl]", ) # These test datasets come from running `tests/integration/h5downsample.py` on a real # wagl output. WAGL_LANDSAT_OUTPUT: Path = ( Path(__file__).parent / "data/wagl-input/LC80920842016180LGN01/LC80920842016180LGN01.wagl.h5" ) WAGL_SENTINEL_OUTPUT: Path = ( Path(__file__).parent / "data/wagl-input/S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09/" "S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09.wagl.h5" ) # The matching Level1 metadata (produced by landsat_l1_prepare.py) L1_METADATA_PATH: Path = ( Path(__file__).parent / "data/wagl-input/LC08_L1TP_092084_20160628_20170323_01_T1.yaml" ) S2_L1_METADATA_PATH: Path = ( Path(__file__).parent / "data/wagl-input/S2A_MSIL1C_20201031T004711_N0209_R102_T53JQJ_20201031T022859.odc-metadata.yaml" ) def test_whole_landsat_wagl_package( l1_ls8_dataset: DatasetDoc, l1_ls8_folder: Path, tmp_path: Path ): out = tmp_path from eodatasets3.scripts import packagewagl # No warnings should be logged during package. # We could tighten this to specific warnings if it proves too noisy, but it's # useful for catching things like unclosed files. with expect_no_warnings(): res = CliRunner().invoke( packagewagl.run, map( str, (WAGL_LANDSAT_OUTPUT, "--level1", L1_METADATA_PATH, "--output", out), ), catch_exceptions=False, ) # The last line of output ends with the dataset path. 
words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1) expected_folder = out / "ga_ls8c_ard_3/092/084/2016/06/28" assert_file_structure( expected_folder, { "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.odc-metadata.yaml": "", "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml": "", "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif": "", 
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif": "", }, ) [output_metadata] = expected_folder.rglob("*.odc-metadata.yaml") assert reported_metadata == str( output_metadata ), "Cli didn't report the expected output path" # Checksum should include all files other than itself. [checksum_file] = expected_folder.rglob("*.sha1") all_output_files = set( p.relative_to(checksum_file.parent) for p in expected_folder.rglob("*") if p != checksum_file ) files_in_checksum = { Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines() } assert all_output_files == files_in_checksum # Verify the computed contiguity looks the same. (metadata fields will depend on it) [image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 1978, 1: 4184}) [image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 1979, 1: 4183}) assert_same_as_file( { "$schema": "https://schemas.opendatacube.org/dataset", # A stable ID is taken from the WAGL doc. 
"id": "787eb74c-e7df-43d6-b562-b796137330ae", "label": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final", "product": { "href": "https://collections.dea.ga.gov.au/product/ga_ls8c_ard_3", "name": "ga_ls8c_ard_3", }, "crs": "epsg:32655", "geometry": { "coordinates": [ [ [386_170.809_107_605_5, -3_787_581.737_315_514_6], [393_422.698_122_467_44, -3_754_539.332_156_166_4], [402_370.463_567_812_2, -3_717_207.883_853_628_3], [405_296.703_429_750_9, -3_713_106.822_612_258_6], [405_302.307_692_307_7, -3_713_085.0], [560_999.714_134_832_8, -3_745_790.820_117_99], [591_203.344_050_317_7, -3_755_934.776_849_929_2], [593_107.5, -3_756_373.614_649_681_4], [593_066.089_284_004_1, -3_756_560.384_007_281_6], [593_115.0, -3_756_576.810_780_758], [593_115.0, -3_769_934.639_090_926_4], [555_895.771_981_598_6, -3_924_204.823_795_153], [554_316.830_569_659_8, -3_931_326.117_549_759], [553_913.572_308_820_1, -3_932_420.854_216_015], [550_505.686_408_068, -3_946_546.219_392_854], [548_673.645_879_151_9, -3_946_645.831_477_726_3], [548_393.076_923_077, -3_947_407.5], [543_888.417_289_877_3, -3_946_906.014_911_907], [535_826.373_854_402_9, -3_947_344.365_997_631_6], [362_232.941_315_876_84, -3_905_575.014_223_633], [362_109.819_892_458_1, -3_904_490.351_889_350_5], [360_592.5, -3_904_126.385_350_318_6], [361_565.347_585_850_9, -3_899_693.716_286_561_5], [360_585.0, -3_891_057.151_898_734_3], [366_618.297_729_428_5, -3_863_717.869_440_751], [386_170.809_107_605_5, -3_787_581.737_315_514_6], ] ], "type": "Polygon", }, "grids": { "default": { "shape": [79, 78], "transform": [ 2981.153_846_153_846, 0.0, 360_585.0, 0.0, -2966.202_531_645_569_7, -3_713_085.0, 0.0, 0.0, 1.0, ], }, "panchromatic": { "shape": [157, 156], "transform": [ 1490.480_769_230_769_3, 0.0, 360_592.5, 0.0, -1492.452_229_299_363, -3_713_092.5, 0.0, 0.0, 1.0, ], }, }, "properties": { "datetime": datetime(2016, 6, 28, 0, 2, 28, 624_635), "dea:dataset_maturity": "final", "dtr:end_datetime": datetime(2016, 6, 28, 0, 2, 43, 114_771), 
"dtr:start_datetime": datetime(2016, 6, 28, 0, 2, 14, 25815), "eo:cloud_cover": 63.069_613_577_531_236, "eo:gsd": 1490.480_769_230_769_3, "eo:instrument": "OLI_TIRS", "eo:platform": "landsat-8", "eo:sun_azimuth": 33.655_125_34, "eo:sun_elevation": 23.988_361_72, "fmask:clear": 32.735_343_657_403_305, "fmask:cloud": 63.069_613_577_531_236, "fmask:cloud_shadow": 4.139_470_857_647_722, "fmask:snow": 0.005_053_323_801_138_007, "fmask:water": 0.050_518_583_616_596_675, "gqa:abs_iterative_mean_x": 0.21, "gqa:abs_iterative_mean_xy": 0.27, "gqa:abs_iterative_mean_y": 0.18, "gqa:abs_x": 0.3, "gqa:abs_xy": 0.39, "gqa:abs_y": 0.25, "gqa:cep90": 0.46, "gqa:iterative_mean_x": -0.17, "gqa:iterative_mean_xy": 0.21, "gqa:iterative_mean_y": 0.12, "gqa:iterative_stddev_x": 0.19, "gqa:iterative_stddev_xy": 0.25, "gqa:iterative_stddev_y": 0.17, "gqa:mean_x": -0.1, "gqa:mean_xy": 0.14, "gqa:mean_y": 0.1, "gqa:stddev_x": 0.35, "gqa:stddev_xy": 0.45, "gqa:stddev_y": 0.29, "landsat:collection_category": "T1", "landsat:collection_number": 1, "landsat:landsat_product_id": "LC08_L1TP_092084_20160628_20170323_01_T1", "landsat:landsat_scene_id": "LC80920842016180LGN01", "landsat:wrs_path": 92, "landsat:wrs_row": 84, "odc:dataset_version": "3.2.1", "odc:file_format": "GeoTIFF", "odc:processing_datetime": datetime(2019, 7, 11, 23, 29, 29, 21245), "odc:producer": "ga.gov.au", "odc:product_family": "ard", "odc:region_code": "092084", }, "measurements": { "nbar_blue": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif" }, "nbar_coastal_aerosol": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif" }, "nbar_green": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif" }, "nbar_nir": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif" }, "nbar_panchromatic": { "grid": "panchromatic", "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif", }, "nbar_red": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif" }, "nbar_swir_1": { 
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif" }, "nbar_swir_2": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif" }, "nbart_blue": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif" }, "nbart_coastal_aerosol": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif" }, "nbart_green": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif" }, "nbart_nir": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif" }, "nbart_panchromatic": { "grid": "panchromatic", "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif", }, "nbart_red": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif" }, "nbart_swir_1": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif" }, "nbart_swir_2": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif" }, "oa_azimuthal_exiting": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif" }, "oa_azimuthal_incident": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif" }, "oa_combined_terrain_shadow": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif" }, "oa_exiting_angle": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif" }, "oa_fmask": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif" }, "oa_incident_angle": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif" }, "oa_nbar_contiguity": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif" }, "oa_nbart_contiguity": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif" }, "oa_relative_azimuth": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif" }, "oa_relative_slope": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif" }, "oa_satellite_azimuth": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif" }, "oa_satellite_view": { "path": 
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif" }, "oa_solar_azimuth": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif" }, "oa_solar_zenith": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif" }, "oa_time_delta": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif" }, }, "accessories": { "checksum:sha1": { "path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1" }, "metadata:processor": { "path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml" }, "thumbnail:nbar": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg" }, "thumbnail:nbart": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg" }, }, "lineage": {"level1": ["fb1c622e-90aa-50e8-9d5e-ad69db82d0f6"]}, }, output_metadata, ) [proc_info] = expected_folder.rglob("*.proc-info.yaml") assert_same_as_file( { "fmask": { "parameters": { "cloud_buffer_distance_metres": 0.0, "cloud_shadow_buffer_distance_metres": 0.0, "frantz_parallax_sentinel_2": False, }, "percent_class_distribution": { "clear": 32.735_343_657_403_305, "cloud": 63.069_613_577_531_236, "cloud_shadow": 4.139_470_857_647_722, "snow": 0.005_053_323_801_138_007, "water": 0.050_518_583_616_596_675, }, }, "software_versions": [ { "name": "modtran", "url": "http://www.ontar.com/software/productdetails.aspx?item=modtran", "version": "6.0.1", }, { "name": "wagl", "url": "https://github.com/GeoscienceAustralia/wagl.git", "version": "5.3.1+118.g9edd420", }, { "name": "eugl", "url": "https://github.com/OpenDataCubePipelines/eugl.git", "version": "0.0.2+69.gb1d1231", }, {"name": "gverify", "url": None, "version": "v0.25c"}, { "name": "fmask", "url": "https://bitbucket.org/chchrsc/python-fmask", "version": "0.5.3", }, { "name": "tesp", "url": "https://github.com/OpenDataCubePipelines/tesp.git", "version": "0.6.1", }, { "name": "eodatasets3", "url": "https://github.com/GeoscienceAustralia/eo-datasets", "version": eodatasets3.__version__, }, ], }, 
proc_info, ignore_fields=("gqa", "wagl"), ) # All produced tifs should be valid COGs for image in expected_folder.rglob("*.tif"): assert cogeo.cog_validate(image), f"Failed COG validation: {image}" # Check one of the images explicitly. [image] = expected_folder.rglob("*_nbar_*_band08.tif") with rasterio.open(image) as d: d: DatasetReader assert d.count == 1, "Expected one band" assert d.nodata == -999.0 # Verify the pixel values haven't changed. assert crc32(d.read(1).tobytes()) == 3_381_159_350 # (Rasterio's checksum is zero on some datasets for some reason? So we use crc above...) assert d.checksum(1) == 58403 # The last overview is an odd size because of the tiny test data image size. assert d.overviews(1) == [8, 16, 31] assert d.driver == "GTiff" assert d.dtypes == ("int16",) assert d.compression == Compression.deflate assert d.height == 157 assert d.width == 156 # The reduced resolution makes it hard to test the chosen block size... assert d.block_shapes == [(26, 156)] # Check the overviews use default 512 block size. # (Rasterio doesn't seem to have an api for this?) assert gdal.Open(str(image)).GetRasterBand(1).GetOverview(1).GetBlockSize() == [ 512, 512, ], "Expected overviews to have a larger block size." # OA data should have no overviews. [*oa_images] = expected_folder.rglob("*_oa_*.tif") assert oa_images for image in oa_images: # fmask is the only OA that should have overviews according to spec (and Josh). if "fmask" in image.name: assert_image(image, overviews=[8, 16, 26]) else: assert_image(image, overviews=[]) # Check we didn't get height/width mixed up again :) # (The small size of our test data makes this slightly silly, though...) [thumb_path] = expected_folder.rglob("*_nbar_*.jpg") assert_image(thumb_path, bands=3, shape=(7, 8)) def test_maturity_calculation(): from eodatasets3 import wagl # Simplified. Only a few ancillary parts that matter to us. 
wagl_doc = { "ancillary": { "aerosol": { "id": ["99d73c48-9985-51d2-9639-d37bcdfe119e"], "tier": "AATSR_CMP_MONTH", "value": 0.047_813_605_517_148_97, }, "brdf": { "alpha_1": { "band_1": 0.407_471_513_826_581_4, "band_2": 0.407_472_440_438_251_7, "band_3": 0.564_374_828_124_185, "band_4": 0.452_550_357_394_962_35, "band_5": 0.720_394_875_348_492_4, "band_6": 0.475_077_458_430_413_66, "band_7": 0.549_934_518_094_732, }, "alpha_2": { "band_1": 0.177_715_841_252_848_28, "band_2": 0.177_716_091_422_247_15, "band_3": 0.136_703_039_045_401_32, "band_4": 0.167_629_648_004_969_63, "band_5": 0.090_148_975_875_461_32, "band_6": 0.121_059_126_731_143_88, "band_7": 0.181_073_714_539_622_23, }, "id": [ "2e95bdec-42e4-50a2-9a4c-1ea970e2696d", "d02e1c58-7379-5c2d-a080-995838550d0d", ], "tier": "DEFINITIVE", }, "elevation": { "id": [ "8ad73086-72cf-561a-aa0f-1e3c64d53384", "e75ac77d-1ed0-55a5-888b-9ae48080eae9", ] }, "ozone": { "id": ["83914de1-c12e-5035-af8d-e2dc1baa54d4"], "tier": "DEFINITIVE", "value": 0.295, }, "water_vapour": { "id": ["e68035cd-1cd3-57fc-9b0e-2bf710a3df87"], "tier": "DEFINITIVE", "value": 0.490_000_009_536_743_16, }, } } # July 2002 is when we consider our BRDF to be good enough: both Aqua # and Terra satellites were now operational. acq_before_brdf = datetime(2002, 6, 29, tzinfo=timezone.utc) acq_after_brdf = datetime(2002, 7, 1, tzinfo=timezone.utc) proc_after_brdf = acq_after_brdf + timedelta(days=7) # Normal, final dataset. Processed just outside of NRT window. assert ( wagl._determine_maturity( acq_after_brdf, acq_after_brdf + timedelta(hours=49), wagl_doc ) == "final" ) # NRT when processed < 48 hours assert ( wagl._determine_maturity( acq_after_brdf, acq_after_brdf + timedelta(hours=1), wagl_doc ) == "nrt" ) assert ( wagl._determine_maturity( acq_before_brdf, acq_before_brdf + timedelta(hours=47), wagl_doc ) == "nrt" ) # Before 2001: final if water vapour is definitive. 
assert ( wagl._determine_maturity( acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc ) == "final" ) # Interim whenever water vapour is fallback. wagl_doc["ancillary"]["water_vapour"]["tier"] = "FALLBACK_DATASET" assert ( wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim" ) assert ( wagl._determine_maturity( acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc ) == "interim" ) wagl_doc["ancillary"]["water_vapour"]["tier"] = "DEFINITIVE" # Fallback BRDF (when at least one is fallback) wagl_doc["ancillary"]["brdf"]["tier"] = "FALLBACK_DEFAULT" assert ( wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim" ) @contextmanager def expect_no_warnings(): """Throw an assertion error if any warnings are produced.""" with pytest.warns(None) as warning_record: yield # We could tighten this to specific warnings if it proves too noisy, but it's # useful for catching things like unclosed files. if warning_record: messages = "\n".join(f"- {w.message} ({w})\n" for w in warning_record) raise AssertionError(f"Expected no warnings to be produced, got:\n {messages}") def test_sentinel_wagl_package(tmp_path: Path): out = tmp_path from eodatasets3.scripts import packagewagl # No warnings should have been logged during package. # We could tighten this to specific warnings if it proves too noisy, but it's # useful for catching things like unclosed files. with expect_no_warnings(): res = CliRunner().invoke( packagewagl.run, map( str, ( WAGL_SENTINEL_OUTPUT, "--level1", S2_L1_METADATA_PATH, "--output", out, # Our weird scaled test dataset resolution "--oa-resolution", 998.1818181818181, ), ), catch_exceptions=False, ) # The last line of output ends with the dataset path. 
words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1) expected_folder = out / "ga_s2am_ard_3/53/JQJ/2020/10/31" assert_file_structure( expected_folder, { "20201031T022859": { "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.odc-metadata.yaml": "", "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml": "", "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif": "", 
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif": "", } }, ) [output_metadata] = expected_folder.rglob("*.odc-metadata.yaml") # Checksum should include all files other than itself. [checksum_file] = expected_folder.rglob("*.sha1") all_output_files = set( p.relative_to(checksum_file.parent) for p in expected_folder.rglob("*") if p != checksum_file and not p.is_dir() ) files_in_checksum = { Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines() } assert all_output_files == files_in_checksum # Verify the computed contiguity looks the same. 
(metadata fields will depend on it) [image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733}) [image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733}) assert_same_as_file( { "$schema": "https://schemas.opendatacube.org/dataset", "id": "14cfa990-7e2f-4f0c-bd5e-b4cb28c27e8d", "label": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final", "product": { "name": "ga_s2am_ard_3", "href": "https://collections.dea.ga.gov.au/product/ga_s2am_ard_3", }, "crs": "epsg:32753", "geometry": { "type": "Polygon", "coordinates": [ [ [731901.8181818182, 6790240.0], [728854.7368421053, 6790240.0], [752174.154338321, 6890002.646902946], [759379.8080509851, 6900040.0], [762411.0326110948, 6900040.0], [763218.8851094716, 6900040.0], [809760.0, 6900040.0], [809760.0, 6790240.0], [732900.0, 6790240.0], [731901.8181818182, 6790240.0], ] ], }, "grids": { "default": { "shape": [110, 110], "transform": [ 998.1818181818181, 0.0, 699960.0, 0.0, -998.1818181818181, 6900040.0, 0.0, 0.0, 1.0, ], }, "a": { "shape": [55, 55], "transform": [ 1996.3636363636363, 0.0, 699960.0, 0.0, -1996.3636363636363, 6900040.0, 0.0, 0.0, 1.0, ], }, "b": { "shape": [19, 19], "transform": [ 5778.9473684210525, 0.0, 699960.0, 0.0, -5778.9473684210525, 6900040.0, 0.0, 0.0, 1.0, ], }, "c": { "shape": [19, 19], "transform": [ 5778.947368421053, 0.0, 699960.0, 0.0, -5778.947368421053, 6900040.0, 0.0, 0.0, 1.0, ], }, }, "properties": { "datetime": "2020-10-31T00:55:10.954414", "dea:dataset_maturity": "final", "eo:cloud_cover": 11.063428320692061, "eo:gsd": 998.1818181818181, "eo:instrument": "MSI", "eo:platform": "sentinel-2a", "eo:sun_azimuth": 62.9424764928076, "eo:sun_elevation": 26.8398246645449, "fmask:clear": 73.65382838133374, "fmask:cloud": 11.063428320692061, "fmask:cloud_shadow": 0.6983135097842945, "fmask:snow": 14.583962676987106, "fmask:water": 0.0004671112027989303, 
"gqa:abs_iterative_mean_x": 0.42, "gqa:abs_iterative_mean_xy": 0.53, "gqa:abs_iterative_mean_y": 0.32, "gqa:abs_x": 0.69, "gqa:abs_xy": 1.07, "gqa:abs_y": 0.82, "gqa:cep90": 0.97, "gqa:iterative_mean_x": 0.4, "gqa:iterative_mean_xy": 0.4, "gqa:iterative_mean_y": 0.04, "gqa:iterative_stddev_x": 0.29, "gqa:iterative_stddev_xy": 0.53, "gqa:iterative_stddev_y": 0.44, "gqa:mean_x": 0.38, "gqa:mean_xy": 0.39, "gqa:mean_y": -0.07, "gqa:stddev_x": 1.18, "gqa:stddev_xy": 2.24, "gqa:stddev_y": 1.9, "odc:dataset_version": "3.2.1", "odc:file_format": "GeoTIFF", "odc:processing_datetime": "2021-02-10T03:25:22.635668", "odc:producer": "ga.gov.au", "odc:product_family": "ard", "odc:region_code": "53JQJ", "sat:orbit_state": "descending", "sat:relative_orbit": 102, "sentinel:datastrip_id": "S2A_OPER_MSI_L1C_DS_EPAE_20201031T022859_S20201031T004711_N02.09", "sentinel:sentinel_tile_id": "S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09", "sentinel:datatake_start_datetime": "2020-10-31T02:28:59", }, "measurements": { "nbar_blue": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif" }, "nbar_coastal_aerosol": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif", "grid": "b", }, "nbar_green": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif" }, "nbar_nir_1": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif" }, "nbar_nir_2": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif", "grid": "a", }, "nbar_red": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif" }, "nbar_red_edge_1": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif", "grid": "a", }, "nbar_red_edge_2": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif", "grid": "a", }, "nbar_red_edge_3": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif", "grid": "a", }, "nbar_swir_2": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif", "grid": "a", }, "nbar_swir_3": { "path": 
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif", "grid": "a", }, "nbart_blue": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif" }, "nbart_coastal_aerosol": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif", "grid": "b", }, "nbart_green": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif" }, "nbart_nir_1": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif" }, "nbart_nir_2": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif", "grid": "a", }, "nbart_red": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif" }, "nbart_red_edge_1": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif", "grid": "a", }, "nbart_red_edge_2": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif", "grid": "a", }, "nbart_red_edge_3": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif", "grid": "a", }, "nbart_swir_2": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif", "grid": "a", }, "nbart_swir_3": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif", "grid": "a", }, "oa_azimuthal_exiting": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif" }, "oa_azimuthal_incident": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif" }, "oa_combined_terrain_shadow": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif" }, "oa_exiting_angle": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif" }, "oa_fmask": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif", "grid": "c", }, "oa_incident_angle": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif" }, "oa_nbar_contiguity": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif" }, "oa_nbart_contiguity": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif" }, "oa_relative_azimuth": { "path": 
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif" }, "oa_relative_slope": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif" }, "oa_satellite_azimuth": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif" }, "oa_satellite_view": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif" }, "oa_solar_azimuth": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif" }, "oa_solar_zenith": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif" }, "oa_time_delta": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif" }, }, "accessories": { "checksum:sha1": { "path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1" }, "metadata:processor": { "path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml" }, "thumbnail:nbar": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg" }, "thumbnail:nbart": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg" }, }, "lineage": {"level1": ["e27200c1-0a9c-5e24-bfe1-bbbb3f3bdedc"]}, }, output_metadata, ) [proc_info] = expected_folder.rglob("*.proc-info.yaml") assert_same_as_file( { "fmask": { "parameters": { "cloud_buffer_distance_metres": 0.0, "cloud_shadow_buffer_distance_metres": 0.0, "frantz_parallax_sentinel_2": False, }, "percent_class_distribution": { "clear": 73.65382838133374, "cloud": 11.063428320692061, "cloud_shadow": 0.6983135097842945, "snow": 14.583962676987106, "water": 0.0004671112027989303, }, }, "software_versions": [ { "name": "modtran", "url": "http://www.ontar.com/software/productdetails.aspx?item=modtran", "version": "6.0.1", }, { "name": "wagl", "url": "https://github.com/GeoscienceAustralia/wagl.git", "version": "5.4.1", }, { "name": "eugl", "url": "https://github.com/OpenDataCubePipelines/eugl.git", "version": "0.2.1", }, {"name": "gverify", "url": None, "version": "v0.25c"}, { "name": "fmask", "url": "https://bitbucket.org/chchrsc/python-fmask", "version": "0.5.4", }, { 
"name": "tesp", "url": "https://github.com/OpenDataCubePipelines/tesp.git", "version": "0.6.2", }, { "name": "eodatasets3", "url": "https://github.com/GeoscienceAustralia/eo-datasets", "version": eodatasets3.__version__, }, ], }, proc_info, ignore_fields=("gqa", "wagl"), ) # All produced tifs should be valid COGs for image in expected_folder.rglob("*.tif"): assert cogeo.cog_validate(image), f"Failed COG validation: {image}"
43.096743
110
0.510613
from binascii import crc32 from contextlib import contextmanager from datetime import datetime, timedelta, timezone from pathlib import Path from osgeo import gdal import pytest import rasterio from click.testing import CliRunner from rasterio import DatasetReader from rasterio.enums import Compression from rio_cogeo import cogeo import eodatasets3 from eodatasets3.model import DatasetDoc from tests import assert_file_structure from tests.common import assert_same_as_file from . import assert_image h5py = pytest.importorskip( "h5py", reason="Extra dependencies needed to run wagl package test. " "Try pip install eodatasets3[wagl]", ) WAGL_LANDSAT_OUTPUT: Path = ( Path(__file__).parent / "data/wagl-input/LC80920842016180LGN01/LC80920842016180LGN01.wagl.h5" ) WAGL_SENTINEL_OUTPUT: Path = ( Path(__file__).parent / "data/wagl-input/S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09/" "S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09.wagl.h5" ) L1_METADATA_PATH: Path = ( Path(__file__).parent / "data/wagl-input/LC08_L1TP_092084_20160628_20170323_01_T1.yaml" ) S2_L1_METADATA_PATH: Path = ( Path(__file__).parent / "data/wagl-input/S2A_MSIL1C_20201031T004711_N0209_R102_T53JQJ_20201031T022859.odc-metadata.yaml" ) def test_whole_landsat_wagl_package( l1_ls8_dataset: DatasetDoc, l1_ls8_folder: Path, tmp_path: Path ): out = tmp_path from eodatasets3.scripts import packagewagl # useful for catching things like unclosed files. with expect_no_warnings(): res = CliRunner().invoke( packagewagl.run, map( str, (WAGL_LANDSAT_OUTPUT, "--level1", L1_METADATA_PATH, "--output", out), ), catch_exceptions=False, ) # The last line of output ends with the dataset path. 
words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1) expected_folder = out / "ga_ls8c_ard_3/092/084/2016/06/28" assert_file_structure( expected_folder, { "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.odc-metadata.yaml": "", "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml": "", "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif": "", "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif": "", "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif": "", 
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif": "", "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif": "", }, ) [output_metadata] = expected_folder.rglob("*.odc-metadata.yaml") assert reported_metadata == str( output_metadata ), "Cli didn't report the expected output path" [checksum_file] = expected_folder.rglob("*.sha1") all_output_files = set( p.relative_to(checksum_file.parent) for p in expected_folder.rglob("*") if p != checksum_file ) files_in_checksum = { Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines() } assert all_output_files == files_in_checksum [image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 1978, 1: 4184}) [image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 1979, 1: 4183}) assert_same_as_file( { "$schema": "https://schemas.opendatacube.org/dataset", "id": "787eb74c-e7df-43d6-b562-b796137330ae", "label": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final", "product": { "href": "https://collections.dea.ga.gov.au/product/ga_ls8c_ard_3", "name": "ga_ls8c_ard_3", }, "crs": "epsg:32655", "geometry": { "coordinates": [ [ [386_170.809_107_605_5, -3_787_581.737_315_514_6], [393_422.698_122_467_44, -3_754_539.332_156_166_4], [402_370.463_567_812_2, -3_717_207.883_853_628_3], [405_296.703_429_750_9, -3_713_106.822_612_258_6], [405_302.307_692_307_7, -3_713_085.0], [560_999.714_134_832_8, -3_745_790.820_117_99], [591_203.344_050_317_7, -3_755_934.776_849_929_2], [593_107.5, 
-3_756_373.614_649_681_4], [593_066.089_284_004_1, -3_756_560.384_007_281_6], [593_115.0, -3_756_576.810_780_758], [593_115.0, -3_769_934.639_090_926_4], [555_895.771_981_598_6, -3_924_204.823_795_153], [554_316.830_569_659_8, -3_931_326.117_549_759], [553_913.572_308_820_1, -3_932_420.854_216_015], [550_505.686_408_068, -3_946_546.219_392_854], [548_673.645_879_151_9, -3_946_645.831_477_726_3], [548_393.076_923_077, -3_947_407.5], [543_888.417_289_877_3, -3_946_906.014_911_907], [535_826.373_854_402_9, -3_947_344.365_997_631_6], [362_232.941_315_876_84, -3_905_575.014_223_633], [362_109.819_892_458_1, -3_904_490.351_889_350_5], [360_592.5, -3_904_126.385_350_318_6], [361_565.347_585_850_9, -3_899_693.716_286_561_5], [360_585.0, -3_891_057.151_898_734_3], [366_618.297_729_428_5, -3_863_717.869_440_751], [386_170.809_107_605_5, -3_787_581.737_315_514_6], ] ], "type": "Polygon", }, "grids": { "default": { "shape": [79, 78], "transform": [ 2981.153_846_153_846, 0.0, 360_585.0, 0.0, -2966.202_531_645_569_7, -3_713_085.0, 0.0, 0.0, 1.0, ], }, "panchromatic": { "shape": [157, 156], "transform": [ 1490.480_769_230_769_3, 0.0, 360_592.5, 0.0, -1492.452_229_299_363, -3_713_092.5, 0.0, 0.0, 1.0, ], }, }, "properties": { "datetime": datetime(2016, 6, 28, 0, 2, 28, 624_635), "dea:dataset_maturity": "final", "dtr:end_datetime": datetime(2016, 6, 28, 0, 2, 43, 114_771), "dtr:start_datetime": datetime(2016, 6, 28, 0, 2, 14, 25815), "eo:cloud_cover": 63.069_613_577_531_236, "eo:gsd": 1490.480_769_230_769_3, "eo:instrument": "OLI_TIRS", "eo:platform": "landsat-8", "eo:sun_azimuth": 33.655_125_34, "eo:sun_elevation": 23.988_361_72, "fmask:clear": 32.735_343_657_403_305, "fmask:cloud": 63.069_613_577_531_236, "fmask:cloud_shadow": 4.139_470_857_647_722, "fmask:snow": 0.005_053_323_801_138_007, "fmask:water": 0.050_518_583_616_596_675, "gqa:abs_iterative_mean_x": 0.21, "gqa:abs_iterative_mean_xy": 0.27, "gqa:abs_iterative_mean_y": 0.18, "gqa:abs_x": 0.3, "gqa:abs_xy": 0.39, 
"gqa:abs_y": 0.25, "gqa:cep90": 0.46, "gqa:iterative_mean_x": -0.17, "gqa:iterative_mean_xy": 0.21, "gqa:iterative_mean_y": 0.12, "gqa:iterative_stddev_x": 0.19, "gqa:iterative_stddev_xy": 0.25, "gqa:iterative_stddev_y": 0.17, "gqa:mean_x": -0.1, "gqa:mean_xy": 0.14, "gqa:mean_y": 0.1, "gqa:stddev_x": 0.35, "gqa:stddev_xy": 0.45, "gqa:stddev_y": 0.29, "landsat:collection_category": "T1", "landsat:collection_number": 1, "landsat:landsat_product_id": "LC08_L1TP_092084_20160628_20170323_01_T1", "landsat:landsat_scene_id": "LC80920842016180LGN01", "landsat:wrs_path": 92, "landsat:wrs_row": 84, "odc:dataset_version": "3.2.1", "odc:file_format": "GeoTIFF", "odc:processing_datetime": datetime(2019, 7, 11, 23, 29, 29, 21245), "odc:producer": "ga.gov.au", "odc:product_family": "ard", "odc:region_code": "092084", }, "measurements": { "nbar_blue": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif" }, "nbar_coastal_aerosol": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif" }, "nbar_green": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif" }, "nbar_nir": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif" }, "nbar_panchromatic": { "grid": "panchromatic", "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif", }, "nbar_red": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif" }, "nbar_swir_1": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif" }, "nbar_swir_2": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif" }, "nbart_blue": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif" }, "nbart_coastal_aerosol": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif" }, "nbart_green": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif" }, "nbart_nir": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif" }, "nbart_panchromatic": { "grid": "panchromatic", "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif", 
}, "nbart_red": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif" }, "nbart_swir_1": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif" }, "nbart_swir_2": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif" }, "oa_azimuthal_exiting": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif" }, "oa_azimuthal_incident": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif" }, "oa_combined_terrain_shadow": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif" }, "oa_exiting_angle": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif" }, "oa_fmask": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif" }, "oa_incident_angle": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif" }, "oa_nbar_contiguity": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif" }, "oa_nbart_contiguity": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif" }, "oa_relative_azimuth": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif" }, "oa_relative_slope": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif" }, "oa_satellite_azimuth": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif" }, "oa_satellite_view": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif" }, "oa_solar_azimuth": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif" }, "oa_solar_zenith": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif" }, "oa_time_delta": { "path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif" }, }, "accessories": { "checksum:sha1": { "path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1" }, "metadata:processor": { "path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml" }, "thumbnail:nbar": { "path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg" }, 
"thumbnail:nbart": { "path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg" }, }, "lineage": {"level1": ["fb1c622e-90aa-50e8-9d5e-ad69db82d0f6"]}, }, output_metadata, ) [proc_info] = expected_folder.rglob("*.proc-info.yaml") assert_same_as_file( { "fmask": { "parameters": { "cloud_buffer_distance_metres": 0.0, "cloud_shadow_buffer_distance_metres": 0.0, "frantz_parallax_sentinel_2": False, }, "percent_class_distribution": { "clear": 32.735_343_657_403_305, "cloud": 63.069_613_577_531_236, "cloud_shadow": 4.139_470_857_647_722, "snow": 0.005_053_323_801_138_007, "water": 0.050_518_583_616_596_675, }, }, "software_versions": [ { "name": "modtran", "url": "http://www.ontar.com/software/productdetails.aspx?item=modtran", "version": "6.0.1", }, { "name": "wagl", "url": "https://github.com/GeoscienceAustralia/wagl.git", "version": "5.3.1+118.g9edd420", }, { "name": "eugl", "url": "https://github.com/OpenDataCubePipelines/eugl.git", "version": "0.0.2+69.gb1d1231", }, {"name": "gverify", "url": None, "version": "v0.25c"}, { "name": "fmask", "url": "https://bitbucket.org/chchrsc/python-fmask", "version": "0.5.3", }, { "name": "tesp", "url": "https://github.com/OpenDataCubePipelines/tesp.git", "version": "0.6.1", }, { "name": "eodatasets3", "url": "https://github.com/GeoscienceAustralia/eo-datasets", "version": eodatasets3.__version__, }, ], }, proc_info, ignore_fields=("gqa", "wagl"), ) for image in expected_folder.rglob("*.tif"): assert cogeo.cog_validate(image), f"Failed COG validation: {image}" [image] = expected_folder.rglob("*_nbar_*_band08.tif") with rasterio.open(image) as d: d: DatasetReader assert d.count == 1, "Expected one band" assert d.nodata == -999.0 assert crc32(d.read(1).tobytes()) == 3_381_159_350 # (Rasterio's checksum is zero on some datasets for some reason? So we use crc above...) 
assert d.checksum(1) == 58403 assert d.overviews(1) == [8, 16, 31] assert d.driver == "GTiff" assert d.dtypes == ("int16",) assert d.compression == Compression.deflate assert d.height == 157 assert d.width == 156 assert d.block_shapes == [(26, 156)] assert gdal.Open(str(image)).GetRasterBand(1).GetOverview(1).GetBlockSize() == [ 512, 512, ], "Expected overviews to have a larger block size." # OA data should have no overviews. [*oa_images] = expected_folder.rglob("*_oa_*.tif") assert oa_images for image in oa_images: # fmask is the only OA that should have overviews according to spec (and Josh). if "fmask" in image.name: assert_image(image, overviews=[8, 16, 26]) else: assert_image(image, overviews=[]) # Check we didn't get height/width mixed up again :) [thumb_path] = expected_folder.rglob("*_nbar_*.jpg") assert_image(thumb_path, bands=3, shape=(7, 8)) def test_maturity_calculation(): from eodatasets3 import wagl wagl_doc = { "ancillary": { "aerosol": { "id": ["99d73c48-9985-51d2-9639-d37bcdfe119e"], "tier": "AATSR_CMP_MONTH", "value": 0.047_813_605_517_148_97, }, "brdf": { "alpha_1": { "band_1": 0.407_471_513_826_581_4, "band_2": 0.407_472_440_438_251_7, "band_3": 0.564_374_828_124_185, "band_4": 0.452_550_357_394_962_35, "band_5": 0.720_394_875_348_492_4, "band_6": 0.475_077_458_430_413_66, "band_7": 0.549_934_518_094_732, }, "alpha_2": { "band_1": 0.177_715_841_252_848_28, "band_2": 0.177_716_091_422_247_15, "band_3": 0.136_703_039_045_401_32, "band_4": 0.167_629_648_004_969_63, "band_5": 0.090_148_975_875_461_32, "band_6": 0.121_059_126_731_143_88, "band_7": 0.181_073_714_539_622_23, }, "id": [ "2e95bdec-42e4-50a2-9a4c-1ea970e2696d", "d02e1c58-7379-5c2d-a080-995838550d0d", ], "tier": "DEFINITIVE", }, "elevation": { "id": [ "8ad73086-72cf-561a-aa0f-1e3c64d53384", "e75ac77d-1ed0-55a5-888b-9ae48080eae9", ] }, "ozone": { "id": ["83914de1-c12e-5035-af8d-e2dc1baa54d4"], "tier": "DEFINITIVE", "value": 0.295, }, "water_vapour": { "id": 
["e68035cd-1cd3-57fc-9b0e-2bf710a3df87"], "tier": "DEFINITIVE", "value": 0.490_000_009_536_743_16, }, } } acq_before_brdf = datetime(2002, 6, 29, tzinfo=timezone.utc) acq_after_brdf = datetime(2002, 7, 1, tzinfo=timezone.utc) proc_after_brdf = acq_after_brdf + timedelta(days=7) assert ( wagl._determine_maturity( acq_after_brdf, acq_after_brdf + timedelta(hours=49), wagl_doc ) == "final" ) assert ( wagl._determine_maturity( acq_after_brdf, acq_after_brdf + timedelta(hours=1), wagl_doc ) == "nrt" ) assert ( wagl._determine_maturity( acq_before_brdf, acq_before_brdf + timedelta(hours=47), wagl_doc ) == "nrt" ) assert ( wagl._determine_maturity( acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc ) == "final" ) wagl_doc["ancillary"]["water_vapour"]["tier"] = "FALLBACK_DATASET" assert ( wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim" ) assert ( wagl._determine_maturity( acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc ) == "interim" ) wagl_doc["ancillary"]["water_vapour"]["tier"] = "DEFINITIVE" wagl_doc["ancillary"]["brdf"]["tier"] = "FALLBACK_DEFAULT" assert ( wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim" ) @contextmanager def expect_no_warnings(): with pytest.warns(None) as warning_record: yield # useful for catching things like unclosed files. if warning_record: messages = "\n".join(f"- {w.message} ({w})\n" for w in warning_record) raise AssertionError(f"Expected no warnings to be produced, got:\n {messages}") def test_sentinel_wagl_package(tmp_path: Path): out = tmp_path from eodatasets3.scripts import packagewagl # No warnings should have been logged during package. 
# We could tighten this to specific warnings if it proves too noisy, but it's with expect_no_warnings(): res = CliRunner().invoke( packagewagl.run, map( str, ( WAGL_SENTINEL_OUTPUT, "--level1", S2_L1_METADATA_PATH, "--output", out, "--oa-resolution", 998.1818181818181, ), ), catch_exceptions=False, ) words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1) expected_folder = out / "ga_s2am_ard_3/53/JQJ/2020/10/31" assert_file_structure( expected_folder, { "20201031T022859": { "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.odc-metadata.yaml": "", "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml": "", "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif": "", "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif": "", 
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif": "", "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif": "", "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif": "", } }, ) [output_metadata] = expected_folder.rglob("*.odc-metadata.yaml") [checksum_file] = expected_folder.rglob("*.sha1") all_output_files = set( p.relative_to(checksum_file.parent) for p in expected_folder.rglob("*") if p != checksum_file and not p.is_dir() ) files_in_checksum = { Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines() } assert all_output_files == files_in_checksum [image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733}) [image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif") assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733}) assert_same_as_file( { "$schema": "https://schemas.opendatacube.org/dataset", "id": "14cfa990-7e2f-4f0c-bd5e-b4cb28c27e8d", "label": 
"ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final", "product": { "name": "ga_s2am_ard_3", "href": "https://collections.dea.ga.gov.au/product/ga_s2am_ard_3", }, "crs": "epsg:32753", "geometry": { "type": "Polygon", "coordinates": [ [ [731901.8181818182, 6790240.0], [728854.7368421053, 6790240.0], [752174.154338321, 6890002.646902946], [759379.8080509851, 6900040.0], [762411.0326110948, 6900040.0], [763218.8851094716, 6900040.0], [809760.0, 6900040.0], [809760.0, 6790240.0], [732900.0, 6790240.0], [731901.8181818182, 6790240.0], ] ], }, "grids": { "default": { "shape": [110, 110], "transform": [ 998.1818181818181, 0.0, 699960.0, 0.0, -998.1818181818181, 6900040.0, 0.0, 0.0, 1.0, ], }, "a": { "shape": [55, 55], "transform": [ 1996.3636363636363, 0.0, 699960.0, 0.0, -1996.3636363636363, 6900040.0, 0.0, 0.0, 1.0, ], }, "b": { "shape": [19, 19], "transform": [ 5778.9473684210525, 0.0, 699960.0, 0.0, -5778.9473684210525, 6900040.0, 0.0, 0.0, 1.0, ], }, "c": { "shape": [19, 19], "transform": [ 5778.947368421053, 0.0, 699960.0, 0.0, -5778.947368421053, 6900040.0, 0.0, 0.0, 1.0, ], }, }, "properties": { "datetime": "2020-10-31T00:55:10.954414", "dea:dataset_maturity": "final", "eo:cloud_cover": 11.063428320692061, "eo:gsd": 998.1818181818181, "eo:instrument": "MSI", "eo:platform": "sentinel-2a", "eo:sun_azimuth": 62.9424764928076, "eo:sun_elevation": 26.8398246645449, "fmask:clear": 73.65382838133374, "fmask:cloud": 11.063428320692061, "fmask:cloud_shadow": 0.6983135097842945, "fmask:snow": 14.583962676987106, "fmask:water": 0.0004671112027989303, "gqa:abs_iterative_mean_x": 0.42, "gqa:abs_iterative_mean_xy": 0.53, "gqa:abs_iterative_mean_y": 0.32, "gqa:abs_x": 0.69, "gqa:abs_xy": 1.07, "gqa:abs_y": 0.82, "gqa:cep90": 0.97, "gqa:iterative_mean_x": 0.4, "gqa:iterative_mean_xy": 0.4, "gqa:iterative_mean_y": 0.04, "gqa:iterative_stddev_x": 0.29, "gqa:iterative_stddev_xy": 0.53, "gqa:iterative_stddev_y": 0.44, "gqa:mean_x": 0.38, "gqa:mean_xy": 0.39, "gqa:mean_y": -0.07, "gqa:stddev_x": 
1.18, "gqa:stddev_xy": 2.24, "gqa:stddev_y": 1.9, "odc:dataset_version": "3.2.1", "odc:file_format": "GeoTIFF", "odc:processing_datetime": "2021-02-10T03:25:22.635668", "odc:producer": "ga.gov.au", "odc:product_family": "ard", "odc:region_code": "53JQJ", "sat:orbit_state": "descending", "sat:relative_orbit": 102, "sentinel:datastrip_id": "S2A_OPER_MSI_L1C_DS_EPAE_20201031T022859_S20201031T004711_N02.09", "sentinel:sentinel_tile_id": "S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09", "sentinel:datatake_start_datetime": "2020-10-31T02:28:59", }, "measurements": { "nbar_blue": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif" }, "nbar_coastal_aerosol": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif", "grid": "b", }, "nbar_green": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif" }, "nbar_nir_1": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif" }, "nbar_nir_2": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif", "grid": "a", }, "nbar_red": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif" }, "nbar_red_edge_1": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif", "grid": "a", }, "nbar_red_edge_2": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif", "grid": "a", }, "nbar_red_edge_3": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif", "grid": "a", }, "nbar_swir_2": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif", "grid": "a", }, "nbar_swir_3": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif", "grid": "a", }, "nbart_blue": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif" }, "nbart_coastal_aerosol": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif", "grid": "b", }, "nbart_green": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif" }, "nbart_nir_1": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif" }, "nbart_nir_2": { "path": 
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif", "grid": "a", }, "nbart_red": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif" }, "nbart_red_edge_1": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif", "grid": "a", }, "nbart_red_edge_2": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif", "grid": "a", }, "nbart_red_edge_3": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif", "grid": "a", }, "nbart_swir_2": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif", "grid": "a", }, "nbart_swir_3": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif", "grid": "a", }, "oa_azimuthal_exiting": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif" }, "oa_azimuthal_incident": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif" }, "oa_combined_terrain_shadow": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif" }, "oa_exiting_angle": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif" }, "oa_fmask": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif", "grid": "c", }, "oa_incident_angle": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif" }, "oa_nbar_contiguity": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif" }, "oa_nbart_contiguity": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif" }, "oa_relative_azimuth": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif" }, "oa_relative_slope": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif" }, "oa_satellite_azimuth": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif" }, "oa_satellite_view": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif" }, "oa_solar_azimuth": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif" }, "oa_solar_zenith": { "path": 
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif" }, "oa_time_delta": { "path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif" }, }, "accessories": { "checksum:sha1": { "path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1" }, "metadata:processor": { "path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml" }, "thumbnail:nbar": { "path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg" }, "thumbnail:nbart": { "path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg" }, }, "lineage": {"level1": ["e27200c1-0a9c-5e24-bfe1-bbbb3f3bdedc"]}, }, output_metadata, ) [proc_info] = expected_folder.rglob("*.proc-info.yaml") assert_same_as_file( { "fmask": { "parameters": { "cloud_buffer_distance_metres": 0.0, "cloud_shadow_buffer_distance_metres": 0.0, "frantz_parallax_sentinel_2": False, }, "percent_class_distribution": { "clear": 73.65382838133374, "cloud": 11.063428320692061, "cloud_shadow": 0.6983135097842945, "snow": 14.583962676987106, "water": 0.0004671112027989303, }, }, "software_versions": [ { "name": "modtran", "url": "http://www.ontar.com/software/productdetails.aspx?item=modtran", "version": "6.0.1", }, { "name": "wagl", "url": "https://github.com/GeoscienceAustralia/wagl.git", "version": "5.4.1", }, { "name": "eugl", "url": "https://github.com/OpenDataCubePipelines/eugl.git", "version": "0.2.1", }, {"name": "gverify", "url": None, "version": "v0.25c"}, { "name": "fmask", "url": "https://bitbucket.org/chchrsc/python-fmask", "version": "0.5.4", }, { "name": "tesp", "url": "https://github.com/OpenDataCubePipelines/tesp.git", "version": "0.6.2", }, { "name": "eodatasets3", "url": "https://github.com/GeoscienceAustralia/eo-datasets", "version": eodatasets3.__version__, }, ], }, proc_info, ignore_fields=("gqa", "wagl"), ) for image in expected_folder.rglob("*.tif"): assert cogeo.cog_validate(image), f"Failed COG validation: {image}"
true
true
f70335d255da69b8ffa2473040f343fc73572ee4
1,551
py
Python
alleco/spiders/ross_t.py
crocojim18/alleco
29d8eb5b814e48e28a794b1bb5a3db1da47b101c
[ "Apache-2.0" ]
1
2020-12-12T00:33:01.000Z
2020-12-12T00:33:01.000Z
alleco/spiders/ross_t.py
crocojim18/alleco
29d8eb5b814e48e28a794b1bb5a3db1da47b101c
[ "Apache-2.0" ]
null
null
null
alleco/spiders/ross_t.py
crocojim18/alleco
29d8eb5b814e48e28a794b1bb5a3db1da47b101c
[ "Apache-2.0" ]
null
null
null
import scrapy, re from alleco.objects.official import Official class ross_t(scrapy.Spider): name = "ross_t" muniName = "ROSS" muniType = "TOWNSHIP" complete = True def start_requests(self): urls = ['https://www.ross.pa.us/245/Board-of-Commissioners', 'https://www.ross.pa.us/225/Other-Elected-Officials'] for url in urls: yield scrapy.Request(url=url, callback=self.parse) def parse(self, response): if response.url[-2]=='r': for quote in response.xpath('//div[@class="cpTabPanels"]'): arr = [i.strip() for i in quote.xpath('.//text()').getall() if len(i.strip())>0 and '$' not in i] temp = [] peeps = [] for i in arr: temp.append(i) if '@' in i: peeps.append(temp) temp = [] for pers in peeps: name = self._name(pers[1]) if "Commissioner" in pers[1] else None yield Official( muniName=self.muniName, muniType=self.muniType, office="COMMISSIONER", district=pers[0].upper(), name=name, email=pers[-1], vacant=name==None, url=response.url) elif response.url[-2]=='l': for quote in response.xpath('//div[contains(h2/text(),"Ross Tax Collector")]/p[1]'): yield Official( muniName=self.muniName, muniType=self.muniType, office="TAX COLLECTOR", name=quote.xpath('text()[1]').get(), email=quote.xpath('a/@href').get(), phone=quote.xpath('text()[2]').get(), url=response.url) def _name(self,string): return string.split(",")[0][13:]
31.02
102
0.600258
import scrapy, re from alleco.objects.official import Official class ross_t(scrapy.Spider): name = "ross_t" muniName = "ROSS" muniType = "TOWNSHIP" complete = True def start_requests(self): urls = ['https://www.ross.pa.us/245/Board-of-Commissioners', 'https://www.ross.pa.us/225/Other-Elected-Officials'] for url in urls: yield scrapy.Request(url=url, callback=self.parse) def parse(self, response): if response.url[-2]=='r': for quote in response.xpath('//div[@class="cpTabPanels"]'): arr = [i.strip() for i in quote.xpath('.//text()').getall() if len(i.strip())>0 and '$' not in i] temp = [] peeps = [] for i in arr: temp.append(i) if '@' in i: peeps.append(temp) temp = [] for pers in peeps: name = self._name(pers[1]) if "Commissioner" in pers[1] else None yield Official( muniName=self.muniName, muniType=self.muniType, office="COMMISSIONER", district=pers[0].upper(), name=name, email=pers[-1], vacant=name==None, url=response.url) elif response.url[-2]=='l': for quote in response.xpath('//div[contains(h2/text(),"Ross Tax Collector")]/p[1]'): yield Official( muniName=self.muniName, muniType=self.muniType, office="TAX COLLECTOR", name=quote.xpath('text()[1]').get(), email=quote.xpath('a/@href').get(), phone=quote.xpath('text()[2]').get(), url=response.url) def _name(self,string): return string.split(",")[0][13:]
true
true
f703360b45b569aefa99c691fa9c466399212414
1,345
py
Python
PythonInterview/DesigPattern/BehaviorPattern/State.py
xtawfnhdx/PythonInterview
515675ffd86eb1ad3bfa631fd3c88fddcf411e98
[ "Apache-2.0" ]
null
null
null
PythonInterview/DesigPattern/BehaviorPattern/State.py
xtawfnhdx/PythonInterview
515675ffd86eb1ad3bfa631fd3c88fddcf411e98
[ "Apache-2.0" ]
null
null
null
PythonInterview/DesigPattern/BehaviorPattern/State.py
xtawfnhdx/PythonInterview
515675ffd86eb1ad3bfa631fd3c88fddcf411e98
[ "Apache-2.0" ]
null
null
null
""" 状态模式 """ from __future__ import annotations from abc import ABC, abstractmethod class Context: # 状态(状态模式的判断) _state: State = None def __init__(self, state: State) -> None: self.transition_to(state) def transition_to(self, state: State) -> None: # 根据不同状态,切换上下文 self._state = state self._state.context = self # 最终执行器的操作 def request1(self): self._state.handle1() def request2(self): self._state.handle2() class State(ABC): @property def context(self) -> Context: return self._context @context.setter def context(self, context: Context) -> None: self._context = context @abstractmethod def handle1(self) -> None: pass @abstractmethod def handle2(self) -> None: pass class ConcreteStateA(State): def handle1(self) -> None: print('执行了A—1') self.context.transition_to(ConcreteStateB()) def handle2(self) -> None: print('执行了A-2') class ConcreteStateB(State): def handle1(self) -> None: print('执行了B—1') def handle2(self) -> None: print('执行了B—2') self.context.transition_to(ConcreteStateA()) if __name__ == '__main__': context = Context(ConcreteStateA()) context.request1() context.request2() context.request2()
19.492754
52
0.615613
from __future__ import annotations from abc import ABC, abstractmethod class Context: _state: State = None def __init__(self, state: State) -> None: self.transition_to(state) def transition_to(self, state: State) -> None: self._state = state self._state.context = self def request1(self): self._state.handle1() def request2(self): self._state.handle2() class State(ABC): @property def context(self) -> Context: return self._context @context.setter def context(self, context: Context) -> None: self._context = context @abstractmethod def handle1(self) -> None: pass @abstractmethod def handle2(self) -> None: pass class ConcreteStateA(State): def handle1(self) -> None: print('执行了A—1') self.context.transition_to(ConcreteStateB()) def handle2(self) -> None: print('执行了A-2') class ConcreteStateB(State): def handle1(self) -> None: print('执行了B—1') def handle2(self) -> None: print('执行了B—2') self.context.transition_to(ConcreteStateA()) if __name__ == '__main__': context = Context(ConcreteStateA()) context.request1() context.request2() context.request2()
true
true
f70336bae759c307733aa144b61511bf821e100a
5,125
py
Python
src/solutions/common/bizz/customer_signups.py
goubertbrent/oca-backend
b9f59cc02568aecb55d4b54aec05245790ea25fd
[ "Apache-2.0" ]
null
null
null
src/solutions/common/bizz/customer_signups.py
goubertbrent/oca-backend
b9f59cc02568aecb55d4b54aec05245790ea25fd
[ "Apache-2.0" ]
null
null
null
src/solutions/common/bizz/customer_signups.py
goubertbrent/oca-backend
b9f59cc02568aecb55d4b54aec05245790ea25fd
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.7@@ import json from mcfw.rpc import arguments, returns from rogerthat.models import Message from rogerthat.models.properties.forms import FormResult from rogerthat.rpc import users from rogerthat.service.api import messaging from rogerthat.to.messaging.forms import TextBlockFormTO, TextBlockTO, FormTO from rogerthat.to.messaging.service_callback_results import FormAcknowledgedCallbackResultTO from rogerthat.to.service import UserDetailsTO from rogerthat.utils.app import get_app_user_tuple from solutions import translate from solutions.common.dal import get_solution_main_branding, get_solution_settings from solutions.common.models import SolutionInboxMessage @arguments(service_user=users.User, service_identity=unicode, message_key=unicode, app_user=users.User, name=unicode, answer_id=unicode, parent_inbox_message=SolutionInboxMessage) def process_updated_customer_signup_message(service_user, service_identity, message_key, app_user, name, answer_id, parent_inbox_message): # type: (users.User, unicode, unicode, users.User, unicode, unicode, SolutionInboxMessage) -> None from solutions.common.bizz.messaging import MESSAGE_TAG_DENY_SIGNUP from solutions.common.restapi.services import rest_create_service_from_signup with users.set_user(service_user): sln_settings = get_solution_settings(service_user) if answer_id == 'decline': widget = TextBlockTO() 
widget.max_chars = 1024 form = TextBlockFormTO() form.type = TextBlockTO.TYPE form.widget = widget form.positive_button = translate(sln_settings.main_language, 'Confirm') form.negative_button = translate(sln_settings.main_language, 'Cancel') form.javascript_validation = """function run(result) { return result.value ? true : '%s'; }""" % translate(sln_settings.main_language, 'this_field_is_required', _duplicate_backslashes=True) human_user, app_id = get_app_user_tuple(app_user) messaging.send_form(parent_key=parent_inbox_message.message_key, parent_message_key=parent_inbox_message.message_key, message=translate(sln_settings.main_language, 'signup_not_ok'), member=human_user.email(), app_id=app_id, flags=Message.FLAG_AUTO_LOCK, branding=get_solution_main_branding(service_user).branding_key, tag=json.dumps({'__rt__.tag': MESSAGE_TAG_DENY_SIGNUP, 'signup_key': parent_inbox_message.category_key}), form=form, service_identity=service_identity, alert_flags=Message.ALERT_FLAG_VIBRATE) elif answer_id == 'approve': result = rest_create_service_from_signup(parent_inbox_message.category_key, force=True) # type: CreateServiceStatusTO if not result.success: messaging.send(parent_message_key=message_key, message=result.errormsg, answers=[], flags=Message.FLAG_ALLOW_DISMISS, branding=get_solution_main_branding(service_user).branding_key, tag=None, service_identity=service_identity) @returns(FormAcknowledgedCallbackResultTO) @arguments(service_user=users.User, status=int, form_result=FormResult, answer_id=unicode, member=unicode, message_key=unicode, tag=unicode, received_timestamp=int, acked_timestamp=int, parent_message_key=unicode, result_key=unicode, service_identity=unicode, user_details=[UserDetailsTO]) def deny_signup(service_user, status, form_result, answer_id, member, message_key, tag, received_timestamp, acked_timestamp, parent_message_key, result_key, service_identity, user_details): from solutions.common.restapi import rest_customer_signup_reply with 
users.set_user(service_user): if answer_id == FormTO.POSITIVE: tag_dict = json.loads(tag) rest_customer_signup_reply(tag_dict['signup_key'], form_result.result.value)
55.706522
117
0.674732
import json from mcfw.rpc import arguments, returns from rogerthat.models import Message from rogerthat.models.properties.forms import FormResult from rogerthat.rpc import users from rogerthat.service.api import messaging from rogerthat.to.messaging.forms import TextBlockFormTO, TextBlockTO, FormTO from rogerthat.to.messaging.service_callback_results import FormAcknowledgedCallbackResultTO from rogerthat.to.service import UserDetailsTO from rogerthat.utils.app import get_app_user_tuple from solutions import translate from solutions.common.dal import get_solution_main_branding, get_solution_settings from solutions.common.models import SolutionInboxMessage @arguments(service_user=users.User, service_identity=unicode, message_key=unicode, app_user=users.User, name=unicode, answer_id=unicode, parent_inbox_message=SolutionInboxMessage) def process_updated_customer_signup_message(service_user, service_identity, message_key, app_user, name, answer_id, parent_inbox_message): from solutions.common.bizz.messaging import MESSAGE_TAG_DENY_SIGNUP from solutions.common.restapi.services import rest_create_service_from_signup with users.set_user(service_user): sln_settings = get_solution_settings(service_user) if answer_id == 'decline': widget = TextBlockTO() widget.max_chars = 1024 form = TextBlockFormTO() form.type = TextBlockTO.TYPE form.widget = widget form.positive_button = translate(sln_settings.main_language, 'Confirm') form.negative_button = translate(sln_settings.main_language, 'Cancel') form.javascript_validation = """function run(result) { return result.value ? 
true : '%s'; }""" % translate(sln_settings.main_language, 'this_field_is_required', _duplicate_backslashes=True) human_user, app_id = get_app_user_tuple(app_user) messaging.send_form(parent_key=parent_inbox_message.message_key, parent_message_key=parent_inbox_message.message_key, message=translate(sln_settings.main_language, 'signup_not_ok'), member=human_user.email(), app_id=app_id, flags=Message.FLAG_AUTO_LOCK, branding=get_solution_main_branding(service_user).branding_key, tag=json.dumps({'__rt__.tag': MESSAGE_TAG_DENY_SIGNUP, 'signup_key': parent_inbox_message.category_key}), form=form, service_identity=service_identity, alert_flags=Message.ALERT_FLAG_VIBRATE) elif answer_id == 'approve': result = rest_create_service_from_signup(parent_inbox_message.category_key, force=True) if not result.success: messaging.send(parent_message_key=message_key, message=result.errormsg, answers=[], flags=Message.FLAG_ALLOW_DISMISS, branding=get_solution_main_branding(service_user).branding_key, tag=None, service_identity=service_identity) @returns(FormAcknowledgedCallbackResultTO) @arguments(service_user=users.User, status=int, form_result=FormResult, answer_id=unicode, member=unicode, message_key=unicode, tag=unicode, received_timestamp=int, acked_timestamp=int, parent_message_key=unicode, result_key=unicode, service_identity=unicode, user_details=[UserDetailsTO]) def deny_signup(service_user, status, form_result, answer_id, member, message_key, tag, received_timestamp, acked_timestamp, parent_message_key, result_key, service_identity, user_details): from solutions.common.restapi import rest_customer_signup_reply with users.set_user(service_user): if answer_id == FormTO.POSITIVE: tag_dict = json.loads(tag) rest_customer_signup_reply(tag_dict['signup_key'], form_result.result.value)
true
true
f70336d4d611fb85c2969f6bdbbb5dbe9b44613a
2,648
py
Python
data_processing/draw_value_map.py
liiliiliil/ride-hailing-platform-with-simulator
c9eae7f718c9e10c7ba4955e5093d4fb21d16d25
[ "MIT" ]
3
2020-04-29T05:42:14.000Z
2021-07-13T17:54:13.000Z
data_processing/draw_value_map.py
liiliiliil/ride-hailing-platform-with-simulator
c9eae7f718c9e10c7ba4955e5093d4fb21d16d25
[ "MIT" ]
null
null
null
data_processing/draw_value_map.py
liiliiliil/ride-hailing-platform-with-simulator
c9eae7f718c9e10c7ba4955e5093d4fb21d16d25
[ "MIT" ]
null
null
null
import os import time import pickle import math import numpy as np import linecache import matplotlib.pyplot as plt # from matplotlib.pyplot import MultipleLocator import grid data_path = 'E:/dataset/didi/processed' save_path = 'E:/dataset/didi/processed/order_20161101_sampled_value_map_fig' data_file_name = 'processed_data' # '.pkl' will be added for binary file value_map_file_name = 'value_map' # '.pkl' will be added for binary file n_time_unit = 144 size_hexagon_to_edge = 0.0048 hexagon_size_factor_for_plot = 1 range_map_longitude = [103.96, 104.18] range_map_latitude = [30.59, 30.77] size_hexagon = size_hexagon_to_edge * 2 / math.sqrt(3) # length to the point if not os.path.exists(save_path): os.mkdir(save_path) with open(os.path.join(data_path, data_file_name+'.pkl'), 'rb') as f: data = pickle.load(f) with open(os.path.join(data_path, value_map_file_name+'.pkl'), 'rb') as f: value_map = pickle.load(f) # make hexagon grid = grid.Hexagon(size_to_edge=size_hexagon_to_edge*hexagon_size_factor_for_plot) grid_interval_lo = size_hexagon * 1.5 grid_interval_la = size_hexagon_to_edge * 2 grid_centers = [] for la in np.arange(range_map_latitude[1]-size_hexagon, range_map_latitude[0]-0.00001, -grid_interval_la): row = [] count = 0 for lo in np.arange(range_map_longitude[0], range_map_longitude[1]+0.00001, grid_interval_lo): if count % 2 == 0: row.append([lo, la]) else: row.append([lo, la+size_hexagon_to_edge]) count += 1 grid_centers.append(row) grid_centers_mat = np.array(grid_centers) shape_grid_centers_mat = grid_centers_mat.shape n_grids = shape_grid_centers_mat[0]*shape_grid_centers_mat[1] grid_index_mat = np.arange(n_grids).reshape(shape_grid_centers_mat[:2]) print('shape of grids is', shape_grid_centers_mat) print('number of grids is', n_grids) grid_centers_flat_T = grid_centers_mat.reshape(n_grids, 2).T max_value = np.max(value_map) min_value = np.min(value_map) print('maximum value in value_map is', max_value) print('minimum value in value_map is', min_value) # 
value_map = (value_map - min_value) / max_value # max_value = np.max(value_map) # min_value = np.min(value_map) # print('maximum value in value_map after normalization is', max_value) # print('minimum value in value_map after normalization is', min_value) for t in range(n_time_unit): fig = plt.figure() plt.title('value map of time unit %d' % t) plt.scatter(grid_centers_flat_T[0], grid_centers_flat_T[1], c=value_map[t], marker='H', s=100, alpha=0.5) plt.colorbar() fig.savefig(os.path.join(save_path, '%d.jpg'%t))
31.52381
109
0.737915
import os import time import pickle import math import numpy as np import linecache import matplotlib.pyplot as plt import grid data_path = 'E:/dataset/didi/processed' save_path = 'E:/dataset/didi/processed/order_20161101_sampled_value_map_fig' data_file_name = 'processed_data' value_map_file_name = 'value_map' n_time_unit = 144 size_hexagon_to_edge = 0.0048 hexagon_size_factor_for_plot = 1 range_map_longitude = [103.96, 104.18] range_map_latitude = [30.59, 30.77] size_hexagon = size_hexagon_to_edge * 2 / math.sqrt(3) if not os.path.exists(save_path): os.mkdir(save_path) with open(os.path.join(data_path, data_file_name+'.pkl'), 'rb') as f: data = pickle.load(f) with open(os.path.join(data_path, value_map_file_name+'.pkl'), 'rb') as f: value_map = pickle.load(f) grid = grid.Hexagon(size_to_edge=size_hexagon_to_edge*hexagon_size_factor_for_plot) grid_interval_lo = size_hexagon * 1.5 grid_interval_la = size_hexagon_to_edge * 2 grid_centers = [] for la in np.arange(range_map_latitude[1]-size_hexagon, range_map_latitude[0]-0.00001, -grid_interval_la): row = [] count = 0 for lo in np.arange(range_map_longitude[0], range_map_longitude[1]+0.00001, grid_interval_lo): if count % 2 == 0: row.append([lo, la]) else: row.append([lo, la+size_hexagon_to_edge]) count += 1 grid_centers.append(row) grid_centers_mat = np.array(grid_centers) shape_grid_centers_mat = grid_centers_mat.shape n_grids = shape_grid_centers_mat[0]*shape_grid_centers_mat[1] grid_index_mat = np.arange(n_grids).reshape(shape_grid_centers_mat[:2]) print('shape of grids is', shape_grid_centers_mat) print('number of grids is', n_grids) grid_centers_flat_T = grid_centers_mat.reshape(n_grids, 2).T max_value = np.max(value_map) min_value = np.min(value_map) print('maximum value in value_map is', max_value) print('minimum value in value_map is', min_value) for t in range(n_time_unit): fig = plt.figure() plt.title('value map of time unit %d' % t) plt.scatter(grid_centers_flat_T[0], grid_centers_flat_T[1], 
c=value_map[t], marker='H', s=100, alpha=0.5) plt.colorbar() fig.savefig(os.path.join(save_path, '%d.jpg'%t))
true
true
f7033767526f4de70d6ab0249e98b75127d3a2b1
185
py
Python
Miscellaneous/Big Chess.py
Joon7891/Competitive-Programming
d860b7ad932cd5a6fb91fdc8c53101da57f4a408
[ "MIT" ]
2
2021-04-13T00:19:56.000Z
2021-04-13T01:19:45.000Z
Miscellaneous/Big Chess.py
Joon7891/Competitive-Programming
d860b7ad932cd5a6fb91fdc8c53101da57f4a408
[ "MIT" ]
null
null
null
Miscellaneous/Big Chess.py
Joon7891/Competitive-Programming
d860b7ad932cd5a6fb91fdc8c53101da57f4a408
[ "MIT" ]
1
2020-08-26T12:36:08.000Z
2020-08-26T12:36:08.000Z
w = int(input()) h = int(input()) for i in range(h): output = str() for j in range(w): if (i + j) % 2 == 0: output += '0' else: output += '1' print(output)
15.416667
24
0.464865
w = int(input()) h = int(input()) for i in range(h): output = str() for j in range(w): if (i + j) % 2 == 0: output += '0' else: output += '1' print(output)
true
true
f70338de0edbbe39a2087fe87b86ac520180bef2
1,652
py
Python
bridges/text.py
BridgesUNCC/bridges-python
bf1f9697a45bf6d7748f22dbbcbff4c20cfb2229
[ "MIT" ]
1
2020-09-21T15:13:27.000Z
2020-09-21T15:13:27.000Z
bridges/text.py
BridgesUNCC/bridges-python
bf1f9697a45bf6d7748f22dbbcbff4c20cfb2229
[ "MIT" ]
57
2019-07-02T21:30:28.000Z
2021-11-05T23:27:50.000Z
bridges/text.py
BridgesUNCC/bridges-python
bf1f9697a45bf6d7748f22dbbcbff4c20cfb2229
[ "MIT" ]
3
2020-03-24T16:15:20.000Z
2021-04-14T20:44:43.000Z
from bridges.symbol import * class Text(Symbol): def __init__(self, label = None): super(Text, self).__init__() if label is not None: self._text = label else: self._text = "" self.stroke_width = 1.0 self._font_size = None self._anchor_alignment_lr = None self._anchor_alignment_tb = None self._locx = 0.0 self._locy = 0.0 def get_shape_type(self): return "text" @property def text(self): return self._text @text.setter def text(self, t): self._text = t @property def font_size(self): return self._font_size @font_size.setter def font_size(self, s): if(s < 0.0): raise ValueError("Font size is too small") self._font_size = s def set_anchor_alignment(self, typeLR, typeTB): self._anchor_alignment_lr = typeLR self._anchor_alignment_tb = typeTB def set_anchor_location(self, x, y): self._locx = x self._locy = y def get_json_representation(self): json_builder = super(Text, self).get_json_representation() json_builder['anchor-location'] = [self._locx, self._locy] json_builder['text'] = self.text if self.font_size is not None: json_builder['font-size'] =self.font_size if self._anchor_alignment_lr is not None: json_builder['anchor-alignmentLR'] = self._anchor_alignment_lr if self._anchor_alignment_tb is not None: json_builder['anchor-alignmentTB'] = self._anchor_alignment_tb return json_builder
23.942029
74
0.61138
from bridges.symbol import * class Text(Symbol): def __init__(self, label = None): super(Text, self).__init__() if label is not None: self._text = label else: self._text = "" self.stroke_width = 1.0 self._font_size = None self._anchor_alignment_lr = None self._anchor_alignment_tb = None self._locx = 0.0 self._locy = 0.0 def get_shape_type(self): return "text" @property def text(self): return self._text @text.setter def text(self, t): self._text = t @property def font_size(self): return self._font_size @font_size.setter def font_size(self, s): if(s < 0.0): raise ValueError("Font size is too small") self._font_size = s def set_anchor_alignment(self, typeLR, typeTB): self._anchor_alignment_lr = typeLR self._anchor_alignment_tb = typeTB def set_anchor_location(self, x, y): self._locx = x self._locy = y def get_json_representation(self): json_builder = super(Text, self).get_json_representation() json_builder['anchor-location'] = [self._locx, self._locy] json_builder['text'] = self.text if self.font_size is not None: json_builder['font-size'] =self.font_size if self._anchor_alignment_lr is not None: json_builder['anchor-alignmentLR'] = self._anchor_alignment_lr if self._anchor_alignment_tb is not None: json_builder['anchor-alignmentTB'] = self._anchor_alignment_tb return json_builder
true
true
f70338e59ee89ab97f79697d0e146ad9d944ad81
21,739
py
Python
hgraph/decoder.py
Amir-Mehrpanah/hgraph2graph
6d37153afe09f7684381ce56e8366675e22833e9
[ "MIT" ]
182
2019-11-15T15:59:31.000Z
2022-03-31T09:17:40.000Z
hgraph/decoder.py
Amir-Mehrpanah/hgraph2graph
6d37153afe09f7684381ce56e8366675e22833e9
[ "MIT" ]
30
2020-03-03T16:35:52.000Z
2021-12-16T04:06:57.000Z
hgraph/decoder.py
Amir-Mehrpanah/hgraph2graph
6d37153afe09f7684381ce56e8366675e22833e9
[ "MIT" ]
60
2019-11-15T05:06:11.000Z
2022-03-31T16:43:12.000Z
import torch import torch.nn as nn import rdkit.Chem as Chem import torch.nn.functional as F from hgraph.nnutils import * from hgraph.encoder import IncHierMPNEncoder from hgraph.mol_graph import MolGraph from hgraph.inc_graph import IncTree, IncGraph class HTuple(): def __init__(self, node=None, mess=None, vmask=None, emask=None): self.node, self.mess = node, mess self.vmask, self.emask = vmask, emask class HierMPNDecoder(nn.Module): def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, latent_size, depthT, depthG, dropout, attention=False): super(HierMPNDecoder, self).__init__() self.vocab = vocab self.avocab = avocab self.hidden_size = hidden_size self.embed_size = embed_size self.latent_size = latent_size self.use_attention = attention self.itensor = torch.LongTensor([]).cuda() self.hmpn = IncHierMPNEncoder(vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout) self.rnn_cell = self.hmpn.tree_encoder.rnn self.E_assm = self.hmpn.E_i self.E_order = torch.eye(MolGraph.MAX_POS).cuda() self.topoNN = nn.Sequential( nn.Linear(hidden_size + latent_size, hidden_size), nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, 1) ) self.clsNN = nn.Sequential( nn.Linear(hidden_size + latent_size, hidden_size), nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, vocab.size()[0]) ) self.iclsNN = nn.Sequential( nn.Linear(hidden_size + latent_size, hidden_size), nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, vocab.size()[1]) ) self.matchNN = nn.Sequential( nn.Linear(hidden_size + embed_size + MolGraph.MAX_POS, hidden_size), nn.ReLU(), ) self.W_assm = nn.Linear(hidden_size, latent_size) if latent_size != hidden_size: self.W_root = nn.Linear(latent_size, hidden_size) if self.use_attention: self.A_topo = nn.Linear(hidden_size, latent_size) self.A_cls = nn.Linear(hidden_size, latent_size) self.A_assm = nn.Linear(hidden_size, latent_size) self.topo_loss = nn.BCEWithLogitsLoss(size_average=False) self.cls_loss = 
nn.CrossEntropyLoss(size_average=False) self.icls_loss = nn.CrossEntropyLoss(size_average=False) self.assm_loss = nn.CrossEntropyLoss(size_average=False) def apply_tree_mask(self, tensors, cur, prev): fnode, fmess, agraph, bgraph, cgraph, scope = tensors agraph = agraph * index_select_ND(cur.emask, 0, agraph) bgraph = bgraph * index_select_ND(cur.emask, 0, bgraph) cgraph = cgraph * index_select_ND(prev.vmask, 0, cgraph) return fnode, fmess, agraph, bgraph, cgraph, scope def apply_graph_mask(self, tensors, hgraph): fnode, fmess, agraph, bgraph, scope = tensors agraph = agraph * index_select_ND(hgraph.emask, 0, agraph) bgraph = bgraph * index_select_ND(hgraph.emask, 0, bgraph) return fnode, fmess, agraph, bgraph, scope def update_graph_mask(self, graph_batch, new_atoms, hgraph): new_atom_index = hgraph.vmask.new_tensor(new_atoms) hgraph.vmask.scatter_(0, new_atom_index, 1) new_atom_set = set(new_atoms) new_bonds = [] #new bonds are the subgraph induced by new_atoms for zid in new_atoms: for nid in graph_batch[zid]: if nid not in new_atom_set: continue new_bonds.append( graph_batch[zid][nid]['mess_idx'] ) new_bond_index = hgraph.emask.new_tensor(new_bonds) if len(new_bonds) > 0: hgraph.emask.scatter_(0, new_bond_index, 1) return new_atom_index, new_bond_index def init_decoder_state(self, tree_batch, tree_tensors, src_root_vecs): batch_size = len(src_root_vecs) num_mess = len(tree_tensors[1]) agraph = tree_tensors[2].clone() bgraph = tree_tensors[3].clone() for i,tup in enumerate(tree_tensors[-1]): root = tup[0] assert agraph[root,-1].item() == 0 agraph[root,-1] = num_mess + i for v in tree_batch.successors(root): mess_idx = tree_batch[root][v]['mess_idx'] assert bgraph[mess_idx,-1].item() == 0 bgraph[mess_idx,-1] = num_mess + i new_tree_tensors = tree_tensors[:2] + [agraph, bgraph] + tree_tensors[4:] htree = HTuple() htree.mess = self.rnn_cell.get_init_state(tree_tensors[1], src_root_vecs) htree.emask = torch.cat( [bgraph.new_zeros(num_mess), 
bgraph.new_ones(batch_size)], dim=0 ) return htree, new_tree_tensors def attention(self, src_vecs, batch_idx, queries, W_att): size = batch_idx.size() if batch_idx.dim() > 1: batch_idx = batch_idx.view(-1) queries = queries.view(-1, queries.size(-1)) src_vecs = src_vecs.index_select(0, batch_idx) att_score = torch.bmm( src_vecs, W_att(queries).unsqueeze(-1) ) att_vecs = F.softmax(att_score, dim=1) * src_vecs att_vecs = att_vecs.sum(dim=1) return att_vecs if len(size) == 1 else att_vecs.view(size[0], size[1], -1) def get_topo_score(self, src_tree_vecs, batch_idx, topo_vecs): if self.use_attention: topo_cxt = self.attention(src_tree_vecs, batch_idx, topo_vecs, self.A_topo) else: topo_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0) return self.topoNN( torch.cat([topo_vecs, topo_cxt], dim=-1) ).squeeze(-1) def get_cls_score(self, src_tree_vecs, batch_idx, cls_vecs, cls_labs): if self.use_attention: cls_cxt = self.attention(src_tree_vecs, batch_idx, cls_vecs, self.A_cls) else: cls_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0) cls_vecs = torch.cat([cls_vecs, cls_cxt], dim=-1) cls_scores = self.clsNN(cls_vecs) if cls_labs is None: #inference mode icls_scores = self.iclsNN(cls_vecs) #no masking else: vocab_masks = self.vocab.get_mask(cls_labs) icls_scores = self.iclsNN(cls_vecs) + vocab_masks #apply mask by log(x + mask): mask=0 or -INF return cls_scores, icls_scores def get_assm_score(self, src_graph_vecs, batch_idx, assm_vecs): if self.use_attention: assm_cxt = self.attention(src_graph_vecs, batch_idx, assm_vecs, self.A_assm) else: assm_cxt = index_select_ND(src_graph_vecs, 0, batch_idx) return (self.W_assm(assm_vecs) * assm_cxt).sum(dim=-1) def forward(self, src_mol_vecs, graphs, tensors, orders): batch_size = len(orders) tree_batch, graph_batch = graphs tree_tensors, graph_tensors = tensors inter_tensors = tree_tensors src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs init_vecs = src_root_vecs if self.latent_size == self.hidden_size else 
self.W_root(src_root_vecs) htree, tree_tensors = self.init_decoder_state(tree_batch, tree_tensors, init_vecs) hinter = HTuple( mess = self.rnn_cell.get_init_state(inter_tensors[1]), emask = self.itensor.new_zeros(inter_tensors[1].size(0)) ) hgraph = HTuple( mess = self.rnn_cell.get_init_state(graph_tensors[1]), vmask = self.itensor.new_zeros(graph_tensors[0].size(0)), emask = self.itensor.new_zeros(graph_tensors[1].size(0)) ) all_topo_preds, all_cls_preds, all_assm_preds = [], [], [] new_atoms = [] tree_scope = tree_tensors[-1] for i in range(batch_size): root = tree_batch.nodes[ tree_scope[i][0] ] clab, ilab = self.vocab[ root['label'] ] all_cls_preds.append( (init_vecs[i], i, clab, ilab) ) #cluster prediction new_atoms.extend(root['cluster']) subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph) graph_tensors = self.hmpn.embed_graph(graph_tensors) + (graph_tensors[-1],) #preprocess graph tensors maxt = max([len(x) for x in orders]) max_cls_size = max( [len(attr) * 2 for node,attr in tree_batch.nodes(data='cluster')] ) for t in range(maxt): batch_list = [i for i in range(batch_size) if t < len(orders[i])] assert htree.emask[0].item() == 0 and hinter.emask[0].item() == 0 and hgraph.vmask[0].item() == 0 and hgraph.emask[0].item() == 0 subtree = [], [] for i in batch_list: xid, yid, tlab = orders[i][t] subtree[0].append(xid) if yid is not None: mess_idx = tree_batch[xid][yid]['mess_idx'] subtree[1].append(mess_idx) subtree = htree.emask.new_tensor(subtree[0]), htree.emask.new_tensor(subtree[1]) htree.emask.scatter_(0, subtree[1], 1) hinter.emask.scatter_(0, subtree[1], 1) cur_tree_tensors = self.apply_tree_mask(tree_tensors, htree, hgraph) cur_inter_tensors = self.apply_tree_mask(inter_tensors, hinter, hgraph) cur_graph_tensors = self.apply_graph_mask(graph_tensors, hgraph) htree, hinter, hgraph = self.hmpn(cur_tree_tensors, cur_inter_tensors, cur_graph_tensors, htree, hinter, hgraph, subtree, subgraph) new_atoms = [] for i in batch_list: xid, yid, tlab = 
orders[i][t] all_topo_preds.append( (htree.node[xid], i, tlab) ) #topology prediction if yid is not None: mess_idx = tree_batch[xid][yid]['mess_idx'] new_atoms.extend( tree_batch.nodes[yid]['cluster'] ) #NOTE: regardless of tlab = 0 or 1 if tlab == 0: continue cls = tree_batch.nodes[yid]['smiles'] clab, ilab = self.vocab[ tree_batch.nodes[yid]['label'] ] mess_idx = tree_batch[xid][yid]['mess_idx'] hmess = self.rnn_cell.get_hidden_state(htree.mess) all_cls_preds.append( (hmess[mess_idx], i, clab, ilab) ) #cluster prediction using message inter_label = tree_batch.nodes[yid]['inter_label'] inter_label = [ (pos, self.vocab[(cls, icls)][1]) for pos,icls in inter_label ] inter_size = self.vocab.get_inter_size(ilab) if len(tree_batch.nodes[xid]['cluster']) > 2: #uncertainty occurs only when previous cluster is a ring nth_child = tree_batch[yid][xid]['label'] #must be yid -> xid (graph order labeling is different from tree) cands = tree_batch.nodes[yid]['assm_cands'] icls = list(zip(*inter_label))[1] cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child) if len(cand_vecs) < max_cls_size: pad_len = max_cls_size - len(cand_vecs) cand_vecs = F.pad(cand_vecs, (0,0,0,pad_len)) batch_idx = hgraph.emask.new_tensor( [i] * max_cls_size ) all_assm_preds.append( (cand_vecs, batch_idx, 0) ) #the label is always the first of assm_cands subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph) topo_vecs, batch_idx, topo_labels = zip_tensors(all_topo_preds) topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, topo_vecs) topo_loss = self.topo_loss(topo_scores, topo_labels.float()) topo_acc = get_accuracy_bin(topo_scores, topo_labels) cls_vecs, batch_idx, cls_labs, icls_labs = zip_tensors(all_cls_preds) cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, cls_vecs, cls_labs) cls_loss = self.cls_loss(cls_scores, cls_labs) + self.icls_loss(icls_scores, icls_labs) cls_acc = get_accuracy(cls_scores, cls_labs) icls_acc = get_accuracy(icls_scores, 
icls_labs) if len(all_assm_preds) > 0: assm_vecs, batch_idx, assm_labels = zip_tensors(all_assm_preds) assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, assm_vecs) assm_loss = self.assm_loss(assm_scores, assm_labels) assm_acc = get_accuracy_sym(assm_scores, assm_labels) else: assm_loss, assm_acc = 0, 1 loss = (topo_loss + cls_loss + assm_loss) / batch_size return loss, cls_acc, icls_acc, topo_acc, assm_acc def enum_attach(self, hgraph, cands, icls, nth_child): cands = self.itensor.new_tensor(cands) icls_vecs = self.itensor.new_tensor(icls * len(cands)) icls_vecs = self.E_assm( icls_vecs ) nth_child = self.itensor.new_tensor([nth_child] * len(cands.view(-1))) order_vecs = self.E_order.index_select(0, nth_child) cand_vecs = hgraph.node.index_select(0, cands.view(-1)) cand_vecs = torch.cat( [cand_vecs, icls_vecs, order_vecs], dim=-1 ) cand_vecs = self.matchNN(cand_vecs) if len(icls) == 2: cand_vecs = cand_vecs.view(-1, 2, self.hidden_size).sum(dim=1) return cand_vecs def decode(self, src_mol_vecs, greedy=True, max_decode_step=100, beam=5): src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs batch_size = len(src_root_vecs) tree_batch = IncTree(batch_size, node_fdim=2, edge_fdim=3) graph_batch = IncGraph(self.avocab, batch_size, node_fdim=self.hmpn.atom_size, edge_fdim=self.hmpn.atom_size + self.hmpn.bond_size) stack = [[] for i in range(batch_size)] init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs) batch_idx = self.itensor.new_tensor(range(batch_size)) cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, init_vecs, None) root_cls = cls_scores.max(dim=-1)[1] icls_scores = icls_scores + self.vocab.get_mask(root_cls) root_cls, root_icls = root_cls.tolist(), icls_scores.max(dim=-1)[1].tolist() super_root = tree_batch.add_node() for bid in range(batch_size): clab, ilab = root_cls[bid], root_icls[bid] root_idx = tree_batch.add_node( batch_idx.new_tensor([clab, ilab]) ) 
tree_batch.add_edge(super_root, root_idx) stack[bid].append(root_idx) root_smiles = self.vocab.get_ismiles(ilab) new_atoms, new_bonds, attached = graph_batch.add_mol(bid, root_smiles, [], 0) tree_batch.register_cgraph(root_idx, new_atoms, new_bonds, attached) #invariance: tree_tensors is equal to inter_tensors (but inter_tensor's init_vec is 0) tree_tensors = tree_batch.get_tensors() graph_tensors = graph_batch.get_tensors() htree = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) ) hinter = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) ) hgraph = HTuple( mess = self.rnn_cell.get_init_state(graph_tensors[1]) ) h = self.rnn_cell.get_hidden_state(htree.mess) h[1 : batch_size + 1] = init_vecs #wiring root (only for tree, not inter) for t in range(max_decode_step): batch_list = [ bid for bid in range(batch_size) if len(stack[bid]) > 0 ] if len(batch_list) == 0: break batch_idx = batch_idx.new_tensor(batch_list) cur_tree_nodes = [stack[bid][-1] for bid in batch_list] subtree = batch_idx.new_tensor(cur_tree_nodes), batch_idx.new_tensor([]) subgraph = batch_idx.new_tensor( tree_batch.get_cluster_nodes(cur_tree_nodes) ), batch_idx.new_tensor( tree_batch.get_cluster_edges(cur_tree_nodes) ) htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph) topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, htree.node.index_select(0, subtree[0])) topo_scores = torch.sigmoid(topo_scores) if greedy: topo_preds = topo_scores.tolist() else: topo_preds = torch.bernoulli(topo_scores).tolist() new_mess = [] expand_list = [] for i,bid in enumerate(batch_list): if topo_preds[i] > 0.5 and tree_batch.can_expand(stack[bid][-1]): expand_list.append( (len(new_mess), bid) ) new_node = tree_batch.add_node() #new node label is yet to be predicted edge_feature = batch_idx.new_tensor( [stack[bid][-1], new_node, 0] ) #parent to child is 0 new_edge = tree_batch.add_edge(stack[bid][-1], new_node, edge_feature) 
stack[bid].append(new_node) new_mess.append(new_edge) else: child = stack[bid].pop() if len(stack[bid]) > 0: nth_child = tree_batch.graph.in_degree(stack[bid][-1]) #edge child -> father has not established edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] ) new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature) new_mess.append(new_edge) subtree = subtree[0], batch_idx.new_tensor(new_mess) subgraph = [], [] htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph) cur_mess = self.rnn_cell.get_hidden_state(htree.mess).index_select(0, subtree[1]) if len(expand_list) > 0: idx_in_mess, expand_list = zip(*expand_list) idx_in_mess = batch_idx.new_tensor( idx_in_mess ) expand_idx = batch_idx.new_tensor( expand_list ) forward_mess = cur_mess.index_select(0, idx_in_mess) cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, expand_idx, forward_mess, None) scores, cls_topk, icls_topk = hier_topk(cls_scores, icls_scores, self.vocab, beam) if not greedy: scores = torch.exp(scores) #score is output of log_softmax shuf_idx = torch.multinomial(scores, beam, replacement=True).tolist() for i,bid in enumerate(expand_list): new_node, fa_node = stack[bid][-1], stack[bid][-2] success = False cls_beam = range(beam) if greedy else shuf_idx[i] for kk in cls_beam: #try until one is chemically valid if success: break clab, ilab = cls_topk[i][kk], icls_topk[i][kk] node_feature = batch_idx.new_tensor( [clab, ilab] ) tree_batch.set_node_feature(new_node, node_feature) smiles, ismiles = self.vocab.get_smiles(clab), self.vocab.get_ismiles(ilab) fa_cluster, _, fa_used = tree_batch.get_cluster(fa_node) inter_cands, anchor_smiles, attach_points = graph_batch.get_assm_cands(fa_cluster, fa_used, ismiles) if len(inter_cands) == 0: continue elif len(inter_cands) == 1: sorted_cands = [(inter_cands[0], 0)] nth_child = 0 else: nth_child = tree_batch.graph.in_degree(fa_node) icls = [self.vocab[ (smiles,x) 
][1] for x in anchor_smiles] cands = inter_cands if len(attach_points) <= 2 else [ (x[0],x[-1]) for x in inter_cands ] cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child) batch_idx = batch_idx.new_tensor( [bid] * len(inter_cands) ) assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, cand_vecs).tolist() sorted_cands = sorted( list(zip(inter_cands, assm_scores)), key = lambda x:x[1], reverse=True ) for inter_label,_ in sorted_cands: inter_label = list(zip(inter_label, attach_points)) if graph_batch.try_add_mol(bid, ismiles, inter_label): new_atoms, new_bonds, attached = graph_batch.add_mol(bid, ismiles, inter_label, nth_child) tree_batch.register_cgraph(new_node, new_atoms, new_bonds, attached) tree_batch.update_attached(fa_node, inter_label) success = True break if not success: #force backtrack child = stack[bid].pop() #pop the dummy new_node which can't be added nth_child = tree_batch.graph.in_degree(stack[bid][-1]) edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] ) new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature) child = stack[bid].pop() if len(stack[bid]) > 0: nth_child = tree_batch.graph.in_degree(stack[bid][-1]) edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] ) new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature) return graph_batch.get_mol()
50.67366
161
0.613874
import torch import torch.nn as nn import rdkit.Chem as Chem import torch.nn.functional as F from hgraph.nnutils import * from hgraph.encoder import IncHierMPNEncoder from hgraph.mol_graph import MolGraph from hgraph.inc_graph import IncTree, IncGraph class HTuple(): def __init__(self, node=None, mess=None, vmask=None, emask=None): self.node, self.mess = node, mess self.vmask, self.emask = vmask, emask class HierMPNDecoder(nn.Module): def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, latent_size, depthT, depthG, dropout, attention=False): super(HierMPNDecoder, self).__init__() self.vocab = vocab self.avocab = avocab self.hidden_size = hidden_size self.embed_size = embed_size self.latent_size = latent_size self.use_attention = attention self.itensor = torch.LongTensor([]).cuda() self.hmpn = IncHierMPNEncoder(vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout) self.rnn_cell = self.hmpn.tree_encoder.rnn self.E_assm = self.hmpn.E_i self.E_order = torch.eye(MolGraph.MAX_POS).cuda() self.topoNN = nn.Sequential( nn.Linear(hidden_size + latent_size, hidden_size), nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, 1) ) self.clsNN = nn.Sequential( nn.Linear(hidden_size + latent_size, hidden_size), nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, vocab.size()[0]) ) self.iclsNN = nn.Sequential( nn.Linear(hidden_size + latent_size, hidden_size), nn.ReLU(), nn.Dropout(dropout), nn.Linear(hidden_size, vocab.size()[1]) ) self.matchNN = nn.Sequential( nn.Linear(hidden_size + embed_size + MolGraph.MAX_POS, hidden_size), nn.ReLU(), ) self.W_assm = nn.Linear(hidden_size, latent_size) if latent_size != hidden_size: self.W_root = nn.Linear(latent_size, hidden_size) if self.use_attention: self.A_topo = nn.Linear(hidden_size, latent_size) self.A_cls = nn.Linear(hidden_size, latent_size) self.A_assm = nn.Linear(hidden_size, latent_size) self.topo_loss = nn.BCEWithLogitsLoss(size_average=False) self.cls_loss = 
nn.CrossEntropyLoss(size_average=False) self.icls_loss = nn.CrossEntropyLoss(size_average=False) self.assm_loss = nn.CrossEntropyLoss(size_average=False) def apply_tree_mask(self, tensors, cur, prev): fnode, fmess, agraph, bgraph, cgraph, scope = tensors agraph = agraph * index_select_ND(cur.emask, 0, agraph) bgraph = bgraph * index_select_ND(cur.emask, 0, bgraph) cgraph = cgraph * index_select_ND(prev.vmask, 0, cgraph) return fnode, fmess, agraph, bgraph, cgraph, scope def apply_graph_mask(self, tensors, hgraph): fnode, fmess, agraph, bgraph, scope = tensors agraph = agraph * index_select_ND(hgraph.emask, 0, agraph) bgraph = bgraph * index_select_ND(hgraph.emask, 0, bgraph) return fnode, fmess, agraph, bgraph, scope def update_graph_mask(self, graph_batch, new_atoms, hgraph): new_atom_index = hgraph.vmask.new_tensor(new_atoms) hgraph.vmask.scatter_(0, new_atom_index, 1) new_atom_set = set(new_atoms) new_bonds = [] for zid in new_atoms: for nid in graph_batch[zid]: if nid not in new_atom_set: continue new_bonds.append( graph_batch[zid][nid]['mess_idx'] ) new_bond_index = hgraph.emask.new_tensor(new_bonds) if len(new_bonds) > 0: hgraph.emask.scatter_(0, new_bond_index, 1) return new_atom_index, new_bond_index def init_decoder_state(self, tree_batch, tree_tensors, src_root_vecs): batch_size = len(src_root_vecs) num_mess = len(tree_tensors[1]) agraph = tree_tensors[2].clone() bgraph = tree_tensors[3].clone() for i,tup in enumerate(tree_tensors[-1]): root = tup[0] assert agraph[root,-1].item() == 0 agraph[root,-1] = num_mess + i for v in tree_batch.successors(root): mess_idx = tree_batch[root][v]['mess_idx'] assert bgraph[mess_idx,-1].item() == 0 bgraph[mess_idx,-1] = num_mess + i new_tree_tensors = tree_tensors[:2] + [agraph, bgraph] + tree_tensors[4:] htree = HTuple() htree.mess = self.rnn_cell.get_init_state(tree_tensors[1], src_root_vecs) htree.emask = torch.cat( [bgraph.new_zeros(num_mess), bgraph.new_ones(batch_size)], dim=0 ) return htree, new_tree_tensors def 
attention(self, src_vecs, batch_idx, queries, W_att): size = batch_idx.size() if batch_idx.dim() > 1: batch_idx = batch_idx.view(-1) queries = queries.view(-1, queries.size(-1)) src_vecs = src_vecs.index_select(0, batch_idx) att_score = torch.bmm( src_vecs, W_att(queries).unsqueeze(-1) ) att_vecs = F.softmax(att_score, dim=1) * src_vecs att_vecs = att_vecs.sum(dim=1) return att_vecs if len(size) == 1 else att_vecs.view(size[0], size[1], -1) def get_topo_score(self, src_tree_vecs, batch_idx, topo_vecs): if self.use_attention: topo_cxt = self.attention(src_tree_vecs, batch_idx, topo_vecs, self.A_topo) else: topo_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0) return self.topoNN( torch.cat([topo_vecs, topo_cxt], dim=-1) ).squeeze(-1) def get_cls_score(self, src_tree_vecs, batch_idx, cls_vecs, cls_labs): if self.use_attention: cls_cxt = self.attention(src_tree_vecs, batch_idx, cls_vecs, self.A_cls) else: cls_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0) cls_vecs = torch.cat([cls_vecs, cls_cxt], dim=-1) cls_scores = self.clsNN(cls_vecs) if cls_labs is None: icls_scores = self.iclsNN(cls_vecs) else: vocab_masks = self.vocab.get_mask(cls_labs) icls_scores = self.iclsNN(cls_vecs) + vocab_masks return cls_scores, icls_scores def get_assm_score(self, src_graph_vecs, batch_idx, assm_vecs): if self.use_attention: assm_cxt = self.attention(src_graph_vecs, batch_idx, assm_vecs, self.A_assm) else: assm_cxt = index_select_ND(src_graph_vecs, 0, batch_idx) return (self.W_assm(assm_vecs) * assm_cxt).sum(dim=-1) def forward(self, src_mol_vecs, graphs, tensors, orders): batch_size = len(orders) tree_batch, graph_batch = graphs tree_tensors, graph_tensors = tensors inter_tensors = tree_tensors src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs) htree, tree_tensors = self.init_decoder_state(tree_batch, tree_tensors, init_vecs) hinter = HTuple( mess = 
self.rnn_cell.get_init_state(inter_tensors[1]), emask = self.itensor.new_zeros(inter_tensors[1].size(0)) ) hgraph = HTuple( mess = self.rnn_cell.get_init_state(graph_tensors[1]), vmask = self.itensor.new_zeros(graph_tensors[0].size(0)), emask = self.itensor.new_zeros(graph_tensors[1].size(0)) ) all_topo_preds, all_cls_preds, all_assm_preds = [], [], [] new_atoms = [] tree_scope = tree_tensors[-1] for i in range(batch_size): root = tree_batch.nodes[ tree_scope[i][0] ] clab, ilab = self.vocab[ root['label'] ] all_cls_preds.append( (init_vecs[i], i, clab, ilab) ) new_atoms.extend(root['cluster']) subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph) graph_tensors = self.hmpn.embed_graph(graph_tensors) + (graph_tensors[-1],) maxt = max([len(x) for x in orders]) max_cls_size = max( [len(attr) * 2 for node,attr in tree_batch.nodes(data='cluster')] ) for t in range(maxt): batch_list = [i for i in range(batch_size) if t < len(orders[i])] assert htree.emask[0].item() == 0 and hinter.emask[0].item() == 0 and hgraph.vmask[0].item() == 0 and hgraph.emask[0].item() == 0 subtree = [], [] for i in batch_list: xid, yid, tlab = orders[i][t] subtree[0].append(xid) if yid is not None: mess_idx = tree_batch[xid][yid]['mess_idx'] subtree[1].append(mess_idx) subtree = htree.emask.new_tensor(subtree[0]), htree.emask.new_tensor(subtree[1]) htree.emask.scatter_(0, subtree[1], 1) hinter.emask.scatter_(0, subtree[1], 1) cur_tree_tensors = self.apply_tree_mask(tree_tensors, htree, hgraph) cur_inter_tensors = self.apply_tree_mask(inter_tensors, hinter, hgraph) cur_graph_tensors = self.apply_graph_mask(graph_tensors, hgraph) htree, hinter, hgraph = self.hmpn(cur_tree_tensors, cur_inter_tensors, cur_graph_tensors, htree, hinter, hgraph, subtree, subgraph) new_atoms = [] for i in batch_list: xid, yid, tlab = orders[i][t] all_topo_preds.append( (htree.node[xid], i, tlab) ) if yid is not None: mess_idx = tree_batch[xid][yid]['mess_idx'] new_atoms.extend( tree_batch.nodes[yid]['cluster'] 
) if tlab == 0: continue cls = tree_batch.nodes[yid]['smiles'] clab, ilab = self.vocab[ tree_batch.nodes[yid]['label'] ] mess_idx = tree_batch[xid][yid]['mess_idx'] hmess = self.rnn_cell.get_hidden_state(htree.mess) all_cls_preds.append( (hmess[mess_idx], i, clab, ilab) ) inter_label = tree_batch.nodes[yid]['inter_label'] inter_label = [ (pos, self.vocab[(cls, icls)][1]) for pos,icls in inter_label ] inter_size = self.vocab.get_inter_size(ilab) if len(tree_batch.nodes[xid]['cluster']) > 2: nth_child = tree_batch[yid][xid]['label'] cands = tree_batch.nodes[yid]['assm_cands'] icls = list(zip(*inter_label))[1] cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child) if len(cand_vecs) < max_cls_size: pad_len = max_cls_size - len(cand_vecs) cand_vecs = F.pad(cand_vecs, (0,0,0,pad_len)) batch_idx = hgraph.emask.new_tensor( [i] * max_cls_size ) all_assm_preds.append( (cand_vecs, batch_idx, 0) ) subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph) topo_vecs, batch_idx, topo_labels = zip_tensors(all_topo_preds) topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, topo_vecs) topo_loss = self.topo_loss(topo_scores, topo_labels.float()) topo_acc = get_accuracy_bin(topo_scores, topo_labels) cls_vecs, batch_idx, cls_labs, icls_labs = zip_tensors(all_cls_preds) cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, cls_vecs, cls_labs) cls_loss = self.cls_loss(cls_scores, cls_labs) + self.icls_loss(icls_scores, icls_labs) cls_acc = get_accuracy(cls_scores, cls_labs) icls_acc = get_accuracy(icls_scores, icls_labs) if len(all_assm_preds) > 0: assm_vecs, batch_idx, assm_labels = zip_tensors(all_assm_preds) assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, assm_vecs) assm_loss = self.assm_loss(assm_scores, assm_labels) assm_acc = get_accuracy_sym(assm_scores, assm_labels) else: assm_loss, assm_acc = 0, 1 loss = (topo_loss + cls_loss + assm_loss) / batch_size return loss, cls_acc, icls_acc, topo_acc, assm_acc def enum_attach(self, 
hgraph, cands, icls, nth_child): cands = self.itensor.new_tensor(cands) icls_vecs = self.itensor.new_tensor(icls * len(cands)) icls_vecs = self.E_assm( icls_vecs ) nth_child = self.itensor.new_tensor([nth_child] * len(cands.view(-1))) order_vecs = self.E_order.index_select(0, nth_child) cand_vecs = hgraph.node.index_select(0, cands.view(-1)) cand_vecs = torch.cat( [cand_vecs, icls_vecs, order_vecs], dim=-1 ) cand_vecs = self.matchNN(cand_vecs) if len(icls) == 2: cand_vecs = cand_vecs.view(-1, 2, self.hidden_size).sum(dim=1) return cand_vecs def decode(self, src_mol_vecs, greedy=True, max_decode_step=100, beam=5): src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs batch_size = len(src_root_vecs) tree_batch = IncTree(batch_size, node_fdim=2, edge_fdim=3) graph_batch = IncGraph(self.avocab, batch_size, node_fdim=self.hmpn.atom_size, edge_fdim=self.hmpn.atom_size + self.hmpn.bond_size) stack = [[] for i in range(batch_size)] init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs) batch_idx = self.itensor.new_tensor(range(batch_size)) cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, init_vecs, None) root_cls = cls_scores.max(dim=-1)[1] icls_scores = icls_scores + self.vocab.get_mask(root_cls) root_cls, root_icls = root_cls.tolist(), icls_scores.max(dim=-1)[1].tolist() super_root = tree_batch.add_node() for bid in range(batch_size): clab, ilab = root_cls[bid], root_icls[bid] root_idx = tree_batch.add_node( batch_idx.new_tensor([clab, ilab]) ) tree_batch.add_edge(super_root, root_idx) stack[bid].append(root_idx) root_smiles = self.vocab.get_ismiles(ilab) new_atoms, new_bonds, attached = graph_batch.add_mol(bid, root_smiles, [], 0) tree_batch.register_cgraph(root_idx, new_atoms, new_bonds, attached) tree_tensors = tree_batch.get_tensors() graph_tensors = graph_batch.get_tensors() htree = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) ) hinter = HTuple( mess = 
self.rnn_cell.get_init_state(tree_tensors[1]) ) hgraph = HTuple( mess = self.rnn_cell.get_init_state(graph_tensors[1]) ) h = self.rnn_cell.get_hidden_state(htree.mess) h[1 : batch_size + 1] = init_vecs #wiring root (only for tree, not inter) for t in range(max_decode_step): batch_list = [ bid for bid in range(batch_size) if len(stack[bid]) > 0 ] if len(batch_list) == 0: break batch_idx = batch_idx.new_tensor(batch_list) cur_tree_nodes = [stack[bid][-1] for bid in batch_list] subtree = batch_idx.new_tensor(cur_tree_nodes), batch_idx.new_tensor([]) subgraph = batch_idx.new_tensor( tree_batch.get_cluster_nodes(cur_tree_nodes) ), batch_idx.new_tensor( tree_batch.get_cluster_edges(cur_tree_nodes) ) htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph) topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, htree.node.index_select(0, subtree[0])) topo_scores = torch.sigmoid(topo_scores) if greedy: topo_preds = topo_scores.tolist() else: topo_preds = torch.bernoulli(topo_scores).tolist() new_mess = [] expand_list = [] for i,bid in enumerate(batch_list): if topo_preds[i] > 0.5 and tree_batch.can_expand(stack[bid][-1]): expand_list.append( (len(new_mess), bid) ) new_node = tree_batch.add_node() #new node label is yet to be predicted edge_feature = batch_idx.new_tensor( [stack[bid][-1], new_node, 0] ) #parent to child is 0 new_edge = tree_batch.add_edge(stack[bid][-1], new_node, edge_feature) stack[bid].append(new_node) new_mess.append(new_edge) else: child = stack[bid].pop() if len(stack[bid]) > 0: nth_child = tree_batch.graph.in_degree(stack[bid][-1]) #edge child -> father has not established edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] ) new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature) new_mess.append(new_edge) subtree = subtree[0], batch_idx.new_tensor(new_mess) subgraph = [], [] htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, 
htree, hinter, hgraph, subtree, subgraph) cur_mess = self.rnn_cell.get_hidden_state(htree.mess).index_select(0, subtree[1]) if len(expand_list) > 0: idx_in_mess, expand_list = zip(*expand_list) idx_in_mess = batch_idx.new_tensor( idx_in_mess ) expand_idx = batch_idx.new_tensor( expand_list ) forward_mess = cur_mess.index_select(0, idx_in_mess) cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, expand_idx, forward_mess, None) scores, cls_topk, icls_topk = hier_topk(cls_scores, icls_scores, self.vocab, beam) if not greedy: scores = torch.exp(scores) #score is output of log_softmax shuf_idx = torch.multinomial(scores, beam, replacement=True).tolist() for i,bid in enumerate(expand_list): new_node, fa_node = stack[bid][-1], stack[bid][-2] success = False cls_beam = range(beam) if greedy else shuf_idx[i] for kk in cls_beam: #try until one is chemically valid if success: break clab, ilab = cls_topk[i][kk], icls_topk[i][kk] node_feature = batch_idx.new_tensor( [clab, ilab] ) tree_batch.set_node_feature(new_node, node_feature) smiles, ismiles = self.vocab.get_smiles(clab), self.vocab.get_ismiles(ilab) fa_cluster, _, fa_used = tree_batch.get_cluster(fa_node) inter_cands, anchor_smiles, attach_points = graph_batch.get_assm_cands(fa_cluster, fa_used, ismiles) if len(inter_cands) == 0: continue elif len(inter_cands) == 1: sorted_cands = [(inter_cands[0], 0)] nth_child = 0 else: nth_child = tree_batch.graph.in_degree(fa_node) icls = [self.vocab[ (smiles,x) ][1] for x in anchor_smiles] cands = inter_cands if len(attach_points) <= 2 else [ (x[0],x[-1]) for x in inter_cands ] cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child) batch_idx = batch_idx.new_tensor( [bid] * len(inter_cands) ) assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, cand_vecs).tolist() sorted_cands = sorted( list(zip(inter_cands, assm_scores)), key = lambda x:x[1], reverse=True ) for inter_label,_ in sorted_cands: inter_label = list(zip(inter_label, attach_points)) if 
graph_batch.try_add_mol(bid, ismiles, inter_label): new_atoms, new_bonds, attached = graph_batch.add_mol(bid, ismiles, inter_label, nth_child) tree_batch.register_cgraph(new_node, new_atoms, new_bonds, attached) tree_batch.update_attached(fa_node, inter_label) success = True break if not success: #force backtrack child = stack[bid].pop() #pop the dummy new_node which can't be added nth_child = tree_batch.graph.in_degree(stack[bid][-1]) edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] ) new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature) child = stack[bid].pop() if len(stack[bid]) > 0: nth_child = tree_batch.graph.in_degree(stack[bid][-1]) edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] ) new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature) return graph_batch.get_mol()
true
true
f70338ff7323a5c76ee61cfe7dd8a5e65737c22d
13,064
py
Python
ml/rl/test/test_normalization.py
johncliu/Horizon
cfa7a873ada5de3bb01e78e2f237d9849b8270b2
[ "BSD-3-Clause" ]
null
null
null
ml/rl/test/test_normalization.py
johncliu/Horizon
cfa7a873ada5de3bb01e78e2f237d9849b8270b2
[ "BSD-3-Clause" ]
1
2021-08-25T16:13:32.000Z
2021-08-25T16:13:32.000Z
ml/rl/test/test_normalization.py
johncliu/Horizon
cfa7a873ada5de3bb01e78e2f237d9849b8270b2
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import unittest import numpy as np import numpy.testing as npt import six from caffe2.python import core, workspace from ml.rl.caffe_utils import C2 from ml.rl.preprocessing import identify_types, normalization from ml.rl.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM from ml.rl.preprocessing.normalization import ( NormalizationParameters, sort_features_by_normalization, ) from ml.rl.preprocessing.preprocessor_net import PreprocessorNet from ml.rl.test.preprocessing_util import ( BOXCOX_FEATURE_ID, ENUM_FEATURE_ID, PROBABILITY_FEATURE_ID, id_to_type, read_data, ) from ml.rl.test.utils import NumpyFeatureProcessor from scipy import special class TestNormalization(unittest.TestCase): def _feature_type_override(self, feature_id): """ This should only be used to test CONTINUOUS_ACTION """ if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION: return identify_types.CONTINUOUS_ACTION return None def test_prepare_normalization_and_normalize(self): feature_value_map = read_data() normalization_parameters = {} for name, values in feature_value_map.items(): normalization_parameters[name] = normalization.identify_parameter( values, 10, feature_type=self._feature_type_override(name) ) for k, v in normalization_parameters.items(): if id_to_type(k) == CONTINUOUS: self.assertEqual(v.feature_type, CONTINUOUS) self.assertIs(v.boxcox_lambda, None) self.assertIs(v.boxcox_shift, None) elif id_to_type(k) == BOXCOX: self.assertEqual(v.feature_type, BOXCOX) self.assertIsNot(v.boxcox_lambda, None) self.assertIsNot(v.boxcox_shift, None) else: assert v.feature_type == id_to_type(k) sorted_features, _ = sort_features_by_normalization(normalization_parameters) norm_net = core.Net("net") C2.set_net(norm_net) preprocessor = PreprocessorNet() input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32) for i, feature in enumerate(sorted_features): input_matrix[:, i] = 
feature_value_map[feature] input_matrix_blob = "input_matrix_blob" workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32)) output_blob, _ = preprocessor.normalize_dense_matrix( input_matrix_blob, sorted_features, normalization_parameters, "", False ) workspace.FeedBlob(input_matrix_blob, input_matrix) workspace.RunNetOnce(norm_net) normalized_feature_matrix = workspace.FetchBlob(output_blob) normalized_features = {} on_column = 0 for feature in sorted_features: norm = normalization_parameters[feature] if norm.feature_type == ENUM: column_size = len(norm.possible_values) else: column_size = 1 normalized_features[feature] = normalized_feature_matrix[ :, on_column : (on_column + column_size) ] on_column += column_size self.assertTrue( all( [ np.isfinite(parameter.stddev) and np.isfinite(parameter.mean) for parameter in normalization_parameters.values() ] ) ) for k, v in six.iteritems(normalized_features): self.assertTrue(np.all(np.isfinite(v))) feature_type = normalization_parameters[k].feature_type if feature_type == identify_types.PROBABILITY: sigmoidv = special.expit(v) self.assertTrue( np.all( np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1)) ) ) elif feature_type == identify_types.ENUM: possible_values = normalization_parameters[k].possible_values self.assertEqual(v.shape[0], len(feature_value_map[k])) self.assertEqual(v.shape[1], len(possible_values)) possible_value_map = {} for i, possible_value in enumerate(possible_values): possible_value_map[possible_value] = i for i, row in enumerate(v): original_feature = feature_value_map[k][i] self.assertEqual( possible_value_map[original_feature], np.where(row == 1)[0][0] ) elif feature_type == identify_types.QUANTILE: for i, feature in enumerate(v[0]): original_feature = feature_value_map[k][i] expected = NumpyFeatureProcessor.value_to_quantile( original_feature, normalization_parameters[k].quantiles ) self.assertAlmostEqual(feature, expected, 2) elif feature_type == identify_types.BINARY: 
pass elif ( feature_type == identify_types.CONTINUOUS or feature_type == identify_types.BOXCOX ): one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01) zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01) zero_mean = np.isclose(np.mean(v), 0, atol=0.01) self.assertTrue( np.all(zero_mean), "mean of feature {} is {}, not 0".format(k, np.mean(v)), ) self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev))) elif feature_type == identify_types.CONTINUOUS_ACTION: less_than_max = v < 1 more_than_min = v > -1 self.assertTrue( np.all(less_than_max), "values are not less than 1: {}".format(v[less_than_max == False]), ) self.assertTrue( np.all(more_than_min), "values are not more than -1: {}".format(v[more_than_min == False]), ) else: raise NotImplementedError() def test_normalize_dense_matrix_enum(self): normalization_parameters = { 1: NormalizationParameters( identify_types.ENUM, None, None, None, None, [12, 4, 2], None, None, None, ), 2: NormalizationParameters( identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None ), 3: NormalizationParameters( identify_types.ENUM, None, None, None, None, [15, 3], None, None, None ), } norm_net = core.Net("net") C2.set_net(norm_net) preprocessor = PreprocessorNet() inputs = np.zeros([4, 3], dtype=np.float32) feature_ids = [2, 1, 3] # Sorted according to feature type inputs[:, feature_ids.index(1)] = [12, 4, 2, 2] inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0] inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE] input_blob = C2.NextBlob("input_blob") workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32)) normalized_output_blob, _ = preprocessor.normalize_dense_matrix( input_blob, feature_ids, normalization_parameters, "", False ) workspace.FeedBlob(input_blob, inputs) workspace.RunNetOnce(norm_net) normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob) np.testing.assert_allclose( np.array( [ [1.0, 1, 0, 0, 1, 0], [2.0, 0, 1, 0, 0, 1], [3.0, 0, 0, 1, 1, 0], 
[3.0, 0, 0, 1, 0, 0], # Missing values should go to all 0 ] ), normalized_feature_matrix, ) def test_persistency(self): feature_value_map = read_data() normalization_parameters = {} for name, values in feature_value_map.items(): normalization_parameters[name] = normalization.identify_parameter( values, feature_type=self._feature_type_override(name) ) s = normalization.serialize(normalization_parameters) read_parameters = normalization.deserialize(s) # Unfortunately, Thrift serializatin seems to lose a bit of precision. # Using `==` will be false. self.assertEqual(read_parameters.keys(), normalization_parameters.keys()) for k in normalization_parameters: self.assertEqual( read_parameters[k].feature_type, normalization_parameters[k].feature_type, ) self.assertEqual( read_parameters[k].possible_values, normalization_parameters[k].possible_values, ) for field in [ "boxcox_lambda", "boxcox_shift", "mean", "stddev", "quantiles", "min_value", "max_value", ]: if getattr(normalization_parameters[k], field) is None: self.assertEqual( getattr(read_parameters[k], field), getattr(normalization_parameters[k], field), ) else: npt.assert_allclose( getattr(read_parameters[k], field), getattr(normalization_parameters[k], field), ) def test_preprocessing_network(self): feature_value_map = read_data() normalization_parameters = {} for name, values in feature_value_map.items(): normalization_parameters[name] = normalization.identify_parameter( values, feature_type=self._feature_type_override(name) ) test_features = NumpyFeatureProcessor.preprocess( feature_value_map, normalization_parameters ) net = core.Net("PreprocessingTestNet") C2.set_net(net) preprocessor = PreprocessorNet() name_preprocessed_blob_map = {} for feature_name in feature_value_map: workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32)) preprocessed_blob, _ = preprocessor.preprocess_blob( str(feature_name), [normalization_parameters[feature_name]] ) name_preprocessed_blob_map[feature_name] = 
preprocessed_blob workspace.CreateNet(net) for feature_name, feature_value in six.iteritems(feature_value_map): feature_value = np.expand_dims(feature_value, -1) workspace.FeedBlob(str(feature_name), feature_value) workspace.RunNetOnce(net) for feature_name in feature_value_map: normalized_features = workspace.FetchBlob( name_preprocessed_blob_map[feature_name] ) if feature_name != ENUM_FEATURE_ID: normalized_features = np.squeeze(normalized_features, -1) tolerance = 0.01 if feature_name == BOXCOX_FEATURE_ID: # At the limit, boxcox has some numerical instability tolerance = 0.5 non_matching = np.where( np.logical_not( np.isclose( normalized_features, test_features[feature_name], rtol=tolerance, atol=tolerance, ) ) ) self.assertTrue( np.all( np.isclose( normalized_features, test_features[feature_name], rtol=tolerance, atol=tolerance, ) ), "{} does not match: {} {}".format( feature_name, normalized_features[non_matching].tolist(), test_features[feature_name][non_matching].tolist(), ), ) def test_type_override(self): # Take a feature that should be identified as probability feature_value_map = read_data() probability_values = feature_value_map[PROBABILITY_FEATURE_ID] # And ask for a binary anyways parameter = normalization.identify_parameter( probability_values, feature_type=identify_types.BINARY ) self.assertEqual(parameter.feature_type, "BINARY")
40.44582
88
0.568739
import unittest import numpy as np import numpy.testing as npt import six from caffe2.python import core, workspace from ml.rl.caffe_utils import C2 from ml.rl.preprocessing import identify_types, normalization from ml.rl.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM from ml.rl.preprocessing.normalization import ( NormalizationParameters, sort_features_by_normalization, ) from ml.rl.preprocessing.preprocessor_net import PreprocessorNet from ml.rl.test.preprocessing_util import ( BOXCOX_FEATURE_ID, ENUM_FEATURE_ID, PROBABILITY_FEATURE_ID, id_to_type, read_data, ) from ml.rl.test.utils import NumpyFeatureProcessor from scipy import special class TestNormalization(unittest.TestCase): def _feature_type_override(self, feature_id): if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION: return identify_types.CONTINUOUS_ACTION return None def test_prepare_normalization_and_normalize(self): feature_value_map = read_data() normalization_parameters = {} for name, values in feature_value_map.items(): normalization_parameters[name] = normalization.identify_parameter( values, 10, feature_type=self._feature_type_override(name) ) for k, v in normalization_parameters.items(): if id_to_type(k) == CONTINUOUS: self.assertEqual(v.feature_type, CONTINUOUS) self.assertIs(v.boxcox_lambda, None) self.assertIs(v.boxcox_shift, None) elif id_to_type(k) == BOXCOX: self.assertEqual(v.feature_type, BOXCOX) self.assertIsNot(v.boxcox_lambda, None) self.assertIsNot(v.boxcox_shift, None) else: assert v.feature_type == id_to_type(k) sorted_features, _ = sort_features_by_normalization(normalization_parameters) norm_net = core.Net("net") C2.set_net(norm_net) preprocessor = PreprocessorNet() input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32) for i, feature in enumerate(sorted_features): input_matrix[:, i] = feature_value_map[feature] input_matrix_blob = "input_matrix_blob" workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32)) output_blob, _ = 
preprocessor.normalize_dense_matrix( input_matrix_blob, sorted_features, normalization_parameters, "", False ) workspace.FeedBlob(input_matrix_blob, input_matrix) workspace.RunNetOnce(norm_net) normalized_feature_matrix = workspace.FetchBlob(output_blob) normalized_features = {} on_column = 0 for feature in sorted_features: norm = normalization_parameters[feature] if norm.feature_type == ENUM: column_size = len(norm.possible_values) else: column_size = 1 normalized_features[feature] = normalized_feature_matrix[ :, on_column : (on_column + column_size) ] on_column += column_size self.assertTrue( all( [ np.isfinite(parameter.stddev) and np.isfinite(parameter.mean) for parameter in normalization_parameters.values() ] ) ) for k, v in six.iteritems(normalized_features): self.assertTrue(np.all(np.isfinite(v))) feature_type = normalization_parameters[k].feature_type if feature_type == identify_types.PROBABILITY: sigmoidv = special.expit(v) self.assertTrue( np.all( np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1)) ) ) elif feature_type == identify_types.ENUM: possible_values = normalization_parameters[k].possible_values self.assertEqual(v.shape[0], len(feature_value_map[k])) self.assertEqual(v.shape[1], len(possible_values)) possible_value_map = {} for i, possible_value in enumerate(possible_values): possible_value_map[possible_value] = i for i, row in enumerate(v): original_feature = feature_value_map[k][i] self.assertEqual( possible_value_map[original_feature], np.where(row == 1)[0][0] ) elif feature_type == identify_types.QUANTILE: for i, feature in enumerate(v[0]): original_feature = feature_value_map[k][i] expected = NumpyFeatureProcessor.value_to_quantile( original_feature, normalization_parameters[k].quantiles ) self.assertAlmostEqual(feature, expected, 2) elif feature_type == identify_types.BINARY: pass elif ( feature_type == identify_types.CONTINUOUS or feature_type == identify_types.BOXCOX ): one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01) 
zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01) zero_mean = np.isclose(np.mean(v), 0, atol=0.01) self.assertTrue( np.all(zero_mean), "mean of feature {} is {}, not 0".format(k, np.mean(v)), ) self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev))) elif feature_type == identify_types.CONTINUOUS_ACTION: less_than_max = v < 1 more_than_min = v > -1 self.assertTrue( np.all(less_than_max), "values are not less than 1: {}".format(v[less_than_max == False]), ) self.assertTrue( np.all(more_than_min), "values are not more than -1: {}".format(v[more_than_min == False]), ) else: raise NotImplementedError() def test_normalize_dense_matrix_enum(self): normalization_parameters = { 1: NormalizationParameters( identify_types.ENUM, None, None, None, None, [12, 4, 2], None, None, None, ), 2: NormalizationParameters( identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None ), 3: NormalizationParameters( identify_types.ENUM, None, None, None, None, [15, 3], None, None, None ), } norm_net = core.Net("net") C2.set_net(norm_net) preprocessor = PreprocessorNet() inputs = np.zeros([4, 3], dtype=np.float32) feature_ids = [2, 1, 3] inputs[:, feature_ids.index(1)] = [12, 4, 2, 2] inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0] inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE] input_blob = C2.NextBlob("input_blob") workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32)) normalized_output_blob, _ = preprocessor.normalize_dense_matrix( input_blob, feature_ids, normalization_parameters, "", False ) workspace.FeedBlob(input_blob, inputs) workspace.RunNetOnce(norm_net) normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob) np.testing.assert_allclose( np.array( [ [1.0, 1, 0, 0, 1, 0], [2.0, 0, 1, 0, 0, 1], [3.0, 0, 0, 1, 1, 0], [3.0, 0, 0, 1, 0, 0], ] ), normalized_feature_matrix, ) def test_persistency(self): feature_value_map = read_data() normalization_parameters = {} for name, values in feature_value_map.items(): 
normalization_parameters[name] = normalization.identify_parameter( values, feature_type=self._feature_type_override(name) ) s = normalization.serialize(normalization_parameters) read_parameters = normalization.deserialize(s) self.assertEqual(read_parameters.keys(), normalization_parameters.keys()) for k in normalization_parameters: self.assertEqual( read_parameters[k].feature_type, normalization_parameters[k].feature_type, ) self.assertEqual( read_parameters[k].possible_values, normalization_parameters[k].possible_values, ) for field in [ "boxcox_lambda", "boxcox_shift", "mean", "stddev", "quantiles", "min_value", "max_value", ]: if getattr(normalization_parameters[k], field) is None: self.assertEqual( getattr(read_parameters[k], field), getattr(normalization_parameters[k], field), ) else: npt.assert_allclose( getattr(read_parameters[k], field), getattr(normalization_parameters[k], field), ) def test_preprocessing_network(self): feature_value_map = read_data() normalization_parameters = {} for name, values in feature_value_map.items(): normalization_parameters[name] = normalization.identify_parameter( values, feature_type=self._feature_type_override(name) ) test_features = NumpyFeatureProcessor.preprocess( feature_value_map, normalization_parameters ) net = core.Net("PreprocessingTestNet") C2.set_net(net) preprocessor = PreprocessorNet() name_preprocessed_blob_map = {} for feature_name in feature_value_map: workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32)) preprocessed_blob, _ = preprocessor.preprocess_blob( str(feature_name), [normalization_parameters[feature_name]] ) name_preprocessed_blob_map[feature_name] = preprocessed_blob workspace.CreateNet(net) for feature_name, feature_value in six.iteritems(feature_value_map): feature_value = np.expand_dims(feature_value, -1) workspace.FeedBlob(str(feature_name), feature_value) workspace.RunNetOnce(net) for feature_name in feature_value_map: normalized_features = workspace.FetchBlob( 
name_preprocessed_blob_map[feature_name] ) if feature_name != ENUM_FEATURE_ID: normalized_features = np.squeeze(normalized_features, -1) tolerance = 0.01 if feature_name == BOXCOX_FEATURE_ID: tolerance = 0.5 non_matching = np.where( np.logical_not( np.isclose( normalized_features, test_features[feature_name], rtol=tolerance, atol=tolerance, ) ) ) self.assertTrue( np.all( np.isclose( normalized_features, test_features[feature_name], rtol=tolerance, atol=tolerance, ) ), "{} does not match: {} {}".format( feature_name, normalized_features[non_matching].tolist(), test_features[feature_name][non_matching].tolist(), ), ) def test_type_override(self): feature_value_map = read_data() probability_values = feature_value_map[PROBABILITY_FEATURE_ID] parameter = normalization.identify_parameter( probability_values, feature_type=identify_types.BINARY ) self.assertEqual(parameter.feature_type, "BINARY")
true
true
f703395293ac6a585fa8603d6f52a53ad6dfb7f8
214
py
Python
speech_to_text.py
wizelab8/SmartMirror
bad186d4eceb6b6adfdcef90e7d93abfc04d9d61
[ "MIT" ]
null
null
null
speech_to_text.py
wizelab8/SmartMirror
bad186d4eceb6b6adfdcef90e7d93abfc04d9d61
[ "MIT" ]
null
null
null
speech_to_text.py
wizelab8/SmartMirror
bad186d4eceb6b6adfdcef90e7d93abfc04d9d61
[ "MIT" ]
null
null
null
import speech_recognition as sr r=sr.Recognizer() with sr.Microphone() as source: print("Say Something") sudio=r.listen(source) print("Time over") try: print("Text: "+r.recognize_google(audio)) except: pass
17.833333
42
0.733645
import speech_recognition as sr r=sr.Recognizer() with sr.Microphone() as source: print("Say Something") sudio=r.listen(source) print("Time over") try: print("Text: "+r.recognize_google(audio)) except: pass
true
true
f7033a70f3105413b45ff36d1978c4ae3751b697
110
py
Python
Scrolls/spin_box.py
hemidvsmusayev/Tkinter-learn
3d35d7fedbda92a47450b84e3896e701e95de8cf
[ "MIT" ]
1
2020-08-27T12:31:12.000Z
2020-08-27T12:31:12.000Z
Scrolls/spin_box.py
hemidvsmusayev/Tkinter-learn
3d35d7fedbda92a47450b84e3896e701e95de8cf
[ "MIT" ]
null
null
null
Scrolls/spin_box.py
hemidvsmusayev/Tkinter-learn
3d35d7fedbda92a47450b84e3896e701e95de8cf
[ "MIT" ]
null
null
null
import tkinter from tkinter import * win = Tk() sb = Spinbox(win, from_=0, to=10) sb.pack() win.mainloop()
11
33
0.672727
import tkinter from tkinter import * win = Tk() sb = Spinbox(win, from_=0, to=10) sb.pack() win.mainloop()
true
true
f7033ca4dec08844ca82210c2422ba759e6b77a2
1,026
py
Python
visual_dynamics/envs/servoing_env.py
alexlee-gk/visual_dynamics
90227bb0d0aebb1989117b5c25ca311655ca7cc7
[ "MIT" ]
30
2017-04-05T12:55:09.000Z
2022-03-14T14:31:31.000Z
visual_dynamics/envs/servoing_env.py
alexlee-gk/visual_dynamics
90227bb0d0aebb1989117b5c25ca311655ca7cc7
[ "MIT" ]
1
2017-06-19T02:39:03.000Z
2017-06-19T02:39:03.000Z
visual_dynamics/envs/servoing_env.py
alexlee-gk/visual_dynamics
90227bb0d0aebb1989117b5c25ca311655ca7cc7
[ "MIT" ]
13
2017-04-05T12:55:09.000Z
2021-03-16T01:59:12.000Z
import citysim3d.envs from visual_dynamics.envs import Env class ServoingEnv(citysim3d.envs.SimpleQuadPanda3dServoingEnv, Env): def _get_config(self): config = super(ServoingEnv, self)._get_config() config.update({'env': self.env, 'max_time_steps': self.max_time_steps, 'distance_threshold': self.distance_threshold}) return config # class ServoingEnv(citysim3d.envs.ServoingEnv, Env): # def _get_config(self): # config = super(ServoingEnv, self)._get_config() # config.update({'env': self.env}) # return config # # # class SimpleQuadPanda3dServoingEnv(citysim3d.envs.SimpleQuadPanda3dServoingEnv, ServoingEnv): # def _get_config(self): # config = super(SimpleQuadPanda3dServoingEnv, self)._get_config() # config.update({'env': self.env, # 'max_time_steps': self.max_time_steps, # 'distance_threshold': self.distance_threshold}) # return config
35.37931
95
0.650097
import citysim3d.envs from visual_dynamics.envs import Env class ServoingEnv(citysim3d.envs.SimpleQuadPanda3dServoingEnv, Env): def _get_config(self): config = super(ServoingEnv, self)._get_config() config.update({'env': self.env, 'max_time_steps': self.max_time_steps, 'distance_threshold': self.distance_threshold}) return config
true
true
f7033cd7e4893924e98add48a902ed4e0b88f83b
99
py
Python
mmgen/core/runners/__init__.py
HXWAndCL/mmgeneration
9afb1d740bf56a4ecde5064d5bb2a4e2d777638b
[ "Apache-2.0" ]
1
2021-05-27T13:04:41.000Z
2021-05-27T13:04:41.000Z
mmgen/core/runners/__init__.py
HXWAndCL/mmgeneration
9afb1d740bf56a4ecde5064d5bb2a4e2d777638b
[ "Apache-2.0" ]
null
null
null
mmgen/core/runners/__init__.py
HXWAndCL/mmgeneration
9afb1d740bf56a4ecde5064d5bb2a4e2d777638b
[ "Apache-2.0" ]
null
null
null
from .dynamic_iterbased_runner import DynamicIterBasedRunner __all__ = ['DynamicIterBasedRunner']
24.75
60
0.858586
from .dynamic_iterbased_runner import DynamicIterBasedRunner __all__ = ['DynamicIterBasedRunner']
true
true
f7033d3f1df341c0c9e8478d54d75955524ba2a4
3,159
py
Python
transports/africas_talking/api.py
uw-ictd/mwbase
6a46b5c5459a6bb6e1ba84ea74f689da8efe9687
[ "Apache-2.0" ]
1
2021-07-17T00:18:06.000Z
2021-07-17T00:18:06.000Z
transports/africas_talking/api.py
akettel/mwbase
873b4fe8038f16feba5273990b0eb2109f8f05c6
[ "Apache-2.0" ]
4
2017-08-31T17:09:53.000Z
2018-11-28T06:01:00.000Z
transports/africas_talking/api.py
akettel/mwbase
873b4fe8038f16feba5273990b0eb2109f8f05c6
[ "Apache-2.0" ]
2
2018-09-17T22:06:16.000Z
2021-07-17T00:18:09.000Z
#Django Imports from django.conf import settings #Python Imports import requests, os #Local Imports from .at_utils import AfricasTalkingException #Import Afica's Talking Settings AFRICAS_TALKING_SETTINGS = getattr(settings,'AFRICAS_TALKING',{}) API_KEY = AFRICAS_TALKING_SETTINGS.get('API_KEY',None) USERNAME = AFRICAS_TALKING_SETTINGS.get('USERNAME',None) SHORTCODE = AFRICAS_TALKING_SETTINGS.get('SHORTCODE',None) AFRICAS_TALKING_SEND = AFRICAS_TALKING_SETTINGS.get('SEND',False) AFRICAS_TALKING_API_BASE = 'http://api.africastalking.com/version1' HEADERS = {'Accept': 'application/json','apikey':API_KEY} PARAMS = {'username':USERNAME,'bulkSMSMode':1} if SHORTCODE: PARAMS['from'] = SHORTCODE def send_raw(to,message): if not AFRICAS_TALKING_SEND: raise AfricasTalkingException("Africas Talking called when send not set to True") if API_KEY is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY') if USERNAME is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME') params = {'to':to,'message':message} params.update(PARAMS) send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging') post = requests.post(send_url,data=params,headers=HEADERS) #Raise requests.exceptions.HTTPError if 4XX or 5XX post.raise_for_status() return post.json() def send(to,message): data = send_raw(to,message) ''' Example of JSON Response {u'SMSMessageData': {u'Message': u'Sent to 1/1 Total Cost: USD 0.0109', u'Recipients': [{ u'status': u'Success', #u'status': u'Invalid Phone Number', u'cost': u'KES 1.0000', u'number': u'+254708054321', u'messageId': u'ATXid_b50fada5b1af078f2277cacb58ef2447' }] } } ''' # Return tuple (messageId, messageSuccess, extra_data) recipients = data['SMSMessageData']['Recipients'] if len(recipients) == 1: msg_id = recipients[0]['messageId'] msg_success = recipients[0]['status'] == 'Success' return msg_id, msg_success, {'status':recipients[0]['status']} def balance(): if API_KEY is None: raise 
AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY') if USERNAME is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME') params = {'username':USERNAME} send_url = os.path.join(AFRICAS_TALKING_API_BASE,'user') post = requests.get(send_url,params=params,headers=HEADERS) #Raise requests.exceptions.HTTPError if 4XX or 5XX post.raise_for_status() data = post.json() return data['UserData']['balance'] def fetch(last_received_id=0): if API_KEY is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY') if USERNAME is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME') params = {'username':USERNAME,'lastReceivedId':last_received_id} send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging') post = requests.get(send_url,params=params,headers=HEADERS) return post
31.277228
89
0.703704
from django.conf import settings import requests, os from .at_utils import AfricasTalkingException AFRICAS_TALKING_SETTINGS = getattr(settings,'AFRICAS_TALKING',{}) API_KEY = AFRICAS_TALKING_SETTINGS.get('API_KEY',None) USERNAME = AFRICAS_TALKING_SETTINGS.get('USERNAME',None) SHORTCODE = AFRICAS_TALKING_SETTINGS.get('SHORTCODE',None) AFRICAS_TALKING_SEND = AFRICAS_TALKING_SETTINGS.get('SEND',False) AFRICAS_TALKING_API_BASE = 'http://api.africastalking.com/version1' HEADERS = {'Accept': 'application/json','apikey':API_KEY} PARAMS = {'username':USERNAME,'bulkSMSMode':1} if SHORTCODE: PARAMS['from'] = SHORTCODE def send_raw(to,message): if not AFRICAS_TALKING_SEND: raise AfricasTalkingException("Africas Talking called when send not set to True") if API_KEY is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY') if USERNAME is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME') params = {'to':to,'message':message} params.update(PARAMS) send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging') post = requests.post(send_url,data=params,headers=HEADERS) #Raise requests.exceptions.HTTPError if 4XX or 5XX post.raise_for_status() return post.json() def send(to,message): data = send_raw(to,message) # Return tuple (messageId, messageSuccess, extra_data) recipients = data['SMSMessageData']['Recipients'] if len(recipients) == 1: msg_id = recipients[0]['messageId'] msg_success = recipients[0]['status'] == 'Success' return msg_id, msg_success, {'status':recipients[0]['status']} def balance(): if API_KEY is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY') if USERNAME is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME') params = {'username':USERNAME} send_url = os.path.join(AFRICAS_TALKING_API_BASE,'user') post = requests.get(send_url,params=params,headers=HEADERS) #Raise requests.exceptions.HTTPError if 4XX or 5XX post.raise_for_status() data = post.json() 
return data['UserData']['balance'] def fetch(last_received_id=0): if API_KEY is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY') if USERNAME is None: raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME') params = {'username':USERNAME,'lastReceivedId':last_received_id} send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging') post = requests.get(send_url,params=params,headers=HEADERS) return post
true
true
f7033d4e31ab8da3329a0d30ffb3e92dfaa3387b
1,455
py
Python
third-party/paxos/.waf3-2.0.18-96675f149f50dab16c9e2e6aaf5e787b/waflib/processor.py
shenweihai1/rolis-eurosys2022
59b3fd58144496a9b13415e30b41617b34924323
[ "MIT" ]
1
2022-02-13T13:01:25.000Z
2022-02-13T13:01:25.000Z
third-party/paxos/.waf3-2.0.18-96675f149f50dab16c9e2e6aaf5e787b/waflib/processor.py
shenweihai1/rolis-eurosys2022
59b3fd58144496a9b13415e30b41617b34924323
[ "MIT" ]
null
null
null
third-party/paxos/.waf3-2.0.18-96675f149f50dab16c9e2e6aaf5e787b/waflib/processor.py
shenweihai1/rolis-eurosys2022
59b3fd58144496a9b13415e30b41617b34924323
[ "MIT" ]
null
null
null
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file import os,sys,traceback,base64,signal try: import cPickle except ImportError: import pickle as cPickle try: import subprocess32 as subprocess except ImportError: import subprocess try: TimeoutExpired=subprocess.TimeoutExpired except AttributeError: class TimeoutExpired(Exception): pass def run(): txt=sys.stdin.readline().strip() if not txt: sys.exit(1) [cmd,kwargs,cargs]=cPickle.loads(base64.b64decode(txt)) cargs=cargs or{} if not'close_fds'in kwargs: kwargs['close_fds']=False ret=1 out,err,ex,trace=(None,None,None,None) try: proc=subprocess.Popen(cmd,**kwargs) try: out,err=proc.communicate(**cargs) except TimeoutExpired: if kwargs.get('start_new_session')and hasattr(os,'killpg'): os.killpg(proc.pid,signal.SIGKILL) else: proc.kill() out,err=proc.communicate() exc=TimeoutExpired(proc.args,timeout=cargs['timeout'],output=out) exc.stderr=err raise exc ret=proc.returncode except Exception as e: exc_type,exc_value,tb=sys.exc_info() exc_lines=traceback.format_exception(exc_type,exc_value,tb) trace=str(cmd)+'\n'+''.join(exc_lines) ex=e.__class__.__name__ tmp=[ret,out,err,ex,trace] obj=base64.b64encode(cPickle.dumps(tmp)) sys.stdout.write(obj.decode()) sys.stdout.write('\n') sys.stdout.flush() while 1: try: run() except KeyboardInterrupt: break
25.086207
78
0.740893
import os,sys,traceback,base64,signal try: import cPickle except ImportError: import pickle as cPickle try: import subprocess32 as subprocess except ImportError: import subprocess try: TimeoutExpired=subprocess.TimeoutExpired except AttributeError: class TimeoutExpired(Exception): pass def run(): txt=sys.stdin.readline().strip() if not txt: sys.exit(1) [cmd,kwargs,cargs]=cPickle.loads(base64.b64decode(txt)) cargs=cargs or{} if not'close_fds'in kwargs: kwargs['close_fds']=False ret=1 out,err,ex,trace=(None,None,None,None) try: proc=subprocess.Popen(cmd,**kwargs) try: out,err=proc.communicate(**cargs) except TimeoutExpired: if kwargs.get('start_new_session')and hasattr(os,'killpg'): os.killpg(proc.pid,signal.SIGKILL) else: proc.kill() out,err=proc.communicate() exc=TimeoutExpired(proc.args,timeout=cargs['timeout'],output=out) exc.stderr=err raise exc ret=proc.returncode except Exception as e: exc_type,exc_value,tb=sys.exc_info() exc_lines=traceback.format_exception(exc_type,exc_value,tb) trace=str(cmd)+'\n'+''.join(exc_lines) ex=e.__class__.__name__ tmp=[ret,out,err,ex,trace] obj=base64.b64encode(cPickle.dumps(tmp)) sys.stdout.write(obj.decode()) sys.stdout.write('\n') sys.stdout.flush() while 1: try: run() except KeyboardInterrupt: break
true
true
f7033da129b07b9ecaa7a700cbea4abdb55903f6
9,717
py
Python
IPython/utils/PyColorize.py
flexlee/ipython
7528fbd76073c90262b9ac127de57c4c59b23a5c
[ "BSD-3-Clause-Clear" ]
1
2017-02-09T20:01:11.000Z
2017-02-09T20:01:11.000Z
IPython/utils/PyColorize.py
flexlee/ipython
7528fbd76073c90262b9ac127de57c4c59b23a5c
[ "BSD-3-Clause-Clear" ]
null
null
null
IPython/utils/PyColorize.py
flexlee/ipython
7528fbd76073c90262b9ac127de57c4c59b23a5c
[ "BSD-3-Clause-Clear" ]
null
null
null
# -*- coding: utf-8 -*- """ Class and program to colorize python source code for ANSI terminals. Based on an HTML code highlighter by Jurgen Hermann found at: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298 Modifications by Fernando Perez (fperez@colorado.edu). Information on the original HTML highlighter follows: MoinMoin - Python Source Parser Title: Colorize Python source using the built-in tokenizer Submitter: Jurgen Hermann Last Updated:2001/04/06 Version no:1.2 Description: This code is part of MoinMoin (http://moin.sourceforge.net/) and converts Python source code to HTML markup, rendering comments, keywords, operators, numeric and string literals in different colors. It shows how to use the built-in keyword, token and tokenize modules to scan Python source code and re-emit it with no changes to its original formatting (which is the hard part). """ from __future__ import print_function from __future__ import unicode_literals __all__ = ['ANSICodeColors','Parser'] _scheme_default = 'Linux' # Imports import StringIO import keyword import os import optparse import sys import token import tokenize try: generate_tokens = tokenize.generate_tokens except AttributeError: # Python 3. Note that we use the undocumented _tokenize because it expects # strings, not bytes. See also Python issue #9969. 
generate_tokens = tokenize._tokenize from IPython.utils.coloransi import * ############################################################################# ### Python Source Parser (does Hilighting) ############################################################################# _KEYWORD = token.NT_OFFSET + 1 _TEXT = token.NT_OFFSET + 2 #**************************************************************************** # Builtin color schemes Colors = TermColors # just a shorthand # Build a few color schemes NoColor = ColorScheme( 'NoColor',{ token.NUMBER : Colors.NoColor, token.OP : Colors.NoColor, token.STRING : Colors.NoColor, tokenize.COMMENT : Colors.NoColor, token.NAME : Colors.NoColor, token.ERRORTOKEN : Colors.NoColor, _KEYWORD : Colors.NoColor, _TEXT : Colors.NoColor, 'normal' : Colors.NoColor # color off (usu. Colors.Normal) } ) LinuxColors = ColorScheme( 'Linux',{ token.NUMBER : Colors.LightCyan, token.OP : Colors.Yellow, token.STRING : Colors.LightBlue, tokenize.COMMENT : Colors.LightRed, token.NAME : Colors.Normal, token.ERRORTOKEN : Colors.Red, _KEYWORD : Colors.LightGreen, _TEXT : Colors.Yellow, 'normal' : Colors.Normal # color off (usu. Colors.Normal) } ) LightBGColors = ColorScheme( 'LightBG',{ token.NUMBER : Colors.Cyan, token.OP : Colors.Blue, token.STRING : Colors.Blue, tokenize.COMMENT : Colors.Red, token.NAME : Colors.Normal, token.ERRORTOKEN : Colors.Red, _KEYWORD : Colors.Green, _TEXT : Colors.Blue, 'normal' : Colors.Normal # color off (usu. Colors.Normal) } ) # Build table of color schemes (needed by the parser) ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors], _scheme_default) class Parser: """ Format colored Python source. """ def __init__(self, color_table=None,out = sys.stdout): """ Create a parser with a specified color table and output channel. Call format() to process code. 
""" self.color_table = color_table and color_table or ANSICodeColors self.out = out def format(self, raw, out = None, scheme = ''): return self.format2(raw, out, scheme)[0] def format2(self, raw, out = None, scheme = ''): """ Parse and send the colored source. If out and scheme are not specified, the defaults (given to constructor) are used. out should be a file-type object. Optionally, out can be given as the string 'str' and the parser will automatically return the output in a string.""" string_output = 0 if out == 'str' or self.out == 'str' or \ isinstance(self.out,StringIO.StringIO): # XXX - I don't really like this state handling logic, but at this # point I don't want to make major changes, so adding the # isinstance() check is the simplest I can do to ensure correct # behavior. out_old = self.out self.out = StringIO.StringIO() string_output = 1 elif out is not None: self.out = out # Fast return of the unmodified input for NoColor scheme if scheme == 'NoColor': error = False self.out.write(raw) if string_output: return raw,error else: return None,error # local shorthands colors = self.color_table[scheme].colors self.colors = colors # put in object so __call__ sees it # Remove trailing whitespace and normalize tabs self.raw = raw.expandtabs().rstrip() # store line offsets in self.lines self.lines = [0, 0] pos = 0 raw_find = self.raw.find lines_append = self.lines.append while 1: pos = raw_find('\n', pos) + 1 if not pos: break lines_append(pos) lines_append(len(self.raw)) # parse the source and write it self.pos = 0 text = StringIO.StringIO(self.raw) error = False try: for atoken in generate_tokens(text.readline): self(*atoken) except tokenize.TokenError as ex: msg = ex.args[0] line = ex.args[1][0] self.out.write("%s\n\n*** ERROR: %s%s%s\n" % (colors[token.ERRORTOKEN], msg, self.raw[self.lines[line]:], colors.normal) ) error = True self.out.write(colors.normal+'\n') if string_output: output = self.out.getvalue() self.out = out_old return (output, error) return 
(None, error) def __call__(self, toktype, toktext, start_pos, end_pos, line): """ Token handler, with syntax highlighting.""" (srow,scol) = start_pos (erow,ecol) = end_pos colors = self.colors owrite = self.out.write # line separator, so this works across platforms linesep = os.linesep # calculate new positions oldpos = self.pos newpos = self.lines[srow] + scol self.pos = newpos + len(toktext) # send the original whitespace, if needed if newpos > oldpos: owrite(self.raw[oldpos:newpos]) # skip indenting tokens if toktype in [token.INDENT, token.DEDENT]: self.pos = newpos return # map token type to a color group if token.LPAR <= toktype and toktype <= token.OP: toktype = token.OP elif toktype == token.NAME and keyword.iskeyword(toktext): toktype = _KEYWORD color = colors.get(toktype, colors[_TEXT]) #print '<%s>' % toktext, # dbg # Triple quoted strings must be handled carefully so that backtracking # in pagers works correctly. We need color terminators on _each_ line. if linesep in toktext: toktext = toktext.replace(linesep, '%s%s%s' % (colors.normal,linesep,color)) # send text owrite('%s%s%s' % (color,toktext,colors.normal)) def main(argv=None): """Run as a command-line script: colorize a python file or stdin using ANSI color escapes and print to stdout. Inputs: - argv(None): a list of strings like sys.argv[1:] giving the command-line arguments. If None, use sys.argv[1:]. """ usage_msg = """%prog [options] [filename] Colorize a python file or stdin using ANSI color escapes and print to stdout. If no filename is given, or if filename is -, read standard input.""" parser = optparse.OptionParser(usage=usage_msg) newopt = parser.add_option newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store', choices=['Linux','LightBG','NoColor'],default=_scheme_default, help="give the color scheme to use. 
Currently only 'Linux'\ (default) and 'LightBG' and 'NoColor' are implemented (give without\ quotes)") opts,args = parser.parse_args(argv) if len(args) > 1: parser.error("you must give at most one filename.") if len(args) == 0: fname = '-' # no filename given; setup to read from stdin else: fname = args[0] if fname == '-': stream = sys.stdin else: try: stream = open(fname) except IOError as msg: print(msg, file=sys.stderr) sys.exit(1) parser = Parser() # we need nested try blocks because pre-2.5 python doesn't support unified # try-except-finally try: try: # write colorized version to stdout parser.format(stream.read(),scheme=opts.scheme_name) except IOError as msg: # if user reads through a pager and quits, don't print traceback if msg.args != (32,'Broken pipe'): raise finally: if stream is not sys.stdin: stream.close() # in case a non-handled exception happened above if __name__ == "__main__": main()
31.244373
79
0.594731
from __future__ import print_function from __future__ import unicode_literals __all__ = ['ANSICodeColors','Parser'] _scheme_default = 'Linux' import StringIO import keyword import os import optparse import sys import token import tokenize try: generate_tokens = tokenize.generate_tokens except AttributeError: generate_tokens = tokenize._tokenize from IPython.utils.coloransi import * _KEYWORD = token.NT_OFFSET + 1 _TEXT = token.NT_OFFSET + 2 Colors = TermColors NoColor = ColorScheme( 'NoColor',{ token.NUMBER : Colors.NoColor, token.OP : Colors.NoColor, token.STRING : Colors.NoColor, tokenize.COMMENT : Colors.NoColor, token.NAME : Colors.NoColor, token.ERRORTOKEN : Colors.NoColor, _KEYWORD : Colors.NoColor, _TEXT : Colors.NoColor, 'normal' : Colors.NoColor } ) LinuxColors = ColorScheme( 'Linux',{ token.NUMBER : Colors.LightCyan, token.OP : Colors.Yellow, token.STRING : Colors.LightBlue, tokenize.COMMENT : Colors.LightRed, token.NAME : Colors.Normal, token.ERRORTOKEN : Colors.Red, _KEYWORD : Colors.LightGreen, _TEXT : Colors.Yellow, 'normal' : Colors.Normal } ) LightBGColors = ColorScheme( 'LightBG',{ token.NUMBER : Colors.Cyan, token.OP : Colors.Blue, token.STRING : Colors.Blue, tokenize.COMMENT : Colors.Red, token.NAME : Colors.Normal, token.ERRORTOKEN : Colors.Red, _KEYWORD : Colors.Green, _TEXT : Colors.Blue, 'normal' : Colors.Normal } ) ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors], _scheme_default) class Parser: def __init__(self, color_table=None,out = sys.stdout): self.color_table = color_table and color_table or ANSICodeColors self.out = out def format(self, raw, out = None, scheme = ''): return self.format2(raw, out, scheme)[0] def format2(self, raw, out = None, scheme = ''): string_output = 0 if out == 'str' or self.out == 'str' or \ isinstance(self.out,StringIO.StringIO): # point I don't want to make major changes, so adding the out_old = self.out self.out = StringIO.StringIO() string_output = 1 elif out is not None: self.out = out 
if scheme == 'NoColor': error = False self.out.write(raw) if string_output: return raw,error else: return None,error colors = self.color_table[scheme].colors self.colors = colors self.raw = raw.expandtabs().rstrip() self.lines = [0, 0] pos = 0 raw_find = self.raw.find lines_append = self.lines.append while 1: pos = raw_find('\n', pos) + 1 if not pos: break lines_append(pos) lines_append(len(self.raw)) self.pos = 0 text = StringIO.StringIO(self.raw) error = False try: for atoken in generate_tokens(text.readline): self(*atoken) except tokenize.TokenError as ex: msg = ex.args[0] line = ex.args[1][0] self.out.write("%s\n\n*** ERROR: %s%s%s\n" % (colors[token.ERRORTOKEN], msg, self.raw[self.lines[line]:], colors.normal) ) error = True self.out.write(colors.normal+'\n') if string_output: output = self.out.getvalue() self.out = out_old return (output, error) return (None, error) def __call__(self, toktype, toktext, start_pos, end_pos, line): (srow,scol) = start_pos (erow,ecol) = end_pos colors = self.colors owrite = self.out.write linesep = os.linesep oldpos = self.pos newpos = self.lines[srow] + scol self.pos = newpos + len(toktext) if newpos > oldpos: owrite(self.raw[oldpos:newpos]) if toktype in [token.INDENT, token.DEDENT]: self.pos = newpos return if token.LPAR <= toktype and toktype <= token.OP: toktype = token.OP elif toktype == token.NAME and keyword.iskeyword(toktext): toktype = _KEYWORD color = colors.get(toktype, colors[_TEXT]) if linesep in toktext: toktext = toktext.replace(linesep, '%s%s%s' % (colors.normal,linesep,color)) owrite('%s%s%s' % (color,toktext,colors.normal)) def main(argv=None): usage_msg = """%prog [options] [filename] Colorize a python file or stdin using ANSI color escapes and print to stdout. 
If no filename is given, or if filename is -, read standard input.""" parser = optparse.OptionParser(usage=usage_msg) newopt = parser.add_option newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store', choices=['Linux','LightBG','NoColor'],default=_scheme_default, help="give the color scheme to use. Currently only 'Linux'\ (default) and 'LightBG' and 'NoColor' are implemented (give without\ quotes)") opts,args = parser.parse_args(argv) if len(args) > 1: parser.error("you must give at most one filename.") if len(args) == 0: fname = '-' else: fname = args[0] if fname == '-': stream = sys.stdin else: try: stream = open(fname) except IOError as msg: print(msg, file=sys.stderr) sys.exit(1) parser = Parser() # try-except-finally try: try: # write colorized version to stdout parser.format(stream.read(),scheme=opts.scheme_name) except IOError as msg: # if user reads through a pager and quits, don't print traceback if msg.args != (32,'Broken pipe'): raise finally: if stream is not sys.stdin: stream.close() if __name__ == "__main__": main()
true
true
f7033de84a70c8e86392ecb3d7d2137d33cc8e79
1,685
py
Python
duckbot/util/logger.py
NicholasMarasco/duckbot
4aec17ddb426094229d0ea006520d82a1fc8a057
[ "MIT" ]
4
2018-06-15T18:29:15.000Z
2019-09-12T00:24:57.000Z
duckbot/util/logger.py
NicholasMarasco/duckbot
4aec17ddb426094229d0ea006520d82a1fc8a057
[ "MIT" ]
40
2018-04-26T18:43:52.000Z
2021-06-01T21:55:32.000Z
duckbot/util/logger.py
NicholasMarasco/duckbot
4aec17ddb426094229d0ea006520d82a1fc8a057
[ "MIT" ]
null
null
null
# Last Updated: 2.2 from datetime import datetime from util.diagMessage import DiagMessage # Logger class # Buffers and writes messages to a file class Logger: BUFFER_MAX = 10 DEFAULT_FN = "../log.txt" # Constructor for logger class # Params: fn - file name to use or leave default # log - flag to keep a log file or not # Return: Logger instance def __init__(self, fn = DEFAULT_FN, log = True): #{{{ self.keep_log = log self.fn = fn self.log_buffer = [] if self.keep_log: self.log(DiagMessage("LOG0000I")) #}}} # Append line to internal log buffer, flush if needed # Params: diag - DiagMessage to log # flush - bool flag for flushing buffer early # Return: None def log(self, diag, flush=False): #{{{ if self.keep_log: self.log_buffer.append(str(datetime.now()) + " - " + diag.msg) if len(self.log_buffer) >= self.BUFFER_MAX or flush: self._write() elif not flush: print(diag.msg) #}}} # Write contents of buffer out to file # Params: None # Return: None def _write(self): #{{{ print("Writing log...") if debug else None with open(self.fn,'a') as logfile: for line in self.log_buffer: try: logfile.write(line) except TypeError: logfile.write(str(datetime.now())+" - LOG ERR") except UnicodeEncodeError: logfile.write(str(line.encode("utf-8","replace"))) logfile.write("\n") del self.log_buffer[:] #}}}
30.636364
74
0.554896
from datetime import datetime from util.diagMessage import DiagMessage class Logger: BUFFER_MAX = 10 DEFAULT_FN = "../log.txt" def __init__(self, fn = DEFAULT_FN, log = True): self.keep_log = log self.fn = fn self.log_buffer = [] if self.keep_log: self.log(DiagMessage("LOG0000I")) def log(self, diag, flush=False): if self.keep_log: self.log_buffer.append(str(datetime.now()) + " - " + diag.msg) if len(self.log_buffer) >= self.BUFFER_MAX or flush: self._write() elif not flush: print(diag.msg) def _write(self): print("Writing log...") if debug else None with open(self.fn,'a') as logfile: for line in self.log_buffer: try: logfile.write(line) except TypeError: logfile.write(str(datetime.now())+" - LOG ERR") except UnicodeEncodeError: logfile.write(str(line.encode("utf-8","replace"))) logfile.write("\n") del self.log_buffer[:]
true
true
f7033f127c32c6571eaa25db4bd8797b56f614e4
5,418
py
Python
thimbles/metadata.py
quidditymaster/thimbles
b122654a012f0eb4f043d1ee757f884707c97615
[ "MIT" ]
null
null
null
thimbles/metadata.py
quidditymaster/thimbles
b122654a012f0eb4f043d1ee757f884707c97615
[ "MIT" ]
null
null
null
thimbles/metadata.py
quidditymaster/thimbles
b122654a012f0eb4f043d1ee757f884707c97615
[ "MIT" ]
null
null
null
# Standard Library from copy import deepcopy # 3rd Party # Internal # ########################################################################### # class MetaData (dict): """ A class for holding information about an object """ def __init__ (self,*args,**kwargs): super(MetaData,self).__init__(*args,**kwargs) def __repr__ (self): reprout = 'MetaData {' if len(self) == 0: return reprout + "}" reprout += "\n" for key in self: value = str(repr(self[key])).split("\n") reprout += " "+str(key)+" : " reprout += value[0].strip()+"\n" if len(value) > 1: reprout += " "*(len(key))+" ...\n" reprout += "}\n" return reprout def __str__ (self): return super(MetaData,self).__repr__() def _type_check_other (self,other): if not isinstance(other,dict): raise TypeError("other must be a subclass of dict") def __add__ (self,other): return self.combine(other,key_conflicts='raise') def __iadd__ (self,other): self._type_check_other(other) for key in other: if key in self: continue self[key] = other[key] return self def combine (self,other,key_conflicts='ignore',return_=False): """ Combine two MetaData dictionaries together. Parameters ---------- other : dict subclass Any dictionary object will work including other MetaData Dictionaries key_conflicts : 'ignore' (default), 'merge', 'warn', 'raise' Defined the method to handle key conflicts * ignore : if key is in conflict, keep the current key with no warning * merge : convert key to string and add integers until unique key is found * warn : print a warning message for key conflicts. Keep current key * raise : raise error message for key conflicts. 
return_ : boolean If True then it will keep the data in place and return a copy with with the concatenation Returns ------- info : MetaData Returns an information object with keys and information concatenated from the two Raises ------ KeyError : If key_conflicts=='raise' is True and conflicts exist between two keys Notes ----- __1)__ If a key is in conflict but the data the key refers to is the same then no messages or errors will be raised Special cases ------------- add operator : info1 + info2 This will raise errors for key conflicts between the two iadd operator : info1 += info2 This will ignore key conflicts and always takes info1 keys as default """ self._type_check_other(other) def errmsg (key): return "Warning: key conflict '"+str(key)+"'" key_conflicts = key_conflicts.lower() if return_: out = self.copy() else: out = self if key_conflicts=='merge': for key in other: if key in self and self[key]==other[key]: continue i = 0 base_key = deepcopy(key) while key in self: key = str(base_key)+"_"+str(i) i += 1 out[key] = other[base_key] return out # else: for key in other: if key in self: # if the data's the same don't worry about it if self[key]==other[key]: continue # resolve conflicts if key_conflicts=='raise': raise KeyError(errmsg(key)) elif key_conflicts=='warn': print(errmsg(key)) else: continue out[key] = other[key] if return_: return out def copy (self): return deepcopy(self) def header_list(self): """returns a list of the values belonging to keys beginning header_ """ keys = list(self.keys()) headers = [] for key in keys: try: keystart = key[:7] if keystart == "header_": headers.append(self[key]) except: pass return headers def guess_observation_time(self, headers=None): if headers == None: headers = self.header_list() obs_time = None for hdr in headers: try: obs_time = hdr["ut"] break except: pass return obs_time def guess_airmass(self, headers): if headers == None: headers = self.header_list() airmass = None for hdr in headers: try: airmass = 
hdr["airmass"] break except: pass return airmass def guess_object_name(self): return None
30.268156
89
0.486342
from copy import deepcopy class MetaData (dict): def __init__ (self,*args,**kwargs): super(MetaData,self).__init__(*args,**kwargs) def __repr__ (self): reprout = 'MetaData {' if len(self) == 0: return reprout + "}" reprout += "\n" for key in self: value = str(repr(self[key])).split("\n") reprout += " "+str(key)+" : " reprout += value[0].strip()+"\n" if len(value) > 1: reprout += " "*(len(key))+" ...\n" reprout += "}\n" return reprout def __str__ (self): return super(MetaData,self).__repr__() def _type_check_other (self,other): if not isinstance(other,dict): raise TypeError("other must be a subclass of dict") def __add__ (self,other): return self.combine(other,key_conflicts='raise') def __iadd__ (self,other): self._type_check_other(other) for key in other: if key in self: continue self[key] = other[key] return self def combine (self,other,key_conflicts='ignore',return_=False): self._type_check_other(other) def errmsg (key): return "Warning: key conflict '"+str(key)+"'" key_conflicts = key_conflicts.lower() if return_: out = self.copy() else: out = self if key_conflicts=='merge': for key in other: if key in self and self[key]==other[key]: continue i = 0 base_key = deepcopy(key) while key in self: key = str(base_key)+"_"+str(i) i += 1 out[key] = other[base_key] return out for key in other: if key in self: if self[key]==other[key]: continue if key_conflicts=='raise': raise KeyError(errmsg(key)) elif key_conflicts=='warn': print(errmsg(key)) else: continue out[key] = other[key] if return_: return out def copy (self): return deepcopy(self) def header_list(self): keys = list(self.keys()) headers = [] for key in keys: try: keystart = key[:7] if keystart == "header_": headers.append(self[key]) except: pass return headers def guess_observation_time(self, headers=None): if headers == None: headers = self.header_list() obs_time = None for hdr in headers: try: obs_time = hdr["ut"] break except: pass return obs_time def guess_airmass(self, headers): if headers == None: headers = 
self.header_list() airmass = None for hdr in headers: try: airmass = hdr["airmass"] break except: pass return airmass def guess_object_name(self): return None
true
true
f7033f4a9423ceac627bb81964971329e2cecd25
1,373
py
Python
cfg/dfaccto/event.py
lw0/dfaccto_lib
9162fbe0649db2d8735b7d62b92367488f4d716e
[ "MIT" ]
null
null
null
cfg/dfaccto/event.py
lw0/dfaccto_lib
9162fbe0649db2d8735b7d62b92367488f4d716e
[ "MIT" ]
null
null
null
cfg/dfaccto/event.py
lw0/dfaccto_lib
9162fbe0649db2d8735b7d62b92367488f4d716e
[ "MIT" ]
null
null
null
Inc('dfaccto/util.py', abs=True) class _Event(ModuleContext): def __init__(self): ModuleContext.__init__(self) self._setup_packages() def _setup_packages(self): self.pkg = Pkg('dfaccto_event', x_templates={self.File('generic/package.vhd.tpl'): self.File('pkg/dfaccto_event.vhd')}) with self.pkg: self.tEvent = self.TypeEvent('Event') def TypeEvent(self, name, stb_bits=None, ack_bits=None): tlogic = Util.tlogic if stb_bits is not None: tsdata = Util.TypeUnsigned('{}Strb'.format(name), width=stb_bits) else: tsdata = None if ack_bits is not None: tadata = Util.TypeUnsigned('{}Ack'.format(name), width=ack_bits) else: tadata = None return TypeC(name, x_is_event=True, x_definition=self.Part('types/definition/event.part.tpl'), x_format_ms=self.Part('types/format/event_ms.part.tpl'), x_format_sm=self.Part('types/format/event_sm.part.tpl'), x_wrapeport=self.Part('types/wrapeport/event.part.tpl'), x_wrapeconv=self.Part('types/wrapeconv/event.part.tpl'), x_wrapipmap=self.Part('types/wrapipmap/event.part.tpl'), x_wrapigmap=None, x_tlogic=tlogic, x_tsdata=tsdata, x_tadata=tadata, x_cnull=lambda t: Con('{}Null'.format(name), t, value=Lit({'stb': False, 'ack': False}))) Event = _Event()
32.690476
99
0.658412
Inc('dfaccto/util.py', abs=True) class _Event(ModuleContext): def __init__(self): ModuleContext.__init__(self) self._setup_packages() def _setup_packages(self): self.pkg = Pkg('dfaccto_event', x_templates={self.File('generic/package.vhd.tpl'): self.File('pkg/dfaccto_event.vhd')}) with self.pkg: self.tEvent = self.TypeEvent('Event') def TypeEvent(self, name, stb_bits=None, ack_bits=None): tlogic = Util.tlogic if stb_bits is not None: tsdata = Util.TypeUnsigned('{}Strb'.format(name), width=stb_bits) else: tsdata = None if ack_bits is not None: tadata = Util.TypeUnsigned('{}Ack'.format(name), width=ack_bits) else: tadata = None return TypeC(name, x_is_event=True, x_definition=self.Part('types/definition/event.part.tpl'), x_format_ms=self.Part('types/format/event_ms.part.tpl'), x_format_sm=self.Part('types/format/event_sm.part.tpl'), x_wrapeport=self.Part('types/wrapeport/event.part.tpl'), x_wrapeconv=self.Part('types/wrapeconv/event.part.tpl'), x_wrapipmap=self.Part('types/wrapipmap/event.part.tpl'), x_wrapigmap=None, x_tlogic=tlogic, x_tsdata=tsdata, x_tadata=tadata, x_cnull=lambda t: Con('{}Null'.format(name), t, value=Lit({'stb': False, 'ack': False}))) Event = _Event()
true
true
f7033f7131f37c959b46efa3f188e5997f2d6feb
13,433
py
Python
core/smplx/lbs_.py
boycehbz/DMMR
18fcee7ce584fdccfa08bcda883d9b4fcb962c04
[ "MIT" ]
37
2021-12-15T03:13:38.000Z
2022-03-31T23:01:25.000Z
core/smplx/lbs_.py
boycehbz/DMMR
18fcee7ce584fdccfa08bcda883d9b4fcb962c04
[ "MIT" ]
4
2021-12-21T19:19:25.000Z
2022-03-27T07:50:43.000Z
core/smplx/lbs_.py
boycehbz/DMMR
18fcee7ce584fdccfa08bcda883d9b4fcb962c04
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # You can only use this computer program if you have closed # a license agreement with MPG or you get the right to use the computer # program from someone who is authorized to grant you that right. # Any use of the computer program without a valid license is prohibited and # liable to prosecution. # # Copyright©2019 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # Contact: ps-license@tuebingen.mpg.de from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy as np import torch import torch.nn.functional as F from .utils import rot_mat_to_euler def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx, dynamic_lmk_b_coords, neck_kin_chain, dtype=torch.float32): ''' Compute the faces, barycentric coordinates for the dynamic landmarks To do so, we first compute the rotation of the neck around the y-axis and then use a pre-computed look-up table to find the faces and the barycentric coordinates that will be used. Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de) for providing the original TensorFlow implementation and for the LUT. Parameters ---------- vertices: torch.tensor BxVx3, dtype = torch.float32 The tensor of input vertices pose: torch.tensor Bx(Jx3), dtype = torch.float32 The current pose of the body model dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long The look-up table from neck rotation to faces dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32 The look-up table from neck rotation to barycentric coordinates neck_kin_chain: list A python list that contains the indices of the joints that form the kinematic chain of the neck. 
dtype: torch.dtype, optional Returns ------- dyn_lmk_faces_idx: torch.tensor, dtype = torch.long A tensor of size BxL that contains the indices of the faces that will be used to compute the current dynamic landmarks. dyn_lmk_b_coords: torch.tensor, dtype = torch.float32 A tensor of size BxL that contains the indices of the faces that will be used to compute the current dynamic landmarks. ''' batch_size = vertices.shape[0] aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1, neck_kin_chain) rot_mats = batch_rodrigues( aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3) rel_rot_mat = torch.eye(3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0) for idx in range(len(neck_kin_chain)): rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat) y_rot_angle = torch.round( torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi, max=39)).to(dtype=torch.long) neg_mask = y_rot_angle.lt(0).to(dtype=torch.long) mask = y_rot_angle.lt(-39).to(dtype=torch.long) neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle) y_rot_angle = (neg_mask * neg_vals + (1 - neg_mask) * y_rot_angle) dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx, 0, y_rot_angle) dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords, 0, y_rot_angle) return dyn_lmk_faces_idx, dyn_lmk_b_coords def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords): ''' Calculates landmarks by barycentric interpolation Parameters ---------- vertices: torch.tensor BxVx3, dtype = torch.float32 The tensor of input vertices faces: torch.tensor Fx3, dtype = torch.long The faces of the mesh lmk_faces_idx: torch.tensor L, dtype = torch.long The tensor with the indices of the faces used to calculate the landmarks. 
lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32 The tensor of barycentric coordinates that are used to interpolate the landmarks Returns ------- landmarks: torch.tensor BxLx3, dtype = torch.float32 The coordinates of the landmarks for each mesh in the batch ''' # Extract the indices of the vertices for each face # BxLx3 batch_size, num_verts = vertices.shape[:2] device = vertices.device lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view( batch_size, -1, 3) lmk_faces += torch.arange( batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts lmk_vertices = vertices.view(-1, 3)[lmk_faces].view( batch_size, -1, 3, 3) landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords]) return landmarks def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents, lbs_weights, pose2rot=True, dtype=torch.float32): ''' Performs Linear Blend Skinning with the given shape and pose parameters Parameters ---------- betas : torch.tensor BxNB The tensor of shape parameters pose : torch.tensor Bx(J + 1) * 3 The pose parameters in axis-angle format v_template torch.tensor BxVx3 The template mesh that will be deformed shapedirs : torch.tensor 1xNB The tensor of PCA shape displacements posedirs : torch.tensor Px(V * 3) The pose PCA coefficients J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices parents: torch.tensor J The array that describes the kinematic tree for the model lbs_weights: torch.tensor N x V x (J + 1) The linear blend skinning weights that represent how much the rotation matrix of each part affects each vertex pose2rot: bool, optional Flag on whether to convert the input pose tensor to rotation matrices. The default value is True. 
If False, then the pose tensor should already contain rotation matrices and have a size of Bx(J + 1)x9 dtype: torch.dtype, optional Returns ------- verts: torch.tensor BxVx3 The vertices of the mesh after applying the shape and pose displacements. joints: torch.tensor BxJx3 The joints of the model ''' batch_size = max(betas.shape[0], pose.shape[0]) device = betas.device # Add shape contribution v_shaped = v_template + blend_shapes(betas, shapedirs) # v_shaped *= scale # Get the joints # NxJx3 array J = vertices2joints(J_regressor, v_shaped) # 3. Add pose blend shapes # N x J x 3 x 3 ident = torch.eye(3, dtype=dtype, device=device) if pose2rot: rot_mats = batch_rodrigues( pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3]) pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1]) # (N x P) x (P, V * 3) -> N x V x 3 pose_offsets = torch.matmul(pose_feature, posedirs) \ .view(batch_size, -1, 3) else: pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident rot_mats = pose.view(batch_size, -1, 3, 3) pose_offsets = torch.matmul(pose_feature.view(batch_size, -1), posedirs).view(batch_size, -1, 3) v_posed = pose_offsets + v_shaped # 4. Get the global joint location J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) # 5. 
Do skinning: # W is N x V x (J + 1) W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1]) # (N x V x (J + 1)) x (N x (J + 1) x 16) num_joints = J_regressor.shape[0] T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \ .view(batch_size, -1, 4, 4) homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1], dtype=dtype, device=device) v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1)) verts = v_homo[:, :, :3, 0] return verts, J_transformed def vertices2joints(J_regressor, vertices): ''' Calculates the 3D joint locations from the vertices Parameters ---------- J_regressor : torch.tensor JxV The regressor array that is used to calculate the joints from the position of the vertices vertices : torch.tensor BxVx3 The tensor of mesh vertices Returns ------- torch.tensor BxJx3 The location of the joints ''' return torch.einsum('bik,ji->bjk', [vertices, J_regressor]) def blend_shapes(betas, shape_disps): ''' Calculates the per vertex displacement due to the blend shapes Parameters ---------- betas : torch.tensor Bx(num_betas) Blend shape coefficients shape_disps: torch.tensor Vx3x(num_betas) Blend shapes Returns ------- torch.tensor BxVx3 The per-vertex displacement due to shape deformation ''' # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l] # i.e. Multiply each shape displacement by its corresponding beta and # then sum them. 
blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps]) return blend_shape def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32): ''' Calculates the rotation matrices for a batch of rotation vectors Parameters ---------- rot_vecs: torch.tensor Nx3 array of N axis-angle vectors Returns ------- R: torch.tensor Nx3x3 The rotation matrices for the given axis-angle parameters ''' batch_size = rot_vecs.shape[0] device = rot_vecs.device angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True) rot_dir = rot_vecs / angle cos = torch.unsqueeze(torch.cos(angle), dim=1) sin = torch.unsqueeze(torch.sin(angle), dim=1) # Bx1 arrays rx, ry, rz = torch.split(rot_dir, 1, dim=1) K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device) zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device) K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \ .view((batch_size, 3, 3)) ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0) rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K) return rot_mat def transform_mat(R, t): ''' Creates a batch of transformation matrices Args: - R: Bx3x3 array of a batch of rotation matrices - t: Bx3x1 array of a batch of translation vectors Returns: - T: Bx4x4 Transformation matrix ''' # No padding left or right, only add an extra row return torch.cat([F.pad(R, [0, 0, 0, 1]), F.pad(t, [0, 0, 0, 1], value=1)], dim=2) def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32): """ Applies a batch of rigid transformations to the joints Parameters ---------- rot_mats : torch.tensor BxNx3x3 Tensor of rotation matrices joints : torch.tensor BxNx3 Locations of joints parents : torch.tensor BxN The kinematic tree of each object dtype : torch.dtype, optional: The data type of the created tensors, the default is torch.float32 Returns ------- posed_joints : torch.tensor BxNx3 The locations of the joints after applying the pose rotations rel_transforms : torch.tensor BxNx4x4 The relative 
(with respect to the root joint) rigid transformations for all the joints """ joints = torch.unsqueeze(joints, dim=-1) rel_joints = joints.clone() rel_joints[:, 1:] -= joints[:, parents[1:]] transforms_mat = transform_mat( rot_mats.reshape(-1, 3, 3), rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4) # transforms_mat[:, 0][:,:3,:3] *= scale transform_chain = [transforms_mat[:, 0]] for i in range(1, parents.shape[0]): # Subtract the joint location at the rest pose # No need for rotation, since it's identity when at rest curr_res = torch.matmul(transform_chain[parents[i]], transforms_mat[:, i]) transform_chain.append(curr_res) transforms = torch.stack(transform_chain, dim=1) # The last column of the transformations contains the posed joints posed_joints = transforms[:, :, :3, 3] # The last column of the transformations contains the posed joints posed_joints = transforms[:, :, :3, 3] joints_homogen = F.pad(joints, [0, 0, 0, 1]) rel_transforms = transforms - F.pad( torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0]) return posed_joints, rel_transforms
36.207547
79
0.633812
from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy as np import torch import torch.nn.functional as F from .utils import rot_mat_to_euler def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx, dynamic_lmk_b_coords, neck_kin_chain, dtype=torch.float32): batch_size = vertices.shape[0] aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1, neck_kin_chain) rot_mats = batch_rodrigues( aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3) rel_rot_mat = torch.eye(3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0) for idx in range(len(neck_kin_chain)): rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat) y_rot_angle = torch.round( torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi, max=39)).to(dtype=torch.long) neg_mask = y_rot_angle.lt(0).to(dtype=torch.long) mask = y_rot_angle.lt(-39).to(dtype=torch.long) neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle) y_rot_angle = (neg_mask * neg_vals + (1 - neg_mask) * y_rot_angle) dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx, 0, y_rot_angle) dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords, 0, y_rot_angle) return dyn_lmk_faces_idx, dyn_lmk_b_coords def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords): batch_size, num_verts = vertices.shape[:2] device = vertices.device lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view( batch_size, -1, 3) lmk_faces += torch.arange( batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts lmk_vertices = vertices.view(-1, 3)[lmk_faces].view( batch_size, -1, 3, 3) landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords]) return landmarks def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents, lbs_weights, pose2rot=True, dtype=torch.float32): batch_size = max(betas.shape[0], pose.shape[0]) device = betas.device v_shaped = v_template + blend_shapes(betas, 
shapedirs) J = vertices2joints(J_regressor, v_shaped) ident = torch.eye(3, dtype=dtype, device=device) if pose2rot: rot_mats = batch_rodrigues( pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3]) pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1]) pose_offsets = torch.matmul(pose_feature, posedirs) \ .view(batch_size, -1, 3) else: pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident rot_mats = pose.view(batch_size, -1, 3, 3) pose_offsets = torch.matmul(pose_feature.view(batch_size, -1), posedirs).view(batch_size, -1, 3) v_posed = pose_offsets + v_shaped J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1]) num_joints = J_regressor.shape[0] T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \ .view(batch_size, -1, 4, 4) homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1], dtype=dtype, device=device) v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1)) verts = v_homo[:, :, :3, 0] return verts, J_transformed def vertices2joints(J_regressor, vertices): return torch.einsum('bik,ji->bjk', [vertices, J_regressor]) def blend_shapes(betas, shape_disps): blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps]) return blend_shape def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32): batch_size = rot_vecs.shape[0] device = rot_vecs.device angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True) rot_dir = rot_vecs / angle cos = torch.unsqueeze(torch.cos(angle), dim=1) sin = torch.unsqueeze(torch.sin(angle), dim=1) rx, ry, rz = torch.split(rot_dir, 1, dim=1) K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device) zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device) K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \ .view((batch_size, 3, 3)) ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0) rot_mat = ident 
+ sin * K + (1 - cos) * torch.bmm(K, K) return rot_mat def transform_mat(R, t): return torch.cat([F.pad(R, [0, 0, 0, 1]), F.pad(t, [0, 0, 0, 1], value=1)], dim=2) def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32): joints = torch.unsqueeze(joints, dim=-1) rel_joints = joints.clone() rel_joints[:, 1:] -= joints[:, parents[1:]] transforms_mat = transform_mat( rot_mats.reshape(-1, 3, 3), rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4) transform_chain = [transforms_mat[:, 0]] for i in range(1, parents.shape[0]): curr_res = torch.matmul(transform_chain[parents[i]], transforms_mat[:, i]) transform_chain.append(curr_res) transforms = torch.stack(transform_chain, dim=1) # The last column of the transformations contains the posed joints posed_joints = transforms[:, :, :3, 3] # The last column of the transformations contains the posed joints posed_joints = transforms[:, :, :3, 3] joints_homogen = F.pad(joints, [0, 0, 0, 1]) rel_transforms = transforms - F.pad( torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0]) return posed_joints, rel_transforms
true
true
f7033f9a0566aab4239b64a1fc7d18e9989750b5
8,310
py
Python
foxbmsflashtool/stm32flasher.py
foxBMS/foxBMS-flashtool
1919f9742158938367d723ff25d0dc98cb13cfae
[ "CC-BY-4.0" ]
null
null
null
foxbmsflashtool/stm32flasher.py
foxBMS/foxBMS-flashtool
1919f9742158938367d723ff25d0dc98cb13cfae
[ "CC-BY-4.0" ]
null
null
null
foxbmsflashtool/stm32flasher.py
foxBMS/foxBMS-flashtool
1919f9742158938367d723ff25d0dc98cb13cfae
[ "CC-BY-4.0" ]
3
2017-08-30T05:07:16.000Z
2021-06-15T08:13:56.000Z
""" foxBMS Software License Copyright 2010-2016, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V. All rights reserved. BSD 3-Clause License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
We kindly request you to use one or more of the following phrases to refer to foxBMS in your hardware, software, documentation or advertising materials: "This product uses parts of foxBMS" "This product includes parts of foxBMS" "This product is derived from foxBMS" If you use foxBMS in your products, we encourage you to contact us at: CONTACT INFORMATION Fraunhofer IISB ; Schottkystrasse 10 ; 91058 Erlangen, Germany Dr.-Ing. Vincent LORENTZ +49 9131-761-346 info@foxbms.org www.foxbms.org :author: Martin Giegerich <martin.giegerich@iisb.fraunhofer.de> """ import stm32interface import argparse import sys import logging """ flash tool implementation to the STM32F4 microcontroller - for detailed insight to the USART protocol refer to STM32 appnote AN3155 - for detailed insight to the device bootloader" refer to STM32 appnote AN2606 """ class STM32Flasher(stm32interface.STM32Interface): def __init__(self, port = None, file = None, baudrate=115200, address = 0x08000000, goaddress = -1, bytes = 256,**kwargs): stm32interface.STM32Interface.__init__(self, port, baudrate) self._file = file self.bytes = bytes self.address = address self._doBeforeInit() self.init() def _doBeforeInit(self): ''' abstract method to optionally reset microcontroller or toggle boot pins ''' pass def __enter__(self): return self def read(self): data = [] length = self.bytes address = self.address logging.debug("Flash Read Start, Length: {0}, Address: {1:#x} ".format(length, address)) while length > 256: logging.debug("Read {0} bytes at {1:#x}".format(256, address)) data = data + self.readMemory(address, 256) address += 256 length -= 256 logging.info("[{0}/{1}] read ".format(self.bytes-length, self.bytes)) logging.debug("Read {0} bytes at {1:#x}".format(length, address)) data = data + self.readMemory(address, length) logging.info("[{0}/{1}] read".format(self.bytes, self.bytes)) return data def write(self, data): logging.debug("Flash Write Start") length = len(data) alllng = len(data) address = 
self.address offset = 0 while length > 256: logging.debug("Write {0} bytes at {1:#x}".format(256, address)) self.writeMemory(address, data[offset:offset+256]) offset += 256 address += 256 length -= 256 logging.info("[{0}/{1}] written".format(alllng-length, alllng)) logging.debug("Write {0} bytes at {1:#x}".format(length, address)) self.writeMemory(address, data[offset:offset+length] ) logging.info("[{0}/{1}] written".format(alllng, alllng)) #logging.info("Flash Write End") def erase(self): logging.info("Flash Erase Start") super(STM32Flasher, self).erase() logging.info("Flash Erase End") def verify(self, data): logging.info("Flash verify") self.bytes = len(data) verify = self.read() if data == verify: logging.info("Verify successful") return True else: self.veriFail = str(len(data)) + ' vs ' + str(len(verify)) + '\n' for i in xrange(0, len(data)): if data[i] != verify[i]: self.veriFail += hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i]) + '\n' logging.error(self.veriFail) return False def __str__(self): id = self.getId()[1:3] # id without length byte and ack byte version = self.getVersion() return "ID: %s Bootloader version: %x" % (hex(reduce(lambda x, y: x*0x100+y, id)), version[0]) def auto_int(x): return int(x, 0) def main(): parser = argparse.ArgumentParser(description='STM32 flash tool', formatter_class=argparse.RawDescriptionHelpFormatter, epilog = '''\ Example: %s --port COM3 --erase --write --verify build/src/general/foxbms_flash.bin Copyright (c) 2015, 2016 Fraunhofer IISB. All rights reserved. This program has been released under the conditions of the 3-clause BSD license. 
''' % sys.argv[0]) parser.add_argument('-v', '--verbosity', action='count', default=0, help="increase output verbosity") parser.add_argument('--erase', '-e', action='store_true', help='erase firmware') parser.add_argument('--read', '-r', action='store_true', help='read and store firmware') parser.add_argument('--write', '-w', action='store_true', help='writes firmware') parser.add_argument('--verify', '-y', action='store_true', help='verify the firmware') parser.add_argument('--bytes', '-s', type=int, default = 256, help='bytes to read from the firmware') parser.add_argument('--bauds', '-b', type=int, default=115200, help='transfer speed (bauds)') parser.add_argument('--port', '-p', type=str, default='/dev/tty.usbserial-ftCYPMYJ', help='ttyUSB port') parser.add_argument('--address', '-a', type=auto_int, default=0x08000000, help='target address') parser.add_argument('--goaddress', '-g', type=auto_int, default=-1, help='start address (use -1 for default)') parser.add_argument('firmware', metavar = 'FIRMWARE FILE', help='firmware binary') args = parser.parse_args() if args.verbosity == 1: logging.basicConfig(level = logging.INFO) elif args.verbosity > 1: logging.basicConfig(level = logging.DEBUG) else: logging.basicConfig(level = logging.ERROR) if args.read: if args.erase: parser.error('Cannot use --erase together with --read') if args.write: parser.error('Cannot use --write together with --read') if args.bytes == None: parser.error('Please give a length (in bytes) to read') with STM32Flasher(**vars(args)) as flasher: if args.write or args.verify: with open(args.firmware, 'rb') as f: data = map(lambda c: ord(c), f.read()) if args.erase: flasher.erase() if args.write: flasher.write(data) if args.verify: flasher.verify(data) if args.read: rdata = flasher.read() with open(args.firmware, 'wb') as f: f.write(''.join(map(chr,rdata))) if args.goaddress > -1: flasher.go(args.goaddress) if __name__ == "__main__": main()
38.472222
114
0.644404
import stm32interface import argparse import sys import logging class STM32Flasher(stm32interface.STM32Interface): def __init__(self, port = None, file = None, baudrate=115200, address = 0x08000000, goaddress = -1, bytes = 256,**kwargs): stm32interface.STM32Interface.__init__(self, port, baudrate) self._file = file self.bytes = bytes self.address = address self._doBeforeInit() self.init() def _doBeforeInit(self): pass def __enter__(self): return self def read(self): data = [] length = self.bytes address = self.address logging.debug("Flash Read Start, Length: {0}, Address: {1:#x} ".format(length, address)) while length > 256: logging.debug("Read {0} bytes at {1:#x}".format(256, address)) data = data + self.readMemory(address, 256) address += 256 length -= 256 logging.info("[{0}/{1}] read ".format(self.bytes-length, self.bytes)) logging.debug("Read {0} bytes at {1:#x}".format(length, address)) data = data + self.readMemory(address, length) logging.info("[{0}/{1}] read".format(self.bytes, self.bytes)) return data def write(self, data): logging.debug("Flash Write Start") length = len(data) alllng = len(data) address = self.address offset = 0 while length > 256: logging.debug("Write {0} bytes at {1:#x}".format(256, address)) self.writeMemory(address, data[offset:offset+256]) offset += 256 address += 256 length -= 256 logging.info("[{0}/{1}] written".format(alllng-length, alllng)) logging.debug("Write {0} bytes at {1:#x}".format(length, address)) self.writeMemory(address, data[offset:offset+length] ) logging.info("[{0}/{1}] written".format(alllng, alllng)) def erase(self): logging.info("Flash Erase Start") super(STM32Flasher, self).erase() logging.info("Flash Erase End") def verify(self, data): logging.info("Flash verify") self.bytes = len(data) verify = self.read() if data == verify: logging.info("Verify successful") return True else: self.veriFail = str(len(data)) + ' vs ' + str(len(verify)) + '\n' for i in xrange(0, len(data)): if data[i] != verify[i]: self.veriFail 
+= hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i]) + '\n' logging.error(self.veriFail) return False def __str__(self): id = self.getId()[1:3] version = self.getVersion() return "ID: %s Bootloader version: %x" % (hex(reduce(lambda x, y: x*0x100+y, id)), version[0]) def auto_int(x): return int(x, 0) def main(): parser = argparse.ArgumentParser(description='STM32 flash tool', formatter_class=argparse.RawDescriptionHelpFormatter, epilog = '''\ Example: %s --port COM3 --erase --write --verify build/src/general/foxbms_flash.bin Copyright (c) 2015, 2016 Fraunhofer IISB. All rights reserved. This program has been released under the conditions of the 3-clause BSD license. ''' % sys.argv[0]) parser.add_argument('-v', '--verbosity', action='count', default=0, help="increase output verbosity") parser.add_argument('--erase', '-e', action='store_true', help='erase firmware') parser.add_argument('--read', '-r', action='store_true', help='read and store firmware') parser.add_argument('--write', '-w', action='store_true', help='writes firmware') parser.add_argument('--verify', '-y', action='store_true', help='verify the firmware') parser.add_argument('--bytes', '-s', type=int, default = 256, help='bytes to read from the firmware') parser.add_argument('--bauds', '-b', type=int, default=115200, help='transfer speed (bauds)') parser.add_argument('--port', '-p', type=str, default='/dev/tty.usbserial-ftCYPMYJ', help='ttyUSB port') parser.add_argument('--address', '-a', type=auto_int, default=0x08000000, help='target address') parser.add_argument('--goaddress', '-g', type=auto_int, default=-1, help='start address (use -1 for default)') parser.add_argument('firmware', metavar = 'FIRMWARE FILE', help='firmware binary') args = parser.parse_args() if args.verbosity == 1: logging.basicConfig(level = logging.INFO) elif args.verbosity > 1: logging.basicConfig(level = logging.DEBUG) else: logging.basicConfig(level = logging.ERROR) if args.read: if args.erase: parser.error('Cannot use 
--erase together with --read') if args.write: parser.error('Cannot use --write together with --read') if args.bytes == None: parser.error('Please give a length (in bytes) to read') with STM32Flasher(**vars(args)) as flasher: if args.write or args.verify: with open(args.firmware, 'rb') as f: data = map(lambda c: ord(c), f.read()) if args.erase: flasher.erase() if args.write: flasher.write(data) if args.verify: flasher.verify(data) if args.read: rdata = flasher.read() with open(args.firmware, 'wb') as f: f.write(''.join(map(chr,rdata))) if args.goaddress > -1: flasher.go(args.goaddress) if __name__ == "__main__": main()
true
true
f7033fb7efae86dad225efa7b01c333e805ff430
3,804
py
Python
covsirphy/loading/db_owid.py
ardhanii/covid19-sir
87881963c49a2fc5b6235c8b21269d216acaa941
[ "Apache-2.0" ]
97
2020-05-15T15:20:15.000Z
2022-03-18T02:55:54.000Z
covsirphy/loading/db_owid.py
ardhanii/covid19-sir
87881963c49a2fc5b6235c8b21269d216acaa941
[ "Apache-2.0" ]
970
2020-06-01T13:48:34.000Z
2022-03-29T08:20:49.000Z
covsirphy/loading/db_owid.py
ardhani31/Covid19-SIRV-v3
59d95156b375c41259c46ce4e656b86903f92ec2
[ "Apache-2.0" ]
36
2020-05-15T15:36:43.000Z
2022-02-25T17:59:08.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import country_converter as coco import pandas as pd from covsirphy.util.term import Term from covsirphy.loading.db_base import _RemoteDatabase class _OWID(_RemoteDatabase): """ Access "Our World In Data". https://github.com/owid/covid-19-data/tree/master/public/data https://ourworldindata.org/coronavirus Args: filename (str): CSV filename to save records """ # URL for vaccine data URL_V = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/" URL_V_REC = f"{URL_V}vaccinations.csv" URL_V_LOC = f"{URL_V}locations.csv" # URL for PCR data URL_P = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/" URL_P_REC = f"{URL_P}covid-testing-all-observations.csv" # Citation CITATION = "Hasell, J., Mathieu, E., Beltekian, D. et al." \ " A cross-country database of COVID-19 testing. Sci Data 7, 345 (2020)." \ " https://doi.org/10.1038/s41597-020-00688-8" # Column names and data types # {"name in database": "name defined in Term class"} COL_DICT = { "date": Term.DATE, "location": Term.COUNTRY, Term.PROVINCE: Term.PROVINCE, "iso_code": Term.ISO3, "vaccines": Term.PRODUCT, "total_vaccinations": Term.VAC, "people_vaccinated": Term.V_ONCE, "people_fully_vaccinated": Term.V_FULL, "tests": Term.TESTS, } def download(self, verbose): """ Download the dataset from the server and set the list of primary sources. Args: verbose (int): level of verbosity Returns: pandas.DataFrame Index reset index Columns defined by the first values of self.COL_DICT.values() Note: If @verbose is equal to or over 1, how to show the list will be explained. 
""" # Download datasets if verbose: print("Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/") # Vaccinations v_rec_cols = [ "date", "location", "iso_code", "total_vaccinations", "people_vaccinated", "people_fully_vaccinated"] v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols) v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=["location", "vaccines"]) v_df = v_rec_df.merge(v_loc_df, how="left", on="location") # Tests pcr_rec_cols = ["ISO code", "Date", "Daily change in cumulative total", "Cumulative total"] pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols) pcr_df = pcr_df.rename(columns={"ISO code": "iso_code", "Date": "date"}) pcr_df["cumsum"] = pcr_df.groupby("iso_code")["Daily change in cumulative total"].cumsum() pcr_df = pcr_df.assign(tests=lambda x: x["Cumulative total"].fillna(x["cumsum"])) # Combine data (vaccinations/tests) df = v_df.set_index(["iso_code", "date"]) df = df.combine_first(pcr_df.set_index(["iso_code", "date"]).loc[:, ["tests"]]) df = df.reset_index() # Location (country/province) df["location"] = df["location"].replace( { # COG "Congo": "Republic of the Congo", } ) df = df.loc[~df["iso_code"].str.contains("OWID_")] df["location"] = df.groupby("iso_code")["location"].bfill() df.loc[df["location"] == df["iso_code"], "location"] = None df.loc[df["location"].isna(), "location"] = df.loc[df["location"].isna(), "iso_code"].apply( lambda x: coco.convert(x, to="name_short", not_found=None)) df[self.PROVINCE] = self.UNKNOWN return df
40.468085
113
0.608307
import country_converter as coco import pandas as pd from covsirphy.util.term import Term from covsirphy.loading.db_base import _RemoteDatabase class _OWID(_RemoteDatabase): URL_V = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/" URL_V_REC = f"{URL_V}vaccinations.csv" URL_V_LOC = f"{URL_V}locations.csv" URL_P = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/" URL_P_REC = f"{URL_P}covid-testing-all-observations.csv" CITATION = "Hasell, J., Mathieu, E., Beltekian, D. et al." \ " A cross-country database of COVID-19 testing. Sci Data 7, 345 (2020)." \ " https://doi.org/10.1038/s41597-020-00688-8" COL_DICT = { "date": Term.DATE, "location": Term.COUNTRY, Term.PROVINCE: Term.PROVINCE, "iso_code": Term.ISO3, "vaccines": Term.PRODUCT, "total_vaccinations": Term.VAC, "people_vaccinated": Term.V_ONCE, "people_fully_vaccinated": Term.V_FULL, "tests": Term.TESTS, } def download(self, verbose): if verbose: print("Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/") v_rec_cols = [ "date", "location", "iso_code", "total_vaccinations", "people_vaccinated", "people_fully_vaccinated"] v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols) v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=["location", "vaccines"]) v_df = v_rec_df.merge(v_loc_df, how="left", on="location") pcr_rec_cols = ["ISO code", "Date", "Daily change in cumulative total", "Cumulative total"] pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols) pcr_df = pcr_df.rename(columns={"ISO code": "iso_code", "Date": "date"}) pcr_df["cumsum"] = pcr_df.groupby("iso_code")["Daily change in cumulative total"].cumsum() pcr_df = pcr_df.assign(tests=lambda x: x["Cumulative total"].fillna(x["cumsum"])) df = v_df.set_index(["iso_code", "date"]) df = df.combine_first(pcr_df.set_index(["iso_code", "date"]).loc[:, ["tests"]]) df = df.reset_index() df["location"] = df["location"].replace( { "Congo": "Republic of the Congo", 
} ) df = df.loc[~df["iso_code"].str.contains("OWID_")] df["location"] = df.groupby("iso_code")["location"].bfill() df.loc[df["location"] == df["iso_code"], "location"] = None df.loc[df["location"].isna(), "location"] = df.loc[df["location"].isna(), "iso_code"].apply( lambda x: coco.convert(x, to="name_short", not_found=None)) df[self.PROVINCE] = self.UNKNOWN return df
true
true
f7033ff4f37a0b03e3ac28f1687f598056116852
1,262
py
Python
cli/print_command.py
nicogno/biodynamo
f875994d4ea9aa07f938283719d5db83e450bfb8
[ "Apache-2.0" ]
null
null
null
cli/print_command.py
nicogno/biodynamo
f875994d4ea9aa07f938283719d5db83e450bfb8
[ "Apache-2.0" ]
null
null
null
cli/print_command.py
nicogno/biodynamo
f875994d4ea9aa07f938283719d5db83e450bfb8
[ "Apache-2.0" ]
null
null
null
# ----------------------------------------------------------------------------- # # Copyright (C) 2021 CERN & Newcastle University for the benefit of the # BioDynaMo collaboration. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # # See the LICENSE file distributed with this work for details. # See the NOTICE file distributed with this work for additional information # regarding copyright ownership. # # ----------------------------------------------------------------------------- class Print: PURPLE = '\033[95m' CYAN = '\033[96m' DARKCYAN = '\033[36m' BLUE = '\033[94m' GREEN = '\033[92m' YELLOW = '\033[93m' RED = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' @staticmethod def success(message): print(Print.BOLD + Print.GREEN + str(message) + Print.END) @staticmethod def error(message): print(Print.RED + str(message) + Print.END) @staticmethod def warning(message): print(Print.YELLOW + str(message) + Print.END) @staticmethod def new_step(message): print('\n' + Print.BOLD + Print.BLUE + str(message) + Print.END)
30.047619
79
0.568146
class Print: PURPLE = '\033[95m' CYAN = '\033[96m' DARKCYAN = '\033[36m' BLUE = '\033[94m' GREEN = '\033[92m' YELLOW = '\033[93m' RED = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' @staticmethod def success(message): print(Print.BOLD + Print.GREEN + str(message) + Print.END) @staticmethod def error(message): print(Print.RED + str(message) + Print.END) @staticmethod def warning(message): print(Print.YELLOW + str(message) + Print.END) @staticmethod def new_step(message): print('\n' + Print.BOLD + Print.BLUE + str(message) + Print.END)
true
true
f70342d9e8c689b06d4e580e179acf489a73a9f6
86,371
py
Python
flexget/tests/test_series.py
vxcamiloxv/Flexget
f18e53b59b768515d8e67464b8cc41bddfc00c33
[ "MIT" ]
null
null
null
flexget/tests/test_series.py
vxcamiloxv/Flexget
f18e53b59b768515d8e67464b8cc41bddfc00c33
[ "MIT" ]
null
null
null
flexget/tests/test_series.py
vxcamiloxv/Flexget
f18e53b59b768515d8e67464b8cc41bddfc00c33
[ "MIT" ]
null
null
null
from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin from io import StringIO import pytest from jinja2 import Template from flexget.entry import Entry from flexget.logger import capture_output from flexget.manager import get_parser, Session from flexget.task import TaskAbort from flexget.components.series import db def age_series(**kwargs): import datetime session = Session() session.query(db.EpisodeRelease).update({'first_seen': datetime.datetime.now() - datetime.timedelta(**kwargs)}) session.commit() @pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True) def config(request): """Override and parametrize default config fixture for all series tests.""" newconfig = Template(request.cls.config).render({'parser': request.param}) # Make sure we remembered to put the section in config assert request.cls.config != newconfig, 'config parameterization did nothing?' 
return newconfig class TestQuality(object): config = """ templates: global: parsing: series: {{parser}} tasks: exact_quality: mock: - {title: 'QTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'QTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'QTest.S01E01.DSR.XViD-FlexGet'} - {title: 'QTest.S01E01.1080p.XViD-FlexGet'} - {title: 'QTest.S01E01.720p.XViD-FlexGet'} series: - QTest: quality: 720p quality_fail: mock: - {title: 'Q2Test.S01E01.HDTV.XViD-FlexGet'} - {title: 'Q2Test.S01E01.PDTV.XViD-FlexGet'} - {title: 'Q2Test.S01E01.DSR.XViD-FlexGet'} series: - Q2Test: quality: 720p min_quality: mock: - {title: 'MinQTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'MinQTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'MinQTest.S01E01.DSR.XViD-FlexGet'} - {title: 'MinQTest.S01E01.1080p.XViD-FlexGet'} - {title: 'MinQTest.S01E01.720p.XViD-FlexGet'} series: - MinQTest: quality: ">720p" max_quality: mock: - {title: 'MaxQTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.DSR.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.1080p.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.720p.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.720p.bluray-FlexGet'} series: - MaxQTest: quality: "<720p <=HDTV" min_max_quality: mock: - {title: 'MinMaxQTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.DSR.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.720p.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.HR.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.1080p.XViD-FlexGet'} series: - MinMaxQTest: quality: 480p-hr max_unknown_quality: mock: - {title: 'MaxUnknownQTest.S01E01.XViD-FlexGet'} series: - MaxUnknownQTest: quality: "<=hdtv" quality_from_group: mock: - {title: 'GroupQual.S01E01.HDTV.XViD-FlexGet'} - {title: 'GroupQual.S01E01.PDTV.XViD-FlexGet'} - {title: 'GroupQual.S01E01.DSR.XViD-FlexGet'} - {title: 'GroupQual.S01E01.1080p.XViD-FlexGet'} - {title: 'GroupQual.S01E01.720p.XViD-FlexGet'} - {title: 
'Other.S01E01.hdtv.dd5.1.XViD-FlexGet'} - {title: 'Other.S01E01.720p.hdtv.XViD-FlexGet'} series: 720P: - GroupQual # Test that an integer group name doesn't cause an exception. 1080: - Test hdtv <hr !dd5.1: - Other quality_in_series_name: mock: - title: my 720p show S01E01 - title: my 720p show S01E02 720p series: - my 720p show: quality: '<720p' """ def test_exact_quality(self, execute_task): """Series plugin: choose by quality""" task = execute_task('exact_quality') assert task.find_entry('accepted', title='QTest.S01E01.720p.XViD-FlexGet'), \ '720p should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_quality_fail(self, execute_task): task = execute_task('quality_fail') assert not task.accepted, 'No qualities should have matched' def test_min_quality(self, execute_task): """Series plugin: min_quality""" task = execute_task('min_quality') assert task.find_entry('accepted', title='MinQTest.S01E01.1080p.XViD-FlexGet'), \ 'MinQTest.S01E01.1080p.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_max_quality(self, execute_task): """Series plugin: max_quality""" task = execute_task('max_quality') assert task.find_entry('accepted', title='MaxQTest.S01E01.HDTV.XViD-FlexGet'), \ 'MaxQTest.S01E01.HDTV.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_min_max_quality(self, execute_task): """Series plugin: min_quality with max_quality""" task = execute_task('min_max_quality') assert task.find_entry('accepted', title='MinMaxQTest.S01E01.HR.XViD-FlexGet'), \ 'MinMaxQTest.S01E01.HR.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_max_unknown_quality(self, execute_task): """Series plugin: max quality with unknown quality""" task = execute_task('max_unknown_quality') assert len(task.accepted) == 1, 'should have accepted' def test_group_quality(self, 
execute_task): """Series plugin: quality from group name""" task = execute_task('quality_from_group') assert task.find_entry('accepted', title='GroupQual.S01E01.720p.XViD-FlexGet'), \ 'GroupQual.S01E01.720p.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one (no entries should pass for series `other`' def test_quality_in_series_name(self, execute_task): """Make sure quality in title does not get parsed as quality""" task = execute_task('quality_in_series_name') assert task.find_entry('accepted', title='my 720p show S01E01'), \ 'quality in title should not have been parsed' assert len(task.accepted) == 1, 'should not have accepted 720p entry' class TestDatabase(object): config = """ templates: global: parsing: series: {{parser}} series: - some series - progress tasks: test_1: mock: - {title: 'Some.Series.S01E20.720p.XViD-FlexGet'} test_2: mock: - {title: 'Some.Series.S01E20.720p.XViD-DoppelGanger'} progress_1: mock: - {title: 'Progress.S01E20.720p-FlexGet'} - {title: 'Progress.S01E20.HDTV-FlexGet'} progress_2: mock: - {title: 'Progress.S01E20.720p.Another-FlexGet'} - {title: 'Progress.S01E20.HDTV-Another-FlexGet'} """ def test_database(self, execute_task): """Series plugin: simple database""" task = execute_task('test_1') task = execute_task('test_2') assert task.find_entry('rejected', title='Some.Series.S01E20.720p.XViD-DoppelGanger'), \ 'failed basic download remembering' def test_doppelgangers(self, execute_task): """Series plugin: doppelganger releases (dupes)""" task = execute_task('progress_1') assert task.find_entry('accepted', title='Progress.S01E20.720p-FlexGet'), \ 'best quality not accepted' # should not accept anything task = execute_task('progress_1') assert not task.accepted, 'repeated execution accepted' # introduce new doppelgangers task = execute_task('progress_2') assert not task.accepted, 'doppelgangers accepted' class TestFilterSeries(object): config = """ templates: global: parsing: series: 
{{parser}} tasks: test: mock: - {title: 'Some.Series.S01E20.720p.XViD-FlexGet'} - {title: 'Another.Series.S01E20.720p.XViD-FlexGet'} - {title: 'Another.Series.S01E21.1080p.H264-FlexGet'} - {title: 'Date.Series.10-11-2008.XViD'} - {title: 'Date.Series.10.12.2008.XViD'} - {title: 'Date.Series.2008-10-13.XViD'} - {title: 'Date.Series.10.14.09.XViD'} - {title: 'Date Series 2010 11 17 XViD'} - {title: 'Useless title', filename: 'Filename.Series.S01E26.XViD'} - {title: 'Empty.Description.S01E22.XViD', description: ''} # test chaining regexp: reject: - 1080p series: - another series - date series - filename series - empty description - (some) series metainfo_series_override: metainfo_series: yes mock: - {title: 'Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet'} - {title: 'Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet'} series: - Test Series test_all_series_mode: mock: - {title: 'Test.Series.S01E02.PDTV.XViD-FlexGet'} - {title: 'Test Series - 1x03 - PDTV XViD-FlexGet'} - {title: 'Other.Show.S02E01.PDTV.XViD-FlexGet'} - {title: 'other show season 2 episode 2'} - {title: 'Date.Show.03-29-2012.HDTV.XViD-FlexGet'} all_series: yes test_alternate_name: mock: - title: The.Show.S01E01 - title: Other.Name.S01E02 - title: many.names.S01E01 - title: name.1.S01E02 - title: name.2.S01E03 - title: paren.title.2013.S01E01 series: - The Show: alternate_name: Other Name - many names: alternate_name: - name 1 - name 2 - paren title (US): alternate_name: paren title 2013 test_input_order_preserved: series: - Some Show """ def test_smoke(self, execute_task): """Series plugin: test several standard features""" task = execute_task('test') # normal passing assert task.find_entry(title='Another.Series.S01E20.720p.XViD-FlexGet'), \ 'Another.Series.S01E20.720p.XViD-FlexGet should have passed' # series with brackets assert task.find_entry('accepted', title='Some.Series.S01E20.720p.XViD-FlexGet'), \ 'Some.Series.S01E20.720p.XViD-FlexGet should have been accepted' # date formats df = 
['Date.Series.10-11-2008.XViD', 'Date.Series.10.12.2008.XViD', 'Date Series 2010 11 17 XViD', 'Date.Series.2008-10-13.XViD', 'Date.Series.10.14.09.XViD'] for d in df: entry = task.find_entry(title=d) assert entry, 'Date format did not match %s' % d assert 'series_parser' in entry, 'series_parser missing from %s' % d assert entry['series_parser'].id_type == 'date', '%s did not return three groups for dates' % d # parse from filename assert task.find_entry(filename='Filename.Series.S01E26.XViD'), 'Filename parsing failed' # empty description assert task.find_entry(title='Empty.Description.S01E22.XViD'), 'Empty Description failed' # chaining with regexp plugin assert task.find_entry('rejected', title='Another.Series.S01E21.1080p.H264-FlexGet'), \ 'regexp chaining' def test_metainfo_series_override(self, execute_task): """Series plugin: override metainfo_series""" task = execute_task('metainfo_series_override') # Make sure the metainfo_series plugin is working first entry = task.find_entry('entries', title='Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet') assert entry['series_guessed'], 'series should have been guessed' assert entry['series_name'] == entry['series_parser'].name == 'Other Show With Extra Crap', \ 'metainfo_series is not running' # Make sure the good series data overrode metainfo data for the listed series entry = task.find_entry('accepted', title='Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet') assert not entry.get('series_guessed'), 'series plugin should override series_guessed' assert entry['series_name'] == entry['series_parser'].name == 'Test Series', \ 'Series name should be \'Test Series\', was: entry: %s, parser: %s' % ( entry['series_name'], entry['series_parser'].name) def test_all_series_mode(self, execute_task): """Series plugin: test all option""" task = execute_task('test_all_series_mode') assert task.find_entry('accepted', title='Test.Series.S01E02.PDTV.XViD-FlexGet') task.find_entry('accepted', title='Test Series - 1x03 - PDTV 
XViD-FlexGet') entry = task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet') assert entry assert entry.get('series_name') == 'Test Series' entry = task.find_entry('accepted', title='Other.Show.S02E01.PDTV.XViD-FlexGet') assert entry.get('series_guessed') entry2 = task.find_entry('accepted', title='other show season 2 episode 2') # Make sure case is normalized so series are marked with the same name no matter the case in the title assert entry.get('series_name') == entry2.get( 'series_name') == 'Other Show', 'Series names should be in title case' entry = task.find_entry('accepted', title='Date.Show.03-29-2012.HDTV.XViD-FlexGet') assert entry.get('series_guessed') assert entry.get('series_name') == 'Date Show' def test_alternate_name(self, execute_task): task = execute_task('test_alternate_name') assert all(e.accepted for e in task.all_entries), 'All releases should have matched a show' @pytest.mark.parametrize('reverse', [False, True]) def test_input_order_preserved(self, manager, execute_task, reverse): """If multiple versions of an episode are acceptable, make sure the first one is accepted.""" entries = [ Entry(title='Some Show S01E01 720p proper', url='http://a'), Entry(title='Some Show S01E01 1080p', url='http://b') ] if reverse: entries.reverse() task = execute_task('test_input_order_preserved', options={'inject': entries}) assert task.accepted[0] == entries[0], 'first entry should have been accepted' class TestEpisodeAdvancement(object): config = """ templates: global: parsing: series: {{parser}} tasks: test_backwards_1: mock: - {title: 'backwards s02e12'} - {title: 'backwards s02e10'} series: - backwards test_backwards_2: mock: - {title: 'backwards s02e01'} series: - backwards test_backwards_3: mock: - {title: 'backwards s01e01'} series: - backwards test_backwards_okay_1: mock: - {title: 'backwards s01e02'} series: - backwards: tracking: backfill test_backwards_okay_2: mock: - {title: 'backwards s01e03'} series: - backwards: tracking: 
no test_forwards_1: mock: - {title: 'forwards s01e01'} series: - forwards test_forwards_2: mock: - {title: 'forwards s02e01'} series: - forwards test_forwards_3: mock: - {title: 'forwards s03e01'} series: - forwards test_forwards_4: mock: - {title: 'forwards s04e02'} series: - forwards test_forwards_5: mock: - {title: 'forwards s05e01'} series: - forwards test_forwards_okay_1: mock: - {title: 'forwards s05e01'} series: - forwards: tracking: no test_unordered: mock: - {title: 'zzz s01e05'} - {title: 'zzz s01e06'} - {title: 'zzz s01e07'} - {title: 'zzz s01e08'} - {title: 'zzz s01e09'} - {title: 'zzz s01e10'} - {title: 'zzz s01e15'} - {title: 'zzz s01e14'} - {title: 'zzz s01e13'} - {title: 'zzz s01e12'} - {title: 'zzz s01e11'} - {title: 'zzz s01e01'} series: - zzz test_seq1: mock: - title: seq 05 series: - seq test_seq2: mock: - title: seq 06 series: - seq test_seq3: mock: - title: seq 10 series: - seq test_seq4: mock: - title: seq 01 series: - seq """ def test_backwards(self, execute_task): """Series plugin: episode advancement (backwards)""" task = execute_task('test_backwards_1') assert task.find_entry('accepted', title='backwards s02e12'), \ 'backwards s02e12 should have been accepted' assert task.find_entry('accepted', title='backwards s02e10'), \ 'backwards s02e10 should have been accepted within grace margin' task = execute_task('test_backwards_2') assert task.find_entry('accepted', title='backwards s02e01'), \ 'backwards s02e01 should have been accepted, in current season' task = execute_task('test_backwards_3') assert task.find_entry('rejected', title='backwards s01e01'), \ 'backwards s01e01 should have been rejected, in previous season' task = execute_task('test_backwards_okay_1') assert task.find_entry('accepted', title='backwards s01e02'), \ 'backwards s01e01 should have been accepted, backfill enabled' task = execute_task('test_backwards_okay_2') assert task.find_entry('accepted', title='backwards s01e03'), \ 'backwards s01e01 should have been accepted, 
tracking off' def test_forwards(self, execute_task): """Series plugin: episode advancement (future)""" task = execute_task('test_forwards_1') assert task.find_entry('accepted', title='forwards s01e01'), \ 'forwards s01e01 should have been accepted' task = execute_task('test_forwards_2') assert task.find_entry('accepted', title='forwards s02e01'), \ 'forwards s02e01 should have been accepted' task = execute_task('test_forwards_3') assert task.find_entry('accepted', title='forwards s03e01'), \ 'forwards s03e01 should have been accepted' task = execute_task('test_forwards_4') assert task.find_entry('rejected', title='forwards s04e02'), \ 'forwards s04e02 should have been rejected' task = execute_task('test_forwards_5') assert task.find_entry('rejected', title='forwards s05e01'), \ 'forwards s05e01 should have been rejected' task = execute_task('test_forwards_okay_1') assert task.find_entry('accepted', title='forwards s05e01'), \ 'forwards s05e01 should have been accepted with tracking turned off' def test_unordered(self, execute_task): """Series plugin: unordered episode advancement""" task = execute_task('test_unordered') assert len(task.accepted) == 12, \ 'not everyone was accepted' def test_sequence(self, execute_task): # First should be accepted task = execute_task('test_seq1') entry = task.find_entry('accepted', title='seq 05') assert entry['series_id'] == 5 # Next in sequence should be accepted task = execute_task('test_seq2') entry = task.find_entry('accepted', title='seq 06') assert entry['series_id'] == 6 # Should be too far in the future task = execute_task('test_seq3') entry = task.find_entry(title='seq 10') assert entry not in task.accepted, 'Should have been too far in future' # Should be too far in the past task = execute_task('test_seq4') entry = task.find_entry(title='seq 01') assert entry not in task.accepted, 'Should have been too far in the past' class TestFilterSeriesPriority(object): config = """ templates: global: parsing: series: {{parser}} 
tasks: test: mock: - {title: 'foobar 720p s01e01'} - {title: 'foobar hdtv s01e01'} regexp: reject: - 720p series: - foobar """ def test_priorities(self, execute_task): """Series plugin: regexp plugin is able to reject before series plugin""" task = execute_task('test') assert task.find_entry('rejected', title='foobar 720p s01e01'), \ 'foobar 720p s01e01 should have been rejected' assert task.find_entry('accepted', title='foobar hdtv s01e01'), \ 'foobar hdtv s01e01 is not accepted' class TestPropers(object): config = """ templates: global: parsing: series: {{parser}} # prevents seen from rejecting on second execution, # we want to see that series is able to reject disable: builtins series: - test - foobar - asfd: quality: HR-1080p - V - tftest: propers: 3 hours - notest: propers: no tasks: propers_1: mock: - {title: 'Test.S01E01.720p-FlexGet'} # introduce proper, should be accepted propers_2: mock: - {title: 'Test.S01E01.720p.Proper-FlexGet'} # introduce non-proper, should not be downloaded propers_3: mock: - {title: 'Test.S01E01.FlexGet'} # introduce proper at the same time, should nuke non-proper and get proper proper_at_first: mock: - {title: 'Foobar.S01E01.720p.FlexGet'} - {title: 'Foobar.S01E01.720p.proper.FlexGet'} # test a lot of propers at once lot_propers: mock: - {title: 'V.2009.S01E01.PROPER.HDTV.A'} - {title: 'V.2009.S01E01.PROPER.HDTV.B'} - {title: 'V.2009.S01E01.PROPER.HDTV.C'} diff_quality_1: mock: - {title: 'Test.S01E02.720p-FlexGet'} # low quality proper, should not be accepted diff_quality_2: mock: - {title: 'Test.S01E02.HDTV.Proper-FlexGet'} # min + max quality with propers min_max_quality_1: mock: - {title: 'asfd.S01E01.720p-FlexGet'} min_max_quality_2: mock: - {title: 'asfd.S01E01.720p.Proper-FlexGet'} proper_timeframe_1: mock: - {title: 'TFTest.S01E01.720p-FlexGet'} proper_timeframe_2: mock: - {title: 'TFTest.S01E01.720p.proper-FlexGet'} no_propers_1: mock: - {title: 'NoTest.S01E01.720p-FlexGet'} no_propers_2: mock: - {title: 
'NoTest.S01E01.720p.proper-FlexGet'} proper_upgrade_1: mock: - {title: 'Test.S02E01.hdtv.proper'} proper_upgrade_2: mock: - {title: 'Test.S02E01.hdtv.real.proper'} anime_proper_1: mock: - title: test 04v0 hdtv anime_proper_2: mock: - title: test 04 hdtv fastsub_proper_1: mock: - title: test s01e01 Fastsub hdtv fastsub_proper_2: mock: - title: test s01e01 Fastsub repack hdtv fastsub_proper_3: mock: - title: test s01e01 hdtv fastsub_proper_4: mock: - title: test s01e01 proper hdtv """ def test_propers_timeframe(self, execute_task): """Series plugin: propers timeframe""" task = execute_task('proper_timeframe_1') assert task.find_entry('accepted', title='TFTest.S01E01.720p-FlexGet'), \ 'Did not accept before timeframe' # let 6 hours pass age_series(hours=6) task = execute_task('proper_timeframe_2') assert task.find_entry('rejected', title='TFTest.S01E01.720p.proper-FlexGet'), \ 'Did not reject after proper timeframe' def test_no_propers(self, execute_task): """Series plugin: no propers at all""" task = execute_task('no_propers_1') assert len(task.accepted) == 1, 'broken badly' task = execute_task('no_propers_2') assert len(task.rejected) == 1, 'accepted proper' def test_min_max_propers(self, execute_task): """Series plugin: min max propers""" task = execute_task('min_max_quality_1') assert len(task.accepted) == 1, 'uhh, broken badly' task = execute_task('min_max_quality_2') assert len(task.accepted) == 1, 'should have accepted proper' def test_lot_propers(self, execute_task): """Series plugin: proper flood""" task = execute_task('lot_propers') assert len(task.accepted) == 1, 'should have accepted (only) one of the propers' def test_diff_quality_propers(self, execute_task): """Series plugin: proper in different/wrong quality""" task = execute_task('diff_quality_1') assert len(task.accepted) == 1 task = execute_task('diff_quality_2') assert len(task.accepted) == 0, 'should not have accepted lower quality proper' def test_propers(self, execute_task): """Series plugin: 
proper accepted after episode is downloaded""" # start with normal download ... task = execute_task('propers_1') assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \ 'Test.S01E01-FlexGet should have been accepted' # rejects downloaded task = execute_task('propers_1') assert task.find_entry('rejected', title='Test.S01E01.720p-FlexGet'), \ 'Test.S01E01-FlexGet should have been rejected' # accepts proper task = execute_task('propers_2') assert task.find_entry('accepted', title='Test.S01E01.720p.Proper-FlexGet'), \ 'new undownloaded proper should have been accepted' # reject downloaded proper task = execute_task('propers_2') assert task.find_entry('rejected', title='Test.S01E01.720p.Proper-FlexGet'), \ 'downloaded proper should have been rejected' # reject episode that has been downloaded normally and with proper task = execute_task('propers_3') assert task.find_entry('rejected', title='Test.S01E01.FlexGet'), \ 'Test.S01E01.FlexGet should have been rejected' def test_proper_available(self, execute_task): """Series plugin: proper available immediately""" task = execute_task('proper_at_first') assert task.find_entry('accepted', title='Foobar.S01E01.720p.proper.FlexGet'), \ 'Foobar.S01E01.720p.proper.FlexGet should have been accepted' def test_proper_upgrade(self, execute_task): """Series plugin: real proper after proper""" task = execute_task('proper_upgrade_1') assert task.find_entry('accepted', title='Test.S02E01.hdtv.proper') task = execute_task('proper_upgrade_2') assert task.find_entry('accepted', title='Test.S02E01.hdtv.real.proper') def test_anime_proper(self, execute_task): task = execute_task('anime_proper_1') assert task.accepted, 'ep should have accepted' task = execute_task('anime_proper_2') assert task.accepted, 'proper ep should have been accepted' def test_fastsub_proper(self, execute_task): task = execute_task('fastsub_proper_1') assert task.accepted, 'ep should have accepted' task = execute_task('fastsub_proper_2') assert 
task.accepted, 'proper ep should have been accepted' task = execute_task('fastsub_proper_3') assert task.accepted, 'proper ep should have been accepted' task = execute_task('fastsub_proper_4') assert task.accepted, 'proper ep should have been accepted' class TestSimilarNames(object): # hmm, not very good way to test this .. seriesparser should be tested alone? config = """ templates: global: parsing: series: {{parser}} tasks: test: mock: - {title: 'FooBar.S03E01.DSR-FlexGet'} - {title: 'FooBar: FirstAlt.S02E01.DSR-FlexGet'} - {title: 'FooBar: SecondAlt.S01E01.DSR-FlexGet'} series: - FooBar - 'FooBar: FirstAlt' - 'FooBar: SecondAlt' test_ambiguous: mock: - title: Foo.2.2 series: - Foo: identified_by: sequence - Foo 2: identified_by: sequence """ def test_names(self, execute_task): """Series plugin: similar namings""" task = execute_task('test') assert task.find_entry('accepted', title='FooBar.S03E01.DSR-FlexGet'), 'Standard failed?' assert task.find_entry('accepted', title='FooBar: FirstAlt.S02E01.DSR-FlexGet'), 'FirstAlt failed' assert task.find_entry('accepted', title='FooBar: SecondAlt.S01E01.DSR-FlexGet'), 'SecondAlt failed' def test_ambiguous(self, execute_task): task = execute_task('test_ambiguous') # In the event of ambiguous match, more specific one should be chosen assert task.find_entry('accepted', title='Foo.2.2')['series_name'] == 'Foo 2' class TestDuplicates(object): config = """ templates: global: parsing: series: {{parser}} # just cleans log a bit .. 
disable: - seen tasks: test_dupes: mock: - {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[FlexGet]'} - {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[ASDF]'} series: - Foo 2009 test_1: mock: - {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[FlexGet]'} - {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[ASDF]'} series: - foo bar test_2: mock: - {title: 'Foo.Bar.S02E04.XviD-2HD[ASDF]'} - {title: 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]'} - {title: 'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]'} - {title: 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]'} - {title: 'Foo.Bar.S02E03.HDTV.XviD-FlexGet'} - {title: 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY'} series: - foo bar test_true_dupes: mock: - {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'} - {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'} - {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'} series: - dupe """ def test_dupes(self, execute_task): """Series plugin: dupes with same quality""" task = execute_task('test_dupes') assert len(task.accepted) == 1, 'accepted both' def test_true_dupes(self, execute_task): """Series plugin: true duplicate items""" task = execute_task('test_true_dupes') assert len(task.accepted) == 1, 'should have accepted (only) one' def test_downloaded(self, execute_task): """Series plugin: multiple downloaded and new episodes are handled correctly""" task = execute_task('test_1') task = execute_task('test_2') # these should be accepted accepted = ['Foo.Bar.S02E03.HDTV.XviD-FlexGet', 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY'] for item in accepted: assert task.find_entry('accepted', title=item), \ '%s should have been accepted' % item # these should be rejected rejected = ['Foo.Bar.S02E04.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]', 'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]'] for item in rejected: assert task.find_entry('rejected', title=item), \ '%s should have been rejected' % item class TestQualities(object): config = """ templates: global: parsing: series: {{parser}} disable: builtins series: - FooBar: qualities: - SDTV - 
720p - 1080p - FooBaz: upgrade: yes qualities: - hdtv - hr - 720p - FooBum: quality: 720p-1080i upgrade: yes - FooD: target: 720p timeframe: 0 hours upgrade: yes tasks: test_1: mock: - {title: 'FooBar.S01E01.PDTV-FlexGet'} - {title: 'FooBar.S01E01.1080p-FlexGet'} - {title: 'FooBar.S01E01.HR-FlexGet'} test_2: mock: - {title: 'FooBar.S01E01.720p-FlexGet'} propers_1: mock: - {title: 'FooBar.S01E02.720p-FlexGet'} propers_2: mock: - {title: 'FooBar.S01E02.720p.Proper-FlexGet'} upgrade_1: mock: - {title: 'FooBaz.S01E02.pdtv-FlexGet'} - {title: 'FooBaz.S01E02.HR-FlexGet'} upgrade_2: mock: - {title: 'FooBaz.S01E02.720p-FlexGet'} - {title: 'FooBaz.S01E02.1080p-FlexGet'} upgrade_3: mock: - {title: 'FooBaz.S01E02.hdtv-FlexGet'} - {title: 'FooBaz.S01E02.720p rc-FlexGet'} quality_upgrade_1: mock: - title: FooBum.S03E01.1080p # too high - title: FooBum.S03E01.hdtv # too low - title: FooBum.S03E01.720p # in range quality_upgrade_2: mock: - title: FooBum.S03E01.1080i # should be upgraded to - title: FooBum.S03E01.720p-ver2 # Duplicate ep target_1: mock: - title: Food.S06E11.hdtv target_2: mock: - title: Food.S06E11.1080p - title: Food.S06E11.720p """ def test_qualities(self, execute_task): """Series plugin: qualities""" task = execute_task('test_1') assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet'), \ 'Didn''t accept FooBar.S01E01.PDTV-FlexGet' assert task.find_entry('accepted', title='FooBar.S01E01.1080p-FlexGet'), \ 'Didn''t accept FooBar.S01E01.1080p-FlexGet' assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \ 'Accepted FooBar.S01E01.HR-FlexGet' task = execute_task('test_2') assert task.find_entry('accepted', title='FooBar.S01E01.720p-FlexGet'), \ 'Didn''t accept FooBar.S01E01.720p-FlexGet' # test that it rejects them afterwards task = execute_task('test_1') assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-FlexGet'), \ 'Didn\'t reject FooBar.S01E01.PDTV-FlexGet' assert task.find_entry('rejected', 
title='FooBar.S01E01.1080p-FlexGet'), \ 'Didn\'t reject FooBar.S01E01.1080p-FlexGet' assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \ 'Accepted FooBar.S01E01.HR-FlexGet' def test_propers(self, execute_task): """Series plugin: qualities + propers""" task = execute_task('propers_1') assert task.accepted task = execute_task('propers_2') assert task.accepted, 'proper not accepted' task = execute_task('propers_2') assert not task.accepted, 'proper accepted again' def test_qualities_upgrade(self, execute_task): task = execute_task('upgrade_1') assert task.find_entry('accepted', title='FooBaz.S01E02.HR-FlexGet'), 'HR quality should be accepted' assert len(task.accepted) == 1, 'Only best quality should be accepted' task = execute_task('upgrade_2') assert task.find_entry('accepted', title='FooBaz.S01E02.720p-FlexGet'), '720p quality should be accepted' assert len(task.accepted) == 1, 'Only best quality should be accepted' task = execute_task('upgrade_3') assert not task.accepted, 'Should not have accepted worse qualities' def test_quality_upgrade(self, execute_task): task = execute_task('quality_upgrade_1') assert len(task.accepted) == 1, 'Only one ep should have passed quality filter' assert task.find_entry('accepted', title='FooBum.S03E01.720p') task = execute_task('quality_upgrade_2') assert len(task.accepted) == 1, 'one ep should be valid upgrade' assert task.find_entry('accepted', title='FooBum.S03E01.1080i') def test_target_upgrade(self, execute_task): task = execute_task('target_1') assert len(task.accepted) == 1, 'Only one ep should have been grabbed' assert task.find_entry('accepted', title='Food.S06E11.hdtv') task = execute_task('target_2') assert len(task.accepted) == 1, 'one ep should be valid upgrade' assert task.find_entry('accepted', title='Food.S06E11.720p'), 'Should upgrade to `target`' class TestIdioticNumbering(object): config = """ templates: global: parsing: series: {{parser}} series: - FooBar: identified_by: ep tasks: test_1: 
mock: - {title: 'FooBar.S01E01.PDTV-FlexGet'} test_2: mock: - {title: 'FooBar.102.PDTV-FlexGet'} """ def test_idiotic(self, execute_task): """Series plugin: idiotic numbering scheme""" task = execute_task('test_1') task = execute_task('test_2') entry = task.find_entry(title='FooBar.102.PDTV-FlexGet') assert entry, 'entry not found?' assert entry['series_season'] == 1, 'season not detected' assert entry['series_episode'] == 2, 'episode not detected' class TestNormalization(object): config = """ templates: global: parsing: series: {{parser}} disable: [seen] tasks: test_1: mock: - {title: 'FooBar.S01E01.PDTV-FlexGet'} series: - FOOBAR test_2: mock: - {title: 'FooBar.S01E01.PDTV-aoeu'} series: - foobar test_3: mock: - title: Foo bar & co 2012.s01e01.sdtv.a series: - foo bar & co 2012 test_4: mock: - title: Foo bar & co 2012.s01e01.sdtv.b series: - Foo/Bar and Co. (2012) """ def test_capitalization(self, execute_task): """Series plugin: configuration capitalization""" task = execute_task('test_1') assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet') task = execute_task('test_2') assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-aoeu') def test_normalization(self, execute_task): task = execute_task('test_3') assert task.find_entry('accepted', title='Foo bar & co 2012.s01e01.sdtv.a') task = execute_task('test_4') assert task.find_entry('rejected', title='Foo bar & co 2012.s01e01.sdtv.b') class TestMixedNumbering(object): config = """ templates: global: parsing: series: {{parser}} series: - FooBar: identified_by: ep tasks: test_1: mock: - {title: 'FooBar.S03E07.PDTV-FlexGet'} test_2: mock: - {title: 'FooBar.0307.PDTV-FlexGet'} """ def test_mixednumbering(self, execute_task): """Series plugin: Mixed series numbering""" task = execute_task('test_1') assert task.find_entry('accepted', title='FooBar.S03E07.PDTV-FlexGet') task = execute_task('test_2') assert task.find_entry('rejected', title='FooBar.0307.PDTV-FlexGet') class TestExact(object): 
config = """ templates: global: parsing: series: {{parser}} tasks: auto: mock: - {title: 'ABC.MIAMI.S01E01.PDTV-FlexGet'} - {title: 'ABC.S01E01.PDTV-FlexGet'} - {title: 'ABC.LA.S01E01.PDTV-FlexGet'} series: - ABC - ABC LA - ABC Miami name_regexp: mock: - title: show s09e05 hdtv - title: show a s09e06 hdtv series: - show: name_regexp: ^show exact: yes date: mock: - title: date show 04.01.2011 hdtv - title: date show b 04.02.2011 hdtv series: - date show: exact: yes """ def test_auto(self, execute_task): """Series plugin: auto enable exact""" task = execute_task('auto') assert task.find_entry('accepted', title='ABC.S01E01.PDTV-FlexGet') assert task.find_entry('accepted', title='ABC.LA.S01E01.PDTV-FlexGet') assert task.find_entry('accepted', title='ABC.MIAMI.S01E01.PDTV-FlexGet') def test_with_name_regexp(self, execute_task): task = execute_task('name_regexp') assert task.find_entry('accepted', title='show s09e05 hdtv') assert not task.find_entry('accepted', title='show a s09e06 hdtv') def test_dated_show(self, execute_task): task = execute_task('date') assert task.find_entry('accepted', title='date show 04.01.2011 hdtv') assert not task.find_entry('accepted', title='date show b 04.02.2011 hdtv') class TestTimeframe(object): config = """ templates: global: parsing: series: {{parser}} series: - test: timeframe: 5 hours target: 720p tasks: test_no_waiting: mock: - {title: 'Test.S01E01.720p-FlexGet'} test_stop_waiting_1: mock: - {title: 'Test.S01E02.HDTV-FlexGet'} test_stop_waiting_2: mock: - {title: 'Test.S01E02.720p-FlexGet'} test_proper_afterwards: mock: - {title: 'Test.S01E02.720p.Proper-FlexGet'} test_expires: mock: - {title: 'Test.S01E03.pdtv-FlexGet'} test_min_max_fail: series: - mm test: timeframe: 5 hours target: 720p quality: hdtv+ <=720p mock: - {title: 'MM Test.S01E02.pdtv-FlexGet'} - {title: 'MM Test.S01E02.1080p-FlexGet'} test_min_max_pass: series: - mm test: timeframe: 5 hours target: 720p quality: hdtv+ <=720p mock: - {title: 'MM 
Test.S01E02.pdtv-FlexGet'} - {title: 'MM Test.S01E02.hdtv-FlexGet'} - {title: 'MM Test.S01E02.1080p-FlexGet'} test_qualities_fail: series: - q test: timeframe: 5 hours qualities: - hdtv - 1080p mock: - {title: 'Q Test.S01E02.pdtv-FlexGet'} - {title: 'Q Test.S01E02.1080p-FlexGet'} test_qualities_pass: series: - q test: timeframe: 5 hours qualities: - sdtv - 720p mock: - {title: 'Q Test.S01E02.1080p-FlexGet'} test_with_quality_1: series: - q test: timeframe: 5 hours quality: hdtv+ target: 720p mock: - title: q test s01e01 pdtv 720p test_with_quality_2: series: - q test: timeframe: 5 hours quality: hdtv+ target: 720p mock: - title: q test s01e01 hdtv """ def test_no_waiting(self, execute_task): """Series plugin: no timeframe waiting needed""" task = execute_task('test_no_waiting') assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \ '720p not accepted immediattely' def test_stop_waiting(self, execute_task): """Series plugin: timeframe quality appears, stop waiting, proper appears""" task = execute_task('test_stop_waiting_1') assert task.entries and not task.accepted task = execute_task('test_stop_waiting_2') assert task.find_entry('accepted', title='Test.S01E02.720p-FlexGet'), \ '720p should have caused stop waiting' task = execute_task('test_proper_afterwards') assert task.find_entry('accepted', title='Test.S01E02.720p.Proper-FlexGet'), \ 'proper should have been accepted' def test_expires(self, execute_task): """Series plugin: timeframe expires""" # first execution should not accept anything task = execute_task('test_expires') assert not task.accepted # let 3 hours pass age_series(hours=3) task = execute_task('test_expires') assert not task.accepted, 'expired too soon' # let another 3 hours pass, should expire now! 
age_series(hours=6) task = execute_task('test_expires') assert task.accepted, 'timeframe didn\'t expire' def test_min_max_fail(self, execute_task): task = execute_task('test_min_max_fail') assert not task.accepted # Let 6 hours pass, timeframe should not even been started, as pdtv doesn't meet min_quality age_series(hours=6) task = execute_task('test_min_max_fail') assert task.entries and not task.accepted def test_min_max_pass(self, execute_task): task = execute_task('test_min_max_pass') assert not task.accepted # Let 6 hours pass, timeframe should expire and accept hdtv copy age_series(hours=6) task = execute_task('test_min_max_pass') assert task.find_entry('accepted', title='MM Test.S01E02.hdtv-FlexGet') assert len(task.accepted) == 1 def test_qualities_fail(self, execute_task): task = execute_task('test_qualities_fail') assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet'), \ 'should have accepted wanted quality' assert len(task.accepted) == 1 # Let 6 hours pass, timeframe should not even been started, as we already have one of our qualities age_series(hours=6) task = execute_task('test_qualities_fail') assert task.entries and not task.accepted def test_qualities_pass(self, execute_task): task = execute_task('test_qualities_pass') assert not task.accepted, 'None of the qualities should have matched' # Let 6 hours pass, timeframe should expire and accept 1080p copy age_series(hours=6) task = execute_task('test_qualities_pass') assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet') assert len(task.accepted) == 1 def test_with_quality(self, execute_task): task = execute_task('test_with_quality_1') assert not task.accepted, 'Entry does not pass quality' age_series(hours=6) # Entry from first test feed should not pass quality task = execute_task('test_with_quality_1') assert not task.accepted, 'Entry does not pass quality' # Timeframe should not yet have started task = execute_task('test_with_quality_2') assert not task.accepted, 
'Timeframe should not yet have passed' age_series(hours=6) task = execute_task('test_with_quality_2') assert task.accepted, 'Timeframe should have passed' class TestBacklog(object): config = """ templates: global: parsing: series: {{parser}} tasks: backlog: mock: - {title: 'Test.S01E01.hdtv-FlexGet'} series: - test: {timeframe: 6 hours} """ def testBacklog(self, manager, execute_task): """Series plugin: backlog""" task = execute_task('backlog') assert task.entries and not task.accepted, 'no entries at the start' # simulate test going away from the task del (manager.config['tasks']['backlog']['mock']) age_series(hours=12) task = execute_task('backlog') assert task.accepted, 'backlog is not injecting episodes' class TestManipulate(object): """Tests that it's possible to manipulate entries before they're parsed by series plugin""" config = """ templates: global: parsing: series: {{parser}} tasks: test_1: mock: - {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'} series: - test test_2: mock: - {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'} series: - test manipulate: - title: extract: '^PREFIX: (.*)' """ def testManipulate(self, execute_task): """Series plugin: test manipulation priority""" # should not work with the prefix task = execute_task('test_1') assert not task.accepted, 'series accepted even with prefix?' assert not task.accepted, 'series rejecte even with prefix?' 
task = execute_task('test_2') assert task.accepted, 'manipulate failed to pre-clean title' class TestFromGroup(object): config = """ templates: global: parsing: series: {{parser}} tasks: test: mock: - {title: '[Ignored] Test 12'} - {title: '[FlexGet] Test 12'} - {title: 'Test.13.HDTV-Ignored'} - {title: 'Test.13.HDTV-FlexGet'} - {title: 'Test.14.HDTV-Name'} - {title: 'Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3'} - {title: 'Test :: h264 10-bit | Softsubs (Ignore) | Episode 3'} series: - test: {from_group: [Name, FlexGet]} """ def test_from_group(self, execute_task): """Series plugin: test from_group""" task = execute_task('test') assert task.find_entry('accepted', title='[FlexGet] Test 12') assert task.find_entry('accepted', title='Test.13.HDTV-FlexGet') assert task.find_entry('accepted', title='Test.14.HDTV-Name') assert task.find_entry('accepted', title='Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3') class TestBegin(object): config = """ templates: global: parsing: series: {{parser}} eps: mock: - {title: 'WTest.S02E03.HDTV.XViD-FlexGet'} - {title: 'W2Test.S02E03.HDTV.XViD-FlexGet'} tasks: season_id_test: template: eps series: - WTest: begin: S02 - W2Test: begin: S03 before_ep_test: template: eps series: - WTest: begin: S02E05 - W2Test: begin: S03E02 after_ep_test: template: eps series: - WTest: begin: S02E03 - W2Test: begin: S02E01 before_seq_test: mock: - title: WTest.1.HDTV.XViD-FlexGet - title: W2Test.13.HDTV.XViD-FlexGet series: - WTest: begin: 2 - W2Test: begin: 120 after_seq_test: mock: - title: WTest.2.HDTV.XViD-FlexGet - title: W2Test.123.HDTV.XViD-FlexGet series: - WTest: begin: 2 - W2Test: begin: 120 before_date_test: mock: - title: WTest.2001.6.6.HDTV.XViD-FlexGet - title: W2Test.12.30.2012.HDTV.XViD-FlexGet series: - WTest: begin: '2009-05-05' - W2Test: begin: '2012-12-31' after_date_test: mock: - title: WTest.2009.5.5.HDTV.XViD-FlexGet - title: W2Test.1.1.2013.HDTV.XViD-FlexGet series: - WTest: begin: '2009-05-05' - W2Test: begin: 
'2012-12-31' test_advancement1: mock: - title: WTest.S01E01 series: - WTest test_advancement2: mock: - title: WTest.S03E01 series: - WTest test_advancement3: mock: - title: WTest.S03E01 series: - WTest: begin: S03E01 """ def test_season_id(self, execute_task): task = execute_task('season_id_test') assert task.find_entry('accepted', title='WTest.S02E03.HDTV.XViD-FlexGet'), \ 'Entry should have been accepted, it\'s after the begin episode' assert task.find_entry('rejected', title='W2Test.S02E03.HDTV.XViD-FlexGet'), \ 'Entry should have been rejected, it\'s before the begin episode' def test_before_ep(self, execute_task): task = execute_task('before_ep_test') assert not task.accepted, 'No entries should have been accepted, they are before the begin episode' def test_after_ep(self, execute_task): task = execute_task('after_ep_test') assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode' def test_before_seq(self, execute_task): task = execute_task('before_seq_test') assert not task.accepted, 'No entries should have been accepted, they are before the begin episode' def test_after_seq(self, execute_task): task = execute_task('after_seq_test') assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode' def test_before_date(self, execute_task): task = execute_task('before_date_test') assert not task.accepted, 'No entries should have been accepted, they are before the begin episode' def test_after_date(self, execute_task): task = execute_task('after_date_test') assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode' def test_advancement(self, execute_task): # Put S01E01 into the database as latest download task = execute_task('test_advancement1') assert task.accepted # Just verify regular ep advancement would block S03E01 task = execute_task('test_advancement2') assert not task.accepted, 'Episode advancement should have 
blocked' # Make sure ep advancement doesn't block it when we've set begin to that ep task = execute_task('test_advancement3') assert task.accepted, 'Episode should have been accepted' class TestSeriesPremiere(object): config = """ templates: global: parsing: series: {{parser}} metainfo_series: yes series_premiere: yes tasks: test: mock: - {title: 'Foobar.S01E01.PDTV-FlexGet'} - {title: 'Foobar.S01E11.1080p-FlexGet'} - {title: 'Foobar.S02E02.HR-FlexGet'} """ def testOnlyPremieres(self, execute_task): """Test series premiere""" task = execute_task('test') assert task.find_entry('accepted', title='Foobar.S01E01.PDTV-FlexGet', series_name='Foobar', series_season=1, series_episode=1), 'Series premiere should have been accepted' assert len(task.accepted) == 1 # TODO: Add more tests, test interaction with series plugin and series_exists class TestImportSeries(object): config = """ templates: global: parsing: series: {{parser}} tasks: timeframe_max: configure_series: settings: propers: 12 hours target: 720p timeframe: 5 minutes quality: "<=720p <=bluray" from: mock: - title: the show mock: - title: the show s03e02 1080p bluray - title: the show s03e02 hdtv test_import_altnames: configure_series: from: mock: - {title: 'the show', configure_series_alternate_name: 'le show'} mock: - title: le show s03e03 """ def test_timeframe_max(self, execute_task): """Tests configure_series as well as timeframe with max_quality.""" task = execute_task('timeframe_max') assert not task.accepted, 'Entry shouldnot have been accepted on first run.' age_series(minutes=6) task = execute_task('timeframe_max') assert task.find_entry('accepted', title='the show s03e02 hdtv'), \ 'hdtv should have been accepted after timeframe.' def test_import_altnames(self, execute_task): """Tests configure_series with alternate_name.""" task = execute_task('test_import_altnames') entry = task.find_entry(title='le show s03e03') assert entry.accepted, 'entry matching series alternate name should have been accepted.' 
assert entry['series_name'] == 'the show', 'entry series should be set to the main name' class TestIDTypes(object): config = """ templates: global: parsing: series: {{parser}} tasks: all_types: series: - episode - seasonless episode - date - sequence - stupid id: id_regexp: (\\dcat) mock: - title: episode S03E04 - title: episode 3x05 - title: date 2011.4.3 other crap hdtv - title: date 4.5.11 - title: sequence 003 - title: sequence 4 - title: stupid id 3cat - title: seasonless episode e01 """ def test_id_types(self, execute_task): task = execute_task('all_types') for entry in task.entries: assert entry['series_name'], '%s not parsed by series plugin' % entry['title'] assert entry['series_id_type'] in entry['series_name'] class TestCaseChange(object): config = """ templates: global: parsing: series: {{parser}} tasks: first: mock: - title: theshow s02e04 series: - TheShow second: mock: - title: thEshoW s02e04 other series: - THESHOW """ def test_case_change(self, execute_task): task = execute_task('first') # Make sure series_name uses case from config, make sure episode is accepted assert task.find_entry('accepted', title='theshow s02e04', series_name='TheShow') task = execute_task('second') # Make sure series_name uses new case from config, make sure ep is rejected because we have a copy assert task.find_entry('rejected', title='thEshoW s02e04 other', series_name='THESHOW') class TestInvalidSeries(object): config = """ templates: global: parsing: series: {{parser}} tasks: blank: mock: - title: whatever series: - '': quality: 720p """ def test_blank_series(self, execute_task): """Make sure a blank series doesn't crash.""" task = execute_task('blank') assert not task.aborted, 'Task should not have aborted' class TestDoubleEps(object): config = """ templates: global: parsing: series: {{parser}} tasks: test_double1: mock: - title: double S01E02-E03 series: - double test_double2: mock: - title: double S01E03 series: - double """ def test_double(self, execute_task): # 
First should be accepted task = execute_task('test_double1') assert task.find_entry('accepted', title='double S01E02-E03') # We already got ep 3 as part of double, should not be accepted task = execute_task('test_double2') assert not task.find_entry('accepted', title='double S01E03') class TestAutoLockin(object): config = """ templates: global: parsing: series: {{parser}} series: - FooBar - BarFood tasks: try_date_1: mock: - title: FooBar 2012-10-10 HDTV lock_ep: mock: - title: FooBar S01E01 HDTV - title: FooBar S01E02 HDTV - title: FooBar S01E03 HDTV try_date_2: mock: - title: FooBar 2012-10-11 HDTV test_special_lock: mock: - title: BarFood christmas special HDTV - title: BarFood easter special HDTV - title: BarFood haloween special HDTV - title: BarFood bad special HDTV try_reg: mock: - title: BarFood S01E01 HDTV - title: BarFood 2012-9-9 HDTV """ def test_ep_lockin(self, execute_task): task = execute_task('try_date_1') assert task.find_entry('accepted', title='FooBar 2012-10-10 HDTV'), \ 'dates should be accepted before locked in on an identifier type' task = execute_task('lock_ep') assert len(task.accepted) == 3, 'All ep mode episodes should have been accepted' task = execute_task('try_date_2') assert not task.find_entry('accepted', title='FooBar 2012-10-11 HDTV'), \ 'dates should not be accepted after series has locked in to ep mode' def test_special_lock(self, execute_task): """Make sure series plugin does not lock in to type 'special'""" task = execute_task('test_special_lock') assert len(task.accepted) == 4, 'All specials should have been accepted' task = execute_task('try_reg') assert len(task.accepted) == 2, 'Specials should not have caused episode type lock-in' class TestReruns(object): config = """ templates: global: parsing: series: {{parser}} tasks: one_accept: mock: - title: the show s01e01 - title: the show s01e01 different series: - the show rerun: 2 mock_output: yes """ def test_one_accept(self, execute_task): task = execute_task('one_accept') 
assert len(task.mock_output) == 1, \ 'should have accepted once!: %s' % ', '.join(e['title'] for e in task.mock_output) class TestSpecials(object): config = """ templates: global: parsing: series: {{parser}} tasks: preferspecials: mock: - title: the show s03e04 special series: - the show: prefer_specials: True nopreferspecials: mock: - title: the show s03e05 special series: - the show: prefer_specials: False assumespecial: mock: - title: the show SOMETHING series: - the show: assume_special: True noassumespecial: mock: - title: the show SOMETHING series: - the show: assume_special: False special_looks_like_season_pack: mock: - title: Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget series: - Doctor Who """ def test_prefer_specials(self, execute_task): # Test that an entry matching both ep and special is flagged as a special when prefer_specials is True task = execute_task('preferspecials') entry = task.find_entry('accepted', title='the show s03e04 special') assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged a special was not.' def test_not_prefer_specials(self, execute_task): # Test that an entry matching both ep and special is flagged as an ep when prefer_specials is False task = execute_task('nopreferspecials') entry = task.find_entry('accepted', title='the show s03e05 special') assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged a special was.' def test_assume_special(self, execute_task): # Test that an entry with no ID found gets flagged as a special and accepted if assume_special is True task = execute_task('assumespecial') entry = task.find_entry(title='the show SOMETHING') assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged as a special was not.' assert entry.accepted, 'Entry which should have been accepted was not.' 
def test_not_assume_special(self, execute_task): # Test that an entry with no ID found does not get flagged as a special and accepted if assume_special is False task = execute_task('noassumespecial') entry = task.find_entry(title='the show SOMETHING') assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged as a special was.' assert not entry.accepted, 'Entry which should not have been accepted was.' def test_special_looks_like_a_season_pack(self, execute_task): """Make sure special episodes are not being parsed as season packs""" task = execute_task('special_looks_like_season_pack') entry = task.find_entry(title='Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget') assert entry.get('series_id_type') == 'special', 'Entry should have been flagged as a special' assert not entry['season_pack'], 'Entry should not have been flagged as a season pack' assert entry.accepted, 'Entry which should not have been accepted was.' class TestAlternateNames(object): config = """ templates: global: parsing: series: {{parser}} tasks: alternate_name: series: - Some Show: begin: S01E01 alternate_name: Other Show another_alternate_name: series: - Some Show: alternate_name: Good Show set_other_alternate_name: mock: - title: Third.Show.S01E01 - title: Other.Show.S01E01 series: - Some Show: alternate_name: Third Show rerun: 0 duplicate_names_in_different_series: series: - First Show: begin: S01E01 alternate_name: Third Show - Second Show: begin: S01E01 alternate_name: Third Show """ def test_set_alternate_name(self, execute_task): # Tests that old alternate names are not kept in the database. task = execute_task('alternate_name') task = execute_task('set_other_alternate_name') assert task.find_entry('accepted', title='Third.Show.S01E01'), \ 'A new alternate name should have been associated with the series.' assert task.find_entry('undecided', title='Other.Show.S01E01'), \ 'The old alternate name for the series is still present.' 
def test_duplicate_alternate_names_in_different_series(self, execute_task): with pytest.raises(TaskAbort) as ex: execute_task('duplicate_names_in_different_series') # only test that the reason is about alternate names, not which names. reason = 'Error adding alternate name' assert ex.value.reason[:27] == reason, \ 'Wrong reason for task abortion. Should be about duplicate alternate names.' # Test the DB behaves like we expect ie. alternate names cannot def test_alternate_names_are_removed_from_db(self, execute_task): from flexget.manager import Session with Session() as session: execute_task('alternate_name') # test the current state of alternate names assert len(session.query(db.AlternateNames).all()) == 1, 'There should be one alternate name present.' assert session.query(db.AlternateNames).first().alt_name == 'Other Show', \ 'Alternate name should have been Other Show.' # run another task that overwrites the alternate names execute_task('another_alternate_name') assert len(session.query(db.AlternateNames).all()) == 1, \ 'The old alternate name should have been removed from the database.' assert session.query(db.AlternateNames).first().alt_name == 'Good Show', \ 'The alternate name in the database should be the new one, Good Show.' 
class TestCLI(object): config = """ templates: global: parsing: series: {{parser}} tasks: learn_series: series: - Some Show - Other Show mock: - title: Some Series S01E01 - title: Other Series S01E02 """ def test_series_list(self, manager, execute_task): """Very rudimentary test, mostly makes sure this doesn't crash.""" execute_task('learn_series') options = get_parser().parse_args(['series', 'list', '--porcelain']) buffer = StringIO() with capture_output(buffer, loglevel='error'): manager.handle_cli(options=options) lines = buffer.getvalue().split('\n') assert all(any(line.lstrip().startswith(series) for line in lines) for series in ['Some Show', 'Other Show']) class TestSeriesRemove(object): config = """ templates: global: parsing: series: {{parser}} tasks: get_episode: seen: local series: - My Show mock: - title: My Show S01E01 1080p - title: My Show S01E01 720p remove_episode: seen: no mock: - title: My Show S01E01 series_name: My Show series_id: S01E01 accept_all: yes series_remove: yes """ def test_remove_episode(self, execute_task): task = execute_task('get_episode') assert len(task.accepted) == 1 first_rls = task.accepted[0] task = execute_task('get_episode') assert not task.accepted, 'series plugin duplicate blocking not working?' 
task = execute_task('remove_episode') task = execute_task('get_episode') assert len(task.accepted) == 1, 'new release not accepted after forgetting ep' assert task.accepted[0] != first_rls, 'same release accepted on second run' class TestSeriesSeasonPack(object): _config = """ templates: global: parsing: series: internal series: - foo: season_packs: yes - bar: season_packs: yes tracking: backfill - baz: season_packs: 3 - boo: season_packs: always - bla: season_packs: only - bro: season_packs: threshold: 1 reject_eps: yes tasks: multiple_formats: mock: - title: foo.s01.720p-flexget - title: foo.2xALL.720p-flexget foo_s01: mock: - title: foo.s01.720p-flexget foo_s02: mock: - title: foo.s02.720p-flexget foo_s03: mock: - title: foo.s03.720p-flexget foo_s01ep1: mock: - title: foo.s01e1.720p-flexget foo_s02ep1: mock: - title: foo.s02e1.720p-flexget season_pack_priority: mock: - title: foo.s01e1.720p-flexget - title: foo.s01e2.720p-flexget - title: foo.s01e3.720p-flexget - title: foo.s01e4.720p-flexget - title: foo.s01e5.720p-flexget - title: foo.s01.720p-flexget respect_begin: series: - bar: begin: s02e01 season_packs: yes mock: - title: bar.s01.720p-flexget - title: bar.s02.720p-flexget several_seasons: mock: - title: foo.s03.720p-flexget - title: foo.s07.720p-flexget - title: foo.s03.1080p-flexget - title: foo.s06.720p-flexget - title: foo.s09.720p-flexget test_backfill_1: mock: - title: bar.s03.720p-flexget test_backfill_2: mock: - title: bar.s02.720p-flexget test_backfill_3: mock: - title: bar.s03e01.720p-flexget test_backfill_4: mock: - title: bar.s02e01.1080p-flexget test_specific_season_pack_threshold_1: mock: - title: baz.s01e01.720p-flexget - title: baz.s01e02.720p-flexget - title: baz.s01e03.720p-flexget test_specific_season_pack_threshold_2: mock: - title: baz.s01.720p-flexget test_specific_season_pack_threshold_3: mock: - title: baz.s01e01.720p-flexget - title: baz.s01e02.720p-flexget - title: baz.s01e03.720p-flexget - title: baz.s01e04.720p-flexget 
test_always_get_season_pack_1: mock: - title: boo.s01e01.720p-flexget - title: boo.s01e02.720p-flexget - title: boo.s01e03.720p-flexget - title: boo.s01e04.720p-flexget test_always_get_season_pack_2: mock: - title: boo.s01.720p-flexget test_only_get_season_packs: mock: - title: bla.s01.720p-flexget - title: bla.s02e01.720p-flexget test_proper_season_pack: mock: - title: foo.s01.720p-flexget - title: foo.s01.720p.proper-flexget test_proper_season_pack_2: mock: - title: foo.s01.720p-flexget test_proper_season_pack_3: mock: - title: foo.s01.720p.proper-flexget test_all_series: mock: - title: show.name.s01.720p.HDTV-Group all_series: season_packs: yes test_with_dict_config_1: mock: - title: bro.s01e01.720p.HDTV-Flexget - title: bro.s01.720p.HDTV-Flexget test_with_dict_config_2: mock: - title: bro.s02.720p.HDTV-Flexget """ @pytest.fixture() def config(self): """Overrides outer config fixture since season pack support does not work with guessit parser""" return self._config def test_season_pack_simple(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 def test_basic_tracking(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 task = execute_task('foo_s01ep1') assert len(task.accepted) == 0 task = execute_task('foo_s02ep1') assert len(task.accepted) == 1 def test_season_pack_takes_priority(self, execute_task): task = execute_task('season_pack_priority') assert len(task.accepted) == 1 entry = task.find_entry(title='foo.s01.720p-flexget') assert entry.accepted def test_respect_begin(self, execute_task): task = execute_task('respect_begin') assert len(task.accepted) == 1 entry = task.find_entry(title='bar.s02.720p-flexget') assert entry.accepted def test_tracking_rules_old_eps(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 task = execute_task('foo_s02') assert len(task.accepted) == 1 task = execute_task('foo_s01ep1') assert not task.accepted def 
test_tracking_rules_old_season(self, execute_task): task = execute_task('foo_s02') assert len(task.accepted) == 1 task = execute_task('foo_s01') assert not task.accepted def test_tracking_rules_new_season(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 task = execute_task('foo_s03') assert not task.accepted def test_several_seasons(self, execute_task): task = execute_task('several_seasons') assert len(task.accepted) == 4 def test_multiple_formats(self, execute_task): task = execute_task('multiple_formats') assert len(task.accepted) == 2 def test_backfill(self, execute_task): task = execute_task('test_backfill_1') assert len(task.accepted) == 1 task = execute_task('test_backfill_2') assert len(task.accepted) == 1 task = execute_task('test_backfill_3') assert not task.accepted task = execute_task('test_backfill_4') assert not task.accepted def test_default_threshold(self, execute_task): task = execute_task('foo_s01ep1') assert len(task.accepted) == 1 task = execute_task('foo_s01') assert len(task.accepted) == 0 def test_specific_season_pack_threshold_positive(self, execute_task): task = execute_task('test_specific_season_pack_threshold_1') assert len(task.accepted) == 3 task = execute_task('test_specific_season_pack_threshold_2') assert len(task.accepted) == 1 def test_specific_season_pack_threshold_negative(self, execute_task): task = execute_task('test_specific_season_pack_threshold_3') assert len(task.accepted) == 4 task = execute_task('test_specific_season_pack_threshold_2') assert not task.accepted def test_loose_threshold(self, execute_task): task = execute_task('test_always_get_season_pack_1') assert len(task.accepted) == 4 task = execute_task('test_always_get_season_pack_2') assert len(task.accepted) == 1 def test_exclusive(self, execute_task): task = execute_task('test_only_get_season_packs') assert len(task.accepted) == 1 entry = task.find_entry(title='bla.s01.720p-flexget') assert entry.accepted def 
test_proper_season_pack(self, execute_task): """Series plugin: proper available immediately""" task = execute_task('test_proper_season_pack') assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget') def test_proper_season_pack_2(self, execute_task): """Series plugin: proper available immediately""" task = execute_task('test_proper_season_pack_2') assert task.find_entry('accepted', title='foo.s01.720p-flexget') task = execute_task('test_proper_season_pack_3') assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget') def test_all_series(self, execute_task): task = execute_task('test_all_series') assert task.find_entry('accepted', title='show.name.s01.720p.HDTV-Group') def test_advanced_config(self, execute_task): task = execute_task('test_with_dict_config_1') assert not task.find_entry('accepted', title='bro.s01e01.720p.HDTV-Flexget') assert task.find_entry('accepted', title='bro.s01.720p.HDTV-Flexget') execute_task('test_with_dict_config_2', options={'inject': [Entry(title='bro.s02e01.720p.HDTV-Flexget', url='')], 'immortal': True}) task = execute_task('test_with_dict_config_2') assert task.find_entry('accepted', title='bro.s02.720p.HDTV-Flexget') class TestSeriesDDAudio(object): _config = """ templates: global: parsing: series: internal tasks: min_quality: mock: - {title: 'MinQATest.S01E01.720p.XViD.DD5.1-FlexGet'} - {title: 'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'} series: - MinQATest: quality: ">dd5.1" max_quality: mock: - {title: 'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'} - {title: 'MaxQATest.S01E01.720p.XViD.DD+5.1-FlexGet'} series: - MaxQATest: quality: "<=dd5.1" test_channels: mock: - {title: 'Channels.S01E01.1080p.HDTV.DD+2.0-FlexGet'} - {title: 'Channels.S01E01.1080p.HDTV.DD+5.1-FlexGet'} - {title: 'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'} series: - Channels: quality: dd+5.1 """ @pytest.fixture() def config(self): """Overrides outer config fixture since DD+ and arbitrary channels support does not work with guessit 
parser""" return self._config def test_min_quality(self, execute_task): """Series plugin: min_quality""" task = execute_task('min_quality') assert task.find_entry('accepted', title='MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'), \ 'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only two' def test_max_quality(self, execute_task): """Series plugin: max_quality""" task = execute_task('max_quality') assert task.find_entry('accepted', title='MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'), \ 'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_channels(self, execute_task): """Series plugin: max_quality""" task = execute_task('test_channels') assert task.find_entry(title='Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'), \ 'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one'
35.764389
119
0.56974
from __future__ import unicode_literals, division, absolute_import from builtins import * from io import StringIO import pytest from jinja2 import Template from flexget.entry import Entry from flexget.logger import capture_output from flexget.manager import get_parser, Session from flexget.task import TaskAbort from flexget.components.series import db def age_series(**kwargs): import datetime session = Session() session.query(db.EpisodeRelease).update({'first_seen': datetime.datetime.now() - datetime.timedelta(**kwargs)}) session.commit() @pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True) def config(request): newconfig = Template(request.cls.config).render({'parser': request.param}) assert request.cls.config != newconfig, 'config parameterization did nothing?' return newconfig class TestQuality(object): config = """ templates: global: parsing: series: {{parser}} tasks: exact_quality: mock: - {title: 'QTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'QTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'QTest.S01E01.DSR.XViD-FlexGet'} - {title: 'QTest.S01E01.1080p.XViD-FlexGet'} - {title: 'QTest.S01E01.720p.XViD-FlexGet'} series: - QTest: quality: 720p quality_fail: mock: - {title: 'Q2Test.S01E01.HDTV.XViD-FlexGet'} - {title: 'Q2Test.S01E01.PDTV.XViD-FlexGet'} - {title: 'Q2Test.S01E01.DSR.XViD-FlexGet'} series: - Q2Test: quality: 720p min_quality: mock: - {title: 'MinQTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'MinQTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'MinQTest.S01E01.DSR.XViD-FlexGet'} - {title: 'MinQTest.S01E01.1080p.XViD-FlexGet'} - {title: 'MinQTest.S01E01.720p.XViD-FlexGet'} series: - MinQTest: quality: ">720p" max_quality: mock: - {title: 'MaxQTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.DSR.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.1080p.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.720p.XViD-FlexGet'} - {title: 'MaxQTest.S01E01.720p.bluray-FlexGet'} series: - 
MaxQTest: quality: "<720p <=HDTV" min_max_quality: mock: - {title: 'MinMaxQTest.S01E01.HDTV.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.PDTV.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.DSR.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.720p.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.HR.XViD-FlexGet'} - {title: 'MinMaxQTest.S01E01.1080p.XViD-FlexGet'} series: - MinMaxQTest: quality: 480p-hr max_unknown_quality: mock: - {title: 'MaxUnknownQTest.S01E01.XViD-FlexGet'} series: - MaxUnknownQTest: quality: "<=hdtv" quality_from_group: mock: - {title: 'GroupQual.S01E01.HDTV.XViD-FlexGet'} - {title: 'GroupQual.S01E01.PDTV.XViD-FlexGet'} - {title: 'GroupQual.S01E01.DSR.XViD-FlexGet'} - {title: 'GroupQual.S01E01.1080p.XViD-FlexGet'} - {title: 'GroupQual.S01E01.720p.XViD-FlexGet'} - {title: 'Other.S01E01.hdtv.dd5.1.XViD-FlexGet'} - {title: 'Other.S01E01.720p.hdtv.XViD-FlexGet'} series: 720P: - GroupQual # Test that an integer group name doesn't cause an exception. 1080: - Test hdtv <hr !dd5.1: - Other quality_in_series_name: mock: - title: my 720p show S01E01 - title: my 720p show S01E02 720p series: - my 720p show: quality: '<720p' """ def test_exact_quality(self, execute_task): task = execute_task('exact_quality') assert task.find_entry('accepted', title='QTest.S01E01.720p.XViD-FlexGet'), \ '720p should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_quality_fail(self, execute_task): task = execute_task('quality_fail') assert not task.accepted, 'No qualities should have matched' def test_min_quality(self, execute_task): task = execute_task('min_quality') assert task.find_entry('accepted', title='MinQTest.S01E01.1080p.XViD-FlexGet'), \ 'MinQTest.S01E01.1080p.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_max_quality(self, execute_task): task = execute_task('max_quality') assert task.find_entry('accepted', title='MaxQTest.S01E01.HDTV.XViD-FlexGet'), \ 
'MaxQTest.S01E01.HDTV.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_min_max_quality(self, execute_task): task = execute_task('min_max_quality') assert task.find_entry('accepted', title='MinMaxQTest.S01E01.HR.XViD-FlexGet'), \ 'MinMaxQTest.S01E01.HR.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_max_unknown_quality(self, execute_task): task = execute_task('max_unknown_quality') assert len(task.accepted) == 1, 'should have accepted' def test_group_quality(self, execute_task): task = execute_task('quality_from_group') assert task.find_entry('accepted', title='GroupQual.S01E01.720p.XViD-FlexGet'), \ 'GroupQual.S01E01.720p.XViD-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one (no entries should pass for series `other`' def test_quality_in_series_name(self, execute_task): task = execute_task('quality_in_series_name') assert task.find_entry('accepted', title='my 720p show S01E01'), \ 'quality in title should not have been parsed' assert len(task.accepted) == 1, 'should not have accepted 720p entry' class TestDatabase(object): config = """ templates: global: parsing: series: {{parser}} series: - some series - progress tasks: test_1: mock: - {title: 'Some.Series.S01E20.720p.XViD-FlexGet'} test_2: mock: - {title: 'Some.Series.S01E20.720p.XViD-DoppelGanger'} progress_1: mock: - {title: 'Progress.S01E20.720p-FlexGet'} - {title: 'Progress.S01E20.HDTV-FlexGet'} progress_2: mock: - {title: 'Progress.S01E20.720p.Another-FlexGet'} - {title: 'Progress.S01E20.HDTV-Another-FlexGet'} """ def test_database(self, execute_task): task = execute_task('test_1') task = execute_task('test_2') assert task.find_entry('rejected', title='Some.Series.S01E20.720p.XViD-DoppelGanger'), \ 'failed basic download remembering' def test_doppelgangers(self, execute_task): task = execute_task('progress_1') assert 
task.find_entry('accepted', title='Progress.S01E20.720p-FlexGet'), \ 'best quality not accepted' # should not accept anything task = execute_task('progress_1') assert not task.accepted, 'repeated execution accepted' # introduce new doppelgangers task = execute_task('progress_2') assert not task.accepted, 'doppelgangers accepted' class TestFilterSeries(object): config = """ templates: global: parsing: series: {{parser}} tasks: test: mock: - {title: 'Some.Series.S01E20.720p.XViD-FlexGet'} - {title: 'Another.Series.S01E20.720p.XViD-FlexGet'} - {title: 'Another.Series.S01E21.1080p.H264-FlexGet'} - {title: 'Date.Series.10-11-2008.XViD'} - {title: 'Date.Series.10.12.2008.XViD'} - {title: 'Date.Series.2008-10-13.XViD'} - {title: 'Date.Series.10.14.09.XViD'} - {title: 'Date Series 2010 11 17 XViD'} - {title: 'Useless title', filename: 'Filename.Series.S01E26.XViD'} - {title: 'Empty.Description.S01E22.XViD', description: ''} # test chaining regexp: reject: - 1080p series: - another series - date series - filename series - empty description - (some) series metainfo_series_override: metainfo_series: yes mock: - {title: 'Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet'} - {title: 'Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet'} series: - Test Series test_all_series_mode: mock: - {title: 'Test.Series.S01E02.PDTV.XViD-FlexGet'} - {title: 'Test Series - 1x03 - PDTV XViD-FlexGet'} - {title: 'Other.Show.S02E01.PDTV.XViD-FlexGet'} - {title: 'other show season 2 episode 2'} - {title: 'Date.Show.03-29-2012.HDTV.XViD-FlexGet'} all_series: yes test_alternate_name: mock: - title: The.Show.S01E01 - title: Other.Name.S01E02 - title: many.names.S01E01 - title: name.1.S01E02 - title: name.2.S01E03 - title: paren.title.2013.S01E01 series: - The Show: alternate_name: Other Name - many names: alternate_name: - name 1 - name 2 - paren title (US): alternate_name: paren title 2013 test_input_order_preserved: series: - Some Show """ def test_smoke(self, execute_task): task = 
execute_task('test') # normal passing assert task.find_entry(title='Another.Series.S01E20.720p.XViD-FlexGet'), \ 'Another.Series.S01E20.720p.XViD-FlexGet should have passed' # series with brackets assert task.find_entry('accepted', title='Some.Series.S01E20.720p.XViD-FlexGet'), \ 'Some.Series.S01E20.720p.XViD-FlexGet should have been accepted' # date formats df = ['Date.Series.10-11-2008.XViD', 'Date.Series.10.12.2008.XViD', 'Date Series 2010 11 17 XViD', 'Date.Series.2008-10-13.XViD', 'Date.Series.10.14.09.XViD'] for d in df: entry = task.find_entry(title=d) assert entry, 'Date format did not match %s' % d assert 'series_parser' in entry, 'series_parser missing from %s' % d assert entry['series_parser'].id_type == 'date', '%s did not return three groups for dates' % d # parse from filename assert task.find_entry(filename='Filename.Series.S01E26.XViD'), 'Filename parsing failed' # empty description assert task.find_entry(title='Empty.Description.S01E22.XViD'), 'Empty Description failed' # chaining with regexp plugin assert task.find_entry('rejected', title='Another.Series.S01E21.1080p.H264-FlexGet'), \ 'regexp chaining' def test_metainfo_series_override(self, execute_task): task = execute_task('metainfo_series_override') # Make sure the metainfo_series plugin is working first entry = task.find_entry('entries', title='Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet') assert entry['series_guessed'], 'series should have been guessed' assert entry['series_name'] == entry['series_parser'].name == 'Other Show With Extra Crap', \ 'metainfo_series is not running' # Make sure the good series data overrode metainfo data for the listed series entry = task.find_entry('accepted', title='Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet') assert not entry.get('series_guessed'), 'series plugin should override series_guessed' assert entry['series_name'] == entry['series_parser'].name == 'Test Series', \ 'Series name should be \'Test Series\', was: entry: %s, parser: %s' % ( 
entry['series_name'], entry['series_parser'].name) def test_all_series_mode(self, execute_task): task = execute_task('test_all_series_mode') assert task.find_entry('accepted', title='Test.Series.S01E02.PDTV.XViD-FlexGet') task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet') entry = task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet') assert entry assert entry.get('series_name') == 'Test Series' entry = task.find_entry('accepted', title='Other.Show.S02E01.PDTV.XViD-FlexGet') assert entry.get('series_guessed') entry2 = task.find_entry('accepted', title='other show season 2 episode 2') # Make sure case is normalized so series are marked with the same name no matter the case in the title assert entry.get('series_name') == entry2.get( 'series_name') == 'Other Show', 'Series names should be in title case' entry = task.find_entry('accepted', title='Date.Show.03-29-2012.HDTV.XViD-FlexGet') assert entry.get('series_guessed') assert entry.get('series_name') == 'Date Show' def test_alternate_name(self, execute_task): task = execute_task('test_alternate_name') assert all(e.accepted for e in task.all_entries), 'All releases should have matched a show' @pytest.mark.parametrize('reverse', [False, True]) def test_input_order_preserved(self, manager, execute_task, reverse): entries = [ Entry(title='Some Show S01E01 720p proper', url='http://a'), Entry(title='Some Show S01E01 1080p', url='http://b') ] if reverse: entries.reverse() task = execute_task('test_input_order_preserved', options={'inject': entries}) assert task.accepted[0] == entries[0], 'first entry should have been accepted' class TestEpisodeAdvancement(object): config = """ templates: global: parsing: series: {{parser}} tasks: test_backwards_1: mock: - {title: 'backwards s02e12'} - {title: 'backwards s02e10'} series: - backwards test_backwards_2: mock: - {title: 'backwards s02e01'} series: - backwards test_backwards_3: mock: - {title: 'backwards s01e01'} series: - backwards 
test_backwards_okay_1: mock: - {title: 'backwards s01e02'} series: - backwards: tracking: backfill test_backwards_okay_2: mock: - {title: 'backwards s01e03'} series: - backwards: tracking: no test_forwards_1: mock: - {title: 'forwards s01e01'} series: - forwards test_forwards_2: mock: - {title: 'forwards s02e01'} series: - forwards test_forwards_3: mock: - {title: 'forwards s03e01'} series: - forwards test_forwards_4: mock: - {title: 'forwards s04e02'} series: - forwards test_forwards_5: mock: - {title: 'forwards s05e01'} series: - forwards test_forwards_okay_1: mock: - {title: 'forwards s05e01'} series: - forwards: tracking: no test_unordered: mock: - {title: 'zzz s01e05'} - {title: 'zzz s01e06'} - {title: 'zzz s01e07'} - {title: 'zzz s01e08'} - {title: 'zzz s01e09'} - {title: 'zzz s01e10'} - {title: 'zzz s01e15'} - {title: 'zzz s01e14'} - {title: 'zzz s01e13'} - {title: 'zzz s01e12'} - {title: 'zzz s01e11'} - {title: 'zzz s01e01'} series: - zzz test_seq1: mock: - title: seq 05 series: - seq test_seq2: mock: - title: seq 06 series: - seq test_seq3: mock: - title: seq 10 series: - seq test_seq4: mock: - title: seq 01 series: - seq """ def test_backwards(self, execute_task): task = execute_task('test_backwards_1') assert task.find_entry('accepted', title='backwards s02e12'), \ 'backwards s02e12 should have been accepted' assert task.find_entry('accepted', title='backwards s02e10'), \ 'backwards s02e10 should have been accepted within grace margin' task = execute_task('test_backwards_2') assert task.find_entry('accepted', title='backwards s02e01'), \ 'backwards s02e01 should have been accepted, in current season' task = execute_task('test_backwards_3') assert task.find_entry('rejected', title='backwards s01e01'), \ 'backwards s01e01 should have been rejected, in previous season' task = execute_task('test_backwards_okay_1') assert task.find_entry('accepted', title='backwards s01e02'), \ 'backwards s01e01 should have been accepted, backfill enabled' task = 
execute_task('test_backwards_okay_2') assert task.find_entry('accepted', title='backwards s01e03'), \ 'backwards s01e01 should have been accepted, tracking off' def test_forwards(self, execute_task): task = execute_task('test_forwards_1') assert task.find_entry('accepted', title='forwards s01e01'), \ 'forwards s01e01 should have been accepted' task = execute_task('test_forwards_2') assert task.find_entry('accepted', title='forwards s02e01'), \ 'forwards s02e01 should have been accepted' task = execute_task('test_forwards_3') assert task.find_entry('accepted', title='forwards s03e01'), \ 'forwards s03e01 should have been accepted' task = execute_task('test_forwards_4') assert task.find_entry('rejected', title='forwards s04e02'), \ 'forwards s04e02 should have been rejected' task = execute_task('test_forwards_5') assert task.find_entry('rejected', title='forwards s05e01'), \ 'forwards s05e01 should have been rejected' task = execute_task('test_forwards_okay_1') assert task.find_entry('accepted', title='forwards s05e01'), \ 'forwards s05e01 should have been accepted with tracking turned off' def test_unordered(self, execute_task): task = execute_task('test_unordered') assert len(task.accepted) == 12, \ 'not everyone was accepted' def test_sequence(self, execute_task): # First should be accepted task = execute_task('test_seq1') entry = task.find_entry('accepted', title='seq 05') assert entry['series_id'] == 5 # Next in sequence should be accepted task = execute_task('test_seq2') entry = task.find_entry('accepted', title='seq 06') assert entry['series_id'] == 6 # Should be too far in the future task = execute_task('test_seq3') entry = task.find_entry(title='seq 10') assert entry not in task.accepted, 'Should have been too far in future' # Should be too far in the past task = execute_task('test_seq4') entry = task.find_entry(title='seq 01') assert entry not in task.accepted, 'Should have been too far in the past' class TestFilterSeriesPriority(object): config = """ 
templates: global: parsing: series: {{parser}} tasks: test: mock: - {title: 'foobar 720p s01e01'} - {title: 'foobar hdtv s01e01'} regexp: reject: - 720p series: - foobar """ def test_priorities(self, execute_task): task = execute_task('test') assert task.find_entry('rejected', title='foobar 720p s01e01'), \ 'foobar 720p s01e01 should have been rejected' assert task.find_entry('accepted', title='foobar hdtv s01e01'), \ 'foobar hdtv s01e01 is not accepted' class TestPropers(object): config = """ templates: global: parsing: series: {{parser}} # prevents seen from rejecting on second execution, # we want to see that series is able to reject disable: builtins series: - test - foobar - asfd: quality: HR-1080p - V - tftest: propers: 3 hours - notest: propers: no tasks: propers_1: mock: - {title: 'Test.S01E01.720p-FlexGet'} # introduce proper, should be accepted propers_2: mock: - {title: 'Test.S01E01.720p.Proper-FlexGet'} # introduce non-proper, should not be downloaded propers_3: mock: - {title: 'Test.S01E01.FlexGet'} # introduce proper at the same time, should nuke non-proper and get proper proper_at_first: mock: - {title: 'Foobar.S01E01.720p.FlexGet'} - {title: 'Foobar.S01E01.720p.proper.FlexGet'} # test a lot of propers at once lot_propers: mock: - {title: 'V.2009.S01E01.PROPER.HDTV.A'} - {title: 'V.2009.S01E01.PROPER.HDTV.B'} - {title: 'V.2009.S01E01.PROPER.HDTV.C'} diff_quality_1: mock: - {title: 'Test.S01E02.720p-FlexGet'} # low quality proper, should not be accepted diff_quality_2: mock: - {title: 'Test.S01E02.HDTV.Proper-FlexGet'} # min + max quality with propers min_max_quality_1: mock: - {title: 'asfd.S01E01.720p-FlexGet'} min_max_quality_2: mock: - {title: 'asfd.S01E01.720p.Proper-FlexGet'} proper_timeframe_1: mock: - {title: 'TFTest.S01E01.720p-FlexGet'} proper_timeframe_2: mock: - {title: 'TFTest.S01E01.720p.proper-FlexGet'} no_propers_1: mock: - {title: 'NoTest.S01E01.720p-FlexGet'} no_propers_2: mock: - {title: 'NoTest.S01E01.720p.proper-FlexGet'} 
proper_upgrade_1: mock: - {title: 'Test.S02E01.hdtv.proper'} proper_upgrade_2: mock: - {title: 'Test.S02E01.hdtv.real.proper'} anime_proper_1: mock: - title: test 04v0 hdtv anime_proper_2: mock: - title: test 04 hdtv fastsub_proper_1: mock: - title: test s01e01 Fastsub hdtv fastsub_proper_2: mock: - title: test s01e01 Fastsub repack hdtv fastsub_proper_3: mock: - title: test s01e01 hdtv fastsub_proper_4: mock: - title: test s01e01 proper hdtv """ def test_propers_timeframe(self, execute_task): task = execute_task('proper_timeframe_1') assert task.find_entry('accepted', title='TFTest.S01E01.720p-FlexGet'), \ 'Did not accept before timeframe' # let 6 hours pass age_series(hours=6) task = execute_task('proper_timeframe_2') assert task.find_entry('rejected', title='TFTest.S01E01.720p.proper-FlexGet'), \ 'Did not reject after proper timeframe' def test_no_propers(self, execute_task): task = execute_task('no_propers_1') assert len(task.accepted) == 1, 'broken badly' task = execute_task('no_propers_2') assert len(task.rejected) == 1, 'accepted proper' def test_min_max_propers(self, execute_task): task = execute_task('min_max_quality_1') assert len(task.accepted) == 1, 'uhh, broken badly' task = execute_task('min_max_quality_2') assert len(task.accepted) == 1, 'should have accepted proper' def test_lot_propers(self, execute_task): task = execute_task('lot_propers') assert len(task.accepted) == 1, 'should have accepted (only) one of the propers' def test_diff_quality_propers(self, execute_task): task = execute_task('diff_quality_1') assert len(task.accepted) == 1 task = execute_task('diff_quality_2') assert len(task.accepted) == 0, 'should not have accepted lower quality proper' def test_propers(self, execute_task): # start with normal download ... 
task = execute_task('propers_1') assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \ 'Test.S01E01-FlexGet should have been accepted' # rejects downloaded task = execute_task('propers_1') assert task.find_entry('rejected', title='Test.S01E01.720p-FlexGet'), \ 'Test.S01E01-FlexGet should have been rejected' # accepts proper task = execute_task('propers_2') assert task.find_entry('accepted', title='Test.S01E01.720p.Proper-FlexGet'), \ 'new undownloaded proper should have been accepted' # reject downloaded proper task = execute_task('propers_2') assert task.find_entry('rejected', title='Test.S01E01.720p.Proper-FlexGet'), \ 'downloaded proper should have been rejected' # reject episode that has been downloaded normally and with proper task = execute_task('propers_3') assert task.find_entry('rejected', title='Test.S01E01.FlexGet'), \ 'Test.S01E01.FlexGet should have been rejected' def test_proper_available(self, execute_task): task = execute_task('proper_at_first') assert task.find_entry('accepted', title='Foobar.S01E01.720p.proper.FlexGet'), \ 'Foobar.S01E01.720p.proper.FlexGet should have been accepted' def test_proper_upgrade(self, execute_task): task = execute_task('proper_upgrade_1') assert task.find_entry('accepted', title='Test.S02E01.hdtv.proper') task = execute_task('proper_upgrade_2') assert task.find_entry('accepted', title='Test.S02E01.hdtv.real.proper') def test_anime_proper(self, execute_task): task = execute_task('anime_proper_1') assert task.accepted, 'ep should have accepted' task = execute_task('anime_proper_2') assert task.accepted, 'proper ep should have been accepted' def test_fastsub_proper(self, execute_task): task = execute_task('fastsub_proper_1') assert task.accepted, 'ep should have accepted' task = execute_task('fastsub_proper_2') assert task.accepted, 'proper ep should have been accepted' task = execute_task('fastsub_proper_3') assert task.accepted, 'proper ep should have been accepted' task = 
execute_task('fastsub_proper_4') assert task.accepted, 'proper ep should have been accepted' class TestSimilarNames(object): # hmm, not very good way to test this .. seriesparser should be tested alone? config = """ templates: global: parsing: series: {{parser}} tasks: test: mock: - {title: 'FooBar.S03E01.DSR-FlexGet'} - {title: 'FooBar: FirstAlt.S02E01.DSR-FlexGet'} - {title: 'FooBar: SecondAlt.S01E01.DSR-FlexGet'} series: - FooBar - 'FooBar: FirstAlt' - 'FooBar: SecondAlt' test_ambiguous: mock: - title: Foo.2.2 series: - Foo: identified_by: sequence - Foo 2: identified_by: sequence """ def test_names(self, execute_task): task = execute_task('test') assert task.find_entry('accepted', title='FooBar.S03E01.DSR-FlexGet'), 'Standard failed?' assert task.find_entry('accepted', title='FooBar: FirstAlt.S02E01.DSR-FlexGet'), 'FirstAlt failed' assert task.find_entry('accepted', title='FooBar: SecondAlt.S01E01.DSR-FlexGet'), 'SecondAlt failed' def test_ambiguous(self, execute_task): task = execute_task('test_ambiguous') # In the event of ambiguous match, more specific one should be chosen assert task.find_entry('accepted', title='Foo.2.2')['series_name'] == 'Foo 2' class TestDuplicates(object): config = """ templates: global: parsing: series: {{parser}} # just cleans log a bit .. 
disable: - seen tasks: test_dupes: mock: - {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[FlexGet]'} - {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[ASDF]'} series: - Foo 2009 test_1: mock: - {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[FlexGet]'} - {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[ASDF]'} series: - foo bar test_2: mock: - {title: 'Foo.Bar.S02E04.XviD-2HD[ASDF]'} - {title: 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]'} - {title: 'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]'} - {title: 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]'} - {title: 'Foo.Bar.S02E03.HDTV.XviD-FlexGet'} - {title: 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY'} series: - foo bar test_true_dupes: mock: - {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'} - {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'} - {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'} series: - dupe """ def test_dupes(self, execute_task): task = execute_task('test_dupes') assert len(task.accepted) == 1, 'accepted both' def test_true_dupes(self, execute_task): task = execute_task('test_true_dupes') assert len(task.accepted) == 1, 'should have accepted (only) one' def test_downloaded(self, execute_task): task = execute_task('test_1') task = execute_task('test_2') # these should be accepted accepted = ['Foo.Bar.S02E03.HDTV.XviD-FlexGet', 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY'] for item in accepted: assert task.find_entry('accepted', title=item), \ '%s should have been accepted' % item # these should be rejected rejected = ['Foo.Bar.S02E04.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]', 'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]'] for item in rejected: assert task.find_entry('rejected', title=item), \ '%s should have been rejected' % item class TestQualities(object): config = """ templates: global: parsing: series: {{parser}} disable: builtins series: - FooBar: qualities: - SDTV - 720p - 1080p - FooBaz: upgrade: yes qualities: - hdtv - hr - 720p - FooBum: quality: 720p-1080i upgrade: yes - FooD: target: 720p timeframe: 0 hours upgrade: yes 
tasks: test_1: mock: - {title: 'FooBar.S01E01.PDTV-FlexGet'} - {title: 'FooBar.S01E01.1080p-FlexGet'} - {title: 'FooBar.S01E01.HR-FlexGet'} test_2: mock: - {title: 'FooBar.S01E01.720p-FlexGet'} propers_1: mock: - {title: 'FooBar.S01E02.720p-FlexGet'} propers_2: mock: - {title: 'FooBar.S01E02.720p.Proper-FlexGet'} upgrade_1: mock: - {title: 'FooBaz.S01E02.pdtv-FlexGet'} - {title: 'FooBaz.S01E02.HR-FlexGet'} upgrade_2: mock: - {title: 'FooBaz.S01E02.720p-FlexGet'} - {title: 'FooBaz.S01E02.1080p-FlexGet'} upgrade_3: mock: - {title: 'FooBaz.S01E02.hdtv-FlexGet'} - {title: 'FooBaz.S01E02.720p rc-FlexGet'} quality_upgrade_1: mock: - title: FooBum.S03E01.1080p # too high - title: FooBum.S03E01.hdtv # too low - title: FooBum.S03E01.720p # in range quality_upgrade_2: mock: - title: FooBum.S03E01.1080i # should be upgraded to - title: FooBum.S03E01.720p-ver2 # Duplicate ep target_1: mock: - title: Food.S06E11.hdtv target_2: mock: - title: Food.S06E11.1080p - title: Food.S06E11.720p """ def test_qualities(self, execute_task): task = execute_task('test_1') assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet'), \ 'Didn''t accept FooBar.S01E01.PDTV-FlexGet' assert task.find_entry('accepted', title='FooBar.S01E01.1080p-FlexGet'), \ 'Didn''t accept FooBar.S01E01.1080p-FlexGet' assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \ 'Accepted FooBar.S01E01.HR-FlexGet' task = execute_task('test_2') assert task.find_entry('accepted', title='FooBar.S01E01.720p-FlexGet'), \ 'Didn''t accept FooBar.S01E01.720p-FlexGet' # test that it rejects them afterwards task = execute_task('test_1') assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-FlexGet'), \ 'Didn\'t reject FooBar.S01E01.PDTV-FlexGet' assert task.find_entry('rejected', title='FooBar.S01E01.1080p-FlexGet'), \ 'Didn\'t reject FooBar.S01E01.1080p-FlexGet' assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \ 'Accepted FooBar.S01E01.HR-FlexGet' def 
test_propers(self, execute_task): task = execute_task('propers_1') assert task.accepted task = execute_task('propers_2') assert task.accepted, 'proper not accepted' task = execute_task('propers_2') assert not task.accepted, 'proper accepted again' def test_qualities_upgrade(self, execute_task): task = execute_task('upgrade_1') assert task.find_entry('accepted', title='FooBaz.S01E02.HR-FlexGet'), 'HR quality should be accepted' assert len(task.accepted) == 1, 'Only best quality should be accepted' task = execute_task('upgrade_2') assert task.find_entry('accepted', title='FooBaz.S01E02.720p-FlexGet'), '720p quality should be accepted' assert len(task.accepted) == 1, 'Only best quality should be accepted' task = execute_task('upgrade_3') assert not task.accepted, 'Should not have accepted worse qualities' def test_quality_upgrade(self, execute_task): task = execute_task('quality_upgrade_1') assert len(task.accepted) == 1, 'Only one ep should have passed quality filter' assert task.find_entry('accepted', title='FooBum.S03E01.720p') task = execute_task('quality_upgrade_2') assert len(task.accepted) == 1, 'one ep should be valid upgrade' assert task.find_entry('accepted', title='FooBum.S03E01.1080i') def test_target_upgrade(self, execute_task): task = execute_task('target_1') assert len(task.accepted) == 1, 'Only one ep should have been grabbed' assert task.find_entry('accepted', title='Food.S06E11.hdtv') task = execute_task('target_2') assert len(task.accepted) == 1, 'one ep should be valid upgrade' assert task.find_entry('accepted', title='Food.S06E11.720p'), 'Should upgrade to `target`' class TestIdioticNumbering(object): config = """ templates: global: parsing: series: {{parser}} series: - FooBar: identified_by: ep tasks: test_1: mock: - {title: 'FooBar.S01E01.PDTV-FlexGet'} test_2: mock: - {title: 'FooBar.102.PDTV-FlexGet'} """ def test_idiotic(self, execute_task): task = execute_task('test_1') task = execute_task('test_2') entry = 
task.find_entry(title='FooBar.102.PDTV-FlexGet') assert entry, 'entry not found?' assert entry['series_season'] == 1, 'season not detected' assert entry['series_episode'] == 2, 'episode not detected' class TestNormalization(object): config = """ templates: global: parsing: series: {{parser}} disable: [seen] tasks: test_1: mock: - {title: 'FooBar.S01E01.PDTV-FlexGet'} series: - FOOBAR test_2: mock: - {title: 'FooBar.S01E01.PDTV-aoeu'} series: - foobar test_3: mock: - title: Foo bar & co 2012.s01e01.sdtv.a series: - foo bar & co 2012 test_4: mock: - title: Foo bar & co 2012.s01e01.sdtv.b series: - Foo/Bar and Co. (2012) """ def test_capitalization(self, execute_task): task = execute_task('test_1') assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet') task = execute_task('test_2') assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-aoeu') def test_normalization(self, execute_task): task = execute_task('test_3') assert task.find_entry('accepted', title='Foo bar & co 2012.s01e01.sdtv.a') task = execute_task('test_4') assert task.find_entry('rejected', title='Foo bar & co 2012.s01e01.sdtv.b') class TestMixedNumbering(object): config = """ templates: global: parsing: series: {{parser}} series: - FooBar: identified_by: ep tasks: test_1: mock: - {title: 'FooBar.S03E07.PDTV-FlexGet'} test_2: mock: - {title: 'FooBar.0307.PDTV-FlexGet'} """ def test_mixednumbering(self, execute_task): task = execute_task('test_1') assert task.find_entry('accepted', title='FooBar.S03E07.PDTV-FlexGet') task = execute_task('test_2') assert task.find_entry('rejected', title='FooBar.0307.PDTV-FlexGet') class TestExact(object): config = """ templates: global: parsing: series: {{parser}} tasks: auto: mock: - {title: 'ABC.MIAMI.S01E01.PDTV-FlexGet'} - {title: 'ABC.S01E01.PDTV-FlexGet'} - {title: 'ABC.LA.S01E01.PDTV-FlexGet'} series: - ABC - ABC LA - ABC Miami name_regexp: mock: - title: show s09e05 hdtv - title: show a s09e06 hdtv series: - show: name_regexp: ^show exact: yes 
date: mock: - title: date show 04.01.2011 hdtv - title: date show b 04.02.2011 hdtv series: - date show: exact: yes """ def test_auto(self, execute_task): task = execute_task('auto') assert task.find_entry('accepted', title='ABC.S01E01.PDTV-FlexGet') assert task.find_entry('accepted', title='ABC.LA.S01E01.PDTV-FlexGet') assert task.find_entry('accepted', title='ABC.MIAMI.S01E01.PDTV-FlexGet') def test_with_name_regexp(self, execute_task): task = execute_task('name_regexp') assert task.find_entry('accepted', title='show s09e05 hdtv') assert not task.find_entry('accepted', title='show a s09e06 hdtv') def test_dated_show(self, execute_task): task = execute_task('date') assert task.find_entry('accepted', title='date show 04.01.2011 hdtv') assert not task.find_entry('accepted', title='date show b 04.02.2011 hdtv') class TestTimeframe(object): config = """ templates: global: parsing: series: {{parser}} series: - test: timeframe: 5 hours target: 720p tasks: test_no_waiting: mock: - {title: 'Test.S01E01.720p-FlexGet'} test_stop_waiting_1: mock: - {title: 'Test.S01E02.HDTV-FlexGet'} test_stop_waiting_2: mock: - {title: 'Test.S01E02.720p-FlexGet'} test_proper_afterwards: mock: - {title: 'Test.S01E02.720p.Proper-FlexGet'} test_expires: mock: - {title: 'Test.S01E03.pdtv-FlexGet'} test_min_max_fail: series: - mm test: timeframe: 5 hours target: 720p quality: hdtv+ <=720p mock: - {title: 'MM Test.S01E02.pdtv-FlexGet'} - {title: 'MM Test.S01E02.1080p-FlexGet'} test_min_max_pass: series: - mm test: timeframe: 5 hours target: 720p quality: hdtv+ <=720p mock: - {title: 'MM Test.S01E02.pdtv-FlexGet'} - {title: 'MM Test.S01E02.hdtv-FlexGet'} - {title: 'MM Test.S01E02.1080p-FlexGet'} test_qualities_fail: series: - q test: timeframe: 5 hours qualities: - hdtv - 1080p mock: - {title: 'Q Test.S01E02.pdtv-FlexGet'} - {title: 'Q Test.S01E02.1080p-FlexGet'} test_qualities_pass: series: - q test: timeframe: 5 hours qualities: - sdtv - 720p mock: - {title: 'Q Test.S01E02.1080p-FlexGet'} 
test_with_quality_1: series: - q test: timeframe: 5 hours quality: hdtv+ target: 720p mock: - title: q test s01e01 pdtv 720p test_with_quality_2: series: - q test: timeframe: 5 hours quality: hdtv+ target: 720p mock: - title: q test s01e01 hdtv """ def test_no_waiting(self, execute_task): task = execute_task('test_no_waiting') assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \ '720p not accepted immediattely' def test_stop_waiting(self, execute_task): task = execute_task('test_stop_waiting_1') assert task.entries and not task.accepted task = execute_task('test_stop_waiting_2') assert task.find_entry('accepted', title='Test.S01E02.720p-FlexGet'), \ '720p should have caused stop waiting' task = execute_task('test_proper_afterwards') assert task.find_entry('accepted', title='Test.S01E02.720p.Proper-FlexGet'), \ 'proper should have been accepted' def test_expires(self, execute_task): # first execution should not accept anything task = execute_task('test_expires') assert not task.accepted # let 3 hours pass age_series(hours=3) task = execute_task('test_expires') assert not task.accepted, 'expired too soon' # let another 3 hours pass, should expire now! 
age_series(hours=6) task = execute_task('test_expires') assert task.accepted, 'timeframe didn\'t expire' def test_min_max_fail(self, execute_task): task = execute_task('test_min_max_fail') assert not task.accepted age_series(hours=6) task = execute_task('test_min_max_fail') assert task.entries and not task.accepted def test_min_max_pass(self, execute_task): task = execute_task('test_min_max_pass') assert not task.accepted # Let 6 hours pass, timeframe should expire and accept hdtv copy age_series(hours=6) task = execute_task('test_min_max_pass') assert task.find_entry('accepted', title='MM Test.S01E02.hdtv-FlexGet') assert len(task.accepted) == 1 def test_qualities_fail(self, execute_task): task = execute_task('test_qualities_fail') assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet'), \ 'should have accepted wanted quality' assert len(task.accepted) == 1 # Let 6 hours pass, timeframe should not even been started, as we already have one of our qualities age_series(hours=6) task = execute_task('test_qualities_fail') assert task.entries and not task.accepted def test_qualities_pass(self, execute_task): task = execute_task('test_qualities_pass') assert not task.accepted, 'None of the qualities should have matched' # Let 6 hours pass, timeframe should expire and accept 1080p copy age_series(hours=6) task = execute_task('test_qualities_pass') assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet') assert len(task.accepted) == 1 def test_with_quality(self, execute_task): task = execute_task('test_with_quality_1') assert not task.accepted, 'Entry does not pass quality' age_series(hours=6) # Entry from first test feed should not pass quality task = execute_task('test_with_quality_1') assert not task.accepted, 'Entry does not pass quality' # Timeframe should not yet have started task = execute_task('test_with_quality_2') assert not task.accepted, 'Timeframe should not yet have passed' age_series(hours=6) task = 
execute_task('test_with_quality_2') assert task.accepted, 'Timeframe should have passed' class TestBacklog(object): config = """ templates: global: parsing: series: {{parser}} tasks: backlog: mock: - {title: 'Test.S01E01.hdtv-FlexGet'} series: - test: {timeframe: 6 hours} """ def testBacklog(self, manager, execute_task): task = execute_task('backlog') assert task.entries and not task.accepted, 'no entries at the start' # simulate test going away from the task del (manager.config['tasks']['backlog']['mock']) age_series(hours=12) task = execute_task('backlog') assert task.accepted, 'backlog is not injecting episodes' class TestManipulate(object): config = """ templates: global: parsing: series: {{parser}} tasks: test_1: mock: - {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'} series: - test test_2: mock: - {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'} series: - test manipulate: - title: extract: '^PREFIX: (.*)' """ def testManipulate(self, execute_task): # should not work with the prefix task = execute_task('test_1') assert not task.accepted, 'series accepted even with prefix?' assert not task.accepted, 'series rejecte even with prefix?' 
task = execute_task('test_2') assert task.accepted, 'manipulate failed to pre-clean title' class TestFromGroup(object): config = """ templates: global: parsing: series: {{parser}} tasks: test: mock: - {title: '[Ignored] Test 12'} - {title: '[FlexGet] Test 12'} - {title: 'Test.13.HDTV-Ignored'} - {title: 'Test.13.HDTV-FlexGet'} - {title: 'Test.14.HDTV-Name'} - {title: 'Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3'} - {title: 'Test :: h264 10-bit | Softsubs (Ignore) | Episode 3'} series: - test: {from_group: [Name, FlexGet]} """ def test_from_group(self, execute_task): task = execute_task('test') assert task.find_entry('accepted', title='[FlexGet] Test 12') assert task.find_entry('accepted', title='Test.13.HDTV-FlexGet') assert task.find_entry('accepted', title='Test.14.HDTV-Name') assert task.find_entry('accepted', title='Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3') class TestBegin(object): config = """ templates: global: parsing: series: {{parser}} eps: mock: - {title: 'WTest.S02E03.HDTV.XViD-FlexGet'} - {title: 'W2Test.S02E03.HDTV.XViD-FlexGet'} tasks: season_id_test: template: eps series: - WTest: begin: S02 - W2Test: begin: S03 before_ep_test: template: eps series: - WTest: begin: S02E05 - W2Test: begin: S03E02 after_ep_test: template: eps series: - WTest: begin: S02E03 - W2Test: begin: S02E01 before_seq_test: mock: - title: WTest.1.HDTV.XViD-FlexGet - title: W2Test.13.HDTV.XViD-FlexGet series: - WTest: begin: 2 - W2Test: begin: 120 after_seq_test: mock: - title: WTest.2.HDTV.XViD-FlexGet - title: W2Test.123.HDTV.XViD-FlexGet series: - WTest: begin: 2 - W2Test: begin: 120 before_date_test: mock: - title: WTest.2001.6.6.HDTV.XViD-FlexGet - title: W2Test.12.30.2012.HDTV.XViD-FlexGet series: - WTest: begin: '2009-05-05' - W2Test: begin: '2012-12-31' after_date_test: mock: - title: WTest.2009.5.5.HDTV.XViD-FlexGet - title: W2Test.1.1.2013.HDTV.XViD-FlexGet series: - WTest: begin: '2009-05-05' - W2Test: begin: '2012-12-31' test_advancement1: mock: - 
title: WTest.S01E01 series: - WTest test_advancement2: mock: - title: WTest.S03E01 series: - WTest test_advancement3: mock: - title: WTest.S03E01 series: - WTest: begin: S03E01 """ def test_season_id(self, execute_task): task = execute_task('season_id_test') assert task.find_entry('accepted', title='WTest.S02E03.HDTV.XViD-FlexGet'), \ 'Entry should have been accepted, it\'s after the begin episode' assert task.find_entry('rejected', title='W2Test.S02E03.HDTV.XViD-FlexGet'), \ 'Entry should have been rejected, it\'s before the begin episode' def test_before_ep(self, execute_task): task = execute_task('before_ep_test') assert not task.accepted, 'No entries should have been accepted, they are before the begin episode' def test_after_ep(self, execute_task): task = execute_task('after_ep_test') assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode' def test_before_seq(self, execute_task): task = execute_task('before_seq_test') assert not task.accepted, 'No entries should have been accepted, they are before the begin episode' def test_after_seq(self, execute_task): task = execute_task('after_seq_test') assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode' def test_before_date(self, execute_task): task = execute_task('before_date_test') assert not task.accepted, 'No entries should have been accepted, they are before the begin episode' def test_after_date(self, execute_task): task = execute_task('after_date_test') assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode' def test_advancement(self, execute_task): # Put S01E01 into the database as latest download task = execute_task('test_advancement1') assert task.accepted # Just verify regular ep advancement would block S03E01 task = execute_task('test_advancement2') assert not task.accepted, 'Episode advancement should have blocked' # Make sure ep advancement doesn't 
block it when we've set begin to that ep task = execute_task('test_advancement3') assert task.accepted, 'Episode should have been accepted' class TestSeriesPremiere(object): config = """ templates: global: parsing: series: {{parser}} metainfo_series: yes series_premiere: yes tasks: test: mock: - {title: 'Foobar.S01E01.PDTV-FlexGet'} - {title: 'Foobar.S01E11.1080p-FlexGet'} - {title: 'Foobar.S02E02.HR-FlexGet'} """ def testOnlyPremieres(self, execute_task): task = execute_task('test') assert task.find_entry('accepted', title='Foobar.S01E01.PDTV-FlexGet', series_name='Foobar', series_season=1, series_episode=1), 'Series premiere should have been accepted' assert len(task.accepted) == 1 # TODO: Add more tests, test interaction with series plugin and series_exists class TestImportSeries(object): config = """ templates: global: parsing: series: {{parser}} tasks: timeframe_max: configure_series: settings: propers: 12 hours target: 720p timeframe: 5 minutes quality: "<=720p <=bluray" from: mock: - title: the show mock: - title: the show s03e02 1080p bluray - title: the show s03e02 hdtv test_import_altnames: configure_series: from: mock: - {title: 'the show', configure_series_alternate_name: 'le show'} mock: - title: le show s03e03 """ def test_timeframe_max(self, execute_task): task = execute_task('timeframe_max') assert not task.accepted, 'Entry shouldnot have been accepted on first run.' age_series(minutes=6) task = execute_task('timeframe_max') assert task.find_entry('accepted', title='the show s03e02 hdtv'), \ 'hdtv should have been accepted after timeframe.' def test_import_altnames(self, execute_task): task = execute_task('test_import_altnames') entry = task.find_entry(title='le show s03e03') assert entry.accepted, 'entry matching series alternate name should have been accepted.' 
assert entry['series_name'] == 'the show', 'entry series should be set to the main name' class TestIDTypes(object): config = """ templates: global: parsing: series: {{parser}} tasks: all_types: series: - episode - seasonless episode - date - sequence - stupid id: id_regexp: (\\dcat) mock: - title: episode S03E04 - title: episode 3x05 - title: date 2011.4.3 other crap hdtv - title: date 4.5.11 - title: sequence 003 - title: sequence 4 - title: stupid id 3cat - title: seasonless episode e01 """ def test_id_types(self, execute_task): task = execute_task('all_types') for entry in task.entries: assert entry['series_name'], '%s not parsed by series plugin' % entry['title'] assert entry['series_id_type'] in entry['series_name'] class TestCaseChange(object): config = """ templates: global: parsing: series: {{parser}} tasks: first: mock: - title: theshow s02e04 series: - TheShow second: mock: - title: thEshoW s02e04 other series: - THESHOW """ def test_case_change(self, execute_task): task = execute_task('first') # Make sure series_name uses case from config, make sure episode is accepted assert task.find_entry('accepted', title='theshow s02e04', series_name='TheShow') task = execute_task('second') # Make sure series_name uses new case from config, make sure ep is rejected because we have a copy assert task.find_entry('rejected', title='thEshoW s02e04 other', series_name='THESHOW') class TestInvalidSeries(object): config = """ templates: global: parsing: series: {{parser}} tasks: blank: mock: - title: whatever series: - '': quality: 720p """ def test_blank_series(self, execute_task): task = execute_task('blank') assert not task.aborted, 'Task should not have aborted' class TestDoubleEps(object): config = """ templates: global: parsing: series: {{parser}} tasks: test_double1: mock: - title: double S01E02-E03 series: - double test_double2: mock: - title: double S01E03 series: - double """ def test_double(self, execute_task): # First should be accepted task = 
execute_task('test_double1') assert task.find_entry('accepted', title='double S01E02-E03') # We already got ep 3 as part of double, should not be accepted task = execute_task('test_double2') assert not task.find_entry('accepted', title='double S01E03') class TestAutoLockin(object): config = """ templates: global: parsing: series: {{parser}} series: - FooBar - BarFood tasks: try_date_1: mock: - title: FooBar 2012-10-10 HDTV lock_ep: mock: - title: FooBar S01E01 HDTV - title: FooBar S01E02 HDTV - title: FooBar S01E03 HDTV try_date_2: mock: - title: FooBar 2012-10-11 HDTV test_special_lock: mock: - title: BarFood christmas special HDTV - title: BarFood easter special HDTV - title: BarFood haloween special HDTV - title: BarFood bad special HDTV try_reg: mock: - title: BarFood S01E01 HDTV - title: BarFood 2012-9-9 HDTV """ def test_ep_lockin(self, execute_task): task = execute_task('try_date_1') assert task.find_entry('accepted', title='FooBar 2012-10-10 HDTV'), \ 'dates should be accepted before locked in on an identifier type' task = execute_task('lock_ep') assert len(task.accepted) == 3, 'All ep mode episodes should have been accepted' task = execute_task('try_date_2') assert not task.find_entry('accepted', title='FooBar 2012-10-11 HDTV'), \ 'dates should not be accepted after series has locked in to ep mode' def test_special_lock(self, execute_task): task = execute_task('test_special_lock') assert len(task.accepted) == 4, 'All specials should have been accepted' task = execute_task('try_reg') assert len(task.accepted) == 2, 'Specials should not have caused episode type lock-in' class TestReruns(object): config = """ templates: global: parsing: series: {{parser}} tasks: one_accept: mock: - title: the show s01e01 - title: the show s01e01 different series: - the show rerun: 2 mock_output: yes """ def test_one_accept(self, execute_task): task = execute_task('one_accept') assert len(task.mock_output) == 1, \ 'should have accepted once!: %s' % ', '.join(e['title'] for e 
in task.mock_output) class TestSpecials(object): config = """ templates: global: parsing: series: {{parser}} tasks: preferspecials: mock: - title: the show s03e04 special series: - the show: prefer_specials: True nopreferspecials: mock: - title: the show s03e05 special series: - the show: prefer_specials: False assumespecial: mock: - title: the show SOMETHING series: - the show: assume_special: True noassumespecial: mock: - title: the show SOMETHING series: - the show: assume_special: False special_looks_like_season_pack: mock: - title: Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget series: - Doctor Who """ def test_prefer_specials(self, execute_task): # Test that an entry matching both ep and special is flagged as a special when prefer_specials is True task = execute_task('preferspecials') entry = task.find_entry('accepted', title='the show s03e04 special') assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged a special was not.' def test_not_prefer_specials(self, execute_task): # Test that an entry matching both ep and special is flagged as an ep when prefer_specials is False task = execute_task('nopreferspecials') entry = task.find_entry('accepted', title='the show s03e05 special') assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged a special was.' def test_assume_special(self, execute_task): # Test that an entry with no ID found gets flagged as a special and accepted if assume_special is True task = execute_task('assumespecial') entry = task.find_entry(title='the show SOMETHING') assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged as a special was not.' assert entry.accepted, 'Entry which should have been accepted was not.' 
def test_not_assume_special(self, execute_task): # Test that an entry with no ID found does not get flagged as a special and accepted if assume_special is False task = execute_task('noassumespecial') entry = task.find_entry(title='the show SOMETHING') assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged as a special was.' assert not entry.accepted, 'Entry which should not have been accepted was.' def test_special_looks_like_a_season_pack(self, execute_task): task = execute_task('special_looks_like_season_pack') entry = task.find_entry(title='Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget') assert entry.get('series_id_type') == 'special', 'Entry should have been flagged as a special' assert not entry['season_pack'], 'Entry should not have been flagged as a season pack' assert entry.accepted, 'Entry which should not have been accepted was.' class TestAlternateNames(object): config = """ templates: global: parsing: series: {{parser}} tasks: alternate_name: series: - Some Show: begin: S01E01 alternate_name: Other Show another_alternate_name: series: - Some Show: alternate_name: Good Show set_other_alternate_name: mock: - title: Third.Show.S01E01 - title: Other.Show.S01E01 series: - Some Show: alternate_name: Third Show rerun: 0 duplicate_names_in_different_series: series: - First Show: begin: S01E01 alternate_name: Third Show - Second Show: begin: S01E01 alternate_name: Third Show """ def test_set_alternate_name(self, execute_task): # Tests that old alternate names are not kept in the database. task = execute_task('alternate_name') task = execute_task('set_other_alternate_name') assert task.find_entry('accepted', title='Third.Show.S01E01'), \ 'A new alternate name should have been associated with the series.' assert task.find_entry('undecided', title='Other.Show.S01E01'), \ 'The old alternate name for the series is still present.' 
def test_duplicate_alternate_names_in_different_series(self, execute_task): with pytest.raises(TaskAbort) as ex: execute_task('duplicate_names_in_different_series') # only test that the reason is about alternate names, not which names. reason = 'Error adding alternate name' assert ex.value.reason[:27] == reason, \ 'Wrong reason for task abortion. Should be about duplicate alternate names.' # Test the DB behaves like we expect ie. alternate names cannot def test_alternate_names_are_removed_from_db(self, execute_task): from flexget.manager import Session with Session() as session: execute_task('alternate_name') # test the current state of alternate names assert len(session.query(db.AlternateNames).all()) == 1, 'There should be one alternate name present.' assert session.query(db.AlternateNames).first().alt_name == 'Other Show', \ 'Alternate name should have been Other Show.' # run another task that overwrites the alternate names execute_task('another_alternate_name') assert len(session.query(db.AlternateNames).all()) == 1, \ 'The old alternate name should have been removed from the database.' assert session.query(db.AlternateNames).first().alt_name == 'Good Show', \ 'The alternate name in the database should be the new one, Good Show.' 
class TestCLI(object): config = """ templates: global: parsing: series: {{parser}} tasks: learn_series: series: - Some Show - Other Show mock: - title: Some Series S01E01 - title: Other Series S01E02 """ def test_series_list(self, manager, execute_task): execute_task('learn_series') options = get_parser().parse_args(['series', 'list', '--porcelain']) buffer = StringIO() with capture_output(buffer, loglevel='error'): manager.handle_cli(options=options) lines = buffer.getvalue().split('\n') assert all(any(line.lstrip().startswith(series) for line in lines) for series in ['Some Show', 'Other Show']) class TestSeriesRemove(object): config = """ templates: global: parsing: series: {{parser}} tasks: get_episode: seen: local series: - My Show mock: - title: My Show S01E01 1080p - title: My Show S01E01 720p remove_episode: seen: no mock: - title: My Show S01E01 series_name: My Show series_id: S01E01 accept_all: yes series_remove: yes """ def test_remove_episode(self, execute_task): task = execute_task('get_episode') assert len(task.accepted) == 1 first_rls = task.accepted[0] task = execute_task('get_episode') assert not task.accepted, 'series plugin duplicate blocking not working?' 
task = execute_task('remove_episode') task = execute_task('get_episode') assert len(task.accepted) == 1, 'new release not accepted after forgetting ep' assert task.accepted[0] != first_rls, 'same release accepted on second run' class TestSeriesSeasonPack(object): _config = """ templates: global: parsing: series: internal series: - foo: season_packs: yes - bar: season_packs: yes tracking: backfill - baz: season_packs: 3 - boo: season_packs: always - bla: season_packs: only - bro: season_packs: threshold: 1 reject_eps: yes tasks: multiple_formats: mock: - title: foo.s01.720p-flexget - title: foo.2xALL.720p-flexget foo_s01: mock: - title: foo.s01.720p-flexget foo_s02: mock: - title: foo.s02.720p-flexget foo_s03: mock: - title: foo.s03.720p-flexget foo_s01ep1: mock: - title: foo.s01e1.720p-flexget foo_s02ep1: mock: - title: foo.s02e1.720p-flexget season_pack_priority: mock: - title: foo.s01e1.720p-flexget - title: foo.s01e2.720p-flexget - title: foo.s01e3.720p-flexget - title: foo.s01e4.720p-flexget - title: foo.s01e5.720p-flexget - title: foo.s01.720p-flexget respect_begin: series: - bar: begin: s02e01 season_packs: yes mock: - title: bar.s01.720p-flexget - title: bar.s02.720p-flexget several_seasons: mock: - title: foo.s03.720p-flexget - title: foo.s07.720p-flexget - title: foo.s03.1080p-flexget - title: foo.s06.720p-flexget - title: foo.s09.720p-flexget test_backfill_1: mock: - title: bar.s03.720p-flexget test_backfill_2: mock: - title: bar.s02.720p-flexget test_backfill_3: mock: - title: bar.s03e01.720p-flexget test_backfill_4: mock: - title: bar.s02e01.1080p-flexget test_specific_season_pack_threshold_1: mock: - title: baz.s01e01.720p-flexget - title: baz.s01e02.720p-flexget - title: baz.s01e03.720p-flexget test_specific_season_pack_threshold_2: mock: - title: baz.s01.720p-flexget test_specific_season_pack_threshold_3: mock: - title: baz.s01e01.720p-flexget - title: baz.s01e02.720p-flexget - title: baz.s01e03.720p-flexget - title: baz.s01e04.720p-flexget 
test_always_get_season_pack_1: mock: - title: boo.s01e01.720p-flexget - title: boo.s01e02.720p-flexget - title: boo.s01e03.720p-flexget - title: boo.s01e04.720p-flexget test_always_get_season_pack_2: mock: - title: boo.s01.720p-flexget test_only_get_season_packs: mock: - title: bla.s01.720p-flexget - title: bla.s02e01.720p-flexget test_proper_season_pack: mock: - title: foo.s01.720p-flexget - title: foo.s01.720p.proper-flexget test_proper_season_pack_2: mock: - title: foo.s01.720p-flexget test_proper_season_pack_3: mock: - title: foo.s01.720p.proper-flexget test_all_series: mock: - title: show.name.s01.720p.HDTV-Group all_series: season_packs: yes test_with_dict_config_1: mock: - title: bro.s01e01.720p.HDTV-Flexget - title: bro.s01.720p.HDTV-Flexget test_with_dict_config_2: mock: - title: bro.s02.720p.HDTV-Flexget """ @pytest.fixture() def config(self): return self._config def test_season_pack_simple(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 def test_basic_tracking(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 task = execute_task('foo_s01ep1') assert len(task.accepted) == 0 task = execute_task('foo_s02ep1') assert len(task.accepted) == 1 def test_season_pack_takes_priority(self, execute_task): task = execute_task('season_pack_priority') assert len(task.accepted) == 1 entry = task.find_entry(title='foo.s01.720p-flexget') assert entry.accepted def test_respect_begin(self, execute_task): task = execute_task('respect_begin') assert len(task.accepted) == 1 entry = task.find_entry(title='bar.s02.720p-flexget') assert entry.accepted def test_tracking_rules_old_eps(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 task = execute_task('foo_s02') assert len(task.accepted) == 1 task = execute_task('foo_s01ep1') assert not task.accepted def test_tracking_rules_old_season(self, execute_task): task = execute_task('foo_s02') assert len(task.accepted) == 1 task = 
execute_task('foo_s01') assert not task.accepted def test_tracking_rules_new_season(self, execute_task): task = execute_task('foo_s01') assert len(task.accepted) == 1 task = execute_task('foo_s03') assert not task.accepted def test_several_seasons(self, execute_task): task = execute_task('several_seasons') assert len(task.accepted) == 4 def test_multiple_formats(self, execute_task): task = execute_task('multiple_formats') assert len(task.accepted) == 2 def test_backfill(self, execute_task): task = execute_task('test_backfill_1') assert len(task.accepted) == 1 task = execute_task('test_backfill_2') assert len(task.accepted) == 1 task = execute_task('test_backfill_3') assert not task.accepted task = execute_task('test_backfill_4') assert not task.accepted def test_default_threshold(self, execute_task): task = execute_task('foo_s01ep1') assert len(task.accepted) == 1 task = execute_task('foo_s01') assert len(task.accepted) == 0 def test_specific_season_pack_threshold_positive(self, execute_task): task = execute_task('test_specific_season_pack_threshold_1') assert len(task.accepted) == 3 task = execute_task('test_specific_season_pack_threshold_2') assert len(task.accepted) == 1 def test_specific_season_pack_threshold_negative(self, execute_task): task = execute_task('test_specific_season_pack_threshold_3') assert len(task.accepted) == 4 task = execute_task('test_specific_season_pack_threshold_2') assert not task.accepted def test_loose_threshold(self, execute_task): task = execute_task('test_always_get_season_pack_1') assert len(task.accepted) == 4 task = execute_task('test_always_get_season_pack_2') assert len(task.accepted) == 1 def test_exclusive(self, execute_task): task = execute_task('test_only_get_season_packs') assert len(task.accepted) == 1 entry = task.find_entry(title='bla.s01.720p-flexget') assert entry.accepted def test_proper_season_pack(self, execute_task): task = execute_task('test_proper_season_pack') assert task.find_entry('accepted', 
title='foo.s01.720p.proper-flexget') def test_proper_season_pack_2(self, execute_task): task = execute_task('test_proper_season_pack_2') assert task.find_entry('accepted', title='foo.s01.720p-flexget') task = execute_task('test_proper_season_pack_3') assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget') def test_all_series(self, execute_task): task = execute_task('test_all_series') assert task.find_entry('accepted', title='show.name.s01.720p.HDTV-Group') def test_advanced_config(self, execute_task): task = execute_task('test_with_dict_config_1') assert not task.find_entry('accepted', title='bro.s01e01.720p.HDTV-Flexget') assert task.find_entry('accepted', title='bro.s01.720p.HDTV-Flexget') execute_task('test_with_dict_config_2', options={'inject': [Entry(title='bro.s02e01.720p.HDTV-Flexget', url='')], 'immortal': True}) task = execute_task('test_with_dict_config_2') assert task.find_entry('accepted', title='bro.s02.720p.HDTV-Flexget') class TestSeriesDDAudio(object): _config = """ templates: global: parsing: series: internal tasks: min_quality: mock: - {title: 'MinQATest.S01E01.720p.XViD.DD5.1-FlexGet'} - {title: 'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'} series: - MinQATest: quality: ">dd5.1" max_quality: mock: - {title: 'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'} - {title: 'MaxQATest.S01E01.720p.XViD.DD+5.1-FlexGet'} series: - MaxQATest: quality: "<=dd5.1" test_channels: mock: - {title: 'Channels.S01E01.1080p.HDTV.DD+2.0-FlexGet'} - {title: 'Channels.S01E01.1080p.HDTV.DD+5.1-FlexGet'} - {title: 'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'} series: - Channels: quality: dd+5.1 """ @pytest.fixture() def config(self): return self._config def test_min_quality(self, execute_task): task = execute_task('min_quality') assert task.find_entry('accepted', title='MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'), \ 'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only two' def 
test_max_quality(self, execute_task): task = execute_task('max_quality') assert task.find_entry('accepted', title='MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'), \ 'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one' def test_channels(self, execute_task): task = execute_task('test_channels') assert task.find_entry(title='Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'), \ 'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet should have been accepted' assert len(task.accepted) == 1, 'should have accepted only one'
true
true
f70343e6e7ddd0d8985779f9174451404e9e56fc
1,162
py
Python
test/moduletest/CloudTestModule.py
noralsydmp/icetea
b486cdc8e0d2211e118f1f8211aa4d284ca02422
[ "Apache-2.0" ]
6
2018-08-10T17:11:10.000Z
2020-04-29T07:05:36.000Z
test/moduletest/CloudTestModule.py
noralsydmp/icetea
b486cdc8e0d2211e118f1f8211aa4d284ca02422
[ "Apache-2.0" ]
58
2018-08-13T08:36:08.000Z
2021-07-07T08:32:52.000Z
test/moduletest/CloudTestModule.py
noralsydmp/icetea
b486cdc8e0d2211e118f1f8211aa4d284ca02422
[ "Apache-2.0" ]
7
2018-08-10T12:53:18.000Z
2021-11-08T05:15:42.000Z
#!/usr/bin/env python
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=unused-argument

# 'mock' has been part of the standard library as unittest.mock since
# Python 3.3; prefer it over the third-party backport.
from unittest import mock


def create(*args, **kwargs):
    """
    Create a Mock object that imitates a valid Cloud module.

    The mock is preconfigured so that:
      - client.get_suite() returns True
      - get_campaign_id() returns True on the first call, raises KeyError
        on the second (side_effect list is consumed call by call)
      - get_campaigns() returns True
      - update_testcase() returns True
      - upload_results() returns True on the first call, False on the second

    :param args: Not used
    :param kwargs: Not used
    :return: mock.MagicMock
    """
    attrs = {"client.get_suite.return_value": True,
             "get_campaign_id.side_effect": [True, KeyError],
             "get_campaigns.return_value": True,
             "update_testcase.return_value": True,
             "upload_results.side_effect": [True, False]}
    mock_module = mock.MagicMock()
    mock_module.configure_mock(**attrs)
    return mock_module
34.176471
100
0.729776
import mock def create(*args, **kwargs): attrs = {"client.get_suite.return_value": True, "get_campaign_id.side_effect": [True, KeyError], "get_campaigns.return_value": True, "update_testcase.return_value": True, "upload_results.side_effect": [True, False]} mock_module = mock.MagicMock() mock_module.configure_mock(**attrs) return mock_module
true
true
f703448814e4eb2abf9bb8db939e2be59c31a6c8
8,765
py
Python
tools/deletebyquery/deletebyquery.py
tloubrieu-jpl/incubator-sdap-nexus
5bf903f04f12eb27f25ea2aa738c617ca404a87b
[ "Apache-2.0" ]
1
2019-11-25T18:49:26.000Z
2019-11-25T18:49:26.000Z
tools/deletebyquery/deletebyquery.py
ifenty/incubator-sdap-nexus
3059c66f53d3f3d24c74d557c7632bdcc7f1eeec
[ "Apache-2.0" ]
null
null
null
tools/deletebyquery/deletebyquery.py
ifenty/incubator-sdap-nexus
3059c66f53d3f3d24c74d557c7632bdcc7f1eeec
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Interactively delete NEXUS tiles matching a Solr query from both Solr and Cassandra."""

import argparse
import json
import logging
import uuid
from random import sample

import cassandra.concurrent
from cassandra.cluster import Cluster
from cassandra.policies import RoundRobinPolicy, TokenAwarePolicy
from solrcloudpy import SolrConnection, SearchOptions
from six.moves import input

# Module-level handles populated by init() before delete_by_query() runs.
solr_connection = None
solr_collection = None
SOLR_UNIQUE_KEY = None
cassandra_cluster = None
cassandra_session = None
cassandra_table = None

logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().handlers[0].setFormatter(
    logging.Formatter(fmt="%(asctime)s %(levelname)s:%(name)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"))


def init(args):
    """Open the Solr and Cassandra connections described by the CLI args."""
    global solr_connection
    solr_connection = SolrConnection(args.solr)
    global solr_collection
    solr_collection = solr_connection[args.collection]
    global SOLR_UNIQUE_KEY
    SOLR_UNIQUE_KEY = args.solrIdField

    # Token-aware routing layered on round-robin spreads requests across nodes.
    dc_policy = RoundRobinPolicy()
    token_policy = TokenAwarePolicy(dc_policy)

    global cassandra_cluster
    cassandra_cluster = Cluster(contact_points=args.cassandra, port=args.cassandraPort,
                                protocol_version=int(args.cassandraProtocolVersion),
                                load_balancing_policy=token_policy)
    global cassandra_session
    cassandra_session = cassandra_cluster.connect(keyspace=args.cassandraKeyspace)
    global cassandra_table
    cassandra_table = args.cassandraTable


def delete_by_query(args):
    """Build the Solr query from the args, confirm with the user, then delete.

    :param args: parsed argparse namespace; exactly one of args.query or
        args.jsonparams must be set (enforced by the mutually exclusive group).
    :raises RuntimeError: if neither query nor jsonparams was supplied.
    """
    if args.query:
        se = SearchOptions()
        se.commonparams.q(args.query) \
            .fl(SOLR_UNIQUE_KEY) \
            .fl('id')
        for fq in args.filterquery if args.filterquery is not None else []:
            se.commonparams.fq(fq)
        query = se
    elif args.jsonparams:
        se = SearchOptions(**json.loads(args.jsonparams))
        se.commonparams.fl(SOLR_UNIQUE_KEY) \
            .fl('id')
        query = se
    else:
        raise RuntimeError("either query or jsonparams is required")

    if check_query(query):
        logging.info("Collecting tiles ....")
        solr_docs = do_solr_query(query)
        if confirm_delete(len(solr_docs)):
            deleted_ids = do_delete(solr_docs, query)
            logging.info("Deleted tile IDs %s" % json.dumps([str(doc_id) for doc_id in deleted_ids], indent=2))
        else:
            logging.info("Exiting")
            return
    else:
        logging.info("Exiting")
        return


def confirm_delete(num_found):
    """Prompt until the user answers 'y' or 'n'; return True only for 'y'."""
    do_continue = input(
        "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found)
    while do_continue not in ['y', 'n']:
        do_continue = input(
            "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found)

    return do_continue == 'y'


def check_query(query):
    """Preview the query's hit count and let the user inspect a sample document.

    Returns True to proceed, False to abort. Answering 's' prints one random
    matching document and re-prompts (recursively).
    """
    solr_response = solr_collection.search(query)
    num_found = solr_response.result.response.numFound
    if num_found == 0:
        logging.info("Query returned 0 results")
        return False

    do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)
    while do_continue not in ['y', 'n', 's', '']:
        do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)

    if do_continue == 'y' or do_continue == '':
        return True
    elif do_continue == 'n':
        return False
    else:
        # Show one randomly sampled matching document, then ask again.
        se = SearchOptions()
        se.commonparams.q('%s:%s' % (SOLR_UNIQUE_KEY, sample(solr_response.result.response.docs, 1)[0][SOLR_UNIQUE_KEY]))
        logging.info(json.dumps(solr_collection.search(se).result.response.docs[0], indent=2))
        return check_query(query)


def do_solr_query(query):
    """Page through the full result set with a Solr cursor; return tile UUIDs."""
    doc_ids = []

    next_cursor_mark = "*"
    # Cursor paging requires a stable sort on the collection's unique key.
    query.commonparams.sort('%s asc' % SOLR_UNIQUE_KEY)
    while True:
        query.commonparams.remove_param('cursorMark')
        query.commonparams.add_params(cursorMark=next_cursor_mark)
        solr_response = solr_collection.search(query)

        try:
            result_next_cursor_mark = solr_response.result.nextCursorMark
        except AttributeError:
            # No results
            return []

        if result_next_cursor_mark == next_cursor_mark:
            # Cursor did not advance: every page has been consumed.
            break
        else:
            next_cursor_mark = solr_response.result.nextCursorMark

        doc_ids.extend([uuid.UUID(doc['id']) for doc in solr_response.result.response.docs])

    return doc_ids


def do_delete(doc_ids, query):
    """Delete the tiles from Cassandra first, then from Solr; return the ids."""
    logging.info("Executing Cassandra delete...")
    delete_from_cassandra(doc_ids)
    logging.info("Executing Solr delete...")
    delete_from_solr(query)
    return doc_ids


def delete_from_cassandra(doc_ids):
    """Issue one concurrent DELETE per tile id; log (do not abort on) failures."""
    # The table name cannot be a bind parameter in CQL, so it is interpolated
    # here; it comes from the CLI (--cassandraTable), not from untrusted data.
    statement = cassandra_session.prepare("DELETE FROM %s WHERE tile_id=?" % cassandra_table)
    results = cassandra.concurrent.execute_concurrent_with_args(cassandra_session, statement,
                                                                [(doc_id,) for doc_id in doc_ids])

    for (success, result) in results:
        if not success:
            logging.warning("Could not delete tile %s" % result)


def delete_from_solr(query):
    """Delete everything matching the query in one batch, then commit."""
    solr_collection.delete(query, commit=False)
    solr_collection.commit()


def parse_args():
    """Define and parse the command-line interface."""
    parser = argparse.ArgumentParser(description='Delete data from NEXUS using a Solr Query',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--solr',
                        help='The url of the SOLR server.',
                        required=True,
                        metavar='127.0.0.1:8983')

    parser.add_argument('--collection',
                        help='The name of the SOLR collection.',
                        required=True,
                        metavar='nexustiles')

    parser.add_argument('--solrIdField',
                        help='The name of the unique ID field for this collection.',
                        required=False,
                        default='solr_id_s',
                        metavar='solr_id_s')

    parser.add_argument('--cassandra',
                        help='The hostname(s) or IP(s) of the Cassandra server(s).',
                        required=True,
                        nargs='+',
                        metavar=('127.0.0.100', '127.0.0.101'))

    parser.add_argument('-k', '--cassandraKeyspace',
                        help='The Cassandra keyspace.',
                        required=True,
                        metavar='nexustiles')

    group = parser.add_mutually_exclusive_group(required=True)
    # NOTE: the original help text used ''q'' which rendered as plain q due to
    # adjacent string concatenation; quoted properly here.
    group.add_argument('-q', '--query',
                       help="The 'q' parameter passed to SOLR Search",
                       metavar='*:*')

    group.add_argument('--jsonparams',
                       help='Full query parameters formatted as JSON')

    parser.add_argument('-fq', '--filterquery',
                        help="The 'fq' parameter passed to SOLR Search. Only used if --jsonparams is not provided",
                        required=False,
                        nargs='+')

    parser.add_argument('-t', '--cassandraTable',
                        help='The name of the cassandra table.',
                        required=False,
                        default='sea_surface_temp')

    parser.add_argument('-p', '--cassandraPort',
                        help='The port used to connect to Cassandra.',
                        required=False,
                        default='9042')

    parser.add_argument('-pv', '--cassandraProtocolVersion',
                        help='The version of the Cassandra protocol the driver should use.',
                        required=False,
                        choices=['1', '2', '3', '4', '5'],
                        default='3')

    return parser.parse_args()


if __name__ == "__main__":
    the_args = parse_args()

    init(the_args)

    delete_by_query(the_args)
34.644269
129
0.625214
import argparse import json import logging import uuid from random import sample import cassandra.concurrent from cassandra.cluster import Cluster from cassandra.policies import RoundRobinPolicy, TokenAwarePolicy from solrcloudpy import SolrConnection, SearchOptions from six.moves import input solr_connection = None solr_collection = None SOLR_UNIQUE_KEY = None cassandra_cluster = None cassandra_session = None cassandra_table = None logging.basicConfig() logging.getLogger().setLevel(logging.INFO) logging.getLogger().handlers[0].setFormatter( logging.Formatter(fmt="%(asctime)s %(levelname)s:%(name)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S")) def init(args): global solr_connection solr_connection = SolrConnection(args.solr) global solr_collection solr_collection = solr_connection[args.collection] global SOLR_UNIQUE_KEY SOLR_UNIQUE_KEY = args.solrIdField dc_policy = RoundRobinPolicy() token_policy = TokenAwarePolicy(dc_policy) global cassandra_cluster cassandra_cluster = Cluster(contact_points=args.cassandra, port=args.cassandraPort, protocol_version=int(args.cassandraProtocolVersion), load_balancing_policy=token_policy) global cassandra_session cassandra_session = cassandra_cluster.connect(keyspace=args.cassandraKeyspace) global cassandra_table cassandra_table = args.cassandraTable def delete_by_query(args): if args.query: se = SearchOptions() se.commonparams.q(args.query) \ .fl(SOLR_UNIQUE_KEY) \ .fl('id') for fq in args.filterquery if args.filterquery is not None else []: se.commonparams.fq(fq) query = se elif args.jsonparams: se = SearchOptions(**json.loads(args.jsonparams)) se.commonparams.fl(SOLR_UNIQUE_KEY) \ .fl('id') query = se else: raise RuntimeError("either query or jsonparams is required") if check_query(query): logging.info("Collecting tiles ....") solr_docs = do_solr_query(query) if confirm_delete(len(solr_docs)): deleted_ids = do_delete(solr_docs, query) logging.info("Deleted tile IDs %s" % json.dumps([str(doc_id) for doc_id in deleted_ids], 
indent=2)) else: logging.info("Exiting") return else: logging.info("Exiting") return def confirm_delete(num_found): do_continue = input( "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found) while do_continue not in ['y', 'n']: do_continue = input( "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found) return do_continue == 'y' def check_query(query): solr_response = solr_collection.search(query) num_found = solr_response.result.response.numFound if num_found == 0: logging.info("Query returned 0 results") return False do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found) while do_continue not in ['y', 'n', 's', '']: do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found) if do_continue == 'y' or do_continue == '': return True elif do_continue == 'n': return False else: se = SearchOptions() se.commonparams.q('%s:%s' % (SOLR_UNIQUE_KEY, sample(solr_response.result.response.docs, 1)[0][SOLR_UNIQUE_KEY])) logging.info(json.dumps(solr_collection.search(se).result.response.docs[0], indent=2)) return check_query(query) def do_solr_query(query): doc_ids = [] next_cursor_mark = "*" query.commonparams.sort('%s asc' % SOLR_UNIQUE_KEY) while True: query.commonparams.remove_param('cursorMark') query.commonparams.add_params(cursorMark=next_cursor_mark) solr_response = solr_collection.search(query) try: result_next_cursor_mark = solr_response.result.nextCursorMark except AttributeError: return [] if result_next_cursor_mark == next_cursor_mark: break else: next_cursor_mark = solr_response.result.nextCursorMark doc_ids.extend([uuid.UUID(doc['id']) for doc in solr_response.result.response.docs]) return doc_ids def do_delete(doc_ids, query): logging.info("Executing Cassandra delete...") delete_from_cassandra(doc_ids) logging.info("Executing Solr delete...") 
delete_from_solr(query) return doc_ids def delete_from_cassandra(doc_ids): statement = cassandra_session.prepare("DELETE FROM %s WHERE tile_id=?" % cassandra_table) results = cassandra.concurrent.execute_concurrent_with_args(cassandra_session, statement, [(doc_id,) for doc_id in doc_ids]) for (success, result) in results: if not success: logging.warning("Could not delete tile %s" % result) def delete_from_solr(query): solr_collection.delete(query, commit=False) solr_collection.commit() def parse_args(): parser = argparse.ArgumentParser(description='Delete data from NEXUS using a Solr Query', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--solr', help='The url of the SOLR server.', required=True, metavar='127.0.0.1:8983') parser.add_argument('--collection', help='The name of the SOLR collection.', required=True, metavar='nexustiles') parser.add_argument('--solrIdField', help='The name of the unique ID field for this collection.', required=False, default='solr_id_s', metavar='solr_id_s') parser.add_argument('--cassandra', help='The hostname(s) or IP(s) of the Cassandra server(s).', required=True, nargs='+', metavar=('127.0.0.100', '127.0.0.101')) parser.add_argument('-k', '--cassandraKeyspace', help='The Cassandra keyspace.', required=True, metavar='nexustiles') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-q', '--query', help='The ''q'' parameter passed to SOLR Search', metavar='*:*') group.add_argument('--jsonparams', help='Full query prameters formatted as JSON') parser.add_argument('-fq', '--filterquery', help='The ''fq'' parameter passed to SOLR Search. 
Only used if --jsonparams is not provided', required=False, nargs='+') parser.add_argument('-t', '--cassandraTable', help='The name of the cassandra table.', required=False, default='sea_surface_temp') parser.add_argument('-p', '--cassandraPort', help='The port used to connect to Cassandra.', required=False, default='9042') parser.add_argument('-pv', '--cassandraProtocolVersion', help='The version of the Cassandra protocol the driver should use.', required=False, choices=['1', '2', '3', '4', '5'], default='3') return parser.parse_args() if __name__ == "__main__": the_args = parse_args() init(the_args) delete_by_query(the_args)
true
true
f7034515e7982807c4d1cc43fd9bae671c33fcd0
1,080
py
Python
parse_ssdeep_output.py
tanner-g/file_attribution_script
cd9a9494c899fbaca6eac4f334b2fce98daf7d38
[ "MIT" ]
null
null
null
parse_ssdeep_output.py
tanner-g/file_attribution_script
cd9a9494c899fbaca6eac4f334b2fce98daf7d38
[ "MIT" ]
null
null
null
parse_ssdeep_output.py
tanner-g/file_attribution_script
cd9a9494c899fbaca6eac4f334b2fce98daf7d38
[ "MIT" ]
null
null
null
from decimal import Decimal

# Purpose: read in SSDEEP output and print findings.
# Author: Tanner G.


def main():
    """Compare the two hashes in 'ssdeep_comparison' character by character.

    Skips the header line, then counts matching characters between the two
    hash lines up to the first comma in either (the comma ends the hash
    field in ssdeep output). Prints the statistics and writes them to
    'ssdeep_stats' as "count:<N>,ratio:<R>" (ratio truncated to 5 chars).
    """
    # Read the ssdeep output: one header line, then one line per hashed file.
    with open("ssdeep_comparison", "r") as comparison_file:
        comparison_file.readline()  # skip header line of ssdeep output
        filea_data = comparison_file.readline()
        fileb_data = comparison_file.readline()

    total_count = 0
    similarities = 0
    # zip() also guards against an IndexError when one line is shorter than
    # the other (the original indexed loop only bounded by filea's length).
    for char_a, char_b in zip(filea_data, fileb_data):
        if char_a == "," or char_b == ",":
            break
        total_count += 1
        if char_a == char_b:
            similarities += 1

    print("------------------")
    print("Stats from ssdeep:")
    print("------------------")
    print("Total Count: " + str(total_count))
    print("Similarities: " + str(similarities))
    # Decimal keeps the ratio exact before it is truncated to 5 characters.
    ratio = (Decimal(similarities) / Decimal(total_count) * 100)
    print("Hash similarity detected: " + str(ratio)[:5] + "%")

    with open("ssdeep_stats", "w") as output_file:
        output_file.write("count:" + str(total_count) + ",ratio:" + str(ratio)[:5] + "\n")


if __name__ == "__main__":
    main()
25.116279
73
0.636111
from decimal import * def main(): file = open("ssdeep_comparison", "r") file.readline() filea_data = file.readline() fileb_data = file.readline() file.close() totalCount = 0 similarities = 0 index = 0 max_len = len(filea_data) while index < max_len: totalCount +=1 if filea_data[index] == "," or fileb_data[index] == ",": index = max_len totalCount -=1 break elif filea_data[index] == fileb_data[index]: similarities +=1 index +=1 else: index+=1 continue print("------------------") print("Stats from ssdeep:") print("------------------") print("Total Count: " + str(totalCount)) print("Similarities: " + str(similarities)) ratio = (Decimal(similarities)/Decimal(totalCount) * 100) print ("Hash similarity detected: " + str(ratio)[:5] + "%") outputFile = open("ssdeep_stats", "w") outputFile.write("count:"+str(totalCount)+",ratio:"+str(ratio)[:5]+"\n") outputFile.close() if __name__ == "__main__": main()
true
true
f703451d94b6fa098060ce938c6c71ce7ce1964f
1,738
py
Python
redis-cache.py
voidabhi/python-scripts
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
[ "MIT" ]
2
2015-06-01T18:33:38.000Z
2018-11-21T19:40:37.000Z
redis-cache.py
voidabhi/python-scripts
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
[ "MIT" ]
102
2015-01-20T17:26:52.000Z
2017-12-28T17:32:51.000Z
redis-cache.py
voidabhi/python-scripts
a6d06bd3ccf4ec24df521a3cf305d22176f68a18
[ "MIT" ]
3
2020-03-02T06:54:18.000Z
2021-01-07T16:36:35.000Z
#!/usr/bin/env python

# cPickle was folded into pickle in Python 3 (the C implementation is used
# automatically); pickle also exists on Python 2, so this is safe everywhere.
import pickle
from functools import wraps


def redis_lru(capacity=5000, slice=slice(None)):
    """Decorator adding a Redis-backed LRU cache to a function.

    Cached return values are pickled into a Redis hash; a sorted set tracks
    per-key usage counts so the least-used entries can be ejected once
    *capacity* is reached. Hit/miss counters live in plain Redis keys.

    The decorated function gains three attributes:
      - wrapper.init(conn): supply the Redis connection; until it is called
        the function runs uncached.
      - wrapper.info(): return (hits, misses, capacity, size).
      - wrapper.clear(): delete all cached values and counters.

    :param capacity: maximum number of cached entries before ejection kicks in
    :param slice: slice applied to the (args + sorted kwargs) tuple to choose
        which call arguments form the cache key
    """
    def decorator(func):
        cache_keys = "lru:keys:%s" % (func.__name__,)
        cache_vals = "lru:vals:%s" % (func.__name__,)
        cache_hits = "lru:hits:%s" % (func.__name__,)
        cache_miss = "lru:miss:%s" % (func.__name__,)
        lvars = [None]  # closure mutable: holds the Redis connection

        def add(key, value):
            # Cache miss: make room if needed, then store the pickled value.
            eject()
            conn = lvars[0]
            conn.incr(cache_miss)
            conn.hset(cache_vals, key, pickle.dumps(value))
            # NOTE(review): zadd/zincrby argument order below matches the
            # redis-py 2.x signatures -- confirm against the installed client.
            conn.zadd(cache_keys, 0, key)
            return value

        def get(key):
            conn = lvars[0]
            value = conn.hget(cache_vals, key)
            if value:
                conn.incr(cache_hits)
                conn.zincrby(cache_keys, key, 1.0)
                value = pickle.loads(value)
            return value

        def eject():
            # Once full, drop roughly the 10% least-used keys (capped at 1000).
            # '//' keeps count an int on Python 3; identical result on Python 2.
            conn = lvars[0]
            count = min((capacity // 10) or 1, 1000)
            if conn.zcard(cache_keys) >= capacity:
                eject = conn.zrange(cache_keys, 0, count)
                conn.zremrangebyrank(cache_keys, 0, count)
                conn.hdel(cache_vals, *eject)

        @wraps(func)
        def wrapper(*args, **kwargs):
            conn = lvars[0]
            if conn:
                # Cache key = pickled selection of positional args + sorted kwargs.
                items = args + tuple(sorted(kwargs.items()))
                key = pickle.dumps(items[slice])
                return get(key) or add(key, func(*args, **kwargs))
            else:
                # No connection configured: behave as the undecorated function.
                return func(*args, **kwargs)

        def info():
            conn = lvars[0]
            size = int(conn.zcard(cache_keys) or 0)
            hits, misses = int(conn.get(cache_hits) or 0), int(conn.get(cache_miss) or 0)
            return hits, misses, capacity, size

        def clear():
            conn = lvars[0]
            conn.delete(cache_keys, cache_vals)
            conn.delete(cache_hits, cache_miss)

        def init(conn):
            lvars[0] = conn

        wrapper.init = init
        wrapper.info = info
        wrapper.clear = clear
        return wrapper
    return decorator
25.558824
80
0.644419
import cPickle from functools import wraps def redis_lru(capacity=5000, slice=slice(None)): def decorator(func): cache_keys = "lru:keys:%s" % (func.__name__,) cache_vals = "lru:vals:%s" % (func.__name__,) cache_hits = "lru:hits:%s" % (func.__name__,) cache_miss = "lru:miss:%s" % (func.__name__,) lvars = [None] def add(key, value): eject() conn = lvars[0] conn.incr(cache_miss) conn.hset(cache_vals, key, cPickle.dumps(value)) conn.zadd(cache_keys, 0, key) return value def get(key): conn = lvars[0] value = conn.hget(cache_vals, key) if value: conn.incr(cache_hits) conn.zincrby(cache_keys, key, 1.0) value = cPickle.loads(value) return value def eject(): conn = lvars[0] count = min((capacity / 10) or 1, 1000) if conn.zcard(cache_keys) >= capacity: eject = conn.zrange(cache_keys, 0, count) conn.zremrangebyrank(cache_keys, 0, count) conn.hdel(cache_vals, *eject) @wraps(func) def wrapper(*args, **kwargs): conn = lvars[0] if conn: items = args + tuple(sorted(kwargs.items())) key = cPickle.dumps(items[slice]) return get(key) or add(key, func(*args, **kwargs)) else: return func(*args, **kwargs) def info(): conn = lvars[0] size = int(conn.zcard(cache_keys) or 0) hits, misses = int(conn.get(cache_hits) or 0), int(conn.get(cache_miss) or 0) return hits, misses, capacity, size def clear(): conn = lvars[0] conn.delete(cache_keys, cache_vals) conn.delete(cache_hits, cache_miss) def init(conn): lvars[0] = conn wrapper.init = init wrapper.info = info wrapper.clear = clear return wrapper return decorator
true
true