column                                       dtype           values
hexsha                                       stringlengths   40 - 40
size                                         int64           2 - 1.02M
ext                                          stringclasses   10 values
lang                                         stringclasses   1 value
max_stars_repo_path                          stringlengths   4 - 245
max_stars_repo_name                          stringlengths   6 - 130
max_stars_repo_head_hexsha                   stringlengths   40 - 40
max_stars_repo_licenses                      listlengths     1 - 10
max_stars_count                              int64           1 - 191k
max_stars_repo_stars_event_min_datetime      stringlengths   24 - 24
max_stars_repo_stars_event_max_datetime      stringlengths   24 - 24
max_issues_repo_path                         stringlengths   4 - 245
max_issues_repo_name                         stringlengths   6 - 130
max_issues_repo_head_hexsha                  stringlengths   40 - 40
max_issues_repo_licenses                     listlengths     1 - 10
max_issues_count                             int64           1 - 67k
max_issues_repo_issues_event_min_datetime    stringlengths   24 - 24
max_issues_repo_issues_event_max_datetime    stringlengths   24 - 24
max_forks_repo_path                          stringlengths   4 - 245
max_forks_repo_name                          stringlengths   6 - 130
max_forks_repo_head_hexsha                   stringlengths   40 - 40
max_forks_repo_licenses                      listlengths     1 - 10
max_forks_count                              int64           1 - 105k
max_forks_repo_forks_event_min_datetime      stringlengths   24 - 24
max_forks_repo_forks_event_max_datetime      stringlengths   24 - 24
content                                      stringlengths   2 - 1.02M
avg_line_length                              float64         1 - 417k
max_line_length                              int64           1 - 987k
alphanum_fraction                            float64         0 - 1
content_no_comment                           stringlengths   0 - 1.01M
is_comment_constant_removed                  bool            1 class
is_sharp_comment_removed                     bool            1 class
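As a reading aid, here is a minimal sketch of how this schema could be declared with the Hugging Face `datasets` library. The column names come from the table above; the dtype mapping (stringlengths/stringclasses to plain strings, listlengths to a list of strings) is an inference rather than a published feature spec, and the `repo_columns` helper is a hypothetical convenience, not part of the dataset.

    from datasets import Features, Sequence, Value


    def repo_columns(kind: str) -> dict:
        # Hypothetical helper: the max_stars_*, max_issues_* and max_forks_*
        # columns share the same shape, so build each group from a template.
        return {
            f"max_{kind}_repo_path": Value("string"),
            f"max_{kind}_repo_name": Value("string"),
            f"max_{kind}_repo_head_hexsha": Value("string"),
            f"max_{kind}_repo_licenses": Sequence(Value("string")),
            f"max_{kind}_count": Value("int64"),
            f"max_{kind}_repo_{kind}_event_min_datetime": Value("string"),
            f"max_{kind}_repo_{kind}_event_max_datetime": Value("string"),
        }


    features = Features({
        "hexsha": Value("string"),
        "size": Value("int64"),
        "ext": Value("string"),
        "lang": Value("string"),
        **repo_columns("stars"),
        **repo_columns("issues"),
        **repo_columns("forks"),
        "content": Value("string"),
        "avg_line_length": Value("float64"),
        "max_line_length": Value("int64"),
        "alphanum_fraction": Value("float64"),
        "content_no_comment": Value("string"),
        "is_comment_constant_removed": Value("bool"),
        "is_sharp_comment_removed": Value("bool"),
    })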
hexsha: 1c49d71f05cafc9fa6e61f1005654be4254a0bd8
size: 1,339
ext: py
lang: Python
max_stars_repo_path: transcribe/scribe_modules/yum_repos.py
max_stars_repo_name: aakarshg/scribe
max_stars_repo_head_hexsha: 0ae48546f9d461f9421305d0902ed73b81c0f112
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: transcribe/scribe_modules/yum_repos.py
max_issues_repo_name: aakarshg/scribe
max_issues_repo_head_hexsha: 0ae48546f9d461f9421305d0902ed73b81c0f112
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: transcribe/scribe_modules/yum_repos.py
max_forks_repo_name: aakarshg/scribe
max_forks_repo_head_hexsha: 0ae48546f9d461f9421305d0902ed73b81c0f112
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

    from . import ScribeModuleBaseClass
    from . lib.util import format_url

    base_url = "http://mirror.centos.org/centos/$releasever/{}/$basearch/"
    # object_dict = {}


    class Yum_repos(ScribeModuleBaseClass):
        def __init__(self, input_dict=None, module_name=None, host_name=None,
                     input_type=None, scribe_uuid=None):
            ScribeModuleBaseClass.__init__(self, module_name=module_name,
                                           input_dict=input_dict,
                                           host_name=host_name,
                                           input_type=input_type,
                                           scribe_uuid=scribe_uuid)
            if input_dict:
                # object_dict['repo_name'] = input_dict['repoid']
                # object_dict['repo_state'] = self.update_repo_state(input_dict)
                # object_dict['base_url'] = format_url(base_url, self.repo_name)
                self.repo_name = input_dict['repoid']
                self.repo_state = self.update_repo_state(input_dict)
                # This is just for the sake of it
                self.base_url = format_url(base_url, self.repo_name)

        def update_repo_state(self, value):
            if value['state'] == 'enabled':
                return 1
            return 0

        def __iter__(self):
            for attr, value in self.__dict__.items():
                yield attr, value

avg_line_length: 38.257143
max_line_length: 76
alphanum_fraction: 0.589246

content_no_comment:

    from . import ScribeModuleBaseClass
    from . lib.util import format_url

    base_url = "http://mirror.centos.org/centos/$releasever/{}/$basearch/"


    class Yum_repos(ScribeModuleBaseClass):
        def __init__(self, input_dict=None, module_name=None, host_name=None,
                     input_type=None, scribe_uuid=None):
            ScribeModuleBaseClass.__init__(self, module_name=module_name,
                                           input_dict=input_dict,
                                           host_name=host_name,
                                           input_type=input_type,
                                           scribe_uuid=scribe_uuid)
            if input_dict:
                self.repo_name = input_dict['repoid']
                self.repo_state = self.update_repo_state(input_dict)
                self.base_url = format_url(base_url, self.repo_name)

        def update_repo_state(self, value):
            if value['state'] == 'enabled':
                return 1
            return 0

        def __iter__(self):
            for attr, value in self.__dict__.items():
                yield attr, value

is_comment_constant_removed: true
is_sharp_comment_removed: true
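The derived statistics columns appear to be simple functions of `content`. The sketch below is an assumption about those definitions rather than the dataset's documented pipeline, but it is consistent with the row above (1,339 characters over 35 lines gives the listed avg_line_length of 38.257143).

    def line_stats(content: str) -> dict:
        # Assumed definitions of size, avg_line_length, max_line_length and
        # alphanum_fraction; not taken from a published extraction pipeline.
        lines = content.splitlines()
        n_lines = max(len(lines), 1)
        return {
            "size": len(content),
            "avg_line_length": len(content) / n_lines,
            "max_line_length": max((len(line) for line in lines), default=0),
            "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
        }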
hexsha: 1c49d7dd71ba7d729f7fdaf9ace0f3e50bc1f6c4
size: 4,730
ext: py
lang: Python
max_stars_repo_path: src/v5.3/resources/swagger_client/models/tpdm_credential_student_academic_record.py
max_stars_repo_name: xmarcosx/edfi-notebook
max_stars_repo_head_hexsha: 0564ebdf1d0f45a9d25056e7e61369f0a837534d
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2021-04-27T17:18:17.000Z
max_stars_repo_stars_event_max_datetime: 2021-04-27T19:14:39.000Z
max_issues_repo_path: src/v5.1/resources/swagger_client/models/tpdm_credential_student_academic_record.py
max_issues_repo_name: xmarcosx/edfi-notebook
max_issues_repo_head_hexsha: 0564ebdf1d0f45a9d25056e7e61369f0a837534d
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/v5.1/resources/swagger_client/models/tpdm_credential_student_academic_record.py
max_forks_repo_name: xmarcosx/edfi-notebook
max_forks_repo_head_hexsha: 0564ebdf1d0f45a9d25056e7e61369f0a837534d
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-01-06T09:43:11.000Z
max_forks_repo_forks_event_max_datetime: 2022-01-06T09:43:11.000Z
# coding: utf-8 """ Ed-Fi Operational Data Store API The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501 OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from swagger_client.configuration import Configuration class TpdmCredentialStudentAcademicRecord(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'student_academic_record_reference': 'EdFiStudentAcademicRecordReference' } attribute_map = { 'student_academic_record_reference': 'studentAcademicRecordReference' } def __init__(self, student_academic_record_reference=None, _configuration=None): # noqa: E501 """TpdmCredentialStudentAcademicRecord - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._student_academic_record_reference = None self.discriminator = None self.student_academic_record_reference = student_academic_record_reference @property def student_academic_record_reference(self): """Gets the student_academic_record_reference of this TpdmCredentialStudentAcademicRecord. # noqa: E501 :return: The student_academic_record_reference of this TpdmCredentialStudentAcademicRecord. # noqa: E501 :rtype: EdFiStudentAcademicRecordReference """ return self._student_academic_record_reference @student_academic_record_reference.setter def student_academic_record_reference(self, student_academic_record_reference): """Sets the student_academic_record_reference of this TpdmCredentialStudentAcademicRecord. :param student_academic_record_reference: The student_academic_record_reference of this TpdmCredentialStudentAcademicRecord. 
# noqa: E501 :type: EdFiStudentAcademicRecordReference """ if self._configuration.client_side_validation and student_academic_record_reference is None: raise ValueError("Invalid value for `student_academic_record_reference`, must not be `None`") # noqa: E501 self._student_academic_record_reference = student_academic_record_reference def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(TpdmCredentialStudentAcademicRecord, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, TpdmCredentialStudentAcademicRecord): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, TpdmCredentialStudentAcademicRecord): return True return self.to_dict() != other.to_dict()
avg_line_length: 37.84
max_line_length: 482
alphanum_fraction: 0.664482
import pprint import re import six from swagger_client.configuration import Configuration class TpdmCredentialStudentAcademicRecord(object): swagger_types = { 'student_academic_record_reference': 'EdFiStudentAcademicRecordReference' } attribute_map = { 'student_academic_record_reference': 'studentAcademicRecordReference' } def __init__(self, student_academic_record_reference=None, _configuration=None): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._student_academic_record_reference = None self.discriminator = None self.student_academic_record_reference = student_academic_record_reference @property def student_academic_record_reference(self): return self._student_academic_record_reference @student_academic_record_reference.setter def student_academic_record_reference(self, student_academic_record_reference): if self._configuration.client_side_validation and student_academic_record_reference is None: raise ValueError("Invalid value for `student_academic_record_reference`, must not be `None`") self._student_academic_record_reference = student_academic_record_reference def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(TpdmCredentialStudentAcademicRecord, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, TpdmCredentialStudentAcademicRecord): return False return self.to_dict() == other.to_dict() def __ne__(self, other): if not isinstance(other, TpdmCredentialStudentAcademicRecord): return True return self.to_dict() != other.to_dict()
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 1c49d846d74671fb13187159a9661e0807baa518
size: 4,859
ext: py
lang: Python
max_stars_repo_path: SecML/src/secml/ml/kernels/c_kernel_poly.py
max_stars_repo_name: dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
max_stars_repo_head_hexsha: 77698340906fd0ec68d857315283d849e236ebd7
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 5
max_stars_repo_stars_event_min_datetime: 2020-07-09T13:03:34.000Z
max_stars_repo_stars_event_max_datetime: 2021-02-16T17:15:26.000Z
max_issues_repo_path: SecML/src/secml/ml/kernels/c_kernel_poly.py
max_issues_repo_name: dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
max_issues_repo_head_hexsha: 77698340906fd0ec68d857315283d849e236ebd7
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2021-12-30T21:11:50.000Z
max_issues_repo_issues_event_max_datetime: 2021-12-30T21:11:50.000Z
max_forks_repo_path: SecML/src/secml/ml/kernels/c_kernel_poly.py
max_forks_repo_name: dsolanno/Poisoning-Attacks-on-Algorithmic-Fairness
max_forks_repo_head_hexsha: 77698340906fd0ec68d857315283d849e236ebd7
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2021-03-22T19:22:56.000Z
max_forks_repo_forks_event_max_datetime: 2021-09-19T20:07:10.000Z
""" .. module:: CKernelPoly :synopsis: Polynomial kernel .. moduleauthor:: Battista Biggio <battista.biggio@unica.it> .. moduleauthor:: Marco Melis <marco.melis@unica.it> """ from sklearn import metrics from secml.array import CArray from secml.ml.kernels import CKernel class CKernelPoly(CKernel): """Polynomial kernel. Given matrices X and RV, this is computed by:: K(x, rv) = (coef0 + gamma * <x, rv>)^degree for each pair of rows in X and in RV. Parameters ---------- degree : int, optional Kernel degree. Default 2. gamma : float, optional Free parameter to be used for balancing. Default 1.0. coef0 : float, optional Free parameter used for trading off the influence of higher-order versus lower-order terms in the kernel. Default 1.0. Attributes ---------- class_type : 'poly' Examples -------- >>> from secml.array import CArray >>> from secml.ml.kernels.c_kernel_poly import CKernelPoly >>> print(CKernelPoly(degree=3, gamma=0.001, coef0=2).k(CArray([[1,2],[3,4]]), CArray([[10,20],[30,40]]))) CArray([[ 8.615125 9.393931] [ 9.393931 11.390625]]) >>> print(CKernelPoly().k(CArray([[1,2],[3,4]]))) CArray([[ 36. 144.] [144. 676.]]) """ __class_type = 'poly' def __init__(self, degree=2, gamma=1.0, coef0=1.0): # kernel parameters self.degree = degree self.gamma = gamma self.coef0 = coef0 super(CKernelPoly, self).__init__() @property def degree(self): """Degree parameter.""" return self._degree @degree.setter def degree(self, degree): """Sets degree parameter. Parameters ---------- degree : int Default is 2. Integer degree of the kernel. """ self._degree = int(degree) @property def gamma(self): """Gamma parameter.""" return self._gamma @gamma.setter def gamma(self, gamma): """Sets gamma parameter. Parameters ---------- gamma : float Default is 1.0. This is a free parameter to be used for balancing. """ self._gamma = float(gamma) @property def coef0(self): """Coef0 parameter.""" return self._coef0 @coef0.setter def coef0(self, coef0): """Sets coef0 parameter. Parameters ---------- coef0 : float Default is 1.0. Free parameter used for trading off the influence of higher-order versus lower-order terms in the kernel. """ self._coef0 = float(coef0) def _forward(self, x): """Compute the polynomial kernel between x and cached rv. Parameters ---------- x : CArray or array_like Array of shape (n_x, n_features). Returns ------- kernel : CArray Kernel between x and rv. Array of shape (n_x, n_rv). """ return CArray(metrics.pairwise.polynomial_kernel( CArray(x).get_data(), CArray(self._rv).get_data(), self.degree, self.gamma, self.coef0)) # TODO: check for high gamma, # we may have uncontrolled behavior (too high values) def _backward(self, w=None): """Calculate Polynomial kernel gradient wrt cached vector 'x'. The gradient of Polynomial kernel is given by:: dK(rv,x)/dy = rv * gamma * degree * k(rv,x, degree-1) Parameters ---------- w : CArray of shape (1, n_rv) or None if CArray, it is pre-multiplied to the gradient of the module, as in standard reverse-mode autodiff. Returns ------- kernel_gradient : CArray Kernel gradient of rv with respect to vector x, shape (n_rv, n_features) if n_rv > 1 and w is None, else (1, n_features). 
""" # Checking if cached x is a vector if not self._cached_x.is_vector_like: raise ValueError( "kernel gradient can be computed only wrt vector-like arrays.") if self._rv is None: raise ValueError("Please run forward with caching=True or set" "`rv` first.") k = CArray(metrics.pairwise.polynomial_kernel( self._rv.get_data(), self._cached_x.get_data(), self.degree - 1, self.gamma, self.coef0)) # Format of output array should be the same as cached x if self._cached_x.issparse: rv = self._rv.tosparse() # Casting the kernel to sparse for efficient broadcasting k = k.tosparse() else: rv = self._rv.todense() grad = rv * k * self.gamma * self.degree return grad if w is None else w.dot(grad)
avg_line_length: 27.297753
max_line_length: 110
alphanum_fraction: 0.570076
from sklearn import metrics from secml.array import CArray from secml.ml.kernels import CKernel class CKernelPoly(CKernel): __class_type = 'poly' def __init__(self, degree=2, gamma=1.0, coef0=1.0): self.degree = degree self.gamma = gamma self.coef0 = coef0 super(CKernelPoly, self).__init__() @property def degree(self): return self._degree @degree.setter def degree(self, degree): self._degree = int(degree) @property def gamma(self): return self._gamma @gamma.setter def gamma(self, gamma): self._gamma = float(gamma) @property def coef0(self): return self._coef0 @coef0.setter def coef0(self, coef0): self._coef0 = float(coef0) def _forward(self, x): return CArray(metrics.pairwise.polynomial_kernel( CArray(x).get_data(), CArray(self._rv).get_data(), self.degree, self.gamma, self.coef0)) def _backward(self, w=None): if not self._cached_x.is_vector_like: raise ValueError( "kernel gradient can be computed only wrt vector-like arrays.") if self._rv is None: raise ValueError("Please run forward with caching=True or set" "`rv` first.") k = CArray(metrics.pairwise.polynomial_kernel( self._rv.get_data(), self._cached_x.get_data(), self.degree - 1, self.gamma, self.coef0)) if self._cached_x.issparse: rv = self._rv.tosparse() k = k.tosparse() else: rv = self._rv.todense() grad = rv * k * self.gamma * self.degree return grad if w is None else w.dot(grad)
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 1c49d916365ec2c44186d26762ebf015bff76d09
size: 246
ext: py
lang: Python
max_stars_repo_path: setup.py
max_stars_repo_name: dartmouthrobotics/gds_tools
max_stars_repo_head_hexsha: 35b26b32b0d59fccf08050014bd60fd8b97fd5aa
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: setup.py
max_issues_repo_name: dartmouthrobotics/gds_tools
max_issues_repo_head_hexsha: 35b26b32b0d59fccf08050014bd60fd8b97fd5aa
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py
max_forks_repo_name: dartmouthrobotics/gds_tools
max_forks_repo_head_hexsha: 35b26b32b0d59fccf08050014bd60fd8b97fd5aa
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:

    from distutils.core import setup
    from catkin_pkg.python_setup import generate_distutils_setup

    # fetch values from package.xml
    setup_args = generate_distutils_setup(
        packages=['gds_tools'],
        package_dir={'': 'src'},
    )

    setup(**setup_args)

avg_line_length: 22.363636
max_line_length: 60
alphanum_fraction: 0.768293

content_no_comment:

    from distutils.core import setup
    from catkin_pkg.python_setup import generate_distutils_setup

    setup_args = generate_distutils_setup(
        packages=['gds_tools'],
        package_dir={'': 'src'},
    )

    setup(**setup_args)

is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 1c49da5e3caa5cc5693ed524f38852b909517aba
size: 19,014
ext: py
lang: Python
max_stars_repo_path: nova/network/security_group/quantum_driver.py
max_stars_repo_name: bopopescu/Nova-31
max_stars_repo_head_hexsha: cabc3f7a905ea982cf9d2832a3990ae8e061d963
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-04-08T10:13:03.000Z
max_stars_repo_stars_event_max_datetime: 2021-04-08T10:13:03.000Z
max_issues_repo_path: nova/network/security_group/quantum_driver.py
max_issues_repo_name: bopopescu/Nova-31
max_issues_repo_head_hexsha: cabc3f7a905ea982cf9d2832a3990ae8e061d963
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: nova/network/security_group/quantum_driver.py
max_forks_repo_name: bopopescu/Nova-31
max_forks_repo_head_hexsha: cabc3f7a905ea982cf9d2832a3990ae8e061d963
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-07-24T08:19:18.000Z
max_forks_repo_forks_event_max_datetime: 2020-07-24T08:19:18.000Z
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Nicira, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Aaron Rosen, Nicira Networks, Inc. from oslo.config import cfg from quantumclient.common import exceptions as q_exc from quantumclient.quantum import v2_0 as quantumv20 from webob import exc from nova.compute import api as compute_api from nova import exception from nova.network import quantumv2 from nova.network.security_group import security_group_base from nova.openstack.common import excutils from nova.openstack.common import log as logging from nova.openstack.common import uuidutils from nova import utils wrap_check_security_groups_policy = compute_api.policy_decorator( scope='compute:security_groups') CONF = cfg.CONF LOG = logging.getLogger(__name__) class SecurityGroupAPI(security_group_base.SecurityGroupBase): id_is_uuid = True def create_security_group(self, context, name, description): quantum = quantumv2.get_client(context) body = self._make_quantum_security_group_dict(name, description) try: security_group = quantum.create_security_group( body).get('security_group') except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error creating security group %s"), name) if e.status_code == 401: # TODO(arosen) Cannot raise generic response from quantum here # as this error code could be related to bad input or over # quota raise exc.HTTPBadRequest() raise e return self._convert_to_nova_security_group_format(security_group) def _convert_to_nova_security_group_format(self, security_group): nova_group = {} nova_group['id'] = security_group['id'] nova_group['description'] = security_group['description'] nova_group['name'] = security_group['name'] nova_group['project_id'] = security_group['tenant_id'] nova_group['rules'] = [] for rule in security_group.get('security_group_rules', []): if rule['direction'] == 'ingress': nova_group['rules'].append( self._convert_to_nova_security_group_rule_format(rule)) return nova_group def _convert_to_nova_security_group_rule_format(self, rule): nova_rule = {} nova_rule['id'] = rule['id'] nova_rule['parent_group_id'] = rule['security_group_id'] nova_rule['protocol'] = rule['protocol'] if rule['port_range_min'] is None: nova_rule['from_port'] = -1 else: nova_rule['from_port'] = rule['port_range_min'] if rule['port_range_max'] is None: nova_rule['to_port'] = -1 else: nova_rule['to_port'] = rule['port_range_max'] nova_rule['group_id'] = rule['remote_group_id'] nova_rule['cidr'] = rule['remote_ip_prefix'] return nova_rule def get(self, context, name=None, id=None, map_exception=False): quantum = quantumv2.get_client(context) try: if not id and name: id = quantumv20.find_resourceid_by_name_or_id( quantum, 'security_group', name) group = quantum.show_security_group(id).get('security_group') except q_exc.QuantumClientException as e: if e.status_code == 404: LOG.debug(_("Quantum security group %s not found"), name) self.raise_not_found(e.message) else: LOG.error(_("Quantum Error: %s"), e) raise e return 
self._convert_to_nova_security_group_format(group) def list(self, context, names=None, ids=None, project=None, search_opts=None): """Returns list of security group rules owned by tenant.""" quantum = quantumv2.get_client(context) search_opts = {} if names: search_opts['name'] = names if ids: search_opts['id'] = ids if project: search_opts['tenant_id'] = project try: security_groups = quantum.list_security_groups(**search_opts).get( 'security_groups') except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error getting security groups")) raise e converted_rules = [] for security_group in security_groups: converted_rules.append( self._convert_to_nova_security_group_format(security_group)) return converted_rules def validate_id(self, id): if not uuidutils.is_uuid_like(id): msg = _("Security group id should be uuid") self.raise_invalid_property(msg) return id def destroy(self, context, security_group): """This function deletes a security group.""" quantum = quantumv2.get_client(context) try: quantum.delete_security_group(security_group['id']) except q_exc.QuantumClientException as e: if e.status_code == 404: self.raise_not_found(e.message) elif e.status_code == 409: self.raise_invalid_property(e.message) else: LOG.error(_("Quantum Error: %s"), e) raise e def add_rules(self, context, id, name, vals): """Add security group rule(s) to security group. Note: the Nova security group API doesn't support adding muliple security group rules at once but the EC2 one does. Therefore, this function is writen to support both. Multiple rules are installed to a security group in quantum using bulk support.""" quantum = quantumv2.get_client(context) body = self._make_quantum_security_group_rules_list(vals) try: rules = quantum.create_security_group_rule( body).get('security_group_rules') except q_exc.QuantumClientException as e: if e.status_code == 409: LOG.exception(_("Quantum Error getting security group %s"), name) self.raise_not_found(e.message) else: LOG.exception(_("Quantum Error:")) raise e converted_rules = [] for rule in rules: converted_rules.append( self._convert_to_nova_security_group_rule_format(rule)) return converted_rules def _make_quantum_security_group_dict(self, name, description): return {'security_group': {'name': name, 'description': description}} def _make_quantum_security_group_rules_list(self, rules): new_rules = [] for rule in rules: new_rule = {} # nova only supports ingress rules so all rules are ingress. new_rule['direction'] = "ingress" new_rule['protocol'] = rule.get('protocol') # FIXME(arosen) Nova does not expose ethertype on security group # rules. Therefore, in the case of self referential rules we # should probably assume they want to allow both IPv4 and IPv6. # Unfortunately, this would require adding two rules in quantum. # The reason we do not do this is because when the user using the # nova api wants to remove the rule we'd have to have some way to # know that we should delete both of these rules in quantum. # For now, self referential rules only support IPv4. 
if not rule.get('cidr'): new_rule['ethertype'] = 'IPv4' else: new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr')) new_rule['remote_ip_prefix'] = rule.get('cidr') new_rule['security_group_id'] = rule.get('parent_group_id') new_rule['remote_group_id'] = rule.get('group_id') if rule['from_port'] != -1: new_rule['port_range_min'] = rule['from_port'] if rule['to_port'] != -1: new_rule['port_range_max'] = rule['to_port'] new_rules.append(new_rule) return {'security_group_rules': new_rules} def remove_rules(self, context, security_group, rule_ids): quantum = quantumv2.get_client(context) rule_ids = set(rule_ids) try: # The ec2 api allows one to delete multiple security group rules # at once. Since there is no bulk delete for quantum the best # thing we can do is delete the rules one by one and hope this # works.... :/ for rule_id in range(0, len(rule_ids)): quantum.delete_security_group_rule(rule_ids.pop()) except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error unable to delete %s"), rule_ids) raise e def get_rule(self, context, id): quantum = quantumv2.get_client(context) try: rule = quantum.show_security_group_rule( id).get('security_group_rule') except q_exc.QuantumClientException as e: if e.status_code == 404: LOG.debug(_("Quantum security group rule %s not found"), id) self.raise_not_found(e.message) else: LOG.error(_("Quantum Error: %s"), e) raise e return self._convert_to_nova_security_group_rule_format(rule) def get_instances_security_groups_bindings(self, context): """Returns a dict(instance_id, [security_groups]) to allow obtaining all of the instances and their security groups in one shot.""" quantum = quantumv2.get_client(context) ports = quantum.list_ports().get('ports') security_groups = quantum.list_security_groups().get('security_groups') security_group_lookup = {} instances_security_group_bindings = {} for security_group in security_groups: security_group_lookup[security_group['id']] = security_group for port in ports: for port_security_group in port.get('security_groups', []): try: sg = security_group_lookup[port_security_group] # name is optional in quantum so if not specified return id if sg.get('name'): sg_entry = {'name': sg['name']} else: sg_entry = {'name': sg['id']} instances_security_group_bindings.setdefault( port['device_id'], []).append(sg_entry) except KeyError: # This should only happen due to a race condition # if the security group on a port was deleted after the # ports were returned. We pass since this security # group is no longer on the port. pass return instances_security_group_bindings def get_instance_security_groups(self, context, instance_id, instance_uuid=None, detailed=False): """Returns the security groups that are associated with an instance. If detailed is True then it also returns the full details of the security groups associated with an instance. 
""" quantum = quantumv2.get_client(context) if instance_uuid: params = {'device_id': instance_uuid} else: params = {'device_id': instance_id} ports = quantum.list_ports(**params) security_groups = quantum.list_security_groups().get('security_groups') security_group_lookup = {} for security_group in security_groups: security_group_lookup[security_group['id']] = security_group ret = [] for port in ports['ports']: for security_group in port.get('security_groups', []): try: if detailed: ret.append(self._convert_to_nova_security_group_format( security_group_lookup[security_group])) else: name = security_group_lookup[security_group].get( 'name') # Since the name is optional for # quantum security groups if not name: name = security_group['id'] ret.append({'name': name}) except KeyError: # This should only happen due to a race condition # if the security group on a port was deleted after the # ports were returned. We pass since this security # group is no longer on the port. pass return ret def _has_security_group_requirements(self, port): port_security_enabled = port.get('port_security_enabled') has_ip = port.get('fixed_ips') if port_security_enabled and has_ip: return True else: return False @wrap_check_security_groups_policy def add_to_instance(self, context, instance, security_group_name): """Add security group to the instance.""" quantum = quantumv2.get_client(context) try: security_group_id = quantumv20.find_resourceid_by_name_or_id( quantum, 'security_group', security_group_name) except q_exc.QuantumClientException as e: if e.status_code == 404: msg = ("Security group %s is not found for project %s" % (security_group_name, context.project_id)) self.raise_not_found(msg) else: LOG.exception(_("Quantum Error:")) raise e params = {'device_id': instance['uuid']} try: ports = quantum.list_ports(**params).get('ports') except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error:")) raise e if not ports: msg = ("instance_id %s could not be found as device id on" " any ports" % instance['uuid']) self.raise_not_found(msg) for port in ports: if not self._has_security_group_requirements(port): LOG.warn(_("Cannot add security group %(name)s to %(instance)s" " since the port %(port_id)s does not meet security" " requirements"), {'name': security_group_name, 'instance': instance['uuid'], 'port_id': port['id']}) raise exception.SecurityGroupCannotBeApplied() if 'security_groups' not in port: port['security_groups'] = [] port['security_groups'].append(security_group_id) updated_port = {'security_groups': port['security_groups']} try: LOG.info(_("Adding security group %(security_group_id)s to " "port %(port_id)s"), {'security_group_id': security_group_id, 'port_id': port['id']}) quantum.update_port(port['id'], {'port': updated_port}) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_("Quantum Error:")) @wrap_check_security_groups_policy def remove_from_instance(self, context, instance, security_group_name): """Remove the security group associated with the instance.""" quantum = quantumv2.get_client(context) try: security_group_id = quantumv20.find_resourceid_by_name_or_id( quantum, 'security_group', security_group_name) except q_exc.QuantumClientException as e: if e.status_code == 404: msg = ("Security group %s is not found for project %s" % (security_group_name, context.project_id)) self.raise_not_found(msg) else: LOG.exception(_("Quantum Error:")) raise e params = {'device_id': instance['uuid']} try: ports = quantum.list_ports(**params).get('ports') except 
q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error:")) raise e if not ports: msg = ("instance_id %s could not be found as device id on" " any ports" % instance['uuid']) self.raise_not_found(msg) found_security_group = False for port in ports: try: port.get('security_groups', []).remove(security_group_id) except ValueError: # When removing a security group from an instance the security # group should be on both ports since it was added this way if # done through the nova api. In case it is not a 404 is only # raised if the security group is not found on any of the # ports on the instance. continue updated_port = {'security_groups': port['security_groups']} try: LOG.info(_("Adding security group %(security_group_id)s to " "port %(port_id)s"), {'security_group_id': security_group_id, 'port_id': port['id']}) quantum.update_port(port['id'], {'port': updated_port}) found_security_group = True except Exception: LOG.exception(_("Quantum Error:")) raise e if not found_security_group: msg = (_("Security group %(security_group_name)s not assocaited " "with the instance %(instance)s"), {'security_group_name': security_group_name, 'instance': instance['uuid']}) self.raise_not_found(msg) def populate_security_groups(self, instance, security_groups): # Setting to emply list since we do not want to populate this field # in the nova database if using the quantum driver instance['security_groups'] = []
avg_line_length: 43.610092
max_line_length: 79
alphanum_fraction: 0.592879
from oslo.config import cfg from quantumclient.common import exceptions as q_exc from quantumclient.quantum import v2_0 as quantumv20 from webob import exc from nova.compute import api as compute_api from nova import exception from nova.network import quantumv2 from nova.network.security_group import security_group_base from nova.openstack.common import excutils from nova.openstack.common import log as logging from nova.openstack.common import uuidutils from nova import utils wrap_check_security_groups_policy = compute_api.policy_decorator( scope='compute:security_groups') CONF = cfg.CONF LOG = logging.getLogger(__name__) class SecurityGroupAPI(security_group_base.SecurityGroupBase): id_is_uuid = True def create_security_group(self, context, name, description): quantum = quantumv2.get_client(context) body = self._make_quantum_security_group_dict(name, description) try: security_group = quantum.create_security_group( body).get('security_group') except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error creating security group %s"), name) if e.status_code == 401: raise exc.HTTPBadRequest() raise e return self._convert_to_nova_security_group_format(security_group) def _convert_to_nova_security_group_format(self, security_group): nova_group = {} nova_group['id'] = security_group['id'] nova_group['description'] = security_group['description'] nova_group['name'] = security_group['name'] nova_group['project_id'] = security_group['tenant_id'] nova_group['rules'] = [] for rule in security_group.get('security_group_rules', []): if rule['direction'] == 'ingress': nova_group['rules'].append( self._convert_to_nova_security_group_rule_format(rule)) return nova_group def _convert_to_nova_security_group_rule_format(self, rule): nova_rule = {} nova_rule['id'] = rule['id'] nova_rule['parent_group_id'] = rule['security_group_id'] nova_rule['protocol'] = rule['protocol'] if rule['port_range_min'] is None: nova_rule['from_port'] = -1 else: nova_rule['from_port'] = rule['port_range_min'] if rule['port_range_max'] is None: nova_rule['to_port'] = -1 else: nova_rule['to_port'] = rule['port_range_max'] nova_rule['group_id'] = rule['remote_group_id'] nova_rule['cidr'] = rule['remote_ip_prefix'] return nova_rule def get(self, context, name=None, id=None, map_exception=False): quantum = quantumv2.get_client(context) try: if not id and name: id = quantumv20.find_resourceid_by_name_or_id( quantum, 'security_group', name) group = quantum.show_security_group(id).get('security_group') except q_exc.QuantumClientException as e: if e.status_code == 404: LOG.debug(_("Quantum security group %s not found"), name) self.raise_not_found(e.message) else: LOG.error(_("Quantum Error: %s"), e) raise e return self._convert_to_nova_security_group_format(group) def list(self, context, names=None, ids=None, project=None, search_opts=None): quantum = quantumv2.get_client(context) search_opts = {} if names: search_opts['name'] = names if ids: search_opts['id'] = ids if project: search_opts['tenant_id'] = project try: security_groups = quantum.list_security_groups(**search_opts).get( 'security_groups') except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error getting security groups")) raise e converted_rules = [] for security_group in security_groups: converted_rules.append( self._convert_to_nova_security_group_format(security_group)) return converted_rules def validate_id(self, id): if not uuidutils.is_uuid_like(id): msg = _("Security group id should be uuid") self.raise_invalid_property(msg) return id def 
destroy(self, context, security_group): quantum = quantumv2.get_client(context) try: quantum.delete_security_group(security_group['id']) except q_exc.QuantumClientException as e: if e.status_code == 404: self.raise_not_found(e.message) elif e.status_code == 409: self.raise_invalid_property(e.message) else: LOG.error(_("Quantum Error: %s"), e) raise e def add_rules(self, context, id, name, vals): quantum = quantumv2.get_client(context) body = self._make_quantum_security_group_rules_list(vals) try: rules = quantum.create_security_group_rule( body).get('security_group_rules') except q_exc.QuantumClientException as e: if e.status_code == 409: LOG.exception(_("Quantum Error getting security group %s"), name) self.raise_not_found(e.message) else: LOG.exception(_("Quantum Error:")) raise e converted_rules = [] for rule in rules: converted_rules.append( self._convert_to_nova_security_group_rule_format(rule)) return converted_rules def _make_quantum_security_group_dict(self, name, description): return {'security_group': {'name': name, 'description': description}} def _make_quantum_security_group_rules_list(self, rules): new_rules = [] for rule in rules: new_rule = {} new_rule['direction'] = "ingress" new_rule['protocol'] = rule.get('protocol') # know that we should delete both of these rules in quantum. # For now, self referential rules only support IPv4. if not rule.get('cidr'): new_rule['ethertype'] = 'IPv4' else: new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr')) new_rule['remote_ip_prefix'] = rule.get('cidr') new_rule['security_group_id'] = rule.get('parent_group_id') new_rule['remote_group_id'] = rule.get('group_id') if rule['from_port'] != -1: new_rule['port_range_min'] = rule['from_port'] if rule['to_port'] != -1: new_rule['port_range_max'] = rule['to_port'] new_rules.append(new_rule) return {'security_group_rules': new_rules} def remove_rules(self, context, security_group, rule_ids): quantum = quantumv2.get_client(context) rule_ids = set(rule_ids) try: # The ec2 api allows one to delete multiple security group rules # at once. Since there is no bulk delete for quantum the best # thing we can do is delete the rules one by one and hope this # works.... 
:/ for rule_id in range(0, len(rule_ids)): quantum.delete_security_group_rule(rule_ids.pop()) except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error unable to delete %s"), rule_ids) raise e def get_rule(self, context, id): quantum = quantumv2.get_client(context) try: rule = quantum.show_security_group_rule( id).get('security_group_rule') except q_exc.QuantumClientException as e: if e.status_code == 404: LOG.debug(_("Quantum security group rule %s not found"), id) self.raise_not_found(e.message) else: LOG.error(_("Quantum Error: %s"), e) raise e return self._convert_to_nova_security_group_rule_format(rule) def get_instances_security_groups_bindings(self, context): quantum = quantumv2.get_client(context) ports = quantum.list_ports().get('ports') security_groups = quantum.list_security_groups().get('security_groups') security_group_lookup = {} instances_security_group_bindings = {} for security_group in security_groups: security_group_lookup[security_group['id']] = security_group for port in ports: for port_security_group in port.get('security_groups', []): try: sg = security_group_lookup[port_security_group] # name is optional in quantum so if not specified return id if sg.get('name'): sg_entry = {'name': sg['name']} else: sg_entry = {'name': sg['id']} instances_security_group_bindings.setdefault( port['device_id'], []).append(sg_entry) except KeyError: # This should only happen due to a race condition # if the security group on a port was deleted after the # ports were returned. We pass since this security # group is no longer on the port. pass return instances_security_group_bindings def get_instance_security_groups(self, context, instance_id, instance_uuid=None, detailed=False): quantum = quantumv2.get_client(context) if instance_uuid: params = {'device_id': instance_uuid} else: params = {'device_id': instance_id} ports = quantum.list_ports(**params) security_groups = quantum.list_security_groups().get('security_groups') security_group_lookup = {} for security_group in security_groups: security_group_lookup[security_group['id']] = security_group ret = [] for port in ports['ports']: for security_group in port.get('security_groups', []): try: if detailed: ret.append(self._convert_to_nova_security_group_format( security_group_lookup[security_group])) else: name = security_group_lookup[security_group].get( 'name') # Since the name is optional for # quantum security groups if not name: name = security_group['id'] ret.append({'name': name}) except KeyError: # This should only happen due to a race condition # if the security group on a port was deleted after the # ports were returned. We pass since this security # group is no longer on the port. 
pass return ret def _has_security_group_requirements(self, port): port_security_enabled = port.get('port_security_enabled') has_ip = port.get('fixed_ips') if port_security_enabled and has_ip: return True else: return False @wrap_check_security_groups_policy def add_to_instance(self, context, instance, security_group_name): quantum = quantumv2.get_client(context) try: security_group_id = quantumv20.find_resourceid_by_name_or_id( quantum, 'security_group', security_group_name) except q_exc.QuantumClientException as e: if e.status_code == 404: msg = ("Security group %s is not found for project %s" % (security_group_name, context.project_id)) self.raise_not_found(msg) else: LOG.exception(_("Quantum Error:")) raise e params = {'device_id': instance['uuid']} try: ports = quantum.list_ports(**params).get('ports') except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error:")) raise e if not ports: msg = ("instance_id %s could not be found as device id on" " any ports" % instance['uuid']) self.raise_not_found(msg) for port in ports: if not self._has_security_group_requirements(port): LOG.warn(_("Cannot add security group %(name)s to %(instance)s" " since the port %(port_id)s does not meet security" " requirements"), {'name': security_group_name, 'instance': instance['uuid'], 'port_id': port['id']}) raise exception.SecurityGroupCannotBeApplied() if 'security_groups' not in port: port['security_groups'] = [] port['security_groups'].append(security_group_id) updated_port = {'security_groups': port['security_groups']} try: LOG.info(_("Adding security group %(security_group_id)s to " "port %(port_id)s"), {'security_group_id': security_group_id, 'port_id': port['id']}) quantum.update_port(port['id'], {'port': updated_port}) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_("Quantum Error:")) @wrap_check_security_groups_policy def remove_from_instance(self, context, instance, security_group_name): quantum = quantumv2.get_client(context) try: security_group_id = quantumv20.find_resourceid_by_name_or_id( quantum, 'security_group', security_group_name) except q_exc.QuantumClientException as e: if e.status_code == 404: msg = ("Security group %s is not found for project %s" % (security_group_name, context.project_id)) self.raise_not_found(msg) else: LOG.exception(_("Quantum Error:")) raise e params = {'device_id': instance['uuid']} try: ports = quantum.list_ports(**params).get('ports') except q_exc.QuantumClientException as e: LOG.exception(_("Quantum Error:")) raise e if not ports: msg = ("instance_id %s could not be found as device id on" " any ports" % instance['uuid']) self.raise_not_found(msg) found_security_group = False for port in ports: try: port.get('security_groups', []).remove(security_group_id) except ValueError: # When removing a security group from an instance the security # group should be on both ports since it was added this way if # done through the nova api. In case it is not a 404 is only # raised if the security group is not found on any of the # ports on the instance. 
continue updated_port = {'security_groups': port['security_groups']} try: LOG.info(_("Adding security group %(security_group_id)s to " "port %(port_id)s"), {'security_group_id': security_group_id, 'port_id': port['id']}) quantum.update_port(port['id'], {'port': updated_port}) found_security_group = True except Exception: LOG.exception(_("Quantum Error:")) raise e if not found_security_group: msg = (_("Security group %(security_group_name)s not assocaited " "with the instance %(instance)s"), {'security_group_name': security_group_name, 'instance': instance['uuid']}) self.raise_not_found(msg) def populate_security_groups(self, instance, security_groups): # Setting to emply list since we do not want to populate this field # in the nova database if using the quantum driver instance['security_groups'] = []
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 1c49db13db2a6633c33e03c70a86b0802f23aadc
size: 734
ext: py
lang: Python
max_stars_repo_path: dashboard/migrations/0003_auto_20210922_0014.py
max_stars_repo_name: scholarsportal/sp_ask_admin_dashboard
max_stars_repo_head_hexsha: 0aa99197a74d30f6b2634ce4d4e9a4654828e2ba
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-06-30T09:23:07.000Z
max_stars_repo_stars_event_max_datetime: 2021-06-30T09:23:07.000Z
max_issues_repo_path: dashboard/migrations/0003_auto_20210922_0014.py
max_issues_repo_name: scholarsportal/sp_ask_admin_dashboard
max_issues_repo_head_hexsha: 0aa99197a74d30f6b2634ce4d4e9a4654828e2ba
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 9
max_issues_repo_issues_event_min_datetime: 2021-07-02T04:09:23.000Z
max_issues_repo_issues_event_max_datetime: 2021-07-06T07:06:38.000Z
max_forks_repo_path: dashboard/migrations/0003_auto_20210922_0014.py
max_forks_repo_name: scholarsportal/sp_ask_admin_dashboard
max_forks_repo_head_hexsha: 0aa99197a74d30f6b2634ce4d4e9a4654828e2ba
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-11-30T20:47:56.000Z
max_forks_repo_forks_event_max_datetime: 2021-11-30T20:47:56.000Z
content:

    # Generated by Django 2.2.19 on 2021-09-22 04:14

    from django.db import migrations, models


    class Migration(migrations.Migration):

        dependencies = [
            ('dashboard', '0002_auto_20210922_0012'),
        ]

        operations = [
            migrations.RenameModel(
                old_name='Chat',
                new_name='ChatLightAssessment',
            ),
            migrations.AddIndex(
                model_name='chatlightassessment',
                index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_07252a_idx'),
            ),
            migrations.AddIndex(
                model_name='chatreferencequestion',
                index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_780c55_idx'),
            ),
        ]

avg_line_length: 28.230769
max_line_length: 103
alphanum_fraction: 0.621253

content_no_comment:

    from django.db import migrations, models


    class Migration(migrations.Migration):

        dependencies = [
            ('dashboard', '0002_auto_20210922_0012'),
        ]

        operations = [
            migrations.RenameModel(
                old_name='Chat',
                new_name='ChatLightAssessment',
            ),
            migrations.AddIndex(
                model_name='chatlightassessment',
                index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_07252a_idx'),
            ),
            migrations.AddIndex(
                model_name='chatreferencequestion',
                index=models.Index(fields=['lh3ChatID', 'queueID'], name='dashboard_c_lh3Chat_780c55_idx'),
            ),
        ]

is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: 1c49e00453cdcd61b12b653597c4d0488629ad01
size: 14,791
ext: py
lang: Python
max_stars_repo_path: tests/tests/test_compressor.py
max_stars_repo_name: karlwnw/django-pipeline
max_stars_repo_head_hexsha: eeb92660c18d969b955e0115ab909a64fb16d92e
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 598
max_stars_repo_stars_event_min_datetime: 2015-12-18T01:25:23.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-31T13:57:01.000Z
max_issues_repo_path: tests/tests/test_compressor.py
max_issues_repo_name: karlwnw/django-pipeline
max_issues_repo_head_hexsha: eeb92660c18d969b955e0115ab909a64fb16d92e
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 288
max_issues_repo_issues_event_min_datetime: 2015-12-18T01:30:20.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-22T16:02:12.000Z
max_forks_repo_path: tests/tests/test_compressor.py
max_forks_repo_name: karlwnw/django-pipeline
max_forks_repo_head_hexsha: eeb92660c18d969b955e0115ab909a64fb16d92e
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 178
max_forks_repo_forks_event_min_datetime: 2015-12-20T06:58:57.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-04T21:53:43.000Z
import base64 import io import os import sys try: from mock import patch except ImportError: from unittest.mock import patch # noqa from unittest import skipIf, skipUnless from django.conf import settings from django.test import TestCase from django.test.client import RequestFactory from pipeline.compressors import ( Compressor, TEMPLATE_FUNC, SubProcessCompressor) from pipeline.compressors.yuglify import YuglifyCompressor from pipeline.collector import default_collector from tests.utils import _, pipeline_settings @pipeline_settings( CSS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor', JS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor') class CompressorTest(TestCase): def setUp(self): self.maxDiff = None self.compressor = Compressor() default_collector.collect() def test_js_compressor_class(self): self.assertEqual(self.compressor.js_compressor, YuglifyCompressor) def test_css_compressor_class(self): self.assertEqual(self.compressor.css_compressor, YuglifyCompressor) def test_concatenate_and_rewrite(self): css = self.compressor.concatenate_and_rewrite([ _('pipeline/css/first.css'), _('pipeline/css/second.css') ], 'css/screen.css') expected = """.concat {\n display: none;\n}\n\n.concatenate {\n display: block;\n}\n""" self.assertEqual(expected, css) def test_concatenate(self): js = self.compressor.concatenate([ _('pipeline/js/first.js'), _('pipeline/js/second.js') ]) expected = """(function() {\n window.concat = function() {\n console.log(arguments);\n }\n}()) // No semicolon\n\n;(function() {\n window.cat = function() {\n console.log("hello world");\n }\n}());\n""" self.assertEqual(expected, js) @patch.object(base64, 'b64encode') def test_encoded_content(self, mock): self.compressor.asset_contents.clear() self.compressor.encoded_content(_('pipeline/images/arrow.png')) self.assertTrue(mock.called) mock.reset_mock() self.compressor.encoded_content(_('pipeline/images/arrow.png')) self.assertFalse(mock.called) def test_encoded_content_output(self): self.compressor.asset_contents.clear() encoded = self.compressor.encoded_content(_('pipeline/images/arrow.png')) expected = ('iVBORw0KGgoAAAANSUhEUgAAAAkAAAAGCAYAAAARx7TFAAAAMk' 'lEQVR42oXKwQkAMAxC0Q7rEk5voSEepCHC9/SOpLV3JPULgArV' 'RtDIMEEiQ4NECRNdciCfK3K3wvEAAAAASUVORK5CYII=') self.assertEqual(encoded, expected) def test_relative_path(self): relative_path = self.compressor.relative_path("images/sprite.png", 'css/screen.css') self.assertEqual(relative_path, '../images/sprite.png') def test_base_path(self): base_path = self.compressor.base_path([ _('js/templates/form.jst'), _('js/templates/field.jst') ]) self.assertEqual(base_path, _('js/templates')) def test_absolute_path(self): absolute_path = self.compressor.absolute_path( '../../images/sprite.png', 'css/plugins/') self.assertEqual(absolute_path, 'images/sprite.png') absolute_path = self.compressor.absolute_path( '/images/sprite.png', 'css/plugins/') self.assertEqual(absolute_path, '/images/sprite.png') def test_template_name(self): name = self.compressor.template_name( 'templates/photo/detail.jst', 'templates/') self.assertEqual(name, 'photo_detail') name = self.compressor.template_name('templates/photo_edit.jst', '') self.assertEqual(name, 'photo_edit') name = self.compressor.template_name( 'templates\photo\detail.jst', 'templates\\') self.assertEqual(name, 'photo_detail') @pipeline_settings(TEMPLATE_SEPARATOR='/') def test_template_name_separator(self): name = self.compressor.template_name( 'templates/photo/detail.jst', 'templates/') self.assertEqual(name, 
'photo/detail') name = self.compressor.template_name('templates/photo_edit.jst', '') self.assertEqual(name, 'photo_edit') name = self.compressor.template_name( 'templates\photo\detail.jst', 'templates\\') self.assertEqual(name, 'photo/detail') def test_compile_templates(self): templates = self.compressor.compile_templates([_('pipeline/templates/photo/list.jst')]) self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'list\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC) templates = self.compressor.compile_templates([ _('pipeline/templates/video/detail.jst'), _('pipeline/templates/photo/detail.jst') ]) self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'video_detail\'] = template(\'<div class="video">\\n <video src="<%%= src %%>" />\\n <div class="caption">\\n <%%= description %%>\\n </div>\\n</div>\');\nwindow.JST[\'photo_detail\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%> by <%%= author %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC) def test_embeddable(self): self.assertFalse(self.compressor.embeddable(_('pipeline/images/sprite.png'), None)) self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.png'), 'datauri')) self.assertTrue(self.compressor.embeddable(_('pipeline/images/embed/arrow.png'), 'datauri')) self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.dat'), 'datauri')) def test_construct_asset_path(self): asset_path = self.compressor.construct_asset_path( "../../images/sprite.png", "css/plugins/gallery.css", "css/gallery.css") self.assertEqual(asset_path, "../images/sprite.png") asset_path = self.compressor.construct_asset_path( "/images/sprite.png", "css/plugins/gallery.css", "css/gallery.css") self.assertEqual(asset_path, "/images/sprite.png") def test_url_rewrite(self): output = self.compressor.concatenate_and_rewrite([ _('pipeline/css/urls.css'), ], 'css/screen.css') self.assertEqual(""".embedded-url-svg { background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E% 3C/svg%3E"); } @font-face { font-family: 'Pipeline'; src: url(../pipeline/fonts/pipeline.eot); src: url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'); src: local('☺'), url(../pipeline/fonts/pipeline.woff) format('woff'), url(../pipeline/fonts/pipeline.ttf) format('truetype'), url(../pipeline/fonts/pipeline.svg#IyfZbseF) format('svg'); font-weight: normal; font-style: normal; } .relative-url { background-image: url(../pipeline/images/sprite-buttons.png); } .relative-url-querystring { background-image: url(../pipeline/images/sprite-buttons.png?v=1.0#foo=bar); } .absolute-url { background-image: url(/images/sprite-buttons.png); } .absolute-full-url { background-image: url(http://localhost/images/sprite-buttons.png); } .no-protocol-url { background-image: url(//images/sprite-buttons.png); } .anchor-tag-url { background-image: url(#image-gradient); } @font-face{src:url(../pipeline/fonts/pipeline.eot);src:url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'),url(../pipeline/fonts/pipeline.woff) format('woff'),url(../pipeline/fonts/pipeline.ttf) format('truetype');} """, output) def test_url_rewrite_data_uri(self): output = 
self.compressor.concatenate_and_rewrite([ _('pipeline/css/nested/nested.css'), ], 'pipeline/screen.css') self.assertEqual(""".data-url { background-image: url(data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E); } .data-url-quoted { background-image: url('data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E'); } """, output) @skipIf(sys.platform.startswith("win"), "requires posix platform") def test_compressor_subprocess_unicode(self): path = os.path.dirname(os.path.dirname(__file__)) content = io.open(path + '/assets/css/unicode.css', encoding="utf-8").read() output = SubProcessCompressor(False).execute_command(('cat',), content) self.assertEqual(""".some_class { // Some unicode content: "áéíóú"; } """, output) def tearDown(self): default_collector.clear() class CompressorImplementationTest(TestCase): maxDiff = None def setUp(self): self.compressor = Compressor() default_collector.collect(RequestFactory().get('/')) def tearDown(self): default_collector.clear() def _test_compressor(self, compressor_cls, compress_type, expected_file): override_settings = { ("%s_COMPRESSOR" % compress_type.upper()): compressor_cls, } with pipeline_settings(**override_settings): if compress_type == 'js': result = self.compressor.compress_js( [_('pipeline/js/first.js'), 
_('pipeline/js/second.js')]) else: result = self.compressor.compress_css( [_('pipeline/css/first.css'), _('pipeline/css/second.css')], os.path.join('pipeline', 'css', os.path.basename(expected_file))) with self.compressor.storage.open(expected_file, 'r') as f: expected = f.read() self.assertEqual(result, expected) def test_jsmin(self): self._test_compressor('pipeline.compressors.jsmin.JSMinCompressor', 'js', 'pipeline/compressors/jsmin.js') def test_slimit(self): self._test_compressor('pipeline.compressors.slimit.SlimItCompressor', 'js', 'pipeline/compressors/slimit.js') def test_csshtmljsminify(self): self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor', 'css', 'pipeline/compressors/csshtmljsminify.css') self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor', 'js', 'pipeline/compressors/csshtmljsminify.js') @skipUnless(settings.HAS_NODE, "requires node") def test_uglifyjs(self): self._test_compressor('pipeline.compressors.uglifyjs.UglifyJSCompressor', 'js', 'pipeline/compressors/uglifyjs.js') @skipUnless(settings.HAS_NODE, "requires node") def test_terser(self): self._test_compressor('pipeline.compressors.terser.TerserCompressor', 'js', 'pipeline/compressors/terser.js') @skipUnless(settings.HAS_NODE, "requires node") def test_yuglify(self): self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor', 'css', 'pipeline/compressors/yuglify.css') self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor', 'js', 'pipeline/compressors/yuglify.js') @skipUnless(settings.HAS_NODE, "requires node") def test_cssmin(self): self._test_compressor('pipeline.compressors.cssmin.CSSMinCompressor', 'css', 'pipeline/compressors/cssmin.css') @skipUnless(settings.HAS_NODE, "requires node") @skipUnless(settings.HAS_JAVA, "requires java") def test_closure(self): self._test_compressor('pipeline.compressors.closure.ClosureCompressor', 'js', 'pipeline/compressors/closure.js') @skipUnless(settings.HAS_NODE, "requires node") @skipUnless(settings.HAS_JAVA, "requires java") def test_yui_js(self): self._test_compressor('pipeline.compressors.yui.YUICompressor', 'js', 'pipeline/compressors/yui.js') @skipUnless(settings.HAS_NODE, "requires node") @skipUnless(settings.HAS_JAVA, "requires java") def test_yui_css(self): self._test_compressor('pipeline.compressors.yui.YUICompressor', 'css', 'pipeline/compressors/yui.css') @skipUnless(settings.HAS_CSSTIDY, "requires csstidy") def test_csstidy(self): self._test_compressor('pipeline.compressors.csstidy.CSSTidyCompressor', 'css', 'pipeline/compressors/csstidy.css')
51.898246
1,213
0.685687
import base64 import io import os import sys try: from mock import patch except ImportError: from unittest.mock import patch from unittest import skipIf, skipUnless from django.conf import settings from django.test import TestCase from django.test.client import RequestFactory from pipeline.compressors import ( Compressor, TEMPLATE_FUNC, SubProcessCompressor) from pipeline.compressors.yuglify import YuglifyCompressor from pipeline.collector import default_collector from tests.utils import _, pipeline_settings @pipeline_settings( CSS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor', JS_COMPRESSOR='pipeline.compressors.yuglify.YuglifyCompressor') class CompressorTest(TestCase): def setUp(self): self.maxDiff = None self.compressor = Compressor() default_collector.collect() def test_js_compressor_class(self): self.assertEqual(self.compressor.js_compressor, YuglifyCompressor) def test_css_compressor_class(self): self.assertEqual(self.compressor.css_compressor, YuglifyCompressor) def test_concatenate_and_rewrite(self): css = self.compressor.concatenate_and_rewrite([ _('pipeline/css/first.css'), _('pipeline/css/second.css') ], 'css/screen.css') expected = """.concat {\n display: none;\n}\n\n.concatenate {\n display: block;\n}\n""" self.assertEqual(expected, css) def test_concatenate(self): js = self.compressor.concatenate([ _('pipeline/js/first.js'), _('pipeline/js/second.js') ]) expected = """(function() {\n window.concat = function() {\n console.log(arguments);\n }\n}()) // No semicolon\n\n;(function() {\n window.cat = function() {\n console.log("hello world");\n }\n}());\n""" self.assertEqual(expected, js) @patch.object(base64, 'b64encode') def test_encoded_content(self, mock): self.compressor.asset_contents.clear() self.compressor.encoded_content(_('pipeline/images/arrow.png')) self.assertTrue(mock.called) mock.reset_mock() self.compressor.encoded_content(_('pipeline/images/arrow.png')) self.assertFalse(mock.called) def test_encoded_content_output(self): self.compressor.asset_contents.clear() encoded = self.compressor.encoded_content(_('pipeline/images/arrow.png')) expected = ('iVBORw0KGgoAAAANSUhEUgAAAAkAAAAGCAYAAAARx7TFAAAAMk' 'lEQVR42oXKwQkAMAxC0Q7rEk5voSEepCHC9/SOpLV3JPULgArV' 'RtDIMEEiQ4NECRNdciCfK3K3wvEAAAAASUVORK5CYII=') self.assertEqual(encoded, expected) def test_relative_path(self): relative_path = self.compressor.relative_path("images/sprite.png", 'css/screen.css') self.assertEqual(relative_path, '../images/sprite.png') def test_base_path(self): base_path = self.compressor.base_path([ _('js/templates/form.jst'), _('js/templates/field.jst') ]) self.assertEqual(base_path, _('js/templates')) def test_absolute_path(self): absolute_path = self.compressor.absolute_path( '../../images/sprite.png', 'css/plugins/') self.assertEqual(absolute_path, 'images/sprite.png') absolute_path = self.compressor.absolute_path( '/images/sprite.png', 'css/plugins/') self.assertEqual(absolute_path, '/images/sprite.png') def test_template_name(self): name = self.compressor.template_name( 'templates/photo/detail.jst', 'templates/') self.assertEqual(name, 'photo_detail') name = self.compressor.template_name('templates/photo_edit.jst', '') self.assertEqual(name, 'photo_edit') name = self.compressor.template_name( 'templates\photo\detail.jst', 'templates\\') self.assertEqual(name, 'photo_detail') @pipeline_settings(TEMPLATE_SEPARATOR='/') def test_template_name_separator(self): name = self.compressor.template_name( 'templates/photo/detail.jst', 'templates/') self.assertEqual(name, 'photo/detail') name 
= self.compressor.template_name('templates/photo_edit.jst', '') self.assertEqual(name, 'photo_edit') name = self.compressor.template_name( 'templates\photo\detail.jst', 'templates\\') self.assertEqual(name, 'photo/detail') def test_compile_templates(self): templates = self.compressor.compile_templates([_('pipeline/templates/photo/list.jst')]) self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'list\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC) templates = self.compressor.compile_templates([ _('pipeline/templates/video/detail.jst'), _('pipeline/templates/photo/detail.jst') ]) self.assertEqual(templates, """window.JST = window.JST || {};\n%s\nwindow.JST[\'video_detail\'] = template(\'<div class="video">\\n <video src="<%%= src %%>" />\\n <div class="caption">\\n <%%= description %%>\\n </div>\\n</div>\');\nwindow.JST[\'photo_detail\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%> by <%%= author %%>\\n </div>\\n</div>\');\n""" % TEMPLATE_FUNC) def test_embeddable(self): self.assertFalse(self.compressor.embeddable(_('pipeline/images/sprite.png'), None)) self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.png'), 'datauri')) self.assertTrue(self.compressor.embeddable(_('pipeline/images/embed/arrow.png'), 'datauri')) self.assertFalse(self.compressor.embeddable(_('pipeline/images/arrow.dat'), 'datauri')) def test_construct_asset_path(self): asset_path = self.compressor.construct_asset_path( "../../images/sprite.png", "css/plugins/gallery.css", "css/gallery.css") self.assertEqual(asset_path, "../images/sprite.png") asset_path = self.compressor.construct_asset_path( "/images/sprite.png", "css/plugins/gallery.css", "css/gallery.css") self.assertEqual(asset_path, "/images/sprite.png") def test_url_rewrite(self): output = self.compressor.concatenate_and_rewrite([ _('pipeline/css/urls.css'), ], 'css/screen.css') self.assertEqual(""".embedded-url-svg { background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E% 3C/svg%3E"); } @font-face { font-family: 'Pipeline'; src: url(../pipeline/fonts/pipeline.eot); src: url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'); src: local('☺'), url(../pipeline/fonts/pipeline.woff) format('woff'), url(../pipeline/fonts/pipeline.ttf) format('truetype'), url(../pipeline/fonts/pipeline.svg#IyfZbseF) format('svg'); font-weight: normal; font-style: normal; } .relative-url { background-image: url(../pipeline/images/sprite-buttons.png); } .relative-url-querystring { background-image: url(../pipeline/images/sprite-buttons.png?v=1.0#foo=bar); } .absolute-url { background-image: url(/images/sprite-buttons.png); } .absolute-full-url { background-image: url(http://localhost/images/sprite-buttons.png); } .no-protocol-url { background-image: url(//images/sprite-buttons.png); } .anchor-tag-url { background-image: url(#image-gradient); } @font-face{src:url(../pipeline/fonts/pipeline.eot);src:url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'),url(../pipeline/fonts/pipeline.woff) format('woff'),url(../pipeline/fonts/pipeline.ttf) format('truetype');} """, output) def test_url_rewrite_data_uri(self): output = 
self.compressor.concatenate_and_rewrite([ _('pipeline/css/nested/nested.css'), ], 'pipeline/screen.css') self.assertEqual(""".data-url { background-image: url(data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E); } .data-url-quoted { background-image: url('data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E'); } """, output) @skipIf(sys.platform.startswith("win"), "requires posix platform") def test_compressor_subprocess_unicode(self): path = os.path.dirname(os.path.dirname(__file__)) content = io.open(path + '/assets/css/unicode.css', encoding="utf-8").read() output = SubProcessCompressor(False).execute_command(('cat',), content) self.assertEqual(""".some_class { // Some unicode content: "áéíóú"; } """, output) def tearDown(self): default_collector.clear() class CompressorImplementationTest(TestCase): maxDiff = None def setUp(self): self.compressor = Compressor() default_collector.collect(RequestFactory().get('/')) def tearDown(self): default_collector.clear() def _test_compressor(self, compressor_cls, compress_type, expected_file): override_settings = { ("%s_COMPRESSOR" % compress_type.upper()): compressor_cls, } with pipeline_settings(**override_settings): if compress_type == 'js': result = self.compressor.compress_js( [_('pipeline/js/first.js'), 
_('pipeline/js/second.js')]) else: result = self.compressor.compress_css( [_('pipeline/css/first.css'), _('pipeline/css/second.css')], os.path.join('pipeline', 'css', os.path.basename(expected_file))) with self.compressor.storage.open(expected_file, 'r') as f: expected = f.read() self.assertEqual(result, expected) def test_jsmin(self): self._test_compressor('pipeline.compressors.jsmin.JSMinCompressor', 'js', 'pipeline/compressors/jsmin.js') def test_slimit(self): self._test_compressor('pipeline.compressors.slimit.SlimItCompressor', 'js', 'pipeline/compressors/slimit.js') def test_csshtmljsminify(self): self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor', 'css', 'pipeline/compressors/csshtmljsminify.css') self._test_compressor('pipeline.compressors.csshtmljsminify.CssHtmlJsMinifyCompressor', 'js', 'pipeline/compressors/csshtmljsminify.js') @skipUnless(settings.HAS_NODE, "requires node") def test_uglifyjs(self): self._test_compressor('pipeline.compressors.uglifyjs.UglifyJSCompressor', 'js', 'pipeline/compressors/uglifyjs.js') @skipUnless(settings.HAS_NODE, "requires node") def test_terser(self): self._test_compressor('pipeline.compressors.terser.TerserCompressor', 'js', 'pipeline/compressors/terser.js') @skipUnless(settings.HAS_NODE, "requires node") def test_yuglify(self): self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor', 'css', 'pipeline/compressors/yuglify.css') self._test_compressor('pipeline.compressors.yuglify.YuglifyCompressor', 'js', 'pipeline/compressors/yuglify.js') @skipUnless(settings.HAS_NODE, "requires node") def test_cssmin(self): self._test_compressor('pipeline.compressors.cssmin.CSSMinCompressor', 'css', 'pipeline/compressors/cssmin.css') @skipUnless(settings.HAS_NODE, "requires node") @skipUnless(settings.HAS_JAVA, "requires java") def test_closure(self): self._test_compressor('pipeline.compressors.closure.ClosureCompressor', 'js', 'pipeline/compressors/closure.js') @skipUnless(settings.HAS_NODE, "requires node") @skipUnless(settings.HAS_JAVA, "requires java") def test_yui_js(self): self._test_compressor('pipeline.compressors.yui.YUICompressor', 'js', 'pipeline/compressors/yui.js') @skipUnless(settings.HAS_NODE, "requires node") @skipUnless(settings.HAS_JAVA, "requires java") def test_yui_css(self): self._test_compressor('pipeline.compressors.yui.YUICompressor', 'css', 'pipeline/compressors/yui.css') @skipUnless(settings.HAS_CSSTIDY, "requires csstidy") def test_csstidy(self): self._test_compressor('pipeline.compressors.csstidy.CSSTidyCompressor', 'css', 'pipeline/compressors/csstidy.css')
true
true
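The records in this dump pair each file's raw source with a comment-stripped copy and a few derived numbers (an average line length, a maximum line length, and an alphanumeric-character fraction). The exact pipeline that produced those numbers and the stripped copies is not specified here, so what follows is only a minimal Python sketch of how such per-file statistics and an approximate comment-free copy could be derived; the function names, the 6-decimal rounding, and the command-line usage are assumptions for illustration, not part of the corpus.

import io
import tokenize


def file_stats(text):
    """Return (avg_line_length, max_line_length, alphanum_fraction) for *text*.

    One plausible definition of the per-file statistics shown in these records;
    the corpus's actual formulas and rounding are assumptions here.
    """
    lines = text.splitlines()
    if not text or not lines:
        return 0.0, 0, 0.0
    avg_len = sum(len(line) for line in lines) / len(lines)
    max_len = max(len(line) for line in lines)
    # True counts as 1, so this sums the alphanumeric characters.
    alnum_frac = sum(ch.isalnum() for ch in text) / len(text)
    return round(avg_len, 6), max_len, round(alnum_frac, 6)


def drop_comments(source):
    """Rebuild *source* with '#' comment tokens removed (docstrings are kept).

    tokenize.untokenize() does not reproduce the original spacing exactly, so
    this only approximates the comment-free copies stored alongside each record.
    """
    tokens = [
        tok for tok in tokenize.generate_tokens(io.StringIO(source).readline)
        if tok.type != tokenize.COMMENT
    ]
    return tokenize.untokenize(tokens)


if __name__ == "__main__":
    import sys
    # Hypothetical usage: python stats.py some_file.py
    with open(sys.argv[1], encoding="utf-8") as f:
        src = f.read()
    print(file_stats(src))
    print(file_stats(drop_comments(src)))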
1c49e18ac0e8dba6da218abdc4c6d3a737ca47f4
34,988
py
Python
flask_backend/anime_env/Lib/site-packages/gevent/tests/test__socket_dns.py
shafqatshad/AnmieRecommenderSystem
f58d6ab2b3614aa81208ec844ef99963c988c69d
[ "Apache-2.0" ]
null
null
null
flask_backend/anime_env/Lib/site-packages/gevent/tests/test__socket_dns.py
shafqatshad/AnmieRecommenderSystem
f58d6ab2b3614aa81208ec844ef99963c988c69d
[ "Apache-2.0" ]
null
null
null
flask_backend/anime_env/Lib/site-packages/gevent/tests/test__socket_dns.py
shafqatshad/AnmieRecommenderSystem
f58d6ab2b3614aa81208ec844ef99963c988c69d
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function from __future__ import absolute_import from __future__ import division import gevent from gevent import monkey import os import re import unittest import socket from time import time import traceback import gevent.socket as gevent_socket import gevent.testing as greentest from gevent.testing import util from gevent.testing.six import xrange from gevent.testing import flaky from gevent.testing.skipping import skipWithoutExternalNetwork resolver = gevent.get_hub().resolver util.debug('Resolver: %s', resolver) if getattr(resolver, 'pool', None) is not None: resolver.pool.size = 1 from gevent.testing.sysinfo import RESOLVER_NOT_SYSTEM from gevent.testing.sysinfo import RESOLVER_DNSPYTHON from gevent.testing.sysinfo import RESOLVER_ARES from gevent.testing.sysinfo import PY2 from gevent.testing.sysinfo import PYPY import gevent.testing.timing assert gevent_socket.gaierror is socket.gaierror assert gevent_socket.error is socket.error RUN_ALL_HOST_TESTS = os.getenv('GEVENTTEST_RUN_ALL_ETC_HOST_TESTS', '') def add(klass, hostname, name=None, skip=None, skip_reason=None): call = callable(hostname) def _setattr(k, n, func): if skip: func = greentest.skipIf(skip, skip_reason,)(func) if not hasattr(k, n): setattr(k, n, func) if name is None: if call: name = hostname.__name__ else: name = re.sub(r'[^\w]+', '_', repr(hostname)) assert name, repr(hostname) def test_getaddrinfo_http(self): x = hostname() if call else hostname self._test('getaddrinfo', x, 'http') test_getaddrinfo_http.__name__ = 'test_%s_getaddrinfo_http' % name _setattr(klass, test_getaddrinfo_http.__name__, test_getaddrinfo_http) def test_gethostbyname(self): x = hostname() if call else hostname ipaddr = self._test('gethostbyname', x) if not isinstance(ipaddr, Exception): self._test('gethostbyaddr', ipaddr) test_gethostbyname.__name__ = 'test_%s_gethostbyname' % name _setattr(klass, test_gethostbyname.__name__, test_gethostbyname) def test3(self): x = hostname() if call else hostname self._test('gethostbyname_ex', x) test3.__name__ = 'test_%s_gethostbyname_ex' % name _setattr(klass, test3.__name__, test3) def test4(self): x = hostname() if call else hostname self._test('gethostbyaddr', x) test4.__name__ = 'test_%s_gethostbyaddr' % name _setattr(klass, test4.__name__, test4) def test5(self): x = hostname() if call else hostname self._test('getnameinfo', (x, 80), 0) test5.__name__ = 'test_%s_getnameinfo' % name _setattr(klass, test5.__name__, test5) @skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo") class TestCase(greentest.TestCase): maxDiff = None __timeout__ = 30 switch_expected = None TRACE = not util.QUIET and os.getenv('GEVENT_DEBUG', '') == 'trace' verbose_dns = TRACE def trace(self, message, *args, **kwargs): if self.TRACE: util.debug(message, *args, **kwargs) # Things that the stdlib should never raise and neither should we; # these indicate bugs in our code and we want to raise them. 
REAL_ERRORS = (AttributeError, ValueError, NameError) def __run_resolver(self, function, args): try: result = function(*args) assert not isinstance(result, BaseException), repr(result) return result except self.REAL_ERRORS: raise except Exception as ex: # pylint:disable=broad-except if self.TRACE: traceback.print_exc() return ex def __trace_call(self, result, runtime, function, *args): util.debug(self.__format_call(function, args)) self.__trace_fresult(result, runtime) def __format_call(self, function, args): args = repr(args) if args.endswith(',)'): args = args[:-2] + ')' try: module = function.__module__.replace('gevent._socketcommon', 'gevent') name = function.__name__ return '%s:%s%s' % (module, name, args) except AttributeError: return function + args def __trace_fresult(self, result, seconds): if isinstance(result, Exception): msg = ' -=> raised %r' % (result, ) else: msg = ' -=> returned %r' % (result, ) time_ms = ' %.2fms' % (seconds * 1000.0, ) space = 80 - len(msg) - len(time_ms) if space > 0: space = ' ' * space else: space = '' util.debug(msg + space + time_ms) if not TRACE: def run_resolver(self, function, func_args): now = time() return self.__run_resolver(function, func_args), time() - now else: def run_resolver(self, function, func_args): self.trace(self.__format_call(function, func_args)) delta = time() result = self.__run_resolver(function, func_args) delta = time() - delta self.__trace_fresult(result, delta) return result, delta def setUp(self): super(TestCase, self).setUp() if not self.verbose_dns: # Silence the default reporting of errors from the ThreadPool, # we handle those here. gevent.get_hub().exception_stream = None def tearDown(self): if not self.verbose_dns: try: del gevent.get_hub().exception_stream except AttributeError: pass # Happens under leak tests super(TestCase, self).tearDown() def should_log_results(self, result1, result2): if not self.verbose_dns: return False if isinstance(result1, BaseException) and isinstance(result2, BaseException): return type(result1) is not type(result2) return repr(result1) != repr(result2) def _test(self, func_name, *args): """ Runs the function *func_name* with *args* and compares gevent and the system. Returns the gevent result. 
""" gevent_func = getattr(gevent_socket, func_name) real_func = monkey.get_original('socket', func_name) tester = getattr(self, '_run_test_' + func_name, self._run_test_generic) result = tester(func_name, real_func, gevent_func, args) _real_result, time_real, gevent_result, time_gevent = result if self.verbose_dns and time_gevent > time_real + 0.02 and time_gevent > 0.03: msg = 'gevent:%s%s took %dms versus %dms stdlib' % ( func_name, args, time_gevent * 1000.0, time_real * 1000.0) if time_gevent > time_real + 1: word = 'VERY' else: word = 'quite' util.log('\nWARNING: %s slow: %s', word, msg, color='warning') return gevent_result def _run_test_generic(self, func_name, real_func, gevent_func, func_args): real_result, time_real = self.run_resolver(real_func, func_args) gevent_result, time_gevent = self.run_resolver(gevent_func, func_args) if util.QUIET and self.should_log_results(real_result, gevent_result): util.log('') self.__trace_call(real_result, time_real, real_func, func_args) self.__trace_call(gevent_result, time_gevent, gevent_func, func_args) self.assertEqualResults(real_result, gevent_result, func_name) return real_result, time_real, gevent_result, time_gevent def _normalize_result(self, result, func_name): norm_name = '_normalize_result_' + func_name if hasattr(self, norm_name): return getattr(self, norm_name)(result) return result NORMALIZE_GAI_IGNORE_CANONICAL_NAME = RESOLVER_ARES # It tends to return them even when not asked for if not RESOLVER_NOT_SYSTEM: def _normalize_result_getaddrinfo(self, result): return result def _normalize_result_gethostbyname_ex(self, result): return result else: def _normalize_result_gethostbyname_ex(self, result): # Often the second and third part of the tuple (hostname, aliaslist, ipaddrlist) # can be in different orders if we're hitting different servers, # or using the native and ares resolvers due to load-balancing techniques. # We sort them. if isinstance(result, BaseException): return result # result[1].sort() # we wind up discarding this # On Py2 in test_russion_gethostbyname_ex, this # is actually an integer, for some reason. In TestLocalhost.tets__ip6_localhost, # the result isn't this long (maybe an error?). try: result[2].sort() except AttributeError: pass except IndexError: return result # On some systems, a random alias is found in the aliaslist # by the system resolver, but not by cares, and vice versa. We deem the aliaslist # unimportant and discard it. # On some systems (Travis CI), the ipaddrlist for 'localhost' can come back # with two entries 10.28.141.171 (presumably two interfaces?) for c-ares ips = result[2] if ips == ['10.28.141.171', '10.28.141.171']: ips = ['10.28.141.171'] # On some systems, the hostname can get caps return (result[0].lower(), [], ips) def _normalize_result_getaddrinfo(self, result): # Result is a list # (family, socktype, proto, canonname, sockaddr) # e.g., # (AF_INET, SOCK_STREAM, IPPROTO_TCP, 'readthedocs.io', (10.28.141.171, 80)) if isinstance(result, BaseException): return result # On Python 3, the builtin resolver can return SOCK_RAW results, but # c-ares doesn't do that. So we remove those if we find them. # Likewise, on certain Linux systems, even on Python 2, IPPROTO_SCTP (132) # results may be returned --- but that may not even have a constant in the # socket module! 
So to be safe, we strip out anything that's not # SOCK_STREAM or SOCK_DGRAM if isinstance(result, list): result = [ x for x in result if x[1] in (socket.SOCK_STREAM, socket.SOCK_DGRAM) and x[2] in (socket.IPPROTO_TCP, socket.IPPROTO_UDP) ] if self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME: result = [ (family, kind, proto, '', addr) for family, kind, proto, _, addr in result ] if isinstance(result, list): result.sort() return result def _normalize_result_getnameinfo(self, result): return result NORMALIZE_GHBA_IGNORE_ALIAS = False def _normalize_result_gethostbyaddr(self, result): if not RESOLVER_NOT_SYSTEM: return result if self.NORMALIZE_GHBA_IGNORE_ALIAS and isinstance(result, tuple): # On some systems, a random alias is found in the aliaslist # by the system resolver, but not by cares and vice versa. This is *probably* only the # case for localhost or things otherwise in /etc/hosts. We deem the aliaslist # unimportant and discard it. return (result[0], [], result[2]) return result def _compare_exceptions_strict(self, real_result, gevent_result, func_name): if repr(real_result) == repr(gevent_result): # Catch things like `OverflowError('port must be 0-65535.',)``` return msg = (func_name, 'system:', repr(real_result), 'gevent:', repr(gevent_result)) self.assertIs(type(gevent_result), type(real_result), msg) if isinstance(real_result, TypeError): return if PYPY and isinstance(real_result, socket.herror): # PyPy doesn't do errno or multiple arguments in herror; # it just puts a string like 'host lookup failed: <thehost>'; # it must be doing that manually. return self.assertEqual(real_result.args, gevent_result.args, msg) if hasattr(real_result, 'errno'): self.assertEqual(real_result.errno, gevent_result.errno) def _compare_exceptions_lenient(self, real_result, gevent_result, func_name): try: self._compare_exceptions_strict(real_result, gevent_result, func_name) except AssertionError: # Allow raising different things in a few rare cases. if ( func_name not in ( 'getaddrinfo', 'gethostbyaddr', 'gethostbyname', 'gethostbyname_ex', 'getnameinfo', ) or type(real_result) not in (socket.herror, socket.gaierror) or type(gevent_result) not in (socket.herror, socket.gaierror, socket.error) ): raise util.log('WARNING: error type mismatch for %s: %r (gevent) != %r (stdlib)', func_name, gevent_result, real_result, color='warning') _compare_exceptions = _compare_exceptions_lenient if RESOLVER_NOT_SYSTEM else _compare_exceptions_strict def _compare_results(self, real_result, gevent_result, func_name): if real_result == gevent_result: return True compare_func = getattr(self, '_compare_results_' + func_name, self._generic_compare_results) return compare_func(real_result, gevent_result, func_name) def _generic_compare_results(self, real_result, gevent_result, func_name): try: if len(real_result) != len(gevent_result): return False except TypeError: return False return all(self._compare_results(x, y, func_name) for (x, y) in zip(real_result, gevent_result)) def _compare_results_getaddrinfo(self, real_result, gevent_result, func_name): # On some systems, we find more results with # one resolver than we do with the other resolver. # So as long as they have some subset in common, # we'll take it. 
if not set(real_result).isdisjoint(set(gevent_result)): return True return self._generic_compare_results(real_result, gevent_result, func_name) def _compare_address_strings(self, a, b): # IPv6 address from different requests might be different a_segments = a.count(':') b_segments = b.count(':') if a_segments and b_segments: if a_segments == b_segments and a_segments in (4, 5, 6, 7): return True if a.rstrip(':').startswith(b.rstrip(':')) or b.rstrip(':').startswith(a.rstrip(':')): return True if a_segments >= 2 and b_segments >= 2 and a.split(':')[:2] == b.split(':')[:2]: return True return a.split('.', 1)[-1] == b.split('.', 1)[-1] def _compare_results_gethostbyname(self, real_result, gevent_result, _func_name): # Both strings. return self._compare_address_strings(real_result, gevent_result) def _compare_results_gethostbyname_ex(self, real_result, gevent_result, _func_name): # Results are IPv4 only: # (hostname, [aliaslist], [ipaddrlist]) # As for getaddrinfo, we'll just check the ipaddrlist has something in common. return not set(real_result[2]).isdisjoint(set(gevent_result[2])) def assertEqualResults(self, real_result, gevent_result, func_name): errors = ( OverflowError, TypeError, UnicodeError, socket.error, socket.gaierror, socket.herror, ) if isinstance(real_result, errors) and isinstance(gevent_result, errors): self._compare_exceptions(real_result, gevent_result, func_name) return real_result = self._normalize_result(real_result, func_name) gevent_result = self._normalize_result(gevent_result, func_name) if self._compare_results(real_result, gevent_result, func_name): return # If we're using a different resolver, allow the real resolver to generate an # error that the gevent resolver actually gets an answer to. if ( RESOLVER_NOT_SYSTEM and isinstance(real_result, errors) and not isinstance(gevent_result, errors) ): return # On PyPy, socket.getnameinfo() can produce results even when the hostname resolves to # multiple addresses, like www.gevent.org does. DNSPython (and c-ares?) don't do that, # they refuse to pick a name and raise ``socket.error`` if ( RESOLVER_NOT_SYSTEM and PYPY and func_name == 'getnameinfo' and isinstance(gevent_result, socket.error) and not isinstance(real_result, socket.error) ): return # From 2.7 on, assertEqual does a better job highlighting the results than we would # because it calls assertSequenceEqual, which highlights the exact # difference in the tuple self.assertEqual(real_result, gevent_result) class TestTypeError(TestCase): pass add(TestTypeError, None) add(TestTypeError, 25) class TestHostname(TestCase): NORMALIZE_GHBA_IGNORE_ALIAS = True def __normalize_name(self, result): if (RESOLVER_ARES or RESOLVER_DNSPYTHON) and isinstance(result, tuple): # The system resolver can return the FQDN, in the first result, # when given certain configurations. But c-ares and dnspython # do not. 
name = result[0] name = name.split('.', 1)[0] result = (name,) + result[1:] return result def _normalize_result_gethostbyaddr(self, result): result = TestCase._normalize_result_gethostbyaddr(self, result) return self.__normalize_name(result) def _normalize_result_getnameinfo(self, result): result = TestCase._normalize_result_getnameinfo(self, result) if PY2: # Not sure why we only saw this on Python 2 result = self.__normalize_name(result) return result add( TestHostname, socket.gethostname, skip=greentest.RUNNING_ON_TRAVIS and greentest.RESOLVER_NOT_SYSTEM, skip_reason=("Sometimes get a different result for getaddrinfo " "with dnspython; c-ares produces different results for " "localhost on Travis beginning Sept 2019") ) class TestLocalhost(TestCase): # certain tests in test_patched_socket.py only work if getaddrinfo('localhost') does not switch # (e.g. NetworkConnectionAttributesTest.testSourceAddress) #switch_expected = False # XXX: The above has been commented out for some time. Apparently this isn't the case # anymore. def _normalize_result_getaddrinfo(self, result): if RESOLVER_NOT_SYSTEM: # We see that some impls (OS X) return extra results # like DGRAM that ares does not. return () return super(TestLocalhost, self)._normalize_result_getaddrinfo(result) NORMALIZE_GHBA_IGNORE_ALIAS = True if greentest.RUNNING_ON_TRAVIS and greentest.PY2 and RESOLVER_NOT_SYSTEM: def _normalize_result_gethostbyaddr(self, result): # Beginning in November 2017 after an upgrade to Travis, # we started seeing ares return ::1 for localhost, but # the system resolver is still returning 10.28.141.171 under Python 2 result = super(TestLocalhost, self)._normalize_result_gethostbyaddr(result) if isinstance(result, tuple): result = (result[0], result[1], ['10.28.141.171']) return result add( TestLocalhost, 'ip6-localhost', skip=RESOLVER_DNSPYTHON, # XXX: Fix these. skip_reason="Can return gaierror(-2)" ) add( TestLocalhost, 'localhost', skip=greentest.RUNNING_ON_TRAVIS, skip_reason="Can return gaierror(-2)" ) class TestNonexistent(TestCase): pass add(TestNonexistent, 'nonexistentxxxyyy') class Test1234(TestCase): pass add(Test1234, '1.2.3.4') class Test127001(TestCase): NORMALIZE_GHBA_IGNORE_ALIAS = True add( Test127001, '10.28.141.171', # skip=RESOLVER_DNSPYTHON, # skip_reason="Beginning Dec 1 2017, ares started returning ip6-localhost " # "instead of localhost" ) class TestBroadcast(TestCase): switch_expected = False if RESOLVER_DNSPYTHON: # dnspython raises errors for broadcasthost/255.255.255.255, but the system # can resolve it. 
@unittest.skip('ares raises errors for broadcasthost/255.255.255.255') def test__broadcast__gethostbyaddr(self): return test__broadcast__gethostbyname = test__broadcast__gethostbyaddr add(TestBroadcast, '<broadcast>') from gevent.resolver._hostsfile import HostsFile class SanitizedHostsFile(HostsFile): def iter_all_host_addr_pairs(self): for name, addr in super(SanitizedHostsFile, self).iter_all_host_addr_pairs(): if (RESOLVER_NOT_SYSTEM and (name.endswith('local') # ignore bonjour, ares can't find them # ignore common aliases that ares can't find or addr == '255.255.255.255' or name == 'broadcasthost' # We get extra results from some impls, like OS X # it returns DGRAM results or name == 'localhost')): continue # pragma: no cover if name.endswith('local'): # These can only be found if bonjour is running, # and are very slow to do so with the system resolver on OS X continue yield name, addr @greentest.skipIf(greentest.RUNNING_ON_CI, "This sometimes randomly fails on Travis with ares and on appveyor, beginning Feb 13, 2018") # Probably due to round-robin DNS, # since this is not actually the system's etc hosts file. # TODO: Rethink this. We need something reliable. Go back to using # the system's etc hosts? class TestEtcHosts(TestCase): MAX_HOSTS = int(os.getenv('GEVENTTEST_MAX_ETC_HOSTS', '10')) @classmethod def populate_tests(cls): hf = SanitizedHostsFile(os.path.join(os.path.dirname(__file__), 'hosts_file.txt')) all_etc_hosts = sorted(hf.iter_all_host_addr_pairs()) if len(all_etc_hosts) > cls.MAX_HOSTS and not RUN_ALL_HOST_TESTS: all_etc_hosts = all_etc_hosts[:cls.MAX_HOSTS] for host, ip in all_etc_hosts: add(cls, host) add(cls, ip) TestEtcHosts.populate_tests() class TestGeventOrg(TestCase): # For this test to work correctly, it needs to resolve to # an address with a single A record; round-robin DNS and multiple A records # may mess it up (subsequent requests---and we always make two---may return # unequal results). We used to use gevent.org, but that now has multiple A records; # trying www.gevent.org which is a CNAME to readthedocs.org then worked, but it became # an alias for python-gevent.readthedocs.org, which is an alias for readthedocs.io, # and which also has multiple addresses. So we run the resolver twice to try to get # the different answers, if needed. HOSTNAME = 'www.gevent.org' if RESOLVER_NOT_SYSTEM: def _normalize_result_gethostbyname(self, result): if result == '104.17.33.82': result = '104.17.32.82' return result def _normalize_result_gethostbyname_ex(self, result): result = super(TestGeventOrg, self)._normalize_result_gethostbyname_ex(result) if result[0] == 'python-gevent.readthedocs.org': result = ('readthedocs.io', ) + result[1:] return result def test_AI_CANONNAME(self): # Not all systems support AI_CANONNAME; notably tha manylinux # resolvers *sometimes* do not. Specifically, sometimes they # provide the canonical name *only* on the first result. 
args = ( # host TestGeventOrg.HOSTNAME, # port None, # family socket.AF_INET, # type 0, # proto 0, # flags socket.AI_CANONNAME ) gevent_result = gevent_socket.getaddrinfo(*args) self.assertEqual(gevent_result[0][3], 'readthedocs.io') real_result = socket.getaddrinfo(*args) self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME = not all(r[3] for r in real_result) try: self.assertEqualResults(real_result, gevent_result, 'getaddrinfo') finally: del self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME add(TestGeventOrg, TestGeventOrg.HOSTNAME) class TestFamily(TestCase): def test_inet(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_INET) def test_unspec(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_UNSPEC) def test_badvalue(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255) self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255000) self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, -1) @unittest.skipIf(RESOLVER_DNSPYTHON, "Raises the wrong errno") def test_badtype(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, 'x') class Test_getaddrinfo(TestCase): def _test_getaddrinfo(self, *args): self._test('getaddrinfo', *args) def test_80(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 80) def test_int_string(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, '80') def test_0(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 0) def test_http(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 'http') def test_notexistent_tld(self): self._test_getaddrinfo('myhost.mytld', 53) def test_notexistent_dot_com(self): self._test_getaddrinfo('sdfsdfgu5e66098032453245wfdggd.com', 80) def test1(self): return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 52, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, 0) def test2(self): return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 53, socket.AF_INET, socket.SOCK_DGRAM, 17) @unittest.skipIf(RESOLVER_DNSPYTHON, "dnspython only returns some of the possibilities") def test3(self): return self._test_getaddrinfo('google.com', 'http', socket.AF_INET6) @greentest.skipIf(PY2, "Enums only on Python 3.4+") def test_enums(self): # https://github.com/gevent/gevent/issues/1310 # On Python 3, getaddrinfo does special things to make sure that # the fancy enums are returned. gai = gevent_socket.getaddrinfo('example.com', 80, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) af, socktype, _proto, _canonname, _sa = gai[0] self.assertIs(socktype, socket.SOCK_STREAM) self.assertIs(af, socket.AF_INET) class TestInternational(TestCase): if PY2: # We expect these to raise UnicodeEncodeError, which is a # subclass of ValueError REAL_ERRORS = set(TestCase.REAL_ERRORS) - {ValueError,} if RESOLVER_ARES: def test_russian_getaddrinfo_http(self): # And somehow, test_russion_getaddrinfo_http (``getaddrinfo(name, 'http')``) # manages to work with recent versions of Python 2, but our preemptive encoding # to ASCII causes it to fail with the c-ares resolver; but only that one test out of # all of them. self.skipTest("ares fails to encode.") # dns python can actually resolve these: it uses # the 2008 version of idna encoding, whereas on Python 2, # with the default resolver, it tries to encode to ascii and # raises a UnicodeEncodeError. So we get different results. 
add(TestInternational, u'президент.рф', 'russian', skip=(PY2 and RESOLVER_DNSPYTHON), skip_reason="dnspython can actually resolve these") add(TestInternational, u'президент.рф'.encode('idna'), 'idna') @skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo") class TestInterrupted_gethostbyname(gevent.testing.timing.AbstractGenericWaitTestCase): # There are refs to a Waiter in the C code that don't go # away yet; one gc may or may not do it. @greentest.ignores_leakcheck def test_returns_none_after_timeout(self): super(TestInterrupted_gethostbyname, self).test_returns_none_after_timeout() def wait(self, timeout): with gevent.Timeout(timeout, False): for index in xrange(1000000): try: gevent_socket.gethostbyname('www.x%s.com' % index) except socket.error: pass raise AssertionError('Timeout was not raised') def cleanup(self): # Depending on timing, this can raise: # (This suddenly started happening on Apr 6 2016; www.x1000000.com # is apparently no longer around) # File "test__socket_dns.py", line 538, in cleanup # gevent.get_hub().threadpool.join() # File "/home/travis/build/gevent/gevent/src/gevent/threadpool.py", line 108, in join # sleep(delay) # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 169, in sleep # hub.wait(loop.timer(seconds, ref=ref)) # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 651, in wait # result = waiter.get() # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 899, in get # return self.hub.switch() # File "/home/travis/build/gevent/gevent/src/greentest/greentest.py", line 520, in switch # return _original_Hub.switch(self, *args) # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 630, in switch # return RawGreenlet.switch(self) # gaierror: [Errno -2] Name or service not known try: gevent.get_hub().threadpool.join() except Exception: # pragma: no cover pylint:disable=broad-except traceback.print_exc() # class TestInterrupted_getaddrinfo(greentest.GenericWaitTestCase): # # def wait(self, timeout): # with gevent.Timeout(timeout, False): # for index in range(1000): # try: # gevent_socket.getaddrinfo('www.a%s.com' % index, 'http') # except socket.gaierror: # pass class TestBadName(TestCase): pass add(TestBadName, 'xxxxxxxxxxxx') class TestBadIP(TestCase): pass add(TestBadIP, '1.2.3.400') @greentest.skipIf(greentest.RUNNING_ON_TRAVIS, "Travis began returning ip6-localhost") class Test_getnameinfo_127001(TestCase): def test(self): self._test('getnameinfo', ('10.28.141.171', 80), 0) def test_DGRAM(self): self._test('getnameinfo', ('10.28.141.171', 779), 0) self._test('getnameinfo', ('10.28.141.171', 779), socket.NI_DGRAM) def test_NOFQDN(self): # I get ('localhost', 'www') with _socket but ('localhost.localdomain', 'www') with gevent.socket self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NOFQDN) def test_NAMEREQD(self): self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NAMEREQD) class Test_getnameinfo_geventorg(TestCase): @unittest.skipIf(RESOLVER_DNSPYTHON, "dnspython raises an error when multiple results are returned") def test_NUMERICHOST(self): self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0) self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICHOST) @unittest.skipIf(RESOLVER_DNSPYTHON, "dnspython raises an error when multiple results are returned") def test_NUMERICSERV(self): self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICSERV) def test_domain1(self): self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0) def 
test_domain2(self): self._test('getnameinfo', ('www.gevent.org', 80), 0) def test_port_zero(self): self._test('getnameinfo', ('www.gevent.org', 0), 0) class Test_getnameinfo_fail(TestCase): def test_port_string(self): self._test('getnameinfo', ('www.gevent.org', 'http'), 0) def test_bad_flags(self): self._test('getnameinfo', ('localhost', 80), 55555555) class TestInvalidPort(TestCase): @flaky.reraises_flaky_race_condition() def test_overflow_neg_one(self): # An Appveyor beginning 2019-03-21, the system resolver # sometimes returns ('23.100.69.251', '65535') instead of # raising an error. That IP address belongs to # readthedocs[.io?] which is where www.gevent.org is a CNAME # to...but it doesn't actually *reverse* to readthedocs.io. # Can't reproduce locally, not sure what's happening self._test('getnameinfo', ('www.gevent.org', -1), 0) # Beginning with PyPy 2.7 7.1 on Appveyor, we sometimes see this # return an OverflowError instead of the TypeError about None @greentest.skipOnLibuvOnPyPyOnWin("Errors dont match") def test_typeerror_none(self): self._test('getnameinfo', ('www.gevent.org', None), 0) # Beginning with PyPy 2.7 7.1 on Appveyor, we sometimes see this # return an TypeError instead of the OverflowError. # XXX: But see Test_getnameinfo_fail.test_port_string where this does work. @greentest.skipOnLibuvOnPyPyOnWin("Errors don't match") def test_typeerror_str(self): self._test('getnameinfo', ('www.gevent.org', 'x'), 0) def test_overflow_port_too_large(self): self._test('getnameinfo', ('www.gevent.org', 65536), 0) if __name__ == '__main__': greentest.main()
37.865801
110
0.636933
from __future__ import print_function from __future__ import absolute_import from __future__ import division import gevent from gevent import monkey import os import re import unittest import socket from time import time import traceback import gevent.socket as gevent_socket import gevent.testing as greentest from gevent.testing import util from gevent.testing.six import xrange from gevent.testing import flaky from gevent.testing.skipping import skipWithoutExternalNetwork resolver = gevent.get_hub().resolver util.debug('Resolver: %s', resolver) if getattr(resolver, 'pool', None) is not None: resolver.pool.size = 1 from gevent.testing.sysinfo import RESOLVER_NOT_SYSTEM from gevent.testing.sysinfo import RESOLVER_DNSPYTHON from gevent.testing.sysinfo import RESOLVER_ARES from gevent.testing.sysinfo import PY2 from gevent.testing.sysinfo import PYPY import gevent.testing.timing assert gevent_socket.gaierror is socket.gaierror assert gevent_socket.error is socket.error RUN_ALL_HOST_TESTS = os.getenv('GEVENTTEST_RUN_ALL_ETC_HOST_TESTS', '') def add(klass, hostname, name=None, skip=None, skip_reason=None): call = callable(hostname) def _setattr(k, n, func): if skip: func = greentest.skipIf(skip, skip_reason,)(func) if not hasattr(k, n): setattr(k, n, func) if name is None: if call: name = hostname.__name__ else: name = re.sub(r'[^\w]+', '_', repr(hostname)) assert name, repr(hostname) def test_getaddrinfo_http(self): x = hostname() if call else hostname self._test('getaddrinfo', x, 'http') test_getaddrinfo_http.__name__ = 'test_%s_getaddrinfo_http' % name _setattr(klass, test_getaddrinfo_http.__name__, test_getaddrinfo_http) def test_gethostbyname(self): x = hostname() if call else hostname ipaddr = self._test('gethostbyname', x) if not isinstance(ipaddr, Exception): self._test('gethostbyaddr', ipaddr) test_gethostbyname.__name__ = 'test_%s_gethostbyname' % name _setattr(klass, test_gethostbyname.__name__, test_gethostbyname) def test3(self): x = hostname() if call else hostname self._test('gethostbyname_ex', x) test3.__name__ = 'test_%s_gethostbyname_ex' % name _setattr(klass, test3.__name__, test3) def test4(self): x = hostname() if call else hostname self._test('gethostbyaddr', x) test4.__name__ = 'test_%s_gethostbyaddr' % name _setattr(klass, test4.__name__, test4) def test5(self): x = hostname() if call else hostname self._test('getnameinfo', (x, 80), 0) test5.__name__ = 'test_%s_getnameinfo' % name _setattr(klass, test5.__name__, test5) @skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo") class TestCase(greentest.TestCase): maxDiff = None __timeout__ = 30 switch_expected = None TRACE = not util.QUIET and os.getenv('GEVENT_DEBUG', '') == 'trace' verbose_dns = TRACE def trace(self, message, *args, **kwargs): if self.TRACE: util.debug(message, *args, **kwargs) REAL_ERRORS = (AttributeError, ValueError, NameError) def __run_resolver(self, function, args): try: result = function(*args) assert not isinstance(result, BaseException), repr(result) return result except self.REAL_ERRORS: raise except Exception as ex: if self.TRACE: traceback.print_exc() return ex def __trace_call(self, result, runtime, function, *args): util.debug(self.__format_call(function, args)) self.__trace_fresult(result, runtime) def __format_call(self, function, args): args = repr(args) if args.endswith(',)'): args = args[:-2] + ')' try: module = function.__module__.replace('gevent._socketcommon', 'gevent') name = function.__name__ return '%s:%s%s' % (module, name, args) except AttributeError: return 
function + args def __trace_fresult(self, result, seconds): if isinstance(result, Exception): msg = ' -=> raised %r' % (result, ) else: msg = ' -=> returned %r' % (result, ) time_ms = ' %.2fms' % (seconds * 1000.0, ) space = 80 - len(msg) - len(time_ms) if space > 0: space = ' ' * space else: space = '' util.debug(msg + space + time_ms) if not TRACE: def run_resolver(self, function, func_args): now = time() return self.__run_resolver(function, func_args), time() - now else: def run_resolver(self, function, func_args): self.trace(self.__format_call(function, func_args)) delta = time() result = self.__run_resolver(function, func_args) delta = time() - delta self.__trace_fresult(result, delta) return result, delta def setUp(self): super(TestCase, self).setUp() if not self.verbose_dns: gevent.get_hub().exception_stream = None def tearDown(self): if not self.verbose_dns: try: del gevent.get_hub().exception_stream except AttributeError: pass super(TestCase, self).tearDown() def should_log_results(self, result1, result2): if not self.verbose_dns: return False if isinstance(result1, BaseException) and isinstance(result2, BaseException): return type(result1) is not type(result2) return repr(result1) != repr(result2) def _test(self, func_name, *args): gevent_func = getattr(gevent_socket, func_name) real_func = monkey.get_original('socket', func_name) tester = getattr(self, '_run_test_' + func_name, self._run_test_generic) result = tester(func_name, real_func, gevent_func, args) _real_result, time_real, gevent_result, time_gevent = result if self.verbose_dns and time_gevent > time_real + 0.02 and time_gevent > 0.03: msg = 'gevent:%s%s took %dms versus %dms stdlib' % ( func_name, args, time_gevent * 1000.0, time_real * 1000.0) if time_gevent > time_real + 1: word = 'VERY' else: word = 'quite' util.log('\nWARNING: %s slow: %s', word, msg, color='warning') return gevent_result def _run_test_generic(self, func_name, real_func, gevent_func, func_args): real_result, time_real = self.run_resolver(real_func, func_args) gevent_result, time_gevent = self.run_resolver(gevent_func, func_args) if util.QUIET and self.should_log_results(real_result, gevent_result): util.log('') self.__trace_call(real_result, time_real, real_func, func_args) self.__trace_call(gevent_result, time_gevent, gevent_func, func_args) self.assertEqualResults(real_result, gevent_result, func_name) return real_result, time_real, gevent_result, time_gevent def _normalize_result(self, result, func_name): norm_name = '_normalize_result_' + func_name if hasattr(self, norm_name): return getattr(self, norm_name)(result) return result NORMALIZE_GAI_IGNORE_CANONICAL_NAME = RESOLVER_ARES if not RESOLVER_NOT_SYSTEM: def _normalize_result_getaddrinfo(self, result): return result def _normalize_result_gethostbyname_ex(self, result): return result else: def _normalize_result_gethostbyname_ex(self, result): # or using the native and ares resolvers due to load-balancing techniques. # We sort them. if isinstance(result, BaseException): return result # result[1].sort() # we wind up discarding this # On Py2 in test_russion_gethostbyname_ex, this # is actually an integer, for some reason. In TestLocalhost.tets__ip6_localhost, # the result isn't this long (maybe an error?). 
try: result[2].sort() except AttributeError: pass except IndexError: return result ips = result[2] if ips == ['10.28.141.171', '10.28.141.171']: ips = ['10.28.141.171'] return (result[0].lower(), [], ips) def _normalize_result_getaddrinfo(self, result): if isinstance(result, BaseException): return result # Likewise, on certain Linux systems, even on Python 2, IPPROTO_SCTP (132) # results may be returned --- but that may not even have a constant in the # socket module! So to be safe, we strip out anything that's not if isinstance(result, list): result = [ x for x in result if x[1] in (socket.SOCK_STREAM, socket.SOCK_DGRAM) and x[2] in (socket.IPPROTO_TCP, socket.IPPROTO_UDP) ] if self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME: result = [ (family, kind, proto, '', addr) for family, kind, proto, _, addr in result ] if isinstance(result, list): result.sort() return result def _normalize_result_getnameinfo(self, result): return result NORMALIZE_GHBA_IGNORE_ALIAS = False def _normalize_result_gethostbyaddr(self, result): if not RESOLVER_NOT_SYSTEM: return result if self.NORMALIZE_GHBA_IGNORE_ALIAS and isinstance(result, tuple): return (result[0], [], result[2]) return result def _compare_exceptions_strict(self, real_result, gevent_result, func_name): if repr(real_result) == repr(gevent_result): return msg = (func_name, 'system:', repr(real_result), 'gevent:', repr(gevent_result)) self.assertIs(type(gevent_result), type(real_result), msg) if isinstance(real_result, TypeError): return if PYPY and isinstance(real_result, socket.herror): # it just puts a string like 'host lookup failed: <thehost>'; # it must be doing that manually. return self.assertEqual(real_result.args, gevent_result.args, msg) if hasattr(real_result, 'errno'): self.assertEqual(real_result.errno, gevent_result.errno) def _compare_exceptions_lenient(self, real_result, gevent_result, func_name): try: self._compare_exceptions_strict(real_result, gevent_result, func_name) except AssertionError: # Allow raising different things in a few rare cases. if ( func_name not in ( 'getaddrinfo', 'gethostbyaddr', 'gethostbyname', 'gethostbyname_ex', 'getnameinfo', ) or type(real_result) not in (socket.herror, socket.gaierror) or type(gevent_result) not in (socket.herror, socket.gaierror, socket.error) ): raise util.log('WARNING: error type mismatch for %s: %r (gevent) != %r (stdlib)', func_name, gevent_result, real_result, color='warning') _compare_exceptions = _compare_exceptions_lenient if RESOLVER_NOT_SYSTEM else _compare_exceptions_strict def _compare_results(self, real_result, gevent_result, func_name): if real_result == gevent_result: return True compare_func = getattr(self, '_compare_results_' + func_name, self._generic_compare_results) return compare_func(real_result, gevent_result, func_name) def _generic_compare_results(self, real_result, gevent_result, func_name): try: if len(real_result) != len(gevent_result): return False except TypeError: return False return all(self._compare_results(x, y, func_name) for (x, y) in zip(real_result, gevent_result)) def _compare_results_getaddrinfo(self, real_result, gevent_result, func_name): # On some systems, we find more results with # one resolver than we do with the other resolver. # So as long as they have some subset in common, # we'll take it. 
if not set(real_result).isdisjoint(set(gevent_result)): return True return self._generic_compare_results(real_result, gevent_result, func_name) def _compare_address_strings(self, a, b): a_segments = a.count(':') b_segments = b.count(':') if a_segments and b_segments: if a_segments == b_segments and a_segments in (4, 5, 6, 7): return True if a.rstrip(':').startswith(b.rstrip(':')) or b.rstrip(':').startswith(a.rstrip(':')): return True if a_segments >= 2 and b_segments >= 2 and a.split(':')[:2] == b.split(':')[:2]: return True return a.split('.', 1)[-1] == b.split('.', 1)[-1] def _compare_results_gethostbyname(self, real_result, gevent_result, _func_name): return self._compare_address_strings(real_result, gevent_result) def _compare_results_gethostbyname_ex(self, real_result, gevent_result, _func_name): return not set(real_result[2]).isdisjoint(set(gevent_result[2])) def assertEqualResults(self, real_result, gevent_result, func_name): errors = ( OverflowError, TypeError, UnicodeError, socket.error, socket.gaierror, socket.herror, ) if isinstance(real_result, errors) and isinstance(gevent_result, errors): self._compare_exceptions(real_result, gevent_result, func_name) return real_result = self._normalize_result(real_result, func_name) gevent_result = self._normalize_result(gevent_result, func_name) if self._compare_results(real_result, gevent_result, func_name): return # If we're using a different resolver, allow the real resolver to generate an if ( RESOLVER_NOT_SYSTEM and isinstance(real_result, errors) and not isinstance(gevent_result, errors) ): return # they refuse to pick a name and raise ``socket.error`` if ( RESOLVER_NOT_SYSTEM and PYPY and func_name == 'getnameinfo' and isinstance(gevent_result, socket.error) and not isinstance(real_result, socket.error) ): return # From 2.7 on, assertEqual does a better job highlighting the results than we would # because it calls assertSequenceEqual, which highlights the exact # difference in the tuple self.assertEqual(real_result, gevent_result) class TestTypeError(TestCase): pass add(TestTypeError, None) add(TestTypeError, 25) class TestHostname(TestCase): NORMALIZE_GHBA_IGNORE_ALIAS = True def __normalize_name(self, result): if (RESOLVER_ARES or RESOLVER_DNSPYTHON) and isinstance(result, tuple): # The system resolver can return the FQDN, in the first result, # when given certain configurations. But c-ares and dnspython # do not. name = result[0] name = name.split('.', 1)[0] result = (name,) + result[1:] return result def _normalize_result_gethostbyaddr(self, result): result = TestCase._normalize_result_gethostbyaddr(self, result) return self.__normalize_name(result) def _normalize_result_getnameinfo(self, result): result = TestCase._normalize_result_getnameinfo(self, result) if PY2: # Not sure why we only saw this on Python 2 result = self.__normalize_name(result) return result add( TestHostname, socket.gethostname, skip=greentest.RUNNING_ON_TRAVIS and greentest.RESOLVER_NOT_SYSTEM, skip_reason=("Sometimes get a different result for getaddrinfo " "with dnspython; c-ares produces different results for " "localhost on Travis beginning Sept 2019") ) class TestLocalhost(TestCase): # certain tests in test_patched_socket.py only work if getaddrinfo('localhost') does not switch # (e.g. NetworkConnectionAttributesTest.testSourceAddress) #switch_expected = False # XXX: The above has been commented out for some time. 
Apparently this isn't the case def _normalize_result_getaddrinfo(self, result): if RESOLVER_NOT_SYSTEM: return () return super(TestLocalhost, self)._normalize_result_getaddrinfo(result) NORMALIZE_GHBA_IGNORE_ALIAS = True if greentest.RUNNING_ON_TRAVIS and greentest.PY2 and RESOLVER_NOT_SYSTEM: def _normalize_result_gethostbyaddr(self, result): result = super(TestLocalhost, self)._normalize_result_gethostbyaddr(result) if isinstance(result, tuple): result = (result[0], result[1], ['10.28.141.171']) return result add( TestLocalhost, 'ip6-localhost', skip=RESOLVER_DNSPYTHON, skip_reason="Can return gaierror(-2)" ) add( TestLocalhost, 'localhost', skip=greentest.RUNNING_ON_TRAVIS, skip_reason="Can return gaierror(-2)" ) class TestNonexistent(TestCase): pass add(TestNonexistent, 'nonexistentxxxyyy') class Test1234(TestCase): pass add(Test1234, '1.2.3.4') class Test127001(TestCase): NORMALIZE_GHBA_IGNORE_ALIAS = True add( Test127001, '10.28.141.171', ) class TestBroadcast(TestCase): switch_expected = False if RESOLVER_DNSPYTHON: @unittest.skip('ares raises errors for broadcasthost/255.255.255.255') def test__broadcast__gethostbyaddr(self): return test__broadcast__gethostbyname = test__broadcast__gethostbyaddr add(TestBroadcast, '<broadcast>') from gevent.resolver._hostsfile import HostsFile class SanitizedHostsFile(HostsFile): def iter_all_host_addr_pairs(self): for name, addr in super(SanitizedHostsFile, self).iter_all_host_addr_pairs(): if (RESOLVER_NOT_SYSTEM and (name.endswith('local') # ignore common aliases that ares can't find or addr == '255.255.255.255' or name == 'broadcasthost' or name == 'localhost')): continue if name.endswith('local'): continue yield name, addr @greentest.skipIf(greentest.RUNNING_ON_CI, "This sometimes randomly fails on Travis with ares and on appveyor, beginning Feb 13, 2018") # TODO: Rethink this. We need something reliable. Go back to using # the system's etc hosts? 
class TestEtcHosts(TestCase): MAX_HOSTS = int(os.getenv('GEVENTTEST_MAX_ETC_HOSTS', '10')) @classmethod def populate_tests(cls): hf = SanitizedHostsFile(os.path.join(os.path.dirname(__file__), 'hosts_file.txt')) all_etc_hosts = sorted(hf.iter_all_host_addr_pairs()) if len(all_etc_hosts) > cls.MAX_HOSTS and not RUN_ALL_HOST_TESTS: all_etc_hosts = all_etc_hosts[:cls.MAX_HOSTS] for host, ip in all_etc_hosts: add(cls, host) add(cls, ip) TestEtcHosts.populate_tests() class TestGeventOrg(TestCase): HOSTNAME = 'www.gevent.org' if RESOLVER_NOT_SYSTEM: def _normalize_result_gethostbyname(self, result): if result == '104.17.33.82': result = '104.17.32.82' return result def _normalize_result_gethostbyname_ex(self, result): result = super(TestGeventOrg, self)._normalize_result_gethostbyname_ex(result) if result[0] == 'python-gevent.readthedocs.org': result = ('readthedocs.io', ) + result[1:] return result def test_AI_CANONNAME(self): args = ( TestGeventOrg.HOSTNAME, None, socket.AF_INET, 0, 0, socket.AI_CANONNAME ) gevent_result = gevent_socket.getaddrinfo(*args) self.assertEqual(gevent_result[0][3], 'readthedocs.io') real_result = socket.getaddrinfo(*args) self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME = not all(r[3] for r in real_result) try: self.assertEqualResults(real_result, gevent_result, 'getaddrinfo') finally: del self.NORMALIZE_GAI_IGNORE_CANONICAL_NAME add(TestGeventOrg, TestGeventOrg.HOSTNAME) class TestFamily(TestCase): def test_inet(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_INET) def test_unspec(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, socket.AF_UNSPEC) def test_badvalue(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255) self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, 255000) self._test('getaddrinfo', TestGeventOrg.HOSTNAME, None, -1) @unittest.skipIf(RESOLVER_DNSPYTHON, "Raises the wrong errno") def test_badtype(self): self._test('getaddrinfo', TestGeventOrg.HOSTNAME, 'x') class Test_getaddrinfo(TestCase): def _test_getaddrinfo(self, *args): self._test('getaddrinfo', *args) def test_80(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 80) def test_int_string(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, '80') def test_0(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 0) def test_http(self): self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 'http') def test_notexistent_tld(self): self._test_getaddrinfo('myhost.mytld', 53) def test_notexistent_dot_com(self): self._test_getaddrinfo('sdfsdfgu5e66098032453245wfdggd.com', 80) def test1(self): return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 52, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, 0) def test2(self): return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 53, socket.AF_INET, socket.SOCK_DGRAM, 17) @unittest.skipIf(RESOLVER_DNSPYTHON, "dnspython only returns some of the possibilities") def test3(self): return self._test_getaddrinfo('google.com', 'http', socket.AF_INET6) @greentest.skipIf(PY2, "Enums only on Python 3.4+") def test_enums(self): gai = gevent_socket.getaddrinfo('example.com', 80, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) af, socktype, _proto, _canonname, _sa = gai[0] self.assertIs(socktype, socket.SOCK_STREAM) self.assertIs(af, socket.AF_INET) class TestInternational(TestCase): if PY2: REAL_ERRORS = set(TestCase.REAL_ERRORS) - {ValueError,} if RESOLVER_ARES: def test_russian_getaddrinfo_http(self): self.skipTest("ares fails to encode.") add(TestInternational, u'президент.рф', 'russian', skip=(PY2 and RESOLVER_DNSPYTHON), 
skip_reason="dnspython can actually resolve these") add(TestInternational, u'президент.рф'.encode('idna'), 'idna') @skipWithoutExternalNetwork("Tries to resolve and compare hostnames/addrinfo") class TestInterrupted_gethostbyname(gevent.testing.timing.AbstractGenericWaitTestCase): # away yet; one gc may or may not do it. @greentest.ignores_leakcheck def test_returns_none_after_timeout(self): super(TestInterrupted_gethostbyname, self).test_returns_none_after_timeout() def wait(self, timeout): with gevent.Timeout(timeout, False): for index in xrange(1000000): try: gevent_socket.gethostbyname('www.x%s.com' % index) except socket.error: pass raise AssertionError('Timeout was not raised') def cleanup(self): # Depending on timing, this can raise: # (This suddenly started happening on Apr 6 2016; www.x1000000.com # is apparently no longer around) # File "test__socket_dns.py", line 538, in cleanup # gevent.get_hub().threadpool.join() # File "/home/travis/build/gevent/gevent/src/gevent/threadpool.py", line 108, in join # sleep(delay) # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 169, in sleep # hub.wait(loop.timer(seconds, ref=ref)) # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 651, in wait # result = waiter.get() # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 899, in get # return self.hub.switch() # File "/home/travis/build/gevent/gevent/src/greentest/greentest.py", line 520, in switch # return _original_Hub.switch(self, *args) # File "/home/travis/build/gevent/gevent/src/gevent/hub.py", line 630, in switch # return RawGreenlet.switch(self) # gaierror: [Errno -2] Name or service not known try: gevent.get_hub().threadpool.join() except Exception: # pragma: no cover pylint:disable=broad-except traceback.print_exc() # class TestInterrupted_getaddrinfo(greentest.GenericWaitTestCase): # # def wait(self, timeout): # with gevent.Timeout(timeout, False): # for index in range(1000): # try: # gevent_socket.getaddrinfo('www.a%s.com' % index, 'http') # except socket.gaierror: # pass class TestBadName(TestCase): pass add(TestBadName, 'xxxxxxxxxxxx') class TestBadIP(TestCase): pass add(TestBadIP, '1.2.3.400') @greentest.skipIf(greentest.RUNNING_ON_TRAVIS, "Travis began returning ip6-localhost") class Test_getnameinfo_127001(TestCase): def test(self): self._test('getnameinfo', ('10.28.141.171', 80), 0) def test_DGRAM(self): self._test('getnameinfo', ('10.28.141.171', 779), 0) self._test('getnameinfo', ('10.28.141.171', 779), socket.NI_DGRAM) def test_NOFQDN(self): # I get ('localhost', 'www') with _socket but ('localhost.localdomain', 'www') with gevent.socket self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NOFQDN) def test_NAMEREQD(self): self._test('getnameinfo', ('10.28.141.171', 80), socket.NI_NAMEREQD) class Test_getnameinfo_geventorg(TestCase): @unittest.skipIf(RESOLVER_DNSPYTHON, "dnspython raises an error when multiple results are returned") def test_NUMERICHOST(self): self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0) self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICHOST) @unittest.skipIf(RESOLVER_DNSPYTHON, "dnspython raises an error when multiple results are returned") def test_NUMERICSERV(self): self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), socket.NI_NUMERICSERV) def test_domain1(self): self._test('getnameinfo', (TestGeventOrg.HOSTNAME, 80), 0) def test_domain2(self): self._test('getnameinfo', ('www.gevent.org', 80), 0) def test_port_zero(self): self._test('getnameinfo', ('www.gevent.org', 
0), 0) class Test_getnameinfo_fail(TestCase): def test_port_string(self): self._test('getnameinfo', ('www.gevent.org', 'http'), 0) def test_bad_flags(self): self._test('getnameinfo', ('localhost', 80), 55555555) class TestInvalidPort(TestCase): @flaky.reraises_flaky_race_condition() def test_overflow_neg_one(self): # An Appveyor beginning 2019-03-21, the system resolver # sometimes returns ('23.100.69.251', '65535') instead of # raising an error. That IP address belongs to # readthedocs[.io?] which is where www.gevent.org is a CNAME # to...but it doesn't actually *reverse* to readthedocs.io. self._test('getnameinfo', ('www.gevent.org', -1), 0) @greentest.skipOnLibuvOnPyPyOnWin("Errors dont match") def test_typeerror_none(self): self._test('getnameinfo', ('www.gevent.org', None), 0) @greentest.skipOnLibuvOnPyPyOnWin("Errors don't match") def test_typeerror_str(self): self._test('getnameinfo', ('www.gevent.org', 'x'), 0) def test_overflow_port_too_large(self): self._test('getnameinfo', ('www.gevent.org', 65536), 0) if __name__ == '__main__': greentest.main()
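The test suite above never compares raw resolver output directly; it first normalizes both sides (filtering getaddrinfo entries down to TCP/UDP, blanking canonical names, sorting) and then accepts any overlap between the two result sets. A standalone sketch of that pattern, not taken from the file above — the host name is an illustrative assumption and the snippet needs gevent installed plus working DNS to run:

import socket
import gevent.socket as gevent_socket

def normalized(entries):
    # Keep only TCP/UDP entries and blank the canonical name, mirroring the
    # normalization the test case above applies before comparing results.
    return sorted(
        (family, kind, proto, '', addr)
        for family, kind, proto, _cname, addr in entries
        if kind in (socket.SOCK_STREAM, socket.SOCK_DGRAM)
        and proto in (socket.IPPROTO_TCP, socket.IPPROTO_UDP)
    )

host = 'example.com'  # illustrative host, not from the record
system_result = normalized(socket.getaddrinfo(host, 80))
gevent_result = normalized(gevent_socket.getaddrinfo(host, 80))
# The suite accepts a non-empty intersection; exact equality is the strict case.
print(not set(system_result).isdisjoint(gevent_result))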
true
true
1c49e25c6e75fd9b0b06b279779a858490d11a7e
9,520
py
Python
nova/api/openstack/placement/microversion.py
viveknandavanam/nova
556377b6915936467436c9d5bb33bc0e22244e1e
[ "Apache-2.0" ]
1
2015-11-30T19:44:00.000Z
2015-11-30T19:44:00.000Z
nova/api/openstack/placement/microversion.py
viveknandavanam/nova
556377b6915936467436c9d5bb33bc0e22244e1e
[ "Apache-2.0" ]
5
2018-04-12T16:44:34.000Z
2018-05-08T13:33:05.000Z
nova/api/openstack/placement/microversion.py
viveknandavanam/nova
556377b6915936467436c9d5bb33bc0e22244e1e
[ "Apache-2.0" ]
3
2018-04-04T15:15:01.000Z
2018-04-19T18:14:25.000Z
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Microversion handling.""" # NOTE(cdent): This code is taken from enamel: # https://github.com/jaypipes/enamel and was the original source of # the code now used in microversion_parse library. import collections import inspect import microversion_parse import webob # NOTE(cdent): avoid cyclical import conflict between util and # microversion import nova.api.openstack.placement.util from nova.i18n import _ SERVICE_TYPE = 'placement' MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE VERSIONED_METHODS = collections.defaultdict(list) # The Canonical Version List VERSIONS = [ '1.0', '1.1', # initial support for aggregate.get_aggregates and set_aggregates '1.2', # Adds /resource_classes resource endpoint '1.3', # Adds 'member_of' query parameter to get resource providers # that are members of any of the listed aggregates '1.4', # Adds resources query string parameter in GET /resource_providers '1.5', # Adds DELETE /resource_providers/{uuid}/inventories ] def max_version_string(): return VERSIONS[-1] def min_version_string(): return VERSIONS[0] def parse_version_string(version_string): """Turn a version string into a Version :param version_string: A string of two numerals, X.Y, or 'latest' :returns: a Version :raises: TypeError """ if version_string == 'latest': version_string = max_version_string() try: # The combination of int and a limited split with the # named tuple means that this incantation will raise # ValueError or TypeError when the incoming data is # poorly formed but will, however, naturally adapt to # extraneous whitespace. return Version(*(int(value) for value in version_string.split('.', 1))) except (ValueError, TypeError) as exc: raise TypeError('invalid version string: %s; %s' % ( version_string, exc)) def raise_http_status_code_if_not_version(req, status_code, min_version, max_version=None): """Utility to raise a http status code if the wanted microversion does not match. 
:param req: The HTTP request for the placement api :param status_code: HTTP status code (integer value) to be raised :param min_version: Minimum placement microversion level :param max_version: Maximum placement microversion level :returns: None :raises: HTTP status code if the specified microversion does not match :raises: KeyError if status_code is not a valid HTTP status code """ if not isinstance(min_version, tuple): min_version = parse_version_string(min_version) if max_version and not isinstance(max_version, tuple): max_version = parse_version_string(max_version) want_version = req.environ[MICROVERSION_ENVIRON] if not want_version.matches(min_version, max_version): raise webob.exc.status_map[status_code] class MicroversionMiddleware(object): """WSGI middleware for getting microversion info.""" def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): util = nova.api.openstack.placement.util try: microversion = extract_version(req.headers) except ValueError as exc: raise webob.exc.HTTPNotAcceptable( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) except TypeError as exc: raise webob.exc.HTTPBadRequest( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) req.environ[MICROVERSION_ENVIRON] = microversion microversion_header = '%s %s' % (SERVICE_TYPE, microversion) try: response = req.get_response(self.application) except webob.exc.HTTPError as exc: # If there was an error in the application we still need # to send the microversion header, so add the header and # re-raise the exception. exc.headers.add(Version.HEADER, microversion_header) raise exc response.headers.add(Version.HEADER, microversion_header) response.headers.add('vary', Version.HEADER) return response class Version(collections.namedtuple('Version', 'major minor')): """A namedtuple containing major and minor values. Since it is a tuple is automatically comparable. """ HEADER = 'OpenStack-API-Version' MIN_VERSION = None MAX_VERSION = None def __str__(self): return '%s.%s' % (self.major, self.minor) @property def max_version(self): if not self.MAX_VERSION: self.MAX_VERSION = parse_version_string(max_version_string()) return self.MAX_VERSION @property def min_version(self): if not self.MIN_VERSION: self.MIN_VERSION = parse_version_string(min_version_string()) return self.MIN_VERSION def matches(self, min_version=None, max_version=None): if min_version is None: min_version = self.min_version if max_version is None: max_version = self.max_version return min_version <= self <= max_version def extract_version(headers): """Extract the microversion from Version.HEADER There may be multiple headers and some which don't match our service. """ found_version = microversion_parse.get_version(headers, service_type=SERVICE_TYPE) version_string = found_version or min_version_string() request_version = parse_version_string(version_string) # We need a version that is in VERSION and within MIX and MAX. # This gives us the option to administratively disable a # version if we really need to. if (str(request_version) in VERSIONS and request_version.matches()): return request_version raise ValueError('Unacceptable version header: %s' % version_string) # From twisted # https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py def _fully_qualified_name(obj): """Return the fully qualified name of a module, class, method or function. 
Classes and functions need to be module level ones to be correctly qualified. """ try: name = obj.__qualname__ except AttributeError: name = obj.__name__ if inspect.isclass(obj) or inspect.isfunction(obj): moduleName = obj.__module__ return "%s.%s" % (moduleName, name) elif inspect.ismethod(obj): try: cls = obj.im_class except AttributeError: # Python 3 eliminates im_class, substitutes __module__ and # __qualname__ to provide similar information. return "%s.%s" % (obj.__module__, obj.__qualname__) else: className = _fully_qualified_name(cls) return "%s.%s" % (className, name) return name def _find_method(f, version): """Look in VERSIONED_METHODS for method with right name matching version. If no match is found raise a 404. """ qualified_name = _fully_qualified_name(f) # A KeyError shouldn't be possible here, but let's be robust # just in case. method_list = VERSIONED_METHODS.get(qualified_name, []) for min_version, max_version, func in method_list: if min_version <= version <= max_version: return func raise webob.exc.HTTPNotFound() def version_handler(min_ver, max_ver=None): """Decorator for versioning API methods. Add as a decorator to a placement API handler to constrain the microversions at which it will run. Add after the ``wsgify`` decorator. This does not check for version intersections. That's the domain of tests. :param min_ver: A string of two numerals, X.Y indicating the minimum version allowed for the decorated method. :param min_ver: A string of two numerals, X.Y, indicating the maximum version allowed for the decorated method. """ def decorator(f): min_version = parse_version_string(min_ver) if max_ver: max_version = parse_version_string(max_ver) else: max_version = parse_version_string(max_version_string()) qualified_name = _fully_qualified_name(f) VERSIONED_METHODS[qualified_name].append( (min_version, max_version, f)) def decorated_func(req, *args, **kwargs): version = req.environ[MICROVERSION_ENVIRON] return _find_method(f, version)(req, *args, **kwargs) # Sort highest min version to beginning of list. VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0], reverse=True) return decorated_func return decorator
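parse_version_string and the Version namedtuple above reduce microversion range checks to plain tuple comparison. A self-contained sketch of that idea, independent of the module above (the version list is copied from VERSIONS):

import collections

Version = collections.namedtuple('Version', 'major minor')
VERSIONS = ['1.0', '1.1', '1.2', '1.3', '1.4', '1.5']

def parse(version_string):
    # 'latest' maps to the newest entry, exactly as in parse_version_string.
    if version_string == 'latest':
        version_string = VERSIONS[-1]
    return Version(*(int(value) for value in version_string.split('.', 1)))

assert parse('latest') == Version(1, 5)
assert parse('1.0') <= parse('1.3') <= parse('latest')  # namedtuples compare element-wise
assert not (parse('1.4') <= parse('1.3'))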
35.522388
78
0.67542
import collections import inspect import microversion_parse import webob import nova.api.openstack.placement.util from nova.i18n import _ SERVICE_TYPE = 'placement' MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE VERSIONED_METHODS = collections.defaultdict(list) VERSIONS = [ '1.0', '1.1', '1.2', '1.3', '1.4', '1.5', ] def max_version_string(): return VERSIONS[-1] def min_version_string(): return VERSIONS[0] def parse_version_string(version_string): if version_string == 'latest': version_string = max_version_string() try: return Version(*(int(value) for value in version_string.split('.', 1))) except (ValueError, TypeError) as exc: raise TypeError('invalid version string: %s; %s' % ( version_string, exc)) def raise_http_status_code_if_not_version(req, status_code, min_version, max_version=None): if not isinstance(min_version, tuple): min_version = parse_version_string(min_version) if max_version and not isinstance(max_version, tuple): max_version = parse_version_string(max_version) want_version = req.environ[MICROVERSION_ENVIRON] if not want_version.matches(min_version, max_version): raise webob.exc.status_map[status_code] class MicroversionMiddleware(object): def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): util = nova.api.openstack.placement.util try: microversion = extract_version(req.headers) except ValueError as exc: raise webob.exc.HTTPNotAcceptable( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) except TypeError as exc: raise webob.exc.HTTPBadRequest( _('Invalid microversion: %(error)s') % {'error': exc}, json_formatter=util.json_error_formatter) req.environ[MICROVERSION_ENVIRON] = microversion microversion_header = '%s %s' % (SERVICE_TYPE, microversion) try: response = req.get_response(self.application) except webob.exc.HTTPError as exc: exc.headers.add(Version.HEADER, microversion_header) raise exc response.headers.add(Version.HEADER, microversion_header) response.headers.add('vary', Version.HEADER) return response class Version(collections.namedtuple('Version', 'major minor')): HEADER = 'OpenStack-API-Version' MIN_VERSION = None MAX_VERSION = None def __str__(self): return '%s.%s' % (self.major, self.minor) @property def max_version(self): if not self.MAX_VERSION: self.MAX_VERSION = parse_version_string(max_version_string()) return self.MAX_VERSION @property def min_version(self): if not self.MIN_VERSION: self.MIN_VERSION = parse_version_string(min_version_string()) return self.MIN_VERSION def matches(self, min_version=None, max_version=None): if min_version is None: min_version = self.min_version if max_version is None: max_version = self.max_version return min_version <= self <= max_version def extract_version(headers): found_version = microversion_parse.get_version(headers, service_type=SERVICE_TYPE) version_string = found_version or min_version_string() request_version = parse_version_string(version_string) if (str(request_version) in VERSIONS and request_version.matches()): return request_version raise ValueError('Unacceptable version header: %s' % version_string) def _fully_qualified_name(obj): try: name = obj.__qualname__ except AttributeError: name = obj.__name__ if inspect.isclass(obj) or inspect.isfunction(obj): moduleName = obj.__module__ return "%s.%s" % (moduleName, name) elif inspect.ismethod(obj): try: cls = obj.im_class except AttributeError: return "%s.%s" % (obj.__module__, obj.__qualname__) else: className = _fully_qualified_name(cls) return "%s.%s" 
% (className, name) return name def _find_method(f, version): qualified_name = _fully_qualified_name(f) method_list = VERSIONED_METHODS.get(qualified_name, []) for min_version, max_version, func in method_list: if min_version <= version <= max_version: return func raise webob.exc.HTTPNotFound() def version_handler(min_ver, max_ver=None): def decorator(f): min_version = parse_version_string(min_ver) if max_ver: max_version = parse_version_string(max_ver) else: max_version = parse_version_string(max_version_string()) qualified_name = _fully_qualified_name(f) VERSIONED_METHODS[qualified_name].append( (min_version, max_version, f)) def decorated_func(req, *args, **kwargs): version = req.environ[MICROVERSION_ENVIRON] return _find_method(f, version)(req, *args, **kwargs) VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0], reverse=True) return decorated_func return decorator
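version_handler together with _find_method above amounts to a small dispatch table: each handler name maps to (min, max, func) windows, kept sorted with the highest minimum first, and the first window containing the request version wins. A self-contained sketch of that dispatch, with illustrative handler names that are not part of the module:

import collections

VERSIONED = collections.defaultdict(list)

def register(name, min_version, max_version, func):
    VERSIONED[name].append((min_version, max_version, func))
    # Highest minimum first, as version_handler does after each registration.
    VERSIONED[name].sort(key=lambda entry: entry[0], reverse=True)

register('list_resources', (1, 4), (1, 5), lambda: 'new handler')
register('list_resources', (1, 0), (1, 3), lambda: 'old handler')

def dispatch(name, version):
    for min_version, max_version, func in VERSIONED[name]:
        if min_version <= version <= max_version:
            return func()
    raise LookupError('no handler registered for this version')

assert dispatch('list_resources', (1, 2)) == 'old handler'
assert dispatch('list_resources', (1, 5)) == 'new handler'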
true
true
1c49e3447e6cad31e2cefda415baed1335d3fa12
16,213
py
Python
homeassistant/const.py
84KaliPleXon3/home-assistant-core
7194b74580535395b5f100de98643e029bd0f1b6
[ "Apache-2.0" ]
2
2021-09-13T21:44:02.000Z
2021-12-17T21:20:51.000Z
homeassistant/const.py
84KaliPleXon3/home-assistant-core
7194b74580535395b5f100de98643e029bd0f1b6
[ "Apache-2.0" ]
4
2021-02-08T20:47:39.000Z
2022-03-12T00:33:22.000Z
homeassistant/const.py
84KaliPleXon3/home-assistant-core
7194b74580535395b5f100de98643e029bd0f1b6
[ "Apache-2.0" ]
2
2020-11-04T07:40:01.000Z
2021-09-13T21:44:03.000Z
"""Constants used by Home Assistant components.""" MAJOR_VERSION = 0 MINOR_VERSION = 110 PATCH_VERSION = "4" __short_version__ = f"{MAJOR_VERSION}.{MINOR_VERSION}" __version__ = f"{__short_version__}.{PATCH_VERSION}" REQUIRED_PYTHON_VER = (3, 7, 0) # Truthy date string triggers showing related deprecation warning messages. REQUIRED_NEXT_PYTHON_VER = (3, 8, 0) REQUIRED_NEXT_PYTHON_DATE = "" # Format for platform files PLATFORM_FORMAT = "{platform}.{domain}" # Can be used to specify a catch all when registering state or event listeners. MATCH_ALL = "*" # Entity target all constant ENTITY_MATCH_NONE = "none" ENTITY_MATCH_ALL = "all" # If no name is specified DEVICE_DEFAULT_NAME = "Unnamed Device" # Sun events SUN_EVENT_SUNSET = "sunset" SUN_EVENT_SUNRISE = "sunrise" # #### CONFIG #### CONF_ABOVE = "above" CONF_ACCESS_TOKEN = "access_token" CONF_ADDRESS = "address" CONF_AFTER = "after" CONF_ALIAS = "alias" CONF_API_KEY = "api_key" CONF_API_VERSION = "api_version" CONF_ARMING_TIME = "arming_time" CONF_AT = "at" CONF_AUTH_MFA_MODULES = "auth_mfa_modules" CONF_AUTH_PROVIDERS = "auth_providers" CONF_AUTHENTICATION = "authentication" CONF_BASE = "base" CONF_BEFORE = "before" CONF_BELOW = "below" CONF_BINARY_SENSORS = "binary_sensors" CONF_BLACKLIST = "blacklist" CONF_BRIGHTNESS = "brightness" CONF_BROADCAST_ADDRESS = "broadcast_address" CONF_CLIENT_ID = "client_id" CONF_CLIENT_SECRET = "client_secret" CONF_CODE = "code" CONF_COLOR_TEMP = "color_temp" CONF_COMMAND = "command" CONF_COMMAND_CLOSE = "command_close" CONF_COMMAND_OFF = "command_off" CONF_COMMAND_ON = "command_on" CONF_COMMAND_OPEN = "command_open" CONF_COMMAND_STATE = "command_state" CONF_COMMAND_STOP = "command_stop" CONF_CONDITION = "condition" CONF_CONTINUE_ON_TIMEOUT = "continue_on_timeout" CONF_COVERS = "covers" CONF_CURRENCY = "currency" CONF_CUSTOMIZE = "customize" CONF_CUSTOMIZE_DOMAIN = "customize_domain" CONF_CUSTOMIZE_GLOB = "customize_glob" CONF_DELAY = "delay" CONF_DELAY_TIME = "delay_time" CONF_DEVICE = "device" CONF_DEVICE_CLASS = "device_class" CONF_DEVICE_ID = "device_id" CONF_DEVICES = "devices" CONF_DISARM_AFTER_TRIGGER = "disarm_after_trigger" CONF_DISCOVERY = "discovery" CONF_DISKS = "disks" CONF_DISPLAY_CURRENCY = "display_currency" CONF_DISPLAY_OPTIONS = "display_options" CONF_DOMAIN = "domain" CONF_DOMAINS = "domains" CONF_EFFECT = "effect" CONF_ELEVATION = "elevation" CONF_EMAIL = "email" CONF_ENTITIES = "entities" CONF_ENTITY_ID = "entity_id" CONF_ENTITY_NAMESPACE = "entity_namespace" CONF_ENTITY_PICTURE_TEMPLATE = "entity_picture_template" CONF_EVENT = "event" CONF_EVENT_DATA = "event_data" CONF_EVENT_DATA_TEMPLATE = "event_data_template" CONF_EXCLUDE = "exclude" CONF_EXTERNAL_URL = "external_url" CONF_FILE_PATH = "file_path" CONF_FILENAME = "filename" CONF_FOR = "for" CONF_FORCE_UPDATE = "force_update" CONF_FRIENDLY_NAME = "friendly_name" CONF_FRIENDLY_NAME_TEMPLATE = "friendly_name_template" CONF_HEADERS = "headers" CONF_HOST = "host" CONF_HOSTS = "hosts" CONF_HS = "hs" CONF_ICON = "icon" CONF_ICON_TEMPLATE = "icon_template" CONF_ID = "id" CONF_INCLUDE = "include" CONF_INTERNAL_URL = "internal_url" CONF_IP_ADDRESS = "ip_address" CONF_LATITUDE = "latitude" CONF_LIGHTS = "lights" CONF_LONGITUDE = "longitude" CONF_MAC = "mac" CONF_MAXIMUM = "maximum" CONF_METHOD = "method" CONF_MINIMUM = "minimum" CONF_MODE = "mode" CONF_MONITORED_CONDITIONS = "monitored_conditions" CONF_MONITORED_VARIABLES = "monitored_variables" CONF_NAME = "name" CONF_OFFSET = "offset" CONF_OPTIMISTIC = "optimistic" CONF_PACKAGES = "packages" 
CONF_PASSWORD = "password" CONF_PATH = "path" CONF_PAYLOAD = "payload" CONF_PAYLOAD_OFF = "payload_off" CONF_PAYLOAD_ON = "payload_on" CONF_PENDING_TIME = "pending_time" CONF_PIN = "pin" CONF_PLATFORM = "platform" CONF_PORT = "port" CONF_PREFIX = "prefix" CONF_PROFILE_NAME = "profile_name" CONF_PROTOCOL = "protocol" CONF_PROXY_SSL = "proxy_ssl" CONF_QUOTE = "quote" CONF_RADIUS = "radius" CONF_RECIPIENT = "recipient" CONF_REGION = "region" CONF_RESOURCE = "resource" CONF_RESOURCE_TEMPLATE = "resource_template" CONF_RESOURCES = "resources" CONF_RGB = "rgb" CONF_ROOM = "room" CONF_SCAN_INTERVAL = "scan_interval" CONF_SCENE = "scene" CONF_SENDER = "sender" CONF_SENSOR_TYPE = "sensor_type" CONF_SENSORS = "sensors" CONF_SERVICE = "service" CONF_SERVICE_DATA = "data" CONF_SERVICE_TEMPLATE = "service_template" CONF_SHOW_ON_MAP = "show_on_map" CONF_SLAVE = "slave" CONF_SOURCE = "source" CONF_SSL = "ssl" CONF_STATE = "state" CONF_STATE_TEMPLATE = "state_template" CONF_STRUCTURE = "structure" CONF_SWITCHES = "switches" CONF_TEMPERATURE_UNIT = "temperature_unit" CONF_TIME_ZONE = "time_zone" CONF_TIMEOUT = "timeout" CONF_TOKEN = "token" CONF_TRIGGER_TIME = "trigger_time" CONF_TTL = "ttl" CONF_TYPE = "type" CONF_UNIT_OF_MEASUREMENT = "unit_of_measurement" CONF_UNIT_SYSTEM = "unit_system" CONF_URL = "url" CONF_USERNAME = "username" CONF_VALUE_TEMPLATE = "value_template" CONF_VERIFY_SSL = "verify_ssl" CONF_WAIT_TEMPLATE = "wait_template" CONF_WEBHOOK_ID = "webhook_id" CONF_WEEKDAY = "weekday" CONF_WHITE_VALUE = "white_value" CONF_WHITELIST = "whitelist" CONF_WHITELIST_EXTERNAL_DIRS = "whitelist_external_dirs" CONF_XY = "xy" CONF_ZONE = "zone" # #### EVENTS #### EVENT_AUTOMATION_TRIGGERED = "automation_triggered" EVENT_CALL_SERVICE = "call_service" EVENT_COMPONENT_LOADED = "component_loaded" EVENT_CORE_CONFIG_UPDATE = "core_config_updated" EVENT_HOMEASSISTANT_CLOSE = "homeassistant_close" EVENT_HOMEASSISTANT_START = "homeassistant_start" EVENT_HOMEASSISTANT_STARTED = "homeassistant_started" EVENT_HOMEASSISTANT_STOP = "homeassistant_stop" EVENT_HOMEASSISTANT_FINAL_WRITE = "homeassistant_final_write" EVENT_LOGBOOK_ENTRY = "logbook_entry" EVENT_PLATFORM_DISCOVERED = "platform_discovered" EVENT_SCRIPT_STARTED = "script_started" EVENT_SERVICE_REGISTERED = "service_registered" EVENT_SERVICE_REMOVED = "service_removed" EVENT_STATE_CHANGED = "state_changed" EVENT_THEMES_UPDATED = "themes_updated" EVENT_TIMER_OUT_OF_SYNC = "timer_out_of_sync" EVENT_TIME_CHANGED = "time_changed" # #### DEVICE CLASSES #### DEVICE_CLASS_BATTERY = "battery" DEVICE_CLASS_HUMIDITY = "humidity" DEVICE_CLASS_ILLUMINANCE = "illuminance" DEVICE_CLASS_SIGNAL_STRENGTH = "signal_strength" DEVICE_CLASS_TEMPERATURE = "temperature" DEVICE_CLASS_TIMESTAMP = "timestamp" DEVICE_CLASS_PRESSURE = "pressure" DEVICE_CLASS_POWER = "power" # #### STATES #### STATE_ON = "on" STATE_OFF = "off" STATE_HOME = "home" STATE_NOT_HOME = "not_home" STATE_UNKNOWN = "unknown" STATE_OPEN = "open" STATE_OPENING = "opening" STATE_CLOSED = "closed" STATE_CLOSING = "closing" STATE_PLAYING = "playing" STATE_PAUSED = "paused" STATE_IDLE = "idle" STATE_STANDBY = "standby" STATE_ALARM_DISARMED = "disarmed" STATE_ALARM_ARMED_HOME = "armed_home" STATE_ALARM_ARMED_AWAY = "armed_away" STATE_ALARM_ARMED_NIGHT = "armed_night" STATE_ALARM_ARMED_CUSTOM_BYPASS = "armed_custom_bypass" STATE_ALARM_PENDING = "pending" STATE_ALARM_ARMING = "arming" STATE_ALARM_DISARMING = "disarming" STATE_ALARM_TRIGGERED = "triggered" STATE_LOCKED = "locked" STATE_UNLOCKED = "unlocked" STATE_UNAVAILABLE = 
"unavailable" STATE_OK = "ok" STATE_PROBLEM = "problem" # #### STATE AND EVENT ATTRIBUTES #### # Attribution ATTR_ATTRIBUTION = "attribution" # Credentials ATTR_CREDENTIALS = "credentials" # Contains time-related attributes ATTR_NOW = "now" ATTR_DATE = "date" ATTR_TIME = "time" ATTR_SECONDS = "seconds" # Contains domain, service for a SERVICE_CALL event ATTR_DOMAIN = "domain" ATTR_SERVICE = "service" ATTR_SERVICE_DATA = "service_data" # IDs ATTR_ID = "id" # Name ATTR_NAME = "name" # Contains one string or a list of strings, each being an entity id ATTR_ENTITY_ID = "entity_id" # Contains one string or a list of strings, each being an area id ATTR_AREA_ID = "area_id" # String with a friendly name for the entity ATTR_FRIENDLY_NAME = "friendly_name" # A picture to represent entity ATTR_ENTITY_PICTURE = "entity_picture" # Icon to use in the frontend ATTR_ICON = "icon" # The unit of measurement if applicable ATTR_UNIT_OF_MEASUREMENT = "unit_of_measurement" CONF_UNIT_SYSTEM_METRIC: str = "metric" CONF_UNIT_SYSTEM_IMPERIAL: str = "imperial" # Electrical attributes ATTR_VOLTAGE = "voltage" # Contains the information that is discovered ATTR_DISCOVERED = "discovered" # Location of the device/sensor ATTR_LOCATION = "location" ATTR_MODE = "mode" ATTR_BATTERY_CHARGING = "battery_charging" ATTR_BATTERY_LEVEL = "battery_level" ATTR_WAKEUP = "wake_up_interval" # For devices which support a code attribute ATTR_CODE = "code" ATTR_CODE_FORMAT = "code_format" # For calling a device specific command ATTR_COMMAND = "command" # For devices which support an armed state ATTR_ARMED = "device_armed" # For devices which support a locked state ATTR_LOCKED = "locked" # For sensors that support 'tripping', eg. motion and door sensors ATTR_TRIPPED = "device_tripped" # For sensors that support 'tripping' this holds the most recent # time the device was tripped ATTR_LAST_TRIP_TIME = "last_tripped_time" # For all entity's, this hold whether or not it should be hidden ATTR_HIDDEN = "hidden" # Location of the entity ATTR_LATITUDE = "latitude" ATTR_LONGITUDE = "longitude" # Accuracy of location in meters ATTR_GPS_ACCURACY = "gps_accuracy" # If state is assumed ATTR_ASSUMED_STATE = "assumed_state" ATTR_STATE = "state" ATTR_EDITABLE = "editable" ATTR_OPTION = "option" # Bitfield of supported component features for the entity ATTR_SUPPORTED_FEATURES = "supported_features" # Class of device within its domain ATTR_DEVICE_CLASS = "device_class" # Temperature attribute ATTR_TEMPERATURE = "temperature" # #### UNITS OF MEASUREMENT #### # Power units POWER_WATT = "W" POWER_KILO_WATT = f"k{POWER_WATT}" # Voltage units VOLT = "V" # Energy units ENERGY_WATT_HOUR = f"{POWER_WATT}h" ENERGY_KILO_WATT_HOUR = f"k{ENERGY_WATT_HOUR}" # Degree units DEGREE = "°" # Temperature units TEMP_CELSIUS = f"{DEGREE}C" TEMP_FAHRENHEIT = f"{DEGREE}F" TEMP_KELVIN = f"{DEGREE}K" # Time units TIME_MICROSECONDS = "μs" TIME_MILLISECONDS = "ms" TIME_SECONDS = "s" TIME_MINUTES = "min" TIME_HOURS = "h" TIME_DAYS = "d" TIME_WEEKS = "w" TIME_MONTHS = "m" TIME_YEARS = "y" # Length units LENGTH_CENTIMETERS: str = "cm" LENGTH_METERS: str = "m" LENGTH_KILOMETERS: str = "km" LENGTH_INCHES: str = "in" LENGTH_FEET: str = "ft" LENGTH_YARD: str = "yd" LENGTH_MILES: str = "mi" # Frequency units FREQUENCY_HERTZ = "Hz" FREQUENCY_GIGAHERTZ = f"G{FREQUENCY_HERTZ}" # Pressure units PRESSURE_PA: str = "Pa" PRESSURE_HPA: str = "hPa" PRESSURE_BAR: str = "bar" PRESSURE_MBAR: str = "mbar" PRESSURE_INHG: str = "inHg" PRESSURE_PSI: str = "psi" # Volume units VOLUME_LITERS: str = "L" 
VOLUME_MILLILITERS: str = "mL" VOLUME_CUBIC_METERS = f"{LENGTH_METERS}³" VOLUME_GALLONS: str = "gal" VOLUME_FLUID_OUNCE: str = "fl. oz." # Area units AREA_SQUARE_METERS = f"{LENGTH_METERS}²" # Mass units MASS_GRAMS: str = "g" MASS_KILOGRAMS: str = "kg" MASS_MILLIGRAMS = "mg" MASS_MICROGRAMS = "µg" MASS_OUNCES: str = "oz" MASS_POUNDS: str = "lb" # Conductivity units CONDUCTIVITY: str = f"µS/{LENGTH_CENTIMETERS}" # UV Index units UV_INDEX: str = "UV index" # Percentage units UNIT_PERCENTAGE = "%" # Irradiation units IRRADIATION_WATTS_PER_SQUARE_METER = f"{POWER_WATT}/{AREA_SQUARE_METERS}" # Concentration units CONCENTRATION_MICROGRAMS_PER_CUBIC_METER = f"{MASS_MICROGRAMS}/{VOLUME_CUBIC_METERS}" CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER = f"{MASS_MILLIGRAMS}/{VOLUME_CUBIC_METERS}" CONCENTRATION_PARTS_PER_MILLION = "ppm" CONCENTRATION_PARTS_PER_BILLION = "ppb" # Speed units SPEED_METERS_PER_SECOND = f"{LENGTH_METERS}/{TIME_SECONDS}" SPEED_KILOMETERS_PER_HOUR = f"{LENGTH_KILOMETERS}/{TIME_HOURS}" SPEED_MILES_PER_HOUR = "mph" # Data units DATA_BITS = "bit" DATA_KILOBITS = "kbit" DATA_MEGABITS = "Mbit" DATA_GIGABITS = "Gbit" DATA_BYTES = "B" DATA_KILOBYTES = "kB" DATA_MEGABYTES = "MB" DATA_GIGABYTES = "GB" DATA_TERABYTES = "TB" DATA_PETABYTES = "PB" DATA_EXABYTES = "EB" DATA_ZETTABYTES = "ZB" DATA_YOTTABYTES = "YB" DATA_KIBIBYTES = "KiB" DATA_MEBIBYTES = "MiB" DATA_GIBIBYTES = "GiB" DATA_TEBIBYTES = "TiB" DATA_PEBIBYTES = "PiB" DATA_EXBIBYTES = "EiB" DATA_ZEBIBYTES = "ZiB" DATA_YOBIBYTES = "YiB" DATA_RATE_BITS_PER_SECOND = f"{DATA_BITS}/{TIME_SECONDS}" DATA_RATE_KILOBITS_PER_SECOND = f"{DATA_KILOBITS}/{TIME_SECONDS}" DATA_RATE_MEGABITS_PER_SECOND = f"{DATA_MEGABITS}/{TIME_SECONDS}" DATA_RATE_GIGABITS_PER_SECOND = f"{DATA_GIGABITS}/{TIME_SECONDS}" DATA_RATE_BYTES_PER_SECOND = f"{DATA_BYTES}/{TIME_SECONDS}" DATA_RATE_KILOBYTES_PER_SECOND = f"{DATA_KILOBYTES}/{TIME_SECONDS}" DATA_RATE_MEGABYTES_PER_SECOND = f"{DATA_MEGABYTES}/{TIME_SECONDS}" DATA_RATE_GIGABYTES_PER_SECOND = f"{DATA_GIGABYTES}/{TIME_SECONDS}" DATA_RATE_KIBIBYTES_PER_SECOND = f"{DATA_KIBIBYTES}/{TIME_SECONDS}" DATA_RATE_MEBIBYTES_PER_SECOND = f"{DATA_MEBIBYTES}/{TIME_SECONDS}" DATA_RATE_GIBIBYTES_PER_SECOND = f"{DATA_GIBIBYTES}/{TIME_SECONDS}" # #### SERVICES #### SERVICE_HOMEASSISTANT_STOP = "stop" SERVICE_HOMEASSISTANT_RESTART = "restart" SERVICE_TURN_ON = "turn_on" SERVICE_TURN_OFF = "turn_off" SERVICE_TOGGLE = "toggle" SERVICE_RELOAD = "reload" SERVICE_VOLUME_UP = "volume_up" SERVICE_VOLUME_DOWN = "volume_down" SERVICE_VOLUME_MUTE = "volume_mute" SERVICE_VOLUME_SET = "volume_set" SERVICE_MEDIA_PLAY_PAUSE = "media_play_pause" SERVICE_MEDIA_PLAY = "media_play" SERVICE_MEDIA_PAUSE = "media_pause" SERVICE_MEDIA_STOP = "media_stop" SERVICE_MEDIA_NEXT_TRACK = "media_next_track" SERVICE_MEDIA_PREVIOUS_TRACK = "media_previous_track" SERVICE_MEDIA_SEEK = "media_seek" SERVICE_SHUFFLE_SET = "shuffle_set" SERVICE_ALARM_DISARM = "alarm_disarm" SERVICE_ALARM_ARM_HOME = "alarm_arm_home" SERVICE_ALARM_ARM_AWAY = "alarm_arm_away" SERVICE_ALARM_ARM_NIGHT = "alarm_arm_night" SERVICE_ALARM_ARM_CUSTOM_BYPASS = "alarm_arm_custom_bypass" SERVICE_ALARM_TRIGGER = "alarm_trigger" SERVICE_LOCK = "lock" SERVICE_UNLOCK = "unlock" SERVICE_OPEN = "open" SERVICE_CLOSE = "close" SERVICE_CLOSE_COVER = "close_cover" SERVICE_CLOSE_COVER_TILT = "close_cover_tilt" SERVICE_OPEN_COVER = "open_cover" SERVICE_OPEN_COVER_TILT = "open_cover_tilt" SERVICE_SET_COVER_POSITION = "set_cover_position" SERVICE_SET_COVER_TILT_POSITION = "set_cover_tilt_position" SERVICE_STOP_COVER = 
"stop_cover" SERVICE_STOP_COVER_TILT = "stop_cover_tilt" SERVICE_TOGGLE_COVER_TILT = "toggle_cover_tilt" SERVICE_SELECT_OPTION = "select_option" # #### API / REMOTE #### SERVER_PORT = 8123 URL_ROOT = "/" URL_API = "/api/" URL_API_STREAM = "/api/stream" URL_API_CONFIG = "/api/config" URL_API_DISCOVERY_INFO = "/api/discovery_info" URL_API_STATES = "/api/states" URL_API_STATES_ENTITY = "/api/states/{}" URL_API_EVENTS = "/api/events" URL_API_EVENTS_EVENT = "/api/events/{}" URL_API_SERVICES = "/api/services" URL_API_SERVICES_SERVICE = "/api/services/{}/{}" URL_API_COMPONENTS = "/api/components" URL_API_ERROR_LOG = "/api/error_log" URL_API_LOG_OUT = "/api/log_out" URL_API_TEMPLATE = "/api/template" HTTP_OK = 200 HTTP_CREATED = 201 HTTP_MOVED_PERMANENTLY = 301 HTTP_BAD_REQUEST = 400 HTTP_UNAUTHORIZED = 401 HTTP_FORBIDDEN = 403 HTTP_NOT_FOUND = 404 HTTP_METHOD_NOT_ALLOWED = 405 HTTP_UNPROCESSABLE_ENTITY = 422 HTTP_TOO_MANY_REQUESTS = 429 HTTP_INTERNAL_SERVER_ERROR = 500 HTTP_SERVICE_UNAVAILABLE = 503 HTTP_BASIC_AUTHENTICATION = "basic" HTTP_DIGEST_AUTHENTICATION = "digest" HTTP_HEADER_X_REQUESTED_WITH = "X-Requested-With" CONTENT_TYPE_JSON = "application/json" CONTENT_TYPE_MULTIPART = "multipart/x-mixed-replace; boundary={}" CONTENT_TYPE_TEXT_PLAIN = "text/plain" # The exit code to send to request a restart RESTART_EXIT_CODE = 100 UNIT_NOT_RECOGNIZED_TEMPLATE: str = "{} is not a recognized {} unit." LENGTH: str = "length" MASS: str = "mass" PRESSURE: str = "pressure" VOLUME: str = "volume" TEMPERATURE: str = "temperature" SPEED_MS: str = "speed_ms" ILLUMINANCE: str = "illuminance" WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] # The degree of precision for platforms PRECISION_WHOLE = 1 PRECISION_HALVES = 0.5 PRECISION_TENTHS = 0.1 # Static list of entities that will never be exposed to # cloud, alexa, or google_home components CLOUD_NEVER_EXPOSED_ENTITIES = ["group.all_locks"]
27.479661
85
0.784247
MAJOR_VERSION = 0 MINOR_VERSION = 110 PATCH_VERSION = "4" __short_version__ = f"{MAJOR_VERSION}.{MINOR_VERSION}" __version__ = f"{__short_version__}.{PATCH_VERSION}" REQUIRED_PYTHON_VER = (3, 7, 0) REQUIRED_NEXT_PYTHON_VER = (3, 8, 0) REQUIRED_NEXT_PYTHON_DATE = "" PLATFORM_FORMAT = "{platform}.{domain}" MATCH_ALL = "*" ENTITY_MATCH_NONE = "none" ENTITY_MATCH_ALL = "all" DEVICE_DEFAULT_NAME = "Unnamed Device" SUN_EVENT_SUNSET = "sunset" SUN_EVENT_SUNRISE = "sunrise" S = "address" CONF_AFTER = "after" CONF_ALIAS = "alias" CONF_API_KEY = "api_key" CONF_API_VERSION = "api_version" CONF_ARMING_TIME = "arming_time" CONF_AT = "at" CONF_AUTH_MFA_MODULES = "auth_mfa_modules" CONF_AUTH_PROVIDERS = "auth_providers" CONF_AUTHENTICATION = "authentication" CONF_BASE = "base" CONF_BEFORE = "before" CONF_BELOW = "below" CONF_BINARY_SENSORS = "binary_sensors" CONF_BLACKLIST = "blacklist" CONF_BRIGHTNESS = "brightness" CONF_BROADCAST_ADDRESS = "broadcast_address" CONF_CLIENT_ID = "client_id" CONF_CLIENT_SECRET = "client_secret" CONF_CODE = "code" CONF_COLOR_TEMP = "color_temp" CONF_COMMAND = "command" CONF_COMMAND_CLOSE = "command_close" CONF_COMMAND_OFF = "command_off" CONF_COMMAND_ON = "command_on" CONF_COMMAND_OPEN = "command_open" CONF_COMMAND_STATE = "command_state" CONF_COMMAND_STOP = "command_stop" CONF_CONDITION = "condition" CONF_CONTINUE_ON_TIMEOUT = "continue_on_timeout" CONF_COVERS = "covers" CONF_CURRENCY = "currency" CONF_CUSTOMIZE = "customize" CONF_CUSTOMIZE_DOMAIN = "customize_domain" CONF_CUSTOMIZE_GLOB = "customize_glob" CONF_DELAY = "delay" CONF_DELAY_TIME = "delay_time" CONF_DEVICE = "device" CONF_DEVICE_CLASS = "device_class" CONF_DEVICE_ID = "device_id" CONF_DEVICES = "devices" CONF_DISARM_AFTER_TRIGGER = "disarm_after_trigger" CONF_DISCOVERY = "discovery" CONF_DISKS = "disks" CONF_DISPLAY_CURRENCY = "display_currency" CONF_DISPLAY_OPTIONS = "display_options" CONF_DOMAIN = "domain" CONF_DOMAINS = "domains" CONF_EFFECT = "effect" CONF_ELEVATION = "elevation" CONF_EMAIL = "email" CONF_ENTITIES = "entities" CONF_ENTITY_ID = "entity_id" CONF_ENTITY_NAMESPACE = "entity_namespace" CONF_ENTITY_PICTURE_TEMPLATE = "entity_picture_template" CONF_EVENT = "event" CONF_EVENT_DATA = "event_data" CONF_EVENT_DATA_TEMPLATE = "event_data_template" CONF_EXCLUDE = "exclude" CONF_EXTERNAL_URL = "external_url" CONF_FILE_PATH = "file_path" CONF_FILENAME = "filename" CONF_FOR = "for" CONF_FORCE_UPDATE = "force_update" CONF_FRIENDLY_NAME = "friendly_name" CONF_FRIENDLY_NAME_TEMPLATE = "friendly_name_template" CONF_HEADERS = "headers" CONF_HOST = "host" CONF_HOSTS = "hosts" CONF_HS = "hs" CONF_ICON = "icon" CONF_ICON_TEMPLATE = "icon_template" CONF_ID = "id" CONF_INCLUDE = "include" CONF_INTERNAL_URL = "internal_url" CONF_IP_ADDRESS = "ip_address" CONF_LATITUDE = "latitude" CONF_LIGHTS = "lights" CONF_LONGITUDE = "longitude" CONF_MAC = "mac" CONF_MAXIMUM = "maximum" CONF_METHOD = "method" CONF_MINIMUM = "minimum" CONF_MODE = "mode" CONF_MONITORED_CONDITIONS = "monitored_conditions" CONF_MONITORED_VARIABLES = "monitored_variables" CONF_NAME = "name" CONF_OFFSET = "offset" CONF_OPTIMISTIC = "optimistic" CONF_PACKAGES = "packages" CONF_PASSWORD = "password" CONF_PATH = "path" CONF_PAYLOAD = "payload" CONF_PAYLOAD_OFF = "payload_off" CONF_PAYLOAD_ON = "payload_on" CONF_PENDING_TIME = "pending_time" CONF_PIN = "pin" CONF_PLATFORM = "platform" CONF_PORT = "port" CONF_PREFIX = "prefix" CONF_PROFILE_NAME = "profile_name" CONF_PROTOCOL = "protocol" CONF_PROXY_SSL = "proxy_ssl" CONF_QUOTE = "quote" CONF_RADIUS = 
"radius" CONF_RECIPIENT = "recipient" CONF_REGION = "region" CONF_RESOURCE = "resource" CONF_RESOURCE_TEMPLATE = "resource_template" CONF_RESOURCES = "resources" CONF_RGB = "rgb" CONF_ROOM = "room" CONF_SCAN_INTERVAL = "scan_interval" CONF_SCENE = "scene" CONF_SENDER = "sender" CONF_SENSOR_TYPE = "sensor_type" CONF_SENSORS = "sensors" CONF_SERVICE = "service" CONF_SERVICE_DATA = "data" CONF_SERVICE_TEMPLATE = "service_template" CONF_SHOW_ON_MAP = "show_on_map" CONF_SLAVE = "slave" CONF_SOURCE = "source" CONF_SSL = "ssl" CONF_STATE = "state" CONF_STATE_TEMPLATE = "state_template" CONF_STRUCTURE = "structure" CONF_SWITCHES = "switches" CONF_TEMPERATURE_UNIT = "temperature_unit" CONF_TIME_ZONE = "time_zone" CONF_TIMEOUT = "timeout" CONF_TOKEN = "token" CONF_TRIGGER_TIME = "trigger_time" CONF_TTL = "ttl" CONF_TYPE = "type" CONF_UNIT_OF_MEASUREMENT = "unit_of_measurement" CONF_UNIT_SYSTEM = "unit_system" CONF_URL = "url" CONF_USERNAME = "username" CONF_VALUE_TEMPLATE = "value_template" CONF_VERIFY_SSL = "verify_ssl" CONF_WAIT_TEMPLATE = "wait_template" CONF_WEBHOOK_ID = "webhook_id" CONF_WEEKDAY = "weekday" CONF_WHITE_VALUE = "white_value" CONF_WHITELIST = "whitelist" CONF_WHITELIST_EXTERNAL_DIRS = "whitelist_external_dirs" CONF_XY = "xy" CONF_ZONE = "zone" ICE = "call_service" EVENT_COMPONENT_LOADED = "component_loaded" EVENT_CORE_CONFIG_UPDATE = "core_config_updated" EVENT_HOMEASSISTANT_CLOSE = "homeassistant_close" EVENT_HOMEASSISTANT_START = "homeassistant_start" EVENT_HOMEASSISTANT_STARTED = "homeassistant_started" EVENT_HOMEASSISTANT_STOP = "homeassistant_stop" EVENT_HOMEASSISTANT_FINAL_WRITE = "homeassistant_final_write" EVENT_LOGBOOK_ENTRY = "logbook_entry" EVENT_PLATFORM_DISCOVERED = "platform_discovered" EVENT_SCRIPT_STARTED = "script_started" EVENT_SERVICE_REGISTERED = "service_registered" EVENT_SERVICE_REMOVED = "service_removed" EVENT_STATE_CHANGED = "state_changed" EVENT_THEMES_UPDATED = "themes_updated" EVENT_TIMER_OUT_OF_SYNC = "timer_out_of_sync" EVENT_TIME_CHANGED = "time_changed" uminance" DEVICE_CLASS_SIGNAL_STRENGTH = "signal_strength" DEVICE_CLASS_TEMPERATURE = "temperature" DEVICE_CLASS_TIMESTAMP = "timestamp" DEVICE_CLASS_PRESSURE = "pressure" DEVICE_CLASS_POWER = "power" E = "not_home" STATE_UNKNOWN = "unknown" STATE_OPEN = "open" STATE_OPENING = "opening" STATE_CLOSED = "closed" STATE_CLOSING = "closing" STATE_PLAYING = "playing" STATE_PAUSED = "paused" STATE_IDLE = "idle" STATE_STANDBY = "standby" STATE_ALARM_DISARMED = "disarmed" STATE_ALARM_ARMED_HOME = "armed_home" STATE_ALARM_ARMED_AWAY = "armed_away" STATE_ALARM_ARMED_NIGHT = "armed_night" STATE_ALARM_ARMED_CUSTOM_BYPASS = "armed_custom_bypass" STATE_ALARM_PENDING = "pending" STATE_ALARM_ARMING = "arming" STATE_ALARM_DISARMING = "disarming" STATE_ALARM_TRIGGERED = "triggered" STATE_LOCKED = "locked" STATE_UNLOCKED = "unlocked" STATE_UNAVAILABLE = "unavailable" STATE_OK = "ok" STATE_PROBLEM = "problem" ds" ATTR_DOMAIN = "domain" ATTR_SERVICE = "service" ATTR_SERVICE_DATA = "service_data" ATTR_ID = "id" ATTR_NAME = "name" ATTR_ENTITY_ID = "entity_id" ATTR_AREA_ID = "area_id" ATTR_FRIENDLY_NAME = "friendly_name" ATTR_ENTITY_PICTURE = "entity_picture" ATTR_ICON = "icon" ATTR_UNIT_OF_MEASUREMENT = "unit_of_measurement" CONF_UNIT_SYSTEM_METRIC: str = "metric" CONF_UNIT_SYSTEM_IMPERIAL: str = "imperial" ATTR_VOLTAGE = "voltage" ATTR_DISCOVERED = "discovered" ATTR_LOCATION = "location" ATTR_MODE = "mode" ATTR_BATTERY_CHARGING = "battery_charging" ATTR_BATTERY_LEVEL = "battery_level" ATTR_WAKEUP = "wake_up_interval" 
ATTR_CODE = "code" ATTR_CODE_FORMAT = "code_format" ATTR_COMMAND = "command" ATTR_ARMED = "device_armed" ATTR_LOCKED = "locked" ATTR_TRIPPED = "device_tripped" ATTR_LAST_TRIP_TIME = "last_tripped_time" ATTR_HIDDEN = "hidden" # Location of the entity ATTR_LATITUDE = "latitude" ATTR_LONGITUDE = "longitude" # Accuracy of location in meters ATTR_GPS_ACCURACY = "gps_accuracy" # If state is assumed ATTR_ASSUMED_STATE = "assumed_state" ATTR_STATE = "state" ATTR_EDITABLE = "editable" ATTR_OPTION = "option" # Bitfield of supported component features for the entity ATTR_SUPPORTED_FEATURES = "supported_features" # Class of device within its domain ATTR_DEVICE_CLASS = "device_class" # Temperature attribute ATTR_TEMPERATURE = "temperature" # #### UNITS OF MEASUREMENT #### # Power units POWER_WATT = "W" POWER_KILO_WATT = f"k{POWER_WATT}" # Voltage units VOLT = "V" # Energy units ENERGY_WATT_HOUR = f"{POWER_WATT}h" ENERGY_KILO_WATT_HOUR = f"k{ENERGY_WATT_HOUR}" # Degree units DEGREE = "°" # Temperature units TEMP_CELSIUS = f"{DEGREE}C" TEMP_FAHRENHEIT = f"{DEGREE}F" TEMP_KELVIN = f"{DEGREE}K" # Time units TIME_MICROSECONDS = "μs" TIME_MILLISECONDS = "ms" TIME_SECONDS = "s" TIME_MINUTES = "min" TIME_HOURS = "h" TIME_DAYS = "d" TIME_WEEKS = "w" TIME_MONTHS = "m" TIME_YEARS = "y" # Length units LENGTH_CENTIMETERS: str = "cm" LENGTH_METERS: str = "m" LENGTH_KILOMETERS: str = "km" LENGTH_INCHES: str = "in" LENGTH_FEET: str = "ft" LENGTH_YARD: str = "yd" LENGTH_MILES: str = "mi" # Frequency units FREQUENCY_HERTZ = "Hz" FREQUENCY_GIGAHERTZ = f"G{FREQUENCY_HERTZ}" # Pressure units PRESSURE_PA: str = "Pa" PRESSURE_HPA: str = "hPa" PRESSURE_BAR: str = "bar" PRESSURE_MBAR: str = "mbar" PRESSURE_INHG: str = "inHg" PRESSURE_PSI: str = "psi" # Volume units VOLUME_LITERS: str = "L" VOLUME_MILLILITERS: str = "mL" VOLUME_CUBIC_METERS = f"{LENGTH_METERS}³" VOLUME_GALLONS: str = "gal" VOLUME_FLUID_OUNCE: str = "fl. oz." 
# Area units AREA_SQUARE_METERS = f"{LENGTH_METERS}²" # Mass units MASS_GRAMS: str = "g" MASS_KILOGRAMS: str = "kg" MASS_MILLIGRAMS = "mg" MASS_MICROGRAMS = "µg" MASS_OUNCES: str = "oz" MASS_POUNDS: str = "lb" # Conductivity units CONDUCTIVITY: str = f"µS/{LENGTH_CENTIMETERS}" # UV Index units UV_INDEX: str = "UV index" # Percentage units UNIT_PERCENTAGE = "%" # Irradiation units IRRADIATION_WATTS_PER_SQUARE_METER = f"{POWER_WATT}/{AREA_SQUARE_METERS}" # Concentration units CONCENTRATION_MICROGRAMS_PER_CUBIC_METER = f"{MASS_MICROGRAMS}/{VOLUME_CUBIC_METERS}" CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER = f"{MASS_MILLIGRAMS}/{VOLUME_CUBIC_METERS}" CONCENTRATION_PARTS_PER_MILLION = "ppm" CONCENTRATION_PARTS_PER_BILLION = "ppb" # Speed units SPEED_METERS_PER_SECOND = f"{LENGTH_METERS}/{TIME_SECONDS}" SPEED_KILOMETERS_PER_HOUR = f"{LENGTH_KILOMETERS}/{TIME_HOURS}" SPEED_MILES_PER_HOUR = "mph" # Data units DATA_BITS = "bit" DATA_KILOBITS = "kbit" DATA_MEGABITS = "Mbit" DATA_GIGABITS = "Gbit" DATA_BYTES = "B" DATA_KILOBYTES = "kB" DATA_MEGABYTES = "MB" DATA_GIGABYTES = "GB" DATA_TERABYTES = "TB" DATA_PETABYTES = "PB" DATA_EXABYTES = "EB" DATA_ZETTABYTES = "ZB" DATA_YOTTABYTES = "YB" DATA_KIBIBYTES = "KiB" DATA_MEBIBYTES = "MiB" DATA_GIBIBYTES = "GiB" DATA_TEBIBYTES = "TiB" DATA_PEBIBYTES = "PiB" DATA_EXBIBYTES = "EiB" DATA_ZEBIBYTES = "ZiB" DATA_YOBIBYTES = "YiB" DATA_RATE_BITS_PER_SECOND = f"{DATA_BITS}/{TIME_SECONDS}" DATA_RATE_KILOBITS_PER_SECOND = f"{DATA_KILOBITS}/{TIME_SECONDS}" DATA_RATE_MEGABITS_PER_SECOND = f"{DATA_MEGABITS}/{TIME_SECONDS}" DATA_RATE_GIGABITS_PER_SECOND = f"{DATA_GIGABITS}/{TIME_SECONDS}" DATA_RATE_BYTES_PER_SECOND = f"{DATA_BYTES}/{TIME_SECONDS}" DATA_RATE_KILOBYTES_PER_SECOND = f"{DATA_KILOBYTES}/{TIME_SECONDS}" DATA_RATE_MEGABYTES_PER_SECOND = f"{DATA_MEGABYTES}/{TIME_SECONDS}" DATA_RATE_GIGABYTES_PER_SECOND = f"{DATA_GIGABYTES}/{TIME_SECONDS}" DATA_RATE_KIBIBYTES_PER_SECOND = f"{DATA_KIBIBYTES}/{TIME_SECONDS}" DATA_RATE_MEBIBYTES_PER_SECOND = f"{DATA_MEBIBYTES}/{TIME_SECONDS}" DATA_RATE_GIBIBYTES_PER_SECOND = f"{DATA_GIBIBYTES}/{TIME_SECONDS}" # #### SERVICES #### SERVICE_HOMEASSISTANT_STOP = "stop" SERVICE_HOMEASSISTANT_RESTART = "restart" SERVICE_TURN_ON = "turn_on" SERVICE_TURN_OFF = "turn_off" SERVICE_TOGGLE = "toggle" SERVICE_RELOAD = "reload" SERVICE_VOLUME_UP = "volume_up" SERVICE_VOLUME_DOWN = "volume_down" SERVICE_VOLUME_MUTE = "volume_mute" SERVICE_VOLUME_SET = "volume_set" SERVICE_MEDIA_PLAY_PAUSE = "media_play_pause" SERVICE_MEDIA_PLAY = "media_play" SERVICE_MEDIA_PAUSE = "media_pause" SERVICE_MEDIA_STOP = "media_stop" SERVICE_MEDIA_NEXT_TRACK = "media_next_track" SERVICE_MEDIA_PREVIOUS_TRACK = "media_previous_track" SERVICE_MEDIA_SEEK = "media_seek" SERVICE_SHUFFLE_SET = "shuffle_set" SERVICE_ALARM_DISARM = "alarm_disarm" SERVICE_ALARM_ARM_HOME = "alarm_arm_home" SERVICE_ALARM_ARM_AWAY = "alarm_arm_away" SERVICE_ALARM_ARM_NIGHT = "alarm_arm_night" SERVICE_ALARM_ARM_CUSTOM_BYPASS = "alarm_arm_custom_bypass" SERVICE_ALARM_TRIGGER = "alarm_trigger" SERVICE_LOCK = "lock" SERVICE_UNLOCK = "unlock" SERVICE_OPEN = "open" SERVICE_CLOSE = "close" SERVICE_CLOSE_COVER = "close_cover" SERVICE_CLOSE_COVER_TILT = "close_cover_tilt" SERVICE_OPEN_COVER = "open_cover" SERVICE_OPEN_COVER_TILT = "open_cover_tilt" SERVICE_SET_COVER_POSITION = "set_cover_position" SERVICE_SET_COVER_TILT_POSITION = "set_cover_tilt_position" SERVICE_STOP_COVER = "stop_cover" SERVICE_STOP_COVER_TILT = "stop_cover_tilt" SERVICE_TOGGLE_COVER_TILT = "toggle_cover_tilt" SERVICE_SELECT_OPTION = 
"select_option" # #### API / REMOTE #### SERVER_PORT = 8123 URL_ROOT = "/" URL_API = "/api/" URL_API_STREAM = "/api/stream" URL_API_CONFIG = "/api/config" URL_API_DISCOVERY_INFO = "/api/discovery_info" URL_API_STATES = "/api/states" URL_API_STATES_ENTITY = "/api/states/{}" URL_API_EVENTS = "/api/events" URL_API_EVENTS_EVENT = "/api/events/{}" URL_API_SERVICES = "/api/services" URL_API_SERVICES_SERVICE = "/api/services/{}/{}" URL_API_COMPONENTS = "/api/components" URL_API_ERROR_LOG = "/api/error_log" URL_API_LOG_OUT = "/api/log_out" URL_API_TEMPLATE = "/api/template" HTTP_OK = 200 HTTP_CREATED = 201 HTTP_MOVED_PERMANENTLY = 301 HTTP_BAD_REQUEST = 400 HTTP_UNAUTHORIZED = 401 HTTP_FORBIDDEN = 403 HTTP_NOT_FOUND = 404 HTTP_METHOD_NOT_ALLOWED = 405 HTTP_UNPROCESSABLE_ENTITY = 422 HTTP_TOO_MANY_REQUESTS = 429 HTTP_INTERNAL_SERVER_ERROR = 500 HTTP_SERVICE_UNAVAILABLE = 503 HTTP_BASIC_AUTHENTICATION = "basic" HTTP_DIGEST_AUTHENTICATION = "digest" HTTP_HEADER_X_REQUESTED_WITH = "X-Requested-With" CONTENT_TYPE_JSON = "application/json" CONTENT_TYPE_MULTIPART = "multipart/x-mixed-replace; boundary={}" CONTENT_TYPE_TEXT_PLAIN = "text/plain" # The exit code to send to request a restart RESTART_EXIT_CODE = 100 UNIT_NOT_RECOGNIZED_TEMPLATE: str = "{} is not a recognized {} unit." LENGTH: str = "length" MASS: str = "mass" PRESSURE: str = "pressure" VOLUME: str = "volume" TEMPERATURE: str = "temperature" SPEED_MS: str = "speed_ms" ILLUMINANCE: str = "illuminance" WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] # The degree of precision for platforms PRECISION_WHOLE = 1 PRECISION_HALVES = 0.5 PRECISION_TENTHS = 0.1 # Static list of entities that will never be exposed to # cloud, alexa, or google_home components CLOUD_NEVER_EXPOSED_ENTITIES = ["group.all_locks"]
true
true
1c49e547ddbba557b3ea3778bdb44456db6149f8
10,716
py
Python
astromodels/sources/extended_source.py
abtinshahidi/astromodels
580e972ccc69f4fad57e22030923ee27f9d59ee3
[ "BSD-3-Clause" ]
1
2019-07-05T18:36:59.000Z
2019-07-05T18:36:59.000Z
astromodels/sources/extended_source.py
abtinshahidi/astromodels
580e972ccc69f4fad57e22030923ee27f9d59ee3
[ "BSD-3-Clause" ]
null
null
null
astromodels/sources/extended_source.py
abtinshahidi/astromodels
580e972ccc69f4fad57e22030923ee27f9d59ee3
[ "BSD-3-Clause" ]
null
null
null
import collections import astropy.units as u import numpy as np from astromodels.core.spectral_component import SpectralComponent from astromodels.core.tree import Node from astromodels.core.units import get_units from astromodels.functions.functions import Constant from astromodels.sources.source import Source, EXTENDED_SOURCE from astromodels.utils.pretty_list import dict_to_list class ExtendedSource(Source, Node): def __init__(self, source_name, spatial_shape, spectral_shape=None, components=None): # Check that we have all the required information # and set the units current_u = get_units() if spatial_shape.n_dim == 2: # Now gather the component(s) # We need either a single component, or a list of components, but not both # (that's the ^ symbol) assert (spectral_shape is not None) ^ (components is not None), "You have to provide either a single " \ "component, or a list of components " \ "(but not both)." # If the user specified only one component, make a list of one element with a default name ("main") if spectral_shape is not None: components = [SpectralComponent("main", spectral_shape)] # Components in this case have energy as x and differential flux as y diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1) # Now set the units of the components for component in components: component.shape.set_units(current_u.energy, diff_flux_units) # Set the units of the brightness spatial_shape.set_units(current_u.angle, current_u.angle, current_u.angle**(-2)) elif spatial_shape.n_dim == 3: # If there is no spectral component then assume that the input is a template, which will provide the # spectrum by itself. We just use a renormalization (a bias) if spectral_shape is None and components is None: # This is a template. Add a component which is just a renormalization spectral_shape = Constant() components = [SpectralComponent("main", spectral_shape)] # set the units diff_flux_units = (current_u.energy * current_u.area * current_u.time * current_u.angle**2) ** (-1) spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, diff_flux_units) else: # the spectral shape has been given, so this is a case where the spatial template gives an # energy-dependent shape and the spectral components give the spectrum assert (spectral_shape is not None) ^ (components is not None), "You can provide either a single " \ "component, or a list of components " \ "(but not both)." 
if spectral_shape is not None: components = [SpectralComponent("main", spectral_shape)] # Assign units diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1) # Now set the units of the components for component in components: component.shape.set_units(current_u.energy, diff_flux_units) # Set the unit of the spatial template spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, current_u.angle**(-2)) else: raise RuntimeError("The spatial shape must have either 2 or 3 dimensions.") # Here we have a list of components Source.__init__(self, components, EXTENDED_SOURCE) # A source is also a Node in the tree Node.__init__(self, source_name) # Add the spatial shape as a child node, with an explicit name self._spatial_shape = spatial_shape self._add_child(self._spatial_shape) # Add the same node also with the name of the function #self._add_child(self._shape, self._shape.__name__) # Add a node called 'spectrum' spectrum_node = Node('spectrum') spectrum_node._add_children(self._components.values()) self._add_child(spectrum_node) @property def spatial_shape(self): """ A generic name for the spatial shape. :return: the spatial shape instance """ return self._spatial_shape def get_spatially_integrated_flux( self, energies): """ Returns total flux of source at the given energy :param energies: energies (array or float) :return: differential flux at given energy """ if not isinstance(energies, np.ndarray): energies = np.array(energies, ndmin=1) # Get the differential flux from the spectral components results = [self.spatial_shape.get_total_spatial_integral(energies) * component.shape(energies) for component in self.components.values()] if isinstance(energies, u.Quantity): # Slow version with units # We need to sum like this (slower) because using np.sum will not preserve the units # (thanks astropy.units) differential_flux = sum(results) else: # Fast version without units, where x is supposed to be in the same units as currently defined in # units.get_units() differential_flux = np.sum(results, 0) return differential_flux def __call__(self, lon, lat, energies): """ Returns brightness of source at the given position and energy :param lon: longitude (array or float) :param lat: latitude (array or float) :param energies: energies (array or float) :return: differential flux at given position and energy """ assert type(lat) == type(lon) and type(lon) == type(energies), "Type mismatch in input of call" if not isinstance(lat, np.ndarray): lat = np.array(lat, ndmin=1) lon = np.array(lon, ndmin=1) energies = np.array(energies, ndmin=1) # Get the differential flux from the spectral components results = [component.shape(energies) for component in self.components.values()] if isinstance(energies, u.Quantity): # Slow version with units # We need to sum like this (slower) because using np.sum will not preserve the units # (thanks astropy.units) differential_flux = sum(results) else: # Fast version without units, where x is supposed to be in the same units as currently defined in # units.get_units() differential_flux = np.sum(results, 0) # Get brightness from spatial model if self._spatial_shape.n_dim == 2: brightness = self._spatial_shape(lon, lat) # In this case the spectrum is the same everywhere n_points = lat.shape[0] n_energies = differential_flux.shape[0] # The following is a little obscure, but it is 6x faster than doing a for loop cube = np.repeat(differential_flux, n_points).reshape(n_energies, n_points).T result = (cube.T * brightness).T else: result = 
self._spatial_shape(lon, lat, energies) * differential_flux # Do not clip the output, otherwise it will not be possible to use ext. sources # with negative fluxes return np.squeeze(result) def has_free_parameters(self): """ Returns True or False whether there is any parameter in this source :return: """ for component in self._components.values(): for par in component.shape.parameters.values(): if par.free: return True for par in self.spatial_shape.parameters.values(): if par.free: return True return False @property def free_parameters(self): """ Returns a dictionary of free parameters for this source We use the parameter path as the key because it's guaranteed to be unique, unlike the parameter name. :return: """ free_parameters = collections.OrderedDict() for component in self._components.values(): for par in component.shape.parameters.values(): if par.free: free_parameters[par.path] = par for par in self.spatial_shape.parameters.values(): if par.free: free_parameters[par.path] = par return free_parameters @property def parameters(self): """ Returns a dictionary of all parameters for this source. We use the parameter path as the key because it's guaranteed to be unique, unlike the parameter name. :return: """ all_parameters = collections.OrderedDict() for component in self._components.values(): for par in component.shape.parameters.values(): all_parameters[par.path] = par for par in self.spatial_shape.parameters.values(): all_parameters[par.path] = par return all_parameters def _repr__base(self, rich_output=False): """ Representation of the object :param rich_output: if True, generates HTML, otherwise text :return: the representation """ # Make a dictionary which will then be transformed in a list repr_dict = collections.OrderedDict() key = '%s (extended source)' % self.name repr_dict[key] = collections.OrderedDict() repr_dict[key]['shape'] = self._spatial_shape.to_dict(minimal=True) repr_dict[key]['spectrum'] = collections.OrderedDict() for component_name, component in self.components.iteritems(): repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True) return dict_to_list(repr_dict, rich_output) def get_boundaries(self): """ Returns the boundaries for this extended source :return: a tuple of tuples ((min. lon, max. lon), (min lat, max lat)) """ return self._spatial_shape.get_boundaries()
32.871166
145
0.60713
import collections import astropy.units as u import numpy as np from astromodels.core.spectral_component import SpectralComponent from astromodels.core.tree import Node from astromodels.core.units import get_units from astromodels.functions.functions import Constant from astromodels.sources.source import Source, EXTENDED_SOURCE from astromodels.utils.pretty_list import dict_to_list class ExtendedSource(Source, Node): def __init__(self, source_name, spatial_shape, spectral_shape=None, components=None): current_u = get_units() if spatial_shape.n_dim == 2: assert (spectral_shape is not None) ^ (components is not None), "You have to provide either a single " \ "component, or a list of components " \ "(but not both)." # If the user specified only one component, make a list of one element with a default name ("main") if spectral_shape is not None: components = [SpectralComponent("main", spectral_shape)] # Components in this case have energy as x and differential flux as y diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1) # Now set the units of the components for component in components: component.shape.set_units(current_u.energy, diff_flux_units) # Set the units of the brightness spatial_shape.set_units(current_u.angle, current_u.angle, current_u.angle**(-2)) elif spatial_shape.n_dim == 3: # If there is no spectral component then assume that the input is a template, which will provide the # spectrum by itself. We just use a renormalization (a bias) if spectral_shape is None and components is None: # This is a template. Add a component which is just a renormalization spectral_shape = Constant() components = [SpectralComponent("main", spectral_shape)] # set the units diff_flux_units = (current_u.energy * current_u.area * current_u.time * current_u.angle**2) ** (-1) spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, diff_flux_units) else: # the spectral shape has been given, so this is a case where the spatial template gives an # energy-dependent shape and the spectral components give the spectrum assert (spectral_shape is not None) ^ (components is not None), "You can provide either a single " \ "component, or a list of components " \ "(but not both)." 
if spectral_shape is not None: components = [SpectralComponent("main", spectral_shape)] # Assign units diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1) # Now set the units of the components for component in components: component.shape.set_units(current_u.energy, diff_flux_units) # Set the unit of the spatial template spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, current_u.angle**(-2)) else: raise RuntimeError("The spatial shape must have either 2 or 3 dimensions.") # Here we have a list of components Source.__init__(self, components, EXTENDED_SOURCE) # A source is also a Node in the tree Node.__init__(self, source_name) # Add the spatial shape as a child node, with an explicit name self._spatial_shape = spatial_shape self._add_child(self._spatial_shape) # Add the same node also with the name of the function #self._add_child(self._shape, self._shape.__name__) # Add a node called 'spectrum' spectrum_node = Node('spectrum') spectrum_node._add_children(self._components.values()) self._add_child(spectrum_node) @property def spatial_shape(self): return self._spatial_shape def get_spatially_integrated_flux( self, energies): if not isinstance(energies, np.ndarray): energies = np.array(energies, ndmin=1) # Get the differential flux from the spectral components results = [self.spatial_shape.get_total_spatial_integral(energies) * component.shape(energies) for component in self.components.values()] if isinstance(energies, u.Quantity): # Slow version with units # We need to sum like this (slower) because using np.sum will not preserve the units # (thanks astropy.units) differential_flux = sum(results) else: # Fast version without units, where x is supposed to be in the same units as currently defined in # units.get_units() differential_flux = np.sum(results, 0) return differential_flux def __call__(self, lon, lat, energies): assert type(lat) == type(lon) and type(lon) == type(energies), "Type mismatch in input of call" if not isinstance(lat, np.ndarray): lat = np.array(lat, ndmin=1) lon = np.array(lon, ndmin=1) energies = np.array(energies, ndmin=1) # Get the differential flux from the spectral components results = [component.shape(energies) for component in self.components.values()] if isinstance(energies, u.Quantity): # Slow version with units # We need to sum like this (slower) because using np.sum will not preserve the units # (thanks astropy.units) differential_flux = sum(results) else: # Fast version without units, where x is supposed to be in the same units as currently defined in # units.get_units() differential_flux = np.sum(results, 0) # Get brightness from spatial model if self._spatial_shape.n_dim == 2: brightness = self._spatial_shape(lon, lat) # In this case the spectrum is the same everywhere n_points = lat.shape[0] n_energies = differential_flux.shape[0] # The following is a little obscure, but it is 6x faster than doing a for loop cube = np.repeat(differential_flux, n_points).reshape(n_energies, n_points).T result = (cube.T * brightness).T else: result = self._spatial_shape(lon, lat, energies) * differential_flux # Do not clip the output, otherwise it will not be possible to use ext. 
sources # with negative fluxes return np.squeeze(result) def has_free_parameters(self): for component in self._components.values(): for par in component.shape.parameters.values(): if par.free: return True for par in self.spatial_shape.parameters.values(): if par.free: return True return False @property def free_parameters(self): free_parameters = collections.OrderedDict() for component in self._components.values(): for par in component.shape.parameters.values(): if par.free: free_parameters[par.path] = par for par in self.spatial_shape.parameters.values(): if par.free: free_parameters[par.path] = par return free_parameters @property def parameters(self): all_parameters = collections.OrderedDict() for component in self._components.values(): for par in component.shape.parameters.values(): all_parameters[par.path] = par for par in self.spatial_shape.parameters.values(): all_parameters[par.path] = par return all_parameters def _repr__base(self, rich_output=False): # Make a dictionary which will then be transformed in a list repr_dict = collections.OrderedDict() key = '%s (extended source)' % self.name repr_dict[key] = collections.OrderedDict() repr_dict[key]['shape'] = self._spatial_shape.to_dict(minimal=True) repr_dict[key]['spectrum'] = collections.OrderedDict() for component_name, component in self.components.iteritems(): repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True) return dict_to_list(repr_dict, rich_output) def get_boundaries(self): return self._spatial_shape.get_boundaries()
true
true
1c49e550917e878195d1309e7b174aba630d18ef
714
py
Python
setup.py
M69k65y/endpoint-logger
96aa2513271ad984bf015c959300f31f6c2acd52
[ "MIT" ]
null
null
null
setup.py
M69k65y/endpoint-logger
96aa2513271ad984bf015c959300f31f6c2acd52
[ "MIT" ]
null
null
null
setup.py
M69k65y/endpoint-logger
96aa2513271ad984bf015c959300f31f6c2acd52
[ "MIT" ]
null
null
null
from setuptools import setup


def readme():
    with open("readme.md") as f:
        return f.read()


setup(
    name = "endpoint-logger",
    version = "1.0.1",
    description = "A Python package used to track Flask API endpoint access. (Built in Python 3)",
    url = "https://github.com/M69k65y/endpoint-logger",
    author = "M69k65y",
    license = "MIT",
    packages = ["endpoint_logger"],
    zip_safe=False,
    install_requires = [
        "flask"
    ],
    classifiers = [
        "Development Status :: 3 - Alpha",
        "Framework :: Flask",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3"
    ],
    keywords = "flask endpoint logger logging",
    long_description = readme(),
    long_description_content_type = "text/markdown"
)
23.8
95
0.680672
from setuptools import setup


def readme():
    with open("readme.md") as f:
        return f.read()


setup(
    name = "endpoint-logger",
    version = "1.0.1",
    description = "A Python package used to track Flask API endpoint access. (Built in Python 3)",
    url = "https://github.com/M69k65y/endpoint-logger",
    author = "M69k65y",
    license = "MIT",
    packages = ["endpoint_logger"],
    zip_safe=False,
    install_requires = [
        "flask"
    ],
    classifiers = [
        "Development Status :: 3 - Alpha",
        "Framework :: Flask",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3"
    ],
    keywords = "flask endpoint logger logging",
    long_description = readme(),
    long_description_content_type = "text/markdown"
)
true
true
1c49e68ee19ec34fdae4c2bd75976597c975284e
1,683
py
Python
pype/modules/deadline/plugins/publish/validate_deadline_connection.py
simonebarbieri/pype
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
[ "MIT" ]
null
null
null
pype/modules/deadline/plugins/publish/validate_deadline_connection.py
simonebarbieri/pype
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
[ "MIT" ]
null
null
null
pype/modules/deadline/plugins/publish/validate_deadline_connection.py
simonebarbieri/pype
a6dc83aa1300738749cbe8e5e2e6d2d1794e0289
[ "MIT" ]
null
null
null
import pyblish.api

from avalon.vendor import requests

from pype.plugin import contextplugin_should_run
import os


class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
    """Validate Deadline Web Service is running"""

    label = "Validate Deadline Web Service"
    order = pyblish.api.ValidatorOrder
    hosts = ["maya", "nuke"]
    families = ["renderlayer"]

    def process(self, context):
        # Workaround bug pyblish-base#250
        if not contextplugin_should_run(self, context):
            return

        deadline_url = (
            context.data["system_settings"]
            ["modules"]
            ["deadline"]
            ["DEADLINE_REST_URL"]
        )

        # Check response
        response = self._requests_get(deadline_url)
        assert response.ok, "Response must be ok"
        assert response.text.startswith("Deadline Web Service "), (
            "Web service did not respond with 'Deadline Web Service'"
        )

    def _requests_get(self, *args, **kwargs):
        """ Wrapper for requests, disabling SSL certificate validation if
            DONT_VERIFY_SSL environment variable is found. This is useful when
            Deadline or Muster server are running with self-signed certificates
            and their certificate is not added to trusted certificates on
            client machines.

            WARNING: disabling SSL certificate validation is defeating one line
            of defense SSL is providing and it is not recommended.
        """
        if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True  # noqa
        return requests.get(*args, **kwargs)
34.346939
97
0.648247
import pyblish.api

from avalon.vendor import requests

from pype.plugin import contextplugin_should_run
import os


class ValidateDeadlineConnection(pyblish.api.ContextPlugin):

    label = "Validate Deadline Web Service"
    order = pyblish.api.ValidatorOrder
    hosts = ["maya", "nuke"]
    families = ["renderlayer"]

    def process(self, context):
        if not contextplugin_should_run(self, context):
            return

        deadline_url = (
            context.data["system_settings"]
            ["modules"]
            ["deadline"]
            ["DEADLINE_REST_URL"]
        )

        response = self._requests_get(deadline_url)
        assert response.ok, "Response must be ok"
        assert response.text.startswith("Deadline Web Service "), (
            "Web service did not respond with 'Deadline Web Service'"
        )

    def _requests_get(self, *args, **kwargs):
        if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
        return requests.get(*args, **kwargs)
true
true
1c49e6c1969a6fefd977839e22d2106128182e9a
1,684
py
Python
waipawama/dag/taxdoo.py
elcolumbio/waipawama
6ca23c3a2f35ba07762fb68d6ce115ff8f826903
[ "MIT" ]
null
null
null
waipawama/dag/taxdoo.py
elcolumbio/waipawama
6ca23c3a2f35ba07762fb68d6ce115ff8f826903
[ "MIT" ]
null
null
null
waipawama/dag/taxdoo.py
elcolumbio/waipawama
6ca23c3a2f35ba07762fb68d6ce115ff8f826903
[ "MIT" ]
null
null
null
from airflow.decorators import dag, task
from airflow.operators.python import get_current_context
from airflow.operators.bash_operator import BashOperator
import datetime

from waipawama.models.taxdoo import TaxdooMeta


def get_timespan() -> str:
    """This is our main parameter in our monthly pipeline."""
    context = get_current_context()
    year_month = '-'.join(context['ds'].split('-')[:2])  # e.g. '2019-09'
    return year_month


@dag(default_args={'owner': 'florian'},
     schedule_interval='@monthly',
     start_date=datetime.datetime(2018, 12, 1),
     tags=['VAT'])
def taxdoo_dag():
    """Ingestion for Taxdoo."""
    @task()
    def taxdoo_external_file() -> str:
        timespan = get_timespan()  # e.g '2021-01'
        meta = TaxdooMeta(timespan=timespan)
        meta.DataFileExists  # throws error if not
        return timespan

    @task()
    def taxdoo_write_parquet(timespan) -> str:
        meta = TaxdooMeta(timespan=timespan)
        meta.save_as_parquet()
        return timespan

    @task()
    def taxdoo_load_to_bigquery(timespan):
        meta = TaxdooMeta(timespan=timespan)
        if not meta.TableExists:
            meta.create_table()
        meta.update_table()  # relaxation and add columns possible
        meta.append_data()
        return timespan

    dbt_test = BashOperator(
        task_id='dbt_test',
        bash_command=('source ~/dbt-env/bin/activate && '
                      'cd ~/projects/accountant/ && dbt test'))

    timespan = taxdoo_external_file()
    timespan = taxdoo_write_parquet(timespan)
    timespan = taxdoo_load_to_bigquery(timespan)
    dbt_test.set_upstream(timespan)


taxdoo_etl_dag = taxdoo_dag()
30.071429
73
0.665677
from airflow.decorators import dag, task
from airflow.operators.python import get_current_context
from airflow.operators.bash_operator import BashOperator
import datetime

from waipawama.models.taxdoo import TaxdooMeta


def get_timespan() -> str:
    context = get_current_context()
    year_month = '-'.join(context['ds'].split('-')[:2])
    return year_month


@dag(default_args={'owner': 'florian'},
     schedule_interval='@monthly',
     start_date=datetime.datetime(2018, 12, 1),
     tags=['VAT'])
def taxdoo_dag():
    @task()
    def taxdoo_external_file() -> str:
        timespan = get_timespan()
        meta = TaxdooMeta(timespan=timespan)
        meta.DataFileExists
        return timespan

    @task()
    def taxdoo_write_parquet(timespan) -> str:
        meta = TaxdooMeta(timespan=timespan)
        meta.save_as_parquet()
        return timespan

    @task()
    def taxdoo_load_to_bigquery(timespan):
        meta = TaxdooMeta(timespan=timespan)
        if not meta.TableExists:
            meta.create_table()
        meta.update_table()
        meta.append_data()
        return timespan

    dbt_test = BashOperator(
        task_id='dbt_test',
        bash_command=('source ~/dbt-env/bin/activate && '
                      'cd ~/projects/accountant/ && dbt test'))

    timespan = taxdoo_external_file()
    timespan = taxdoo_write_parquet(timespan)
    timespan = taxdoo_load_to_bigquery(timespan)
    dbt_test.set_upstream(timespan)


taxdoo_etl_dag = taxdoo_dag()
true
true
1c49e712be10de6dccb1f07fa13fd2281c8ef4d7
29,322
py
Python
pytorch/pytorchcv/models/resnet.py
oliviaweng/imgclsmob
80fffbb46f986614b162c725b21f3d208597ac77
[ "MIT" ]
null
null
null
pytorch/pytorchcv/models/resnet.py
oliviaweng/imgclsmob
80fffbb46f986614b162c725b21f3d208597ac77
[ "MIT" ]
null
null
null
pytorch/pytorchcv/models/resnet.py
oliviaweng/imgclsmob
80fffbb46f986614b162c725b21f3d208597ac77
[ "MIT" ]
null
null
null
""" ResNet for ImageNet-1K, implemented in PyTorch. Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. """ __all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2', 'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b', 'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck', 'ResUnit', 'ResInitBlock'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, conv7x7_block class ResBlock(nn.Module): """ Simple ResNet block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True): super(ResBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=None) def forward(self, x, identity=None): x = self.conv1(x) if identity is not None: # print('adding shorter skip connection) x = x + identity # Shorter skip connection - LIV x = self.conv2(x) return x class ResBottleneck(nn.Module): """ ResNet bottleneck block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. bottleneck_factor : int, default 4 Bottleneck factor. """ def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, conv1_stride=False, bottleneck_factor=4): super(ResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1)) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride), padding=padding, dilation=dilation) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ResUnit(nn.Module): """ ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. 
bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. """ def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(ResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x, None) # Needed for original skip connection AND need line below: x = x + identity # x = self.body(x, identity) # creates shorter skip connection - LIV # Don't need skip connection bc shorter skip connection now in ResBlock() - LIV x = x + identity x = self.activ(x) return x """ LIV """ class NonResBlock(nn.Module): """ Simple ResNet block for residual path in ResNet unit. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. """ def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True): super(NonResBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=None) def forward(self, x): x = self.conv1(x) # NO skip connections at all # if identity is not None: # # print('adding shorter skip connection) # x = x + identity # Shorter skip connection - LIV x = self.conv2(x) return x class NonResUnit(nn.Module): """ ResNet unit with residual connection. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. stride : int or tuple/list of 2 int Strides of the convolution. padding : int or tuple/list of 2 int, default 1 Padding value for the second convolution layer in bottleneck. dilation : int or tuple/list of 2 int, default 1 Dilation value for the second convolution layer in bottleneck. bias : bool, default False Whether the layer uses a bias vector. use_bn : bool, default True Whether to use BatchNorm layer. bottleneck : bool, default True Whether to use a bottleneck or simple block in units. conv1_stride : bool, default False Whether to use stride in the first or the second convolution layer of the block. 
""" def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(NonResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride) else: self.body = NonResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) self.activ = nn.ReLU(inplace=True) def forward(self, x): # if self.resize_identity: # identity = self.identity_conv(x) # else: # identity = x x = self.body(x) # No skip connection # x = x + identity x = self.activ(x) return x """ LIV END """ class ResInitBlock(nn.Module): """ ResNet specific initial block. Parameters: ---------- in_channels : int Number of input channels. out_channels : int Number of output channels. """ def __init__(self, in_channels, out_channels): super(ResInitBlock, self).__init__() self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class ResNet(nn.Module): """ ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- channels : list of list of int Number of output channels for each unit. init_block_channels : int Number of output channels for the initial unit. bottleneck : bool Whether to use a bottleneck or simple block in units. conv1_stride : bool Whether to use stride in the first or the second convolution layer in units. in_channels : int, default 3 Number of input channels. in_size : tuple of two ints, default (224, 224) Spatial size of the expected input image. num_classes : int, default 1000 Number of classification classes. """ def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): """ Create ResNet model with specific parameters. Parameters: ---------- blocks : int Number of blocks. 
bottleneck : bool, default None Whether to use a bottleneck or simple block in units. conv1_stride : bool, default True Whether to use stride in the first or the second convolution layer in units. width_scale : float, default 1.0 Scale factor for width of layers. model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnet10(**kwargs): """ ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=10, model_name="resnet10", **kwargs) def resnet12(**kwargs): """ ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=12, model_name="resnet12", **kwargs) def resnet14(**kwargs): """ ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=14, model_name="resnet14", **kwargs) def resnetbc14b(**kwargs): """ ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs) def resnet16(**kwargs): """ ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=16, model_name="resnet16", **kwargs) def resnet18_wd4(**kwargs): """ ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs) def resnet18_wd2(**kwargs): """ ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs) def resnet18_w3d4(**kwargs): """ ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs) def resnet18(**kwargs): """ ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=18, model_name="resnet18", **kwargs) def resnet26(**kwargs): """ ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs) def resnetbc26b(**kwargs): """ ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs) def resnet34(**kwargs): """ ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=34, model_name="resnet34", **kwargs) def resnetbc38b(**kwargs): """ ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed). Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs) def resnet50(**kwargs): """ ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, model_name="resnet50", **kwargs) def resnet50b(**kwargs): """ ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs) def resnet101(**kwargs): """ ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, model_name="resnet101", **kwargs) def resnet101b(**kwargs): """ ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs) def resnet152(**kwargs): """ ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=152, model_name="resnet152", **kwargs) def resnet152b(**kwargs): """ ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. 
""" return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs) def resnet200(**kwargs): """ ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=200, model_name="resnet200", **kwargs) def resnet200b(**kwargs): """ ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters. """ return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ resnet10, resnet12, resnet14, resnetbc14b, resnet16, resnet18_wd4, resnet18_wd2, resnet18_w3d4, resnet18, resnet26, resnetbc26b, resnet34, resnetbc38b, resnet50, resnet50b, resnet101, resnet101b, resnet152, resnet152b, resnet200, resnet200b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet10 or weight_count == 5418792) assert (model != resnet12 or weight_count == 5492776) assert (model != resnet14 or weight_count == 5788200) assert (model != resnetbc14b or weight_count == 10064936) assert (model != resnet16 or weight_count == 6968872) assert (model != resnet18_wd4 or weight_count == 3937400) assert (model != resnet18_wd2 or weight_count == 5804296) assert (model != resnet18_w3d4 or weight_count == 8476056) assert (model != resnet18 or weight_count == 11689512) assert (model != resnet26 or weight_count == 17960232) assert (model != resnetbc26b or weight_count == 15995176) assert (model != resnet34 or weight_count == 21797672) assert (model != resnetbc38b or weight_count == 21925416) assert (model != resnet50 or weight_count == 25557032) assert (model != resnet50b or weight_count == 25557032) assert (model != resnet101 or weight_count == 44549160) assert (model != resnet101b or weight_count == 44549160) assert (model != resnet152 or weight_count == 60192808) assert (model != resnet152b or weight_count == 60192808) assert (model != resnet200 or weight_count == 64673832) assert (model != resnet200b or weight_count == 64673832) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
32.4
120
0.606405
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2', 'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b', 'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck', 'ResUnit', 'ResInitBlock'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, conv3x3_block, conv7x7_block class ResBlock(nn.Module): def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True): super(ResBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=None) def forward(self, x, identity=None): x = self.conv1(x) if identity is not None: x = x + identity # Shorter skip connection - LIV x = self.conv2(x) return x class ResBottleneck(nn.Module): def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, conv1_stride=False, bottleneck_factor=4): super(ResBottleneck, self).__init__() mid_channels = out_channels // bottleneck_factor self.conv1 = conv1x1_block( in_channels=in_channels, out_channels=mid_channels, stride=(stride if conv1_stride else 1)) self.conv2 = conv3x3_block( in_channels=mid_channels, out_channels=mid_channels, stride=(1 if conv1_stride else stride), padding=padding, dilation=dilation) self.conv3 = conv1x1_block( in_channels=mid_channels, out_channels=out_channels, activation=None) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x class ResUnit(nn.Module): def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(ResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x, None) # Needed for original skip connection AND need line below: x = x + identity # x = self.body(x, identity) # creates shorter skip connection - LIV # Don't need skip connection bc shorter skip connection now in ResBlock() - LIV x = x + identity x = self.activ(x) return x class NonResBlock(nn.Module): def __init__(self, in_channels, out_channels, stride, bias=False, use_bn=True): super(NonResBlock, self).__init__() self.conv1 = conv3x3_block( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) self.conv2 = conv3x3_block( in_channels=out_channels, out_channels=out_channels, bias=bias, use_bn=use_bn, activation=None) def forward(self, x): x = self.conv1(x) r skip connection - LIV x = self.conv2(x) return x class NonResUnit(nn.Module): def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bias=False, use_bn=True, bottleneck=True, conv1_stride=False): super(NonResUnit, 
self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, dilation=dilation, conv1_stride=conv1_stride) else: self.body = NonResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride, bias=bias, use_bn=use_bn) self.activ = nn.ReLU(inplace=True) def forward(self, x): # if self.resize_identity: # identity = self.identity_conv(x) # else: # identity = x x = self.body(x) # No skip connection # x = x + identity x = self.activ(x) return x class ResInitBlock(nn.Module): def __init__(self, in_channels, out_channels): super(ResInitBlock, self).__init__() self.conv = conv7x7_block( in_channels=in_channels, out_channels=out_channels, stride=2) self.pool = nn.MaxPool2d( kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class ResNet(nn.Module): def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(ResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), ResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_resnet(blocks, bottleneck=None, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in 
zip(channels_per_layers, layers)] if width_scale != 1.0: channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij for j, cij in enumerate(ci)] for i, ci in enumerate(channels)] init_block_channels = int(init_block_channels * width_scale) net = ResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def resnet10(**kwargs): return get_resnet(blocks=10, model_name="resnet10", **kwargs) def resnet12(**kwargs): return get_resnet(blocks=12, model_name="resnet12", **kwargs) def resnet14(**kwargs): return get_resnet(blocks=14, model_name="resnet14", **kwargs) def resnetbc14b(**kwargs): return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs) def resnet16(**kwargs): return get_resnet(blocks=16, model_name="resnet16", **kwargs) def resnet18_wd4(**kwargs): return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs) def resnet18_wd2(**kwargs): return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs) def resnet18_w3d4(**kwargs): return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs) def resnet18(**kwargs): return get_resnet(blocks=18, model_name="resnet18", **kwargs) def resnet26(**kwargs): return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs) def resnetbc26b(**kwargs): return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs) def resnet34(**kwargs): return get_resnet(blocks=34, model_name="resnet34", **kwargs) def resnetbc38b(**kwargs): return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs) def resnet50(**kwargs): return get_resnet(blocks=50, model_name="resnet50", **kwargs) def resnet50b(**kwargs): return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs) def resnet101(**kwargs): return get_resnet(blocks=101, model_name="resnet101", **kwargs) def resnet101b(**kwargs): return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs) def resnet152(**kwargs): return get_resnet(blocks=152, model_name="resnet152", **kwargs) def resnet152b(**kwargs): return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs) def resnet200(**kwargs): return get_resnet(blocks=200, model_name="resnet200", **kwargs) def resnet200b(**kwargs): return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs) def _calc_width(net): import numpy as np net_params = filter(lambda p: p.requires_grad, net.parameters()) weight_count = 0 for param in net_params: weight_count += np.prod(param.size()) return weight_count def _test(): import torch pretrained = False models = [ resnet10, resnet12, resnet14, resnetbc14b, resnet16, resnet18_wd4, resnet18_wd2, resnet18_w3d4, resnet18, resnet26, resnetbc26b, resnet34, resnetbc38b, resnet50, resnet50b, resnet101, resnet101b, resnet152, resnet152b, resnet200, resnet200b, ] for model in models: net = model(pretrained=pretrained) # net.train() net.eval() weight_count = _calc_width(net) print("m={}, {}".format(model.__name__, weight_count)) assert (model != resnet10 or 
weight_count == 5418792) assert (model != resnet12 or weight_count == 5492776) assert (model != resnet14 or weight_count == 5788200) assert (model != resnetbc14b or weight_count == 10064936) assert (model != resnet16 or weight_count == 6968872) assert (model != resnet18_wd4 or weight_count == 3937400) assert (model != resnet18_wd2 or weight_count == 5804296) assert (model != resnet18_w3d4 or weight_count == 8476056) assert (model != resnet18 or weight_count == 11689512) assert (model != resnet26 or weight_count == 17960232) assert (model != resnetbc26b or weight_count == 15995176) assert (model != resnet34 or weight_count == 21797672) assert (model != resnetbc38b or weight_count == 21925416) assert (model != resnet50 or weight_count == 25557032) assert (model != resnet50b or weight_count == 25557032) assert (model != resnet101 or weight_count == 44549160) assert (model != resnet101b or weight_count == 44549160) assert (model != resnet152 or weight_count == 60192808) assert (model != resnet152b or weight_count == 60192808) assert (model != resnet200 or weight_count == 64673832) assert (model != resnet200b or weight_count == 64673832) x = torch.randn(1, 3, 224, 224) y = net(x) y.sum().backward() assert (tuple(y.size()) == (1, 1000)) if __name__ == "__main__": _test()
true
true
1c49e72eda40b3456987582ab05a77a3b7b4b840
318
py
Python
pajbot/tests/modules/test_two_word_variations.py
UVClay/SkookumBot
69679c78475662e2b7948fe63d529a755c47fc80
[ "MIT" ]
1
2021-10-02T10:19:38.000Z
2021-10-02T10:19:38.000Z
pajbot/tests/modules/test_two_word_variations.py
UVClay/SkookumBot
69679c78475662e2b7948fe63d529a755c47fc80
[ "MIT" ]
64
2021-01-09T21:28:05.000Z
2022-03-31T10:07:05.000Z
pajbot/tests/modules/test_two_word_variations.py
UVClay/SkookumBot
69679c78475662e2b7948fe63d529a755c47fc80
[ "MIT" ]
1
2020-03-11T19:37:10.000Z
2020-03-11T19:37:10.000Z
from pajbot.modules.bingo import two_word_variations def test_two_word_variations(): assert two_word_variations("abc", "def", "KKona") == { "abc-def": "KKona", "abc_def": "KKona", "abcdef": "KKona", "def-abc": "KKona", "def_abc": "KKona", "defabc": "KKona", }
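For reference, a minimal sketch of a two_word_variations implementation that would satisfy this test, inferred only from the expected dictionary above; the actual pajbot.modules.bingo implementation may differ.

def two_word_variations(word1, word2, response):
    # Join the two words with "-", "_", and no separator, in both
    # orders, mapping every variation to the same response string.
    variations = {}
    for a, b in ((word1, word2), (word2, word1)):
        for sep in ("-", "_", ""):
            variations[a + sep + b] = response
    return variations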
24.461538
58
0.559748
from pajbot.modules.bingo import two_word_variations def test_two_word_variations(): assert two_word_variations("abc", "def", "KKona") == { "abc-def": "KKona", "abc_def": "KKona", "abcdef": "KKona", "def-abc": "KKona", "def_abc": "KKona", "defabc": "KKona", }
true
true
1c49e7d519f97f97ba1df092341baac9cd9535c8
61,625
py
Python
nltk/parse/chart.py
addisonblanda/Plato
cebd522dfe4b21f8c965f0e56637c15744817474
[ "MIT" ]
6
2017-01-22T03:15:01.000Z
2019-12-01T16:19:36.000Z
nltk/parse/chart.py
addisonblanda/Plato
cebd522dfe4b21f8c965f0e56637c15744817474
[ "MIT" ]
3
2020-03-24T15:38:23.000Z
2021-02-02T21:44:18.000Z
nltk/parse/chart.py
addisonblanda/Plato
cebd522dfe4b21f8c965f0e56637c15744817474
[ "MIT" ]
6
2017-01-19T21:49:55.000Z
2021-04-14T09:57:17.000Z
# -*- coding: utf-8 -*-
# Natural Language Toolkit: A Chart Parser
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
#         Steven Bird <stevenbird1@gmail.com>
#         Jean Mark Gawron <gawron@mail.sdsu.edu>
#         Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

"""
Data classes and parser implementations for "chart parsers", which
use dynamic programming to efficiently parse a text.  A chart
parser derives parse trees for a text by iteratively adding "edges"
to a "chart."  Each edge represents a hypothesis about the tree
structure for a subsequence of the text.  The chart is a
"blackboard" for composing and combining these hypotheses.

When a chart parser begins parsing a text, it creates a new (empty)
chart, spanning the text.  It then incrementally adds new edges to the
chart.  A set of "chart rules" specifies the conditions under which
new edges should be added to the chart.  Once the chart reaches a
stage where none of the chart rules adds any new edges, parsing is
complete.

Charts are encoded with the ``Chart`` class, and edges are encoded with
the ``TreeEdge`` and ``LeafEdge`` classes.  The chart parser module
defines two chart parsers:

  - ``ChartParser`` is a simple and flexible chart parser.  Given a
    set of chart rules, it will apply those rules to the chart until
    no more edges are added.

  - ``SteppingChartParser`` is a subclass of ``ChartParser`` that can
    be used to step through the parsing process.
"""

from __future__ import print_function, division, unicode_literals

import itertools
import re
import warnings

from nltk import compat
from nltk.tree import Tree
from nltk.grammar import PCFG, is_nonterminal, is_terminal
from nltk.util import OrderedDict
from nltk.internals import raise_unorderable_types
from nltk.compat import (total_ordering, python_2_unicode_compatible,
                         unicode_repr)

from nltk.parse.api import ParserI

########################################################################
## Edges
########################################################################

@total_ordering
class EdgeI(object):
    """
    A hypothesis about the structure of part of a sentence.
    Each edge records the fact that a structure is (partially)
    consistent with the sentence.  An edge contains:

    - A span, indicating what part of the sentence is
      consistent with the hypothesized structure.
    - A left-hand side, specifying what kind of structure is
      hypothesized.
    - A right-hand side, specifying the contents of the
      hypothesized structure.
    - A dot position, indicating how much of the hypothesized
      structure is consistent with the sentence.

    Every edge is either complete or incomplete:

    - An edge is complete if its structure is fully consistent
      with the sentence.
    - An edge is incomplete if its structure is partially consistent
      with the sentence.  For every incomplete edge, the span specifies
      a possible prefix for the edge's structure.

    There are two kinds of edge:

    - A ``TreeEdge`` records which trees have been found to
      be (partially) consistent with the text.
    - A ``LeafEdge`` records the tokens occurring in the text.

    The ``EdgeI`` interface provides a common interface to both types
    of edge, allowing chart parsers to treat them in a uniform manner.
""" def __init__(self): if self.__class__ == EdgeI: raise TypeError('Edge is an abstract interface') #//////////////////////////////////////////////////////////// # Span #//////////////////////////////////////////////////////////// def span(self): """ Return a tuple ``(s, e)``, where ``tokens[s:e]`` is the portion of the sentence that is consistent with this edge's structure. :rtype: tuple(int, int) """ raise NotImplementedError() def start(self): """ Return the start index of this edge's span. :rtype: int """ raise NotImplementedError() def end(self): """ Return the end index of this edge's span. :rtype: int """ raise NotImplementedError() def length(self): """ Return the length of this edge's span. :rtype: int """ raise NotImplementedError() #//////////////////////////////////////////////////////////// # Left Hand Side #//////////////////////////////////////////////////////////// def lhs(self): """ Return this edge's left-hand side, which specifies what kind of structure is hypothesized by this edge. :see: ``TreeEdge`` and ``LeafEdge`` for a description of the left-hand side values for each edge type. """ raise NotImplementedError() #//////////////////////////////////////////////////////////// # Right Hand Side #//////////////////////////////////////////////////////////// def rhs(self): """ Return this edge's right-hand side, which specifies the content of the structure hypothesized by this edge. :see: ``TreeEdge`` and ``LeafEdge`` for a description of the right-hand side values for each edge type. """ raise NotImplementedError() def dot(self): """ Return this edge's dot position, which indicates how much of the hypothesized structure is consistent with the sentence. In particular, ``self.rhs[:dot]`` is consistent with ``tokens[self.start():self.end()]``. :rtype: int """ raise NotImplementedError() def nextsym(self): """ Return the element of this edge's right-hand side that immediately follows its dot. :rtype: Nonterminal or terminal or None """ raise NotImplementedError() def is_complete(self): """ Return True if this edge's structure is fully consistent with the text. :rtype: bool """ raise NotImplementedError() def is_incomplete(self): """ Return True if this edge's structure is partially consistent with the text. :rtype: bool """ raise NotImplementedError() #//////////////////////////////////////////////////////////// # Comparisons & hashing #//////////////////////////////////////////////////////////// def __eq__(self, other): return (self.__class__ is other.__class__ and self._comparison_key == other._comparison_key) def __ne__(self, other): return not self == other def __lt__(self, other): if not isinstance(other, EdgeI): raise_unorderable_types("<", self, other) if self.__class__ is other.__class__: return self._comparison_key < other._comparison_key else: return self.__class__.__name__ < other.__class__.__name__ def __hash__(self): try: return self._hash except AttributeError: self._hash = hash(self._comparison_key) return self._hash @python_2_unicode_compatible class TreeEdge(EdgeI): """ An edge that records the fact that a tree is (partially) consistent with the sentence. A tree edge consists of: - A span, indicating what part of the sentence is consistent with the hypothesized tree. - A left-hand side, specifying the hypothesized tree's node value. - A right-hand side, specifying the hypothesized tree's children. 
      Each element of the right-hand side is either a terminal,
      specifying a token with that terminal as its leaf value; or a
      nonterminal, specifying a subtree with that nonterminal's symbol
      as its node value.

    - A dot position, indicating which children are consistent with
      part of the sentence.  In particular, if ``dot`` is the dot
      position, ``rhs`` is the right-hand side, ``(start,end)`` is the
      span, and ``sentence`` is the list of tokens in the sentence,
      then ``tokens[start:end]`` can be spanned by the children
      specified by ``rhs[:dot]``.

    For more information about edges, see the ``EdgeI`` interface.
    """
    def __init__(self, span, lhs, rhs, dot=0):
        """
        Construct a new ``TreeEdge``.

        :type span: tuple(int, int)
        :param span: A tuple ``(s, e)``, where ``tokens[s:e]`` is the
            portion of the sentence that is consistent with the new
            edge's structure.
        :type lhs: Nonterminal
        :param lhs: The new edge's left-hand side, specifying the
            hypothesized tree's node value.
        :type rhs: list(Nonterminal and str)
        :param rhs: The new edge's right-hand side, specifying the
            hypothesized tree's children.
        :type dot: int
        :param dot: The position of the new edge's dot.  This position
            specifies what prefix of the production's right hand side
            is consistent with the text.  In particular, if
            ``sentence`` is the list of tokens in the sentence, then
            ``tokens[span[0]:span[1]]`` can be spanned by the
            children specified by ``rhs[:dot]``.
        """
        self._span = span
        self._lhs = lhs
        rhs = tuple(rhs)
        self._rhs = rhs
        self._dot = dot
        self._comparison_key = (span, lhs, rhs, dot)

    @staticmethod
    def from_production(production, index):
        """
        Return a new ``TreeEdge`` formed from the given production.
        The new edge's left-hand side and right-hand side will be taken
        from ``production``; its span will be ``(index,index)``; and its
        dot position will be ``0``.

        :rtype: TreeEdge
        """
        return TreeEdge(span=(index, index), lhs=production.lhs(),
                        rhs=production.rhs(), dot=0)

    def move_dot_forward(self, new_end):
        """
        Return a new ``TreeEdge`` formed from this edge.
        The new edge's dot position is increased by ``1``,
        and its end index will be replaced by ``new_end``.

        :param new_end: The new end index.
        :type new_end: int
        :rtype: TreeEdge
        """
        return TreeEdge(span=(self._span[0], new_end),
                        lhs=self._lhs, rhs=self._rhs,
                        dot=self._dot+1)

    # Accessors
    def lhs(self): return self._lhs
    def span(self): return self._span
    def start(self): return self._span[0]
    def end(self): return self._span[1]
    def length(self): return self._span[1] - self._span[0]
    def rhs(self): return self._rhs
    def dot(self): return self._dot
    def is_complete(self): return self._dot == len(self._rhs)
    def is_incomplete(self): return self._dot != len(self._rhs)
    def nextsym(self):
        if self._dot >= len(self._rhs): return None
        else: return self._rhs[self._dot]

    # String representation
    def __str__(self):
        str = '[%s:%s] ' % (self._span[0], self._span[1])
        str += '%-2r ->' % (self._lhs,)

        for i in range(len(self._rhs)):
            if i == self._dot: str += ' *'
            str += ' %s' % unicode_repr(self._rhs[i])
        if len(self._rhs) == self._dot: str += ' *'
        return str

    def __repr__(self):
        return '[Edge: %s]' % self

@python_2_unicode_compatible
class LeafEdge(EdgeI):
    """
    An edge that records the fact that a leaf value is consistent with
    a word in the sentence.  A leaf edge consists of:

    - An index, indicating the position of the word.
    - A leaf, specifying the word's content.

    A leaf edge's left-hand side is its leaf value, and its right hand
    side is ``()``.  Its span is ``[index, index+1]``, and its dot
    position is ``0``.
""" def __init__(self, leaf, index): """ Construct a new ``LeafEdge``. :param leaf: The new edge's leaf value, specifying the word that is recorded by this edge. :param index: The new edge's index, specifying the position of the word that is recorded by this edge. """ self._leaf = leaf self._index = index self._comparison_key = (leaf, index) # Accessors def lhs(self): return self._leaf def span(self): return (self._index, self._index+1) def start(self): return self._index def end(self): return self._index+1 def length(self): return 1 def rhs(self): return () def dot(self): return 0 def is_complete(self): return True def is_incomplete(self): return False def nextsym(self): return None # String representations def __str__(self): return '[%s:%s] %s' % (self._index, self._index+1, unicode_repr(self._leaf)) def __repr__(self): return '[Edge: %s]' % (self) ######################################################################## ## Chart ######################################################################## class Chart(object): """ A blackboard for hypotheses about the syntactic constituents of a sentence. A chart contains a set of edges, and each edge encodes a single hypothesis about the structure of some portion of the sentence. The ``select`` method can be used to select a specific collection of edges. For example ``chart.select(is_complete=True, start=0)`` yields all complete edges whose start indices are 0. To ensure the efficiency of these selection operations, ``Chart`` dynamically creates and maintains an index for each set of attributes that have been selected on. In order to reconstruct the trees that are represented by an edge, the chart associates each edge with a set of child pointer lists. A child pointer list is a list of the edges that license an edge's right-hand side. :ivar _tokens: The sentence that the chart covers. :ivar _num_leaves: The number of tokens. :ivar _edges: A list of the edges in the chart :ivar _edge_to_cpls: A dictionary mapping each edge to a set of child pointer lists that are associated with that edge. :ivar _indexes: A dictionary mapping tuples of edge attributes to indices, where each index maps the corresponding edge attribute values to lists of edges. """ def __init__(self, tokens): """ Construct a new chart. The chart is initialized with the leaf edges corresponding to the terminal leaves. :type tokens: list :param tokens: The sentence that this chart will be used to parse. """ # Record the sentence token and the sentence length. self._tokens = tuple(tokens) self._num_leaves = len(self._tokens) # Initialise the chart. self.initialize() def initialize(self): """ Clear the chart. """ # A list of edges contained in this chart. self._edges = [] # The set of child pointer lists associated with each edge. self._edge_to_cpls = {} # Indexes mapping attribute values to lists of edges # (used by select()). self._indexes = {} #//////////////////////////////////////////////////////////// # Sentence Access #//////////////////////////////////////////////////////////// def num_leaves(self): """ Return the number of words in this chart's sentence. :rtype: int """ return self._num_leaves def leaf(self, index): """ Return the leaf value of the word at the given index. :rtype: str """ return self._tokens[index] def leaves(self): """ Return a list of the leaf values of each word in the chart's sentence. 
        :rtype: list(str)
        """
        return self._tokens

    #////////////////////////////////////////////////////////////
    # Edge access
    #////////////////////////////////////////////////////////////

    def edges(self):
        """
        Return a list of all edges in this chart.  New edges
        that are added to the chart after the call to edges()
        will *not* be contained in this list.

        :rtype: list(EdgeI)
        :see: ``iteredges``, ``select``
        """
        return self._edges[:]

    def iteredges(self):
        """
        Return an iterator over the edges in this chart.  It is
        not guaranteed that new edges which are added to the
        chart before the iterator is exhausted will also be generated.

        :rtype: iter(EdgeI)
        :see: ``edges``, ``select``
        """
        return iter(self._edges)

    # Iterating over the chart yields its edges.
    __iter__ = iteredges

    def num_edges(self):
        """
        Return the number of edges contained in this chart.

        :rtype: int
        """
        return len(self._edge_to_cpls)

    def select(self, **restrictions):
        """
        Return an iterator over the edges in this chart.  Any
        new edges that are added to the chart before the iterator
        is exhausted will also be generated.  ``restrictions``
        can be used to restrict the set of edges that will be
        generated.

        :param span: Only generate edges ``e`` where ``e.span()==span``
        :param start: Only generate edges ``e`` where ``e.start()==start``
        :param end: Only generate edges ``e`` where ``e.end()==end``
        :param length: Only generate edges ``e`` where ``e.length()==length``
        :param lhs: Only generate edges ``e`` where ``e.lhs()==lhs``
        :param rhs: Only generate edges ``e`` where ``e.rhs()==rhs``
        :param nextsym: Only generate edges ``e`` where
            ``e.nextsym()==nextsym``
        :param dot: Only generate edges ``e`` where ``e.dot()==dot``
        :param is_complete: Only generate edges ``e`` where
            ``e.is_complete()==is_complete``
        :param is_incomplete: Only generate edges ``e`` where
            ``e.is_incomplete()==is_incomplete``
        :rtype: iter(EdgeI)
        """
        # If there are no restrictions, then return all edges.
        if restrictions=={}: return iter(self._edges)

        # Find the index corresponding to the given restrictions.
        restr_keys = sorted(restrictions.keys())
        restr_keys = tuple(restr_keys)

        # If it doesn't exist, then create it.
        if restr_keys not in self._indexes:
            self._add_index(restr_keys)

        vals = tuple(restrictions[key] for key in restr_keys)
        return iter(self._indexes[restr_keys].get(vals, []))

    def _add_index(self, restr_keys):
        """
        A helper function for ``select``, which creates a new index for
        a given set of attributes (aka restriction keys).
        """
        # Make sure it's a valid index.
        for key in restr_keys:
            if not hasattr(EdgeI, key):
                raise ValueError('Bad restriction: %s' % key)

        # Create the index.
        index = self._indexes[restr_keys] = {}

        # Add all existing edges to the index.
        for edge in self._edges:
            vals = tuple(getattr(edge, key)() for key in restr_keys)
            index.setdefault(vals, []).append(edge)

    def _register_with_indexes(self, edge):
        """
        A helper function for ``insert``, which registers the new
        edge with all existing indexes.
        """
        for (restr_keys, index) in self._indexes.items():
            vals = tuple(getattr(edge, key)() for key in restr_keys)
            index.setdefault(vals, []).append(edge)

    #////////////////////////////////////////////////////////////
    # Edge Insertion
    #////////////////////////////////////////////////////////////

    def insert_with_backpointer(self, new_edge, previous_edge, child_edge):
        """
        Add a new edge to the chart, using a pointer to the previous edge.
""" cpls = self.child_pointer_lists(previous_edge) new_cpls = [cpl+(child_edge,) for cpl in cpls] return self.insert(new_edge, *new_cpls) def insert(self, edge, *child_pointer_lists): """ Add a new edge to the chart, and return True if this operation modified the chart. In particular, return true iff the chart did not already contain ``edge``, or if it did not already associate ``child_pointer_lists`` with ``edge``. :type edge: EdgeI :param edge: The new edge :type child_pointer_lists: sequence of tuple(EdgeI) :param child_pointer_lists: A sequence of lists of the edges that were used to form this edge. This list is used to reconstruct the trees (or partial trees) that are associated with ``edge``. :rtype: bool """ # Is it a new edge? if edge not in self._edge_to_cpls: # Add it to the list of edges. self._append_edge(edge) # Register with indexes. self._register_with_indexes(edge) # Get the set of child pointer lists for this edge. cpls = self._edge_to_cpls.setdefault(edge, OrderedDict()) chart_was_modified = False for child_pointer_list in child_pointer_lists: child_pointer_list = tuple(child_pointer_list) if child_pointer_list not in cpls: # It's a new CPL; register it, and return true. cpls[child_pointer_list] = True chart_was_modified = True return chart_was_modified def _append_edge(self, edge): self._edges.append(edge) #//////////////////////////////////////////////////////////// # Tree extraction & child pointer lists #//////////////////////////////////////////////////////////// def parses(self, root, tree_class=Tree): """ Return an iterator of the complete tree structures that span the entire chart, and whose root node is ``root``. """ for edge in self.select(start=0, end=self._num_leaves, lhs=root): for tree in self.trees(edge, tree_class=tree_class, complete=True): yield tree def trees(self, edge, tree_class=Tree, complete=False): """ Return an iterator of the tree structures that are associated with ``edge``. If ``edge`` is incomplete, then the unexpanded children will be encoded as childless subtrees, whose node value is the corresponding terminal or nonterminal. :rtype: list(Tree) :note: If two trees share a common subtree, then the same Tree may be used to encode that subtree in both trees. If you need to eliminate this subtree sharing, then create a deep copy of each tree. """ return iter(self._trees(edge, complete, memo={}, tree_class=tree_class)) def _trees(self, edge, complete, memo, tree_class): """ A helper function for ``trees``. :param memo: A dictionary used to record the trees that we've generated for each edge, so that when we see an edge more than once, we can reuse the same trees. """ # If we've seen this edge before, then reuse our old answer. if edge in memo: return memo[edge] # when we're reading trees off the chart, don't use incomplete edges if complete and edge.is_incomplete(): return [] # Leaf edges. if isinstance(edge, LeafEdge): leaf = self._tokens[edge.start()] memo[edge] = [leaf] return [leaf] # Until we're done computing the trees for edge, set # memo[edge] to be empty. This has the effect of filtering # out any cyclic trees (i.e., trees that contain themselves as # descendants), because if we reach this edge via a cycle, # then it will appear that the edge doesn't generate any trees. memo[edge] = [] trees = [] lhs = edge.lhs().symbol() # Each child pointer list can be used to form trees. for cpl in self.child_pointer_lists(edge): # Get the set of child choices for each child pointer. 
            #   child_choices[i] is the set of choices for the tree's
            #   ith child.
            child_choices = [self._trees(cp, complete, memo, tree_class)
                             for cp in cpl]

            # For each combination of children, add a tree.
            for children in itertools.product(*child_choices):
                trees.append(tree_class(lhs, children))

        # If the edge is incomplete, then extend it with "partial trees":
        if edge.is_incomplete():
            unexpanded = [tree_class(elt,[])
                          for elt in edge.rhs()[edge.dot():]]
            for tree in trees:
                tree.extend(unexpanded)

        # Update the memoization dictionary.
        memo[edge] = trees

        # Return the list of trees.
        return trees

    def child_pointer_lists(self, edge):
        """
        Return the set of child pointer lists for the given edge.
        Each child pointer list is a list of edges that have been
        used to form this edge.

        :rtype: list(list(EdgeI))
        """
        # Make a copy, in case they modify it.  Under Python 3,
        # dict.keys() returns a view, so an explicit list() is
        # needed to actually copy.
        return list(self._edge_to_cpls.get(edge, {}).keys())

    #////////////////////////////////////////////////////////////
    # Display
    #////////////////////////////////////////////////////////////
    def pretty_format_edge(self, edge, width=None):
        """
        Return a pretty-printed string representation of a given edge
        in this chart.

        :rtype: str
        :param width: The number of characters allotted to each
            index in the sentence.
        """
        if width is None: width = 50 // (self.num_leaves()+1)
        (start, end) = (edge.start(), edge.end())

        str = '|' + ('.'+' '*(width-1))*start

        # Zero-width edges are "#" if complete, ">" if incomplete
        if start == end:
            if edge.is_complete(): str += '#'
            else: str += '>'

        # Spanning complete edges are "[===]"; Other edges are
        # "[---]" if complete, "[--->" if incomplete
        elif edge.is_complete() and edge.span() == (0,self._num_leaves):
            str += '['+('='*width)*(end-start-1) + '='*(width-1)+']'
        elif edge.is_complete():
            str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+']'
        else:
            str += '['+('-'*width)*(end-start-1) + '-'*(width-1)+'>'

        str += (' '*(width-1)+'.')*(self._num_leaves-end)
        return str + '| %s' % edge

    def pretty_format_leaves(self, width=None):
        """
        Return a pretty-printed string representation of this
        chart's leaves.  This string can be used as a header
        for calls to ``pretty_format_edge``.
        """
        if width is None: width = 50 // (self.num_leaves()+1)

        if self._tokens is not None and width>1:
            header = '|.'
            for tok in self._tokens:
                header += tok[:width-1].center(width-1)+'.'
            header += '|'
        else:
            header = ''

        return header

    def pretty_format(self, width=None):
        """
        Return a pretty-printed string representation of this chart.

        :param width: The number of characters allotted to each
            index in the sentence.
        :rtype: str
        """
        if width is None: width = 50 // (self.num_leaves()+1)
        # sort edges: primary key=length, secondary key=start index.
# (and filter out the token edges) edges = sorted([(e.length(), e.start(), e) for e in self]) edges = [e for (_,_,e) in edges] return (self.pretty_format_leaves(width) + '\n' + '\n'.join(self.pretty_format_edge(edge, width) for edge in edges)) #//////////////////////////////////////////////////////////// # Display: Dot (AT&T Graphviz) #//////////////////////////////////////////////////////////// def dot_digraph(self): # Header s = 'digraph nltk_chart {\n' #s += ' size="5,5";\n' s += ' rankdir=LR;\n' s += ' node [height=0.1,width=0.1];\n' s += ' node [style=filled, color="lightgray"];\n' # Set up the nodes for y in range(self.num_edges(), -1, -1): if y == 0: s += ' node [style=filled, color="black"];\n' for x in range(self.num_leaves()+1): if y == 0 or (x <= self._edges[y-1].start() or x >= self._edges[y-1].end()): s += ' %04d.%04d [label=""];\n' % (x,y) # Add a spacer s += ' x [style=invis]; x->0000.0000 [style=invis];\n' # Declare ranks. for x in range(self.num_leaves()+1): s += ' {rank=same;' for y in range(self.num_edges()+1): if y == 0 or (x <= self._edges[y-1].start() or x >= self._edges[y-1].end()): s += ' %04d.%04d' % (x,y) s += '}\n' # Add the leaves s += ' edge [style=invis, weight=100];\n' s += ' node [shape=plaintext]\n' s += ' 0000.0000' for x in range(self.num_leaves()): s += '->%s->%04d.0000' % (self.leaf(x), x+1) s += ';\n\n' # Add the edges s += ' edge [style=solid, weight=1];\n' for y, edge in enumerate(self): for x in range(edge.start()): s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' % (x, y+1, x+1, y+1)) s += (' %04d.%04d -> %04d.%04d [label="%s"];\n' % (edge.start(), y+1, edge.end(), y+1, edge)) for x in range(edge.end(), self.num_leaves()): s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' % (x, y+1, x+1, y+1)) s += '}\n' return s ######################################################################## ## Chart Rules ######################################################################## class ChartRuleI(object): """ A rule that specifies what new edges are licensed by any given set of existing edges. Each chart rule expects a fixed number of edges, as indicated by the class variable ``NUM_EDGES``. In particular: - A chart rule with ``NUM_EDGES=0`` specifies what new edges are licensed, regardless of existing edges. - A chart rule with ``NUM_EDGES=1`` specifies what new edges are licensed by a single existing edge. - A chart rule with ``NUM_EDGES=2`` specifies what new edges are licensed by a pair of existing edges. :type NUM_EDGES: int :cvar NUM_EDGES: The number of existing edges that this rule uses to license new edges. Typically, this number ranges from zero to two. """ def apply(self, chart, grammar, *edges): """ Return a generator that will add edges licensed by this rule and the given edges to the chart, one at a time. Each time the generator is resumed, it will either add a new edge and yield that edge; or return. :type edges: list(EdgeI) :param edges: A set of existing edges. The number of edges that should be passed to ``apply()`` is specified by the ``NUM_EDGES`` class variable. :rtype: iter(EdgeI) """ raise NotImplementedError() def apply_everywhere(self, chart, grammar): """ Return a generator that will add all edges licensed by this rule, given the edges that are currently in the chart, one at a time. Each time the generator is resumed, it will either add a new edge and yield that edge; or return. 
        :rtype: iter(EdgeI)
        """
        raise NotImplementedError()

@python_2_unicode_compatible
class AbstractChartRule(ChartRuleI):
    """
    An abstract base class for chart rules.  ``AbstractChartRule``
    provides:

    - A default implementation for ``apply_everywhere``.
      (Currently, this implementation assumes that ``NUM_EDGES``<=3.)
    - A default implementation for ``__str__``, which returns a
      name based on the rule's class name.

    Subclasses must define ``apply`` themselves.
    """

    # Subclasses must define apply.
    def apply(self, chart, grammar, *edges):
        raise NotImplementedError()

    # Default: loop through the given number of edges, and call
    # self.apply() for each set of edges.
    def apply_everywhere(self, chart, grammar):
        if self.NUM_EDGES == 0:
            for new_edge in self.apply(chart, grammar):
                yield new_edge

        elif self.NUM_EDGES == 1:
            for e1 in chart:
                for new_edge in self.apply(chart, grammar, e1):
                    yield new_edge

        elif self.NUM_EDGES == 2:
            for e1 in chart:
                for e2 in chart:
                    for new_edge in self.apply(chart, grammar, e1, e2):
                        yield new_edge

        elif self.NUM_EDGES == 3:
            for e1 in chart:
                for e2 in chart:
                    for e3 in chart:
                        for new_edge in self.apply(chart,grammar,e1,e2,e3):
                            yield new_edge

        else:
            raise AssertionError('NUM_EDGES>3 is not currently supported')

    # Default: return a name based on the class name.
    def __str__(self):
        # Add spaces between InitialCapsWords.
        return re.sub('([a-z])([A-Z])', r'\1 \2', self.__class__.__name__)

#////////////////////////////////////////////////////////////
# Fundamental Rule
#////////////////////////////////////////////////////////////

class FundamentalRule(AbstractChartRule):
    """
    A rule that joins two adjacent edges to form a single combined
    edge.  In particular, this rule specifies that any pair of edges

    - ``[A -> alpha \* B beta][i:j]``
    - ``[B -> gamma \*][j:k]``

    licenses the edge:

    - ``[A -> alpha B * beta][i:k]``
    """
    NUM_EDGES = 2

    def apply(self, chart, grammar, left_edge, right_edge):
        # Make sure the rule is applicable.
        if not (left_edge.is_incomplete() and
                right_edge.is_complete() and
                left_edge.end() == right_edge.start() and
                left_edge.nextsym() == right_edge.lhs()):
            return

        # Construct the new edge.
        new_edge = left_edge.move_dot_forward(right_edge.end())

        # Insert it into the chart.
        if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
            yield new_edge

class SingleEdgeFundamentalRule(FundamentalRule):
    """
    A rule that joins a given edge with adjacent edges in the chart,
    to form combined edges.  In particular, this rule specifies that
    either of the edges:

    - ``[A -> alpha \* B beta][i:j]``
    - ``[B -> gamma \*][j:k]``

    licenses the edge:

    - ``[A -> alpha B * beta][i:k]``

    if the other edge is already in the chart.

    :note: This is basically ``FundamentalRule``, with one edge left
        unspecified.
""" NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_incomplete(): for new_edge in self._apply_incomplete(chart, grammar, edge): yield new_edge else: for new_edge in self._apply_complete(chart, grammar, edge): yield new_edge def _apply_complete(self, chart, grammar, right_edge): for left_edge in chart.select(end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge def _apply_incomplete(self, chart, grammar, left_edge): for right_edge in chart.select(start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge #//////////////////////////////////////////////////////////// # Inserting Terminal Leafs #//////////////////////////////////////////////////////////// class LeafInitRule(AbstractChartRule): NUM_EDGES=0 def apply(self, chart, grammar): for index in range(chart.num_leaves()): new_edge = LeafEdge(chart.leaf(index), index) if chart.insert(new_edge, ()): yield new_edge #//////////////////////////////////////////////////////////// # Top-Down Prediction #//////////////////////////////////////////////////////////// class TopDownInitRule(AbstractChartRule): """ A rule licensing edges corresponding to the grammar productions for the grammar's start symbol. In particular, this rule specifies that ``[S -> \* alpha][0:i]`` is licensed for each grammar production ``S -> alpha``, where ``S`` is the grammar's start symbol. """ NUM_EDGES = 0 def apply(self, chart, grammar): for prod in grammar.productions(lhs=grammar.start()): new_edge = TreeEdge.from_production(prod, 0) if chart.insert(new_edge, ()): yield new_edge class TopDownPredictRule(AbstractChartRule): """ A rule licensing edges corresponding to the grammar productions for the nonterminal following an incomplete edge's dot. In particular, this rule specifies that ``[A -> alpha \* B beta][i:j]`` licenses the edge ``[B -> \* gamma][j:j]`` for each grammar production ``B -> gamma``. :note: This rule corresponds to the Predictor Rule in Earley parsing. """ NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_complete(): return for prod in grammar.productions(lhs=edge.nextsym()): new_edge = TreeEdge.from_production(prod, edge.end()) if chart.insert(new_edge, ()): yield new_edge class CachedTopDownPredictRule(TopDownPredictRule): """ A cached version of ``TopDownPredictRule``. After the first time this rule is applied to an edge with a given ``end`` and ``next``, it will not generate any more edges for edges with that ``end`` and ``next``. If ``chart`` or ``grammar`` are changed, then the cache is flushed. """ def __init__(self): TopDownPredictRule.__init__(self) self._done = {} def apply(self, chart, grammar, edge): if edge.is_complete(): return nextsym, index = edge.nextsym(), edge.end() if not is_nonterminal(nextsym): return # If we've already applied this rule to an edge with the same # next & end, and the chart & grammar have not changed, then # just return (no new edges to add). done = self._done.get((nextsym, index), (None,None)) if done[0] is chart and done[1] is grammar: return # Add all the edges indicated by the top down expand rule. for prod in grammar.productions(lhs=nextsym): # If the left corner in the predicted production is # leaf, it must match with the input. 
if prod.rhs(): first = prod.rhs()[0] if is_terminal(first): if index >= chart.num_leaves() or first != chart.leaf(index): continue new_edge = TreeEdge.from_production(prod, index) if chart.insert(new_edge, ()): yield new_edge # Record the fact that we've applied this rule. self._done[nextsym, index] = (chart, grammar) #//////////////////////////////////////////////////////////// # Bottom-Up Prediction #//////////////////////////////////////////////////////////// class BottomUpPredictRule(AbstractChartRule): """ A rule licensing any edge corresponding to a production whose right-hand side begins with a complete edge's left-hand side. In particular, this rule specifies that ``[A -> alpha \*]`` licenses the edge ``[B -> \* A beta]`` for each grammar production ``B -> A beta``. """ NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_incomplete(): return for prod in grammar.productions(rhs=edge.lhs()): new_edge = TreeEdge.from_production(prod, edge.start()) if chart.insert(new_edge, ()): yield new_edge class BottomUpPredictCombineRule(BottomUpPredictRule): """ A rule licensing any edge corresponding to a production whose right-hand side begins with a complete edge's left-hand side. In particular, this rule specifies that ``[A -> alpha \*]`` licenses the edge ``[B -> A \* beta]`` for each grammar production ``B -> A beta``. :note: This is like ``BottomUpPredictRule``, but it also applies the ``FundamentalRule`` to the resulting edge. """ NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_incomplete(): return for prod in grammar.productions(rhs=edge.lhs()): new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) if chart.insert(new_edge, (edge,)): yield new_edge class EmptyPredictRule(AbstractChartRule): """ A rule that inserts all empty productions as passive edges, in every position in the chart. 
""" NUM_EDGES = 0 def apply(self, chart, grammar): for prod in grammar.productions(empty=True): for index in compat.xrange(chart.num_leaves() + 1): new_edge = TreeEdge.from_production(prod, index) if chart.insert(new_edge, ()): yield new_edge ######################################################################## ## Filtered Bottom Up ######################################################################## class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): def _apply_complete(self, chart, grammar, right_edge): end = right_edge.end() nexttoken = end < chart.num_leaves() and chart.leaf(end) for left_edge in chart.select(end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()): if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge def _apply_incomplete(self, chart, grammar, left_edge): for right_edge in chart.select(start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()): end = right_edge.end() nexttoken = end < chart.num_leaves() and chart.leaf(end) if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule): def apply(self, chart, grammar, edge): if edge.is_incomplete(): return end = edge.end() nexttoken = end < chart.num_leaves() and chart.leaf(end) for prod in grammar.productions(rhs=edge.lhs()): if _bottomup_filter(grammar, nexttoken, prod.rhs()): new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) if chart.insert(new_edge, (edge,)): yield new_edge def _bottomup_filter(grammar, nexttoken, rhs, dot=0): if len(rhs) <= dot + 1: return True _next = rhs[dot + 1] if is_terminal(_next): return nexttoken == _next else: return grammar.is_leftcorner(_next, nexttoken) ######################################################################## ## Generic Chart Parser ######################################################################## TD_STRATEGY = [LeafInitRule(), TopDownInitRule(), CachedTopDownPredictRule(), SingleEdgeFundamentalRule()] BU_STRATEGY = [LeafInitRule(), EmptyPredictRule(), BottomUpPredictRule(), SingleEdgeFundamentalRule()] BU_LC_STRATEGY = [LeafInitRule(), EmptyPredictRule(), BottomUpPredictCombineRule(), SingleEdgeFundamentalRule()] LC_STRATEGY = [LeafInitRule(), FilteredBottomUpPredictCombineRule(), FilteredSingleEdgeFundamentalRule()] class ChartParser(ParserI): """ A generic chart parser. A "strategy", or list of ``ChartRuleI`` instances, is used to decide what edges to add to the chart. In particular, ``ChartParser`` uses the following algorithm to parse texts: | Until no new edges are added: | For each *rule* in *strategy*: | Apply *rule* to any applicable edges in the chart. | Return any complete parses in the chart """ def __init__(self, grammar, strategy=BU_LC_STRATEGY, trace=0, trace_chart_width=50, use_agenda=True, chart_class=Chart): """ Create a new chart parser, that uses ``grammar`` to parse texts. :type grammar: CFG :param grammar: The grammar used to parse texts. :type strategy: list(ChartRuleI) :param strategy: A list of rules that should be used to decide what edges to add to the chart (top-down strategy by default). :type trace: int :param trace: The level of tracing that should be used when parsing a text. 
``0`` will generate no tracing output; and higher numbers will produce more verbose tracing output. :type trace_chart_width: int :param trace_chart_width: The default total width reserved for the chart in trace output. The remainder of each line will be used to display edges. :type use_agenda: bool :param use_agenda: Use an optimized agenda-based algorithm, if possible. :param chart_class: The class that should be used to create the parse charts. """ self._grammar = grammar self._strategy = strategy self._trace = trace self._trace_chart_width = trace_chart_width # If the strategy only consists of axioms (NUM_EDGES==0) and # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm: self._use_agenda = use_agenda self._chart_class = chart_class self._axioms = [] self._inference_rules = [] for rule in strategy: if rule.NUM_EDGES == 0: self._axioms.append(rule) elif rule.NUM_EDGES == 1: self._inference_rules.append(rule) else: self._use_agenda = False def grammar(self): return self._grammar def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width): if not trace: return print_rule_header = trace > 1 for edge in new_edges: if print_rule_header: print('%s:' % rule) print_rule_header = False print(chart.pretty_format_edge(edge, edge_width)) def chart_parse(self, tokens, trace=None): """ Return the final parse ``Chart`` from which all possible parse trees can be extracted. :param tokens: The sentence to be parsed :type tokens: list(str) :rtype: Chart """ if trace is None: trace = self._trace trace_new_edges = self._trace_new_edges tokens = list(tokens) self._grammar.check_coverage(tokens) chart = self._chart_class(tokens) grammar = self._grammar # Width, for printing trace edges. trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) if trace: print(chart.pretty_format_leaves(trace_edge_width)) if self._use_agenda: # Use an agenda-based algorithm. for axiom in self._axioms: new_edges = list(axiom.apply(chart, grammar)) trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) inference_rules = self._inference_rules agenda = chart.edges() # We reverse the initial agenda, since it is a stack # but chart.edges() functions as a queue. agenda.reverse() while agenda: edge = agenda.pop() for rule in inference_rules: new_edges = list(rule.apply(chart, grammar, edge)) if trace: trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) agenda += new_edges else: # Do not use an agenda-based algorithm. edges_added = True while edges_added: edges_added = False for rule in self._strategy: new_edges = list(rule.apply_everywhere(chart, grammar)) edges_added = len(new_edges) trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) # Return the final chart. return chart def parse(self, tokens, tree_class=Tree): chart = self.chart_parse(tokens) return iter(chart.parses(self._grammar.start(), tree_class=tree_class)) class TopDownChartParser(ChartParser): """ A ``ChartParser`` using a top-down parsing strategy. See ``ChartParser`` for more information. """ def __init__(self, grammar, **parser_args): ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args) class BottomUpChartParser(ChartParser): """ A ``ChartParser`` using a bottom-up parsing strategy. See ``ChartParser`` for more information. 
""" def __init__(self, grammar, **parser_args): if isinstance(grammar, PCFG): warnings.warn("BottomUpChartParser only works for CFG, " "use BottomUpProbabilisticChartParser instead", category=DeprecationWarning) ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args) class BottomUpLeftCornerChartParser(ChartParser): """ A ``ChartParser`` using a bottom-up left-corner parsing strategy. This strategy is often more efficient than standard bottom-up. See ``ChartParser`` for more information. """ def __init__(self, grammar, **parser_args): ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args) class LeftCornerChartParser(ChartParser): def __init__(self, grammar, **parser_args): if not grammar.is_nonempty(): raise ValueError("LeftCornerParser only works for grammars " "without empty productions.") ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args) ######################################################################## ## Stepping Chart Parser ######################################################################## class SteppingChartParser(ChartParser): """ A ``ChartParser`` that allows you to step through the parsing process, adding a single edge at a time. It also allows you to change the parser's strategy or grammar midway through parsing a text. The ``initialize`` method is used to start parsing a text. ``step`` adds a single edge to the chart. ``set_strategy`` changes the strategy used by the chart parser. ``parses`` returns the set of parses that has been found by the chart parser. :ivar _restart: Records whether the parser's strategy, grammar, or chart has been changed. If so, then ``step`` must restart the parsing algorithm. """ def __init__(self, grammar, strategy=[], trace=0): self._chart = None self._current_chartrule = None self._restart = False ChartParser.__init__(self, grammar, strategy, trace) #//////////////////////////////////////////////////////////// # Initialization #//////////////////////////////////////////////////////////// def initialize(self, tokens): "Begin parsing the given tokens." self._chart = Chart(list(tokens)) self._restart = True #//////////////////////////////////////////////////////////// # Stepping #//////////////////////////////////////////////////////////// def step(self): """ Return a generator that adds edges to the chart, one at a time. Each time the generator is resumed, it adds a single edge and yields that edge. If no more edges can be added, then it yields None. If the parser's strategy, grammar, or chart is changed, then the generator will continue adding edges using the new strategy, grammar, or chart. Note that this generator never terminates, since the grammar or strategy might be changed to values that would add new edges. Instead, it yields None when no more edges can be added with the current strategy and grammar. """ if self._chart is None: raise ValueError('Parser must be initialized first') while True: self._restart = False w = 50 // (self._chart.num_leaves()+1) for e in self._parse(): if self._trace > 1: print(self._current_chartrule) if self._trace > 0: print(self._chart.pretty_format_edge(e,w)) yield e if self._restart: break else: yield None # No more edges. def _parse(self): """ A generator that implements the actual parsing algorithm. ``step`` iterates through this generator, and restarts it whenever the parser's strategy, grammar, or chart is modified. 
""" chart = self._chart grammar = self._grammar edges_added = 1 while edges_added > 0: edges_added = 0 for rule in self._strategy: self._current_chartrule = rule for e in rule.apply_everywhere(chart, grammar): edges_added += 1 yield e #//////////////////////////////////////////////////////////// # Accessors #//////////////////////////////////////////////////////////// def strategy(self): "Return the strategy used by this parser." return self._strategy def grammar(self): "Return the grammar used by this parser." return self._grammar def chart(self): "Return the chart that is used by this parser." return self._chart def current_chartrule(self): "Return the chart rule used to generate the most recent edge." return self._current_chartrule def parses(self, tree_class=Tree): "Return the parse trees currently contained in the chart." return self._chart.parses(self._grammar.start(), tree_class) #//////////////////////////////////////////////////////////// # Parser modification #//////////////////////////////////////////////////////////// def set_strategy(self, strategy): """ Change the strategy that the parser uses to decide which edges to add to the chart. :type strategy: list(ChartRuleI) :param strategy: A list of rules that should be used to decide what edges to add to the chart. """ if strategy == self._strategy: return self._strategy = strategy[:] # Make a copy. self._restart = True def set_grammar(self, grammar): "Change the grammar used by the parser." if grammar is self._grammar: return self._grammar = grammar self._restart = True def set_chart(self, chart): "Load a given chart into the chart parser." if chart is self._chart: return self._chart = chart self._restart = True #//////////////////////////////////////////////////////////// # Standard parser methods #//////////////////////////////////////////////////////////// def parse(self, tokens, tree_class=Tree): tokens = list(tokens) self._grammar.check_coverage(tokens) # Initialize ourselves. self.initialize(tokens) # Step until no more edges are generated. for e in self.step(): if e is None: break # Return an iterator of complete parses. return self.parses(tree_class=tree_class) ######################################################################## ## Demo Code ######################################################################## def demo_grammar(): from nltk.grammar import CFG return CFG.fromstring(""" S -> NP VP PP -> "with" NP NP -> NP PP VP -> VP PP VP -> Verb NP VP -> Verb NP -> Det Noun NP -> "John" NP -> "I" Det -> "the" Det -> "my" Det -> "a" Noun -> "dog" Noun -> "cookie" Verb -> "ate" Verb -> "saw" Prep -> "with" Prep -> "under" """) def demo(choice=None, print_times=True, print_grammar=False, print_trees=True, trace=2, sent='I saw John with a dog with my cookie', numparses=5): """ A demonstration of the chart parsers. """ import sys, time from nltk import nonterminals, Production, CFG # The grammar for ChartParser and SteppingChartParser: grammar = demo_grammar() if print_grammar: print("* Grammar") print(grammar) # Tokenize the sample sentence. print("* Sentence:") print(sent) tokens = sent.split() print(tokens) print() # Ask the user which parser to test, # if the parser wasn't provided as an argument if choice is None: print(' 1: Top-down chart parser') print(' 2: Bottom-up chart parser') print(' 3: Bottom-up left-corner chart parser') print(' 4: Left-corner chart parser with bottom-up filter') print(' 5: Stepping chart parser (alternating top-down & bottom-up)') print(' 6: All parsers') print('\nWhich parser (1-6)? 
', end=' ') choice = sys.stdin.readline().strip() print() choice = str(choice) if choice not in "123456": print('Bad parser number') return # Keep track of how long each parser takes. times = {} strategies = {'1': ('Top-down', TD_STRATEGY), '2': ('Bottom-up', BU_STRATEGY), '3': ('Bottom-up left-corner', BU_LC_STRATEGY), '4': ('Filtered left-corner', LC_STRATEGY)} choices = [] if choice in strategies: choices = [choice] if choice=='6': choices = "1234" # Run the requested chart parser(s), except the stepping parser. for strategy in choices: print("* Strategy: " + strategies[strategy][0]) print() cp = ChartParser(grammar, strategies[strategy][1], trace=trace) t = time.time() chart = cp.chart_parse(tokens) parses = list(chart.parses(grammar.start())) times[strategies[strategy][0]] = time.time()-t print("Nr edges in chart:", len(chart.edges())) if numparses: assert len(parses)==numparses, 'Not all parses found' if print_trees: for tree in parses: print(tree) else: print("Nr trees:", len(parses)) print() # Run the stepping parser, if requested. if choice in "56": print("* Strategy: Stepping (top-down vs bottom-up)") print() t = time.time() cp = SteppingChartParser(grammar, trace=trace) cp.initialize(tokens) for i in range(5): print('*** SWITCH TO TOP DOWN') cp.set_strategy(TD_STRATEGY) for j, e in enumerate(cp.step()): if j>20 or e is None: break print('*** SWITCH TO BOTTOM UP') cp.set_strategy(BU_STRATEGY) for j, e in enumerate(cp.step()): if j>20 or e is None: break times['Stepping'] = time.time()-t print("Nr edges in chart:", len(cp.chart().edges())) if numparses: assert len(list(cp.parses()))==numparses, 'Not all parses found' if print_trees: for tree in cp.parses(): print(tree) else: print("Nr trees:", len(list(cp.parses()))) print() # Print the times of all parsers: if not (print_times and times): return print("* Parsing times") print() maxlen = max(len(key) for key in times) format = '%' + repr(maxlen) + 's parser: %6.3fsec' times_items = times.items() for (parser, t) in sorted(times_items, key=lambda a:a[1]): print(format % (parser, t)) if __name__ == '__main__': demo()
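A short, non-interactive usage sketch for the parsers defined above, following the same pattern as demo(); it assumes nltk is installed so that this file is importable as nltk.parse.chart.

from nltk.parse.chart import (ChartParser,
                              BottomUpLeftCornerChartParser,
                              demo_grammar)

grammar = demo_grammar()
tokens = "I saw John with a dog".split()

# ChartParser defaults to the bottom-up left-corner strategy (BU_LC_STRATEGY).
parser = ChartParser(grammar, trace=0)
for tree in parser.parse(tokens):
    print(tree)

# The convenience subclasses simply pre-select a strategy.
lc_parser = BottomUpLeftCornerChartParser(grammar)
print(len(list(lc_parser.parse(tokens))))  # number of distinct parses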
36.637931
90
0.567675
from __future__ import print_function, division, unicode_literals import itertools import re import warnings from nltk import compat from nltk.tree import Tree from nltk.grammar import PCFG, is_nonterminal, is_terminal from nltk.util import OrderedDict from nltk.internals import raise_unorderable_types from nltk.compat import (total_ordering, python_2_unicode_compatible, unicode_repr) from nltk.parse.api import ParserI nter(width-1)+'.' header += '|' else: header = '' return header def pretty_format(self, width=None): if width is None: width = 50 // (self.num_leaves()+1) # sort edges: primary key=length, secondary key=start index. # (and filter out the token edges) edges = sorted([(e.length(), e.start(), e) for e in self]) edges = [e for (_,_,e) in edges] return (self.pretty_format_leaves(width) + '\n' + '\n'.join(self.pretty_format_edge(edge, width) for edge in edges)) #//////////////////////////////////////////////////////////// # Display: Dot (AT&T Graphviz) #//////////////////////////////////////////////////////////// def dot_digraph(self): # Header s = 'digraph nltk_chart {\n' #s += ' size="5,5";\n' s += ' rankdir=LR;\n' s += ' node [height=0.1,width=0.1];\n' s += ' node [style=filled, color="lightgray"];\n' # Set up the nodes for y in range(self.num_edges(), -1, -1): if y == 0: s += ' node [style=filled, color="black"];\n' for x in range(self.num_leaves()+1): if y == 0 or (x <= self._edges[y-1].start() or x >= self._edges[y-1].end()): s += ' %04d.%04d [label=""];\n' % (x,y) # Add a spacer s += ' x [style=invis]; x->0000.0000 [style=invis];\n' # Declare ranks. for x in range(self.num_leaves()+1): s += ' {rank=same;' for y in range(self.num_edges()+1): if y == 0 or (x <= self._edges[y-1].start() or x >= self._edges[y-1].end()): s += ' %04d.%04d' % (x,y) s += '}\n' # Add the leaves s += ' edge [style=invis, weight=100];\n' s += ' node [shape=plaintext]\n' s += ' 0000.0000' for x in range(self.num_leaves()): s += '->%s->%04d.0000' % (self.leaf(x), x+1) s += ';\n\n' # Add the edges s += ' edge [style=solid, weight=1];\n' for y, edge in enumerate(self): for x in range(edge.start()): s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' % (x, y+1, x+1, y+1)) s += (' %04d.%04d -> %04d.%04d [label="%s"];\n' % (edge.start(), y+1, edge.end(), y+1, edge)) for x in range(edge.end(), self.num_leaves()): s += (' %04d.%04d -> %04d.%04d [style="invis"];\n' % (x, y+1, x+1, y+1)) s += '}\n' return s ######################################################################## ## Chart Rules ######################################################################## class ChartRuleI(object): def apply(self, chart, grammar, *edges): raise NotImplementedError() def apply_everywhere(self, chart, grammar): raise NotImplementedError() @python_2_unicode_compatible class AbstractChartRule(ChartRuleI): # Subclasses must define apply. def apply(self, chart, grammar, *edges): raise NotImplementedError() # Default: loop through the given number of edges, and call # self.apply() for each set of edges. 
def apply_everywhere(self, chart, grammar): if self.NUM_EDGES == 0: for new_edge in self.apply(chart, grammar): yield new_edge elif self.NUM_EDGES == 1: for e1 in chart: for new_edge in self.apply(chart, grammar, e1): yield new_edge elif self.NUM_EDGES == 2: for e1 in chart: for e2 in chart: for new_edge in self.apply(chart, grammar, e1, e2): yield new_edge elif self.NUM_EDGES == 3: for e1 in chart: for e2 in chart: for e3 in chart: for new_edge in self.apply(chart,grammar,e1,e2,e3): yield new_edge else: raise AssertionError('NUM_EDGES>3 is not currently supported') # Default: return a name based on the class name. def __str__(self): # Add spaces between InitialCapsWords. return re.sub('([a-z])([A-Z])', r'\1 \2', self.__class__.__name__) #//////////////////////////////////////////////////////////// # Fundamental Rule #//////////////////////////////////////////////////////////// class FundamentalRule(AbstractChartRule): NUM_EDGES = 2 def apply(self, chart, grammar, left_edge, right_edge): # Make sure the rule is applicable. if not (left_edge.is_incomplete() and right_edge.is_complete() and left_edge.end() == right_edge.start() and left_edge.nextsym() == right_edge.lhs()): return # Construct the new edge. new_edge = left_edge.move_dot_forward(right_edge.end()) # Insert it into the chart. if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge class SingleEdgeFundamentalRule(FundamentalRule): NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_incomplete(): for new_edge in self._apply_incomplete(chart, grammar, edge): yield new_edge else: for new_edge in self._apply_complete(chart, grammar, edge): yield new_edge def _apply_complete(self, chart, grammar, right_edge): for left_edge in chart.select(end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge def _apply_incomplete(self, chart, grammar, left_edge): for right_edge in chart.select(start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge #//////////////////////////////////////////////////////////// # Inserting Terminal Leafs #//////////////////////////////////////////////////////////// class LeafInitRule(AbstractChartRule): NUM_EDGES=0 def apply(self, chart, grammar): for index in range(chart.num_leaves()): new_edge = LeafEdge(chart.leaf(index), index) if chart.insert(new_edge, ()): yield new_edge #//////////////////////////////////////////////////////////// # Top-Down Prediction #//////////////////////////////////////////////////////////// class TopDownInitRule(AbstractChartRule): NUM_EDGES = 0 def apply(self, chart, grammar): for prod in grammar.productions(lhs=grammar.start()): new_edge = TreeEdge.from_production(prod, 0) if chart.insert(new_edge, ()): yield new_edge class TopDownPredictRule(AbstractChartRule): NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_complete(): return for prod in grammar.productions(lhs=edge.nextsym()): new_edge = TreeEdge.from_production(prod, edge.end()) if chart.insert(new_edge, ()): yield new_edge class CachedTopDownPredictRule(TopDownPredictRule): def __init__(self): TopDownPredictRule.__init__(self) self._done = {} def apply(self, chart, grammar, edge): if edge.is_complete(): return nextsym, index = edge.nextsym(), edge.end() if not is_nonterminal(nextsym): 
return # If we've already applied this rule to an edge with the same done = self._done.get((nextsym, index), (None,None)) if done[0] is chart and done[1] is grammar: return for prod in grammar.productions(lhs=nextsym): if prod.rhs(): first = prod.rhs()[0] if is_terminal(first): if index >= chart.num_leaves() or first != chart.leaf(index): continue new_edge = TreeEdge.from_production(prod, index) if chart.insert(new_edge, ()): yield new_edge self._done[nextsym, index] = (chart, grammar) #//////////////////////////////////////////////////////////// # Bottom-Up Prediction #//////////////////////////////////////////////////////////// class BottomUpPredictRule(AbstractChartRule): NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_incomplete(): return for prod in grammar.productions(rhs=edge.lhs()): new_edge = TreeEdge.from_production(prod, edge.start()) if chart.insert(new_edge, ()): yield new_edge class BottomUpPredictCombineRule(BottomUpPredictRule): NUM_EDGES = 1 def apply(self, chart, grammar, edge): if edge.is_incomplete(): return for prod in grammar.productions(rhs=edge.lhs()): new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) if chart.insert(new_edge, (edge,)): yield new_edge class EmptyPredictRule(AbstractChartRule): NUM_EDGES = 0 def apply(self, chart, grammar): for prod in grammar.productions(empty=True): for index in compat.xrange(chart.num_leaves() + 1): new_edge = TreeEdge.from_production(prod, index) if chart.insert(new_edge, ()): yield new_edge ######################################################################## ## Filtered Bottom Up ######################################################################## class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): def _apply_complete(self, chart, grammar, right_edge): end = right_edge.end() nexttoken = end < chart.num_leaves() and chart.leaf(end) for left_edge in chart.select(end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()): if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge def _apply_incomplete(self, chart, grammar, left_edge): for right_edge in chart.select(start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()): end = right_edge.end() nexttoken = end < chart.num_leaves() and chart.leaf(end) if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule): def apply(self, chart, grammar, edge): if edge.is_incomplete(): return end = edge.end() nexttoken = end < chart.num_leaves() and chart.leaf(end) for prod in grammar.productions(rhs=edge.lhs()): if _bottomup_filter(grammar, nexttoken, prod.rhs()): new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) if chart.insert(new_edge, (edge,)): yield new_edge def _bottomup_filter(grammar, nexttoken, rhs, dot=0): if len(rhs) <= dot + 1: return True _next = rhs[dot + 1] if is_terminal(_next): return nexttoken == _next else: return grammar.is_leftcorner(_next, nexttoken) ######################################################################## ## Generic Chart Parser ######################################################################## TD_STRATEGY = [LeafInitRule(), TopDownInitRule(), CachedTopDownPredictRule(), SingleEdgeFundamentalRule()] 
BU_STRATEGY = [LeafInitRule(), EmptyPredictRule(), BottomUpPredictRule(), SingleEdgeFundamentalRule()] BU_LC_STRATEGY = [LeafInitRule(), EmptyPredictRule(), BottomUpPredictCombineRule(), SingleEdgeFundamentalRule()] LC_STRATEGY = [LeafInitRule(), FilteredBottomUpPredictCombineRule(), FilteredSingleEdgeFundamentalRule()] class ChartParser(ParserI): def __init__(self, grammar, strategy=BU_LC_STRATEGY, trace=0, trace_chart_width=50, use_agenda=True, chart_class=Chart): self._grammar = grammar self._strategy = strategy self._trace = trace self._trace_chart_width = trace_chart_width # If the strategy only consists of axioms (NUM_EDGES==0) and # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm: self._use_agenda = use_agenda self._chart_class = chart_class self._axioms = [] self._inference_rules = [] for rule in strategy: if rule.NUM_EDGES == 0: self._axioms.append(rule) elif rule.NUM_EDGES == 1: self._inference_rules.append(rule) else: self._use_agenda = False def grammar(self): return self._grammar def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width): if not trace: return print_rule_header = trace > 1 for edge in new_edges: if print_rule_header: print('%s:' % rule) print_rule_header = False print(chart.pretty_format_edge(edge, edge_width)) def chart_parse(self, tokens, trace=None): if trace is None: trace = self._trace trace_new_edges = self._trace_new_edges tokens = list(tokens) self._grammar.check_coverage(tokens) chart = self._chart_class(tokens) grammar = self._grammar # Width, for printing trace edges. trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) if trace: print(chart.pretty_format_leaves(trace_edge_width)) if self._use_agenda: # Use an agenda-based algorithm. for axiom in self._axioms: new_edges = list(axiom.apply(chart, grammar)) trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) inference_rules = self._inference_rules agenda = chart.edges() # We reverse the initial agenda, since it is a stack # but chart.edges() functions as a queue. agenda.reverse() while agenda: edge = agenda.pop() for rule in inference_rules: new_edges = list(rule.apply(chart, grammar, edge)) if trace: trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) agenda += new_edges else: # Do not use an agenda-based algorithm. edges_added = True while edges_added: edges_added = False for rule in self._strategy: new_edges = list(rule.apply_everywhere(chart, grammar)) edges_added = len(new_edges) trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) # Return the final chart. 
return chart def parse(self, tokens, tree_class=Tree): chart = self.chart_parse(tokens) return iter(chart.parses(self._grammar.start(), tree_class=tree_class)) class TopDownChartParser(ChartParser): def __init__(self, grammar, **parser_args): ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args) class BottomUpChartParser(ChartParser): def __init__(self, grammar, **parser_args): if isinstance(grammar, PCFG): warnings.warn("BottomUpChartParser only works for CFG, " "use BottomUpProbabilisticChartParser instead", category=DeprecationWarning) ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args) class BottomUpLeftCornerChartParser(ChartParser): def __init__(self, grammar, **parser_args): ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args) class LeftCornerChartParser(ChartParser): def __init__(self, grammar, **parser_args): if not grammar.is_nonempty(): raise ValueError("LeftCornerParser only works for grammars " "without empty productions.") ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args) ######################################################################## ## Stepping Chart Parser ######################################################################## class SteppingChartParser(ChartParser): def __init__(self, grammar, strategy=[], trace=0): self._chart = None self._current_chartrule = None self._restart = False ChartParser.__init__(self, grammar, strategy, trace) #//////////////////////////////////////////////////////////// # Initialization #//////////////////////////////////////////////////////////// def initialize(self, tokens): self._chart = Chart(list(tokens)) self._restart = True #//////////////////////////////////////////////////////////// # Stepping #//////////////////////////////////////////////////////////// def step(self): if self._chart is None: raise ValueError('Parser must be initialized first') while True: self._restart = False w = 50 // (self._chart.num_leaves()+1) for e in self._parse(): if self._trace > 1: print(self._current_chartrule) if self._trace > 0: print(self._chart.pretty_format_edge(e,w)) yield e if self._restart: break else: yield None # No more edges. def _parse(self): chart = self._chart grammar = self._grammar edges_added = 1 while edges_added > 0: edges_added = 0 for rule in self._strategy: self._current_chartrule = rule for e in rule.apply_everywhere(chart, grammar): edges_added += 1 yield e #//////////////////////////////////////////////////////////// # Accessors #//////////////////////////////////////////////////////////// def strategy(self): return self._strategy def grammar(self): return self._grammar def chart(self): return self._chart def current_chartrule(self): return self._current_chartrule def parses(self, tree_class=Tree): return self._chart.parses(self._grammar.start(), tree_class) #//////////////////////////////////////////////////////////// # Parser modification #//////////////////////////////////////////////////////////// def set_strategy(self, strategy): if strategy == self._strategy: return self._strategy = strategy[:] # Make a copy. 
self._restart = True def set_grammar(self, grammar): if grammar is self._grammar: return self._grammar = grammar self._restart = True def set_chart(self, chart): if chart is self._chart: return self._chart = chart self._restart = True #//////////////////////////////////////////////////////////// # Standard parser methods #//////////////////////////////////////////////////////////// def parse(self, tokens, tree_class=Tree): tokens = list(tokens) self._grammar.check_coverage(tokens) # Initialize ourselves. self.initialize(tokens) # Step until no more edges are generated. for e in self.step(): if e is None: break # Return an iterator of complete parses. return self.parses(tree_class=tree_class) ######################################################################## ## Demo Code ######################################################################## def demo_grammar(): from nltk.grammar import CFG return CFG.fromstring(""" S -> NP VP PP -> "with" NP NP -> NP PP VP -> VP PP VP -> Verb NP VP -> Verb NP -> Det Noun NP -> "John" NP -> "I" Det -> "the" Det -> "my" Det -> "a" Noun -> "dog" Noun -> "cookie" Verb -> "ate" Verb -> "saw" Prep -> "with" Prep -> "under" """) def demo(choice=None, print_times=True, print_grammar=False, print_trees=True, trace=2, sent='I saw John with a dog with my cookie', numparses=5): import sys, time from nltk import nonterminals, Production, CFG # The grammar for ChartParser and SteppingChartParser: grammar = demo_grammar() if print_grammar: print("* Grammar") print(grammar) # Tokenize the sample sentence. print("* Sentence:") print(sent) tokens = sent.split() print(tokens) print() # Ask the user which parser to test, # if the parser wasn't provided as an argument if choice is None: print(' 1: Top-down chart parser') print(' 2: Bottom-up chart parser') print(' 3: Bottom-up left-corner chart parser') print(' 4: Left-corner chart parser with bottom-up filter') print(' 5: Stepping chart parser (alternating top-down & bottom-up)') print(' 6: All parsers') print('\nWhich parser (1-6)? 
', end=' ') choice = sys.stdin.readline().strip() print() choice = str(choice) if choice not in "123456": print('Bad parser number') return times = {} strategies = {'1': ('Top-down', TD_STRATEGY), '2': ('Bottom-up', BU_STRATEGY), '3': ('Bottom-up left-corner', BU_LC_STRATEGY), '4': ('Filtered left-corner', LC_STRATEGY)} choices = [] if choice in strategies: choices = [choice] if choice=='6': choices = "1234" for strategy in choices: print("* Strategy: " + strategies[strategy][0]) print() cp = ChartParser(grammar, strategies[strategy][1], trace=trace) t = time.time() chart = cp.chart_parse(tokens) parses = list(chart.parses(grammar.start())) times[strategies[strategy][0]] = time.time()-t print("Nr edges in chart:", len(chart.edges())) if numparses: assert len(parses)==numparses, 'Not all parses found' if print_trees: for tree in parses: print(tree) else: print("Nr trees:", len(parses)) print() if choice in "56": print("* Strategy: Stepping (top-down vs bottom-up)") print() t = time.time() cp = SteppingChartParser(grammar, trace=trace) cp.initialize(tokens) for i in range(5): print('*** SWITCH TO TOP DOWN') cp.set_strategy(TD_STRATEGY) for j, e in enumerate(cp.step()): if j>20 or e is None: break print('*** SWITCH TO BOTTOM UP') cp.set_strategy(BU_STRATEGY) for j, e in enumerate(cp.step()): if j>20 or e is None: break times['Stepping'] = time.time()-t print("Nr edges in chart:", len(cp.chart().edges())) if numparses: assert len(list(cp.parses()))==numparses, 'Not all parses found' if print_trees: for tree in cp.parses(): print(tree) else: print("Nr trees:", len(list(cp.parses()))) print() if not (print_times and times): return print("* Parsing times") print() maxlen = max(len(key) for key in times) format = '%' + repr(maxlen) + 's parser: %6.3fsec' times_items = times.items() for (parser, t) in sorted(times_items, key=lambda a:a[1]): print(format % (parser, t)) if __name__ == '__main__': demo()
true
true
1c49e8c673c464665b7013997bb5ecdb23c0b915
642
py
Python
tests/objects/message/__init__.py
mjneff2/Pincer
a11bc3e4bad319fdf927d913c58c933576ec7c99
[ "MIT" ]
null
null
null
tests/objects/message/__init__.py
mjneff2/Pincer
a11bc3e4bad319fdf927d913c58c933576ec7c99
[ "MIT" ]
null
null
null
tests/objects/message/__init__.py
mjneff2/Pincer
a11bc3e4bad319fdf927d913c58c933576ec7c99
[ "MIT" ]
null
null
null
from pincer.objects import Embed print(Embed( title="Pincer - 0.6.4", description=( "🚀 An asynchronous python API wrapper meant to replace" " discord.py\n> Snappy discord api wrapper written " "with aiohttp & websockets" ) ).add_field( name="**Github Repository**", value="> https://github.com/Pincer-org/Pincer" ).set_thumbnail( url="https://pincer.dev/img/icon.png" ).set_image( url=( "https://repository-images.githubusercontent.com" "/400871418/045ebf39-7c6e-4c3a-b744-0c3122374203" ) ).to_dict())
32.1
67
0.579439
from pincer.objects import Embed print(Embed( title="Pincer - 0.6.4", description=( "🚀 An asynchronous python API wrapper meant to replace" " discord.py\n> Snappy discord api wrapper written " "with aiohttp & websockets" ) ).add_field( name="**Github Repository**", value="> https://github.com/Pincer-org/Pincer" ).set_thumbnail( url="https://pincer.dev/img/icon.png" ).set_image( url=( "https://repository-images.githubusercontent.com" "/400871418/045ebf39-7c6e-4c3a-b744-0c3122374203" ) ).to_dict())
true
true
1c49e8d16ca5be1232c4449dc3a9df00edfe575b
1,186
py
Python
setup.py
deone/requestor
9af13ebc90861d37dc2db4e1b1375aa445655868
[ "MIT" ]
null
null
null
setup.py
deone/requestor
9af13ebc90861d37dc2db4e1b1375aa445655868
[ "MIT" ]
null
null
null
setup.py
deone/requestor
9af13ebc90861d37dc2db4e1b1375aa445655868
[ "MIT" ]
null
null
null
# Always prefer setuptools over distutils from setuptools import setup # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='requestor', version='0.1.1', description='Use this package to make HTTP post calls to django APIs with csrf support, and return response in json.', long_description=long_description, url='https://github.com/deone/requestor', author='Dayo Osikoya', author_email='alwaysdeone@gmail.com', license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], keywords='http post requests json response django csrf', py_modules=["requestor"], install_requires=['requests'], )
33.885714
122
0.676223
from setuptools import setup from codecs import open from os import path here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='requestor', version='0.1.1', description='Use this package to make HTTP post calls to django APIs with csrf support, and return response in json.', long_description=long_description, url='https://github.com/deone/requestor', author='Dayo Osikoya', author_email='alwaysdeone@gmail.com', license='MIT', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], keywords='http post requests json response django csrf', py_modules=["requestor"], install_requires=['requests'], )
true
true
1c49e9bafda8707fe36fdbba4d19f3bf3c46ee9e
43,586
py
Python
codalab/apps/web/migrations/0063_CompetitionDump.py
AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public
64cd6ac9a56a4e2d40d93608d4289b1a0e50cce7
[ "Apache-2.0" ]
333
2015-12-29T22:49:40.000Z
2022-03-27T12:01:57.000Z
codalab/apps/web/migrations/0063_CompetitionDump.py
AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public
64cd6ac9a56a4e2d40d93608d4289b1a0e50cce7
[ "Apache-2.0" ]
1,572
2015-12-28T21:54:00.000Z
2022-03-31T13:00:32.000Z
codalab/apps/web/migrations/0063_CompetitionDump.py
AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public
64cd6ac9a56a4e2d40d93608d4289b1a0e50cce7
[ "Apache-2.0" ]
107
2016-01-08T03:46:07.000Z
2022-03-16T08:43:57.000Z
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CompetitionDump' db.create_table(u'web_competitiondump', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('competition', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dumps', to=orm['web.Competition'])), ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('status', self.gf('django.db.models.fields.CharField')(default='Starting', max_length=64)), ('data_file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)), )) db.send_create_signal(u'web', ['CompetitionDump']) def backwards(self, orm): # Deleting model 'CompetitionDump' db.delete_table(u'web_competitiondump') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'authenz.cluser': { 'Meta': {'object_name': 'ClUser'}, 'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'email_on_submission_finished_successfully': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 
'organizer_direct_message_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'organizer_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'participation_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'rabbitmq_password': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}), 'rabbitmq_queue_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5', 'blank': 'True'}), 'rabbitmq_username': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}), 'team_members': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'queues.queue': { 'Meta': {'object_name': 'Queue'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'organizers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}), 'vhost': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'}) }, u'teams.team': { 'Meta': {'unique_together': "(('name', 'competition'),)", 'object_name': 'Team'}, 'allow_requests': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_creator'", 'to': u"orm['authenz.ClUser']"}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'last_modified': 
('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['authenz.ClUser']", 'null': 'True', 'through': u"orm['teams.TeamMembership']", 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamStatus']", 'null': 'True'}) }, u'teams.teammembership': { 'Meta': {'object_name': 'TeamMembership'}, 'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_invitation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamMembershipStatus']", 'null': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.Team']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}) }, u'teams.teammembershipstatus': { 'Meta': {'object_name': 'TeamMembershipStatus'}, 'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'teams.teamstatus': { 'Meta': {'object_name': 'TeamStatus'}, 'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'web.competition': { 'Meta': {'ordering': "['end_date']", 'object_name': 'Competition'}, 'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}), 'allow_public_submissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'allow_teams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'anonymous_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_creator'", 'to': u"orm['authenz.ClUser']"}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'disallow_leaderboard_modifying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'enable_detailed_results': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'enable_forum': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'enable_medical_image_viewer': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'enable_per_submission_metadata': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'enable_teams': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'force_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'has_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'is_migrating': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_migrating_delayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'last_phase_migration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_modified_by'", 'to': u"orm['authenz.ClUser']"}), 'original_yaml_file': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'queue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['queues.Queue']"}), 'require_team_approval': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'reward': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'show_datasets_from_yaml': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_teams'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['teams.Team']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'web.competitiondefbundle': { 'Meta': {'object_name': 'CompetitionDefBundle'}, 'config_bundle': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['authenz.ClUser']"}), 's3_config_bundle': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'}) }, u'web.competitiondump': { 'Meta': {'object_name': 'CompetitionDump'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dumps'", 'to': u"orm['web.Competition']"}), 'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'Starting'", 'max_length': '64'}), 'timestamp': 
('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, u'web.competitionparticipant': { 'Meta': {'unique_together': "(('user', 'competition'),)", 'object_name': 'CompetitionParticipant'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participants'", 'to': u"orm['web.Competition']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ParticipantStatus']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participation'", 'to': u"orm['authenz.ClUser']"}) }, u'web.competitionphase': { 'Meta': {'ordering': "['phasenumber']", 'object_name': 'CompetitionPhase'}, 'auto_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'color': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}), 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': u"orm['web.Competition']"}), 'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'phase'", 'blank': 'True', 'to': u"orm['web.Dataset']"}), 'default_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), 'disable_custom_docker_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'execution_time_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300'}), 'force_best_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'input_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'input_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'input_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}), 'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_scoring_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'leaderboard_management_mode': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '50'}), 'max_submissions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}), 'max_submissions_per_day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999'}), 'phase_never_ends': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'phasenumber': ('django.db.models.fields.PositiveIntegerField', [], {}), 'reference_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'reference_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reference_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}), 'scoring_program': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 
'True', 'blank': 'True'}), 'scoring_program_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'scoring_program_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scoring_program_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}), 'start_date': ('django.db.models.fields.DateTimeField', [], {}) }, u'web.competitionsubmission': { 'Meta': {'unique_together': "(('submission_number', 'phase', 'participant'),)", 'object_name': 'CompetitionSubmission'}, 'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'coopetition_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), 'detailed_results_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'dislike_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'exception_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'execution_key': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'file_url_base': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}), 'history_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inputfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionParticipant']"}), 'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionPhase']"}), 'prediction_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'prediction_runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'prediction_stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 
'prediction_stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'private_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'readable_filename': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 's3_file': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'}), 'scores_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'secret': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionSubmissionStatus']"}), 'status_details': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'submission_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'submitted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['teams.Team']"}), 'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'when_made_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'when_unmade_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, u'web.competitionsubmissionmetadata': { 'Meta': {'object_name': 'CompetitionSubmissionMetadata'}, 'beginning_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'beginning_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'beginning_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_predict': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_scoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'processes_running_in_temp_dir': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadatas'", 'to': 
u"orm['web.CompetitionSubmission']"}) }, u'web.competitionsubmissionstatus': { 'Meta': {'object_name': 'CompetitionSubmissionStatus'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, u'web.contentcategory': { 'Meta': {'object_name': 'ContentCategory'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}), 'content_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_menu': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.ContentCategory']"}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"}) }, u'web.contentvisibility': { 'Meta': {'object_name': 'ContentVisibility'}, 'classname': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}), 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, u'web.dataset': { 'Meta': {'ordering': "['number']", 'object_name': 'Dataset'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': u"orm['authenz.ClUser']"}), 'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFile']"}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, u'web.defaultcontentitem': { 'Meta': {'object_name': 'DefaultContentItem'}, 'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}), 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'web.externalfile': { 'Meta': {'object_name': 'ExternalFile'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'source_address_info': ('django.db.models.fields.CharField', [], 
{'max_length': '200', 'blank': 'True'}), 'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFileType']"}) }, u'web.externalfilesource': { 'Meta': {'object_name': 'ExternalFileSource'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'service_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'web.externalfiletype': { 'Meta': {'object_name': 'ExternalFileType'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, u'web.organizerdataset': { 'Meta': {'object_name': 'OrganizerDataSet'}, 'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'sub_data_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['web.OrganizerDataSet']", 'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '64'}), 'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}) }, u'web.page': { 'Meta': {'ordering': "['category', 'rank']", 'unique_together': "(('label', 'category', 'container'),)", 'object_name': 'Page'}, 'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}), 'codename': ('django.db.models.fields.SlugField', [], {'max_length': '100'}), 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'null': 'True', 'to': u"orm['web.Competition']"}), 'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['web.PageContainer']"}), 'defaults': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.DefaultContentItem']", 'null': 'True', 'blank': 'True'}), 'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'markup': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'visibility': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, u'web.pagecontainer': { 'Meta': {'unique_together': "(('object_id', 'content_type'),)", 'object_name': 'PageContainer'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'object_id': 
('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, u'web.participantstatus': { 'Meta': {'object_name': 'ParticipantStatus'}, 'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'web.phaseleaderboard': { 'Meta': {'object_name': 'PhaseLeaderBoard'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phase': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'board'", 'unique': 'True', 'to': u"orm['web.CompetitionPhase']"}) }, u'web.phaseleaderboardentry': { 'Meta': {'unique_together': "(('board', 'result'),)", 'object_name': 'PhaseLeaderBoardEntry'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['web.PhaseLeaderBoard']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'leaderboard_entry_result'", 'to': u"orm['web.CompetitionSubmission']"}) }, u'web.submissioncomputedscore': { 'Meta': {'object_name': 'SubmissionComputedScore'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'operation': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'scoredef': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'computed_score'", 'unique': 'True', 'to': u"orm['web.SubmissionScoreDef']"}) }, u'web.submissioncomputedscorefield': { 'Meta': {'object_name': 'SubmissionComputedScoreField'}, 'computed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['web.SubmissionComputedScore']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}) }, u'web.submissionresultgroup': { 'Meta': {'ordering': "['ordering']", 'object_name': 'SubmissionResultGroup'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'phases': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.CompetitionPhase']", 'through': u"orm['web.SubmissionResultGroupPhase']", 'symmetrical': 'False'}) }, u'web.submissionresultgroupphase': { 'Meta': {'unique_together': "(('group', 'phase'),)", 'object_name': 'SubmissionResultGroupPhase'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionPhase']"}) }, u'web.submissionscore': { 'Meta': {'unique_together': "(('result', 'scoredef'),)", 'object_name': 'SubmissionScore'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scores'", 'to': u"orm['web.CompetitionSubmission']"}), 'scoredef': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '10'}) }, u'web.submissionscoredef': { 'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreDef'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), 'computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.SubmissionResultGroup']", 'through': u"orm['web.SubmissionScoreDefGroup']", 'symmetrical': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'numeric_format': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'selection_default': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'show_rank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sorting': ('django.db.models.fields.SlugField', [], {'default': "'asc'", 'max_length': '20'}) }, u'web.submissionscoredefgroup': { 'Meta': {'unique_together': "(('scoredef', 'group'),)", 'object_name': 'SubmissionScoreDefGroup'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}) }, u'web.submissionscoreset': { 'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreSet'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.SubmissionScoreSet']"}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']", 'null': 'True', 'blank': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) } } complete_apps = ['web']
91.76
260
0.576079
from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.create_table(u'web_competitiondump', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('competition', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dumps', to=orm['web.Competition'])), ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('status', self.gf('django.db.models.fields.CharField')(default='Starting', max_length=64)), ('data_file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)), )) db.send_create_signal(u'web', ['CompetitionDump']) def backwards(self, orm): db.delete_table(u'web_competitiondump') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'authenz.cluser': { 'Meta': {'object_name': 'ClUser'}, 'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'email_on_submission_finished_successfully': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'organizer_direct_message_updates': ('django.db.models.fields.BooleanField', [], {'default': 
'True'}), 'organizer_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'participation_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'rabbitmq_password': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}), 'rabbitmq_queue_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5', 'blank': 'True'}), 'rabbitmq_username': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}), 'team_members': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'queues.queue': { 'Meta': {'object_name': 'Queue'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'organizers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}), 'vhost': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'}) }, u'teams.team': { 'Meta': {'unique_together': "(('name', 'competition'),)", 'object_name': 'Team'}, 'allow_requests': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_creator'", 'to': u"orm['authenz.ClUser']"}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'members': 
('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['authenz.ClUser']", 'null': 'True', 'through': u"orm['teams.TeamMembership']", 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamStatus']", 'null': 'True'}) }, u'teams.teammembership': { 'Meta': {'object_name': 'TeamMembership'}, 'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_invitation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamMembershipStatus']", 'null': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.Team']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}) }, u'teams.teammembershipstatus': { 'Meta': {'object_name': 'TeamMembershipStatus'}, 'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'teams.teamstatus': { 'Meta': {'object_name': 'TeamStatus'}, 'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'web.competition': { 'Meta': {'ordering': "['end_date']", 'object_name': 'Competition'}, 'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}), 'allow_public_submissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'allow_teams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'anonymous_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_creator'", 'to': u"orm['authenz.ClUser']"}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'disallow_leaderboard_modifying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'enable_detailed_results': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'enable_forum': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'enable_medical_image_viewer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'enable_per_submission_metadata': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'enable_teams': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'force_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'has_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'is_migrating': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_migrating_delayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'last_phase_migration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_modified_by'", 'to': u"orm['authenz.ClUser']"}), 'original_yaml_file': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'queue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['queues.Queue']"}), 'require_team_approval': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'reward': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'show_datasets_from_yaml': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_teams'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['teams.Team']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'web.competitiondefbundle': { 'Meta': {'object_name': 'CompetitionDefBundle'}, 'config_bundle': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['authenz.ClUser']"}), 's3_config_bundle': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'}) }, u'web.competitiondump': { 'Meta': {'object_name': 'CompetitionDump'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dumps'", 'to': u"orm['web.Competition']"}), 'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'Starting'", 'max_length': '64'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 
u'web.competitionparticipant': { 'Meta': {'unique_together': "(('user', 'competition'),)", 'object_name': 'CompetitionParticipant'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participants'", 'to': u"orm['web.Competition']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ParticipantStatus']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participation'", 'to': u"orm['authenz.ClUser']"}) }, u'web.competitionphase': { 'Meta': {'ordering': "['phasenumber']", 'object_name': 'CompetitionPhase'}, 'auto_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'color': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}), 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': u"orm['web.Competition']"}), 'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'phase'", 'blank': 'True', 'to': u"orm['web.Dataset']"}), 'default_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), 'disable_custom_docker_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'execution_time_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300'}), 'force_best_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'input_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'input_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'input_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}), 'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_scoring_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'leaderboard_management_mode': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '50'}), 'max_submissions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}), 'max_submissions_per_day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999'}), 'phase_never_ends': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'phasenumber': ('django.db.models.fields.PositiveIntegerField', [], {}), 'reference_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'reference_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reference_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}), 'scoring_program': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'scoring_program_docker_image': 
('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'scoring_program_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scoring_program_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}), 'start_date': ('django.db.models.fields.DateTimeField', [], {}) }, u'web.competitionsubmission': { 'Meta': {'unique_together': "(('submission_number', 'phase', 'participant'),)", 'object_name': 'CompetitionSubmission'}, 'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'coopetition_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), 'detailed_results_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'dislike_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'exception_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'execution_key': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'file_url_base': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}), 'history_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inputfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionParticipant']"}), 'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionPhase']"}), 'prediction_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'prediction_runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'prediction_stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'prediction_stdout_file': ('django.db.models.fields.files.FileField', 
[], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'private_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'readable_filename': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 's3_file': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'}), 'scores_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'secret': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionSubmissionStatus']"}), 'status_details': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'submission_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'submitted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['teams.Team']"}), 'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'when_made_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'when_unmade_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, u'web.competitionsubmissionmetadata': { 'Meta': {'object_name': 'CompetitionSubmissionMetadata'}, 'beginning_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'beginning_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'beginning_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_predict': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_scoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'processes_running_in_temp_dir': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadatas'", 'to': u"orm['web.CompetitionSubmission']"}) }, u'web.competitionsubmissionstatus': { 
'Meta': {'object_name': 'CompetitionSubmissionStatus'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, u'web.contentcategory': { 'Meta': {'object_name': 'ContentCategory'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}), 'content_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_menu': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.ContentCategory']"}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"}) }, u'web.contentvisibility': { 'Meta': {'object_name': 'ContentVisibility'}, 'classname': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}), 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, u'web.dataset': { 'Meta': {'ordering': "['number']", 'object_name': 'Dataset'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': u"orm['authenz.ClUser']"}), 'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFile']"}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}) }, u'web.defaultcontentitem': { 'Meta': {'object_name': 'DefaultContentItem'}, 'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}), 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'web.externalfile': { 'Meta': {'object_name': 'ExternalFile'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'source_address_info': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'source_url': ('django.db.models.fields.URLField', 
[], {'max_length': '200'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFileType']"}) }, u'web.externalfilesource': { 'Meta': {'object_name': 'ExternalFileSource'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'service_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'web.externalfiletype': { 'Meta': {'object_name': 'ExternalFileType'}, 'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, u'web.organizerdataset': { 'Meta': {'object_name': 'OrganizerDataSet'}, 'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'sub_data_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['web.OrganizerDataSet']", 'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '64'}), 'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}) }, u'web.page': { 'Meta': {'ordering': "['category', 'rank']", 'unique_together': "(('label', 'category', 'container'),)", 'object_name': 'Page'}, 'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}), 'codename': ('django.db.models.fields.SlugField', [], {'max_length': '100'}), 'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'null': 'True', 'to': u"orm['web.Competition']"}), 'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['web.PageContainer']"}), 'defaults': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.DefaultContentItem']", 'null': 'True', 'blank': 'True'}), 'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'markup': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'visibility': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, u'web.pagecontainer': { 'Meta': {'unique_together': "(('object_id', 'content_type'),)", 'object_name': 'PageContainer'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, 
u'web.participantstatus': { 'Meta': {'object_name': 'ParticipantStatus'}, 'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'web.phaseleaderboard': { 'Meta': {'object_name': 'PhaseLeaderBoard'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phase': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'board'", 'unique': 'True', 'to': u"orm['web.CompetitionPhase']"}) }, u'web.phaseleaderboardentry': { 'Meta': {'unique_together': "(('board', 'result'),)", 'object_name': 'PhaseLeaderBoardEntry'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['web.PhaseLeaderBoard']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'leaderboard_entry_result'", 'to': u"orm['web.CompetitionSubmission']"}) }, u'web.submissioncomputedscore': { 'Meta': {'object_name': 'SubmissionComputedScore'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'operation': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'scoredef': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'computed_score'", 'unique': 'True', 'to': u"orm['web.SubmissionScoreDef']"}) }, u'web.submissioncomputedscorefield': { 'Meta': {'object_name': 'SubmissionComputedScoreField'}, 'computed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['web.SubmissionComputedScore']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}) }, u'web.submissionresultgroup': { 'Meta': {'ordering': "['ordering']", 'object_name': 'SubmissionResultGroup'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'phases': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.CompetitionPhase']", 'through': u"orm['web.SubmissionResultGroupPhase']", 'symmetrical': 'False'}) }, u'web.submissionresultgroupphase': { 'Meta': {'unique_together': "(('group', 'phase'),)", 'object_name': 'SubmissionResultGroupPhase'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionPhase']"}) }, u'web.submissionscore': { 'Meta': {'unique_together': "(('result', 'scoredef'),)", 'object_name': 'SubmissionScore'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scores'", 'to': u"orm['web.CompetitionSubmission']"}), 'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': 
u"orm['web.SubmissionScoreDef']"}), 'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '10'}) }, u'web.submissionscoredef': { 'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreDef'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), 'computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.SubmissionResultGroup']", 'through': u"orm['web.SubmissionScoreDefGroup']", 'symmetrical': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'numeric_format': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'selection_default': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'show_rank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sorting': ('django.db.models.fields.SlugField', [], {'default': "'asc'", 'max_length': '20'}) }, u'web.submissionscoredefgroup': { 'Meta': {'unique_together': "(('scoredef', 'group'),)", 'object_name': 'SubmissionScoreDefGroup'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}) }, u'web.submissionscoreset': { 'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreSet'}, 'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.SubmissionScoreSet']"}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']", 'null': 'True', 'blank': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) } } complete_apps = ['web']
true
true
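For orientation, here is a minimal sketch of the Django model that the forwards() migration above creates. The field arguments are read directly off the create_table call; the upload_to value is an assumption (South-era FileField usage varies), and ForeignKey takes no on_delete because this is Django 1.x.

# Sketch only: the model implied by the migration above, not the project's actual code.
from django.db import models

class CompetitionDump(models.Model):
    competition = models.ForeignKey('web.Competition', related_name='dumps')
    timestamp = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=64, default='Starting')
    data_file = models.FileField(max_length=100, null=True, blank=True,
                                 upload_to='competition_dumps')  # upload_to assumed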
1c49ea49846f00cd469bd4230329f98ec6de0734
3,872
py
Python
tensorflow/python/autograph/tests/basic_list_test.py
Stevanus-Christian/tensorflow
d44afcf5ca16c5d704c66f891b99eac804e7cd14
[ "Apache-2.0" ]
3
2022-03-09T01:39:56.000Z
2022-03-30T23:17:58.000Z
tensorflow/python/autograph/tests/basic_list_test.py
Stevanus-Christian/tensorflow
d44afcf5ca16c5d704c66f891b99eac804e7cd14
[ "Apache-2.0" ]
1
2020-08-01T05:40:12.000Z
2020-08-01T05:40:12.000Z
tensorflow/python/autograph/tests/basic_list_test.py
Stevanus-Christian/tensorflow
d44afcf5ca16c5d704c66f891b99eac804e7cd14
[ "Apache-2.0" ]
1
2022-03-22T00:45:15.000Z
2022-03-22T00:45:15.000Z
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic list operations.""" import tensorflow as tf from tensorflow.python import autograph as ag from tensorflow.python.autograph.tests import reference_test_base def type_not_annotated(n): l = [] # TODO(mdan): Here, we ought to infer the dtype and shape when i is staged. for i in range(n): l.append(i) return ag.stack(l, strict=False) def element_access(): l = [] l.append(1) l.append(2) l.append(3) ag.set_element_type(l, tf.int32) return 2 * l[1] def element_update(): l = [] l.append(1) l.append(2) l.append(3) ag.set_element_type(l, tf.int32) l[1] = 5 return ag.stack(l, strict=False) def simple_fill(n): l = [] ag.set_element_type(l, tf.int32) for i in range(n): l.append(i) return ag.stack(l, strict=False) def nested_fill(m, n): mat = [] ag.set_element_type(mat, tf.int32) for _ in range(m): l = [] ag.set_element_type(l, tf.int32) for j in range(n): l.append(j) mat.append(ag.stack(l, strict=False)) return ag.stack(mat, strict=False) def read_write_loop(n): l = [] l.append(1) l.append(1) ag.set_element_type(l, tf.int32) for i in range(2, n): l.append(l[i-1] + l[i-2]) l[i-2] = -l[i-2] return ag.stack(l, strict=False) def simple_empty(n): l = [] l.append(1) l.append(2) l.append(3) l.append(4) ag.set_element_type(l, tf.int32, ()) s = 0 for _ in range(n): s += l.pop() return ag.stack(l, strict=False), s def mutation(t, n): for i in range(n): t[i] = i return t class ReferenceTest(reference_test_base.TestCase): def setUp(self): super(ReferenceTest, self).setUp() self.autograph_opts = tf.autograph.experimental.Feature.LISTS def test_tensor_mutation(self): self.assertConvertedMatchesNative(mutation, [0] * 10, 10) def test_basic(self): self.all_inputs_tensors = True self.assertFunctionMatchesEager(element_access) self.assertFunctionMatchesEager(element_update) # TODO(mdan): This should raise a compilation, not runtime, error. with self.assertRaisesRegex( ValueError, 'cannot stack a list without knowing its element type; ' 'use set_element_type to annotate it'): self.function(type_not_annotated)(3) self.assertFunctionMatchesEager(simple_fill, 5) self.assertFunctionMatchesEager(nested_fill, 5, 3) self.assertFunctionMatchesEager(read_write_loop, 4) self.assertFunctionMatchesEager(simple_empty, 0) self.assertFunctionMatchesEager(simple_empty, 2) self.assertFunctionMatchesEager(simple_empty, 4) # TODO(mdan): Allow explicitly setting the element shape to mitigate these. # TODO(mdan): This should raise a friendlier runtime error. # The error should spell out that empty lists cannot be stacked. # Alternatively, we can also insert conditionals that construct a zero-sized # Tensor of the appropriate type and shape, but we first want to make sure # that doesn't degrade performance. 
with self.assertRaises(ValueError): self.function(simple_fill)(0) with self.assertRaises(ValueError): self.function(nested_fill)(0, 3) if __name__ == '__main__': tf.test.main()
27.267606
80
0.685692
import tensorflow as tf from tensorflow.python import autograph as ag from tensorflow.python.autograph.tests import reference_test_base def type_not_annotated(n): l = [] for i in range(n): l.append(i) return ag.stack(l, strict=False) def element_access(): l = [] l.append(1) l.append(2) l.append(3) ag.set_element_type(l, tf.int32) return 2 * l[1] def element_update(): l = [] l.append(1) l.append(2) l.append(3) ag.set_element_type(l, tf.int32) l[1] = 5 return ag.stack(l, strict=False) def simple_fill(n): l = [] ag.set_element_type(l, tf.int32) for i in range(n): l.append(i) return ag.stack(l, strict=False) def nested_fill(m, n): mat = [] ag.set_element_type(mat, tf.int32) for _ in range(m): l = [] ag.set_element_type(l, tf.int32) for j in range(n): l.append(j) mat.append(ag.stack(l, strict=False)) return ag.stack(mat, strict=False) def read_write_loop(n): l = [] l.append(1) l.append(1) ag.set_element_type(l, tf.int32) for i in range(2, n): l.append(l[i-1] + l[i-2]) l[i-2] = -l[i-2] return ag.stack(l, strict=False) def simple_empty(n): l = [] l.append(1) l.append(2) l.append(3) l.append(4) ag.set_element_type(l, tf.int32, ()) s = 0 for _ in range(n): s += l.pop() return ag.stack(l, strict=False), s def mutation(t, n): for i in range(n): t[i] = i return t class ReferenceTest(reference_test_base.TestCase): def setUp(self): super(ReferenceTest, self).setUp() self.autograph_opts = tf.autograph.experimental.Feature.LISTS def test_tensor_mutation(self): self.assertConvertedMatchesNative(mutation, [0] * 10, 10) def test_basic(self): self.all_inputs_tensors = True self.assertFunctionMatchesEager(element_access) self.assertFunctionMatchesEager(element_update) with self.assertRaisesRegex( ValueError, 'cannot stack a list without knowing its element type; ' 'use set_element_type to annotate it'): self.function(type_not_annotated)(3) self.assertFunctionMatchesEager(simple_fill, 5) self.assertFunctionMatchesEager(nested_fill, 5, 3) self.assertFunctionMatchesEager(read_write_loop, 4) self.assertFunctionMatchesEager(simple_empty, 0) self.assertFunctionMatchesEager(simple_empty, 2) self.assertFunctionMatchesEager(simple_empty, 4) with self.assertRaises(ValueError): self.function(simple_fill)(0) with self.assertRaises(ValueError): self.function(nested_fill)(0, 3) if __name__ == '__main__': tf.test.main()
true
true
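To make the pattern under test concrete, this is the smallest standalone version of the annotate-then-stack idiom exercised above; it mirrors simple_fill and assumes the same TF 1.x-era autograph API used in the file.

import tensorflow as tf
from tensorflow.python import autograph as ag

def fill(n):
    l = []
    ag.set_element_type(l, tf.int32)  # dtype annotation makes the list stackable
    for i in range(n):
        l.append(i)
    return ag.stack(l, strict=False)  # staged list -> Tensor; fails if list stays empty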
1c49ea6e4d435b59d0f0cb4c10b9268eb44ceb65
2,196
py
Python
python_example/python_example.py
markbentzjr/TroyBot
3be3afa6ae05b889c505f7ab2cc140cc368d1c05
[ "MIT" ]
1
2018-11-24T12:50:51.000Z
2018-11-24T12:50:51.000Z
python_example/python_example.py
markbentzjr/TroyBot
3be3afa6ae05b889c505f7ab2cc140cc368d1c05
[ "MIT" ]
null
null
null
python_example/python_example.py
markbentzjr/TroyBot
3be3afa6ae05b889c505f7ab2cc140cc368d1c05
[ "MIT" ]
null
null
null
import math from rlbot.agents.base_agent import BaseAgent, SimpleControllerState from rlbot.utils.structures.game_data_struct import GameTickPacket class PythonExample(BaseAgent): def initialize_agent(self): #This runs once before the bot starts up self.controller_state = SimpleControllerState() def get_output(self, packet: GameTickPacket) -> SimpleControllerState: ball_location = Vector2(packet.game_ball.physics.location.x, packet.game_ball.physics.location.y) my_car = packet.game_cars[self.index] car_location = Vector2(my_car.physics.location.x, my_car.physics.location.y) car_direction = get_car_facing_vector(my_car) car_to_ball = ball_location - car_location steer_correction_radians = car_direction.correction_to(car_to_ball) if steer_correction_radians > 0: # Positive radians in the unit circle is a turn to the left. turn = -1.0 # Negative value for a turn to the left. else: turn = 1.0 self.controller_state.throttle = 1.0 self.controller_state.steer = turn return self.controller_state class Vector2: def __init__(self, x=0, y=0): self.x = float(x) self.y = float(y) def __add__(self, val): return Vector2(self.x + val.x, self.y + val.y) def __sub__(self, val): return Vector2(self.x - val.x, self.y - val.y) def correction_to(self, ideal): # The in-game axes are left handed, so use -x current_in_radians = math.atan2(self.y, -self.x) ideal_in_radians = math.atan2(ideal.y, -ideal.x) correction = ideal_in_radians - current_in_radians # Make sure we go the 'short way' if abs(correction) > math.pi: if correction < 0: correction += 2 * math.pi else: correction -= 2 * math.pi return correction def get_car_facing_vector(car): pitch = float(car.physics.rotation.pitch) yaw = float(car.physics.rotation.yaw) facing_x = math.cos(pitch) * math.cos(yaw) facing_y = math.cos(pitch) * math.sin(yaw) return Vector2(facing_x, facing_y)
30.929577
105
0.656193
import math from rlbot.agents.base_agent import BaseAgent, SimpleControllerState from rlbot.utils.structures.game_data_struct import GameTickPacket class PythonExample(BaseAgent): def initialize_agent(self): self.controller_state = SimpleControllerState() def get_output(self, packet: GameTickPacket) -> SimpleControllerState: ball_location = Vector2(packet.game_ball.physics.location.x, packet.game_ball.physics.location.y) my_car = packet.game_cars[self.index] car_location = Vector2(my_car.physics.location.x, my_car.physics.location.y) car_direction = get_car_facing_vector(my_car) car_to_ball = ball_location - car_location steer_correction_radians = car_direction.correction_to(car_to_ball) if steer_correction_radians > 0: turn = -1.0 else: turn = 1.0 self.controller_state.throttle = 1.0 self.controller_state.steer = turn return self.controller_state class Vector2: def __init__(self, x=0, y=0): self.x = float(x) self.y = float(y) def __add__(self, val): return Vector2(self.x + val.x, self.y + val.y) def __sub__(self, val): return Vector2(self.x - val.x, self.y - val.y) def correction_to(self, ideal): current_in_radians = math.atan2(self.y, -self.x) ideal_in_radians = math.atan2(ideal.y, -ideal.x) correction = ideal_in_radians - current_in_radians if abs(correction) > math.pi: if correction < 0: correction += 2 * math.pi else: correction -= 2 * math.pi return correction def get_car_facing_vector(car): pitch = float(car.physics.rotation.pitch) yaw = float(car.physics.rotation.yaw) facing_x = math.cos(pitch) * math.cos(yaw) facing_y = math.cos(pitch) * math.sin(yaw) return Vector2(facing_x, facing_y)
true
true
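The steering math is easy to sanity-check in plain Python. This snippet reuses the Vector2 class defined in the file above; the coordinate values are arbitrary.

facing = Vector2(1, 0)                   # car facing along +x
to_ball = Vector2(3, 4) - Vector2(1, 0)  # vector from car to ball
rad = facing.correction_to(to_ball)      # signed correction in radians
steer = -1.0 if rad > 0 else 1.0         # positive radians = turn left
print(rad, steer)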
1c49eae527f808a327985c9ccfa493755d812372
2,038
py
Python
util/log.py
brunomateus/open_source_android_apps
143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa
[ "MIT" ]
2
2019-11-18T18:01:27.000Z
2021-05-13T18:16:17.000Z
util/log.py
brunomateus/open_source_android_apps
143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa
[ "MIT" ]
null
null
null
util/log.py
brunomateus/open_source_android_apps
143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa
[ "MIT" ]
3
2019-07-18T19:33:04.000Z
2021-01-13T21:13:29.000Z
"""Maintain a global logger instance.""" import logging from typing import IO, Text import github3 import urllib3 import neo4j LOG_LEVEL = logging.WARNING LOG_FORMAT = '%(asctime)s | [%(levelname)s] %(name)s: %(message)s' LEVELS = [ logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] def compute_level(verbose: int, quiet: int) -> int: """Compute a log level based on input. Log level is based on LOG_LEVEL. :param int verbose: Number of levels to increase log level. :param int quiet: Number of levels to decrease log level. :returns int: New log level. Either of NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL. """ if verbose < 0 or quiet < 0: raise ValueError('Input must not be less than 0') default_index = LEVELS.index(LOG_LEVEL) index = min(len(LEVELS) - 1, max(0, default_index + quiet - verbose)) return LEVELS[index] def lower_level_for_libraries(min_level: int): """Decrease log level for libraries.""" max_level = max(min_level, logging.WARNING) for package in [github3, urllib3, neo4j]: logger = logging.getLogger(package.__package__) logger.setLevel(max_level) def configure_logger(name: Text, stream: IO[str], verbose: int, quiet: int): """Create handler for logging to an IO stream. :param Text name: Name of logger, e.g. __package__. :param IO[str] stream: Stream to log to, e.g. sys.stderr. :param int verbose: Number of levels to increase log level. :param int quiet: Number of levels to decrease log level. """ log_level = compute_level(verbose, quiet) handler = logging.StreamHandler(stream) handler.setFormatter(logging.Formatter(LOG_FORMAT)) handler.setLevel(log_level) lower_level_for_libraries(log_level) logger = logging.getLogger(name) logger.setLevel(handler.level) logger.addHandler(handler) logger.info('Log to %s. Level: %d', stream.name, log_level)
30.41791
79
0.683023
import logging from typing import IO, Text import github3 import urllib3 import neo4j LOG_LEVEL = logging.WARNING LOG_FORMAT = '%(asctime)s | [%(levelname)s] %(name)s: %(message)s' LEVELS = [ logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] def compute_level(verbose: int, quiet: int) -> int: if verbose < 0 or quiet < 0: raise ValueError('Input must not be less than 0') default_index = LEVELS.index(LOG_LEVEL) index = min(len(LEVELS) - 1, max(0, default_index + quiet - verbose)) return LEVELS[index] def lower_level_for_libraries(min_level: int): max_level = max(min_level, logging.WARNING) for package in [github3, urllib3, neo4j]: logger = logging.getLogger(package.__package__) logger.setLevel(max_level) def configure_logger(name: Text, stream: IO[str], verbose: int, quiet: int): log_level = compute_level(verbose, quiet) handler = logging.StreamHandler(stream) handler.setFormatter(logging.Formatter(LOG_FORMAT)) handler.setLevel(log_level) lower_level_for_libraries(log_level) logger = logging.getLogger(name) logger.setLevel(handler.level) logger.addHandler(handler) logger.info('Log to %s. Level: %d', stream.name, log_level)
true
true
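Typical wiring for the helper above, as a sketch: the import path is assumed from the file location (util/log.py), and the verbose/quiet counts would normally come from repeated -v/-q CLI flags.

import sys
from util import log  # assumed import path

# verbose=2 lowers the default WARNING level two steps to DEBUG
# (see compute_level above: index 3 - 2 = 1 -> logging.DEBUG).
log.configure_logger(__package__, sys.stderr, verbose=2, quiet=0)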
1c49eb7bdb4418c37bc8952b6d14fa22f15819b6
1,033
py
Python
authen/views.py
ozcanyarimdunya/django_authen
fdb48a65d6f4ac4bb2fc09e3b7f024b3a41dd71b
[ "MIT" ]
null
null
null
authen/views.py
ozcanyarimdunya/django_authen
fdb48a65d6f4ac4bb2fc09e3b7f024b3a41dd71b
[ "MIT" ]
2
2020-02-11T23:05:32.000Z
2020-06-05T18:43:16.000Z
authen/views.py
ozcanyarimdunya/django_authen
fdb48a65d6f4ac4bb2fc09e3b7f024b3a41dd71b
[ "MIT" ]
null
null
null
from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.views import LoginView, LogoutView from django.conf import settings class Login(LoginView): template_name = "authen/login.html" def get_success_url(self): return "/" def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")}) ctx.update({"login_title": settings.AUTHEN.get("LOGIN_TITLE", "")}) return ctx class Logout(LoginRequiredMixin, LogoutView): template_name = "authen/logout.html" def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")}) ctx.update({"logout_title": settings.AUTHEN.get("LOGOUT_TITLE", "")}) ctx.update({"logout_message": settings.AUTHEN.get("LOGOUT_MESSAGE", "")}) return ctx login_view = Login.as_view() logout_view = Logout.as_view()
32.28125
81
0.684414
from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.auth.views import LoginView, LogoutView from django.conf import settings class Login(LoginView): template_name = "authen/login.html" def get_success_url(self): return "/" def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")}) ctx.update({"login_title": settings.AUTHEN.get("LOGIN_TITLE", "")}) return ctx class Logout(LoginRequiredMixin, LogoutView): template_name = "authen/logout.html" def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx.update({"company_name": settings.AUTHEN.get("COMPANY_NAME", "")}) ctx.update({"logout_title": settings.AUTHEN.get("LOGOUT_TITLE", "")}) ctx.update({"logout_message": settings.AUTHEN.get("LOGOUT_MESSAGE", "")}) return ctx login_view = Login.as_view() logout_view = Logout.as_view()
true
true
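A sketch of the settings and URL wiring these views expect. The AUTHEN keys are exactly the ones read above; the URL paths and the example values are arbitrary.

# settings.py
AUTHEN = {
    "COMPANY_NAME": "Acme Corp",
    "LOGIN_TITLE": "Sign in",
    "LOGOUT_TITLE": "Signed out",
    "LOGOUT_MESSAGE": "You have been logged out.",
}

# urls.py
from django.urls import path
from authen.views import login_view, logout_view

urlpatterns = [
    path("login/", login_view, name="login"),
    path("logout/", logout_view, name="logout"),
]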
1c49ecb6fa75797648420f768e466984724c7b7a
782
py
Python
scripts/beam_pairs.py
RolT/ZRTools
47aa156b660224fd123582c832bb5e5525c262d8
[ "BSD-3-Clause" ]
25
2015-08-06T20:15:30.000Z
2021-08-30T15:12:42.000Z
scripts/beam_pairs.py
RolT/ZRTools
47aa156b660224fd123582c832bb5e5525c262d8
[ "BSD-3-Clause" ]
2
2017-07-21T11:06:35.000Z
2020-02-27T13:20:34.000Z
scripts/beam_pairs.py
RolT/ZRTools
47aa156b660224fd123582c832bb5e5525c262d8
[ "BSD-3-Clause" ]
16
2015-08-06T21:16:55.000Z
2020-07-09T08:05:50.000Z
#!/usr/bin/env python # # Copyright 2011-2012 Johns Hopkins University (Author: Aren Jansen) # from __future__ import division import sys import os import re import string import random beamwidth = int(sys.argv[1]) baselist = [] for line in sys.stdin: base = line.strip() baselist.append(base) if beamwidth == 0: random.shuffle(baselist) for n in range(len(baselist)): for m in range(n,len(baselist)): sys.stdout.write(baselist[n]+" "+baselist[m]+"\n") else: for n in range(len(baselist)): sys.stdout.write(baselist[n]+" "+baselist[n]+"\n") if beamwidth > 1: samp = random.sample(baselist,beamwidth-1) for m in range(len(samp)): sys.stdout.write(baselist[n]+" "+samp[m]+"\n")
21.135135
69
0.621483
from __future__ import division import sys import os import re import string import random beamwidth = int(sys.argv[1]) baselist = [] for line in sys.stdin: base = line.strip() baselist.append(base) if beamwidth == 0: random.shuffle(baselist) for n in range(len(baselist)): for m in range(n,len(baselist)): sys.stdout.write(baselist[n]+" "+baselist[m]+"\n") else: for n in range(len(baselist)): sys.stdout.write(baselist[n]+" "+baselist[n]+"\n") if beamwidth > 1: samp = random.sample(baselist,beamwidth-1) for m in range(len(samp)): sys.stdout.write(baselist[n]+" "+samp[m]+"\n")
true
true
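The script is stdin/stdout driven: it reads one basename per line and emits space-separated pairs. A hypothetical invocation (the utterance names are placeholders):

import subprocess

# beamwidth 0: shuffle, then emit every unordered pair, self-pairs included;
# beamwidth K > 0: each base paired with itself plus K-1 random partners.
names = b"utt_a\nutt_b\nutt_c\n"
result = subprocess.run(["python", "beam_pairs.py", "0"],
                        input=names, capture_output=True)
print(result.stdout.decode())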
1c49ed056770080f895f299b51a9ce278a83c276
309
py
Python
oommfc/tests/test_stt.py
gamdow/oommfc
de33ae2a8348ca78d9e16fe18bc562393703c215
[ "BSD-3-Clause" ]
null
null
null
oommfc/tests/test_stt.py
gamdow/oommfc
de33ae2a8348ca78d9e16fe18bc562393703c215
[ "BSD-3-Clause" ]
null
null
null
oommfc/tests/test_stt.py
gamdow/oommfc
de33ae2a8348ca78d9e16fe18bc562393703c215
[ "BSD-3-Clause" ]
null
null
null
import pytest import oommfc as oc import micromagneticmodel.tests as mmt class TestSTT(mmt.TestSTT): def test_script(self): for arg in self.valid_args: u, beta = arg stt = oc.STT(u, beta) with pytest.raises(NotImplementedError): stt._script()
23.769231
52
0.614887
import pytest import oommfc as oc import micromagneticmodel.tests as mmt class TestSTT(mmt.TestSTT): def test_script(self): for arg in self.valid_args: u, beta = arg stt = oc.STT(u, beta) with pytest.raises(NotImplementedError): stt._script()
true
true
1c49ede342e3b5381a8d05e606d45887e3cb7caf
569
py
Python
src/Lexer/token_types.py
Sword-And-Rose/Simple-Interpreter
471b962e385ade5b18e1b1b785cd0d7529011144
[ "MIT" ]
1
2019-07-19T16:27:31.000Z
2019-07-19T16:27:31.000Z
src/Lexer/token_types.py
HorizonFTT/Simple-Interpreter
471b962e385ade5b18e1b1b785cd0d7529011144
[ "MIT" ]
null
null
null
src/Lexer/token_types.py
HorizonFTT/Simple-Interpreter
471b962e385ade5b18e1b1b785cd0d7529011144
[ "MIT" ]
null
null
null
INTEGER = 'INTEGER' REAL = 'REAL' INTEGER_CONST = 'INTEGER_CONST' REAL_CONST = 'REAL_CONST' STRING = 'STRING' STRING_CONST = 'STRING_CONST' PLUS = '+' MINUS = '-' MUL = '*' INTEGER_DIV = 'DIV' FLOAT_DIV = '/' LESS_THAN = '<' GREATER_THAN = '>' EQUAL = '=' LPAREN = '(' RPAREN = ')' ID = 'ID' ASSIGN = ':=' BEGIN = 'BEGIN' END = 'END' SEMI = ';' DOT = '.' PROGRAM = 'PROGRAM' VAR = 'VAR' COLON = ':' COMMA = ',' PROCEDURE = 'PROCEDURE' FUNCTION = 'FUNCTION' CALL = 'CALL' IF = 'IF' THEN = 'THEN' ELSE = 'ELSE' WHILE = 'WHILE' DO = 'DO' FOR = 'FOR' TO = 'TO' EOF = 'EOF'
14.973684
31
0.58348
INTEGER = 'INTEGER' REAL = 'REAL' INTEGER_CONST = 'INTEGER_CONST' REAL_CONST = 'REAL_CONST' STRING = 'STRING' STRING_CONST = 'STRING_CONST' PLUS = '+' MINUS = '-' MUL = '*' INTEGER_DIV = 'DIV' FLOAT_DIV = '/' LESS_THAN = '<' GREATER_THAN = '>' EQUAL = '=' LPAREN = '(' RPAREN = ')' ID = 'ID' ASSIGN = ':=' BEGIN = 'BEGIN' END = 'END' SEMI = ';' DOT = '.' PROGRAM = 'PROGRAM' VAR = 'VAR' COLON = ':' COMMA = ',' PROCEDURE = 'PROCEDURE' FUNCTION = 'FUNCTION' CALL = 'CALL' IF = 'IF' THEN = 'THEN' ELSE = 'ELSE' WHILE = 'WHILE' DO = 'DO' FOR = 'FOR' TO = 'TO' EOF = 'EOF'
true
true
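A minimal illustration of consuming these constants from a lexer. The Token class here is hypothetical and not part of the module above; the import path assumes the file is importable as a top-level module.

import token_types as tt  # assumed import path

class Token:
    def __init__(self, type_, value):
        self.type = type_
        self.value = value

    def __repr__(self):
        return f'Token({self.type}, {self.value!r})'

print(Token(tt.INTEGER_CONST, 42))   # Token(INTEGER_CONST, 42)
print(Token(tt.ASSIGN, ':='))        # Token(ASSIGN, ':=')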
1c49eea08a2dfc05dfb02ffb03ad6b610d781514
248
py
Python
idm/commands/bind_chat.py
Ruslan21473/IDM2
27adc319e753173e63b1d790caec993b920f2823
[ "MIT" ]
null
null
null
idm/commands/bind_chat.py
Ruslan21473/IDM2
27adc319e753173e63b1d790caec993b920f2823
[ "MIT" ]
null
null
null
idm/commands/bind_chat.py
Ruslan21473/IDM2
27adc319e753173e63b1d790caec993b920f2823
[ "MIT" ]
null
null
null
from ..objects import dp, Event from ..utils import new_message @dp.event_handle(dp.Methods.BIND_CHAT) def bind_chat(event: Event) -> str: new_message(event.api, event.chat.peer_id, message=f"✅ Chat recognized.") return "ok"
31
47
0.701613
from ..objects import dp, Event from ..utils import new_message @dp.event_handle(dp.Methods.BIND_CHAT) def bind_chat(event: Event) -> str: new_message(event.api, event.chat.peer_id, message=f"✅ Chat recognized.") return "ok"
true
true
1c49efc05a3a126007cf12dd6346fb8bbdb8cd2f
898
py
Python
simanalysis.py
EndyLab/spaceballs
331ce388674a4b01b56b36dfb3dda26729b107e6
[ "MIT" ]
1
2017-10-19T07:41:26.000Z
2017-10-19T07:41:26.000Z
simanalysis.py
EndyLab/spaceballs
331ce388674a4b01b56b36dfb3dda26729b107e6
[ "MIT" ]
1
2017-10-19T07:42:12.000Z
2017-10-19T07:42:12.000Z
simanalysis.py
EndyLab/spaceballs
331ce388674a4b01b56b36dfb3dda26729b107e6
[ "MIT" ]
null
null
null
""" Created by Akshay Maheshwari 09/05/2017 Produces analysis figures from experiment data """ from simanalysis_methods import * import matplotlib.pyplot as plt import time; start_time=time.time() expt_name = "171018_2219" outputlist = loadOutputList(expt_name,'molpos') histlistpklpath = combinePkls(expt_name,outputlist,covertime=True) #histlistpklpath = saveHist(outputlist, expt_name,bins=10,diameter=0.1,molposTS=1e-7) fig = plotHist(histlistpklpath,expt_name,diameter=0.1, graphs="all", logscale=False,step=1,start=1.25,simtime=1,x_label="R_crowder (nm)") fig.suptitle("Effects of crowding molecule size on covertime, and dispersion of a single tracked molecule. \n[1s. sim] -- R_tracked=7.25nm -- R_crowder=[1.25nm,2.25nm,...9.25nm] -- $\phi$=0.25 -- time step=1e-7s.") plt.savefig("data/"+expt_name+"/"+expt_name+"_analysis1.png") print("--- %s seconds ---" % (time.time() - start_time))
44.9
214
0.752784
from simanalysis_methods import * import matplotlib.pyplot as plt import time; start_time=time.time() expt_name = "171018_2219" outputlist = loadOutputList(expt_name,'molpos') histlistpklpath = combinePkls(expt_name,outputlist,covertime=True) fig = plotHist(histlistpklpath,expt_name,diameter=0.1, graphs="all", logscale=False,step=1,start=1.25,simtime=1,x_label="R_crowder (nm)") fig.suptitle("Effects of crowding molecule size on covertime, and dispersion of a single tracked molecule. \n[1s. sim] -- R_tracked=7.25nm -- R_crowder=[1.25nm,2.25nm,...9.25nm] -- $\phi$=0.25 -- time step=1e-7s.") plt.savefig("data/"+expt_name+"/"+expt_name+"_analysis1.png") print("--- %s seconds ---" % (time.time() - start_time))
true
true
1c49f0117ce0749903b3f36e283ae1b91cd3b22f
825
py
Python
GameMenu.py
KRHS-GameProgramming-2015/King-of-the-Pile
1368c97ba8124e27f74c6f8aae8e1f8362126934
[ "BSD-2-Clause" ]
1
2015-12-15T17:39:08.000Z
2015-12-15T17:39:08.000Z
GameMenu.py
KRHS-GameProgramming-2015/King-of-the-Pile
1368c97ba8124e27f74c6f8aae8e1f8362126934
[ "BSD-2-Clause" ]
4
2015-12-21T17:06:51.000Z
2016-02-10T16:50:57.000Z
GameMenu.py
KRHS-GameProgramming-2015/King-of-the-Pile
1368c97ba8124e27f74c6f8aae8e1f8362126934
[ "BSD-2-Clause" ]
null
null
null
import sys, pygame, math, random from Button import * class Menu(): def __init__(self, images): self.images = [] for image in images: #print image self.images += [pygame.image.load(image)] self.image = self.images[0] self.rect = self.image.get_rect() self.originalImage = self.image self.width, self.height = self.image.get_size() def update(self): for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_q: sys.exit() elif event.type == pygame.KEYUP: pass
21.153846
56
0.466667
import sys, pygame, math, random from Button import * class Menu(): def __init__(self, images): self.images = [] for image in images: self.images += [pygame.image.load(image)] self.image = self.images[0] self.rect = self.image.get_rect() self.originalImage = self.image self.width, self.height = self.image.get_size() def update(self): for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_q: sys.exit() elif event.type == pygame.KEYUP: pass
true
true
1c49f01f22cbc23cfecb70fb36d3a72ff0991e5f
8,685
py
Python
python/paddle_serving_app/local_predict.py
hysunflower/Serving
50d0c2900f3385b049f76b91e38cc69d8e8a102d
[ "Apache-2.0" ]
null
null
null
python/paddle_serving_app/local_predict.py
hysunflower/Serving
50d0c2900f3385b049f76b91e38cc69d8e8a102d
[ "Apache-2.0" ]
null
null
null
python/paddle_serving_app/local_predict.py
hysunflower/Serving
50d0c2900f3385b049f76b91e38cc69d8e8a102d
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ import os import google.protobuf.text_format import numpy as np import argparse import paddle.fluid as fluid import paddle.inference as inference from .proto import general_model_config_pb2 as m_config from paddle.fluid.core import PaddleTensor from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import create_paddle_predictor import logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s") logger = logging.getLogger("fluid") logger.setLevel(logging.INFO) class LocalPredictor(object): """ Prediction in the current process of the local environment, in process call, Compared with RPC/HTTP, LocalPredictor has better performance, because of no network and packaging load. """ def __init__(self): self.feed_names_ = [] self.fetch_names_ = [] self.feed_types_ = {} self.fetch_types_ = {} self.feed_shapes_ = {} self.feed_names_to_idx_ = {} self.fetch_names_to_idx_ = {} self.fetch_names_to_type_ = {} def load_model_config(self, model_path, use_gpu=False, gpu_id=0, use_profile=False, thread_num=1, mem_optim=True, ir_optim=False, use_trt=False, use_lite=False, use_xpu=False, use_feed_fetch_ops=False): """ Load model config and set the engine config for the paddle predictor Args: model_path: model config path. use_gpu: calculating with gpu, False default. gpu_id: gpu id, 0 default. use_profile: use predictor profiles, False default. thread_num: thread nums, default 1. mem_optim: memory optimization, True default. ir_optim: open calculation chart optimization, False default. use_trt: use nvidia TensorRT optimization, False default use_lite: use Paddle-Lite Engint, False default use_xpu: run predict on Baidu Kunlun, False default use_feed_fetch_ops: use feed/fetch ops, False default. 
""" client_config = "{}/serving_server_conf.prototxt".format(model_path) model_conf = m_config.GeneralModelConfig() f = open(client_config, 'r') model_conf = google.protobuf.text_format.Merge( str(f.read()), model_conf) config = AnalysisConfig(model_path) logger.info("load_model_config params: model_path:{}, use_gpu:{},\ gpu_id:{}, use_profile:{}, thread_num:{}, mem_optim:{}, ir_optim:{},\ use_trt:{}, use_lite:{}, use_xpu: {}, use_feed_fetch_ops:{}".format( model_path, use_gpu, gpu_id, use_profile, thread_num, mem_optim, ir_optim, use_trt, use_lite, use_xpu, use_feed_fetch_ops)) self.feed_names_ = [var.alias_name for var in model_conf.feed_var] self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var] self.feed_names_to_idx_ = {} self.fetch_names_to_idx_ = {} for i, var in enumerate(model_conf.feed_var): self.feed_names_to_idx_[var.alias_name] = i self.feed_types_[var.alias_name] = var.feed_type self.feed_shapes_[var.alias_name] = var.shape for i, var in enumerate(model_conf.fetch_var): self.fetch_names_to_idx_[var.alias_name] = i self.fetch_names_to_type_[var.alias_name] = var.fetch_type if use_profile: config.enable_profile() if mem_optim: config.enable_memory_optim() config.switch_ir_optim(ir_optim) config.set_cpu_math_library_num_threads(thread_num) config.switch_use_feed_fetch_ops(use_feed_fetch_ops) config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass") if not use_gpu: config.disable_gpu() else: config.enable_use_gpu(100, gpu_id) if use_trt: config.enable_tensorrt_engine( workspace_size=1 << 20, max_batch_size=32, min_subgraph_size=3, use_static=False, use_calib_mode=False) if use_lite: config.enable_lite_engine( precision_mode=inference.PrecisionType.Float32, zero_copy=True, passes_filter=[], ops_filter=[]) if use_xpu: # 2MB l3 cache config.enable_xpu(8 * 1024 * 1024) self.predictor = create_paddle_predictor(config) def predict(self, feed=None, fetch=None, batch=False, log_id=0): """ Predict locally Args: feed: feed var fetch: fetch var batch: batch data or not, False default.If batch is False, a new dimension is added to header of the shape[np.newaxis]. 
log_id: for logging Returns: fetch_map: dict """ if feed is None or fetch is None: raise ValueError("You should specify feed and fetch for prediction") fetch_list = [] if isinstance(fetch, str): fetch_list = [fetch] elif isinstance(fetch, list): fetch_list = fetch else: raise ValueError("Fetch only accepts string and list of string") feed_batch = [] if isinstance(feed, dict): feed_batch.append(feed) elif isinstance(feed, list): feed_batch = feed else: raise ValueError("Feed only accepts dict and list of dict") int_slot_batch = [] float_slot_batch = [] int_feed_names = [] float_feed_names = [] int_shape = [] float_shape = [] fetch_names = [] counter = 0 batch_size = len(feed_batch) for key in fetch_list: if key in self.fetch_names_: fetch_names.append(key) if len(fetch_names) == 0: raise ValueError( "Fetch names should not be empty or out of saved fetch list.") return {} input_names = self.predictor.get_input_names() for name in input_names: if isinstance(feed[name], list): feed[name] = np.array(feed[name]).reshape(self.feed_shapes_[ name]) if self.feed_types_[name] == 0: feed[name] = feed[name].astype("int64") elif self.feed_types_[name] == 1: feed[name] = feed[name].astype("float32") elif self.feed_types_[name] == 2: feed[name] = feed[name].astype("int32") else: raise ValueError("local predictor receives wrong data type") input_tensor = self.predictor.get_input_tensor(name) if "{}.lod".format(name) in feed: input_tensor.set_lod([feed["{}.lod".format(name)]]) if batch == False: input_tensor.copy_from_cpu(feed[name][np.newaxis, :]) else: input_tensor.copy_from_cpu(feed[name]) output_tensors = [] output_names = self.predictor.get_output_names() for output_name in output_names: output_tensor = self.predictor.get_output_tensor(output_name) output_tensors.append(output_tensor) outputs = [] self.predictor.zero_copy_run() for output_tensor in output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) fetch_map = {} for i, name in enumerate(fetch): fetch_map[name] = outputs[i] if len(output_tensors[i].lod()) > 0: fetch_map[name + ".lod"] = np.array(output_tensors[i].lod()[ 0]).astype('int32') return fetch_map
38.092105
81
0.595855
import os import google.protobuf.text_format import numpy as np import argparse import paddle.fluid as fluid import paddle.inference as inference from .proto import general_model_config_pb2 as m_config from paddle.fluid.core import PaddleTensor from paddle.fluid.core import AnalysisConfig from paddle.fluid.core import create_paddle_predictor import logging logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s") logger = logging.getLogger("fluid") logger.setLevel(logging.INFO) class LocalPredictor(object): def __init__(self): self.feed_names_ = [] self.fetch_names_ = [] self.feed_types_ = {} self.fetch_types_ = {} self.feed_shapes_ = {} self.feed_names_to_idx_ = {} self.fetch_names_to_idx_ = {} self.fetch_names_to_type_ = {} def load_model_config(self, model_path, use_gpu=False, gpu_id=0, use_profile=False, thread_num=1, mem_optim=True, ir_optim=False, use_trt=False, use_lite=False, use_xpu=False, use_feed_fetch_ops=False): client_config = "{}/serving_server_conf.prototxt".format(model_path) model_conf = m_config.GeneralModelConfig() f = open(client_config, 'r') model_conf = google.protobuf.text_format.Merge( str(f.read()), model_conf) config = AnalysisConfig(model_path) logger.info("load_model_config params: model_path:{}, use_gpu:{},\ gpu_id:{}, use_profile:{}, thread_num:{}, mem_optim:{}, ir_optim:{},\ use_trt:{}, use_lite:{}, use_xpu: {}, use_feed_fetch_ops:{}".format( model_path, use_gpu, gpu_id, use_profile, thread_num, mem_optim, ir_optim, use_trt, use_lite, use_xpu, use_feed_fetch_ops)) self.feed_names_ = [var.alias_name for var in model_conf.feed_var] self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var] self.feed_names_to_idx_ = {} self.fetch_names_to_idx_ = {} for i, var in enumerate(model_conf.feed_var): self.feed_names_to_idx_[var.alias_name] = i self.feed_types_[var.alias_name] = var.feed_type self.feed_shapes_[var.alias_name] = var.shape for i, var in enumerate(model_conf.fetch_var): self.fetch_names_to_idx_[var.alias_name] = i self.fetch_names_to_type_[var.alias_name] = var.fetch_type if use_profile: config.enable_profile() if mem_optim: config.enable_memory_optim() config.switch_ir_optim(ir_optim) config.set_cpu_math_library_num_threads(thread_num) config.switch_use_feed_fetch_ops(use_feed_fetch_ops) config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass") if not use_gpu: config.disable_gpu() else: config.enable_use_gpu(100, gpu_id) if use_trt: config.enable_tensorrt_engine( workspace_size=1 << 20, max_batch_size=32, min_subgraph_size=3, use_static=False, use_calib_mode=False) if use_lite: config.enable_lite_engine( precision_mode=inference.PrecisionType.Float32, zero_copy=True, passes_filter=[], ops_filter=[]) if use_xpu: config.enable_xpu(8 * 1024 * 1024) self.predictor = create_paddle_predictor(config) def predict(self, feed=None, fetch=None, batch=False, log_id=0): if feed is None or fetch is None: raise ValueError("You should specify feed and fetch for prediction") fetch_list = [] if isinstance(fetch, str): fetch_list = [fetch] elif isinstance(fetch, list): fetch_list = fetch else: raise ValueError("Fetch only accepts string and list of string") feed_batch = [] if isinstance(feed, dict): feed_batch.append(feed) elif isinstance(feed, list): feed_batch = feed else: raise ValueError("Feed only accepts dict and list of dict") int_slot_batch = [] float_slot_batch = [] int_feed_names = [] float_feed_names = [] int_shape = [] float_shape = [] fetch_names = [] counter = 0 batch_size = len(feed_batch) for key in fetch_list: if key in 
self.fetch_names_: fetch_names.append(key) if len(fetch_names) == 0: raise ValueError( "Fetch names should not be empty or out of saved fetch list.") return {} input_names = self.predictor.get_input_names() for name in input_names: if isinstance(feed[name], list): feed[name] = np.array(feed[name]).reshape(self.feed_shapes_[ name]) if self.feed_types_[name] == 0: feed[name] = feed[name].astype("int64") elif self.feed_types_[name] == 1: feed[name] = feed[name].astype("float32") elif self.feed_types_[name] == 2: feed[name] = feed[name].astype("int32") else: raise ValueError("local predictor receives wrong data type") input_tensor = self.predictor.get_input_tensor(name) if "{}.lod".format(name) in feed: input_tensor.set_lod([feed["{}.lod".format(name)]]) if batch == False: input_tensor.copy_from_cpu(feed[name][np.newaxis, :]) else: input_tensor.copy_from_cpu(feed[name]) output_tensors = [] output_names = self.predictor.get_output_names() for output_name in output_names: output_tensor = self.predictor.get_output_tensor(output_name) output_tensors.append(output_tensor) outputs = [] self.predictor.zero_copy_run() for output_tensor in output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) fetch_map = {} for i, name in enumerate(fetch): fetch_map[name] = outputs[i] if len(output_tensors[i].lod()) > 0: fetch_map[name + ".lod"] = np.array(output_tensors[i].lod()[ 0]).astype('int32') return fetch_map
true
true
1c49f06dffa7d3da20a9bd4b1507a6abb441f68a
98
py
Python
src/sports_halls/apps.py
codacy-badger/hbscorez
215e4d2617ac9be91bb9d561bbfc552349cd4781
[ "MIT" ]
12
2018-03-20T21:38:53.000Z
2021-10-31T10:00:12.000Z
src/sports_halls/apps.py
codacy-badger/hbscorez
215e4d2617ac9be91bb9d561bbfc552349cd4781
[ "MIT" ]
79
2018-03-18T14:26:47.000Z
2022-03-01T15:51:40.000Z
src/sports_halls/apps.py
codacy-badger/hbscorez
215e4d2617ac9be91bb9d561bbfc552349cd4781
[ "MIT" ]
4
2018-05-18T15:39:56.000Z
2020-10-29T09:28:41.000Z
from django.apps import AppConfig class SportsHallsConfig(AppConfig): name = 'sports_halls'
16.333333
35
0.77551
from django.apps import AppConfig class SportsHallsConfig(AppConfig): name = 'sports_halls'
true
true
1c49f0af27c73266029ce93f22052346da2d9b95
5,439
py
Python
zerver/lib/bugdown/api_code_examples.py
fearless0307/zulip
378d14af7ea73a9a83c7245706cd918bec5a37bf
[ "Apache-2.0" ]
4
2019-06-04T09:06:53.000Z
2019-06-04T09:07:47.000Z
zerver/lib/bugdown/api_code_examples.py
fearless0307/zulip
378d14af7ea73a9a83c7245706cd918bec5a37bf
[ "Apache-2.0" ]
10
2019-02-26T11:10:42.000Z
2019-02-26T14:30:24.000Z
zerver/lib/bugdown/api_code_examples.py
fearless0307/zulip
378d14af7ea73a9a83c7245706cd918bec5a37bf
[ "Apache-2.0" ]
1
2020-01-07T15:49:54.000Z
2020-01-07T15:49:54.000Z
import re import json import inspect from markdown.extensions import Extension from markdown.preprocessors import Preprocessor from typing import Any, Dict, Optional, List import markdown import zerver.lib.api_test_helpers from zerver.lib.openapi import get_openapi_fixture MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}') CODE_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}') PYTHON_CLIENT_CONFIG = """ #!/usr/bin/env python3 import zulip # Pass the path to your zuliprc file here. client = zulip.Client(config_file="~/zuliprc") """ PYTHON_CLIENT_ADMIN_CONFIG = """ #!/usr/bin/env python import zulip # The user for this zuliprc file must be an organization administrator client = zulip.Client(config_file="~/zuliprc-admin") """ def extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]: start = -1 end = -1 for line in source: match = CODE_EXAMPLE_REGEX.search(line) if match: if match.group(1) == 'start': start = source.index(line) elif match.group(1) == 'end': end = source.index(line) break if (start == -1 and end == -1): return snippet snippet.extend(source[start + 1: end]) snippet.append(' print(result)') snippet.append('\n') source = source[end + 1:] return extract_python_code_example(source, snippet) def render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]: method = zerver.lib.api_test_helpers.TEST_FUNCTIONS[function] function_source_lines = inspect.getsourcelines(method)[0] if admin_config: config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines() else: config = PYTHON_CLIENT_CONFIG.splitlines() snippet = extract_python_code_example(function_source_lines, []) code_example = [] code_example.append('```python') code_example.extend(config) for line in snippet: # Remove one level of indentation and strip newlines code_example.append(line[4:].rstrip()) code_example.append('```') return code_example SUPPORTED_LANGUAGES = { 'python': { 'client_config': PYTHON_CLIENT_CONFIG, 'admin_config': PYTHON_CLIENT_ADMIN_CONFIG, 'render': render_python_code_example, } } # type: Dict[str, Any] class APICodeExamplesGenerator(Extension): def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None: md.preprocessors.add( 'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin' ) class APICodeExamplesPreprocessor(Preprocessor): def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None: super(APICodeExamplesPreprocessor, self).__init__(md) def run(self, lines: List[str]) -> List[str]: done = False while not done: for line in lines: loc = lines.index(line) match = MACRO_REGEXP.search(line) if match: language = match.group(2) function = match.group(3) key = match.group(4) argument = match.group(6) if key == 'fixture': if argument: text = self.render_fixture(function, name=argument) else: text = self.render_fixture(function) elif key == 'example': if argument == 'admin_config=True': text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True) else: text = SUPPORTED_LANGUAGES[language]['render'](function) # The line that contains the directive to include the macro # may be preceded or followed by text or tags, in that case # we need to make sure that any preceding or following text # stays the same. 
line_split = MACRO_REGEXP.split(line, maxsplit=0) preceding = line_split[0] following = line_split[-1] text = [preceding] + text + [following] lines = lines[:loc] + text + lines[loc+1:] break else: done = True return lines def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]: fixture = [] # We assume that if the function we're rendering starts with a slash # it's a path in the endpoint and therefore it uses the new OpenAPI # format. if function.startswith('/'): path, method = function.rsplit(':', 1) fixture_dict = get_openapi_fixture(path, method, name) else: fixture_dict = zerver.lib.api_test_helpers.FIXTURES[function] fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True, separators=(',', ': ')) fixture.append('```') fixture.extend(fixture_json.splitlines()) fixture.append('```') return fixture def makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator: return APICodeExamplesGenerator(**kwargs)
33.99375
116
0.596249
import re import json import inspect from markdown.extensions import Extension from markdown.preprocessors import Preprocessor from typing import Any, Dict, Optional, List import markdown import zerver.lib.api_test_helpers from zerver.lib.openapi import get_openapi_fixture MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}') CODE_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}') PYTHON_CLIENT_CONFIG = """ #!/usr/bin/env python3 import zulip # Pass the path to your zuliprc file here. client = zulip.Client(config_file="~/zuliprc") """ PYTHON_CLIENT_ADMIN_CONFIG = """ #!/usr/bin/env python import zulip # The user for this zuliprc file must be an organization administrator client = zulip.Client(config_file="~/zuliprc-admin") """ def extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]: start = -1 end = -1 for line in source: match = CODE_EXAMPLE_REGEX.search(line) if match: if match.group(1) == 'start': start = source.index(line) elif match.group(1) == 'end': end = source.index(line) break if (start == -1 and end == -1): return snippet snippet.extend(source[start + 1: end]) snippet.append(' print(result)') snippet.append('\n') source = source[end + 1:] return extract_python_code_example(source, snippet) def render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]: method = zerver.lib.api_test_helpers.TEST_FUNCTIONS[function] function_source_lines = inspect.getsourcelines(method)[0] if admin_config: config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines() else: config = PYTHON_CLIENT_CONFIG.splitlines() snippet = extract_python_code_example(function_source_lines, []) code_example = [] code_example.append('```python') code_example.extend(config) for line in snippet: code_example.append(line[4:].rstrip()) code_example.append('```') return code_example SUPPORTED_LANGUAGES = { 'python': { 'client_config': PYTHON_CLIENT_CONFIG, 'admin_config': PYTHON_CLIENT_ADMIN_CONFIG, 'render': render_python_code_example, } } class APICodeExamplesGenerator(Extension): def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None: md.preprocessors.add( 'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin' ) class APICodeExamplesPreprocessor(Preprocessor): def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None: super(APICodeExamplesPreprocessor, self).__init__(md) def run(self, lines: List[str]) -> List[str]: done = False while not done: for line in lines: loc = lines.index(line) match = MACRO_REGEXP.search(line) if match: language = match.group(2) function = match.group(3) key = match.group(4) argument = match.group(6) if key == 'fixture': if argument: text = self.render_fixture(function, name=argument) else: text = self.render_fixture(function) elif key == 'example': if argument == 'admin_config=True': text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True) else: text = SUPPORTED_LANGUAGES[language]['render'](function) line_split = MACRO_REGEXP.split(line, maxsplit=0) preceding = line_split[0] following = line_split[-1] text = [preceding] + text + [following] lines = lines[:loc] + text + lines[loc+1:] break else: done = True return lines def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]: fixture = [] # it's a path in the endpoint and therefore it uses the new OpenAPI if function.startswith('/'): path, method = function.rsplit(':', 1) fixture_dict = 
get_openapi_fixture(path, method, name) else: fixture_dict = zerver.lib.api_test_helpers.FIXTURES[function] fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True, separators=(',', ': ')) fixture.append('```') fixture.extend(fixture_json.splitlines()) fixture.append('```') return fixture def makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator: return APICodeExamplesGenerator(**kwargs)
true
true
1c49f0b92cb9d9dcfa236360b7e8067165742279
15,373
py
Python
tests/test_absorption_spectrum.py
foggie-sims/trident
c5902a066ed87dc760f620d502c3e644bf93d450
[ "BSD-3-Clause-Clear" ]
null
null
null
tests/test_absorption_spectrum.py
foggie-sims/trident
c5902a066ed87dc760f620d502c3e644bf93d450
[ "BSD-3-Clause-Clear" ]
5
2020-11-18T11:58:08.000Z
2022-02-24T10:40:50.000Z
tests/test_absorption_spectrum.py
foggie-sims/trident
c5902a066ed87dc760f620d502c3e644bf93d450
[ "BSD-3-Clause-Clear" ]
1
2022-03-29T17:44:56.000Z
2022-03-29T17:44:56.000Z
""" Unit test for the AbsorptionSpectrum analysis module """ #----------------------------------------------------------------------------- # Copyright (c) 2014-2017, yt Development Team. # Copyright (c) 2017, Trident Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. #----------------------------------------------------------------------------- import numpy as np import os from yt.loaders import load from yt.testing import \ assert_allclose_units, \ assert_almost_equal from trident.absorption_spectrum.absorption_line import \ voigt from trident.absorption_spectrum.absorption_spectrum import \ AbsorptionSpectrum from trident.light_ray import \ LightRay from trident.testing import \ answer_test_data_dir, \ assert_array_rel_equal, \ h5_answer_test, \ TempDirTest COSMO_PLUS = os.path.join(answer_test_data_dir, "enzo_cosmology_plus/AMRCosmology.enzo") COSMO_PLUS_SINGLE = os.path.join(answer_test_data_dir, "enzo_cosmology_plus/RD0009/RD0009") GIZMO_PLUS = os.path.join(answer_test_data_dir, "gizmo_cosmology_plus/N128L16.param") GIZMO_PLUS_SINGLE = os.path.join(answer_test_data_dir, "gizmo_cosmology_plus/snap_N128L16_151.hdf5") ISO_GALAXY = os.path.join(answer_test_data_dir, "IsolatedGalaxy/galaxy0030/galaxy0030") FIRE = os.path.join(answer_test_data_dir, "FIRE_M12i_ref11/snapshot_600.hdf5") class AbsorptionSpectrumTest(TempDirTest): @h5_answer_test(assert_array_rel_equal, decimals=13) def test_absorption_spectrum_cosmo(self): """ This test generates an absorption spectrum from a compound light ray on a grid dataset """ lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03) lr.make_light_ray(seed=1234567, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(900.0, 1800.0, 10000) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 912.323660 # Angstroms normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=15) def test_absorption_spectrum_non_cosmo(self): """ This test generates an absorption spectrum from a simple light ray on a grid dataset """ lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] ray_end = [1,1,1] lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=15) def test_absorption_spectrum_non_cosmo_novpec(self): """ This test generates an absorption spectrum from a simple light ray on a grid dataset """ lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] 
ray_end = [1,1,1] lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5', use_peculiar_velocity=False) sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=False) return filename def test_equivalent_width_conserved(self): """ This tests that the equivalent width of the optical depth is conserved regardless of the bin width employed in wavelength space. Unresolved lines should still deposit optical depth into the spectrum. """ lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] ray_end = [1,1,1] lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') my_label = 'HI Lya' field = 'H_p0_number_density' wave = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 lambda_min= 1200 lambda_max= 1300 lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1] total_tau = [] for lambda_bin_width in lambda_bin_widths: n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1 sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max, n_lambda=n_lambda) sp.add_line(my_label, field, wave, f_value, gamma, mass) wavelength, flux = sp.make_spectrum('lightray.h5') total_tau.append((lambda_bin_width * sp.tau_field).sum()) # assure that the total tau values are all within 1e-3 of each other for tau in total_tau: assert_almost_equal(tau, total_tau[0], 3) def test_absorption_spectrum_fits(self): """ This test generates an absorption spectrum and saves it as a fits file. 
""" lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] ray_end = [1,1,1] lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(900.0, 1800.0, 10000) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 912.323660 # Angstroms normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) wavelength, flux = sp.make_spectrum('lightray.h5', output_file='spectrum.fits', line_list_file='lines.txt', use_peculiar_velocity=True) @h5_answer_test(assert_array_rel_equal, decimals=12) def test_absorption_spectrum_cosmo_sph(self): """ This test generates an absorption spectrum from a compound light ray on a particle dataset """ lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01) lr.make_light_ray(seed=1234567, fields=[('gas', 'temperature'), ('gas', 'H_p0_number_density')], data_filename='lightray.h5') sp = AbsorptionSpectrum(900.0, 1800.0, 10000) my_label = 'HI Lya' field = ('gas', 'H_p0_number_density') wavelength = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'HI Lya' field = ('gas', 'H_p0_number_density') wavelength = 912.323660 # Angstroms normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=16) def test_absorption_spectrum_non_cosmo_sph(self): """ This test generates an absorption spectrum from a simple light ray on a particle dataset """ ds = load(GIZMO_PLUS_SINGLE) lr = LightRay(ds) ray_start = ds.domain_left_edge ray_end = ds.domain_right_edge lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=[('gas', 'temperature'), ('gas', 'H_p0_number_density')], data_filename='lightray.h5') sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) my_label = 'HI Lya' field = ('gas', 'H_p0_number_density') wavelength = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=15) def test_absorption_spectrum_with_continuum(self): """ This test generates an absorption spectrum from a simple light ray on a grid dataset and adds Lyman alpha and Lyman continuum to it """ ds = load(ISO_GALAXY) lr = LightRay(ds) ray_start = ds.domain_left_edge ray_end = ds.domain_right_edge lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(800.0, 1300.0, 5001) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 # Angstromss f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'Ly C' 
field = 'H_p0_number_density' wavelength = 912.323660 # Angstroms normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename def test_absorption_spectrum_with_zero_field(self): """ This test generates an absorption spectrum with some particle dataset """ ds = load(FIRE) lr = LightRay(ds) # Define species and associated parameters to add to continuum # Parameters used for both adding the transition to the spectrum # and for fitting # Note that for single species that produce multiple lines # (as in the OVI doublet), 'numLines' will be equal to the number # of lines, and f,gamma, and wavelength will have multiple values. HI_parameters = { 'name': 'HI', 'field': 'H_p0_number_density', 'f': [.4164], 'Gamma': [6.265E8], 'wavelength': [1215.67], 'mass': 1.00794, 'numLines': 1, 'maxN': 1E22, 'minN': 1E11, 'maxb': 300, 'minb': 1, 'maxz': 6, 'minz': 0, 'init_b': 30, 'init_N': 1E14 } species_dicts = {'HI': HI_parameters} # Get all fields that need to be added to the light ray fields = [('gas','temperature')] for s, params in species_dicts.items(): fields.append(params['field']) # With a single dataset, a start_position and # end_position or trajectory must be given. # Trajectory should be given as (r, theta, phi) lr.make_light_ray( start_position=ds.arr([0., 0., 0.], 'unitary'), end_position=ds.arr([1., 1., 1.], 'unitary'), solution_filename='test_lightraysolution.txt', data_filename='test_lightray.h5', fields=fields) # Create an AbsorptionSpectrum object extending from # lambda = 900 to lambda = 1800, with 10000 pixels sp = AbsorptionSpectrum(900.0, 1400.0, 50000) # Iterate over species for s, params in species_dicts.items(): # Iterate over transitions for a single species for i in range(params['numLines']): # Add the lines to the spectrum sp.add_line( s, params['field'], params['wavelength'][i], params['f'][i], params['Gamma'][i], params['mass'], label_threshold=1.e10) # Make and save spectrum wavelength, flux = sp.make_spectrum( 'test_lightray.h5', output_file='test_spectrum.h5', line_list_file='test_lines.txt', use_peculiar_velocity=True)
36.34279
83
0.564366
import numpy as np import os from yt.loaders import load from yt.testing import \ assert_allclose_units, \ assert_almost_equal from trident.absorption_spectrum.absorption_line import \ voigt from trident.absorption_spectrum.absorption_spectrum import \ AbsorptionSpectrum from trident.light_ray import \ LightRay from trident.testing import \ answer_test_data_dir, \ assert_array_rel_equal, \ h5_answer_test, \ TempDirTest COSMO_PLUS = os.path.join(answer_test_data_dir, "enzo_cosmology_plus/AMRCosmology.enzo") COSMO_PLUS_SINGLE = os.path.join(answer_test_data_dir, "enzo_cosmology_plus/RD0009/RD0009") GIZMO_PLUS = os.path.join(answer_test_data_dir, "gizmo_cosmology_plus/N128L16.param") GIZMO_PLUS_SINGLE = os.path.join(answer_test_data_dir, "gizmo_cosmology_plus/snap_N128L16_151.hdf5") ISO_GALAXY = os.path.join(answer_test_data_dir, "IsolatedGalaxy/galaxy0030/galaxy0030") FIRE = os.path.join(answer_test_data_dir, "FIRE_M12i_ref11/snapshot_600.hdf5") class AbsorptionSpectrumTest(TempDirTest): @h5_answer_test(assert_array_rel_equal, decimals=13) def test_absorption_spectrum_cosmo(self): lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03) lr.make_light_ray(seed=1234567, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(900.0, 1800.0, 10000) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 912.323660 normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=15) def test_absorption_spectrum_non_cosmo(self): lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] ray_end = [1,1,1] lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=15) def test_absorption_spectrum_non_cosmo_novpec(self): lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] ray_end = [1,1,1] lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5', use_peculiar_velocity=False) sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=False) return filename def test_equivalent_width_conserved(self): lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] ray_end = [1,1,1] 
lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') my_label = 'HI Lya' field = 'H_p0_number_density' wave = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 lambda_min= 1200 lambda_max= 1300 lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1] total_tau = [] for lambda_bin_width in lambda_bin_widths: n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1 sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max, n_lambda=n_lambda) sp.add_line(my_label, field, wave, f_value, gamma, mass) wavelength, flux = sp.make_spectrum('lightray.h5') total_tau.append((lambda_bin_width * sp.tau_field).sum()) for tau in total_tau: assert_almost_equal(tau, total_tau[0], 3) def test_absorption_spectrum_fits(self): lr = LightRay(COSMO_PLUS_SINGLE) ray_start = [0,0,0] ray_end = [1,1,1] lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(900.0, 1800.0, 10000) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 912.323660 normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) wavelength, flux = sp.make_spectrum('lightray.h5', output_file='spectrum.fits', line_list_file='lines.txt', use_peculiar_velocity=True) @h5_answer_test(assert_array_rel_equal, decimals=12) def test_absorption_spectrum_cosmo_sph(self): lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01) lr.make_light_ray(seed=1234567, fields=[('gas', 'temperature'), ('gas', 'H_p0_number_density')], data_filename='lightray.h5') sp = AbsorptionSpectrum(900.0, 1800.0, 10000) my_label = 'HI Lya' field = ('gas', 'H_p0_number_density') wavelength = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'HI Lya' field = ('gas', 'H_p0_number_density') wavelength = 912.323660 normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=16) def test_absorption_spectrum_non_cosmo_sph(self): ds = load(GIZMO_PLUS_SINGLE) lr = LightRay(ds) ray_start = ds.domain_left_edge ray_end = ds.domain_right_edge lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=[('gas', 'temperature'), ('gas', 'H_p0_number_density')], data_filename='lightray.h5') sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) my_label = 'HI Lya' field = ('gas', 'H_p0_number_density') wavelength = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename @h5_answer_test(assert_array_rel_equal, decimals=15) def test_absorption_spectrum_with_continuum(self): ds = load(ISO_GALAXY) lr = LightRay(ds) ray_start = ds.domain_left_edge ray_end = ds.domain_right_edge 
lr.make_light_ray(start_position=ray_start, end_position=ray_end, fields=['temperature', 'density', 'H_p0_number_density'], data_filename='lightray.h5') sp = AbsorptionSpectrum(800.0, 1300.0, 5001) my_label = 'HI Lya' field = 'H_p0_number_density' wavelength = 1215.6700 f_value = 4.164E-01 gamma = 6.265e+08 mass = 1.00794 sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) my_label = 'Ly C' field = 'H_p0_number_density' wavelength = 912.323660 normalization = 1.6e17 index = 3.0 sp.add_continuum(my_label, field, wavelength, normalization, index) filename = "spectrum.h5" wavelength, flux = sp.make_spectrum('lightray.h5', output_file=filename, line_list_file='lines.txt', use_peculiar_velocity=True) return filename def test_absorption_spectrum_with_zero_field(self): ds = load(FIRE) lr = LightRay(ds) HI_parameters = { 'name': 'HI', 'field': 'H_p0_number_density', 'f': [.4164], 'Gamma': [6.265E8], 'wavelength': [1215.67], 'mass': 1.00794, 'numLines': 1, 'maxN': 1E22, 'minN': 1E11, 'maxb': 300, 'minb': 1, 'maxz': 6, 'minz': 0, 'init_b': 30, 'init_N': 1E14 } species_dicts = {'HI': HI_parameters} fields = [('gas','temperature')] for s, params in species_dicts.items(): fields.append(params['field']) lr.make_light_ray( start_position=ds.arr([0., 0., 0.], 'unitary'), end_position=ds.arr([1., 1., 1.], 'unitary'), solution_filename='test_lightraysolution.txt', data_filename='test_lightray.h5', fields=fields) sp = AbsorptionSpectrum(900.0, 1400.0, 50000) for s, params in species_dicts.items(): for i in range(params['numLines']): sp.add_line( s, params['field'], params['wavelength'][i], params['f'][i], params['Gamma'][i], params['mass'], label_threshold=1.e10) wavelength, flux = sp.make_spectrum( 'test_lightray.h5', output_file='test_spectrum.h5', line_list_file='test_lines.txt', use_peculiar_velocity=True)
true
true
1c49f0f3bf54c9438b56d4e5b82e5dc16b9dd8e7
4,982
py
Python
tests/unit/test_models/test_full_battery_models/test_lithium_ion/test_dfn.py
NunoEdgarGFlowHub/PyBaMM
4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190
[ "BSD-3-Clause" ]
null
null
null
tests/unit/test_models/test_full_battery_models/test_lithium_ion/test_dfn.py
NunoEdgarGFlowHub/PyBaMM
4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190
[ "BSD-3-Clause" ]
null
null
null
tests/unit/test_models/test_full_battery_models/test_lithium_ion/test_dfn.py
NunoEdgarGFlowHub/PyBaMM
4e4e1ab8c488b0c0a6efdb9934c5ac59e947a190
[ "BSD-3-Clause" ]
null
null
null
# # Tests for the lithium-ion DFN model # import pybamm import unittest class TestDFN(unittest.TestCase): def test_well_posed(self): options = {"thermal": "isothermal"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_2plus1D(self): options = {"current collector": "potential pair", "dimensionality": 1} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() options = {"current collector": "potential pair", "dimensionality": 2} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() options = {"bc_options": {"dimensionality": 5}} with self.assertRaises(pybamm.OptionError): model = pybamm.lithium_ion.DFN(options) def test_lumped_thermal_model_1D(self): options = {"thermal": "x-lumped"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_x_full_thermal_model(self): options = {"thermal": "x-full"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_x_full_Nplus1D_not_implemented(self): # 1plus1D options = { "current collector": "potential pair", "dimensionality": 1, "thermal": "x-full", } with self.assertRaises(NotImplementedError): pybamm.lithium_ion.DFN(options) # 2plus1D options = { "current collector": "potential pair", "dimensionality": 2, "thermal": "x-full", } with self.assertRaises(NotImplementedError): pybamm.lithium_ion.DFN(options) def test_lumped_thermal_1plus1D(self): options = { "current collector": "potential pair", "dimensionality": 1, "thermal": "lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_lumped_thermal_2plus1D(self): options = { "current collector": "potential pair", "dimensionality": 2, "thermal": "lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_thermal_1plus1D(self): options = { "current collector": "potential pair", "dimensionality": 1, "thermal": "x-lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_thermal_2plus1D(self): options = { "current collector": "potential pair", "dimensionality": 2, "thermal": "x-lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_particle_fast_diffusion(self): options = {"particle": "fast diffusion"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_surface_form_differential(self): options = {"surface form": "differential"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_surface_form_algebraic(self): options = {"surface form": "algebraic"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() class TestDFNWithSEI(unittest.TestCase): def test_well_posed_constant(self): options = {"sei": "constant"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_reaction_limited(self): options = {"sei": "reaction limited"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_reaction_limited_average_film_resistance(self): options = {"sei": "reaction limited", "sei film resistance": "average"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_solvent_diffusion_limited(self): options = {"sei": "solvent-diffusion limited"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_electron_migration_limited(self): options = {"sei": "electron-migration limited"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def 
test_well_posed_interstitial_diffusion_limited(self): options = {"sei": "interstitial-diffusion limited"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_ec_reaction_limited(self): options = {"sei": "ec reaction limited", "sei porosity change": True} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() if __name__ == "__main__": print("Add -v for more debug output") import sys if "-v" in sys.argv: debug = True pybamm.settings.debug_mode = True unittest.main()
32.776316
79
0.633681
import pybamm import unittest class TestDFN(unittest.TestCase): def test_well_posed(self): options = {"thermal": "isothermal"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_2plus1D(self): options = {"current collector": "potential pair", "dimensionality": 1} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() options = {"current collector": "potential pair", "dimensionality": 2} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() options = {"bc_options": {"dimensionality": 5}} with self.assertRaises(pybamm.OptionError): model = pybamm.lithium_ion.DFN(options) def test_lumped_thermal_model_1D(self): options = {"thermal": "x-lumped"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_x_full_thermal_model(self): options = {"thermal": "x-full"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_x_full_Nplus1D_not_implemented(self): options = { "current collector": "potential pair", "dimensionality": 1, "thermal": "x-full", } with self.assertRaises(NotImplementedError): pybamm.lithium_ion.DFN(options) options = { "current collector": "potential pair", "dimensionality": 2, "thermal": "x-full", } with self.assertRaises(NotImplementedError): pybamm.lithium_ion.DFN(options) def test_lumped_thermal_1plus1D(self): options = { "current collector": "potential pair", "dimensionality": 1, "thermal": "lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_lumped_thermal_2plus1D(self): options = { "current collector": "potential pair", "dimensionality": 2, "thermal": "lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_thermal_1plus1D(self): options = { "current collector": "potential pair", "dimensionality": 1, "thermal": "x-lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_thermal_2plus1D(self): options = { "current collector": "potential pair", "dimensionality": 2, "thermal": "x-lumped", } model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_particle_fast_diffusion(self): options = {"particle": "fast diffusion"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_surface_form_differential(self): options = {"surface form": "differential"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_surface_form_algebraic(self): options = {"surface form": "algebraic"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() class TestDFNWithSEI(unittest.TestCase): def test_well_posed_constant(self): options = {"sei": "constant"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_reaction_limited(self): options = {"sei": "reaction limited"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_reaction_limited_average_film_resistance(self): options = {"sei": "reaction limited", "sei film resistance": "average"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_solvent_diffusion_limited(self): options = {"sei": "solvent-diffusion limited"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_electron_migration_limited(self): options = {"sei": "electron-migration limited"} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_interstitial_diffusion_limited(self): options = {"sei": "interstitial-diffusion limited"} 
model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() def test_well_posed_ec_reaction_limited(self): options = {"sei": "ec reaction limited", "sei porosity change": True} model = pybamm.lithium_ion.DFN(options) model.check_well_posedness() if __name__ == "__main__": print("Add -v for more debug output") import sys if "-v" in sys.argv: debug = True pybamm.settings.debug_mode = True unittest.main()
true
true
1c49f156253c1ee5a0762a2795736557c9bddbfb
3,641
py
Python
engine/account/forms.py
NamoxLabs/BlogEngine
741549e78b58bbc857e9dcecd88034de49d73304
[ "BSD-3-Clause" ]
1
2018-12-28T04:57:41.000Z
2018-12-28T04:57:41.000Z
engine/account/forms.py
NamoxLabs/BlogEngine
741549e78b58bbc857e9dcecd88034de49d73304
[ "BSD-3-Clause" ]
null
null
null
engine/account/forms.py
NamoxLabs/BlogEngine
741549e78b58bbc857e9dcecd88034de49d73304
[ "BSD-3-Clause" ]
2
2019-01-25T04:34:55.000Z
2020-04-11T09:01:24.000Z
#from captcha.fields import ReCaptchaField from django import forms from django.conf import settings from django.contrib.auth import forms as django_forms, update_session_auth_hash from django.utils.translation import pgettext, pgettext_lazy #from . import models(User) from . import models """ class FormWithReCaptcha(forms.BaseForm): def __new__(cls, *args, **kwargs): if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY: # insert a Google reCaptcha field inside the from # note: label is empty, the reCaptcha is self-explanatory making # the from simpler for the user. cls.base_fields['_captcha'] = ReCaptchaField(label='') return super(FormWithReCaptcha, cls).__new__(cls) """ class ChangePasswordForm(django_forms.PasswordChangeForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['new_password1'].user = self.user self.fields['old_password'].widget.attrs['placeholder'] = '' self.fields['new_password1'].widget.attrs['placeholder'] = '' del self.fields['new_password2'] def logout_on_password_change(request, user): if (update_session_auth_hash is not None and not settings.LOGOUT_ON_PASSWORD_CHANGE): update_session_auth_hash(request, user) #class LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha): class LoginForm(django_forms.AuthenticationForm): username = forms.EmailField( label=pgettext('Form field', 'Email'), max_length=75) def __init__(self, request=None, *args, **kwargs): super().__init__(request=request, *args, **kwargs) if request: email = request.GET.get('email') if email: self.fields['username'].initial = email #class SignupForm(forms.ModelForm, FormWithReCaptcha): class SignupForm(forms.ModelForm): password = forms.CharField( widget=forms.PasswordInput, label=pgettext('Password', 'Password')) email = forms.EmailField( label=pgettext('Email', 'Email'), error_messages={ 'unique': pgettext_lazy( 'Registration error', 'This email has already been registered.' )}) class Meta: model = models.User def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self._meta.model.USERNAME_FIELD in self.fields: self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update( {'autofocus': ''}) def save(self, request=None, commit=True): user = super().save(commit=False) password = self.cleaned_data['password'] user.set_password(password) if commit: user.save() return user #class PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha): class PasswordResetForm(django_forms.PasswordResetForm): """Allow resetting password. This subclass overrides sending emails to use templated email. """ def get_users(self, email): active_users = models.User.objects.filter(email__iexact=email, is_active=True) return active_users def send_mail( self, subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None): # Passing the user object to the Celery task throws an # error "'User' is not JSON serializable". Since it's not used in our # template, we remove it from the context. del context['user'] #emails.send_password_reset_email.delay(context, to_email)
36.777778
86
0.675639
from django import forms from django.conf import settings from django.contrib.auth import forms as django_forms, update_session_auth_hash from django.utils.translation import pgettext, pgettext_lazy from . import models class ChangePasswordForm(django_forms.PasswordChangeForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['new_password1'].user = self.user self.fields['old_password'].widget.attrs['placeholder'] = '' self.fields['new_password1'].widget.attrs['placeholder'] = '' del self.fields['new_password2'] def logout_on_password_change(request, user): if (update_session_auth_hash is not None and not settings.LOGOUT_ON_PASSWORD_CHANGE): update_session_auth_hash(request, user) class LoginForm(django_forms.AuthenticationForm): username = forms.EmailField( label=pgettext('Form field', 'Email'), max_length=75) def __init__(self, request=None, *args, **kwargs): super().__init__(request=request, *args, **kwargs) if request: email = request.GET.get('email') if email: self.fields['username'].initial = email class SignupForm(forms.ModelForm): password = forms.CharField( widget=forms.PasswordInput, label=pgettext('Password', 'Password')) email = forms.EmailField( label=pgettext('Email', 'Email'), error_messages={ 'unique': pgettext_lazy( 'Registration error', 'This email has already been registered.' )}) class Meta: model = models.User def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self._meta.model.USERNAME_FIELD in self.fields: self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update( {'autofocus': ''}) def save(self, request=None, commit=True): user = super().save(commit=False) password = self.cleaned_data['password'] user.set_password(password) if commit: user.save() return user class PasswordResetForm(django_forms.PasswordResetForm): def get_users(self, email): active_users = models.User.objects.filter(email__iexact=email, is_active=True) return active_users def send_mail( self, subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None): # template, we remove it from the context. del context['user'] #emails.send_password_reset_email.delay(context, to_email)
true
true
1c49f162a350f2ebbca239c540dffc96e43e2bae
2,574
py
Python
src/pages/gallery/awesome_panel_express_tests/test_markdown.py
jlstevens/awesome-panel
c67b0f4529a3ce6a8517648f49fef8358e2e2c8b
[ "Apache-2.0" ]
null
null
null
src/pages/gallery/awesome_panel_express_tests/test_markdown.py
jlstevens/awesome-panel
c67b0f4529a3ce6a8517648f49fef8358e2e2c8b
[ "Apache-2.0" ]
null
null
null
src/pages/gallery/awesome_panel_express_tests/test_markdown.py
jlstevens/awesome-panel
c67b0f4529a3ce6a8517648f49fef8358e2e2c8b
[ "Apache-2.0" ]
null
null
null
"""In this module we test the `Markdown` functionality of `awesome_panel.express` The `Markdown` functionality of Panel is limited as it does not support - One liners for using Markdown from files - Code blocks - Indented Markdown text as is often what is used in Editors like VS Code. Please note you need to run `Code.extend()` in order to add the CODE_HILITE CSS to the app. """ import pathlib import panel as pn import awesome_panel.express as pnx from awesome_panel.express.testing import TestApp TEST_MD_FILE = pathlib.Path(__file__).parent / "data" / "test.md" pnx.Code.extend() def test_markdown(): """We test that - A "Header is shown" - The background is blue - The sizing_mode is "stretch_width" by default. DOES NOT WORK CURRENTLY """ return TestApp( test_markdown, pnx.Markdown("# Header", name="basic", background="lightblue"), sizing_mode="stretch_width", background="lightgray", max_width=600, ) def test_markdown_from_file(): """We test that - A path to a markdown file can used directly in one line """ return TestApp( test_markdown_from_file, pnx.Markdown(path=TEST_MD_FILE, name="file", background="lightblue"), ) def test_markdown_indendation(): """We test the Markdown pane - can handle leading spaces, i.e. this line shows as a bullited list and not in mono-space """ return TestApp(test_markdown_indendation, sizing_mode="stretch_width",) def test_markdown_code_block(): """We test that - A code blocks are supported. Sort of. BUT THE INDENTATION IS CURRENTLY LOST! - Indented markdown test from editors is supported. The Panel Markdown does not support this. """ code_block = """ This is not indented ```python print("Hello Awesome Panel World") return TestApp( test_markdown_code_block, pnx.Markdown(code_block, name="code block", background="lightblue"), ``` This is indented``` """ return TestApp( test_markdown_code_block, pnx.Markdown(code_block, name="code block", background="lightblue"), ) def view() -> pn.Column: """Wraps all tests in a Column that can be included in the Gallery or served independently Returns: pn.Column -- An Column containing all the tests """ return pn.Column( pnx.Markdown(__doc__), test_markdown, test_markdown_from_file, test_markdown_indendation, test_markdown_code_block, ) if __name__.startswith("bk"): view().servable("test_markdown")
25.74
97
0.688423
import pathlib import panel as pn import awesome_panel.express as pnx from awesome_panel.express.testing import TestApp TEST_MD_FILE = pathlib.Path(__file__).parent / "data" / "test.md" pnx.Code.extend() def test_markdown(): return TestApp( test_markdown, pnx.Markdown("# Header", name="basic", background="lightblue"), sizing_mode="stretch_width", background="lightgray", max_width=600, ) def test_markdown_from_file(): return TestApp( test_markdown_from_file, pnx.Markdown(path=TEST_MD_FILE, name="file", background="lightblue"), ) def test_markdown_indendation(): return TestApp(test_markdown_indendation, sizing_mode="stretch_width",) def test_markdown_code_block(): code_block = """ This is not indented ```python print("Hello Awesome Panel World") return TestApp( test_markdown_code_block, pnx.Markdown(code_block, name="code block", background="lightblue"), ``` This is indented``` """ return TestApp( test_markdown_code_block, pnx.Markdown(code_block, name="code block", background="lightblue"), ) def view() -> pn.Column: return pn.Column( pnx.Markdown(__doc__), test_markdown, test_markdown_from_file, test_markdown_indendation, test_markdown_code_block, ) if __name__.startswith("bk"): view().servable("test_markdown")
true
true
1c49f2142861df2d045a482002a519a24cbcd848
2,022
py
Python
tests/nuodb_crypt_test.py
jgetto/nuodb-python
3a22260e801d8f9d9bd33f911a694e8caeba7282
[ "BSD-3-Clause" ]
null
null
null
tests/nuodb_crypt_test.py
jgetto/nuodb-python
3a22260e801d8f9d9bd33f911a694e8caeba7282
[ "BSD-3-Clause" ]
null
null
null
tests/nuodb_crypt_test.py
jgetto/nuodb-python
3a22260e801d8f9d9bd33f911a694e8caeba7282
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python import pynuodb import unittest from nuodb_base import NuoBase class NuoDBBasicTest(unittest.TestCase): def test_toByteString(self): self.assertEqual(pynuodb.crypt.toSignedByteString(1), '01'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(127), '7F'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(254), '00FE'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(255), '00FF'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-1), 'FF'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-2), 'FE'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-256), 'FF00'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-258), 'FEFE'.decode('hex')) def test_fromByteString(self): self.assertEqual(pynuodb.crypt.fromSignedByteString('01'.decode('hex')), 1) self.assertEqual(pynuodb.crypt.fromSignedByteString('00FF'.decode('hex')), 255) self.assertEqual(pynuodb.crypt.fromSignedByteString('FF'.decode('hex')), -1) self.assertEqual(pynuodb.crypt.fromSignedByteString('FF01'.decode('hex')), -255) self.assertEqual(pynuodb.crypt.fromSignedByteString('FF00'.decode('hex')), -256) self.assertEqual(pynuodb.crypt.fromSignedByteString('FEFE'.decode('hex')), -258) def test_bothByteString(self): self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(1)), 1) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(0)), 0) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-1)), -1) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(256)), 256) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-256)), -256) if __name__ == '__main__': unittest.main()
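# --- Editor's note: illustrative addition, not part of the original test file above ---
# The '...'.decode('hex') calls above are Python 2 idioms (str -> byte string).
# Under Python 3 the same byte values can be produced with bytes.fromhex(),
# shown here only as a porting reference:
PY3_HEX_PORTING_EXAMPLE = bytes.fromhex('00FE')  # equivalent of '00FE'.decode('hex'), i.e. b'\x00\xfe'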
54.648649
106
0.720574
import pynuodb import unittest from nuodb_base import NuoBase class NuoDBBasicTest(unittest.TestCase): def test_toByteString(self): self.assertEqual(pynuodb.crypt.toSignedByteString(1), '01'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(127), '7F'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(254), '00FE'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(255), '00FF'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-1), 'FF'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-2), 'FE'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-256), 'FF00'.decode('hex')) self.assertEqual(pynuodb.crypt.toSignedByteString(-258), 'FEFE'.decode('hex')) def test_fromByteString(self): self.assertEqual(pynuodb.crypt.fromSignedByteString('01'.decode('hex')), 1) self.assertEqual(pynuodb.crypt.fromSignedByteString('00FF'.decode('hex')), 255) self.assertEqual(pynuodb.crypt.fromSignedByteString('FF'.decode('hex')), -1) self.assertEqual(pynuodb.crypt.fromSignedByteString('FF01'.decode('hex')), -255) self.assertEqual(pynuodb.crypt.fromSignedByteString('FF00'.decode('hex')), -256) self.assertEqual(pynuodb.crypt.fromSignedByteString('FEFE'.decode('hex')), -258) def test_bothByteString(self): self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(1)), 1) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(0)), 0) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-1)), -1) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(256)), 256) self.assertEqual(pynuodb.crypt.fromSignedByteString(pynuodb.crypt.toSignedByteString(-256)), -256) if __name__ == '__main__': unittest.main()
true
true
1c49f2b7d071ce1ebf896fca28a137a5869de6d1
4,909
py
Python
DQMOffline/Configuration/python/DQMOfflineCosmics_SecondStep_cff.py
vjmastra/cmssw
de96df37dbaf3543daef67339179e074bde9e858
[ "Apache-2.0" ]
1
2019-02-06T13:19:54.000Z
2019-02-06T13:19:54.000Z
DQMOffline/Configuration/python/DQMOfflineCosmics_SecondStep_cff.py
dellaric/cmssw
cd7470dc554972076740dde7523f311c43f248d3
[ "Apache-2.0" ]
null
null
null
DQMOffline/Configuration/python/DQMOfflineCosmics_SecondStep_cff.py
dellaric/cmssw
cd7470dc554972076740dde7523f311c43f248d3
[ "Apache-2.0" ]
null
null
null
import FWCore.ParameterSet.Config as cms from DQMServices.Components.DQMMessageLoggerClient_cff import * from DQMServices.Components.DQMFastTimerServiceClient_cfi import * from DQMOffline.Ecal.ecal_dqm_client_offline_cosmic_cff import * from DQM.EcalPreshowerMonitorClient.es_dqm_client_offline_cosmic_cff import * from DQM.HcalTasks.OfflineHarvestingSequence_cosmic import * from DQM.SiStripMonitorClient.SiStripClientConfig_Tier0_Cosmic_cff import * from DQM.SiPixelCommon.SiPixelOfflineDQM_client_cff import * from DQM.DTMonitorClient.dtDQMOfflineClients_Cosmics_cff import * from DQM.RPCMonitorClient.RPCTier0Client_cff import * from DQM.CSCMonitorModule.csc_dqm_offlineclient_cosmics_cff import * from DQM.GEM.gem_dqm_offline_client_cosmics_cff import * from DQMServices.Components.DQMFEDIntegrityClient_cff import * DQMNone = cms.Sequence() DQMOfflineCosmics_SecondStepEcal = cms.Sequence( ecal_dqm_client_offline * es_dqm_client_offline ) DQMOfflineCosmics_SecondStepHcal = cms.Sequence( hcalOfflineHarvesting ) DQMOfflineCosmics_SecondStepTrackerStrip = cms.Sequence( SiStripCosmicDQMClient ) DQMOfflineCosmics_SecondStepTrackerPixel = cms.Sequence( PixelOfflineDQMClientNoDataCertification_cosmics ) DQMOfflineCosmics_SecondStepMuonDPG = cms.Sequence( dtClientsCosmics * rpcTier0Client * cscOfflineCosmicsClients ) from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG = DQMOfflineCosmics_SecondStepMuonDPG.copy() _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG += gemClientsCosmics run3_GEM.toReplaceWith(DQMOfflineCosmics_SecondStepMuonDPG, _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG) DQMOfflineCosmics_SecondStepFED = cms.Sequence( dqmFEDIntegrityClient ) DQMOfflineCosmics_SecondStep_PreDPG = cms.Sequence( DQMOfflineCosmics_SecondStepEcal * DQMOfflineCosmics_SecondStepHcal * DQMOfflineCosmics_SecondStepTrackerStrip * DQMOfflineCosmics_SecondStepTrackerPixel * DQMOfflineCosmics_SecondStepMuonDPG * DQMOfflineCosmics_SecondStepFED ) DQMOfflineCosmics_SecondStepDPG = cms.Sequence( DQMOfflineCosmics_SecondStep_PreDPG * DQMMessageLoggerClientSeq ) from DQM.TrackingMonitorClient.TrackingClientConfig_Tier0_Cosmic_cff import * from DQMOffline.Muon.muonQualityTests_cff import * from DQMOffline.EGamma.photonOfflineDQMClient_cff import * from DQMOffline.L1Trigger.L1TriggerDqmOffline_cff import * from DQMOffline.Trigger.DQMOffline_Trigger_Client_cff import * from DQMOffline.Trigger.DQMOffline_HLT_Client_cff import * from DQMOffline.JetMET.SusyPostProcessor_cff import * DQMOfflineCosmics_SecondStepTracking = cms.Sequence( TrackingCosmicDQMClient ) DQMOfflineCosmics_SecondStepMUO = cms.Sequence( cosmicMuonQualityTests ) DQMOfflineCosmics_SecondStepEGamma = cms.Sequence( photonOfflineDQMClient ) DQMOfflineCosmics_SecondStepL1T = cms.Sequence( l1TriggerDqmOfflineCosmicsClient ) DQMOfflineCosmics_SecondStepTrigger = cms.Sequence( triggerOfflineDQMClient * hltOfflineDQMClient ) DQMOfflineCosmics_SecondStepJetMET = cms.Sequence( SusyPostProcessorSequence ) DQMOfflineCosmics_SecondStep_PrePOG = cms.Sequence( DQMOfflineCosmics_SecondStepTracking * DQMOfflineCosmics_SecondStepMUO * DQMOfflineCosmics_SecondStepEGamma * DQMOfflineCosmics_SecondStepL1T * DQMOfflineCosmics_SecondStepJetMET ) DQMOfflineCosmics_SecondStep_PrePOG.remove(fsqClient) DQMOfflineCosmics_SecondStepPOG = cms.Sequence( DQMOfflineCosmics_SecondStep_PrePOG * DQMMessageLoggerClientSeq * dqmFastTimerServiceClient) DQMOfflineCosmics_SecondStep = cms.Sequence( DQMOfflineCosmics_SecondStep_PreDPG * 
DQMOfflineCosmics_SecondStep_PrePOG * DQMOfflineCosmics_SecondStepTrigger * DQMMessageLoggerClientSeq ) DQMOfflineCosmics_SecondStep_FakeHLT = cms.Sequence(DQMOfflineCosmics_SecondStep ) DQMOfflineCosmics_SecondStep_FakeHLT.remove( DQMOfflineCosmics_SecondStepTrigger )
52.223404
107
0.695457
import FWCore.ParameterSet.Config as cms from DQMServices.Components.DQMMessageLoggerClient_cff import * from DQMServices.Components.DQMFastTimerServiceClient_cfi import * from DQMOffline.Ecal.ecal_dqm_client_offline_cosmic_cff import * from DQM.EcalPreshowerMonitorClient.es_dqm_client_offline_cosmic_cff import * from DQM.HcalTasks.OfflineHarvestingSequence_cosmic import * from DQM.SiStripMonitorClient.SiStripClientConfig_Tier0_Cosmic_cff import * from DQM.SiPixelCommon.SiPixelOfflineDQM_client_cff import * from DQM.DTMonitorClient.dtDQMOfflineClients_Cosmics_cff import * from DQM.RPCMonitorClient.RPCTier0Client_cff import * from DQM.CSCMonitorModule.csc_dqm_offlineclient_cosmics_cff import * from DQM.GEM.gem_dqm_offline_client_cosmics_cff import * from DQMServices.Components.DQMFEDIntegrityClient_cff import * DQMNone = cms.Sequence() DQMOfflineCosmics_SecondStepEcal = cms.Sequence( ecal_dqm_client_offline * es_dqm_client_offline ) DQMOfflineCosmics_SecondStepHcal = cms.Sequence( hcalOfflineHarvesting ) DQMOfflineCosmics_SecondStepTrackerStrip = cms.Sequence( SiStripCosmicDQMClient ) DQMOfflineCosmics_SecondStepTrackerPixel = cms.Sequence( PixelOfflineDQMClientNoDataCertification_cosmics ) DQMOfflineCosmics_SecondStepMuonDPG = cms.Sequence( dtClientsCosmics * rpcTier0Client * cscOfflineCosmicsClients ) from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG = DQMOfflineCosmics_SecondStepMuonDPG.copy() _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG += gemClientsCosmics run3_GEM.toReplaceWith(DQMOfflineCosmics_SecondStepMuonDPG, _run3_GEM_DQMOfflineCosmics_SecondStepMuonDPG) DQMOfflineCosmics_SecondStepFED = cms.Sequence( dqmFEDIntegrityClient ) DQMOfflineCosmics_SecondStep_PreDPG = cms.Sequence( DQMOfflineCosmics_SecondStepEcal * DQMOfflineCosmics_SecondStepHcal * DQMOfflineCosmics_SecondStepTrackerStrip * DQMOfflineCosmics_SecondStepTrackerPixel * DQMOfflineCosmics_SecondStepMuonDPG * DQMOfflineCosmics_SecondStepFED ) DQMOfflineCosmics_SecondStepDPG = cms.Sequence( DQMOfflineCosmics_SecondStep_PreDPG * DQMMessageLoggerClientSeq ) from DQM.TrackingMonitorClient.TrackingClientConfig_Tier0_Cosmic_cff import * from DQMOffline.Muon.muonQualityTests_cff import * from DQMOffline.EGamma.photonOfflineDQMClient_cff import * from DQMOffline.L1Trigger.L1TriggerDqmOffline_cff import * from DQMOffline.Trigger.DQMOffline_Trigger_Client_cff import * from DQMOffline.Trigger.DQMOffline_HLT_Client_cff import * from DQMOffline.JetMET.SusyPostProcessor_cff import * DQMOfflineCosmics_SecondStepTracking = cms.Sequence( TrackingCosmicDQMClient ) DQMOfflineCosmics_SecondStepMUO = cms.Sequence( cosmicMuonQualityTests ) DQMOfflineCosmics_SecondStepEGamma = cms.Sequence( photonOfflineDQMClient ) DQMOfflineCosmics_SecondStepL1T = cms.Sequence( l1TriggerDqmOfflineCosmicsClient ) DQMOfflineCosmics_SecondStepTrigger = cms.Sequence( triggerOfflineDQMClient * hltOfflineDQMClient ) DQMOfflineCosmics_SecondStepJetMET = cms.Sequence( SusyPostProcessorSequence ) DQMOfflineCosmics_SecondStep_PrePOG = cms.Sequence( DQMOfflineCosmics_SecondStepTracking * DQMOfflineCosmics_SecondStepMUO * DQMOfflineCosmics_SecondStepEGamma * DQMOfflineCosmics_SecondStepL1T * DQMOfflineCosmics_SecondStepJetMET ) DQMOfflineCosmics_SecondStep_PrePOG.remove(fsqClient) DQMOfflineCosmics_SecondStepPOG = cms.Sequence( DQMOfflineCosmics_SecondStep_PrePOG * DQMMessageLoggerClientSeq * dqmFastTimerServiceClient) DQMOfflineCosmics_SecondStep = cms.Sequence( DQMOfflineCosmics_SecondStep_PreDPG * 
DQMOfflineCosmics_SecondStep_PrePOG * DQMOfflineCosmics_SecondStepTrigger * DQMMessageLoggerClientSeq ) DQMOfflineCosmics_SecondStep_FakeHLT = cms.Sequence(DQMOfflineCosmics_SecondStep ) DQMOfflineCosmics_SecondStep_FakeHLT.remove( DQMOfflineCosmics_SecondStepTrigger )
true
true
1c49f324acb2a047c20500c33a13ef6f0f53f559
73
py
Python
pvpc/__init__.py
David-Lor/python-pvpc
a5aac6f32a6eaf464ee374fd7da32a79fbbd18ba
[ "ISC" ]
null
null
null
pvpc/__init__.py
David-Lor/python-pvpc
a5aac6f32a6eaf464ee374fd7da32a79fbbd18ba
[ "ISC" ]
null
null
null
pvpc/__init__.py
David-Lor/python-pvpc
a5aac6f32a6eaf464ee374fd7da32a79fbbd18ba
[ "ISC" ]
null
null
null
from .models import * from .requester import * from .exceptions import *
18.25
25
0.753425
from .models import * from .requester import * from .exceptions import *
true
true
1c49f39ec3628b8aaf020ff4bb77d86834de746f
1,407
py
Python
kicad_cicd/plotter.py
sillevl/pcbops_template
68107607412245df168acdab978447ab82da33f7
[ "MIT" ]
10
2019-04-30T22:14:20.000Z
2021-02-24T13:51:57.000Z
kicad_cicd/plotter.py
sillevl/pcbops_template
68107607412245df168acdab978447ab82da33f7
[ "MIT" ]
2
2019-05-09T13:59:39.000Z
2019-09-25T14:07:25.000Z
kicad_cicd/plotter.py
sillevl/pcbops_template
68107607412245df168acdab978447ab82da33f7
[ "MIT" ]
3
2019-04-29T10:01:48.000Z
2020-06-04T10:14:26.000Z
import sys import os import pcbnew from pcbnew import * file_name = os.path.abspath(sys.argv[1]) output_dir = os.path.abspath(sys.argv[2]) print("Running KiCAD Plotter CI/CD Script on %s output to %s"%(file_name, output_dir,)) try: os.makedirs(output_dir) except OSError: pass board = pcbnew.LoadBoard(file_name) pctl = pcbnew.PLOT_CONTROLLER(board) popt = pctl.GetPlotOptions() popt.SetOutputDirectory(output_dir) popt.SetPlotFrameRef(False) popt.SetLineWidth(pcbnew.FromMM(0.1)) popt.SetAutoScale(False) popt.SetScale(1) popt.SetMirror(False) popt.SetUseGerberAttributes(True) popt.SetUseGerberProtelExtensions(True) popt.SetExcludeEdgeLayer(True) popt.SetUseAuxOrigin(False) pctl.SetColorMode(True) popt.SetSubtractMaskFromSilk(False) popt.SetPlotReference(True) popt.SetPlotValue(False) layers = [ ("F.Cu", pcbnew.F_Cu, "Top layer"), ("B.Cu", pcbnew.B_Cu, "Bottom layer"), ("F.Paste", pcbnew.F_Paste, "Paste top"), ("B.Paste", pcbnew.B_Paste, "Paste bottom"), ("F.SilkS", pcbnew.F_SilkS, "Silk top"), ("B.SilkS", pcbnew.B_SilkS, "Silk top"), ("F.Mask", pcbnew.F_Mask, "Mask top"), ("B.Mask", pcbnew.B_Mask, "Mask bottom"), ("Edge.Cuts", pcbnew.Edge_Cuts, "Edges"), ] for layer_info in layers: pctl.SetLayer(layer_info[1]) pctl.OpenPlotfile(layer_info[0], pcbnew.PLOT_FORMAT_GERBER, layer_info[2]) pctl.PlotLayer() pctl.ClosePlot()
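# --- Editor's note: illustrative usage only, not part of the original script above ---
# The script reads the board file from argv[1] and the output directory from
# argv[2]; both paths below are assumptions for illustration. It must run under
# a Python interpreter that can import KiCad's `pcbnew` module, for example:
#
#   python plotter.py myboard.kicad_pcb build/gerbers
#
# which writes one Gerber file per entry in `layers` into build/gerbers via
# pctl.OpenPlotfile()/PlotLayer().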
24.684211
87
0.721393
import sys import os import pcbnew from pcbnew import * file_name = os.path.abspath(sys.argv[1]) output_dir = os.path.abspath(sys.argv[2]) print("Running KiCAD Plotter CI/CD Script on %s output to %s"%(file_name, output_dir,)) try: os.makedirs(output_dir) except OSError: pass board = pcbnew.LoadBoard(file_name) pctl = pcbnew.PLOT_CONTROLLER(board) popt = pctl.GetPlotOptions() popt.SetOutputDirectory(output_dir) popt.SetPlotFrameRef(False) popt.SetLineWidth(pcbnew.FromMM(0.1)) popt.SetAutoScale(False) popt.SetScale(1) popt.SetMirror(False) popt.SetUseGerberAttributes(True) popt.SetUseGerberProtelExtensions(True) popt.SetExcludeEdgeLayer(True) popt.SetUseAuxOrigin(False) pctl.SetColorMode(True) popt.SetSubtractMaskFromSilk(False) popt.SetPlotReference(True) popt.SetPlotValue(False) layers = [ ("F.Cu", pcbnew.F_Cu, "Top layer"), ("B.Cu", pcbnew.B_Cu, "Bottom layer"), ("F.Paste", pcbnew.F_Paste, "Paste top"), ("B.Paste", pcbnew.B_Paste, "Paste bottom"), ("F.SilkS", pcbnew.F_SilkS, "Silk top"), ("B.SilkS", pcbnew.B_SilkS, "Silk top"), ("F.Mask", pcbnew.F_Mask, "Mask top"), ("B.Mask", pcbnew.B_Mask, "Mask bottom"), ("Edge.Cuts", pcbnew.Edge_Cuts, "Edges"), ] for layer_info in layers: pctl.SetLayer(layer_info[1]) pctl.OpenPlotfile(layer_info[0], pcbnew.PLOT_FORMAT_GERBER, layer_info[2]) pctl.PlotLayer() pctl.ClosePlot()
true
true
1c49f3bd8d21302a83182466fe1ef519c82625f3
49,318
py
Python
python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/run_storage.py
cvb/dagster
6c735708790febe79ffe727225a4445c033ab79d
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/run_storage.py
cvb/dagster
6c735708790febe79ffe727225a4445c033ab79d
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/run_storage.py
cvb/dagster
6c735708790febe79ffe727225a4445c033ab79d
[ "Apache-2.0" ]
null
null
null
import sys import tempfile from datetime import datetime import pendulum import pytest from dagster import job, op, seven from dagster.core.definitions import PipelineDefinition from dagster.core.errors import ( DagsterRunAlreadyExists, DagsterRunNotFoundError, DagsterSnapshotDoesNotExist, ) from dagster.core.events import DagsterEvent, DagsterEventType from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill from dagster.core.host_representation import ( ExternalRepositoryOrigin, ManagedGrpcPythonEnvRepositoryLocationOrigin, ) from dagster.core.instance import DagsterInstance, InstanceType from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher from dagster.core.run_coordinator import DefaultRunCoordinator from dagster.core.snap import create_pipeline_snapshot_id from dagster.core.storage.event_log import InMemoryEventLogStorage from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager from dagster.core.storage.pipeline_run import ( DagsterRun, JobBucket, PipelineRunStatus, PipelineRunsFilter, TagBucket, ) from dagster.core.storage.root import LocalArtifactStorage from dagster.core.storage.runs.migration import REQUIRED_DATA_MIGRATIONS from dagster.core.storage.runs.sql_run_storage import SqlRunStorage from dagster.core.storage.tags import PARENT_RUN_ID_TAG, ROOT_RUN_ID_TAG from dagster.core.types.loadable_target_origin import LoadableTargetOrigin from dagster.core.utils import make_new_run_id from dagster.daemon.daemon import SensorDaemon from dagster.daemon.types import DaemonHeartbeat from dagster.serdes import serialize_pp from dagster.seven.compat.pendulum import create_pendulum_time, to_timezone win_py36 = seven.IS_WINDOWS and sys.version_info[0] == 3 and sys.version_info[1] == 6 class TestRunStorage: """ You can extend this class to easily run these set of tests on any run storage. When extending, you simply need to override the `run_storage` fixture and return your implementation of `RunStorage`. 
For example: ``` class TestMyStorageImplementation(TestRunStorage): __test__ = True @pytest.fixture(scope='function', name='storage') def run_storage(self): # pylint: disable=arguments-differ return MyStorageImplementation() ``` """ __test__ = False @pytest.fixture(name="storage", params=[]) def run_storage(self, request): with request.param() as s: yield s # Override for storages that are not allowed to delete runs def can_delete_runs(self): return True @staticmethod def fake_repo_target(): return ExternalRepositoryOrigin( ManagedGrpcPythonEnvRepositoryLocationOrigin( LoadableTargetOrigin( executable_path=sys.executable, module_name="fake", attribute="fake" ), ), "fake_repo_name", ) @classmethod def fake_partition_set_origin(cls, partition_set_name): return cls.fake_repo_target().get_partition_set_origin(partition_set_name) @staticmethod def build_run( run_id, pipeline_name, mode="default", tags=None, status=PipelineRunStatus.NOT_STARTED, parent_run_id=None, root_run_id=None, pipeline_snapshot_id=None, ): return DagsterRun( pipeline_name=pipeline_name, run_id=run_id, run_config=None, mode=mode, tags=tags, status=status, root_run_id=root_run_id, parent_run_id=parent_run_id, pipeline_snapshot_id=pipeline_snapshot_id, ) def test_basic_storage(self, storage): assert storage run_id = make_new_run_id() added = storage.add_run( TestRunStorage.build_run( run_id=run_id, pipeline_name="some_pipeline", tags={"foo": "bar"} ) ) assert added runs = storage.get_runs() assert len(runs) == 1 run = runs[0] assert run.run_id == run_id assert run.pipeline_name == "some_pipeline" assert run.tags assert run.tags.get("foo") == "bar" assert storage.has_run(run_id) fetched_run = storage.get_run_by_id(run_id) assert fetched_run.run_id == run_id assert fetched_run.pipeline_name == "some_pipeline" def test_clear(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete") assert storage run_id = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline")) assert len(storage.get_runs()) == 1 storage.wipe() assert list(storage.get_runs()) == [] def test_storage_telemetry(self, storage): assert storage storage_id = storage.get_run_storage_id() assert isinstance(storage_id, str) storage_id_again = storage.get_run_storage_id() assert storage_id == storage_id_again def test_fetch_by_pipeline(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="some_pipeline")) storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="some_other_pipeline")) assert len(storage.get_runs()) == 2 some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline")) assert len(some_runs) == 1 assert some_runs[0].run_id == one def test_fetch_by_snapshot_id(self, storage): assert storage pipeline_def_a = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_def_b = PipelineDefinition(name="some_other_pipeline", solid_defs=[]) pipeline_snapshot_a = pipeline_def_a.get_pipeline_snapshot() pipeline_snapshot_b = pipeline_def_b.get_pipeline_snapshot() pipeline_snapshot_a_id = create_pipeline_snapshot_id(pipeline_snapshot_a) pipeline_snapshot_b_id = create_pipeline_snapshot_id(pipeline_snapshot_b) assert storage.add_pipeline_snapshot(pipeline_snapshot_a) == pipeline_snapshot_a_id assert storage.add_pipeline_snapshot(pipeline_snapshot_b) == pipeline_snapshot_b_id one = make_new_run_id() two = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, 
pipeline_name="some_pipeline", pipeline_snapshot_id=pipeline_snapshot_a_id, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_other_pipeline", pipeline_snapshot_id=pipeline_snapshot_b_id, ) ) assert len(storage.get_runs()) == 2 runs_a = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_a_id)) assert len(runs_a) == 1 assert runs_a[0].run_id == one runs_b = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_b_id)) assert len(runs_b) == 1 assert runs_b[0].run_id == two def test_add_run_tags(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="foo")) storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="bar")) assert storage.get_run_tags() == [] storage.add_run_tags(one, {"tag1": "val1", "tag2": "val2"}) storage.add_run_tags(two, {"tag1": "val1"}) assert storage.get_run_tags() == [("tag1", {"val1"}), ("tag2", {"val2"})] # Adding both existing tags and a new tag storage.add_run_tags(one, {"tag1": "val2", "tag3": "val3"}) test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 3 assert test_run.tags["tag1"] == "val2" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert storage.get_run_tags() == [ ("tag1", {"val1", "val2"}), ("tag2", {"val2"}), ("tag3", {"val3"}), ] # Adding only existing tags storage.add_run_tags(one, {"tag1": "val3"}) test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 3 assert test_run.tags["tag1"] == "val3" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert storage.get_run_tags() == [ ("tag1", {"val1", "val3"}), ("tag2", {"val2"}), ("tag3", {"val3"}), ] # Adding only a new tag that wasn't there before storage.add_run_tags(one, {"tag4": "val4"}) test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 4 assert test_run.tags["tag1"] == "val3" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert test_run.tags["tag4"] == "val4" assert storage.get_run_tags() == [ ("tag1", {"val1", "val3"}), ("tag2", {"val2"}), ("tag3", {"val3"}), ("tag4", {"val4"}), ] test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 4 assert test_run.tags["tag1"] == "val3" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert test_run.tags["tag4"] == "val4" some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag3": "val3"})) assert len(some_runs) == 1 assert some_runs[0].run_id == one runs_with_old_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val1"})) assert len(runs_with_old_tag) == 1 assert runs_with_old_tag[0].tags == {"tag1": "val1"} runs_with_new_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val3"})) assert len(runs_with_new_tag) == 1 assert runs_with_new_tag[0].tags == { "tag1": "val3", "tag2": "val2", "tag3": "val3", "tag4": "val4", } def test_fetch_by_filter(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"tag": "hello", "tag2": "world"}, status=PipelineRunStatus.SUCCESS, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"tag": "hello"}, status=PipelineRunStatus.FAILURE, ), ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="other_pipeline", status=PipelineRunStatus.SUCCESS ) ) assert len(storage.get_runs()) == 3 some_runs 
= storage.get_runs(PipelineRunsFilter(run_ids=[one])) count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one])) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline")) count = storage.get_runs_count(PipelineRunsFilter(pipeline_name="some_pipeline")) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS])) count = storage.get_runs_count(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS])) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == three assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello"})) count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello"})) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"})) count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"})) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs( PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"}) ) count = storage.get_runs_count( PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"}) ) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs( PipelineRunsFilter( pipeline_name="some_pipeline", tags={"tag": "hello"}, statuses=[PipelineRunStatus.SUCCESS], ) ) count = storage.get_runs_count( PipelineRunsFilter( pipeline_name="some_pipeline", tags={"tag": "hello"}, statuses=[PipelineRunStatus.SUCCESS], ) ) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one # All filters some_runs = storage.get_runs( PipelineRunsFilter( run_ids=[one], pipeline_name="some_pipeline", tags={"tag": "hello"}, statuses=[PipelineRunStatus.SUCCESS], ) ) count = storage.get_runs_count( PipelineRunsFilter( run_ids=[one], pipeline_name="some_pipeline", tags={"tag": "hello"}, statuses=[PipelineRunStatus.SUCCESS], ) ) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs(PipelineRunsFilter()) count = storage.get_runs_count(PipelineRunsFilter()) assert len(some_runs) == 3 assert count == 3 def test_fetch_count_by_tag(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello", "mytag2": "world"}, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"mytag": "goodbye", "mytag2": "world"}, ) ) storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline")) assert len(storage.get_runs()) == 3 run_count = storage.get_runs_count( filters=PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"}) ) assert run_count == 1 run_count = storage.get_runs_count(filters=PipelineRunsFilter(tags={"mytag2": "world"})) assert run_count == 2 run_count = storage.get_runs_count() assert run_count == 3 assert storage.get_run_tags() == [("mytag", {"hello", "goodbye"}), ("mytag2", {"world"})] def test_fetch_by_tags(self, storage): assert storage one = make_new_run_id() two = 
make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello", "mytag2": "world"}, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"mytag": "goodbye", "mytag2": "world"}, ) ) storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline")) assert len(storage.get_runs()) == 3 some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"})) assert len(some_runs) == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag2": "world"})) assert len(some_runs) == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={})) assert len(some_runs) == 3 def test_paginated_fetch(self, storage): assert storage one, two, three = [make_new_run_id(), make_new_run_id(), make_new_run_id()] storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello"} ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"mytag": "hello"} ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", tags={"mytag": "hello"} ) ) all_runs = storage.get_runs() assert len(all_runs) == 3 sliced_runs = storage.get_runs(cursor=three, limit=1) assert len(sliced_runs) == 1 assert sliced_runs[0].run_id == two all_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline")) assert len(all_runs) == 3 sliced_runs = storage.get_runs( PipelineRunsFilter(pipeline_name="some_pipeline"), cursor=three, limit=1 ) assert len(sliced_runs) == 1 assert sliced_runs[0].run_id == two all_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello"})) assert len(all_runs) == 3 sliced_runs = storage.get_runs( PipelineRunsFilter(tags={"mytag": "hello"}), cursor=three, limit=1 ) assert len(sliced_runs) == 1 assert sliced_runs[0].run_id == two def test_fetch_by_status(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() four = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE ) ) assert { run.run_id for run in storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.NOT_STARTED]) ) } == {one} assert { run.run_id for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED])) } == { two, three, } assert { run.run_id for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE])) } == {four} assert { run.run_id for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS])) } == set() def test_fetch_records_by_update_timestamp(self, storage): assert storage self._skip_in_memory(storage) one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=two, 
pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.handle_run_event( three, # three succeeds DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="some_pipeline", ), ) storage.handle_run_event( one, # fail one after two has fails and three has succeeded DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_FAILURE.value, pipeline_name="some_pipeline", ), ) record_two = storage.get_run_records( filters=PipelineRunsFilter(run_ids=[two], updated_after=datetime(2020, 1, 1)) )[0] run_two_update_timestamp = record_two.update_timestamp assert [ record.pipeline_run.run_id for record in storage.get_run_records( filters=PipelineRunsFilter(updated_after=run_two_update_timestamp), order_by="update_timestamp", ascending=True, ) ] == [three, one] assert [ record.pipeline_run.run_id for record in storage.get_run_records( filters=PipelineRunsFilter( statuses=[PipelineRunStatus.FAILURE], updated_after=run_two_update_timestamp ), ) ] == [one] def test_fetch_by_status_cursored(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() four = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) cursor_four_runs = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four ) assert len(cursor_four_runs) == 2 assert {run.run_id for run in cursor_four_runs} == {one, two} cursor_two_runs = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=two ) assert len(cursor_two_runs) == 1 assert {run.run_id for run in cursor_two_runs} == {one} cursor_one_runs = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=one ) assert not cursor_one_runs cursor_four_limit_one = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four, limit=1 ) assert len(cursor_four_limit_one) == 1 assert cursor_four_limit_one[0].run_id == two def test_delete(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete runs") assert storage run_id = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline")) assert len(storage.get_runs()) == 1 storage.delete_run(run_id) assert list(storage.get_runs()) == [] def test_delete_with_tags(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete runs") assert storage run_id = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=run_id, pipeline_name="some_pipeline", tags={run_id: run_id}, ) ) assert len(storage.get_runs()) == 1 assert run_id in [key for key, value in storage.get_run_tags()] storage.delete_run(run_id) assert list(storage.get_runs()) == [] assert run_id not in [key for key, value in storage.get_run_tags()] def test_wipe_tags(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete") run_id = "some_run_id" run = 
DagsterRun(run_id=run_id, pipeline_name="a_pipeline", tags={"foo": "bar"}) storage.add_run(run) assert storage.get_run_by_id(run_id) == run assert dict(storage.get_run_tags()) == {"foo": {"bar"}} storage.wipe() assert list(storage.get_runs()) == [] assert dict(storage.get_run_tags()) == {} def test_write_conflicting_run_id(self, storage): double_run_id = "double_run_id" pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) run = DagsterRun(run_id=double_run_id, pipeline_name=pipeline_def.name) assert storage.add_run(run) with pytest.raises(DagsterRunAlreadyExists): storage.add_run(run) def test_add_get_snapshot(self, storage): pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_snapshot = pipeline_def.get_pipeline_snapshot() pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot) assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id fetched_pipeline_snapshot = storage.get_pipeline_snapshot(pipeline_snapshot_id) assert fetched_pipeline_snapshot assert serialize_pp(fetched_pipeline_snapshot) == serialize_pp(pipeline_snapshot) assert storage.has_pipeline_snapshot(pipeline_snapshot_id) assert not storage.has_pipeline_snapshot("nope") if self.can_delete_runs(): storage.wipe() assert not storage.has_pipeline_snapshot(pipeline_snapshot_id) def test_single_write_read_with_snapshot(self, storage): run_with_snapshot_id = "lkasjdflkjasdf" pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_snapshot = pipeline_def.get_pipeline_snapshot() pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot) run_with_snapshot = DagsterRun( run_id=run_with_snapshot_id, pipeline_name=pipeline_def.name, pipeline_snapshot_id=pipeline_snapshot_id, ) assert not storage.has_pipeline_snapshot(pipeline_snapshot_id) assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id assert serialize_pp(storage.get_pipeline_snapshot(pipeline_snapshot_id)) == serialize_pp( pipeline_snapshot ) storage.add_run(run_with_snapshot) assert storage.get_run_by_id(run_with_snapshot_id) == run_with_snapshot if self.can_delete_runs(): storage.wipe() assert not storage.has_pipeline_snapshot(pipeline_snapshot_id) assert not storage.has_run(run_with_snapshot_id) def test_single_write_with_missing_snapshot(self, storage): run_with_snapshot_id = "lkasjdflkjasdf" pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) run_with_missing_snapshot = DagsterRun( run_id=run_with_snapshot_id, pipeline_name=pipeline_def.name, pipeline_snapshot_id="nope", ) with pytest.raises(DagsterSnapshotDoesNotExist): storage.add_run(run_with_missing_snapshot) def test_add_get_execution_snapshot(self, storage): from dagster.core.execution.api import create_execution_plan from dagster.core.snap import snapshot_from_execution_plan pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) execution_plan = create_execution_plan(pipeline_def) ep_snapshot = snapshot_from_execution_plan( execution_plan, pipeline_def.get_pipeline_snapshot_id() ) snapshot_id = storage.add_execution_plan_snapshot(ep_snapshot) fetched_ep_snapshot = storage.get_execution_plan_snapshot(snapshot_id) assert fetched_ep_snapshot assert serialize_pp(fetched_ep_snapshot) == serialize_pp(ep_snapshot) assert storage.has_execution_plan_snapshot(snapshot_id) assert not storage.has_execution_plan_snapshot("nope") if self.can_delete_runs(): storage.wipe() assert not storage.has_execution_plan_snapshot(snapshot_id) def 
test_fetch_run_filter(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.SUCCESS, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.SUCCESS, ), ) assert len(storage.get_runs()) == 2 some_runs = storage.get_runs(PipelineRunsFilter(run_ids=[one, two])) count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one, two])) assert len(some_runs) == 2 assert count == 2 def test_fetch_run_group(self, storage): assert storage root_run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") runs = [root_run] # Create 3 children and 3 descendants of the rightmost child: # root # / | \ # [0] [1] [2] # | # [a] # | # [b] # | # [c] for _ in range(3): runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", root_run_id=root_run.run_id, parent_run_id=root_run.run_id, tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id}, ) ) for _ in range(3): # get root run id from the previous run if exists, otherwise use previous run's id root_run_id = runs[-1].root_run_id if runs[-1].root_run_id else runs[-1].run_id parent_run_id = runs[-1].run_id runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", root_run_id=root_run_id, parent_run_id=parent_run_id, tags={PARENT_RUN_ID_TAG: parent_run_id, ROOT_RUN_ID_TAG: root_run_id}, ) ) for run in runs: storage.add_run(run) run_group_one = storage.get_run_group(root_run.run_id) assert len(run_group_one[1]) == 7 run_group_two = storage.get_run_group(runs[-1].run_id) assert len(run_group_two[1]) == 7 assert run_group_one[0] == run_group_two[0] assert run_group_one[1] == run_group_two[1] def test_fetch_run_group_not_found(self, storage): assert storage run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") storage.add_run(run) with pytest.raises(DagsterRunNotFoundError): storage.get_run_group(make_new_run_id()) def test_fetch_run_groups(self, storage): assert storage root_runs = [ TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") for i in range(3) ] runs = [run for run in root_runs] for _ in range(5): for root_run in root_runs: runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id}, ) ) for run in runs: storage.add_run(run) run_groups = storage.get_run_groups(limit=5) assert len(run_groups) == 3 expected_group_lens = { root_runs[i].run_id: expected_len for i, expected_len in enumerate([2, 3, 3]) } for root_run_id in run_groups: assert len(run_groups[root_run_id]["runs"]) == expected_group_lens[root_run_id] assert run_groups[root_run_id]["count"] == 6 def test_fetch_run_groups_filter(self, storage): assert storage root_runs = [ TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") for i in range(3) ] runs = [run for run in root_runs] for root_run in root_runs: failed_run_id = make_new_run_id() runs.append( TestRunStorage.build_run( run_id=failed_run_id, pipeline_name="foo_pipeline", tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id}, status=PipelineRunStatus.FAILURE, ) ) for _ in range(3): runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={PARENT_RUN_ID_TAG: failed_run_id, 
ROOT_RUN_ID_TAG: root_run.run_id}, ) ) for run in runs: storage.add_run(run) run_groups = storage.get_run_groups( limit=5, filters=PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE]) ) assert len(run_groups) == 3 for root_run_id in run_groups: assert len(run_groups[root_run_id]["runs"]) == 2 assert run_groups[root_run_id]["count"] == 5 def test_fetch_run_groups_ordering(self, storage): assert storage first_root_run = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline" ) storage.add_run(first_root_run) second_root_run = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline" ) storage.add_run(second_root_run) second_root_run_child = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={ PARENT_RUN_ID_TAG: second_root_run.run_id, ROOT_RUN_ID_TAG: second_root_run.run_id, }, ) storage.add_run(second_root_run_child) first_root_run_child = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={ PARENT_RUN_ID_TAG: first_root_run.run_id, ROOT_RUN_ID_TAG: first_root_run.run_id, }, ) storage.add_run(first_root_run_child) run_groups = storage.get_run_groups(limit=1) assert first_root_run.run_id in run_groups assert second_root_run.run_id not in run_groups def _skip_in_memory(self, storage): from dagster.core.storage.runs import InMemoryRunStorage if isinstance(storage, InMemoryRunStorage): pytest.skip() def test_empty_heartbeat(self, storage): self._skip_in_memory(storage) assert storage.get_daemon_heartbeats() == {} def test_add_heartbeat(self, storage): self._skip_in_memory(storage) # test insert added_heartbeat = DaemonHeartbeat( timestamp=pendulum.from_timestamp(1000).float_timestamp, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, errors=[], ) storage.add_daemon_heartbeat(added_heartbeat) assert len(storage.get_daemon_heartbeats()) == 1 stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()] assert stored_heartbeat == added_heartbeat # test update second_added_heartbeat = DaemonHeartbeat( timestamp=pendulum.from_timestamp(2000).float_timestamp, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, errors=[], ) storage.add_daemon_heartbeat(second_added_heartbeat) assert len(storage.get_daemon_heartbeats()) == 1 stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()] assert stored_heartbeat == second_added_heartbeat def test_wipe_heartbeats(self, storage): self._skip_in_memory(storage) if not self.can_delete_runs(): pytest.skip("storage cannot delete") added_heartbeat = DaemonHeartbeat( timestamp=pendulum.from_timestamp(1000).float_timestamp, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, errors=[], ) storage.add_daemon_heartbeat(added_heartbeat) storage.wipe_daemon_heartbeats() def test_backfill(self, storage): origin = self.fake_partition_set_origin("fake_partition_set") backfills = storage.get_backfills() assert len(backfills) == 0 one = PartitionBackfill( "one", origin, BulkActionStatus.REQUESTED, ["a", "b", "c"], False, None, None, pendulum.now().timestamp(), ) storage.add_backfill(one) assert len(storage.get_backfills()) == 1 assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 1 backfill = storage.get_backfill(one.backfill_id) assert backfill == one storage.update_backfill(one.with_status(status=BulkActionStatus.COMPLETED)) assert len(storage.get_backfills()) == 1 assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 0 def test_secondary_index(self, storage): if not 
isinstance(storage, SqlRunStorage): return for name in REQUIRED_DATA_MIGRATIONS.keys(): assert storage.has_built_index(name) def test_handle_run_event_pipeline_success_test(self, storage): run_id = make_new_run_id() run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id) storage.add_run(run_to_add) dagster_pipeline_start_event = DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_START.value, pipeline_name="pipeline_name", step_key=None, solid_handle=None, step_kind_value=None, logging_tags=None, ) storage.handle_run_event(run_id, dagster_pipeline_start_event) assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED storage.handle_run_event( make_new_run_id(), # diff run DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="pipeline_name", step_key=None, solid_handle=None, step_kind_value=None, logging_tags=None, ), ) assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED storage.handle_run_event( run_id, # correct run DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="pipeline_name", step_key=None, solid_handle=None, step_kind_value=None, logging_tags=None, ), ) assert storage.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS def test_debug_snapshot_import(self, storage): from dagster.core.execution.api import create_execution_plan from dagster.core.snap import ( snapshot_from_execution_plan, create_execution_plan_snapshot_id, ) run_id = make_new_run_id() run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id) storage.add_run(run_to_add) pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_snapshot = pipeline_def.get_pipeline_snapshot() pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot) new_pipeline_snapshot_id = f"{pipeline_snapshot_id}-new-snapshot" storage.add_snapshot(pipeline_snapshot, snapshot_id=new_pipeline_snapshot_id) assert not storage.has_snapshot(pipeline_snapshot_id) assert storage.has_snapshot(new_pipeline_snapshot_id) execution_plan = create_execution_plan(pipeline_def) ep_snapshot = snapshot_from_execution_plan(execution_plan, new_pipeline_snapshot_id) ep_snapshot_id = create_execution_plan_snapshot_id(ep_snapshot) new_ep_snapshot_id = f"{ep_snapshot_id}-new-snapshot" storage.add_snapshot(ep_snapshot, snapshot_id=new_ep_snapshot_id) assert not storage.has_snapshot(ep_snapshot_id) assert storage.has_snapshot(new_ep_snapshot_id) def test_run_record_stats(self, storage): assert storage self._skip_in_memory(storage) run_id = make_new_run_id() run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id) storage.add_run(run_to_add) run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0] assert run_record.start_time is None assert run_record.end_time is None storage.handle_run_event( run_id, DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_START.value, pipeline_name="pipeline_name", ), ) run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0] assert run_record.start_time is not None assert run_record.end_time is None storage.handle_run_event( run_id, DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="pipeline_name", ), ) run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0] assert run_record.start_time is not None assert 
run_record.end_time is not None assert run_record.end_time >= run_record.start_time @pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36") def test_by_job(self, storage): def _add_run(job_name, tags=None): return storage.add_run( TestRunStorage.build_run( pipeline_name=job_name, run_id=make_new_run_id(), tags=tags ) ) _a_one = _add_run("a_pipeline", tags={"a": "A"}) a_two = _add_run("a_pipeline", tags={"a": "A"}) _b_one = _add_run("b_pipeline", tags={"a": "A"}) b_two = _add_run("b_pipeline", tags={"a": "A"}) c_one = _add_run("c_pipeline", tags={"a": "A"}) c_two = _add_run("c_pipeline", tags={"a": "B"}) runs_by_job = { run.pipeline_name: run for run in storage.get_runs( bucket_by=JobBucket( job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1 ) ) } assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"} assert runs_by_job.get("a_pipeline").run_id == a_two.run_id assert runs_by_job.get("b_pipeline").run_id == b_two.run_id assert runs_by_job.get("c_pipeline").run_id == c_two.run_id # fetch with a runs filter applied runs_by_job = { run.pipeline_name: run for run in storage.get_runs( filters=PipelineRunsFilter(tags={"a": "A"}), bucket_by=JobBucket( job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1 ), ) } assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"} assert runs_by_job.get("a_pipeline").run_id == a_two.run_id assert runs_by_job.get("b_pipeline").run_id == b_two.run_id assert runs_by_job.get("c_pipeline").run_id == c_one.run_id @pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36") def test_by_tag(self, storage): def _add_run(job_name, tags=None): return storage.add_run( TestRunStorage.build_run( pipeline_name=job_name, run_id=make_new_run_id(), tags=tags ) ) _one = _add_run("a", tags={"a": "1"}) _two = _add_run("a", tags={"a": "2"}) three = _add_run("a", tags={"a": "3"}) _none = _add_run("a") b = _add_run("b", tags={"a": "4"}) one = _add_run("a", tags={"a": "1"}) two = _add_run("a", tags={"a": "2"}) runs_by_tag = { run.tags.get("a"): run for run in storage.get_runs( bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1) ) } assert set(runs_by_tag.keys()) == {"1", "2", "3", "4"} assert runs_by_tag.get("1").run_id == one.run_id assert runs_by_tag.get("2").run_id == two.run_id assert runs_by_tag.get("3").run_id == three.run_id assert runs_by_tag.get("4").run_id == b.run_id runs_by_tag = { run.tags.get("a"): run for run in storage.get_runs( filters=PipelineRunsFilter(pipeline_name="a"), bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1), ) } assert set(runs_by_tag.keys()) == {"1", "2", "3"} assert runs_by_tag.get("1").run_id == one.run_id assert runs_by_tag.get("2").run_id == two.run_id assert runs_by_tag.get("3").run_id == three.run_id def test_run_record_timestamps(self, storage): assert storage self._skip_in_memory(storage) @op def a(): pass @job def my_job(): a() with tempfile.TemporaryDirectory() as temp_dir: if storage._instance: # pylint: disable=protected-access instance = storage._instance # pylint: disable=protected-access else: instance = DagsterInstance( instance_type=InstanceType.EPHEMERAL, local_artifact_storage=LocalArtifactStorage(temp_dir), run_storage=storage, event_storage=InMemoryEventLogStorage(), compute_log_manager=NoOpComputeLogManager(), run_coordinator=DefaultRunCoordinator(), run_launcher=SyncInMemoryRunLauncher(), ) freeze_datetime = to_timezone( 
create_pendulum_time(2019, 11, 2, 0, 0, 0, tz="US/Central"), "US/Pacific" ) with pendulum.test(freeze_datetime): result = my_job.execute_in_process(instance=instance) records = instance.get_run_records( filters=PipelineRunsFilter(run_ids=[result.run_id]) ) assert len(records) == 1 record = records[0] assert record.start_time == freeze_datetime.timestamp() assert record.end_time == freeze_datetime.timestamp()
36.209985
100
0.611278
import sys import tempfile from datetime import datetime import pendulum import pytest from dagster import job, op, seven from dagster.core.definitions import PipelineDefinition from dagster.core.errors import ( DagsterRunAlreadyExists, DagsterRunNotFoundError, DagsterSnapshotDoesNotExist, ) from dagster.core.events import DagsterEvent, DagsterEventType from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill from dagster.core.host_representation import ( ExternalRepositoryOrigin, ManagedGrpcPythonEnvRepositoryLocationOrigin, ) from dagster.core.instance import DagsterInstance, InstanceType from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher from dagster.core.run_coordinator import DefaultRunCoordinator from dagster.core.snap import create_pipeline_snapshot_id from dagster.core.storage.event_log import InMemoryEventLogStorage from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager from dagster.core.storage.pipeline_run import ( DagsterRun, JobBucket, PipelineRunStatus, PipelineRunsFilter, TagBucket, ) from dagster.core.storage.root import LocalArtifactStorage from dagster.core.storage.runs.migration import REQUIRED_DATA_MIGRATIONS from dagster.core.storage.runs.sql_run_storage import SqlRunStorage from dagster.core.storage.tags import PARENT_RUN_ID_TAG, ROOT_RUN_ID_TAG from dagster.core.types.loadable_target_origin import LoadableTargetOrigin from dagster.core.utils import make_new_run_id from dagster.daemon.daemon import SensorDaemon from dagster.daemon.types import DaemonHeartbeat from dagster.serdes import serialize_pp from dagster.seven.compat.pendulum import create_pendulum_time, to_timezone win_py36 = seven.IS_WINDOWS and sys.version_info[0] == 3 and sys.version_info[1] == 6 class TestRunStorage: __test__ = False @pytest.fixture(name="storage", params=[]) def run_storage(self, request): with request.param() as s: yield s def can_delete_runs(self): return True @staticmethod def fake_repo_target(): return ExternalRepositoryOrigin( ManagedGrpcPythonEnvRepositoryLocationOrigin( LoadableTargetOrigin( executable_path=sys.executable, module_name="fake", attribute="fake" ), ), "fake_repo_name", ) @classmethod def fake_partition_set_origin(cls, partition_set_name): return cls.fake_repo_target().get_partition_set_origin(partition_set_name) @staticmethod def build_run( run_id, pipeline_name, mode="default", tags=None, status=PipelineRunStatus.NOT_STARTED, parent_run_id=None, root_run_id=None, pipeline_snapshot_id=None, ): return DagsterRun( pipeline_name=pipeline_name, run_id=run_id, run_config=None, mode=mode, tags=tags, status=status, root_run_id=root_run_id, parent_run_id=parent_run_id, pipeline_snapshot_id=pipeline_snapshot_id, ) def test_basic_storage(self, storage): assert storage run_id = make_new_run_id() added = storage.add_run( TestRunStorage.build_run( run_id=run_id, pipeline_name="some_pipeline", tags={"foo": "bar"} ) ) assert added runs = storage.get_runs() assert len(runs) == 1 run = runs[0] assert run.run_id == run_id assert run.pipeline_name == "some_pipeline" assert run.tags assert run.tags.get("foo") == "bar" assert storage.has_run(run_id) fetched_run = storage.get_run_by_id(run_id) assert fetched_run.run_id == run_id assert fetched_run.pipeline_name == "some_pipeline" def test_clear(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete") assert storage run_id = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline")) 
assert len(storage.get_runs()) == 1 storage.wipe() assert list(storage.get_runs()) == [] def test_storage_telemetry(self, storage): assert storage storage_id = storage.get_run_storage_id() assert isinstance(storage_id, str) storage_id_again = storage.get_run_storage_id() assert storage_id == storage_id_again def test_fetch_by_pipeline(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="some_pipeline")) storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="some_other_pipeline")) assert len(storage.get_runs()) == 2 some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline")) assert len(some_runs) == 1 assert some_runs[0].run_id == one def test_fetch_by_snapshot_id(self, storage): assert storage pipeline_def_a = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_def_b = PipelineDefinition(name="some_other_pipeline", solid_defs=[]) pipeline_snapshot_a = pipeline_def_a.get_pipeline_snapshot() pipeline_snapshot_b = pipeline_def_b.get_pipeline_snapshot() pipeline_snapshot_a_id = create_pipeline_snapshot_id(pipeline_snapshot_a) pipeline_snapshot_b_id = create_pipeline_snapshot_id(pipeline_snapshot_b) assert storage.add_pipeline_snapshot(pipeline_snapshot_a) == pipeline_snapshot_a_id assert storage.add_pipeline_snapshot(pipeline_snapshot_b) == pipeline_snapshot_b_id one = make_new_run_id() two = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", pipeline_snapshot_id=pipeline_snapshot_a_id, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_other_pipeline", pipeline_snapshot_id=pipeline_snapshot_b_id, ) ) assert len(storage.get_runs()) == 2 runs_a = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_a_id)) assert len(runs_a) == 1 assert runs_a[0].run_id == one runs_b = storage.get_runs(PipelineRunsFilter(snapshot_id=pipeline_snapshot_b_id)) assert len(runs_b) == 1 assert runs_b[0].run_id == two def test_add_run_tags(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=one, pipeline_name="foo")) storage.add_run(TestRunStorage.build_run(run_id=two, pipeline_name="bar")) assert storage.get_run_tags() == [] storage.add_run_tags(one, {"tag1": "val1", "tag2": "val2"}) storage.add_run_tags(two, {"tag1": "val1"}) assert storage.get_run_tags() == [("tag1", {"val1"}), ("tag2", {"val2"})] storage.add_run_tags(one, {"tag1": "val2", "tag3": "val3"}) test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 3 assert test_run.tags["tag1"] == "val2" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert storage.get_run_tags() == [ ("tag1", {"val1", "val2"}), ("tag2", {"val2"}), ("tag3", {"val3"}), ] storage.add_run_tags(one, {"tag1": "val3"}) test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 3 assert test_run.tags["tag1"] == "val3" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert storage.get_run_tags() == [ ("tag1", {"val1", "val3"}), ("tag2", {"val2"}), ("tag3", {"val3"}), ] storage.add_run_tags(one, {"tag4": "val4"}) test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 4 assert test_run.tags["tag1"] == "val3" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert test_run.tags["tag4"] == "val4" assert storage.get_run_tags() == [ ("tag1", {"val1", "val3"}), ("tag2", 
{"val2"}), ("tag3", {"val3"}), ("tag4", {"val4"}), ] test_run = storage.get_run_by_id(one) assert len(test_run.tags) == 4 assert test_run.tags["tag1"] == "val3" assert test_run.tags["tag2"] == "val2" assert test_run.tags["tag3"] == "val3" assert test_run.tags["tag4"] == "val4" some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag3": "val3"})) assert len(some_runs) == 1 assert some_runs[0].run_id == one runs_with_old_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val1"})) assert len(runs_with_old_tag) == 1 assert runs_with_old_tag[0].tags == {"tag1": "val1"} runs_with_new_tag = storage.get_runs(PipelineRunsFilter(tags={"tag1": "val3"})) assert len(runs_with_new_tag) == 1 assert runs_with_new_tag[0].tags == { "tag1": "val3", "tag2": "val2", "tag3": "val3", "tag4": "val4", } def test_fetch_by_filter(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"tag": "hello", "tag2": "world"}, status=PipelineRunStatus.SUCCESS, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"tag": "hello"}, status=PipelineRunStatus.FAILURE, ), ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="other_pipeline", status=PipelineRunStatus.SUCCESS ) ) assert len(storage.get_runs()) == 3 some_runs = storage.get_runs(PipelineRunsFilter(run_ids=[one])) count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one])) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline")) count = storage.get_runs_count(PipelineRunsFilter(pipeline_name="some_pipeline")) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS])) count = storage.get_runs_count(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS])) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == three assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello"})) count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello"})) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"})) count = storage.get_runs_count(PipelineRunsFilter(tags={"tag": "hello", "tag2": "world"})) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs( PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"}) ) count = storage.get_runs_count( PipelineRunsFilter(pipeline_name="some_pipeline", tags={"tag": "hello"}) ) assert len(some_runs) == 2 assert count == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs( PipelineRunsFilter( pipeline_name="some_pipeline", tags={"tag": "hello"}, statuses=[PipelineRunStatus.SUCCESS], ) ) count = storage.get_runs_count( PipelineRunsFilter( pipeline_name="some_pipeline", tags={"tag": "hello"}, statuses=[PipelineRunStatus.SUCCESS], ) ) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one # All filters some_runs = storage.get_runs( PipelineRunsFilter( run_ids=[one], pipeline_name="some_pipeline", tags={"tag": "hello"}, 
statuses=[PipelineRunStatus.SUCCESS], ) ) count = storage.get_runs_count( PipelineRunsFilter( run_ids=[one], pipeline_name="some_pipeline", tags={"tag": "hello"}, statuses=[PipelineRunStatus.SUCCESS], ) ) assert len(some_runs) == 1 assert count == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs(PipelineRunsFilter()) count = storage.get_runs_count(PipelineRunsFilter()) assert len(some_runs) == 3 assert count == 3 def test_fetch_count_by_tag(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello", "mytag2": "world"}, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"mytag": "goodbye", "mytag2": "world"}, ) ) storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline")) assert len(storage.get_runs()) == 3 run_count = storage.get_runs_count( filters=PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"}) ) assert run_count == 1 run_count = storage.get_runs_count(filters=PipelineRunsFilter(tags={"mytag2": "world"})) assert run_count == 2 run_count = storage.get_runs_count() assert run_count == 3 assert storage.get_run_tags() == [("mytag", {"hello", "goodbye"}), ("mytag2", {"world"})] def test_fetch_by_tags(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello", "mytag2": "world"}, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"mytag": "goodbye", "mytag2": "world"}, ) ) storage.add_run(TestRunStorage.build_run(run_id=three, pipeline_name="some_pipeline")) assert len(storage.get_runs()) == 3 some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello", "mytag2": "world"})) assert len(some_runs) == 1 assert some_runs[0].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag2": "world"})) assert len(some_runs) == 2 assert some_runs[0].run_id == two assert some_runs[1].run_id == one some_runs = storage.get_runs(PipelineRunsFilter(tags={})) assert len(some_runs) == 3 def test_paginated_fetch(self, storage): assert storage one, two, three = [make_new_run_id(), make_new_run_id(), make_new_run_id()] storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", tags={"mytag": "hello"} ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", tags={"mytag": "hello"} ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", tags={"mytag": "hello"} ) ) all_runs = storage.get_runs() assert len(all_runs) == 3 sliced_runs = storage.get_runs(cursor=three, limit=1) assert len(sliced_runs) == 1 assert sliced_runs[0].run_id == two all_runs = storage.get_runs(PipelineRunsFilter(pipeline_name="some_pipeline")) assert len(all_runs) == 3 sliced_runs = storage.get_runs( PipelineRunsFilter(pipeline_name="some_pipeline"), cursor=three, limit=1 ) assert len(sliced_runs) == 1 assert sliced_runs[0].run_id == two all_runs = storage.get_runs(PipelineRunsFilter(tags={"mytag": "hello"})) assert len(all_runs) == 3 sliced_runs = storage.get_runs( PipelineRunsFilter(tags={"mytag": "hello"}), cursor=three, limit=1 ) assert len(sliced_runs) == 1 assert sliced_runs[0].run_id == two def test_fetch_by_status(self, storage): assert storage one = make_new_run_id() 
two = make_new_run_id() three = make_new_run_id() four = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE ) ) assert { run.run_id for run in storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.NOT_STARTED]) ) } == {one} assert { run.run_id for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED])) } == { two, three, } assert { run.run_id for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE])) } == {four} assert { run.run_id for run in storage.get_runs(PipelineRunsFilter(statuses=[PipelineRunStatus.SUCCESS])) } == set() def test_fetch_records_by_update_timestamp(self, storage): assert storage self._skip_in_memory(storage) one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.FAILURE ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.handle_run_event( three, # three succeeds DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="some_pipeline", ), ) storage.handle_run_event( one, # fail one after two has fails and three has succeeded DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_FAILURE.value, pipeline_name="some_pipeline", ), ) record_two = storage.get_run_records( filters=PipelineRunsFilter(run_ids=[two], updated_after=datetime(2020, 1, 1)) )[0] run_two_update_timestamp = record_two.update_timestamp assert [ record.pipeline_run.run_id for record in storage.get_run_records( filters=PipelineRunsFilter(updated_after=run_two_update_timestamp), order_by="update_timestamp", ascending=True, ) ] == [three, one] assert [ record.pipeline_run.run_id for record in storage.get_run_records( filters=PipelineRunsFilter( statuses=[PipelineRunStatus.FAILURE], updated_after=run_two_update_timestamp ), ) ] == [one] def test_fetch_by_status_cursored(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() three = make_new_run_id() four = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=three, pipeline_name="some_pipeline", status=PipelineRunStatus.NOT_STARTED ) ) storage.add_run( TestRunStorage.build_run( run_id=four, pipeline_name="some_pipeline", status=PipelineRunStatus.STARTED ) ) cursor_four_runs = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four ) assert len(cursor_four_runs) == 2 assert {run.run_id for run in cursor_four_runs} == {one, two} cursor_two_runs = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=two 
) assert len(cursor_two_runs) == 1 assert {run.run_id for run in cursor_two_runs} == {one} cursor_one_runs = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=one ) assert not cursor_one_runs cursor_four_limit_one = storage.get_runs( PipelineRunsFilter(statuses=[PipelineRunStatus.STARTED]), cursor=four, limit=1 ) assert len(cursor_four_limit_one) == 1 assert cursor_four_limit_one[0].run_id == two def test_delete(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete runs") assert storage run_id = make_new_run_id() storage.add_run(TestRunStorage.build_run(run_id=run_id, pipeline_name="some_pipeline")) assert len(storage.get_runs()) == 1 storage.delete_run(run_id) assert list(storage.get_runs()) == [] def test_delete_with_tags(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete runs") assert storage run_id = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=run_id, pipeline_name="some_pipeline", tags={run_id: run_id}, ) ) assert len(storage.get_runs()) == 1 assert run_id in [key for key, value in storage.get_run_tags()] storage.delete_run(run_id) assert list(storage.get_runs()) == [] assert run_id not in [key for key, value in storage.get_run_tags()] def test_wipe_tags(self, storage): if not self.can_delete_runs(): pytest.skip("storage cannot delete") run_id = "some_run_id" run = DagsterRun(run_id=run_id, pipeline_name="a_pipeline", tags={"foo": "bar"}) storage.add_run(run) assert storage.get_run_by_id(run_id) == run assert dict(storage.get_run_tags()) == {"foo": {"bar"}} storage.wipe() assert list(storage.get_runs()) == [] assert dict(storage.get_run_tags()) == {} def test_write_conflicting_run_id(self, storage): double_run_id = "double_run_id" pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) run = DagsterRun(run_id=double_run_id, pipeline_name=pipeline_def.name) assert storage.add_run(run) with pytest.raises(DagsterRunAlreadyExists): storage.add_run(run) def test_add_get_snapshot(self, storage): pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_snapshot = pipeline_def.get_pipeline_snapshot() pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot) assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id fetched_pipeline_snapshot = storage.get_pipeline_snapshot(pipeline_snapshot_id) assert fetched_pipeline_snapshot assert serialize_pp(fetched_pipeline_snapshot) == serialize_pp(pipeline_snapshot) assert storage.has_pipeline_snapshot(pipeline_snapshot_id) assert not storage.has_pipeline_snapshot("nope") if self.can_delete_runs(): storage.wipe() assert not storage.has_pipeline_snapshot(pipeline_snapshot_id) def test_single_write_read_with_snapshot(self, storage): run_with_snapshot_id = "lkasjdflkjasdf" pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_snapshot = pipeline_def.get_pipeline_snapshot() pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot) run_with_snapshot = DagsterRun( run_id=run_with_snapshot_id, pipeline_name=pipeline_def.name, pipeline_snapshot_id=pipeline_snapshot_id, ) assert not storage.has_pipeline_snapshot(pipeline_snapshot_id) assert storage.add_pipeline_snapshot(pipeline_snapshot) == pipeline_snapshot_id assert serialize_pp(storage.get_pipeline_snapshot(pipeline_snapshot_id)) == serialize_pp( pipeline_snapshot ) storage.add_run(run_with_snapshot) assert storage.get_run_by_id(run_with_snapshot_id) == run_with_snapshot if 
self.can_delete_runs(): storage.wipe() assert not storage.has_pipeline_snapshot(pipeline_snapshot_id) assert not storage.has_run(run_with_snapshot_id) def test_single_write_with_missing_snapshot(self, storage): run_with_snapshot_id = "lkasjdflkjasdf" pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) run_with_missing_snapshot = DagsterRun( run_id=run_with_snapshot_id, pipeline_name=pipeline_def.name, pipeline_snapshot_id="nope", ) with pytest.raises(DagsterSnapshotDoesNotExist): storage.add_run(run_with_missing_snapshot) def test_add_get_execution_snapshot(self, storage): from dagster.core.execution.api import create_execution_plan from dagster.core.snap import snapshot_from_execution_plan pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) execution_plan = create_execution_plan(pipeline_def) ep_snapshot = snapshot_from_execution_plan( execution_plan, pipeline_def.get_pipeline_snapshot_id() ) snapshot_id = storage.add_execution_plan_snapshot(ep_snapshot) fetched_ep_snapshot = storage.get_execution_plan_snapshot(snapshot_id) assert fetched_ep_snapshot assert serialize_pp(fetched_ep_snapshot) == serialize_pp(ep_snapshot) assert storage.has_execution_plan_snapshot(snapshot_id) assert not storage.has_execution_plan_snapshot("nope") if self.can_delete_runs(): storage.wipe() assert not storage.has_execution_plan_snapshot(snapshot_id) def test_fetch_run_filter(self, storage): assert storage one = make_new_run_id() two = make_new_run_id() storage.add_run( TestRunStorage.build_run( run_id=one, pipeline_name="some_pipeline", status=PipelineRunStatus.SUCCESS, ) ) storage.add_run( TestRunStorage.build_run( run_id=two, pipeline_name="some_pipeline", status=PipelineRunStatus.SUCCESS, ), ) assert len(storage.get_runs()) == 2 some_runs = storage.get_runs(PipelineRunsFilter(run_ids=[one, two])) count = storage.get_runs_count(PipelineRunsFilter(run_ids=[one, two])) assert len(some_runs) == 2 assert count == 2 def test_fetch_run_group(self, storage): assert storage root_run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") runs = [root_run] # Create 3 children and 3 descendants of the rightmost child: # root # / | \ # [0] [1] [2] # | # [a] # | # [b] # | # [c] for _ in range(3): runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", root_run_id=root_run.run_id, parent_run_id=root_run.run_id, tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id}, ) ) for _ in range(3): # get root run id from the previous run if exists, otherwise use previous run's id root_run_id = runs[-1].root_run_id if runs[-1].root_run_id else runs[-1].run_id parent_run_id = runs[-1].run_id runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", root_run_id=root_run_id, parent_run_id=parent_run_id, tags={PARENT_RUN_ID_TAG: parent_run_id, ROOT_RUN_ID_TAG: root_run_id}, ) ) for run in runs: storage.add_run(run) run_group_one = storage.get_run_group(root_run.run_id) assert len(run_group_one[1]) == 7 run_group_two = storage.get_run_group(runs[-1].run_id) assert len(run_group_two[1]) == 7 assert run_group_one[0] == run_group_two[0] assert run_group_one[1] == run_group_two[1] def test_fetch_run_group_not_found(self, storage): assert storage run = TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") storage.add_run(run) with pytest.raises(DagsterRunNotFoundError): storage.get_run_group(make_new_run_id()) def test_fetch_run_groups(self, storage): 
assert storage root_runs = [ TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") for i in range(3) ] runs = [run for run in root_runs] for _ in range(5): for root_run in root_runs: runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id}, ) ) for run in runs: storage.add_run(run) run_groups = storage.get_run_groups(limit=5) assert len(run_groups) == 3 expected_group_lens = { root_runs[i].run_id: expected_len for i, expected_len in enumerate([2, 3, 3]) } for root_run_id in run_groups: assert len(run_groups[root_run_id]["runs"]) == expected_group_lens[root_run_id] assert run_groups[root_run_id]["count"] == 6 def test_fetch_run_groups_filter(self, storage): assert storage root_runs = [ TestRunStorage.build_run(run_id=make_new_run_id(), pipeline_name="foo_pipeline") for i in range(3) ] runs = [run for run in root_runs] for root_run in root_runs: failed_run_id = make_new_run_id() runs.append( TestRunStorage.build_run( run_id=failed_run_id, pipeline_name="foo_pipeline", tags={PARENT_RUN_ID_TAG: root_run.run_id, ROOT_RUN_ID_TAG: root_run.run_id}, status=PipelineRunStatus.FAILURE, ) ) for _ in range(3): runs.append( TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={PARENT_RUN_ID_TAG: failed_run_id, ROOT_RUN_ID_TAG: root_run.run_id}, ) ) for run in runs: storage.add_run(run) run_groups = storage.get_run_groups( limit=5, filters=PipelineRunsFilter(statuses=[PipelineRunStatus.FAILURE]) ) assert len(run_groups) == 3 for root_run_id in run_groups: assert len(run_groups[root_run_id]["runs"]) == 2 assert run_groups[root_run_id]["count"] == 5 def test_fetch_run_groups_ordering(self, storage): assert storage first_root_run = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline" ) storage.add_run(first_root_run) second_root_run = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline" ) storage.add_run(second_root_run) second_root_run_child = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={ PARENT_RUN_ID_TAG: second_root_run.run_id, ROOT_RUN_ID_TAG: second_root_run.run_id, }, ) storage.add_run(second_root_run_child) first_root_run_child = TestRunStorage.build_run( run_id=make_new_run_id(), pipeline_name="foo_pipeline", tags={ PARENT_RUN_ID_TAG: first_root_run.run_id, ROOT_RUN_ID_TAG: first_root_run.run_id, }, ) storage.add_run(first_root_run_child) run_groups = storage.get_run_groups(limit=1) assert first_root_run.run_id in run_groups assert second_root_run.run_id not in run_groups def _skip_in_memory(self, storage): from dagster.core.storage.runs import InMemoryRunStorage if isinstance(storage, InMemoryRunStorage): pytest.skip() def test_empty_heartbeat(self, storage): self._skip_in_memory(storage) assert storage.get_daemon_heartbeats() == {} def test_add_heartbeat(self, storage): self._skip_in_memory(storage) added_heartbeat = DaemonHeartbeat( timestamp=pendulum.from_timestamp(1000).float_timestamp, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, errors=[], ) storage.add_daemon_heartbeat(added_heartbeat) assert len(storage.get_daemon_heartbeats()) == 1 stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()] assert stored_heartbeat == added_heartbeat second_added_heartbeat = DaemonHeartbeat( timestamp=pendulum.from_timestamp(2000).float_timestamp, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, 
errors=[], ) storage.add_daemon_heartbeat(second_added_heartbeat) assert len(storage.get_daemon_heartbeats()) == 1 stored_heartbeat = storage.get_daemon_heartbeats()[SensorDaemon.daemon_type()] assert stored_heartbeat == second_added_heartbeat def test_wipe_heartbeats(self, storage): self._skip_in_memory(storage) if not self.can_delete_runs(): pytest.skip("storage cannot delete") added_heartbeat = DaemonHeartbeat( timestamp=pendulum.from_timestamp(1000).float_timestamp, daemon_type=SensorDaemon.daemon_type(), daemon_id=None, errors=[], ) storage.add_daemon_heartbeat(added_heartbeat) storage.wipe_daemon_heartbeats() def test_backfill(self, storage): origin = self.fake_partition_set_origin("fake_partition_set") backfills = storage.get_backfills() assert len(backfills) == 0 one = PartitionBackfill( "one", origin, BulkActionStatus.REQUESTED, ["a", "b", "c"], False, None, None, pendulum.now().timestamp(), ) storage.add_backfill(one) assert len(storage.get_backfills()) == 1 assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 1 backfill = storage.get_backfill(one.backfill_id) assert backfill == one storage.update_backfill(one.with_status(status=BulkActionStatus.COMPLETED)) assert len(storage.get_backfills()) == 1 assert len(storage.get_backfills(status=BulkActionStatus.REQUESTED)) == 0 def test_secondary_index(self, storage): if not isinstance(storage, SqlRunStorage): return for name in REQUIRED_DATA_MIGRATIONS.keys(): assert storage.has_built_index(name) def test_handle_run_event_pipeline_success_test(self, storage): run_id = make_new_run_id() run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id) storage.add_run(run_to_add) dagster_pipeline_start_event = DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_START.value, pipeline_name="pipeline_name", step_key=None, solid_handle=None, step_kind_value=None, logging_tags=None, ) storage.handle_run_event(run_id, dagster_pipeline_start_event) assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED storage.handle_run_event( make_new_run_id(), DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="pipeline_name", step_key=None, solid_handle=None, step_kind_value=None, logging_tags=None, ), ) assert storage.get_run_by_id(run_id).status == PipelineRunStatus.STARTED storage.handle_run_event( run_id, DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="pipeline_name", step_key=None, solid_handle=None, step_kind_value=None, logging_tags=None, ), ) assert storage.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS def test_debug_snapshot_import(self, storage): from dagster.core.execution.api import create_execution_plan from dagster.core.snap import ( snapshot_from_execution_plan, create_execution_plan_snapshot_id, ) run_id = make_new_run_id() run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id) storage.add_run(run_to_add) pipeline_def = PipelineDefinition(name="some_pipeline", solid_defs=[]) pipeline_snapshot = pipeline_def.get_pipeline_snapshot() pipeline_snapshot_id = create_pipeline_snapshot_id(pipeline_snapshot) new_pipeline_snapshot_id = f"{pipeline_snapshot_id}-new-snapshot" storage.add_snapshot(pipeline_snapshot, snapshot_id=new_pipeline_snapshot_id) assert not storage.has_snapshot(pipeline_snapshot_id) assert storage.has_snapshot(new_pipeline_snapshot_id) execution_plan = create_execution_plan(pipeline_def) 
ep_snapshot = snapshot_from_execution_plan(execution_plan, new_pipeline_snapshot_id) ep_snapshot_id = create_execution_plan_snapshot_id(ep_snapshot) new_ep_snapshot_id = f"{ep_snapshot_id}-new-snapshot" storage.add_snapshot(ep_snapshot, snapshot_id=new_ep_snapshot_id) assert not storage.has_snapshot(ep_snapshot_id) assert storage.has_snapshot(new_ep_snapshot_id) def test_run_record_stats(self, storage): assert storage self._skip_in_memory(storage) run_id = make_new_run_id() run_to_add = TestRunStorage.build_run(pipeline_name="pipeline_name", run_id=run_id) storage.add_run(run_to_add) run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0] assert run_record.start_time is None assert run_record.end_time is None storage.handle_run_event( run_id, DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_START.value, pipeline_name="pipeline_name", ), ) run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0] assert run_record.start_time is not None assert run_record.end_time is None storage.handle_run_event( run_id, DagsterEvent( message="a message", event_type_value=DagsterEventType.PIPELINE_SUCCESS.value, pipeline_name="pipeline_name", ), ) run_record = storage.get_run_records(PipelineRunsFilter(run_ids=[run_id]))[0] assert run_record.start_time is not None assert run_record.end_time is not None assert run_record.end_time >= run_record.start_time @pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36") def test_by_job(self, storage): def _add_run(job_name, tags=None): return storage.add_run( TestRunStorage.build_run( pipeline_name=job_name, run_id=make_new_run_id(), tags=tags ) ) _a_one = _add_run("a_pipeline", tags={"a": "A"}) a_two = _add_run("a_pipeline", tags={"a": "A"}) _b_one = _add_run("b_pipeline", tags={"a": "A"}) b_two = _add_run("b_pipeline", tags={"a": "A"}) c_one = _add_run("c_pipeline", tags={"a": "A"}) c_two = _add_run("c_pipeline", tags={"a": "B"}) runs_by_job = { run.pipeline_name: run for run in storage.get_runs( bucket_by=JobBucket( job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1 ) ) } assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"} assert runs_by_job.get("a_pipeline").run_id == a_two.run_id assert runs_by_job.get("b_pipeline").run_id == b_two.run_id assert runs_by_job.get("c_pipeline").run_id == c_two.run_id runs_by_job = { run.pipeline_name: run for run in storage.get_runs( filters=PipelineRunsFilter(tags={"a": "A"}), bucket_by=JobBucket( job_names=["a_pipeline", "b_pipeline", "c_pipeline"], bucket_limit=1 ), ) } assert set(runs_by_job.keys()) == {"a_pipeline", "b_pipeline", "c_pipeline"} assert runs_by_job.get("a_pipeline").run_id == a_two.run_id assert runs_by_job.get("b_pipeline").run_id == b_two.run_id assert runs_by_job.get("c_pipeline").run_id == c_one.run_id @pytest.mark.skipif(win_py36, reason="Sqlite rank queries not working on windows py36") def test_by_tag(self, storage): def _add_run(job_name, tags=None): return storage.add_run( TestRunStorage.build_run( pipeline_name=job_name, run_id=make_new_run_id(), tags=tags ) ) _one = _add_run("a", tags={"a": "1"}) _two = _add_run("a", tags={"a": "2"}) three = _add_run("a", tags={"a": "3"}) _none = _add_run("a") b = _add_run("b", tags={"a": "4"}) one = _add_run("a", tags={"a": "1"}) two = _add_run("a", tags={"a": "2"}) runs_by_tag = { run.tags.get("a"): run for run in storage.get_runs( bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1) ) } assert 
set(runs_by_tag.keys()) == {"1", "2", "3", "4"} assert runs_by_tag.get("1").run_id == one.run_id assert runs_by_tag.get("2").run_id == two.run_id assert runs_by_tag.get("3").run_id == three.run_id assert runs_by_tag.get("4").run_id == b.run_id runs_by_tag = { run.tags.get("a"): run for run in storage.get_runs( filters=PipelineRunsFilter(pipeline_name="a"), bucket_by=TagBucket(tag_key="a", tag_values=["1", "2", "3", "4"], bucket_limit=1), ) } assert set(runs_by_tag.keys()) == {"1", "2", "3"} assert runs_by_tag.get("1").run_id == one.run_id assert runs_by_tag.get("2").run_id == two.run_id assert runs_by_tag.get("3").run_id == three.run_id def test_run_record_timestamps(self, storage): assert storage self._skip_in_memory(storage) @op def a(): pass @job def my_job(): a() with tempfile.TemporaryDirectory() as temp_dir: if storage._instance: instance = storage._instance else: instance = DagsterInstance( instance_type=InstanceType.EPHEMERAL, local_artifact_storage=LocalArtifactStorage(temp_dir), run_storage=storage, event_storage=InMemoryEventLogStorage(), compute_log_manager=NoOpComputeLogManager(), run_coordinator=DefaultRunCoordinator(), run_launcher=SyncInMemoryRunLauncher(), ) freeze_datetime = to_timezone( create_pendulum_time(2019, 11, 2, 0, 0, 0, tz="US/Central"), "US/Pacific" ) with pendulum.test(freeze_datetime): result = my_job.execute_in_process(instance=instance) records = instance.get_run_records( filters=PipelineRunsFilter(run_ids=[result.run_id]) ) assert len(records) == 1 record = records[0] assert record.start_time == freeze_datetime.timestamp() assert record.end_time == freeze_datetime.timestamp()
true
true
1c49f411229f1de6a15db374752a524ef0e0ee0b
12,639
py
Python
rnn/train_search.py
cclauss/darts
b6d4fe1692a67d81adaa3d4bfd7c13e3dcb1d443
[ "Apache-2.0" ]
1
2018-07-26T01:16:31.000Z
2018-07-26T01:16:31.000Z
rnn/train_search.py
wangxinchina/darts
77a461b62edb232406891028645b2331a24a8b4d
[ "Apache-2.0" ]
null
null
null
rnn/train_search.py
wangxinchina/darts
77a461b62edb232406891028645b2331a24a8b4d
[ "Apache-2.0" ]
1
2019-06-18T05:53:16.000Z
2019-06-18T05:53:16.000Z
import argparse import os, sys, glob import time import math import numpy as np import torch import logging import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn from architect import Architect import gc import data import model_search as model from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model') parser.add_argument('--data', type=str, default='../data/penn/', help='location of the data corpus') parser.add_argument('--emsize', type=int, default=300, help='size of word embeddings') parser.add_argument('--nhid', type=int, default=300, help='number of hidden units per layer') parser.add_argument('--nhidlast', type=int, default=300, help='number of hidden units for the last rnn layer') parser.add_argument('--lr', type=float, default=20, help='initial learning rate') parser.add_argument('--clip', type=float, default=0.25, help='gradient clipping') parser.add_argument('--epochs', type=int, default=50, help='upper epoch limit') parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='batch size') parser.add_argument('--bptt', type=int, default=35, help='sequence length') parser.add_argument('--dropout', type=float, default=0.75, help='dropout applied to layers (0 = no dropout)') parser.add_argument('--dropouth', type=float, default=0.25, help='dropout for hidden nodes in rnn layers (0 = no dropout)') parser.add_argument('--dropoutx', type=float, default=0.75, help='dropout for input nodes in rnn layers (0 = no dropout)') parser.add_argument('--dropouti', type=float, default=0.2, help='dropout for input embedding layers (0 = no dropout)') parser.add_argument('--dropoute', type=float, default=0, help='dropout to remove words from embedding layer (0 = no dropout)') parser.add_argument('--seed', type=int, default=2, help='random seed') parser.add_argument('--nonmono', type=int, default=5, help='random seed') parser.add_argument('--cuda', action='store_false', help='use CUDA') parser.add_argument('--log-interval', type=int, default=50, metavar='N', help='report interval') parser.add_argument('--save', type=str, default='EXP', help='path to save the final model') parser.add_argument('--alpha', type=float, default=0, help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)') parser.add_argument('--beta', type=float, default=1e-3, help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)') parser.add_argument('--wdecay', type=float, default=5e-7, help='weight decay applied to all weights') parser.add_argument('--continue_train', action='store_true', help='continue train from a checkpoint') parser.add_argument('--small_batch_size', type=int, default=-1, help='the batch size for computation. batch_size should be divisible by small_batch_size.\ In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\ until batch_size is reached. 
An update step is then performed.') parser.add_argument('--max_seq_len_delta', type=int, default=20, help='max sequence length') parser.add_argument('--single_gpu', default=True, action='store_false', help='use single GPU') parser.add_argument('--gpu', type=int, default=0, help='GPU device to use') parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss') parser.add_argument('--arch_wdecay', type=float, default=1e-3, help='weight decay for the architecture encoding alpha') parser.add_argument('--arch_lr', type=float, default=3e-3, help='learning rate for the architecture encoding alpha') args = parser.parse_args() if args.nhidlast < 0: args.nhidlast = args.emsize if args.small_batch_size < 0: args.small_batch_size = args.batch_size if not args.continue_train: args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S")) create_exp_dir(args.save, scripts_to_save=glob.glob('*.py')) log_format = '%(asctime)s %(message)s' logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') fh = logging.FileHandler(os.path.join(args.save, 'log.txt')) fh.setFormatter(logging.Formatter(log_format)) logging.getLogger().addHandler(fh) # Set the random seed manually for reproducibility. np.random.seed(args.seed) torch.manual_seed(args.seed) if torch.cuda.is_available(): if not args.cuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") else: torch.cuda.set_device(args.gpu) cudnn.benchmark = True cudnn.enabled=True torch.cuda.manual_seed_all(args.seed) corpus = data.Corpus(args.data) eval_batch_size = 10 test_batch_size = 1 train_data = batchify(corpus.train, args.batch_size, args) search_data = batchify(corpus.valid, args.batch_size, args) val_data = batchify(corpus.valid, eval_batch_size, args) test_data = batchify(corpus.test, test_batch_size, args) ntokens = len(corpus.dictionary) if args.continue_train: model = torch.load(os.path.join(args.save, 'model.pt')) else: model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast, args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute) size = 0 for p in model.parameters(): size += p.nelement() logging.info('param size: {}'.format(size)) logging.info('initial genotype:') logging.info(model.genotype()) if args.cuda: if args.single_gpu: parallel_model = model.cuda() else: parallel_model = nn.DataParallel(model, dim=1).cuda() else: parallel_model = model architect = Architect(parallel_model, args) total_params = sum(x.data.nelement() for x in model.parameters()) logging.info('Args: {}'.format(args)) logging.info('Model total parameters: {}'.format(total_params)) def evaluate(data_source, batch_size=10): # Turn on evaluation mode which disables dropout. model.eval() total_loss = 0 ntokens = len(corpus.dictionary) hidden = model.init_hidden(batch_size) for i in range(0, data_source.size(0) - 1, args.bptt): data, targets = get_batch(data_source, i, args, evaluation=True) targets = targets.view(-1) log_prob, hidden = parallel_model(data, hidden) loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data total_loss += loss * len(data) hidden = repackage_hidden(hidden) return total_loss[0] / len(data_source) def train(): assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size' # Turn on training mode which enables dropout. 
total_loss = 0 start_time = time.time() ntokens = len(corpus.dictionary) hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)] hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)] batch, i = 0, 0 while i < train_data.size(0) - 1 - 1: bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2. # Prevent excessively small or negative sequence lengths # seq_len = max(5, int(np.random.normal(bptt, 5))) # # There's a very small chance that it could select a very long sequence length resulting in OOM # seq_len = min(seq_len, args.bptt + args.max_seq_len_delta) seq_len = int(bptt) lr2 = optimizer.param_groups[0]['lr'] optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt model.train() data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args) data, targets = get_batch(train_data, i, args, seq_len=seq_len) optimizer.zero_grad() start, end, s_id = 0, args.small_batch_size, 0 while start < args.batch_size: cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1) cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to start of the dataset. hidden[s_id] = repackage_hidden(hidden[s_id]) hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id]) hidden_valid[s_id], grad_norm = architect.step( hidden[s_id], cur_data, cur_targets, hidden_valid[s_id], cur_data_valid, cur_targets_valid, optimizer, args.unrolled) # assuming small_batch_size = batch_size so we don't accumulate gradients optimizer.zero_grad() hidden[s_id] = repackage_hidden(hidden[s_id]) log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True) raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets) loss = raw_loss # Activiation Regularization if args.alpha > 0: loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:]) # Temporal Activation Regularization (slowness) loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:]) loss *= args.small_batch_size / args.batch_size total_loss += raw_loss.data * args.small_batch_size / args.batch_size loss.backward() s_id += 1 start = end end = start + args.small_batch_size gc.collect() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs. torch.nn.utils.clip_grad_norm(model.parameters(), args.clip) optimizer.step() # total_loss += raw_loss.data optimizer.param_groups[0]['lr'] = lr2 if batch % args.log_interval == 0 and batch > 0: logging.info(parallel_model.genotype()) print(F.softmax(parallel_model.weights, dim=-1)) cur_loss = total_loss[0] / args.log_interval elapsed = time.time() - start_time logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | ' 'loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'], elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() batch += 1 i += seq_len # Loop over epochs. 
lr = args.lr best_val_loss = [] stored_loss = 100000000 if args.continue_train: optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt')) if 't0' in optimizer_state['param_groups'][0]: optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay) else: optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay) optimizer.load_state_dict(optimizer_state) else: optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay) for epoch in range(1, args.epochs+1): epoch_start_time = time.time() train() val_loss = evaluate(val_data, eval_batch_size) logging.info('-' * 89) logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | ' 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss))) logging.info('-' * 89) if val_loss < stored_loss: save_checkpoint(model, optimizer, epoch, args.save) logging.info('Saving Normal!') stored_loss = val_loss best_val_loss.append(val_loss)
44.038328
132
0.652742
import argparse import os, sys, glob import time import math import numpy as np import torch import logging import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn from architect import Architect import gc import data import model_search as model from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model') parser.add_argument('--data', type=str, default='../data/penn/', help='location of the data corpus') parser.add_argument('--emsize', type=int, default=300, help='size of word embeddings') parser.add_argument('--nhid', type=int, default=300, help='number of hidden units per layer') parser.add_argument('--nhidlast', type=int, default=300, help='number of hidden units for the last rnn layer') parser.add_argument('--lr', type=float, default=20, help='initial learning rate') parser.add_argument('--clip', type=float, default=0.25, help='gradient clipping') parser.add_argument('--epochs', type=int, default=50, help='upper epoch limit') parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='batch size') parser.add_argument('--bptt', type=int, default=35, help='sequence length') parser.add_argument('--dropout', type=float, default=0.75, help='dropout applied to layers (0 = no dropout)') parser.add_argument('--dropouth', type=float, default=0.25, help='dropout for hidden nodes in rnn layers (0 = no dropout)') parser.add_argument('--dropoutx', type=float, default=0.75, help='dropout for input nodes in rnn layers (0 = no dropout)') parser.add_argument('--dropouti', type=float, default=0.2, help='dropout for input embedding layers (0 = no dropout)') parser.add_argument('--dropoute', type=float, default=0, help='dropout to remove words from embedding layer (0 = no dropout)') parser.add_argument('--seed', type=int, default=2, help='random seed') parser.add_argument('--nonmono', type=int, default=5, help='random seed') parser.add_argument('--cuda', action='store_false', help='use CUDA') parser.add_argument('--log-interval', type=int, default=50, metavar='N', help='report interval') parser.add_argument('--save', type=str, default='EXP', help='path to save the final model') parser.add_argument('--alpha', type=float, default=0, help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)') parser.add_argument('--beta', type=float, default=1e-3, help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)') parser.add_argument('--wdecay', type=float, default=5e-7, help='weight decay applied to all weights') parser.add_argument('--continue_train', action='store_true', help='continue train from a checkpoint') parser.add_argument('--small_batch_size', type=int, default=-1, help='the batch size for computation. batch_size should be divisible by small_batch_size.\ In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\ until batch_size is reached. 
An update step is then performed.') parser.add_argument('--max_seq_len_delta', type=int, default=20, help='max sequence length') parser.add_argument('--single_gpu', default=True, action='store_false', help='use single GPU') parser.add_argument('--gpu', type=int, default=0, help='GPU device to use') parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss') parser.add_argument('--arch_wdecay', type=float, default=1e-3, help='weight decay for the architecture encoding alpha') parser.add_argument('--arch_lr', type=float, default=3e-3, help='learning rate for the architecture encoding alpha') args = parser.parse_args() if args.nhidlast < 0: args.nhidlast = args.emsize if args.small_batch_size < 0: args.small_batch_size = args.batch_size if not args.continue_train: args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S")) create_exp_dir(args.save, scripts_to_save=glob.glob('*.py')) log_format = '%(asctime)s %(message)s' logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') fh = logging.FileHandler(os.path.join(args.save, 'log.txt')) fh.setFormatter(logging.Formatter(log_format)) logging.getLogger().addHandler(fh) np.random.seed(args.seed) torch.manual_seed(args.seed) if torch.cuda.is_available(): if not args.cuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") else: torch.cuda.set_device(args.gpu) cudnn.benchmark = True cudnn.enabled=True torch.cuda.manual_seed_all(args.seed) corpus = data.Corpus(args.data) eval_batch_size = 10 test_batch_size = 1 train_data = batchify(corpus.train, args.batch_size, args) search_data = batchify(corpus.valid, args.batch_size, args) val_data = batchify(corpus.valid, eval_batch_size, args) test_data = batchify(corpus.test, test_batch_size, args) ntokens = len(corpus.dictionary) if args.continue_train: model = torch.load(os.path.join(args.save, 'model.pt')) else: model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast, args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute) size = 0 for p in model.parameters(): size += p.nelement() logging.info('param size: {}'.format(size)) logging.info('initial genotype:') logging.info(model.genotype()) if args.cuda: if args.single_gpu: parallel_model = model.cuda() else: parallel_model = nn.DataParallel(model, dim=1).cuda() else: parallel_model = model architect = Architect(parallel_model, args) total_params = sum(x.data.nelement() for x in model.parameters()) logging.info('Args: {}'.format(args)) logging.info('Model total parameters: {}'.format(total_params)) def evaluate(data_source, batch_size=10): model.eval() total_loss = 0 ntokens = len(corpus.dictionary) hidden = model.init_hidden(batch_size) for i in range(0, data_source.size(0) - 1, args.bptt): data, targets = get_batch(data_source, i, args, evaluation=True) targets = targets.view(-1) log_prob, hidden = parallel_model(data, hidden) loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data total_loss += loss * len(data) hidden = repackage_hidden(hidden) return total_loss[0] / len(data_source) def train(): assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size' total_loss = 0 start_time = time.time() ntokens = len(corpus.dictionary) hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)] hidden_valid = [model.init_hidden(args.small_batch_size) for _ in 
range(args.batch_size // args.small_batch_size)] batch, i = 0, 0 while i < train_data.size(0) - 1 - 1: bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2. seq_len = int(bptt) lr2 = optimizer.param_groups[0]['lr'] optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt model.train() data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args) data, targets = get_batch(train_data, i, args, seq_len=seq_len) optimizer.zero_grad() start, end, s_id = 0, args.small_batch_size, 0 while start < args.batch_size: cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1) cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1) # Starting each batch, we detach the hidden state from how it was previously produced. # If we didn't, the model would try backpropagating all the way to start of the dataset. hidden[s_id] = repackage_hidden(hidden[s_id]) hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id]) hidden_valid[s_id], grad_norm = architect.step( hidden[s_id], cur_data, cur_targets, hidden_valid[s_id], cur_data_valid, cur_targets_valid, optimizer, args.unrolled) optimizer.zero_grad() hidden[s_id] = repackage_hidden(hidden[s_id]) log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True) raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets) loss = raw_loss # Activiation Regularization if args.alpha > 0: loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:]) # Temporal Activation Regularization (slowness) loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:]) loss *= args.small_batch_size / args.batch_size total_loss += raw_loss.data * args.small_batch_size / args.batch_size loss.backward() s_id += 1 start = end end = start + args.small_batch_size gc.collect() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs. torch.nn.utils.clip_grad_norm(model.parameters(), args.clip) optimizer.step() # total_loss += raw_loss.data optimizer.param_groups[0]['lr'] = lr2 if batch % args.log_interval == 0 and batch > 0: logging.info(parallel_model.genotype()) print(F.softmax(parallel_model.weights, dim=-1)) cur_loss = total_loss[0] / args.log_interval elapsed = time.time() - start_time logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | ' 'loss {:5.2f} | ppl {:8.2f}'.format( epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'], elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss))) total_loss = 0 start_time = time.time() batch += 1 i += seq_len # Loop over epochs.
lr = args.lr best_val_loss = [] stored_loss = 100000000 if args.continue_train: optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt')) if 't0' in optimizer_state['param_groups'][0]: optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay) else: optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay) optimizer.load_state_dict(optimizer_state) else: optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay) for epoch in range(1, args.epochs+1): epoch_start_time = time.time() train() val_loss = evaluate(val_data, eval_batch_size) logging.info('-' * 89) logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | ' 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss))) logging.info('-' * 89) if val_loss < stored_loss: save_checkpoint(model, optimizer, epoch, args.save) logging.info('Saving Normal!') stored_loss = val_loss best_val_loss.append(val_loss)
true
true
1c49f5c305ab49c62a991b94780ce7e3479571cc
4,046
py
Python
codejobs/settings.py
amanfojnr/open-jobs-api
e70aa2c0d5031981cd571c50753fa5b28f5dce07
[ "MIT" ]
null
null
null
codejobs/settings.py
amanfojnr/open-jobs-api
e70aa2c0d5031981cd571c50753fa5b28f5dce07
[ "MIT" ]
null
null
null
codejobs/settings.py
amanfojnr/open-jobs-api
e70aa2c0d5031981cd571c50753fa5b28f5dce07
[ "MIT" ]
null
null
null
""" Django settings for codejobs project. Generated by 'django-admin startproject' using Django 1.11.7. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'b(=fum!6g93&xfvwmd^8#bz-2t8nqxbuum!9_ke!t$d&f@hztp' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'api', 'rest_framework.authtoken', 'rest_auth', 'django.contrib.sites', 'allauth', 'allauth.account', 'rest_auth.registration', ] SITE_ID = 1 MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'codejobs.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'codejobs.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles') STATIC_URL = '/static/' # django-allauth config ACCOUNT_EMAIL_VERIFICATION = None # heroku django settings try: import local_settings except ImportError: import dj_database_url ... DEBUG = False ALLOWED_HOSTS = ['127.0.0.1', '.herokuapp.com'] ... DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'codelabs', 'USER': 'amanfojnr', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '', } } ... 
db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env)
24.227545
91
0.674246
import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SECRET_KEY = 'b(=fum!6g93&xfvwmd^8#bz-2t8nqxbuum!9_ke!t$d&f@hztp' DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'api', 'rest_framework.authtoken', 'rest_auth', 'django.contrib.sites', 'allauth', 'allauth.account', 'rest_auth.registration', ] SITE_ID = 1 MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'codejobs.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'codejobs.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles') STATIC_URL = '/static/' # django-allauth config ACCOUNT_EMAIL_VERIFICATION = None # heroku django settings try: import local_settings except ImportError: import dj_database_url ... DEBUG = False ALLOWED_HOSTS = ['127.0.0.1', '.herokuapp.com'] ... DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'codelabs', 'USER': 'amanfojnr', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '', } } ... db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env)
true
true
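The settings record above falls back to dj_database_url when no local_settings module can be imported. A minimal sketch of how that fallback resolves the default database is below; the DATABASE_URL value and the placeholder DATABASES dict are made-up illustration, not part of the record.

import os
import dj_database_url

# On Heroku, DATABASE_URL is injected by the platform; here it is faked for illustration.
os.environ.setdefault('DATABASE_URL', 'postgres://user:secret@db.example.com:5432/codelabs')

DATABASES = {'default': {}}  # stand-in for the DATABASES dict defined in settings.py
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# DATABASES['default'] now holds ENGINE, NAME, USER, PASSWORD, HOST and PORT parsed
# from the URL, which is what the last two lines of the record accomplish.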
1c49f7108d5c141fdd81026bb117e58efba5174b
1,394
py
Python
AutomationFramework/tests/network_instance/test_ni_protocol_instances.py
sbarguil/Testing-framework
f3ef69f1c4f0aeafd02e222d846162c711783b15
[ "Apache-2.0" ]
1
2020-04-23T15:22:16.000Z
2020-04-23T15:22:16.000Z
AutomationFramework/tests/network_instance/test_ni_protocol_instances.py
sbarguil/Testing-framework
f3ef69f1c4f0aeafd02e222d846162c711783b15
[ "Apache-2.0" ]
44
2020-08-13T19:35:41.000Z
2021-03-01T09:08:00.000Z
AutomationFramework/tests/network_instance/test_ni_protocol_instances.py
sbarguil/Testing-framework
f3ef69f1c4f0aeafd02e222d846162c711783b15
[ "Apache-2.0" ]
6
2020-04-23T15:29:38.000Z
2022-03-03T14:23:38.000Z
import pytest from AutomationFramework.page_objects.network_instance.network_intance import NetworkInstance from AutomationFramework.tests.base_test import BaseTest class TestNetworkInstanceProtocolInstances(BaseTest): test_case_file = 'ni_protocol_instances.yml' @pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file, 'test_case_name': 'ni_protocol_instances_creation', 'page_object_class': NetworkInstance}]) def test_ni_protocol_instances_creation(self, create_page_object): create_page_object.execute_network_instance_edit_config_test_case() assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description() @pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file, 'test_case_name': 'ni_protocol_instances_enabled', 'page_object_class': NetworkInstance}]) def test_ni_protocol_instances_enabled(self, create_page_object): create_page_object.execute_network_instance_edit_config_test_case() assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
63.363636
117
0.700861
import pytest from AutomationFramework.page_objects.network_instance.network_intance import NetworkInstance from AutomationFramework.tests.base_test import BaseTest class TestNetworkInstanceProtocolInstances(BaseTest): test_case_file = 'ni_protocol_instances.yml' @pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file, 'test_case_name': 'ni_protocol_instances_creation', 'page_object_class': NetworkInstance}]) def test_ni_protocol_instances_creation(self, create_page_object): create_page_object.execute_network_instance_edit_config_test_case() assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description() @pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file, 'test_case_name': 'ni_protocol_instances_enabled', 'page_object_class': NetworkInstance}]) def test_ni_protocol_instances_enabled(self, create_page_object): create_page_object.execute_network_instance_edit_config_test_case() assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
true
true
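The two tests above parametrize create_page_object_arg but request a create_page_object fixture; that fixture lives in the framework's shared test code and is not part of this record. A hedged sketch of how such a fixture could consume the parametrized dict (the page-object constructor signature is an assumption):

import pytest

@pytest.fixture
def create_page_object(create_page_object_arg):
    # create_page_object_arg is supplied per test by @pytest.mark.parametrize.
    page_object_class = create_page_object_arg['page_object_class']
    return page_object_class(
        test_case_file=create_page_object_arg['test_case_file'],
        test_case_name=create_page_object_arg['test_case_name'])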
1c49f76aea09c7aaa661dfbbe91e896bbb4e690a
844
py
Python
Python/AWS_Scripts/moveFile.py
CharvyJain/Rotten-Scripts
c9b8f7dde378620e4a82eae7aacec53f1eeea3c5
[ "MIT" ]
3
2021-02-06T16:16:46.000Z
2021-08-20T03:19:01.000Z
Python/Aws/moveFile.py
SKAUL05/Rotten-Scripts
c44e69754bbecb8a547fe2cc3a29be5acf97c46a
[ "MIT" ]
null
null
null
Python/Aws/moveFile.py
SKAUL05/Rotten-Scripts
c44e69754bbecb8a547fe2cc3a29be5acf97c46a
[ "MIT" ]
1
2021-08-08T16:03:40.000Z
2021-08-08T16:03:40.000Z
import boto3 awsAccessKeyId = "" awsSecretAccessKey = "" bucketName= "" directoryName = "" s3 = boto3.resource( 's3', aws_access_key_id=awsAccessKeyId, aws_secret_access_key=awsSecretAccessKey ) myBucket = s3.Bucket(bucketName) def moveFile(): try: for objectSummary in myBucket.objects.filter(Prefix=directoryName): s3FilePath = objectSummary.key sourceFilename = (s3FilePath).split("/")[-1] copySource = {"Bucket": bucketName, "Key": s3FilePath} targetFilename = f"{destinationDirectory}/{sourceFilename}" s3.meta.client.copy(copySource, bucketName, targetFilename) s3.Object(bucketName, s3FilePath).delete() except Exception as err: print(err) if __name__ == '__main__': moveFile()
30.142857
79
0.631517
import boto3 awsAccessKeyId = "" awsSecretAccessKey = "" bucketName= "" directoryName = "" s3 = boto3.resource( 's3', aws_access_key_id=awsAccessKeyId, aws_secret_access_key=awsSecretAccessKey ) myBucket = s3.Bucket(bucketName) def moveFile(): try: for objectSummary in myBucket.objects.filter(Prefix=directoryName): s3FilePath = objectSummary.key sourceFilename = (s3FilePath).split("/")[-1] copySource = {"Bucket": bucketName, "Key": s3FilePath} targetFilename = f"{destinationDirectory}/{sourceFilename}" s3.meta.client.copy(copySource, bucketName, targetFilename) s3.Object(bucketName, s3FilePath).delete() except Exception as err: print(err) if __name__ == '__main__': moveFile()
true
true
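The script above builds targetFilename from a destinationDirectory that the record never defines. A minimal sketch of the same copy-then-delete move with both prefixes defined; the bucket and prefix values are assumed examples.

import boto3

bucket_name = "example-bucket"      # assumed values for illustration
source_prefix = "incoming"
destination_prefix = "archive"

s3 = boto3.resource("s3")
bucket = s3.Bucket(bucket_name)

for obj in bucket.objects.filter(Prefix=source_prefix):
    filename = obj.key.split("/")[-1]
    copy_source = {"Bucket": bucket_name, "Key": obj.key}
    # Copy to the destination prefix, then delete the original to complete the move.
    s3.meta.client.copy(copy_source, bucket_name, f"{destination_prefix}/{filename}")
    s3.Object(bucket_name, obj.key).delete()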
1c49f7929e2a520d2bfeebebc3d0b9896156a77e
2,768
py
Python
starfish/image/_filter/scale_by_percentile.py
vipulsinghal02/starfish
c3d347954ad40a7a4be9a50d89974f5fbbc2919d
[ "MIT" ]
null
null
null
starfish/image/_filter/scale_by_percentile.py
vipulsinghal02/starfish
c3d347954ad40a7a4be9a50d89974f5fbbc2919d
[ "MIT" ]
null
null
null
starfish/image/_filter/scale_by_percentile.py
vipulsinghal02/starfish
c3d347954ad40a7a4be9a50d89974f5fbbc2919d
[ "MIT" ]
null
null
null
from functools import partial from typing import Optional import numpy as np from starfish.imagestack.imagestack import ImageStack from ._base import FilterAlgorithmBase from .util import preserve_float_range class ScaleByPercentile(FilterAlgorithmBase): def __init__(self, p: int=0, is_volume: bool=False, **kwargs) -> None: """Image scaling filter Parameters ---------- p : int each image in the stack is scaled by this percentile. must be in [0, 100] is_volume : bool If True, 3d (z, y, x) volumes will be filtered. By default, filter 2-d (y, x) tiles kwargs """ self.p = p self.is_volume = is_volume _DEFAULT_TESTING_PARAMETERS = {"p": 0} @classmethod def _add_arguments(cls, group_parser) -> None: group_parser.add_argument( "--p", default=100, type=int, help="scale images by this percentile") @staticmethod def _scale(image: np.ndarray, p: int) -> np.ndarray: """Clip values of img below and above percentiles p_min and p_max Parameters ---------- image : np.ndarray image to be scaled p : int each image in the stack is scaled by this percentile. must be in [0, 100] Notes ----- - Setting p to 100 scales the image by it's maximum value - No shifting or transformation to adjust dynamic range is done after scaling Returns ------- np.ndarray : Numpy array of same shape as img """ v = np.percentile(image, p) image = image / v image = preserve_float_range(image) return image def run( self, stack: ImageStack, in_place: bool=False, verbose: bool=False, n_processes: Optional[int]=None ) -> ImageStack: """Perform filtering of an image stack Parameters ---------- stack : ImageStack Stack to be filtered. in_place : bool if True, process ImageStack in-place, otherwise return a new stack verbose : bool If True, report on the percentage completed (default = False) during processing n_processes : Optional[int] Number of parallel processes to devote to calculating the filter Returns ------- ImageStack : If in-place is False, return the results of filter as a new stack. Otherwise return the original stack. """ clip = partial(self._scale, p=self.p) result = stack.apply( clip, is_volume=self.is_volume, verbose=verbose, in_place=in_place, n_processes=n_processes ) return result
29.136842
100
0.595737
from functools import partial from typing import Optional import numpy as np from starfish.imagestack.imagestack import ImageStack from ._base import FilterAlgorithmBase from .util import preserve_float_range class ScaleByPercentile(FilterAlgorithmBase): def __init__(self, p: int=0, is_volume: bool=False, **kwargs) -> None: self.p = p self.is_volume = is_volume _DEFAULT_TESTING_PARAMETERS = {"p": 0} @classmethod def _add_arguments(cls, group_parser) -> None: group_parser.add_argument( "--p", default=100, type=int, help="scale images by this percentile") @staticmethod def _scale(image: np.ndarray, p: int) -> np.ndarray: v = np.percentile(image, p) image = image / v image = preserve_float_range(image) return image def run( self, stack: ImageStack, in_place: bool=False, verbose: bool=False, n_processes: Optional[int]=None ) -> ImageStack: clip = partial(self._scale, p=self.p) result = stack.apply( clip, is_volume=self.is_volume, verbose=verbose, in_place=in_place, n_processes=n_processes ) return result
true
true
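A short usage sketch for the filter above, following the run() signature in the record; the synthetic ImageStack construction (ImageStack.from_numpy_array and the 5-d shape) is an assumption about this starfish version, not taken from the record.

import numpy as np
from starfish.imagestack.imagestack import ImageStack
from starfish.image._filter.scale_by_percentile import ScaleByPercentile

# Assumed helper/shape: a (rounds, channels, z, y, x) float32 array wrapped as an ImageStack.
stack = ImageStack.from_numpy_array(np.random.rand(1, 1, 1, 64, 64).astype(np.float32))

scaler = ScaleByPercentile(p=100)            # p=100 scales each tile by its maximum value
scaled = scaler.run(stack, in_place=False)   # returns a new ImageStack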
1c49f7f555a1957609cf19ef4517fb9da15f2e1a
688
py
Python
mogan/image/__init__.py
GURUIFENG9139/rocky-mogan
6008c1d12b00e70d2cc651f7bd5d47968fc3aec7
[ "Apache-2.0" ]
null
null
null
mogan/image/__init__.py
GURUIFENG9139/rocky-mogan
6008c1d12b00e70d2cc651f7bd5d47968fc3aec7
[ "Apache-2.0" ]
null
null
null
mogan/image/__init__.py
GURUIFENG9139/rocky-mogan
6008c1d12b00e70d2cc651f7bd5d47968fc3aec7
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def API(): # Needed to prevent circular import... import mogan.image.api return mogan.image.api.API()
38.222222
78
0.715116
def API(): import mogan.image.api return mogan.image.api.API()
true
true
1c49fa29619138af962a51bbcd48f42a98d3cb02
1,274
py
Python
perfkitbenchmarker/linux_packages/nfs_server.py
msidana/PerfKitBenchmarker
2784642d3e6b20b3f474c4e27edb1ef163804f66
[ "Apache-2.0" ]
2
2021-01-15T09:40:28.000Z
2021-01-15T09:40:36.000Z
perfkitbenchmarker/linux_packages/nfs_server.py
msidana/PerfKitBenchmarker
2784642d3e6b20b3f474c4e27edb1ef163804f66
[ "Apache-2.0" ]
1
2021-02-23T12:07:44.000Z
2021-02-23T12:07:44.000Z
perfkitbenchmarker/linux_packages/nfs_server.py
msidana/PerfKitBenchmarker
2784642d3e6b20b3f474c4e27edb1ef163804f66
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing installation for NFSv4 Server. Network File System (NFS) is a distributed file system protocol that allows a user on a client computer to access files over a computer network much like local storage is accessed. This is mainly used for scientific-computing distributed workloads that require file copying between master and worker nodes. Server can be used on the master node. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function PACKAGE_NAME = 'nfs_server' def YumInstall(vm): vm.InstallPackages('nfs-utils') def AptInstall(vm): vm.InstallPackages('nfs-kernel-server')
32.666667
79
0.787284
from __future__ import absolute_import from __future__ import division from __future__ import print_function PACKAGE_NAME = 'nfs_server' def YumInstall(vm): vm.InstallPackages('nfs-utils') def AptInstall(vm): vm.InstallPackages('nfs-kernel-server')
true
true
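A hedged sketch of how a benchmark would typically pull this package onto a VM; the benchmark_spec/vm objects and the Install dispatch come from PerfKitBenchmarker's provisioning layer, which is not shown in the record.

def prepare(benchmark_spec):
    # benchmark_spec.vms is populated by PerfKitBenchmarker before Prepare runs.
    master_vm = benchmark_spec.vms[0]
    # Install dispatches to YumInstall or AptInstall above based on the guest OS.
    master_vm.Install('nfs_server')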
1c49fa7a717d7e4d70535ca92cd54ab0fc3c3e50
19,387
py
Python
speech/utils/textgrid.py
dzubke/speech-lite
65f83ac2b7551650820f079ce5152741f2a6fdb8
[ "Apache-2.0" ]
null
null
null
speech/utils/textgrid.py
dzubke/speech-lite
65f83ac2b7551650820f079ce5152741f2a6fdb8
[ "Apache-2.0" ]
null
null
null
speech/utils/textgrid.py
dzubke/speech-lite
65f83ac2b7551650820f079ce5152741f2a6fdb8
[ "Apache-2.0" ]
null
null
null
# Natural Language Toolkit: TextGrid analysis # # Copyright (C) 2001-2011 NLTK Project # Author: Margaret Mitchell <itallow@gmail.com> # Steven Bird <sb@csse.unimelb.edu.au> (revisions) # URL: <http://www.nltk.org> # For license information, see LICENSE.TXT # """ Tools for reading TextGrid files, the format used by Praat. Module contents =============== The textgrid corpus reader provides 4 data items and 1 function for each textgrid file. For each tier in the file, the reader provides 10 data items and 2 functions. For the full textgrid file: - size The number of tiers in the file. - xmin First marked time of the file. - xmax Last marked time of the file. - t_time xmax - xmin. - text_type The style of TextGrid format: - ooTextFile: Organized by tier. - ChronTextFile: Organized by time. - OldooTextFile: Similar to ooTextFile. - to_chron() Convert given file to a ChronTextFile format. - to_oo() Convert given file to an ooTextFile format. For each tier: - text_type The style of TextGrid format, as above. - classid The style of transcription on this tier: - IntervalTier: Transcription is marked as intervals. - TextTier: Transcription is marked as single points. - nameid The name of the tier. - xmin First marked time of the tier. - xmax Last marked time of the tier. - size Number of entries in the tier. - transcript The raw transcript for the tier. - simple_transcript The transcript formatted as a list of tuples: (time1, time2, utterance). - tier_info List of (classid, nameid, xmin, xmax, size, transcript). - min_max() A tuple of (xmin, xmax). - time(non_speech_marker) Returns the utterance time of a given tier. Excludes entries that begin with a non-speech marker. """ # needs more cleanup, subclassing, epydoc docstrings import sys import re TEXTTIER = "TextTier" INTERVALTIER = "IntervalTier" OOTEXTFILE = re.compile(r"""(?x) xmin\ =\ (.*)[\r\n]+ xmax\ =\ (.*)[\r\n]+ [\s\S]+?size\ =\ (.*)[\r\n]+ """) CHRONTEXTFILE = re.compile(r"""(?x) [\r\n]+(\S+)\ (\S+)\ +!\ Time\ domain.\ *[\r\n]+ (\S+)\ +!\ Number\ of\ tiers.\ *[\r\n]+" """) OLDOOTEXTFILE = re.compile(r"""(?x) [\r\n]+(\S+) [\r\n]+(\S+) [\r\n]+.+[\r\n]+(\S+) """) ################################################################# # TextGrid Class ################################################################# class TextGrid(object): """ Class to manipulate the TextGrid format used by Praat. Separates each tier within this file into its own Tier object. Each TextGrid object has a number of tiers (size), xmin, xmax, a text type to help with the different styles of TextGrid format, and tiers with their own attributes. """ def __init__(self, read_file, config:dict=None): """ Takes open read file as input, initializes attributes of the TextGrid file. @type read_file: An open TextGrid file, mode "r". @arg config (dict): dict configuration file to create a TextGrid object @param size: Number of tiers. @param xmin: xmin. @param xmax: xmax. @param t_time: Total time of TextGrid file. @param text_type: TextGrid format. @type tiers: A list of tier objects. 
""" # default creation of textgrid object if config is None: self.read_file = read_file self.size = 0 self.xmin = 0 self.xmax = 0 self.t_time = 0 self.text_type = self._check_type() self.tiers = self._find_tiers() # creating a textgrid from a dict config else: self.read_file = None self.size = config['size'] self.xmin = config['xmin'] self.xmax = config['xmax'] self.t_time = config['t_time'] def __iter__(self): for tier in self.tiers: yield tier def next(self): if self.idx == (self.size - 1): raise StopIteration self.idx += 1 return self.tiers[self.idx] @staticmethod def load(file): """ @param file: a file in TextGrid format """ return TextGrid(open(file).read()) def _load_tiers(self, header): """ Iterates over each tier and grabs tier information. """ tiers = [] if self.text_type == "ChronTextFile": m = re.compile(header) tier_headers = m.findall(self.read_file) tier_re = " \d+.?\d* \d+.?\d*[\r\n]+\"[^\"]*\"" for i in range(0, self.size): tier_info = [tier_headers[i]] + \ re.findall(str(i + 1) + tier_re, self.read_file) tier_info = "\n".join(tier_info) tiers.append(Tier(tier_info, self.text_type, self.t_time)) return tiers tier_re = header + "[\s\S]+?(?=" + header + "|$$)" m = re.compile(tier_re) tier_iter = m.finditer(self.read_file) for iterator in tier_iter: (begin, end) = iterator.span() tier_info = self.read_file[begin:end] tiers.append(Tier(tier_info, self.text_type, self.t_time)) return tiers def _check_type(self): """ Figures out the TextGrid format. """ m = re.match("(.*)[\r\n](.*)[\r\n](.*)[\r\n](.*)", self.read_file) try: type_id = m.group(1).strip() except AttributeError: raise TypeError("Cannot read file -- try TextGrid.load()") xmin = m.group(4) if type_id == "File type = \"ooTextFile\"": if "xmin" not in xmin: text_type = "OldooTextFile" else: text_type = "ooTextFile" elif type_id == "\"Praat chronological TextGrid text file\"": text_type = "ChronTextFile" else: raise TypeError("Unknown format '(%s)'", (type_id)) return text_type def _find_tiers(self): """ Splits the textgrid file into substrings corresponding to tiers. """ if self.text_type == "ooTextFile": m = OOTEXTFILE header = "\\t+item \[" elif self.text_type == "ChronTextFile": m = CHRONTEXTFILE header = "\"\S+\" \".*\" \d+\.?\d* \d+\.?\d*" elif self.text_type == "OldooTextFile": m = OLDOOTEXTFILE header = "\".*\"[\r\n]+\".*\"" file_info = m.findall(self.read_file)[0] self.xmin = float(file_info[0]) self.xmax = float(file_info[1]) self.t_time = self.xmax - self.xmin self.size = int(file_info[2]) tiers = self._load_tiers(header) return tiers def to_chron(self): """ @return: String in Chronological TextGrid file format. """ chron_file = "" chron_file += "\"Praat chronological TextGrid text file\"\n" chron_file += str(self.xmin) + " " + str(self.xmax) chron_file += " ! Time domain.\n" chron_file += str(self.size) + " ! Number of tiers.\n" for tier in self.tiers: idx = (self.tiers.index(tier)) + 1 tier_header = "\"" + tier.classid + "\" \"" \ + tier.nameid + "\" " + str(tier.xmin) \ + " " + str(tier.xmax) chron_file += tier_header + "\n" transcript = tier.simple_transcript for (xmin, xmax, utt) in transcript: chron_file += str(idx) + " " + str(xmin) chron_file += " " + str(xmax) +"\n" chron_file += "\"" + utt + "\"\n" return chron_file def to_oo(self): """ @return: A string in OoTextGrid file format. """ oo_file = "" oo_file += "File type = \"ooTextFile\"\n" oo_file += "Object class = \"TextGrid\"\n\n" oo_file += "xmin = ", self.xmin, "\n" oo_file += "xmax = ", self.xmax, "\n" oo_file += "tiers? 
<exists>\n" oo_file += "size = ", self.size, "\n" oo_file += "item []:\n" for i in range(len(self.tiers)): oo_file += "%4s%s [%s]" % ("", "item", i + 1) _curr_tier = self.tiers[i] for (x, y) in _curr_tier.header: oo_file += "%8s%s = \"%s\"" % ("", x, y) if _curr_tier.classid != TEXTTIER: for (xmin, xmax, text) in _curr_tier.simple_transcript: oo_file += "%12s%s = %s" % ("", "xmin", xmin) oo_file += "%12s%s = %s" % ("", "xmax", xmax) oo_file += "%12s%s = \"%s\"" % ("", "text", text) else: for (time, mark) in _curr_tier.simple_transcript: oo_file += "%12s%s = %s" % ("", "time", time) oo_file += "%12s%s = %s" % ("", "mark", mark) return oo_file ################################################################# # Tier Class ################################################################# class Tier(object): """ A container for each tier. """ def __init__(self, tier, text_type, t_time, config=None): """ Initializes attributes of the tier: class, name, xmin, xmax size, transcript, total time. Utilizes text_type to guide how to parse the file. @type tier: a tier object; single item in the TextGrid list. @param text_type: TextGrid format @param t_time: Total time of TextGrid file. @param classid: Type of tier (point or interval). @param nameid: Name of tier. @param xmin: xmin of the tier. @param xmax: xmax of the tier. @param size: Number of entries in the tier @param transcript: The raw transcript for the tier. @arg config (dict): a dictionary configuration of a tier object """ if config is None: self.tier = tier self.text_type = text_type self.t_time = t_time self.classid = "" self.nameid = "" self.xmin = 0 self.xmax = 0 self.size = 0 self.transcript = "" self.tier_info = "" self._make_info() self.simple_transcript = self.make_simple_transcript() if self.classid != TEXTTIER: self.mark_type = "intervals" else: self.mark_type = "points" self.header = [("class", self.classid), ("name", self.nameid), \ ("xmin", self.xmin), ("xmax", self.xmax), ("size", self.size)] else: pass def __iter__(self): return self def _make_info(self): """ Figures out most attributes of the tier object: class, name, xmin, xmax, transcript. """ trans = "([\S\s]*)" if self.text_type == "ChronTextFile": classid = "\"(.*)\" +" nameid = "\"(.*)\" +" xmin = "(\d+\.?\d*) +" xmax = "(\d+\.?\d*) *[\r\n]+" # No size values are given in the Chronological Text File format. 
self.size = None size = "" elif self.text_type == "ooTextFile": classid = "\t+class = \"(.*)\" *[\r\n]+" nameid = "\t+name = \"(.*)\" *[\r\n]+" xmin = "\t+xmin = (\d+\.?\d*) *[\r\n]+" xmax = "\t+xmax = (\d+\.?\d*) *[\r\n]+" size = "\t+\S+: size = (\d+) *[\r\n]+" elif self.text_type == "OldooTextFile": classid = "\"(.*)\" *[\r\n]+" nameid = "\"(.*)\" *[\r\n]+" xmin = "(\d+\.?\d*) *[\r\n]+" xmax = "(\d+\.?\d*) *[\r\n]+" size = "(\d+) *[\r\n]+" m = re.compile(classid + nameid + xmin + xmax + size + trans) self.tier_info = m.findall(self.tier)[0] self.classid = self.tier_info[0] self.nameid = self.tier_info[1] self.xmin = float(self.tier_info[2]) self.xmax = float(self.tier_info[3]) if self.size != None: self.size = int(self.tier_info[4]) self.transcript = self.tier_info[-1] def make_simple_transcript(self): """ @return: Transcript of the tier, in form [(start_time end_time label)] """ if self.text_type == "ChronTextFile": trans_head = "" trans_xmin = " (\S+)" trans_xmax = " (\S+)[\r\n]+" trans_text = "\"([\S\s]*?)\"" elif self.text_type == "ooTextFile": trans_head = "\\t+\S+ \[\d+\]: *[\r\n]+" trans_xmin = "\\t+\S+ = (\S+) *[\r\n]+" trans_xmax = "\\t+\S+ = (\S+) *[\r\n]+" trans_text = "\\t+\S+ = \"([^\"]*?)\"" elif self.text_type == "OldooTextFile": trans_head = "" trans_xmin = "(.*)[\r\n]+" trans_xmax = "(.*)[\r\n]+" trans_text = "\"([\S\s]*?)\"" if self.classid == TEXTTIER: trans_xmin = "" trans_m = re.compile(trans_head + trans_xmin + trans_xmax + trans_text) self.simple_transcript = trans_m.findall(self.transcript) return self.simple_transcript def transcript(self): """ @return: Transcript of the tier, as it appears in the file. """ return self.transcript def time(self, non_speech_char="."): """ @return: Utterance time of a given tier. Screens out entries that begin with a non-speech marker. """ total = 0.0 if self.classid != TEXTTIER: for (time1, time2, utt) in self.simple_transcript: utt = utt.strip() if utt and not utt[0] == ".": total += (float(time2) - float(time1)) return total def tier_name(self): """ @return: Tier name of a given tier. """ return self.nameid def classid(self): """ @return: Type of transcription on tier. """ return self.classid def min_max(self): """ @return: (xmin, xmax) tuple for a given tier. """ return (self.xmin, self.xmax) def __repr__(self): return "<%s \"%s\" (%.2f, %.2f) %.2f%%>" % (self.classid, self.nameid, self.xmin, self.xmax, 100*self.time()/self.t_time) def __str__(self): return self.__repr__() + "\n " + "\n ".join(" ".join(row) for row in self.simple_transcript) def demo_TextGrid(demo_data): print("** Demo of the TextGrid class. **") fid = TextGrid(demo_data) print("Tiers: %s" % (fid.size)) for i, tier in enumerate(fid): print("\n***") print("Tier: %s" % (i + 1)) print(tier) def demo(): # Each demo demonstrates different TextGrid formats. print("Format 1") demo_TextGrid(demo_data1) print("\nFormat 2") demo_TextGrid(demo_data2) print("\nFormat 3") demo_TextGrid(demo_data3) demo_data1 = """File type = "ooTextFile" Object class = "TextGrid" xmin = 0 xmax = 2045.144149659864 tiers? 
<exists> size = 3 item []: item [1]: class = "IntervalTier" name = "utterances" xmin = 0 xmax = 2045.144149659864 intervals: size = 5 intervals [1]: xmin = 0 xmax = 2041.4217474125382 text = "" intervals [2]: xmin = 2041.4217474125382 xmax = 2041.968276643991 text = "this" intervals [3]: xmin = 2041.968276643991 xmax = 2042.5281632653062 text = "is" intervals [4]: xmin = 2042.5281632653062 xmax = 2044.0487352585324 text = "a" intervals [5]: xmin = 2044.0487352585324 xmax = 2045.144149659864 text = "demo" item [2]: class = "TextTier" name = "notes" xmin = 0 xmax = 2045.144149659864 points: size = 3 points [1]: time = 2041.4217474125382 mark = ".begin_demo" points [2]: time = 2043.8338291031832 mark = "voice gets quiet here" points [3]: time = 2045.144149659864 mark = ".end_demo" item [3]: class = "IntervalTier" name = "phones" xmin = 0 xmax = 2045.144149659864 intervals: size = 12 intervals [1]: xmin = 0 xmax = 2041.4217474125382 text = "" intervals [2]: xmin = 2041.4217474125382 xmax = 2041.5438290324326 text = "D" intervals [3]: xmin = 2041.5438290324326 xmax = 2041.7321032910372 text = "I" intervals [4]: xmin = 2041.7321032910372 xmax = 2041.968276643991 text = "s" intervals [5]: xmin = 2041.968276643991 xmax = 2042.232189031843 text = "I" intervals [6]: xmin = 2042.232189031843 xmax = 2042.5281632653062 text = "z" intervals [7]: xmin = 2042.5281632653062 xmax = 2044.0487352585324 text = "eI" intervals [8]: xmin = 2044.0487352585324 xmax = 2044.2487352585324 text = "dc" intervals [9]: xmin = 2044.2487352585324 xmax = 2044.3102321849011 text = "d" intervals [10]: xmin = 2044.3102321849011 xmax = 2044.5748932104329 text = "E" intervals [11]: xmin = 2044.5748932104329 xmax = 2044.8329108578437 text = "m" intervals [12]: xmin = 2044.8329108578437 xmax = 2045.144149659864 text = "oU" """ demo_data2 = """File type = "ooTextFile" Object class = "TextGrid" 0 2.8 <exists> 2 "IntervalTier" "utterances" 0 2.8 3 0 1.6229213249309031 "" 1.6229213249309031 2.341428074708195 "demo" 2.341428074708195 2.8 "" "IntervalTier" "phones" 0 2.8 6 0 1.6229213249309031 "" 1.6229213249309031 1.6428291382019483 "dc" 1.6428291382019483 1.65372183721983721 "d" 1.65372183721983721 1.94372874328943728 "E" 1.94372874328943728 2.13821938291038210 "m" 2.13821938291038210 2.341428074708195 "oU" 2.341428074708195 2.8 "" """ demo_data3 = """"Praat chronological TextGrid text file" 0 2.8 ! Time domain. 2 ! Number of tiers. "IntervalTier" "utterances" 0 2.8 "IntervalTier" "utterances" 0 2.8 1 0 1.6229213249309031 "" 2 0 1.6229213249309031 "" 2 1.6229213249309031 1.6428291382019483 "dc" 2 1.6428291382019483 1.65372183721983721 "d" 2 1.65372183721983721 1.94372874328943728 "E" 2 1.94372874328943728 2.13821938291038210 "m" 2 2.13821938291038210 2.341428074708195 "oU" 1 1.6229213249309031 2.341428074708195 "demo" 1 2.341428074708195 2.8 "" 2 2.341428074708195 2.8 "" """ if __name__ == "__main__": demo()
30.104037
129
0.516429
import sys import re TEXTTIER = "TextTier" INTERVALTIER = "IntervalTier" OOTEXTFILE = re.compile(r"""(?x) xmin\ =\ (.*)[\r\n]+ xmax\ =\ (.*)[\r\n]+ [\s\S]+?size\ =\ (.*)[\r\n]+ """) CHRONTEXTFILE = re.compile(r"""(?x) [\r\n]+(\S+)\ (\S+)\ +!\ Time\ domain.\ *[\r\n]+ (\S+)\ +!\ Number\ of\ tiers.\ *[\r\n]+" """) OLDOOTEXTFILE = re.compile(r"""(?x) [\r\n]+(\S+) [\r\n]+(\S+) [\r\n]+.+[\r\n]+(\S+) """) ################################################################# # TextGrid Class ################################################################# class TextGrid(object): def __init__(self, read_file, config:dict=None): # default creation of textgrid object if config is None: self.read_file = read_file self.size = 0 self.xmin = 0 self.xmax = 0 self.t_time = 0 self.text_type = self._check_type() self.tiers = self._find_tiers() # creating a textgrid from a dict config else: self.read_file = None self.size = config['size'] self.xmin = config['xmin'] self.xmax = config['xmax'] self.t_time = config['t_time'] def __iter__(self): for tier in self.tiers: yield tier def next(self): if self.idx == (self.size - 1): raise StopIteration self.idx += 1 return self.tiers[self.idx] @staticmethod def load(file): return TextGrid(open(file).read()) def _load_tiers(self, header): tiers = [] if self.text_type == "ChronTextFile": m = re.compile(header) tier_headers = m.findall(self.read_file) tier_re = " \d+.?\d* \d+.?\d*[\r\n]+\"[^\"]*\"" for i in range(0, self.size): tier_info = [tier_headers[i]] + \ re.findall(str(i + 1) + tier_re, self.read_file) tier_info = "\n".join(tier_info) tiers.append(Tier(tier_info, self.text_type, self.t_time)) return tiers tier_re = header + "[\s\S]+?(?=" + header + "|$$)" m = re.compile(tier_re) tier_iter = m.finditer(self.read_file) for iterator in tier_iter: (begin, end) = iterator.span() tier_info = self.read_file[begin:end] tiers.append(Tier(tier_info, self.text_type, self.t_time)) return tiers def _check_type(self): m = re.match("(.*)[\r\n](.*)[\r\n](.*)[\r\n](.*)", self.read_file) try: type_id = m.group(1).strip() except AttributeError: raise TypeError("Cannot read file -- try TextGrid.load()") xmin = m.group(4) if type_id == "File type = \"ooTextFile\"": if "xmin" not in xmin: text_type = "OldooTextFile" else: text_type = "ooTextFile" elif type_id == "\"Praat chronological TextGrid text file\"": text_type = "ChronTextFile" else: raise TypeError("Unknown format '(%s)'", (type_id)) return text_type def _find_tiers(self): if self.text_type == "ooTextFile": m = OOTEXTFILE header = "\\t+item \[" elif self.text_type == "ChronTextFile": m = CHRONTEXTFILE header = "\"\S+\" \".*\" \d+\.?\d* \d+\.?\d*" elif self.text_type == "OldooTextFile": m = OLDOOTEXTFILE header = "\".*\"[\r\n]+\".*\"" file_info = m.findall(self.read_file)[0] self.xmin = float(file_info[0]) self.xmax = float(file_info[1]) self.t_time = self.xmax - self.xmin self.size = int(file_info[2]) tiers = self._load_tiers(header) return tiers def to_chron(self): chron_file = "" chron_file += "\"Praat chronological TextGrid text file\"\n" chron_file += str(self.xmin) + " " + str(self.xmax) chron_file += " ! Time domain.\n" chron_file += str(self.size) + " ! 
Number of tiers.\n" for tier in self.tiers: idx = (self.tiers.index(tier)) + 1 tier_header = "\"" + tier.classid + "\" \"" \ + tier.nameid + "\" " + str(tier.xmin) \ + " " + str(tier.xmax) chron_file += tier_header + "\n" transcript = tier.simple_transcript for (xmin, xmax, utt) in transcript: chron_file += str(idx) + " " + str(xmin) chron_file += " " + str(xmax) +"\n" chron_file += "\"" + utt + "\"\n" return chron_file def to_oo(self): oo_file = "" oo_file += "File type = \"ooTextFile\"\n" oo_file += "Object class = \"TextGrid\"\n\n" oo_file += "xmin = ", self.xmin, "\n" oo_file += "xmax = ", self.xmax, "\n" oo_file += "tiers? <exists>\n" oo_file += "size = ", self.size, "\n" oo_file += "item []:\n" for i in range(len(self.tiers)): oo_file += "%4s%s [%s]" % ("", "item", i + 1) _curr_tier = self.tiers[i] for (x, y) in _curr_tier.header: oo_file += "%8s%s = \"%s\"" % ("", x, y) if _curr_tier.classid != TEXTTIER: for (xmin, xmax, text) in _curr_tier.simple_transcript: oo_file += "%12s%s = %s" % ("", "xmin", xmin) oo_file += "%12s%s = %s" % ("", "xmax", xmax) oo_file += "%12s%s = \"%s\"" % ("", "text", text) else: for (time, mark) in _curr_tier.simple_transcript: oo_file += "%12s%s = %s" % ("", "time", time) oo_file += "%12s%s = %s" % ("", "mark", mark) return oo_file numerate(fid): print("\n***") print("Tier: %s" % (i + 1)) print(tier) def demo(): # Each demo demonstrates different TextGrid formats. print("Format 1") demo_TextGrid(demo_data1) print("\nFormat 2") demo_TextGrid(demo_data2) print("\nFormat 3") demo_TextGrid(demo_data3) demo_data1 = """File type = "ooTextFile" Object class = "TextGrid" xmin = 0 xmax = 2045.144149659864 tiers? <exists> size = 3 item []: item [1]: class = "IntervalTier" name = "utterances" xmin = 0 xmax = 2045.144149659864 intervals: size = 5 intervals [1]: xmin = 0 xmax = 2041.4217474125382 text = "" intervals [2]: xmin = 2041.4217474125382 xmax = 2041.968276643991 text = "this" intervals [3]: xmin = 2041.968276643991 xmax = 2042.5281632653062 text = "is" intervals [4]: xmin = 2042.5281632653062 xmax = 2044.0487352585324 text = "a" intervals [5]: xmin = 2044.0487352585324 xmax = 2045.144149659864 text = "demo" item [2]: class = "TextTier" name = "notes" xmin = 0 xmax = 2045.144149659864 points: size = 3 points [1]: time = 2041.4217474125382 mark = ".begin_demo" points [2]: time = 2043.8338291031832 mark = "voice gets quiet here" points [3]: time = 2045.144149659864 mark = ".end_demo" item [3]: class = "IntervalTier" name = "phones" xmin = 0 xmax = 2045.144149659864 intervals: size = 12 intervals [1]: xmin = 0 xmax = 2041.4217474125382 text = "" intervals [2]: xmin = 2041.4217474125382 xmax = 2041.5438290324326 text = "D" intervals [3]: xmin = 2041.5438290324326 xmax = 2041.7321032910372 text = "I" intervals [4]: xmin = 2041.7321032910372 xmax = 2041.968276643991 text = "s" intervals [5]: xmin = 2041.968276643991 xmax = 2042.232189031843 text = "I" intervals [6]: xmin = 2042.232189031843 xmax = 2042.5281632653062 text = "z" intervals [7]: xmin = 2042.5281632653062 xmax = 2044.0487352585324 text = "eI" intervals [8]: xmin = 2044.0487352585324 xmax = 2044.2487352585324 text = "dc" intervals [9]: xmin = 2044.2487352585324 xmax = 2044.3102321849011 text = "d" intervals [10]: xmin = 2044.3102321849011 xmax = 2044.5748932104329 text = "E" intervals [11]: xmin = 2044.5748932104329 xmax = 2044.8329108578437 text = "m" intervals [12]: xmin = 2044.8329108578437 xmax = 2045.144149659864 text = "oU" """ demo_data2 = """File type = "ooTextFile" Object class = "TextGrid" 0 2.8 
<exists> 2 "IntervalTier" "utterances" 0 2.8 3 0 1.6229213249309031 "" 1.6229213249309031 2.341428074708195 "demo" 2.341428074708195 2.8 "" "IntervalTier" "phones" 0 2.8 6 0 1.6229213249309031 "" 1.6229213249309031 1.6428291382019483 "dc" 1.6428291382019483 1.65372183721983721 "d" 1.65372183721983721 1.94372874328943728 "E" 1.94372874328943728 2.13821938291038210 "m" 2.13821938291038210 2.341428074708195 "oU" 2.341428074708195 2.8 "" """ demo_data3 = """"Praat chronological TextGrid text file" 0 2.8 ! Time domain. 2 ! Number of tiers. "IntervalTier" "utterances" 0 2.8 "IntervalTier" "utterances" 0 2.8 1 0 1.6229213249309031 "" 2 0 1.6229213249309031 "" 2 1.6229213249309031 1.6428291382019483 "dc" 2 1.6428291382019483 1.65372183721983721 "d" 2 1.65372183721983721 1.94372874328943728 "E" 2 1.94372874328943728 2.13821938291038210 "m" 2 2.13821938291038210 2.341428074708195 "oU" 1 1.6229213249309031 2.341428074708195 "demo" 1 2.341428074708195 2.8 "" 2 2.341428074708195 2.8 "" """ if __name__ == "__main__": demo()
true
true
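A minimal usage sketch based on the class docstrings and demo data in the record above; the file path is an assumed example.

from speech.utils.textgrid import TextGrid, INTERVALTIER

grid = TextGrid.load("alignment.TextGrid")   # assumed example path
print(grid.size, "tiers covering", grid.xmin, "to", grid.xmax)
for tier in grid:
    if tier.classid == INTERVALTIER:
        # simple_transcript yields (xmin, xmax, text) tuples for interval tiers.
        for xmin, xmax, text in tier.simple_transcript:
            print(tier.nameid, xmin, xmax, text)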
1c49fb41d87bacb2885b8521921dc64905ec5e5d
25,423
py
Python
utils/data_generator.py
qiuqiangkong/dcase2019_task2
62575c8cdd4723cfdf497b290b6dddcce316c60b
[ "MIT" ]
36
2019-04-13T02:04:04.000Z
2020-10-27T15:54:24.000Z
utils/data_generator.py
qiuqiangkong/dcase2019_task2
62575c8cdd4723cfdf497b290b6dddcce316c60b
[ "MIT" ]
2
2019-04-14T08:08:26.000Z
2019-04-18T19:29:38.000Z
utils/data_generator.py
qiuqiangkong/dcase2019_task2
62575c8cdd4723cfdf497b290b6dddcce316c60b
[ "MIT" ]
16
2019-04-13T23:01:32.000Z
2021-01-10T05:20:15.000Z
import numpy as np import h5py import csv import time import logging import os import glob import matplotlib.pyplot as plt import logging import pandas as pd from utilities import scale import config class Base(object): def __init__(self): '''Base class for train, validate and test data generator. ''' pass def load_hdf5(self, hdf5_path, cross_validation_path): '''Load hdf5 file. Args: hdf5_path: string, path of hdf5 file cross_validation_path, string | 'none', path of cross validation csv file Returns: data_dict: {'audio_name': (audios_num,), 'feature': (dataset_total_frames, mel_bins), 'begin_index': (audios_num,), 'end_index': (audios_num,), (if exist) 'target': (audios_num, classes_num), (if exist) 'fold': (audios_num,)} ''' data_dict = {} with h5py.File(hdf5_path, 'r') as hf: data_dict['audio_name'] = np.array( [audio_name.decode() for audio_name in hf['audio_name'][:]]) data_dict['feature'] = hf['feature'][:].astype(np.float32) data_dict['begin_index'] = hf['begin_index'][:].astype(np.int32) data_dict['end_index'] = hf['end_index'][:].astype(np.int32) if 'target' in hf.keys(): data_dict['target'] = hf['target'][:].astype(np.float32) if cross_validation_path: df = pd.read_csv(cross_validation_path, sep=',') folds = [] for n, audio_name in enumerate(data_dict['audio_name']): index = df.index[df['fname'] == audio_name][0] folds.append(df['fold'][index]) data_dict['fold'] = np.array(folds) return data_dict def get_segment_metadata_dict(self, data_dict, audio_indexes, segment_frames, hop_frames, source): '''Get segments metadata for training or inference. Long audio recordings are split to segments with the same duration. Each segment inherit the label of the audio recording. Args: data_dict: {'audio_name': (audios_num,), 'feature': (dataset_total_frames, mel_bins), 'begin_index': (audios_num,), 'end_index': (audios_num,), (if exist) 'target': (audios_num, classes_num), (if exist) 'fold': (audios_num,)} audio_indexes: (audios_num,) segment_frames: int, frames number of a segment hop_frames: int, hop frames between segments source: 'curated' | 'noisy' | None Returns: segment_metadata_dict: {'audio_name': (segments_num,), 'begin_index': (segments_num,), 'end_index': (segments_num,), (if exist) 'target': (segments_num, classes_num), (if exist) 'source': (segments_num)} ''' segment_metadata_dict = {'audio_name': [], 'begin_index': [], 'end_index': []} has_target = 'target' in data_dict.keys() if has_target: segment_metadata_dict['target'] = [] if source: segment_metadata_dict['source'] = [] for audio_index in audio_indexes: audio_name = data_dict['audio_name'][audio_index] begin_index = data_dict['begin_index'][audio_index] end_index = data_dict['end_index'][audio_index] if has_target: target = data_dict['target'][audio_index] else: target = None # If audio recording shorter than a segment if end_index - begin_index < segment_frames: segment_metadata_dict['begin_index'].append(begin_index) segment_metadata_dict['end_index'].append(end_index) self._append_to_meta_data(segment_metadata_dict, audio_name, target, source) # If audio recording longer than a segment then split else: shift = 0 while end_index - (begin_index + shift) > segment_frames: segment_metadata_dict['begin_index'].append( begin_index + shift) segment_metadata_dict['end_index'].append( begin_index + shift + segment_frames) self._append_to_meta_data(segment_metadata_dict, audio_name, target, source) shift += hop_frames # Append the last segment segment_metadata_dict['begin_index'].append( end_index - segment_frames) 
segment_metadata_dict['end_index'].append(end_index) self._append_to_meta_data(segment_metadata_dict, audio_name, target, source) for key in segment_metadata_dict.keys(): segment_metadata_dict[key] = np.array(segment_metadata_dict[key]) return segment_metadata_dict def _append_to_meta_data(self, segment_metadata_dict, audio_name, target, source): '''Append audio_name, target, source to segment_metadata_dict. ''' segment_metadata_dict['audio_name'].append(audio_name) if target is not None: segment_metadata_dict['target'].append(target) if source is not None: segment_metadata_dict['source'].append(source) def get_feature_mask(self, data_dict, begin_index, end_index, segment_frames, pad_type, logmel_eps): '''Get logmel feature and mask of one segment. Args: data_dict: {'audio_name': (audios_num,), 'feature': (dataset_total_frames, mel_bins), 'begin_index': (audios_num,), 'end_index': (audios_num,), (if exist) 'target': (audios_num, classes_num), (if exist) 'fold': (audios_num,)} begin_index: int, begin index of a segment end_index: int, end index of a segment segment_frames: int, frames number of a segment pad_type: string, 'constant' | 'repeat' logmel_eps: constant value to pad if pad_type == 'constant' ''' this_segment_frames = end_index - begin_index # If segment frames of this audio is fewer than the designed segment # frames, then pad. if this_segment_frames < segment_frames: if pad_type == 'constant': this_feature = self.pad_constant( data_dict['feature'][begin_index : end_index], segment_frames, logmel_eps) elif pad_type == 'repeat': this_feature = self.pad_repeat( data_dict['feature'][begin_index : end_index], segment_frames) this_mask = np.zeros(segment_frames) this_mask[0 : this_segment_frames] = 1 # If segment frames is equal to the designed segment frames, then load # data without padding. else: this_feature = data_dict['feature'][begin_index : end_index] this_mask = np.ones(self.segment_frames) return this_feature, this_mask def pad_constant(self, x, max_len, constant): '''Pad matrix with constant. Args: x: (frames, mel_bins) max_len: int, legnth to be padded constant: float, value used for padding ''' pad = constant * np.ones((max_len - x.shape[0], x.shape[1])) padded_x = np.concatenate((x, pad), axis=0) return padded_x def pad_repeat(self, x, max_len): '''Repeat matrix to a legnth. Args: x: (frames, mel_bins) max_len: int, length to be padded ''' repeat_num = int(max_len / x.shape[0]) + 1 repeated_x = np.tile(x, (repeat_num, 1)) repeated_x = repeated_x[0 : max_len] return repeated_x def transform(self, x): '''Transform data. ''' return scale(x, self.scalar['mean'], self.scalar['std']) class DataGenerator(Base): def __init__(self, curated_feature_hdf5_path, noisy_feature_hdf5_path, curated_cross_validation_path, noisy_cross_validation_path, train_source, holdout_fold, segment_seconds, hop_seconds, pad_type, scalar, batch_size, seed=1234): '''Data generator for training and validation. 
Args: curated_feature_hdf5_path: string, path of hdf5 file noisy_feature_hdf5_path: string, path of hdf5 file curated_cross_validation_path: path of cross validation csv file noisy_cross_validation_path: path of cross validation csv file train_source: 'curated' | 'noisy' | 'curated_and_noisy' holdout_fold: '1', '2', '3', '4' | 'none', set `none` for training on all data without validation segment_seconds: float, duration of audio recordings to be padded or split hop_seconds: float, hop seconds between segments pad_type: 'constant' | 'repeat' scalar: object, containing mean and std value batch_size: int seed: int ''' self.scalar = scalar self.batch_size = batch_size self.random_state = np.random.RandomState(seed) self.segment_frames = int(segment_seconds * config.frames_per_second) self.hop_frames = int(hop_seconds * config.frames_per_second) self.pad_type = pad_type self.logmel_eps = config.logmel_eps # Load training data load_time = time.time() self.curated_data_dict = self.load_hdf5( curated_feature_hdf5_path, curated_cross_validation_path) self.noisy_data_dict = self.load_hdf5( noisy_feature_hdf5_path, noisy_cross_validation_path) # Get train and validate audio indexes (train_curated_audio_indexes, validate_curated_audio_indexes) = \ self.get_train_validate_audio_indexes( self.curated_data_dict, holdout_fold) (train_noisy_audio_indexes, validate_noisy_audio_indexes) = \ self.get_train_validate_audio_indexes( self.noisy_data_dict, holdout_fold) logging.info('Train curated audio num: {}'.format( len(train_curated_audio_indexes))) logging.info('Train noisy audio num: {}'.format( len(train_noisy_audio_indexes))) logging.info('Validate curated audio num: {}'.format( len(validate_curated_audio_indexes))) logging.info('Validate noisy audio num: {}'.format( len(validate_noisy_audio_indexes))) logging.info('Load data time: {:.3f} s'.format(time.time() - load_time)) # Get segment metadata for training self.train_curated_segment_metadata_dict = \ self.get_segment_metadata_dict( self.curated_data_dict, train_curated_audio_indexes, self.segment_frames, self.hop_frames, 'curated') self.train_noisy_segment_metadata_dict = self.get_segment_metadata_dict( self.noisy_data_dict, train_noisy_audio_indexes, self.segment_frames, self.hop_frames, 'noisy') if train_source == 'curated': self.train_segment_metadata_dict = \ self.train_curated_segment_metadata_dict elif train_source == 'noisy': self.train_segment_metadata_dict = \ self.train_noisy_segment_metadata_dict elif train_source == 'curated_and_noisy': self.train_segment_metadata_dict = \ self.combine_curated_noisy_metadata_dict( self.train_curated_segment_metadata_dict, self.train_noisy_segment_metadata_dict) # Get segment metadata for validation self.validate_curated_segment_metadata_dict = \ self.get_segment_metadata_dict( self.curated_data_dict, validate_curated_audio_indexes, self.segment_frames, self.hop_frames, 'curated') self.validate_noisy_segment_metadata_dict = \ self.get_segment_metadata_dict( self.noisy_data_dict, validate_noisy_audio_indexes, self.segment_frames, self.hop_frames, 'noisy') # Print data statistics train_segments_num = len(self.train_segment_metadata_dict['audio_name']) validate_curated_segments_num = len( self.validate_curated_segment_metadata_dict['audio_name']) validate_noisy_segments_num = len( self.validate_noisy_segment_metadata_dict['audio_name']) logging.info('') logging.info('Total train segments num: {}'.format(train_segments_num)) logging.info('Validate curated segments num: {}'.format( 
validate_curated_segments_num)) logging.info('Validate noisy segments num: {}'.format( validate_noisy_segments_num)) self.train_segments_indexes = np.arange(train_segments_num) self.random_state.shuffle(self.train_segments_indexes) self.pointer = 0 def get_train_validate_audio_indexes(self, data_dict, holdout_fold): '''Get train and validate audio indexes. Args: data_dict: {'audio_name': (audios_num,), 'feature': (dataset_total_frames, mel_bins), 'target': (audios_num, classes_num), 'begin_index': (audios_num,), 'end_index': (audios_num,), (if exist) 'fold': (audios_num,)} holdout_fold: 'none' | int, if 'none' then validate indexes are empty Returns: train_audio_indexes: (train_audios_num,) validate_audio_indexes: (validate_audios_num) ''' if holdout_fold == 'none': train_audio_indexes = np.arange(len(data_dict['audio_name'])) validate_audio_indexes = np.array([]) else: train_audio_indexes = np.where( data_dict['fold'] != int(holdout_fold))[0] validate_audio_indexes = np.where( data_dict['fold'] == int(holdout_fold))[0] return train_audio_indexes, validate_audio_indexes def combine_curated_noisy_metadata_dict(self, curated_metadata_dict, noisy_metadata_dict): '''Combine curated and noisy segment metadata dict. ''' combined_metadata_dict = {} for key in curated_metadata_dict.keys(): combined_metadata_dict[key] = np.concatenate( (curated_metadata_dict[key], noisy_metadata_dict[key]), axis=0) return combined_metadata_dict def generate_train(self): '''Generate mini-batch data for training. Returns: batch_data_dict: {'audio_name': (batch_size,), 'feature': (batch_size, segment_frames, mel_bins), 'mask': (batch_size, segment_frames), 'target': (batch_size, classes_num), 'source': (batch_size,)} ''' while True: # Reset pointer if self.pointer >= len(self.train_segments_indexes): self.pointer = 0 self.random_state.shuffle(self.train_segments_indexes) # Get batch segment indexes batch_segment_indexes = self.train_segments_indexes[ self.pointer: self.pointer + self.batch_size] self.pointer += self.batch_size # Batch segment data batch_audio_name = self.train_segment_metadata_dict\ ['audio_name'][batch_segment_indexes] batch_begin_index = self.train_segment_metadata_dict\ ['begin_index'][batch_segment_indexes] batch_end_index = self.train_segment_metadata_dict\ ['end_index'][batch_segment_indexes] batch_target = self.train_segment_metadata_dict\ ['target'][batch_segment_indexes] batch_source = self.train_segment_metadata_dict\ ['source'][batch_segment_indexes] batch_feature = [] batch_mask = [] # Get logmel segments one by one, pad the short segments for n in range(len(batch_segment_indexes)): if batch_source[n] == 'curated': data_dict = self.curated_data_dict elif batch_source[n] == 'noisy': data_dict = self.noisy_data_dict else: raise Exception('Incorrect source type!') (this_feature, this_mask) = self.get_feature_mask( data_dict, batch_begin_index[n], batch_end_index[n], self.segment_frames, self.pad_type, self.logmel_eps) batch_feature.append(this_feature) batch_mask.append(this_mask) batch_feature = np.array(batch_feature) batch_feature = self.transform(batch_feature) batch_mask = np.array(batch_mask) batch_data_dict = { 'audio_name': batch_audio_name, 'feature': batch_feature, 'mask': batch_mask, 'target': batch_target, 'source': batch_source} yield batch_data_dict def generate_validate(self, data_type, target_source, max_iteration=None): '''Generate mini-batch data for validation. 
Returns: batch_data_dict: {'audio_name': (batch_size,), 'feature': (batch_size, segment_frames, mel_bins), 'mask': (batch_size, segment_frames), 'target': (batch_size, classes_num)} ''' assert(data_type in ['train', 'validate']) assert(target_source in ['curated', 'noisy']) segment_metadata_dict = eval( 'self.{}_{}_segment_metadata_dict'.format(data_type, target_source)) data_dict = eval('self.{}_data_dict'.format(target_source)) segments_num = len(segment_metadata_dict['audio_name']) segment_indexes = np.arange(segments_num) iteration = 0 pointer = 0 while True: if iteration == max_iteration: break # Reset pointer if pointer >= segments_num: break # Get batch segment indexes batch_segment_indexes = segment_indexes[ pointer: pointer + self.batch_size] pointer += self.batch_size iteration += 1 # Batch segment data batch_audio_name = segment_metadata_dict\ ['audio_name'][batch_segment_indexes] batch_begin_index = segment_metadata_dict\ ['begin_index'][batch_segment_indexes] batch_end_index = segment_metadata_dict\ ['end_index'][batch_segment_indexes] batch_target = segment_metadata_dict\ ['target'][batch_segment_indexes] batch_feature = [] batch_mask = [] # Get logmel segments one by one, pad the short segments for n in range(len(batch_segment_indexes)): (this_feature, this_mask) = self.get_feature_mask( data_dict, batch_begin_index[n], batch_end_index[n], self.segment_frames, self.pad_type, self.logmel_eps) batch_feature.append(this_feature) batch_mask.append(this_mask) batch_feature = np.array(batch_feature) batch_feature = self.transform(batch_feature) batch_mask = np.array(batch_mask) batch_data_dict = { 'audio_name': batch_audio_name, 'feature': batch_feature, 'mask': batch_mask, 'target': batch_target} yield batch_data_dict class TestDataGenerator(Base): def __init__(self, test_feature_hdf5_path, segment_seconds, hop_seconds, pad_type, scalar, batch_size, seed=1234): '''Data generator for testing. Args: test_feature_hdf5_path: string, path of hdf5 file segment_seconds: float, duration of audio recordings to be padded or split hop_seconds: float, hop seconds between segments pad_type: 'constant' | 'repeat' scalar: object, containing mean and std value batch_size: int seed: int ''' self.scalar = scalar self.batch_size = batch_size self.random_state = np.random.RandomState(seed) self.segment_frames = int(segment_seconds * config.frames_per_second) self.hop_frames = int(hop_seconds * config.frames_per_second) self.pad_type = pad_type self.logmel_eps = config.logmel_eps # Load testing data self.test_data_dict = self.load_hdf5( test_feature_hdf5_path, cross_validation_path=None) audios_num = len(self.test_data_dict['audio_name']) test_audio_indexes = np.arange(audios_num) self.test_segment_metadata_dict = \ self.get_segment_metadata_dict( self.test_data_dict, test_audio_indexes, self.segment_frames, self.hop_frames, source=None) def generate_test(self): '''Generate mini-batch data for test. 
Returns: batch_data_dict: {'audio_name': (batch_size,), 'feature': (batch_size, segment_frames, mel_bins), 'mask': (batch_size, segment_frames)} ''' segment_metadata_dict = self.test_segment_metadata_dict data_dict = self.test_data_dict segments_num = len(segment_metadata_dict['audio_name']) segment_indexes = np.arange(segments_num) iteration = 0 pointer = 0 while True: # Reset pointer if pointer >= segments_num: break # Get batch segment indexes batch_segment_indexes = segment_indexes[ pointer: pointer + self.batch_size] pointer += self.batch_size iteration += 1 # Batch segment data batch_audio_name = segment_metadata_dict\ ['audio_name'][batch_segment_indexes] batch_begin_index = segment_metadata_dict\ ['begin_index'][batch_segment_indexes] batch_end_index = segment_metadata_dict\ ['end_index'][batch_segment_indexes] batch_feature = [] batch_mask = [] # Get logmel segments one by one, pad the short segments for n in range(len(batch_segment_indexes)): (this_feature, this_mask) = self.get_feature_mask( data_dict, batch_begin_index[n], batch_end_index[n], self.segment_frames, self.pad_type, self.logmel_eps) batch_feature.append(this_feature) batch_mask.append(this_mask) batch_feature = np.array(batch_feature) batch_feature = self.transform(batch_feature) batch_mask = np.array(batch_mask) batch_data_dict = { 'audio_name': batch_audio_name, 'feature': batch_feature, 'mask': batch_mask} yield batch_data_dict
39.476708
84
0.556858
import numpy as np import h5py import csv import time import logging import os import glob import matplotlib.pyplot as plt import logging import pandas as pd from utilities import scale import config class Base(object): def __init__(self): pass def load_hdf5(self, hdf5_path, cross_validation_path): data_dict = {} with h5py.File(hdf5_path, 'r') as hf: data_dict['audio_name'] = np.array( [audio_name.decode() for audio_name in hf['audio_name'][:]]) data_dict['feature'] = hf['feature'][:].astype(np.float32) data_dict['begin_index'] = hf['begin_index'][:].astype(np.int32) data_dict['end_index'] = hf['end_index'][:].astype(np.int32) if 'target' in hf.keys(): data_dict['target'] = hf['target'][:].astype(np.float32) if cross_validation_path: df = pd.read_csv(cross_validation_path, sep=',') folds = [] for n, audio_name in enumerate(data_dict['audio_name']): index = df.index[df['fname'] == audio_name][0] folds.append(df['fold'][index]) data_dict['fold'] = np.array(folds) return data_dict def get_segment_metadata_dict(self, data_dict, audio_indexes, segment_frames, hop_frames, source): segment_metadata_dict = {'audio_name': [], 'begin_index': [], 'end_index': []} has_target = 'target' in data_dict.keys() if has_target: segment_metadata_dict['target'] = [] if source: segment_metadata_dict['source'] = [] for audio_index in audio_indexes: audio_name = data_dict['audio_name'][audio_index] begin_index = data_dict['begin_index'][audio_index] end_index = data_dict['end_index'][audio_index] if has_target: target = data_dict['target'][audio_index] else: target = None if end_index - begin_index < segment_frames: segment_metadata_dict['begin_index'].append(begin_index) segment_metadata_dict['end_index'].append(end_index) self._append_to_meta_data(segment_metadata_dict, audio_name, target, source) else: shift = 0 while end_index - (begin_index + shift) > segment_frames: segment_metadata_dict['begin_index'].append( begin_index + shift) segment_metadata_dict['end_index'].append( begin_index + shift + segment_frames) self._append_to_meta_data(segment_metadata_dict, audio_name, target, source) shift += hop_frames segment_metadata_dict['begin_index'].append( end_index - segment_frames) segment_metadata_dict['end_index'].append(end_index) self._append_to_meta_data(segment_metadata_dict, audio_name, target, source) for key in segment_metadata_dict.keys(): segment_metadata_dict[key] = np.array(segment_metadata_dict[key]) return segment_metadata_dict def _append_to_meta_data(self, segment_metadata_dict, audio_name, target, source): segment_metadata_dict['audio_name'].append(audio_name) if target is not None: segment_metadata_dict['target'].append(target) if source is not None: segment_metadata_dict['source'].append(source) def get_feature_mask(self, data_dict, begin_index, end_index, segment_frames, pad_type, logmel_eps): this_segment_frames = end_index - begin_index if this_segment_frames < segment_frames: if pad_type == 'constant': this_feature = self.pad_constant( data_dict['feature'][begin_index : end_index], segment_frames, logmel_eps) elif pad_type == 'repeat': this_feature = self.pad_repeat( data_dict['feature'][begin_index : end_index], segment_frames) this_mask = np.zeros(segment_frames) this_mask[0 : this_segment_frames] = 1 else: this_feature = data_dict['feature'][begin_index : end_index] this_mask = np.ones(self.segment_frames) return this_feature, this_mask def pad_constant(self, x, max_len, constant): pad = constant * np.ones((max_len - x.shape[0], x.shape[1])) padded_x = np.concatenate((x, pad), axis=0) 
return padded_x def pad_repeat(self, x, max_len): repeat_num = int(max_len / x.shape[0]) + 1 repeated_x = np.tile(x, (repeat_num, 1)) repeated_x = repeated_x[0 : max_len] return repeated_x def transform(self, x): return scale(x, self.scalar['mean'], self.scalar['std']) class DataGenerator(Base): def __init__(self, curated_feature_hdf5_path, noisy_feature_hdf5_path, curated_cross_validation_path, noisy_cross_validation_path, train_source, holdout_fold, segment_seconds, hop_seconds, pad_type, scalar, batch_size, seed=1234): self.scalar = scalar self.batch_size = batch_size self.random_state = np.random.RandomState(seed) self.segment_frames = int(segment_seconds * config.frames_per_second) self.hop_frames = int(hop_seconds * config.frames_per_second) self.pad_type = pad_type self.logmel_eps = config.logmel_eps load_time = time.time() self.curated_data_dict = self.load_hdf5( curated_feature_hdf5_path, curated_cross_validation_path) self.noisy_data_dict = self.load_hdf5( noisy_feature_hdf5_path, noisy_cross_validation_path) (train_curated_audio_indexes, validate_curated_audio_indexes) = \ self.get_train_validate_audio_indexes( self.curated_data_dict, holdout_fold) (train_noisy_audio_indexes, validate_noisy_audio_indexes) = \ self.get_train_validate_audio_indexes( self.noisy_data_dict, holdout_fold) logging.info('Train curated audio num: {}'.format( len(train_curated_audio_indexes))) logging.info('Train noisy audio num: {}'.format( len(train_noisy_audio_indexes))) logging.info('Validate curated audio num: {}'.format( len(validate_curated_audio_indexes))) logging.info('Validate noisy audio num: {}'.format( len(validate_noisy_audio_indexes))) logging.info('Load data time: {:.3f} s'.format(time.time() - load_time)) self.train_curated_segment_metadata_dict = \ self.get_segment_metadata_dict( self.curated_data_dict, train_curated_audio_indexes, self.segment_frames, self.hop_frames, 'curated') self.train_noisy_segment_metadata_dict = self.get_segment_metadata_dict( self.noisy_data_dict, train_noisy_audio_indexes, self.segment_frames, self.hop_frames, 'noisy') if train_source == 'curated': self.train_segment_metadata_dict = \ self.train_curated_segment_metadata_dict elif train_source == 'noisy': self.train_segment_metadata_dict = \ self.train_noisy_segment_metadata_dict elif train_source == 'curated_and_noisy': self.train_segment_metadata_dict = \ self.combine_curated_noisy_metadata_dict( self.train_curated_segment_metadata_dict, self.train_noisy_segment_metadata_dict) self.validate_curated_segment_metadata_dict = \ self.get_segment_metadata_dict( self.curated_data_dict, validate_curated_audio_indexes, self.segment_frames, self.hop_frames, 'curated') self.validate_noisy_segment_metadata_dict = \ self.get_segment_metadata_dict( self.noisy_data_dict, validate_noisy_audio_indexes, self.segment_frames, self.hop_frames, 'noisy') train_segments_num = len(self.train_segment_metadata_dict['audio_name']) validate_curated_segments_num = len( self.validate_curated_segment_metadata_dict['audio_name']) validate_noisy_segments_num = len( self.validate_noisy_segment_metadata_dict['audio_name']) logging.info('') logging.info('Total train segments num: {}'.format(train_segments_num)) logging.info('Validate curated segments num: {}'.format( validate_curated_segments_num)) logging.info('Validate noisy segments num: {}'.format( validate_noisy_segments_num)) self.train_segments_indexes = np.arange(train_segments_num) self.random_state.shuffle(self.train_segments_indexes) self.pointer = 0 def 
get_train_validate_audio_indexes(self, data_dict, holdout_fold): if holdout_fold == 'none': train_audio_indexes = np.arange(len(data_dict['audio_name'])) validate_audio_indexes = np.array([]) else: train_audio_indexes = np.where( data_dict['fold'] != int(holdout_fold))[0] validate_audio_indexes = np.where( data_dict['fold'] == int(holdout_fold))[0] return train_audio_indexes, validate_audio_indexes def combine_curated_noisy_metadata_dict(self, curated_metadata_dict, noisy_metadata_dict): combined_metadata_dict = {} for key in curated_metadata_dict.keys(): combined_metadata_dict[key] = np.concatenate( (curated_metadata_dict[key], noisy_metadata_dict[key]), axis=0) return combined_metadata_dict def generate_train(self): while True: if self.pointer >= len(self.train_segments_indexes): self.pointer = 0 self.random_state.shuffle(self.train_segments_indexes) batch_segment_indexes = self.train_segments_indexes[ self.pointer: self.pointer + self.batch_size] self.pointer += self.batch_size batch_audio_name = self.train_segment_metadata_dict\ ['audio_name'][batch_segment_indexes] batch_begin_index = self.train_segment_metadata_dict\ ['begin_index'][batch_segment_indexes] batch_end_index = self.train_segment_metadata_dict\ ['end_index'][batch_segment_indexes] batch_target = self.train_segment_metadata_dict\ ['target'][batch_segment_indexes] batch_source = self.train_segment_metadata_dict\ ['source'][batch_segment_indexes] batch_feature = [] batch_mask = [] for n in range(len(batch_segment_indexes)): if batch_source[n] == 'curated': data_dict = self.curated_data_dict elif batch_source[n] == 'noisy': data_dict = self.noisy_data_dict else: raise Exception('Incorrect source type!') (this_feature, this_mask) = self.get_feature_mask( data_dict, batch_begin_index[n], batch_end_index[n], self.segment_frames, self.pad_type, self.logmel_eps) batch_feature.append(this_feature) batch_mask.append(this_mask) batch_feature = np.array(batch_feature) batch_feature = self.transform(batch_feature) batch_mask = np.array(batch_mask) batch_data_dict = { 'audio_name': batch_audio_name, 'feature': batch_feature, 'mask': batch_mask, 'target': batch_target, 'source': batch_source} yield batch_data_dict def generate_validate(self, data_type, target_source, max_iteration=None): assert(data_type in ['train', 'validate']) assert(target_source in ['curated', 'noisy']) segment_metadata_dict = eval( 'self.{}_{}_segment_metadata_dict'.format(data_type, target_source)) data_dict = eval('self.{}_data_dict'.format(target_source)) segments_num = len(segment_metadata_dict['audio_name']) segment_indexes = np.arange(segments_num) iteration = 0 pointer = 0 while True: if iteration == max_iteration: break if pointer >= segments_num: break batch_segment_indexes = segment_indexes[ pointer: pointer + self.batch_size] pointer += self.batch_size iteration += 1 batch_audio_name = segment_metadata_dict\ ['audio_name'][batch_segment_indexes] batch_begin_index = segment_metadata_dict\ ['begin_index'][batch_segment_indexes] batch_end_index = segment_metadata_dict\ ['end_index'][batch_segment_indexes] batch_target = segment_metadata_dict\ ['target'][batch_segment_indexes] batch_feature = [] batch_mask = [] for n in range(len(batch_segment_indexes)): (this_feature, this_mask) = self.get_feature_mask( data_dict, batch_begin_index[n], batch_end_index[n], self.segment_frames, self.pad_type, self.logmel_eps) batch_feature.append(this_feature) batch_mask.append(this_mask) batch_feature = np.array(batch_feature) batch_feature = self.transform(batch_feature) 
batch_mask = np.array(batch_mask) batch_data_dict = { 'audio_name': batch_audio_name, 'feature': batch_feature, 'mask': batch_mask, 'target': batch_target} yield batch_data_dict class TestDataGenerator(Base): def __init__(self, test_feature_hdf5_path, segment_seconds, hop_seconds, pad_type, scalar, batch_size, seed=1234): self.scalar = scalar self.batch_size = batch_size self.random_state = np.random.RandomState(seed) self.segment_frames = int(segment_seconds * config.frames_per_second) self.hop_frames = int(hop_seconds * config.frames_per_second) self.pad_type = pad_type self.logmel_eps = config.logmel_eps self.test_data_dict = self.load_hdf5( test_feature_hdf5_path, cross_validation_path=None) audios_num = len(self.test_data_dict['audio_name']) test_audio_indexes = np.arange(audios_num) self.test_segment_metadata_dict = \ self.get_segment_metadata_dict( self.test_data_dict, test_audio_indexes, self.segment_frames, self.hop_frames, source=None) def generate_test(self): segment_metadata_dict = self.test_segment_metadata_dict data_dict = self.test_data_dict segments_num = len(segment_metadata_dict['audio_name']) segment_indexes = np.arange(segments_num) iteration = 0 pointer = 0 while True: if pointer >= segments_num: break batch_segment_indexes = segment_indexes[ pointer: pointer + self.batch_size] pointer += self.batch_size iteration += 1 batch_audio_name = segment_metadata_dict\ ['audio_name'][batch_segment_indexes] batch_begin_index = segment_metadata_dict\ ['begin_index'][batch_segment_indexes] batch_end_index = segment_metadata_dict\ ['end_index'][batch_segment_indexes] batch_feature = [] batch_mask = [] for n in range(len(batch_segment_indexes)): (this_feature, this_mask) = self.get_feature_mask( data_dict, batch_begin_index[n], batch_end_index[n], self.segment_frames, self.pad_type, self.logmel_eps) batch_feature.append(this_feature) batch_mask.append(this_mask) batch_feature = np.array(batch_feature) batch_feature = self.transform(batch_feature) batch_mask = np.array(batch_mask) batch_data_dict = { 'audio_name': batch_audio_name, 'feature': batch_feature, 'mask': batch_mask} yield batch_data_dict
true
true
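A minimal standalone sketch of the padding behaviour in Base.pad_constant and Base.pad_repeat above, using an illustrative 3-frame, 2-bin array; the mask convention (1 for real frames, 0 for padding) mirrors get_feature_mask:

import numpy as np

def pad_constant(x, max_len, constant):
    # append rows filled with `constant` until x has max_len rows
    pad = constant * np.ones((max_len - x.shape[0], x.shape[1]))
    return np.concatenate((x, pad), axis=0)

def pad_repeat(x, max_len):
    # tile x along time until it covers max_len rows, then truncate
    repeat_num = int(max_len / x.shape[0]) + 1
    return np.tile(x, (repeat_num, 1))[0 : max_len]

x = np.arange(6, dtype=np.float32).reshape(3, 2)   # 3 frames, 2 mel bins
print(pad_constant(x, 5, -10.0))   # rows 3-4 are all -10.0 (a stand-in logmel eps)
print(pad_repeat(x, 5))            # rows 3-4 repeat rows 0-1
mask = np.zeros(5); mask[0:3] = 1  # mask marks the real (unpadded) frames
print(mask)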
1c49fb45dc43ea2b8aafb011957f27effda703a7
1,190
py
Python
all_tests/it10.py
shushantkumar/ci_edit_final
3b13c7a39b2112ed8daaa70bc4f0f50d67909494
[ "Apache-2.0" ]
null
null
null
all_tests/it10.py
shushantkumar/ci_edit_final
3b13c7a39b2112ed8daaa70bc4f0f50d67909494
[ "Apache-2.0" ]
null
null
null
all_tests/it10.py
shushantkumar/ci_edit_final
3b13c7a39b2112ed8daaa70bc4f0f50d67909494
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from app.curses_util import * import app.fake_curses_testing class it10(app.fake_curses_testing.FakeCursesTestCase): def setUp(self): app.fake_curses_testing.FakeCursesTestCase.setUp(self) def test10(self): #self.setMovieMode(True) lineLimitIndicator = self.prg.prefs.editor['lineLimitIndicator'] self.prg.prefs.editor['lineLimitIndicator'] = 10 self.runWithFakeInputs([ self.displayCheck(2, 7, [u" "]), self.writeText(u"A line with numbers 1234567890"), self.displayCheck(2, 7, [u"A line with numbers 1234567890"]), self.writeText(u". Writing"), self.displayCheck(2, 7, [u"ith numbers 1234567890. Writing"]), self.writeText(u" some more."), self.displayCheck(2, 7, [u" 1234567890. Writing some more."]), self.writeText(u"\n"), self.displayCheck(2, 7, [u"A line with numbers 1234567890."]), CTRL_Q, u"n" ]) self.prg.prefs.editor['lineLimitIndicator'] = lineLimitIndicator
38.387097
74
0.653782
from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from app.curses_util import * import app.fake_curses_testing class it10(app.fake_curses_testing.FakeCursesTestCase): def setUp(self): app.fake_curses_testing.FakeCursesTestCase.setUp(self) def test10(self): lineLimitIndicator = self.prg.prefs.editor['lineLimitIndicator'] self.prg.prefs.editor['lineLimitIndicator'] = 10 self.runWithFakeInputs([ self.displayCheck(2, 7, [u" "]), self.writeText(u"A line with numbers 1234567890"), self.displayCheck(2, 7, [u"A line with numbers 1234567890"]), self.writeText(u". Writing"), self.displayCheck(2, 7, [u"ith numbers 1234567890. Writing"]), self.writeText(u" some more."), self.displayCheck(2, 7, [u" 1234567890. Writing some more."]), self.writeText(u"\n"), self.displayCheck(2, 7, [u"A line with numbers 1234567890."]), CTRL_Q, u"n" ]) self.prg.prefs.editor['lineLimitIndicator'] = lineLimitIndicator
true
true
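The displayCheck expectations above encode a horizontal-scroll rule: once the typed line outgrows the visible text area, only its trailing window is shown so the cursor stays on screen. A rough sketch of that rule, assuming a 31-column text area (visible_window is a name invented here for illustration, not ci_edit's API):

def visible_window(line, width):
    # show the whole line if it fits, otherwise the trailing `width` chars
    return line if len(line) <= width else line[-width:]

line = "A line with numbers 1234567890. Writing"
print(visible_window(line, 31))  # 'ith numbers 1234567890. Writing'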
1c49fb62426836b9756f2971c833979c6b552fae
2,657
py
Python
tests/test_cdtw.py
dizcza/cdtw-python
a83fffd6fc222a1691f07421fd4dbf46dc19e0aa
[ "MIT" ]
null
null
null
tests/test_cdtw.py
dizcza/cdtw-python
a83fffd6fc222a1691f07421fd4dbf46dc19e0aa
[ "MIT" ]
null
null
null
tests/test_cdtw.py
dizcza/cdtw-python
a83fffd6fc222a1691f07421fd4dbf46dc19e0aa
[ "MIT" ]
null
null
null
import unittest import math import numpy as np from cdtw.dtw import * from numpy.testing import assert_array_equal, assert_array_almost_equal try: import dtaidistance DTAIDISTANCE_INSTALLED = True except ImportError: DTAIDISTANCE_INSTALLED = False class TestCDTW(unittest.TestCase): def test_empty(self): self.assertRaises(ValueError, dtw_mat, [], [1.0, 2.0]) self.assertRaises(ValueError, dtw_dist, [], [1.0, 2.0]) def test_one_point(self): self.assertEqual(dtw_dist([1.0], [5.0]), 4.0) cost_mat = dtw_mat([1.0], [5.0]) assert_array_equal(cost_mat, [[4.0]]) assert_array_equal(dtw_path(cost_mat), [(0, 0)]) def test_simple(self): x = [1, 2, 3, 4, 5] y = [2, 3, 4] cost_mat_expected = np.sqrt([ [1, 5, 14], [1, 2, 6], [2, 1, 2], [6, 2, 1], [15, 6, 2] ]) path_expected = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 2)] cost_mat = dtw_mat(x, y) self.assertAlmostEqual(dtw_dist(x, y), math.sqrt(2.0), places=6) assert_array_almost_equal(cost_mat, cost_mat_expected) assert_array_equal(dtw_path(cost_mat), path_expected) def test_order_does_not_matter(self): np.random.seed(0) x = np.random.randn(100) y = np.random.randn(300) assert_array_almost_equal(dtw_mat(x, y), dtw_mat(y, x).T) self.assertAlmostEqual(dtw_dist(x, y), dtw_dist(y, x)) def test_dtw_distance_path(self): np.random.seed(0) x = np.random.randn(10) y = np.random.randn(30) cost_mat = dtw_mat(x, y) self.assertAlmostEqual(cost_mat[-1, -1], dtw_dist(x, y), places=6) path = dtw_path(cost_mat) assert_array_equal(path[0], (0, 0)) assert_array_equal(path[-1], (len(x) - 1, len(y) - 1)) @unittest.skipUnless(DTAIDISTANCE_INSTALLED, "dtaidistance not installed") def test_dtaidistance(self): np.random.seed(0) x = np.random.randn(100).astype(np.float32) y = np.random.randn(30).astype(np.float32) self.assertAlmostEqual(dtw_dist(x, y), dtaidistance.dtw.distance(x, y), places=6) _, cost_mat_expected = dtaidistance.dtw.warping_paths(x, y) cost_mat = dtw_mat(x, y) assert_array_almost_equal(cost_mat, cost_mat_expected[1:, 1:], decimal=5) path_expected = dtaidistance.dtw.best_path(cost_mat_expected) assert_array_equal(dtw_path(cost_mat), path_expected) if __name__ == '__main__': unittest.main()
33.2125
78
0.596161
import unittest import math import numpy as np from cdtw.dtw import * from numpy.testing import assert_array_equal, assert_array_almost_equal try: import dtaidistance DTAIDISTANCE_INSTALLED = True except ImportError: DTAIDISTANCE_INSTALLED = False class TestCDTW(unittest.TestCase): def test_empty(self): self.assertRaises(ValueError, dtw_mat, [], [1.0, 2.0]) self.assertRaises(ValueError, dtw_dist, [], [1.0, 2.0]) def test_one_point(self): self.assertEqual(dtw_dist([1.0], [5.0]), 4.0) cost_mat = dtw_mat([1.0], [5.0]) assert_array_equal(cost_mat, [[4.0]]) assert_array_equal(dtw_path(cost_mat), [(0, 0)]) def test_simple(self): x = [1, 2, 3, 4, 5] y = [2, 3, 4] cost_mat_expected = np.sqrt([ [1, 5, 14], [1, 2, 6], [2, 1, 2], [6, 2, 1], [15, 6, 2] ]) path_expected = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 2)] cost_mat = dtw_mat(x, y) self.assertAlmostEqual(dtw_dist(x, y), math.sqrt(2.0), places=6) assert_array_almost_equal(cost_mat, cost_mat_expected) assert_array_equal(dtw_path(cost_mat), path_expected) def test_order_does_not_matter(self): np.random.seed(0) x = np.random.randn(100) y = np.random.randn(300) assert_array_almost_equal(dtw_mat(x, y), dtw_mat(y, x).T) self.assertAlmostEqual(dtw_dist(x, y), dtw_dist(y, x)) def test_dtw_distance_path(self): np.random.seed(0) x = np.random.randn(10) y = np.random.randn(30) cost_mat = dtw_mat(x, y) self.assertAlmostEqual(cost_mat[-1, -1], dtw_dist(x, y), places=6) path = dtw_path(cost_mat) assert_array_equal(path[0], (0, 0)) assert_array_equal(path[-1], (len(x) - 1, len(y) - 1)) @unittest.skipUnless(DTAIDISTANCE_INSTALLED, "dtaidistance not installed") def test_dtaidistance(self): np.random.seed(0) x = np.random.randn(100).astype(np.float32) y = np.random.randn(30).astype(np.float32) self.assertAlmostEqual(dtw_dist(x, y), dtaidistance.dtw.distance(x, y), places=6) _, cost_mat_expected = dtaidistance.dtw.warping_paths(x, y) cost_mat = dtw_mat(x, y) assert_array_almost_equal(cost_mat, cost_mat_expected[1:, 1:], decimal=5) path_expected = dtaidistance.dtw.best_path(cost_mat_expected) assert_array_equal(dtw_path(cost_mat), path_expected) if __name__ == '__main__': unittest.main()
true
true
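dtw_mat and dtw_dist come from the compiled cdtw extension, so the recurrence is not visible in this file. A reference sketch, assuming the library accumulates squared pointwise differences with the standard match/insert/delete recurrence and reports the square root, which is exactly what cost_mat_expected in test_simple encodes:

import math
import numpy as np

def dtw_mat_ref(x, y):
    # accumulate squared differences, then return the element-wise sqrt
    if len(x) == 0 or len(y) == 0:
        raise ValueError("empty input")
    n, m = len(x), len(y)
    s = np.full((n, m), np.inf)
    for i in range(n):
        for j in range(m):
            d = (x[i] - y[j]) ** 2
            if i == 0 and j == 0:
                s[i, j] = d
            else:
                prev = min(s[i - 1, j] if i else np.inf,
                           s[i, j - 1] if j else np.inf,
                           s[i - 1, j - 1] if i and j else np.inf)
                s[i, j] = d + prev
    return np.sqrt(s)

cost = dtw_mat_ref([1, 2, 3, 4, 5], [2, 3, 4])
assert math.isclose(cost[-1, -1], math.sqrt(2.0))  # matches test_simple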
1c49fd605d161c98160f7a93f5883987a5cf6858
2,476
py
Python
samsungctl/interactive.py
jakubpas/samsungctl
adda11c55038e2a3d057edf515ee44a2fd950949
[ "MIT" ]
4
2021-03-01T01:49:23.000Z
2022-02-08T16:18:37.000Z
samsungctl/interactive.py
jakubpas/samsungctl
adda11c55038e2a3d057edf515ee44a2fd950949
[ "MIT" ]
null
null
null
samsungctl/interactive.py
jakubpas/samsungctl
adda11c55038e2a3d057edf515ee44a2fd950949
[ "MIT" ]
1
2021-06-23T20:42:05.000Z
2021-06-23T20:42:05.000Z
import curses _wake_on_lan = '44:5C:E9:51:C8:29' _mappings = [ ["p", "KEY_POWER", "P", "Power off"], ["h", "KEY_HOME", "H", "Home"], ["KEY_UP", "KEY_UP", "Up", "Up"], ["KEY_DOWN", "KEY_DOWN", "Down", "Down"], ["KEY_LEFT", "KEY_LEFT", "Left", "Left"], ["KEY_RIGHT", "KEY_RIGHT", "Right", "Right"], ["\n", "KEY_ENTER", "Enter", "Enter"], ["KEY_BACKSPACE", "KEY_RETURN", "Backspace", "Return"], ["e", "KEY_EXIT", "E", "Exit"], [" ", "KEY_PLAY", "Space", "Play/Pause"], ["m", "KEY_MENU", "M", "Menu"], ["s", "KEY_SOURCE", "S", "Source"], ["+", "KEY_VOLUP", "+", "Volume Up"], ["-", "KEY_VOLDOWN", "-", "Volume Down"], ["*", "KEY_MUTE", "*", "Mute"], ["s", "KEY_HDMI", "S", "HDMI Source"], ["i", "KEY_INFO", "I", "Info"], ["n", "KEY_MORE", "D", "Numbers"], ] def run(remote): """Run interactive remote control application.""" curses.wrapper(_control, remote) def _control(std_scr, remote): height, width = std_scr.getmaxyx() std_scr.addstr("Interactive mode, press 'Q' to exit.\n") std_scr.addstr("Key mappings:\n") column_len = max(len(mapping[2]) for mapping in _mappings) + 1 mappings_dict = {} for mapping in _mappings: mappings_dict[mapping[0]] = mapping[1] row = std_scr.getyx()[0] + 2 if row < height: line = " {}= {} ({})\n".format(mapping[2].ljust(column_len), mapping[3], mapping[1]) std_scr.addstr(line) elif row == height: std_scr.addstr("[Terminal is too small to show all keys]\n") running = True while running: key = std_scr.getkey() if key == "q": running = False if key in mappings_dict: remote.control(mappings_dict[key]) try: std_scr.addstr(".") except curses.error: std_scr.deleteln() std_scr.move(std_scr.getyx()[0], 0) std_scr.addstr(".")
37.515152
74
0.421648
import curses _wake_on_lan = '44:5C:E9:51:C8:29' _mappings = [ ["p", "KEY_POWER", "P", "Power off"], ["h", "KEY_HOME", "H", "Home"], ["KEY_UP", "KEY_UP", "Up", "Up"], ["KEY_DOWN", "KEY_DOWN", "Down", "Down"], ["KEY_LEFT", "KEY_LEFT", "Left", "Left"], ["KEY_RIGHT", "KEY_RIGHT", "Right", "Right"], ["\n", "KEY_ENTER", "Enter", "Enter"], ["KEY_BACKSPACE", "KEY_RETURN", "Backspace", "Return"], ["e", "KEY_EXIT", "E", "Exit"], [" ", "KEY_PLAY", "Space", "Play/Pause"], ["m", "KEY_MENU", "M", "Menu"], ["s", "KEY_SOURCE", "S", "Source"], ["+", "KEY_VOLUP", "+", "Volume Up"], ["-", "KEY_VOLDOWN", "-", "Volume Down"], ["*", "KEY_MUTE", "*", "Mute"], ["s", "KEY_HDMI", "S", "HDMI Source"], ["i", "KEY_INFO", "I", "Info"], ["n", "KEY_MORE", "D", "Numbers"], ] def run(remote): curses.wrapper(_control, remote) def _control(std_scr, remote): height, width = std_scr.getmaxyx() std_scr.addstr("Interactive mode, press 'Q' to exit.\n") std_scr.addstr("Key mappings:\n") column_len = max(len(mapping[2]) for mapping in _mappings) + 1 mappings_dict = {} for mapping in _mappings: mappings_dict[mapping[0]] = mapping[1] row = std_scr.getyx()[0] + 2 if row < height: line = " {}= {} ({})\n".format(mapping[2].ljust(column_len), mapping[3], mapping[1]) std_scr.addstr(line) elif row == height: std_scr.addstr("[Terminal is too small to show all keys]\n") running = True while running: key = std_scr.getkey() if key == "q": running = False if key in mappings_dict: remote.control(mappings_dict[key]) try: std_scr.addstr(".") except curses.error: std_scr.deleteln() std_scr.move(std_scr.getyx()[0], 0) std_scr.addstr(".")
true
true
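_control above reduces to a dictionary dispatch from single key presses to remote commands. A curses-free sketch of that pattern with a stand-in remote (FakeRemote is invented here for illustration):

_mappings = [["p", "KEY_POWER"], ["+", "KEY_VOLUP"], ["-", "KEY_VOLDOWN"]]
mappings_dict = {key: command for key, command in _mappings}

class FakeRemote:
    def control(self, command):
        print("sending", command)

remote = FakeRemote()
for key in "p+q":            # 'q' has no mapping, so it is ignored here
    if key in mappings_dict:
        remote.control(mappings_dict[key])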
1c49fd615acaec331d8b1875e666820618016ac8
4,988
py
Python
attention.py
huajianjiu/ANSMESC
76323a46f638c717e23388cf529734081a70eeee
[ "Apache-2.0" ]
1
2021-08-09T03:45:36.000Z
2021-08-09T03:45:36.000Z
attention.py
huajianjiu/ANSMESC
76323a46f638c717e23388cf529734081a70eeee
[ "Apache-2.0" ]
2
2021-08-09T07:40:19.000Z
2021-08-10T12:34:04.000Z
attention.py
yuanzhiKe/ANSMESC
76323a46f638c717e23388cf529734081a70eeee
[ "Apache-2.0" ]
null
null
null
# author - Richard Liao # Dec 26 2016 # Attention GRU network from keras import backend as K from keras.engine.topology import Layer from keras import initializers, regularizers, constraints class AttentionWithContext(Layer): """ Attention operation, with a context/query vector, for temporal data. Supports Masking. Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf] "Hierarchical Attention Networks for Document Classification" by using a context vector to assist the attention # Input shape 3D tensor with shape: `(samples, steps, features)`. # Output shape 2D tensor with shape: `(samples, features)`. :param kwargs: Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True. The dimensions are inferred based on the output shape of the RNN. Example: model.add(LSTM(64, return_sequences=True)) model.add(AttentionWithContext()) """ def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None, W_constraint=None, u_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.u_regularizer = regularizers.get(u_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.u_constraint = constraints.get(u_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias super(AttentionWithContext, self).__init__(**kwargs) def build(self, input_shape): assert len(input_shape) == 3 self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) if self.bias: self.b = self.add_weight(shape=(input_shape[-1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) self.u = self.add_weight(shape=(input_shape[-1],), initializer=self.init, name='{}_u'.format(self.name), regularizer=self.u_regularizer, constraint=self.u_constraint) super(AttentionWithContext, self).build(input_shape) def compute_mask(self, input, input_mask=None): # do not pass the mask to the next layers return None def call(self, x, mask=None): uit = K.dot(x, self.W) if self.bias: uit += self.b uit = K.tanh(uit) # ait = K.dot(uit, self.u) # replace this mul_a = uit * self.u # with this ait = K.sum(mul_a, axis=2) # and this a = K.exp(ait) # apply mask after the exp. will be re-normalized next if mask is not None: # Cast the mask to floatX to avoid float64 upcasting in theano a *= K.cast(mask, K.floatx()) # in some cases especially in the early stages of training the sum may be almost zero # and this results in NaN's. A workaround is to add a very small positive number ε to the sum. 
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], input_shape[-1] if __name__ == "__main__": from keras.models import Model from keras.layers import Input, Embedding, Bidirectional, TimeDistributed, GRU, Dense import numpy as np input_array = np.random.randint(25, size=(15, 100)) embedding_layer = Embedding(25 + 1, 100, input_length=100, trainable=True) sentence_input = Input(shape=(100,), dtype='int32') embedded_sequences = embedding_layer(sentence_input) l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences) l_dense = TimeDistributed(Dense(200))(l_lstm) l_att = AttentionWithContext()(l_dense) model = Model(sentence_input, l_att) # model = Model(sentence_input, l_dense) model.compile('rmsprop', 'mse') output_array = model.predict(input_array) print(output_array.shape)
39.587302
102
0.594226
from keras import backend as K from keras.engine.topology import Layer from keras import initializers, regularizers, constraints class AttentionWithContext(Layer): def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None, W_constraint=None, u_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.u_regularizer = regularizers.get(u_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.u_constraint = constraints.get(u_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias super(AttentionWithContext, self).__init__(**kwargs) def build(self, input_shape): assert len(input_shape) == 3 self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) if self.bias: self.b = self.add_weight(shape=(input_shape[-1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) self.u = self.add_weight(shape=(input_shape[-1],), initializer=self.init, name='{}_u'.format(self.name), regularizer=self.u_regularizer, constraint=self.u_constraint) super(AttentionWithContext, self).build(input_shape) def compute_mask(self, input, input_mask=None): return None def call(self, x, mask=None): uit = K.dot(x, self.W) if self.bias: uit += self.b uit = K.tanh(uit) mul_a = uit * self.u ait = K.sum(mul_a, axis=2) a = K.exp(ait) if mask is not None: a *= K.cast(mask, K.floatx()) # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], input_shape[-1] if __name__ == "__main__": from keras.models import Model from keras.layers import Input, Embedding, Bidirectional, TimeDistributed, GRU, Dense import numpy as np input_array = np.random.randint(25, size=(15, 100)) embedding_layer = Embedding(25 + 1, 100, input_length=100, trainable=True) sentence_input = Input(shape=(100,), dtype='int32') embedded_sequences = embedding_layer(sentence_input) l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences) l_dense = TimeDistributed(Dense(200))(l_lstm) l_att = AttentionWithContext()(l_dense) model = Model(sentence_input, l_att) # model = Model(sentence_input, l_dense) model.compile('rmsprop', 'mse') output_array = model.predict(input_array) print(output_array.shape)
true
true
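The call method above computes context-vector attention: a tanh projection, a dot product with the learned vector u, an epsilon-guarded softmax over time steps, and a weighted sum. A numpy sketch of the same arithmetic (1e-7 stands in for K.epsilon(), and the weights are random placeholders):

import numpy as np

def attention_with_context_ref(x, W, b, u):
    # x: (batch, steps, features); W: (features, features); b, u: (features,)
    uit = np.tanh(x @ W + b)               # hidden representation per step
    ait = (uit * u).sum(axis=2)            # similarity to the context vector u
    a = np.exp(ait)
    a = a / (a.sum(axis=1, keepdims=True) + 1e-7)  # softmax with eps guard
    return (x * a[..., None]).sum(axis=1)          # weighted sum over steps

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 5, 4))
W = rng.standard_normal((4, 4)); b = np.zeros(4); u = rng.standard_normal(4)
print(attention_with_context_ref(x, W, b, u).shape)  # (2, 4)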
1c49fdbc6de8e0fa0905400b281525d8cbffcdac
2,739
py
Python
tests/test_client.py
nicolaskenner/jira
5c27f6ddafffc6110be1db4749fa67025852bcb6
[ "BSD-2-Clause" ]
1
2021-03-04T08:06:37.000Z
2021-03-04T08:06:37.000Z
tests/test_client.py
nicolaskenner/jira
5c27f6ddafffc6110be1db4749fa67025852bcb6
[ "BSD-2-Clause" ]
1
2020-08-25T15:50:27.000Z
2020-08-25T15:50:27.000Z
tests/test_client.py
nicolaskenner/jira
5c27f6ddafffc6110be1db4749fa67025852bcb6
[ "BSD-2-Clause" ]
1
2022-01-18T20:17:48.000Z
2022-01-18T20:17:48.000Z
# -*- coding: utf-8 -*- import getpass import pytest # from tenacity import retry # from tenacity import wait_incrementing from tests import get_unique_project_name from tests import JiraTestManager from jira import Role, Issue, JIRA, JIRAError, Project # noqa import jira.client @pytest.fixture() def prep(): pass @pytest.fixture(scope="module") def test_manager(): return JiraTestManager() @pytest.fixture() def cl_admin(test_manager): return test_manager.jira_admin @pytest.fixture() def cl_normal(test_manager): return test_manager.jira_normal @pytest.fixture(scope="function") def slug(request, cl_admin): def remove_by_slug(): try: cl_admin.delete_project(slug) except (ValueError, JIRAError): # Some tests have project already removed, so we stay silent pass slug = get_unique_project_name() project_name = "Test user=%s key=%s A" % (getpass.getuser(), slug) try: proj = cl_admin.project(slug) except JIRAError: proj = cl_admin.create_project(slug, project_name) assert proj request.addfinalizer(remove_by_slug) return slug def test_delete_project(cl_admin, cl_normal, slug): assert cl_admin.delete_project(slug) def test_delete_inexistent_project(cl_admin): slug = "abogus123" with pytest.raises(JIRAError) as ex: assert cl_admin.delete_project(slug) assert "No project could be found with key" in str( ex.value ) or 'Parameter pid="%s" is not a Project, projectID or slug' % slug in str( ex.value ) def test_templates(cl_admin): templates = cl_admin.templates() expected_templates = set( filter( None, """ Agility Basic Bug tracking Content Management Customer service Document Approval IT Service Desk Kanban software development Lead Tracking Process management Procurement Project management Recruitment Scrum software development Task management """.split( "\n" ), ) ) for t in expected_templates: assert t in templates def test_result_list(): iterable = [2, 3] startAt = 0 maxResults = 50 total = 2 results = jira.client.ResultList(iterable, startAt, maxResults, total) for idx, result in enumerate(results): assert results[idx] == iterable[idx] assert next(results) == iterable[0] assert next(results) == iterable[1] with pytest.raises(StopIteration): next(results) def test_result_list_if_empty(): results = jira.client.ResultList() for r in results: raise AssertionError("`results` should be empty") with pytest.raises(StopIteration): next(results)
20.75
80
0.68054
import getpass import pytest from tests import get_unique_project_name from tests import JiraTestManager from jira import Role, Issue, JIRA, JIRAError, Project import jira.client @pytest.fixture() def prep(): pass @pytest.fixture(scope="module") def test_manager(): return JiraTestManager() @pytest.fixture() def cl_admin(test_manager): return test_manager.jira_admin @pytest.fixture() def cl_normal(test_manager): return test_manager.jira_normal @pytest.fixture(scope="function") def slug(request, cl_admin): def remove_by_slug(): try: cl_admin.delete_project(slug) except (ValueError, JIRAError): pass slug = get_unique_project_name() project_name = "Test user=%s key=%s A" % (getpass.getuser(), slug) try: proj = cl_admin.project(slug) except JIRAError: proj = cl_admin.create_project(slug, project_name) assert proj request.addfinalizer(remove_by_slug) return slug def test_delete_project(cl_admin, cl_normal, slug): assert cl_admin.delete_project(slug) def test_delete_inexistent_project(cl_admin): slug = "abogus123" with pytest.raises(JIRAError) as ex: assert cl_admin.delete_project(slug) assert "No project could be found with key" in str( ex.value ) or 'Parameter pid="%s" is not a Project, projectID or slug' % slug in str( ex.value ) def test_templates(cl_admin): templates = cl_admin.templates() expected_templates = set( filter( None, """ Agility Basic Bug tracking Content Management Customer service Document Approval IT Service Desk Kanban software development Lead Tracking Process management Procurement Project management Recruitment Scrum software development Task management """.split( "\n" ), ) ) for t in expected_templates: assert t in templates def test_result_list(): iterable = [2, 3] startAt = 0 maxResults = 50 total = 2 results = jira.client.ResultList(iterable, startAt, maxResults, total) for idx, result in enumerate(results): assert results[idx] == iterable[idx] assert next(results) == iterable[0] assert next(results) == iterable[1] with pytest.raises(StopIteration): next(results) def test_result_list_if_empty(): results = jira.client.ResultList() for r in results: raise AssertionError("`results` should be empty") with pytest.raises(StopIteration): next(results)
true
true
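test_result_list above pins down two behaviours at once: index access like a list and a single shared cursor for next() that eventually raises StopIteration. A sketch of a list subclass with those semantics (jira's real ResultList also carries startAt/maxResults/total pagination fields; the internals here are assumptions):

class ResultListSketch(list):
    def __init__(self, iterable=None, startAt=0, maxResults=0, total=0):
        super().__init__(iterable or [])
        self.startAt, self.maxResults, self.total = startAt, maxResults, total
        self._pos = 0

    def __next__(self):
        # one shared cursor: repeated next() walks the list once, then stops
        if self._pos >= len(self):
            raise StopIteration
        item = self[self._pos]
        self._pos += 1
        return item

results = ResultListSketch([2, 3], 0, 50, 2)
assert results[0] == 2 and next(results) == 2 and next(results) == 3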
1c49fdc0256ccd65c716e03f0e803a5cd3cf8ffb
2,151
py
Python
google_oauth/__init__.py
martialo12/flask-google-login
592043ed8cf8fddcaab7536c1911d654013b5e4f
[ "MIT" ]
null
null
null
google_oauth/__init__.py
martialo12/flask-google-login
592043ed8cf8fddcaab7536c1911d654013b5e4f
[ "MIT" ]
null
null
null
google_oauth/__init__.py
martialo12/flask-google-login
592043ed8cf8fddcaab7536c1911d654013b5e4f
[ "MIT" ]
null
null
null
# python standard libraries from pathlib import Path import logging.config # third party libraries from flask import Flask from flask_login import LoginManager from oauthlib.oauth2 import WebApplicationClient from flask_bootstrap import Bootstrap from flask_sqlalchemy import SQLAlchemy import yaml # logging path_to_config_file = Path(__file__).parent / "config/config.yaml" logging.config.fileConfig(path_to_config_file, disable_existing_loggers=False) # create logger logger = logging.getLogger("flaskapp") with open(rf"{path_to_config_file}") as cfgfile: logger.info(f"loading configuration from config {path_to_config_file}") config = yaml.load(cfgfile, Loader=yaml.FullLoader) flask_app_conf = config["FLASK_APP_CONFIGURATION"] google_conf = config["GOOGLE_CONFIG"] db_conf = config["DB_CONFIG"] logger.info(f"=========Flask app Config========\n") logger.debug(f"{flask_app_conf}") logger.info(f"=========Google Config========\n") logger.debug(f"{google_conf}") logger.info(f"=========db Config========\n") logger.debug(f"{db_conf}") # flask app config secret_key = flask_app_conf["secret_key"] port = flask_app_conf["port"] debug = flask_app_conf["debug"] host = flask_app_conf["host"] # google config google_discovery_url = google_conf["google_discovery_url"] google_redirect_uri = google_conf["google_redirect_uri"] google_client_id = google_conf["google_client_id"] google_client_secret = google_conf["google_client_secret"] # db conf SQLALCHEMY_DATABASE_URI = db_conf["SQLALCHEMY_DATABASE_URI"] SQLALCHEMY_TRACK_MODIFICATIONS = db_conf["SQLALCHEMY_TRACK_MODIFICATIONS"] # create flask app app = Flask(__name__) app.config["SECRET_KEY"] = secret_key app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS # add extensions to our app db = SQLAlchemy(app) bootstrap = Bootstrap(app) # user session management login_manager = LoginManager() login_manager.init_app(app) # Oauth2 client setup client = WebApplicationClient(google_client_id) from google_oauth import routes
22.40625
78
0.76662
from pathlib import Path import logging.config from flask import Flask from flask_login import LoginManager from oauthlib.oauth2 import WebApplicationClient from flask_bootstrap import Bootstrap from flask_sqlalchemy import SQLAlchemy import yaml path_to_config_file = Path(__file__).parent / "config/config.yaml" logging.config.fileConfig(path_to_config_file, disable_existing_loggers=False) logger = logging.getLogger("flaskapp") with open(rf"{path_to_config_file}") as cfgfile: logger.info(f"loading configuration from config {path_to_config_file}") config = yaml.load(cfgfile, Loader=yaml.FullLoader) flask_app_conf = config["FLASK_APP_CONFIGURATION"] google_conf = config["GOOGLE_CONFIG"] db_conf = config["DB_CONFIG"] logger.info(f"=========Flask app Config========\n") logger.debug(f"{flask_app_conf}") logger.info(f"=========Google Config========\n") logger.debug(f"{google_conf}") logger.info(f"=========db Config========\n") logger.debug(f"{db_conf}") secret_key = flask_app_conf["secret_key"] port = flask_app_conf["port"] debug = flask_app_conf["debug"] host = flask_app_conf["host"] google_discovery_url = google_conf["google_discovery_url"] google_redirect_uri = google_conf["google_redirect_uri"] google_client_id = google_conf["google_client_id"] google_client_secret = google_conf["google_client_secret"] SQLALCHEMY_DATABASE_URI = db_conf["SQLALCHEMY_DATABASE_URI"] SQLALCHEMY_TRACK_MODIFICATIONS = db_conf["SQLALCHEMY_TRACK_MODIFICATIONS"] app = Flask(__name__) app.config["SECRET_KEY"] = secret_key app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS db = SQLAlchemy(app) bootstrap = Bootstrap(app) login_manager = LoginManager() login_manager.init_app(app) client = WebApplicationClient(google_client_id) from google_oauth import routes
true
true
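The module above expects a config.yaml with three top-level sections. A sketch of the shape implied by the keys it reads, with placeholder values, parsed via yaml.safe_load (equivalent in effect to yaml.load with FullLoader for plain data like this):

import yaml

config_text = """
FLASK_APP_CONFIGURATION:
  secret_key: change-me
  port: 5000
  debug: true
  host: 0.0.0.0
GOOGLE_CONFIG:
  google_discovery_url: https://accounts.google.com/.well-known/openid-configuration
  google_redirect_uri: https://localhost:5000/login/callback
  google_client_id: your-client-id
  google_client_secret: your-client-secret
DB_CONFIG:
  SQLALCHEMY_DATABASE_URI: sqlite:///app.db
  SQLALCHEMY_TRACK_MODIFICATIONS: false
"""
config = yaml.safe_load(config_text)
print(config["FLASK_APP_CONFIGURATION"]["port"])  # 5000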
1c49fe277718b141c1da5f42a448b8e3c088d5a0
3,987
py
Python
python/basics/chapter_6_dictionaries/exercises_6.py
gabriel-miglioranza/python_crash_course
57db9d6b17b225a6aaa5451c3a3b567ffc426b37
[ "MIT" ]
null
null
null
python/basics/chapter_6_dictionaries/exercises_6.py
gabriel-miglioranza/python_crash_course
57db9d6b17b225a6aaa5451c3a3b567ffc426b37
[ "MIT" ]
null
null
null
python/basics/chapter_6_dictionaries/exercises_6.py
gabriel-miglioranza/python_crash_course
57db9d6b17b225a6aaa5451c3a3b567ffc426b37
[ "MIT" ]
null
null
null
# Chapter 6 exercises from the book Python Crash Course: A Hands-On, Project-Based Introduction to Programming. # 6-1. Person person = { 'first_name': 'sean', 'last_name': 'carroll', 'city': 'los angeles' } print(person) # 6-2. Favorite Numbers favorite_numbers = { 'cris': 23, 'bianca': 133, 'monica': 42 } print("Monica's favorite number is " + str(favorite_numbers['monica']) + ".") print("Cris' favorite number is " + str(favorite_numbers['cris']) + ".") print("Biancas's favorite number is " + str(favorite_numbers['bianca']) + ".") # 6-3 Glossary glossary = { 'list': 'set of values organized in a cardinal order.', 'tuple': "set of fixed values organized in a cardinal order.", 'dictionary': 'collection of key-value pairs.', 'string': 'a collection of characters in a certain order.', } print('List: ' + glossary['list']) print('Tuple: ' + glossary['tuple']) print('Dictionary: ' + glossary['dictionary']) # 6-4 Glossary 2 for key, value in glossary.items(): print(key.title() + ': ' + value) # 6-5. Rivers rivers = { 'nile': 'egypt', 'thames': 'england', 'são francisco': 'brazil' } for river, country in rivers.items(): print('The ' + river.title() + ' runs through ' + country.title() + '.') for river in rivers.keys(): print(river.title()) for country in rivers.values(): print(country.title()) # 6-6. Polling favorite_languages = { 'jen': 'python', 'sarah': 'c', 'edward': 'ruby', 'phil': 'python' } people_poll = ['jen', 'sarah', 'ned', 'phil', 'james'] for people in people_poll: if people in favorite_languages.keys(): print(people.title() + ', thanks for your answer.') else: print(people.title() + ', you have not taken the poll yet.') # 6-7. People person_0 = { 'first_name': 'sean', 'last_name': 'carroll', 'city': 'los angeles' } person_1 = { 'first_name': 'hannah', 'last_name': 'fry', 'city': 'london' } person_2 = { 'first_name': 'gabriel', 'last_name': 'miglioranza', 'city': 'porto alegre' } people = [person_0, person_1, person_2] for person in people: full_name = person['first_name'] + ' ' + person['last_name'] city = person['city'] print(full_name.title() + ' lives in ' + city.title() + '.') # 6-8 Pets montanha = { 'kind': 'dog', 'owner': 'gabriel' } chiara = { 'kind': 'cat', 'owner': 'amanda' } guri = { 'kind': 'dog', 'owner': 'ito' } pets = [montanha, chiara, guri] for pet in pets: for key, value in pet.items(): print(key.title() + ': ' + value.title()) # 6-9. Favorite Places favorite_places = { 'anna': ['angel falls', 'antartica', 'antelope canion'], 'paul': ['the azores', 'boracay', 'cabo san lucas'], 'miguel': ['grand canyon', 'faroe islands', 'fernando de noronha'] } for name, places in favorite_places.items(): print(name.title() + "'s favorite places are:") for place in places: print('\t' + place.title() + '.') # 6-10. Favorite Numbers favorite_numbers = { 'cris': [234, 3434, 343], 'bianca': [215423, 534, 3523], 'monica': [42, 3454, 345] } for name, numbers in favorite_numbers.items(): print(name.title() + ': ', numbers) # 6-11. Cities cities = { 'porto alegre':{ 'contry': 'brazil', 'population': '1479101', 'foundation': '1772 AC' }, 'new york city':{ 'contry': 'united states of america', 'population': '8175133', 'foundation': '1898 AC' }, 'rome':{ 'contry': 'italy', 'population': '2872800', 'foundation': '753 BC' } } for city, infos in cities.items(): print('About ' + city.title() + ':') for key, info in infos.items(): print(key.title() + ': ' + info.title()) # 6-12. Extensions
24.163636
112
0.562328
person = { 'first_name': 'sean', 'last_name': 'carroll', 'city': 'los angeles' } print(person) favorite_numbers = { 'cris': 23, 'bianca': 133, 'monica': 42 } print("Monica's favorite number is " + str(favorite_numbers['monica']) + ".") print("Cris' favorite number is " + str(favorite_numbers['cris']) + ".") print("Biancas's favorite number is " + str(favorite_numbers['bianca']) + ".") # 6-3 Glossary glossary = { 'list': 'set of values organized in a cardinal order.', 'tuple': "set of fixed values organized in a cardinal order.", 'dictionary': 'collection of key-value pairs.', 'string': 'a collection of characters in a certain order.', } print('List: ' + glossary['list']) print('Tuple: ' + glossary['tuple']) print('Dictionary: ' + glossary['dictionary']) # 6-4 Glossary 2 for key, value in glossary.items(): print(key.title() + ': ' + value) # 6-5. Rivers rivers = { 'nile': 'egypt', 'thames': 'england', 'são francisco': 'brazil' } for river, country in rivers.items(): print('The ' + river.title() + ' runs through ' + country.title() + '.') for river in rivers.keys(): print(river.title()) for country in rivers.values(): print(country.title()) # 6-6. Polling favorite_languages = { 'jen': 'python', 'sarah': 'c', 'edward': 'ruby', 'phil': 'python' } people_poll = ['jen', 'sarah', 'ned', 'phil', 'james'] for people in people_poll: if people in favorite_languages.keys(): print(people.title() + ', thanks for your answer.') else: print(people.title() + ', you have not taken the poll yet.') # 6-7. People person_0 = { 'first_name': 'sean', 'last_name': 'carroll', 'city': 'los angeles' } person_1 = { 'first_name': 'hannah', 'last_name': 'fry', 'city': 'london' } person_2 = { 'first_name': 'gabriel', 'last_name': 'miglioranza', 'city': 'porto alegre' } people = [person_0, person_1, person_2] for person in people: full_name = person['first_name'] + ' ' + person['last_name'] city = person['city'] print(full_name.title() + ' lives in ' + city.title() + '.') # 6-8 Pets montanha = { 'kind': 'dog', 'owner': 'gabriel' } chiara = { 'kind': 'cat', 'owner': 'amanda' } guri = { 'kind': 'dog', 'owner': 'ito' } pets = [montanha, chiara, guri] for pet in pets: for key, value in pet.items(): print(key.title() + ': ' + value.title()) # 6-9. Favorite Places favorite_places = { 'anna': ['angel falls', 'antartica', 'antelope canion'], 'paul': ['the azores', 'boracay', 'cabo san lucas'], 'miguel': ['grand canyon', 'faroe islands', 'fernando de noronha'] } for name, places in favorite_places.items(): print(name.title() + "'s favorite places are:") for place in places: print('\t' + place.title() + '.') favorite_numbers = { 'cris': [234, 3434, 343], 'bianca': [215423, 534, 3523], 'monica': [42, 3454, 345] } for name, numbers in favorite_numbers.items(): print(name.title() + ': ', numbers) cities = { 'porto alegre':{ 'contry': 'brazil', 'population': '1479101', 'foundation': '1772 AC' }, 'new york city':{ 'contry': 'united states of america', 'population': '8175133', 'foundation': '1898 AC' }, 'rome':{ 'contry': 'italy', 'population': '2872800', 'foundation': '753 BC' } } for city, infos in cities.items(): print('About ' + city.title() + ':') for key, info in infos.items(): print(key.title() + ': ' + info.title())
true
true
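The file stops at the heading for exercise 6-12 (Extensions) with no code. One plausible extension, sketched here as an assumption rather than the book's answer, is to harden the dictionary lookups with get() so missing keys do not raise:

cities = {'rome': {'country': 'italy', 'population': '2872800'}}
info = cities.get('paris')          # returns None instead of raising KeyError
if info is None:
    print('No data for Paris yet.')
population = cities['rome'].get('population', 'unknown')
print('Rome population: ' + population)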
1c49ffe311feb2c993cc12b7b475ef5d345533e4
222,005
py
Python
SigProfilerTopography/source/plotting/TranscriptionReplicationStrandBiasFigures.py
AlexandrovLab/SigProfilerTopography
34c7cf24392bc77953370038a520ffc8d0bdee50
[ "BSD-2-Clause" ]
5
2021-04-02T14:03:45.000Z
2022-02-21T12:54:52.000Z
SigProfilerTopography/source/plotting/TranscriptionReplicationStrandBiasFigures.py
AlexandrovLab/SigProfilerTopography
34c7cf24392bc77953370038a520ffc8d0bdee50
[ "BSD-2-Clause" ]
null
null
null
SigProfilerTopography/source/plotting/TranscriptionReplicationStrandBiasFigures.py
AlexandrovLab/SigProfilerTopography
34c7cf24392bc77953370038a520ffc8d0bdee50
[ "BSD-2-Clause" ]
1
2022-01-22T06:27:49.000Z
2022-01-22T06:27:49.000Z
# This source code file is a part of SigProfilerTopography # SigProfilerTopography is a tool included as part of the SigProfiler # computational framework for comprehensive analysis of mutational # signatures from next-generation sequencing of cancer genomes. # SigProfilerTopography provides the downstream data analysis of # mutations and extracted mutational signatures w.r.t. # nucleosome occupancy, replication time, strand bias and processivity. # Copyright (C) 2018-2020 Burcak Otlu import os import numpy as np import statsmodels.stats.multitest # import matplotlib # BACKEND = 'Agg' # if matplotlib.get_backend().lower() != BACKEND.lower(): # # If backend is not set properly a call to describe will hang # matplotlib.use(BACKEND) from matplotlib import pyplot as plt from matplotlib.lines import Line2D from matplotlib import gridspec import pandas as pd from SigProfilerTopography.source.commons.TopographyCommons import natural_key from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_STRAND from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_STRAND from SigProfilerTopography.source.commons.TopographyCommons import LAGGING from SigProfilerTopography.source.commons.TopographyCommons import LEADING from SigProfilerTopography.source.commons.TopographyCommons import six_mutation_types from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS from SigProfilerTopography.source.commons.TopographyCommons import DATA from SigProfilerTopography.source.commons.TopographyCommons import FIGURE from SigProfilerTopography.source.commons.TopographyCommons import SCATTER_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import BAR_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_BAR_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import SAMPLES from SigProfilerTopography.source.commons.TopographyCommons import TABLES from SigProfilerTopography.source.commons.TopographyCommons import SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_P_VALUE from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_P_VALUE from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_Q_VALUE from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_Q_VALUE from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import GENIC_REAL_COUNT from 
SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import LEADING_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import GENIC_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import LEADING_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import GENIC from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC from SigProfilerTopography.source.commons.TopographyCommons import percentage_numbers from SigProfilerTopography.source.commons.TopographyCommons import percentage_strings from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_10_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_20_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_30_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_50_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_75_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_100_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import ID from SigProfilerTopography.source.commons.TopographyCommons import DBS from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT from SigProfilerTopography.source.commons.TopographyCommons import EXCEL_FILES from SigProfilerTopography.source.commons.TopographyCommons import write_excel_file from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT SIGNATURE = 'signature' CANCER_TYPE = 'cancer_type' MUTATION_TYPE = 'mutation_type' TYPE = 'type' SIGNIFICANT_STRAND = 'significant_strand' SIGNIFICANCE_LEVEL = 0.05 from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename from 
SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofSubsDict from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofIndelsDict from SigProfilerTopography.source.commons.TopographyCommons import Sample2NumberofDinucsDictFilename from SigProfilerTopography.source.commons.TopographyCommons import getSample2SubsSignature2NumberofMutationsDict from SigProfilerTopography.source.commons.TopographyCommons import getSample2IndelsSignature2NumberofMutationsDict from SigProfilerTopography.source.commons.TopographyCommons import Sample2DinucsSignature2NumberofMutationsDictFilename transcriptionStrands = [TRANSCRIBED_STRAND, UNTRANSCRIBED_STRAND] genicVersusIntergenicStrands=[GENIC, INTERGENIC] replicationStrands = [LAGGING, LEADING] ######################################################################## #New way #For Mutation Types def plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(sample,numberofMutations, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, outputDir, jobname): fig = plt.figure(figsize=(8,8), facecolor=None) plt.style.use('ggplot') # build a rectangle in axes coords left, width = .0, 1. bottom, height = .0, 1. right = left + width top = bottom + height # This code makes the background white. # Always put these statements after plt.figure ax = plt.gca() ax.set_facecolor('white') for edge_i in ['bottom','top','left','right']: ax.spines[edge_i].set_edgecolor("black") ax.spines[edge_i].set_linewidth(1) ax.spines[edge_i].set_bounds(-0.3, 0.3) plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes) plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes) plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes) plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) if (sample is not None): plt.title(sample, fontsize=15, fontweight='bold') plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold') plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold') # Put some extra place by xlim if necessary plt.xlim(-0.3, 0.3) plt.ylim(-0.3, 0.3) plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10) yticks = [-0.2, -0.1, 0.0, 0.1, 0.2] yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.yticks(yticks, yticklabels) xticks = [-0.2, -0.1, 0.0, 0.1, 0.2] xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.xticks(xticks, xticklabels) # type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type', # 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value', # 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list', # 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 
'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]] # # type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type', # 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value', # 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list', # 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]] ######################################################################## transcriptionRatiosDict = {} replicationRatiosDict = {} for mutationType in six_mutation_types: ################################################################## transcribed_real_count=0 untranscribed_real_count=0 if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['Transcribed_real_count'].values.size>0): transcribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['Transcribed_real_count'].values[0] if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['UnTranscribed_real_count'].values.size>0): untranscribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['UnTranscribed_real_count'].values[0] if (transcribed_real_count>0 and untranscribed_real_count>0): transcriptionRatiosDict[mutationType] = np.log10(transcribed_real_count/untranscribed_real_count) ################################################################## ################################################################## lagging_real_count = 0 leading_real_count = 0 if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values.size > 0): lagging_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values[0] if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values.size > 0): leading_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values[0] if (lagging_real_count>0 and leading_real_count>0): replicationRatiosDict[mutationType] = np.log10(lagging_real_count/leading_real_count) ################################################################## ################################################################## if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict): plt.scatter(replicationRatiosDict[mutationType], transcriptionRatiosDict[mutationType], label=mutationType) ################################################################## ######################################################################## legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095)) legend.get_frame().set_linewidth(1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('black') plt.axvline(x=0.0, color='gray', linestyle='--') plt.axhline(y=0.0, color='gray', linestyle='--') if sample is None: figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS) figureFile = os.path.join(outputDir, jobname, FIGURE, 
STRANDBIAS,SCATTER_PLOTS,figureName) else: figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS) os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True) figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName) fig.savefig(figureFile) plt.cla() plt.close(fig) ######################################################################## ######################################################################## #Old way #For Mutation Types def plot_ncomms11383_Supp_FigG_AllMutationTypes_TranscriptionLog10Ratio_ReplicationLog10Ratio(sample,numberofMutations,type2TranscriptionStrand2CountDict,type2ReplicationStrand2CountDict,outputDir,jobname): fig = plt.figure(figsize=(8,8), facecolor=None) plt.style.use('ggplot') # build a rectangle in axes coords left, width = .0, 1. bottom, height = .0, 1. right = left + width top = bottom + height # This code makes the background white. # Always put these statements after plt.figure ax = plt.gca() ax.set_facecolor('white') for edge_i in ['bottom','top','left','right']: ax.spines[edge_i].set_edgecolor("black") ax.spines[edge_i].set_linewidth(1) ax.spines[edge_i].set_bounds(-0.3, 0.3) plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes) plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes) plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes) plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) if (sample is not None): plt.title(sample, fontsize=15, fontweight='bold') plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold') plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold') # Put some extra place by xlim if necessary plt.xlim(-0.3, 0.3) plt.ylim(-0.3, 0.3) plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10) # plt.tick_params( # axis='y', # changes apply to the x-axis # which='both', # both major and minor ticks are affected # left='off' # ticks along the bottom edge are off # ) yticks = [-0.2, -0.1, 0.0, 0.1, 0.2] yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.yticks(yticks, yticklabels) xticks = [-0.2, -0.1, 0.0, 0.1, 0.2] xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.xticks(xticks, xticklabels) ######################################################################## transcriptionRatiosDict = {} replicationRatiosDict = {} for mutationType in six_mutation_types: if (mutationType in type2TranscriptionStrand2CountDict) and (mutationType in type2ReplicationStrand2CountDict): if ((TRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType]) and (UNTRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType])): transcriptionRatiosDict[mutationType]= np.log10(type2TranscriptionStrand2CountDict[mutationType][TRANSCRIBED_STRAND]/type2TranscriptionStrand2CountDict[mutationType][UNTRANSCRIBED_STRAND]) if ((LAGGING in type2ReplicationStrand2CountDict[mutationType]) and 
(LEADING in type2ReplicationStrand2CountDict[mutationType])): replicationRatiosDict[mutationType] = np.log10(type2ReplicationStrand2CountDict[mutationType][LAGGING]/type2ReplicationStrand2CountDict[mutationType][LEADING]) if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict): plt.scatter(replicationRatiosDict[mutationType],transcriptionRatiosDict[mutationType], label=mutationType) ######################################################################## legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095)) legend.get_frame().set_linewidth(1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('black') plt.axvline(x=0.0, color='gray', linestyle='--') plt.axhline(y=0.0, color='gray', linestyle='--') if sample is None: figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS) figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName) else: figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS) os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True) figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName) fig.savefig(figureFile) plt.cla() plt.close(fig) ######################################################################## ######################################################################## #July 7, 2020 def plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes(signatureType, sample, numberofMutations, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, signature_cutoff_numberofmutations_averageprobability_df, outputDir, jobname): fig = plt.figure(figsize=(8,8), facecolor=None) plt.style.use('ggplot') # build a rectangle in axes coords left, width = .0, 1. bottom, height = .0, 1. right = left + width top = bottom + height # This code makes the background white. 
# Always put these statements after plt.figure ax = plt.gca() ax.set_facecolor('white') for edge_i in ['bottom','top','left','right']: ax.spines[edge_i].set_edgecolor("black") ax.spines[edge_i].set_linewidth(1) ax.spines[edge_i].set_bounds(-0.3, 0.3) plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes) plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes) plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes) plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) if (sample is not None): plt.title(sample, fontsize=15, fontweight='bold') plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold') plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold') # Put some extra place by xlim if necessary plt.xlim(-0.3, 0.3) plt.ylim(-0.3, 0.3) plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10) yticks = [-0.2, -0.1, 0.0, 0.1, 0.2] yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.yticks(yticks, yticklabels) xticks = [-0.2, -0.1, 0.0, 0.1, 0.2] xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.xticks(xticks, xticklabels) transcriptionRatiosDict = {} replicationRatiosDict = {} for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique(): ################################################################################################# #First check whether we have this signature or not # type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type', # 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value', # 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list', # 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]] transcribed_real_count=0 untranscribed_real_count=0 if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['Transcribed_real_count'].values.size>0): transcribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['Transcribed_real_count'].values[0] if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['UnTranscribed_real_count'].values.size>0): untranscribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['UnTranscribed_real_count'].values[0] if (transcribed_real_count+untranscribed_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD): transcriptionRatiosDict[signature] = np.log10(transcribed_real_count/untranscribed_real_count) 
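################################################################################
# Illustrative sketch (not part of SigProfilerTopography): the guarded lookup used
# above checks .values.size before taking .values[0], so a mutation type or
# signature that is missing from the dataframe falls back to a count of 0 instead
# of raising an IndexError. The toy dataframe and counts below are hypothetical;
# only the column names mirror the tables used in this module.
import pandas as pd

def _sketch_safe_count_lookup():
    toy_df = pd.DataFrame({'type': ['C>A', 'C>T'],
                           'Transcribed_real_count': [120, 345],
                           'UnTranscribed_real_count': [100, 290]})
    transcribed_real_count = 0
    values = toy_df[toy_df['type'] == 'T>G']['Transcribed_real_count'].values
    if values.size > 0:
        transcribed_real_count = values[0]
    return transcribed_real_count  # 0 here, because 'T>G' has no row
################################################################################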
################################################################################################# ################################################################################################# # First check whether we have this signature or not # type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type', # 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value', # 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list', # 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]] lagging_real_count=0 leading_real_count = 0 if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values.size>0): lagging_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values[0] if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values.size>0): leading_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values[0] if (lagging_real_count+leading_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD): replicationRatiosDict[signature] = np.log10(lagging_real_count/leading_real_count) ################################################################################################# if (transcriptionRatiosDict and replicationRatiosDict): signaturesShownInLegend = [] for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique(): if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())): signaturesShownInLegend.append(signature) plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature) legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend, bbox_to_anchor=(-0.0095, 1.0095)) legend.get_frame().set_linewidth(1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('black') plt.axvline(x=0.0, color='gray', linestyle='--') plt.axhline(y=0.0, color='gray', linestyle='--') if sample is None: figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS) figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName) else: figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (signatureType, sample, numberofMutations, STRANDBIAS) os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True) figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName) fig.savefig(figureFile) plt.cla() plt.close(fig) ######################################################################## ######################################################################## #May 9, 2018 starts #For Signatures def plot_ncomms11383_Supp_FigH_AllSignatures_TranscriptionLog10Ratio_ReplicationLog10Ratio( signatureType, sample, numberofMutations, signature2TranscriptionStrand2CountDict, signature2ReplicationStrand2CountDict, signature_cutoff_numberofmutations_averageprobability_df, outputDir, jobname): fig = plt.figure(figsize=(8,8), facecolor=None) plt.style.use('ggplot') # build a rectangle in axes coords left, 
width = .0, 1. bottom, height = .0, 1. right = left + width top = bottom + height # This code makes the background white. # Always put these statements after plt.figure ax = plt.gca() ax.set_facecolor('white') for edge_i in ['bottom','top','left','right']: ax.spines[edge_i].set_edgecolor("black") ax.spines[edge_i].set_linewidth(1) ax.spines[edge_i].set_bounds(-0.3, 0.3) plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes) plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes) plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes) plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) if (sample is not None): plt.title(sample, fontsize=15, fontweight='bold') plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold') plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold') # Put some extra place by xlim if necessary plt.xlim(-0.3, 0.3) plt.ylim(-0.3, 0.3) plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10) yticks = [-0.2, -0.1, 0.0, 0.1, 0.2] yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.yticks(yticks, yticklabels) xticks = [-0.2, -0.1, 0.0, 0.1, 0.2] xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2'] plt.xticks(xticks, xticklabels) transcriptionRatiosDict = {} replicationRatiosDict = {} for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique(): ################################################################################################# #First check whether we have this signature or not if ((signature in signature2TranscriptionStrand2CountDict) and (TRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) and (UNTRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) ): if ((signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]+signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND]) >= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD): transcriptionRatiosDict[signature]= np.log10(signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]/signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND]) ################################################################################################# ################################################################################################# # First check whether we have this signature or not if ((signature in signature2ReplicationStrand2CountDict) and (LAGGING in (signature2ReplicationStrand2CountDict[signature])) and (LEADING in (signature2ReplicationStrand2CountDict[signature]))): if ((signature2ReplicationStrand2CountDict[signature][LAGGING]+signature2ReplicationStrand2CountDict[signature][LEADING])>= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD): replicationRatiosDict[signature] = np.log10(signature2ReplicationStrand2CountDict[signature][LAGGING]/signature2ReplicationStrand2CountDict[signature][LEADING]) 
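################################################################################
# Illustrative sketch (standalone, hypothetical numbers): a signature is placed on
# the scatter plot above only when its combined strand count reaches a minimum
# number of mutations, and its coordinate is log10(strand1 / strand2). The default
# threshold below and the explicit zero-denominator guard are assumptions made for
# this sketch, not necessarily the package's exact behaviour.
import numpy as np

def _sketch_log10_strand_ratio(strand1_count, strand2_count, min_total_mutations=1000):
    # Require enough mutations overall and a usable denominator before computing
    # a ratio; otherwise signal that the point should not be plotted.
    if (strand1_count + strand2_count) >= min_total_mutations and strand2_count > 0:
        return np.log10(strand1_count / strand2_count)
    return None

# _sketch_log10_strand_ratio(1200, 800) is about 0.176
################################################################################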
################################################################################################# if (transcriptionRatiosDict and replicationRatiosDict): signaturesShownInLegend = [] for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique(): if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())): signaturesShownInLegend.append(signature) plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature) legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend, bbox_to_anchor=(-0.0095, 1.0095)) legend.get_frame().set_linewidth(1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('black') plt.axvline(x=0.0, color='gray', linestyle='--') plt.axhline(y=0.0, color='gray', linestyle='--') if sample is None: figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS) figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName) else: figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % ( signatureType, sample, numberofMutations, STRANDBIAS) os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True) figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName) fig.savefig(figureFile) plt.cla() plt.close(fig) ######################################################################## ######################################################################## #MutationTypeBased SampleBased Figures def plot_ncomms11383_Supp_FigE_MutationTypeBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio( type2Sample2TranscriptionStrand2CountDict, type2Sample2ReplicationStrand2CountDict, outputDir, jobname, isFigureAugmentation): mutationType2ColorDict = {'C>A': 'blue', 'C>G':'black', 'C>T':'red', 'T>A':'gray', 'T>C':'green', 'T>G':'pink'} transcriptionRatiosDict = {} replicationRatiosDict = {} for mutationType in six_mutation_types: #initialization if mutationType not in transcriptionRatiosDict: transcriptionRatiosDict[mutationType] = {} if mutationType not in replicationRatiosDict: replicationRatiosDict[mutationType] = {} #Fill the dictionaries if mutationType in type2Sample2TranscriptionStrand2CountDict: for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys(): if ((TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys()) and (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys())): transcriptionRatiosDict[mutationType][sample]= np.log10(type2Sample2TranscriptionStrand2CountDict[mutationType][sample][TRANSCRIBED_STRAND]/type2Sample2TranscriptionStrand2CountDict[mutationType][sample][UNTRANSCRIBED_STRAND]) if mutationType in type2Sample2ReplicationStrand2CountDict: for sample in type2Sample2ReplicationStrand2CountDict[mutationType].keys(): if ((LAGGING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys()) and (LEADING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys())): replicationRatiosDict[mutationType][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[mutationType][sample][LAGGING]/type2Sample2ReplicationStrand2CountDict[mutationType][sample][LEADING]) for mutationType in six_mutation_types: fig = plt.figure(figsize=(8, 8), facecolor=None) plt.style.use('ggplot') # build a rectangle in axes coords left, width = .0, 1. 
bottom, height = .0, 1. right = left + width top = bottom + height # This code makes the background white. # Always put these statements after plt.figure ax = plt.gca() ax.set_facecolor('white') for edge_i in ['bottom', 'top', 'left', 'right']: ax.spines[edge_i].set_edgecolor("black") ax.spines[edge_i].set_linewidth(1) ax.spines[edge_i].set_bounds(-0.65, 0.65) plt.title(mutationType, fontsize=15, fontweight='bold') plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes) plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes) plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold') plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold') # Put some extra place by xlim if necessary plt.xlim(-0.65, 0.65) plt.ylim(-0.65, 0.65) plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10) yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6] yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6'] plt.yticks(yticks, yticklabels) xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6] xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6'] plt.xticks(xticks, xticklabels) if (mutationType in type2Sample2TranscriptionStrand2CountDict): for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys(): if ((sample in replicationRatiosDict[mutationType].keys()) and (sample in transcriptionRatiosDict[mutationType].keys())): plt.scatter(replicationRatiosDict[mutationType][sample],transcriptionRatiosDict[mutationType][sample], facecolor='none', color=mutationType2ColorDict[mutationType]) plt.axvline(x=0.0, color='gray', linestyle='--') plt.axhline(y=0.0, color='gray', linestyle='--') if (isFigureAugmentation): plt.title(jobname + ' ' + mutationType) newMutationType = mutationType.replace('>', '2') figureName = newMutationType + '_MutationType_' + STRANDBIAS + '.png' figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName) fig.savefig(figureFile) plt.cla() plt.close(fig) ######################################################################## ######################################################################## #SignatureBased SampleBased Figures #Sig26 is very different def plot_ncomms11383_Supp_FigF_SignatureBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(type2Sample2TranscriptionStrand2CountDict,type2Sample2ReplicationStrand2CountDict,signatures,outputDir,jobname,isFigureAugmentation): transcriptionRatiosDict = {} replicationRatiosDict = {} for signature in signatures: # initialization if signature not in transcriptionRatiosDict: transcriptionRatiosDict[signature] = {} if signature not in replicationRatiosDict: replicationRatiosDict[signature] = {} # Fill the dictionaries if signature in type2Sample2TranscriptionStrand2CountDict: for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys(): if 
(UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]) and (TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]): transcriptionRatiosDict[signature][sample] = np.log10(type2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND] /type2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND]) # print(signature, sample) # print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND]) # print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND]) # print(signature,sample,transcriptionRatiosDict[signature][sample]) if signature in type2Sample2ReplicationStrand2CountDict: for sample in type2Sample2ReplicationStrand2CountDict[signature].keys(): if (LAGGING in type2Sample2ReplicationStrand2CountDict[signature][sample]) and (LEADING in type2Sample2ReplicationStrand2CountDict[signature][sample]): replicationRatiosDict[signature][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[signature][sample][LAGGING] /type2Sample2ReplicationStrand2CountDict[signature][sample][LEADING]) for signature in signatures: if (len(replicationRatiosDict[signature].keys())>0 and len(transcriptionRatiosDict[signature].keys())>0): fig = plt.figure(figsize=(8, 8), facecolor=None) plt.style.use('ggplot') # build a rectangle in axes coords left, width = .0, 1. bottom, height = .0, 1. right = left + width top = bottom + height # This code makes the background white. # Always put these statements after plt.figure ax = plt.gca() ax.set_facecolor('white') for edge_i in ['bottom', 'top', 'left', 'right']: ax.spines[edge_i].set_edgecolor("black") ax.spines[edge_i].set_linewidth(1) ax.spines[edge_i].set_bounds(-0.65, 0.65) plt.title(signature, fontsize=15, fontweight='bold') plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes) plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes) plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes) plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold') plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold') # Put some extra place by xlim if necessary plt.xlim(-0.65, 0.65) plt.ylim(-0.65, 0.65) plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10) plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10) yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6] yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6'] plt.yticks(yticks, yticklabels) xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6] xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6'] plt.xticks(xticks, xticklabels) for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys(): if (sample in replicationRatiosDict[signature]) and (sample in transcriptionRatiosDict[signature]): plt.scatter(replicationRatiosDict[signature][sample], transcriptionRatiosDict[signature][sample],facecolor='none',color='green') 
plt.axvline(x=0.0, color='gray', linestyle='--') plt.axhline(y=0.0, color='gray', linestyle='--') if (isFigureAugmentation): plt.title(jobname + ' ' + signature) figureName = signature.replace(' ','') + '_Signature_' + STRANDBIAS + '.png' figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName) fig.savefig(figureFile) plt.cla() plt.close(fig) ######################################################################## def is_there_at_least_10perc_diff(strand1_value, strand2_value): diff = abs(strand1_value - strand2_value) if (diff >= (strand1_value/10)) or (diff >= (strand2_value/10)): return True else: return False # Only this method supports simulations # key can be a sample or a signature def plotStrandBiasFigureWithBarPlots(outputDir, jobname, numberofSimulations, key, isKeySample, numberofMutations, N, x_axis_labels, strand1_values, strand2_values, strand1_simulations_median_values, strand2_simulations_median_values, fdr_bh_adjusted_pvalues, strand1Name, strand2Name, mutationsOrSignatures, color1, color2, figureName, width, plot_mode): # Here we can take into difference between strand1_values and strand2_values while deciding on significance from matplotlib import rcParams rcParams.update({'figure.autolayout': True}) # the x locations for the groups ind = np.arange(N) fig, ax = plt.subplots(figsize=(16,10),dpi=300) legend=None rects1=None rects2=None rects3=None rects4=None rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1) rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2) if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values): rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///') if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values): rects4 = ax.bar(ind +3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///') # add some text for labels, title and axes ticks if plot_mode==PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL: ax.tick_params(axis='x', labelsize=35) ax.tick_params(axis='y', labelsize=35) locs, labels = plt.yticks() ax.set_ylim(0, locs[-1] + 5000) # To make the bar width not too wide if len(ind) < 6: maxn = 6 ax.set_xlim(-0.5, maxn - 0.5) # Set title if key is not None: ax.set_title('%s %s vs. %s %s' %(key,strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold') else: ax.set_title('%s vs. 
%s %s' %(strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold') # Set x tick labels if len(x_axis_labels) > 6: ax.set_xticklabels(x_axis_labels, fontsize=35, rotation=90) else: ax.set_xticklabels(x_axis_labels, fontsize=35) # Set the ylabel plt.ylabel('Number of single base substitutions', fontsize=35, fontweight='normal') # set the x axis tick locations if (numberofSimulations > 0): ax.set_xticks(ind + (3 * width) / 2) realStrand1Name = 'Real %s' % (strand1Name) realStrand2Name = 'Real %s' % (strand2Name) simulationsStrand1Name = 'Simulated %s' % (strand1Name) simulationsStrand2Name = 'Simulated %s' % (strand2Name) if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)): if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)): legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best') else: # Old way with no simulations ax.set_xticks(ind + width / 2) if ((rects1 is not None) and (rects2 is not None)): if ((len(rects1) > 0) and (len(rects2) > 0)): legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right') elif plot_mode == PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT: # set axis ticks # ax.tick_params(axis='both', which='both', length=0) ax.tick_params(axis='x', which='both', length=0) ax.tick_params(axis='y', which='both', length=0) # set axis labels plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) if (numberofSimulations > 0): realStrand1Name = 'Real %s' % (strand1Name) realStrand2Name = 'Real %s' % (strand2Name) simulationsStrand1Name = 'Simulated %s' % (strand1Name) simulationsStrand2Name = 'Simulated %s' % (strand2Name) if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)): if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)): legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 30}, ncol=1, loc='best') else: if ((rects1 is not None) and (rects2 is not None)): if ((len(rects1) > 0) and (len(rects2) > 0)): legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 35},loc='upper right') # To make the barplot background white ax.set_facecolor('white') # To makes spines black like a rectangle with black stroke ax.spines["bottom"].set_color('black') ax.spines["left"].set_color('black') ax.spines["top"].set_color('black') ax.spines["right"].set_color('black') if (legend is not None): frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('black') # Add star above the bars for significant differences between the number of mutations on each strand starts # For each bar: Place a label if fdr_bh_adjusted_pvalues is not None: for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2): # Get X and Y placement of label from rect. y_value = max(rect1.get_height(),rect2.get_height()) x_value = rect1.get_x() + rect1.get_width() # Number of points between bar and label. Change to your liking. 
space = 3 # Vertical alignment for positive values va = 'bottom' # If value of bar is negative: Place label below bar if y_value < 0: # Invert space to place label below space *= -1 # Vertically align label at top va = 'top' # Use Y value as label and format number with one decimal place label = "{:.1f}".format(y_value) # Create annotation if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))): plt.annotate( '***', # Use `label` as label (x_value, y_value), # Place label at end of the bar xytext=(0, space), # Vertically shift label by `space` textcoords="offset points", # Interpret `xytext` as offset in points ha='center', # Horizontally center label va=va, fontsize=20) # Vertically align label differently for elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))): plt.annotate( '**', # Use `label` as label (x_value, y_value), # Place label at end of the bar xytext=(0, space), # Vertically shift label by `space` textcoords="offset points", # Interpret `xytext` as offset in points ha='center', # Horizontally center label va=va, fontsize=20) # Vertically align label differently for elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))): plt.annotate( '*', # Use `label` as label (x_value, y_value), # Place label at end of the bar xytext=(0, space), # Vertically shift label by `space` textcoords="offset points", # Interpret `xytext` as offset in points ha='center', # Horizontally center label va=va, fontsize=20) # Vertically align label differently for # positive and negative values. 
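################################################################################
# Illustrative sketch (standalone, hypothetical counts and q-values): condenses the
# annotation logic above into two helpers. The star level comes from the BH-adjusted
# q-value (thresholds 0.0001, 0.001 and SIGNIFICANCE_LEVEL = 0.05, as above) and is
# only shown when the two strands differ by at least roughly 10%. Placement in this
# sketch centres the star over the taller bar; figure size and data are made up.
import matplotlib.pyplot as plt

def _sketch_significance_star(q_value, strand1_value, strand2_value):
    diff = abs(strand1_value - strand2_value)
    at_least_10perc_diff = (diff >= strand1_value / 10) or (diff >= strand2_value / 10)
    if (q_value is None) or (not at_least_10perc_diff):
        return None
    if q_value <= 0.0001:
        return '***'
    if q_value <= 0.001:
        return '**'
    if q_value <= 0.05:
        return '*'
    return None

def _sketch_annotated_bar_pair():
    fig, ax = plt.subplots()
    rects = ax.bar([0, 0.35], [1200, 800], width=0.35, color=['indianred', 'goldenrod'])
    star = _sketch_significance_star(0.0005, 1200, 800)  # '**' for this toy example
    if star is not None:
        tallest = max(rects, key=lambda rect: rect.get_height())
        ax.annotate(star,
                    (tallest.get_x() + tallest.get_width() / 2, tallest.get_height()),
                    xytext=(0, 3),                 # a few points above the bar
                    textcoords="offset points",
                    ha='center', va='bottom')
    plt.close(fig)
################################################################################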
# Add star above the bars for significant differences between the number of mutations on each strand ends ######################################################################################################### if (key is None): figureName = '%s_bar_plot.png' %(figureName) figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName) elif (not isKeySample): figureName = '%s_%s_bar_plot.png' %(key,figureName) figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName) else: figureName = '%s_%s_%d_bar_plot.png' %(figureName,key,numberofMutations) os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS), exist_ok=True) figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS, figureName) fig.savefig(figureFile) plt.cla() plt.close(fig) # June 2, 2021 def plot_circle_plot_in_given_axis(ax, percentage_strings, sbs_signature, six_mutation_types, xticklabels_list, signature2mutation_type2strand2percentagedict): strand_bias_list=[LAGGING_VERSUS_LEADING, TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC] # make aspect ratio square ax.set_aspect(1.0) # set title title = '%s Strand Bias' %(sbs_signature) ax.text(len(percentage_strings) * 3, len(strand_bias_list) + 2.5, title, horizontalalignment='center',fontsize=60, fontweight='bold', fontname='Arial') # Colors are from SigProfilerPlotting tool to be consistent colors = [[3 / 256, 189 / 256, 239 / 256], [1 / 256, 1 / 256, 1 / 256], [228 / 256, 41 / 256, 38 / 256], [203 / 256, 202 / 256, 202 / 256], [162 / 256, 207 / 256, 99 / 256], [236 / 256, 199 / 256, 197 / 256]] # Put rectangles x = 0 for i in range(0, len(six_mutation_types), 1): ax.text((x + (len(percentage_strings) / 2) - 0.75), len(strand_bias_list) + 1.5, six_mutation_types[i],fontsize=55, fontweight='bold', fontname='Arial') ax.add_patch(plt.Rectangle((x + .0415, len(strand_bias_list) + 0.75), len(percentage_strings) - (2 * .0415), .5,facecolor=colors[i], clip_on=False)) ax.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(strand_bias_list), facecolor=colors[i], zorder=0,alpha=0.25, edgecolor='grey')) x += len(percentage_strings) # CODE GOES HERE TO CENTER X-AXIS LABELS... ax.set_xlim([0, len(six_mutation_types) * len(percentage_strings)]) ax.set_xticklabels([]) ax.tick_params(axis='x', which='minor', length=0, labelsize=35) # major ticks ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1)) # minor ticks ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1) + 0.5, minor=True) ax.set_xticklabels(xticklabels_list, minor=True) ax.xaxis.set_label_position('top') ax.xaxis.set_ticks_position('top') ax.tick_params( axis='x', # changes apply to the x-axis which='major', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False) # labels along the bottom edge are off # CODE GOES HERE TO CENTER Y-AXIS LABELS... 
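################################################################################
# Illustrative sketch (standalone): the strand bias circle panel above is built on a
# grid whose columns enumerate mutation type / percentage cutoff cells and whose rows
# are the three strand comparisons; a filled circle of radius 0.49 marks a cell whose
# strand reaches the cutoff. The 3x2 grid, the filled cells and the colours below are
# hypothetical and only demonstrate the equal-aspect plus Circle-patch technique.
import matplotlib.pyplot as plt

def _sketch_circle_grid():
    fig, ax = plt.subplots()
    ax.set_aspect(1.0)          # keep the circles round
    ax.set_xlim(0, 3)
    ax.set_ylim(0, 2)
    filled_cells = [(0, 0, 'indianred'), (2, 1, 'royalblue')]  # (column, row, colour)
    for column, row, colour in filled_cells:
        # centre each circle inside its grid cell
        ax.add_patch(plt.Circle((column + 0.5, row + 0.5), 0.49, color=colour, fill=True))
    ax.grid(which='major', color='black')
    plt.close(fig)
################################################################################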
ax.set_ylim([0, len(strand_bias_list)]) ax.set_yticklabels([]) ax.tick_params(axis='y', which='minor', length=0, labelsize=40) # major ticks ax.set_yticks(np.arange(0, len(strand_bias_list), 1)) # minor ticks ax.set_yticks(np.arange(0, len(strand_bias_list), 1) + 0.5, minor=True) ax.set_yticklabels(['', sbs_signature,''], minor=True) # fontsize ax.tick_params( axis='y', # changes apply to the x-axis which='major', # both major and minor ticks are affected left=False) # labels along the bottom edge are off # Gridlines based on major ticks ax.grid(which='major', color='black', zorder=3) # Put the legend legend_elements = [ Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan', markersize=40), Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray', markersize=40), Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue',markersize=40), Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40), Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40), Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)] legend = ax.legend(handles=legend_elements, ncol=len(legend_elements), bbox_to_anchor=(0.5, 0), loc='upper center',fontsize=40) # legend.get_frame().set_linewidth(1) frame = legend.get_frame() frame.set_facecolor('white') frame.set_edgecolor('black') for percentage_diff_index, percentage_string in enumerate(percentage_strings): for mutation_type_index, mutation_type in enumerate(six_mutation_types): # for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures): # strand_bias_list = [TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING] for strand_bias_index, strand_bias in enumerate(strand_bias_list): if (strand_bias == LAGGING_VERSUS_LEADING): if sbs_signature in signature2mutation_type2strand2percentagedict: if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]: lagging_percentage = None leading_percentage = None if (LAGGING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and ( signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LAGGING][percentage_string] == 1): lagging_percentage = 100 if (LEADING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and ( signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LEADING][percentage_string] == 1): leading_percentage = 100 if (lagging_percentage is not None) and (leading_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string)) ax.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,strand_bias_index + 0.5), radius, color='indianred', fill=True)) elif (leading_percentage is not None) and (lagging_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string)) ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius, 
color='goldenrod', fill=True)) elif (lagging_percentage is not None) and (leading_percentage is not None): radius_lagging = 0.49 radius_leading = 0.49 if (radius_lagging > radius_leading): # First lagging ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_lagging, color='indianred', fill=True)) # Second leading ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_leading, color='goldenrod', fill=True)) else: # First leading ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_leading, color='goldenrod', fill=True)) # Second lagging ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_lagging, color='indianred', fill=True)) elif (strand_bias == GENIC_VERSUS_INTERGENIC): if sbs_signature in signature2mutation_type2strand2percentagedict: if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]: genic_percentage = None intergenic_percentage = None if (GENIC in signature2mutation_type2strand2percentagedict[sbs_signature][ mutation_type]) and ( signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][ GENIC][percentage_string] == 1): genic_percentage = 100 if (INTERGENIC in signature2mutation_type2strand2percentagedict[sbs_signature][ mutation_type]) and ( signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][ INTERGENIC][percentage_string] == 1): intergenic_percentage = 100 if (genic_percentage is not None) and (intergenic_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius, color='cyan', fill=True)) elif (intergenic_percentage is not None) and (genic_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius, color='gray', fill=True)) elif (genic_percentage is not None) and (intergenic_percentage is not None): radius_genic = 0.49 radius_intergenic = 0.49 if (radius_genic > radius_intergenic): # First genic ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_genic, color='cyan', fill=True)) # Second intergenic ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_intergenic, color='gray', fill=True)) else: # First intergenic ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_intergenic, color='gray', fill=True)) # Second genic ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_genic, color='cyan', fill=True)) elif 
(strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED): if sbs_signature in signature2mutation_type2strand2percentagedict: if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]: transcribed_percentage = None untranscribed_percentage = None if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[sbs_signature][ mutation_type]) and ( signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][ TRANSCRIBED_STRAND][percentage_string] == 1): transcribed_percentage = 100 if (UNTRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and ( signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][ UNTRANSCRIBED_STRAND][percentage_string] == 1): untranscribed_percentage = 100 if (transcribed_percentage is not None) and (untranscribed_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius, color='royalblue', fill=True)) elif (untranscribed_percentage is not None) and (transcribed_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius, color='yellowgreen', fill=True)) elif (transcribed_percentage is not None) and (untranscribed_percentage is not None): radius_transcribed = 0.49 radius_untranscribed = 0.49 if (radius_transcribed > radius_untranscribed): # First transcribed ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_transcribed, color='royalblue', fill=True)) # Second untranscribed ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)) else: # First untranscribed ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)) # Second transcribed ax.add_patch(plt.Circle((mutation_type_index * len( percentage_strings) + percentage_diff_index + 0.5, strand_bias_index + 0.5), radius_transcribed, color='royalblue', fill=True)) # June 2, 2021 def plot_strand_bias_figure_with_bar_plots(strand_bias, strandbias_figures_outputDir, numberofSimulations, signature, N, x_axis_tick_labels, y_axis_label, strand1_values, strand2_values, strand1_simulations_median_values, strand2_simulations_median_values, fdr_bh_adjusted_pvalues, strand1Name, strand2Name, color1, color2, width, axis_given=None): # Here we can take into difference between strand1_values and strand2_values while deciding on significance # the x locations for the groups ind = np.arange(N) if axis_given == None: fig, ax = plt.subplots(figsize=(16,10),dpi=100) else: ax = axis_given legend = None rects3 = None rects4 = None rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1) rects2 = ax.bar(ind + width, strand2_values, 
                    width=width, edgecolor='black', color=color2)

    if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
        rects3 = ax.bar(ind + 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
    if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
        rects4 = ax.bar(ind + 3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')

    # add some text for labels, title and axes ticks
    ax.tick_params(axis='x', labelsize=35)
    ax.tick_params(axis='y', labelsize=35)

    ymax = np.nanmax([np.nanmax(strand1_values),
                      np.nanmax(strand2_values),
                      np.nanmax(strand1_simulations_median_values),
                      np.nanmax(strand2_simulations_median_values)])
    y = ymax / 1.025
    ytick_offest = float(y / 3)

    ylabs = [0, ytick_offest, ytick_offest * 2, ytick_offest * 3, ytick_offest * 4]
    ylabels = [0, ytick_offest, ytick_offest * 2, ytick_offest * 3, ytick_offest * 4]
    ylabels = ['{:,}'.format(int(x)) for x in ylabels]

    if len(ylabels[-1]) > 3:
        ylabels_temp = []
        if len(ylabels[-1]) > 7:
            for label in ylabels:
                if len(label) > 7:
                    ylabels_temp.append(label[0:-8] + "m")
                elif len(label) > 3:
                    ylabels_temp.append(label[0:-4] + "k")
                else:
                    ylabels_temp.append(label)
        else:
            for label in ylabels:
                if len(label) > 3:
                    ylabels_temp.append(label[0:-4] + "k")
                else:
                    ylabels_temp.append(label)
        ylabels = ylabels_temp

    ax.set_ylim([0, y])
    ax.set_yticks(ylabs)
    ax.set_yticklabels(ylabels, fontsize=35, fontweight='bold', fontname='Arial')

    # To make the bar width not too wide
    if len(ind) < 6:
        maxn = 6
        ax.set_xlim(-0.5, maxn - 0.5)

    # Set title
    ax.set_title('%s vs. %s' %(strand1Name,strand2Name), fontsize=40, fontweight='bold')

    # Set x tick labels
    if len(x_axis_tick_labels) > 6:
        ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
    else:
        ax.set_xticklabels(x_axis_tick_labels, fontsize=35)

    # Set the ylabel
    if y_axis_label:
        ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)

    # Set the x axis tick locations
    if (numberofSimulations > 0):
        ax.set_xticks(ind + (3 * width) / 2)
        realStrand1Name = 'Real %s' % (strand1Name)
        realStrand2Name = 'Real %s' % (strand2Name)
        simulationsStrand1Name = 'Simulated %s' % (strand1Name)
        simulationsStrand2Name = 'Simulated %s' % (strand2Name)
        if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
            if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
                legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),
                                   (realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),
                                   prop={'size': 25}, ncol=1, loc='best')
    else:
        # Old way with no simulations
        ax.set_xticks(ind + width / 2)
        if ((rects1 is not None) and (rects2 is not None)):
            if ((len(rects1) > 0) and (len(rects2) > 0)):
                legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')

    # To make the barplot background white
    ax.set_facecolor('white')
    # To makes spines black like a rectangle with black stroke
    ax.spines["bottom"].set_color('black')
    ax.spines["left"].set_color('black')
    ax.spines["top"].set_color('black')
    ax.spines["right"].set_color('black')

    if (legend is not None):
        frame = legend.get_frame()
        frame.set_facecolor('white')
        frame.set_edgecolor('black')

    # Add star above the bars for significant differences between the number of mutations on each strand starts
    # For each bar: Place a label
    if fdr_bh_adjusted_pvalues is not None:
        for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
            # Get X and Y placement of label from rect.
            y_value = max(rect1.get_height(), rect2.get_height())
            x_value = rect1.get_x() + rect1.get_width()

            # Number of points between bar and label. Change to your liking.
            space = 3
            # Vertical alignment for positive values
            va = 'bottom'

            # If value of bar is negative: Place label below bar
            if y_value < 0:
                # Invert space to place label below
                space *= -1
                # Vertically align label at top
                va = 'top'

            # Use Y value as label and format number with one decimal place
            label = "{:.1f}".format(y_value)

            # Create annotation
            if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
                ax.annotate(
                    '***',                      # Use `label` as label
                    (x_value, y_value),         # Place label at end of the bar
                    xytext=(0, space),          # Vertically shift label by `space`
                    textcoords="offset points", # Interpret `xytext` as offset in points
                    ha='center',                # Horizontally center label
                    va=va,
                    fontsize=25)                # Vertically align label differently for
            elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
                ax.annotate(
                    '**',                       # Use `label` as label
                    (x_value, y_value),         # Place label at end of the bar
                    xytext=(0, space),          # Vertically shift label by `space`
                    textcoords="offset points", # Interpret `xytext` as offset in points
                    ha='center',                # Horizontally center label
                    va=va,
                    fontsize=25)                # Vertically align label differently for
            elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
                ax.annotate(
                    '*',                        # Use `label` as label
                    (x_value, y_value),         # Place label at end of the bar
                    xytext=(0, space),          # Vertically shift label by `space`
                    textcoords="offset points", # Interpret `xytext` as offset in points
                    ha='center',                # Horizontally center label
                    va=va,
                    fontsize=25)                # Vertically align label differently for

    if axis_given == None:
        filename = '%s_%s_with_bars.png' %(signature,strand_bias)
        figFile = os.path.join(strandbias_figures_outputDir, filename)
        fig.savefig(figFile, dpi=100, bbox_inches="tight")

        plt.cla()
        plt.close(fig)


# June 2, 2021
def plot_bar_plot_in_given_axis(axis,
                                sbs_signature,
                                strand_bias,
                                strands_list,
                                signature_strand1_versus_strand2_df,
                                y_axis_label = None):
    box = axis.get_position()
    axis.set_position([box.x0, box.y0 + 0.125, box.width * 1, box.height * 1], which='both')

    mutation_types = six_mutation_types
    numberofSimulations = 100
    width = 0.20

    if strand_bias == LAGGING_VERSUS_LEADING:
        strands = strands_list
        strand1 = "Lagging_real_count"
        strand2 = "Leading_real_count"
        strand1_sims = "Lagging_mean_sims_count"
        strand2_sims = "Leading_mean_sims_count"
        q_value_column_name = "lagging_versus_leading_q_value"
        color1 = 'indianred'
        color2 = 'goldenrod'
    elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
        strands = strands_list
        strand1 = "Transcribed_real_count"
        strand2 = "UnTranscribed_real_count"
        strand1_sims = "Transcribed_mean_sims_count"
        strand2_sims = "UnTranscribed_mean_sims_count"
        q_value_column_name = "transcribed_versus_untranscribed_q_value"
        color1 = 'royalblue'
        color2 = 'yellowgreen'
    elif strand_bias == GENIC_VERSUS_INTERGENIC:
        strands = strands_list
        strand1 = "genic_real_count"
        strand2 = "intergenic_real_count"
        strand1_sims = "genic_mean_sims_count"
        strand2_sims = "intergenic_mean_sims_count"
        q_value_column_name = "genic_versus_intergenic_q_value"
        color1 = 'cyan'
        color2 = 'gray'

    groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
    group_df = groupby_df.get_group(sbs_signature)

    mutationtype_strand1_real_list = []
    mutationtype_strand2_real_list = []
    mutationtype_strand1_sims_mean_list = []
    mutationtype_strand2_sims_mean_list = []
    mutationtype_FDR_BH_adjusted_pvalues_list = []

    for mutation_type in six_mutation_types:
        strand1_real_count = group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
        strand2_real_count = group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
        strand1_sims_count = group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
        strand2_sims_count = group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
        q_value = group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]

        mutationtype_strand1_real_list.append(strand1_real_count)
        mutationtype_strand2_real_list.append(strand2_real_count)
        mutationtype_strand1_sims_mean_list.append(strand1_sims_count)
        mutationtype_strand2_sims_mean_list.append(strand2_sims_count)
        mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)

    plot_strand_bias_figure_with_bar_plots(strand_bias,
                                           None,
                                           numberofSimulations,
                                           sbs_signature,
                                           len(mutation_types),
                                           mutation_types,
                                           y_axis_label,
                                           mutationtype_strand1_real_list,
                                           mutationtype_strand2_real_list,
                                           mutationtype_strand1_sims_mean_list,
                                           mutationtype_strand2_sims_mean_list,
                                           mutationtype_FDR_BH_adjusted_pvalues_list,
                                           strands[0],
                                           strands[1],
                                           color1,
                                           color2,
                                           width,
                                           axis_given = axis)


# June 2, 2021
def plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
                                                   strandbias_figures_outputDir,
                                                   numberofSimulations,
                                                   signature,
                                                   N,
                                                   x_axis_tick_labels,
                                                   y_axis_label,
                                                   strand1_values,
                                                   strand2_values,
                                                   strand1_simulations_median_values,
                                                   strand2_simulations_median_values,
                                                   fdr_bh_adjusted_pvalues,
                                                   strand1Name,
                                                   strand2Name,
                                                   color1,
                                                   color2,
                                                   width,
                                                   axis_given=None):

    # Replace np.nans with 0
    strand1_values = [0 if np.isnan(x) else x for x in strand1_values]
    strand2_values = [0 if np.isnan(x) else x for x in strand2_values]
    strand1_simulations_median_values = [0 if np.isnan(x) else x for x in strand1_simulations_median_values]
    strand2_simulations_median_values = [0 if np.isnan(x) else x for x in strand2_simulations_median_values]

    # Fill odds_ratio_list
    odds_real_list = []
    odds_sims_list = []

    for a, b in zip(strand1_values, strand2_values):
        odds_real = np.nan
        if b > 0:
            odds_real = a/b
        odds_real_list.append(odds_real)

    for x, y in zip(strand1_simulations_median_values, strand2_simulations_median_values):
        odds_sims = np.nan
        if y > 0:
            odds_sims = x/y
        odds_sims_list.append(odds_sims)

    odds_ratio_list = [odds_real/odds_sims if odds_sims > 0 else np.nan for (odds_real, odds_sims) in zip(odds_real_list, odds_sims_list)]

    # Here we can take into difference between strand1_values and strand2_values while deciding on significance
    # the x locations for the groups
    ind = np.arange(N)

    if axis_given == None:
        fig, ax = plt.subplots(figsize=(16,10), dpi=100)
    else:
        ax = axis_given

    legend = None

    rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
    rects2 = ax.bar(ind, strand2_values, width=width, edgecolor='black', color=color2, bottom=strand1_values)

    if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
        ax.bar(ind + width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
    if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
        ax.bar(ind + width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///', bottom=strand1_simulations_median_values)

    # Add some text for labels, title and axes ticks
    ax.tick_params(axis='x', labelsize=35)
    ax.tick_params(axis='y', labelsize=35)

    ax.set_ylim(0, 1.1)
    ax.set_yticklabels([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=35)

    # To make the bar width not too wide
    if len(ind) < 6:
        maxn = 6
        ax.set_xlim(-0.5, maxn - 0.5)

    # Set title
    stacked_bar_title = 'Real vs. Simulated\nOdds Ratio of %s vs. %s' %(strand1Name, strand2Name)
    ax.set_title(stacked_bar_title, fontsize=40, fontweight='bold')

    # Set x tick labels
    if len(x_axis_tick_labels) > 6:
        ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
    else:
        ax.set_xticklabels(x_axis_tick_labels, fontsize=35)

    # Set the ylabel
    if y_axis_label:
        ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)

    # Set the x axis tick locations
    if (numberofSimulations > 0):
        ax.set_xticks(ind + (width/2))
    else:
        # Old way with no simulations
        ax.set_xticks(ind + width / 2)

    # To make the barplot background white
    ax.set_facecolor('white')
    # To makes spines black like a rectangle with black stroke
    ax.spines["bottom"].set_color('black')
    ax.spines["left"].set_color('black')
    ax.spines["top"].set_color('black')
    ax.spines["right"].set_color('black')

    if (legend is not None):
        frame = legend.get_frame()
        frame.set_facecolor('white')
        frame.set_edgecolor('black')

    # Add star above the bars for significant differences between the number of mutations on each strand starts
    # For each bar: Place a label
    if odds_ratio_list is not None:
        for odds_ratio, fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(odds_ratio_list, fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
            # Get X and Y placement of label from rect.
            # y_value = max(rect1.get_height(),rect2.get_height())
            y_value = rect1.get_height() + rect2.get_height()
            x_value = rect1.get_x() + rect1.get_width()

            # Number of points between bar and label. Change to your liking.
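            # Note on the annotations built below: '***' marks a BH-adjusted q-value <= 0.0001,
            # '**' a q-value <= 0.001, and '*' a q-value <= SIGNIFICANCE_LEVEL, each shown only when
            # is_there_at_least_10perc_diff() reports at least a 10% difference between the two strands;
            # the number printed alongside is the real-to-simulated odds ratio computed above.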
            space = 3
            # Vertical alignment for positive values
            va = 'bottom'

            # If value of bar is negative: Place label below bar
            if y_value < 0:
                # Invert space to place label below
                space *= -1
                # Vertically align label at top
                va = 'top'

            # Use Y value as label and format number with one decimal place
            label = "{:.1f}".format(y_value)

            # Create annotation
            if not np.isnan(odds_ratio):
                if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
                    ax.annotate(
                        '%.2f ***' %(odds_ratio),   # Use `label` as label
                        (x_value, y_value),         # Place label at end of the bar
                        xytext=(0, space),          # Vertically shift label by `space`
                        textcoords="offset points", # Interpret `xytext` as offset in points
                        ha='center',                # Horizontally center label
                        va=va,
                        fontsize=25)                # Vertically align label differently for
                elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
                    ax.annotate(
                        '%.2f **' %(odds_ratio),    # Use `label` as label
                        (x_value, y_value),         # Place label at end of the bar
                        xytext=(0, space),          # Vertically shift label by `space`
                        textcoords="offset points", # Interpret `xytext` as offset in points
                        ha='center',                # Horizontally center label
                        va=va,
                        fontsize=25)                # Vertically align label differently for
                elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):
                    ax.annotate(
                        '%.2f *' %(odds_ratio),     # Use `label` as label
                        (x_value, y_value),         # Place label at end of the bar
                        xytext=(0, space),          # Vertically shift label by `space`
                        textcoords="offset points", # Interpret `xytext` as offset in points
                        ha='center',                # Horizontally center label
                        va=va,
                        fontsize=25)                # Vertically align label differently for
                else:
                    ax.annotate(
                        '%.2f' %(odds_ratio),       # Use `label` as label
                        (x_value, y_value),         # Place label at end of the bar
                        xytext=(0, space),          # Vertically shift label by `space`
                        textcoords="offset points", # Interpret `xytext` as offset in points
                        ha='center',                # Horizontally center label
                        va=va,
                        fontsize=25)                # Vertically align label differently for

    if axis_given == None:
        filename = '%s_%s_with_bars.png' %(signature,strand_bias)
        figFile = os.path.join(strandbias_figures_outputDir, filename)
        fig.savefig(figFile, dpi=100, bbox_inches="tight")

        plt.cla()
        plt.close(fig)


# June 2, 2021
def plot_stacked_bar_plot_in_given_axis(axis,
                                        sbs_signature,
                                        strand_bias,
                                        strands_list,
                                        signature_strand1_versus_strand2_df,
                                        y_axis_label = None):
    box = axis.get_position()
    axis.set_position([box.x0, box.y0 + 0.125, box.width * 1, box.height * 1], which='both')

    mutation_types = six_mutation_types
    numberofSimulations = 100
    width = 0.20

    if strand_bias == LAGGING_VERSUS_LEADING:
        strands = strands_list
        strand1 = "Lagging_real_count"
        strand2 = "Leading_real_count"
        strand1_sims = "Lagging_mean_sims_count"
        strand2_sims = "Leading_mean_sims_count"
        q_value_column_name = "lagging_versus_leading_q_value"
        color1 = 'indianred'
        color2 = 'goldenrod'
    elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
        strands = strands_list
        strand1 = "Transcribed_real_count"
        strand2 = "UnTranscribed_real_count"
        strand1_sims = "Transcribed_mean_sims_count"
        strand2_sims = "UnTranscribed_mean_sims_count"
        q_value_column_name = "transcribed_versus_untranscribed_q_value"
        color1 = 'royalblue'
        color2 = 'yellowgreen'
    elif strand_bias == GENIC_VERSUS_INTERGENIC:
        strands = strands_list
        strand1 = "genic_real_count"
        strand2 = "intergenic_real_count"
        strand1_sims = "genic_mean_sims_count"
        strand2_sims = "intergenic_mean_sims_count"
        q_value_column_name = "genic_versus_intergenic_q_value"
        color1 = 'cyan'
        color2 = 'gray'

    groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
    group_df = groupby_df.get_group(sbs_signature)

    mutationtype_strand1_real_list = []
    mutationtype_strand2_real_list = []
    mutationtype_strand1_sims_mean_list = []
    mutationtype_strand2_sims_mean_list = []
    mutationtype_FDR_BH_adjusted_pvalues_list = []

    for mutation_type in six_mutation_types:
        strand1_real_count = group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
        strand2_real_count = group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
        strand1_sims_count = group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
        strand2_sims_count = group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
        q_value = group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]

        mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)

        if (strand1_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
            mutationtype_strand1_real_list.append(strand1_real_count/(strand1_real_count+strand2_real_count))
            mutationtype_strand2_real_list.append(strand2_real_count/(strand1_real_count+strand2_real_count))
        else:
            mutationtype_strand1_real_list.append(np.nan)
            mutationtype_strand2_real_list.append(np.nan)

        if (strand1_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
            mutationtype_strand1_sims_mean_list.append(strand1_sims_count/(strand1_sims_count+strand2_sims_count))
            mutationtype_strand2_sims_mean_list.append(strand2_sims_count/(strand1_sims_count+strand2_sims_count))
        else:
            mutationtype_strand1_sims_mean_list.append(np.nan)
            mutationtype_strand2_sims_mean_list.append(np.nan)

    plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
                                                   None,
                                                   numberofSimulations,
                                                   sbs_signature,
                                                   len(mutation_types),
                                                   mutation_types,
                                                   y_axis_label,
                                                   mutationtype_strand1_real_list,
                                                   mutationtype_strand2_real_list,
                                                   mutationtype_strand1_sims_mean_list,
                                                   mutationtype_strand2_sims_mean_list,
                                                   mutationtype_FDR_BH_adjusted_pvalues_list,
                                                   strands[0],
                                                   strands[1],
                                                   color1,
                                                   color2,
                                                   width,
                                                   axis_given=axis)


def plot_circle_bar_plots_together(outputDir,
                                   jobname,
                                   sbs_signature,
                                   six_mutation_types,
                                   signature2mutation_type2strand2percentagedict,
                                   signature_genic_versus_intergenic_df,
                                   signature_transcribed_versus_untranscribed_df,
                                   signature_lagging_versus_leading_df,
                                   genic_vs_intergenic_strands,
                                   transcription_strands,
                                   replication_strands):

    x_ticklabels_list = percentage_strings * 6

    fig = plt.figure(figsize=(5 + 1.5 * len(x_ticklabels_list), 30 + 1.5))
    plt.rc('axes', edgecolor='lightgray')

    width = 6
    height = 6
    width_ratios = [1] * width
    height_ratios = [1] * height

    gs = gridspec.GridSpec(height, width, height_ratios = height_ratios, width_ratios = width_ratios)
    fig.subplots_adjust(hspace=0, wspace=3)

    cirle_plot_axis = plt.subplot(gs[0:2, :])

    genic_vs_intergenic_bar_plot_axis = plt.subplot(gs[2:4, 0:2])
    transcribed_vs_untranscribed_bar_plot_axis = plt.subplot(gs[2:4, 2:4])
    lagging_vs_leading_bar_plot_axis = plt.subplot(gs[2:4, 4:6])

    genic_vs_intergenic_stacked_bar_plot_axis = plt.subplot(gs[4:, 0:2])
    transcribed_vs_untranscribed_stacked_bar_plot_axis = plt.subplot(gs[4:, 2:4])
    lagging_vs_leading_stacked_bar_plot_axis = plt.subplot(gs[4:, 4:6])

    # Circle plot with legends
plot_circle_plot_in_given_axis(cirle_plot_axis, percentage_strings, sbs_signature, six_mutation_types, x_ticklabels_list, signature2mutation_type2strand2percentagedict) # 3 Bar plots side by side plot_bar_plot_in_given_axis(genic_vs_intergenic_bar_plot_axis, sbs_signature, GENIC_VERSUS_INTERGENIC, genic_vs_intergenic_strands, signature_genic_versus_intergenic_df, y_axis_label = 'Number of Single Base Substitutions') plot_bar_plot_in_given_axis(transcribed_vs_untranscribed_bar_plot_axis, sbs_signature, TRANSCRIBED_VERSUS_UNTRANSCRIBED, transcription_strands, signature_transcribed_versus_untranscribed_df) plot_bar_plot_in_given_axis(lagging_vs_leading_bar_plot_axis, sbs_signature, LAGGING_VERSUS_LEADING, replication_strands, signature_lagging_versus_leading_df) # 3 Stacked Bar plots side by side plot_stacked_bar_plot_in_given_axis(genic_vs_intergenic_stacked_bar_plot_axis, sbs_signature, GENIC_VERSUS_INTERGENIC, genic_vs_intergenic_strands, signature_genic_versus_intergenic_df, y_axis_label = 'Ratio of mutations on each strand') plot_stacked_bar_plot_in_given_axis(transcribed_vs_untranscribed_stacked_bar_plot_axis, sbs_signature, TRANSCRIBED_VERSUS_UNTRANSCRIBED, transcription_strands, signature_transcribed_versus_untranscribed_df) plot_stacked_bar_plot_in_given_axis(lagging_vs_leading_stacked_bar_plot_axis, sbs_signature, LAGGING_VERSUS_LEADING, replication_strands, signature_lagging_versus_leading_df) # filename = '%s_circle_bar_plot_together_%s.png' % (sbs_signature, str(significance_level).replace('.', '_')) filename = '%s_circle_bar_plots.png' % (sbs_signature) figurepath = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, CIRCLE_BAR_PLOTS, filename) fig.savefig(figurepath, dpi=100, bbox_inches="tight") plt.cla() plt.close(fig) # Key can be signature or sample def plotBarPlotsUsingDataframes(outputDir, jobname, numberofSimulations, signature_cutoff_numberofmutations_averageprobability_df, isKeySample, existingMutationTypesList, signature_strand1_versus_strand2_df, width, strand1_versus_strand2, strands, color1, color2, title, figureName, plot_mode): # signature_strand1_versus_strand2_df column names here # ['cancer_type', 'signature', 'mutation_type', # 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', # 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value', # 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list', # 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list'] signatures = signature_strand1_versus_strand2_df['signature'].unique() x_axis_labels = existingMutationTypesList N = len(x_axis_labels) for signature in signatures: numberofMutations = int(signature_cutoff_numberofmutations_averageprobability_df[signature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['number_of_mutations'].values[0]) mutationtype_strand1_real_list=[] mutationtype_strand2_real_list=[] mutationtype_strand1_sims_mean_list=[] mutationtype_strand2_sims_mean_list=[] mutationtype_FDR_BH_adjusted_pvalues_list=[] for mutation_type in existingMutationTypesList: if (strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED): strand1_real_count_column_name=TRANSCRIBED_REAL_COUNT strand1_sims_mean_count_Column_name=TRANSCRIBED_SIMULATIONS_MEAN_COUNT 
strand2_real_count_column_name=UNTRANSCRIBED_REAL_COUNT strand2_sims_mean_count_Column_name=UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT q_value_column_name = TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE elif (strand1_versus_strand2 == GENIC_VERSUS_INTERGENIC): strand1_real_count_column_name=GENIC_REAL_COUNT strand1_sims_mean_count_Column_name=GENIC_SIMULATIONS_MEAN_COUNT strand2_real_count_column_name=INTERGENIC_REAL_COUNT strand2_sims_mean_count_Column_name=INTERGENIC_SIMULATIONS_MEAN_COUNT q_value_column_name = GENIC_VERSUS_INTERGENIC_Q_VALUE elif (strand1_versus_strand2 == LAGGING_VERSUS_LEADING): strand1_real_count_column_name=LAGGING_REAL_COUNT strand1_sims_mean_count_Column_name=LAGGING_SIMULATIONS_MEAN_COUNT strand2_real_count_column_name=LEADING_REAL_COUNT strand2_sims_mean_count_Column_name=LEADING_SIMULATIONS_MEAN_COUNT q_value_column_name = LAGGING_VERSUS_LEADING_Q_VALUE strand1_real_count = 0 strand1_sims_mean_count = 0 strand2_real_count = 0 strand2_sims_mean_count = 0 q_value = None if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values.size>0): strand1_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values[0] if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values.size>0): strand1_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values[0] if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values.size>0): strand2_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values[0] if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values.size>0): strand2_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values[0] if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values.size>0): q_value = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values[0] mutationtype_strand1_real_list.append(strand1_real_count) mutationtype_strand1_sims_mean_list.append(strand1_sims_mean_count) mutationtype_strand2_real_list.append(strand2_real_count) mutationtype_strand2_sims_mean_list.append(strand2_sims_mean_count) 
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value) plotStrandBiasFigureWithBarPlots(outputDir, jobname, numberofSimulations, signature, isKeySample, numberofMutations, N, x_axis_labels, mutationtype_strand1_real_list, mutationtype_strand2_real_list, mutationtype_strand1_sims_mean_list, mutationtype_strand2_sims_mean_list, mutationtype_FDR_BH_adjusted_pvalues_list, strands[0], strands[1], title, color1, color2, figureName, width, plot_mode) ################################################################### # April 20, 2020 # July 4, 2020 starts # Using dataframes def transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode): # Initialize these dataframes as empty dataframe # We will read these dataframes if there is the corresponding data subsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame() dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame() indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame() sbs_df = pd.DataFrame() dbs_df = pd.DataFrame() id_df = pd.DataFrame() subsSignatures = np.array([]) dinucsSignatures = np.array([]) indelsSignatures = np.array([]) os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS), exist_ok=True) os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,BAR_PLOTS), exist_ok=True) os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_PLOTS), exist_ok=True) os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_BAR_PLOTS), exist_ok=True) os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,TABLES), exist_ok=True) os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,EXCEL_FILES), exist_ok=True) strandbias_figures_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS) strandbias_figures_tables_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, TABLES) strandbias_figures_excel_files_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, EXCEL_FILES) ########################################################################################## ######################### Read dictionaries related with ################################ ######################### signatures and samples starts ################################ ########################################################################################## for mutation_type_context in mutation_types_contexts: if (mutation_type_context in SBS_CONTEXTS): subsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32}) subsSignatures = subsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique() if (DBS in mutation_types_contexts): dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32}) dinucsSignatures = dinucsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique() if (ID in mutation_types_contexts): 
indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32}) indelsSignatures = indelsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique() ########################################################################################## ######################### Read dictionaries related with ################################ ######################### signatures and samples ends ################################## ########################################################################################## if is_discreet: sbs_df = subsSignature_cutoff_numberofmutations_averageprobability_df dbs_df = dinucsSignature_cutoff_numberofmutations_averageprobability_df id_df = indelsSignature_cutoff_numberofmutations_averageprobability_df else: if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)): sbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32}) subsSignatures = sbs_df['signature'].unique() if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)): dbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32}) dinucsSignatures = dbs_df['signature'].unique() if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)): id_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32}) indelsSignatures = id_df['signature'].unique() ####################################################################### # Step1 Read p_value if LAGGING_VERSUS_LEADING in strand_bias_list: # Replication Strand Bias signature_mutation_type_lagging_versus_leading_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING) signature_mutation_type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,signature_mutation_type_lagging_versus_leading_table_file_name) signature_lagging_versus_leading_df = pd.read_csv(signature_mutation_type_lagging_versus_leading_table_filepath, header=0, sep='\t') type_lagging_versus_leading_table_file_name = 'Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING) type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,type_lagging_versus_leading_table_file_name) type_lagging_versus_leading_df = pd.read_csv(type_lagging_versus_leading_table_filepath, header=0, sep='\t') if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: # Transcription Strand Bias signature_mutation_type_transcribed_versus_untranscribed_table_file_name = 
'Signature_Mutation_Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED) signature_mutation_type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_transcribed_versus_untranscribed_table_file_name) signature_transcribed_versus_untranscribed_df = pd.read_csv(signature_mutation_type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\t') type_transcribed_versus_untranscribed_table_file_name = 'Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED) type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_transcribed_versus_untranscribed_table_file_name) type_transcribed_versus_untranscribed_df = pd.read_csv(type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\t') if GENIC_VERSUS_INTERGENIC in strand_bias_list: # Transcription Strand Bias signature_mutation_type_genic_versus_intergenic_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC) signature_mutation_type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_genic_versus_intergenic_table_file_name) signature_genic_versus_intergenic_df = pd.read_csv(signature_mutation_type_genic_versus_intergenic_table_filepath, header=0, sep='\t') type_genic_versus_intergenic_table_file_name = 'Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC) type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_genic_versus_intergenic_table_file_name) type_genic_versus_intergenic_df = pd.read_csv(type_genic_versus_intergenic_table_filepath, header=0, sep='\t') ####################################################################### ####################################################################### # Step2 Compute q_value p_values_list=[] element_names=[] # Fill p_values_list if LAGGING_VERSUS_LEADING in strand_bias_list: for index, row in signature_lagging_versus_leading_df.iterrows(): element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], LAGGING_VERSUS_LEADING) element_names.append(element_name) p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE]) for index, row in type_lagging_versus_leading_df.iterrows(): element_name=(row[CANCER_TYPE], None, row[TYPE], LAGGING_VERSUS_LEADING) element_names.append(element_name) p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE]) if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: for index, row in signature_transcribed_versus_untranscribed_df.iterrows(): element_name=(row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED) element_names.append(element_name) p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE]) for index, row in type_transcribed_versus_untranscribed_df.iterrows(): element_name=(row[CANCER_TYPE], None, row[TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED) element_names.append(element_name) p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE]) if GENIC_VERSUS_INTERGENIC in strand_bias_list: for index, row in signature_genic_versus_intergenic_df.iterrows(): element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], GENIC_VERSUS_INTERGENIC) element_names.append(element_name) p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE]) for index, row in type_genic_versus_intergenic_df.iterrows(): element_name=(row[CANCER_TYPE], None, row[TYPE], 
GENIC_VERSUS_INTERGENIC) element_names.append(element_name) p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE]) # print('len(p_values_list): %d' %(len(p_values_list))) ####################################################################### ####################################################################### if ((p_values_list is not None) and p_values_list): rejected, all_FDR_BH_adjusted_p_values, alphacSidak, alphacBonf = statsmodels.stats.multitest.multipletests(p_values_list, alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False) # Add None q_values if LAGGING_VERSUS_LEADING in strand_bias_list: signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = np.nan type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]= np.nan if GENIC_VERSUS_INTERGENIC in strand_bias_list: signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan # Update q_value for element_index, element_name in enumerate(element_names,0): (cancer_type, signature, mutation_type, versus_type)=element_name q_value=all_FDR_BH_adjusted_p_values[element_index] if (signature is not None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED): signature_transcribed_versus_untranscribed_df.loc[(signature_transcribed_versus_untranscribed_df[CANCER_TYPE]==cancer_type) & (signature_transcribed_versus_untranscribed_df[SIGNATURE]==signature) & (signature_transcribed_versus_untranscribed_df[MUTATION_TYPE]==mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]=q_value elif (signature is not None) and (versus_type == GENIC_VERSUS_INTERGENIC): signature_genic_versus_intergenic_df.loc[(signature_genic_versus_intergenic_df[CANCER_TYPE]==cancer_type) & (signature_genic_versus_intergenic_df[SIGNATURE]==signature) & (signature_genic_versus_intergenic_df[MUTATION_TYPE]==mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE]=q_value elif (signature is not None) and (versus_type==LAGGING_VERSUS_LEADING): signature_lagging_versus_leading_df.loc[(signature_lagging_versus_leading_df[CANCER_TYPE]==cancer_type) & (signature_lagging_versus_leading_df[SIGNATURE]==signature) & (signature_lagging_versus_leading_df[MUTATION_TYPE]==mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE]=q_value elif (signature is None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED): type_transcribed_versus_untranscribed_df.loc[(type_transcribed_versus_untranscribed_df[CANCER_TYPE] == cancer_type) & (type_transcribed_versus_untranscribed_df[TYPE] == mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = q_value elif (signature is None) and (versus_type == GENIC_VERSUS_INTERGENIC): type_genic_versus_intergenic_df.loc[(type_genic_versus_intergenic_df[CANCER_TYPE] == cancer_type) & (type_genic_versus_intergenic_df[TYPE] == mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE] = q_value elif (signature is None) and (versus_type == LAGGING_VERSUS_LEADING): type_lagging_versus_leading_df.loc[(type_lagging_versus_leading_df[CANCER_TYPE] == cancer_type) & (type_lagging_versus_leading_df[TYPE] == mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE] = q_value # Reorder columns # Write dataframes if LAGGING_VERSUS_LEADING in strand_bias_list: signature_lagging_versus_leading_df = signature_lagging_versus_leading_df[ 
['cancer_type', 'signature', 'mutation_type', 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value', 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list', 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list']] type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type', 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value', 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list', 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]] signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING) signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename) signature_lagging_versus_leading_df.to_csv(signature_filepath, sep='\t', header=True, index=False) type_filename = 'Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING) type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename) type_lagging_versus_leading_df.to_csv(type_filepath, sep='\t', header=True, index=False) if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: signature_transcribed_versus_untranscribed_df=signature_transcribed_versus_untranscribed_df[['cancer_type', 'signature', 'mutation_type', 'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value', 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list', 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list', 'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']] type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type', 'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value', 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list', 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list', 'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']] signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED) signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename) 
signature_transcribed_versus_untranscribed_df.to_csv(signature_filepath, sep='\t', header=True, index=False) type_filename = 'Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED) type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename) type_transcribed_versus_untranscribed_df.to_csv(type_filepath, sep='\t', header=True, index=False) if GENIC_VERSUS_INTERGENIC in strand_bias_list: signature_genic_versus_intergenic_df=signature_genic_versus_intergenic_df[['cancer_type', 'signature', 'mutation_type', 'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value', 'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list', 'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list', 'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]] type_genic_versus_intergenic_df=type_genic_versus_intergenic_df[['cancer_type', 'type', 'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value', 'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list', 'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list', 'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]] signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC) signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename) signature_genic_versus_intergenic_df.to_csv(signature_filepath, sep='\t', header=True, index=False) type_filename = 'Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC) type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename) type_genic_versus_intergenic_df.to_csv(type_filepath, sep='\t', header=True, index=False) ####################################################################### ####################################################################### # Step3 Filter q-values, Decide significant strand and set 10,20,30,50,75, 100 percent # Add Significant Strand # Set significant strands # Set percentages # Write Filtered Q Values dataframes with percentages ################################################################################################################################## if LAGGING_VERSUS_LEADING in strand_bias_list: signature_lagging_versus_leading_filtered_q_value_df = signature_lagging_versus_leading_df[signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy() type_lagging_versus_leading_filtered_q_value_df= type_lagging_versus_leading_df[type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy() signature_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None type_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None for percentage_string in percentage_strings: 
signature_lagging_versus_leading_filtered_q_value_df[percentage_string] = None type_lagging_versus_leading_filtered_q_value_df[percentage_string] = None signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND] = LAGGING signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]), SIGNIFICANT_STRAND] = LEADING type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND]=LAGGING type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]),SIGNIFICANT_STRAND]=LEADING for percentage_index, percentage_number in enumerate(percentage_numbers, 0): percentage_string = percentage_strings[percentage_index] # Set percentages for signature mutation_type signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1 signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1 # Set percentages for type type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= (type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1 type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1 signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING) signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename) signature_lagging_versus_leading_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True,index=False) type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING) type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename) type_lagging_versus_leading_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True, index=False) ################################################################################################################################## ################################################################################################################################## if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: 
signature_transcribed_versus_untranscribed_filtered_q_value_df = signature_transcribed_versus_untranscribed_df[signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy() type_transcribed_versus_untranscribed_filtered_q_value_df= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]<= SIGNIFICANCE_LEVEL].copy() signature_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND] = None type_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND]=None for percentage_string in percentage_strings: signature_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string]=None type_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string] = None signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND for percentage_index, percentage_number in enumerate(percentage_numbers,0): percentage_string=percentage_strings[percentage_index] # Set percentages for signature mutation_type signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 # Set percentages for type type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= 
(type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED) signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename) signature_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True, index=False) type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED) type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename) type_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True,index=False) ################################################################################################################################## ################################################################################################################################## if GENIC_VERSUS_INTERGENIC in strand_bias_list: signature_genic_versus_intergenic_filtered_q_value_df = signature_genic_versus_intergenic_df[signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy() type_genic_versus_intergenic_filtered_q_value_df= type_genic_versus_intergenic_df[type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]<= SIGNIFICANCE_LEVEL].copy() signature_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None type_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None for percentage_string in percentage_strings: signature_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None type_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]),SIGNIFICANT_STRAND] = INTERGENIC type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = INTERGENIC # Set percentages for percentage_index, percentage_number in enumerate(percentage_numbers,0): percentage_string=percentage_strings[percentage_index] # Set percentages for signature mutation_type signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= 
(signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 # Set percentages for type type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1 signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC) signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename) signature_genic_versus_intergenic_filtered_q_value_df.to_csv(signature_filepath, sep='\t', header=True,index=False) type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC) type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename) type_genic_versus_intergenic_filtered_q_value_df.to_csv(type_filepath, sep='\t', header=True, index=False) ################################################################################################################################## ####################################################################### # Write Excel Files sheet_list = ['corrected_p_value', 'percentages'] for strand1_versus_strand2 in strand_bias_list: if strand1_versus_strand2==LAGGING_VERSUS_LEADING: signatures_df_list=[signature_lagging_versus_leading_df,signature_lagging_versus_leading_filtered_q_value_df] types_df_list = [type_lagging_versus_leading_df, type_lagging_versus_leading_filtered_q_value_df] elif strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED: signatures_df_list = [signature_transcribed_versus_untranscribed_df,signature_transcribed_versus_untranscribed_filtered_q_value_df] types_df_list = [type_transcribed_versus_untranscribed_df, type_transcribed_versus_untranscribed_filtered_q_value_df] elif strand1_versus_strand2==GENIC_VERSUS_INTERGENIC: signatures_df_list = [signature_genic_versus_intergenic_df,signature_genic_versus_intergenic_filtered_q_value_df] types_df_list = [type_genic_versus_intergenic_df, type_genic_versus_intergenic_filtered_q_value_df] signatures_filename="Signatures_Mutation_Types_%s.xlsx" %(strand1_versus_strand2) file_name_with_path=os.path.join(strandbias_figures_excel_files_outputDir, signatures_filename) write_excel_file(signatures_df_list, sheet_list, file_name_with_path) types_filename="Types_%s.xlsx" %(strand1_versus_strand2) file_name_with_path=os.path.join(strandbias_figures_excel_files_outputDir, types_filename) write_excel_file(types_df_list, sheet_list, file_name_with_path) ####################################################################### ####################################################################### #Circle plots starts ####################################################################### #Step4 Fill this dictionary signature2mutation_type2strand2percentagedict={} df_list=[] if LAGGING_VERSUS_LEADING in strand_bias_list: df_list.append(signature_lagging_versus_leading_filtered_q_value_df) if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: 
df_list.append(signature_transcribed_versus_untranscribed_filtered_q_value_df) if GENIC_VERSUS_INTERGENIC in strand_bias_list: df_list.append(signature_genic_versus_intergenic_filtered_q_value_df) for df in df_list: for index, row in df.iterrows(): cancer_type = row[CANCER_TYPE] signature = row[SIGNATURE] mutation_type = row[MUTATION_TYPE] significant_strand=row[SIGNIFICANT_STRAND] percent_10 = row[AT_LEAST_10_PERCENT_DIFF] percent_20 = row[AT_LEAST_20_PERCENT_DIFF] percent_30 = row[AT_LEAST_30_PERCENT_DIFF] percent_50 = row[AT_LEAST_50_PERCENT_DIFF] percent_75 = row[AT_LEAST_75_PERCENT_DIFF] percent_100 = row[AT_LEAST_100_PERCENT_DIFF] if signature in signature2mutation_type2strand2percentagedict: if mutation_type in signature2mutation_type2strand2percentagedict[signature]: if significant_strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]: if (percent_10 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1 if (percent_20 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1 if (percent_30 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1 if (percent_50 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1 if (percent_75 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1 if (percent_100 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1 else: signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand]={} signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0 if (percent_10 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1 if (percent_20 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1 if (percent_30 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1 if (percent_50 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1 if (percent_75 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1 if (percent_100 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1 else: signature2mutation_type2strand2percentagedict[signature][mutation_type] = {} 
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {} signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0 if (percent_10 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1 if (percent_20 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1 if (percent_30 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1 if (percent_50 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1 if (percent_75 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1 if (percent_100 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1 else: signature2mutation_type2strand2percentagedict[signature] = {} signature2mutation_type2strand2percentagedict[signature][mutation_type] = {} signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {} signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0 signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0 if (percent_10 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1 if (percent_20 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1 if (percent_30 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1 if (percent_50 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1 if (percent_75 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1 if (percent_100 == 1): signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1 
####################################################################### ####################################################################### # Step4 Fill this dictionary type2strand2percentagedict={} df_list=[] if LAGGING_VERSUS_LEADING in strand_bias_list: df_list.append(type_lagging_versus_leading_filtered_q_value_df) if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: df_list.append(type_transcribed_versus_untranscribed_filtered_q_value_df) if GENIC_VERSUS_INTERGENIC in strand_bias_list: df_list.append(type_genic_versus_intergenic_filtered_q_value_df) for df in df_list: for index, row in df.iterrows(): cancer_type = row[CANCER_TYPE] my_type = row[TYPE] significant_strand=row[SIGNIFICANT_STRAND] percent_10 = row[AT_LEAST_10_PERCENT_DIFF] percent_20 = row[AT_LEAST_20_PERCENT_DIFF] percent_30 = row[AT_LEAST_30_PERCENT_DIFF] percent_50 = row[AT_LEAST_50_PERCENT_DIFF] percent_75 = row[AT_LEAST_75_PERCENT_DIFF] percent_100 = row[AT_LEAST_100_PERCENT_DIFF] if my_type in type2strand2percentagedict: if significant_strand in type2strand2percentagedict[my_type]: if (percent_10 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1 if (percent_20 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1 if (percent_30 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1 if (percent_50 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1 if (percent_75 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1 if (percent_100 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1 else: type2strand2percentagedict[my_type][significant_strand]={} type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0 if (percent_10 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1 if (percent_20 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1 if (percent_30 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1 if (percent_50 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1 if (percent_75 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1 if (percent_100 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1 else: type2strand2percentagedict[my_type] = {} type2strand2percentagedict[my_type][significant_strand] = {} type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0 type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0 
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0 if (percent_10 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1 if (percent_20 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1 if (percent_30 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1 if (percent_50 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1 if (percent_75 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1 if (percent_100 == 1): type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1 ####################################################################### ####################################################################### # Step5 Plot figures plot_legend(strandbias_figures_outputDir) for strand_bias in strand_bias_list: if np.any(subsSignatures): plot_six_mutations_sbs_signatures_circle_figures(subsSignatures, strand_bias, strandbias_figures_outputDir, SIGNIFICANCE_LEVEL, signature2mutation_type2strand2percentagedict, percentage_strings) if np.any(dinucsSignatures): plot_dbs_and_id_signatures_circle_figures(DBS, dinucsSignatures, strand_bias, strandbias_figures_outputDir, SIGNIFICANCE_LEVEL, type2strand2percentagedict, percentage_strings) if np.any(indelsSignatures): plot_dbs_and_id_signatures_circle_figures(ID, indelsSignatures, strand_bias, strandbias_figures_outputDir, SIGNIFICANCE_LEVEL, type2strand2percentagedict, percentage_strings) # Circle plots ends ####################################################################### ######################################################################## ########################## Part 2 starts ############################## ############## Mutation Types Scatter Plots starts ##################### ############## Signatures Scatter Plots starts ######################### ######################################################################## if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list): if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty)): plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(None,None, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, outputDir, jobname) if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not sbs_df.empty)): plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('subs', None, None, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, sbs_df, outputDir, jobname) if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not dbs_df.empty)): plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('dinucs', None, None, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, dbs_df, outputDir, jobname) if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not id_df.empty)): plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('indels', None, None, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, id_df, outputDir, jobname) ######################################################################## ############## Mutation Types 
Scatter Plots ends ####################### ############## Signatures Scatter Plots ends ########################### ########################## Part 2 ends ################################ ######################################################################## ######################################################################## ########################## Part 4 starts ############################## ######## Bar plot starts includes sample based bar plots ############### ######################################################################## isKeySample = False width = 0.20 ####################################################### ################# Plot types starts ################### ####################################################### types_list= [('All Mutations', 'mutationtypes', six_mutation_types), ('All Signatures', 'subs_signatures', subsSignatures), ('All Signatures', 'indels_signatures', indelsSignatures), ('All Signatures', 'dinucs_signatures', dinucsSignatures)] for mutationsOrSignatures, sub_figure_name, x_axis_labels in types_list: x_axis_labels = sorted(x_axis_labels, key=natural_key) N = len(x_axis_labels) for strand_bias in strand_bias_list: if (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED): type_strand1_versus_strand2_df = type_transcribed_versus_untranscribed_df strand1 = transcriptionStrands[0] strand2 = transcriptionStrands[1] strand1_real_count_column_name = 'Transcribed_real_count' strand2_real_count_column_name = 'UnTranscribed_real_count' strand1_sims_mean_count_column_name = 'Transcribed_mean_sims_count' strand2_sims_mean_count_column_name = 'UnTranscribed_mean_sims_count' q_value_column_name = 'transcribed_versus_untranscribed_q_value' color1 = 'royalblue' color2 = 'yellowgreen' figureName = '%s_transcription_strand_bias' %(sub_figure_name) elif (strand_bias == GENIC_VERSUS_INTERGENIC): type_strand1_versus_strand2_df = type_genic_versus_intergenic_df strand1 = genicVersusIntergenicStrands[0] strand2 = genicVersusIntergenicStrands[1] strand1_real_count_column_name = 'genic_real_count' strand2_real_count_column_name = 'intergenic_real_count' strand1_sims_mean_count_column_name = 'genic_mean_sims_count' strand2_sims_mean_count_column_name = 'intergenic_mean_sims_count' q_value_column_name = 'genic_versus_intergenic_q_value' color1 = 'cyan' color2 = 'gray' figureName = '%s_genic_versus_intergenic_strand_bias' %(sub_figure_name) elif (strand_bias == LAGGING_VERSUS_LEADING): type_strand1_versus_strand2_df = type_lagging_versus_leading_df strand1 = replicationStrands[0] strand2 = replicationStrands[1] strand1_real_count_column_name = 'Lagging_real_count' strand2_real_count_column_name = 'Leading_real_count' strand1_sims_mean_count_column_name = 'Lagging_mean_sims_count' strand2_sims_mean_count_column_name = 'Leading_mean_sims_count' q_value_column_name = 'lagging_versus_leading_q_value' color1 = 'indianred' color2 = 'goldenrod' figureName = '%s_replication_strand_bias' %(sub_figure_name) types_strand1_real_count_list = [] types_strand2_real_count_list = [] types_strand1_sims_mean_count_list = [] types_strand2_sims_mean_count_list = [] types_strand1_versus_strand2_FDR_BH_adjusted_pvalues = [] for my_type in x_axis_labels: strand1_real_count = 0 strand2_real_count = 0 strand1_sims_mean_count = 0 strand2_sims_mean_count = 0 q_value = None if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_real_count_column_name].values.size>0: strand1_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == 
my_type)][strand1_real_count_column_name].values[0] if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values.size>0: strand2_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values[0] if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values.size>0: strand1_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values[0] if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values.size>0: strand2_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values[0] if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values.size>0: q_value= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values[0] types_strand1_real_count_list.append(strand1_real_count) types_strand2_real_count_list.append(strand2_real_count) types_strand1_sims_mean_count_list.append(strand1_sims_mean_count) types_strand2_sims_mean_count_list.append(strand2_sims_mean_count) types_strand1_versus_strand2_FDR_BH_adjusted_pvalues.append(q_value) if ((len(x_axis_labels) > 0) and types_strand1_real_count_list and types_strand2_real_count_list and types_strand1_sims_mean_count_list and types_strand2_sims_mean_count_list and (len(types_strand1_versus_strand2_FDR_BH_adjusted_pvalues)>0)): if (types_strand1_real_count_list and types_strand2_real_count_list): plotStrandBiasFigureWithBarPlots(outputDir, jobname, numberofSimulations, None, isKeySample, None, N, x_axis_labels, types_strand1_real_count_list, types_strand2_real_count_list, types_strand1_sims_mean_count_list, types_strand2_sims_mean_count_list, types_strand1_versus_strand2_FDR_BH_adjusted_pvalues, strand1,strand2, mutationsOrSignatures, color1, color2, figureName, width, plot_mode) ####################################################### ################# Plot types ends ##################### ####################################################### ################################################################# ########### Plot sub signatures mutation types starts ########### ################################################################# if not sbs_df.empty: if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list: plotBarPlotsUsingDataframes(outputDir, jobname, numberofSimulations, sbs_df, isKeySample, six_mutation_types, signature_transcribed_versus_untranscribed_df, width, TRANSCRIBED_VERSUS_UNTRANSCRIBED, transcriptionStrands, 'royalblue', 'yellowgreen', 'All Mutations', 'mutationtypes_transcription_strand_bias', plot_mode) if GENIC_VERSUS_INTERGENIC in strand_bias_list: plotBarPlotsUsingDataframes(outputDir, jobname, numberofSimulations, sbs_df, isKeySample, six_mutation_types, signature_genic_versus_intergenic_df, width, GENIC_VERSUS_INTERGENIC, genicVersusIntergenicStrands, 'cyan', 'gray', 'All Mutations', 'mutationtypes_genic_versus_intergenic_strand_bias', plot_mode) if LAGGING_VERSUS_LEADING in strand_bias_list: plotBarPlotsUsingDataframes(outputDir, jobname, numberofSimulations, sbs_df, isKeySample, six_mutation_types, signature_lagging_versus_leading_df, width, LAGGING_VERSUS_LEADING, replicationStrands, 
'indianred', 'goldenrod', 'All Mutations', 'mutationtypes_replication_strand_bias', plot_mode) ################################################################# ########### Plot sub signatures mutation types ends ############# ################################################################# ######################################################################## ######## Bar plot starts includes sample based bar plots ############### ########################## Part 4 ends ################################ ######################################################################## # Circle Bar Plots # Plot circle plots and bar plots all together # At top ax, circle plots with 3 rows: for genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading # At middle ax, 3 bar plots: for genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading # At below ax, 3 normalized bar plots: for genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list): sbs_signatures = sbs_df['signature'].unique() for sbs_signature in sbs_signatures: plot_circle_bar_plots_together(outputDir, jobname, sbs_signature, six_mutation_types, signature2mutation_type2strand2percentagedict, signature_genic_versus_intergenic_df, signature_transcribed_versus_untranscribed_df, signature_lagging_versus_leading_df, genicVersusIntergenicStrands, transcriptionStrands, replicationStrands) ################################################################### ############################################################################################################################ def plot_dbs_and_id_signatures_circle_figures(signature_type, signatures, strand_bias, strandbias_figures_outputDir, SIGNIFICANCE_LEVEL, type2strand2percentagedict, percentage_strings): rows_signatures=[] ##################################################################### if strand_bias==LAGGING_VERSUS_LEADING: strands=replicationStrands elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED: strands=transcriptionStrands elif strand_bias==GENIC_VERSUS_INTERGENIC: strands=genicVersusIntergenicStrands ##################################################################### ##################################################################### #Fill rows_DBS_signatures #Fill rows_ID_signatures for signature in signatures: if signature in type2strand2percentagedict: for strand in strands: if strand in type2strand2percentagedict[signature]: for percentage_string in percentage_strings: if percentage_string in type2strand2percentagedict[signature][strand]: print('signature:%s strand:%s percentage_string:%s' %(signature,strand,percentage_string)) if signature not in rows_signatures: rows_signatures.append(signature) ##################################################################### ##################################################################### rows_signatures=sorted(rows_signatures,key=natural_key,reverse=True) ##################################################################### if (len(rows_signatures)>0): ##################################################################### #New plot (width,height) fig, ax = plt.subplots(figsize=(5+1.5*len(percentage_strings), 10+1.5*len(rows_signatures))) #make aspect ratio square ax.set_aspect(1.0) ##################################################################### 
###################################################################################################################################### for percentage_diff_index, percentage_string in enumerate(percentage_strings): for row_signature_index, row_signature in enumerate(rows_signatures): if (strand_bias==LAGGING_VERSUS_LEADING): if row_signature in type2strand2percentagedict: lagging_percentage=None leading_percentage=None if LAGGING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LAGGING][percentage_string]==1: lagging_percentage = 100 if LEADING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LEADING][percentage_string]==1: leading_percentage = 100 if (lagging_percentage is not None) and (leading_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string)) circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='indianred', fill=True) ax.add_artist(circle) elif (leading_percentage is not None) and (lagging_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string)) circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='goldenrod', fill=True) ax.add_artist(circle) elif (lagging_percentage is not None) and (leading_percentage is not None): radius_lagging = 0.49 radius_leading = 0.49 if (radius_lagging>radius_leading): #First lagging circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='goldenrod', fill=True) ax.add_artist(circle) #Second leading circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True) ax.add_artist(circle) else: #First leading circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True) ax.add_artist(circle) #Second lagging circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='goldenrod', fill=True) ax.add_artist(circle) elif (strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED): if row_signature in type2strand2percentagedict: transcribed_percentage=None untranscribed_percentage=None if TRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][TRANSCRIBED_STRAND][percentage_string]==1: transcribed_percentage = 100 if UNTRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][UNTRANSCRIBED_STRAND][percentage_string]==1: untranscribed_percentage = 100 if (transcribed_percentage is not None) and (untranscribed_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string)) circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='royalblue', fill=True) ax.add_artist(circle) elif (untranscribed_percentage is not None) and (transcribed_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string)) circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='yellowgreen', fill=True) 
ax.add_artist(circle) elif (transcribed_percentage is not None) and (untranscribed_percentage is not None): radius_transcribed = 0.49 radius_untranscribed = 0.49 if (radius_transcribed>radius_untranscribed): #First transcribed circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True) ax.add_artist(circle) #Second untranscribed circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True) ax.add_artist(circle) else: #First untranscribed circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True) ax.add_artist(circle) #Second transcribed circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True) ax.add_artist(circle) elif (strand_bias==GENIC_VERSUS_INTERGENIC): if row_signature in type2strand2percentagedict: genic_percentage=None intergenic_percentage=None if GENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][GENIC][percentage_string]==1: genic_percentage = 100 if INTERGENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][INTERGENIC][percentage_string]==1: intergenic_percentage = 100 if (genic_percentage is not None) and (intergenic_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string)) circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='cyan', fill=True) ax.add_artist(circle) elif (intergenic_percentage is not None) and (genic_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string)) circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='gray', fill=True) ax.add_artist(circle) elif (genic_percentage is not None) and (intergenic_percentage is not None): radius_genic = 0.49 radius_intergenic = 0.49 if (radius_genic>radius_intergenic): #First genic circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True) ax.add_artist(circle) #Second intergenic circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True) ax.add_artist(circle) else: #First untranscribed circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True) ax.add_artist(circle) #Second transcribed circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True) ax.add_artist(circle) ###################################################################################################################################### ################################################################################## # CODE GOES HERE TO CENTER X-AXIS LABELS... 
ax.set_xlim([0,len(percentage_strings)]) ax.set_xticklabels([]) ax.tick_params(axis='x', which='minor', length=0, labelsize=20) #major ticks ax.set_xticks(np.arange(0, len(percentage_strings), 1)) #minor ticks ax.set_xticks(np.arange(0, len(percentage_strings), 1)+0.5,minor=True) ax.set_xticklabels(percentage_strings,minor=True) #Jul 7, 2020 if strand_bias==LAGGING_VERSUS_LEADING: fig.suptitle('Lagging versus Leading Strand Bias', fontsize=30) elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED: fig.suptitle('Transcribed versus Untranscribed Strand Bias', fontsize=30) elif strand_bias==GENIC_VERSUS_INTERGENIC: fig.suptitle('Genic versus Intergenic Strand Bias', fontsize=30) ax.xaxis.set_ticks_position('top') plt.tick_params( axis='x', # changes apply to the x-axis which='major', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False) # labels along the bottom edge are off ################################################################################## ################################################################################## # CODE GOES HERE TO CENTER Y-AXIS LABELS... ax.set_ylim([0,len(rows_signatures)]) ax.set_yticklabels([]) ax.tick_params(axis='y', which='minor', length=0, labelsize=30) #major ticks ax.set_yticks(np.arange(0, len(rows_signatures), 1)) #minor ticks ax.set_yticks(np.arange(0, len(rows_signatures), 1)+0.5,minor=True) ax.set_yticklabels(rows_signatures, minor=True) # fontsize plt.tick_params( axis='y', # changes apply to the x-axis which='major', # both major and minor ticks are affected left=False) # labels along the bottom edge are off ################################################################################## ################################################################################## # Gridlines based on major ticks ax.grid(which='major', color='black') ################################################################################## ################################################################################## # create the directory if it does not exists filename = '%s_Signatures_%s_with_circles_%s.png' % (signature_type,strand_bias,str(SIGNIFICANCE_LEVEL).replace('.','_')) figFile = os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename) fig.savefig(figFile) fig.tight_layout() plt.cla() plt.close(fig) ################################################################################## ############################################################################################################################ ############################################################################################################################ #Plot Legend only def plot_legend(strandbias_figures_outputDir): strand_biases=[TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING] for strandbias in strand_biases: ################################################################################## fig = plt.figure(figsize=(4,1), dpi=300) ax = plt.gca() plt.axis('off') ################################################################################## ################################################################################## if strandbias==TRANSCRIBED_VERSUS_UNTRANSCRIBED: legend_elements = [ Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=20), Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=20)] elif strandbias == GENIC_VERSUS_INTERGENIC: 
legend_elements = [ Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=20), Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=20)] elif (strandbias==LAGGING_VERSUS_LEADING): legend_elements = [ Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=20), Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=20)] ax.legend(handles=legend_elements, bbox_to_anchor=(0, 0.5), loc='center left' ,fontsize = 20) ################################################################################## ################################################################################## # create the directory if it does not exists filename = 'Legend_%s.png' % (strandbias) figFile = os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename) fig.savefig(figFile) fig.tight_layout() plt.cla() plt.close(fig) ################################################################################## ############################################################################################################################ ############################################################################################################################ #Sep 19, 2020 def plot_six_mutations_sbs_signatures_circle_figures(sbs_signatures, strand_bias, strandbias_figures_outputDir, significance_level, signature2mutation_type2strand2percentagedict, percentage_strings): mutation_types=six_mutation_types ##################################################################### if strand_bias==LAGGING_VERSUS_LEADING: strands=replicationStrands elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED: strands=transcriptionStrands elif strand_bias==GENIC_VERSUS_INTERGENIC: strands=genicVersusIntergenicStrands ##################################################################### ##################################################################### rows_sbs_signatures=[] #Fill rows_sbs_signatures for signature in sbs_signatures: if signature in signature2mutation_type2strand2percentagedict: for mutation_type in signature2mutation_type2strand2percentagedict[signature]: for strand in strands: if strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]: for percentage_string in percentage_strings: if (percentage_string in signature2mutation_type2strand2percentagedict[signature][mutation_type][strand]) and (signature2mutation_type2strand2percentagedict[signature][mutation_type][strand][percentage_string]==1): if signature not in rows_sbs_signatures: rows_sbs_signatures.append(signature) ##################################################################### ##################################################################### rows_sbs_signatures=sorted(rows_sbs_signatures,key=natural_key,reverse=True) ##################################################################### ##################################################################### xticklabels_list = percentage_strings * len(mutation_types) ##################################################################### if (len(rows_sbs_signatures)>0): ##################################################################### plot1, panel1 = plt.subplots(figsize=(5+1.5*len(xticklabels_list), 10+1.5*len(rows_sbs_signatures))) # plot1, panel1 = plt.subplots(figsize=(5+1.4*len(xticklabels_list), 10+len(rows_sbs_signatures))) Title and mutation texts are not seen. 
plt.rc('axes', edgecolor='lightgray') # panel1 = plt.axes([0.04, 0.09, 0.95, 0.75]) #make aspect ratio square panel1.set_aspect(1.0) ##################################################################### ################################################################################## #set title if strand_bias==LAGGING_VERSUS_LEADING: title='Lagging versus Leading Strand Bias' elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED: title='Transcribed versus Untranscribed Strand Bias' elif strand_bias==GENIC_VERSUS_INTERGENIC: title='Genic versus Intergenic Strand Bias' panel1.text(len(percentage_strings)*3, len(rows_sbs_signatures)+2.5, title, horizontalalignment='center', fontsize=60, fontweight='bold', fontname='Arial') ################################################################################## ################################################################################## #Colors from SigProfilerPlotting tool to be consistent colors = [[3 / 256, 189 / 256, 239 / 256], [1 / 256, 1 / 256, 1 / 256], [228 / 256, 41 / 256, 38 / 256], [203 / 256, 202 / 256, 202 / 256], [162 / 256, 207 / 256, 99 / 256], [236 / 256, 199 / 256, 197 / 256]] #Put rectangles x = 0 for i in range(0, len(mutation_types), 1): panel1.text((x+(len(percentage_strings)/2)-0.75), len(rows_sbs_signatures)+1.5, mutation_types[i], fontsize=55, fontweight='bold', fontname='Arial') panel1.add_patch(plt.Rectangle((x+.0415, len(rows_sbs_signatures)+0.75), len(percentage_strings)-(2*.0415), .5, facecolor=colors[i], clip_on=False)) panel1.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(rows_sbs_signatures), facecolor=colors[i], zorder=0, alpha=0.25,edgecolor='grey')) x += len(percentage_strings) ################################################################################## ################################################################################## # CODE GOES HERE TO CENTER X-AXIS LABELS... panel1.set_xlim([0,len(mutation_types)*len(percentage_strings)]) panel1.set_xticklabels([]) panel1.tick_params(axis='x', which='minor', length=0, labelsize=35) #major ticks panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1)) #minor ticks panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1)+0.5,minor=True) panel1.set_xticklabels(xticklabels_list,minor=True) panel1.xaxis.set_label_position('top') panel1.xaxis.set_ticks_position('top') plt.tick_params( axis='x', # changes apply to the x-axis which='major', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False) # labels along the bottom edge are off ################################################################################## ################################################################################## # CODE GOES HERE TO CENTER Y-AXIS LABELS... 
panel1.set_ylim([0,len(rows_sbs_signatures)]) panel1.set_yticklabels([]) panel1.tick_params(axis='y', which='minor', length=0, labelsize=40) #major ticks panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1)) #minor ticks panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1)+0.5,minor=True) panel1.set_yticklabels(rows_sbs_signatures, minor=True) # fontsize plt.tick_params( axis='y', # changes apply to the x-axis which='major', # both major and minor ticks are affected left=False) # labels along the bottom edge are off ################################################################################## ################################################################################## # Gridlines based on major ticks panel1.grid(which='major', color='black', zorder=3) ################################################################################## ################################################################################## #Put the legend if strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED: legend_elements = [ Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=40), Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40)] elif strand_bias == GENIC_VERSUS_INTERGENIC: legend_elements = [ Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=40), Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=40)] elif (strand_bias==LAGGING_VERSUS_LEADING): legend_elements = [ Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40), Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)] panel1.legend(handles=legend_elements,ncol=len(legend_elements), bbox_to_anchor=(1, -0.1),loc='upper right', fontsize=40) ################################################################################## ###################################################################################################################################### for percentage_diff_index, percentage_string in enumerate(percentage_strings): for mutation_type_index, mutation_type in enumerate(mutation_types): for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures): if (strand_bias==LAGGING_VERSUS_LEADING): if row_sbs_signature in signature2mutation_type2strand2percentagedict: if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]: lagging_percentage = None leading_percentage = None if (LAGGING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LAGGING][percentage_string]==1): lagging_percentage = 100 if (LEADING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LEADING][percentage_string]==1): leading_percentage = 100 if (lagging_percentage is not None) and (leading_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string)) panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, 
color='indianred', fill=True)) elif (leading_percentage is not None) and (lagging_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string)) panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, color='goldenrod', fill=True)) elif (lagging_percentage is not None) and (leading_percentage is not None): radius_lagging = 0.49 radius_leading = 0.49 if (radius_lagging > radius_leading): # First lagging panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True)) # Second leading panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True)) else: # First leading panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True)) # Second lagging panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True)) elif (strand_bias == GENIC_VERSUS_INTERGENIC): if row_sbs_signature in signature2mutation_type2strand2percentagedict: if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]: genic_percentage = None intergenic_percentage = None if (GENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][GENIC][percentage_string]==1): genic_percentage = 100 if (INTERGENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][INTERGENIC][percentage_string]==1): intergenic_percentage = 100 if (genic_percentage is not None) and (intergenic_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='cyan',fill=True)) elif (intergenic_percentage is not None) and (genic_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='gray',fill=True)) elif (genic_percentage is not None) and (intergenic_percentage is not None): radius_genic = 0.49 radius_intergenic = 0.49 if (radius_genic > radius_intergenic): # First genic panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True)) # Second intergenic 
panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic,color='gray', fill=True)) else: # First intergenic panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic, color='gray', fill=True)) # Second genic panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True)) elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED): if row_sbs_signature in signature2mutation_type2strand2percentagedict: if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]: transcribed_percentage = None untranscribed_percentage = None if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][TRANSCRIBED_STRAND][percentage_string]==1): transcribed_percentage = 100 if (UNTRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][UNTRANSCRIBED_STRAND][percentage_string]==1): untranscribed_percentage = 100 if (transcribed_percentage is not None) and (untranscribed_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='royalblue',fill=True)) elif (untranscribed_percentage is not None) and (transcribed_percentage is None): radius = 0.49 if (radius > 0): # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string)) panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='yellowgreen',fill=True)) elif (transcribed_percentage is not None) and (untranscribed_percentage is not None): radius_transcribed = 0.49 radius_untranscribed = 0.49 if (radius_transcribed > radius_untranscribed): # First transcribed panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True)) # Second untranscribed panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True)) else: # First untranscribed panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True)) # Second transcribed panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True)) ###################################################################################################################################### 
################################################################################## # create the directory if it does not exists filename = 'SBS_Signatures_%s_with_circle_plot_%s.png' % (strand_bias,str(significance_level).replace('.','_')) figFile = os.path.join(strandbias_figures_outputDir,CIRCLE_PLOTS, filename) plot1.savefig(figFile,bbox_inches='tight') plot1.tight_layout() plt.cla() plt.close(plot1) ################################################################################## ############################################################################################################################
64.015283
398
0.615946
import os import numpy as np import statsmodels.stats.multitest import Line2D from matplotlib import gridspec import pandas as pd from SigProfilerTopography.source.commons.TopographyCommons import natural_key from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_STRAND from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_STRAND from SigProfilerTopography.source.commons.TopographyCommons import LAGGING from SigProfilerTopography.source.commons.TopographyCommons import LEADING from SigProfilerTopography.source.commons.TopographyCommons import six_mutation_types from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS from SigProfilerTopography.source.commons.TopographyCommons import DATA from SigProfilerTopography.source.commons.TopographyCommons import FIGURE from SigProfilerTopography.source.commons.TopographyCommons import SCATTER_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import BAR_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_BAR_PLOTS from SigProfilerTopography.source.commons.TopographyCommons import SAMPLES from SigProfilerTopography.source.commons.TopographyCommons import TABLES from SigProfilerTopography.source.commons.TopographyCommons import SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_P_VALUE from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_P_VALUE from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_Q_VALUE from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_Q_VALUE from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import GENIC_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import LEADING_REAL_COUNT from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import GENIC_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_SIMULATIONS_MEAN_COUNT from 
SigProfilerTopography.source.commons.TopographyCommons import LEADING_SIMULATIONS_MEAN_COUNT from SigProfilerTopography.source.commons.TopographyCommons import GENIC from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC from SigProfilerTopography.source.commons.TopographyCommons import percentage_numbers from SigProfilerTopography.source.commons.TopographyCommons import percentage_strings from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_10_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_20_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_30_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_50_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_75_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_100_PERCENT_DIFF from SigProfilerTopography.source.commons.TopographyCommons import ID from SigProfilerTopography.source.commons.TopographyCommons import DBS from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT from SigProfilerTopography.source.commons.TopographyCommons import EXCEL_FILES from SigProfilerTopography.source.commons.TopographyCommons import write_excel_file from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT SIGNATURE = 'signature' CANCER_TYPE = 'cancer_type' MUTATION_TYPE = 'mutation_type' TYPE = 'type' SIGNIFICANT_STRAND = 'significant_strand' SIGNIFICANCE_LEVEL = 0.05 from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofSubsDict from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofIndelsDict from SigProfilerTopography.source.commons.TopographyCommons import Sample2NumberofDinucsDictFilename from SigProfilerTopography.source.commons.TopographyCommons import getSample2SubsSignature2NumberofMutationsDict from SigProfilerTopography.source.commons.TopographyCommons import getSample2IndelsSignature2NumberofMutationsDict from SigProfilerTopography.source.commons.TopographyCommons import Sample2DinucsSignature2NumberofMutationsDictFilename transcriptionStrands = [TRANSCRIBED_STRAND, UNTRANSCRIBED_STRAND] genicVersusIntergenicStrands=[GENIC, INTERGENIC] replicationStrands = [LAGGING, 
LEADING]
true
true
1c4a0071eef9fbd124ada34ed39dfb7abd9d10cb
5376
py
Python
models/face_parsing/modules/bn.py
soumik12345/Barbershop
971be31afca55499287e97a7034a59a66b871ba8
[ "MIT" ]
null
null
null
models/face_parsing/modules/bn.py
soumik12345/Barbershop
971be31afca55499287e97a7034a59a66b871ba8
[ "MIT" ]
2
2022-03-30T17:49:03.000Z
2022-03-30T19:20:28.000Z
models/face_parsing/modules/bn.py
soumik12345/Barbershop
971be31afca55499287e97a7034a59a66b871ba8
[ "MIT" ]
null
null
null
import torch import torch.nn as nn import torch.nn.functional as functional try: from queue import Queue except ImportError: from Queue import Queue from .functions import * class ABN(nn.Module): """Activated Batch Normalization This gathers a `BatchNorm2d` and an activation function in a single module """ def __init__( self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01, ): """Creates an Activated Batch Normalization module Parameters ---------- num_features : int Number of feature channels in the input and output. eps : float Small constant to prevent numerical issues. momentum : float Momentum factor applied to compute running statistics as. affine : bool If `True` apply learned scale and shift transformation after normalization. activation : str Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. slope : float Negative slope for the `leaky_relu` activation. """ super(ABN, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.momentum = momentum self.activation = activation self.slope = slope if self.affine: self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) else: self.register_parameter("weight", None) self.register_parameter("bias", None) self.register_buffer("running_mean", torch.zeros(num_features)) self.register_buffer("running_var", torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.running_mean, 0) nn.init.constant_(self.running_var, 1) if self.affine: nn.init.constant_(self.weight, 1) nn.init.constant_(self.bias, 0) def forward(self, x): x = functional.batch_norm( x, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps, ) if self.activation == ACT_RELU: return functional.relu(x, inplace=True) elif self.activation == ACT_LEAKY_RELU: return functional.leaky_relu(x, negative_slope=self.slope, inplace=True) elif self.activation == ACT_ELU: return functional.elu(x, inplace=True) else: return x def __repr__(self): rep = ( "{name}({num_features}, eps={eps}, momentum={momentum}," " affine={affine}, activation={activation}" ) if self.activation == "leaky_relu": rep += ", slope={slope})" else: rep += ")" return rep.format(name=self.__class__.__name__, **self.__dict__) class InPlaceABN(ABN): """InPlace Activated Batch Normalization""" def __init__( self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01, ): """Creates an InPlace Activated Batch Normalization module Parameters ---------- num_features : int Number of feature channels in the input and output. eps : float Small constant to prevent numerical issues. momentum : float Momentum factor applied to compute running statistics as. affine : bool If `True` apply learned scale and shift transformation after normalization. activation : str Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. slope : float Negative slope for the `leaky_relu` activation. """ super(InPlaceABN, self).__init__( num_features, eps, momentum, affine, activation, slope ) def forward(self, x): return inplace_abn( x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.activation, self.slope, ) class InPlaceABNSync(ABN): """InPlace Activated Batch Normalization with cross-GPU synchronization This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DistributedDataParallel`. 
""" def forward(self, x): return inplace_abn_sync( x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.activation, self.slope, ) def __repr__(self): rep = ( "{name}({num_features}, eps={eps}, momentum={momentum}," " affine={affine}, activation={activation}" ) if self.activation == "leaky_relu": rep += ", slope={slope})" else: rep += ")" return rep.format(name=self.__class__.__name__, **self.__dict__)
30.03352
116
0.570499
import torch import torch.nn as nn import torch.nn.functional as functional try: from queue import Queue except ImportError: from Queue import Queue from .functions import * class ABN(nn.Module): def __init__( self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01, ): super(ABN, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.momentum = momentum self.activation = activation self.slope = slope if self.affine: self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) else: self.register_parameter("weight", None) self.register_parameter("bias", None) self.register_buffer("running_mean", torch.zeros(num_features)) self.register_buffer("running_var", torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.running_mean, 0) nn.init.constant_(self.running_var, 1) if self.affine: nn.init.constant_(self.weight, 1) nn.init.constant_(self.bias, 0) def forward(self, x): x = functional.batch_norm( x, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps, ) if self.activation == ACT_RELU: return functional.relu(x, inplace=True) elif self.activation == ACT_LEAKY_RELU: return functional.leaky_relu(x, negative_slope=self.slope, inplace=True) elif self.activation == ACT_ELU: return functional.elu(x, inplace=True) else: return x def __repr__(self): rep = ( "{name}({num_features}, eps={eps}, momentum={momentum}," " affine={affine}, activation={activation}" ) if self.activation == "leaky_relu": rep += ", slope={slope})" else: rep += ")" return rep.format(name=self.__class__.__name__, **self.__dict__) class InPlaceABN(ABN): def __init__( self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01, ): super(InPlaceABN, self).__init__( num_features, eps, momentum, affine, activation, slope ) def forward(self, x): return inplace_abn( x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.activation, self.slope, ) class InPlaceABNSync(ABN): def forward(self, x): return inplace_abn_sync( x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.activation, self.slope, ) def __repr__(self): rep = ( "{name}({num_features}, eps={eps}, momentum={momentum}," " affine={affine}, activation={activation}" ) if self.activation == "leaky_relu": rep += ", slope={slope})" else: rep += ")" return rep.format(name=self.__class__.__name__, **self.__dict__)
true
true
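The row above lists an activated batch-normalization module (ABN plus in-place variants). Below is a minimal usage sketch for the plain ABN class, assuming the file is importable as modules.bn and that its .functions dependency is available; the import path and tensor shapes are illustrative assumptions, not taken from the row.

import torch
from modules.bn import ABN   # assumed import path for the file listed above

# 8 feature channels, leaky ReLU folded into the normalization layer
abn = ABN(num_features=8, activation="leaky_relu", slope=0.01)
abn.train()

x = torch.randn(4, 8, 16, 16)   # (batch, channels, height, width)
y = abn(x)                      # batch norm followed by leaky ReLU in one call
print(y.shape)                  # torch.Size([4, 8, 16, 16])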
1c4a00a6ef4dd2fe7cdd0c9ca505b8fd1ccc0977
14,574
py
Python
QEBATangentAttack/utils.py
machanic/TangentAttack
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
[ "Apache-2.0" ]
4
2021-11-12T04:06:32.000Z
2022-01-27T09:01:41.000Z
QEBATangentAttack/utils.py
machanic/TangentAttack
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
[ "Apache-2.0" ]
1
2022-02-22T14:00:59.000Z
2022-02-25T08:57:29.000Z
QEBATangentAttack/utils.py
machanic/TangentAttack
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
[ "Apache-2.0" ]
null
null
null
""" Provides classes to measure the distance between inputs. Distances --------- .. autosummary:: :nosignatures: MeanSquaredDistance MeanAbsoluteDistance Linfinity L0 Aliases ------- .. autosummary:: :nosignatures: MSE MAE Linf Base class ---------- To implement a new distance, simply subclass the :class:`Distance` class and implement the :meth:`_calculate` method. .. autosummary:: :nosignatures: Distance """ from __future__ import division import sys import abc import torch abstractmethod = abc.abstractmethod if sys.version_info >= (3, 4): ABC = abc.ABC else: # pragma: no cover ABC = abc.ABCMeta('ABC', (), {}) import functools from numbers import Number from torch.nn import functional as F import numpy as np @functools.total_ordering class Distance(ABC): """Base class for distances. This class should be subclassed when implementing new distances. Subclasses must implement _calculate. """ def __init__( self, reference=None, other=None, bounds=None, value=None): if value is not None: # alternative constructor assert isinstance(value, Number) assert reference is None assert other is None assert bounds is None self.reference = None self.other = None self._bounds = None self._value = value self._gradient = None else: # standard constructor self.reference = reference self.other = other self._bounds = bounds self._value, self._gradient = self._calculate() assert self._value is not None @property def value(self): return self._value @property def gradient(self): return self._gradient @abstractmethod def _calculate(self): """Returns distance and gradient of distance w.r.t. to self.other""" raise NotImplementedError def name(self): return self.__class__.__name__ def __str__(self): return '{} = {:.6e}'.format(self.name(), self._value) def __repr__(self): return self.__str__() def __eq__(self, other): if other.__class__ != self.__class__: raise TypeError('Comparisons are only possible between the same distance types.') return self.value == other.value def __lt__(self, other): if other.__class__ != self.__class__: raise TypeError('Comparisons are only possible between the same distance types.') return self.value < other.value class MeanSquaredDistance(Distance): """Calculates the mean squared error between two inputs. """ def _calculate(self): min_, max_ = self._bounds n = self.reference.numel() f = n * (max_ - min_)**2 diff = self.other - self.reference value = torch.dot(diff.view(-1), diff.view(-1)).item() / f # calculate the gradient only when needed self._g_diff = diff self._g_f = f gradient = None return value, gradient @property def gradient(self): if self._gradient is None: self._gradient = self._g_diff / (self._g_f / 2) return self._gradient def __str__(self): return 'normalized MSE = {:.2e}'.format(self._value) MSE = MeanSquaredDistance class MeanAbsoluteDistance(Distance): """Calculates the mean absolute error between two inputs. """ def _calculate(self): min_, max_ = self._bounds diff = (self.other - self.reference) / (max_ - min_) value = torch.mean(torch.abs(diff)).type(torch.float64) n = self.reference.size gradient = 1 / n * torch.sign(diff) / (max_ - min_) return value, gradient def __str__(self): return 'normalized MAE = {:.2e}'.format(self._value) MAE = MeanAbsoluteDistance class Linfinity(Distance): """Calculates the L-infinity norm of the difference between two inputs. 
""" def _calculate(self): min_, max_ = self._bounds diff = (self.other - self.reference) / (max_ - min_) value = torch.max(torch.abs(diff)).type(torch.float64) gradient = None return value, gradient @property def gradient(self): raise NotImplementedError def __str__(self): return 'normalized Linf distance = {:.2e}'.format(self._value) Linf = Linfinity class L0(Distance): """Calculates the L0 norm of the difference between two inputs. """ def _calculate(self): diff = self.other - self.reference value = torch.sum(diff != 0) gradient = None return value, gradient @property def gradient(self): raise NotImplementedError def __str__(self): return 'L0 distance = {}'.format(self._value) """ Provides classes that define what is adversarial. Criteria -------- We provide criteria for untargeted and targeted adversarial attacks. .. autosummary:: :nosignatures: Misclassification TopKMisclassification OriginalClassProbability ConfidentMisclassification .. autosummary:: :nosignatures: TargetClass TargetClassProbability Examples -------- Untargeted criteria: >>> from foolbox.criteria import Misclassification >>> criterion1 = Misclassification() >>> from foolbox.criteria import TopKMisclassification >>> criterion2 = TopKMisclassification(k=5) Targeted criteria: >>> from foolbox.criteria import TargetClass >>> criterion3 = TargetClass(22) >>> from foolbox.criteria import TargetClassProbability >>> criterion4 = TargetClassProbability(22, p=0.99) Criteria can be combined to create a new criterion: >>> criterion5 = criterion2 & criterion3 """ class Criterion(ABC): """Base class for criteria that define what is adversarial. The :class:`Criterion` class represents a criterion used to determine if predictions for an image are adversarial given a reference label. It should be subclassed when implementing new criteria. Subclasses must implement is_adversarial. """ def name(self): """Returns a human readable name that uniquely identifies the criterion with its hyperparameters. Returns ------- str Human readable name that uniquely identifies the criterion with its hyperparameters. Notes ----- Defaults to the class name but subclasses can provide more descriptive names and must take hyperparameters into account. """ return self.__class__.__name__ @abstractmethod def is_adversarial(self, predictions, label): """Decides if predictions for an image are adversarial given a reference label. Parameters ---------- predictions : :class:`numpy.ndarray` A vector with the pre-softmax predictions for some image. label : int The label of the unperturbed reference image. Returns ------- bool True if an image with the given predictions is an adversarial example when the ground-truth class is given by label, False otherwise. """ raise NotImplementedError def __and__(self, other): return CombinedCriteria(self, other) class CombinedCriteria(Criterion): """Meta criterion that combines several criteria into a new one. Considers inputs as adversarial that are considered adversarial by all sub-criteria that are combined by this criterion. Instead of using this class directly, it is possible to combine criteria like this: criteria1 & criteria2 Parameters ---------- *criteria : variable length list of :class:`Criterion` instances List of sub-criteria that will be combined. Notes ----- This class uses lazy evaluation of the criteria in the order they are passed to the constructor. 
""" def __init__(self, *criteria): super(CombinedCriteria, self).__init__() self._criteria = criteria def name(self): """Concatenates the names of the given criteria in alphabetical order. If a sub-criterion is itself a combined criterion, its name is first split into the individual names and the names of the sub-sub criteria is used instead of the name of the sub-criterion. This is done recursively to ensure that the order and the hierarchy of the criteria does not influence the name. Returns ------- str The alphabetically sorted names of the sub-criteria concatenated using double underscores between them. """ names = (criterion.name() for criterion in self._criteria) return '__'.join(sorted(names)) def is_adversarial(self, predictions, label): for criterion in self._criteria: if not criterion.is_adversarial(predictions, label): # lazy evaluation return False return True class Misclassification(Criterion): """Defines adversarials as inputs for which the predicted class is not the original class. See Also -------- :class:`TopKMisclassification` Notes ----- Uses `numpy.argmax` to break ties. """ def name(self): return 'Top1Misclassification' def is_adversarial(self, predictions, label): top1 = torch.argmax(predictions).item() return top1 != label class ConfidentMisclassification(Criterion): """Defines adversarials as inputs for which the probability of any class other than the original is above a given threshold. Parameters ---------- p : float The threshold probability. If the probability of any class other than the original is at least p, the image is considered an adversarial. It must satisfy 0 <= p <= 1. """ def __init__(self, p): super(ConfidentMisclassification, self).__init__() assert 0 <= p <= 1 self.p = p def name(self): return '{}-{:.04f}'.format(self.__class__.__name__, self.p) def is_adversarial(self, predictions, label): top1 = torch.argmax(predictions) probabilities = F.softmax(predictions) return (torch.max(probabilities) >= self.p) and (top1 != label) class TopKMisclassification(Criterion): """Defines adversarials as inputs for which the original class is not one of the top k predicted classes. For k = 1, the :class:`Misclassification` class provides a more efficient implementation. Parameters ---------- k : int Number of top predictions to which the reference label is compared to. See Also -------- :class:`Misclassification` : Provides a more effcient implementation for k = 1. Notes ----- Uses `numpy.argsort` to break ties. """ def __init__(self, k): super(TopKMisclassification, self).__init__() self.k = k def name(self): return 'Top{}Misclassification'.format(self.k) def is_adversarial(self, predictions, label): topk = torch.argsort(predictions)[-self.k:] return label not in topk class TargetClass(Criterion): """Defines adversarials as inputs for which the predicted class is the given target class. Parameters ---------- target_class : int The target class that needs to be predicted for an image to be considered an adversarial. Notes ----- Uses `numpy.argmax` to break ties. 
""" def __init__(self, target_class=None): super(TargetClass, self).__init__() self._target_class = target_class def target_class(self): return self._target_class def name(self): return '{}-{}'.format(self.__class__.__name__, self.target_class()) def is_adversarial(self, predictions, label=None): top1 = torch.argmax(predictions,dim=-1).item() return top1 == self.target_class() # target class 其实是true label class OriginalClassProbability(Criterion): """Defines adversarials as inputs for which the probability of the original class is below a given threshold. This criterion alone does not guarantee that the class predicted for the adversarial image is not the original class (unless p < 1 / number of classes). Therefore, it should usually be combined with a classifcation criterion. Parameters ---------- p : float The threshold probability. If the probability of the original class is below this threshold, the image is considered an adversarial. It must satisfy 0 <= p <= 1. """ def __init__(self, p): super(OriginalClassProbability, self).__init__() assert 0 <= p <= 1 self.p = p def name(self): return '{}-{:.04f}'.format(self.__class__.__name__, self.p) def is_adversarial(self, predictions, label): probabilities = F.softmax(predictions) return probabilities[label] < self.p class TargetClassProbability(Criterion): """Defines adversarials as inputs for which the probability of a given target class is above a given threshold. If the threshold is below 0.5, this criterion does not guarantee that the class predicted for the adversarial image is not the original class. In that case, it should usually be combined with a classification criterion. Parameters ---------- target_class : int The target class for which the predicted probability must be above the threshold probability p, otherwise the image is not considered an adversarial. p : float The threshold probability. If the probability of the target class is above this threshold, the image is considered an adversarial. It must satisfy 0 <= p <= 1. """ def __init__(self, target_class, p): super(TargetClassProbability, self).__init__() self._target_class = target_class assert 0 <= p <= 1 self.p = p def target_class(self): return self._target_class def name(self): return '{}-{}-{:.04f}'.format( self.__class__.__name__, self.target_class(), self.p) def is_adversarial(self, predictions, label): probabilities = softmax(predictions) return probabilities[self.target_class()] > self.p
25.97861
93
0.645327
from __future__ import division import sys import abc import torch abstractmethod = abc.abstractmethod if sys.version_info >= (3, 4): ABC = abc.ABC else: ABC = abc.ABCMeta('ABC', (), {}) import functools from numbers import Number from torch.nn import functional as F import numpy as np @functools.total_ordering class Distance(ABC): def __init__( self, reference=None, other=None, bounds=None, value=None): if value is not None: assert isinstance(value, Number) assert reference is None assert other is None assert bounds is None self.reference = None self.other = None self._bounds = None self._value = value self._gradient = None else: self.reference = reference self.other = other self._bounds = bounds self._value, self._gradient = self._calculate() assert self._value is not None @property def value(self): return self._value @property def gradient(self): return self._gradient @abstractmethod def _calculate(self): raise NotImplementedError def name(self): return self.__class__.__name__ def __str__(self): return '{} = {:.6e}'.format(self.name(), self._value) def __repr__(self): return self.__str__() def __eq__(self, other): if other.__class__ != self.__class__: raise TypeError('Comparisons are only possible between the same distance types.') return self.value == other.value def __lt__(self, other): if other.__class__ != self.__class__: raise TypeError('Comparisons are only possible between the same distance types.') return self.value < other.value class MeanSquaredDistance(Distance): def _calculate(self): min_, max_ = self._bounds n = self.reference.numel() f = n * (max_ - min_)**2 diff = self.other - self.reference value = torch.dot(diff.view(-1), diff.view(-1)).item() / f self._g_diff = diff self._g_f = f gradient = None return value, gradient @property def gradient(self): if self._gradient is None: self._gradient = self._g_diff / (self._g_f / 2) return self._gradient def __str__(self): return 'normalized MSE = {:.2e}'.format(self._value) MSE = MeanSquaredDistance class MeanAbsoluteDistance(Distance): def _calculate(self): min_, max_ = self._bounds diff = (self.other - self.reference) / (max_ - min_) value = torch.mean(torch.abs(diff)).type(torch.float64) n = self.reference.size gradient = 1 / n * torch.sign(diff) / (max_ - min_) return value, gradient def __str__(self): return 'normalized MAE = {:.2e}'.format(self._value) MAE = MeanAbsoluteDistance class Linfinity(Distance): def _calculate(self): min_, max_ = self._bounds diff = (self.other - self.reference) / (max_ - min_) value = torch.max(torch.abs(diff)).type(torch.float64) gradient = None return value, gradient @property def gradient(self): raise NotImplementedError def __str__(self): return 'normalized Linf distance = {:.2e}'.format(self._value) Linf = Linfinity class L0(Distance): def _calculate(self): diff = self.other - self.reference value = torch.sum(diff != 0) gradient = None return value, gradient @property def gradient(self): raise NotImplementedError def __str__(self): return 'L0 distance = {}'.format(self._value) class Criterion(ABC): def name(self): return self.__class__.__name__ @abstractmethod def is_adversarial(self, predictions, label): raise NotImplementedError def __and__(self, other): return CombinedCriteria(self, other) class CombinedCriteria(Criterion): def __init__(self, *criteria): super(CombinedCriteria, self).__init__() self._criteria = criteria def name(self): names = (criterion.name() for criterion in self._criteria) return '__'.join(sorted(names)) def is_adversarial(self, predictions, label): for criterion in 
self._criteria: if not criterion.is_adversarial(predictions, label): return False return True class Misclassification(Criterion): def name(self): return 'Top1Misclassification' def is_adversarial(self, predictions, label): top1 = torch.argmax(predictions).item() return top1 != label class ConfidentMisclassification(Criterion): def __init__(self, p): super(ConfidentMisclassification, self).__init__() assert 0 <= p <= 1 self.p = p def name(self): return '{}-{:.04f}'.format(self.__class__.__name__, self.p) def is_adversarial(self, predictions, label): top1 = torch.argmax(predictions) probabilities = F.softmax(predictions) return (torch.max(probabilities) >= self.p) and (top1 != label) class TopKMisclassification(Criterion): def __init__(self, k): super(TopKMisclassification, self).__init__() self.k = k def name(self): return 'Top{}Misclassification'.format(self.k) def is_adversarial(self, predictions, label): topk = torch.argsort(predictions)[-self.k:] return label not in topk class TargetClass(Criterion): def __init__(self, target_class=None): super(TargetClass, self).__init__() self._target_class = target_class def target_class(self): return self._target_class def name(self): return '{}-{}'.format(self.__class__.__name__, self.target_class()) def is_adversarial(self, predictions, label=None): top1 = torch.argmax(predictions,dim=-1).item() return top1 == self.target_class() class OriginalClassProbability(Criterion): def __init__(self, p): super(OriginalClassProbability, self).__init__() assert 0 <= p <= 1 self.p = p def name(self): return '{}-{:.04f}'.format(self.__class__.__name__, self.p) def is_adversarial(self, predictions, label): probabilities = F.softmax(predictions) return probabilities[label] < self.p class TargetClassProbability(Criterion): def __init__(self, target_class, p): super(TargetClassProbability, self).__init__() self._target_class = target_class assert 0 <= p <= 1 self.p = p def target_class(self): return self._target_class def name(self): return '{}-{}-{:.04f}'.format( self.__class__.__name__, self.target_class(), self.p) def is_adversarial(self, predictions, label): probabilities = softmax(predictions) return probabilities[self.target_class()] > self.p
true
true
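A short usage sketch for the distance and criterion classes in the row above, assuming the file is importable as QEBATangentAttack.utils; the tensors, bounds, and label below are made up for illustration.

import torch
from QEBATangentAttack.utils import (MeanSquaredDistance, Misclassification,
                                     OriginalClassProbability)

reference = torch.rand(3, 32, 32)                       # original image in [0, 1]
perturbed = reference + 0.01 * torch.randn(3, 32, 32)   # slightly perturbed copy

dist = MeanSquaredDistance(reference, perturbed, bounds=(0.0, 1.0))
print(dist)                                             # "normalized MSE = ..."

logits = torch.randn(10)                                # pre-softmax scores, 10 classes
criterion = Misclassification() & OriginalClassProbability(p=0.1)
print(criterion.name(), criterion.is_adversarial(logits, label=3))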
1c4a01f57bcc1a7f20369f01c8316e7174a4aa93
2,789
py
Python
src/data_upload/batch.py
yourtrading-ai/py_yourtrading_ai
b69424f2afc40fe258c7ddae2fb47acc383ecbe5
[ "MIT" ]
null
null
null
src/data_upload/batch.py
yourtrading-ai/py_yourtrading_ai
b69424f2afc40fe258c7ddae2fb47acc383ecbe5
[ "MIT" ]
null
null
null
src/data_upload/batch.py
yourtrading-ai/py_yourtrading_ai
b69424f2afc40fe258c7ddae2fb47acc383ecbe5
[ "MIT" ]
null
null
null
import asyncio import io import ssl import aiohttp import aleph_client.asynchronous import certifi import pandas as pd from data_upload.data_utils import clean_time_duplicates def get_download_url(symbol, interval="hourly"): if interval == "daily": interval = "d" elif interval == "hourly": interval = "1h" elif interval == "minutely": interval = "minute" return f"https://www.cryptodatadownload.com/cdd/Binance_{symbol}USDT_{interval}.csv" # Code for all async # responses = asyncio.get_event_loop().run_until_complete(post_all_to_aleph_async(currencies)) # hashes = [resp['item_hash'] for resp in responses] async def post_to_aleph_async(account, client, symbol, interval="hourly"): url = get_download_url(symbol, interval) sslcontext = ssl.create_default_context(cafile=certifi.where()) async with client.get(url, ssl=sslcontext) as response: with io.StringIO(await response.text()) as text_io: df = pd.read_csv(text_io, header=1) clean_time_duplicates(df) print(df.describe()) return await aleph_client.asynchronous.create_post(account=account, post_content=df.to_dict(), post_type="ohlcv_timeseries", channel="TEST-CRYPTODATADOWNLOAD") async def post_all_to_aleph_async(account, symbols: list, interval="hourly"): async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(limit_per_host=4)) as client: futures = [post_to_aleph_async(account, client, symbol, interval) for symbol in symbols] return await asyncio.gather(*futures) def post_to_aleph(account, url, amend_hash=None): df = pd.read_csv(url, header=1) print(df.describe()) post_type = 'ohlcv_timeseries' if amend_hash is None else 'amend' return aleph_client.create_post(account=account, post_content=df.describe().to_dict(), post_type=post_type, channel="TEST-CRYPTODATADOWNLOAD", ref=amend_hash) def post_all_to_aleph(account, symbols: list, amend_hashes=None, interval="hourly"): hashes = {} for symbol in symbols: url = get_download_url(symbol, interval) if amend_hashes: resp = post_to_aleph(account, url, amend_hashes[symbol]) print(f"Amended {symbol}: {amend_hashes[symbol]}") else: resp = post_to_aleph(account, url) print(f"Posted {symbol}: {resp['item_hash']}") hashes[symbol] = resp['item_hash'] return hashes
39.842857
113
0.627106
import asyncio import io import ssl import aiohttp import aleph_client.asynchronous import certifi import pandas as pd from data_upload.data_utils import clean_time_duplicates def get_download_url(symbol, interval="hourly"): if interval == "daily": interval = "d" elif interval == "hourly": interval = "1h" elif interval == "minutely": interval = "minute" return f"https://www.cryptodatadownload.com/cdd/Binance_{symbol}USDT_{interval}.csv" async def post_to_aleph_async(account, client, symbol, interval="hourly"): url = get_download_url(symbol, interval) sslcontext = ssl.create_default_context(cafile=certifi.where()) async with client.get(url, ssl=sslcontext) as response: with io.StringIO(await response.text()) as text_io: df = pd.read_csv(text_io, header=1) clean_time_duplicates(df) print(df.describe()) return await aleph_client.asynchronous.create_post(account=account, post_content=df.to_dict(), post_type="ohlcv_timeseries", channel="TEST-CRYPTODATADOWNLOAD") async def post_all_to_aleph_async(account, symbols: list, interval="hourly"): async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(limit_per_host=4)) as client: futures = [post_to_aleph_async(account, client, symbol, interval) for symbol in symbols] return await asyncio.gather(*futures) def post_to_aleph(account, url, amend_hash=None): df = pd.read_csv(url, header=1) print(df.describe()) post_type = 'ohlcv_timeseries' if amend_hash is None else 'amend' return aleph_client.create_post(account=account, post_content=df.describe().to_dict(), post_type=post_type, channel="TEST-CRYPTODATADOWNLOAD", ref=amend_hash) def post_all_to_aleph(account, symbols: list, amend_hashes=None, interval="hourly"): hashes = {} for symbol in symbols: url = get_download_url(symbol, interval) if amend_hashes: resp = post_to_aleph(account, url, amend_hashes[symbol]) print(f"Amended {symbol}: {amend_hashes[symbol]}") else: resp = post_to_aleph(account, url) print(f"Posted {symbol}: {resp['item_hash']}") hashes[symbol] = resp['item_hash'] return hashes
true
true
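A hedged sketch of how the batch-upload helpers in the row above might be driven. The account object, symbol list, and event-loop handling are assumptions on my part, and the calls hit cryptodatadownload.com and the Aleph network, so treat this as illustrative only.

import asyncio
from data_upload.batch import post_all_to_aleph_async   # module from the row above

def upload_hourly_candles(account):
    """Fetch hourly OHLCV CSVs for a few symbols and post them to Aleph."""
    symbols = ["BTC", "ETH"]                             # hypothetical symbol list
    responses = asyncio.get_event_loop().run_until_complete(
        post_all_to_aleph_async(account, symbols, interval="hourly"))
    return {sym: resp["item_hash"] for sym, resp in zip(symbols, responses)}

# `account` would come from aleph_client (e.g. an Ethereum account object); not shown here.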
1c4a02071d1bd4dc5a2bf4caa5a4ce0f0c07ce3f
3,426
py
Python
ansible/venv/lib/python2.7/site-packages/ansible/module_utils/facts/virtual/sysctl.py
gvashchenkolineate/gvashchenkolineate_infra_trytravis
0fb18850afe0d8609693ba4b23f29c7cda17d97f
[ "MIT" ]
17
2017-06-07T23:15:01.000Z
2021-08-30T14:32:36.000Z
ansible/venv/lib/python2.7/site-packages/ansible/module_utils/facts/virtual/sysctl.py
gvashchenkolineate/gvashchenkolineate_infra_trytravis
0fb18850afe0d8609693ba4b23f29c7cda17d97f
[ "MIT" ]
9
2017-06-25T03:31:52.000Z
2021-05-17T23:43:12.000Z
ansible/venv/lib/python2.7/site-packages/ansible/module_utils/facts/virtual/sysctl.py
gvashchenkolineate/gvashchenkolineate_infra_trytravis
0fb18850afe0d8609693ba4b23f29c7cda17d97f
[ "MIT" ]
3
2018-05-26T21:31:22.000Z
2019-09-28T17:00:45.000Z
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re class VirtualSysctlDetectionMixin(object): def detect_sysctl(self): self.sysctl_path = self.module.get_bin_path('sysctl') def detect_virt_product(self, key): virtual_product_facts = {} self.detect_sysctl() # FIXME: exit early on falsey self.sysctl_path and unindent if self.sysctl_path: rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key)) if rc == 0: if re.match('(KVM|kvm|Bochs|SmartDC).*', out): virtual_product_facts['virtualization_type'] = 'kvm' virtual_product_facts['virtualization_role'] = 'guest' elif re.match('.*VMware.*', out): virtual_product_facts['virtualization_type'] = 'VMware' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'VirtualBox': virtual_product_facts['virtualization_type'] = 'virtualbox' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'HVM domU': virtual_product_facts['virtualization_type'] = 'xen' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'Parallels': virtual_product_facts['virtualization_type'] = 'parallels' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'RHEV Hypervisor': virtual_product_facts['virtualization_type'] = 'RHEV' virtual_product_facts['virtualization_role'] = 'guest' elif (key == 'security.jail.jailed') and (out.rstrip() == '1'): virtual_product_facts['virtualization_type'] = 'jails' virtual_product_facts['virtualization_role'] = 'guest' return virtual_product_facts def detect_virt_vendor(self, key): virtual_vendor_facts = {} self.detect_sysctl() # FIXME: exit early on falsey self.sysctl_path and unindent if self.sysctl_path: rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key)) if rc == 0: if out.rstrip() == 'QEMU': virtual_vendor_facts['virtualization_type'] = 'kvm' virtual_vendor_facts['virtualization_role'] = 'guest' if out.rstrip() == 'OpenBSD': virtual_vendor_facts['virtualization_type'] = 'vmm' virtual_vendor_facts['virtualization_role'] = 'guest' return virtual_vendor_facts
47.583333
88
0.620257
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re class VirtualSysctlDetectionMixin(object): def detect_sysctl(self): self.sysctl_path = self.module.get_bin_path('sysctl') def detect_virt_product(self, key): virtual_product_facts = {} self.detect_sysctl() if self.sysctl_path: rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key)) if rc == 0: if re.match('(KVM|kvm|Bochs|SmartDC).*', out): virtual_product_facts['virtualization_type'] = 'kvm' virtual_product_facts['virtualization_role'] = 'guest' elif re.match('.*VMware.*', out): virtual_product_facts['virtualization_type'] = 'VMware' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'VirtualBox': virtual_product_facts['virtualization_type'] = 'virtualbox' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'HVM domU': virtual_product_facts['virtualization_type'] = 'xen' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'Parallels': virtual_product_facts['virtualization_type'] = 'parallels' virtual_product_facts['virtualization_role'] = 'guest' elif out.rstrip() == 'RHEV Hypervisor': virtual_product_facts['virtualization_type'] = 'RHEV' virtual_product_facts['virtualization_role'] = 'guest' elif (key == 'security.jail.jailed') and (out.rstrip() == '1'): virtual_product_facts['virtualization_type'] = 'jails' virtual_product_facts['virtualization_role'] = 'guest' return virtual_product_facts def detect_virt_vendor(self, key): virtual_vendor_facts = {} self.detect_sysctl() if self.sysctl_path: rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key)) if rc == 0: if out.rstrip() == 'QEMU': virtual_vendor_facts['virtualization_type'] = 'kvm' virtual_vendor_facts['virtualization_role'] = 'guest' if out.rstrip() == 'OpenBSD': virtual_vendor_facts['virtualization_type'] = 'vmm' virtual_vendor_facts['virtualization_role'] = 'guest' return virtual_vendor_facts
true
true
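The mixin in the row above maps sysctl output strings onto virtualization facts. The standalone sketch below restates part of that string-matching logic outside Ansible, purely to illustrate the mapping rules; it is not part of the module's API.

import re

def guess_virt_product(key, out):
    # Same matching rules as detect_virt_product above, minus the module plumbing.
    out = out.rstrip()
    if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
        return {'virtualization_type': 'kvm', 'virtualization_role': 'guest'}
    if re.match('.*VMware.*', out):
        return {'virtualization_type': 'VMware', 'virtualization_role': 'guest'}
    if out == 'VirtualBox':
        return {'virtualization_type': 'virtualbox', 'virtualization_role': 'guest'}
    if key == 'security.jail.jailed' and out == '1':
        return {'virtualization_type': 'jails', 'virtualization_role': 'guest'}
    return {}

print(guess_virt_product('hw.product', 'VMware Virtual Platform'))   # -> VMware / guest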
1c4a02c8305edf5419beb0b9ec01a9f4757b6f61
728
py
Python
app/test/test4.py
saint816/fishbook
80a4b563a05086c85eb347286d28bb0e6258ff1c
[ "MIT" ]
null
null
null
app/test/test4.py
saint816/fishbook
80a4b563a05086c85eb347286d28bb0e6258ff1c
[ "MIT" ]
null
null
null
app/test/test4.py
saint816/fishbook
80a4b563a05086c85eb347286d28bb0e6258ff1c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ ------------------------------------------------- File Name: test4 Description : LocalStack线程隔离特性 Author : pengsheng date: 2019-04-21 ------------------------------------------------- """ import threading import time from werkzeug.local import LocalStack my_stack = LocalStack() my_stack.push(1) print('main after push: ' + str(my_stack.top)) def worker(): print('child thread before push: ' + str(my_stack.top)) my_stack.push(2) print('child thread after push: ' + str(my_stack.top)) child_thread = threading.Thread(target=worker, name='child_thread') child_thread.start() time.sleep(1) print('finally value at main thread: ' + str(my_stack.top))
23.483871
67
0.582418
import threading import time from werkzeug.local import LocalStack my_stack = LocalStack() my_stack.push(1) print('main after push: ' + str(my_stack.top)) def worker(): print('child thread before push: ' + str(my_stack.top)) my_stack.push(2) print('child thread after push: ' + str(my_stack.top)) child_thread = threading.Thread(target=worker, name='child_thread') child_thread.start() time.sleep(1) print('finally value at main thread: ' + str(my_stack.top))
true
true
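The script in the row above demonstrates that a werkzeug LocalStack pushed in the main thread is not visible from a child thread. The same isolation can be observed with the standard library alone; a small companion sketch using threading.local, with the expected output noted in comments.

import threading

local = threading.local()
local.value = 1

def worker():
    # The child thread gets its own storage, so 'value' is unset here.
    print(getattr(local, 'value', None))   # -> None
    local.value = 2
    print(local.value)                     # -> 2

t = threading.Thread(target=worker)
t.start()
t.join()
print(local.value)                         # -> 1, the main thread's value is untouched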
1c4a03d70ff26f631a6d41a2c5e4ca7dcb12136c
3,778
py
Python
generate_cloth_img.py
otsubo/CIFAR-ConvolutionalAutoEncoder-Chainer
bbda81dc7b52f42e07e9daaff38ce7453b24e008
[ "MIT" ]
null
null
null
generate_cloth_img.py
otsubo/CIFAR-ConvolutionalAutoEncoder-Chainer
bbda81dc7b52f42e07e9daaff38ce7453b24e008
[ "MIT" ]
null
null
null
generate_cloth_img.py
otsubo/CIFAR-ConvolutionalAutoEncoder-Chainer
bbda81dc7b52f42e07e9daaff38ce7453b24e008
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Sat Jul 21 08:51:18 2018 @author: user """ import argparse import os import os.path as osp import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm import chainer from chainer import cuda from chainer.datasets import get_cifar10 from chainer import dataset from chainer import Variable from chainer import serializers import chainer.functions as F import numpy as np from sklearn.model_selection import train_test_split from skimage.io import imread import network # Load data class LoadDataset(dataset.DatasetMixin): def __init__(self, split, return_image=False): assert split in ('train', 'val') ids = self._get_ids() iter_train, iter_val = train_test_split( ids, test_size=0.2, random_state=np.random.RandomState(1234)) self.ids = iter_train if split == 'train' else iter_val self._return_image = return_image def __len__(self): return len(self.ids) def _get_ids(self): ids = [] dataset_dir = chainer.dataset.get_dataset_directory( '2019_11_28_pr2') for data_id in os.listdir(dataset_dir): ids.append(osp.join(dataset_dir , data_id)) return ids def img_to_datum(self, img): img = img.copy() datum = img.astype(np.float32) datum = datum[:, :, ::-1] #RGB -> BGR datum = datum.transpose((2, 0, 1)) return datum def get_example(self, i): id = self.ids[i] image_file = osp.join(id , "image.png") img = imread(image_file) datum = self.img_to_datum(img) if self._return_image: return img else: return datum, datum def main(): parser = argparse.ArgumentParser() parser.add_argument('--gpu', '-g', type=int, default=-1) parser.add_argument('--model', '-m', type=str, default="./results/cloth/model") parser.add_argument('--begin', '-b', type=int, default=0) args = parser.parse_args() # Set up a neural network to train. test = LoadDataset(split='val') model = network.CAE(3,3, return_out=True) if args.model != None: print( "loading model from " + args.model ) serializers.load_npz(args.model, model) # Show 64 images fig = plt.figure(figsize=(6,6)) plt.title("Original images: first rows,\n Predicted images: second rows") plt.axis('off') plt.tight_layout() pbar = tqdm(total=8) #import ipdb; ipdb.set_trace() for i in range(2): for j in range(2): ax = fig.add_subplot(4, 2, i*4+j+1, xticks=[], yticks=[]) x, t = test[i*2+j] xT = x.transpose(1, 2, 0) xT = xT.astype(np.uint8) ax.imshow(xT, cmap=plt.cm.bone, interpolation='nearest') x = np.expand_dims(x, 0) t = np.expand_dims(t, 0) if args.gpu >= 0: cuda.get_device_from_id(0).use() model.to_gpu() x = cuda.cupy.array(x) t = cuda.cupy.array(t) predicted, loss = model(Variable(x), Variable(t)) #print(predicted.shape) #print(loss) predicted = F.transpose(predicted[0], (1, 2, 0)) predicted = cuda.to_cpu(predicted.data) #Variable to numpy predicted = predicted * 255 predicted = predicted.astype(np.uint8) ax = fig.add_subplot(4, 2, i*4+j+3, xticks=[], yticks=[]) ax.imshow(predicted, cmap=plt.cm.bone, interpolation='nearest') pbar.update(1) pbar.close() plt.savefig("result.png") plt.show() plt.close() if __name__ == '__main__': main()
28.839695
83
0.588936
import argparse import os import os.path as osp import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm import chainer from chainer import cuda from chainer.datasets import get_cifar10 from chainer import dataset from chainer import Variable from chainer import serializers import chainer.functions as F import numpy as np from sklearn.model_selection import train_test_split from skimage.io import imread import network class LoadDataset(dataset.DatasetMixin): def __init__(self, split, return_image=False): assert split in ('train', 'val') ids = self._get_ids() iter_train, iter_val = train_test_split( ids, test_size=0.2, random_state=np.random.RandomState(1234)) self.ids = iter_train if split == 'train' else iter_val self._return_image = return_image def __len__(self): return len(self.ids) def _get_ids(self): ids = [] dataset_dir = chainer.dataset.get_dataset_directory( '2019_11_28_pr2') for data_id in os.listdir(dataset_dir): ids.append(osp.join(dataset_dir , data_id)) return ids def img_to_datum(self, img): img = img.copy() datum = img.astype(np.float32) datum = datum[:, :, ::-1] datum = datum.transpose((2, 0, 1)) return datum def get_example(self, i): id = self.ids[i] image_file = osp.join(id , "image.png") img = imread(image_file) datum = self.img_to_datum(img) if self._return_image: return img else: return datum, datum def main(): parser = argparse.ArgumentParser() parser.add_argument('--gpu', '-g', type=int, default=-1) parser.add_argument('--model', '-m', type=str, default="./results/cloth/model") parser.add_argument('--begin', '-b', type=int, default=0) args = parser.parse_args() test = LoadDataset(split='val') model = network.CAE(3,3, return_out=True) if args.model != None: print( "loading model from " + args.model ) serializers.load_npz(args.model, model) fig = plt.figure(figsize=(6,6)) plt.title("Original images: first rows,\n Predicted images: second rows") plt.axis('off') plt.tight_layout() pbar = tqdm(total=8) for i in range(2): for j in range(2): ax = fig.add_subplot(4, 2, i*4+j+1, xticks=[], yticks=[]) x, t = test[i*2+j] xT = x.transpose(1, 2, 0) xT = xT.astype(np.uint8) ax.imshow(xT, cmap=plt.cm.bone, interpolation='nearest') x = np.expand_dims(x, 0) t = np.expand_dims(t, 0) if args.gpu >= 0: cuda.get_device_from_id(0).use() model.to_gpu() x = cuda.cupy.array(x) t = cuda.cupy.array(t) predicted, loss = model(Variable(x), Variable(t)) predicted = F.transpose(predicted[0], (1, 2, 0)) predicted = cuda.to_cpu(predicted.data) predicted = predicted * 255 predicted = predicted.astype(np.uint8) ax = fig.add_subplot(4, 2, i*4+j+3, xticks=[], yticks=[]) ax.imshow(predicted, cmap=plt.cm.bone, interpolation='nearest') pbar.update(1) pbar.close() plt.savefig("result.png") plt.show() plt.close() if __name__ == '__main__': main()
true
true
1c4a0522b17523bfaff0ea4d0aee5f56a95b355e
496
py
Python
bookmarks/models.py
justinborek/djorg
f6aa9cb23f0476c032ac5250045879962cc11072
[ "MIT" ]
null
null
null
bookmarks/models.py
justinborek/djorg
f6aa9cb23f0476c032ac5250045879962cc11072
[ "MIT" ]
null
null
null
bookmarks/models.py
justinborek/djorg
f6aa9cb23f0476c032ac5250045879962cc11072
[ "MIT" ]
null
null
null
from uuid import uuid4 from datetime import datetime from django.db import models class Bookmark(models.Model): id = models.UUIDField(primary_key=True, default=uuid4, editable=False) url = models.URLField('URL', unique=True) name = models.CharField(max_length=200) notes = models.TextField(blank=True) created_at = models.DateTimeField(default=datetime.now, blank=True) last_modified = models.DateTimeField(auto_now=True) def __str__(self): return self.name
33.066667
74
0.743952
from uuid import uuid4 from datetime import datetime from django.db import models class Bookmark(models.Model): id = models.UUIDField(primary_key=True, default=uuid4, editable=False) url = models.URLField('URL', unique=True) name = models.CharField(max_length=200) notes = models.TextField(blank=True) created_at = models.DateTimeField(default=datetime.now, blank=True) last_modified = models.DateTimeField(auto_now=True) def __str__(self): return self.name
true
true
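A brief sketch of how the Bookmark model in the row above might be exercised from a Django shell; it assumes the bookmarks app is installed and migrated in some project, which is not shown in the row.

# python manage.py shell
from bookmarks.models import Bookmark

b = Bookmark.objects.create(url="https://example.com", name="Example", notes="demo entry")
print(b.id, b.created_at)                                      # UUID primary key, timestamp
print(Bookmark.objects.filter(name__icontains="exam").count())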
1c4a0541e9c6f3dabd3305439a3287d532a147dd
1,361
py
Python
app/core/tests/test_admin.py
kim-sun/recipe-app-api
c0c598f2188c42c820178ea7910c34ccdf641393
[ "MIT" ]
null
null
null
app/core/tests/test_admin.py
kim-sun/recipe-app-api
c0c598f2188c42c820178ea7910c34ccdf641393
[ "MIT" ]
null
null
null
app/core/tests/test_admin.py
kim-sun/recipe-app-api
c0c598f2188c42c820178ea7910c34ccdf641393
[ "MIT" ]
null
null
null
from django.test import TestCase, Client from django.contrib.auth import get_user_model from django.urls import reverse class AdminSiteTests(TestCase): def setUp(self): self.client = Client() self.admin_user = get_user_model().objects.create_superuser( email='admin@gmail.com', password='password123' ) self.client.force_login(self.admin_user) self.user = get_user_model().objects.create_user( email='test@gmail.com', password='password123', name='Test user full name' ) def test_users_listed(self): """Test that users are listed on user page""" url = reverse('admin:core_user_changelist') res = self.client.get(url) self.assertContains(res, self.user.name) self.assertContains(res, self.user.email) def test_user_change_page(self): """Test that the user edit page works""" url = reverse('admin:core_user_change', args=[self.user.id]) # /admin/core/user/{id} res = self.client.get(url) # response self.assertEqual(res.status_code, 200) def test_create_user_page(self): """Test that the create user page works""" url = reverse('admin:core_user_add') res = self.client.get(url) self.assertEqual(res.status_code, 200)
31.651163
68
0.635562
from django.test import TestCase, Client from django.contrib.auth import get_user_model from django.urls import reverse class AdminSiteTests(TestCase): def setUp(self): self.client = Client() self.admin_user = get_user_model().objects.create_superuser( email='admin@gmail.com', password='password123' ) self.client.force_login(self.admin_user) self.user = get_user_model().objects.create_user( email='test@gmail.com', password='password123', name='Test user full name' ) def test_users_listed(self): url = reverse('admin:core_user_changelist') res = self.client.get(url) self.assertContains(res, self.user.name) self.assertContains(res, self.user.email) def test_user_change_page(self): url = reverse('admin:core_user_change', args=[self.user.id]) res = self.client.get(url) self.assertEqual(res.status_code, 200) def test_create_user_page(self): url = reverse('admin:core_user_add') res = self.client.get(url) self.assertEqual(res.status_code, 200)
true
true
1c4a0676208607006d811b2a23a60b460dd13518
2,403
py
Python
tests/api_tests/abstrac_api_test.py
kirillskor/dedoc
7793a1be2220a26e7520521306351dfc0a9c8d98
[ "Apache-2.0" ]
null
null
null
tests/api_tests/abstrac_api_test.py
kirillskor/dedoc
7793a1be2220a26e7520521306351dfc0a9c8d98
[ "Apache-2.0" ]
null
null
null
tests/api_tests/abstrac_api_test.py
kirillskor/dedoc
7793a1be2220a26e7520521306351dfc0a9c8d98
[ "Apache-2.0" ]
null
null
null
import json import os import requests import unittest class AbstractTestApiDocReader(unittest.TestCase): data_directory_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data")) def _check_metainfo(self, metainfo, actual_type: str, actual_name: str): self.assertEqual(metainfo['file_type'], actual_type) self.assertEqual(metainfo['file_name'], actual_name) def _get_host(self): host = os.environ.get('DOC_READER_HOST', 'localhost') return host def _get_port(self): port = int(os.environ.get('DOCREADER_PORT', '1231')) return port def _get_abs_path(self, file_name: str) -> str: return os.path.join(self.data_directory_path, file_name) def _send_request(self, file_name: str, data: dict = None, expected_code: int = 200): """ send file `file_name` in post request with `data` as parameters. Expects that response return code `expected_code` :param file_name: name of file (should lie dedoc/tests/data folder :param data: parameter dictionary (here you can put language for example) :param expected_code: expected http response code. 200 for normal request :return: result from json """ if data is None: data = {} host = self._get_host() port = self._get_port() abs_path = self._get_abs_path(file_name) with open(abs_path, 'rb') as file: files = {'file': (file_name, file)} r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), files=files, data=data) self.assertEqual(expected_code, r.status_code) if expected_code != 200: return None if "return_html" in data and data["return_html"]: return r.content.decode() else: return json.loads(r.content.decode()) def _send_request_wo_file(self, data: dict = None, expected_code: int = 200): host = self._get_host() port = self._get_port() if data is None: data = {} r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), data=data) self.assertEqual(expected_code, r.status_code) if expected_code != 200: return None result = json.loads(r.content.decode()) return result
34.826087
113
0.62422
import json import os import requests import unittest class AbstractTestApiDocReader(unittest.TestCase): data_directory_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data")) def _check_metainfo(self, metainfo, actual_type: str, actual_name: str): self.assertEqual(metainfo['file_type'], actual_type) self.assertEqual(metainfo['file_name'], actual_name) def _get_host(self): host = os.environ.get('DOC_READER_HOST', 'localhost') return host def _get_port(self): port = int(os.environ.get('DOCREADER_PORT', '1231')) return port def _get_abs_path(self, file_name: str) -> str: return os.path.join(self.data_directory_path, file_name) def _send_request(self, file_name: str, data: dict = None, expected_code: int = 200): if data is None: data = {} host = self._get_host() port = self._get_port() abs_path = self._get_abs_path(file_name) with open(abs_path, 'rb') as file: files = {'file': (file_name, file)} r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), files=files, data=data) self.assertEqual(expected_code, r.status_code) if expected_code != 200: return None if "return_html" in data and data["return_html"]: return r.content.decode() else: return json.loads(r.content.decode()) def _send_request_wo_file(self, data: dict = None, expected_code: int = 200): host = self._get_host() port = self._get_port() if data is None: data = {} r = requests.post("http://{host}:{port}/upload".format(host=host, port=port), data=data) self.assertEqual(expected_code, r.status_code) if expected_code != 200: return None result = json.loads(r.content.decode()) return result
true
true
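A sketch of a concrete test built on the abstract reader test in the row above. The file name, request parameters, and assertion are hypothetical, and a docreader service must be reachable at DOC_READER_HOST:DOCREADER_PORT for it to pass.

from tests.api_tests.abstrac_api_test import AbstractTestApiDocReader

class TestApiExampleDoc(AbstractTestApiDocReader):           # hypothetical concrete test
    def test_example_document(self):
        # "example.docx" would have to exist under tests/data; parameters are illustrative.
        result = self._send_request("example.docx", data={"language": "eng"})
        self.assertIsInstance(result, dict)                  # parsed JSON from the service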
1c4a074100935fa59bbb9f0995aad8db11245ba3
6,180
py
Python
mne/io/array/tests/test_array.py
fmamashli/mne-python
52f064415e7c9fa8fe243d22108dcdf3d86505b9
[ "BSD-3-Clause" ]
1
2019-12-11T05:07:08.000Z
2019-12-11T05:07:08.000Z
mne/io/array/tests/test_array.py
fmamashli/mne-python
52f064415e7c9fa8fe243d22108dcdf3d86505b9
[ "BSD-3-Clause" ]
23
2017-09-12T11:08:26.000Z
2019-10-04T11:11:29.000Z
mne/io/array/tests/test_array.py
fmamashli/mne-python
52f064415e7c9fa8fe243d22108dcdf3d86505b9
[ "BSD-3-Clause" ]
3
2019-01-28T13:48:00.000Z
2019-07-10T16:02:11.000Z
# Author: Eric Larson <larson.eric.d@gmail.com> # # License: BSD (3-clause) import os.path as op import numpy as np from numpy.testing import (assert_array_almost_equal, assert_allclose, assert_equal) import pytest import matplotlib.pyplot as plt from mne import find_events, Epochs, pick_types from mne.io import read_raw_fif from mne.io.array import RawArray from mne.io.tests.test_raw import _test_raw_reader from mne.io.meas_info import create_info, _kind_dict from mne.utils import requires_version, run_tests_if_main from mne.channels import make_dig_montage base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data') fif_fname = op.join(base_dir, 'test_raw.fif') def test_long_names(): """Test long name support.""" info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error') data = np.empty((2, 1000)) raw = RawArray(data, info) assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1'] info = create_info(['a' * 16] * 11, 1000., verbose='error') data = np.empty((11, 1000)) raw = RawArray(data, info) assert raw.ch_names == ['a' * 12 + '-%s' % ii for ii in range(11)] def test_array_copy(): """Test copying during construction.""" info = create_info(1, 1000.) data = np.empty((1, 1000)) # 'auto' (default) raw = RawArray(data, info) assert raw._data is data assert raw.info is not info raw = RawArray(data.astype(np.float32), info) assert raw._data is not data assert raw.info is not info # 'info' (more restrictive) raw = RawArray(data, info, copy='info') assert raw._data is data assert raw.info is not info with pytest.raises(ValueError, match="data copying was not .* copy='info"): RawArray(data.astype(np.float32), info, copy='info') # 'data' raw = RawArray(data, info, copy='data') assert raw._data is not data assert raw.info is info # 'both' raw = RawArray(data, info, copy='both') assert raw._data is not data assert raw.info is not info raw = RawArray(data.astype(np.float32), info, copy='both') assert raw._data is not data assert raw.info is not info # None raw = RawArray(data, info, copy=None) assert raw._data is data assert raw.info is info with pytest.raises(ValueError, match='data copying was not .* copy=None'): RawArray(data.astype(np.float32), info, copy=None) @pytest.mark.slowtest @requires_version('scipy', '0.12') def test_array_raw(): """Test creating raw from array.""" # creating raw = read_raw_fif(fif_fname).crop(2, 5) data, times = raw[:, :] sfreq = raw.info['sfreq'] ch_names = [(ch[4:] if 'STI' not in ch else ch) for ch in raw.info['ch_names']] # change them, why not types = list() for ci in range(101): types.extend(('grad', 'grad', 'mag')) types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels types.extend(['stim'] * 9) types.extend(['eeg'] * 60) picks = np.concatenate([pick_types(raw.info)[::20], pick_types(raw.info, meg=False, stim=True), pick_types(raw.info, meg=False, eeg=True)[::20]]) del raw data = data[picks] ch_names = np.array(ch_names)[picks].tolist() types = np.array(types)[picks].tolist() types.pop(-1) # wrong length pytest.raises(ValueError, create_info, ch_names, sfreq, types) # bad entry types.append('foo') pytest.raises(KeyError, create_info, ch_names, sfreq, types) types[-1] = 'eog' # default type info = create_info(ch_names, sfreq) assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0]) # use real types info = create_info(ch_names, sfreq, types) raw2 = _test_raw_reader(RawArray, test_preloading=False, data=data, info=info, first_samp=2 * data.shape[1]) data2, times2 = raw2[:, :] assert_allclose(data, data2) assert_allclose(times, times2) 
assert ('RawArray' in repr(raw2)) pytest.raises(TypeError, RawArray, info, data) # filtering picks = pick_types(raw2.info, misc=True, exclude='bads')[:4] assert_equal(len(picks), 4) raw_lp = raw2.copy() kwargs = dict(fir_design='firwin', picks=picks) raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs) raw_hp = raw2.copy() raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs) raw_bp = raw2.copy() raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4., **kwargs) raw_bs = raw2.copy() raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4., **kwargs) data, _ = raw2[picks, :] lp_data, _ = raw_lp[picks, :] hp_data, _ = raw_hp[picks, :] bp_data, _ = raw_bp[picks, :] bs_data, _ = raw_bs[picks, :] sig_dec = 15 assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec) assert_array_almost_equal(data, bp_data + bs_data, sig_dec) # plotting raw2.plot() raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False) plt.close('all') # epoching events = find_events(raw2, stim_channel='STI 014') events[:, 2] = 1 assert len(events) > 2 epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True) evoked = epochs.average() assert_equal(evoked.nave, len(events) - 1) # complex data rng = np.random.RandomState(0) data = rng.randn(1, 100) + 1j * rng.randn(1, 100) raw = RawArray(data, create_info(1, 1000., 'eeg')) assert_allclose(raw._data, data) # Using digital montage to give MNI electrode coordinates n_elec = 10 ts_size = 10000 Fs = 512. ch_names = [str(i) for i in range(n_elec)] ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist() data = np.random.rand(n_elec, ts_size) montage = make_dig_montage( ch_pos=dict(zip(ch_names, ch_pos_loc)), coord_frame='head' ) info = create_info(ch_names, Fs, 'ecog', montage=montage) raw = RawArray(data, info) raw.plot_psd(average=False) # looking for inexistent layout raw.plot_psd_topo() run_tests_if_main()
34.719101
79
0.637379
import os.path as op import numpy as np from numpy.testing import (assert_array_almost_equal, assert_allclose, assert_equal) import pytest import matplotlib.pyplot as plt from mne import find_events, Epochs, pick_types from mne.io import read_raw_fif from mne.io.array import RawArray from mne.io.tests.test_raw import _test_raw_reader from mne.io.meas_info import create_info, _kind_dict from mne.utils import requires_version, run_tests_if_main from mne.channels import make_dig_montage base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data') fif_fname = op.join(base_dir, 'test_raw.fif') def test_long_names(): info = create_info(['a' * 15 + 'b', 'a' * 16], 1000., verbose='error') data = np.empty((2, 1000)) raw = RawArray(data, info) assert raw.ch_names == ['a' * 13 + '-0', 'a' * 13 + '-1'] info = create_info(['a' * 16] * 11, 1000., verbose='error') data = np.empty((11, 1000)) raw = RawArray(data, info) assert raw.ch_names == ['a' * 12 + '-%s' % ii for ii in range(11)] def test_array_copy(): info = create_info(1, 1000.) data = np.empty((1, 1000)) raw = RawArray(data, info) assert raw._data is data assert raw.info is not info raw = RawArray(data.astype(np.float32), info) assert raw._data is not data assert raw.info is not info raw = RawArray(data, info, copy='info') assert raw._data is data assert raw.info is not info with pytest.raises(ValueError, match="data copying was not .* copy='info"): RawArray(data.astype(np.float32), info, copy='info') # 'data' raw = RawArray(data, info, copy='data') assert raw._data is not data assert raw.info is info # 'both' raw = RawArray(data, info, copy='both') assert raw._data is not data assert raw.info is not info raw = RawArray(data.astype(np.float32), info, copy='both') assert raw._data is not data assert raw.info is not info # None raw = RawArray(data, info, copy=None) assert raw._data is data assert raw.info is info with pytest.raises(ValueError, match='data copying was not .* copy=None'): RawArray(data.astype(np.float32), info, copy=None) @pytest.mark.slowtest @requires_version('scipy', '0.12') def test_array_raw(): # creating raw = read_raw_fif(fif_fname).crop(2, 5) data, times = raw[:, :] sfreq = raw.info['sfreq'] ch_names = [(ch[4:] if 'STI' not in ch else ch) for ch in raw.info['ch_names']] # change them, why not types = list() for ci in range(101): types.extend(('grad', 'grad', 'mag')) types.extend(['ecog', 'seeg', 'hbo']) # really 3 meg channels types.extend(['stim'] * 9) types.extend(['eeg'] * 60) picks = np.concatenate([pick_types(raw.info)[::20], pick_types(raw.info, meg=False, stim=True), pick_types(raw.info, meg=False, eeg=True)[::20]]) del raw data = data[picks] ch_names = np.array(ch_names)[picks].tolist() types = np.array(types)[picks].tolist() types.pop(-1) # wrong length pytest.raises(ValueError, create_info, ch_names, sfreq, types) # bad entry types.append('foo') pytest.raises(KeyError, create_info, ch_names, sfreq, types) types[-1] = 'eog' # default type info = create_info(ch_names, sfreq) assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0]) # use real types info = create_info(ch_names, sfreq, types) raw2 = _test_raw_reader(RawArray, test_preloading=False, data=data, info=info, first_samp=2 * data.shape[1]) data2, times2 = raw2[:, :] assert_allclose(data, data2) assert_allclose(times, times2) assert ('RawArray' in repr(raw2)) pytest.raises(TypeError, RawArray, info, data) # filtering picks = pick_types(raw2.info, misc=True, exclude='bads')[:4] assert_equal(len(picks), 4) raw_lp = raw2.copy() kwargs = 
dict(fir_design='firwin', picks=picks) raw_lp.filter(None, 4.0, h_trans_bandwidth=4., **kwargs) raw_hp = raw2.copy() raw_hp.filter(16.0, None, l_trans_bandwidth=4., **kwargs) raw_bp = raw2.copy() raw_bp.filter(8.0, 12.0, l_trans_bandwidth=4., h_trans_bandwidth=4., **kwargs) raw_bs = raw2.copy() raw_bs.filter(16.0, 4.0, l_trans_bandwidth=4., h_trans_bandwidth=4., **kwargs) data, _ = raw2[picks, :] lp_data, _ = raw_lp[picks, :] hp_data, _ = raw_hp[picks, :] bp_data, _ = raw_bp[picks, :] bs_data, _ = raw_bs[picks, :] sig_dec = 15 assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec) assert_array_almost_equal(data, bp_data + bs_data, sig_dec) # plotting raw2.plot() raw2.plot_psd(tmax=2., average=True, n_fft=1024, spatial_colors=False) plt.close('all') # epoching events = find_events(raw2, stim_channel='STI 014') events[:, 2] = 1 assert len(events) > 2 epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True) evoked = epochs.average() assert_equal(evoked.nave, len(events) - 1) # complex data rng = np.random.RandomState(0) data = rng.randn(1, 100) + 1j * rng.randn(1, 100) raw = RawArray(data, create_info(1, 1000., 'eeg')) assert_allclose(raw._data, data) # Using digital montage to give MNI electrode coordinates n_elec = 10 ts_size = 10000 Fs = 512. ch_names = [str(i) for i in range(n_elec)] ch_pos_loc = np.random.randint(60, size=(n_elec, 3)).tolist() data = np.random.rand(n_elec, ts_size) montage = make_dig_montage( ch_pos=dict(zip(ch_names, ch_pos_loc)), coord_frame='head' ) info = create_info(ch_names, Fs, 'ecog', montage=montage) raw = RawArray(data, info) raw.plot_psd(average=False) # looking for inexistent layout raw.plot_psd_topo() run_tests_if_main()
true
true
1c4a0802008b790d1c97611c1cf3739497f6082d
1,992
py
Python
scraping/norcleanser1.py
Asyikin98/SkinFerm
72fd1ad6339c96adf5ec154bde566de9eb1472c3
[ "MIT" ]
null
null
null
scraping/norcleanser1.py
Asyikin98/SkinFerm
72fd1ad6339c96adf5ec154bde566de9eb1472c3
[ "MIT" ]
2
2021-02-03T01:55:13.000Z
2021-04-30T12:46:33.000Z
scraping/norcleanser1.py
Asyikin98/SkinFerm
72fd1ad6339c96adf5ec154bde566de9eb1472c3
[ "MIT" ]
null
null
null
import urllib.request import random from bs4 import BeautifulSoup from requests import get import mysql.connector conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product") cursor = conn.cursor() sql = """INSERT INTO norcleanser (image, name, price, rating) VALUES (%s, %s, %s, %s)""" def crawl_url(pageUrl, cleansernor_arr): url = 'https://www.skinstore.com/skin-care/skincare-concern/normal-combination.list?pageNumber=1&facetFilters=averageReviewScore_auto_content:%5B4+TO+5%5D|en_brand_content:Balance+Me|en_brand_content:Daily+Concepts|en_brand_content:DERMAdoctor|en_brand_content:Epionce|en_brand_content:First+Aid+Beauty|en_skincareproducttype_content:Cleanser|en_brand_content:FOREO' page = get(url) soup = BeautifulSoup(page.text, 'html.parser') type(soup) #######################################################for product 1############################################################################ cleanser = soup.find_all('li', class_='productListProducts_product') try: for cleansers in cleanser : first_product_image = cleansers.find('img')['src'] img_name = random.randrange(1,500) full_name = str(img_name) + ".jpg" urllib.request.urlretrieve(first_product_image, full_name) first_product_name = cleansers.find("h3",{"class":"productBlock_productName"}).get_text().strip() first_product_price = cleansers.find("div",{"class":"productBlock_price"}).get_text().strip() first_product_rating = cleansers.find("span",{"class":"visually-hidden productBlock_rating_hiddenLabel"}).get_text().strip() cleansernor_arr.append((first_product_image, first_product_name, first_product_price, first_product_rating)) finally: return cleansernor_arr cleansernor_arr = crawl_url("", []) print(len(cleansernor_arr)) cursor.executemany(sql, cleansernor_arr) conn.commit() cursor.close() conn.close()
43.304348
370
0.681727
import urllib.request import random from bs4 import BeautifulSoup from requests import get import mysql.connector conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product") cursor = conn.cursor() sql = """INSERT INTO norcleanser (image, name, price, rating) VALUES (%s, %s, %s, %s)""" def crawl_url(pageUrl, cleansernor_arr): url = 'https://www.skinstore.com/skin-care/skincare-concern/normal-combination.list?pageNumber=1&facetFilters=averageReviewScore_auto_content:%5B4+TO+5%5D|en_brand_content:Balance+Me|en_brand_content:Daily+Concepts|en_brand_content:DERMAdoctor|en_brand_content:Epionce|en_brand_content:First+Aid+Beauty|en_skincareproducttype_content:Cleanser|en_brand_content:FOREO' page = get(url) soup = BeautifulSoup(page.text, 'html.parser') type(soup)
true
true
1c4a085e1b8dce8dacaca64ca275241b95642545
396
py
Python
Easy/1475 Final Prices With a Special Discount in a Shop.py
raj713335/LeetCode
e60e145d90f45d37e148e8307a3d97f5f0741de0
[ "Apache-2.0" ]
null
null
null
Easy/1475 Final Prices With a Special Discount in a Shop.py
raj713335/LeetCode
e60e145d90f45d37e148e8307a3d97f5f0741de0
[ "Apache-2.0" ]
null
null
null
Easy/1475 Final Prices With a Special Discount in a Shop.py
raj713335/LeetCode
e60e145d90f45d37e148e8307a3d97f5f0741de0
[ "Apache-2.0" ]
null
null
null
# https://leetcode.com/problems/final-prices-with-a-special-discount-in-a-shop/ class Solution: def finalPrices(self, prices: List[int]) -> List[int]: for i in range(0, len(prices)-1): for j in range(i+1, len(prices)): if prices[i] >= prices[j]: prices[i] -= prices[j] break return prices
30.461538
79
0.515152
class Solution: def finalPrices(self, prices: List[int]) -> List[int]: for i in range(0, len(prices)-1): for j in range(i+1, len(prices)): if prices[i] >= prices[j]: prices[i] -= prices[j] break return prices
true
true
1c4a08af26630f0f1eb8dd09eb2a7c42527d7a98
994
py
Python
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/apply_artifacts_request.py
SUSE/azure-sdk-for-python
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
[ "MIT" ]
2
2020-07-29T14:22:17.000Z
2020-11-06T18:47:40.000Z
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/apply_artifacts_request.py
SUSE/azure-sdk-for-python
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
[ "MIT" ]
1
2016-08-01T07:37:04.000Z
2016-08-01T07:37:04.000Z
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/apply_artifacts_request.py
SUSE/azure-sdk-for-python
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
[ "MIT" ]
1
2020-12-12T21:04:41.000Z
2020-12-12T21:04:41.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ApplyArtifactsRequest(Model): """Request body for applying artifacts to a virtual machine. :param artifacts: The list of artifacts to apply. :type artifacts: list of :class:`ArtifactInstallProperties <azure.mgmt.devtestlabs.models.ArtifactInstallProperties>` """ _attribute_map = { 'artifacts': {'key': 'artifacts', 'type': '[ArtifactInstallProperties]'}, } def __init__(self, artifacts=None): self.artifacts = artifacts
34.275862
81
0.615694
from msrest.serialization import Model class ApplyArtifactsRequest(Model): _attribute_map = { 'artifacts': {'key': 'artifacts', 'type': '[ArtifactInstallProperties]'}, } def __init__(self, artifacts=None): self.artifacts = artifacts
true
true
1c4a08b855151a6840c0b86aa222ceed3a904014
4,712
py
Python
SVS/model/archive/preprocessing/ch_asr/local/data_prep.py
Kirinel/SVS_system
261b80d69578bc3c407bc927750d64858c42a24c
[ "Apache-2.0" ]
null
null
null
SVS/model/archive/preprocessing/ch_asr/local/data_prep.py
Kirinel/SVS_system
261b80d69578bc3c407bc927750d64858c42a24c
[ "Apache-2.0" ]
null
null
null
SVS/model/archive/preprocessing/ch_asr/local/data_prep.py
Kirinel/SVS_system
261b80d69578bc3c407bc927750d64858c42a24c
[ "Apache-2.0" ]
null
null
null
"""Copyright [2020] [Jiatong Shi & Shuai Guo]. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import argparse import os import re def add_zero(number, size): """add_zero.""" out = str(number) for i in range(size - len(out)): out = "0" + out return out single_pron = [ "a", "ai", "ao", "an", "ang", "o", "ou", "ong", "e", "ei", "er", "en", "eng", ] double_starter = ["zh", "ch", "sh", "ii", "aa", "ee", "oo", "vv", "uu"] starter = [ "b", "p", "m", "f", "d", "t", "n", "l", "g", "k", "h", "j", "q", "x", "r", "z", "c", "s", ] def text_refactor(text): """text_refactor.""" text = re.sub(" +", " ", text) units = text.split(" ") # add a e o u i for i in range(len(units)): if len(units[i]) < 1: print("error") print(units) print(text) if units[i] in single_pron: begin = units[i][0] units[i] = begin + begin + units[i] elif units[i] == "jue": units[i] = "jve" elif units[i] == "que": units[i] = "qve" elif units[i] == "xue": units[i] = "xve" elif units[i] == "wen": units[i] = "uuun" elif units[i] == "wei": units[i] = "uuui" elif "w" == units[i][0]: units[i] = "uuu" + units[i][1:] elif len(units[i]) > 1 and ( "yu" == units[i][:2] or "yv" == units[i][:2] ): units[i] = "vvv" + units[i][2:] elif "y" == units[i][0]: units[i] = "iii" + units[i][1:] # further refine if units[i] == "iiiou": units[i] = "iiiu" elif units[i] == "iiiin": units[i] = "iiin" elif units[i] == "iiiing": units[i] = "iiing" spe = [] for unit in units: if unit[:2] in double_starter: spe.extend([unit[:2], unit[2:]]) else: spe.extend([unit[:1], unit[1:]]) return " ".join(spe) parser = argparse.ArgumentParser() parser.add_argument("datadir", type=str, help="data directory") parser.add_argument("outdir", type=str, help="output directory") args = parser.parse_args() if not os.path.exists("data"): os.mkdir("data") if not os.path.exists("data/" + args.outdir): os.mkdir("data/" + args.outdir) basedir = os.path.join("data", args.outdir) kaldi_text = open(os.path.join(basedir, "text"), "w") kaldi_wav_scp = open(os.path.join(basedir, "wav.scp"), "w") kaldi_utt2spk = open(os.path.join(basedir, "utt2spk"), "w") kaldi_spk2utt = open(os.path.join(basedir, "spk2utt"), "w") for root, dirs, files in os.walk(args.datadir): wav_storing = {} text_storing = {} piece_info = add_zero(root.split("/")[-1], 4) for f in files: if f.startswith("yll"): os.system( "mv %s %s" % (os.path.join(root, f), os.path.join(root, f[4:])) ) f = f[4:] name, suffix = f.split(".") if suffix == "wav": wav_storing[piece_info + name] = os.path.join(root, f) if suffix == "txt" and f != "text.txt": count = 1 text = open(os.path.join(root, f), "r") while True: line = text.readline() if not line: break line = line.strip() if len(line) > 0: text_storing[ piece_info + add_zero(count, 4) ] = text_refactor(line) count += 1 for key in text_storing.keys(): if len(text_storing[key]) == 0 or text_storing[key][0] == "#": continue kaldi_text.write("%s %s\n" % (key, text_storing[key])) kaldi_wav_scp.write( ( "%s sox -t wavpcm %s -c 1 -r 16000 -t wavpcm - |\n" % (key, wav_storing[key]) ) ) kaldi_utt2spk.write("%s %s\n" % (key, key)) 
kaldi_spk2utt.write("%s %s\n" % (key, key)) kaldi_text.close() kaldi_wav_scp.close() kaldi_utt2spk.close() kaldi_spk2utt.close() # os.system("export LC_ALL=C")
26.772727
79
0.503608
import argparse import os import re def add_zero(number, size): out = str(number) for i in range(size - len(out)): out = "0" + out return out single_pron = [ "a", "ai", "ao", "an", "ang", "o", "ou", "ong", "e", "ei", "er", "en", "eng", ] double_starter = ["zh", "ch", "sh", "ii", "aa", "ee", "oo", "vv", "uu"] starter = [ "b", "p", "m", "f", "d", "t", "n", "l", "g", "k", "h", "j", "q", "x", "r", "z", "c", "s", ] def text_refactor(text): text = re.sub(" +", " ", text) units = text.split(" ") for i in range(len(units)): if len(units[i]) < 1: print("error") print(units) print(text) if units[i] in single_pron: begin = units[i][0] units[i] = begin + begin + units[i] elif units[i] == "jue": units[i] = "jve" elif units[i] == "que": units[i] = "qve" elif units[i] == "xue": units[i] = "xve" elif units[i] == "wen": units[i] = "uuun" elif units[i] == "wei": units[i] = "uuui" elif "w" == units[i][0]: units[i] = "uuu" + units[i][1:] elif len(units[i]) > 1 and ( "yu" == units[i][:2] or "yv" == units[i][:2] ): units[i] = "vvv" + units[i][2:] elif "y" == units[i][0]: units[i] = "iii" + units[i][1:] if units[i] == "iiiou": units[i] = "iiiu" elif units[i] == "iiiin": units[i] = "iiin" elif units[i] == "iiiing": units[i] = "iiing" spe = [] for unit in units: if unit[:2] in double_starter: spe.extend([unit[:2], unit[2:]]) else: spe.extend([unit[:1], unit[1:]]) return " ".join(spe) parser = argparse.ArgumentParser() parser.add_argument("datadir", type=str, help="data directory") parser.add_argument("outdir", type=str, help="output directory") args = parser.parse_args() if not os.path.exists("data"): os.mkdir("data") if not os.path.exists("data/" + args.outdir): os.mkdir("data/" + args.outdir) basedir = os.path.join("data", args.outdir) kaldi_text = open(os.path.join(basedir, "text"), "w") kaldi_wav_scp = open(os.path.join(basedir, "wav.scp"), "w") kaldi_utt2spk = open(os.path.join(basedir, "utt2spk"), "w") kaldi_spk2utt = open(os.path.join(basedir, "spk2utt"), "w") for root, dirs, files in os.walk(args.datadir): wav_storing = {} text_storing = {} piece_info = add_zero(root.split("/")[-1], 4) for f in files: if f.startswith("yll"): os.system( "mv %s %s" % (os.path.join(root, f), os.path.join(root, f[4:])) ) f = f[4:] name, suffix = f.split(".") if suffix == "wav": wav_storing[piece_info + name] = os.path.join(root, f) if suffix == "txt" and f != "text.txt": count = 1 text = open(os.path.join(root, f), "r") while True: line = text.readline() if not line: break line = line.strip() if len(line) > 0: text_storing[ piece_info + add_zero(count, 4) ] = text_refactor(line) count += 1 for key in text_storing.keys(): if len(text_storing[key]) == 0 or text_storing[key][0] == "#": continue kaldi_text.write("%s %s\n" % (key, text_storing[key])) kaldi_wav_scp.write( ( "%s sox -t wavpcm %s -c 1 -r 16000 -t wavpcm - |\n" % (key, wav_storing[key]) ) ) kaldi_utt2spk.write("%s %s\n" % (key, key)) kaldi_spk2utt.write("%s %s\n" % (key, key)) kaldi_text.close() kaldi_wav_scp.close() kaldi_utt2spk.close() kaldi_spk2utt.close()
true
true
1c4a0a16894c5126858c0aa112f30b01f145fbcf
4,964
py
Python
src/tstoolbox/functions/expanding_window.py
timcera/tstoolbox
a32fa399d96082f01b7eedfd6c8893bdb881845c
[ "BSD-3-Clause" ]
5
2016-10-13T18:06:41.000Z
2021-06-29T19:47:36.000Z
src/tstoolbox/functions/expanding_window.py
timcera/tstoolbox
a32fa399d96082f01b7eedfd6c8893bdb881845c
[ "BSD-3-Clause" ]
21
2016-04-28T16:48:03.000Z
2021-12-16T18:07:07.000Z
src/tstoolbox/functions/expanding_window.py
timcera/tstoolbox
a32fa399d96082f01b7eedfd6c8893bdb881845c
[ "BSD-3-Clause" ]
3
2018-03-21T21:07:52.000Z
2021-01-22T20:07:49.000Z
# -*- coding: utf-8 -*- """Collection of functions for the manipulation of time series.""" from __future__ import absolute_import, division, print_function from typing import List, Optional import mando import pandas as pd import typic from mando.rst_text_formatter import RSTHelpFormatter from .. import tsutils try: from typing import Literal except ImportError: from typing_extensions import Literal @mando.command("expanding_window", formatter_class=RSTHelpFormatter, doctype="numpy") @tsutils.doc(tsutils.docstrings) def expanding_window_cli( input_ts="-", columns=None, start_date=None, end_date=None, dropna="no", skiprows=None, index_type="datetime", names=None, clean=False, statistic="", min_periods=1, center=False, source_units=None, target_units=None, print_input=False, tablefmt="csv", ): """Calculate an expanding window statistic. Parameters ---------- statistic : str [optional, default is ''] +-----------+----------------------+ | statistic | Meaning | +===========+======================+ | corr | correlation | +-----------+----------------------+ | count | count of real values | +-----------+----------------------+ | cov | covariance | +-----------+----------------------+ | kurt | kurtosis | +-----------+----------------------+ | max | maximum | +-----------+----------------------+ | mean | mean | +-----------+----------------------+ | median | median | +-----------+----------------------+ | min | minimum | +-----------+----------------------+ | skew | skew | +-----------+----------------------+ | std | standard deviation | +-----------+----------------------+ | sum | sum | +-----------+----------------------+ | var | variance | +-----------+----------------------+ min_periods : int [optional, default is 1] Minimum number of observations in window required to have a value center : boolean [optional, default is False] Set the labels at the center of the window. 
{input_ts} {columns} {start_date} {end_date} {dropna} {skiprows} {index_type} {names} {clean} {source_units} {target_units} {print_input} {tablefmt} """ tsutils.printiso( expanding_window( input_ts=input_ts, columns=columns, start_date=start_date, end_date=end_date, dropna=dropna, skiprows=skiprows, index_type=index_type, names=names, clean=clean, statistic=statistic, min_periods=min_periods, center=center, source_units=source_units, target_units=target_units, print_input=print_input, ), tablefmt=tablefmt, ) @tsutils.transform_args(statistic=tsutils.make_list) @typic.al def expanding_window( input_ts="-", columns=None, start_date=None, end_date=None, dropna="no", skiprows=None, index_type="datetime", names=None, clean=False, statistic: Optional[ List[ Literal[ "corr", "count", "cov", "kurt", "max", "mean", "median", "min", "skew", "std", "sum", "var", ] ] ] = None, min_periods: tsutils.IntGreaterEqualToZero = 1, center: bool = False, source_units=None, target_units=None, print_input=False, ): """Calculate an expanding window statistic.""" tsd = tsutils.common_kwds( input_ts, skiprows=skiprows, names=names, index_type=index_type, start_date=start_date, end_date=end_date, pick=columns, dropna=dropna, source_units=source_units, target_units=target_units, clean=clean, ) ntsd = tsd.expanding(min_periods=min_periods, center=center) if statistic: nntsd = pd.DataFrame() for stat in statistic: ntsd = eval("ntsd.{}()".format(stat)) ntsd.columns = [ tsutils.renamer(i, "expanding.{}".format(stat)) for i in ntsd.columns ] nntsd = nntsd.join(ntsd, how="outer") else: nntsd = ntsd return tsutils.return_input(print_input, tsd, nntsd) expanding_window.__doc__ = expanding_window_cli.__doc__
24.453202
85
0.471394
from __future__ import absolute_import, division, print_function from typing import List, Optional import mando import pandas as pd import typic from mando.rst_text_formatter import RSTHelpFormatter from .. import tsutils try: from typing import Literal except ImportError: from typing_extensions import Literal @mando.command("expanding_window", formatter_class=RSTHelpFormatter, doctype="numpy") @tsutils.doc(tsutils.docstrings) def expanding_window_cli( input_ts="-", columns=None, start_date=None, end_date=None, dropna="no", skiprows=None, index_type="datetime", names=None, clean=False, statistic="", min_periods=1, center=False, source_units=None, target_units=None, print_input=False, tablefmt="csv", ): tsutils.printiso( expanding_window( input_ts=input_ts, columns=columns, start_date=start_date, end_date=end_date, dropna=dropna, skiprows=skiprows, index_type=index_type, names=names, clean=clean, statistic=statistic, min_periods=min_periods, center=center, source_units=source_units, target_units=target_units, print_input=print_input, ), tablefmt=tablefmt, ) @tsutils.transform_args(statistic=tsutils.make_list) @typic.al def expanding_window( input_ts="-", columns=None, start_date=None, end_date=None, dropna="no", skiprows=None, index_type="datetime", names=None, clean=False, statistic: Optional[ List[ Literal[ "corr", "count", "cov", "kurt", "max", "mean", "median", "min", "skew", "std", "sum", "var", ] ] ] = None, min_periods: tsutils.IntGreaterEqualToZero = 1, center: bool = False, source_units=None, target_units=None, print_input=False, ): tsd = tsutils.common_kwds( input_ts, skiprows=skiprows, names=names, index_type=index_type, start_date=start_date, end_date=end_date, pick=columns, dropna=dropna, source_units=source_units, target_units=target_units, clean=clean, ) ntsd = tsd.expanding(min_periods=min_periods, center=center) if statistic: nntsd = pd.DataFrame() for stat in statistic: ntsd = eval("ntsd.{}()".format(stat)) ntsd.columns = [ tsutils.renamer(i, "expanding.{}".format(stat)) for i in ntsd.columns ] nntsd = nntsd.join(ntsd, how="outer") else: nntsd = ntsd return tsutils.return_input(print_input, tsd, nntsd) expanding_window.__doc__ = expanding_window_cli.__doc__
true
true
1c4a0c8f83c03c4ee8d31d036c9293db10cf3d62
1,215
bzl
Python
deps.bzl
abrisco/cargo-bazel
1bb0b7f295e89441b5b7e90898c8b9abdab38402
[ "MIT" ]
4
2021-11-08T14:53:23.000Z
2022-02-25T03:32:32.000Z
deps.bzl
abrisco/cargo-bazel
1bb0b7f295e89441b5b7e90898c8b9abdab38402
[ "MIT" ]
23
2021-10-13T18:53:05.000Z
2022-03-07T00:57:25.000Z
deps.bzl
abrisco/cargo-bazel
1bb0b7f295e89441b5b7e90898c8b9abdab38402
[ "MIT" ]
1
2021-12-09T17:11:39.000Z
2021-12-09T17:11:39.000Z
"""Dependencies required by the `cargo-bazel` rules""" load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") load("//3rdparty:third_party_deps.bzl", "third_party_deps") load("//private:vendor_utils.bzl", "crates_vendor_deps") def cargo_bazel_deps(): maybe( http_archive, name = "rules_rust", sha256 = "7826dbbbf617da8645d2cdd9a944e7948cc9cf87e7242c54cc0c53110495d1c7", strip_prefix = "rules_rust-acca6f400003b9ae097b69ba8f44878aaf65beed", urls = [ # `main` branch as of 2022-03-01 "https://github.com/bazelbuild/rules_rust/archive/acca6f400003b9ae097b69ba8f44878aaf65beed.tar.gz", ], ) maybe( http_archive, name = "bazel_skylib", urls = [ "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz", "https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz", ], sha256 = "af87959afe497dc8dfd4c6cb66e1279cb98ccc84284619ebfec27d9c09a903de", ) third_party_deps() crates_vendor_deps()
36.818182
126
0.681481
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") load("//3rdparty:third_party_deps.bzl", "third_party_deps") load("//private:vendor_utils.bzl", "crates_vendor_deps") def cargo_bazel_deps(): maybe( http_archive, name = "rules_rust", sha256 = "7826dbbbf617da8645d2cdd9a944e7948cc9cf87e7242c54cc0c53110495d1c7", strip_prefix = "rules_rust-acca6f400003b9ae097b69ba8f44878aaf65beed", urls = [ "https://github.com/bazelbuild/rules_rust/archive/acca6f400003b9ae097b69ba8f44878aaf65beed.tar.gz", ], ) maybe( http_archive, name = "bazel_skylib", urls = [ "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz", "https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.0/bazel-skylib-1.2.0.tar.gz", ], sha256 = "af87959afe497dc8dfd4c6cb66e1279cb98ccc84284619ebfec27d9c09a903de", ) third_party_deps() crates_vendor_deps()
true
true
1c4a0cbe390bf014cfe06f318b4c150143ca2c65
8,847
py
Python
ezblog/blog/views.py
zeropol2/ezblog
a43d231d454b32be35f5811a6ca63d17d654f59d
[ "Apache-2.0" ]
4
2016-08-04T04:30:53.000Z
2016-08-31T08:51:30.000Z
ezblog/blog/views.py
zeropol2/ezblog
a43d231d454b32be35f5811a6ca63d17d654f59d
[ "Apache-2.0" ]
null
null
null
ezblog/blog/views.py
zeropol2/ezblog
a43d231d454b32be35f5811a6ca63d17d654f59d
[ "Apache-2.0" ]
null
null
null
from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from django.db.models import Q from django.http import Http404, HttpResponse from django.shortcuts import render, redirect, get_object_or_404 from .models import Post, Category, Tag # index def index(request): if request.method == 'GET': per_page = 2 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.all(), per_page) else: pg = Paginator(Post.objects.filter(status='public'), per_page) return __render_index(request, pg, page) else: raise Http404 # posts def process_post(request, pk): if request.method == 'GET': return __get_post(request, pk) elif request.method == 'PUT': return __update_post(request, pk) elif request.method == 'DELETE': return __delete_post(request, pk) else: raise Http404 def __get_post(request, pk): post = get_object_or_404(Post, pk=pk) ctx = { 'post': post, 'categories': __get_categories(request), 'archives': __get_archives(request) } return render(request, 'detail_post.html', ctx) @login_required def __update_post(request, pk): title = request.PUT.get('title') content = request.PUT.get('content') category_pk = request.PUT.get('category') status = request.PUT.get('status') tags = request.PUT.get('tags') if tags: tags = request.PUT.get('tags').split(',') post = get_object_or_404(Post, pk=pk) post.title = title post.content = content if category_pk: post.category = Category.objects.get(pk=category_pk) post.status = status post.save() if tags: for name in tags: name = name.strip() if name: try: tag = Tag.objects.get(name=name) except Tag.DoesNotExist: tag = Tag() tag.name = name tag.save() post.tags.add(tag) post.save() response = HttpResponse() response.status_code = 200 return response @login_required def __delete_post(request, pk): post = get_object_or_404(Post, pk=pk) post.delete() response = HttpResponse() response.status_code = 200 return response # create_post def create_post_or_list_posts(request): if request.method == 'POST': return __create_post(request) elif request.method == 'GET': return index(request) else: raise Http404 @login_required def __create_post(request): title = request.POST.get('title') content = request.POST.get('content') category_pk = request.POST.get('category') status = request.POST.get('status') tags = request.POST.get('tags') if tags: tags = request.POST.get('tags').split(',') new_post = Post() new_post.title = title new_post.content = content if category_pk: new_post.category = Category.objects.get(pk=category_pk) new_post.status = status new_post.user = request.user new_post.save() if tags: for name in tags: name = name.strip() if name: try: tag = Tag.objects.get(name=name) except Tag.DoesNotExist: tag = Tag() tag.name = name tag.save() new_post.tags.add(tag) new_post.save() url = reverse('blog:post', kwargs={'pk': new_post.pk}) return redirect(url) # create_post_form @login_required def create_post_form(request): if request.method == 'GET': return __create_post_form(request) else: raise Http404 @login_required def __create_post_form(request): post = Post() status_choices = post.get_status_choices() ctx = { 'categories': __get_categories(request), 'status_choices': status_choices, 'archives': __get_archives(request) } return render(request, 'create_post.html', ctx) # update_post_form @login_required def update_post_form(request, pk): if request.method == 'GET': return __update_post_form(request, pk) else: raise Http404 
@login_required def __update_post_form(request, pk): post = Post.objects.get(pk=pk) status_choices = post.get_status_choices() ctx = { 'post': post, 'categories': __get_categories(request), 'status_choices': status_choices, 'archives': __get_archives(request) } return render(request, 'update_post.html', ctx) # list def posts_by_tag(request, tag_pk): if request.method == 'GET': target_tag = Tag.objects.get(pk=tag_pk) if not target_tag: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(tags__in=[target_tag]).distinct(), per_page) else: pg = Paginator(Post.objects.filter(status='public', tags__in=[target_tag]).distinct(), per_page) return __render_index(request, pg, page) else: raise Http404 def posts_by_category(request, category_pk): if request.method == 'GET': target_category = Category.objects.get(pk=category_pk) if not target_category: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(category=target_category).distinct(), per_page) else: pg = Paginator(Post.objects.filter(status='public', category=target_category).distinct(), per_page) return __render_index(request, pg, page) else: raise Http404 def posts_by_keyword(request): if request.method == 'GET': keyword = request.GET.get('keyword') if not keyword: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) where_func = Q() for keyword_item in keyword.split(' '): target_tags = Tag.objects.filter(name__contains=keyword_item) target_categories = Category.objects.filter(name__contains=keyword_item) where_func = Q(where_func | Q(title__contains=keyword_item) | Q(content__contains=keyword_item) | Q(tags__in=target_tags) | Q(category__in=target_categories)) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(where_func).distinct(), per_page) else: pg = Paginator(Post.objects.filter(Q(status='public') & where_func).distinct(), per_page) return __render_index(request, pg, page, keyword=keyword) else: raise Http404 def posts_by_year(request, year): if request.method == 'GET': if not year: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(created_at__year=year).distinct(), per_page) else: pg = Paginator(Post.objects.filter(status='public', created_at__year=year).distinct(), per_page) return __render_index(request, pg, page) else: raise Http404 def __render_index(request, pg, page, **kwargs): try: contents = pg.page(page) except PageNotAnInteger: contents = pg.page(1) except EmptyPage: contents = [] ctx = { 'posts': contents, 'categories': __get_categories(request), 'archives': __get_archives(request), 'keyword': kwargs.get('keyword') } return render(request, 'index.html', ctx) def __get_categories(request): categories = Category.objects.all() for category in categories: if request.user.is_authenticated(): category.count = Post.objects.filter(category=category).count() else: category.count = Post.objects.filter(category=category, status='public').count() return categories def __get_archives(request): if request.user.is_authenticated(): all_posts = Post.objects.all() else: all_posts = Post.objects.filter(status='public') result = {} for item in all_posts: year = item.created_at.year count = result.get(year, 0) result[year] = count+1 return 
result
27.646875
111
0.61648
from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from django.db.models import Q from django.http import Http404, HttpResponse from django.shortcuts import render, redirect, get_object_or_404 from .models import Post, Category, Tag def index(request): if request.method == 'GET': per_page = 2 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.all(), per_page) else: pg = Paginator(Post.objects.filter(status='public'), per_page) return __render_index(request, pg, page) else: raise Http404 def process_post(request, pk): if request.method == 'GET': return __get_post(request, pk) elif request.method == 'PUT': return __update_post(request, pk) elif request.method == 'DELETE': return __delete_post(request, pk) else: raise Http404 def __get_post(request, pk): post = get_object_or_404(Post, pk=pk) ctx = { 'post': post, 'categories': __get_categories(request), 'archives': __get_archives(request) } return render(request, 'detail_post.html', ctx) @login_required def __update_post(request, pk): title = request.PUT.get('title') content = request.PUT.get('content') category_pk = request.PUT.get('category') status = request.PUT.get('status') tags = request.PUT.get('tags') if tags: tags = request.PUT.get('tags').split(',') post = get_object_or_404(Post, pk=pk) post.title = title post.content = content if category_pk: post.category = Category.objects.get(pk=category_pk) post.status = status post.save() if tags: for name in tags: name = name.strip() if name: try: tag = Tag.objects.get(name=name) except Tag.DoesNotExist: tag = Tag() tag.name = name tag.save() post.tags.add(tag) post.save() response = HttpResponse() response.status_code = 200 return response @login_required def __delete_post(request, pk): post = get_object_or_404(Post, pk=pk) post.delete() response = HttpResponse() response.status_code = 200 return response def create_post_or_list_posts(request): if request.method == 'POST': return __create_post(request) elif request.method == 'GET': return index(request) else: raise Http404 @login_required def __create_post(request): title = request.POST.get('title') content = request.POST.get('content') category_pk = request.POST.get('category') status = request.POST.get('status') tags = request.POST.get('tags') if tags: tags = request.POST.get('tags').split(',') new_post = Post() new_post.title = title new_post.content = content if category_pk: new_post.category = Category.objects.get(pk=category_pk) new_post.status = status new_post.user = request.user new_post.save() if tags: for name in tags: name = name.strip() if name: try: tag = Tag.objects.get(name=name) except Tag.DoesNotExist: tag = Tag() tag.name = name tag.save() new_post.tags.add(tag) new_post.save() url = reverse('blog:post', kwargs={'pk': new_post.pk}) return redirect(url) @login_required def create_post_form(request): if request.method == 'GET': return __create_post_form(request) else: raise Http404 @login_required def __create_post_form(request): post = Post() status_choices = post.get_status_choices() ctx = { 'categories': __get_categories(request), 'status_choices': status_choices, 'archives': __get_archives(request) } return render(request, 'create_post.html', ctx) @login_required def update_post_form(request, pk): if request.method == 'GET': return __update_post_form(request, pk) else: raise Http404 @login_required def __update_post_form(request, pk): post = 
Post.objects.get(pk=pk) status_choices = post.get_status_choices() ctx = { 'post': post, 'categories': __get_categories(request), 'status_choices': status_choices, 'archives': __get_archives(request) } return render(request, 'update_post.html', ctx) def posts_by_tag(request, tag_pk): if request.method == 'GET': target_tag = Tag.objects.get(pk=tag_pk) if not target_tag: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(tags__in=[target_tag]).distinct(), per_page) else: pg = Paginator(Post.objects.filter(status='public', tags__in=[target_tag]).distinct(), per_page) return __render_index(request, pg, page) else: raise Http404 def posts_by_category(request, category_pk): if request.method == 'GET': target_category = Category.objects.get(pk=category_pk) if not target_category: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(category=target_category).distinct(), per_page) else: pg = Paginator(Post.objects.filter(status='public', category=target_category).distinct(), per_page) return __render_index(request, pg, page) else: raise Http404 def posts_by_keyword(request): if request.method == 'GET': keyword = request.GET.get('keyword') if not keyword: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) where_func = Q() for keyword_item in keyword.split(' '): target_tags = Tag.objects.filter(name__contains=keyword_item) target_categories = Category.objects.filter(name__contains=keyword_item) where_func = Q(where_func | Q(title__contains=keyword_item) | Q(content__contains=keyword_item) | Q(tags__in=target_tags) | Q(category__in=target_categories)) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(where_func).distinct(), per_page) else: pg = Paginator(Post.objects.filter(Q(status='public') & where_func).distinct(), per_page) return __render_index(request, pg, page, keyword=keyword) else: raise Http404 def posts_by_year(request, year): if request.method == 'GET': if not year: url = reverse('blog:index') return redirect(url) per_page = 15 page = request.GET.get('page', 1) if request.user.is_authenticated(): pg = Paginator(Post.objects.filter(created_at__year=year).distinct(), per_page) else: pg = Paginator(Post.objects.filter(status='public', created_at__year=year).distinct(), per_page) return __render_index(request, pg, page) else: raise Http404 def __render_index(request, pg, page, **kwargs): try: contents = pg.page(page) except PageNotAnInteger: contents = pg.page(1) except EmptyPage: contents = [] ctx = { 'posts': contents, 'categories': __get_categories(request), 'archives': __get_archives(request), 'keyword': kwargs.get('keyword') } return render(request, 'index.html', ctx) def __get_categories(request): categories = Category.objects.all() for category in categories: if request.user.is_authenticated(): category.count = Post.objects.filter(category=category).count() else: category.count = Post.objects.filter(category=category, status='public').count() return categories def __get_archives(request): if request.user.is_authenticated(): all_posts = Post.objects.all() else: all_posts = Post.objects.filter(status='public') result = {} for item in all_posts: year = item.created_at.year count = result.get(year, 0) result[year] = count+1 return result
true
true
1c4a0cde0a499635d87048fe9f94b9177b5680fc
5,529
py
Python
cms/utils/i18n.py
360youlun/django-cms
bc1240fd46de4c04f3b5402be99a81728a4a324c
[ "BSD-3-Clause" ]
1
2019-04-15T10:28:46.000Z
2019-04-15T10:28:46.000Z
cms/utils/i18n.py
damianmoore/django-cms
2d3e10a01e792ec7da5c1418811c1be5ac84e5e2
[ "BSD-3-Clause" ]
5
2021-03-19T15:39:27.000Z
2021-09-08T02:47:21.000Z
cms/utils/i18n.py
Acidburn0zzz/django-cms
5a105a1c75eeb4c8a4c1c34301d93855e6724407
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from contextlib import contextmanager from django.core.urlresolvers import get_resolver, LocaleRegexURLResolver from django.conf import settings from django.utils import translation from django.utils.translation import ugettext_lazy as _ from cms.exceptions import LanguageError from cms.utils.conf import get_cms_setting, get_site_id @contextmanager def force_language(new_lang): old_lang = get_current_language() if old_lang != new_lang: translation.activate(new_lang) yield translation.activate(old_lang) def get_languages(site_id=None): site_id = get_site_id(site_id) result = get_cms_setting('LANGUAGES').get(site_id) if not result: result = [] defaults = get_cms_setting('LANGUAGES').get('default', {}) for code, name in settings.LANGUAGES: lang = {'code': code, 'name': _(name)} lang.update(defaults) result.append(lang) get_cms_setting('LANGUAGES')[site_id] = result return result def get_language_code(language_code): """ Returns language code while making sure it's in LANGUAGES """ if not language_code: return None languages = get_language_list() if language_code in languages: # direct hit return language_code for lang in languages: if language_code.split('-')[0] == lang: # base language hit return lang if lang.split('-')[0] == language_code: # base language hit return lang return language_code def get_current_language(): """ Returns the currently active language It's a replacement for Django's translation.get_language() to make sure the LANGUAGE_CODE will be found in LANGUAGES. Overcomes this issue: https://code.djangoproject.com/ticket/9340 """ language_code = translation.get_language() return get_language_code(language_code) def get_language_list(site_id=None): """ :return: returns a list of iso2codes for this site """ if not settings.USE_I18N: return [settings.LANGUAGE_CODE] languages = [] for language in get_languages(site_id): languages.append(language['code']) return languages def get_language_tuple(site_id=None): """ :return: returns an list of tuples like the old CMS_LANGUAGES or the LANGUAGES for this site """ languages = [] for language in get_languages(site_id): languages.append((language['code'], language['name'])) return languages def get_language_dict(site_id=None): """ :return: returns an dict of cms languages """ languages = {} for language in get_languages(site_id): languages[language['code']] = language['name'] return languages def get_public_languages(site_id=None): """ :return: list of iso2codes of public languages for this site """ languages = [] for language in get_language_objects(site_id): if language.get("public", True): languages.append(language['code']) return languages def get_language_object(language_code, site_id=None): """ :param language_code: RFC5646 language code :return: the language object filled up by defaults """ for language in get_languages(site_id): if language['code'] == get_language_code(language_code): return language raise LanguageError('Language not found: %s' % language_code) def get_language_objects(site_id=None): """ returns list of all language objects filled up by default values """ return list(get_languages(site_id)) def get_default_language(language_code=None, site_id=None): """ Returns default language depending on settings.LANGUAGE_CODE merged with best match from get_cms_setting('LANGUAGES') Returns: language_code """ if not language_code: language_code = get_language_code(settings.LANGUAGE_CODE) languages = get_language_list(site_id) # first try if there is an exact language if language_code in languages: return 
language_code # otherwise split the language code if possible, so iso3 language_code = language_code.split("-")[0] if not language_code in languages: return settings.LANGUAGE_CODE return language_code def get_fallback_languages(language, site_id=None): """ returns a list of fallback languages for the given language """ try: language = get_language_object(language, site_id) except LanguageError: language = get_languages(site_id)[0] return language.get('fallbacks', []) def get_redirect_on_fallback(language, site_id=None): """ returns if you should redirect on language fallback :param language: :param site_id: :return: Boolean """ language = get_language_object(language, site_id) return language.get('redirect_on_fallback', True) def hide_untranslated(language, site_id=None): """ Should untranslated pages in this language be hidden? :param language: :param site_id: :return: A Boolean """ obj = get_language_object(language, site_id) return obj.get('hide_untranslated', True) def is_language_prefix_patterns_used(): """ Returns `True` if the `LocaleRegexURLResolver` is used at root level of the urlpatterns, else it returns `False`. """ for url_pattern in get_resolver(None).url_patterns: if isinstance(url_pattern, LocaleRegexURLResolver): return True return False
28.647668
121
0.691988
from contextlib import contextmanager from django.core.urlresolvers import get_resolver, LocaleRegexURLResolver from django.conf import settings from django.utils import translation from django.utils.translation import ugettext_lazy as _ from cms.exceptions import LanguageError from cms.utils.conf import get_cms_setting, get_site_id @contextmanager def force_language(new_lang): old_lang = get_current_language() if old_lang != new_lang: translation.activate(new_lang) yield translation.activate(old_lang) def get_languages(site_id=None): site_id = get_site_id(site_id) result = get_cms_setting('LANGUAGES').get(site_id) if not result: result = [] defaults = get_cms_setting('LANGUAGES').get('default', {}) for code, name in settings.LANGUAGES: lang = {'code': code, 'name': _(name)} lang.update(defaults) result.append(lang) get_cms_setting('LANGUAGES')[site_id] = result return result def get_language_code(language_code): if not language_code: return None languages = get_language_list() if language_code in languages: return language_code for lang in languages: if language_code.split('-')[0] == lang: return lang if lang.split('-')[0] == language_code: return lang return language_code def get_current_language(): language_code = translation.get_language() return get_language_code(language_code) def get_language_list(site_id=None): if not settings.USE_I18N: return [settings.LANGUAGE_CODE] languages = [] for language in get_languages(site_id): languages.append(language['code']) return languages def get_language_tuple(site_id=None): languages = [] for language in get_languages(site_id): languages.append((language['code'], language['name'])) return languages def get_language_dict(site_id=None): languages = {} for language in get_languages(site_id): languages[language['code']] = language['name'] return languages def get_public_languages(site_id=None): languages = [] for language in get_language_objects(site_id): if language.get("public", True): languages.append(language['code']) return languages def get_language_object(language_code, site_id=None): for language in get_languages(site_id): if language['code'] == get_language_code(language_code): return language raise LanguageError('Language not found: %s' % language_code) def get_language_objects(site_id=None): return list(get_languages(site_id)) def get_default_language(language_code=None, site_id=None): if not language_code: language_code = get_language_code(settings.LANGUAGE_CODE) languages = get_language_list(site_id) if language_code in languages: return language_code language_code = language_code.split("-")[0] if not language_code in languages: return settings.LANGUAGE_CODE return language_code def get_fallback_languages(language, site_id=None): try: language = get_language_object(language, site_id) except LanguageError: language = get_languages(site_id)[0] return language.get('fallbacks', []) def get_redirect_on_fallback(language, site_id=None): language = get_language_object(language, site_id) return language.get('redirect_on_fallback', True) def hide_untranslated(language, site_id=None): obj = get_language_object(language, site_id) return obj.get('hide_untranslated', True) def is_language_prefix_patterns_used(): for url_pattern in get_resolver(None).url_patterns: if isinstance(url_pattern, LocaleRegexURLResolver): return True return False
true
true
1c4a0d26b1b1f83eea8c7a0005822c6d1ced6f53
36,374
py
Python
adafruit_minimqtt/adafruit_minimqtt.py
Eason010212/Adafruit_CircuitPython_MiniMQTT
eccc36f41c973c3155bd633716670e1925d51bae
[ "MIT", "Unlicense" ]
null
null
null
adafruit_minimqtt/adafruit_minimqtt.py
Eason010212/Adafruit_CircuitPython_MiniMQTT
eccc36f41c973c3155bd633716670e1925d51bae
[ "MIT", "Unlicense" ]
null
null
null
adafruit_minimqtt/adafruit_minimqtt.py
Eason010212/Adafruit_CircuitPython_MiniMQTT
eccc36f41c973c3155bd633716670e1925d51bae
[ "MIT", "Unlicense" ]
null
null
null
# SPDX-FileCopyrightText: 2019-2021 Brent Rubell for Adafruit Industries # # SPDX-License-Identifier: MIT # Original Work Copyright (c) 2016 Paul Sokolovsky, uMQTT # Modified Work Copyright (c) 2019 Bradley Beach, esp32spi_mqtt # Modified Work Copyright (c) 2012-2019 Roger Light and others, Paho MQTT Python """ `adafruit_minimqtt` ================================================================================ A minimal MQTT Library for CircuitPython. * Author(s): Brent Rubell Implementation Notes -------------------- Adapted from https://github.com/micropython/micropython-lib/tree/master/umqtt.simple/umqtt **Software and Dependencies:** * Adafruit CircuitPython firmware for the supported boards: https://github.com/adafruit/circuitpython/releases """ import errno import struct import time from random import randint from micropython import const from .matcher import MQTTMatcher __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MiniMQTT.git" # Client-specific variables MQTT_MSG_MAX_SZ = const(268435455) MQTT_MSG_SZ_LIM = const(10000000) MQTT_TOPIC_LENGTH_LIMIT = const(65535) MQTT_TCP_PORT = const(1883) MQTT_TLS_PORT = const(8883) # MQTT Commands MQTT_PINGREQ = b"\xc0\0" MQTT_PINGRESP = const(0xD0) MQTT_SUB = b"\x82" MQTT_UNSUB = b"\xA2" MQTT_DISCONNECT = b"\xe0\0" # Variable CONNECT header [MQTT 3.1.2] MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0") CONNACK_ERRORS = { const(0x01): "Connection Refused - Incorrect Protocol Version", const(0x02): "Connection Refused - ID Rejected", const(0x03): "Connection Refused - Server unavailable", const(0x04): "Connection Refused - Incorrect username/password", const(0x05): "Connection Refused - Unauthorized", } _default_sock = None # pylint: disable=invalid-name _fake_context = None # pylint: disable=invalid-name class MMQTTException(Exception): """MiniMQTT Exception class.""" # pylint: disable=unnecessary-pass # pass # Legacy ESP32SPI Socket API def set_socket(sock, iface=None): """Legacy API for setting the socket and network interface. :param sock: socket object. :param iface: internet interface object """ global _default_sock # pylint: disable=invalid-name, global-statement global _fake_context # pylint: disable=invalid-name, global-statement _default_sock = sock if iface: _default_sock.set_interface(iface) _fake_context = _FakeSSLContext(iface) class _FakeSSLSocket: def __init__(self, socket, tls_mode): self._socket = socket self._mode = tls_mode self.settimeout = socket.settimeout self.send = socket.send self.recv = socket.recv self.close = socket.close def connect(self, address): """connect wrapper to add non-standard mode parameter""" try: return self._socket.connect(address, self._mode) except RuntimeError as error: raise OSError(errno.ENOMEM) from error class _FakeSSLContext: def __init__(self, iface): self._iface = iface def wrap_socket(self, socket, server_hostname=None): """Return the same socket""" # pylint: disable=unused-argument return _FakeSSLSocket(socket, self._iface.TLS_MODE) class MQTT: """MQTT Client for CircuitPython. :param str broker: MQTT Broker URL or IP Address. :param int port: Optional port definition, defaults to 8883. :param str username: Username for broker authentication. :param str password: Password for broker authentication. :param network_manager: NetworkManager object, such as WiFiManager from ESPSPI_WiFiManager. :param str client_id: Optional client identifier, defaults to a unique, generated string. 
:param bool is_ssl: Sets a secure or insecure connection with the broker. :param int keep_alive: KeepAlive interval between the broker and the MiniMQTT client. :param socket socket_pool: A pool of socket resources available for the given radio. :param ssl_context: SSL context for long-lived SSL connections. """ # pylint: disable=too-many-arguments,too-many-instance-attributes, not-callable, invalid-name, no-member def __init__( self, broker, port=None, username=None, password=None, client_id=None, is_ssl=True, keep_alive=60, socket_pool=None, ssl_context=None, ): self._socket_pool = socket_pool self._ssl_context = ssl_context self._sock = None self._backwards_compatible_sock = False self.keep_alive = keep_alive self._user_data = None self._is_connected = False self._msg_size_lim = MQTT_MSG_SZ_LIM self._pid = 0 self._timestamp = 0 self.logger = None self.broker = broker self._username = username self._password = password if ( self._password and len(password.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT ): # [MQTT-3.1.3.5] raise MMQTTException("Password length is too large.") self.port = MQTT_TCP_PORT if is_ssl: self.port = MQTT_TLS_PORT if port: self.port = port # define client identifer if client_id: # user-defined client_id MAY allow client_id's > 23 bytes or # non-alpha-numeric characters self.client_id = client_id else: # assign a unique client_id self.client_id = "cpy{0}{1}".format( randint(0, int(time.monotonic() * 100) % 1000), randint(0, 99) ) # generated client_id's enforce spec.'s length rules if len(self.client_id) > 23 or not self.client_id: raise ValueError("MQTT Client ID must be between 1 and 23 bytes") # LWT self._lw_topic = None self._lw_qos = 0 self._lw_topic = None self._lw_msg = None self._lw_retain = False # List of subscribed topics, used for tracking self._subscribed_topics = [] self._on_message_filtered = MQTTMatcher() # Default topic callback methods self._on_message = None self.on_connect = None self.on_disconnect = None self.on_publish = None self.on_subscribe = None self.on_unsubscribe = None # pylint: disable=too-many-branches def _get_connect_socket(self, host, port, *, timeout=1): """Obtains a new socket and connects to a broker. :param str host: Desired broker hostname :param int port: Desired broker port :param int timeout: Desired socket timeout """ # For reconnections - check if we're using a socket already and close it if self._sock: self._sock.close() self._sock = None # Legacy API - use the interface's socket instead of a passed socket pool if self._socket_pool is None: self._socket_pool = _default_sock # Legacy API - fake the ssl context if self._ssl_context is None: self._ssl_context = _fake_context if not isinstance(port, int): raise RuntimeError("Port must be an integer") if port == 8883 and not self._ssl_context: raise RuntimeError( "ssl_context must be set before using adafruit_mqtt for secure MQTT." 
) if self.logger and port == MQTT_TLS_PORT: self.logger.info( "Establishing a SECURE SSL connection to {0}:{1}".format(host, port) ) elif self.logger: self.logger.info( "Establishing an INSECURE connection to {0}:{1}".format(host, port) ) addr_info = self._socket_pool.getaddrinfo( host, port, 0, self._socket_pool.SOCK_STREAM )[0] sock = None retry_count = 0 while retry_count < 5 and sock is None: retry_count += 1 try: sock = self._socket_pool.socket( addr_info[0], addr_info[1], addr_info[2] ) except OSError: continue connect_host = addr_info[-1][0] if port == 8883: sock = self._ssl_context.wrap_socket(sock, server_hostname=host) connect_host = host sock.settimeout(timeout) try: sock.connect((connect_host, port)) except MemoryError: sock.close() sock = None except OSError: sock.close() sock = None if sock is None: raise RuntimeError("Repeated socket failures") self._backwards_compatible_sock = not hasattr(sock, "recv_into") return sock def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.deinit() def _sock_exact_recv(self, bufsize): """Reads _exact_ number of bytes from the connected socket. Will only return string with the exact number of bytes requested. The semantics of native socket receive is that it returns no more than the specified number of bytes (i.e. max size). However, it makes no guarantees in terms of the minimum size of the buffer, which could be 1 byte. This is a wrapper for socket recv() to ensure that no less than the expected number of bytes is returned or trigger a timeout exception. :param int bufsize: number of bytes to receive """ stamp = time.monotonic() rc = self._sock.recv(bufsize) to_read = bufsize - len(rc) assert to_read >= 0 read_timeout = self.keep_alive while to_read > 0: recv = self._sock.recv(to_read) to_read -= len(recv) rc += recv if time.monotonic() - stamp > read_timeout: raise MMQTTException( "Unable to receive {} bytes within {} seconds.".format( to_read, read_timeout ) ) return rc def deinit(self): """De-initializes the MQTT client and disconnects from the mqtt broker.""" self.disconnect() @property def mqtt_msg(self): """Returns maximum MQTT payload and topic size.""" return self._msg_size_lim, MQTT_TOPIC_LENGTH_LIMIT @mqtt_msg.setter def mqtt_msg(self, msg_size): """Sets the maximum MQTT message payload size. :param int msg_size: Maximum MQTT payload size. """ if msg_size < MQTT_MSG_MAX_SZ: self._msg_size_lim = msg_size def will_set(self, topic=None, payload=None, qos=0, retain=False): """Sets the last will and testament properties. MUST be called before `connect()`. :param str topic: MQTT Broker topic. :param int,float,str payload: Last will disconnection payload. payloads of type int & float are converted to a string. :param int qos: Quality of Service level, defaults to zero. Conventional options are ``0`` (send at most once), ``1`` (send at least once), or ``2`` (send exactly once). .. note:: Only options ``1`` or ``0`` are QoS levels supported by this library. :param bool retain: Specifies if the payload is to be retained when it is published. 
""" if self.logger: self.logger.debug("Setting last will properties") self._valid_qos(qos) if self._is_connected: raise MMQTTException("Last Will should only be called before connect().") if payload is None: payload = "" if isinstance(payload, (int, float, str)): payload = str(payload).encode() else: raise MMQTTException("Invalid message data type.") self._lw_qos = qos self._lw_topic = topic self._lw_msg = payload self._lw_retain = retain def add_topic_callback(self, mqtt_topic, callback_method): """Registers a callback_method for a specific MQTT topic. :param str mqtt_topic: MQTT topic identifier. :param str callback_method: Name of callback method. """ if mqtt_topic is None or callback_method is None: raise ValueError("MQTT topic and callback method must both be defined.") self._on_message_filtered[mqtt_topic] = callback_method def remove_topic_callback(self, mqtt_topic): """Removes a registered callback method. :param str mqtt_topic: MQTT topic identifier string. """ if mqtt_topic is None: raise ValueError("MQTT Topic must be defined.") try: del self._on_message_filtered[mqtt_topic] except KeyError: raise KeyError( "MQTT topic callback not added with add_topic_callback." ) from None @property def on_message(self): """Called when a new message has been received on a subscribed topic. Expected method signature is ``on_message(client, topic, message)`` """ return self._on_message @on_message.setter def on_message(self, method): self._on_message = method def _handle_on_message(self, client, topic, message): matched = False if topic is not None: for callback in self._on_message_filtered.iter_match(topic): callback(client, topic, message) # on_msg with callback matched = True if not matched and self.on_message: # regular on_message self.on_message(client, topic, message) def username_pw_set(self, username, password=None): """Set client's username and an optional password. :param str username: Username to use with your MQTT broker. :param str password: Password to use with your MQTT broker. """ if self._is_connected: raise MMQTTException("This method must be called before connect().") self._username = username if password is not None: self._password = password # pylint: disable=too-many-branches, too-many-statements, too-many-locals def connect(self, clean_session=True, host=None, port=None, keep_alive=None): """Initiates connection with the MQTT Broker. :param bool clean_session: Establishes a persistent session. :param str host: Hostname or IP address of the remote broker. :param int port: Network port of the remote broker. :param int keep_alive: Maximum period allowed for communication, in seconds. 
""" if host: self.broker = host if port: self.port = port if keep_alive: self.keep_alive = keep_alive if self.logger: self.logger.debug("Attempting to establish MQTT connection...") # Get a new socket self._sock = self._get_connect_socket(self.broker, self.port) # Fixed Header fixed_header = bytearray([0x10]) # NOTE: Variable header is # MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0") # because final 4 bytes are 4, 2, 0, 0 var_header = MQTT_HDR_CONNECT var_header[6] = clean_session << 1 # Set up variable header and remaining_length remaining_length = 12 + len(self.client_id) if self._username: remaining_length += 2 + len(self._username) + 2 + len(self._password) var_header[6] |= 0xC0 if self.keep_alive: assert self.keep_alive < MQTT_TOPIC_LENGTH_LIMIT var_header[7] |= self.keep_alive >> 8 var_header[8] |= self.keep_alive & 0x00FF if self._lw_topic: remaining_length += 2 + len(self._lw_topic) + 2 + len(self._lw_msg) var_header[6] |= 0x4 | (self._lw_qos & 0x1) << 3 | (self._lw_qos & 0x2) << 3 var_header[6] |= self._lw_retain << 5 # Remaining length calculation large_rel_length = False if remaining_length > 0x7F: large_rel_length = True # Calculate Remaining Length [2.2.3] while remaining_length > 0: encoded_byte = remaining_length % 0x80 remaining_length = remaining_length // 0x80 # if there is more data to encode, set the top bit of the byte if remaining_length > 0: encoded_byte |= 0x80 fixed_header.append(encoded_byte) if large_rel_length: fixed_header.append(0x00) else: fixed_header.append(remaining_length) fixed_header.append(0x00) if self.logger: self.logger.debug("Sending CONNECT to broker...") self.logger.debug( "Fixed Header: %s\nVariable Header: %s", fixed_header, var_header ) self._sock.send(fixed_header) self._sock.send(var_header) # [MQTT-3.1.3-4] self._send_str(self.client_id) if self._lw_topic: # [MQTT-3.1.3-11] self._send_str(self._lw_topic) self._send_str(self._lw_msg) if self._username is None: self._username = None else: self._send_str(self._username) self._send_str(self._password) if self.logger: self.logger.debug("Receiving CONNACK packet from broker") while True: op = self._wait_for_msg() if op == 32: rc = self._sock_exact_recv(3) assert rc[0] == 0x02 if rc[2] != 0x00: raise MMQTTException(CONNACK_ERRORS[rc[2]]) self._is_connected = True result = rc[0] & 1 if self.on_connect is not None: self.on_connect(self, self._user_data, result, rc[2]) return result def disconnect(self): """Disconnects the MiniMQTT client from the MQTT broker.""" self.is_connected() if self.logger is not None: self.logger.debug("Sending DISCONNECT packet to broker") try: self._sock.send(MQTT_DISCONNECT) except RuntimeError as e: if self.logger: self.logger.warning("Unable to send DISCONNECT packet: {}".format(e)) if self.logger is not None: self.logger.debug("Closing socket") self._sock.close() self._is_connected = False self._subscribed_topics = [] if self.on_disconnect is not None: self.on_disconnect(self, self._user_data, 0) def ping(self): """Pings the MQTT Broker to confirm if the broker is alive or if there is an active network connection. Returns response codes of any messages received while waiting for PINGRESP. 
""" self.is_connected() if self.logger: self.logger.debug("Sending PINGREQ") self._sock.send(MQTT_PINGREQ) ping_timeout = self.keep_alive stamp = time.monotonic() rc, rcs = None, [] while rc != MQTT_PINGRESP: rc = self._wait_for_msg() if rc: rcs.append(rc) if time.monotonic() - stamp > ping_timeout: raise MMQTTException("PINGRESP not returned from broker.") return rcs # pylint: disable=too-many-branches, too-many-statements def publish(self, topic, msg, retain=False, qos=0): """Publishes a message to a topic provided. :param str topic: Unique topic identifier. :param str,int,float,bytes msg: Data to send to the broker. :param bool retain: Whether the message is saved by the broker. :param int qos: Quality of Service level for the message, defaults to zero. """ self.is_connected() self._valid_topic(topic) if "+" in topic or "#" in topic: raise MMQTTException("Publish topic can not contain wildcards.") # check msg/qos kwargs if msg is None: raise MMQTTException("Message can not be None.") if isinstance(msg, (int, float)): msg = str(msg).encode("ascii") elif isinstance(msg, str): msg = str(msg).encode("utf-8") elif isinstance(msg, bytes): pass else: raise MMQTTException("Invalid message data type.") if len(msg) > MQTT_MSG_MAX_SZ: raise MMQTTException("Message size larger than %d bytes." % MQTT_MSG_MAX_SZ) assert ( 0 <= qos <= 1 ), "Quality of Service Level 2 is unsupported by this library." # fixed header. [3.3.1.2], [3.3.1.3] pub_hdr_fixed = bytearray([0x30 | retain | qos << 1]) # variable header = 2-byte Topic length (big endian) pub_hdr_var = bytearray(struct.pack(">H", len(topic))) pub_hdr_var.extend(topic.encode("utf-8")) # Topic name remaining_length = 2 + len(msg) + len(topic) if qos > 0: # packet identifier where QoS level is 1 or 2. [3.3.2.2] remaining_length += 2 self._pid = self._pid + 1 if self._pid < 0xFFFF else 1 pub_hdr_var.append(self._pid >> 8) pub_hdr_var.append(self._pid & 0xFF) # Calculate remaining length [2.2.3] if remaining_length > 0x7F: while remaining_length > 0: encoded_byte = remaining_length % 0x80 remaining_length = remaining_length // 0x80 if remaining_length > 0: encoded_byte |= 0x80 pub_hdr_fixed.append(encoded_byte) else: pub_hdr_fixed.append(remaining_length) if self.logger: self.logger.debug( "Sending PUBLISH\nTopic: %s\nMsg: %s\ \nQoS: %d\nRetain? %r", topic, msg, qos, retain, ) self._sock.send(pub_hdr_fixed) self._sock.send(pub_hdr_var) self._sock.send(msg) if qos == 0 and self.on_publish is not None: self.on_publish(self, self._user_data, topic, self._pid) if qos == 1: while True: op = self._wait_for_msg() if op == 0x40: sz = self._sock_exact_recv(1) assert sz == b"\x02" rcv_pid = self._sock_exact_recv(2) rcv_pid = rcv_pid[0] << 0x08 | rcv_pid[1] if self._pid == rcv_pid: if self.on_publish is not None: self.on_publish(self, self._user_data, topic, rcv_pid) return def subscribe(self, topic, qos=0): """Subscribes to a topic on the MQTT Broker. This method can subscribe to one topics or multiple topics. :param str,tuple,list topic: Unique MQTT topic identifier string. If this is a `tuple`, then the tuple should contain topic identifier string and qos level integer. If this is a `list`, then each list element should be a tuple containing a topic identifier string and qos level integer. :param int qos: Quality of Service level for the topic, defaults to zero. Conventional options are ``0`` (send at most once), ``1`` (send at least once), or ``2`` (send exactly once). 
""" self.is_connected() topics = None if isinstance(topic, tuple): topic, qos = topic self._valid_topic(topic) self._valid_qos(qos) if isinstance(topic, str): self._valid_topic(topic) self._valid_qos(qos) topics = [(topic, qos)] if isinstance(topic, list): topics = [] for t, q in topic: self._valid_qos(q) self._valid_topic(t) topics.append((t, q)) # Assemble packet packet_length = 2 + (2 * len(topics)) + (1 * len(topics)) packet_length += sum(len(topic) for topic, qos in topics) packet_length_byte = packet_length.to_bytes(1, "big") self._pid = self._pid + 1 if self._pid < 0xFFFF else 1 packet_id_bytes = self._pid.to_bytes(2, "big") # Packet with variable and fixed headers packet = MQTT_SUB + packet_length_byte + packet_id_bytes # attaching topic and QOS level to the packet for t, q in topics: topic_size = len(t).to_bytes(2, "big") qos_byte = q.to_bytes(1, "big") packet += topic_size + t.encode() + qos_byte if self.logger: for t, q in topics: self.logger.debug("SUBSCRIBING to topic %s with QoS %d", t, q) self._sock.send(packet) while True: op = self._wait_for_msg() if op == 0x90: rc = self._sock_exact_recv(4) assert rc[1] == packet[2] and rc[2] == packet[3] if rc[3] == 0x80: raise MMQTTException("SUBACK Failure!") for t, q in topics: if self.on_subscribe is not None: self.on_subscribe(self, self._user_data, t, q) self._subscribed_topics.append(t) return def unsubscribe(self, topic): """Unsubscribes from a MQTT topic. :param str,list topic: Unique MQTT topic identifier string or list. """ topics = None if isinstance(topic, str): self._valid_topic(topic) topics = [(topic)] if isinstance(topic, list): topics = [] for t in topic: self._valid_topic(t) topics.append((t)) for t in topics: if t not in self._subscribed_topics: raise MMQTTException( "Topic must be subscribed to before attempting unsubscribe." ) # Assemble packet packet_length = 2 + (2 * len(topics)) packet_length += sum(len(topic) for topic in topics) packet_length_byte = packet_length.to_bytes(1, "big") self._pid = self._pid + 1 if self._pid < 0xFFFF else 1 packet_id_bytes = self._pid.to_bytes(2, "big") packet = MQTT_UNSUB + packet_length_byte + packet_id_bytes for t in topics: topic_size = len(t).to_bytes(2, "big") packet += topic_size + t.encode() if self.logger: for t in topics: self.logger.debug("UNSUBSCRIBING from topic %s", t) self._sock.send(packet) if self.logger: self.logger.debug("Waiting for UNSUBACK...") while True: op = self._wait_for_msg() if op == 176: rc = self._sock_exact_recv(3) assert rc[0] == 0x02 # [MQTT-3.32] assert rc[1] == packet_id_bytes[0] and rc[2] == packet_id_bytes[1] for t in topics: if self.on_unsubscribe is not None: self.on_unsubscribe(self, self._user_data, t, self._pid) self._subscribed_topics.remove(t) return def reconnect(self, resub_topics=True): """Attempts to reconnect to the MQTT broker. :param bool resub_topics: Resubscribe to previously subscribed topics. """ if self.logger: self.logger.debug("Attempting to reconnect with MQTT broker") self.connect() if self.logger: self.logger.debug("Reconnected with broker") if resub_topics: if self.logger: self.logger.debug( "Attempting to resubscribe to previously subscribed topics." ) subscribed_topics = self._subscribed_topics.copy() self._subscribed_topics = [] while subscribed_topics: feed = subscribed_topics.pop() self.subscribe(feed) def loop(self, timeout=1): """Non-blocking message loop. Use this method to check incoming subscription messages. Returns response codes of any messages received. :param int timeout: Socket timeout, in seconds. 
""" if self._timestamp == 0: self._timestamp = time.monotonic() current_time = time.monotonic() if current_time - self._timestamp >= self.keep_alive: # Handle KeepAlive by expecting a PINGREQ/PINGRESP from the server if self.logger is not None: self.logger.debug( "KeepAlive period elapsed - requesting a PINGRESP from the server..." ) rcs = self.ping() self._timestamp = 0 return rcs self._sock.settimeout(timeout) rc = self._wait_for_msg() return [rc] if rc else None def _wait_for_msg(self, timeout=0.1): """Reads and processes network events.""" # CPython socket module contains a timeout attribute if hasattr(self._socket_pool, "timeout"): try: res = self._sock_exact_recv(1) except self._socket_pool.timeout as error: return None else: # socketpool, esp32spi try: res = self._sock_exact_recv(1) except OSError as error: if error.errno == errno.ETIMEDOUT: # raised by a socket timeout if 0 bytes were present return None raise MMQTTException from error # Block while we parse the rest of the response self._sock.settimeout(timeout) if res in [None, b""]: # If we get here, it means that there is nothing to be received return None if res[0] == MQTT_PINGRESP: if self.logger: self.logger.debug("Got PINGRESP") sz = self._sock_exact_recv(1)[0] if sz != 0x00: raise MMQTTException( "Unexpected PINGRESP returned from broker: {}.".format(sz) ) return MQTT_PINGRESP if res[0] & 0xF0 != 0x30: return res[0] sz = self._recv_len() # topic length MSB & LSB topic_len = self._sock_exact_recv(2) topic_len = (topic_len[0] << 8) | topic_len[1] topic = self._sock_exact_recv(topic_len) topic = str(topic, "utf-8") sz -= topic_len + 2 pid = 0 if res[0] & 0x06: pid = self._sock_exact_recv(2) pid = pid[0] << 0x08 | pid[1] sz -= 0x02 # read message contents msg = self._sock_exact_recv(sz) self._handle_on_message(self, topic, str(msg, "utf-8")) if res[0] & 0x06 == 0x02: pkt = bytearray(b"\x40\x02\0\0") struct.pack_into("!H", pkt, 2, pid) self._sock.send(pkt) elif res[0] & 6 == 4: assert 0 return res[0] def _recv_len(self): """Unpack MQTT message length.""" n = 0 sh = 0 b = bytearray(1) while True: b = self._sock_exact_recv(1)[0] n |= (b & 0x7F) << sh if not b & 0x80: return n sh += 7 def _recv_into(self, buf, size=0): """Backwards-compatible _recv_into implementation.""" if self._backwards_compatible_sock: size = len(buf) if size == 0 else size b = self._sock.recv(size) read_size = len(b) buf[:read_size] = b return read_size return self._sock.recv_into(buf, size) def _sock_exact_recv(self, bufsize): """Reads _exact_ number of bytes from the connected socket. Will only return string with the exact number of bytes requested. The semantics of native socket receive is that it returns no more than the specified number of bytes (i.e. max size). However, it makes no guarantees in terms of the minimum size of the buffer, which could be 1 byte. This is a wrapper for socket recv() to ensure that no less than the expected number of bytes is returned or trigger a timeout exception. :param int bufsize: number of bytes to receive """ if not self._backwards_compatible_sock: # CPython/Socketpool Impl. rc = bytearray(bufsize) self._sock.recv_into(rc, bufsize) else: # ESP32SPI Impl. 
stamp = time.monotonic() read_timeout = self.keep_alive # This will timeout with socket timeout (not keepalive timeout) rc = self._sock.recv(bufsize) if not rc: if self.logger: self.logger.debug("_sock_exact_recv timeout") # If no bytes waiting, raise same exception as socketpool raise OSError(errno.ETIMEDOUT) # If any bytes waiting, try to read them all, # or raise exception if wait longer than read_timeout to_read = bufsize - len(rc) assert to_read >= 0 read_timeout = self.keep_alive while to_read > 0: recv = self._sock.recv(to_read) to_read -= len(recv) rc += recv if time.monotonic() - stamp > read_timeout: raise MMQTTException( "Unable to receive {} bytes within {} seconds.".format( to_read, read_timeout ) ) return rc def _send_str(self, string): """Encodes a string and sends it to a socket. :param str string: String to write to the socket. """ self._sock.send(struct.pack("!H", len(string))) if isinstance(string, str): self._sock.send(str.encode(string, "utf-8")) else: self._sock.send(string) @staticmethod def _valid_topic(topic): """Validates if topic provided is proper MQTT topic format. :param str topic: Topic identifier """ if topic is None: raise MMQTTException("Topic may not be NoneType") # [MQTT-4.7.3-1] if not topic: raise MMQTTException("Topic may not be empty.") # [MQTT-4.7.3-3] if len(topic.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT: raise MMQTTException("Topic length is too large.") @staticmethod def _valid_qos(qos_level): """Validates if the QoS level is supported by this library :param int qos_level: Desired QoS level. """ if isinstance(qos_level, int): if qos_level < 0 or qos_level > 2: raise MMQTTException("QoS must be between 1 and 2.") else: raise MMQTTException("QoS must be an integer.") def is_connected(self): """Returns MQTT client session status as True if connected, raises a `MMQTTException` if `False`. """ if self._sock is None or self._is_connected is False: raise MMQTTException("MiniMQTT is not connected.") return self._is_connected # Logging def enable_logger(self, logger, log_level=20): """Enables library logging provided a logger object. :param logger: A python logger pacakge. :param log_level: Numeric value of a logging level, defaults to INFO. """ self.logger = logger.getLogger("log") self.logger.setLevel(log_level) def disable_logger(self): """Disables logging.""" if not self.logger: raise MMQTTException("Can not disable logger, no logger found.") self.logger = None
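The loop that encodes remaining_length in connect() and publish() above implements the MQTT variable-length integer from spec section 2.2.3: seven payload bits per byte, with the continuation bit set on every byte except the last. A minimal standalone sketch of that encoding; the function name is illustrative and not part of the library:

def encode_remaining_length(remaining_length):
    # MQTT variable-length integer (spec 2.2.3): 7 bits of data per byte,
    # high bit set on every byte except the last.
    encoded = bytearray()
    while True:
        byte = remaining_length % 0x80
        remaining_length //= 0x80
        if remaining_length > 0:
            byte |= 0x80
        encoded.append(byte)
        if remaining_length == 0:
            return bytes(encoded)

# 321 = 65 + 2*128, so it encodes to two bytes: 0xC1 (65 with the continuation bit) then 0x02.
assert encode_remaining_length(321) == b"\xc1\x02"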
37.38335
108
0.583274
import errno import struct import time from random import randint from micropython import const from .matcher import MQTTMatcher __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MiniMQTT.git" MQTT_MSG_MAX_SZ = const(268435455) MQTT_MSG_SZ_LIM = const(10000000) MQTT_TOPIC_LENGTH_LIMIT = const(65535) MQTT_TCP_PORT = const(1883) MQTT_TLS_PORT = const(8883) MQTT_PINGREQ = b"\xc0\0" MQTT_PINGRESP = const(0xD0) MQTT_SUB = b"\x82" MQTT_UNSUB = b"\xA2" MQTT_DISCONNECT = b"\xe0\0" MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0") CONNACK_ERRORS = { const(0x01): "Connection Refused - Incorrect Protocol Version", const(0x02): "Connection Refused - ID Rejected", const(0x03): "Connection Refused - Server unavailable", const(0x04): "Connection Refused - Incorrect username/password", const(0x05): "Connection Refused - Unauthorized", } _default_sock = None _fake_context = None class MMQTTException(Exception): def set_socket(sock, iface=None): global _default_sock global _fake_context _default_sock = sock if iface: _default_sock.set_interface(iface) _fake_context = _FakeSSLContext(iface) class _FakeSSLSocket: def __init__(self, socket, tls_mode): self._socket = socket self._mode = tls_mode self.settimeout = socket.settimeout self.send = socket.send self.recv = socket.recv self.close = socket.close def connect(self, address): try: return self._socket.connect(address, self._mode) except RuntimeError as error: raise OSError(errno.ENOMEM) from error class _FakeSSLContext: def __init__(self, iface): self._iface = iface def wrap_socket(self, socket, server_hostname=None): return _FakeSSLSocket(socket, self._iface.TLS_MODE) class MQTT: def __init__( self, broker, port=None, username=None, password=None, client_id=None, is_ssl=True, keep_alive=60, socket_pool=None, ssl_context=None, ): self._socket_pool = socket_pool self._ssl_context = ssl_context self._sock = None self._backwards_compatible_sock = False self.keep_alive = keep_alive self._user_data = None self._is_connected = False self._msg_size_lim = MQTT_MSG_SZ_LIM self._pid = 0 self._timestamp = 0 self.logger = None self.broker = broker self._username = username self._password = password if ( self._password and len(password.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT ): raise MMQTTException("Password length is too large.") self.port = MQTT_TCP_PORT if is_ssl: self.port = MQTT_TLS_PORT if port: self.port = port if client_id: # non-alpha-numeric characters self.client_id = client_id else: # assign a unique client_id self.client_id = "cpy{0}{1}".format( randint(0, int(time.monotonic() * 100) % 1000), randint(0, 99) ) # generated client_id's enforce spec.'s length rules if len(self.client_id) > 23 or not self.client_id: raise ValueError("MQTT Client ID must be between 1 and 23 bytes") # LWT self._lw_topic = None self._lw_qos = 0 self._lw_topic = None self._lw_msg = None self._lw_retain = False # List of subscribed topics, used for tracking self._subscribed_topics = [] self._on_message_filtered = MQTTMatcher() # Default topic callback methods self._on_message = None self.on_connect = None self.on_disconnect = None self.on_publish = None self.on_subscribe = None self.on_unsubscribe = None # pylint: disable=too-many-branches def _get_connect_socket(self, host, port, *, timeout=1): # For reconnections - check if we're using a socket already and close it if self._sock: self._sock.close() self._sock = None if self._socket_pool is None: self._socket_pool = _default_sock # Legacy API - fake the ssl context if 
self._ssl_context is None: self._ssl_context = _fake_context if not isinstance(port, int): raise RuntimeError("Port must be an integer") if port == 8883 and not self._ssl_context: raise RuntimeError( "ssl_context must be set before using adafruit_mqtt for secure MQTT." ) if self.logger and port == MQTT_TLS_PORT: self.logger.info( "Establishing a SECURE SSL connection to {0}:{1}".format(host, port) ) elif self.logger: self.logger.info( "Establishing an INSECURE connection to {0}:{1}".format(host, port) ) addr_info = self._socket_pool.getaddrinfo( host, port, 0, self._socket_pool.SOCK_STREAM )[0] sock = None retry_count = 0 while retry_count < 5 and sock is None: retry_count += 1 try: sock = self._socket_pool.socket( addr_info[0], addr_info[1], addr_info[2] ) except OSError: continue connect_host = addr_info[-1][0] if port == 8883: sock = self._ssl_context.wrap_socket(sock, server_hostname=host) connect_host = host sock.settimeout(timeout) try: sock.connect((connect_host, port)) except MemoryError: sock.close() sock = None except OSError: sock.close() sock = None if sock is None: raise RuntimeError("Repeated socket failures") self._backwards_compatible_sock = not hasattr(sock, "recv_into") return sock def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.deinit() def _sock_exact_recv(self, bufsize): stamp = time.monotonic() rc = self._sock.recv(bufsize) to_read = bufsize - len(rc) assert to_read >= 0 read_timeout = self.keep_alive while to_read > 0: recv = self._sock.recv(to_read) to_read -= len(recv) rc += recv if time.monotonic() - stamp > read_timeout: raise MMQTTException( "Unable to receive {} bytes within {} seconds.".format( to_read, read_timeout ) ) return rc def deinit(self): self.disconnect() @property def mqtt_msg(self): return self._msg_size_lim, MQTT_TOPIC_LENGTH_LIMIT @mqtt_msg.setter def mqtt_msg(self, msg_size): if msg_size < MQTT_MSG_MAX_SZ: self._msg_size_lim = msg_size def will_set(self, topic=None, payload=None, qos=0, retain=False): if self.logger: self.logger.debug("Setting last will properties") self._valid_qos(qos) if self._is_connected: raise MMQTTException("Last Will should only be called before connect().") if payload is None: payload = "" if isinstance(payload, (int, float, str)): payload = str(payload).encode() else: raise MMQTTException("Invalid message data type.") self._lw_qos = qos self._lw_topic = topic self._lw_msg = payload self._lw_retain = retain def add_topic_callback(self, mqtt_topic, callback_method): if mqtt_topic is None or callback_method is None: raise ValueError("MQTT topic and callback method must both be defined.") self._on_message_filtered[mqtt_topic] = callback_method def remove_topic_callback(self, mqtt_topic): if mqtt_topic is None: raise ValueError("MQTT Topic must be defined.") try: del self._on_message_filtered[mqtt_topic] except KeyError: raise KeyError( "MQTT topic callback not added with add_topic_callback." 
) from None @property def on_message(self): return self._on_message @on_message.setter def on_message(self, method): self._on_message = method def _handle_on_message(self, client, topic, message): matched = False if topic is not None: for callback in self._on_message_filtered.iter_match(topic): callback(client, topic, message) # on_msg with callback matched = True if not matched and self.on_message: # regular on_message self.on_message(client, topic, message) def username_pw_set(self, username, password=None): if self._is_connected: raise MMQTTException("This method must be called before connect().") self._username = username if password is not None: self._password = password # pylint: disable=too-many-branches, too-many-statements, too-many-locals def connect(self, clean_session=True, host=None, port=None, keep_alive=None): if host: self.broker = host if port: self.port = port if keep_alive: self.keep_alive = keep_alive if self.logger: self.logger.debug("Attempting to establish MQTT connection...") # Get a new socket self._sock = self._get_connect_socket(self.broker, self.port) # Fixed Header fixed_header = bytearray([0x10]) # NOTE: Variable header is # MQTT_HDR_CONNECT = bytearray(b"\x04MQTT\x04\x02\0\0") # because final 4 bytes are 4, 2, 0, 0 var_header = MQTT_HDR_CONNECT var_header[6] = clean_session << 1 # Set up variable header and remaining_length remaining_length = 12 + len(self.client_id) if self._username: remaining_length += 2 + len(self._username) + 2 + len(self._password) var_header[6] |= 0xC0 if self.keep_alive: assert self.keep_alive < MQTT_TOPIC_LENGTH_LIMIT var_header[7] |= self.keep_alive >> 8 var_header[8] |= self.keep_alive & 0x00FF if self._lw_topic: remaining_length += 2 + len(self._lw_topic) + 2 + len(self._lw_msg) var_header[6] |= 0x4 | (self._lw_qos & 0x1) << 3 | (self._lw_qos & 0x2) << 3 var_header[6] |= self._lw_retain << 5 # Remaining length calculation large_rel_length = False if remaining_length > 0x7F: large_rel_length = True # Calculate Remaining Length [2.2.3] while remaining_length > 0: encoded_byte = remaining_length % 0x80 remaining_length = remaining_length // 0x80 # if there is more data to encode, set the top bit of the byte if remaining_length > 0: encoded_byte |= 0x80 fixed_header.append(encoded_byte) if large_rel_length: fixed_header.append(0x00) else: fixed_header.append(remaining_length) fixed_header.append(0x00) if self.logger: self.logger.debug("Sending CONNECT to broker...") self.logger.debug( "Fixed Header: %s\nVariable Header: %s", fixed_header, var_header ) self._sock.send(fixed_header) self._sock.send(var_header) # [MQTT-3.1.3-4] self._send_str(self.client_id) if self._lw_topic: # [MQTT-3.1.3-11] self._send_str(self._lw_topic) self._send_str(self._lw_msg) if self._username is None: self._username = None else: self._send_str(self._username) self._send_str(self._password) if self.logger: self.logger.debug("Receiving CONNACK packet from broker") while True: op = self._wait_for_msg() if op == 32: rc = self._sock_exact_recv(3) assert rc[0] == 0x02 if rc[2] != 0x00: raise MMQTTException(CONNACK_ERRORS[rc[2]]) self._is_connected = True result = rc[0] & 1 if self.on_connect is not None: self.on_connect(self, self._user_data, result, rc[2]) return result def disconnect(self): self.is_connected() if self.logger is not None: self.logger.debug("Sending DISCONNECT packet to broker") try: self._sock.send(MQTT_DISCONNECT) except RuntimeError as e: if self.logger: self.logger.warning("Unable to send DISCONNECT packet: {}".format(e)) if self.logger is not 
None: self.logger.debug("Closing socket") self._sock.close() self._is_connected = False self._subscribed_topics = [] if self.on_disconnect is not None: self.on_disconnect(self, self._user_data, 0) def ping(self): self.is_connected() if self.logger: self.logger.debug("Sending PINGREQ") self._sock.send(MQTT_PINGREQ) ping_timeout = self.keep_alive stamp = time.monotonic() rc, rcs = None, [] while rc != MQTT_PINGRESP: rc = self._wait_for_msg() if rc: rcs.append(rc) if time.monotonic() - stamp > ping_timeout: raise MMQTTException("PINGRESP not returned from broker.") return rcs # pylint: disable=too-many-branches, too-many-statements def publish(self, topic, msg, retain=False, qos=0): self.is_connected() self._valid_topic(topic) if "+" in topic or "#" in topic: raise MMQTTException("Publish topic can not contain wildcards.") # check msg/qos kwargs if msg is None: raise MMQTTException("Message can not be None.") if isinstance(msg, (int, float)): msg = str(msg).encode("ascii") elif isinstance(msg, str): msg = str(msg).encode("utf-8") elif isinstance(msg, bytes): pass else: raise MMQTTException("Invalid message data type.") if len(msg) > MQTT_MSG_MAX_SZ: raise MMQTTException("Message size larger than %d bytes." % MQTT_MSG_MAX_SZ) assert ( 0 <= qos <= 1 ), "Quality of Service Level 2 is unsupported by this library." # fixed header. [3.3.1.2], [3.3.1.3] pub_hdr_fixed = bytearray([0x30 | retain | qos << 1]) # variable header = 2-byte Topic length (big endian) pub_hdr_var = bytearray(struct.pack(">H", len(topic))) pub_hdr_var.extend(topic.encode("utf-8")) # Topic name remaining_length = 2 + len(msg) + len(topic) if qos > 0: # packet identifier where QoS level is 1 or 2. [3.3.2.2] remaining_length += 2 self._pid = self._pid + 1 if self._pid < 0xFFFF else 1 pub_hdr_var.append(self._pid >> 8) pub_hdr_var.append(self._pid & 0xFF) # Calculate remaining length [2.2.3] if remaining_length > 0x7F: while remaining_length > 0: encoded_byte = remaining_length % 0x80 remaining_length = remaining_length // 0x80 if remaining_length > 0: encoded_byte |= 0x80 pub_hdr_fixed.append(encoded_byte) else: pub_hdr_fixed.append(remaining_length) if self.logger: self.logger.debug( "Sending PUBLISH\nTopic: %s\nMsg: %s\ \nQoS: %d\nRetain? 
%r", topic, msg, qos, retain, ) self._sock.send(pub_hdr_fixed) self._sock.send(pub_hdr_var) self._sock.send(msg) if qos == 0 and self.on_publish is not None: self.on_publish(self, self._user_data, topic, self._pid) if qos == 1: while True: op = self._wait_for_msg() if op == 0x40: sz = self._sock_exact_recv(1) assert sz == b"\x02" rcv_pid = self._sock_exact_recv(2) rcv_pid = rcv_pid[0] << 0x08 | rcv_pid[1] if self._pid == rcv_pid: if self.on_publish is not None: self.on_publish(self, self._user_data, topic, rcv_pid) return def subscribe(self, topic, qos=0): self.is_connected() topics = None if isinstance(topic, tuple): topic, qos = topic self._valid_topic(topic) self._valid_qos(qos) if isinstance(topic, str): self._valid_topic(topic) self._valid_qos(qos) topics = [(topic, qos)] if isinstance(topic, list): topics = [] for t, q in topic: self._valid_qos(q) self._valid_topic(t) topics.append((t, q)) # Assemble packet packet_length = 2 + (2 * len(topics)) + (1 * len(topics)) packet_length += sum(len(topic) for topic, qos in topics) packet_length_byte = packet_length.to_bytes(1, "big") self._pid = self._pid + 1 if self._pid < 0xFFFF else 1 packet_id_bytes = self._pid.to_bytes(2, "big") # Packet with variable and fixed headers packet = MQTT_SUB + packet_length_byte + packet_id_bytes # attaching topic and QOS level to the packet for t, q in topics: topic_size = len(t).to_bytes(2, "big") qos_byte = q.to_bytes(1, "big") packet += topic_size + t.encode() + qos_byte if self.logger: for t, q in topics: self.logger.debug("SUBSCRIBING to topic %s with QoS %d", t, q) self._sock.send(packet) while True: op = self._wait_for_msg() if op == 0x90: rc = self._sock_exact_recv(4) assert rc[1] == packet[2] and rc[2] == packet[3] if rc[3] == 0x80: raise MMQTTException("SUBACK Failure!") for t, q in topics: if self.on_subscribe is not None: self.on_subscribe(self, self._user_data, t, q) self._subscribed_topics.append(t) return def unsubscribe(self, topic): topics = None if isinstance(topic, str): self._valid_topic(topic) topics = [(topic)] if isinstance(topic, list): topics = [] for t in topic: self._valid_topic(t) topics.append((t)) for t in topics: if t not in self._subscribed_topics: raise MMQTTException( "Topic must be subscribed to before attempting unsubscribe." ) # Assemble packet packet_length = 2 + (2 * len(topics)) packet_length += sum(len(topic) for topic in topics) packet_length_byte = packet_length.to_bytes(1, "big") self._pid = self._pid + 1 if self._pid < 0xFFFF else 1 packet_id_bytes = self._pid.to_bytes(2, "big") packet = MQTT_UNSUB + packet_length_byte + packet_id_bytes for t in topics: topic_size = len(t).to_bytes(2, "big") packet += topic_size + t.encode() if self.logger: for t in topics: self.logger.debug("UNSUBSCRIBING from topic %s", t) self._sock.send(packet) if self.logger: self.logger.debug("Waiting for UNSUBACK...") while True: op = self._wait_for_msg() if op == 176: rc = self._sock_exact_recv(3) assert rc[0] == 0x02 # [MQTT-3.32] assert rc[1] == packet_id_bytes[0] and rc[2] == packet_id_bytes[1] for t in topics: if self.on_unsubscribe is not None: self.on_unsubscribe(self, self._user_data, t, self._pid) self._subscribed_topics.remove(t) return def reconnect(self, resub_topics=True): if self.logger: self.logger.debug("Attempting to reconnect with MQTT broker") self.connect() if self.logger: self.logger.debug("Reconnected with broker") if resub_topics: if self.logger: self.logger.debug( "Attempting to resubscribe to previously subscribed topics." 
) subscribed_topics = self._subscribed_topics.copy() self._subscribed_topics = [] while subscribed_topics: feed = subscribed_topics.pop() self.subscribe(feed) def loop(self, timeout=1): if self._timestamp == 0: self._timestamp = time.monotonic() current_time = time.monotonic() if current_time - self._timestamp >= self.keep_alive: # Handle KeepAlive by expecting a PINGREQ/PINGRESP from the server if self.logger is not None: self.logger.debug( "KeepAlive period elapsed - requesting a PINGRESP from the server..." ) rcs = self.ping() self._timestamp = 0 return rcs self._sock.settimeout(timeout) rc = self._wait_for_msg() return [rc] if rc else None def _wait_for_msg(self, timeout=0.1): # CPython socket module contains a timeout attribute if hasattr(self._socket_pool, "timeout"): try: res = self._sock_exact_recv(1) except self._socket_pool.timeout as error: return None else: # socketpool, esp32spi try: res = self._sock_exact_recv(1) except OSError as error: if error.errno == errno.ETIMEDOUT: # raised by a socket timeout if 0 bytes were present return None raise MMQTTException from error # Block while we parse the rest of the response self._sock.settimeout(timeout) if res in [None, b""]: # If we get here, it means that there is nothing to be received return None if res[0] == MQTT_PINGRESP: if self.logger: self.logger.debug("Got PINGRESP") sz = self._sock_exact_recv(1)[0] if sz != 0x00: raise MMQTTException( "Unexpected PINGRESP returned from broker: {}.".format(sz) ) return MQTT_PINGRESP if res[0] & 0xF0 != 0x30: return res[0] sz = self._recv_len() # topic length MSB & LSB topic_len = self._sock_exact_recv(2) topic_len = (topic_len[0] << 8) | topic_len[1] topic = self._sock_exact_recv(topic_len) topic = str(topic, "utf-8") sz -= topic_len + 2 pid = 0 if res[0] & 0x06: pid = self._sock_exact_recv(2) pid = pid[0] << 0x08 | pid[1] sz -= 0x02 # read message contents msg = self._sock_exact_recv(sz) self._handle_on_message(self, topic, str(msg, "utf-8")) if res[0] & 0x06 == 0x02: pkt = bytearray(b"\x40\x02\0\0") struct.pack_into("!H", pkt, 2, pid) self._sock.send(pkt) elif res[0] & 6 == 4: assert 0 return res[0] def _recv_len(self): n = 0 sh = 0 b = bytearray(1) while True: b = self._sock_exact_recv(1)[0] n |= (b & 0x7F) << sh if not b & 0x80: return n sh += 7 def _recv_into(self, buf, size=0): if self._backwards_compatible_sock: size = len(buf) if size == 0 else size b = self._sock.recv(size) read_size = len(b) buf[:read_size] = b return read_size return self._sock.recv_into(buf, size) def _sock_exact_recv(self, bufsize): if not self._backwards_compatible_sock: # CPython/Socketpool Impl. rc = bytearray(bufsize) self._sock.recv_into(rc, bufsize) else: # ESP32SPI Impl. 
stamp = time.monotonic() read_timeout = self.keep_alive # This will timeout with socket timeout (not keepalive timeout) rc = self._sock.recv(bufsize) if not rc: if self.logger: self.logger.debug("_sock_exact_recv timeout") # If no bytes waiting, raise same exception as socketpool raise OSError(errno.ETIMEDOUT) # If any bytes waiting, try to read them all, # or raise exception if wait longer than read_timeout to_read = bufsize - len(rc) assert to_read >= 0 read_timeout = self.keep_alive while to_read > 0: recv = self._sock.recv(to_read) to_read -= len(recv) rc += recv if time.monotonic() - stamp > read_timeout: raise MMQTTException( "Unable to receive {} bytes within {} seconds.".format( to_read, read_timeout ) ) return rc def _send_str(self, string): self._sock.send(struct.pack("!H", len(string))) if isinstance(string, str): self._sock.send(str.encode(string, "utf-8")) else: self._sock.send(string) @staticmethod def _valid_topic(topic): if topic is None: raise MMQTTException("Topic may not be NoneType") # [MQTT-4.7.3-1] if not topic: raise MMQTTException("Topic may not be empty.") # [MQTT-4.7.3-3] if len(topic.encode("utf-8")) > MQTT_TOPIC_LENGTH_LIMIT: raise MMQTTException("Topic length is too large.") @staticmethod def _valid_qos(qos_level): if isinstance(qos_level, int): if qos_level < 0 or qos_level > 2: raise MMQTTException("QoS must be between 1 and 2.") else: raise MMQTTException("QoS must be an integer.") def is_connected(self): if self._sock is None or self._is_connected is False: raise MMQTTException("MiniMQTT is not connected.") return self._is_connected # Logging def enable_logger(self, logger, log_level=20): self.logger = logger.getLogger("log") self.logger.setLevel(log_level) def disable_logger(self): if not self.logger: raise MMQTTException("Can not disable logger, no logger found.") self.logger = None
true
true
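The file above exposes a small client API (connect, subscribe, publish, loop, and the on_message callback). A minimal usage sketch under CPython, assuming the module is installed under the usual adafruit_minimqtt package name; the broker hostname and topic are placeholders, and CPython's socket module plus ssl.create_default_context() stand in for the socket_pool and ssl_context parameters described in the constructor:

import socket
import ssl

from adafruit_minimqtt.adafruit_minimqtt import MQTT  # assumed package/module name


def handle_message(client, topic, message):
    # Matches the on_message(client, topic, message) signature described above.
    print("Received on {}: {}".format(topic, message))


client = MQTT(
    broker="broker.example.com",                # placeholder hostname
    port=8883,                                  # TLS port, matching the is_ssl=True default
    socket_pool=socket,                         # CPython's socket module acts as the pool
    ssl_context=ssl.create_default_context(),
)
client.on_message = handle_message
client.connect()
client.subscribe("example/topic", qos=1)        # placeholder topic
client.publish("example/topic", "hello", qos=1)
try:
    while True:
        client.loop(timeout=1)
finally:
    client.disconnect()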
1c4a0fbb4cfee7ec85a999391bf78a422dea764f
1513
py
Python
get_image_from_video.py
matiji66/face-card-machine
bb466a4e06815869ff801ae50f044bf29e53f20d
[ "Apache-2.0" ]
39
2018-09-29T02:57:09.000Z
2021-04-12T13:45:21.000Z
get_image_from_video.py
a543713743/face-attendance-machine
bb466a4e06815869ff801ae50f044bf29e53f20d
[ "Apache-2.0" ]
2
2019-03-26T13:50:29.000Z
2021-03-23T09:49:43.000Z
get_image_from_video.py
a543713743/face-attendance-machine
bb466a4e06815869ff801ae50f044bf29e53f20d
[ "Apache-2.0" ]
20
2018-06-27T13:55:12.000Z
2022-03-26T15:08:01.000Z
import cv2

# This is a demo of running face recognition on a video file and saving the results to a new video file.
#
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Open the input movie file
# input_movie = cv2.VideoCapture("outpy1525941951.7225914.avi")
# length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))

# Create an output movie file (make sure resolution/frame rate matches input video!)
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# output_movie = cv2.VideoWriter('output.avi', fourcc, 29.97, (640, 360))

import os

files = [path for path in os.listdir("./videos") if os.path.isfile(path) and path.endswith(".avi")]

frame_number = 0
for avi in files:
    input_movie = cv2.VideoCapture(avi)
    length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
    while True:
        # Grab a single frame of video
        ret, frame = input_movie.read()
        frame_number += 1

        # Quit when the input video file ends
        if not ret:
            break

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_frame = frame[:, :, ::-1]

        cv2.imwrite("images/image_{}.jpg".format(frame_number), frame)

    # All done!
    input_movie.release()
38.794872
110
0.702578
import cv2

# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

import os

files = [path for path in os.listdir("./videos") if os.path.isfile(path) and path.endswith(".avi")]

frame_number = 0
for avi in files:
    input_movie = cv2.VideoCapture(avi)
    length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
    while True:
        ret, frame = input_movie.read()
        frame_number += 1
        if not ret:
            break
        rgb_frame = frame[:, :, ::-1]
        cv2.imwrite("images/image_{}.jpg".format(frame_number), frame)
    input_movie.release()
true
true
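Note that the script above lists ./videos but then passes the bare filenames to os.path.isfile and cv2.VideoCapture, so the videos are only found if they also sit in the current working directory. A minimal variant that joins the directory explicitly; the directory and output names mirror the original script and are otherwise placeholders:

import os
import cv2

video_dir = "./videos"     # same input directory as the original script
output_dir = "images"      # same output prefix as the original script
os.makedirs(output_dir, exist_ok=True)

files = [
    os.path.join(video_dir, name)
    for name in os.listdir(video_dir)
    if name.endswith(".avi") and os.path.isfile(os.path.join(video_dir, name))
]

frame_number = 0
for avi in files:
    input_movie = cv2.VideoCapture(avi)
    while True:
        ret, frame = input_movie.read()  # grab one frame; ret is False at end of file
        if not ret:
            break
        frame_number += 1
        cv2.imwrite(os.path.join(output_dir, "image_{}.jpg".format(frame_number)), frame)
    input_movie.release()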
1c4a0fd0c26a592c12df47d7e444bcfc3897cded
37709
py
Python
sympy/functions/combinatorial/factorials.py
jainachal03/sympy
7dbc2f49370b31ac6960524ea7e5444e2e5a50d5
[ "BSD-3-Clause" ]
1
2018-11-20T11:40:30.000Z
2018-11-20T11:40:30.000Z
sympy/functions/combinatorial/factorials.py
jainachal03/sympy
7dbc2f49370b31ac6960524ea7e5444e2e5a50d5
[ "BSD-3-Clause" ]
14
2018-02-08T10:11:03.000Z
2019-04-16T10:32:46.000Z
sympy/functions/combinatorial/factorials.py
jainachal03/sympy
7dbc2f49370b31ac6960524ea7e5444e2e5a50d5
[ "BSD-3-Clause" ]
1
2020-09-09T20:41:34.000Z
2020-09-09T20:41:34.000Z
from typing import List from functools import reduce from sympy.core import S, sympify, Dummy, Mod from sympy.core.cache import cacheit from sympy.core.function import Function, ArgumentIndexError, PoleError from sympy.core.logic import fuzzy_and from sympy.core.numbers import Integer, pi, I from sympy.core.relational import Eq from sympy.external.gmpy import HAS_GMPY, gmpy from sympy.ntheory import sieve from sympy.polys.polytools import Poly from math import sqrt as _sqrt class CombinatorialFunction(Function): """Base class for combinatorial functions. """ def _eval_simplify(self, **kwargs): from sympy.simplify.combsimp import combsimp # combinatorial function with non-integer arguments is # automatically passed to gammasimp expr = combsimp(self) measure = kwargs['measure'] if measure(expr) <= kwargs['ratio']*measure(self): return expr return self ############################################################################### ######################## FACTORIAL and MULTI-FACTORIAL ######################## ############################################################################### class factorial(CombinatorialFunction): r"""Implementation of factorial function over nonnegative integers. By convention (consistent with the gamma function and the binomial coefficients), factorial of a negative integer is complex infinity. The factorial is very important in combinatorics where it gives the number of ways in which `n` objects can be permuted. It also arises in calculus, probability, number theory, etc. There is strict relation of factorial with gamma function. In fact `n! = gamma(n+1)` for nonnegative integers. Rewrite of this kind is very useful in case of combinatorial simplification. Computation of the factorial is done using two algorithms. For small arguments a precomputed look up table is used. However for bigger input algorithm Prime-Swing is used. It is the fastest algorithm known and computes `n!` via prime factorization of special class of numbers, called here the 'Swing Numbers'. 
Examples ======== >>> from sympy import Symbol, factorial, S >>> n = Symbol('n', integer=True) >>> factorial(0) 1 >>> factorial(7) 5040 >>> factorial(-2) zoo >>> factorial(n) factorial(n) >>> factorial(2*n) factorial(2*n) >>> factorial(S(1)/2) factorial(1/2) See Also ======== factorial2, RisingFactorial, FallingFactorial """ def fdiff(self, argindex=1): from sympy.functions.special.gamma_functions import (gamma, polygamma) if argindex == 1: return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1) else: raise ArgumentIndexError(self, argindex) _small_swing = [ 1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395, 12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075, 35102025, 5014575, 145422675, 9694845, 300540195, 300540195 ] _small_factorials = [] # type: List[int] @classmethod def _swing(cls, n): if n < 33: return cls._small_swing[n] else: N, primes = int(_sqrt(n)), [] for prime in sieve.primerange(3, N + 1): p, q = 1, n while True: q //= prime if q > 0: if q & 1 == 1: p *= prime else: break if p > 1: primes.append(p) for prime in sieve.primerange(N + 1, n//3 + 1): if (n // prime) & 1 == 1: primes.append(prime) L_product = R_product = 1 for prime in sieve.primerange(n//2 + 1, n + 1): L_product *= prime for prime in primes: R_product *= prime return L_product*R_product @classmethod def _recursive(cls, n): if n < 2: return 1 else: return (cls._recursive(n//2)**2)*cls._swing(n) @classmethod def eval(cls, n): n = sympify(n) if n.is_Number: if n.is_zero: return S.One elif n is S.Infinity: return S.Infinity elif n.is_Integer: if n.is_negative: return S.ComplexInfinity else: n = n.p if n < 20: if not cls._small_factorials: result = 1 for i in range(1, 20): result *= i cls._small_factorials.append(result) result = cls._small_factorials[n-1] # GMPY factorial is faster, use it when available elif HAS_GMPY: result = gmpy.fac(n) else: bits = bin(n).count('1') result = cls._recursive(n)*2**(n - bits) return Integer(result) def _facmod(self, n, q): res, N = 1, int(_sqrt(n)) # Exponent of prime p in n! is e_p(n) = [n/p] + [n/p**2] + ... # for p > sqrt(n), e_p(n) < sqrt(n), the primes with [n/p] = m, # occur consecutively and are grouped together in pw[m] for # simultaneous exponentiation at a later stage pw = [1]*N m = 2 # to initialize the if condition below for prime in sieve.primerange(2, n + 1): if m > 1: m, y = 0, n // prime while y: m += y y //= prime if m < N: pw[m] = pw[m]*prime % q else: res = res*pow(prime, m, q) % q for ex, bs in enumerate(pw): if ex == 0 or bs == 1: continue if bs == 0: return 0 res = res*pow(bs, ex, q) % q return res def _eval_Mod(self, q): n = self.args[0] if n.is_integer and n.is_nonnegative and q.is_integer: aq = abs(q) d = aq - n if d.is_nonpositive: return S.Zero else: isprime = aq.is_prime if d == 1: # Apply Wilson's theorem (if a natural number n > 1 # is a prime number, then (n-1)! = -1 mod n) and # its inverse (if n > 4 is a composite number, then # (n-1)! 
= 0 mod n) if isprime: return -1 % q elif isprime is False and (aq - 6).is_nonnegative: return S.Zero elif n.is_Integer and q.is_Integer: n, d, aq = map(int, (n, d, aq)) if isprime and (d - 1 < n): fc = self._facmod(d - 1, aq) fc = pow(fc, aq - 2, aq) if d%2: fc = -fc else: fc = self._facmod(n, aq) return fc % q def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs): from sympy.functions.special.gamma_functions import gamma return gamma(n + 1) def _eval_rewrite_as_Product(self, n, **kwargs): from sympy.concrete.products import Product if n.is_nonnegative and n.is_integer: i = Dummy('i', integer=True) return Product(i, (i, 1, n)) def _eval_is_integer(self): if self.args[0].is_integer and self.args[0].is_nonnegative: return True def _eval_is_positive(self): if self.args[0].is_integer and self.args[0].is_nonnegative: return True def _eval_is_even(self): x = self.args[0] if x.is_integer and x.is_nonnegative: return (x - 2).is_nonnegative def _eval_is_composite(self): x = self.args[0] if x.is_integer and x.is_nonnegative: return (x - 3).is_nonnegative def _eval_is_real(self): x = self.args[0] if x.is_nonnegative or x.is_noninteger: return True def _eval_as_leading_term(self, x, logx=None, cdir=0): arg = self.args[0].as_leading_term(x) arg0 = arg.subs(x, 0) if arg0.is_zero: return S.One elif not arg0.is_infinite: return self.func(arg) raise PoleError("Cannot expand %s around 0" % (self)) class MultiFactorial(CombinatorialFunction): pass class subfactorial(CombinatorialFunction): r"""The subfactorial counts the derangements of n items and is defined for non-negative integers as: .. math:: !n = \begin{cases} 1 & n = 0 \\ 0 & n = 1 \\ (n-1)(!(n-1) + !(n-2)) & n > 1 \end{cases} It can also be written as ``int(round(n!/exp(1)))`` but the recursive definition with caching is implemented for this function. An interesting analytic expression is the following [2]_ .. math:: !x = \Gamma(x + 1, -1)/e which is valid for non-negative integers `x`. The above formula is not very useful incase of non-integers. :math:`\Gamma(x + 1, -1)` is single-valued only for integral arguments `x`, elsewhere on the positive real axis it has an infinite number of branches none of which are real. References ========== .. [1] https://en.wikipedia.org/wiki/Subfactorial .. 
[2] http://mathworld.wolfram.com/Subfactorial.html Examples ======== >>> from sympy import subfactorial >>> from sympy.abc import n >>> subfactorial(n + 1) subfactorial(n + 1) >>> subfactorial(5) 44 See Also ======== sympy.functions.combinatorial.factorials.factorial, sympy.utilities.iterables.generate_derangements, sympy.functions.special.gamma_functions.uppergamma """ @classmethod @cacheit def _eval(self, n): if not n: return S.One elif n == 1: return S.Zero else: z1, z2 = 1, 0 for i in range(2, n + 1): z1, z2 = z2, (i - 1)*(z2 + z1) return z2 @classmethod def eval(cls, arg): if arg.is_Number: if arg.is_Integer and arg.is_nonnegative: return cls._eval(arg) elif arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Infinity def _eval_is_even(self): if self.args[0].is_odd and self.args[0].is_nonnegative: return True def _eval_is_integer(self): if self.args[0].is_integer and self.args[0].is_nonnegative: return True def _eval_rewrite_as_factorial(self, arg, **kwargs): from sympy.concrete.summations import summation i = Dummy('i') f = S.NegativeOne**i / factorial(i) return factorial(arg) * summation(f, (i, 0, arg)) def _eval_rewrite_as_gamma(self, arg, piecewise=True, **kwargs): from sympy.functions.elementary.exponential import exp from sympy.functions.special.gamma_functions import (gamma, lowergamma) return (S.NegativeOne**(arg + 1)*exp(-I*pi*arg)*lowergamma(arg + 1, -1) + gamma(arg + 1))*exp(-1) def _eval_rewrite_as_uppergamma(self, arg, **kwargs): from sympy.functions.special.gamma_functions import uppergamma return uppergamma(arg + 1, -1)/S.Exp1 def _eval_is_nonnegative(self): if self.args[0].is_integer and self.args[0].is_nonnegative: return True def _eval_is_odd(self): if self.args[0].is_even and self.args[0].is_nonnegative: return True class factorial2(CombinatorialFunction): r"""The double factorial `n!!`, not to be confused with `(n!)!` The double factorial is defined for nonnegative integers and for odd negative integers as: .. math:: n!! = \begin{cases} 1 & n = 0 \\ n(n-2)(n-4) \cdots 1 & n\ \text{positive odd} \\ n(n-2)(n-4) \cdots 2 & n\ \text{positive even} \\ (n+2)!!/(n+2) & n\ \text{negative odd} \end{cases} References ========== .. [1] https://en.wikipedia.org/wiki/Double_factorial Examples ======== >>> from sympy import factorial2, var >>> n = var('n') >>> n n >>> factorial2(n + 1) factorial2(n + 1) >>> factorial2(5) 15 >>> factorial2(-1) 1 >>> factorial2(-5) 1/3 See Also ======== factorial, RisingFactorial, FallingFactorial """ @classmethod def eval(cls, arg): # TODO: extend this to complex numbers? 
if arg.is_Number: if not arg.is_Integer: raise ValueError("argument must be nonnegative integer " "or negative odd integer") # This implementation is faster than the recursive one # It also avoids "maximum recursion depth exceeded" runtime error if arg.is_nonnegative: if arg.is_even: k = arg / 2 return 2**k * factorial(k) return factorial(arg) / factorial2(arg - 1) if arg.is_odd: return arg*(S.NegativeOne)**((1 - arg)/2) / factorial2(-arg) raise ValueError("argument must be nonnegative integer " "or negative odd integer") def _eval_is_even(self): # Double factorial is even for every positive even input n = self.args[0] if n.is_integer: if n.is_odd: return False if n.is_even: if n.is_positive: return True if n.is_zero: return False def _eval_is_integer(self): # Double factorial is an integer for every nonnegative input, and for # -1 and -3 n = self.args[0] if n.is_integer: if (n + 1).is_nonnegative: return True if n.is_odd: return (n + 3).is_nonnegative def _eval_is_odd(self): # Double factorial is odd for every odd input not smaller than -3, and # for 0 n = self.args[0] if n.is_odd: return (n + 3).is_nonnegative if n.is_even: if n.is_positive: return False if n.is_zero: return True def _eval_is_positive(self): # Double factorial is positive for every nonnegative input, and for # every odd negative input which is of the form -1-4k for an # nonnegative integer k n = self.args[0] if n.is_integer: if (n + 1).is_nonnegative: return True if n.is_odd: return ((n + 1) / 2).is_even def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs): from sympy.functions.elementary.miscellaneous import sqrt from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.special.gamma_functions import gamma return 2**(n/2)*gamma(n/2 + 1) * Piecewise((1, Eq(Mod(n, 2), 0)), (sqrt(2/pi), Eq(Mod(n, 2), 1))) ############################################################################### ######################## RISING and FALLING FACTORIALS ######################## ############################################################################### class RisingFactorial(CombinatorialFunction): r""" Rising factorial (also called Pochhammer symbol) is a double valued function arising in concrete mathematics, hypergeometric functions and series expansions. It is defined by: .. math:: rf(x,k) = x \cdot (x+1) \cdots (x+k-1) where `x` can be arbitrary expression and `k` is an integer. For more information check "Concrete mathematics" by Graham, pp. 66 or visit http://mathworld.wolfram.com/RisingFactorial.html page. When `x` is a Poly instance of degree >= 1 with a single variable, `rf(x,k) = x(y) \cdot x(y+1) \cdots x(y+k-1)`, where `y` is the variable of `x`. This is as described in Peter Paule, "Greatest Factorial Factorization and Symbolic Summation", Journal of Symbolic Computation, vol. 20, pp. 235-268, 1995. Examples ======== >>> from sympy import rf, Poly >>> from sympy.abc import x >>> rf(x, 0) 1 >>> rf(1, 5) 120 >>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x) True >>> rf(Poly(x**3, x), 2) Poly(x**6 + 3*x**5 + 3*x**4 + x**3, x, domain='ZZ') Rewriting is complicated unless the relationship between the arguments is known, but rising factorial can be rewritten in terms of gamma, factorial and binomial and falling factorial. >>> from sympy import Symbol, factorial, ff, binomial, gamma >>> n = Symbol('n', integer=True, positive=True) >>> R = rf(n, n + 2) >>> for i in (rf, ff, factorial, binomial, gamma): ... R.rewrite(i) ... 
RisingFactorial(n, n + 2) FallingFactorial(2*n + 1, n + 2) factorial(2*n + 1)/factorial(n - 1) binomial(2*n + 1, n + 2)*factorial(n + 2) gamma(2*n + 2)/gamma(n) See Also ======== factorial, factorial2, FallingFactorial References ========== .. [1] https://en.wikipedia.org/wiki/Pochhammer_symbol """ @classmethod def eval(cls, x, k): x = sympify(x) k = sympify(k) if x is S.NaN or k is S.NaN: return S.NaN elif x is S.One: return factorial(k) elif k.is_Integer: if k.is_zero: return S.One else: if k.is_positive: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: if k.is_odd: return S.NegativeInfinity else: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if len(gens)!= 1: raise ValueError("rf only defined for " "polynomials on one generator") else: return reduce(lambda r, i: r*(x.shift(i)), range(0, int(k)), 1) else: return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1) else: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if len(gens)!= 1: raise ValueError("rf only defined for " "polynomials on one generator") else: return 1/reduce(lambda r, i: r*(x.shift(-i)), range(1, abs(int(k)) + 1), 1) else: return 1/reduce(lambda r, i: r*(x - i), range(1, abs(int(k)) + 1), 1) if k.is_integer == False: if x.is_integer and x.is_negative: return S.Zero def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs): from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.special.gamma_functions import gamma if not piecewise: if (x <= 0) == True: return S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1) return gamma(x + k) / gamma(x) return Piecewise( (gamma(x + k) / gamma(x), x > 0), (S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1), True)) def _eval_rewrite_as_FallingFactorial(self, x, k, **kwargs): return FallingFactorial(x + k - 1, k) def _eval_rewrite_as_factorial(self, x, k, **kwargs): from sympy.functions.elementary.piecewise import Piecewise if x.is_integer and k.is_integer: return Piecewise( (factorial(k + x - 1)/factorial(x - 1), x > 0), (S.NegativeOne**k*factorial(-x)/factorial(-k - x), True)) def _eval_rewrite_as_binomial(self, x, k, **kwargs): if k.is_integer: return factorial(k) * binomial(x + k - 1, k) def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs): from sympy.functions.special.gamma_functions import gamma if limitvar: k_lim = k.subs(limitvar, S.Infinity) if k_lim is S.Infinity: return (gamma(x + k).rewrite('tractable', deep=True) / gamma(x)) elif k_lim is S.NegativeInfinity: return (S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1).rewrite('tractable', deep=True)) return self.rewrite(gamma).rewrite('tractable', deep=True) def _eval_is_integer(self): return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer, self.args[1].is_nonnegative)) class FallingFactorial(CombinatorialFunction): r""" Falling factorial (related to rising factorial) is a double valued function arising in concrete mathematics, hypergeometric functions and series expansions. It is defined by .. math:: ff(x,k) = x \cdot (x-1) \cdots (x-k+1) where `x` can be arbitrary expression and `k` is an integer. For more information check "Concrete mathematics" by Graham, pp. 66 or visit http://mathworld.wolfram.com/FallingFactorial.html page. When `x` is a Poly instance of degree >= 1 with single variable, `ff(x,k) = x(y) \cdot x(y-1) \cdots x(y-k+1)`, where `y` is the variable of `x`. 
This is as described in Peter Paule, "Greatest Factorial Factorization and Symbolic Summation", Journal of Symbolic Computation, vol. 20, pp. 235-268, 1995. >>> from sympy import ff, Poly, Symbol >>> from sympy.abc import x >>> n = Symbol('n', integer=True) >>> ff(x, 0) 1 >>> ff(5, 5) 120 >>> ff(x, 5) == x*(x - 1)*(x - 2)*(x - 3)*(x - 4) True >>> ff(Poly(x**2, x), 2) Poly(x**4 - 2*x**3 + x**2, x, domain='ZZ') >>> ff(n, n) factorial(n) Rewriting is complicated unless the relationship between the arguments is known, but falling factorial can be rewritten in terms of gamma, factorial and binomial and rising factorial. >>> from sympy import factorial, rf, gamma, binomial, Symbol >>> n = Symbol('n', integer=True, positive=True) >>> F = ff(n, n - 2) >>> for i in (rf, ff, factorial, binomial, gamma): ... F.rewrite(i) ... RisingFactorial(3, n - 2) FallingFactorial(n, n - 2) factorial(n)/2 binomial(n, n - 2)*factorial(n - 2) gamma(n + 1)/2 See Also ======== factorial, factorial2, RisingFactorial References ========== .. [1] http://mathworld.wolfram.com/FallingFactorial.html """ @classmethod def eval(cls, x, k): x = sympify(x) k = sympify(k) if x is S.NaN or k is S.NaN: return S.NaN elif k.is_integer and x == k: return factorial(x) elif k.is_Integer: if k.is_zero: return S.One else: if k.is_positive: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: if k.is_odd: return S.NegativeInfinity else: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if len(gens)!= 1: raise ValueError("ff only defined for " "polynomials on one generator") else: return reduce(lambda r, i: r*(x.shift(-i)), range(0, int(k)), 1) else: return reduce(lambda r, i: r*(x - i), range(0, int(k)), 1) else: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if len(gens)!= 1: raise ValueError("rf only defined for " "polynomials on one generator") else: return 1/reduce(lambda r, i: r*(x.shift(i)), range(1, abs(int(k)) + 1), 1) else: return 1/reduce(lambda r, i: r*(x + i), range(1, abs(int(k)) + 1), 1) def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs): from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.special.gamma_functions import gamma if not piecewise: if (x < 0) == True: return S.NegativeOne**k*gamma(k - x) / gamma(-x) return gamma(x + 1) / gamma(x - k + 1) return Piecewise( (gamma(x + 1) / gamma(x - k + 1), x >= 0), (S.NegativeOne**k*gamma(k - x) / gamma(-x), True)) def _eval_rewrite_as_RisingFactorial(self, x, k, **kwargs): return rf(x - k + 1, k) def _eval_rewrite_as_binomial(self, x, k, **kwargs): if k.is_integer: return factorial(k) * binomial(x, k) def _eval_rewrite_as_factorial(self, x, k, **kwargs): from sympy.functions.elementary.piecewise import Piecewise if x.is_integer and k.is_integer: return Piecewise( (factorial(x)/factorial(-k + x), x >= 0), (S.NegativeOne**k*factorial(k - x - 1)/factorial(-x - 1), True)) def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs): from sympy.functions.special.gamma_functions import gamma if limitvar: k_lim = k.subs(limitvar, S.Infinity) if k_lim is S.Infinity: return (S.NegativeOne**k*gamma(k - x).rewrite('tractable', deep=True) / gamma(-x)) elif k_lim is S.NegativeInfinity: return (gamma(x + 1) / gamma(x - k + 1).rewrite('tractable', deep=True)) return self.rewrite(gamma).rewrite('tractable', deep=True) def _eval_is_integer(self): return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer, 
self.args[1].is_nonnegative)) rf = RisingFactorial ff = FallingFactorial ############################################################################### ########################### BINOMIAL COEFFICIENTS ############################# ############################################################################### class binomial(CombinatorialFunction): r"""Implementation of the binomial coefficient. It can be defined in two ways depending on its desired interpretation: .. math:: \binom{n}{k} = \frac{n!}{k!(n-k)!}\ \text{or}\ \binom{n}{k} = \frac{ff(n, k)}{k!} First, in a strict combinatorial sense it defines the number of ways we can choose `k` elements from a set of `n` elements. In this case both arguments are nonnegative integers and binomial is computed using an efficient algorithm based on prime factorization. The other definition is generalization for arbitrary `n`, however `k` must also be nonnegative. This case is very useful when evaluating summations. For the sake of convenience for negative integer `k` this function will return zero no matter what valued is the other argument. To expand the binomial when `n` is a symbol, use either ``expand_func()`` or ``expand(func=True)``. The former will keep the polynomial in factored form while the latter will expand the polynomial itself. See examples for details. Examples ======== >>> from sympy import Symbol, Rational, binomial, expand_func >>> n = Symbol('n', integer=True, positive=True) >>> binomial(15, 8) 6435 >>> binomial(n, -1) 0 Rows of Pascal's triangle can be generated with the binomial function: >>> for N in range(8): ... print([binomial(N, i) for i in range(N + 1)]) ... [1] [1, 1] [1, 2, 1] [1, 3, 3, 1] [1, 4, 6, 4, 1] [1, 5, 10, 10, 5, 1] [1, 6, 15, 20, 15, 6, 1] [1, 7, 21, 35, 35, 21, 7, 1] As can a given diagonal, e.g. the 4th diagonal: >>> N = -4 >>> [binomial(N, i) for i in range(1 - N)] [1, -4, 10, -20, 35] >>> binomial(Rational(5, 4), 3) -5/128 >>> binomial(Rational(-5, 4), 3) -195/128 >>> binomial(n, 3) binomial(n, 3) >>> binomial(n, 3).expand(func=True) n**3/6 - n**2/2 + n/3 >>> expand_func(binomial(n, 3)) n*(n - 2)*(n - 1)/6 References ========== .. 
[1] https://www.johndcook.com/blog/binomial_coefficients/ """ def fdiff(self, argindex=1): from sympy.functions.special.gamma_functions import polygamma if argindex == 1: # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/ n, k = self.args return binomial(n, k)*(polygamma(0, n + 1) - \ polygamma(0, n - k + 1)) elif argindex == 2: # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/ n, k = self.args return binomial(n, k)*(polygamma(0, n - k + 1) - \ polygamma(0, k + 1)) else: raise ArgumentIndexError(self, argindex) @classmethod def _eval(self, n, k): # n.is_Number and k.is_Integer and k != 1 and n != k if k.is_Integer: if n.is_Integer and n >= 0: n, k = int(n), int(k) if k > n: return S.Zero elif k > n // 2: k = n - k if HAS_GMPY: return Integer(gmpy.bincoef(n, k)) d, result = n - k, 1 for i in range(1, k + 1): d += 1 result = result * d // i return Integer(result) else: d, result = n - k, 1 for i in range(1, k + 1): d += 1 result *= d result /= i return result @classmethod def eval(cls, n, k): n, k = map(sympify, (n, k)) d = n - k n_nonneg, n_isint = n.is_nonnegative, n.is_integer if k.is_zero or ((n_nonneg or n_isint is False) and d.is_zero): return S.One if (k - 1).is_zero or ((n_nonneg or n_isint is False) and (d - 1).is_zero): return n if k.is_integer: if k.is_negative or (n_nonneg and n_isint and d.is_negative): return S.Zero elif n.is_number: res = cls._eval(n, k) return res.expand(basic=True) if res else res elif n_nonneg is False and n_isint: # a special case when binomial evaluates to complex infinity return S.ComplexInfinity elif k.is_number: from sympy.functions.special.gamma_functions import gamma return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1)) def _eval_Mod(self, q): n, k = self.args if any(x.is_integer is False for x in (n, k, q)): raise ValueError("Integers expected for binomial Mod") if all(x.is_Integer for x in (n, k, q)): n, k = map(int, (n, k)) aq, res = abs(q), 1 # handle negative integers k or n if k < 0: return S.Zero if n < 0: n = -n + k - 1 res = -1 if k%2 else 1 # non negative integers k and n if k > n: return S.Zero isprime = aq.is_prime aq = int(aq) if isprime: if aq < n: # use Lucas Theorem N, K = n, k while N or K: res = res*binomial(N % aq, K % aq) % aq N, K = N // aq, K // aq else: # use Factorial Modulo d = n - k if k > d: k, d = d, k kf = 1 for i in range(2, k + 1): kf = kf*i % aq df = kf for i in range(k + 1, d + 1): df = df*i % aq res *= df for i in range(d + 1, n + 1): res = res*i % aq res *= pow(kf*df % aq, aq - 2, aq) res %= aq else: # Binomial Factorization is performed by calculating the # exponents of primes <= n in `n! /(k! (n - k)!)`, # for non-negative integers n and k. As the exponent of # prime in n! is e_p(n) = [n/p] + [n/p**2] + ... 
# the exponent of prime in binomial(n, k) would be # e_p(n) - e_p(k) - e_p(n - k) M = int(_sqrt(n)) for prime in sieve.primerange(2, n + 1): if prime > n - k: res = res*prime % aq elif prime > n // 2: continue elif prime > M: if n % prime < k % prime: res = res*prime % aq else: N, K = n, k exp = a = 0 while N > 0: a = int((N % prime) < (K % prime + a)) N, K = N // prime, K // prime exp += a if exp > 0: res *= pow(prime, exp, aq) res %= aq return S(res % q) def _eval_expand_func(self, **hints): """ Function to expand binomial(n, k) when m is positive integer Also, n is self.args[0] and k is self.args[1] while using binomial(n, k) """ n = self.args[0] if n.is_Number: return binomial(*self.args) k = self.args[1] if (n-k).is_Integer: k = n - k if k.is_Integer: if k.is_zero: return S.One elif k.is_negative: return S.Zero else: n, result = self.args[0], 1 for i in range(1, k + 1): result *= n - k + i result /= i return result else: return binomial(*self.args) def _eval_rewrite_as_factorial(self, n, k, **kwargs): return factorial(n)/(factorial(k)*factorial(n - k)) def _eval_rewrite_as_gamma(self, n, k, piecewise=True, **kwargs): from sympy.functions.special.gamma_functions import gamma return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1)) def _eval_rewrite_as_tractable(self, n, k, limitvar=None, **kwargs): return self._eval_rewrite_as_gamma(n, k).rewrite('tractable') def _eval_rewrite_as_FallingFactorial(self, n, k, **kwargs): if k.is_integer: return ff(n, k) / factorial(k) def _eval_is_integer(self): n, k = self.args if n.is_integer and k.is_integer: return True elif k.is_integer is False: return False def _eval_is_nonnegative(self): n, k = self.args if n.is_integer and k.is_integer: if n.is_nonnegative or k.is_negative or k.is_even: return True elif k.is_even is False: return False def _eval_as_leading_term(self, x, logx=None, cdir=0): from sympy.functions.special.gamma_functions import gamma return self.rewrite(gamma)._eval_as_leading_term(x, logx=logx, cdir=cdir)
33.972072
106
0.497971
from typing import List from functools import reduce from sympy.core import S, sympify, Dummy, Mod from sympy.core.cache import cacheit from sympy.core.function import Function, ArgumentIndexError, PoleError from sympy.core.logic import fuzzy_and from sympy.core.numbers import Integer, pi, I from sympy.core.relational import Eq from sympy.external.gmpy import HAS_GMPY, gmpy from sympy.ntheory import sieve from sympy.polys.polytools import Poly from math import sqrt as _sqrt class CombinatorialFunction(Function): def _eval_simplify(self, **kwargs): from sympy.simplify.combsimp import combsimp expr = combsimp(self) measure = kwargs['measure'] if measure(expr) <= kwargs['ratio']*measure(self): return expr return self raise ValueError("argument must be nonnegative integer " "or negative odd integer") # This implementation is faster than the recursive one # It also avoids "maximum recursion depth exceeded" runtime error if arg.is_nonnegative: if arg.is_even: k = arg / 2 return 2**k * factorial(k) return factorial(arg) / factorial2(arg - 1) if arg.is_odd: return arg*(S.NegativeOne)**((1 - arg)/2) / factorial2(-arg) raise ValueError("argument must be nonnegative integer " "or negative odd integer") def _eval_is_even(self): # Double factorial is even for every positive even input n = self.args[0] if n.is_integer: if n.is_odd: return False if n.is_even: if n.is_positive: return True if n.is_zero: return False def _eval_is_integer(self): # Double factorial is an integer for every nonnegative input, and for # -1 and -3 n = self.args[0] if n.is_integer: if (n + 1).is_nonnegative: return True if n.is_odd: return (n + 3).is_nonnegative def _eval_is_odd(self): # Double factorial is odd for every odd input not smaller than -3, and # for 0 n = self.args[0] if n.is_odd: return (n + 3).is_nonnegative if n.is_even: if n.is_positive: return False if n.is_zero: return True def _eval_is_positive(self): # Double factorial is positive for every nonnegative input, and for # every odd negative input which is of the form -1-4k for an # nonnegative integer k n = self.args[0] if n.is_integer: if (n + 1).is_nonnegative: return True if n.is_odd: return ((n + 1) / 2).is_even def _eval_rewrite_as_gamma(self, n, piecewise=True, **kwargs): from sympy.functions.elementary.miscellaneous import sqrt from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.special.gamma_functions import gamma return 2**(n/2)*gamma(n/2 + 1) * Piecewise((1, Eq(Mod(n, 2), 0)), (sqrt(2/pi), Eq(Mod(n, 2), 1))) ############################################################################### ######################## RISING and FALLING FACTORIALS ######################## ############################################################################### class RisingFactorial(CombinatorialFunction): @classmethod def eval(cls, x, k): x = sympify(x) k = sympify(k) if x is S.NaN or k is S.NaN: return S.NaN elif x is S.One: return factorial(k) elif k.is_Integer: if k.is_zero: return S.One else: if k.is_positive: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: if k.is_odd: return S.NegativeInfinity else: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if len(gens)!= 1: raise ValueError("rf only defined for " "polynomials on one generator") else: return reduce(lambda r, i: r*(x.shift(i)), range(0, int(k)), 1) else: return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1) else: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if 
len(gens)!= 1: raise ValueError("rf only defined for " "polynomials on one generator") else: return 1/reduce(lambda r, i: r*(x.shift(-i)), range(1, abs(int(k)) + 1), 1) else: return 1/reduce(lambda r, i: r*(x - i), range(1, abs(int(k)) + 1), 1) if k.is_integer == False: if x.is_integer and x.is_negative: return S.Zero def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs): from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.special.gamma_functions import gamma if not piecewise: if (x <= 0) == True: return S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1) return gamma(x + k) / gamma(x) return Piecewise( (gamma(x + k) / gamma(x), x > 0), (S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1), True)) def _eval_rewrite_as_FallingFactorial(self, x, k, **kwargs): return FallingFactorial(x + k - 1, k) def _eval_rewrite_as_factorial(self, x, k, **kwargs): from sympy.functions.elementary.piecewise import Piecewise if x.is_integer and k.is_integer: return Piecewise( (factorial(k + x - 1)/factorial(x - 1), x > 0), (S.NegativeOne**k*factorial(-x)/factorial(-k - x), True)) def _eval_rewrite_as_binomial(self, x, k, **kwargs): if k.is_integer: return factorial(k) * binomial(x + k - 1, k) def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs): from sympy.functions.special.gamma_functions import gamma if limitvar: k_lim = k.subs(limitvar, S.Infinity) if k_lim is S.Infinity: return (gamma(x + k).rewrite('tractable', deep=True) / gamma(x)) elif k_lim is S.NegativeInfinity: return (S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1).rewrite('tractable', deep=True)) return self.rewrite(gamma).rewrite('tractable', deep=True) def _eval_is_integer(self): return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer, self.args[1].is_nonnegative)) class FallingFactorial(CombinatorialFunction): @classmethod def eval(cls, x, k): x = sympify(x) k = sympify(k) if x is S.NaN or k is S.NaN: return S.NaN elif k.is_integer and x == k: return factorial(x) elif k.is_Integer: if k.is_zero: return S.One else: if k.is_positive: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: if k.is_odd: return S.NegativeInfinity else: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if len(gens)!= 1: raise ValueError("ff only defined for " "polynomials on one generator") else: return reduce(lambda r, i: r*(x.shift(-i)), range(0, int(k)), 1) else: return reduce(lambda r, i: r*(x - i), range(0, int(k)), 1) else: if x is S.Infinity: return S.Infinity elif x is S.NegativeInfinity: return S.Infinity else: if isinstance(x, Poly): gens = x.gens if len(gens)!= 1: raise ValueError("rf only defined for " "polynomials on one generator") else: return 1/reduce(lambda r, i: r*(x.shift(i)), range(1, abs(int(k)) + 1), 1) else: return 1/reduce(lambda r, i: r*(x + i), range(1, abs(int(k)) + 1), 1) def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs): from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.special.gamma_functions import gamma if not piecewise: if (x < 0) == True: return S.NegativeOne**k*gamma(k - x) / gamma(-x) return gamma(x + 1) / gamma(x - k + 1) return Piecewise( (gamma(x + 1) / gamma(x - k + 1), x >= 0), (S.NegativeOne**k*gamma(k - x) / gamma(-x), True)) def _eval_rewrite_as_RisingFactorial(self, x, k, **kwargs): return rf(x - k + 1, k) def _eval_rewrite_as_binomial(self, x, k, **kwargs): if k.is_integer: return factorial(k) * binomial(x, k) def _eval_rewrite_as_factorial(self, x, k, **kwargs): from 
sympy.functions.elementary.piecewise import Piecewise if x.is_integer and k.is_integer: return Piecewise( (factorial(x)/factorial(-k + x), x >= 0), (S.NegativeOne**k*factorial(k - x - 1)/factorial(-x - 1), True)) def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs): from sympy.functions.special.gamma_functions import gamma if limitvar: k_lim = k.subs(limitvar, S.Infinity) if k_lim is S.Infinity: return (S.NegativeOne**k*gamma(k - x).rewrite('tractable', deep=True) / gamma(-x)) elif k_lim is S.NegativeInfinity: return (gamma(x + 1) / gamma(x - k + 1).rewrite('tractable', deep=True)) return self.rewrite(gamma).rewrite('tractable', deep=True) def _eval_is_integer(self): return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer, self.args[1].is_nonnegative)) rf = RisingFactorial ff = FallingFactorial ############################################################################### ########################### BINOMIAL COEFFICIENTS ############################# ############################################################################### class binomial(CombinatorialFunction): def fdiff(self, argindex=1): from sympy.functions.special.gamma_functions import polygamma if argindex == 1: # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/ n, k = self.args return binomial(n, k)*(polygamma(0, n + 1) - \ polygamma(0, n - k + 1)) elif argindex == 2: # http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/ n, k = self.args return binomial(n, k)*(polygamma(0, n - k + 1) - \ polygamma(0, k + 1)) else: raise ArgumentIndexError(self, argindex) @classmethod def _eval(self, n, k): # n.is_Number and k.is_Integer and k != 1 and n != k if k.is_Integer: if n.is_Integer and n >= 0: n, k = int(n), int(k) if k > n: return S.Zero elif k > n // 2: k = n - k if HAS_GMPY: return Integer(gmpy.bincoef(n, k)) d, result = n - k, 1 for i in range(1, k + 1): d += 1 result = result * d // i return Integer(result) else: d, result = n - k, 1 for i in range(1, k + 1): d += 1 result *= d result /= i return result @classmethod def eval(cls, n, k): n, k = map(sympify, (n, k)) d = n - k n_nonneg, n_isint = n.is_nonnegative, n.is_integer if k.is_zero or ((n_nonneg or n_isint is False) and d.is_zero): return S.One if (k - 1).is_zero or ((n_nonneg or n_isint is False) and (d - 1).is_zero): return n if k.is_integer: if k.is_negative or (n_nonneg and n_isint and d.is_negative): return S.Zero elif n.is_number: res = cls._eval(n, k) return res.expand(basic=True) if res else res elif n_nonneg is False and n_isint: # a special case when binomial evaluates to complex infinity return S.ComplexInfinity elif k.is_number: from sympy.functions.special.gamma_functions import gamma return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1)) def _eval_Mod(self, q): n, k = self.args if any(x.is_integer is False for x in (n, k, q)): raise ValueError("Integers expected for binomial Mod") if all(x.is_Integer for x in (n, k, q)): n, k = map(int, (n, k)) aq, res = abs(q), 1 # handle negative integers k or n if k < 0: return S.Zero if n < 0: n = -n + k - 1 res = -1 if k%2 else 1 # non negative integers k and n if k > n: return S.Zero isprime = aq.is_prime aq = int(aq) if isprime: if aq < n: # use Lucas Theorem N, K = n, k while N or K: res = res*binomial(N % aq, K % aq) % aq N, K = N // aq, K // aq else: # use Factorial Modulo d = n - k if k > d: k, d = d, k kf = 1 for i in range(2, k + 1): kf = kf*i % aq df = kf for i in range(k + 1, d + 1): df = df*i % aq res *= df for i in range(d + 1, n + 1): res = res*i % aq res *= 
pow(kf*df % aq, aq - 2, aq) res %= aq else: # Binomial Factorization is performed by calculating the # exponents of primes <= n in `n! /(k! (n - k)!)`, # for non-negative integers n and k. As the exponent of # prime in n! is e_p(n) = [n/p] + [n/p**2] + ... # the exponent of prime in binomial(n, k) would be # e_p(n) - e_p(k) - e_p(n - k) M = int(_sqrt(n)) for prime in sieve.primerange(2, n + 1): if prime > n - k: res = res*prime % aq elif prime > n // 2: continue elif prime > M: if n % prime < k % prime: res = res*prime % aq else: N, K = n, k exp = a = 0 while N > 0: a = int((N % prime) < (K % prime + a)) N, K = N // prime, K // prime exp += a if exp > 0: res *= pow(prime, exp, aq) res %= aq return S(res % q) def _eval_expand_func(self, **hints): n = self.args[0] if n.is_Number: return binomial(*self.args) k = self.args[1] if (n-k).is_Integer: k = n - k if k.is_Integer: if k.is_zero: return S.One elif k.is_negative: return S.Zero else: n, result = self.args[0], 1 for i in range(1, k + 1): result *= n - k + i result /= i return result else: return binomial(*self.args) def _eval_rewrite_as_factorial(self, n, k, **kwargs): return factorial(n)/(factorial(k)*factorial(n - k)) def _eval_rewrite_as_gamma(self, n, k, piecewise=True, **kwargs): from sympy.functions.special.gamma_functions import gamma return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1)) def _eval_rewrite_as_tractable(self, n, k, limitvar=None, **kwargs): return self._eval_rewrite_as_gamma(n, k).rewrite('tractable') def _eval_rewrite_as_FallingFactorial(self, n, k, **kwargs): if k.is_integer: return ff(n, k) / factorial(k) def _eval_is_integer(self): n, k = self.args if n.is_integer and k.is_integer: return True elif k.is_integer is False: return False def _eval_is_nonnegative(self): n, k = self.args if n.is_integer and k.is_integer: if n.is_nonnegative or k.is_negative or k.is_even: return True elif k.is_even is False: return False def _eval_as_leading_term(self, x, logx=None, cdir=0): from sympy.functions.special.gamma_functions import gamma return self.rewrite(gamma)._eval_as_leading_term(x, logx=logx, cdir=cdir)
true
true
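The binomial code in the sample above reduces binomial(n, k) modulo a prime with Lucas' theorem. The following standalone sketch is not that implementation, only an illustration of the same idea under the assumption that the modulus p is prime; it is checked against math.comb.

from math import comb

def binom_mod_prime(n, k, p):
    # Lucas' theorem: C(n, k) mod p equals the product over base-p digits
    # of C(n_i, k_i) mod p, provided p is prime.
    res = 1
    while n or k:
        ni, ki = n % p, k % p
        if ki > ni:
            return 0  # one failing digit comparison forces the whole product to 0
        res = res * comb(ni, ki) % p
        n //= p
        k //= p
    return res

assert binom_mod_prime(10**6, 345, 13) == comb(10**6, 345) % 13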
1c4a104cf5b6c4c66a2d343c800f0bc19b3712c1
1,120
py
Python
src/infi/pyutils/misc.py
jasonjorge/infi.asi
78a4c34a421102f99b959a659cf7303804627d9b
[ "BSD-3-Clause" ]
1
2022-02-12T20:30:55.000Z
2022-02-12T20:30:55.000Z
src/infi/pyutils/misc.py
jasonjorge/infi.asi
78a4c34a421102f99b959a659cf7303804627d9b
[ "BSD-3-Clause" ]
5
2015-11-08T14:50:42.000Z
2020-06-23T14:42:33.000Z
src/infi/pyutils/misc.py
jasonjorge/infi.asi
78a4c34a421102f99b959a659cf7303804627d9b
[ "BSD-3-Clause" ]
4
2015-02-22T09:06:59.000Z
2022-02-12T20:30:55.000Z
_NOTHING = object() def recursive_getattr(obj, attr, default=_NOTHING): for subattr in attr.split("."): obj = getattr(obj, subattr, _NOTHING) if obj is _NOTHING: if default is not _NOTHING: return default raise AttributeError(attr) return obj class Reprify(object): def __init__(self, original, str=None, repr=None): super(Reprify, self).__init__() self._strify__original = original if repr is None: repr = str if str is None: str = repr self._strify__str = str self._strify__repr = repr def __getattribute__(self, attr): if attr.startswith('_strify__'): return super(Reprify, self).__getattribute__(attr) return getattr(self._strify__original, attr) def __repr__(self): if self._strify__repr is not None: return self._strify__repr return repr(self._strify__original) def __str__(self): if self._strify__str is not None: return self._strify__str return str(self._strify__original)
32.941176
62
0.621429
_NOTHING = object() def recursive_getattr(obj, attr, default=_NOTHING): for subattr in attr.split("."): obj = getattr(obj, subattr, _NOTHING) if obj is _NOTHING: if default is not _NOTHING: return default raise AttributeError(attr) return obj class Reprify(object): def __init__(self, original, str=None, repr=None): super(Reprify, self).__init__() self._strify__original = original if repr is None: repr = str if str is None: str = repr self._strify__str = str self._strify__repr = repr def __getattribute__(self, attr): if attr.startswith('_strify__'): return super(Reprify, self).__getattribute__(attr) return getattr(self._strify__original, attr) def __repr__(self): if self._strify__repr is not None: return self._strify__repr return repr(self._strify__original) def __str__(self): if self._strify__str is not None: return self._strify__str return str(self._strify__original)
true
true
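A quick usage sketch for the two helpers in the file above (hypothetical: it assumes Reprify and recursive_getattr are imported or pasted into scope; the Point class is made up for the example).

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

p = Reprify(Point(1, 2), str="Point<1, 2>")
print(p)                                    # custom __str__ -> Point<1, 2>
print(p.x + p.y)                            # attribute access is delegated to the wrapped Point -> 3
print(recursive_getattr(p, "x.real", "?"))  # dotted lookup with a default -> 1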
1c4a10aa8c31765be89abb6d09a8c8e18a6945b0
5,744
py
Python
qiskit/test/decorators.py
EnriqueL8/qiskit-terra
08b801f1f8598c4e44680b4a75c232ed92db0262
[ "Apache-2.0" ]
2
2019-06-28T19:58:42.000Z
2019-07-26T05:04:02.000Z
qiskit/test/decorators.py
EnriqueL8/qiskit-terra
08b801f1f8598c4e44680b4a75c232ed92db0262
[ "Apache-2.0" ]
null
null
null
qiskit/test/decorators.py
EnriqueL8/qiskit-terra
08b801f1f8598c4e44680b4a75c232ed92db0262
[ "Apache-2.0" ]
1
2020-01-24T21:01:06.000Z
2020-01-24T21:01:06.000Z
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2018. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Decorator for using with Qiskit unit tests.""" import functools import os import sys import unittest from warnings import warn from qiskit.util import _has_connection from .testing_options import get_test_options HAS_NET_CONNECTION = None def is_aer_provider_available(): """Check if the C++ simulator can be instantiated. Returns: bool: True if simulator executable is available """ # TODO: HACK FROM THE DEPTHS OF DESPAIR AS AER DOES NOT WORK ON MAC if sys.platform == 'darwin': return False try: import qiskit.providers.aer # pylint: disable=unused-import except ImportError: return False return True def requires_aer_provider(test_item): """Decorator that skips test if qiskit aer provider is not available Args: test_item (callable): function or class to be decorated. Returns: callable: the decorated function. """ reason = 'Aer provider not found, skipping test' return unittest.skipIf(not is_aer_provider_available(), reason)(test_item) def slow_test(func): """Decorator that signals that the test takes minutes to run. Args: func (callable): test function to be decorated. Returns: callable: the decorated function. """ @functools.wraps(func) def _wrapper(*args, **kwargs): skip_slow = not TEST_OPTIONS['run_slow'] if skip_slow: raise unittest.SkipTest('Skipping slow tests') return func(*args, **kwargs) return _wrapper def _get_credentials(): """Finds the credentials for a specific test and options. Returns: Credentials: set of credentials Raises: SkipTest: when credentials can't be found """ try: from qiskit.providers.ibmq.credentials import (Credentials, discover_credentials) except ImportError: raise unittest.SkipTest('qiskit-ibmq-provider could not be found, ' 'and is required for executing online tests.') if os.getenv('IBMQ_TOKEN') and os.getenv('IBMQ_URL'): return Credentials(os.getenv('IBMQ_TOKEN'), os.getenv('IBMQ_URL')) elif os.getenv('QISKIT_TESTS_USE_CREDENTIALS_FILE'): # Attempt to read the standard credentials. discovered_credentials = discover_credentials() if discovered_credentials: # Decide which credentials to use for testing. if len(discovered_credentials) > 1: raise unittest.SkipTest( "More than 1 credential set found, use: " "IBMQ_TOKEN and IBMQ_URL env variables to " "set credentials explicitly") # Use the first available credentials. return list(discovered_credentials.values())[0] raise unittest.SkipTest( 'No IBMQ credentials found for running the test. 
This is required for ' 'running online tests.') def requires_qe_access(func): """Deprecated in favor of `online_test`""" warn("`requires_qe_access` is going to be replaced in favor of `online_test`", DeprecationWarning) @functools.wraps(func) def _wrapper(self, *args, **kwargs): if TEST_OPTIONS['skip_online']: raise unittest.SkipTest('Skipping online tests') credentials = _get_credentials() self.using_ibmq_credentials = credentials.is_ibmq() kwargs.update({'qe_token': credentials.token, 'qe_url': credentials.url}) return func(self, *args, **kwargs) return _wrapper def online_test(func): """Decorator that signals that the test uses the network (and the online API): It involves: * determines if the test should be skipped by checking environment variables. * if the `USE_ALTERNATE_ENV_CREDENTIALS` environment variable is set, it reads the credentials from an alternative set of environment variables. * if the test is not skipped, it reads `qe_token` and `qe_url` from `Qconfig.py`, environment variables or qiskitrc. * if the test is not skipped, it appends `qe_token` and `qe_url` as arguments to the test function. Args: func (callable): test function to be decorated. Returns: callable: the decorated function. """ @functools.wraps(func) def _wrapper(self, *args, **kwargs): # To avoid checking the connection in each test global HAS_NET_CONNECTION # pylint: disable=global-statement if TEST_OPTIONS['skip_online']: raise unittest.SkipTest('Skipping online tests') if HAS_NET_CONNECTION is None: HAS_NET_CONNECTION = _has_connection('qiskit.org', 443) if not HAS_NET_CONNECTION: raise unittest.SkipTest("Test requires internet connection.") credentials = _get_credentials() self.using_ibmq_credentials = credentials.is_ibmq() kwargs.update({'qe_token': credentials.token, 'qe_url': credentials.url}) return func(self, *args, **kwargs) return _wrapper TEST_OPTIONS = get_test_options()
31.56044
82
0.657556
import functools import os import sys import unittest from warnings import warn from qiskit.util import _has_connection from .testing_options import get_test_options HAS_NET_CONNECTION = None def is_aer_provider_available(): if sys.platform == 'darwin': return False try: import qiskit.providers.aer except ImportError: return False return True def requires_aer_provider(test_item): reason = 'Aer provider not found, skipping test' return unittest.skipIf(not is_aer_provider_available(), reason)(test_item) def slow_test(func): @functools.wraps(func) def _wrapper(*args, **kwargs): skip_slow = not TEST_OPTIONS['run_slow'] if skip_slow: raise unittest.SkipTest('Skipping slow tests') return func(*args, **kwargs) return _wrapper def _get_credentials(): try: from qiskit.providers.ibmq.credentials import (Credentials, discover_credentials) except ImportError: raise unittest.SkipTest('qiskit-ibmq-provider could not be found, ' 'and is required for executing online tests.') if os.getenv('IBMQ_TOKEN') and os.getenv('IBMQ_URL'): return Credentials(os.getenv('IBMQ_TOKEN'), os.getenv('IBMQ_URL')) elif os.getenv('QISKIT_TESTS_USE_CREDENTIALS_FILE'): discovered_credentials = discover_credentials() if discovered_credentials: if len(discovered_credentials) > 1: raise unittest.SkipTest( "More than 1 credential set found, use: " "IBMQ_TOKEN and IBMQ_URL env variables to " "set credentials explicitly") return list(discovered_credentials.values())[0] raise unittest.SkipTest( 'No IBMQ credentials found for running the test. This is required for ' 'running online tests.') def requires_qe_access(func): warn("`requires_qe_access` is going to be replaced in favor of `online_test`", DeprecationWarning) @functools.wraps(func) def _wrapper(self, *args, **kwargs): if TEST_OPTIONS['skip_online']: raise unittest.SkipTest('Skipping online tests') credentials = _get_credentials() self.using_ibmq_credentials = credentials.is_ibmq() kwargs.update({'qe_token': credentials.token, 'qe_url': credentials.url}) return func(self, *args, **kwargs) return _wrapper def online_test(func): @functools.wraps(func) def _wrapper(self, *args, **kwargs): global HAS_NET_CONNECTION if TEST_OPTIONS['skip_online']: raise unittest.SkipTest('Skipping online tests') if HAS_NET_CONNECTION is None: HAS_NET_CONNECTION = _has_connection('qiskit.org', 443) if not HAS_NET_CONNECTION: raise unittest.SkipTest("Test requires internet connection.") credentials = _get_credentials() self.using_ibmq_credentials = credentials.is_ibmq() kwargs.update({'qe_token': credentials.token, 'qe_url': credentials.url}) return func(self, *args, **kwargs) return _wrapper TEST_OPTIONS = get_test_options()
true
true
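Every decorator in the file above follows the same shape: wrap the test function, decide from an option or a network probe whether it can run, and raise unittest.SkipTest before the body executes. Below is a minimal standalone sketch of that pattern, not Qiskit's code; the RUN_SLOW variable name is an assumption chosen only for illustration.

import functools
import os
import unittest

def skip_unless_env(var):
    """Skip the decorated test unless the named environment variable is set."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not os.getenv(var):
                raise unittest.SkipTest("set %s=1 to run this test" % var)
            return func(*args, **kwargs)
        return wrapper
    return decorator

class Example(unittest.TestCase):
    @skip_unless_env("RUN_SLOW")
    def test_slow_path(self):
        self.assertTrue(True)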
1c4a1166d51808988f81418c99b6254b81daae63
4,071
py
Python
alipay/aop/api/request/KoubeiServindustryPortfolioDataCreateRequest.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/request/KoubeiServindustryPortfolioDataCreateRequest.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/request/KoubeiServindustryPortfolioDataCreateRequest.py
articuly/alipay-sdk-python-all
0259cd28eca0f219b97dac7f41c2458441d5e7a6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import simplejson as json from alipay.aop.api.FileItem import FileItem from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.KoubeiServindustryPortfolioDataCreateModel import KoubeiServindustryPortfolioDataCreateModel class KoubeiServindustryPortfolioDataCreateRequest(object): def __init__(self, biz_model=None): self._biz_model = biz_model self._biz_content = None self._version = "1.0" self._terminal_type = None self._terminal_info = None self._prod_code = None self._notify_url = None self._return_url = None self._udf_params = None self._need_encrypt = False @property def biz_model(self): return self._biz_model @biz_model.setter def biz_model(self, value): self._biz_model = value @property def biz_content(self): return self._biz_content @biz_content.setter def biz_content(self, value): if isinstance(value, KoubeiServindustryPortfolioDataCreateModel): self._biz_content = value else: self._biz_content = KoubeiServindustryPortfolioDataCreateModel.from_alipay_dict(value) @property def version(self): return self._version @version.setter def version(self, value): self._version = value @property def terminal_type(self): return self._terminal_type @terminal_type.setter def terminal_type(self, value): self._terminal_type = value @property def terminal_info(self): return self._terminal_info @terminal_info.setter def terminal_info(self, value): self._terminal_info = value @property def prod_code(self): return self._prod_code @prod_code.setter def prod_code(self, value): self._prod_code = value @property def notify_url(self): return self._notify_url @notify_url.setter def notify_url(self, value): self._notify_url = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def udf_params(self): return self._udf_params @udf_params.setter def udf_params(self, value): if not isinstance(value, dict): return self._udf_params = value @property def need_encrypt(self): return self._need_encrypt @need_encrypt.setter def need_encrypt(self, value): self._need_encrypt = value def add_other_text_param(self, key, value): if not self.udf_params: self.udf_params = dict() self.udf_params[key] = value def get_params(self): params = dict() params[P_METHOD] = 'koubei.servindustry.portfolio.data.create' params[P_VERSION] = self.version if self.biz_model: params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':')) if self.biz_content: if hasattr(self.biz_content, 'to_alipay_dict'): params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':')) else: params['biz_content'] = self.biz_content if self.terminal_type: params['terminal_type'] = self.terminal_type if self.terminal_info: params['terminal_info'] = self.terminal_info if self.prod_code: params['prod_code'] = self.prod_code if self.notify_url: params['notify_url'] = self.notify_url if self.return_url: params['return_url'] = self.return_url if self.udf_params: params.update(self.udf_params) return params def get_multipart_params(self): multipart_params = dict() return multipart_params
28.075862
166
0.651437
import simplejson as json from alipay.aop.api.FileItem import FileItem from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.KoubeiServindustryPortfolioDataCreateModel import KoubeiServindustryPortfolioDataCreateModel class KoubeiServindustryPortfolioDataCreateRequest(object): def __init__(self, biz_model=None): self._biz_model = biz_model self._biz_content = None self._version = "1.0" self._terminal_type = None self._terminal_info = None self._prod_code = None self._notify_url = None self._return_url = None self._udf_params = None self._need_encrypt = False @property def biz_model(self): return self._biz_model @biz_model.setter def biz_model(self, value): self._biz_model = value @property def biz_content(self): return self._biz_content @biz_content.setter def biz_content(self, value): if isinstance(value, KoubeiServindustryPortfolioDataCreateModel): self._biz_content = value else: self._biz_content = KoubeiServindustryPortfolioDataCreateModel.from_alipay_dict(value) @property def version(self): return self._version @version.setter def version(self, value): self._version = value @property def terminal_type(self): return self._terminal_type @terminal_type.setter def terminal_type(self, value): self._terminal_type = value @property def terminal_info(self): return self._terminal_info @terminal_info.setter def terminal_info(self, value): self._terminal_info = value @property def prod_code(self): return self._prod_code @prod_code.setter def prod_code(self, value): self._prod_code = value @property def notify_url(self): return self._notify_url @notify_url.setter def notify_url(self, value): self._notify_url = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def udf_params(self): return self._udf_params @udf_params.setter def udf_params(self, value): if not isinstance(value, dict): return self._udf_params = value @property def need_encrypt(self): return self._need_encrypt @need_encrypt.setter def need_encrypt(self, value): self._need_encrypt = value def add_other_text_param(self, key, value): if not self.udf_params: self.udf_params = dict() self.udf_params[key] = value def get_params(self): params = dict() params[P_METHOD] = 'koubei.servindustry.portfolio.data.create' params[P_VERSION] = self.version if self.biz_model: params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':')) if self.biz_content: if hasattr(self.biz_content, 'to_alipay_dict'): params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':')) else: params['biz_content'] = self.biz_content if self.terminal_type: params['terminal_type'] = self.terminal_type if self.terminal_info: params['terminal_info'] = self.terminal_info if self.prod_code: params['prod_code'] = self.prod_code if self.notify_url: params['notify_url'] = self.notify_url if self.return_url: params['return_url'] = self.return_url if self.udf_params: params.update(self.udf_params) return params def get_multipart_params(self): multipart_params = dict() return multipart_params
true
true
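A hypothetical call sequence for the request class above, assuming the alipay-sdk-python-all package that provides these modules is installed; the biz_content key used here is a placeholder, not a documented field.

from alipay.aop.api.request.KoubeiServindustryPortfolioDataCreateRequest import KoubeiServindustryPortfolioDataCreateRequest

request = KoubeiServindustryPortfolioDataCreateRequest()
request.biz_content = {"portfolio_name": "example"}   # the setter coerces a dict via from_alipay_dict
request.notify_url = "https://example.com/notify"
params = request.get_params()                         # dict ready to be signed and posted by the client
print(sorted(params))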
1c4a11f4340a879dad3ddc6e9c05271e621cc0f4
4,416
py
Python
scripts/table4_run.py
nataliepopescu/osdi21-artifact
6a268c90a8ce449256b5c290caeb7e0e3b9d7e5c
[ "MIT" ]
null
null
null
scripts/table4_run.py
nataliepopescu/osdi21-artifact
6a268c90a8ce449256b5c290caeb7e0e3b9d7e5c
[ "MIT" ]
null
null
null
scripts/table4_run.py
nataliepopescu/osdi21-artifact
6a268c90a8ce449256b5c290caeb7e0e3b9d7e5c
[ "MIT" ]
null
null
null
import os import subprocess import re import time from numpy import average from ExpStats import runExpWithName ROOT_PATH = os.path.dirname(os.path.realpath(__file__)) def parseThroughput(out): try: m = re.search(r'Requests/sec: ([0-9,.]+)', out) # m = re.search(r'([0-9,]+) ns/iter', out) s = m.group(1) result = float(s.strip()) #s = s.replace(',', '') #result = int(s) except Exception: print(out) print("Run experiment failed") return None return result def parseZola(out): try: m = re.search(r'Done in ([0-9]+)ms.', out) # m = re.search(r'([0-9,]+) ns/iter', out) s = m.group(1) result = float(s.strip()) #s = s.replace(',', '') #result = int(s) except Exception: print(out) print("Run experiment failed") return None return result def test_swc(): print("Testing swc") os.chdir(ROOT_PATH + "/../benchmarks/swc") safe_time, _, _ = runExpWithName("./test_bc-safe", None, 20, False) unsafe_time, _, _ = runExpWithName("./test_bc-unsafe", None, 20, False) perf_diff = (safe_time - unsafe_time) / unsafe_time print("Performance difference of swc is: {:2.2%}".format(perf_diff)) def test_warp(): print("Testing warp") os.chdir(ROOT_PATH + "/../benchmarks/warp") out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") # convert to string from bytes safe_throughput = parseThroughput(out) out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") # convert to string from bytes unsafe_throughput = parseThroughput(out) if safe_throughput and unsafe_throughput: perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput print("Performance difference of warp is: {:2.2%}".format(perf_diff)) def test_iron(): print("Testing iron") os.chdir(ROOT_PATH + "/../benchmarks/iron") out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") # convert to string from bytes safe_throughput = parseThroughput(out) out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") # convert to string from bytes unsafe_throughput = parseThroughput(out) if safe_throughput and unsafe_throughput: perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput print("Performance difference of iron is: {:2.2%}".format(perf_diff)) def test_zola(): print("Testing zola") os.chdir(ROOT_PATH + "/../benchmarks/zola") time_list = [] for _ in range(100): out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") # convert to string from bytes time = parseZola(out) time_list.append(time) unsafe_time = average(time_list) time_list = [] for _ in range(100): out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") # convert to string from bytes time = parseZola(out) time_list.append(time) safe_time = average(time_list) perf_diff = (safe_time - unsafe_time) / unsafe_time print("Performance difference of zola is: {:2.2%}".format(perf_diff)) def test_rustpython(): print("Testing RustPython") os.chdir(ROOT_PATH + "/../benchmarks/RustPython") arg = ROOT_PATH + "/../benchmarks/RustPython/benches/benchmarks/pystone.py" safe_time, 
_, _ = runExpWithName("./test_bc-safe", arg, 10, False) unsafe_time, _, _ = runExpWithName("./test_bc-unsafe", arg, 10, False) perf_diff = (safe_time - unsafe_time) / unsafe_time print("Performance difference of RustPython is: {:2.2%}".format(perf_diff)) os.chdir(ROOT_PATH + "/../benchmarks") test_iron() test_swc() test_warp() test_zola() test_rustpython()
35.328
118
0.64538
import os import subprocess import re import time from numpy import average from ExpStats import runExpWithName ROOT_PATH = os.path.dirname(os.path.realpath(__file__)) def parseThroughput(out): try: m = re.search(r'Requests/sec: ([0-9,.]+)', out) s = m.group(1) result = float(s.strip()) except Exception: print(out) print("Run experiment failed") return None return result def parseZola(out): try: m = re.search(r'Done in ([0-9]+)ms.', out) s = m.group(1) result = float(s.strip()) except Exception: print(out) print("Run experiment failed") return None return result def test_swc(): print("Testing swc") os.chdir(ROOT_PATH + "/../benchmarks/swc") safe_time, _, _ = runExpWithName("./test_bc-safe", None, 20, False) unsafe_time, _, _ = runExpWithName("./test_bc-unsafe", None, 20, False) perf_diff = (safe_time - unsafe_time) / unsafe_time print("Performance difference of swc is: {:2.2%}".format(perf_diff)) def test_warp(): print("Testing warp") os.chdir(ROOT_PATH + "/../benchmarks/warp") out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") safe_throughput = parseThroughput(out) out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") unsafe_throughput = parseThroughput(out) if safe_throughput and unsafe_throughput: perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput print("Performance difference of warp is: {:2.2%}".format(perf_diff)) def test_iron(): print("Testing iron") os.chdir(ROOT_PATH + "/../benchmarks/iron") out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") safe_throughput = parseThroughput(out) out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") unsafe_throughput = parseThroughput(out) if safe_throughput and unsafe_throughput: perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput print("Performance difference of iron is: {:2.2%}".format(perf_diff)) def test_zola(): print("Testing zola") os.chdir(ROOT_PATH + "/../benchmarks/zola") time_list = [] for _ in range(100): out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") time = parseZola(out) time_list.append(time) unsafe_time = average(time_list) time_list = [] for _ in range(100): out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = out.communicate() out = out.decode("utf-8") time = parseZola(out) time_list.append(time) safe_time = average(time_list) perf_diff = (safe_time - unsafe_time) / unsafe_time print("Performance difference of zola is: {:2.2%}".format(perf_diff)) def test_rustpython(): print("Testing RustPython") os.chdir(ROOT_PATH + "/../benchmarks/RustPython") arg = ROOT_PATH + "/../benchmarks/RustPython/benches/benchmarks/pystone.py" safe_time, _, _ = runExpWithName("./test_bc-safe", arg, 10, False) unsafe_time, _, _ = runExpWithName("./test_bc-unsafe", arg, 10, False) perf_diff = (safe_time - unsafe_time) / unsafe_time print("Performance difference of RustPython is: {:2.2%}".format(perf_diff)) os.chdir(ROOT_PATH + "/../benchmarks") test_iron() test_swc() test_warp() test_zola() 
test_rustpython()
true
true
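The driver above pulls a Requests/sec figure out of wrk-style output and reports a relative difference. Here is a tiny self-contained check of that regex and of the percentage formula; the sample output line and the numbers are invented for illustration.

import re

def parse_throughput(out):
    m = re.search(r'Requests/sec: ([0-9,.]+)', out)
    return float(m.group(1)) if m else None

sample = "Requests/sec: 10480.75\nTransfer/sec: 1.2MB"
safe = parse_throughput(sample)
unsafe = 11000.0
print("{:2.2%}".format((unsafe - safe) / unsafe))   # slowdown of the safe build relative to unsafe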
1c4a1240142afa6606892ba452cdab5d9517361e
396
py
Python
apps/goods/migrations/0013_auto_20181010_1629.py
lianxiaopang/camel-store-api
b8021250bf3d8cf7adc566deebdba55225148316
[ "Apache-2.0" ]
12
2020-02-01T01:52:01.000Z
2021-04-28T15:06:43.000Z
apps/goods/migrations/0013_auto_20181010_1629.py
lianxiaopang/camel-store-api
b8021250bf3d8cf7adc566deebdba55225148316
[ "Apache-2.0" ]
5
2020-02-06T08:07:58.000Z
2020-06-02T13:03:45.000Z
apps/goods/migrations/0013_auto_20181010_1629.py
lianxiaopang/camel-store-api
b8021250bf3d8cf7adc566deebdba55225148316
[ "Apache-2.0" ]
11
2020-02-03T13:07:46.000Z
2020-11-29T01:44:06.000Z
# Generated by Django 2.1.2 on 2018-10-10 08:29 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('goods', '0012_auto_20181010_1558'), ] operations = [ migrations.AlterModelOptions( name='goods', options={'ordering': ('status',), 'verbose_name': '商品', 'verbose_name_plural': '商品'}, ), ]
22
97
0.598485
from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('goods', '0012_auto_20181010_1558'), ] operations = [ migrations.AlterModelOptions( name='goods', options={'ordering': ('status',), 'verbose_name': '商品', 'verbose_name_plural': '商品'}, ), ]
true
true
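The migration above only changes Meta options, so the model it targets would contain roughly the following Meta block. This is a sketch: the status field is an assumption, since the migration implies its existence through ordering but does not define it.

from django.db import models

class Goods(models.Model):
    status = models.SmallIntegerField(default=0)   # hypothetical field; only its name is implied by ordering

    class Meta:
        ordering = ('status',)
        verbose_name = '商品'
        verbose_name_plural = '商品'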
1c4a12fad6dad46559aac33e6d98b82995bb2863
4,099
py
Python
src/one_stack.py
noemiefedon/RELAY
1bf9c27ee1bcf1be0a7652fcca0ea38dd47b14b8
[ "MIT" ]
1
2020-12-07T22:18:22.000Z
2020-12-07T22:18:22.000Z
src/one_stack.py
noemiefedon/RELAY
1bf9c27ee1bcf1be0a7652fcca0ea38dd47b14b8
[ "MIT" ]
null
null
null
src/one_stack.py
noemiefedon/RELAY
1bf9c27ee1bcf1be0a7652fcca0ea38dd47b14b8
[ "MIT" ]
1
2021-12-02T22:19:02.000Z
2021-12-02T22:19:02.000Z
# -*- coding: utf-8 -*- """ Functions to check a design manufacturability - check_ss_manufacturability checks the manufacturability of a stacking sequence list """ __version__ = '1.0' __author__ = 'Noemie Fedon' import sys import numpy as np sys.path.append(r'C:\RELAY') from src.contiguity import is_contig from src.disorientation import is_diso_ss from src.balance import is_balanced from src.dam_tol import is_dam_tol from src.ten_percent_rule import is_ten_percent_rule from src.lp_functions_2 import calc_lampamA from src.constraints import Constraints from src.pretty_print import print_ss def check_ss_manufacturability( ss, constraints, no_ipo_check=False, no_bal_check=False, equality_45_135=False, equality_0_90=False, n_plies=None): """ checks the manufacturability of a stacking sequence list """ if n_plies is not None and ss.size != n_plies: raise Exception("Wrong number of plies") if constraints.dam_tol: if not is_dam_tol(ss, constraints): print_ss(ss) raise Exception("Damage tolerance constraint not satisfied") if not no_bal_check and constraints.bal: if not is_balanced(ss, constraints): raise Exception("Balance constraint not satisfied") if not no_ipo_check and constraints.ipo: lampamA = calc_lampamA(ss, constraints) if (abs(lampamA[2:4]) > 1e-10).any(): print_ss(ss) print('lampamA', lampamA) # print('ipo') raise Exception("In plane orthotropy constraint not satisfied") if constraints.diso: if hasattr(constraints, 'dam_tol_rule'): if not is_diso_ss(ss, constraints.delta_angle, constraints.dam_tol, constraints.dam_tol_rule): raise Exception("Disorientation constraint not satisfied") else: if not is_diso_ss(ss, constraints.delta_angle, constraints.dam_tol, constraints.n_plies_dam_tol): raise Exception("Disorientation constraint not satisfied") if constraints.contig: if not is_contig(ss, constraints.n_contig): raise Exception("Contiguity constraint not satisfied") if constraints.rule_10_percent: if not is_ten_percent_rule( constraints, stack=ss, equality_45_135=equality_45_135, equality_0_90=equality_0_90): raise Exception("10% rule not satisfied") return 0 if __name__ == "__main__": print('\n*** Test for the function check_ss_manufacturability ***') constraints = Constraints( sym=True, bal=True, ipo=True, oopo=False, dam_tol=False, rule_10_percent=True, percent_0=10, percent_45=0, percent_90=10, percent_135=0, percent_45_135=10, diso=True, contig=True, n_contig=5, delta_angle=45, set_of_angles=np.array([0, 45, -45, 90])) ss = np.array([ 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 0, -45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, -45, 0, 0, 45, 45, 0, 0, -45, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, -45, 0, 0, 0, 0, 45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0], int) check_ss_manufacturability(ss, constraints)
42.257732
1,024
0.56648
__version__ = '1.0' __author__ = 'Noemie Fedon' import sys import numpy as np sys.path.append(r'C:\RELAY') from src.contiguity import is_contig from src.disorientation import is_diso_ss from src.balance import is_balanced from src.dam_tol import is_dam_tol from src.ten_percent_rule import is_ten_percent_rule from src.lp_functions_2 import calc_lampamA from src.constraints import Constraints from src.pretty_print import print_ss def check_ss_manufacturability( ss, constraints, no_ipo_check=False, no_bal_check=False, equality_45_135=False, equality_0_90=False, n_plies=None): if n_plies is not None and ss.size != n_plies: raise Exception("Wrong number of plies") if constraints.dam_tol: if not is_dam_tol(ss, constraints): print_ss(ss) raise Exception("Damage tolerance constraint not satisfied") if not no_bal_check and constraints.bal: if not is_balanced(ss, constraints): raise Exception("Balance constraint not satisfied") if not no_ipo_check and constraints.ipo: lampamA = calc_lampamA(ss, constraints) if (abs(lampamA[2:4]) > 1e-10).any(): print_ss(ss) print('lampamA', lampamA) raise Exception("In plane orthotropy constraint not satisfied") if constraints.diso: if hasattr(constraints, 'dam_tol_rule'): if not is_diso_ss(ss, constraints.delta_angle, constraints.dam_tol, constraints.dam_tol_rule): raise Exception("Disorientation constraint not satisfied") else: if not is_diso_ss(ss, constraints.delta_angle, constraints.dam_tol, constraints.n_plies_dam_tol): raise Exception("Disorientation constraint not satisfied") if constraints.contig: if not is_contig(ss, constraints.n_contig): raise Exception("Contiguity constraint not satisfied") if constraints.rule_10_percent: if not is_ten_percent_rule( constraints, stack=ss, equality_45_135=equality_45_135, equality_0_90=equality_0_90): raise Exception("10% rule not satisfied") return 0 if __name__ == "__main__": print('\n*** Test for the function check_ss_manufacturability ***') constraints = Constraints( sym=True, bal=True, ipo=True, oopo=False, dam_tol=False, rule_10_percent=True, percent_0=10, percent_45=0, percent_90=10, percent_135=0, percent_45_135=10, diso=True, contig=True, n_contig=5, delta_angle=45, set_of_angles=np.array([0, 45, -45, 90])) ss = np.array([ 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 0, -45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, -45, 0, 0, 45, 45, 0, 0, -45, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, -45, 0, 0, 0, 0, 45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0], int) check_ss_manufacturability(ss, constraints)
true
true
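One of the rules the script above enforces is contiguity: no more than n_contig identical ply angles in a row. The sketch below covers that single rule on its own, independent of the src.* helpers the script imports (and not necessarily identical to them).

import numpy as np

def contiguity_ok(stack, n_contig):
    """Return False if more than n_contig consecutive plies share the same angle."""
    run = 1
    for prev, cur in zip(stack[:-1], stack[1:]):
        run = run + 1 if cur == prev else 1
        if run > n_contig:
            return False
    return True

ss = np.array([0, 0, 0, 45, -45, 90, 90, 90, 90, 90, 90, 45])
print(contiguity_ok(ss, 5))   # False: six 90-degree plies in a row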
1c4a1315d41ea7313fba33ea72615b897facc135
2,937
py
Python
bayesian/__init__.py
prashbnair/fabric8-analytics-server
af1f71018b82ebafae7701a443412eed34de5a15
[ "Apache-2.0" ]
null
null
null
bayesian/__init__.py
prashbnair/fabric8-analytics-server
af1f71018b82ebafae7701a443412eed34de5a15
[ "Apache-2.0" ]
null
null
null
bayesian/__init__.py
prashbnair/fabric8-analytics-server
af1f71018b82ebafae7701a443412eed34de5a15
[ "Apache-2.0" ]
null
null
null
"""Module with the declaration of web application and its basic endpoints.""" import logging import os from f8a_worker.setup_celery import init_selinon from flask import Flask from flask import g from flask import redirect from flask import url_for from flask_appconfig import AppConfig from flask_cache import Cache from flask_sqlalchemy import SQLAlchemy from raven.contrib.flask import Sentry def setup_logging(app): """Set up logger, the log level is read from the environment variable.""" if not app.debug: handler = logging.StreamHandler() log_level = os.environ.get('FLASK_LOGGING_LEVEL', logging.getLevelName(logging.WARNING)) handler.setLevel(log_level) app.logger.addHandler(handler) # we must initialize DB here to not create import loop with .auth... # flask really sucks at this rdb = SQLAlchemy() cache = Cache(config={'CACHE_TYPE': 'simple'}) def create_app(configfile=None): """Create the web application and define basic endpoints.""" # do the imports here to not shadow e.g. "import bayesian.frontend.api_v1" # by Blueprint imported here from bayesian.api_v1 import api_v1 from bayesian.api.api_v2 import api_v2 from bayesian.api.user_api import user_api from bayesian.utils import JSONEncoderWithExtraTypes app = Flask(__name__) AppConfig(app, configfile) cache.init_app(app) # actually init the DB with config values now rdb.init_app(app) app.rdb = rdb # We need JSON encoder that can serialize datetime.datetime app.json_encoder = JSONEncoderWithExtraTypes app.register_blueprint(api_v1) app.register_blueprint(api_v2) app.register_blueprint(user_api) # Redirect to latest API version if /api is accessed app.route('/api')(lambda: redirect(url_for('api_v1.apiendpoints__slashless'))) # Likewise for base URL, and make that accessible by name # Configure CORS. from flask_cors import CORS CORS(app, resources={r"/api/*": {"origins": "*"}}) @app.route('/') def base_url(): return redirect(url_for('api_v1.apiendpoints__slashless')) setup_logging(app) @app.before_request def set_current_user(): g.current_user = None @app.after_request def access_control_allow_origin(response): response.headers["Access-Control-Allow-Origin"] = "*" response.headers["Access-Control-Allow-Headers"] = "authorization, content-type, " \ "x-3scale-account-secret" response.headers["Access-Control-Allow-Methods"] = "DELETE, GET, HEAD, OPTIONS, " \ "PATCH, POST, PUT" response.headers["Allow"] = "GET, HEAD, OPTIONS, PATCH, POST, PUT" return response return app init_selinon() app = create_app() SENTRY_DSN = os.environ.get("SENTRY_DSN", "") sentry = Sentry(app, dsn=SENTRY_DSN, logging=True, level=logging.ERROR) app.logger.info('App initialized, ready to roll...')
31.244681
96
0.712632
import logging
import os

from f8a_worker.setup_celery import init_selinon
from flask import Flask
from flask import g
from flask import redirect
from flask import url_for
from flask_appconfig import AppConfig
from flask_cache import Cache
from flask_sqlalchemy import SQLAlchemy
from raven.contrib.flask import Sentry


def setup_logging(app):
    if not app.debug:
        handler = logging.StreamHandler()
        log_level = os.environ.get('FLASK_LOGGING_LEVEL',
                                   logging.getLevelName(logging.WARNING))
        handler.setLevel(log_level)
        app.logger.addHandler(handler)


rdb = SQLAlchemy()
cache = Cache(config={'CACHE_TYPE': 'simple'})


def create_app(configfile=None):
    from bayesian.api_v1 import api_v1
    from bayesian.api.api_v2 import api_v2
    from bayesian.api.user_api import user_api
    from bayesian.utils import JSONEncoderWithExtraTypes

    app = Flask(__name__)
    AppConfig(app, configfile)

    cache.init_app(app)

    rdb.init_app(app)
    app.rdb = rdb

    app.json_encoder = JSONEncoderWithExtraTypes
    app.register_blueprint(api_v1)
    app.register_blueprint(api_v2)
    app.register_blueprint(user_api)

    app.route('/api')(lambda: redirect(url_for('api_v1.apiendpoints__slashless')))

    from flask_cors import CORS
    CORS(app, resources={r"/api/*": {"origins": "*"}})

    @app.route('/')
    def base_url():
        return redirect(url_for('api_v1.apiendpoints__slashless'))

    setup_logging(app)

    @app.before_request
    def set_current_user():
        g.current_user = None

    @app.after_request
    def access_control_allow_origin(response):
        response.headers["Access-Control-Allow-Origin"] = "*"
        response.headers["Access-Control-Allow-Headers"] = "authorization, content-type, " \
                                                           "x-3scale-account-secret"
        response.headers["Access-Control-Allow-Methods"] = "DELETE, GET, HEAD, OPTIONS, " \
                                                           "PATCH, POST, PUT"
        response.headers["Allow"] = "GET, HEAD, OPTIONS, PATCH, POST, PUT"
        return response

    return app


init_selinon()
app = create_app()

SENTRY_DSN = os.environ.get("SENTRY_DSN", "")
sentry = Sentry(app, dsn=SENTRY_DSN, logging=True, level=logging.ERROR)

app.logger.info('App initialized, ready to roll...')
true
true
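The create_app factory above wires blueprints, CORS and response headers onto a Flask application. A minimal smoke test, assuming the bayesian package and its listed dependencies are importable and configured, might look like the sketch below; it is illustrative only and not part of the original repository.

# Hypothetical smoke test for the application factory; assumes the bayesian
# package and its dependencies (flask_appconfig, flask_cache, raven, f8a_worker, ...)
# are installed and configured. Not part of the original repository.
from bayesian import create_app

app = create_app()
with app.test_client() as client:
    response = client.get('/api')
    # '/api' is registered to redirect to the latest API version,
    # so a 3xx status code is expected here.
    print(response.status_code)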
1c4a13f112efc10a3c692c96f72757500976a370
688
py
Python
leetcode/122/122.best-time-to-buy-and-sell-stock-ii.py
Yu-Ren-NEU/Leetcode
e82bc2734680606f676fe867dbcb9b9e71635bf5
[ "MIT" ]
1
2020-01-06T06:54:22.000Z
2020-01-06T06:54:22.000Z
leetcode/122/122.best-time-to-buy-and-sell-stock-ii.py
Yu-Ren-NEU/Leetcode
e82bc2734680606f676fe867dbcb9b9e71635bf5
[ "MIT" ]
1
2020-01-07T02:22:06.000Z
2020-01-07T02:22:06.000Z
leetcode/122/122.best-time-to-buy-and-sell-stock-ii.py
Yu-Ren-NEU/Leetcode
e82bc2734680606f676fe867dbcb9b9e71635bf5
[ "MIT" ]
2
2020-01-06T20:04:04.000Z
2020-01-10T08:24:01.000Z
#
# @lc app=leetcode id=122 lang=python3
#
# [122] Best Time to Buy and Sell Stock II
#

# @lc code=start
class Solution:
    def maxProfit(self, prices: []) -> int:
        # Picture the prices on an x-y coordinate system:
        # what is really required is the monotonically rising curve, i.e. find the corresponding peaks
        if not prices or len(prices) == 1:
            return 0
        result = 0
        for i in range(1, len(prices)):
            if prices[i] > prices[i-1]:
                result += prices[i] - prices[i-1]
        return result

    def test(self):
        assert(self.maxProfit([7,1,5,3,6,4]) == 7)
        assert(self.maxProfit([1,2,3,4,5]) == 4)
        assert(self.maxProfit([7,6,4,3,1]) == 0)


sol = Solution()
sol.test()
# @lc code=end
20.848485
50
0.52907
class Solution:
    def maxProfit(self, prices: []) -> int:
        if not prices or len(prices) == 1:
            return 0
        result = 0
        for i in range(1, len(prices)):
            if prices[i] > prices[i-1]:
                result += prices[i] - prices[i-1]
        return result

    def test(self):
        assert(self.maxProfit([7,1,5,3,6,4]) == 7)
        assert(self.maxProfit([1,2,3,4,5]) == 4)
        assert(self.maxProfit([7,6,4,3,1]) == 0)


sol = Solution()
sol.test()
true
true
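The greedy rule used in maxProfit above, adding every positive day-over-day increase, can be sanity-checked with a few lines of standalone Python. The snippet below is illustrative and not part of the original file.

# Standalone illustration of the greedy argument behind maxProfit:
# summing every positive day-over-day delta equals buying at each
# valley and selling at the following peak.
prices = [7, 1, 5, 3, 6, 4]
greedy_profit = sum(max(b - a, 0) for a, b in zip(prices, prices[1:]))
print(greedy_profit)  # 7, i.e. (5 - 1) + (6 - 3)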
1c4a14c58058bc5cfda3413c3de34fb336ef5ce8
1,418
py
Python
sw/scripts/jlink.py
alvarop/chaac
a86d3c71acf3f87584ba260cbfe207b3a09213ad
[ "MIT" ]
21
2019-01-27T03:15:27.000Z
2021-07-03T06:40:40.000Z
sw/scripts/jlink.py
alvarop/chaac
a86d3c71acf3f87584ba260cbfe207b3a09213ad
[ "MIT" ]
1
2021-07-08T04:46:42.000Z
2021-07-08T04:46:42.000Z
sw/scripts/jlink.py
alvarop/chaac
a86d3c71acf3f87584ba260cbfe207b3a09213ad
[ "MIT" ]
3
2019-02-19T19:57:30.000Z
2020-07-11T11:27:51.000Z
#!/usr/bin/env python

import argparse
import os
import sys
import time
import tempfile
import subprocess

JLINK_PATH = "/usr/bin/JLinkExe"

parser = argparse.ArgumentParser()
parser.add_argument("--dump", action="store_true")
parser.add_argument("--erase", action="store_true")
parser.add_argument("--device", default="stm32l432kc", help="device name")
parser.add_argument("--addr", default=0, help="start addr")
parser.add_argument("--len", default=0x40000, help="read/write len")
parser.add_argument("--filename", help="filename")

args = parser.parse_args()


def write_line(file, line):
    if line[-1] != "\n":
        line += "\n"
    file.write(line.encode("utf-8"))


with tempfile.NamedTemporaryFile(prefix="jlink", delete=False) as scriptfile:
    write_line(scriptfile, "if swd")
    write_line(scriptfile, "device {}".format(args.device))
    write_line(scriptfile, "speed 4000")
    write_line(scriptfile, "connect")
    write_line(scriptfile, "halt")

    if args.erase == True:
        write_line(scriptfile, "erase")
    elif args.dump == True:
        write_line(
            scriptfile,
            "savebin {} 0x{:X} 0x{:X}".format(
                args.filename, int(args.addr, 0), int(args.len, 0)
            ),
        )

    write_line(scriptfile, "go")
    write_line(scriptfile, "exit")

    command_file = scriptfile.name

if command_file:
    subprocess.call([JLINK_PATH, command_file])
25.781818
77
0.665021
import argparse
import os
import sys
import time
import tempfile
import subprocess

JLINK_PATH = "/usr/bin/JLinkExe"

parser = argparse.ArgumentParser()
parser.add_argument("--dump", action="store_true")
parser.add_argument("--erase", action="store_true")
parser.add_argument("--device", default="stm32l432kc", help="device name")
parser.add_argument("--addr", default=0, help="start addr")
parser.add_argument("--len", default=0x40000, help="read/write len")
parser.add_argument("--filename", help="filename")

args = parser.parse_args()


def write_line(file, line):
    if line[-1] != "\n":
        line += "\n"
    file.write(line.encode("utf-8"))


with tempfile.NamedTemporaryFile(prefix="jlink", delete=False) as scriptfile:
    write_line(scriptfile, "if swd")
    write_line(scriptfile, "device {}".format(args.device))
    write_line(scriptfile, "speed 4000")
    write_line(scriptfile, "connect")
    write_line(scriptfile, "halt")

    if args.erase == True:
        write_line(scriptfile, "erase")
    elif args.dump == True:
        write_line(
            scriptfile,
            "savebin {} 0x{:X} 0x{:X}".format(
                args.filename, int(args.addr, 0), int(args.len, 0)
            ),
        )

    write_line(scriptfile, "go")
    write_line(scriptfile, "exit")

    command_file = scriptfile.name

if command_file:
    subprocess.call([JLINK_PATH, command_file])
true
true
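The jlink.py script above builds a temporary J-Link Commander script and hands it to JLinkExe. For context, the command file it writes for a dump invocation would look roughly like the string below; the values shown are illustrative, derived from the write_line calls in the script rather than captured from the tool.

# Illustrative reconstruction of the temporary command file produced by, e.g.:
#   ./jlink.py --dump --filename dump.bin --addr 0x0 --len 0x40000
# Derived from the write_line calls in the script; not captured tool output.
example_commands = """\
if swd
device stm32l432kc
speed 4000
connect
halt
savebin dump.bin 0x0 0x40000
go
exit
"""
print(example_commands)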
1c4a14e406aa1e3b330cf49b14434163e125ec06
78,955
py
Python
futu/quote/open_quote_context.py
postpascal/py-futu-api
cb274d5ab5387dca190b739d161f2bc8eabe073d
[ "Apache-2.0" ]
1
2019-09-01T08:49:46.000Z
2019-09-01T08:49:46.000Z
futu/quote/open_quote_context.py
faruto/py-futu-api
cb274d5ab5387dca190b739d161f2bc8eabe073d
[ "Apache-2.0" ]
null
null
null
futu/quote/open_quote_context.py
faruto/py-futu-api
cb274d5ab5387dca190b739d161f2bc8eabe073d
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ Market quote and trade context setting """ import datetime import math from time import sleep import pandas as pd from futu.common.open_context_base import OpenContextBase, ContextStatus from futu.quote.quote_query import * class OpenQuoteContext(OpenContextBase): """行情上下文对象类""" def __init__(self, host='127.0.0.1', port=11111): """ 初始化Context对象 :param host: host地址 :param port: 端口 """ self._ctx_subscribe = {} super(OpenQuoteContext, self).__init__(host, port, True) def close(self): """ 关闭上下文对象。 .. code:: python from futu import * quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) quote_ctx.close() """ super(OpenQuoteContext, self).close() def on_api_socket_reconnected(self): """for API socket reconnected""" # auto subscriber resub_count = 0 subtype_list = [] code_list = [] resub_dict = copy(self._ctx_subscribe) subtype_all_cnt = len(resub_dict.keys()) subtype_cur_cnt = 0 ret_code = RET_OK ret_msg = '' for subtype in resub_dict.keys(): subtype_cur_cnt += 1 code_set = resub_dict[subtype] code_list_new = [code for code in code_set] if len(code_list_new) == 0: continue if len(code_list) == 0: code_list = code_list_new subtype_list = [subtype] is_need_sub = False if code_list == code_list_new: if subtype not in subtype_list: subtype_list.append(subtype) # 合并subtype请求 else: ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list) logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format( len(code_list), ret_code, ret_msg, subtype_list, code_list)) if ret_code != RET_OK: break resub_count += len(code_list) code_list = code_list_new subtype_list = [subtype] # 循环即将结束 if subtype_cur_cnt == subtype_all_cnt and len(code_list): ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list) logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(len(code_list), ret_code, ret_msg, subtype_list, code_list)) if ret_code != RET_OK: break resub_count += len(code_list) code_list = [] subtype_list = [] logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(resub_count, ret_code, ret_msg)) # 重定阅失败,重连 if ret_code != RET_OK: logger.error("reconnect subscribe error, close connect and retry!!") self._status = ContextStatus.Start self._wait_reconnect() return ret_code, ret_msg def get_trading_days(self, market, start=None, end=None): """获取交易日 :param market: 市场类型,Market_ :param start: 起始日期。例如'2018-01-01'。 :param end: 结束日期。例如'2018-01-01'。 start和end的组合如下: ========== ========== ======================================== start类型 end类型 说明 ========== ========== ======================================== str str start和end分别为指定的日期 None str start为end往前365天 str None end为start往后365天 None None end为当前日期,start为end往前365天 ========== ========== ======================================== :return: 成功时返回(RET_OK, data),data是[{'trade_date_type': 0, 'time': '2018-01-05'}]数组;失败时返回(RET_ERROR, data),其中data是错误描述字符串 """ if market is None or is_str(market) is False: error_str = ERROR_STR_PREFIX + "the type of market param is wrong" return RET_ERROR, error_str ret, msg, start, end = normalize_start_end_date(start, end, 365) if ret != RET_OK: return ret, msg query_processor = self._get_sync_query_processor( TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp) # the keys of kargs should be corresponding to the actual function arguments kargs = { 'market': market, 'start_date': start, 'end_date': end, 'conn_id': self.get_sync_conn_id() } ret_code, msg, trade_day_list = 
query_processor(**kargs) if ret_code != RET_OK: return RET_ERROR, msg return RET_OK, trade_day_list def get_stock_basicinfo(self, market, stock_type=SecurityType.STOCK, code_list=None): """ 获取指定市场中特定类型的股票基本信息 :param market: 市场类型,futu.common.constant.Market :param stock_type: 股票类型, futu.common.constant.SecurityType :param code_list: 如果不为None,应该是股票code的iterable类型,将只返回指定的股票信息 :return: (ret_code, content) ret_code 等于RET_OK时, content为Pandas.DataFrame数据, 否则为错误原因字符串, 数据列格式如下 ================= =========== ============================================================================== 参数 类型 说明 ================= =========== ============================================================================== code str 股票代码 name str 名字 lot_size int 每手数量 stock_type str 股票类型,参见SecurityType stock_child_type str 涡轮子类型,参见WrtType stock_owner str 所属正股的代码 option_type str 期权类型,Qot_Common.OptionType strike_time str 行权日 strike_price float 行权价 suspension bool 是否停牌(True表示停牌) listing_date str 上市时间 stock_id int 股票id delisting bool 是否退市 ================= =========== ============================================================================== :example: .. code-block:: python from futu import * quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) print(quote_ctx.get_stock_basicinfo(Market.HK, SecurityType.WARRANT)) print(quote_ctx.get_stock_basicinfo(Market.US, SecurityType.DRVT, 'US.AAPL210115C185000')) quote_ctx.close() """ param_table = {'market': market, 'stock_type': stock_type} for x in param_table: param = param_table[x] if param is None or is_str(param) is False: error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x return RET_ERROR, error_str if code_list is not None: if is_str(code_list): code_list = code_list.split(',') elif isinstance(code_list, list): pass else: return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'" query_processor = self._get_sync_query_processor( StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp) kargs = { "market": market, 'stock_type': stock_type, 'code_list': code_list, 'conn_id': self.get_sync_conn_id() } ret_code, msg, basic_info_list = query_processor(**kargs) if ret_code != RET_OK: return ret_code, msg col_list = [ 'code', 'name', 'lot_size', 'stock_type', 'stock_child_type', 'stock_owner', 'option_type', 'strike_time', 'strike_price', 'suspension', 'listing_date', 'stock_id', 'delisting' ] basic_info_table = pd.DataFrame(basic_info_list, columns=col_list) return RET_OK, basic_info_table def get_multiple_history_kline(self, codelist, start=None, end=None, ktype=KLType.K_DAY, autype=AuType.QFQ): """ 获取多只股票的本地历史k线数据 :param codelist: 股票代码列表,list或str。例如:['HK.00700', 'HK.00001'],'HK.00700,HK.00001' :param start: 起始时间,例如'2017-06-20' :param end: 结束时间, 例如'2017-07-20',start与end组合关系参见 get_history_kline_ :param ktype: k线类型,参见KLType :param autype: 复权类型,参见AuType :return: 成功时返回(RET_OK, [data]),data是DataFrame数据, 数据列格式如下 ================= =========== ============================================================================== 参数 类型 说明 ================= =========== ============================================================================== code str 股票代码 time_key str k线时间 open float 开盘价 close float 收盘价 high float 最高价 low float 最低价 pe_ratio float 市盈率(该字段为比例字段,默认不展示%) turnover_rate float 换手率 volume int 成交量 turnover float 成交额 change_rate float 涨跌幅 last_close float 昨收价 ================= =========== ============================================================================== 失败时返回(RET_ERROR, data),其中data是错误描述字符串 """ if is_str(codelist): 
codelist = codelist.split(',') elif isinstance(codelist, list): pass else: return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'" result = [] for code in codelist: ret, data = self.get_history_kline(code, start, end, ktype, autype) if ret != RET_OK: return RET_ERROR, 'get history kline error: {}, {},{},{},{}'.format(data, code, start, end, ktype) result.append(data) return 0, result def _get_history_kline_impl(self, query_cls, code, start=None, end=None, ktype=KLType.K_DAY, autype=AuType.QFQ, fields=[KL_FIELD.ALL] ): ret, msg, req_start, end = normalize_start_end_date(start, end, 365) if ret != RET_OK: return ret, msg req_fields = unique_and_normalize_list(fields) if not fields: req_fields = copy(KL_FIELD.ALL_REAL) req_fields = KL_FIELD.normalize_field_list(req_fields) if not req_fields: error_str = ERROR_STR_PREFIX + "the type of fields param is wrong" return RET_ERROR, error_str if autype is None: autype = 'None' param_table = {'code': code, 'ktype': ktype, 'autype': autype} for x in param_table: param = param_table[x] if param is None or is_str(param) is False: error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x return RET_ERROR, error_str max_kl_num = 1000 data_finish = False list_ret = [] # 循环请求数据,避免一次性取太多超时 while not data_finish: kargs = { "code": code, "start_date": req_start, "end_date": end, "ktype": ktype, "autype": autype, "fields": copy(req_fields), "max_num": max_kl_num, "conn_id": self.get_sync_conn_id() } query_processor = self._get_sync_query_processor(query_cls.pack_req, query_cls.unpack_rsp) ret_code, msg, content = query_processor(**kargs) if ret_code != RET_OK: return ret_code, msg list_kline, has_next, next_time = content data_finish = (not has_next) or (not next_time) req_start = next_time for dict_item in list_kline: list_ret.append(dict_item) # 表头列 col_list = ['code'] for field in req_fields: str_field = KL_FIELD.DICT_KL_FIELD_STR[field] if str_field not in col_list: col_list.append(str_field) kline_frame_table = pd.DataFrame(list_ret, columns=col_list) return RET_OK, kline_frame_table def get_history_kline(self, code, start=None, end=None, ktype=KLType.K_DAY, autype=AuType.QFQ, fields=[KL_FIELD.ALL]): """ 得到本地历史k线,需先参照帮助文档下载k线 :param code: 股票代码 :param start: 开始时间,例如'2017-06-20' :param end: 结束时间,例如'2017-06-30' start和end的组合如下: ========== ========== ======================================== start类型 end类型 说明 ========== ========== ======================================== str str start和end分别为指定的日期 None str start为end往前365天 str None end为start往后365天 None None end为当前日期,start为end往前365天 ========== ========== ======================================== :param ktype: k线类型, 参见 KLType 定义 :param autype: 复权类型, 参见 AuType 定义 :param fields: 需返回的字段列表,参见 KL_FIELD 定义 KL_FIELD.ALL KL_FIELD.OPEN .... :return: (ret, data) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ================= =========== ============================================================================== 参数 类型 说明 ================= =========== ============================================================================== code str 股票代码 time_key str k线时间 open float 开盘价 close float 收盘价 high float 最高价 low float 最低价 pe_ratio float 市盈率(该字段为比例字段,默认不展示%) turnover_rate float 换手率 volume int 成交量 turnover float 成交额 change_rate float 涨跌幅 last_close float 昨收价 ================= =========== ============================================================================== :example: .. 
code:: python from futu import * quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) print(quote_ctx.get_history_kline('HK.00700', start='2017-06-20', end='2017-06-22')) quote_ctx.close() """ return self._get_history_kline_impl(GetHistoryKlineQuery, code, start=start, end=end, ktype=ktype, autype=autype, fields=fields) def request_history_kline(self, code, start=None, end=None, ktype=KLType.K_DAY, autype=AuType.QFQ, fields=[KL_FIELD.ALL], max_count=1000, page_req_key=None): """ 拉取历史k线,不需要先下载历史数据。 :param code: 股票代码 :param start: 开始时间,例如'2017-06-20' :param end: 结束时间,例如'2017-07-20'。 start和end的组合如下: ========== ========== ======================================== start类型 end类型 说明 ========== ========== ======================================== str str start和end分别为指定的日期 None str start为end往前365天 str None end为start往后365天 None None end为当前日期,start为end往前365天 ========== ========== ======================================== :param ktype: k线类型, 参见 KLType 定义 :param autype: 复权类型, 参见 AuType 定义 :param fields: 需返回的字段列表,参见 KL_FIELD 定义 KL_FIELD.ALL KL_FIELD.OPEN .... :param max_count: 本次请求最大返回的数据点个数,传None表示返回start和end之间所有的数据。 :param page_req_key: 分页请求的key。如果start和end之间的数据点多于max_count,那么后续请求时,要传入上次调用返回的page_req_key。初始请求时应该传None。 :return: (ret, data, page_req_key) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下。page_req_key在分页请求时(即max_count>0) 可能返回,并且需要在后续的请求中传入。如果没有更多数据,page_req_key返回None。 ret != RET_OK 返回错误字符串 ================= =========== ============================================================================== 参数 类型 说明 ================= =========== ============================================================================== code str 股票代码 time_key str k线时间 open float 开盘价 close float 收盘价 high float 最高价 low float 最低价 pe_ratio float 市盈率(该字段为比例字段,默认不展示%) turnover_rate float 换手率 volume int 成交量 turnover float 成交额 change_rate float 涨跌幅 last_close float 昨收价 ================= =========== ============================================================================== :note :example: .. 
code:: python from futu import * quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) ret, data, page_req_key = quote_ctx.request_history_kline('HK.00700', start='2017-06-20', end='2018-06-22', max_count=50) print(ret, data) ret, data, page_req_key = quote_ctx.request_history_kline('HK.00700', start='2017-06-20', end='2018-06-22', max_count=50, page_req_key=page_req_key) print(ret, data) quote_ctx.close() """ next_page_req_key = None ret, msg, req_start, end = normalize_start_end_date(start, end, 365) if ret != RET_OK: return ret, msg, next_page_req_key req_fields = unique_and_normalize_list(fields) if not fields: req_fields = copy(KL_FIELD.ALL_REAL) req_fields = KL_FIELD.normalize_field_list(req_fields) if not req_fields: error_str = ERROR_STR_PREFIX + "the type of fields param is wrong" return RET_ERROR, error_str, next_page_req_key if autype is None: autype = 'None' param_table = {'code': code, 'ktype': ktype, 'autype': autype} for x in param_table: param = param_table[x] if param is None or is_str(param) is False: error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x return RET_ERROR, error_str, next_page_req_key max_kl_num = min(1000, max_count) if max_count is not None else 1000 data_finish = False list_ret = [] # 循环请求数据,避免一次性取太多超时 while not data_finish: kargs = { "code": code, "start_date": req_start, "end_date": end, "ktype": ktype, "autype": autype, "fields": copy(req_fields), "max_num": max_kl_num, "conn_id": self.get_sync_conn_id(), "next_req_key": page_req_key } query_processor = self._get_sync_query_processor(RequestHistoryKlineQuery.pack_req, RequestHistoryKlineQuery.unpack_rsp) ret_code, msg, content = query_processor(**kargs) if ret_code != RET_OK: return ret_code, msg, next_page_req_key list_kline, has_next, page_req_key = content list_ret.extend(list_kline) next_page_req_key = page_req_key if max_count is not None: if max_count > len(list_ret) and has_next: data_finish = False max_kl_num = min(max_count - len(list_ret), 1000) else: data_finish = True else: data_finish = not has_next # 表头列 col_list = ['code'] for field in req_fields: str_field = KL_FIELD.DICT_KL_FIELD_STR[field] if str_field not in col_list: col_list.append(str_field) kline_frame_table = pd.DataFrame(list_ret, columns=col_list) return RET_OK, kline_frame_table, next_page_req_key def get_autype_list(self, code_list): """ 获取给定股票列表的复权因子 :param code_list: 股票列表,例如['HK.00700'] :return: (ret, data) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ================================================================================= 参数 类型 说明 ===================== =========== ================================================================================= code str 股票代码 ex_div_date str 除权除息日 split_ratio float 拆合股比例(该字段为比例字段,默认不展示%),例如,对于5股合1股为1/5,对于1股拆5股为5/1 per_cash_div float 每股派现 per_share_div_ratio float 每股送股比例(该字段为比例字段,默认不展示%) per_share_trans_ratio float 每股转增股比例(该字段为比例字段,默认不展示%) allotment_ratio float 每股配股比例(该字段为比例字段,默认不展示%) allotment_price float 配股价 stk_spo_ratio float 增发比例(该字段为比例字段,默认不展示%) stk_spo_price float 增发价格 forward_adj_factorA float 前复权因子A forward_adj_factorB float 前复权因子B backward_adj_factorA float 后复权因子A backward_adj_factorB float 后复权因子B ===================== =========== ================================================================================= """ code_list = unique_and_normalize_list(code_list) for code in code_list: if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of param in 
code_list is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( ExrightQuery.pack_req, ExrightQuery.unpack_rsp) kargs = { "stock_list": code_list, "conn_id": self.get_sync_conn_id() } ret_code, msg, exr_record = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'ex_div_date', 'split_ratio', 'per_cash_div', 'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio', 'allotment_price', 'stk_spo_ratio', 'stk_spo_price', 'forward_adj_factorA', 'forward_adj_factorB', 'backward_adj_factorA', 'backward_adj_factorB' ] exr_frame_table = pd.DataFrame(exr_record, columns=col_list) return RET_OK, exr_frame_table def get_market_snapshot(self, code_list): """ 获取市场快照 :param code_list: 股票列表 :return: (ret, data) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ======================= ============= ============================================================================== 参数 类型 说明 ======================= ============= ============================================================================== code str 股票代码 update_time str 更新时间(yyyy-MM-dd HH:mm:ss),(美股默认是美东时间,港股A股默认是北京时间) last_price float 最新价格 open_price float 今日开盘价 high_price float 最高价格 low_price float 最低价格 prev_close_price float 昨收盘价格 volume int 成交数量 turnover float 成交金额 turnover_rate float 换手率 suspension bool 是否停牌(True表示停牌) listing_date str 上市日期 (yyyy-MM-dd) equity_valid bool 是否正股(为true时以下正股相关字段才有合法数值) issued_shares int 发行股本 total_market_val float 总市值 net_asset int 资产净值 net_profit int 净利润 earning_per_share float 每股盈利 outstanding_shares int 流通股本 net_asset_per_share float 每股净资产 circular_market_val float 流通市值 ey_ratio float 收益率(该字段为比例字段,默认不展示%) pe_ratio float 市盈率(该字段为比例字段,默认不展示%) pb_ratio float 市净率(该字段为比例字段,默认不展示%) pe_ttm_ratio float 市盈率TTM(该字段为比例字段,默认不展示%) stock_owner str 涡轮所属正股的代码或期权的标的股代码 wrt_valid bool 是否是窝轮(为true时以下涡轮相关的字段才有合法数据) wrt_conversion_ratio float 换股比率(该字段为比例字段,默认不展示%) wrt_type str 窝轮类型,参见WrtType wrt_strike_price float 行使价格 wrt_maturity_date str 格式化窝轮到期时间 wrt_end_trade str 格式化窝轮最后交易时间 wrt_code str 窝轮对应的正股(此字段已废除,修改为stock_owner) wrt_recovery_price float 窝轮回收价 wrt_street_vol float 窝轮街货量 wrt_issue_vol float 窝轮发行量 wrt_street_ratio float 窝轮街货占比(该字段为比例字段,默认不展示%) wrt_delta float 窝轮对冲值 wrt_implied_volatility float 窝轮引伸波幅 wrt_premium float 窝轮溢价 lot_size int 每手股数 price_spread float 当前摆盘价差亦即摆盘数据的买档或卖档的相邻档位的报价差 option_valid bool 是否是期权(为true时以下期权相关的字段才有合法数值) option_type str 期权类型,参见OptionType strike_time str 行权日(美股默认是美东时间,港股A股默认是北京时间) option_strike_price float 行权价 option_contract_size int 每份合约数 option_open_interest int 未平仓合约数 option_implied_volatility float 隐含波动率 option_premium float 溢价 option_delta float 希腊值 Delta option_gamma float 希腊值 Gamma option_vega float 希腊值 Vega option_theta float 希腊值 Theta option_rho float 希腊值 Rho ======================= ============= ============================================================================== """ code_list = unique_and_normalize_list(code_list) if not code_list: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( MarketSnapshotQuery.pack_req, MarketSnapshotQuery.unpack_rsp) kargs = { "stock_list": code_list, "conn_id": self.get_sync_conn_id() } ret_code, msg, snapshot_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg equity_col_list = ['issued_shares', 'total_market_val', 'net_asset', 'net_profit', 'earning_per_share', 'outstanding_shares', 'circular_market_val', 
'net_asset_per_share', 'ey_ratio', 'pe_ratio', 'pb_ratio', 'pe_ttm_ratio' ] wrt_col_list = ['wrt_conversion_ratio', 'wrt_type', 'wrt_strike_price', 'wrt_maturity_date', 'wrt_end_trade', 'wrt_recovery_price', 'wrt_street_vol', 'wrt_issue_vol', 'wrt_street_ratio', 'wrt_delta', 'wrt_implied_volatility', 'wrt_premium' ] option_col_list = ['option_type', 'strike_time', 'option_strike_price', 'option_contract_size', 'option_open_interest', 'option_implied_volatility', 'option_premium', 'option_delta', 'option_gamma', 'option_vega', 'option_theta', 'option_rho' ] col_list = [ 'code', 'update_time', 'last_price', 'open_price', 'high_price', 'low_price', 'prev_close_price', 'volume', 'turnover', 'turnover_rate', 'suspension', 'listing_date', 'lot_size', 'price_spread', 'stock_owner', 'ask_price', 'bid_price', 'ask_vol', 'bid_vol' ] col_list.append('equity_valid') col_list.extend(equity_col_list) col_list.append('wrt_valid') col_list.extend(wrt_col_list) col_list.append('option_valid') col_list.extend(option_col_list) snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list) return RET_OK, snapshot_frame_table def get_rt_data(self, code): """ 获取指定股票的分时数据 :param code: 股票代码,例如,HK.00700,US.APPL :return: (ret, data) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ========================================================================== 参数 类型 说明 ===================== =========== ========================================================================== code str 股票代码 time str 时间(yyyy-MM-dd HH:mm:ss)(美股默认是美东时间,港股A股默认是北京时间) is_blank bool 数据状态;正常数据为False,伪造数据为True opened_mins int 零点到当前多少分钟 cur_price float 当前价格 last_close float 昨天收盘的价格 avg_price float 平均价格 volume float 成交量 turnover float 成交金额 ===================== =========== ========================================================================== """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of param in code is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( RtDataQuery.pack_req, RtDataQuery.unpack_rsp) kargs = { "code": code, "conn_id": self.get_sync_conn_id() } ret_code, msg, rt_data_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg for x in rt_data_list: x['code'] = code col_list = [ 'code', 'time', 'is_blank', 'opened_mins', 'cur_price', 'last_close', 'avg_price', 'volume', 'turnover' ] rt_data_table = pd.DataFrame(rt_data_list, columns=col_list) return RET_OK, rt_data_table def get_plate_list(self, market, plate_class): """ 获取板块集合下的子板块列表 :param market: 市场标识,注意这里不区分沪,深,输入沪或者深都会返回沪深市场的子板块(这个是和客户端保持一致的)参见Market :param plate_class: 板块分类,参见Plate :return: ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 股票代码 plate_name str 板块名字 plate_id str 板块id ===================== =========== ============================================================== """ param_table = {'market': market, 'plate_class': plate_class} for x in param_table: param = param_table[x] if param is None or is_str(market) is False: error_str = ERROR_STR_PREFIX + "the type of market param is wrong" return RET_ERROR, error_str if market not in MKT_MAP: error_str = ERROR_STR_PREFIX + "the value of market param is wrong " return RET_ERROR, error_str if plate_class not in 
PLATE_CLASS_MAP: error_str = ERROR_STR_PREFIX + "the class of plate is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( SubplateQuery.pack_req, SubplateQuery.unpack_rsp) kargs = { 'market': market, 'plate_class': plate_class, 'conn_id': self.get_sync_conn_id() } ret_code, msg, subplate_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = ['code', 'plate_name', 'plate_id'] subplate_frame_table = pd.DataFrame(subplate_list, columns=col_list) return RET_OK, subplate_frame_table def get_plate_stock(self, plate_code): """ 获取特定板块下的股票列表 :param plate_code: 板块代码, string, 例如,”SH.BK0001”,”SH.BK0002”,先利用获取子版块列表函数获取子版块代码 :return: (ret, data) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 股票代码 lot_size int 每手股数 stock_name str 股票名称 stock_owner str 所属正股的代码 stock_child_type str 股票子类型,参见WrtType stock_type str 股票类型,参见SecurityType list_time str 上市时间(美股默认是美东时间,港股A股默认是北京时间) stock_id int 股票id ===================== =========== ============================================================== """ if plate_code is None or is_str(plate_code) is False: error_str = ERROR_STR_PREFIX + "the type of code is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( PlateStockQuery.pack_req, PlateStockQuery.unpack_rsp) kargs = { "plate_code": plate_code, "conn_id": self.get_sync_conn_id() } ret_code, msg, plate_stock_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'lot_size', 'stock_name', 'stock_owner', 'stock_child_type', 'stock_type', 'list_time', 'stock_id', ] plate_stock_table = pd.DataFrame(plate_stock_list, columns=col_list) return RET_OK, plate_stock_table def get_broker_queue(self, code): """ 获取股票的经纪队列 :param code: 股票代码 :return: (ret, bid_frame_table, ask_frame_table)或(ret, err_message) ret == RET_OK 返回pd dataframe数据,数据列格式如下 ret != RET_OK 后面两项为错误字符串 bid_frame_table 经纪买盘数据 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 股票代码 bid_broker_id int 经纪买盘id bid_broker_name str 经纪买盘名称 bid_broker_pos int 经纪档位 ===================== =========== ============================================================== ask_frame_table 经纪卖盘数据 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 股票代码 ask_broker_id int 经纪卖盘id ask_broker_name str 经纪卖盘名称 ask_broker_pos int 经纪档位 ===================== =========== ============================================================== """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of param in code is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( BrokerQueueQuery.pack_req, BrokerQueueQuery.unpack_rsp) kargs = { "code": code, "conn_id": self.get_sync_conn_id() } ret_code, ret_msg, content = query_processor(**kargs) if ret_code != RET_OK: return ret_code, ret_msg, ret_msg (_, bid_list, ask_list) = content col_bid_list = [ 'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos' ] col_ask_list = [ 'code', 'ask_broker_id', 
'ask_broker_name', 'ask_broker_pos' ] bid_frame_table = pd.DataFrame(bid_list, columns=col_bid_list) ask_frame_table = pd.DataFrame(ask_list, columns=col_ask_list) return RET_OK, bid_frame_table, ask_frame_table def _check_subscribe_param(self, code_list, subtype_list): code_list = unique_and_normalize_list(code_list) subtype_list = unique_and_normalize_list(subtype_list) if len(code_list) == 0: msg = ERROR_STR_PREFIX + 'code_list is null' return RET_ERROR, msg, code_list, subtype_list if len(subtype_list) == 0: msg = ERROR_STR_PREFIX + 'subtype_list is null' return RET_ERROR, msg, code_list, subtype_list for subtype in subtype_list: if subtype not in SUBTYPE_MAP: subtype_str = ','.join([x for x in SUBTYPE_MAP]) msg = ERROR_STR_PREFIX + 'subtype is %s , which is wrong. (%s)' % ( subtype, subtype_str) return RET_ERROR, msg, code_list, subtype_list for code in code_list: ret, msg = split_stock_str(code) if ret != RET_OK: return RET_ERROR, msg, code_list, subtype_list return RET_OK, "", code_list, subtype_list def subscribe(self, code_list, subtype_list, is_first_push=True, subscribe_push=True): """ 订阅注册需要的实时信息,指定股票和订阅的数据类型即可 注意:len(code_list) * 订阅的K线类型的数量 <= 100 :param code_list: 需要订阅的股票代码列表 :param subtype_list: 需要订阅的数据类型列表,参见SubType :param is_first_push: 订阅成功后是否马上推送一次数据 :param subscribe_push: 订阅后不推送 :return: (ret, err_message) ret == RET_OK err_message为None ret != RET_OK err_message为错误描述字符串 :example: .. code:: python from futu import * quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111) print(quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE)]) quote_ctx.close() """ return self._subscribe_impl(code_list, subtype_list, is_first_push, subscribe_push) def _subscribe_impl(self, code_list, subtype_list, is_first_push, subscribe_push=True): ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list) if ret != RET_OK: return ret, msg kline_sub_count = 0 for sub_type in subtype_list: if sub_type in KLINE_SUBTYPE_LIST: kline_sub_count += 1 # if kline_sub_count * len(code_list) > MAX_KLINE_SUB_COUNT: # return RET_ERROR, 'Too many subscription' query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscribe_req, SubscriptionQuery.unpack_subscribe_rsp) kargs = { 'code_list': code_list, 'subtype_list': subtype_list, 'conn_id': self.get_sync_conn_id(), 'is_first_push': is_first_push, 'subscribe_push': subscribe_push } ret_code, msg, _ = query_processor(**kargs) if ret_code != RET_OK: return RET_ERROR, msg for subtype in subtype_list: if subtype not in self._ctx_subscribe: self._ctx_subscribe[subtype] = set() code_set = self._ctx_subscribe[subtype] code_set.update(code_list) # # ret_code, msg, push_req_str = SubscriptionQuery.pack_push_req( # code_list, subtype_list, self.get_async_conn_id(), is_first_push) # # if ret_code != RET_OK: # return RET_ERROR, msg # # ret_code, msg = self._send_async_req(push_req_str) # if ret_code != RET_OK: # return RET_ERROR, msg return RET_OK, None def _reconnect_subscribe(self, code_list, subtype_list): # 将k线定阅和其它定阅区分开来 kline_sub_list = [] other_sub_list = [] for sub in subtype_list: if sub in KLINE_SUBTYPE_LIST: kline_sub_list.append(sub) else: other_sub_list.append(sub) # 连接断开时,可能会有大批股票需要重定阅,分次定阅,提高成功率 kline_sub_one_size = 1 if len(kline_sub_list) > 0: kline_sub_one_size = math.floor(100 / len(kline_sub_list)) sub_info_list = [ {"sub_list": kline_sub_list, "one_size": kline_sub_one_size}, {"sub_list": other_sub_list, "one_size": 100}, ] ret_code = RET_OK ret_data = None for info in sub_info_list: sub_list = 
info["sub_list"] one_size = info["one_size"] all_count = len(code_list) start_idx = 0 while start_idx < all_count and len(sub_list): sub_count = one_size if start_idx + one_size <= all_count else (all_count - start_idx) sub_codes = code_list[start_idx: start_idx + sub_count] start_idx += sub_count ret_code, ret_data = self._subscribe_impl(sub_codes, sub_list, False) if ret_code != RET_OK: break if ret_code != RET_OK: break return ret_code, ret_data def unsubscribe(self, code_list, subtype_list): """ 取消订阅 :param code_list: 取消订阅的股票代码列表 :param subtype_list: 取消订阅的类型,参见SubType :return: (ret, err_message) ret == RET_OK err_message为None ret != RET_OK err_message为错误描述字符串 """ ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list) if ret != RET_OK: return ret, msg query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req, SubscriptionQuery.unpack_unsubscribe_rsp) kargs = { 'code_list': code_list, 'subtype_list': subtype_list, "conn_id": self.get_sync_conn_id() } for subtype in subtype_list: if subtype not in self._ctx_subscribe: continue code_set = self._ctx_subscribe[subtype] for code in code_list: if code not in code_set: continue code_set.remove(code) ret_code, msg, _ = query_processor(**kargs) if ret_code != RET_OK: return RET_ERROR, msg ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id()) if ret_code != RET_OK: return RET_ERROR, msg ret_code, msg = self._send_async_req(unpush_req_str) if ret_code != RET_OK: return RET_ERROR, msg return RET_OK, None def query_subscription(self, is_all_conn=True): """ 查询已订阅的实时信息 :param is_all_conn: 是否返回所有连接的订阅状态,不传或者传False只返回当前连接数据 :return: (ret, data) ret != RET_OK 返回错误字符串 ret == RET_OK 返回 定阅信息的字典数据 ,格式如下: { 'total_used': 4, # 所有连接已使用的定阅额度 'own_used': 0, # 当前连接已使用的定阅额度 'remain': 496, # 剩余的定阅额度 'sub_list': # 每种定阅类型对应的股票列表 { 'BROKER': ['HK.00700', 'HK.02318'], 'RT_DATA': ['HK.00700', 'HK.02318'] } } """ is_all_conn = bool(is_all_conn) query_processor = self._get_sync_query_processor( SubscriptionQuery.pack_subscription_query_req, SubscriptionQuery.unpack_subscription_query_rsp) kargs = { "is_all_conn": is_all_conn, "conn_id": self.get_sync_conn_id() } ret_code, msg, sub_table = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg ret_dict = {} ret_dict['total_used'] = sub_table['total_used'] ret_dict['remain'] = sub_table['remain'] ret_dict['own_used'] = 0 ret_dict['sub_list'] = {} for conn_sub in sub_table['conn_sub_list']: is_own_conn = conn_sub['is_own_conn'] if is_own_conn: ret_dict['own_used'] = conn_sub['used'] if not is_all_conn and not is_own_conn: continue for sub_info in conn_sub['sub_list']: subtype = sub_info['subtype'] if subtype not in ret_dict['sub_list']: ret_dict['sub_list'][subtype] = [] code_list = ret_dict['sub_list'][subtype] for code in sub_info['code_list']: if code not in code_list: code_list.append(code) return RET_OK, ret_dict def get_stock_quote(self, code_list): """ 获取订阅股票报价的实时数据,有订阅要求限制。 对于异步推送,参见StockQuoteHandlerBase :param code_list: 股票代码列表,必须确保code_list中的股票均订阅成功后才能够执行 :return: (ret, data) ret == RET_OK 返回pd dataframe数据,数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 股票代码 data_date str 日期 data_time str 时间(美股默认是美东时间,港股A股默认是北京时间) last_price float 最新价格 open_price float 今日开盘价 high_price float 最高价格 
low_price float 最低价格 prev_close_price float 昨收盘价格 volume int 成交数量 turnover float 成交金额 turnover_rate float 换手率 amplitude int 振幅 suspension bool 是否停牌(True表示停牌) listing_date str 上市日期 (yyyy-MM-dd) price_spread float 当前价差,亦即摆盘数据的买档或卖档的相邻档位的报价差 dark_status str 暗盘交易状态,见DarkStatus strike_price float 行权价 contract_size int 每份合约数 open_interest int 未平仓合约数 implied_volatility float 隐含波动率 premium float 溢价 delta float 希腊值 Delta gamma float 希腊值 Gamma vega float 希腊值 Vega theta float 希腊值 Theta rho float 希腊值 Rho ===================== =========== ============================================================== """ code_list = unique_and_normalize_list(code_list) if not code_list: error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( StockQuoteQuery.pack_req, StockQuoteQuery.unpack_rsp, ) kargs = { "stock_list": code_list, "conn_id": self.get_sync_conn_id() } ret_code, msg, quote_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'data_date', 'data_time', 'last_price', 'open_price', 'high_price', 'low_price', 'prev_close_price', 'volume', 'turnover', 'turnover_rate', 'amplitude', 'suspension', 'listing_date', 'price_spread', 'dark_status', 'strike_price', 'contract_size', 'open_interest', 'implied_volatility', 'premium', 'delta', 'gamma', 'vega', 'theta', 'rho' ] quote_frame_table = pd.DataFrame(quote_list, columns=col_list) return RET_OK, quote_frame_table def get_rt_ticker(self, code, num=500): """ 获取指定股票的实时逐笔。取最近num个逐笔 :param code: 股票代码 :param num: 最近ticker个数(有最大个数限制,最近1000个) :return: (ret, data) ret == RET_OK 返回pd dataframe数据,数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== stock_code str 股票代码 sequence int 逐笔序号 time str 成交时间(美股默认是美东时间,港股A股默认是北京时间) price float 成交价格 volume int 成交数量(股数) turnover float 成交金额 ticker_direction str 逐笔方向 type str 逐笔类型,参见TickerType ===================== =========== ============================================================== """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str if num is None or isinstance(num, int) is False: error_str = ERROR_STR_PREFIX + "the type of num param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( TickerQuery.pack_req, TickerQuery.unpack_rsp, ) kargs = { "code": code, "num": num, "conn_id": self.get_sync_conn_id() } ret_code, msg, ticker_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'time', 'price', 'volume', 'turnover', "ticker_direction", 'sequence', 'type' ] ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list) return RET_OK, ticker_frame_table def get_cur_kline(self, code, num, ktype=SubType.K_DAY, autype=AuType.QFQ): """ 实时获取指定股票最近num个K线数据,最多1000根 :param code: 股票代码 :param num: k线数据个数 :param ktype: k线类型,参见KLType :param autype: 复权类型,参见AuType :return: (ret, data) ret == RET_OK 返回pd dataframe数据,数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 股票代码 time_key str 时间(美股默认是美东时间,港股A股默认是北京时间) open float 开盘价 close float 收盘价 high float 最高价 low float 最低价 volume int 
成交量 turnover float 成交额 pe_ratio float 市盈率(该字段为比例字段,默认不展示%) turnover_rate float 换手率 ===================== =========== ============================================================== """ param_table = {'code': code, 'ktype': ktype} for x in param_table: param = param_table[x] if param is None or is_str(param) is False: error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x return RET_ERROR, error_str if num is None or isinstance(num, int) is False: error_str = ERROR_STR_PREFIX + "the type of num param is wrong" return RET_ERROR, error_str if autype is not None and is_str(autype) is False: error_str = ERROR_STR_PREFIX + "the type of autype param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( CurKlineQuery.pack_req, CurKlineQuery.unpack_rsp, ) kargs = { "code": code, "num": num, "ktype": ktype, "autype": autype, "conn_id": self.get_sync_conn_id() } ret_code, msg, kline_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'time_key', 'open', 'close', 'high', 'low', 'volume', 'turnover', 'pe_ratio', 'turnover_rate' ] kline_frame_table = pd.DataFrame(kline_list, columns=col_list) return RET_OK, kline_frame_table def get_order_book(self, code): """ 获取实时摆盘数据 :param code: 股票代码 :return: (ret, data) ret == RET_OK 返回字典,数据格式如下 ret != RET_OK 返回错误字符串 {‘code’: 股票代码 ‘Ask’:[ (ask_price1, ask_volume1,order_num), (ask_price2, ask_volume2, order_num),…] ‘Bid’: [ (bid_price1, bid_volume1, order_num), (bid_price2, bid_volume2, order_num),…] } 'Ask':卖盘, 'Bid'买盘。每个元组的含义是(委托价格,委托数量,委托订单数) """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( OrderBookQuery.pack_req, OrderBookQuery.unpack_rsp, ) kargs = { "code": code, "conn_id": self.get_sync_conn_id() } ret_code, msg, orderbook = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg return RET_OK, orderbook def get_multi_points_history_kline(self, code_list, dates, fields, ktype=KLType.K_DAY, autype=AuType.QFQ, no_data_mode=KLNoDataMode.FORWARD): ''' 从本地历史K线中获取多支股票多个时间点的指定数据列 :param code_list: 单个或多个股票 'HK.00700' or ['HK.00700', 'HK.00001'] :param dates: 单个或多个日期 '2017-01-01' or ['2017-01-01', '2017-01-02'] :param fields: 单个或多个数据列 KL_FIELD.ALL or [KL_FIELD.DATE_TIME, KL_FIELD.OPEN] :param ktype: K线类型 :param autype: 复权类型 :param no_data_mode: 指定时间为非交易日时,对应的k线数据取值模式,参见KLNoDataMode :return: (ret, data) ret == RET_OK 返回pd dataframe数据,固定表头包括'code'(代码) 'time_point'(指定的日期) 'data_status' (KLDataStatus)。数据列格式如下 ret != RET_OK 返回错误字符串 ================= =========== ============================================================================== 参数 类型 说明 ================= =========== ============================================================================== code str 股票代码 time_point str 请求的时间 data_status str 数据点是否有效,参见KLDataStatus time_key str k线时间(美股默认是美东时间,港股A股默认是北京时间) open float 开盘价 close float 收盘价 high float 最高价 low float 最低价 pe_ratio float 市盈率(该字段为比例字段,默认不展示%) turnover_rate float 换手率 volume int 成交量 turnover float 成交额 change_rate float 涨跌幅 last_close float 昨收价 ================= =========== ============================================================================== ''' req_codes = unique_and_normalize_list(code_list) if not code_list: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str req_dates = unique_and_normalize_list(dates) if not dates: error_str = ERROR_STR_PREFIX + "the type of 
dates param is wrong" return RET_ERROR, error_str req_fields = unique_and_normalize_list(fields) if not fields: req_fields = copy(KL_FIELD.ALL_REAL) req_fields = KL_FIELD.normalize_field_list(req_fields) if not req_fields: error_str = ERROR_STR_PREFIX + "the type of fields param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( MultiPointsHisKLine.pack_req, MultiPointsHisKLine.unpack_rsp) # 一次性最多取100支股票的数据 max_req_code_num = 50 data_finish = False list_ret = [] # 循环请求数据,避免一次性取太多超时 while not data_finish: logger.debug('get_multi_points_history_kline - wait ... %s' % datetime.now()) kargs = { "code_list": req_codes, "dates": req_dates, "fields": copy(req_fields), "ktype": ktype, "autype": autype, "max_req": max_req_code_num, "no_data_mode": int(no_data_mode), "conn_id": self.get_sync_conn_id() } ret_code, msg, content = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg list_kline, has_next = content data_finish = (not has_next) for dict_item in list_kline: item_code = dict_item['code'] list_ret.append(dict_item) if item_code in req_codes: req_codes.remove(item_code) if 0 == len(req_codes): data_finish = True # 表头列 col_list = ['code', 'time_point', 'data_status'] for field in req_fields: str_field = KL_FIELD.DICT_KL_FIELD_STR[field] if str_field not in col_list: col_list.append(str_field) pd_frame = pd.DataFrame(list_ret, columns=col_list) return RET_OK, pd_frame def get_referencestock_list(self, code, reference_type): """ 获取证券的关联数据 :param code: 证券id,str,例如HK.00700 :param reference_type: 要获得的相关数据,参见SecurityReferenceType。例如WARRANT,表示获取正股相关的涡轮 :return: (ret, data) ret == RET_OK 返回pd dataframe数据,数据列格式如下 ret != RET_OK 返回错误字符串 ================= =========== ============================================================================== 参数 类型 说明 ================= =========== ============================================================================== code str 证券代码 lot_size int 每手数量 stock_type str 证券类型,参见SecurityType stock_name str 证券名字 list_time str 上市时间(美股默认是美东时间,港股A股默认是北京时间) wrt_valid bool 是否是涡轮,如果为True,下面wrt开头的字段有效 wrt_type str 涡轮类型,参见WrtType wrt_code str 所属正股 ================= =========== ============================================================================== """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( StockReferenceList.pack_req, StockReferenceList.unpack_rsp, ) kargs = { "code": code, 'ref_type': reference_type, "conn_id": self.get_sync_conn_id() } ret_code, msg, data_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'lot_size', 'stock_type', 'stock_name', 'list_time', 'wrt_valid', 'wrt_type', 'wrt_code' ] pd_frame = pd.DataFrame(data_list, columns=col_list) return RET_OK, pd_frame def get_owner_plate(self, code_list): """ 获取单支或多支股票的所属板块信息列表 :param code_list: 股票代码列表,仅支持正股、指数。list或str。例如:['HK.00700', 'HK.00001']或者'HK.00700,HK.00001'。 :return: (ret, data) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== code str 证券代码 plate_code str 板块代码 plate_name str 板块名字 plate_type str 板块类型(行业板块或概念板块),futu.common.constant.Plate ===================== =========== ============================================================== 
""" if is_str(code_list): code_list = code_list.split(',') elif isinstance(code_list, list): pass else: return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'" code_list = unique_and_normalize_list(code_list) for code in code_list: if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( OwnerPlateQuery.pack_req, OwnerPlateQuery.unpack_rsp) kargs = { "code_list": code_list, "conn_id": self.get_sync_conn_id() } ret_code, msg, owner_plate_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'plate_code', 'plate_name', 'plate_type' ] owner_plate_table = pd.DataFrame(owner_plate_list, columns=col_list) return RET_OK, owner_plate_table def get_holding_change_list(self, code, holder_type, start=None, end=None): """ 获取大股东持股变动列表,只提供美股数据 :param code: 股票代码. 例如:'US.AAPL' :param holder_type: 持有者类别,StockHolder_ :param start: 开始时间. 例如:'2016-10-01' :param end: 结束时间,例如:'2017-10-01'。 start与end的组合如下: ========== ========== ======================================== start类型 end类型 说明 ========== ========== ======================================== str str start和end分别为指定的日期 None str start为end往前365天 str None end为start往后365天 None None end为当前日期,start为end往前365天 ========== ========== ======================================== :return: (ret, data) ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下 ret != RET_OK 返回错误字符串 ===================== =========== ============================================================== 参数 类型 说明 ===================== =========== ============================================================== holder_name str 高管名称 holding_qty float 持股数 holding_ratio float 持股比例(该字段为比例字段,默认不展示%) change_qty float 变动数 change_ratio float 变动比例(该字段为比例字段,默认不展示%) time str 发布时间(美股的时间默认是美东) ===================== =========== ============================================================== """ holder_type = STOCK_HOLDER_CLASS_MAP[holder_type] if code is None or is_str(code) is False: msg = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, msg if holder_type < 1 or holder_type > len(STOCK_HOLDER_CLASS_MAP): msg = ERROR_STR_PREFIX + "the type {0} is wrong, total number of types is {1}".format(holder_type, len(STOCK_HOLDER_CLASS_MAP)) return RET_ERROR, msg ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=365) if ret_code != RET_OK: return ret_code, msg query_processor = self._get_sync_query_processor( HoldingChangeList.pack_req, HoldingChangeList.unpack_rsp) kargs = { "code": code, "holder_type": holder_type, "conn_id": self.get_sync_conn_id(), "start_date": start, "end_date": end } ret_code, msg, owner_plate_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'holder_name', 'holding_qty', 'holding_ratio', 'change_qty', 'change_ratio', 'time' ] holding_change_list = pd.DataFrame(owner_plate_list, columns=col_list) return RET_OK, holding_change_list def get_option_chain(self, code, start=None, end=None, option_type=OptionType.ALL, option_cond_type=OptionCondType.ALL): """ 通过标的股查询期权 :param code: 股票代码,例如:'HK.02318' :param start: 开始日期,该日期指到期日,例如'2017-08-01' :param end: 结束日期(包括这一天),该日期指到期日,例如'2017-08-30'。 注意,时间范围最多30天 start和end的组合如下: ========== ========== ======================================== start类型 end类型 说明 ========== ========== ======================================== str str start和end分别为指定的日期 None str start为end往前30天 str None 
end为start往后30天 None None start为当前日期,end往后30天 ========== ========== ======================================== :param option_type: 期权类型,默认全部,全部/看涨/看跌,futu.common.constant.OptionType :param option_cond_type: 默认全部,全部/价内/价外,futu.common.constant.OptionCondType :return: (ret, data) ret == RET_OK 返回pd dataframe数据,数据列格式如下 ret != RET_OK 返回错误字符串 ================== =========== ============================================================== 参数 类型 说明 ================== =========== ============================================================== code str 股票代码 name str 名字 lot_size int 每手数量 stock_type str 股票类型,参见SecurityType option_type str 期权类型,Qot_Common.OptionType stock_owner str 标的股 strike_time str 行权日(美股默认是美东时间,港股A股默认是北京时间) strike_price float 行权价 suspension bool 是否停牌(True表示停牌) stock_id int 股票id ================== =========== ============================================================== """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=29, default_time_end='00:00:00', prefer_end_now=False) if ret_code != RET_OK: return ret_code, msg query_processor = self._get_sync_query_processor( OptionChain.pack_req, OptionChain.unpack_rsp) kargs = { "code": code, "conn_id": self.get_sync_conn_id(), "start_date": start, "end_date": end, "option_cond_type": option_cond_type, "option_type": option_type } ret_code, msg, option_chain_list = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg col_list = [ 'code', 'name', 'lot_size', 'stock_type', 'option_type', 'stock_owner', 'strike_time', 'strike_price', 'suspension', 'stock_id' ] option_chain = pd.DataFrame(option_chain_list, columns=col_list) option_chain.sort_values(by=["strike_time", "strike_price"], axis=0, ascending=True, inplace=True) option_chain.index = range(len(option_chain)) return RET_OK, option_chain def get_order_detail(self, code): return RET_ERROR, "this service has been cancelled" """ 查询A股Level 2权限下提供的委托明细 :param code: 股票代码,例如:'HK.02318' :return: (ret, data) ret == RET_OK data为1个dict,包含以下数据 ret != RET_OK data为错误字符串 {‘code’: 股票代码 ‘Ask’:[ order_num, [order_volume1, order_volume2] ] ‘Bid’: [ order_num, [order_volume1, order_volume2] ] } 'Ask':卖盘, 'Bid'买盘。order_num指委托订单数量,order_volume是每笔委托的委托量,当前最多返回前50笔委托的委托数量。即order_num有可能多于后面的order_volume """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( OrderDetail.pack_req, OrderDetail.unpack_rsp) kargs = { "code": code, "conn_id": self.get_sync_conn_id() } ret_code, msg, order_detail = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg return RET_OK, order_detail def get_warrant(self, stock_owner='', req=None): """ :param stock_owner:所属正股 :param req:futu.quote.quote_get_warrant.Request """ from futu.quote.quote_get_warrant import Request if (req is None) or (not isinstance(req, Request)): req = Request() if stock_owner is not None: req.stock_owner = stock_owner query_processor = self._get_sync_query_processor(QuoteWarrant.pack_req, QuoteWarrant.unpack_rsp) kargs = { "req": req, "conn_id": self.get_sync_conn_id() } ret_code, msg, content = query_processor(**kargs) if ret_code != RET_OK: return ret_code, msg else: warrant_data_list, last_page, all_count = content col_list = ['stock', 'name', 'stock_owner', 'type', 'issuer', 'maturity_time', 'list_time', 'last_trade_time', 
'recovery_price', 'conversion_ratio', 'lot_size', 'strike_price', 'last_close_price', 'cur_price', 'price_change_val', 'change_rate', 'status', 'bid_price', 'ask_price', 'bid_vol', 'ask_vol', 'volume', 'turnover', 'score', 'premium', 'break_even_point', 'leverage', 'ipop', 'price_recovery_ratio', 'conversion_price', 'street_rate', 'street_vol', 'amplitude', 'issue_size', 'high_price', 'low_price', 'implied_volatility', 'delta', 'effective_leverage', 'list_timestamp', 'last_trade_timestamp', 'maturity_timestamp'] warrant_data_frame = pd.DataFrame(warrant_data_list, columns=col_list) #1120400921001028854 return ret_code, (warrant_data_frame, last_page, all_count)
41.841547
184
0.45184
import datetime
import math
from time import sleep

import pandas as pd
from futu.common.open_context_base import OpenContextBase, ContextStatus
from futu.quote.quote_query import *


class OpenQuoteContext(OpenContextBase):
    def __init__(self, host='127.0.0.1', port=11111):
        self._ctx_subscribe = {}
        super(OpenQuoteContext, self).__init__(host, port, True)

    def close(self):
        super(OpenQuoteContext, self).close()

    def on_api_socket_reconnected(self):
        resub_count = 0
        subtype_list = []
        code_list = []

        resub_dict = copy(self._ctx_subscribe)
        subtype_all_cnt = len(resub_dict.keys())
        subtype_cur_cnt = 0

        ret_code = RET_OK
        ret_msg = ''

        for subtype in resub_dict.keys():
            subtype_cur_cnt += 1
            code_set = resub_dict[subtype]
            code_list_new = [code for code in code_set]
            if len(code_list_new) == 0:
                continue

            if len(code_list) == 0:
                code_list = code_list_new
                subtype_list = [subtype]

            is_need_sub = False
            if code_list == code_list_new:
                if subtype not in subtype_list:
                    subtype_list.append(subtype)
            else:
                ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
                logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
                    len(code_list), ret_code, ret_msg, subtype_list, code_list))
                if ret_code != RET_OK:
                    break
                resub_count += len(code_list)
                code_list = code_list_new
                subtype_list = [subtype]

            if subtype_cur_cnt == subtype_all_cnt and len(code_list):
                ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
                logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
                    len(code_list), ret_code, ret_msg, subtype_list, code_list))
                if ret_code != RET_OK:
                    break
                resub_count += len(code_list)
                code_list = []
                subtype_list = []

        logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(
            resub_count, ret_code, ret_msg))

        if ret_code != RET_OK:
            logger.error("reconnect subscribe error, close connect and retry!!")
            self._status = ContextStatus.Start
            self._wait_reconnect()
        return ret_code, ret_msg

    def get_trading_days(self, market, start=None, end=None):
        if market is None or is_str(market) is False:
            error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
            return RET_ERROR, error_str

        ret, msg, start, end = normalize_start_end_date(start, end, 365)
        if ret != RET_OK:
            return ret, msg

        query_processor = self._get_sync_query_processor(
            TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp)

        kargs = {
            'market': market,
            'start_date': start,
            'end_date': end,
            'conn_id': self.get_sync_conn_id()
        }
        ret_code, msg, trade_day_list = query_processor(**kargs)
        if ret_code != RET_OK:
            return RET_ERROR, msg

        return RET_OK, trade_day_list

    def get_stock_basicinfo(self, market, stock_type=SecurityType.STOCK, code_list=None):
        param_table = {'market': market, 'stock_type': stock_type}
        for x in param_table:
            param = param_table[x]
            if param is None or is_str(param) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        if code_list is not None:
            if is_str(code_list):
                code_list = code_list.split(',')
            elif isinstance(code_list, list):
                pass
            else:
                return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"

        query_processor = self._get_sync_query_processor(
            StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp)
        kargs = {
            "market": market,
            'stock_type': stock_type,
            'code_list': code_list,
            'conn_id': self.get_sync_conn_id()
        }

        ret_code, msg, basic_info_list = query_processor(**kargs)
        if ret_code != RET_OK:
            return ret_code, msg

        col_list = ['code', 'name', 'lot_size', 'stock_type', 'stock_child_type',
                    'stock_owner', 'option_type', 'strike_time', 'strike_price',
                    'suspension', 'listing_date', 'stock_id', 'delisting']

        basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)

        return RET_OK, basic_info_table

    def get_multiple_history_kline(self, codelist, start=None, end=None,
                                   ktype=KLType.K_DAY, autype=AuType.QFQ):
        if is_str(codelist):
            codelist = codelist.split(',')
        elif isinstance(codelist, list):
            pass
        else:
            return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"
        result = []
        for code in codelist:
            ret, data = self.get_history_kline(code, start, end, ktype, autype)
            if ret != RET_OK:
                return RET_ERROR, 'get history kline error: {}, {},{},{},{}'.format(
                    data, code, start, end, ktype)
            result.append(data)
        return 0, result

    def _get_history_kline_impl(self, query_cls, code, start=None, end=None,
                                ktype=KLType.K_DAY, autype=AuType.QFQ,
                                fields=[KL_FIELD.ALL]):
        ret, msg, req_start, end = normalize_start_end_date(start, end, 365)
        if ret != RET_OK:
            return ret, msg

        req_fields = unique_and_normalize_list(fields)
        if not fields:
            req_fields = copy(KL_FIELD.ALL_REAL)
        req_fields = KL_FIELD.normalize_field_list(req_fields)
        if not req_fields:
            error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
            return RET_ERROR, error_str

        if autype is None:
            autype = 'None'

        param_table = {'code': code, 'ktype': ktype, 'autype': autype}
        for x in param_table:
            param = param_table[x]
            if param is None or is_str(param) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        max_kl_num = 1000
        data_finish = False
        list_ret = []
        while not data_finish:
            kargs = {
                "code": code,
                "start_date": req_start,
                "end_date": end,
                "ktype": ktype,
                "autype": autype,
                "fields": copy(req_fields),
                "max_num": max_kl_num,
                "conn_id": self.get_sync_conn_id()
            }
            query_processor = self._get_sync_query_processor(query_cls.pack_req,
                                                             query_cls.unpack_rsp)
            ret_code, msg, content = query_processor(**kargs)
            if ret_code != RET_OK:
                return ret_code, msg
            list_kline, has_next, next_time = content
            data_finish = (not has_next) or (not next_time)
            req_start = next_time
            for dict_item in list_kline:
                list_ret.append(dict_item)

        col_list = ['code']
        for field in req_fields:
            str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
            if str_field not in col_list:
                col_list.append(str_field)

        kline_frame_table = pd.DataFrame(list_ret, columns=col_list)

        return RET_OK, kline_frame_table

    def get_history_kline(self, code, start=None, end=None, ktype=KLType.K_DAY,
                          autype=AuType.QFQ, fields=[KL_FIELD.ALL]):
        return self._get_history_kline_impl(GetHistoryKlineQuery, code, start=start,
                                            end=end, ktype=ktype, autype=autype,
                                            fields=fields)

    def request_history_kline(self, code, start=None, end=None, ktype=KLType.K_DAY,
                              autype=AuType.QFQ, fields=[KL_FIELD.ALL],
                              max_count=1000, page_req_key=None):
        next_page_req_key = None
        ret, msg, req_start, end = normalize_start_end_date(start, end, 365)
        if ret != RET_OK:
            return ret, msg, next_page_req_key

        req_fields = unique_and_normalize_list(fields)
        if not fields:
            req_fields = copy(KL_FIELD.ALL_REAL)
        req_fields = KL_FIELD.normalize_field_list(req_fields)
        if not req_fields:
            error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
            return RET_ERROR, error_str, next_page_req_key

        if autype is None:
            autype = 'None'

        param_table = {'code': code, 'ktype': ktype, 'autype': autype}
        for x in param_table:
            param = param_table[x]
            if param is None or is_str(param) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str, next_page_req_key

        max_kl_num = min(1000, max_count) if max_count is not None else 1000
        data_finish = False
        list_ret = []
        while not data_finish:
            kargs = {
                "code": code,
                "start_date": req_start,
                "end_date": end,
                "ktype": ktype,
                "autype": autype,
                "fields": copy(req_fields),
                "max_num": max_kl_num,
                "conn_id": self.get_sync_conn_id(),
                "next_req_key": page_req_key
            }
            query_processor = self._get_sync_query_processor(RequestHistoryKlineQuery.pack_req,
                                                             RequestHistoryKlineQuery.unpack_rsp)
            ret_code, msg, content = query_processor(**kargs)
            if ret_code != RET_OK:
                return ret_code, msg, next_page_req_key
            list_kline, has_next, page_req_key = content
            list_ret.extend(list_kline)
            next_page_req_key = page_req_key
            if max_count is not None:
                if max_count > len(list_ret) and has_next:
                    data_finish = False
                    max_kl_num = min(max_count - len(list_ret), 1000)
                else:
                    data_finish = True
            else:
                data_finish = not has_next

        col_list = ['code']
        for field in req_fields:
            str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
            if str_field not in col_list:
                col_list.append(str_field)

        kline_frame_table = pd.DataFrame(list_ret, columns=col_list)

        return RET_OK, kline_frame_table, next_page_req_key

    def get_autype_list(self, code_list):
        code_list = unique_and_normalize_list(code_list)

        for code in code_list:
            if code is None or is_str(code) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            ExrightQuery.pack_req, ExrightQuery.unpack_rsp)
        kargs = {
            "stock_list": code_list,
            "conn_id": self.get_sync_conn_id()
        }
        ret_code, msg, exr_record = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'ex_div_date', 'split_ratio', 'per_cash_div',
                    'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio',
                    'allotment_price', 'stk_spo_ratio', 'stk_spo_price',
                    'forward_adj_factorA', 'forward_adj_factorB',
                    'backward_adj_factorA', 'backward_adj_factorB']

        exr_frame_table = pd.DataFrame(exr_record, columns=col_list)

        return RET_OK, exr_frame_table

    def get_market_snapshot(self, code_list):
        code_list = unique_and_normalize_list(code_list)
        if not code_list:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            MarketSnapshotQuery.pack_req, MarketSnapshotQuery.unpack_rsp)
        kargs = {
            "stock_list": code_list,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, msg, snapshot_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        equity_col_list = ['issued_shares', 'total_market_val', 'net_asset', 'net_profit',
                           'earning_per_share', 'outstanding_shares', 'circular_market_val',
                           'net_asset_per_share', 'ey_ratio', 'pe_ratio', 'pb_ratio',
                           'pe_ttm_ratio']
        wrt_col_list = ['wrt_conversion_ratio', 'wrt_type', 'wrt_strike_price',
                        'wrt_maturity_date', 'wrt_end_trade', 'wrt_recovery_price',
                        'wrt_street_vol', 'wrt_issue_vol', 'wrt_street_ratio', 'wrt_delta',
                        'wrt_implied_volatility', 'wrt_premium']
        option_col_list = ['option_type', 'strike_time', 'option_strike_price',
                           'option_contract_size', 'option_open_interest',
                           'option_implied_volatility', 'option_premium', 'option_delta',
                           'option_gamma', 'option_vega', 'option_theta', 'option_rho']
        col_list = ['code', 'update_time', 'last_price', 'open_price', 'high_price',
                    'low_price', 'prev_close_price', 'volume', 'turnover', 'turnover_rate',
                    'suspension', 'listing_date', 'lot_size', 'price_spread', 'stock_owner',
                    'ask_price', 'bid_price', 'ask_vol', 'bid_vol']

        col_list.append('equity_valid')
        col_list.extend(equity_col_list)
        col_list.append('wrt_valid')
        col_list.extend(wrt_col_list)
        col_list.append('option_valid')
        col_list.extend(option_col_list)

        snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list)

        return RET_OK, snapshot_frame_table

    def get_rt_data(self, code):
        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
        kargs = {
            "code": code,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, msg, rt_data_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        for x in rt_data_list:
            x['code'] = code

        col_list = ['code', 'time', 'is_blank', 'opened_mins', 'cur_price',
                    'last_close', 'avg_price', 'volume', 'turnover']

        rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)

        return RET_OK, rt_data_table

    def get_plate_list(self, market, plate_class):
        param_table = {'market': market, 'plate_class': plate_class}
        for x in param_table:
            param = param_table[x]
            if param is None or is_str(market) is False:
                error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
                return RET_ERROR, error_str

        if market not in MKT_MAP:
            error_str = ERROR_STR_PREFIX + "the value of market param is wrong "
            return RET_ERROR, error_str

        if plate_class not in PLATE_CLASS_MAP:
            error_str = ERROR_STR_PREFIX + "the class of plate is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            SubplateQuery.pack_req, SubplateQuery.unpack_rsp)
        kargs = {
            'market': market,
            'plate_class': plate_class,
            'conn_id': self.get_sync_conn_id()
        }

        ret_code, msg, subplate_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'plate_name', 'plate_id']

        subplate_frame_table = pd.DataFrame(subplate_list, columns=col_list)

        return RET_OK, subplate_frame_table

    def get_plate_stock(self, plate_code):
        if plate_code is None or is_str(plate_code) is False:
            error_str = ERROR_STR_PREFIX + "the type of code is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            PlateStockQuery.pack_req, PlateStockQuery.unpack_rsp)
        kargs = {
            "plate_code": plate_code,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, msg, plate_stock_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'lot_size', 'stock_name', 'stock_owner',
                    'stock_child_type', 'stock_type', 'list_time', 'stock_id']

        plate_stock_table = pd.DataFrame(plate_stock_list, columns=col_list)

        return RET_OK, plate_stock_table

    def get_broker_queue(self, code):
        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            BrokerQueueQuery.pack_req, BrokerQueueQuery.unpack_rsp)
        kargs = {
            "code": code,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, ret_msg, content = query_processor(**kargs)
        if ret_code != RET_OK:
            return ret_code, ret_msg, ret_msg
        (_, bid_list, ask_list) = content

        col_bid_list = ['code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos']
        col_ask_list = ['code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos']

        bid_frame_table = pd.DataFrame(bid_list, columns=col_bid_list)
        ask_frame_table = pd.DataFrame(ask_list, columns=col_ask_list)
        return RET_OK, bid_frame_table, ask_frame_table

    def _check_subscribe_param(self, code_list, subtype_list):
        code_list = unique_and_normalize_list(code_list)
        subtype_list = unique_and_normalize_list(subtype_list)

        if len(code_list) == 0:
            msg = ERROR_STR_PREFIX + 'code_list is null'
            return RET_ERROR, msg, code_list, subtype_list

        if len(subtype_list) == 0:
            msg = ERROR_STR_PREFIX + 'subtype_list is null'
            return RET_ERROR, msg, code_list, subtype_list

        for subtype in subtype_list:
            if subtype not in SUBTYPE_MAP:
                subtype_str = ','.join([x for x in SUBTYPE_MAP])
                msg = ERROR_STR_PREFIX + 'subtype is %s , which is wrong. (%s)' % (
                    subtype, subtype_str)
                return RET_ERROR, msg, code_list, subtype_list

        for code in code_list:
            ret, msg = split_stock_str(code)
            if ret != RET_OK:
                return RET_ERROR, msg, code_list, subtype_list

        return RET_OK, "", code_list, subtype_list

    def subscribe(self, code_list, subtype_list, is_first_push=True, subscribe_push=True):
        return self._subscribe_impl(code_list, subtype_list, is_first_push, subscribe_push)

    def _subscribe_impl(self, code_list, subtype_list, is_first_push, subscribe_push=True):
        ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
        if ret != RET_OK:
            return ret, msg

        kline_sub_count = 0
        for sub_type in subtype_list:
            if sub_type in KLINE_SUBTYPE_LIST:
                kline_sub_count += 1

        query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscribe_req,
                                                         SubscriptionQuery.unpack_subscribe_rsp)

        kargs = {
            'code_list': code_list,
            'subtype_list': subtype_list,
            'conn_id': self.get_sync_conn_id(),
            'is_first_push': is_first_push,
            'subscribe_push': subscribe_push
        }
        ret_code, msg, _ = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        for subtype in subtype_list:
            if subtype not in self._ctx_subscribe:
                self._ctx_subscribe[subtype] = set()
            code_set = self._ctx_subscribe[subtype]
            code_set.update(code_list)

        return RET_OK, None

    def _reconnect_subscribe(self, code_list, subtype_list):
        kline_sub_list = []
        other_sub_list = []
        for sub in subtype_list:
            if sub in KLINE_SUBTYPE_LIST:
                kline_sub_list.append(sub)
            else:
                other_sub_list.append(sub)

        kline_sub_one_size = 1
        if len(kline_sub_list) > 0:
            kline_sub_one_size = math.floor(100 / len(kline_sub_list))

        sub_info_list = [
            {"sub_list": kline_sub_list, "one_size": kline_sub_one_size},
            {"sub_list": other_sub_list, "one_size": 100},
        ]

        ret_code = RET_OK
        ret_data = None

        for info in sub_info_list:
            sub_list = info["sub_list"]
            one_size = info["one_size"]
            all_count = len(code_list)
            start_idx = 0

            while start_idx < all_count and len(sub_list):
                sub_count = one_size if start_idx + one_size <= all_count else (all_count - start_idx)
                sub_codes = code_list[start_idx: start_idx + sub_count]
                start_idx += sub_count

                ret_code, ret_data = self._subscribe_impl(sub_codes, sub_list, False)
                if ret_code != RET_OK:
                    break
            if ret_code != RET_OK:
                break

        return ret_code, ret_data

    def unsubscribe(self, code_list, subtype_list):
        ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)
        if ret != RET_OK:
            return ret, msg

        query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
                                                         SubscriptionQuery.unpack_unsubscribe_rsp)
        kargs = {
            'code_list': code_list,
            'subtype_list': subtype_list,
            "conn_id": self.get_sync_conn_id()
        }

        for subtype in subtype_list:
            if subtype not in self._ctx_subscribe:
                continue
            code_set = self._ctx_subscribe[subtype]
            for code in code_list:
                if code not in code_set:
                    continue
                code_set.remove(code)

        ret_code, msg, _ = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(
            code_list, subtype_list, self.get_async_conn_id())
        if ret_code != RET_OK:
            return RET_ERROR, msg

        ret_code, msg = self._send_async_req(unpush_req_str)
        if ret_code != RET_OK:
            return RET_ERROR, msg

        return RET_OK, None

    def query_subscription(self, is_all_conn=True):
        is_all_conn = bool(is_all_conn)
        query_processor = self._get_sync_query_processor(
            SubscriptionQuery.pack_subscription_query_req,
            SubscriptionQuery.unpack_subscription_query_rsp)
        kargs = {
            "is_all_conn": is_all_conn,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, msg, sub_table = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        ret_dict = {}
        ret_dict['total_used'] = sub_table['total_used']
        ret_dict['remain'] = sub_table['remain']
        ret_dict['own_used'] = 0
        ret_dict['sub_list'] = {}
        for conn_sub in sub_table['conn_sub_list']:
            is_own_conn = conn_sub['is_own_conn']
            if is_own_conn:
                ret_dict['own_used'] = conn_sub['used']
            if not is_all_conn and not is_own_conn:
                continue

            for sub_info in conn_sub['sub_list']:
                subtype = sub_info['subtype']

                if subtype not in ret_dict['sub_list']:
                    ret_dict['sub_list'][subtype] = []
                code_list = ret_dict['sub_list'][subtype]

                for code in sub_info['code_list']:
                    if code not in code_list:
                        code_list.append(code)

        return RET_OK, ret_dict

    def get_stock_quote(self, code_list):
        code_list = unique_and_normalize_list(code_list)
        if not code_list:
            error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            StockQuoteQuery.pack_req,
            StockQuoteQuery.unpack_rsp,
        )
        kargs = {
            "stock_list": code_list,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, msg, quote_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'data_date', 'data_time', 'last_price', 'open_price',
                    'high_price', 'low_price', 'prev_close_price', 'volume', 'turnover',
                    'turnover_rate', 'amplitude', 'suspension', 'listing_date',
                    'price_spread', 'dark_status', 'strike_price', 'contract_size',
                    'open_interest', 'implied_volatility', 'premium', 'delta', 'gamma',
                    'vega', 'theta', 'rho']

        quote_frame_table = pd.DataFrame(quote_list, columns=col_list)

        return RET_OK, quote_frame_table

    def get_rt_ticker(self, code, num=500):
        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        if num is None or isinstance(num, int) is False:
            error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            TickerQuery.pack_req,
            TickerQuery.unpack_rsp,
        )
        kargs = {
            "code": code,
            "num": num,
            "conn_id": self.get_sync_conn_id()
        }
        ret_code, msg, ticker_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'time', 'price', 'volume', 'turnover',
                    "ticker_direction", 'sequence', 'type']

        ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)

        return RET_OK, ticker_frame_table

    def get_cur_kline(self, code, num, ktype=SubType.K_DAY, autype=AuType.QFQ):
        param_table = {'code': code, 'ktype': ktype}
        for x in param_table:
            param = param_table[x]
            if param is None or is_str(param) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        if num is None or isinstance(num, int) is False:
            error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
            return RET_ERROR, error_str

        if autype is not None and is_str(autype) is False:
            error_str = ERROR_STR_PREFIX + "the type of autype param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            CurKlineQuery.pack_req,
            CurKlineQuery.unpack_rsp,
        )

        kargs = {
            "code": code,
            "num": num,
            "ktype": ktype,
            "autype": autype,
            "conn_id": self.get_sync_conn_id()
        }
        ret_code, msg, kline_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'time_key', 'open', 'close', 'high', 'low',
                    'volume', 'turnover', 'pe_ratio', 'turnover_rate']

        kline_frame_table = pd.DataFrame(kline_list, columns=col_list)

        return RET_OK, kline_frame_table

    def get_order_book(self, code):
        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            OrderBookQuery.pack_req,
            OrderBookQuery.unpack_rsp,
        )

        kargs = {
            "code": code,
            "conn_id": self.get_sync_conn_id()
        }
        ret_code, msg, orderbook = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        return RET_OK, orderbook

    def get_multi_points_history_kline(self, code_list, dates, fields,
                                       ktype=KLType.K_DAY, autype=AuType.QFQ,
                                       no_data_mode=KLNoDataMode.FORWARD):
        req_codes = unique_and_normalize_list(code_list)
        if not code_list:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        req_dates = unique_and_normalize_list(dates)
        if not dates:
            error_str = ERROR_STR_PREFIX + "the type of dates param is wrong"
            return RET_ERROR, error_str

        req_fields = unique_and_normalize_list(fields)
        if not fields:
            req_fields = copy(KL_FIELD.ALL_REAL)
        req_fields = KL_FIELD.normalize_field_list(req_fields)
        if not req_fields:
            error_str = ERROR_STR_PREFIX + "the type of fields param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            MultiPointsHisKLine.pack_req, MultiPointsHisKLine.unpack_rsp)

        max_req_code_num = 50

        data_finish = False
        list_ret = []
        while not data_finish:
            logger.debug('get_multi_points_history_kline - wait ... %s' % datetime.now())
            kargs = {
                "code_list": req_codes,
                "dates": req_dates,
                "fields": copy(req_fields),
                "ktype": ktype,
                "autype": autype,
                "max_req": max_req_code_num,
                "no_data_mode": int(no_data_mode),
                "conn_id": self.get_sync_conn_id()
            }
            ret_code, msg, content = query_processor(**kargs)
            if ret_code == RET_ERROR:
                return ret_code, msg

            list_kline, has_next = content
            data_finish = (not has_next)

            for dict_item in list_kline:
                item_code = dict_item['code']
                list_ret.append(dict_item)
                if item_code in req_codes:
                    req_codes.remove(item_code)

            if 0 == len(req_codes):
                data_finish = True

        col_list = ['code', 'time_point', 'data_status']
        for field in req_fields:
            str_field = KL_FIELD.DICT_KL_FIELD_STR[field]
            if str_field not in col_list:
                col_list.append(str_field)

        pd_frame = pd.DataFrame(list_ret, columns=col_list)

        return RET_OK, pd_frame

    def get_referencestock_list(self, code, reference_type):
        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            StockReferenceList.pack_req,
            StockReferenceList.unpack_rsp,
        )

        kargs = {
            "code": code,
            'ref_type': reference_type,
            "conn_id": self.get_sync_conn_id()
        }
        ret_code, msg, data_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'lot_size', 'stock_type', 'stock_name', 'list_time',
                    'wrt_valid', 'wrt_type', 'wrt_code']

        pd_frame = pd.DataFrame(data_list, columns=col_list)

        return RET_OK, pd_frame

    def get_owner_plate(self, code_list):
        if is_str(code_list):
            code_list = code_list.split(',')
        elif isinstance(code_list, list):
            pass
        else:
            return RET_ERROR, "code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'"

        code_list = unique_and_normalize_list(code_list)
        for code in code_list:
            if code is None or is_str(code) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            OwnerPlateQuery.pack_req, OwnerPlateQuery.unpack_rsp)
        kargs = {
            "code_list": code_list,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, msg, owner_plate_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'plate_code', 'plate_name', 'plate_type']

        owner_plate_table = pd.DataFrame(owner_plate_list, columns=col_list)

        return RET_OK, owner_plate_table

    def get_holding_change_list(self, code, holder_type, start=None, end=None):
        holder_type = STOCK_HOLDER_CLASS_MAP[holder_type]
        if code is None or is_str(code) is False:
            msg = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, msg

        if holder_type < 1 or holder_type > len(STOCK_HOLDER_CLASS_MAP):
            msg = ERROR_STR_PREFIX + "the type {0} is wrong, total number of types is {1}".format(
                holder_type, len(STOCK_HOLDER_CLASS_MAP))
            return RET_ERROR, msg

        ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=365)
        if ret_code != RET_OK:
            return ret_code, msg

        query_processor = self._get_sync_query_processor(
            HoldingChangeList.pack_req, HoldingChangeList.unpack_rsp)
        kargs = {
            "code": code,
            "holder_type": holder_type,
            "conn_id": self.get_sync_conn_id(),
            "start_date": start,
            "end_date": end
        }

        ret_code, msg, owner_plate_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['holder_name', 'holding_qty', 'holding_ratio',
                    'change_qty', 'change_ratio', 'time']
        holding_change_list = pd.DataFrame(owner_plate_list, columns=col_list)

        return RET_OK, holding_change_list

    def get_option_chain(self, code, start=None, end=None,
                         option_type=OptionType.ALL, option_cond_type=OptionCondType.ALL):
        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        ret_code, msg, start, end = normalize_start_end_date(
            start, end, delta_days=29, default_time_end='00:00:00', prefer_end_now=False)
        if ret_code != RET_OK:
            return ret_code, msg

        query_processor = self._get_sync_query_processor(
            OptionChain.pack_req, OptionChain.unpack_rsp)
        kargs = {
            "code": code,
            "conn_id": self.get_sync_conn_id(),
            "start_date": start,
            "end_date": end,
            "option_cond_type": option_cond_type,
            "option_type": option_type
        }

        ret_code, msg, option_chain_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'name', 'lot_size', 'stock_type', 'option_type', 'stock_owner',
                    'strike_time', 'strike_price', 'suspension', 'stock_id']

        option_chain = pd.DataFrame(option_chain_list, columns=col_list)
        option_chain.sort_values(by=["strike_time", "strike_price"],
                                 axis=0, ascending=True, inplace=True)
        option_chain.index = range(len(option_chain))

        return RET_OK, option_chain

    def get_order_detail(self, code):
        return RET_ERROR, "this service has been cancelled"

        if code is None or is_str(code) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(
            OrderDetail.pack_req, OrderDetail.unpack_rsp)
        kargs = {
            "code": code,
            "conn_id": self.get_sync_conn_id()
        }

        ret_code, msg, order_detail = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        return RET_OK, order_detail

    def get_warrant(self, stock_owner='', req=None):
        from futu.quote.quote_get_warrant import Request
        if (req is None) or (not isinstance(req, Request)):
            req = Request()
        if stock_owner is not None:
            req.stock_owner = stock_owner

        query_processor = self._get_sync_query_processor(QuoteWarrant.pack_req,
                                                         QuoteWarrant.unpack_rsp)
        kargs = {
            "req": req,
            "conn_id": self.get_sync_conn_id()
        }
        ret_code, msg, content = query_processor(**kargs)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            warrant_data_list, last_page, all_count = content
            col_list = ['stock', 'name', 'stock_owner', 'type', 'issuer', 'maturity_time',
                        'list_time', 'last_trade_time', 'recovery_price', 'conversion_ratio',
                        'lot_size', 'strike_price', 'last_close_price', 'cur_price',
                        'price_change_val', 'change_rate', 'status', 'bid_price', 'ask_price',
                        'bid_vol', 'ask_vol', 'volume', 'turnover', 'score', 'premium',
                        'break_even_point', 'leverage', 'ipop', 'price_recovery_ratio',
                        'conversion_price', 'street_rate', 'street_vol', 'amplitude',
                        'issue_size', 'high_price', 'low_price', 'implied_volatility', 'delta',
                        'effective_leverage', 'list_timestamp', 'last_trade_timestamp',
                        'maturity_timestamp']
            warrant_data_frame = pd.DataFrame(warrant_data_list, columns=col_list)
            return ret_code, (warrant_data_frame, last_page, all_count)
true
true
1c4a1518d68c3a0ac4df0c03d4e9484fa9bf6c93
4,468
py
Python
autocomplete_light/tests/autocomplete/generic.py
andybak/django-autocomplete-light
19e46261a01a578d73bfae02bf772bc4d81984f9
[ "MIT" ]
null
null
null
autocomplete_light/tests/autocomplete/generic.py
andybak/django-autocomplete-light
19e46261a01a578d73bfae02bf772bc4d81984f9
[ "MIT" ]
null
null
null
autocomplete_light/tests/autocomplete/generic.py
andybak/django-autocomplete-light
19e46261a01a578d73bfae02bf772bc4d81984f9
[ "MIT" ]
null
null
null
from __future__ import unicode_literals

from .case import *

from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission

from ...example_apps.autocomplete_test_case_app.models import User, Group


class AutocompleteGenericMock(autocomplete_light.AutocompleteGenericBase):
    choices = (
        User.objects.filter(pk__lt=10),
        Group.objects.filter(pk__lt=10),
    )
    search_fields = (
        ('username', 'email'),
        ('name',),
    )
    limit_choices = 3


class FormMock(forms.Form):
    x = autocomplete_light.GenericModelChoiceField(
        widget=autocomplete_light.ChoiceWidget(
            autocomplete=AutocompleteGenericMock))


class AutocompleteGenericTestCase(AutocompleteTestCase):
    autocomplete_mock = AutocompleteGenericMock

    def assert_choices_equal(self, result, test):
        self.assertEqual(list(result), test['expected'])

    def get_choices_for_values_tests(self):
        return (
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk)],
             'expected': [self.james, self.bluesmen]},
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.user_ctype.pk, self.elton.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                         '%s-%s' % (self.group_ctype.pk, self.emos.pk)],
             'expected': [self.james, self.bluesmen],
             'name': 'should ignore values that are not in the querysets'},
        )

    def get_choices_for_request_tests(self):
        return (
            {'fixture': make_get_request('j'),
             'expected': [self.abe, self.rockers, self.bluesmen]},
            {'fixture': make_get_request('q=elton'),
             'expected': [],
             'name': 'should not propose models that are not in the qs'},
        )

    def get_validate_tests(self):
        return (
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                         '%s-%s' % (self.group_ctype.pk, self.emos.pk)],
             'expected': False},
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk)],
             'expected': True},
            {'fixture': [], 'expected': True},
            {'fixture': ['bla'], 'expected': False},
            {'fixture': ['123123-123123'], 'expected': False},
        )

    def get_autocomplete_html_tests(self):
        return []

    def get_widget_tests(self):
        return (
            {'form_class': FormMock,
             'fixture': 'x=%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
             'expected_valid': True,
             'expected_data': self.bluesmen},
            {'form_class': FormMock,
             'fixture': 'x=%s-%s' % (self.group_ctype.pk, self.emos.pk),
             'expected_valid': False},
            {'form_class': FormMock,
             'fixture': 'x=12343-2',
             'expected_valid': False},
            {'form_class': FormMock,
             'fixture': 'x=%s-2' % ContentType.objects.get_for_model(Permission).pk,
             'expected_valid': False},
        )

    def test_default_search_fields(self):
        class MyGeneric(autocomplete_light.AutocompleteGenericBase):
            choices = [Group.objects.all()]

        self.assertEqual(MyGeneric.search_fields, [('name',)])
30.60274
77
0.456356
from __future__ import unicode_literals

from .case import *

from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission

from ...example_apps.autocomplete_test_case_app.models import User, Group


class AutocompleteGenericMock(autocomplete_light.AutocompleteGenericBase):
    choices = (
        User.objects.filter(pk__lt=10),
        Group.objects.filter(pk__lt=10),
    )
    search_fields = (
        ('username', 'email'),
        ('name',),
    )
    limit_choices = 3


class FormMock(forms.Form):
    x = autocomplete_light.GenericModelChoiceField(
        widget=autocomplete_light.ChoiceWidget(
            autocomplete=AutocompleteGenericMock))


class AutocompleteGenericTestCase(AutocompleteTestCase):
    autocomplete_mock = AutocompleteGenericMock

    def assert_choices_equal(self, result, test):
        self.assertEqual(list(result), test['expected'])

    def get_choices_for_values_tests(self):
        return (
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk)],
             'expected': [self.james, self.bluesmen]},
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.user_ctype.pk, self.elton.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                         '%s-%s' % (self.group_ctype.pk, self.emos.pk)],
             'expected': [self.james, self.bluesmen],
             'name': 'should ignore values that are not in the querysets'},
        )

    def get_choices_for_request_tests(self):
        return (
            {'fixture': make_get_request('j'),
             'expected': [self.abe, self.rockers, self.bluesmen]},
            {'fixture': make_get_request('q=elton'),
             'expected': [],
             'name': 'should not propose models that are not in the qs'},
        )

    def get_validate_tests(self):
        return (
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
                         '%s-%s' % (self.group_ctype.pk, self.emos.pk)],
             'expected': False},
            {'fixture': ['%s-%s' % (self.user_ctype.pk, self.james.pk),
                         '%s-%s' % (self.group_ctype.pk, self.bluesmen.pk)],
             'expected': True},
            {'fixture': [], 'expected': True},
            {'fixture': ['bla'], 'expected': False},
            {'fixture': ['123123-123123'], 'expected': False},
        )

    def get_autocomplete_html_tests(self):
        return []

    def get_widget_tests(self):
        return (
            {'form_class': FormMock,
             'fixture': 'x=%s-%s' % (self.group_ctype.pk, self.bluesmen.pk),
             'expected_valid': True,
             'expected_data': self.bluesmen},
            {'form_class': FormMock,
             'fixture': 'x=%s-%s' % (self.group_ctype.pk, self.emos.pk),
             'expected_valid': False},
            {'form_class': FormMock,
             'fixture': 'x=12343-2',
             'expected_valid': False},
            {'form_class': FormMock,
             'fixture': 'x=%s-2' % ContentType.objects.get_for_model(Permission).pk,
             'expected_valid': False},
        )

    def test_default_search_fields(self):
        class MyGeneric(autocomplete_light.AutocompleteGenericBase):
            choices = [Group.objects.all()]

        self.assertEqual(MyGeneric.search_fields, [('name',)])
true
true
1c4a1587700058c4fc116b57e305bc04604cf101
168
py
Python
cemm/exceptions.py
klaasnicolaas/python-cemm
fa1d9787bdf4d41e1850015e4d9df833d0b97b07
[ "MIT" ]
1
2022-02-20T17:26:02.000Z
2022-02-20T17:26:02.000Z
cemm/exceptions.py
klaasnicolaas/python-cemm
fa1d9787bdf4d41e1850015e4d9df833d0b97b07
[ "MIT" ]
109
2021-10-02T02:55:42.000Z
2022-03-30T04:32:25.000Z
cemm/exceptions.py
klaasnicolaas/python-cemm
fa1d9787bdf4d41e1850015e4d9df833d0b97b07
[ "MIT" ]
null
null
null
"""Exceptions for CEMM.""" class CEMMError(Exception): """General CEMM exception.""" class CEMMConnectionError(CEMMError): """CEMM connection exception."""
16.8
37
0.690476
class CEMMError(Exception):

class CEMMConnectionError(CEMMError):
true
true
1c4a15ad041bcfb763454dea5efe21d533781450
2,980
py
Python
tests/unit/test_common.py
HemangChothani/google-resumable-media-python
7dc40de34533e4474240fc831b79cee2baa82c6e
[ "Apache-2.0" ]
1
2019-07-30T14:24:08.000Z
2019-07-30T14:24:08.000Z
tests/unit/test_common.py
HemangChothani/google-resumable-media-python
7dc40de34533e4474240fc831b79cee2baa82c6e
[ "Apache-2.0" ]
3
2019-07-07T17:55:56.000Z
2019-08-05T01:13:27.000Z
tests/unit/test_common.py
HemangChothani/google-resumable-media-python
7dc40de34533e4474240fc831b79cee2baa82c6e
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
import pytest

from google.resumable_media import common


class TestInvalidResponse(object):

    def test_constructor(self):
        response = mock.sentinel.response
        error = common.InvalidResponse(
            response, 1, u'a', [b'm'], True)

        assert error.response is response
        assert error.args == (1, u'a', [b'm'], True)


class TestRetryStrategy(object):

    def test_constructor_defaults(self):
        retry_strategy = common.RetryStrategy()
        assert retry_strategy.max_sleep == common.MAX_SLEEP
        assert (
            retry_strategy.max_cumulative_retry == common.MAX_CUMULATIVE_RETRY)
        assert retry_strategy.max_retries is None

    def test_constructor_failure(self):
        with pytest.raises(ValueError) as exc_info:
            common.RetryStrategy(max_cumulative_retry=600.0, max_retries=12)

        exc_info.match(common._SLEEP_RETRY_ERROR_MSG)

    def test_constructor_explicit_bound_cumulative(self):
        max_sleep = 10.0
        max_cumulative_retry = 100.0
        retry_strategy = common.RetryStrategy(
            max_sleep=max_sleep, max_cumulative_retry=max_cumulative_retry)

        assert retry_strategy.max_sleep == max_sleep
        assert retry_strategy.max_cumulative_retry == max_cumulative_retry
        assert retry_strategy.max_retries is None

    def test_constructor_explicit_bound_retries(self):
        max_sleep = 13.75
        max_retries = 14
        retry_strategy = common.RetryStrategy(
            max_sleep=max_sleep, max_retries=max_retries)

        assert retry_strategy.max_sleep == max_sleep
        assert retry_strategy.max_cumulative_retry is None
        assert retry_strategy.max_retries == max_retries

    def test_retry_allowed_bound_cumulative(self):
        retry_strategy = common.RetryStrategy(max_cumulative_retry=100.0)
        assert retry_strategy.retry_allowed(50.0, 10)
        assert retry_strategy.retry_allowed(99.0, 7)
        assert retry_strategy.retry_allowed(100.0, 4)
        assert not retry_strategy.retry_allowed(101.0, 11)
        assert not retry_strategy.retry_allowed(200.0, 6)

    def test_retry_allowed_bound_retries(self):
        retry_strategy = common.RetryStrategy(max_retries=6)
        assert retry_strategy.retry_allowed(1000.0, 5)
        assert retry_strategy.retry_allowed(99.0, 6)
        assert not retry_strategy.retry_allowed(625.5, 7)
37.25
79
0.72349
import mock
import pytest

from google.resumable_media import common


class TestInvalidResponse(object):

    def test_constructor(self):
        response = mock.sentinel.response
        error = common.InvalidResponse(
            response, 1, u'a', [b'm'], True)

        assert error.response is response
        assert error.args == (1, u'a', [b'm'], True)


class TestRetryStrategy(object):

    def test_constructor_defaults(self):
        retry_strategy = common.RetryStrategy()
        assert retry_strategy.max_sleep == common.MAX_SLEEP
        assert (
            retry_strategy.max_cumulative_retry == common.MAX_CUMULATIVE_RETRY)
        assert retry_strategy.max_retries is None

    def test_constructor_failure(self):
        with pytest.raises(ValueError) as exc_info:
            common.RetryStrategy(max_cumulative_retry=600.0, max_retries=12)

        exc_info.match(common._SLEEP_RETRY_ERROR_MSG)

    def test_constructor_explicit_bound_cumulative(self):
        max_sleep = 10.0
        max_cumulative_retry = 100.0
        retry_strategy = common.RetryStrategy(
            max_sleep=max_sleep, max_cumulative_retry=max_cumulative_retry)

        assert retry_strategy.max_sleep == max_sleep
        assert retry_strategy.max_cumulative_retry == max_cumulative_retry
        assert retry_strategy.max_retries is None

    def test_constructor_explicit_bound_retries(self):
        max_sleep = 13.75
        max_retries = 14
        retry_strategy = common.RetryStrategy(
            max_sleep=max_sleep, max_retries=max_retries)

        assert retry_strategy.max_sleep == max_sleep
        assert retry_strategy.max_cumulative_retry is None
        assert retry_strategy.max_retries == max_retries

    def test_retry_allowed_bound_cumulative(self):
        retry_strategy = common.RetryStrategy(max_cumulative_retry=100.0)
        assert retry_strategy.retry_allowed(50.0, 10)
        assert retry_strategy.retry_allowed(99.0, 7)
        assert retry_strategy.retry_allowed(100.0, 4)
        assert not retry_strategy.retry_allowed(101.0, 11)
        assert not retry_strategy.retry_allowed(200.0, 6)

    def test_retry_allowed_bound_retries(self):
        retry_strategy = common.RetryStrategy(max_retries=6)
        assert retry_strategy.retry_allowed(1000.0, 5)
        assert retry_strategy.retry_allowed(99.0, 6)
        assert not retry_strategy.retry_allowed(625.5, 7)
true
true
1c4a160721a6fe7cb7d9b89d16df5a107e587737
1,182
py
Python
ckanext/harvest/logic/auth/__init__.py
alphagov-mirror/ckanext-harvest
be4d134cf2e4d4548c67dc2f61b200948f0f74e0
[ "PostgreSQL" ]
86
2015-01-09T19:21:20.000Z
2022-03-23T07:17:27.000Z
ckanext/harvest/logic/auth/__init__.py
alphagov-mirror/ckanext-harvest
be4d134cf2e4d4548c67dc2f61b200948f0f74e0
[ "PostgreSQL" ]
319
2015-01-13T13:40:08.000Z
2022-03-24T12:13:42.000Z
ckanext/harvest/logic/auth/__init__.py
alphagov-mirror/ckanext-harvest
be4d134cf2e4d4548c67dc2f61b200948f0f74e0
[ "PostgreSQL" ]
154
2015-01-13T21:06:03.000Z
2022-03-15T12:10:57.000Z
from ckan.plugins import toolkit as pt

from ckanext.harvest import model as harvest_model


def user_is_sysadmin(context):
    '''
        Checks if the user defined in the context is a sysadmin

        rtype: boolean
    '''
    model = context['model']
    user = context['user']
    user_obj = model.User.get(user)
    if not user_obj:
        raise pt.ObjectNotFound('User {0} not found'.format(user))

    return user_obj.sysadmin


def _get_object(context, data_dict, name, class_name):
    '''
        return the named item if in the data_dict, or get it from
        model.class_name
    '''
    if name not in context:
        id = data_dict.get('id', None)
        obj = getattr(harvest_model, class_name).get(id)
        if not obj:
            raise pt.ObjectNotFound
    else:
        obj = context[name]
    return obj


def get_source_object(context, data_dict={}):
    return _get_object(context, data_dict, 'source', 'HarvestSource')


def get_job_object(context, data_dict={}):
    return _get_object(context, data_dict, 'job', 'HarvestJob')


def get_obj_object(context, data_dict={}):
    return _get_object(context, data_dict, 'obj', 'HarvestObject')
26.266667
75
0.665821
from ckan.plugins import toolkit as pt

from ckanext.harvest import model as harvest_model


def user_is_sysadmin(context):
    model = context['model']
    user = context['user']
    user_obj = model.User.get(user)
    if not user_obj:
        raise pt.ObjectNotFound('User {0} not found'.format(user))

    return user_obj.sysadmin


def _get_object(context, data_dict, name, class_name):
    if name not in context:
        id = data_dict.get('id', None)
        obj = getattr(harvest_model, class_name).get(id)
        if not obj:
            raise pt.ObjectNotFound
    else:
        obj = context[name]
    return obj


def get_source_object(context, data_dict={}):
    return _get_object(context, data_dict, 'source', 'HarvestSource')


def get_job_object(context, data_dict={}):
    return _get_object(context, data_dict, 'job', 'HarvestJob')


def get_obj_object(context, data_dict={}):
    return _get_object(context, data_dict, 'obj', 'HarvestObject')
true
true
1c4a165fe953944a991af628f8e92d4c59b8d672
1,549
py
Python
ml3d/torch/utils/roipool3d/roipool3d_utils.py
krshrimali/Open3D-ML
e6352ee84d38a4b90c71dd7f376f5570fe849537
[ "MIT" ]
447
2020-10-14T23:16:41.000Z
2021-07-27T06:57:45.000Z
ml3d/torch/utils/roipool3d/roipool3d_utils.py
krshrimali/Open3D-ML
e6352ee84d38a4b90c71dd7f376f5570fe849537
[ "MIT" ]
179
2021-07-27T15:32:33.000Z
2022-03-30T14:32:53.000Z
ml3d/torch/utils/roipool3d/roipool3d_utils.py
krshrimali/Open3D-ML
e6352ee84d38a4b90c71dd7f376f5570fe849537
[ "MIT" ]
92
2021-07-28T13:50:52.000Z
2022-03-30T09:24:33.000Z
import torch
import open3d

if open3d.core.cuda.device_count() > 0:
    from open3d.ml.torch.ops import roi_pool
import numpy as np


def enlarge_box3d(boxes3d, extra_width):
    """Enlarge 3D box.

    Args:
        boxes3d: (N, 7) [x, y, z, h, w, l, ry]
        extra_width: extra width
    """
    if isinstance(boxes3d, np.ndarray):
        large_boxes3d = boxes3d.copy()
    else:
        large_boxes3d = boxes3d.clone()
    large_boxes3d[:, 3:6] += extra_width * 2
    large_boxes3d[:, 1] += extra_width

    return large_boxes3d


def roipool3d_gpu(pts, pts_feature, boxes3d, pool_extra_width, sampled_pt_num=512):
    """Roipool3D GPU.

    Args:
        pts: (B, N, 3)
        pts_feature: (B, N, C)
        boxes3d: (B, M, 7)
        pool_extra_width: float
        sampled_pt_num: int

    Returns:
        pooled_features: (B, M, 512, 3 + C)
        pooled_empty_flag: (B, M)
    """
    if not open3d.core.cuda.device_count() > 0:
        raise NotImplementedError

    batch_size = pts.shape[0]
    pooled_boxes3d = enlarge_box3d(boxes3d.view(-1, 7),
                                   pool_extra_width).view(batch_size, -1, 7)

    pooled_features, pooled_empty_flag = roi_pool(pts.contiguous(),
                                                  pooled_boxes3d.contiguous(),
                                                  pts_feature.contiguous(),
                                                  sampled_pt_num)

    return pooled_features, pooled_empty_flag
28.163636
78
0.542285
import torch
import open3d

if open3d.core.cuda.device_count() > 0:
    from open3d.ml.torch.ops import roi_pool
import numpy as np


def enlarge_box3d(boxes3d, extra_width):
    if isinstance(boxes3d, np.ndarray):
        large_boxes3d = boxes3d.copy()
    else:
        large_boxes3d = boxes3d.clone()
    large_boxes3d[:, 3:6] += extra_width * 2
    large_boxes3d[:, 1] += extra_width

    return large_boxes3d


def roipool3d_gpu(pts, pts_feature, boxes3d, pool_extra_width, sampled_pt_num=512):
    if not open3d.core.cuda.device_count() > 0:
        raise NotImplementedError

    batch_size = pts.shape[0]
    pooled_boxes3d = enlarge_box3d(boxes3d.view(-1, 7),
                                   pool_extra_width).view(batch_size, -1, 7)

    pooled_features, pooled_empty_flag = roi_pool(pts.contiguous(),
                                                  pooled_boxes3d.contiguous(),
                                                  pts_feature.contiguous(),
                                                  sampled_pt_num)

    return pooled_features, pooled_empty_flag
true
true
1c4a18579c83b46e8f6e2e84863af984b6e3501c
2,335
py
Python
apricotlib/raptorx_secstr.py
malvikasharan/APRICOT
529afadfb99fa8249fa4ecfb07253eab892c7a8e
[ "0BSD" ]
5
2016-05-25T12:30:02.000Z
2021-04-11T14:55:32.000Z
apricotlib/raptorx_secstr.py
malvikasharan/APRICOT
529afadfb99fa8249fa4ecfb07253eab892c7a8e
[ "0BSD" ]
1
2017-05-20T07:19:25.000Z
2018-02-05T22:14:12.000Z
apricotlib/raptorx_secstr.py
malvikasharan/APRICOT
529afadfb99fa8249fa4ecfb07253eab892c7a8e
[ "0BSD" ]
6
2016-05-18T07:08:49.000Z
2021-02-20T14:28:55.000Z
#!/usr/bin/env python
# Description = Predict 3 or 8 state secondary structure using RaptorX

import os
import subprocess


class RaptorxSecstrAnalysis(object):
    def __init__(self, selected_proteins, raptorx_path, fasta_path, outpath):
        self._selected_proteins = selected_proteins
        self._raptorx_path = raptorx_path
        self._fasta_path = fasta_path
        self._outpath = outpath
        self._selected_protein_set = set()

    def streamline_raptorx_secstr_analysis(self):
        '''To call from apricot'''
        self.parse_selected_data()
        self.run_raptorx_analysis()
        self.create_job_completion_file()

    def parse_selected_data(self):
        '''Parses selected data for uid'''
        with open(self._selected_proteins, 'r') as in_fh:
            for entry in in_fh:
                if not entry.startswith('Entry'):
                    self._selected_protein_set.add(entry.split('\t')[0])
        return self._selected_protein_set

    def run_raptorx_analysis(self):
        '''Runs RaptorX on the selected uids for 8-state secondary
        structure prediction
        '''
        for files in os.listdir(self._fasta_path):
            if files.split('.')[0] in self._selected_protein_set:
                print("RaptorX 8-state secondary structure analysis for %s"
                      % files)
                subprocess.Popen(
                    ["perl %s %s/%s" % (self._raptorx_path,
                                        self._fasta_path, files)],
                    shell=True).wait()
                subprocess.Popen(["mv *.ss* %s" % self._outpath],
                                 shell=True).wait()
                subprocess.Popen(["mv *.horiz %s" % self._outpath],
                                 shell=True).wait()
                subprocess.Popen(["rm -rf tmp*.%s" % files.split('.')[0]],
                                 shell=True).wait()

    def create_job_completion_file(self):
        with open(self._outpath + '/raptorx_analysis.txt', 'w') as out_fh:
            out_fh.write("Secondary structures for the selected proteins are "
                         "generated by RaptorX.\n")
            out_fh.write("The files generated by the analysis:.\n")
            out_fh.write('\n'.join(os.listdir(self._outpath)))
40.258621
78
0.568737
import os
import subprocess


class RaptorxSecstrAnalysis(object):
    def __init__(self, selected_proteins, raptorx_path, fasta_path, outpath):
        self._selected_proteins = selected_proteins
        self._raptorx_path = raptorx_path
        self._fasta_path = fasta_path
        self._outpath = outpath
        self._selected_protein_set = set()

    def streamline_raptorx_secstr_analysis(self):
        self.parse_selected_data()
        self.run_raptorx_analysis()
        self.create_job_completion_file()

    def parse_selected_data(self):
        with open(self._selected_proteins, 'r') as in_fh:
            for entry in in_fh:
                if not entry.startswith('Entry'):
                    self._selected_protein_set.add(entry.split('\t')[0])
        return self._selected_protein_set

    def run_raptorx_analysis(self):
        for files in os.listdir(self._fasta_path):
            if files.split('.')[0] in self._selected_protein_set:
                print("RaptorX 8-state secondary structure analysis for %s"
                      % files)
                subprocess.Popen(
                    ["perl %s %s/%s" % (self._raptorx_path,
                                        self._fasta_path, files)],
                    shell=True).wait()
                subprocess.Popen(["mv *.ss* %s" % self._outpath],
                                 shell=True).wait()
                subprocess.Popen(["mv *.horiz %s" % self._outpath],
                                 shell=True).wait()
                subprocess.Popen(["rm -rf tmp*.%s" % files.split('.')[0]],
                                 shell=True).wait()

    def create_job_completion_file(self):
        with open(self._outpath + '/raptorx_analysis.txt', 'w') as out_fh:
            out_fh.write("Secondary structures for the selected proteins are "
                         "generated by RaptorX.\n")
            out_fh.write("The files generated by the analysis:.\n")
            out_fh.write('\n'.join(os.listdir(self._outpath)))
true
true
1c4a18e8aea20d95df3d8fe117a47346ed852aa9
357
py
Python
tutorial/gallery.py
bricakeld/dash-docs
a79f52ac88c6ebff10a5b2e0af43e89410372dd4
[ "MIT" ]
null
null
null
tutorial/gallery.py
bricakeld/dash-docs
a79f52ac88c6ebff10a5b2e0af43e89410372dd4
[ "MIT" ]
3
2021-03-31T19:16:27.000Z
2021-12-13T20:27:16.000Z
tutorial/gallery.py
bricakeld/dash-docs
a79f52ac88c6ebff10a5b2e0af43e89410372dd4
[ "MIT" ]
1
2022-03-18T09:41:34.000Z
2022-03-18T09:41:34.000Z
# -*- coding: utf-8 -*-
import dash_html_components as html
import dash_core_components as dcc
from textwrap import dedent

layout = html.Div(className='gallery', children=[
    dcc.Markdown(dedent('''

    ## The Dash App Gallery has moved!

    It is now at [https://dash-gallery.plotly.host/Portal/](https://dash-gallery.plotly.host/Portal/)

    '''))
])
27.461538
101
0.697479
import dash_html_components as html
import dash_core_components as dcc
from textwrap import dedent

layout = html.Div(className='gallery', children=[
    dcc.Markdown(dedent('''

    ## The Dash App Gallery has moved!

    It is now at [https://dash-gallery.plotly.host/Portal/](https://dash-gallery.plotly.host/Portal/)

    '''))
])
true
true
1c4a1ab1f277c42ed8aa39fecf9d2cbfe95ad12c
1,883
py
Python
examples/ramps.py
plecto/motorway
ce42b77a9a2d48cf1a9fd2f3bc405accb98030df
[ "Apache-2.0" ]
166
2015-01-19T05:39:24.000Z
2022-02-26T15:09:34.000Z
examples/ramps.py
plecto/motorway
ce42b77a9a2d48cf1a9fd2f3bc405accb98030df
[ "Apache-2.0" ]
17
2016-07-29T10:23:24.000Z
2022-01-04T10:09:01.000Z
examples/ramps.py
plecto/motorway
ce42b77a9a2d48cf1a9fd2f3bc405accb98030df
[ "Apache-2.0" ]
21
2016-09-08T12:26:50.000Z
2021-07-08T09:54:16.000Z
import time
import uuid

from motorway.contrib.amazon_kinesis.ramps import KinesisRamp
from motorway.contrib.amazon_kinesis.intersections import KinesisInsertIntersection
from motorway.contrib.amazon_sqs.ramps import SQSRamp
from motorway.messages import Message
from motorway.ramp import Ramp

import random


class WordRamp(Ramp):
    sentences = [
        "Oak is strong and also gives shade.",
        "Cats and dogs each hate the other.",
        "The pipe began to rust while new.",
        "Open the crate but don't break the glass.",
        "Add the sum to the product of these three.",
        "Thieves who rob friends deserve jail.",
        "The ripe taste of cheese improves with age.",
        "Act on these orders with great speed.",
        "The hog crawled under the high fence.",
        "Move the vat over the hot fire.",
    ]

    def __init__(self, *args, **kwargs):
        super(WordRamp, self).__init__(*args, **kwargs)
        self.limit = 10000
        self.progress = 1

    def next(self):
        # yield Message(uuid.uuid4().int, self.sentences[random.randint(0, len(self.sentences) - 1)])
        if self.progress <= self.limit:
            self.progress += 1
            # time.sleep(10)
            sentence = self.sentences[random.randint(0, len(self.sentences) - 1)]
            yield Message(uuid.uuid4().int, sentence, grouping_value=sentence)
        else:
            time.sleep(1)

    def success(self, _id):
        pass
        # print "WordRamp %s was successful" % _id

    def failed(self, _id):
        print("WordRamp %s has failed" % _id)

    def should_run(self):
        return True


class ExampleSQSRamp(SQSRamp):
    queue_name = "tutorial_motorway"


class ExampleKinesisRamp(KinesisRamp):
    stream_name = "data-pipeline-test"


class ExampleKinesisIntersection(KinesisInsertIntersection):
    stream_name = "data-pipeline-test"
30.868852
100
0.659586
import time
import uuid
from motorway.contrib.amazon_kinesis.ramps import KinesisRamp
from motorway.contrib.amazon_kinesis.intersections import KinesisInsertIntersection
from motorway.contrib.amazon_sqs.ramps import SQSRamp
from motorway.messages import Message
from motorway.ramp import Ramp
import random


class WordRamp(Ramp):
    sentences = [
        "Oak is strong and also gives shade.",
        "Cats and dogs each hate the other.",
        "The pipe began to rust while new.",
        "Open the crate but don't break the glass.",
        "Add the sum to the product of these three.",
        "Thieves who rob friends deserve jail.",
        "The ripe taste of cheese improves with age.",
        "Act on these orders with great speed.",
        "The hog crawled under the high fence.",
        "Move the vat over the hot fire.",
    ]

    def __init__(self, *args, **kwargs):
        super(WordRamp, self).__init__(*args, **kwargs)
        self.limit = 10000
        self.progress = 1

    def next(self):
        # yield Message(uuid.uuid4().int, self.sentences[random.randint(0, len(self.sentences) -1)])
        if self.progress <= self.limit:
            self.progress += 1
            # time.sleep(10)
            sentence = self.sentences[random.randint(0, len(self.sentences) -1)]
            yield Message(uuid.uuid4().int, sentence, grouping_value=sentence)
        else:
            time.sleep(1)

    def success(self, _id):
        pass
        #print "WordRamp %s was successful" % _id

    def failed(self, _id):
        print("WordRamp %s has failed" % _id)

    def should_run(self):
        return True


class ExampleSQSRamp(SQSRamp):
    queue_name = "tutorial_motorway"


class ExampleKinesisRamp(KinesisRamp):
    stream_name = "data-pipeline-test"


class ExampleKinesisIntersection(KinesisInsertIntersection):
    stream_name = "data-pipeline-test"
true
true
1c4a1e994f54a55d96dde9d569cf6f8d99b2a79c
13,569
py
Python
src/main/python/ui/batch.py
bmiller/beqdesigner
36d0c780507a564536038e2c9fc3b03b75dedaf4
[ "MIT" ]
16
2019-04-12T00:04:56.000Z
2022-03-15T14:26:56.000Z
src/main/python/ui/batch.py
bmiller/beqdesigner
36d0c780507a564536038e2c9fc3b03b75dedaf4
[ "MIT" ]
400
2018-08-27T10:04:00.000Z
2022-03-15T21:32:33.000Z
src/main/python/ui/batch.py
bmiller/beqdesigner
36d0c780507a564536038e2c9fc3b03b75dedaf4
[ "MIT" ]
6
2018-09-19T21:02:27.000Z
2020-10-18T04:11:01.000Z
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'batch.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_batchExtractDialog(object): def setupUi(self, batchExtractDialog): batchExtractDialog.setObjectName("batchExtractDialog") batchExtractDialog.resize(1727, 925) self.verticalLayout = QtWidgets.QVBoxLayout(batchExtractDialog) self.verticalLayout.setObjectName("verticalLayout") self.controlFrame = QtWidgets.QFrame(batchExtractDialog) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.controlFrame.sizePolicy().hasHeightForWidth()) self.controlFrame.setSizePolicy(sizePolicy) self.controlFrame.setFrameShape(QtWidgets.QFrame.Panel) self.controlFrame.setFrameShadow(QtWidgets.QFrame.Sunken) self.controlFrame.setObjectName("controlFrame") self.gridLayout = QtWidgets.QGridLayout(self.controlFrame) self.gridLayout.setObjectName("gridLayout") self.controlsLayout = QtWidgets.QGridLayout() self.controlsLayout.setObjectName("controlsLayout") self.threads = QtWidgets.QSpinBox(self.controlFrame) self.threads.setMinimum(1) self.threads.setMaximum(64) self.threads.setProperty("value", 1) self.threads.setObjectName("threads") self.controlsLayout.addWidget(self.threads, 3, 1, 1, 1) self.searchButton = QtWidgets.QPushButton(self.controlFrame) self.searchButton.setEnabled(False) self.searchButton.setObjectName("searchButton") self.controlsLayout.addWidget(self.searchButton, 5, 1, 1, 1) self.outputDirLabel = QtWidgets.QLabel(self.controlFrame) self.outputDirLabel.setObjectName("outputDirLabel") self.controlsLayout.addWidget(self.outputDirLabel, 1, 0, 1, 1) self.threadsLabel = QtWidgets.QLabel(self.controlFrame) self.threadsLabel.setObjectName("threadsLabel") self.controlsLayout.addWidget(self.threadsLabel, 3, 0, 1, 1) self.filterLabel = QtWidgets.QLabel(self.controlFrame) self.filterLabel.setToolTip("") self.filterLabel.setObjectName("filterLabel") self.controlsLayout.addWidget(self.filterLabel, 0, 0, 1, 1) self.extractButton = QtWidgets.QPushButton(self.controlFrame) self.extractButton.setEnabled(False) self.extractButton.setObjectName("extractButton") self.controlsLayout.addWidget(self.extractButton, 5, 2, 1, 1) self.resetButton = QtWidgets.QPushButton(self.controlFrame) self.resetButton.setEnabled(False) self.resetButton.setObjectName("resetButton") self.controlsLayout.addWidget(self.resetButton, 5, 3, 1, 1) self.outputDirPicker = QtWidgets.QToolButton(self.controlFrame) self.outputDirPicker.setObjectName("outputDirPicker") self.controlsLayout.addWidget(self.outputDirPicker, 1, 4, 1, 1) self.outputDir = QtWidgets.QLineEdit(self.controlFrame) self.outputDir.setEnabled(False) self.outputDir.setObjectName("outputDir") self.controlsLayout.addWidget(self.outputDir, 1, 1, 1, 3) self.filter = QtWidgets.QLineEdit(self.controlFrame) font = QtGui.QFont() font.setFamily("Consolas") self.filter.setFont(font) self.filter.setText("") self.filter.setObjectName("filter") self.controlsLayout.addWidget(self.filter, 0, 1, 1, 3) self.monoMix = QtWidgets.QCheckBox(self.controlFrame) self.monoMix.setChecked(True) self.monoMix.setObjectName("monoMix") self.controlsLayout.addWidget(self.monoMix, 5, 0, 1, 1) self.controlsLayout.setColumnStretch(1, 1) 
self.controlsLayout.setColumnStretch(2, 1) self.controlsLayout.setColumnStretch(3, 1) self.gridLayout.addLayout(self.controlsLayout, 0, 0, 1, 1) self.verticalLayout.addWidget(self.controlFrame) self.resultsFrame = QtWidgets.QFrame(batchExtractDialog) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.resultsFrame.sizePolicy().hasHeightForWidth()) self.resultsFrame.setSizePolicy(sizePolicy) self.resultsFrame.setFrameShape(QtWidgets.QFrame.Box) self.resultsFrame.setFrameShadow(QtWidgets.QFrame.Sunken) self.resultsFrame.setObjectName("resultsFrame") self.gridLayout_2 = QtWidgets.QGridLayout(self.resultsFrame) self.gridLayout_2.setObjectName("gridLayout_2") self.resultsTitle = QtWidgets.QLabel(self.resultsFrame) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.resultsTitle.setFont(font) self.resultsTitle.setFrameShape(QtWidgets.QFrame.Box) self.resultsTitle.setFrameShadow(QtWidgets.QFrame.Sunken) self.resultsTitle.setAlignment(QtCore.Qt.AlignCenter) self.resultsTitle.setObjectName("resultsTitle") self.gridLayout_2.addWidget(self.resultsTitle, 0, 0, 1, 1) self.resultsScrollArea = QtWidgets.QScrollArea(self.resultsFrame) self.resultsScrollArea.setWidgetResizable(True) self.resultsScrollArea.setObjectName("resultsScrollArea") self.resultsScrollAreaContents = QtWidgets.QWidget() self.resultsScrollAreaContents.setGeometry(QtCore.QRect(0, 0, 1669, 660)) self.resultsScrollAreaContents.setObjectName("resultsScrollAreaContents") self.resultsScrollLayout = QtWidgets.QGridLayout(self.resultsScrollAreaContents) self.resultsScrollLayout.setObjectName("resultsScrollLayout") self.resultsLayout = QtWidgets.QGridLayout() self.resultsLayout.setObjectName("resultsLayout") self.statusHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setUnderline(True) self.statusHeaderLabel.setFont(font) self.statusHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.statusHeaderLabel.setObjectName("statusHeaderLabel") self.resultsLayout.addWidget(self.statusHeaderLabel, 0, 0, 1, 1) self.probeHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setItalic(True) font.setUnderline(True) self.probeHeaderLabel.setFont(font) self.probeHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.probeHeaderLabel.setObjectName("probeHeaderLabel") self.resultsLayout.addWidget(self.probeHeaderLabel, 0, 2, 1, 1) self.streamHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.streamHeaderLabel.setFont(font) self.streamHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.streamHeaderLabel.setObjectName("streamHeaderLabel") self.resultsLayout.addWidget(self.streamHeaderLabel, 0, 3, 1, 1) self.inputFileHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.inputFileHeaderLabel.setFont(font) self.inputFileHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.inputFileHeaderLabel.setObjectName("inputFileHeaderLabel") self.resultsLayout.addWidget(self.inputFileHeaderLabel, 0, 1, 1, 1) self.channelsHeaderLabel = 
QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.channelsHeaderLabel.setFont(font) self.channelsHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.channelsHeaderLabel.setObjectName("channelsHeaderLabel") self.resultsLayout.addWidget(self.channelsHeaderLabel, 0, 4, 1, 1) self.outputFileHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.outputFileHeaderLabel.setFont(font) self.outputFileHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.outputFileHeaderLabel.setObjectName("outputFileHeaderLabel") self.resultsLayout.addWidget(self.outputFileHeaderLabel, 0, 6, 1, 1) self.progressHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.progressHeaderLabel.setFont(font) self.progressHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.progressHeaderLabel.setObjectName("progressHeaderLabel") self.resultsLayout.addWidget(self.progressHeaderLabel, 0, 8, 1, 1) self.lfeHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.lfeHeaderLabel.setFont(font) self.lfeHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.lfeHeaderLabel.setObjectName("lfeHeaderLabel") self.resultsLayout.addWidget(self.lfeHeaderLabel, 0, 5, 1, 1) self.ffmpegCliLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setItalic(True) font.setUnderline(True) self.ffmpegCliLabel.setFont(font) self.ffmpegCliLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.ffmpegCliLabel.setObjectName("ffmpegCliLabel") self.resultsLayout.addWidget(self.ffmpegCliLabel, 0, 7, 1, 1) self.resultsLayout.setColumnStretch(1, 1) self.resultsLayout.setColumnStretch(3, 2) self.resultsLayout.setColumnStretch(6, 1) self.resultsLayout.setColumnStretch(8, 1) self.resultsScrollLayout.addLayout(self.resultsLayout, 0, 0, 1, 1) self.resultsScrollArea.setWidget(self.resultsScrollAreaContents) self.gridLayout_2.addWidget(self.resultsScrollArea, 1, 0, 1, 1) self.verticalLayout.addWidget(self.resultsFrame) self.retranslateUi(batchExtractDialog) self.searchButton.clicked.connect(batchExtractDialog.search) self.extractButton.clicked.connect(batchExtractDialog.extract) self.outputDirPicker.clicked.connect(batchExtractDialog.select_output) self.filter.textChanged['QString'].connect(batchExtractDialog.enable_search) self.resetButton.clicked.connect(batchExtractDialog.reset_batch) self.threads.valueChanged['int'].connect(batchExtractDialog.change_pool_size) QtCore.QMetaObject.connectSlotsByName(batchExtractDialog) def retranslateUi(self, batchExtractDialog): _translate = QtCore.QCoreApplication.translate batchExtractDialog.setWindowTitle(_translate("batchExtractDialog", "Extract Audio")) self.searchButton.setText(_translate("batchExtractDialog", "Search")) self.outputDirLabel.setText(_translate("batchExtractDialog", "Output Directory")) self.threadsLabel.setText(_translate("batchExtractDialog", "Threads")) self.filterLabel.setText(_translate("batchExtractDialog", "Search Filter")) 
self.extractButton.setText(_translate("batchExtractDialog", "Extract")) self.resetButton.setText(_translate("batchExtractDialog", "Reset")) self.outputDirPicker.setText(_translate("batchExtractDialog", "...")) self.filter.setPlaceholderText(_translate("batchExtractDialog", "Enter 1 or more search filters, e.g. w:/films/*.mkv;y:/videos/**/*.m2ts")) self.monoMix.setText(_translate("batchExtractDialog", "Mix to Mono?")) self.resultsTitle.setText(_translate("batchExtractDialog", "Results")) self.statusHeaderLabel.setText(_translate("batchExtractDialog", "Status")) self.probeHeaderLabel.setText(_translate("batchExtractDialog", "Probe")) self.streamHeaderLabel.setText(_translate("batchExtractDialog", "Stream")) self.inputFileHeaderLabel.setText(_translate("batchExtractDialog", "Input File")) self.channelsHeaderLabel.setText(_translate("batchExtractDialog", "Channels")) self.outputFileHeaderLabel.setText(_translate("batchExtractDialog", "Output File")) self.progressHeaderLabel.setText(_translate("batchExtractDialog", "Progress")) self.lfeHeaderLabel.setText(_translate("batchExtractDialog", "LFE")) self.ffmpegCliLabel.setText(_translate("batchExtractDialog", "ffmpeg"))
57.012605
147
0.72614
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_batchExtractDialog(object): def setupUi(self, batchExtractDialog): batchExtractDialog.setObjectName("batchExtractDialog") batchExtractDialog.resize(1727, 925) self.verticalLayout = QtWidgets.QVBoxLayout(batchExtractDialog) self.verticalLayout.setObjectName("verticalLayout") self.controlFrame = QtWidgets.QFrame(batchExtractDialog) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.controlFrame.sizePolicy().hasHeightForWidth()) self.controlFrame.setSizePolicy(sizePolicy) self.controlFrame.setFrameShape(QtWidgets.QFrame.Panel) self.controlFrame.setFrameShadow(QtWidgets.QFrame.Sunken) self.controlFrame.setObjectName("controlFrame") self.gridLayout = QtWidgets.QGridLayout(self.controlFrame) self.gridLayout.setObjectName("gridLayout") self.controlsLayout = QtWidgets.QGridLayout() self.controlsLayout.setObjectName("controlsLayout") self.threads = QtWidgets.QSpinBox(self.controlFrame) self.threads.setMinimum(1) self.threads.setMaximum(64) self.threads.setProperty("value", 1) self.threads.setObjectName("threads") self.controlsLayout.addWidget(self.threads, 3, 1, 1, 1) self.searchButton = QtWidgets.QPushButton(self.controlFrame) self.searchButton.setEnabled(False) self.searchButton.setObjectName("searchButton") self.controlsLayout.addWidget(self.searchButton, 5, 1, 1, 1) self.outputDirLabel = QtWidgets.QLabel(self.controlFrame) self.outputDirLabel.setObjectName("outputDirLabel") self.controlsLayout.addWidget(self.outputDirLabel, 1, 0, 1, 1) self.threadsLabel = QtWidgets.QLabel(self.controlFrame) self.threadsLabel.setObjectName("threadsLabel") self.controlsLayout.addWidget(self.threadsLabel, 3, 0, 1, 1) self.filterLabel = QtWidgets.QLabel(self.controlFrame) self.filterLabel.setToolTip("") self.filterLabel.setObjectName("filterLabel") self.controlsLayout.addWidget(self.filterLabel, 0, 0, 1, 1) self.extractButton = QtWidgets.QPushButton(self.controlFrame) self.extractButton.setEnabled(False) self.extractButton.setObjectName("extractButton") self.controlsLayout.addWidget(self.extractButton, 5, 2, 1, 1) self.resetButton = QtWidgets.QPushButton(self.controlFrame) self.resetButton.setEnabled(False) self.resetButton.setObjectName("resetButton") self.controlsLayout.addWidget(self.resetButton, 5, 3, 1, 1) self.outputDirPicker = QtWidgets.QToolButton(self.controlFrame) self.outputDirPicker.setObjectName("outputDirPicker") self.controlsLayout.addWidget(self.outputDirPicker, 1, 4, 1, 1) self.outputDir = QtWidgets.QLineEdit(self.controlFrame) self.outputDir.setEnabled(False) self.outputDir.setObjectName("outputDir") self.controlsLayout.addWidget(self.outputDir, 1, 1, 1, 3) self.filter = QtWidgets.QLineEdit(self.controlFrame) font = QtGui.QFont() font.setFamily("Consolas") self.filter.setFont(font) self.filter.setText("") self.filter.setObjectName("filter") self.controlsLayout.addWidget(self.filter, 0, 1, 1, 3) self.monoMix = QtWidgets.QCheckBox(self.controlFrame) self.monoMix.setChecked(True) self.monoMix.setObjectName("monoMix") self.controlsLayout.addWidget(self.monoMix, 5, 0, 1, 1) self.controlsLayout.setColumnStretch(1, 1) self.controlsLayout.setColumnStretch(2, 1) self.controlsLayout.setColumnStretch(3, 1) self.gridLayout.addLayout(self.controlsLayout, 0, 0, 1, 1) self.verticalLayout.addWidget(self.controlFrame) self.resultsFrame = QtWidgets.QFrame(batchExtractDialog) sizePolicy = 
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.resultsFrame.sizePolicy().hasHeightForWidth()) self.resultsFrame.setSizePolicy(sizePolicy) self.resultsFrame.setFrameShape(QtWidgets.QFrame.Box) self.resultsFrame.setFrameShadow(QtWidgets.QFrame.Sunken) self.resultsFrame.setObjectName("resultsFrame") self.gridLayout_2 = QtWidgets.QGridLayout(self.resultsFrame) self.gridLayout_2.setObjectName("gridLayout_2") self.resultsTitle = QtWidgets.QLabel(self.resultsFrame) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.resultsTitle.setFont(font) self.resultsTitle.setFrameShape(QtWidgets.QFrame.Box) self.resultsTitle.setFrameShadow(QtWidgets.QFrame.Sunken) self.resultsTitle.setAlignment(QtCore.Qt.AlignCenter) self.resultsTitle.setObjectName("resultsTitle") self.gridLayout_2.addWidget(self.resultsTitle, 0, 0, 1, 1) self.resultsScrollArea = QtWidgets.QScrollArea(self.resultsFrame) self.resultsScrollArea.setWidgetResizable(True) self.resultsScrollArea.setObjectName("resultsScrollArea") self.resultsScrollAreaContents = QtWidgets.QWidget() self.resultsScrollAreaContents.setGeometry(QtCore.QRect(0, 0, 1669, 660)) self.resultsScrollAreaContents.setObjectName("resultsScrollAreaContents") self.resultsScrollLayout = QtWidgets.QGridLayout(self.resultsScrollAreaContents) self.resultsScrollLayout.setObjectName("resultsScrollLayout") self.resultsLayout = QtWidgets.QGridLayout() self.resultsLayout.setObjectName("resultsLayout") self.statusHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setUnderline(True) self.statusHeaderLabel.setFont(font) self.statusHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.statusHeaderLabel.setObjectName("statusHeaderLabel") self.resultsLayout.addWidget(self.statusHeaderLabel, 0, 0, 1, 1) self.probeHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setItalic(True) font.setUnderline(True) self.probeHeaderLabel.setFont(font) self.probeHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.probeHeaderLabel.setObjectName("probeHeaderLabel") self.resultsLayout.addWidget(self.probeHeaderLabel, 0, 2, 1, 1) self.streamHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.streamHeaderLabel.setFont(font) self.streamHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.streamHeaderLabel.setObjectName("streamHeaderLabel") self.resultsLayout.addWidget(self.streamHeaderLabel, 0, 3, 1, 1) self.inputFileHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.inputFileHeaderLabel.setFont(font) self.inputFileHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.inputFileHeaderLabel.setObjectName("inputFileHeaderLabel") self.resultsLayout.addWidget(self.inputFileHeaderLabel, 0, 1, 1, 1) self.channelsHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.channelsHeaderLabel.setFont(font) 
self.channelsHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.channelsHeaderLabel.setObjectName("channelsHeaderLabel") self.resultsLayout.addWidget(self.channelsHeaderLabel, 0, 4, 1, 1) self.outputFileHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.outputFileHeaderLabel.setFont(font) self.outputFileHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.outputFileHeaderLabel.setObjectName("outputFileHeaderLabel") self.resultsLayout.addWidget(self.outputFileHeaderLabel, 0, 6, 1, 1) self.progressHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.progressHeaderLabel.setFont(font) self.progressHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.progressHeaderLabel.setObjectName("progressHeaderLabel") self.resultsLayout.addWidget(self.progressHeaderLabel, 0, 8, 1, 1) self.lfeHeaderLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setBold(False) font.setItalic(True) font.setUnderline(True) font.setWeight(50) self.lfeHeaderLabel.setFont(font) self.lfeHeaderLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.lfeHeaderLabel.setObjectName("lfeHeaderLabel") self.resultsLayout.addWidget(self.lfeHeaderLabel, 0, 5, 1, 1) self.ffmpegCliLabel = QtWidgets.QLabel(self.resultsScrollAreaContents) font = QtGui.QFont() font.setItalic(True) font.setUnderline(True) self.ffmpegCliLabel.setFont(font) self.ffmpegCliLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.ffmpegCliLabel.setObjectName("ffmpegCliLabel") self.resultsLayout.addWidget(self.ffmpegCliLabel, 0, 7, 1, 1) self.resultsLayout.setColumnStretch(1, 1) self.resultsLayout.setColumnStretch(3, 2) self.resultsLayout.setColumnStretch(6, 1) self.resultsLayout.setColumnStretch(8, 1) self.resultsScrollLayout.addLayout(self.resultsLayout, 0, 0, 1, 1) self.resultsScrollArea.setWidget(self.resultsScrollAreaContents) self.gridLayout_2.addWidget(self.resultsScrollArea, 1, 0, 1, 1) self.verticalLayout.addWidget(self.resultsFrame) self.retranslateUi(batchExtractDialog) self.searchButton.clicked.connect(batchExtractDialog.search) self.extractButton.clicked.connect(batchExtractDialog.extract) self.outputDirPicker.clicked.connect(batchExtractDialog.select_output) self.filter.textChanged['QString'].connect(batchExtractDialog.enable_search) self.resetButton.clicked.connect(batchExtractDialog.reset_batch) self.threads.valueChanged['int'].connect(batchExtractDialog.change_pool_size) QtCore.QMetaObject.connectSlotsByName(batchExtractDialog) def retranslateUi(self, batchExtractDialog): _translate = QtCore.QCoreApplication.translate batchExtractDialog.setWindowTitle(_translate("batchExtractDialog", "Extract Audio")) self.searchButton.setText(_translate("batchExtractDialog", "Search")) self.outputDirLabel.setText(_translate("batchExtractDialog", "Output Directory")) self.threadsLabel.setText(_translate("batchExtractDialog", "Threads")) self.filterLabel.setText(_translate("batchExtractDialog", "Search Filter")) self.extractButton.setText(_translate("batchExtractDialog", "Extract")) self.resetButton.setText(_translate("batchExtractDialog", "Reset")) self.outputDirPicker.setText(_translate("batchExtractDialog", "...")) 
self.filter.setPlaceholderText(_translate("batchExtractDialog", "Enter 1 or more search filters, e.g. w:/films/*.mkv;y:/videos/**/*.m2ts")) self.monoMix.setText(_translate("batchExtractDialog", "Mix to Mono?")) self.resultsTitle.setText(_translate("batchExtractDialog", "Results")) self.statusHeaderLabel.setText(_translate("batchExtractDialog", "Status")) self.probeHeaderLabel.setText(_translate("batchExtractDialog", "Probe")) self.streamHeaderLabel.setText(_translate("batchExtractDialog", "Stream")) self.inputFileHeaderLabel.setText(_translate("batchExtractDialog", "Input File")) self.channelsHeaderLabel.setText(_translate("batchExtractDialog", "Channels")) self.outputFileHeaderLabel.setText(_translate("batchExtractDialog", "Output File")) self.progressHeaderLabel.setText(_translate("batchExtractDialog", "Progress")) self.lfeHeaderLabel.setText(_translate("batchExtractDialog", "LFE")) self.ffmpegCliLabel.setText(_translate("batchExtractDialog", "ffmpeg"))
true
true
1c4a200a2dc8929dcf29774d5051e237b10a7a33
69,048
py
Python
tensorflow/python/ops/math_ops.py
seyoung-hyun/tensorflow
2ac978d2532dd359dd7ebbd27ac13dfa147d755d
[ "Apache-2.0" ]
1
2018-10-16T07:59:09.000Z
2018-10-16T07:59:09.000Z
tensorflow/python/ops/math_ops.py
seyoung-hyun/tensorflow
2ac978d2532dd359dd7ebbd27ac13dfa147d755d
[ "Apache-2.0" ]
null
null
null
tensorflow/python/ops/math_ops.py
seyoung-hyun/tensorflow
2ac978d2532dd359dd7ebbd27ac13dfa147d755d
[ "Apache-2.0" ]
1
2018-10-16T07:58:38.000Z
2018-10-16T07:58:38.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Note: Elementwise binary operations in TensorFlow follow [numpy-style broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). ## Arithmetic Operators TensorFlow provides several operations that you can use to add basic arithmetic operators to your graph. @@add @@sub @@mul @@multiply @@scalar_mul @@div @@divide @@truediv @@floordiv @@mod @@cross ## Basic Math Functions TensorFlow provides several operations that you can use to add basic mathematical functions to your graph. @@add_n @@abs @@neg @@negative @@sign @@inv @@square @@round @@sqrt @@rsqrt @@pow @@exp @@log @@log1p @@ceil @@floor @@maximum @@minimum @@cos @@sin @@lbeta @@tan @@acos @@asin @@atan @@lgamma @@digamma @@erf @@erfc @@squared_difference @@igamma @@igammac @@zeta @@polygamma @@betainc ## Matrix Math Functions TensorFlow provides several operations that you can use to add linear algebra functions on matrices to your graph. @@diag @@diag_part @@trace @@transpose @@eye @@matrix_diag @@matrix_diag_part @@matrix_band_part @@matrix_set_diag @@matrix_transpose @@matmul @@batch_matmul @@matrix_determinant @@matrix_inverse @@cholesky @@cholesky_solve @@matrix_solve @@matrix_triangular_solve @@matrix_solve_ls @@self_adjoint_eig @@self_adjoint_eigvals @@svd ## Complex Number Functions TensorFlow provides several operations that you can use to add complex number functions to your graph. @@complex @@complex_abs @@conj @@imag @@real ## Fourier Transform Functions TensorFlow provides several operations that you can use to add discrete Fourier transform functions to your graph. @@fft @@ifft @@fft2d @@ifft2d @@fft3d @@ifft3d ## Reduction TensorFlow provides several operations that you can use to perform common math computations that reduce various dimensions of a tensor. @@reduce_sum @@reduce_prod @@reduce_min @@reduce_max @@reduce_mean @@reduce_all @@reduce_any @@reduce_logsumexp @@count_nonzero @@accumulate_n @@einsum ## Scan TensorFlow provides several operations that you can use to perform scans (running totals) across one axis of a tensor. @@cumsum @@cumprod ## Segmentation TensorFlow provides several operations that you can use to perform common math computations on tensor segments. Here a segmentation is a partitioning of a tensor along the first dimension, i.e. it defines a mapping from the first dimension onto `segment_ids`. The `segment_ids` tensor should be the size of the first dimension, `d0`, with consecutive IDs in the range `0` to `k`, where `k<d0`. In particular, a segmentation of a matrix tensor is a mapping of rows to segments. 
For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) tf.segment_sum(c, tf.constant([0, 0, 1])) ==> [[0 0 0 0] [5 6 7 8]] ``` @@segment_sum @@segment_prod @@segment_min @@segment_max @@segment_mean @@unsorted_segment_sum @@sparse_segment_sum @@sparse_segment_mean @@sparse_segment_sqrt_n ## Sequence Comparison and Indexing TensorFlow provides several operations that you can use to add sequence comparison and index extraction to your graph. You can use these operations to determine sequence differences and determine the indexes of specific values in a tensor. @@argmin @@argmax @@setdiff1d @@where @@unique @@edit_distance @@invert_permutation """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import common_shapes from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_util from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_control_flow_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import state_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_math_ops import * # pylint: enable=wildcard-import # Aliases for some automatically-generated names. argmax = gen_math_ops.arg_max argmin = gen_math_ops.arg_min linspace = gen_math_ops.lin_space # pylint: disable=anomalous-backslash-in-string,protected-access def abs(x, name=None): """Computes the absolute value of a tensor. Given a tensor of real numbers `x`, this operation returns a tensor containing the absolute value of each element in `x`. For example, if x is an input element and y is an output element, this operation computes \\\\(y = |x|\\\\). See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex number. Args: x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`, or `int64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` the same size and type as `x` with absolute values. """ with ops.name_scope(name, "Abs", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): if x.values.dtype in (dtypes.complex64, dtypes.complex128): x_abs = gen_math_ops.complex_abs(x.values, Tout=x.values.dtype.real_dtype, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_abs, shape=x.shape) x_abs = gen_math_ops._abs(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_abs, shape=x.shape) else: x = ops.convert_to_tensor(x, name="x") if x.dtype in (dtypes.complex64, dtypes.complex128): return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name) return gen_math_ops._abs(x, name=name) def divide(x, y, name=None): """Computes Python style division of `x` by `y`.""" with ops.name_scope(name, "Divide", [x]) as name: return x / y # Make Python Aliases multiply = gen_math_ops.mul subtract = gen_math_ops.sub negative = gen_math_ops.neg def neg(x, name=None): """Computes numerical negative value element-wise. I.e., \\(y = -x\\). Args: x: A `Tensor` or `SparseTensor`. 
Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. """ with ops.name_scope(name, "Neg", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_neg = gen_math_ops.neg(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_neg, shape=x.shape) else: return gen_math_ops.neg(x, name=name) def sign(x, name=None): """Returns an element-wise indication of the sign of a number. `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. """ with ops.name_scope(name, "Sign", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_sign = gen_math_ops.sign(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_sign, shape=x.shape) else: return gen_math_ops.sign(x, name=name) def square(x, name=None): """Computes square of x element-wise. I.e., \\(y = x * x = x^2\\). Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`. Has the same type as `x`. """ with ops.name_scope(name, "Square", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_square = gen_math_ops.square(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_square, shape=x.shape) else: return gen_math_ops.square(x, name=name) def sqrt(x, name=None): """Computes square root of x element-wise. I.e., \\(y = \sqrt{x} = x^{1/2}\\). Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. """ with ops.name_scope(name, "Sqrt", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_sqrt = gen_math_ops.sqrt(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_sqrt, shape=x.shape) else: return gen_math_ops.sqrt(x, name=name) def erf(x, name=None): """Computes the Gauss error function of `x` element-wise. Args: x: A `Tensor` of `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. """ with ops.name_scope(name, "Erf", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_erf = gen_math_ops.erf(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_erf, shape=x.shape) else: return gen_math_ops.erf(x, name=name) def complex_abs(x, name=None): r"""Computes the complex absolute value of a tensor. Given a tensor `x` of complex numbers, this operation returns a tensor of type `float32` or `float64` that is the absolute value of each element in `x`. All elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute value is computed as \\( \sqrt{a^2 + b^2}\\). 
For example: ``` # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]] tf.complex_abs(x) ==> [5.25594902, 6.60492229] ``` Args: x: A `Tensor` of type `complex64` or `complex128`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`. """ return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name) def scalar_mul(scalar, x): """Multiplies a scalar times a `Tensor` or `IndexedSlices` object. Intended for use in gradient code which might deal with `IndexedSlices` objects, which are easy to multiply by a scalar but more expensive to multiply with arbitrary tensors. Args: scalar: A 0-D scalar `Tensor`. Must have known shape. x: A `Tensor` or `IndexedSlices` to be scaled. Returns: `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`. Raises: ValueError: if scalar is not a 0-D `scalar`. """ scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype, name="scalar") shape = scalar.get_shape() if shape.ndims == 0: if isinstance(x, ops.IndexedSlices): return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape) else: return scalar * x else: raise ValueError("Only scalar multiply works, got shape %s" % shape) def pow(x, y, name=None): """Computes the power of one value to another. Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for corresponding elements in `x` and `y`. For example: ``` # tensor 'x' is [[2, 2], [3, 3]] # tensor 'y' is [[8, 16], [2, 3]] tf.pow(x, y) ==> [[256, 65536], [9, 27]] ``` Args: x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`, or `complex128`. y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`, or `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. """ with ops.name_scope(name, "Pow", [x]) as name: return gen_math_ops._pow(x, y, name=name) def complex(real, imag, name=None): """Converts two real numbers to a complex number. Given a tensor `real` representing the real part of a complex number, and a tensor `imag` representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \\(a + bj\\), where *a* represents the `real` part and *b* represents the `imag` part. The input tensors `real` and `imag` must have the same shape. For example: ``` # tensor 'real' is [2.25, 3.25] # tensor `imag` is [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] ``` Args: real: A `Tensor`. Must be one of the following types: `float32`, `float64`. imag: A `Tensor`. Must have the same type as `real`. name: A name for the operation (optional). Returns: A `Tensor` of type `complex64` or `complex128`. """ real = ops.convert_to_tensor(real, name="real") imag = ops.convert_to_tensor(imag, name="imag") with ops.name_scope(name, "Complex", [real, imag]) as name: input_types = (real.dtype, imag.dtype) if input_types == (dtypes.float64, dtypes.float64): Tout = dtypes.complex128 elif input_types == (dtypes.float32, dtypes.float32): Tout = dtypes.complex64 else: raise TypeError("real and imag have incorrect types: " "{} {}".format(real.dtype.name, imag.dtype.name)) return gen_math_ops._complex(real, imag, Tout=Tout, name=name) def real(input, name=None): """Returns the real part of a complex number. Given a tensor `input` of complex numbers, this operation returns a tensor of type `float32` or `float64` that is the real part of each element in `input`. 
All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part returned by this operation and *b* is the imaginary part. For example: ``` # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.real(input) ==> [-2.25, 3.25] ``` If `input` is already real, it is returned unchanged. Args: input: A `Tensor`. Must have numeric type. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`. """ with ops.name_scope(name, "Real", [input]) as name: real_dtype = input.dtype.real_dtype if input.dtype.base_dtype == real_dtype: return input return gen_math_ops.real(input, Tout=real_dtype, name=name) def imag(input, name=None): """Returns the imaginary part of a complex number. Given a tensor `input` of complex numbers, this operation returns a tensor of type `float32` or `float64` that is the imaginary part of each element in `input`. All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part returned by this operation. For example: ``` # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.imag(input) ==> [4.75, 5.75] ``` Args: input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`. """ with ops.name_scope(name, "Imag", [input]) as name: return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name) def round(x, name=None): """Rounds the values of a tensor to the nearest integer, element-wise. Rounds half to even. Also known as bankers rounding. If you want to round according to the current system rounding mode use tf::cint. For example: ```python # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5] tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ] ``` Args: x: A `Tensor` of type `float32` or `float64`. name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as `x`. """ x = ops.convert_to_tensor(x, name="x") if x.dtype.is_integer: return x else: # TODO(nolivia): Switch to new Round op # return gen_math_ops.round(x, name=name) return gen_math_ops.floor(x + 0.5, name=name) ops.RegisterShape("Round")(common_shapes.call_cpp_shape_fn) def cast(x, dtype, name=None): """Casts a tensor to a new type. The operation casts `x` (in case of `Tensor`) or `x.values` (in case of `SparseTensor`) to `dtype`. For example: ```python # tensor `a` is [1.8, 2.2], dtype=tf.float tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32 ``` Args: x: A `Tensor` or `SparseTensor`. dtype: The destination type. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x`. Raises: TypeError: If `x` cannot be cast to the `dtype`. """ base_type = dtypes.as_dtype(dtype).base_dtype with ops.name_scope(name, "Cast", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): values_cast = cast(x.values, base_type, name=name) return sparse_tensor.SparseTensor(x.indices, values_cast, x.shape) else: # TODO(touts): Handle what Josh said. # # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that # allows some conversions that cast() can't do, e.g. casting numbers to # strings. x = ops.convert_to_tensor(x, name="x") if x.dtype.base_dtype == base_type: return x return gen_math_ops.cast(x, base_type, name=name) def saturate_cast(value, dtype, name=None): """Performs a safe saturating cast of `value` to `dtype`. This function casts the input to `dtype` without applying any scaling. 
If there is a danger that values would over or underflow in the cast, this op applies the appropriate clamping before the cast. Args: value: A `Tensor`. dtype: The desired output `DType`. name: A name for the operation (optional). Returns: `value` safely cast to `dtype`. """ # When casting to a type with smaller representable range, clamp. # Note that this covers casting to unsigned types as well. with ops.name_scope(name, "saturate_cast", [value]) as name: value = ops.convert_to_tensor(value, name="value") dtype = dtypes.as_dtype(dtype).base_dtype if value.dtype.min < dtype.min: value = gen_math_ops.maximum(value, ops.convert_to_tensor( dtype.min, dtype=value.dtype, name="min")) if value.dtype.max > dtype.max: value = gen_math_ops.minimum(value, ops.convert_to_tensor( dtype.max, dtype=value.dtype, name="max")) return cast(value, dtype, name=name) def to_float(x, name="ToFloat"): """Casts a tensor to type `float32`. Args: x: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`. Raises: TypeError: If `x` cannot be cast to the `float32`. """ return cast(x, dtypes.float32, name=name) def to_double(x, name="ToDouble"): """Casts a tensor to type `float64`. Args: x: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`. Raises: TypeError: If `x` cannot be cast to the `float64`. """ return cast(x, dtypes.float64, name=name) def to_int32(x, name="ToInt32"): """Casts a tensor to type `int32`. Args: x: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`. Raises: TypeError: If `x` cannot be cast to the `int32`. """ return cast(x, dtypes.int32, name=name) def to_int64(x, name="ToInt64"): """Casts a tensor to type `int64`. Args: x: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`. Raises: TypeError: If `x` cannot be cast to the `int64`. """ return cast(x, dtypes.int64, name=name) def to_bfloat16(x, name="ToBFloat16"): """Casts a tensor to type `bfloat16`. Args: x: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`. Raises: TypeError: If `x` cannot be cast to the `bfloat16`. """ return cast(x, dtypes.bfloat16, name=name) ops.Tensor._override_operator("__neg__", gen_math_ops.neg) ops.Tensor._override_operator("__abs__", abs) # __invert__ corresponds to the ~ operator. Here we follow the numpy convention # ~ marks an elementwise bit-wise inverse. This is only implemented for boolean # tensors and will throw a TypeError if used on nonboolean arrays ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not) def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor): """Register operators with different tensor and scalar versions. If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices, sp_values, sp_shape, dense)` and outputs `(new_sp_values)`. Args: func: the operator op_name: name of the operator being overridden clazz_object: class to override for. Either `Tensor` or `SparseTensor`. 
""" def binary_op_wrapper(x, y): with ops.name_scope(None, op_name, [x, y]) as name: if not isinstance(y, sparse_tensor.SparseTensor): y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y") return func(x, y, name=name) def binary_op_wrapper_sparse(sp_x, y): with ops.name_scope(None, op_name, [sp_x, y]) as name: y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y") return sparse_tensor.SparseTensor( sp_x.indices, func(sp_x.indices, sp_x.values, sp_x.shape, y, name=name), sp_x.shape) def r_binary_op_wrapper(y, x): with ops.name_scope(None, op_name, [x, y]) as name: x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x") return func(x, y, name=name) # Propagate func.__doc__ to the wrappers try: doc = func.__doc__ except AttributeError: doc = None binary_op_wrapper.__doc__ = doc r_binary_op_wrapper.__doc__ = doc binary_op_wrapper_sparse.__doc__ = doc if clazz_object is ops.Tensor: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper) del binary_op_wrapper clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper) del r_binary_op_wrapper else: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper_sparse) del binary_op_wrapper_sparse # Conversion table for __truediv__. None entries mean no conversion required. _TRUEDIV_TABLE = { dtypes.uint8: dtypes.float32, dtypes.int8: dtypes.float32, dtypes.uint16: dtypes.float32, dtypes.int16: dtypes.float32, dtypes.int32: dtypes.float64, dtypes.int64: dtypes.float64, dtypes.float16: None, dtypes.float32: None, dtypes.float64: None, dtypes.complex64: None, dtypes.complex128: None, } # NOTE: the support of "sparse (true)div dense" is currently not baked in into # "tf.(true_)div()". Until such an API decision is made, the supported usage is # to explicitly use the "/" operator to invoke either truediv or div. def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None): """Internal helper function for 'sp_t / dense_t'.""" with ops.name_scope(name, "truediv", [sp_indices, sp_values, sp_shape, y]) as name: sp_values = ops.convert_to_tensor(sp_values, name="sp_values") y = ops.convert_to_tensor(y, name="y") x_dtype = sp_values.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) try: dtype = _TRUEDIV_TABLE[x_dtype] except KeyError: raise TypeError("Invalid dtype %r in __truediv__" % x_dtype) if dtype is not None: sp_values = cast(sp_values, dtype) y = cast(y, dtype) return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values, sp_shape, y, name=name) def truediv(x, y, name=None): """Divides x / y elementwise, always producing floating point results. The same as `tf.div` for floating point arguments, but casts integer arguments to floating point before dividing so that the result is always floating point. This op is generated by normal `x / y` division in Python 3 and in Python 2.7 with `from __future__ import division`. If you want integer division that rounds down, use `x // y` or `tf.floordiv`. `x` and `y` must have the same numeric type. If the inputs are floating point, the output will have the same type. If the inputs are integral, the inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32` and `int64` (matching the behavior of Numpy). Args: x: `Tensor` numerator of numeric type. y: `Tensor` denominator of numeric type. name: A name for the operation (optional). Returns: `x / y` evaluated in floating point. 
Raises: TypeError: If `x` and `y` have different dtypes. """ with ops.name_scope(name, "truediv", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") y = ops.convert_to_tensor(y, name="y") x_dtype = x.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) try: dtype = _TRUEDIV_TABLE[x_dtype] except KeyError: raise TypeError("Invalid dtype %r in __truediv__" % x_dtype) if dtype is not None: x = cast(x, dtype) y = cast(y, dtype) return gen_math_ops.div(x, y, name=name) def floordiv(x, y, name=None): """Divides `x / y` elementwise, rounding down for floating point. The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for floating point arguments so that the result is always an integer (though possibly an integer represented as floating point). This op is generated by `x // y` floor division in Python 3 and in Python 2.7 with `from __future__ import division`. Note that for efficiency, `floordiv` uses C semantics for negative numbers (unlike Python and Numpy). `x` and `y` must have the same type, and the result will have the same type as well. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` rounded down (except possibly towards zero for negative integers). Raises: TypeError: If the inputs are complex. """ with ops.name_scope(name, "floordiv", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") dtype = x.dtype if dtype.is_floating: return gen_math_ops.floor(gen_math_ops.div(x, y), name=name) else: if not dtype.is_integer: raise TypeError("Expected floating point or integer, got %r" % dtype) # TODO(aselle): Switch to math_ops.floor_div() when ready # return gen_math_ops.floor_div(x, y, name=name) return gen_math_ops.div(x, y, name=name) def _mul_dispatch(x, y, name=None): """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse".""" is_tensor_y = isinstance(y, ops.Tensor) if is_tensor_y: return gen_math_ops.mul(x, y, name=name) else: assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse. new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values, y.shape, x, name) return sparse_tensor.SparseTensor(y.indices, new_vals, y.shape) _OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(gen_math_ops.add, "add") _OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub") _OverrideBinaryOperatorHelper(_mul_dispatch, "mul") _OverrideBinaryOperatorHelper(gen_math_ops.div, "div") _OverrideBinaryOperatorHelper(truediv, "truediv") _OverrideBinaryOperatorHelper(floordiv, "floordiv") # TODO(aselle): Switch mod to floor_mod when ready # _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod") _OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod") _OverrideBinaryOperatorHelper(pow, "pow") def logical_xor(x, y, name="LogicalXor"): """x ^ y = (x | y) & ~(x & y).""" # TODO(alemi) Make this a cwise op if people end up relying on it. 
return gen_math_ops.logical_and( gen_math_ops.logical_or(x, y), gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name=name) _OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and") _OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or") _OverrideBinaryOperatorHelper(logical_xor, "xor") ops.Tensor._override_operator("__lt__", gen_math_ops.less) ops.Tensor._override_operator("__le__", gen_math_ops.less_equal) ops.Tensor._override_operator("__gt__", gen_math_ops.greater) ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal) def range(start, limit=None, delta=1, dtype=None, name="range"): """Creates a sequence of numbers. Creates a sequence of numbers that begins at `start` and extends by increments of `delta` up to but not including `limit`. The dtype of the resulting tensor is inferred from the inputs unless it is provided explicitly. Like the Python builtin `range`, `start` defaults to 0, so that `range(n) = range(0, n)`. For example: ```python # 'start' is 3 # 'limit' is 18 # 'delta' is 3 tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] # 'start' is 3 # 'limit' is 1 # 'delta' is -0.5 tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5] # 'limit' is 5 tf.range(limit) ==> [0, 1, 2, 3, 4] ``` Args: start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit` is not None; otherwise, acts as range limit and first entry defaults to 0. limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None, defaults to the value of `start` while the first entry of the range defaults to 0. delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to 1. dtype: The type of the elements of the resulting tensor. name: A name for the operation. Defaults to "range". Returns: An 1-D `Tensor` of type `dtype`. """ if limit is None: start, limit = 0, start with ops.name_scope(name, "Range", [start, limit, delta]) as name: start = ops.convert_to_tensor(start, dtype=dtype, name="start") limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit") delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta") # infer dtype if not explicitly provided if dtype is None: dtype_hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64] assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta]) inferred_dtype = max([arg.dtype for arg in [start, limit, delta]], key=dtype_hierarchy.index) start = cast(start, inferred_dtype) limit = cast(limit, inferred_dtype) delta = cast(delta, inferred_dtype) return gen_math_ops._range(start, limit, delta, name=name) @ops.RegisterShape("Range") def _RangeShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0, 1, 2]) # Reduction operations def _ReductionDims(x, reduction_indices): """Returns range(0, rank(x)) if reduction_indices is None.""" if reduction_indices is not None: return reduction_indices else: # Fast path: avoid creating Rank and Range ops if ndims is known. if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None: return constant_op.constant(np.arange(x.get_shape().ndims), dtype=dtypes.int32) if (isinstance(x, sparse_tensor.SparseTensor) and x.shape.get_shape().is_fully_defined()): rank = x.shape.get_shape()[0].value # sparse.shape is an 1-D tensor. return constant_op.constant(np.arange(rank), dtype=dtypes.int32) # Otherwise, we rely on Range and Rank to do the right thing at run-time. 
return range(0, array_ops.rank(x)) def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes the sum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python # 'x' is [[1, 1, 1] # [1, 1, 1]] tf.reduce_sum(x) ==> 6 tf.reduce_sum(x, 0) ==> [2, 2, 2] tf.reduce_sum(x, 1) ==> [3, 3] tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]] tf.reduce_sum(x, [0, 1]) ==> 6 ``` Args: input_tensor: The tensor to reduce. Should have numeric type. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def count_nonzero(input_tensor, reduction_indices=None, keep_dims=False, dtype=dtypes.int64, name=None): """Computes number of nonzero elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. **NOTE** Floating point comparison to zero is done by exact floating point equality check. Small values are **not** rounded to zero for purposes of the nonzero check. For example: ```python # 'x' is [[0, 1, 0] # [1, 1, 0]] tf.count_nonzero(x) ==> 3 tf.count_nonzero(x, 0) ==> [1, 2, 0] tf.count_nonzero(x, 1) ==> [1, 2] tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]] tf.count_nonzero(x, [0, 1]) ==> 3 ``` Args: input_tensor: The tensor to reduce. Should be of numeric type, or `bool`. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. dtype: The output dtype; defaults to `tf.int64`. name: A name for the operation (optional). Returns: The reduced tensor (number of nonzero values). """ with ops.name_scope(name, "count_nonzero", [input_tensor]): input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor") zero = input_tensor.dtype.as_numpy_dtype() return cast( reduce_sum( # int64 reduction happens on GPU to_int64(gen_math_ops.not_equal(input_tensor, zero)), reduction_indices=reduction_indices, keep_dims=keep_dims), dtype=dtype) def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes the mean of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python # 'x' is [[1., 1.] 
# [2., 2.]] tf.reduce_mean(x) ==> 1.5 tf.reduce_mean(x, 0) ==> [1.5, 1.5] tf.reduce_mean(x, 1) ==> [1., 2.] ``` Args: input_tensor: The tensor to reduce. Should have numeric type. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes the product of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have numeric type. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes the minimum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have numeric type. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes the maximum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have numeric type. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes the "logical and" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. 
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python # 'x' is [[True, True] # [False, False]] tf.reduce_all(x) ==> False tf.reduce_all(x, 0) ==> [False, False] tf.reduce_all(x, 1) ==> [True, False] ``` Args: input_tensor: The boolean tensor to reduce. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes the "logical or" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python # 'x' is [[True, True] # [False, False]] tf.reduce_any(x) ==> True tf.reduce_any(x, 0) ==> [True, True] tf.reduce_any(x, 1) ==> [True, False] ``` Args: input_tensor: The boolean tensor to reduce. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. """ return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_logsumexp(input_tensor, reduction_indices=None, keep_dims=False, name=None): """Computes log(sum(exp(elements across dimensions of a tensor))). Reduces `input_tensor` along the dimensions given in `reduction_indices`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `reduction_indices` has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: ```python # 'x' is [[0, 0, 0]] # [0, 0, 0]] tf.reduce_logsumexp(x) ==> log(6) tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)] tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)] tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]] tf.reduce_logsumexp(x, [0, 1]) ==> log(6) ``` Args: input_tensor: The tensor to reduce. Should have numeric type. reduction_indices: The dimensions to reduce. If `None` (the default), reduces all dimensions. keep_dims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. 
""" with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name: my_max = array_ops.stop_gradient( reduce_max(input_tensor, reduction_indices, keep_dims=True)) result = gen_math_ops.log(reduce_sum( gen_math_ops.exp(input_tensor - my_max), reduction_indices, keep_dims=True)) + my_max if not keep_dims: if isinstance(reduction_indices, int): reduction_indices = [reduction_indices] result = array_ops.squeeze(result, reduction_indices) return result def trace(x, name=None): """ Compute the trace of a tensor `x`. `trace(x)` returns the sum along the main diagonal of each inner-most matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])` For example: ```python # 'x' is [[1, 2], # [3, 4]] tf.trace(x) ==> 5 # 'x' is [[1,2,3], # [4,5,6], # [7,8,9]] tf.trace(x) ==> 15 # 'x' is [[[1,2,3], # [4,5,6], # [7,8,9]], # [[-1,-2,-3], # [-4,-5,-6], # [-7,-8,-9]]] tf.trace(x) ==> [15,-15] ``` Args: x: tensor. name: A name for the operation (optional). Returns: The trace of input tensor. """ with ops.name_scope(name, "Trace", [x]) as name: x = ops.convert_to_tensor(x, name="x") return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name) def matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None): """Multiplies matrix `a` by matrix `b`, producing `a` * `b`. The inputs must be two-dimensional matrices, with matching inner dimensions, possibly after transposition. Both matrices must be of the same type. The supported types are: `float32`, `float64`, `int32`, `complex64`. Either matrix can be transposed on the fly by setting the corresponding flag to `True`. This is `False` by default. If one or both of the matrices contain a lot of zeros, a more efficient multiplication algorithm can be used by setting the corresponding `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default. For example: ```python # 2-D tensor `a` a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.] [4. 5. 6.]] # 2-D tensor `b` b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.] [9. 10.] [11. 12.]] c = tf.matmul(a, b) => [[58 64] [139 154]] ``` Args: a: `Tensor` of type `float32`, `float64`, `int32` or `complex64`. b: `Tensor` with same type as `a`. transpose_a: If `True`, `a` is transposed before multiplication. transpose_b: If `True`, `b` is transposed before multiplication. a_is_sparse: If `True`, `a` is treated as a sparse matrix. b_is_sparse: If `True`, `b` is treated as a sparse matrix. name: Name for the operation (optional). Returns: A `Tensor` of the same type as `a`. """ with ops.name_scope(name, "MatMul", [a, b]) as name: a = ops.convert_to_tensor(a, name="a") b = ops.convert_to_tensor(b, name="b") sparse_matmul_types = [dtypes.bfloat16, dtypes.float32] use_sparse_matmul = (a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types and (a_is_sparse or b_is_sparse)) if dtypes.bfloat16 in (a.dtype, b.dtype): # matmul currently doesn't handle bfloat16 inputs. 
use_sparse_matmul = True if use_sparse_matmul: return sparse_matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name) else: return gen_math_ops._mat_mul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name) sparse_matmul = gen_math_ops._sparse_mat_mul batch_matmul = gen_math_ops._batch_mat_mul ops.RegisterShape("MatMul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseMatMul")(common_shapes.call_cpp_shape_fn) @ops.RegisterStatistics("MatMul", "flops") def _calc_mat_mul_flops(graph, node): """Calculates the compute resources needed for MatMul.""" transpose_a = node.attr["transpose_a"].b a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) a_shape.assert_is_fully_defined() if transpose_a: k = int(a_shape[0]) else: k = int(a_shape[1]) output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() output_count = np.prod(output_shape.as_list()) return ops.OpStats("flops", (k * output_count * 2)) def _as_indexed_slices(x, optimize=True): """Convert 'x' to IndexedSlices. Convert a dense Tensor to a block-sparse IndexedSlices. Args: x: Either a Tensor object, or an IndexedSlices object. optimize: if true, attempt to optimize the conversion of 'x'. Returns: An IndexedSlices object. Raises: TypeError: If 'x' is not a Tensor or an IndexedSlices object. """ # TODO(touts): op_scope if not isinstance(x, (ops.Tensor, ops.IndexedSlices)): raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x)) if isinstance(x, ops.IndexedSlices): return x x_shape = array_ops.shape_internal(x, optimize=optimize) return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape) def _as_indexed_slices_list(inputs, optimize=True): """Convert all elements of 'inputs' to IndexedSlices. Additionally, homogenize the types of all the indices to either int32 or int64. Args: inputs: List containing either Tensor or IndexedSlices objects. optimize: if true, attempt to optimize the conversion of each input. Returns: A list of IndexedSlices objects. Raises: TypeError: If 'inputs' is not a list or a tuple. """ if not isinstance(inputs, (list, tuple)): raise TypeError("Expected a list or tuple, not a %s" % type(inputs)) outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs] with_int32_index = [o.indices for o in outputs if o.indices.dtype == dtypes.int32] if not with_int32_index or len(with_int32_index) == len(outputs): return outputs casted_outputs = [] for o in outputs: if o.indices.dtype == dtypes.int32: casted_outputs.append( ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64), o.dense_shape)) else: casted_outputs.append(o) return casted_outputs def add_n(inputs, name=None): """Adds all input tensors element-wise. Args: inputs: A list of `Tensor` objects, each with same shape and type. name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as the elements of `inputs`. Raises: ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred. 
""" if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if len(inputs) == 1: if name: return array_ops.identity(inputs[0], name=name) return inputs[0] return gen_math_ops._add_n(inputs, name=name) def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None): """Returns the element-wise sum of a list of tensors. Optionally, pass `shape` and `tensor_dtype` for shape and type checking, otherwise, these are inferred. NOTE: This operation is not differentiable and cannot be used if inputs depend on trainable variables. Please use `tf.add_n` for such cases. For example: ```python # tensor 'a' is [[1, 2], [3, 4]] # tensor `b` is [[5, 0], [0, 6]] tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]] # Explicitly pass shape and type tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32) ==> [[7, 4], [6, 14]] ``` Args: inputs: A list of `Tensor` objects, each with same shape and type. shape: Shape of elements of `inputs`. tensor_dtype: The type of `inputs`. name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as the elements of `inputs`. Raises: ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred. """ if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if not all(x.dtype == inputs[0].dtype for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if shape is not None: shape = tensor_shape.as_shape(shape) else: shape = tensor_shape.unknown_shape() for input_tensor in inputs: if isinstance(input_tensor, ops.Tensor): shape = shape.merge_with(input_tensor.get_shape()) if len(inputs) == 1: return inputs[0] if tensor_dtype is None: tensor_dtype = inputs[0].dtype with ops.name_scope(name, "AccumulateN", inputs) as name: var = gen_state_ops._temporary_variable(shape=tensor_shape.vector(0), dtype=tensor_dtype) with ops.colocate_with(var): zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0]) zeros.set_shape(shape) ref = state_ops.assign(var, zeros, validate_shape=False) update_ops = [state_ops.assign_add(ref, input_tensor, use_locking=True) for input_tensor in inputs] with ops.control_dependencies(update_ops): return gen_state_ops._destroy_temporary_variable( ref, var_name=var.op.name, name=name) ops.RegisterShape("BatchMatMul")(common_shapes.call_cpp_shape_fn) def sigmoid(x, name=None): """Computes sigmoid of `x` element-wise. Specifically, `y = 1 / (1 + exp(-x))`. Args: x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`, or `qint32`. name: A name for the operation (optional). Returns: A Tensor with the same type as `x` if `x.dtype != qint32` otherwise the return type is `quint8`. """ with ops.name_scope(name, "Sigmoid", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops._sigmoid(x, name=name) def tanh(x, name=None): """Computes hyperbolic tangent of `x` element-wise. 
Args: x: A Tensor or SparseTensor with type `float`, `double`, `int32`, `complex64`, `int64`, or `qint32`. name: A name for the operation (optional). Returns: A Tensor or SparseTensor respectively with the same type as `x` if `x.dtype != qint32` otherwise the return type is `quint8`. """ with ops.name_scope(name, "Tanh", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_tanh = gen_math_ops._tanh(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_tanh, shape=x.shape) else: return gen_math_ops._tanh(x, name=name) def cumsum(x, axis=0, exclusive=False, reverse=False, name=None): """Compute the cumulative sum of the tensor `x` along `axis`. By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output: ```prettyprint tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c] ``` By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed instead: ```prettyprint tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b] ``` By setting the `reverse` kwarg to `True`, the cumsum is performed in the opposite direction: ```prettyprint tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c] ``` This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: ```prettyprint tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0] ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. axis: A `Tensor` of type `int32` (default: 0). reverse: A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ with ops.name_scope(name, "Cumsum", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.cumsum( x, axis, exclusive=exclusive, reverse=reverse, name=name) def cumprod(x, axis=0, exclusive=False, reverse=False, name=None): """Compute the cumulative product of the tensor `x` along `axis`. By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output: ```prettyprint tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c] ``` By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed instead: ```prettyprint tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b] ``` By setting the `reverse` kwarg to `True`, the cumprod is performed in the opposite direction: ```prettyprint tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c] ``` This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: ```prettyprint tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1] ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. axis: A `Tensor` of type `int32` (default: 0). reverse: A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ with ops.name_scope(name, "Cumprod", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.cumprod( x, axis, exclusive=exclusive, reverse=reverse, name=name) def conj(x, name=None): r"""Returns the complex conjugate of a complex number. 
Given a tensor `input` of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in `input`. The complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. The complex conjugate returned by this operation is of the form \\(a - bj\\). For example: # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] If `x` is real, it is returned unchanged. Args: x: `Tensor` to conjugate. Must have numeric type. name: A name for the operation (optional). Returns: A `Tensor` that is the conjugate of `x` (with the same type). Raises: TypeError: If `x` is not a numeric tensor. """ with ops.name_scope(name, "Conj", [x]) as name: x = ops.convert_to_tensor(x, name="x") if x.dtype.is_complex: return gen_math_ops._conj(x, name=name) elif x.dtype.is_floating or x.dtype.is_integer: return x else: raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype) ops.RegisterShape("Abs")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Acos")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Asin")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Atan")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Ceil")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Conj")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cos")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cross")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Exp")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Floor")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Imag")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Inv")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IsFinite")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IsInf")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IsNan")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Log")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Log1p")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LogicalNot")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Neg")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Real")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Rsqrt")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sign")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sin")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sqrt")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Square")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sigmoid")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Tanh")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Tan")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Lgamma")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Digamma")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Erf")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Erfc")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cast")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("ComplexAbs")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FFT")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IFFT")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FFT2D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IFFT2D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FFT3D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IFFT3D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("TanhGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SigmoidGrad")(common_shapes.call_cpp_shape_fn) 
ops.RegisterShape("InvGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SqrtGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("RsqrtGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cumsum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cumprod")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Add")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Complex")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Div")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Equal")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Greater")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("GreaterEqual")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Igamma")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Igammac")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Zeta")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Polygamma")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Less")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LessEqual")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LogicalAnd")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LogicalOr")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Maximum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Minimum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Mod")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FloorMod")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FloorDiv")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Mul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("NotEqual")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Pow")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sub")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SquaredDifference")(common_shapes.call_cpp_shape_fn) def _BroadcastShape(op): """Common shape function for binary operators that broadcast their inputs.""" return [common_shapes.broadcast_shape( op.inputs[0].get_shape(), op.inputs[1].get_shape())] ops.RegisterShape("Betainc")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseDenseCwiseMul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseDenseCwiseDiv")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseDenseCwiseAdd")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("AddN")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Select")(common_shapes.call_cpp_shape_fn) @ops.RegisterShape("ArgMax") @ops.RegisterShape("ArgMin") def _ArgOpShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1]) @ops.RegisterShape("All") @ops.RegisterShape("Any") @ops.RegisterShape("Max") @ops.RegisterShape("Mean") @ops.RegisterShape("Min") @ops.RegisterShape("Prod") @ops.RegisterShape("Sum") def _ReductionShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1]) ops.RegisterShape("SegmentMax")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SegmentMean")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SegmentMin")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SegmentProd")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SegmentSum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseSegmentMean")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseSegmentSqrtN")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseSegmentSum")(common_shapes.call_cpp_shape_fn) @ops.RegisterShape("SparseSegmentMeanGrad") @ops.RegisterShape("SparseSegmentSqrtNGrad") # pylint: disable=invalid-name def _SparseSegmentReductionGradShape(op): return 
common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[3]) # pylint: enable=invalid-name @ops.RegisterShape("UnsortedSegmentSum") def _UnsortedSegmentSumShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2]) @ops.RegisterShape("LinSpace") def _LinspaceShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2]) def reduced_shape(input_shape, axes): """Helper function for reduction ops. Args: input_shape: 1-D Tensor, the shape of the Tensor being reduced. axes: 1-D Tensor, the reduction axes. Returns: A 1-D Tensor, the output shape as if keep_dims were set to True. """ # Example: # cast needed for SparseTensor reductions input_shape = to_int32(input_shape) # [2, 3, 5, 7] axes = to_int32(axes) # [1, 2] input_rank = array_ops.size(input_shape) # 4 axes = (axes + input_rank) % input_rank axes_shape = array_ops.shape(axes) # [2] return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7] [range(input_rank), # [0, 1, 2, 3] axes], # [1, 2] [input_shape, # [2, 3, 5, 7] array_ops.fill(axes_shape, 1)]) # [1, 1] ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Requantize")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("RequantizationRange")(common_shapes.call_cpp_shape_fn)
33.930221
86
0.683119
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import common_shapes from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_util from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_control_flow_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops.gen_math_ops import * argmax = gen_math_ops.arg_max argmin = gen_math_ops.arg_min linspace = gen_math_ops.lin_space def abs(x, name=None): with ops.name_scope(name, "Abs", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): if x.values.dtype in (dtypes.complex64, dtypes.complex128): x_abs = gen_math_ops.complex_abs(x.values, Tout=x.values.dtype.real_dtype, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_abs, shape=x.shape) x_abs = gen_math_ops._abs(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_abs, shape=x.shape) else: x = ops.convert_to_tensor(x, name="x") if x.dtype in (dtypes.complex64, dtypes.complex128): return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name) return gen_math_ops._abs(x, name=name) def divide(x, y, name=None): with ops.name_scope(name, "Divide", [x]) as name: return x / y multiply = gen_math_ops.mul subtract = gen_math_ops.sub negative = gen_math_ops.neg def neg(x, name=None): with ops.name_scope(name, "Neg", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_neg = gen_math_ops.neg(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_neg, shape=x.shape) else: return gen_math_ops.neg(x, name=name) def sign(x, name=None): with ops.name_scope(name, "Sign", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_sign = gen_math_ops.sign(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_sign, shape=x.shape) else: return gen_math_ops.sign(x, name=name) def square(x, name=None): with ops.name_scope(name, "Square", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_square = gen_math_ops.square(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_square, shape=x.shape) else: return gen_math_ops.square(x, name=name) def sqrt(x, name=None): with ops.name_scope(name, "Sqrt", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_sqrt = gen_math_ops.sqrt(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_sqrt, shape=x.shape) else: return gen_math_ops.sqrt(x, name=name) def erf(x, name=None): with ops.name_scope(name, "Erf", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_erf = gen_math_ops.erf(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_erf, shape=x.shape) else: return gen_math_ops.erf(x, name=name) def complex_abs(x, name=None): return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name) def scalar_mul(scalar, x): scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype, name="scalar") shape = scalar.get_shape() if shape.ndims == 
0: if isinstance(x, ops.IndexedSlices): return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape) else: return scalar * x else: raise ValueError("Only scalar multiply works, got shape %s" % shape) def pow(x, y, name=None): with ops.name_scope(name, "Pow", [x]) as name: return gen_math_ops._pow(x, y, name=name) def complex(real, imag, name=None): real = ops.convert_to_tensor(real, name="real") imag = ops.convert_to_tensor(imag, name="imag") with ops.name_scope(name, "Complex", [real, imag]) as name: input_types = (real.dtype, imag.dtype) if input_types == (dtypes.float64, dtypes.float64): Tout = dtypes.complex128 elif input_types == (dtypes.float32, dtypes.float32): Tout = dtypes.complex64 else: raise TypeError("real and imag have incorrect types: " "{} {}".format(real.dtype.name, imag.dtype.name)) return gen_math_ops._complex(real, imag, Tout=Tout, name=name) def real(input, name=None): with ops.name_scope(name, "Real", [input]) as name: real_dtype = input.dtype.real_dtype if input.dtype.base_dtype == real_dtype: return input return gen_math_ops.real(input, Tout=real_dtype, name=name) def imag(input, name=None): with ops.name_scope(name, "Imag", [input]) as name: return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name) def round(x, name=None): x = ops.convert_to_tensor(x, name="x") if x.dtype.is_integer: return x else: return gen_math_ops.floor(x + 0.5, name=name) ops.RegisterShape("Round")(common_shapes.call_cpp_shape_fn) def cast(x, dtype, name=None): base_type = dtypes.as_dtype(dtype).base_dtype with ops.name_scope(name, "Cast", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): values_cast = cast(x.values, base_type, name=name) return sparse_tensor.SparseTensor(x.indices, values_cast, x.shape) else: # strings. x = ops.convert_to_tensor(x, name="x") if x.dtype.base_dtype == base_type: return x return gen_math_ops.cast(x, base_type, name=name) def saturate_cast(value, dtype, name=None): # When casting to a type with smaller representable range, clamp. # Note that this covers casting to unsigned types as well. with ops.name_scope(name, "saturate_cast", [value]) as name: value = ops.convert_to_tensor(value, name="value") dtype = dtypes.as_dtype(dtype).base_dtype if value.dtype.min < dtype.min: value = gen_math_ops.maximum(value, ops.convert_to_tensor( dtype.min, dtype=value.dtype, name="min")) if value.dtype.max > dtype.max: value = gen_math_ops.minimum(value, ops.convert_to_tensor( dtype.max, dtype=value.dtype, name="max")) return cast(value, dtype, name=name) def to_float(x, name="ToFloat"): return cast(x, dtypes.float32, name=name) def to_double(x, name="ToDouble"): return cast(x, dtypes.float64, name=name) def to_int32(x, name="ToInt32"): return cast(x, dtypes.int32, name=name) def to_int64(x, name="ToInt64"): return cast(x, dtypes.int64, name=name) def to_bfloat16(x, name="ToBFloat16"): return cast(x, dtypes.bfloat16, name=name) ops.Tensor._override_operator("__neg__", gen_math_ops.neg) ops.Tensor._override_operator("__abs__", abs) # __invert__ corresponds to the ~ operator. Here we follow the numpy convention # ~ marks an elementwise bit-wise inverse. 
This is only implemented for boolean # tensors and will throw a TypeError if used on nonboolean arrays ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not) def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor): def binary_op_wrapper(x, y): with ops.name_scope(None, op_name, [x, y]) as name: if not isinstance(y, sparse_tensor.SparseTensor): y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y") return func(x, y, name=name) def binary_op_wrapper_sparse(sp_x, y): with ops.name_scope(None, op_name, [sp_x, y]) as name: y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y") return sparse_tensor.SparseTensor( sp_x.indices, func(sp_x.indices, sp_x.values, sp_x.shape, y, name=name), sp_x.shape) def r_binary_op_wrapper(y, x): with ops.name_scope(None, op_name, [x, y]) as name: x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x") return func(x, y, name=name) # Propagate func.__doc__ to the wrappers try: doc = func.__doc__ except AttributeError: doc = None binary_op_wrapper.__doc__ = doc r_binary_op_wrapper.__doc__ = doc binary_op_wrapper_sparse.__doc__ = doc if clazz_object is ops.Tensor: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper) del binary_op_wrapper clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper) del r_binary_op_wrapper else: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper_sparse) del binary_op_wrapper_sparse # Conversion table for __truediv__. None entries mean no conversion required. _TRUEDIV_TABLE = { dtypes.uint8: dtypes.float32, dtypes.int8: dtypes.float32, dtypes.uint16: dtypes.float32, dtypes.int16: dtypes.float32, dtypes.int32: dtypes.float64, dtypes.int64: dtypes.float64, dtypes.float16: None, dtypes.float32: None, dtypes.float64: None, dtypes.complex64: None, dtypes.complex128: None, } # NOTE: the support of "sparse (true)div dense" is currently not baked in into # "tf.(true_)div()". Until such an API decision is made, the supported usage is # to explicitly use the "/" operator to invoke either truediv or div. 
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None): with ops.name_scope(name, "truediv", [sp_indices, sp_values, sp_shape, y]) as name: sp_values = ops.convert_to_tensor(sp_values, name="sp_values") y = ops.convert_to_tensor(y, name="y") x_dtype = sp_values.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) try: dtype = _TRUEDIV_TABLE[x_dtype] except KeyError: raise TypeError("Invalid dtype %r in __truediv__" % x_dtype) if dtype is not None: sp_values = cast(sp_values, dtype) y = cast(y, dtype) return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values, sp_shape, y, name=name) def truediv(x, y, name=None): with ops.name_scope(name, "truediv", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") y = ops.convert_to_tensor(y, name="y") x_dtype = x.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: raise TypeError("x and y must have the same dtype, got %r != %r" % (x_dtype, y_dtype)) try: dtype = _TRUEDIV_TABLE[x_dtype] except KeyError: raise TypeError("Invalid dtype %r in __truediv__" % x_dtype) if dtype is not None: x = cast(x, dtype) y = cast(y, dtype) return gen_math_ops.div(x, y, name=name) def floordiv(x, y, name=None): with ops.name_scope(name, "floordiv", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") dtype = x.dtype if dtype.is_floating: return gen_math_ops.floor(gen_math_ops.div(x, y), name=name) else: if not dtype.is_integer: raise TypeError("Expected floating point or integer, got %r" % dtype) # TODO(aselle): Switch to math_ops.floor_div() when ready # return gen_math_ops.floor_div(x, y, name=name) return gen_math_ops.div(x, y, name=name) def _mul_dispatch(x, y, name=None): is_tensor_y = isinstance(y, ops.Tensor) if is_tensor_y: return gen_math_ops.mul(x, y, name=name) else: assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse. new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values, y.shape, x, name) return sparse_tensor.SparseTensor(y.indices, new_vals, y.shape) _OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul", sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(gen_math_ops.add, "add") _OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub") _OverrideBinaryOperatorHelper(_mul_dispatch, "mul") _OverrideBinaryOperatorHelper(gen_math_ops.div, "div") _OverrideBinaryOperatorHelper(truediv, "truediv") _OverrideBinaryOperatorHelper(floordiv, "floordiv") # TODO(aselle): Switch mod to floor_mod when ready # _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod") _OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod") _OverrideBinaryOperatorHelper(pow, "pow") def logical_xor(x, y, name="LogicalXor"): # TODO(alemi) Make this a cwise op if people end up relying on it. 
return gen_math_ops.logical_and( gen_math_ops.logical_or(x, y), gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name=name) _OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and") _OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or") _OverrideBinaryOperatorHelper(logical_xor, "xor") ops.Tensor._override_operator("__lt__", gen_math_ops.less) ops.Tensor._override_operator("__le__", gen_math_ops.less_equal) ops.Tensor._override_operator("__gt__", gen_math_ops.greater) ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal) def range(start, limit=None, delta=1, dtype=None, name="range"): if limit is None: start, limit = 0, start with ops.name_scope(name, "Range", [start, limit, delta]) as name: start = ops.convert_to_tensor(start, dtype=dtype, name="start") limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit") delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta") # infer dtype if not explicitly provided if dtype is None: dtype_hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64] assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta]) inferred_dtype = max([arg.dtype for arg in [start, limit, delta]], key=dtype_hierarchy.index) start = cast(start, inferred_dtype) limit = cast(limit, inferred_dtype) delta = cast(delta, inferred_dtype) return gen_math_ops._range(start, limit, delta, name=name) @ops.RegisterShape("Range") def _RangeShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0, 1, 2]) # Reduction operations def _ReductionDims(x, reduction_indices): if reduction_indices is not None: return reduction_indices else: # Fast path: avoid creating Rank and Range ops if ndims is known. if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None: return constant_op.constant(np.arange(x.get_shape().ndims), dtype=dtypes.int32) if (isinstance(x, sparse_tensor.SparseTensor) and x.shape.get_shape().is_fully_defined()): rank = x.shape.get_shape()[0].value # sparse.shape is an 1-D tensor. return constant_op.constant(np.arange(rank), dtype=dtypes.int32) # Otherwise, we rely on Range and Rank to do the right thing at run-time. 
return range(0, array_ops.rank(x)) def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None): return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def count_nonzero(input_tensor, reduction_indices=None, keep_dims=False, dtype=dtypes.int64, name=None): with ops.name_scope(name, "count_nonzero", [input_tensor]): input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor") zero = input_tensor.dtype.as_numpy_dtype() return cast( reduce_sum( # int64 reduction happens on GPU to_int64(gen_math_ops.not_equal(input_tensor, zero)), reduction_indices=reduction_indices, keep_dims=keep_dims), dtype=dtype) def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None): return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None): return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None): return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None): return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None): return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None): return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor, reduction_indices), keep_dims, name=name) def reduce_logsumexp(input_tensor, reduction_indices=None, keep_dims=False, name=None): with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name: my_max = array_ops.stop_gradient( reduce_max(input_tensor, reduction_indices, keep_dims=True)) result = gen_math_ops.log(reduce_sum( gen_math_ops.exp(input_tensor - my_max), reduction_indices, keep_dims=True)) + my_max if not keep_dims: if isinstance(reduction_indices, int): reduction_indices = [reduction_indices] result = array_ops.squeeze(result, reduction_indices) return result def trace(x, name=None): with ops.name_scope(name, "Trace", [x]) as name: x = ops.convert_to_tensor(x, name="x") return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name) def matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None): with ops.name_scope(name, "MatMul", [a, b]) as name: a = ops.convert_to_tensor(a, name="a") b = ops.convert_to_tensor(b, name="b") sparse_matmul_types = [dtypes.bfloat16, dtypes.float32] use_sparse_matmul = (a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types and (a_is_sparse or b_is_sparse)) if dtypes.bfloat16 in (a.dtype, b.dtype): # matmul currently doesn't handle bfloat16 inputs. 
use_sparse_matmul = True if use_sparse_matmul: return sparse_matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name) else: return gen_math_ops._mat_mul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name) sparse_matmul = gen_math_ops._sparse_mat_mul batch_matmul = gen_math_ops._batch_mat_mul ops.RegisterShape("MatMul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseMatMul")(common_shapes.call_cpp_shape_fn) @ops.RegisterStatistics("MatMul", "flops") def _calc_mat_mul_flops(graph, node): transpose_a = node.attr["transpose_a"].b a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) a_shape.assert_is_fully_defined() if transpose_a: k = int(a_shape[0]) else: k = int(a_shape[1]) output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name) output_shape.assert_is_fully_defined() output_count = np.prod(output_shape.as_list()) return ops.OpStats("flops", (k * output_count * 2)) def _as_indexed_slices(x, optimize=True): if not isinstance(x, (ops.Tensor, ops.IndexedSlices)): raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x)) if isinstance(x, ops.IndexedSlices): return x x_shape = array_ops.shape_internal(x, optimize=optimize) return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape) def _as_indexed_slices_list(inputs, optimize=True): if not isinstance(inputs, (list, tuple)): raise TypeError("Expected a list or tuple, not a %s" % type(inputs)) outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs] with_int32_index = [o.indices for o in outputs if o.indices.dtype == dtypes.int32] if not with_int32_index or len(with_int32_index) == len(outputs): return outputs casted_outputs = [] for o in outputs: if o.indices.dtype == dtypes.int32: casted_outputs.append( ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64), o.dense_shape)) else: casted_outputs.append(o) return casted_outputs def add_n(inputs, name=None): if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if len(inputs) == 1: if name: return array_ops.identity(inputs[0], name=name) return inputs[0] return gen_math_ops._add_n(inputs, name=name) def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None): if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if not all(x.dtype == inputs[0].dtype for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if shape is not None: shape = tensor_shape.as_shape(shape) else: shape = tensor_shape.unknown_shape() for input_tensor in inputs: if isinstance(input_tensor, ops.Tensor): shape = shape.merge_with(input_tensor.get_shape()) if len(inputs) == 1: return inputs[0] if tensor_dtype is None: tensor_dtype = inputs[0].dtype with ops.name_scope(name, "AccumulateN", inputs) as name: var = gen_state_ops._temporary_variable(shape=tensor_shape.vector(0), dtype=tensor_dtype) with 
ops.colocate_with(var): zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0]) zeros.set_shape(shape) ref = state_ops.assign(var, zeros, validate_shape=False) update_ops = [state_ops.assign_add(ref, input_tensor, use_locking=True) for input_tensor in inputs] with ops.control_dependencies(update_ops): return gen_state_ops._destroy_temporary_variable( ref, var_name=var.op.name, name=name) ops.RegisterShape("BatchMatMul")(common_shapes.call_cpp_shape_fn) def sigmoid(x, name=None): with ops.name_scope(name, "Sigmoid", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops._sigmoid(x, name=name) def tanh(x, name=None): with ops.name_scope(name, "Tanh", [x]) as name: if isinstance(x, sparse_tensor.SparseTensor): x_tanh = gen_math_ops._tanh(x.values, name=name) return sparse_tensor.SparseTensor( indices=x.indices, values=x_tanh, shape=x.shape) else: return gen_math_ops._tanh(x, name=name) def cumsum(x, axis=0, exclusive=False, reverse=False, name=None): with ops.name_scope(name, "Cumsum", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.cumsum( x, axis, exclusive=exclusive, reverse=reverse, name=name) def cumprod(x, axis=0, exclusive=False, reverse=False, name=None): with ops.name_scope(name, "Cumprod", [x]) as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops.cumprod( x, axis, exclusive=exclusive, reverse=reverse, name=name) def conj(x, name=None): with ops.name_scope(name, "Conj", [x]) as name: x = ops.convert_to_tensor(x, name="x") if x.dtype.is_complex: return gen_math_ops._conj(x, name=name) elif x.dtype.is_floating or x.dtype.is_integer: return x else: raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype) ops.RegisterShape("Abs")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Acos")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Asin")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Atan")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Ceil")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Conj")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cos")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cross")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Exp")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Floor")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Imag")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Inv")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IsFinite")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IsInf")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IsNan")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Log")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Log1p")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LogicalNot")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Neg")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Real")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Rsqrt")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sign")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sin")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sqrt")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Square")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sigmoid")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Tanh")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Tan")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Lgamma")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Digamma")(common_shapes.call_cpp_shape_fn) 
ops.RegisterShape("Erf")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Erfc")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cast")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("ComplexAbs")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FFT")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IFFT")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FFT2D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IFFT2D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FFT3D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("IFFT3D")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("TanhGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SigmoidGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("InvGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SqrtGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("RsqrtGrad")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cumsum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Cumprod")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Add")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Complex")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Div")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Equal")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Greater")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("GreaterEqual")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Igamma")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Igammac")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Zeta")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Polygamma")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Less")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LessEqual")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LogicalAnd")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("LogicalOr")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Maximum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Minimum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Mod")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FloorMod")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("FloorDiv")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Mul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("NotEqual")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Pow")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Sub")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SquaredDifference")(common_shapes.call_cpp_shape_fn) def _BroadcastShape(op): return [common_shapes.broadcast_shape( op.inputs[0].get_shape(), op.inputs[1].get_shape())] ops.RegisterShape("Betainc")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseDenseCwiseMul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseDenseCwiseDiv")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseDenseCwiseAdd")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("AddN")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Select")(common_shapes.call_cpp_shape_fn) @ops.RegisterShape("ArgMax") @ops.RegisterShape("ArgMin") def _ArgOpShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1]) @ops.RegisterShape("All") @ops.RegisterShape("Any") @ops.RegisterShape("Max") @ops.RegisterShape("Mean") @ops.RegisterShape("Min") @ops.RegisterShape("Prod") @ops.RegisterShape("Sum") def _ReductionShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1]) ops.RegisterShape("SegmentMax")(common_shapes.call_cpp_shape_fn) 
ops.RegisterShape("SegmentMean")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SegmentMin")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SegmentProd")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SegmentSum")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseSegmentMean")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseSegmentSqrtN")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("SparseSegmentSum")(common_shapes.call_cpp_shape_fn) @ops.RegisterShape("SparseSegmentMeanGrad") @ops.RegisterShape("SparseSegmentSqrtNGrad") def _SparseSegmentReductionGradShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[3]) @ops.RegisterShape("UnsortedSegmentSum") def _UnsortedSegmentSumShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2]) @ops.RegisterShape("LinSpace") def _LinspaceShape(op): return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2]) def reduced_shape(input_shape, axes): input_shape = to_int32(input_shape) axes = to_int32(axes) input_rank = array_ops.size(input_shape) axes = (axes + input_rank) % input_rank axes_shape = array_ops.shape(axes) return gen_data_flow_ops.dynamic_stitch( [range(input_rank), axes], [input_shape, array_ops.fill(axes_shape, 1)]) ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("Requantize")(common_shapes.call_cpp_shape_fn) ops.RegisterShape("RequantizationRange")(common_shapes.call_cpp_shape_fn)
true
true