input
stringlengths
2.65k
237k
output
stringclasses
1 value
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """Amazon SageMaker Debugger provides full visibility into ML training jobs. This module provides SageMaker Debugger high-level methods to set up Debugger objects, such as Debugger built-in rules, tensor collections, and hook configuration. Use the Debugger objects for parameters when constructing a SageMaker estimator to initiate a training job. """ from __future__ import absolute_import import time from abc import ABC import attr import smdebug_rulesconfig as rule_configs from sagemaker import image_uris from sagemaker.utils import build_dict framework_name = "debugger" def get_rule_container_image_uri(region): """Return the Debugger rule image URI for the given AWS Region. For a full list of rule image URIs, see `Use Debugger Docker Images for Built-in or Custom Rules <https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-docker-images-rules.html>`_. Args: region (str): A string of AWS Region. For example, ``'us-east-1'``. Returns: str: Formatted image URI for the given AWS Region and the rule container type. """ return image_uris.retrieve(framework_name, region) def get_default_profiler_rule(): """Return the default built-in profiler rule with a unique name. Returns: sagemaker.debugger.ProfilerRule: The instance of the built-in ProfilerRule. 
""" default_rule = rule_configs.ProfilerReport() custom_name = f"{default_rule.rule_name}-{int(time.time())}" return ProfilerRule.sagemaker(default_rule, name=custom_name) @attr.s class RuleBase(ABC): """The SageMaker Debugger rule base class that cannot be instantiated directly. .. tip:: Debugger rule classes inheriting this RuleBase class are :class:`~sagemaker.debugger.Rule` and :class:`~sagemaker.debugger.ProfilerRule`. Do not directly use the rule base class to instantiate a SageMaker Debugger rule. Use the :class:`~sagemaker.debugger.Rule` classmethods for debugging and the :class:`~sagemaker.debugger.ProfilerRule` classmethods for profiling. Attributes: name (str): The name of the rule. image_uri (str): The image URI to use the rule. instance_type (str): Type of EC2 instance to use. For example, 'ml.c4.xlarge'. container_local_output_path (str): The local path to store the Rule output. s3_output_path (str): The location in S3 to store the output. volume_size_in_gb (int): Size in GB of the EBS volume to use for storing data. rule_parameters (dict): A dictionary of parameters for the rule. """ name = attr.ib() image_uri = attr.ib() instance_type = attr.ib() container_local_output_path = attr.ib() s3_output_path = attr.ib() volume_size_in_gb = attr.ib() rule_parameters = attr.ib() @staticmethod def _set_rule_parameters(source, rule_to_invoke, rule_parameters): """Create a dictionary of rule parameters. Args: source (str): Optional. A source file containing a rule to invoke. If provided, you must also provide rule_to_invoke. This can either be an S3 uri or a local path. rule_to_invoke (str): Optional. The name of the rule to invoke within the source. If provided, you must also provide source. rule_parameters (dict): Optional. A dictionary of parameters for the rule. Returns: dict: A dictionary of rule parameters. """ if bool(source) ^ bool(rule_to_invoke): raise ValueError( "If you provide a source, you must also provide a rule to invoke (and vice versa)." 
) merged_rule_params = {} merged_rule_params.update(build_dict("source_s3_uri", source)) merged_rule_params.update(build_dict("rule_to_invoke", rule_to_invoke)) merged_rule_params.update(rule_parameters or {}) return merged_rule_params class Rule(RuleBase): """The SageMaker Debugger Rule class configures *debugging* rules to debug your training job. The debugging rules analyze tensor outputs from your training job and monitor conditions that are critical for the success of the training job. SageMaker Debugger comes pre-packaged with built-in *debugging* rules. For example, the debugging rules can detect whether gradients are getting too large or too small, or if a model is overfitting. For a full list of built-in rules for debugging, see `List of Debugger Built-in Rules <https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html>`_. You can also write your own rules using the custom rule classmethod. """ def __init__( self, name, image_uri, instance_type, container_local_output_path, s3_output_path, volume_size_in_gb, rule_parameters, collections_to_save, actions=None, ): """Configure the debugging rules using the following classmethods. .. tip:: Use the following ``Rule.sagemaker`` class method for built-in debugging rules or the ``Rule.custom`` class method for custom debugging rules. Do not directly use the :class:`~sagemaker.debugger.Rule` initialization method. """ super(Rule, self).__init__( name, image_uri, instance_type, container_local_output_path, s3_output_path, volume_size_in_gb, rule_parameters, ) self.collection_configs = collections_to_save self.actions = actions @classmethod def sagemaker( cls, base_config, name=None, container_local_output_path=None, s3_output_path=None, other_trials_s3_input_paths=None, rule_parameters=None, collections_to_save=None, actions=None, ): """Initialize a ``Rule`` object for a *built-in* debugging rule. Args: base_config (dict): Required. 
This is the base rule config dictionary returned from the :class:`~sagemaker.debugger.rule_configs` method. For example, ``rule_configs.dead_relu()``. For a full list of built-in rules for debugging, see `List of Debugger Built-in Rules <https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html>`_. name (str): Optional. The name of the debugger rule. If one is not provided, the name of the base_config will be used. container_local_output_path (str): Optional. The local path in the rule processing container. s3_output_path (str): Optional. The location in Amazon S3 to store the output tensors. The default Debugger output path for debugging data is created under the default output path of the :class:`~sagemaker.estimator.Estimator` class. For example, s3://sagemaker-<region>-<12digit_account_id>/<training-job-name>/debug-output/. other_trials_s3_input_paths ([str]): Optional. The Amazon S3 input paths of other trials to use the SimilarAcrossRuns rule. rule_parameters (dict): Optional. A dictionary of parameters for the rule. collections_to_save (:class:`~sagemaker.debugger.CollectionConfig`): Optional. A list of :class:`~sagemaker.debugger.CollectionConfig` objects to be saved. Returns: :class:`~sagemaker.debugger.Rule`: An instance of the built-in rule. **Example of how to create a built-in rule instance:** .. code-block:: python from sagemaker.debugger import Rule, rule_configs built_in_rules = [ Rule.sagemaker(rule_configs.built_in_rule_name_in_pysdk_format_1()), Rule.sagemaker(rule_configs.built_in_rule_name_in_pysdk_format_2()), ... Rule.sagemaker(rule_configs.built_in_rule_name_in_pysdk_format_n()) ] You need to replace the ``built_in_rule_name_in_pysdk_format_*`` with the names of built-in rules. You can find the rule names at `List of Debugger Built-in Rules <https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html>`_. **Example of creating a built-in rule instance with adjusting parameter values:** .. 
code-block:: python from sagemaker.debugger import Rule, rule_configs built_in_rules = [ Rule.sagemaker( base_config=rule_configs.built_in_rule_name_in_pysdk_format(), rule_parameters={ "key": "value" } collections_to_save=[ CollectionConfig( name="tensor_collection_name", parameters={ "key": "value" } ) ] ) ] For more information about setting up the ``rule_parameters`` parameter, see `List of Debugger Built-in Rules <https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html>`_. For more information about setting up the ``collections_to_save`` parameter, see the :class:`~sagemaker.debugger.CollectionConfig` class. """ merged_rule_params = {} if rule_parameters is not None and rule_parameters.get("rule_to_invoke") is not None: raise RuntimeError( """You cannot provide a 'rule_to_invoke' for SageMaker rules. Either remove the rule_to_invoke or use a custom rule. """ ) if actions is not None and not rule_configs.is_valid_action_object(actions): raise RuntimeError("""`actions` must be of type `Action` or `ActionList`!""") if other_trials_s3_input_paths is not None: for index, s3_input_path in enumerate(other_trials_s3_input_paths): merged_rule_params["other_trial_{}".format(str(index))] = s3_input_path default_rule_params = base_config["DebugRuleConfiguration"].get("RuleParameters", {}) merged_rule_params.update(default_rule_params) merged_rule_params.update(rule_parameters or {}) base_config_collections = [] for config in base_config.get("CollectionConfigurations", []): collection_name = None collection_parameters = {} for key, value in config.items(): if key == "CollectionName": collection_name = value if key == "CollectionParameters": collection_parameters = value base_config_collections.append( CollectionConfig(name=collection_name, parameters=collection_parameters) ) return cls( name=name or base_config["DebugRuleConfiguration"].get("RuleConfigurationName"), image_uri="DEFAULT_RULE_EVALUATOR_IMAGE", instance_type=None, 
container_local_output_path=container_local_output_path, s3_output_path=s3_output_path, volume_size_in_gb=None, rule_parameters=merged_rule_params, collections_to_save=collections_to_save or base_config_collections, actions=actions, ) @classmethod def custom( cls, name, image_uri, instance_type, volume_size_in_gb, source=None, rule_to_invoke=None, container_local_output_path=None, s3_output_path=None, other_trials_s3_input_paths=None, rule_parameters=None, collections_to_save=None, actions=None, ): """Initialize a ``Rule`` object for a *custom* debugging rule. You can create a custom rule that analyzes tensors emitted during the training of a model and monitors conditions that are critical for the success of a training job. For more information, see `Create Debugger Custom Rules for Training Job Analysis <https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-custom-rules.html>`_. Args: name (str): Required. The name of the debugger rule. image_uri (str): Required. The URI of the image to be used by the debugger rule. instance_type (str): Required. Type of EC2 instance to use, for example, 'ml.c4.xlarge'. volume_size_in_gb (int): Required. Size in GB of the EBS volume to use for storing data. source (str): Optional. A source file containing a rule to invoke. If provided, you must also provide rule_to_invoke. This can either be an S3 uri or a local path. rule_to_invoke (str): Optional. The name of the rule to invoke within the source. If provided, you must also provide source. container_local_output_path (str): Optional. The local path in the container. s3_output_path (str): Optional. The location in Amazon S3 to store the output tensors. The default Debugger output path for debugging data is created under the default output path of the :class:`~sagemaker.estimator.Estimator` class.
already_processed, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.Packet_Too_Big is not None: showIndent(outfile, level) outfile.write('Packet_Too_Big=%s,\n' % self.Packet_Too_Big) if self.MTU is not None: showIndent(outfile, level) outfile.write('MTU=%s,\n' % quote_python(self.MTU).encode(ExternalEncoding)) def build(self, node): self.buildAttributes(node, node.attrib, []) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Packet_Too_Big': sval_ = child_.text if sval_ in ('true', '1'): ival_ = True elif sval_ in ('false', '0'): ival_ = False else: raise_parse_error(child_, 'requires boolean') ival_ = self.gds_validate_boolean(ival_, node, 'Packet_Too_Big') self.Packet_Too_Big = ival_ elif nodeName_ == 'MTU': MTU_ = child_.text MTU_ = self.gds_validate_string(MTU_, node, 'MTU') self.MTU = MTU_ # end class ICMPv6PacketTooBigType class ICMPv6TimeExceededType(GeneratedsSuper): """Time exceeded error message; ICMP v6 type=3.""" subclass = None superclass = None def __init__(self, Hop_Limit_Exceeded=None, Fragment_Reassem_Time_Exceeded=None): self.Hop_Limit_Exceeded = Hop_Limit_Exceeded self.Fragment_Reassem_Time_Exceeded = Fragment_Reassem_Time_Exceeded def factory(*args_, **kwargs_): if ICMPv6TimeExceededType.subclass: return ICMPv6TimeExceededType.subclass(*args_, **kwargs_) else: return ICMPv6TimeExceededType(*args_, **kwargs_) factory = staticmethod(factory) def get_Hop_Limit_Exceeded(self): return self.Hop_Limit_Exceeded def set_Hop_Limit_Exceeded(self, Hop_Limit_Exceeded): self.Hop_Limit_Exceeded = Hop_Limit_Exceeded def get_Fragment_Reassem_Time_Exceeded(self): return self.Fragment_Reassem_Time_Exceeded def set_Fragment_Reassem_Time_Exceeded(self, Fragment_Reassem_Time_Exceeded): self.Fragment_Reassem_Time_Exceeded = 
Fragment_Reassem_Time_Exceeded def export(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6TimeExceededType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = [] self.exportAttributes(outfile, level, already_processed, namespace_, name_='ICMPv6TimeExceededType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write('/>\n') def exportAttributes(self, outfile, level, already_processed, namespace_='PacketObj:', name_='ICMPv6TimeExceededType'): pass def exportChildren(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6TimeExceededType', fromsubclass_=False): if self.Hop_Limit_Exceeded is not None: self.Hop_Limit_Exceeded.export(outfile, level, namespace_, name_='Hop_Limit_Exceeded') if self.Fragment_Reassem_Time_Exceeded is not None: self.Fragment_Reassem_Time_Exceeded.export(outfile, level, namespace_, name_='Fragment_Reassem_Time_Exceeded') def hasContent_(self): if ( self.Hop_Limit_Exceeded is not None or self.Fragment_Reassem_Time_Exceeded is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='ICMPv6TimeExceededType'): level += 1 self.exportLiteralAttributes(outfile, level, [], name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, already_processed, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.Hop_Limit_Exceeded is not None: showIndent(outfile, level) outfile.write('Hop_Limit_Exceeded=%s,\n' % self.Hop_Limit_Exceeded) if self.Fragment_Reassem_Time_Exceeded is not None: showIndent(outfile, level) outfile.write('Fragment_Reassem_Time_Exceeded=%s,\n' % self.Fragment_Reassem_Time_Exceeded) def build(self, node): self.buildAttributes(node, node.attrib, []) 
for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Hop_Limit_Exceeded': sval_ = child_.text if sval_ in ('true', '1'): ival_ = True elif sval_ in ('false', '0'): ival_ = False else: raise_parse_error(child_, 'requires boolean') ival_ = self.gds_validate_boolean(ival_, node, 'Hop_Limit_Exceeded') self.Hop_Limit_Exceeded = ival_ elif nodeName_ == 'Fragment_Reassem_Time_Exceeded': sval_ = child_.text if sval_ in ('true', '1'): ival_ = True elif sval_ in ('false', '0'): ival_ = False else: raise_parse_error(child_, 'requires boolean') ival_ = self.gds_validate_boolean(ival_, node, 'Fragment_Reassem_Time_Exceeded') self.Fragment_Reassem_Time_Exceeded = ival_ # end class ICMPv6TimeExceededType class ICMPv6ParameterProblemType(GeneratedsSuper): """Parameter problem error message; ICMP v6 type=4.""" subclass = None superclass = None def __init__(self, Erroneous_Header_Field=None, Unrecognized_Next_Header_Type=None, Unrecognized_IPv6_Option=None, Pointer=None): self.Erroneous_Header_Field = Erroneous_Header_Field self.Unrecognized_Next_Header_Type = Unrecognized_Next_Header_Type self.Unrecognized_IPv6_Option = Unrecognized_IPv6_Option self.Pointer = Pointer def factory(*args_, **kwargs_): if ICMPv6ParameterProblemType.subclass: return ICMPv6ParameterProblemType.subclass(*args_, **kwargs_) else: return ICMPv6ParameterProblemType(*args_, **kwargs_) factory = staticmethod(factory) def get_Erroneous_Header_Field(self): return self.Erroneous_Header_Field def set_Erroneous_Header_Field(self, Erroneous_Header_Field): self.Erroneous_Header_Field = Erroneous_Header_Field def get_Unrecognized_Next_Header_Type(self): return self.Unrecognized_Next_Header_Type def set_Unrecognized_Next_Header_Type(self, Unrecognized_Next_Header_Type): self.Unrecognized_Next_Header_Type = 
Unrecognized_Next_Header_Type def get_Unrecognized_IPv6_Option(self): return self.Unrecognized_IPv6_Option def set_Unrecognized_IPv6_Option(self, Unrecognized_IPv6_Option): self.Unrecognized_IPv6_Option = Unrecognized_IPv6_Option def get_Pointer(self): return self.Pointer def set_Pointer(self, Pointer): self.Pointer = Pointer def export(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6ParameterProblemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = [] self.exportAttributes(outfile, level, already_processed, namespace_, name_='ICMPv6ParameterProblemType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write('/>\n') def exportAttributes(self, outfile, level, already_processed, namespace_='PacketObj:', name_='ICMPv6ParameterProblemType'): pass def exportChildren(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6ParameterProblemType', fromsubclass_=False): if self.Erroneous_Header_Field is not None: self.Erroneous_Header_Field.export(outfile, level, namespace_, name_='Erroneous_Header_Field') if self.Unrecognized_Next_Header_Type is not None: self.Unrecognized_Next_Header_Type.export(outfile, level, namespace_, name_='Unrecognized_Next_Header_Type') if self.Unrecognized_IPv6_Option is not None: self.Unrecognized_IPv6_Option.export(outfile, level, namespace_, name_='Unrecognized_IPv6_Option') if self.Pointer is not None: self.Pointer.export(outfile, level, namespace_, name_='Pointer') def hasContent_(self): if ( self.Erroneous_Header_Field is not None or self.Unrecognized_Next_Header_Type is not None or self.Unrecognized_IPv6_Option is not None or self.Pointer is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='ICMPv6ParameterProblemType'): level += 
1 self.exportLiteralAttributes(outfile, level, [], name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, already_processed, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.Erroneous_Header_Field is not None: showIndent(outfile, level) outfile.write('Erroneous_Header_Field=%s,\n' % self.Erroneous_Header_Field) if self.Unrecognized_Next_Header_Type is not None: showIndent(outfile, level) outfile.write('Unrecognized_Next_Header_Type=%s,\n' % self.Unrecognized_Next_Header_Type) if self.Unrecognized_IPv6_Option is not None: showIndent(outfile, level) outfile.write('Unrecognized_IPv6_Option=%s,\n' % self.Unrecognized_IPv6_Option) if self.Pointer is not None: showIndent(outfile, level) outfile.write('Pointer=%s,\n' % quote_python(self.Pointer).encode(ExternalEncoding)) def build(self, node): self.buildAttributes(node, node.attrib, []) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Erroneous_Header_Field': sval_ = child_.text if sval_ in ('true', '1'): ival_ = True elif sval_ in ('false', '0'): ival_ = False else: raise_parse_error(child_, 'requires boolean') ival_ = self.gds_validate_boolean(ival_, node, 'Erroneous_Header_Field') self.Erroneous_Header_Field = ival_ elif nodeName_ == 'Unrecognized_Next_Header_Type': sval_ = child_.text if sval_ in ('true', '1'): ival_ = True elif sval_ in ('false', '0'): ival_ = False else: raise_parse_error(child_, 'requires boolean') ival_ = self.gds_validate_boolean(ival_, node, 'Unrecognized_Next_Header_Type') self.Unrecognized_Next_Header_Type = ival_ elif nodeName_ == 'Unrecognized_IPv6_Option': sval_ = child_.text if sval_ in ('true', '1'): ival_ = True elif sval_ in ('false', '0'): ival_ = False else: 
raise_parse_error(child_, 'requires boolean') ival_ = self.gds_validate_boolean(ival_, node, 'Unrecognized_IPv6_Option') self.Unrecognized_IPv6_Option = ival_ elif nodeName_ == 'Pointer': Pointer_ = child_.text Pointer_ = self.gds_validate_string(Pointer_, node, 'Pointer') self.Pointer = Pointer_ # end class ICMPv6ParameterProblemType class ICMPv6EchoRequestType(GeneratedsSuper): """Echo request informational ICMP v6 message; type=128.""" subclass = None superclass = None def __init__(self, Echo_Request=None, Data=None): self.Echo_Request = Echo_Request self.Data = Data def factory(*args_, **kwargs_): if ICMPv6EchoRequestType.subclass: return ICMPv6EchoRequestType.subclass(*args_, **kwargs_) else: return ICMPv6EchoRequestType(*args_, **kwargs_) factory = staticmethod(factory) def get_Echo_Request(self): return self.Echo_Request def set_Echo_Request(self, Echo_Request): self.Echo_Request = Echo_Request def get_Data(self): return self.Data def set_Data(self, Data): self.Data = Data def export(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6EchoRequestType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = [] self.exportAttributes(outfile, level, already_processed, namespace_, name_='ICMPv6EchoRequestType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write('/>\n') def exportAttributes(self, outfile, level, already_processed, namespace_='PacketObj:', name_='ICMPv6EchoRequestType'): pass def exportChildren(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6EchoRequestType', fromsubclass_=False): if self.Echo_Request is not None: self.Echo_Request.export(outfile, level, namespace_, name_='Echo_Request') if self.Data is not None: self.Data.export(outfile, level, namespace_, name_='Data') def 
hasContent_(self): if ( self.Echo_Request is not None or self.Data is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='ICMPv6EchoRequestType'): level += 1 self.exportLiteralAttributes(outfile, level, [], name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, already_processed, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.Echo_Request is not None: showIndent(outfile, level) outfile.write('Echo_Request=%s,\n' % self.Echo_Request) if self.Data is not None: showIndent(outfile, level) outfile.write('Data=%s,\n' % quote_python(self.Data).encode(ExternalEncoding)) def build(self, node): self.buildAttributes(node, node.attrib, []) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Echo_Request': sval_ = child_.text if sval_ in ('true', '1'): ival_ = True elif sval_ in ('false', '0'): ival_ = False else: raise_parse_error(child_, 'requires boolean') ival_ = self.gds_validate_boolean(ival_, node, 'Echo_Request') self.Echo_Request = ival_ elif nodeName_ == 'Data': Data_ = child_.text Data_ = self.gds_validate_string(Data_, node, 'Data') self.Data = Data_ # end class ICMPv6EchoRequestType class ICMPv6EchoReplyType(GeneratedsSuper): """Echo reply informational ICMP v6 message; type=129.""" subclass = None superclass = None def __init__(self, Echo_Reply=None, Data=None): self.Echo_Reply = Echo_Reply self.Data = Data def factory(*args_, **kwargs_): if ICMPv6EchoReplyType.subclass: return ICMPv6EchoReplyType.subclass(*args_, **kwargs_) else: return ICMPv6EchoReplyType(*args_, **kwargs_) factory = staticmethod(factory) def get_Echo_Reply(self): return self.Echo_Reply def set_Echo_Reply(self, Echo_Reply): self.Echo_Reply = 
Echo_Reply def get_Data(self): return self.Data def set_Data(self, Data): self.Data = Data def export(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6EchoReplyType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = [] self.exportAttributes(outfile, level, already_processed, namespace_, name_='ICMPv6EchoReplyType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('</%s%s>\n' % (namespace_, name_)) else: outfile.write('/>\n') def exportAttributes(self, outfile, level, already_processed, namespace_='PacketObj:', name_='ICMPv6EchoReplyType'): pass def exportChildren(self, outfile, level, namespace_='PacketObj:', name_='ICMPv6EchoReplyType', fromsubclass_=False): if self.Echo_Reply is not None: self.Echo_Reply.export(outfile, level, namespace_, name_='Echo_Reply') if self.Data is not None: self.Data.export(outfile, level, namespace_, name_='Data') def hasContent_(self):
: spec.get('optional'), 'name' : name, 'label' : spec.desc, 'options' : attributes.options, 'value' : attributes.value if attributes.multiple else [attributes.value] }) elif type(attributes) is form_builder.TextField: attribute_inputs.append({ 'type' : 'text', 'name' : name, 'label' : spec.desc, 'value' : attributes.value, 'readonly' : spec.get('readonly') }) if data.missing_meta(): message = 'Required metadata values are missing. Some of these values may not be editable by the user. Selecting "Auto-detect" will attempt to fix these values.' status = 'warning' # datatype conversion conversion_options = [(convert_name, convert_id) for convert_id, convert_name in converters_collection] conversion_disable = len(conversion_options) == 0 conversion_inputs = [{ 'type' : 'select', 'name' : 'target_type', 'label' : 'Name', 'help' : 'This will create a new dataset with the contents of this dataset converted to a new format.', 'options' : conversion_options }] # datatype changeing datatype_options = [(ext_name, ext_id) for ext_id, ext_name in ldatatypes] datatype_disable = len(datatype_options) == 0 datatype_inputs = [{ 'type' : 'select', 'name' : 'datatype', 'label' : 'New Type', 'options' : datatype_options, 'value' : [ext_id for ext_id, ext_name in ldatatypes if ext_id == data.ext], 'help' : 'This will change the datatype of the existing dataset but not modify its contents. 
Use this if Galaxy has incorrectly guessed the type of your dataset.', }] # permissions permission_disable = True permission_inputs = list() if trans.user: if data.dataset.actions: in_roles = {} for action, roles in trans.app.security_agent.get_permissions(data.dataset).items(): in_roles[action.action] = [trans.security.encode_id(role.id) for role in roles] for index, action in trans.app.model.Dataset.permitted_actions.items(): if action == trans.app.security_agent.permitted_actions.DATASET_ACCESS: help_text = action.description + '<br/>NOTE: Users must have every role associated with this dataset in order to access it.' else: help_text = action.description permission_inputs.append({ 'type' : 'select', 'multiple' : True, 'optional' : True, 'name' : index, 'label' : action.action, 'help' : help_text, 'options' : all_roles, 'value' : in_roles.get(action.action), 'readonly' : not can_manage_dataset }) permission_disable = not can_manage_dataset else: permission_inputs.append({ 'name' : 'access_public', 'type' : 'hidden', 'label' : 'This dataset is accessible by everyone (it is public).', 'readonly' : True }) else: permission_inputs.append({ 'name' : 'no_access', 'type' : 'hidden', 'label' : 'Permissions not available (not logged in).', 'readonly' : True }) return { 'display_name' : data.get_display_name(), 'message' : message, 'status' : status, 'dataset_id' : dataset_id, 'attribute_inputs' : attribute_inputs, 'conversion_inputs' : conversion_inputs, 'conversion_disable': conversion_disable, 'datatype_inputs' : datatype_inputs, 'datatype_disable' : datatype_disable, 'permission_inputs' : permission_inputs, 'permission_disable': permission_disable } else: return self.message_exception(trans, 'You do not have permission to edit this dataset\'s ( id: %s ) information.' 
    # (continuation/tail of the preceding method, truncated in this view)
                                % str(dataset_id))

    @web.expose_api_anonymous
    def set_edit(self, trans, payload=None, **kwd):
        """Allows user to modify parameters of an HDA.

        Dispatches on payload['operation'] ('attributes', 'datatype',
        'autodetect', 'conversion', 'permission'); returns a status/message
        dict on success or a message_exception response on failure.
        """
        def __ok_to_edit_metadata(dataset_id):
            # prevent modifying metadata when dataset is queued or running as input/output
            # This code could be more efficient, i.e. by using mappers, but to prevent slowing down loading a History panel, we'll leave the code here for now
            for job_to_dataset_association in trans.sa_session.query(
                    self.app.model.JobToInputDatasetAssociation).filter_by(dataset_id=dataset_id).all() \
                    + trans.sa_session.query(self.app.model.JobToOutputDatasetAssociation).filter_by(dataset_id=dataset_id).all():
                if job_to_dataset_association.job.state not in [job_to_dataset_association.job.states.OK, job_to_dataset_association.job.states.ERROR, job_to_dataset_association.job.states.DELETED]:
                    return False
            return True
        message = None
        status = 'success'
        dataset_id = payload.get('dataset_id')
        operation = payload.get('operation')
        if dataset_id is not None:
            id = self.decode_id(dataset_id)
            data = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get(id)
        if operation == 'attributes':
            # The user clicked the Save button on the 'Edit Attributes' form
            data.name = payload.get('name')
            data.info = payload.get('info')
            if __ok_to_edit_metadata(data.id):
                # The following for loop will save all metadata_spec items
                for name, spec in data.datatype.metadata_spec.items():
                    if not spec.get('readonly'):
                        setattr(data.metadata, name, spec.unwrap(payload.get(name) or None))
                data.datatype.after_setting_metadata(data)
                # Sanitize annotation before adding it.
                if payload.get('annotation'):
                    annotation = sanitize_html(payload.get('annotation'), 'utf-8', 'text/html')
                    self.add_item_annotation(trans.sa_session, trans.get_user(), data, annotation)
                # if setting metadata previously failed and all required elements have now been set, clear the failed state.
                if data._state == trans.model.Dataset.states.FAILED_METADATA and not data.missing_meta():
                    data._state = None
                # NOTE(review): by operator precedence this parses as
                # ('Attributes updated. %s' % message) if message else 'Attributes updated.'
                # and `message` is always None at this point, so the else branch
                # is always taken — confirm the ternary placement is intended.
                message = 'Attributes updated. %s' % message if message else 'Attributes updated.'
            else:
                message = 'Attributes updated, but metadata could not be changed because this dataset is currently being used as input or output. You must cancel or wait for these jobs to complete before changing metadata.'
                status = 'warning'
            trans.sa_session.flush()
        elif operation == 'datatype':
            # The user clicked the Save button on the 'Change data type' form
            datatype = payload.get('datatype')
            if data.datatype.allow_datatype_change and trans.app.datatypes_registry.get_datatype_by_extension(datatype).allow_datatype_change:
                # prevent modifying datatype when dataset is queued or running as input/output
                if not __ok_to_edit_metadata(data.id):
                    return self.message_exception(trans, 'This dataset is currently being used as input or output. You cannot change datatype until the jobs have completed or you have canceled them.')
                else:
                    trans.app.datatypes_registry.change_datatype(data, datatype)
                    trans.sa_session.flush()
                    trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute(trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={'input1': data},
                                                                                               overwrite=False)  # overwrite is False as per existing behavior
                    message = 'Changed the type to %s.' % datatype
            else:
                return self.message_exception(trans, 'You are unable to change datatypes in this manner. Changing %s to %s is not allowed.' % (data.extension, datatype))
        elif operation == 'autodetect':
            # The user clicked the Auto-detect button on the 'Edit Attributes' form
            # prevent modifying metadata when dataset is queued or running as input/output
            if not __ok_to_edit_metadata(data.id):
                return self.message_exception(trans, 'This dataset is currently being used as input or output. You cannot change metadata until the jobs have completed or you have canceled them.')
            else:
                for name, spec in data.metadata.spec.items():
                    # We need to be careful about the attributes we are resetting
                    if name not in ['name', 'info', 'dbkey', 'base_name']:
                        if spec.get('default'):
                            setattr(data.metadata, name, spec.unwrap(spec.get('default')))
                message = 'Attributes have been queued to be updated.'
                trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute(trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={'input1': data})
                trans.sa_session.flush()
        elif operation == 'conversion':
            target_type = payload.get('target_type')
            if target_type:
                try:
                    message = data.datatype.convert_dataset(trans, data, target_type)
                except Exception as e:
                    return self.message_exception(trans, str(e))
        elif operation == 'permission':
            # Requires a logged-in user who can manage the underlying dataset.
            if not trans.user:
                return self.message_exception(trans, 'You must be logged in if you want to change permissions.')
            if trans.app.security_agent.can_manage_dataset(trans.get_current_user_roles(), data.dataset):
                payload_permissions = {}
                for action in trans.app.model.Dataset.permitted_actions.keys():
                    payload_permissions[action] = [trans.security.decode_id(role_id) for role_id in util.listify(payload.get(action))]
                # The user associated the DATASET_ACCESS permission on the dataset with 1 or more roles. We
                # need to ensure that they did not associate roles that would cause accessibility problems.
                permissions, in_roles, error, message = \
                    trans.app.security_agent.derive_roles_from_access(trans, data.dataset.id, 'root', **payload_permissions)
                if error:
                    # Keep the original role associations for the DATASET_ACCESS permission on the dataset.
                    access_action = trans.app.security_agent.get_action(trans.app.security_agent.permitted_actions.DATASET_ACCESS.action)
                    permissions[access_action] = data.dataset.get_access_roles(trans)
                    trans.sa_session.refresh(data.dataset)
                    return self.message_exception(trans, message)
                else:
                    error = trans.app.security_agent.set_all_dataset_permissions(data.dataset, permissions)
                    trans.sa_session.refresh(data.dataset)
                    if error:
                        return self.message_exception(trans, error)
                    else:
                        message = 'Your changes completed successfully.'
            else:
                return self.message_exception(trans, 'You are not authorized to change this dataset\'s permissions.')
        else:
            return self.message_exception(trans, 'Invalid operation identifier (%s).' % operation)
        return {'status': status, 'message': sanitize_text(message)}

    @web.expose
    @web.json
    @web.require_login("see all available datasets")
    def list(self, trans, **kwargs):
        """List all available datasets"""
        status = message = None

        if 'operation' in kwargs:
            operation = kwargs['operation'].lower()
            hda_ids = util.listify(kwargs.get('id', []))

            # Display no message by default
            status, message = None, None

            # Load the hdas and ensure they all belong to the current user
            hdas = []
            for encoded_hda_id in hda_ids:
                hda_id = self.decode_id(encoded_hda_id)
                hda = trans.sa_session.query(model.HistoryDatasetAssociation).filter_by(id=hda_id).first()
                if hda:
                    # Ensure history is owned by current user
                    if hda.history.user_id is not None and trans.user:
                        assert trans.user.id == hda.history.user_id, "HistoryDatasetAssocation does not belong to current user"
                    hdas.append(hda)
                else:
                    log.warning("Invalid history_dataset_association id '%r' passed to list", hda_id)

            if hdas:
                if operation == "switch" or operation == "switch_history":
                    # Switch to a history that the HDA resides in.

                    # Convert hda to histories.
                    histories = []
                    for hda in hdas:
                        histories.append(hda.history)

                    # Use history controller to switch the history. TODO: is this reasonable?
                    status, message = trans.webapp.controllers['history']._list_switch(trans, histories)

                    # Current history changed, refresh history frame; if switching to a dataset, set hda seek.
                    kwargs['refresh_frames'] = ['history']
                    if operation == "switch":
                        hda_ids = [trans.security.encode_id(hda.id) for hda in hdas]
                        # TODO: Highlighting does not work, has to be revisited
                        trans.template_context['seek_hda_ids'] = hda_ids
                elif operation == "copy to current history":
                    #
                    # Copy datasets to the current history.
                    #
                    target_histories = [trans.get_history()]

                    # Reverse HDAs so that they appear in the history in the order they are provided.
                    hda_ids.reverse()
                    status, message = self._copy_datasets(trans, hda_ids, target_histories)

                    # Current history changed, refresh history frame.
                    kwargs['refresh_frames'] = ['history']

        # Render the list view
        return self.stored_list_grid(trans, status=status, message=message, **kwargs)

    @web.expose
    def imp(self, trans, dataset_id=None, **kwd):
        # (method truncated in this view)
        """
        Import another user's
import argparse
import glob
import os
import random
import logging
import numpy as np
import math
from tqdm import tqdm
import time
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM
from transformers import DataCollatorForLanguageModeling
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as ptl
from pytorch_lightning.logging.test_tube import TestTubeLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateLogger

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# DONE: reproduce RoBERTa numbers on the Longformer corpus
# DONE: testing ddp single machine
# DONE: testing ddp multiple machines
# DONE: testing resume from checkpoint
# TODO: try on a TPU-pod
# TODO: run on beaker on ai2-server1/2

# XLA (TPU) support is optional; flag its availability instead of failing at import.
try:
    import torch_xla.core.xla_model as xm
except ImportError:
    XLA_AVAILABLE = False
else:
    XLA_AVAILABLE = True


class MMapTextDataset(Dataset):
    """Dataset over a flat np.memmap of uint16 token ids, sliced into fixed-size chunks."""

    def __init__(self, mmap_filename, chunk_size, bos_token_id, eos_token_id):
        # `chunk_size - 2` to reserve space for <s> and </s>
        self.num_instances = np.memmap(mmap_filename, mode='r', dtype=np.uint16).shape[0] // (chunk_size - 2)
        # defer loading the token_ids memmap until after the first __getitem__ call.
        # when spawning new processes for ddp, there is a hard limit in python < 3.8 that
        # pickle files need to be < 4GB. By waiting until after the first __getitem__ we
        # don't have to pickle the memmap
        self.token_ids = None
        self._mmap_filename = mmap_filename
        self._chunk_size = chunk_size
        self._bos_token_id = bos_token_id
        self._eos_token_id = eos_token_id

    def __len__(self):
        return self.num_instances

    def __getitem__(self, i):
        # Lazily open the memmap on first access (see __init__ note on pickling).
        if self.token_ids is None:
            self.token_ids = np.memmap(self._mmap_filename, mode='r', dtype=np.uint16)
        from_index = i * (self._chunk_size - 2)
        to_index = (i + 1) * (self._chunk_size - 2)
        data = np.concatenate(([self._bos_token_id], self.token_ids[from_index:to_index], [self._eos_token_id]))
        return torch.tensor(data, dtype=torch.long)

    # ========================= preprocessing code ========================= #
    @staticmethod
    def _process_file(full_fname):
        "Step 1: tokenize an input text file then save token ids into `np.memmap` shards of size `args.shard_size`"
        # NOTE(review): relies on a module-level `args` set elsewhere (not visible
        # in this chunk) and on MMapTextDataset.tokenizer set by raw_text_to_mmap.
        fname = full_fname.split('/')[-1]
        log_filename = f'{args.input_dir}/logs-{args.shard_size}/{fname}.log'
        if os.path.isfile(log_filename):
            logging.info(f'Skipping {full_fname} ...')
            return  # log file already exists. Skip current file.
        logging.info(f'Processing {full_fname} ...')
        with open(full_fname, 'r') as fin:
            token_list = []
            shard_count = 0
            tokens_count = 0

            def _write_shard():
                # Flush the accumulated token ids of the current shard to disk.
                if len(token_list) == 0:
                    return
                if token_list[-1] != MMapTextDataset.tokenizer.sep_token_id:  # handle a rare case
                    token_list.append(MMapTextDataset.tokenizer.sep_token_id)
                shared_filename = f'{args.input_dir}/shards-{args.shard_size}/{fname}-{shard_count}.bin'
                logging.info(f'Writing {len(token_list)} tokens to shared {shared_filename}')
                fp = np.memmap(shared_filename, dtype=np.uint16, mode='w+', shape=len(token_list))
                fp[:] = token_list[:]
                del fp  # flush and close file

            for line in tqdm(fin):
                line = line.strip()
                if line == '':  # drop empty lines
                    continue
                tokens = MMapTextDataset.tokenizer.encode(line, add_special_tokens=False)  # `__getitem__` adds special tokens
                token_list.extend(tokens)
                if len(token_list) > args.shard_size:
                    _write_shard()
                    tokens_count += len(token_list)
                    token_list = []
                    shard_count += 1
                else:
                    # separate documents/lines with the sep token inside a shard
                    token_list.append(MMapTextDataset.tokenizer.sep_token_id)
            _write_shard()
            tokens_count += len(token_list)
        with open(log_filename, 'w') as f:
            f.write(f'Generated {tokens_count} tokens in {shard_count + 1} shards')

    @staticmethod
    def _combine_shards(output_fname, shards_list):
        "Step 2: combining memmap shards into one `train.bin` or `val.bin` file"
        total_size = 0
        for filename in shards_list:
            total_size += np.memmap(filename, mode='r', dtype=np.uint16).shape[0]
        logging.info(f'Writing {total_size} tokens to {output_fname}')
        all_token_ids = np.empty(total_size, dtype=np.uint16)
        last_token_index = 0
        for filename in tqdm(shards_list):
            shared = np.memmap(filename, mode='r', dtype=np.uint16)
            all_token_ids[last_token_index:last_token_index+len(shared)] = shared[:]
            last_token_index += len(shared)
        fp = np.memmap(output_fname, dtype=np.uint16, mode='w+', shape=total_size)
        fp[:] = all_token_ids[:]
        del fp

    @staticmethod
    def raw_text_to_mmap(args):
        """This is the main preprocessing function. It processes all the text files in `args.input_dir` and
        outputs two np.memmap files, one for training and one for validation with ratio `args.train_dev_split`.
        Processing each input file involves tokenizing it, sharding it into shards of size `args.shard_size`,
        then writing each shard as an np.memmap file. The stream of tokens in the memmap file represents documents
        separated with `tokenizer.sep_token`. In `__getitem__`, the `tokenizer.bos_token` and `tokenizer.eos_token`
        are added. The reason for not adding them at preprocessing time is to allow different sequence lengths
        later on. Notice that this is the "FULL-SENTENCES" setting in the RoBERTa paper, Table2.
        """
        MMapTextDataset.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)
        assert len(MMapTextDataset.tokenizer) < 65535  # will use uint16 to store token ids
        all_files = glob.glob(f'{args.input_dir}/*.txt')

        if os.path.exists(f'{args.input_dir}/cache/train.bin') and os.path.exists(f'{args.input_dir}/cache/val.bin'):
            logger.info("Cache already exists. Remove the cache directory to regenerate")
            return
        try:
            os.mkdir(f'{args.input_dir}/cache/')
        except FileExistsError:
            pass
        try:
            os.mkdir(f'{args.input_dir}/shards-{args.shard_size}/')
        except FileExistsError:
            pass
        try:
            os.mkdir(f'{args.input_dir}/logs-{args.shard_size}/')  # log progrss to be able to resume
        except FileExistsError:
            pass

        # STEP1: tokenizing and saving to shards
        if args.num_preprocessing_workers > 1:
            from multiprocessing.pool import Pool
            with Pool(args.num_preprocessing_workers) as p:
                list(tqdm(p.imap(MMapTextDataset._process_file, all_files), total=len(all_files)))
        else:
            [MMapTextDataset._process_file(f) for f in tqdm(all_files)]

        # STEP2: shuffling shards and combining them into train.bin and val.bin files
        all_shards = glob.glob(f'{args.input_dir}/shards-{args.shard_size}/*.bin')
        random.shuffle(all_shards)  # shuffling based on shards not individual lines
        val_shards_count = int(args.train_dev_split * len(all_shards))
        val_shards = all_shards[:val_shards_count]
        train_shards = all_shards[val_shards_count:]
        # TODO: if MMapTextDataset._combining_shards is very slow for large files, it can be skipped but we nned to
        # update the dataset to read from multiple shards directly
        MMapTextDataset._combine_shards(f'{args.input_dir}/cache/val.bin', val_shards)
        MMapTextDataset._combine_shards(f'{args.input_dir}/cache/train.bin', train_shards)

        del MMapTextDataset.tokenizer
    # ========================= end preprocessing code ========================= #


class Pretrainer(ptl.LightningModule):
    """PyTorch Lightning module for masked-LM pretraining over MMapTextDataset."""

    def __init__(self, hparams):
        super().__init__()

        self.args = hparams
        self.hparams = self.args

        self.model = AutoModelForMaskedLM.from_pretrained(args.model)
        self.config = self.model.config
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
        self.pad_token_id = tokenizer.pad_token_id
        self.eos_token_id = tokenizer.eos_token_id
        self.bos_token_id = tokenizer.bos_token_id

        logger.info(f'Creating dataset cache from dir {self.args.input_dir}. This could be slow the first time.')
        MMapTextDataset.raw_text_to_mmap(args)

        # TODO: add support for other objective functions (whole word masking, BART objectives)
        self.data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm=True, mlm_probability=self.args.mlm_prob
        )
        # 0 means "timer not started"; see training_step/validation_step.
        self.start_time = 0

    def to(self, *args, **kwargs):
        # Sanity-check that moving devices does not drop/duplicate parameters.
        param_count_before_to = len(list(self.parameters()))
        super().to(*args, **kwargs)
        if self.trainer.use_tpu:
            # need to re-tie the weights after moving to XLA!
            self.model.tie_weights()
            if 'roberta' in self.args.model:
                self.model.lm_head.bias = self.model.lm_head.decoder.bias
        param_count_after_to = len(list(self.parameters()))
        assert param_count_before_to == param_count_after_to

    def forward(self, input_ids=None, labels=None):
        # get the padding mask - 1 for NOT masked, 0 for MASKED/PAD
        attention_mask = (input_ids != self.pad_token_id).int()

        # output is loss, prediction_scores, hidden_states
        output = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        return output[0]  # loss

    def training_step(self, batch, batch_nb):
        # One optimization step: MLM loss plus throughput/memory metrics for TensorBoard.
        loss = self(**batch)
        input_ids = batch['input_ids']
        tensorboard_logs = {
            'input_size': input_ids.numel(),
            'mlm_loss': loss,
            'mlm_bpc': loss/math.log(2),
            'mlm_perplexity': torch.exp(loss),
            'token_per_step': input_ids.numel() * self.args.grad_accum * self.trainer.world_size,
        }
        if self.start_time != 0:
            elapsed_time = time.time() - self.start_time
            tensorboard_logs['second_per_batch'] = elapsed_time
        self.start_time = time.time()
        if self.on_gpu:
            tensorboard_logs['memory'] = torch.cuda.memory_allocated(loss.device) / 1024 ** 3
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_nb):
        # TODO: log how long evaluation takes
        self.start_time = 0  # reset training_step timer
        loss = self(**batch)
        tensorboard_logs = {
            'val_mlm_loss': loss.detach(),
        }
        return {'val_loss': tensorboard_logs["val_mlm_loss"], 'log': tensorboard_logs}

    def validation_epoch_end(self, outputs):
        # Average per-step losses, then reduce across workers for ddp/TPU.
        avg_loss = torch.stack([x['log']['val_mlm_loss'] for x in outputs if 'val_mlm_loss' in x['log']]).mean()
        if self.use_ddp:
            # TODO: PTL is already doing this. Is it still needed here?
            # https://github.com/PyTorchLightning/pytorch-lightning/blob/0.8.5/pytorch_lightning/metrics/converters.py#L251
            torch.distributed.all_reduce(avg_loss, op=torch.distributed.ReduceOp.SUM)
            avg_loss /= torch.distributed.get_world_size()
        elif self.use_tpu:
            avg_loss = xm.all_reduce(xm.REDUCE_SUM, avg_loss) / xm.xrt_world_size()

        logs = {'val_mlm_loss': avg_loss}
        return {'log': logs, 'progress_bar': logs, "val_loss": avg_loss}

    def configure_optimizers(self):
        # Standard transformer recipe: no weight decay on biases and LayerNorm weights.
        no_decay = ["bias", "LayerNorm.weight"]

        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [p for n, p in self.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=self.args.train_steps
        )
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]

    def _get_loader(self, fname, is_train):
        # Build a DataLoader with the right sampler for the current backend (ddp/TPU/local).
        dataset = MMapTextDataset(fname, chunk_size=self.args.seqlen,
                                  bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id)

        # TODO: consider `replace_sampler_ddp=True` and removing the following if statement
        if self.trainer.use_ddp:
            sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=is_train)
            shuffle = False
        elif self.trainer.use_tpu:
            sampler = torch.utils.data.distributed.DistributedSampler(
                dataset,
                num_replicas=xm.xrt_world_size(),
                rank=xm.get_ordinal(),
                shuffle=is_train,
            )
            shuffle = False
        else:
            sampler = None
            shuffle = is_train

        loader = DataLoader(
                dataset,
                batch_size=self.args.batch_size,
                shuffle=shuffle,
                sampler=sampler,
                num_workers=self.args.num_workers,
                collate_fn=self.data_collator,
                drop_last=is_train,
        )
        return loader

    def train_dataloader(self):
        return self._get_loader(f'{self.args.input_dir}/cache/train.bin', True)

    def val_dataloader(self):
        return self._get_loader(f'{self.args.input_dir}/cache/val.bin', False)

    def grad_norm(self, norm_type):
        # Override PTL `grad_norm` function to only return `total_grad_norm` instead norms of individual params
        # TODO: grad_norm reporting needs to take fp16 loss scale into account
        parameters = [p for p in self.parameters() if p.grad is not None]
        device = parameters[0].device
        total_norm = torch.zeros([], device=device if parameters else None)
        norm_type = float(norm_type)
        for p in parameters:
            param_norm = p.grad.data.pow(norm_type).sum()
            total_norm.add_(param_norm)
        total_norm = (total_norm ** (1.0 / norm_type))
        return {'total_grad_norm': total_norm}

    @staticmethod
    def add_args(parser):
        # (method truncated in this view)
        parser.add_argument("--seed", type=int, default=3)

        # Dataset. Some of these params are only useful when generating the dataset cache
        parser.add_argument("--input_dir", type=str, default='/net/nfs.corp/s2-research/beltagy/longformer/data/')
        # Used only at the preprocessing phase
        parser.add_argument("--train_dev_split", type=float, default=0.05)
        parser.add_argument("--shard_size", type=int, default=1024 ** 3 // 4)  # 250MB
        parser.add_argument("--num_preprocessing_workers", type=int, default=1)
        # Used only at the training phase
        parser.add_argument("--seqlen", type=int, default=512)
        parser.add_argument("--mlm_prob", type=float, default=0.15)

        # HF model loading
        parser.add_argument("--tokenizer", type=str, default='roberta-base')
        parser.add_argument("--model", type=str,
                  # (tail of the preceding set_version method, truncated in this view)
                  "Cannot set version on non-existent client %s" % client
            self.logger.error(msg)
            raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)
        self.versions[client] = version
        self.clients_xml.write()

    def resolve_client(self, addresspair, cleanup_cache=False):
        """Lookup address locally or in DNS to get a hostname."""
        if addresspair in self.session_cache:
            # client _was_ cached, so there can be some expired
            # entries. we need to clean them up to avoid potentially
            # infinite memory swell
            cache_ttl = 90
            if cleanup_cache:
                # remove entries for this client's IP address with
                # _any_ port numbers - perhaps a priority queue could
                # be faster?
                curtime = time.time()
                for addrpair in list(self.session_cache.keys()):
                    if addresspair[0] == addrpair[0]:
                        (stamp, _) = self.session_cache[addrpair]
                        if curtime - stamp > cache_ttl:
                            del self.session_cache[addrpair]
            # return the cached data
            try:
                (stamp, uuid) = self.session_cache[addresspair]
                if time.time() - stamp < cache_ttl:
                    return self.session_cache[addresspair][1]
            except KeyError:
                # we cleaned all cached data for this client in cleanup_cache
                pass
        address = addresspair[0]
        if address in self.addresses:
            if len(self.addresses[address]) != 1:
                err = "Address %s has multiple reverse assignments; a uuid must be used" % address
                self.logger.error(err)
                raise Bcfg2.Server.Plugin.MetadataConsistencyError(err)
            return self.addresses[address][0]
        try:
            cname = socket.gethostbyaddr(address)[0].lower()
            if cname in self.aliases:
                return self.aliases[cname]
            return cname
        except socket.herror:
            warning = "address resolution error for %s" % address
            self.logger.warning(warning)
            raise Bcfg2.Server.Plugin.MetadataConsistencyError(warning)

    def _merge_groups(self, client, groups, categories=None):
        """ set group membership based on the contents of groups.xml
        and initial group membership of this client. Returns a tuple of
        (allgroups, categories)"""
        numgroups = -1  # force one initial pass
        if categories is None:
            categories = dict()
        # Iterate to a fixed point: predicates may add/remove groups that
        # in turn enable other predicates.
        while numgroups != len(groups):
            numgroups = len(groups)
            for predicate, group in self.group_membership.items():
                if group.name in groups:
                    continue
                if predicate(client, groups, categories):
                    groups.add(group.name)
                    if group.category:
                        categories[group.category] = group.name
            for predicate, group in self.negated_groups.items():
                if group.name not in groups:
                    continue
                if predicate(client, groups, categories):
                    groups.remove(group.name)
                    if group.category:
                        del categories[group.category]
        return (groups, categories)

    def get_initial_metadata(self, client):
        """Return the metadata for a given client."""
        if False in list(self.states.values()):
            raise Bcfg2.Server.Plugin.MetadataRuntimeError("Metadata has not been read yet")
        client = client.lower()
        if client in self.aliases:
            client = self.aliases[client]

        groups = set()
        categories = dict()
        profile = None

        # Unknown client: assign it the default (or first configured) profile group.
        if client not in self.clients:
            pgroup = None
            if client in self.clientgroups:
                pgroup = self.clientgroups[client][0]
            elif self.default:
                pgroup = self.default

            if pgroup:
                self.set_profile(client, pgroup, (None, None), force=True)
                groups.add(pgroup)
                category = self.groups[pgroup].category
                if category:
                    categories[category] = pgroup
                if (pgroup in self.groups and
                        self.groups[pgroup].is_profile):
                    profile = pgroup
            else:
                msg = "Cannot add new client %s; no default group set" % client
                self.logger.error(msg)
                raise Bcfg2.Server.Plugin.MetadataConsistencyError(msg)

        if client in self.clientgroups:
            for cgroup in self.clientgroups[client]:
                if cgroup in groups:
                    continue
                if cgroup not in self.groups:
                    self.groups[cgroup] = MetadataGroup(cgroup)
                category = self.groups[cgroup].category
                # Only one group per category may apply to a client.
                if category and category in categories:
                    self.logger.warning("%s: Group %s suppressed by "
                                        "category %s; %s already a member "
                                        "of %s" %
                                        (self.name, cgroup, category,
                                         client, categories[category]))
                    continue
                if category:
                    categories[category] = cgroup
                groups.add(cgroup)
                # favor client groups for setting profile
                if not profile and self.groups[cgroup].is_profile:
                    profile = cgroup

        groups, categories = self._merge_groups(client, groups,
                                                categories=categories)

        bundles = set()
        for group in groups:
            try:
                bundles.update(self.groups[group].bundles)
            except KeyError:
                self.logger.warning("%s: %s is a member of undefined group %s" %
                                    (self.name, client, group))

        aliases = self.raliases.get(client, set())
        addresses = self.raddresses.get(client, set())
        version = self.versions.get(client, None)
        if client in self.passwords:
            password = self.passwords[client]
        else:
            password = None
        uuids = [item for item, value in list(self.uuid.items()) if value == client]
        if uuids:
            uuid = uuids[0]
        else:
            uuid = None
        if not profile:
            # one last ditch attempt at setting the profile
            profiles = [g for g in groups
                        if g in self.groups and self.groups[g].is_profile]
            if len(profiles) >= 1:
                profile = profiles[0]

        return ClientMetadata(client, profile, groups, bundles, aliases,
                              addresses, categories, uuid, password, version,
                              self.query)

    def get_all_group_names(self):
        # Union of groups defined in groups.xml, predicate groups, and client assignments.
        all_groups = set()
        all_groups.update(self.groups.keys())
        all_groups.update([g.name for g in self.group_membership.values()])
        all_groups.update([g.name for g in self.negated_groups.values()])
        for grp in self.clientgroups.values():
            all_groups.update(grp)
        return all_groups

    def get_all_groups_in_category(self, category):
        return set([g.name for g in self.groups.values()
                    if g.category == category])

    def get_client_names_by_profiles(self, profiles):
        rv = []
        for client in list(self.clients):
            mdata = self.get_initial_metadata(client)
            if mdata.profile in profiles:
                rv.append(client)
        return rv

    def get_client_names_by_groups(self, groups):
        mdata = [self.core.build_metadata(client) for client in list(self.clients)]
        return [md.hostname for md in mdata if md.groups.issuperset(groups)]

    def get_client_names_by_bundles(self, bundles):
        mdata = [self.core.build_metadata(client)
                 for client in list(self.clients.keys())]
        return [md.hostname for md in mdata if md.bundles.issuperset(bundles)]

    def merge_additional_groups(self, imd, groups):
        # Merge connector-supplied groups into an existing ClientMetadata in place.
        for group in groups:
            if group in imd.groups:
                continue
            if group in self.groups and self.groups[group].category:
                category = self.groups[group].category
                if self.groups[group].category in imd.categories:
                    self.logger.warning("%s: Group %s suppressed by category "
                                        "%s; %s already a member of %s" %
                                        (self.name, group, category,
                                         imd.hostname,
                                         imd.categories[category]))
                    continue
                # NOTE(review): key/value look inverted — get_initial_metadata
                # records categories as categories[category] = group, but this
                # writes imd.categories[group] = category. Confirm intent.
                imd.categories[group] = category
            imd.groups.add(group)
        self._merge_groups(imd.hostname, imd.groups,
                           categories=imd.categories)
        for group in imd.groups:
            if group in self.groups:
                imd.bundles.update(self.groups[group].bundles)

    def merge_additional_data(self, imd, source, data):
        # Attach connector data under its source name, once per source.
        if not hasattr(imd, source):
            setattr(imd, source, data)
            imd.connectors.append(source)

    def validate_client_address(self, client, addresspair):
        """Check address against client."""
        address = addresspair[0]
        if client in self.floating:
            self.debug_log("Client %s is floating" % client)
            return True
        if address in self.addresses:
            if client in self.addresses[address]:
                self.debug_log("Client %s matches address %s" % (client, address))
                return True
            else:
                self.logger.error("Got request for non-float client %s from %s" %
                                  (client, address))
                return False
        resolved = self.resolve_client(addresspair)
        if resolved.lower() == client.lower():
            return True
        else:
            self.logger.error("Got request for %s from incorrect address %s" %
                              (client, address))
            self.logger.error("Resolved to %s" % resolved)
            return False

    def AuthenticateConnection(self, cert, user, password, address):
        """This function checks auth creds.

        Identification is by certificate CN, by address (for 'root'), or by
        uuid; password checks then apply per the client's secure/auth mode.
        Returns True when authentication succeeds, False otherwise.
        """
        if cert:
            id_method = 'cert'
            certinfo = dict([x[0] for x in cert['subject']])
            # look at cert.cN
            client = certinfo['commonName']
            self.debug_log("Got cN %s; using as client name" % client)
            auth_type = self.auth.get(client, 'cert+password')
        elif user == 'root':
            id_method = 'address'
            try:
                client = self.resolve_client(address)
            except Bcfg2.Server.Plugin.MetadataConsistencyError:
                err = sys.exc_info()[1]
                self.logger.error("Client %s failed to resolve: %s" % (address[0], err))
                return False
        else:
            id_method = 'uuid'
            # user maps to client
            if user not in self.uuid:
                client = user
                self.uuid[user] = user
            else:
                client = self.uuid[user]

        # we have the client name
        self.debug_log("Authenticating client %s" % client)

        # next we validate the address
        if id_method == 'uuid':
            addr_is_valid = True
        else:
            addr_is_valid = self.validate_client_address(client, address)

        if not addr_is_valid:
            return False

        if id_method == 'cert' and auth_type != 'cert+password':
            # remember the cert-derived client name for this connection
            if client in self.floating:
                self.session_cache[address] = (time.time(), client)
            # we are done if cert+password not required
            return True

        if client not in self.passwords:
            if client in self.secure:
                self.logger.error("Client %s in secure mode but has no password" % address[0])
                return False
            if password != self.password:
                self.logger.error("Client %s used incorrect global password" % address[0])
                return False
        if client not in self.secure:
            if client in self.passwords:
                plist = [self.password, self.passwords[client]]
            else:
                plist = [self.password]
            if password not in plist:
                self.logger.error("Client %s failed to use either allowed "
                                  "password" % address[0])
                return False
        else:
            # client in secure mode and has a client password
            if password != self.passwords[client]:
                self.logger.error("Client %s failed to use client password in "
                                  "secure mode" % address[0])
                return False
        # populate the session cache
        if user != 'root':
            self.session_cache[address] = (time.time(), client)
        return True

    def process_statistics(self, meta, _):
        """Hook into statistics interface to toggle clients in bootstrap mode."""
        client = meta.hostname
        if client in self.auth and self.auth[client] == 'bootstrap':
            self.update_client(client, dict(auth='cert'))

    def viz(self, hosts, bundles, key, only_client, colors):
        """Admin mode viz support."""
        # (method truncated in this view)
        if only_client:
            clientmeta = self.core.build_metadata(only_client)

        def include_client(client):
            return not only_client or client != only_client

        def include_bundle(bundle):
            return not only_client or bundle in clientmeta.bundles

        def include_group(group):
            return not only_client or group in clientmeta.groups

        groups_tree = lxml.etree.parse(os.path.join(self.data, "groups.xml"),
                                       parser=Bcfg2.Server.XMLParser)
        try:
            groups_tree.xinclude()
        except lxml.etree.XIncludeError:
            self.logger.error("Failed to process XInclude for file %s: %s" %
                              (dest, sys.exc_info()[1]))
        groups = groups_tree.getroot()
        categories = {'default': 'grey83'}
        viz_str = []
        egroups = groups.findall("Group") + groups.findall('.//Groups/Group')
        for group in egroups:
            if not group.get('category') in categories:
                categories[group.get('category')] = colors.pop()
            group.set('color', categories[group.get('category')])
        if None in categories:
            del categories[None]
        if hosts:
            instances = {}
            for client in list(self.clients):
                if include_client(client):
                    continue
                if client in self.clientgroups:
                    groups = self.clientgroups[client]
                elif self.default:
                    groups = [self.default]
                else:
                    continue
                for group in groups:
                    try:
                        instances[group].append(client)
                    except KeyError:
                        instances[group] = [client]
            for group, clist
        operation: Can be "and", "or".
        :return:
        """
        # (tail of the preceding logical-NOT codegen method; its opening
        # lines are truncated in this view)
        code = "; NOT EXPR\n"

        target_expr = node.getExpr()
        target_type = self.getLLVMType(target_expr.getExpressionType())
        code_target, reg_target = self.astNodeToLLVM(target_expr)
        # extra load not necessary when dealing with Identifiers
        if not isinstance(target_expr, IdentifierExpr):
            load, reg_target = self.loadVariable(reg_target, target_type, False)
            code_target += load
        code += code_target

        # convert back to C-type?
        strongest_type = self.getStrongestType(target_type, target_type)
        llvm_type = ""
        # Emit a compare-against-zero for the operand's type; result is always i1.
        if strongest_type == "float":
            code_target, reg_target = self.convertToFloat(reg_target, target_type)
            code += code_target
            code += "%{} = fcmp oeq float %{}, 0.0\n".format(self.cur_reg, reg_target)
            llvm_type = "i1"
        elif strongest_type == "int":
            code_target, reg_target = self.convertToInt(reg_target, target_type)
            code += code_target
            code += "%{} = icmp eq i32 %{}, 0\n".format(self.cur_reg, reg_target)
            llvm_type = "i1"
        elif strongest_type == "char":
            code_target, reg_target = self.convertToChar(reg_target, target_type)
            code += code_target
            code += "%{} = icmp eq i8 %{}, 0\n".format(self.cur_reg, reg_target)
            llvm_type = "i1"
        elif strongest_type == "bool":
            code_target, reg_target = self.convertToBool(reg_target, target_type)
            code += code_target
            code += "%{} = icmp eq i1 %{}, 0\n".format(self.cur_reg, reg_target)
            llvm_type = "i1"
        else:
            raise Exception("Invalid return type from getStrongestType '{}'".format(strongest_type))
        self.cur_reg += 1

        code += self.allocate(self.cur_reg, llvm_type, False)
        code += self.storeVariable(self.cur_reg, self.cur_reg - 1, llvm_type, False)
        self.cur_reg += 1
        return code, self.cur_reg - 1

    def getStrongestType(self, a, b):
        """Return the wider of two types ('float' > 'int' > 'char' > 'bool').

        Accepts both C-style names and their LLVM spellings (i32/i8/i1).
        """
        INTREP = ["int", "i32"]
        CHARREP = ["char", "i8"]
        BOOLREP = ["bool", "i1"]

        if a == "float" or b == "float":
            return "float"
        elif a in INTREP or b in INTREP:
            return "int"
        elif a in CHARREP or b in CHARREP:
            return "char"
        elif a in BOOLREP or b in BOOLREP:
            return "bool"
        else:
            raise Exception("Invalid call to getStrongestType for '{}' and '{}'".format(a, b))

    def convertToFloat(self, reg, type):
        """Emit LLVM IR converting `reg` of `type` to float; returns (code, result_reg)."""
        code = ""
        if "float" in type:
            return code, reg
        elif "char" in type:
            # widen i8 -> i32 first, then convert to float
            code += "%{} = sext i8 %{} to i32\n".format(self.cur_reg, reg)
            code += "%{} = sitofp i32 %{} to float\n".format(self.cur_reg + 1, self.cur_reg)
            self.cur_reg += 2
            return code, self.cur_reg - 1
        elif "int" in type or "i32" in type:
            code += "%{} = sitofp i32 %{} to float\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        elif "bool" in type or "i1" in type:
            code += "%{} = sext i1 %{} to i32\n".format(self.cur_reg, reg)
            code += "%{} = sitofp i32 %{} to float\n".format(self.cur_reg + 1, self.cur_reg)
            self.cur_reg += 2
            return code, self.cur_reg - 1
        else:
            raise Exception("Converting from '{}' to float for register '{}' is not defined.".format(type, reg))

    def convertToInt(self, reg, type):
        """Emit LLVM IR converting `reg` of `type` to i32; returns (code, result_reg)."""
        code = ""
        if "int" in type or "i32" in type:
            return code, reg
        elif "i1" in type:
            code += "%{} = zext i1 %{} to i32\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        elif "i8" in type:
            code += "%{} = sext i8 %{} to i32\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        elif "float" in type:
            code += "%{} = fptosi float %{} to i32\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        else:
            raise Exception("Converting from '{}' to int for register '{}' is not defined.".format(type, reg))

    def convertToDouble(self, reg, from_type):
        """Emit LLVM IR widening a float register to double; returns (code, result_reg)."""
        code = ""
        if "float" in from_type:
            code += "%{} = fpext float %{} to double\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        else:
            # NOTE(review): this interpolates the builtin `type` (and says
            # "to float") instead of `from_type`/"to double" — likely a
            # copy-paste slip in the error message; confirm and fix.
            raise Exception("Converting from '{}' to float for register '{}' is not defined.".format(type, reg))

    def convertToChar(self, reg, from_type):
        """
        Returns LLVM IR code for converting the specified register of the specified type
        to the "char"/"i8" type.
        """
        code = ""
        if "char" in from_type or "i8" in from_type:
            return code, reg
        elif "i1" in from_type:
            code += "%{} = zext i1 %{} to i8\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        elif "i32" in from_type:
            # NOTE(review): this is the only emitted instruction without a
            # trailing "\n" — subsequent IR would be appended onto the same
            # line; almost certainly a bug, confirm and add the newline.
            code += "%{} = trunc i32 %{} to i8".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        elif "float" in from_type:
            code += "%{} = fptosi float %{} to i8\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        else:
            raise Exception("Converting from '{}' to char for register '{}' is not defined.".format(from_type, reg))

    def convertToBool(self, reg, from_type):
        """
        Returns LLVM IR code for converting the specified register of the specified type
        to the "bool"/"i1" type.
        """
        code = ""
        if "bool" in from_type or "i1" in from_type:
            return code, reg
        elif "int" in from_type or "i32" in from_type:
            code += "%{} = icmp ne i32 %{}, 0\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        elif "char" in from_type or "i8" in from_type:
            code += "%{} = icmp ne i8 %{}, 0\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        elif "float" in from_type:
            code += "%{} = fcmp one float %{}, 0.0\n".format(self.cur_reg, reg)
            self.cur_reg += 1
            return code, self.cur_reg - 1
        else:
            raise Exception("Converting from '{}' to bool for register '{}' is not defined.".format(from_type, reg))

    def convertToType(self, reg, old_type, new_type):
        """Dispatch to the specific convertToX helper based on `new_type`."""
        if "int" in new_type or "i32" in new_type:
            return self.convertToInt(reg, old_type)
        elif "char" in new_type or "i8" in new_type:
            return self.convertToChar(reg, old_type)
        elif "bool" in new_type or "i1" in new_type:
            return self.convertToBool(reg, old_type)
        elif "float" in new_type:
            return self.convertToFloat(reg, old_type)
        elif "double" in new_type:
            return self.convertToDouble(reg, old_type)
        else:
            raise Exception(
                "Conversion from type '{}' to type '{}' in register '{}' is not possible.".format(old_type, new_type, reg))

    def convertConstant(self, new_type, old_type, value):
        """Convert a compile-time constant `value` from `old_type` to `new_type`.

        Types are LLVM spellings ('i1', 'i8', 'i32', 'float'); float results
        go through self.floatToHex for emission.
        """
        # Normalize the textual constant into a Python value first.
        if old_type == "i8":
            value = value[1:-1]  # strip the surrounding quotes of a char literal
            value = ord(value)
        elif old_type == "i32" and new_type != old_type:
            value = int(value)
        elif old_type == "float" and new_type != old_type:
            value = float(value)
        if old_type == "i1" and new_type != old_type:
            # value = False if value == "false" else value
            # value = True if value == "true" else value
            value = 0 if (value == "false" or value is False) else value
            value = 1 if (value == "true" or value is True) else value

        if new_type == old_type:
            # convert True to 1, False to 0, etc.
            value = 0 if (value == "false" or value is False) else value
            value = 1 if (value == "true" or value is True) else value
            return value
        elif new_type == "i1":
            return bool(value)
        # character
        elif old_type == 'i8' and new_type == "float":
            return self.floatToHex(float(ord(value)))
        elif old_type == "i8" and new_type == "i32":
            return ord(value)
        # integer
        elif old_type == "i32" and new_type == "float":
            return self.floatToHex(float(int(value)))
        elif old_type == "i32" and new_type == "i8":
            return int(value)
        # bool
        elif old_type == "i1" and new_type == "i8":
            return chr(int(value))
        elif old_type == "i1" and new_type == "i32":
            return int(bool(value))
        elif old_type == "i1" and new_type == "float":
            return float(bool(value))
        elif old_type == "float" and new_type == "i32":
            return int(round(value))
        elif old_type == "float":
            # float -> other: round-trip through i32 first
            return self.convertConstant(new_type, "i32", int(round(value)))
        else:
            return value

    def returnStatement(self):
        """Emit a bare `ret void`; returns (code, result_reg)."""
        self.cur_reg += 1
        return "ret void\n", self.cur_reg - 1

    def returnWithExprStatement(self, node):
        # (method truncated in this view)
        expr_type = self.getLLVMType(node.getExpression().getExpressionType())
        function_return_type = self.getLLVMType(node.getFunctionType())
        if isinstance(node.getExpression(), ConstantExpr) and not isinstance(node.getExpression(), StringConstantExpr):
            value = self.convertConstant(function_return_type, expr_type, node.getExpression().getValue())
            value = self.floatToHex(value) if function_return_type == "float" else value
            code =
"ret {} {}\n".format(function_return_type, value) self.cur_reg += 1 return code, self.cur_reg - 1 code, register = self.astNodeToLLVM(node.getExpression()) # extra load needed when not an identifier if not isinstance(node.getExpression(), IdentifierExpr): new_code, register = self.loadVariable(register, expr_type, False) code += new_code # type conversion if function_return_type != expr_type: convert, register = self.convertToType(register, expr_type, function_return_type) code += convert code += "ret {} %{}\n".format(function_return_type, register) self.cur_reg += 1 return code, self.cur_reg - 1 def comparisonExpr(self, node, int_op, float_op): code = "" type_left = self.getLLVMType(node.getLeft().getExpressionType()) type_right = self.getLLVMType(node.getRight().getExpressionType()) code_left, reg_left = self.astNodeToLLVM(node.getLeft()) if not isinstance(node.getLeft(), IdentifierExpr): load, reg_left = self.loadVariable(reg_left, type_left, False) code_left += load code_right, reg_right = self.astNodeToLLVM(node.getRight()) if not isinstance(node.getRight(), IdentifierExpr): load, reg_right = self.loadVariable(reg_right, type_right, False) code_right += load code += code_left code += code_right strongest_type = self.getStrongestType(type_left, type_right) if strongest_type == "float": code_left, reg_left = self.convertToFloat(reg_left, type_left) code_right, reg_right = self.convertToFloat(reg_right, type_right) code += code_left code += code_right code += "%{} = fcmp {} float %{}, %{}\n".format(self.cur_reg, float_op, reg_left, reg_right) else: code_left, reg_left = self.convertToInt(reg_left, type_left) code_right, reg_right
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # __ # \ /\ / _ / / _ _ # \/ \/ (- / /) () /__ (/ /- /- / (- /- # # author: <NAME> # contact: <EMAIL> import os import re import time import requests from lxml import etree from PIL import Image, ImageDraw, ImageFont # Layout # Material style with colored date bar LAYOUT_ONE = {'date_size': 145, 'post_size': 52, 'date_site': (20, -25), 'text_site': (40, 335), 'post_spacing': 26, 'date_spacing': -5, 'line_sum': 8, 'line_interval': 87, 'LAYOUT_ONE': None} # Material style with colored background LAYOUT_TWO = {'date_size': 145, 'post_size': 52, 'date_site': (20, -25), 'text_site': (40, 335), 'post_spacing': 26, 'date_spacing': -5, 'line_sum': 8, 'line_interval': 87, 'LAYOUT_TWO': None} # Regular style LAYOUT_THREE = {'date_size': 50, 'post_size': 50, 'date_site': (65, 60), 'text_site': (65, 190), 'post_spacing': 26, 'line_sum': 10, 'line_interval': 85} # (65, 200) 28 4/25 # Color scheme # Recommend LAYOUT_ONE, LAYOUT_TWO, LAYOUT_THREE LIGHT_GREY = {'bg': '#9E9E9E', 'text': '#FFFFFF'} RED = {'bg': '#EF9A9A', 'text': '#FFFFFF'} PINK = {'bg': '#F48FB1', 'text': '#FFFFFF'} PURPLE = {'bg': '#CE93D8', 'text': '#FFFFFF'} BLUE = {'bg': '#90CAF9', 'text': '#FFFFFF'} CYAN = {'bg': '#80DEEA', 'text': '#FFFFFF'} TEAL = {'bg': '#80CBC4', 'text': '#FFFFFF'} GREEN = {'bg': '#A5D6A7', 'text': '#FFFFFF'} ORANGE = {'bg': '#FFAB91', 'text': '#FFFFFF'} BROWN = {'bg': '#BCAAA4', 'text': '#FFFFFF'} # Recommend LAYOUT_TWO, LAYOUT_THREE YELLOW = {'bg': '#FFFAE9', 'text': '#584A3C'} WHITE = {'bg': '#FFFFFF', 'text': '#212121'} DARK_GREY = {'bg': '#424242', 'text': '#FFFFFF'} BLACK = {'bg': '#000000', 'text': '#FFFFFF'} CH_PUN = ['”', '’', ',', '。', '、', ':', ';', '!', '?', ')', '】', '}', '》', '」'] EN_PUN = ['"', "'", ',', '.', ':', ';', '!', '?', ')', ']', '}', '>'] LAYOUT = LAYOUT_ONE COLOR_SCHEME = LIGHT_GREY FONT = 'NotoSansCJKsc-DemiLight.otf' # NotoSansCJKsc-DemiLight.otf NotoSansCJKsc-Light.otf NotoSansMonoCJKsc-Regular.otf HEADER = 
{'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36', 'cookie': ''}
URL = ['']
EASY_READ = True

page_counter = 0
session = requests.session()


def print_log(*args, name='log_' + time.strftime('%y_%m'), cout=True, log=True):
    """Print *args and/or append them to a monthly log file.

    Args:
        *args: strings to print/log (joined without separator).
        name: log file stem.  NOTE: the default is evaluated once at import
            time, so every call in one run shares the same month-stamped file.
        cout: echo to stdout when True.
        log: append to the log file when True.
    """
    if cout:
        print(''.join(args))
    if log:
        # Bug fix: the path was built with a hard-coded Windows '\\'
        # separator ('.' + '\\%s.txt'); os.path.join is portable.
        with open(os.path.join(os.path.abspath('.'), '%s.txt' % name), 'a', errors='ignore') as f:
            f.write('\n' + ''.join(args))


def timer(sec):
    """Sleep for `sec` seconds, logging the pause first.

    Args:
        sec: number of seconds to wait.
    """
    print_log('\n-----rest for %s sec-----' % sec)
    time.sleep(sec)


def cut_line(string, point, reverse=False):
    """Return the part of `string` before the first `point` character,
    or the part after it when `reverse` is True.

    If `point` does not occur, the whole string (resp. '') is returned.
    All call sites pass a single character, which str.partition handles
    identically to the original per-character scan.
    """
    front, _, behind = string.partition(point)
    return behind if reverse else front


def url_interpreter(url):
    """Normalize a weibo.com / weibo.cn profile URL to the mobile
    http://weibo.cn/... form used by the scraper.

    Raises:
        ValueError: the URL is not a recognizable profile address.
        ConnectionError: the oid could not be scraped (cookie expired).
    """
    if 'weibo.com' in url:
        if url.startswith('http://weibo.com/'):
            pass
        elif url.startswith('weibo.com/'):
            url = 'http://' + url
        else:
            raise ValueError('URL错误,请检查输入的URL是否为用户主页地址')
        source = session.get(url, headers=HEADER).text
        try:
            # The desktop page embeds the numeric user id as oid']='...'.
            oid = re.findall(r"oid']='(.*?)'", source, re.S)[0]
        except IndexError:
            raise ConnectionError('Cookie可能已过期,请更新后再次尝试连接')
        url = 'http://weibo.cn/u/%s' % oid
    elif 'weibo.cn' in url:
        if url.startswith('http://weibo.cn/'):
            pass
        elif url.startswith('weibo.cn/'):
            url = 'http://' + url
        else:
            raise ValueError('URL错误,请检查输入的URL是否为用户主页地址')
        if '?' in url:
            # Drop any query string.
            url = cut_line(url, '?')
    else:
        raise ValueError('URL错误,请检查输入的URL是否为用户主页地址')
    return url


def get_info(weibo_url):
    """Fetch the target user's display name and total page count.

    Args:
        weibo_url: target weibo user's homepage (weibo.cn form).

    Returns:
        (url_target, weibo_name, page_sum)

    Raises on every known non-profile landing page (login page, empty
    cookie, rate-limit jail, own home page).
    """
    url_target = weibo_url + '?filter=1&page=1'
    html = session.get(url_target, headers=HEADER).content
    target_info = etree.HTML(html)
    weibo_name = target_info.xpath('/html/head/title/text()')[0]  # page title carries the user name
    if weibo_name[-3:] == '的微博':
        weibo_name = weibo_name[:-3]
    elif weibo_name == '新浪通行证':  # bounced to the login page
        raise ConnectionError('Cookie可能已过期,请更新后再次尝试连接')
    elif weibo_name == '微博':
        raise ConnectionError('Cookie不能为空,请输入Cookie')
    elif weibo_name == '微博广场':
        raise ConnectionRefusedError('抱歉,你已被关进小黑屋,请等待数小时后再次尝试登陆')
    elif weibo_name == '我的首页':
        raise ReferenceError('请输入用户主页地址')
    else:
        raise SystemError('请检查目标用户主页地址和cookie')
    try:
        page_sum = target_info.xpath('//*[@id="pagelist"]/form/div/input[1]/@value')[0]  # total page count
    except IndexError:
        page_sum = 1  # single-page profiles have no pager element
    print_log('\nTarget: %s' % weibo_name)
    return url_target, weibo_name, page_sum


def make_directory(weibo_name):
    """Create ./weibo/<weibo_name>/ (idempotent) and publish it via the
    module-level `folder_path` global.

    Args:
        weibo_name: weibo name of your target user.
    """
    global folder_path
    folder_path = os.path.join(os.path.abspath('weibo'), weibo_name)
    # os.makedirs(exist_ok=True) replaces the original nested
    # mkdir/except-FileExistsError pairs with identical effect.
    os.makedirs(folder_path, exist_ok=True)


def get_date(post_info):
    """get the date when target post was sent

    Args:
        post_info: details about when target post was sent

    Returns:
        post_date, post_date_layout
    """
    if '月' in post_info[0:3]:  # 今年的微博 'xx月xx日 xx:xx 来自xxxx'
        post_date = time.strftime('%y-') + post_info.replace('月', '-').replace('日', '')[0:11]  # FIXME: 一小时内的微博小概率产生一分钟的误差
    elif '前' in post_info[0:5]:  # 一小时内的微博 'xx分钟前 来自xxxx'
        post_info = cut_line(post_info, '分')
        minute = time.localtime()[4] - int(post_info)  # 现在22:05,45分钟前是负数
        # type(post_info) = 
_ElementUnicodeResult if minute < 0: minute += 60 hour = int(time.strftime('%H')) - 1 post_date = time.strftime('%y-%m-%d ') + str(hour) + ':' + str(minute) else: post_date = time.strftime('%y-%m-%d %H:') + str(minute) elif '今天' in post_info[0:2]: # 今天的微博 '今天 xx:xx 来自xxxx' post_date = time.strftime('%y-%m-%d ') + post_info[3:8] else: # 今年之前的微博 'xxxx-xx-xx xx:xx:xx 来自xxxx' post_date = post_info[2:16] post_date_layout = time.strptime(post_date, '%y-%m-%d %H:%M') post_date_layout = time.strftime('%B %d %H:%M', post_date_layout) post_date = post_date.replace('-', '').replace(':', '').replace(' ', '_') return post_date, post_date_layout def traversal_weibo(url_target, weibo_name, page_sum): """traversal whole weibo Args: url_target: page to traversal page_sum: sum of pages weibo_name: weibo name of your target user """ global page_counter for i in range(1, int(page_sum) + 1): # search in all pages range(1, int(page_sum) + 1) url = url_target.replace('page=1', 'page=%s' % i) html = session.get(url, headers=HEADER).content # 获取页面源代码 time.sleep(1) # 等待页面加载 target = etree.HTML(html) page_counter += 1 post_in_page = target.xpath('//*[starts-with(@id, "M_")]/@id') for post_id in post_in_page: post = target.xpath('//*[@id="%s"]/div[1]/span[@class="ctt"]' % post_id)[0].xpath('string(.)') if '全文' in post[-2:]: whole_post_url = 'http://weibo.cn/comment/' + post_id[2:] whole_post = session.get(whole_post_url, headers=HEADER).content whole_post_tmp = etree.HTML(whole_post) post = whole_post_tmp.xpath('//*[@id="M_"]/div[1]/span[@class="ctt"]')[0].xpath('string(.)')[1:] div_sum = target.xpath('//*[@id="%s"]/div' % post_id) if len(div_sum) == 1: # 没图的情况 post_info = target.xpath('//*[@id="%s"]/div[1]/span[@class="ct"]' % post_id)[0].xpath('string(.)') elif len(div_sum) == 2: # 有图的情况 post_info = target.xpath('//*[@id="%s"]/div[2]/span[@class="ct"]' % post_id)[0].xpath('string(.)') # 发布的日期 post_date, post_date_layout = get_date(post_info) try: # 如果使用IDLE的话,不能输出Non-BMP字符 print_log('\n%s 
(page %s of %s)' % (post_info, i, page_sum)) except UnicodeEncodeError: post_info = cut_line(post_info, '来') print_log(post_info, ' (page %s of %s)' % (i, page_sum)) print_log(post, cout=False) post_name = weibo_name + '_' + post_date make_pic(post, post_date_layout, post_name, weibo_name) if page_counter % 10 == 0: timer(10) def count_width(i, post): if FONT == 'NotoSansCJKsc-DemiLight.otf': width_one_one = ['105', '124'] width_one_two = ['39', '44', '46', '58', '59', '73', '106', '108'] width_one_three = ['33', '102'] width_one_four = ['40', '41', '45', '91', '93', '123', '125'] width_one_five = ['116'] width_one_six = ['47', '92', '114'] width_one_seven = ['34', '42', '63', '115'] width_one_eight = ['120', '122'] width_two = ['99'] width_two_one = ['89', '118', '121'] width_two_two = ['35', '36', '43', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '60', '61', '62', '70', '74', '76', '94', '101', '107', '126'] width_two_three = ['86', '88', '95', '97', '103', ] width_three = ['65', '69', '83', '84', '90', '96', '104', '110', '111', '117'] width_three_one = ['67', '75', '80', '82', '98', '100', '112', '113'] width_three_two = ['38', '66', '68', '71'] width_three_three = ['72', '78', '79', '81', '85'] width_four = ['77', '119'] width_four_one = ['87'] width_four_two = ['37', '64', '109'] if str(ord(post[i])) in width_one_one: width = 0.52 elif str(ord(post[i])) in width_one_two: width = 0.56 elif str(ord(post[i])) in width_one_three: width = 0.65 elif str(ord(post[i])) in width_one_four: width = 0.7 elif str(ord(post[i])) in width_one_five: width = 0.75 elif str(ord(post[i])) in width_one_six: width = 0.8 elif str(ord(post[i])) in width_one_seven: width = 0.92 elif str(ord(post[i])) in width_one_eight: width = 0.95 elif str(ord(post[i])) in width_two: width = 1 elif str(ord(post[i])) in width_two_one: width = 1.05 elif str(ord(post[i])) in width_two_two: width = 1.1 elif str(ord(post[i])) in width_two_three: width = 1.15 elif str(ord(post[i])) in 
width_three: width = 1.2 elif str(ord(post[i])) in width_three_one: width = 1.3 elif str(ord(post[i])) in width_three_two: width = 1.4 elif str(ord(post[i])) in width_three_three: width = 1.5 elif str(ord(post[i])) in width_four: width = 1.65 elif str(ord(post[i])) in width_four_one: width = 1.8 elif str(ord(post[i])) in width_four_two: width = 1.9 else: width = 2 elif FONT == 'NotoSansCJKsc-Light.otf': width_one_one = ['39', '44', '46', '58', '59', '105', '106', '108', '124'] width_one_two = ['73'] width_one_three = ['33', '102'] width_one_four = ['40', '41', '91', '93', '123', '125'] width_one_five = ['45'] width_one_six = ['114', '116'] width_one_seven = ['47', '92'] width_one_eight = ['34'] width_one_nine = ['42', '63', '115', '120'] width_one_ten = ['118', '122'] width_two = ['89', '99', '121'] width_two_one = ['74', '76', '107'] width_two_two = ['35', '36', '43', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '60', '61', '62', '70', '88', '94', '97', '101', '103', '126'] width_two_three = ['86', '95']
same key, user root: PASSED", tid) # for user www-data output5 = runr(self, 'lamuc www-data xxx', tid) # print(output5) if output5 == '0': log_red("Verify that lamuc process generated a unique password, user www-data: FAILED", tid) else: log_green("Verify that lamuc process generated a unique password, user www-data: PASSED", tid) output6 = runr(self, 'lamuc www-data xxx', tid) # print(output6) if output6 != output5: log_red("Verify that lamuc process generated the same unique password in under 10 seconds, user www-data: FAILED", tid) else: log_green("Verify that lamuc process generated the same unique password in under 10 seconds, user www-data: PASSED", tid) output7 = runr(self, 'lamuc www-data xxy', tid) # print(output7) if output7 == output6: log_red("Verify that lamuc process generated a different unique password for a different key, user www-data: FAILED", tid) else: log_green("Verify that lamuc process generated a different unique password for a different key, user www-data: PASSED", tid) wait(10) output8 = runr(self, 'lamuc www-data xxx', tid) # print(output8) if output8 == output6: log_red("Verify that lamuc process generated a different password after 10 seconds for same key, user www-data: FAILED", tid) else: log_green("Verify that lamuc process generated a different password after 10 seconds for same key, user www-data: PASSED", tid) @unittest.skipIf(ve == 0, "Vehicle Emulator(Virtual Box) not available") def test_REFP_031_IPSEC(self): '''Verify if IPSEC tunnel can be configured successfully''' tid = 'REFP_031' print('[Test Case ID ]: %s' % tid) print('[Test Case Name ]: %s' % inspect.stack()[0].function) print('[Title ]: Verify if IPSEC tunnel can be configured successfully') print('[Product Requirement ]: EINST_011') print('[Development Task ]: CONLAREINS-25') print('[Test Automation Task ]: CONLAREINS-143') log_blue('[================================================================================================================]') # ssh = self.ssh # 
handle output = runr(self, 'setkey -D', tid) if "No SAD entries." in output: log_green("Verify that IPSEC tunnel is not operational already: PASSED", tid) else: log_red("Verify that IPSEC tunnel is not operational already : FAILED", tid) output = runr(self, 'ping -c 2 192.168.3.11 >/dev/null; echo $?', tid) if output != '0': log_green("Verify that the Virtual Box(Vehicle Simulator) is not accessible without the IPSEC tunnel: PASSED", tid) else: log_red("Verify that the Virtual Box(Vehicle Simulator) is not accessible without the IPSEC tunnel: FAILED", tid) output = runr(self, 'setkey -f /autonet/etc/ipsec-tools.conf; echo $?', tid) if output == '0': log_green("Verify that IPSEC tunnel started successfully: PASSED", tid) else: log_red("Verify that IPSEC tunnel started successfully: FAILED", tid) output = runr(self, 'setkey -D', tid) if "192.168.3.11 192.168.127.12" and "192.168.127.12 192.168.3.11" in output: log_green("Verify that IPSEC tunnel is running: PASSED", tid) else: log_red("Verify that IPSEC tunnel is running: FAILED", tid) output = runr(self, 'ping -c 2 192.168.3.11 >/dev/null; echo $?', tid) if output == '0': log_green("Verify that the IPSEC tunnel is configured and running successfully: PASSED", tid) else: log_red("Verify that the IPSEC tunnel is configured and running successfully: FAILED", tid) output = runr(self, 'setkey -F && setkey -FP >/dev/null; echo $?', tid) if output == '0': log_green("Verify that the IPSEC tunnel is stopped successfully: PASSED", tid) else: log_red("Verify that the IPSEC tunnel is stopped successfully: FAILED", tid) output = runr(self, 'ping -c 2 192.168.3.11 >/dev/null; echo $?', tid) if output != '0': log_green("Verify that the IPSEC tunnel is no more operational: PASSED", tid) else: log_red("Verify that the IPSEC tunnel is no more operational: FAILED", tid) @unittest.skip("systemd-analyze not available") def test_REFP_032_Boot_Time(self): '''Verify if Reference Platform boots under 3 seconds''' tid = 'REFP_032' 
print('[Test Case ID ]: %s' % tid) print('[Test Case Name ]: %s' % inspect.stack()[0].function) print('[Title ]: Verify if Reference Platform boots under 3 seconds') print('[Product Requirement ]: EINST_034') print('[Development Task ]: CONLAREINS-70') print('[Test Automation Task ]: CONLAREINS-184') log_blue('[================================================================================================================]') # ssh = self.ssh # handle output = runr(self, 'systemd-analyze', tid) total_boot_time = 0 for m in re.finditer('=', output): start = m.start() + 2 end = output.find('s', start) s = output[start:end] total_boot_time = float(s) # print(total_boot_time) if total_boot_time <= 3: log_green("Verify that reference Platforms bootup time (%.2f seconds) is under 3 seconds: PASSED" % total_boot_time, tid) else: log_red("Verify that reference Platforms bootup time (%.2f seconds) is under 3 seconds: FAILED" % total_boot_time, tid) def test_REFP_033_System_Size(self): '''Verify if System Size is less than 128M''' tid = 'REFP_033' print('[Test Case ID ]: %s' % tid) print('[Test Case Name ]: %s' % inspect.stack()[0].function) print('[Title ]: Verify if System Size is less than 128M') print('[Product Requirement ]: EINST_032') print('[Development Task ]: CONLAREINS-71') print('[Test Automation Task ]: CONLAREINS-226') log_blue('[================================================================================================================]') # ssh = self.ssh # handle # fixing CONLAREINS-574 # Collect the storage Available output = runr(self, "df -h --output='source,used,target'|grep root", tid) root_size = float(output[-8:-3]) # print(root_size) print("Memory used by root: %.1f" % root_size) # Collect the storage Available output = runr(self, "df -h --output='source,used,target'|grep user", tid) user_size = float(output[-12:-7]) print("Memory used by user: %.1f" % user_size) # Collect the storage Available output = runr(self, "df -h 
--output='source,used,target'|grep autonet", tid) autonet_size = float(output[-15:-10]) print("Memory used by autonet: %.1f" % autonet_size) # Collect the storage Available output = runr(self, "df -h --output='source,used,target'|grep proprietary", tid) prop_size = float(output[-19:-14]) print("Memory used by proprietary: %.1f" % prop_size) # output = runr(self, 'systemd-analyze', tid) total_system_size = root_size + user_size + autonet_size + prop_size if total_system_size <= 128: log_green("Verify that total System Size(%.1f) is under 128M: PASSED" % total_system_size, tid) else: log_red("Verify that total System Size(%.1f) is under 128M: FAILED" % total_system_size, tid) @unittest.skipIf(ve == 0, "Vehicle Emulator not available") def test_REFP_034_Vehicle_Emulator(self): '''Verify if Vehicle Emulator is installed and is operational''' tid = 'REFP_034' print('[Test Case ID ]: %s' % tid) print('[Test Case Name ]: %s' % inspect.stack()[0].function) print('[Title ]: Verify if Vehicle Emulator is installed and is operational') print('[Product Requirement ]: *') print('[Development Task ]: CONLAREINS-66') print('[Test Automation Task ]: CONLAREINS-182') log_blue('[================================================================================================================]') ssh = self.ssh # handle global original_stderr output = runr(self, 'ping -c 2 192.168.3.11 >/dev/null; echo $?', tid) if output == '0': log_green("Verify that the vehicle emulator is available: PASSED", tid) else: log_red("Verify that the vehicle emulator is available: FAILED", tid) warnings.simplefilter("ignore", category=PendingDeprecationWarning) warnings.simplefilter("ignore", category=ResourceWarning) sshve = paramiko.SSHClient() # handle sshve.set_missing_host_key_policy(paramiko.AutoAddPolicy()) while True: # writing next two lines to suppress the console error "Error reading SSH protocol banner", which is displayed at the time or board is "rebooting" original_stderr = sys.stderr 
sys.stderr = NullDevice() try: # print('test2') # wait(2) sshve.connect("192.168.3.11", username="lear-test1", password="<PASSWORD>", look_for_keys=False, allow_agent=False, banner_timeout=None, auth_timeout=None) ssh_time = (time.time() - start_time) break except paramiko.ssh_exception.socket.error as e: # print('\nATTENTION: SSH transport has socket error...\n') continue except paramiko.ssh_exception.AuthenticationException as e: # print('\nATTENTION: SSH transport has Authentication exception...\n') continue except paramiko.ssh_exception.BadHostKeyException as e: # print('\nATTENTION: SSH transport has BadHostKeyException...\n') continue except paramiko.ssh_exception.SSHException as e: # print('\nATTENTION: SSH transport has SSHException...\n') continue except Exception as e: # print('\nATTENTION: SSH transport has undefined exception...\n') continue # setting the standard errors back again original_stderr = sys.stderr self.ssh = sshve # handle # next two lines to check if the SSH control is on the Vehicle Emulator # output = runr(self, 'hostname', tid) # print(output) output = runr(self, 'ping -c 2 192.168.127.12 >/dev/null; echo $?', tid) if output == '0': log_green("Verify that the vehicle emulator is talking to Reference Platform: PASSED", tid) else: log_red("Verify that the vehicle emulator is talking to the Reference Platform: FAILED", tid) output = runr(self, 'ps -al|grep remote_commands >/dev/null; echo $?', tid) if output == '0': log_green('Verify that the "remote_commands" process is running on the Vehicle Emulator: PASSED', tid) else: log_red('Verify that the "remote_commands" process is running on the Vehicle Emulator: FAILED', tid) output = runr(self, 'ps -al|grep gpssim.py >/dev/null; echo $?', tid) if output == '0': log_green('Verify that the "gpssim.py" process is running on the Vehicle Emulator: PASSED', tid) else: log_red('Verify that the "gpssim.py" process is running on the Vehicle Emulator: FAILED', tid) output = runr(self, 'ps -al|grep 
gpssim.py >/dev/null; echo $?', tid) if output == '0': log_green('Verify that the "ve_control.py" process is running on the Vehicle Emulator: PASSED', tid) else: log_red('Verify that the "ve_control.py" process is running on the Vehicle Emulator: FAILED', tid) output = runr(self, 'echo getconfig|netcat -- 127.0.0.1 5013', tid) # print(output) if "2C4RC1CG6GR121659" in output: log_green('Verify that
<reponame>iVibudh/CER-ESA-Phase2 import pickle import nltk from nltk.corpus import stopwords from nltk.stem.porter import * from nltk.tokenize import word_tokenize Landscape_terrain_and_weather = """Physical and Meteorological Environment Physical Environment Meteorological Environment Precipitation Snowfall Wind rain Mean temperatures Slope Geotechnical Slumping Subsidence Weather Erosion Ice Permafrost Climate trend climate water erosion wind erosion acid-generating rock temperature physical meteorological landslides mudflows slumping subsidence seismicity flooding migrating watercourses eroding banks extreme weather events peak flow regime ice jams acid rock climate variability ground conditions thaw till earthquake avalanche sloping topography elevation terrain landscape weather physiography bedrock geology natural hazard""" Soil = """Soil Productivity Soil Agriculture Topsoil Subsoil Soil horizon Drainage Erosion soil contamination CCME Canadian Council of Ministers of the Environment Soil compaction Soil structure Soil classification Soil handling Containment reclamation thickness of horizon tilth grubbing soil quality salinity sediments rocks minerals sand chernozem DVG Dunvargan calcareous CRW sand sandy Glaciofluvial boulders gravel silt clay stone stoniness""" Plants = """Vegetation Plant planting Rare plant Boreal Grassland Prairie Forest forested Clearing plant community orchid orchard Weeds Invasive species invasive plants Seed mix Herbicide Tree leaf branch Growth Old growth Biodiversity forestry clubroot wood spruce fir birch pine aspen tamarack willow beech maple black walnut hickory oak redcedar hemlock Douglas-fir genus agricultural root seed mulcher mulch bentgrass sedge carex wood moss bulrush oatgrass mannagrass flower androgynum Aulacomnium undulatum Atrichum wheatgrass parviflora luzula crawfordii Achnatherum needlegrass eleocharis reedgrass calamagrostis latifolia Elymus grain wildrye meadow speargrass shrub chokecherry Gattinger\'s Agalinis 
Agalinis gattingeri Rough Agalinis Agalinis aspera Skinner\'s Agalinis Agalinis skinneriana Scarlet Ammannia Ammannia robusta Short-rayed Alkali Aster Symphyotrichum frondosum Eastern Mountain Avens Geum peckii Deltoid Balsamroot Balsamorhiza deltoidea Tall Beakrush Rhynchospora macrostachya Cherry Birch Betula lenta Bluehearts Buchnera americana Fernald\'s Braya Braya fernaldii Hairy Braya Braya pilosa Long\'s Braya Braya longii Tall Bugbane Actaea elata Bashful Bulrush Trichophorum planifolium Slender Bush-clover Lespedeza virginica California Buttercup Ranunculus californicus Water-plantain Buttercup Ranunculus alismifolius Butternut Juglans cinerea Eastern Prickly Pear Cactus Opuntia humifusa Spalding\'s Campion Silene spaldingii Coastal Scouler\'s Catchfly Silene scouleri grandis Muhlenberg\'s Centaury Centaurium muehlenbergii American Chestnut Castanea dentata Colicroot Aletris farinosa Slender Collomia Collomia tenella American Columbo Frasera caroliniensis Pink Coreopsis Coreopsis rosea Eastern Flowering Dogwood Cornus florida Contorted-pod Evening-primrose Camissonia contorta Southern Maidenhair Fern Adiantum capillus-veneris Eastern Prairie Fringed-orchid Platanthera leucophaea Western Prairie Fringed-orchid Platanthera praeclara Plymouth Gentian Sabatia kennedyana White Prairie Gentian Gentiana alba American Ginseng Panax quinquefolius Virginia Goat\'s-rue Tephrosia virginiana Showy Goldenrod Solidago speciosa Rayless Goldfields Lasthenia glaberrima Forked Three-awned Grass Aristida basiramea Fascicled Ironweed Vernonia fasciculata Tweedy\'s Lewisia Lewisiopsis tweedyi Small-flowered Lipocarpha Lipocarpha micrantha Seaside Birds-foot Lotus Lotus formosissimus Furbish\'s Lousewort Pedicularis furbishiae Dense-flowered Lupine Lupinus densiflorus Prairie Lupine Lupinus lepidus Streambank Lupine Lupinus rivularis Virginia Mallow Sida hermaphrodita White Meconella Meconella oregana Coast Microseris Microseris bigelovii Pink Milkwort Polygala incarnata Hoary 
Mountain-mint Pycnanthemum incanum Red Mulberry Morus rubra Phantom Orchid Cephalanthera austiniae Bearded Owl-clover Triphysaria versicolor Grand Coulee Owl-clover Orthocarpus barbatus Rosy Owl-clover Orthocarpus bracteosus Victoria\'s Owl-clover Castilleja victoriae Golden Paintbrush Castilleja levisecta Branched Phacelia Phacelia ramosissima Whitebark Pine Pinus albicaulis Heart-leaved Plantain Plantago cordata Large Whorled Pogonia Isotria verticillata Nodding Pogonia Triphora trianthophora Small Whorled Pogonia Isotria medeoloides Ogden\'s Pondweed Potamogeton ogdenii Fragrant Popcornflower Plagiobothrys figuratus Stoloniferous Pussytoes Antennaria flagellaris Engelmann\'s Quillwort Isoetes engelmannii Quebec Rockcress Boechera quebecensis Kellogg\'s Rush Juncus kelloggii Pink Sand-verbena Abronia umbellata Small-flowered Sand-verbena Tripterocalyx micranthus Dwarf Sandwort Minuartia pusilla False Hop Sedge Carex lupuliformis Foothill Sedge Carex tumulicola Juniper Sedge Carex juniperorum Lindley\'s False Silverpuffs Uropappus lindleyi Brook Spike-primrose Epilobium torreyi Dense Spike-primrose Epilobium densiflorum Bent Spike-rush Eleocharis geniculata Bent Spike-rush Eleocharis geniculata Horsetail Spike-rush Eleocharis equisetoides Thread-leaved Sundew Drosera filiformis Small-flowered Tonella Tonella tenella Toothcup Rotala ramosior Cucumber Tree Magnolia acuminata Bog Bird\'s-foot Trefoil Lotus pinnatus Drooping Trillium Trillium flexipes Howell\'s Triteleia Triteleia howellii Bird\'s-foot Violet Viola pedata Yellow Montane Violet praemorsa praemorsa Viola praemorsa ssp. 
praemorsa Barrens Willow Salix jejuna Spotted Wintergreen Chimaphila maculata Wood-poppy Stylophorum diphyllum Tall Woolly-heads Psilocarphus elatior Dwarf Woolly-heads Psilocarphus brevissimus Batwing Vinyl Lichen Leptogium platynum Boreal Felt Lichen Erioderma pedicellatum Pale-bellied Frost Lichen Physconia subpallida Seaside Centipede Lichen Heterodermia sitchensis Vole Ears Lichen Rusty Cord-moss Entosthodon rubiginosus Acuteleaf Small Limestone Moss Seligeria acutifolia Margined Streamside Moss Scouleria marginata Nugget Moss Microbryum vlassovii Poor Pocket Moss Fissidens pauperculus Rigid Apple Moss Bartramia stricta Roell\'s Brotherella Moss Brotherella roellii Silver Hair Moss Fabronia pusilla Griscom’s Arnica Arnica griscomii ssp. griscomii Anticosti Aster Symphyotrichum anticostense Gulf of St. Lawrence Aster Symphyotrichum laurentianum Western Silvery Aster Symphyotrichum sericeum White Wood Aster Eurybia divaricata Willowleaf Aster Symphyotrichum praealtum Eastern Baccharis Baccharis halimifolia Branched Bartonia Bartonia paniculata ssp. paniculata Dense Blazing Star Liatris spicata Kentucky Coffee-tree Gymnocladus dioicus Tiny Cryptantha Cryptantha minima Lakeside Daisy Hymenoxys herbacea Deerberry Vaccinium stamineum Gray\'s Desert-parsley Lomatium grayi Lemmon\'s Holly Fern Polystichum lemmonii Mountain Holly Fern Polystichum scopulinum Victorin\'s Gentian Gentianopsis virgata ssp. 
victorinii Showy Goldenrod Solidago speciosa Goldenseal Hydrastis canadensis Smooth Goosefoot Chenopodium subglabrum Round-leaved Greenbrier Smilax rotundifolia Dwarf Hackberry Celtis tenuifolia Wild Hyacinth Camassia scilloides Van Brunt\'s Jacob’s-ladder Polemonium vanbruntiae Small White Lady\'s-slipper Cypripedium candidum Hare-footed Locoweed Oxytropis lagopus Macoun\'s Meadowfoam Limnanthes macounii Mexican Mosquito-fern Azolla mexicana Slender Mouse-ear-cress Halimolobos virgata Cliff Paintbrush Castilleja rupicola Sweet Pepperbush Clethra alnifolia Showy Phlox Phlox speciosa ssp. occidentalis Slender Popcornflower Plagiobothrys tenellus Bolander\'s Quillwort Isoetes bolanderi False Rue-anemone Enemion biternatum Bear\'s-foot Sanicle Sanicula arctopoides Purple Sanicle Sanicula bipinnatifida Soapweed Yucca glauca Western Spiderwort Tradescantia occidentalis Hill\'s Thistle Cirsium hillii Toothcup Rotala ramosior Purple Twayblade Liparis liliifolia American Water-willow Justicia americana Green-scaled Willow Salix chlorolepis Blunt-lobed Woodsia Woodsia obtusa Seaside Bone Hypogymnia heterophylla Black-foam Lichen Anzia colpodes Crumpled Tarpaper Lichen Collema coniophilum Wrinkled Shingle Lichen Pannaria lurida Eastern Waterfan Peltigera hydrothyria Porsild\'s Bryum Mielichhoferia macrocarpa Alkaline Wing-nerved Moss Pterygoneurum kozlovii Haller\'s Apple Moss Bartramia halleriana Spoon-leaved Moss Bryoandersonia illecebra Blue Ash Fraxinus quadrangulata Crooked-stem Aster Symphyotrichum prenanthoides Nahanni Aster Symphyotrichum nahanniense White-top Aster Sericocarpus rigidus Vancouver Island Beggarticks Bidens amplissima Western Blue Flag Iris missouriensis Buffalograss Bouteloua dactyloides American Hart\'s-tongue Fern Asplenium scolopendrium Coastal Wood Fern Dryopteris arguta Goldencrest Lophiola aurea Houghton\'s Goldenrod Solidago houghtonii Riddell\'s Goldenrod Solidago riddellii Mackenzie Hairgrass Deschampsia mackenzieana Common Hoptree Ptelea 
trifoliata Tuberous Indian-plantain Arnoglossum plantagineum Dwarf Lake Iris Iris lacustris Eastern Lilaeopsis Lilaeopsis chinensis Lyall\'s Mariposa Lily Calochortus lyallii Fernald\'s Milk-vetch Astragalus robbinsii var. fernaldii Water Pennywort Hydrocotyle umbellata Beach Pinweed Lechea maritima Yukon Podistera Podistera yukonensis Hill\'s Pondweed Potamogeton hillii Hairy Prairie-clover Dalea villosa Prototype Quillwort Isoetes prototypus Redroot Lachnanthes caroliniana Climbing Prairie Rose Rosa setigera Swamp Rose-mallow Hibiscus moscheutos New Jersey Rush Juncus caesariensis Spiked Saxifrage Micranthes spicata Baikal Sedge Carex sabulosa Tubercled Spike-rush Eleocharis tuberculosa Floccose Tansy Tanacetum huronense var. floccosum Pitcher\'s Thistle Cirsium pitcheri Athabasca Thrift Armeria maritima interior Victorin\'s Water-hemlock Cicuta maculata var. victorinii Yukon Wild Buckwheat Eriogonum flavum var. aquilinum Felt-leaf Willow Salix silicicola Sand-dune Short-capsuled Willow Salix brachycarpa var. psammophila Turnor\'s Willow Salix turnorii Dwarf Woolly-heads Psilocarphus brevissimus Large-headed Woolly Yarrow Achillea millefolium var. 
megacephalum Banded Mosses Cord-moss Entosthodon fascicularis Columbian Carpet Moss Bryoerythrophyllum columbianum Twisted Oak Moss Syntrichia laevipila Tiny Tassel Crossidium seriatum Frosted Lichens Glass-whiskers Sclerophora peronella Flooded Jellyskin Leptogium rivulare Blue Felt Lichen Degelia plumbea Boreal Felt Lichen Erioderma pedicallatum Cryptic Paw Lichen Nephroma occultum Oldgrowth Specklebelly Lichen Pseudocyphellaria rainierensis Peacock Vinyl Lichen Leptogium polycarpum Mountain Crab-eye Acroscyphus sphaerophoroides Western Waterfan Peltigera gowardii""" Water = """Water Quality and Quantity Water waterbody water body watercourse evaporation transpiration Surface water Ground water Runoff Contamination contaminant Water use Hydrologic hydrological hydro hydrostatic salinity blasting Withdrawal Flow Peak Basin Inter-basin Water Quality Water quality testing Water table Containment Sediment sewer waste wastewater biosolids sludge septage groundwater groundwater-related aquifers streamflow aquatics acquatics river hydrometric watershed waterfall sea ocean lake pond fjords wadis runs reservoir lagoon bay harbor well well-water surface-water surfacewater hot spring creek tidal subtidal mercury water contamination""" Fish = """Fish Fish Habitat fish-bearing fisheries Fisheries and Oceans Canada mercury water contamination deleterious DFO Fisheries and Oceans Local fisheries Trout Fisheries Act Offsetting Instream work in-stream work Restricted activity period Fish-bearing water body Riparian acquatic Aquatic Aquatic invasive species Spawning Fry fingerling alevin chlorine chlorinated Sport fishery Spawning deterrent spawning period Stream River In-stream instream Wetted width substrate salmon oncorhynchus walleye pike crappie redhorse pumpkinseed fish bowfin bass catfish sunfish bluegill spotted gar muskellunge ruffe yellow perch shiner sucker whitefish cisco sea ocean lake pond bay subtidal Striped Bass Morone saxatilis Silver Chub Macrhybopsis storeriana 
Lake Chubsucker Erimyzon sucetta Shortnose Cisco Coregonus reighardi Spring Cisco Coregonus sp. Nooksack Dace Rhinichthys cataractae ssp. Redside Dace Clinostomus elongatus Speckled Dace Rhinichthys osculus Channel Darter Percina copelandi Channel Darter Percina copelandi Spotted Gar Lepisosteus oculatus Western Brook Lamprey Lampetra richardsoni Northern Madtom Noturus stigmosus Copper Redhorse Moxostoma hubbsi Atlantic Salmon Salmo salar Basking Shark Cetorhinus maximus White Shark Carcharodon carcharias Carmine Shiner Notropis percobromus Rainbow Smelt Osmerus mordax Rainbow Smelt Osmerus mordax Enos Lake Benthic Threespine Stickleback Gasterosteus aculeatus Enos Lake Limnetic Threespine Stickleback Misty Lake Lentic Threespine Stickleback Misty Lake Lotic Threespine Stickleback Vananda Creek Benthic Threespine Stickleback Vananda Creek Limnetic Threespine Stickleback Gasterosteus aculeatus White Sturgeon Acipenser transmontanus Rainbow Trout Oncorhynchus mykiss Atlantic Whitefish Coregonus huntsmani Eastern Sand Darter Ammocrypta pellucida Eastern Sand Darter Ammocrypta pellucida Vancouver Lamprey Entosphenus macrostomus Plains Minnow Hybognathus placitus Pugnose Minnow Opsopoeodus emiliae Western Silvery Minnow Hybognathus argyritis Black Redhorse Moxostoma duquesnei Coastrange Sculpin Cottus aleuticus Rocky Mountain Sculpin Cottus sp. Pugnose Shiner Notropis anogenus Silver Shiner Notropis photogenis Spotted Wolffish Anarhichas minor Mountain Sucker Catostomus platyrhynchus Salish Sucker Catostomus sp. cf. 
catostomus Bull Trout Salvelinus confluentus Westslope Cutthroat Trout Oncorhynchus clarkii lewisi Northern Wolffish Anarhichas denticulatus Bigmouth Buffalo Ictiobus cyprinellus Channel Darter Percina copelandi Dolly Varden Salvelinus malma malma Banded Killifish Fundulus diaphanus Upper Great Lakes Kiyi Coregonus kiyi kiyi Northern Brook Lamprey Ichthyomyzon fossor Silver Lamprey Ichthyomyzon unicuspis Cutlip Minnow Exoglossum maxillingua Grass Pickerel Esox americanus vermiculatus River Redhorse Moxostoma carinatum Rougheye Rockfish type I Sebastes sp. type I Rougheye Rockfish type II Sebastes sp. type II Yelloweye Rockfish Sebastes ruberrimus Columbia Sculpin Cottus hubbsi Deepwater Sculpin Myoxocephalus thompsonii Rocky Mountain Sculpin Cottus sp. Shorthead Sculpin Cottus confusus Bluntnose Sixgill Shark Hexanchus griseus Bridle Shiner Notropis bifrenatus Giant Threespine Stickleback Gasterosteus aculeatus Unarmoured Threespine Stickleback Gasterosteus aculeatus Green Sturgeon Acipenser medirostris Lake Sturgeon Acipenser fulvescens Shortnose Sturgeon Acipenser brevirostrum Mountain Sucker Catostomus platyrhynchus Spotted Sucker Minytrema melanops Northern Sunfish Lepomis peltastes Longspine Thornyhead Sebastolobus altivelis Tope Galeorhinus galeus Blackstripe Topminnow Fundulus notatus Bull Trout Salvelinus confluentus Westslope Cutthroat Trout Oncorhynchus clarkii lewisi Warmouth Lepomis gulosus Atlantic Wolffish Anarhichas lupus""" Wetlands = """Wetlands Class (wetland class) wetland Bog Fen Marsh Swamp Shallow water Wetland function Hydrological function Drainage area Canadian wetland classification system Federal policy on wetland conservation Wetland monitoring water recharge potholes ponds peatbogs mires mangrove forest carr pocosin floodplains vernal pool baygall slough""" Wildlife = """Wildlife and Wildlife Habitat wildlife wildlife habitat nocturnal bat trees hibernation migratory birds migratory bird sanctuary MBCA migratory birds convention act 
Nesting foraging Restricted activity period Mammal Ungulate Amphibian Reptile Breeding eggs Den migration staging movement corridors forest interior denning Wintering overwintering national park national wildlife reserve national wildlife area world biosphere reserve Hibernaculum hibernacula Riparian habitat Old growth habitat Sensitive period Sensory disturbance Mortality Mortality risk Habitat alteration Habitat loss Habitat destruction Range Population Distribution Sanctuary sanctuaries Important Bird Area Bat caribou bird goose swans frogs toads salamanders newts caecilians furbearing reptile invertebrate rattlesnake snake waterbird bear moose bat owl beaver moose polar bear bison puffin lynx deer wolf reindeer bear cougar goose coyote wolverine raccoon elk crane porcupine fox hare loon marmot rabbit bobcat owl rattlesnake insect bug weasel otter skunk mollusk mollusc reptile amphibian mineral lick minerallick hunting trapping American Badger jacksoni jaksoni Badger Taxidea taxus jacksoni American Badger jeffersonii Taxidea taxus jeffersonii American Badger jeffersonii jeffersonii Taxidea taxus jeffersonii Tri-coloured Bat Perimyotis subflavus Peary Caribou Caribou Rangifer tarandus pearyi Woodland Caribou Rangifer tarandus caribou Ord\'s Kangaroo Rat Dipodomys ordii Vancouver Island Marmot Marmota vancouverensis Townsend\'s Mole Scapanus townsendii Western Harvest Mouse dychei dychei Reithrodontomys megalotis dychei Little Brown Myotis Myotis Myotis lucifugus Northern Myotis Myotis septentrionalis Northern Bobwhite Colinus virginianus Yellow-breasted Chat auricollis auricollis Icteria virens auricollis Yellow-breasted Chat virens virens Icteria virens virens Whooping Crane Grus americana Eskimo Curlew Numenius borealis Acadian Flycatcher Empidonax virescens Horned Grebe Podiceps auritus Ivory Gull Pagophila eburnea Red Knot rufa rufa Calidris canutus rufa Streaked Horned Lark Eremophila alpestris strigata Barn Owl Tyto alba Burrowing Owl Athene cunicularia 
Spotted Owl caurina caurina Strix occidentalis caurina Mountain Plover Charadrius montanus Piping Plover circumcinctus circuinctus Charadrius melodus circumcinctus Piping Plover melodus melodus Charadrius melodus melodus King Rail Rallus elegans Greater Sage-Grouse urophasianus subspecies Centrocercus urophasianus urophasianus Williamson\'s Sapsucker Sphyrapicus thyroideus Pink-footed Shearwater Ardenna creatopus Loggerhead Shrike migrans migrans Lanius ludovicianus migrans Coastal Vesper Sparrow Pooecetes gramineus affinis Henslow\'s Sparrow Ammodramus henslowii Black Swift Cypseloides niger Roseate Tern Sterna dougallii Sage Thrasher Oreoscoptes montanus Cerulean Warbler Setophaga cerulea Kirtland\'s Warbler Dendroica kirtlandii Prothonotary Warbler Protonotaria citrea Red-headed Woodpecker Melanerpes erythrocephalus White-headed Woodpecker Picoides albolarvatus Cricket Frog Acris blanchardi Northern Leopard Frog Lithobates pipiens Oregon Spotted Frog Rana pretiosa Allegheny Mountain Dusky Salamander Desmognathus ochrophaeus Allegheny Mountain Dusky Salamander Desmognathus ochrophaeus Eastern Tiger Salamander Ambystoma tigrinum Jefferson Salamander Ambystoma jeffersonianum Northern Dusky Salamander Desmognathus fuscus Small-mouthed Salamander Ambystoma texanum Western Tiger Salamander Ambystoma mavortium Fowler\'s Toad Anaxyrus fowleri Eastern Reptiles Foxsnake Pantherophis gloydi Eastern Foxsnake Pantherophis gloydi Butler\'s Gartersnake Thamnophis butleri Greater Short-horned Lizard Phrynosoma hernandesi Massasauga Sistrurus catenatus Desert Nightsnake Hypsiglena chlorophaea Queensnake Regina septemvittata Blue Racer Coluber constrictor foxii Gray Ratsnake Pantherophis spiloides Leatherback Sea Turtle Dermochelys coriacea Leatherback Sea Turtle Dermochelys coriacea Loggerhead Sea Turtle Caretta caretta Five-lined Skink Plestiodon fasciatus Sharp-tailed Snake Contia tenuis Spiny Softshell Apalone spinifera Blanding\'s Turtle Emydoidea blandingii Spotted Turtle 
Clemmys guttata Western Painted Turtle Chrysemys picta bellii Broad-banded Forestsnail Allogona profunda Oregon Forestsnail Allogona townsendiana Proud Globelet Patera pennsylvanica Hotwater Physa Physella wrighti Island Blue Plebejus saepiolus insulanus Aweme Borer Papaipema aweme Hoptree Borer Prays atomocella Bogbean Buckmoth Hemileuca sp. Gypsy Cuckoo Bumble Bee Bombus bohemicus Rusty-patched Bumble Bee Bombus affinis Taylor\'s Checkerspot Euphydryas editha taylori Olive Clubtail Stylurus olivaceus Rapids Clubtail Gomphus quadricolor Riverine Clubtail Stylurus amnicola Skillet Clubtail Gomphus ventricosus Hungerford\'s Crawling Water Beetle Brychius hungerfordi Macropis Cuckoo Bee Epeoloides pilosulus Bert\'s Predaceous Diving Beetle Sanfilippodytes bertae Eastern Persius Duskywing Erynnis persius persius Okanagan Efferia Efferia okanagana Hine\'s Emerald Somatochlora hineana White Flower Moth Schinia bimatris Gold-edged Gem Schinia avemensis Behr\'s Hairstreak Satyrium behrii Half-moon Hairstreak Satyrium semiluna Mormon Metalmark Apodemia mormo Dusky Dune Moth Copablepharon longipenne Edwards\' Beach Moth Anarta edwardsii Five-spotted Bogus Yucca Moth Prodoxus quinquepunctellus Non-pollinating Yucca Moth Tegeticula corruptrix Sand-verbena Moth Copablepharon fuscum Yucca Moth Tegeticula yuccasella Maritime Ringlet Coenonympha nipisiquit Dakota Skipper Hesperia dacotae Poweshiek Skipperling Oarisma poweshiek Ottoe Skipper Hesperia ottoe False-foxglove Sun Moth Pyrrhia aurantiago Cobblestone Tiger Beetle Cicindela marginipennis Northern Barrens Tiger Beetle Cicindela patruela Wallis\' Dark Saltflat Tiger Beetle Cicindela parowana wallisi Pallid Bat Antrozous pallidus Wood Bison Bison bison athabascae Woodland Caribou Rangifer tarandus caribou Ermine haidarum subspecies Mustela erminea haidarum Grey Fox Urocyon cinereoargenteus Swift Fox Vulpes velox American Marten Martes americana atrata Black-tailed Prairie Dog Cynomys ludovicianus Short-tailed Birds Albatross 
Phoebastria albatrus Least Bittern Ixobrychus exilis Bobolink Dolichonyx oryzivorus Lark Bunting Calamospiza melanocorys Red Crossbill percna percna Loxia curvirostra percna Olive-sided Flycatcher Contopus cooperi Northern Goshawk laingi laingi Accipiter gentilis laingi Ross\'s Gull Rhodostethia rosea Ferruginous Hawk Buteo regalis Red Knot roselaari type Calidris canutus roselaari roselaari Chestnut-collared Longspur Calcarius ornatus McCown\'s Longspur Rhynchophanes mccownii Eastern Meadowlark Sturnella magna Marbled Murrelet Brachyramphus marmoratus Common Nighthawk Chordeiles minor Barn Owl Tyto alba Northern Saw-whet Owl brooksi brooksi Aegolius acadicus brooksi Sprague\'s Pipit Anthus spragueii Western Screech-owl kennicottii kennicottii Megascops kennicottii kennicottii Megascops kennicottii macfarlanei Loggerhead Shrike excubitorides excubitorides Lanius ludovicianus excubitorides Bank Swallow Riparia riparia Barn Swallow Hirundo rustica Chimney Swift Chaetura pelagica Bicknell\'s Thrush Catharus bicknelli Wood Thrush Hylocichla mustelina Canada Warbler Wilsonia canadensis Golden-winged Warbler Vermivora chrysoptera Louisiana Waterthrush Parkesia motacilla Whip-poor-will Caprimulgus vociferus Lewis\'s Woodpecker Melanerpes lewis Rocky Mountain Tailed Frog Ascaphus montanus Western Chorus Frog Pseudacris triseriata Coastal Giant Salamander Dicamptodon
try:
    # Third-party / project imports are wrapped so a missing dependency
    # produces a friendly message instead of a raw traceback.
    from flask import render_template, jsonify, request, Flask, send_from_directory, redirect
    import json
    import time
    import random
    import os
    import secrets
    import asyncio
    import datetime
    import threading
    import logging
    import shutil
    # BUGFIX: was `import urllib`, which does not guarantee the
    # `urllib.request` submodule used below is loaded.
    import urllib.request
    import requests
    import discord
    from discord.ext import commands
    from discord.ext.commands import bot
    import aiohttp
    from yaspin import yaspin
    from yaspin.spinners import Spinners
    from flask_basicauth import BasicAuth
    import DiscordObjects
    import APIkeyManagement
except ImportError as e:
    print(u"\u001b[31mFailed to import module: '" + e.name + "'. Please make sure all dependencies that are in 'requirements.txt' are installed and try again.\u001b[0m")
    exit()

spinner = yaspin()
spinner.spinner = Spinners.line

print("""
=======================
ImageUploader
Developed by <NAME> sniff122
V: 2.0.0
=======================
""")

# Load the runtime configuration; if it is missing, seed it from the bundled
# example (or fetch the example from GitHub) and exit so the user can edit it.
try:
    with open("Config.json") as f:
        Config = json.load(f)
except FileNotFoundError:
    print(u"\u001b[31mThe 'Config.json' file was not found, copying file from 'Config.example.json'\u001b[0m")
    try:
        shutil.copyfile(r"Config.example.json", r"Config.json")
    except OSError:
        # Example file is missing too — fall back to downloading it.
        print(u"\u001b[31mThe 'Config.example.json' file was not found, downloading from GitHub\u001b[0m")
        urllib.request.urlretrieve(
            "https://raw.githubusercontent.com/sniff122Development/ImageUploader/master/Config.example.json",
            "Config.json")
    print(u"\u001b[31mDone! Exiting application, please edit 'Config.json' and restart. If you need assistance, please see https://github.com/sniff122Development/ImageUploader/wiki\u001b[0m")
    exit()

PROJECT_HOME = os.path.dirname(os.path.realpath(__file__))
UPLOAD_DIRECTORY = PROJECT_HOME + "/" + Config["webserver"]["upload_directory"]
CONFIG_DIRECTORY = PROJECT_HOME + "/" + Config["webserver"]["data_directory"]

# exist_ok avoids the check-then-create race of the old os.path.exists/os.mkdir pair.
os.makedirs(CONFIG_DIRECTORY, exist_ok=True)


def _load_json_store(name):
    """Load a JSON data store from CONFIG_DIRECTORY, creating it as {} if missing.

    Replaces three copies of identical open/write/re-open boilerplate.
    """
    path = os.path.join(CONFIG_DIRECTORY, name)
    if not os.path.exists(path):
        with open(path, "w") as fh:
            fh.write("{}")
    with open(path, "r") as fh:
        return json.load(fh)


# In-memory state, persisted by saveconfigs():
#   apikeys    - api key -> {"file-names": [...], "short-urls": [...]}
#   files      - uploaded filename -> owning api key
#   shortlinks - short id -> {"url": ..., "key": owning api key}
apikeys = _load_json_store("APIKeys.json")
files = _load_json_store("files.json")
shortlinks = _load_json_store("shortlinks.json")


def saveconfigs(keys, filetokens, shortlinks):
    """Persist the three JSON stores (API keys, files, short links) to disk."""
    with open(str(CONFIG_DIRECTORY + "/APIKeys.json"), "w") as f:
        json.dump(keys, f, indent=4)
    with open(str(CONFIG_DIRECTORY + "/files.json"), "w") as f:
        json.dump(filetokens, f, indent=4)
    with open(str(CONFIG_DIRECTORY + "/shortlinks.json"), "w") as f:
        json.dump(shortlinks, f, indent=4)


# =============================
# ==========WEBSERVER==========
# =============================
port = Config["webserver"]["port"]
listen_address = Config["webserver"]["listen"]
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = Config["webserver"]["admin_auth"]["username"]
app.config['BASIC_AUTH_PASSWORD'] = Config["webserver"]["admin_auth"]["password"] basic_auth = BasicAuth(app) WEBROOT = Config["webserver"]["webroot"] RecentFile = "" def apikeyvalid(key): if key in apikeys: return True else: return False def checkiffileexists(filename): if filename in shortlinks: return True else: return False @app.route("/", methods=['GET']) def web_root(): return render_template("index.htm", uploadapi=str(WEBROOT + "/api/upload"), linkshortapi=str(WEBROOT + "/api/url"), webroot=str(WEBROOT)) @app.route("/js/<jstype>", methods=["GET"]) def return_js(jstype): if jstype in ["admin_files.js", "admin_keys.js", "admin_links.js"]: return send_from_directory("JS", jstype) else: return jsonify({"Status": 404, "Message": "Not Found"}) @app.route("/api/upload", methods=["POST"]) def upload_file(): apikey = str(request.headers.get("Auth")) if apikeyvalid(str(apikey)): if request.files["file"]: uploadfile = request.files["file"] filename = uploadfile.filename filenamesplit = str(filename).split(".") ext = str(filenamesplit[len(filenamesplit) - 1]) filetoken = str(secrets.token_hex(10)) while checkiffileexists(filetoken): filetoken = str(secrets.token_hex(10)) filename = filetoken + "." 
+ ext savepath = os.path.join(UPLOAD_DIRECTORY, filename) try: uploadfile.save(savepath) except: os.mkdir(UPLOAD_DIRECTORY) uploadfile.save(savepath) apikeys[apikey]["file-names"].append(filename) files[filename] = apikey saveconfigs(apikeys, files, shortlinks) if Config["bot"]["Enabled"] == "True": embed = DiscordObjects.DiscordEmbed(title="New Image Uploaded", description="There is a new image!", footer=DiscordObjects.EmbedFooter(""), colour=0xffffff, image=DiscordObjects.EmbedImage( str("https://" + request.headers['Host'] + "/uploads/" + filename)), author=DiscordObjects.EmbedAuthor("ImageUploader"), fields=[ DiscordObjects.EmbedField(name="URL:", value=str("https://" + request.headers['Host'] + "/uploads/" + filename), inline=False)], thumbnail=DiscordObjects.EmbedImage( str("https://" + request.headers['Host'] + "/uploads/" + filename))) webhookcontent = DiscordObjects.DiscordWebhookContent(username="ImageUploader", avatar_url=Config["bot"]["webhook"]["avatar_url"], tts=False, embed=[embed]) DiscordObjects.WebhookPost(Config["bot"]["webhook"]["url"], webhookcontent) global RecentFile RecentFile = str("https://" + request.headers['Host'] + "/uploads/" + filename) return jsonify({"Status": 200, "Message": "OK", "FileLink": str("https://" + request.headers['Host'] + "/uploads/" + filename)}) else: return jsonify({"Status": 403, "Message": "Forbidden - No file provided"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/url", methods=["POST"]) def shorten_link(): apikey = str(request.headers.get("Auth")) if apikeyvalid(apikey): url = str(request.headers.get("url")) if url: exists = False for urlid in shortlinks: if url == shortlinks[urlid]: exists = True break else: continue if not exists: urlid = str(secrets.token_hex(4)) while urlid in shortlinks: urlid = str(secrets.token_hex(4)) try: apikeys[apikey]["short-urls"].append(urlid) except: apikeys[apikey]["short-urls"] = [urlid] shortlinks[urlid] = {"url": url, "key": apikey} 
saveconfigs(apikeys, files, shortlinks) return jsonify({"Status": 200, "Message": "OK", "shorturl": str("https://" + request.headers['Host'] + "/link/" + urlid)}) else: return jsonify({"Status": 403, "Message": "Forbidden - No url provided"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/uploads/<file>", methods=['GET']) def get_file(file): try: try: check = files[file] except KeyError: return jsonify({"Status": 404, "Message": "Not Found"}) return send_from_directory(UPLOAD_DIRECTORY, file) except: return jsonify({"Status": 500, "Message": "Internal Server Error"}) @app.route("/link/<link>", methods=["GET"]) def get_link(link): try: if link in shortlinks: return redirect(shortlinks[link]["url"]) else: return jsonify({"Status": 404, "Message": "Not Found"}) except: return jsonify({"Status": 500, "Message": "Internal Server Error"}) # ==WEBSERVER=ADMIN== @app.route("/admin", methods=['GET']) @basic_auth.required def admin_root(): return render_template("admin.htm", webroot=WEBROOT, recentfile=RecentFile) @app.route("/api/admin/listfiles", methods=["GET"]) def admin_get_files(): username = str(request.headers.get("username")) password = str(request.headers.get("password")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]): filelist = [] for filename in files: filelist.append(filename) return jsonify({"Status": 200, "Message": "Ok", "files": filelist}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/listlinks", methods=["GET"]) def admin_get_links(): username = str(request.headers.get("username")) password = str(request.headers.get("password")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]): linklist = [] for linkid in shortlinks: linklist.append(linkid) return jsonify({"Status": 200, "Message": "Ok", "links": linklist}) else: return 
jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/uploadfile", methods=["POST"]) def admin_upload_file(): apikey = str(request.headers.get("Auth")) username = str(request.headers.get("username")) password = str(request.headers.get("password")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)): if request.files["file"]: filename = str(request.headers.get('FileName')) file = request.files["file"] savepath = os.path.join(UPLOAD_DIRECTORY, filename) file.save(savepath) apikeys[apikey]["file-names"].append(filename) files[filename] = apikey saveconfigs(apikeys, files, shortlinks) if Config["bot"]["Enabled"] == "True": embed = DiscordObjects.DiscordEmbed(title="New Image Uploaded", description="There is a new image!", footer=DiscordObjects.EmbedFooter("There be a new image!"), colour=0xffffff, image=DiscordObjects.EmbedImage( str("https://" + request.headers['Host'] + "/uploads/" + filename)), author=DiscordObjects.EmbedAuthor("ImageUploader"), fields=[DiscordObjects.EmbedField(name="URL:", value=str( "https://" + request.headers['Host'] + "/uploads/" + filename), inline=False)], thumbnail=DiscordObjects.EmbedImage( str("https://" + request.headers['Host'] + "/uploads/" + filename))) webhookcontent = DiscordObjects.DiscordWebhookContent(username="ImageUploader", avatar_url=Config["bot"]["webhook"]["avatar_url"], tts=False, embed=[embed]) DiscordObjects.WebhookPost(Config["bot"]["webhook"]["url"], webhookcontent) global RecentFile RecentFile = str("https://" + request.headers['Host'] + "/uploads/" + filename) return jsonify({"Status": 200, "Message": "OK", "FileLink": str("https://" + request.headers['Host'] + "/uploads/" + filename)}) else: return jsonify({"Status": 403, "Message": "Forbidden", "Extra Info": "No File Prodived"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/url", methods=["POST"]) 
def admin_new_url(): apikey = str(request.headers.get("Auth")) username = str(request.headers.get("username")) password = str(request.headers.get("password")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)): url = str(request.headers.get("url")) if url: try: index = apikeys[apikey]["short-urls"].index(request.headers.get("id")) return jsonify({"Status": 403, "Message": "Forbidden - Link ID already exists"}) except: try: apikeys[apikey]["short-urls"].append(request.headers.get("id")) except: apikeys[apikey]["short-urls"] = [request.headers.get("id")] shortlinks[request.headers.get("id")] = {"url": url, "key": apikey} saveconfigs(apikeys, files, shortlinks) return jsonify( {"Status": 200, "Message": "OK", "shorturl": str("https://" + request.headers['Host'] + "/link/" + request.headers.get("id"))}) else: return jsonify({"Status": 403, "Message": "Forbidden - No url provided"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/deletefile", methods=["DELETE"]) def admin_delete_file(): apikey = str(request.headers.get("Auth")) username = str(request.headers.get("username")) password = str(request.headers.get("password")) filename = str(request.headers.get("filename")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)): try: oldpath = os.path.join("data", "uploads", filename) os.remove(oldpath) apikey = files[filename] del files[filename] fileindex = apikeys[apikey]["file-names"].index(filename) del apikeys[apikey]["file-names"][fileindex] saveconfigs(apikeys, files, shortlinks) return jsonify({"Status": 200, "Message": "OK"}) except FileNotFoundError: return jsonify({"Status": 404, "Message": "Not Found"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/deletelink", 
methods=["DELETE"]) def admin_delete_link(): apikey = str(request.headers.get("Auth")) username = str(request.headers.get("username")) password = str(request.headers.get("password")) lid = str(request.headers.get("id")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)): try: apikey = shortlinks[lid]["key"] del shortlinks[lid] linkindex = apikeys[apikey]["short-urls"].index(lid) del apikeys[apikey]["short-urls"][linkindex] saveconfigs(apikeys, files, shortlinks) return jsonify({"Status": 200, "Message": "OK"}) except KeyError: return jsonify({"Status": 404, "Message": "Not Found"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/renamefile", methods=["PUT"]) def admin_rename_file(): apikey = str(request.headers.get("Auth")) username = str(request.headers.get("username")) password = str(request.headers.get("password")) oldfilename = str(request.headers.get("oldfilename")) newfilename = str(request.headers.get("newfilename")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)): try: oldpath = os.path.join(UPLOAD_DIRECTORY, oldfilename) newpath = os.path.join(UPLOAD_DIRECTORY, newfilename) os.rename(oldpath, newpath) apikey = files[oldfilename] del files[oldfilename] files[newfilename] = apikey fileindex = apikeys[apikey]["file-names"].index(oldfilename) del apikeys[apikey]["file-names"][fileindex] apikeys[apikey]["file-names"].append(newfilename) saveconfigs(apikeys, files, shortlinks) return jsonify({"Status": 200, "Message": "OK", "NewLink": f"{WEBROOT}/uploads/{newfilename}"}) except FileNotFoundError: return jsonify({"Status": 404, "Message": "Not Found"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/renamelink", methods=["PUT"]) def admin_rename_link(): apikey = 
str(request.headers.get("Auth")) username = str(request.headers.get("username")) password = str(request.headers.get("password")) oldid = str(request.headers.get("oldid")) newid = str(request.headers.get("newid")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]) and apikeyvalid(str(apikey)): try: oldobj = shortlinks[oldid] del shortlinks[oldid] shortlinks[newid] = oldobj fileindex = apikeys[apikey]["short-urls"].index(oldid) apikeys[apikey]["short-urls"].pop(fileindex) apikeys[apikey]["short-urls"].append(newid) saveconfigs(apikeys, shortlinks, shortlinks) return jsonify({"Status": 200, "Message": "OK", "NewLink": f"{WEBROOT}/uploads/{newid}"}) except Exception as e: print(e) return jsonify({"Status": 404, "Message": "Not Found"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/newkey", methods=["GET"]) def admin_new_key(): username = str(request.headers.get("username")) password = str(request.headers.get("password")) name = str(request.headers.get("name")) if (username == Config["webserver"]["admin_auth"]["username"]) and ( password == Config["webserver"]["admin_auth"]["password"]): try: changes = APIkeyManagement.genkey(name, apikeys) apikeys.update(changes["apikeys"]) newkey = changes["newkey"] saveconfigs(apikeys, shortlinks, shortlinks) return jsonify({"Status": 200, "Message": "OK", "newkey": newkey}) except Exception as e: print(e) return jsonify({"Status": 500, "Message": "Internal Server Error"}) else: return jsonify({"Status": 401, "Message": "Unauthorized"}) @app.route("/api/admin/revokekey",
<gh_stars>1-10 # pyOCD debugger # Copyright (c) 2018-2019 Arm Limited # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...flash.flash import Flash from ...core.coresight_target import CoreSightTarget from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap) from ...debug.svd.loader import SVDFile FLASH_ALGO_QSPI = { 'load_address' : 0x20000000, 'instructions': [ 0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2, 0x4604b570, 0x4616460d, 0x44484867, 0xb9e86800, 0x44784866, 0x44494966, 0x46086008, 0x44494965, 0x46086008, 0x44494964, 0x68096008, 0xf0006808, 0x4961fc96, 0x68084449, 0xf0002101, 0xb108f9bb, 0xbd702001, 0x49582001, 0x60084449, 0xe7f82000, 0x4604b510, 0x4854b984, 0x68004448, 0xd10b2801, 0x44494955, 0xf0006808, 0xb108fa1f, 0xbd102001, 0x494d2000, 0x60084449, 0xe7f82000, 0x494eb510, 0x22004449, 0x46116808, 0xfba4f000, 0x2001b108, 0x2000bd10, 0xb510e7fc, 0xf0244604, 0x4604407f, 0x44494945, 0x3280f44f, 0x46216808, 0xfb92f000, 0x2001b108, 0x2000bd10, 0xe92de7fc, 0x460441f0, 0x4617460d, 0x407ff024, 0x493b4604, 0x462b4449, 0x6808463a, 0xf0004621, 0x4606faf2, 0x4630b116, 0x81f0e8bd, 0xe7fb2000, 0x43f8e92d, 0x46884607, 0xf0274615, 0xbf00467f, 0xe0232400, 0x4449492e, 0x466a2304, 0x46316808, 0xfa8bf000, 0x0000f89d, 0x42885d29, 0xf89dd111, 0x1c601001, 0x42815c28, 0xf89dd10b, 0x1ca01002, 0x42815c28, 0xf89dd105, 0x1ce01003, 0x42815c28, 0x1930d002, 0x83f8e8bd, 0x1d241d36, 0xd3d94544, 
0xe7f72000, 0x43f8e92d, 0x460f4606, 0xf0264614, 0xbf00457f, 0x0800f04f, 0x4915e01d, 0x23044449, 0x6808466a, 0xf0004629, 0xf89dfa58, 0x42a00000, 0xf89dd10b, 0x42a00001, 0xf89dd107, 0x42a00002, 0xf89dd103, 0x42a00003, 0x2001d002, 0x83f8e8bd, 0xf1081d2d, 0x45b80804, 0x2000d3df, 0x0000e7f6, 0x00000004, 0x00000d9a, 0x00000008, 0x0000000c, 0x00000020, 0xb118b570, 0x2d2018d5, 0xb902d800, 0x2a20bd70, 0x6001d101, 0x2501e7fa, 0x1e6c4095, 0xfa046805, 0x43b5f603, 0x0604ea01, 0x4335409e, 0xbf006005, 0xb510e7ec, 0x21064604, 0xf0006820, 0xbd10fd7b, 0x4604b57f, 0x2000460d, 0x90009003, 0x90029001, 0xaa032301, 0x68202165, 0xfd7ef000, 0xb1164606, 0xb0044630, 0xb125bd70, 0xd00d2d01, 0xd1212d02, 0xf89de015, 0xf040000c, 0x90030040, 0x000cf89d, 0x0080f040, 0xe0179003, 0x000cf89d, 0x0040f020, 0xf89d9003, 0xf040000c, 0x90030080, 0xf89de00c, 0xf040000c, 0x90030040, 0x000cf89d, 0x0080f020, 0xe0019003, 0xe7d42001, 0x4620bf00, 0xffb9f7ff, 0x90002000, 0x90029001, 0xaa032301, 0x68202161, 0xfd5ff000, 0xb10e4606, 0xe7c24630, 0x462a462b, 0x68204629, 0xfb83f000, 0xb10e4606, 0xe7b84630, 0xe7b62000, 0x4604b57f, 0x2000460e, 0x90009003, 0x90029001, 0xaa032301, 0x68202185, 0xfd24f000, 0xb1154605, 0xb0044628, 0x2304bd70, 0x4631461a, 0xf7ffa803, 0x4620ff6d, 0xff83f7ff, 0x90002000, 0x90029001, 0xaa032301, 0x68202181, 0xfd29f000, 0xb10d4605, 0xe7e64628, 0xe7e42000, 0x4605b57f, 0x90032000, 0x2000e00e, 0x90019000, 0x23019002, 0x2170aa03, 0xf0006828, 0x4604fcf7, 0x4620b114, 0xbd70b004, 0x000cf89d, 0x280009c0, 0x2000d0eb, 0xe92de7f6, 0xb0844dff, 0x46924606, 0x9c10469b, 0xf5c0b2e0, 0xe9dd7580, 0xe9cd1011, 0x90024100, 0x4652462b, 0x99056830, 0xfcf3f000, 0xb11f4607, 0xb0084638, 0x8df0e8bd, 0x442c44aa, 0xf7ff4630, 0x4680ffc5, 0x0f00f1b8, 0x4640d001, 0x4630e7f1, 0xff2ff7ff, 0x1011e9dd, 0x0305ebab, 0x4100e9cd, 0x46529002, 0x99056830, 0xfcd3f000, 0xb10f4607, 0xe7de4638, 0xe7dc2000, 0x4df3e92d, 0x4604b082, 0xb1209803, 0xd00b2801, 0xd11b2802, 0x2700e011, 0x0a03f04f, 0xf04f2600, 0x20000b02, 0xe0159001, 0xf04f2700, 
0x26080a0b, 0x0b02f04f, 0x90012000, 0x2702e00c, 0x0a6bf04f, 0xf04f2608, 0x20000b32, 0xe0039001, 0xb0042001, 0x8df0e8bd, 0x4639bf00, 0xf7ff4620, 0x4680fef5, 0x0f00f1b8, 0x4640d001, 0x4631e7f1, 0xf7ff4620, 0x4680ff45, 0x0f00f1b8, 0x4640d001, 0xbf00e7e7, 0xf0006820, 0x2800fa40, 0x6820d0fa, 0xfa85f000, 0x682068a1, 0xfa91f000, 0xb10d4605, 0xe7d64628, 0x46514632, 0xf0006820, 0x4605faff, 0x4628b10d, 0x6820e7cd, 0x9a014659, 0xfb01f000, 0xb10d4605, 0xe7c44628, 0x7180f44f, 0xf0006820, 0x4605fb03, 0x4628b10d, 0x2103e7bb, 0xf0006820, 0x4605fb14, 0x4628b10d, 0x6820e7b3, 0xfa5ff000, 0x74209803, 0xe7ac2000, 0x4604b570, 0x46202100, 0xfea4f7ff, 0xb10d4605, 0xbd704628, 0x46202100, 0xfef6f7ff, 0xb10d4605, 0xe7f64628, 0x6820bf00, 0xf9f3f000, 0xd0fa2800, 0xf0006820, 0x6820fa38, 0xfb30f000, 0xf0006820, 0x2000fa3a, 0xbf007420, 0xe92de7e3, 0x46044df0, 0x46174688, 0x6820461d, 0x68406800, 0xfbb51c46, 0xb107fbf6, 0x2001b915, 0x8df0e8bd, 0x0005eb08, 0x428868e1, 0x2006d301, 0xf04fe7f6, 0xe00d0a00, 0x68204641, 0xfadef000, 0x46384632, 0xf0006861, 0x1badfc29, 0x44b04437, 0x0a01f10a, 0xd3ef45da, 0x4641b145, 0xf0006820, 0x462afacd, 0x68614638, 0xfc18f000, 0xf0006820, 0x2000fadc, 0xe92de7d4, 0x46044dfc, 0x4692460f, 0x6820461e, 0x68406800, 0x0801f100, 0x0f00f1ba, 0xb916d000, 0xe8bd2001, 0x19b88dfc, 0x428868e1, 0x2006d301, 0xf007e7f7, 0x1b7f0503, 0xfbb01970, 0x9001f0f8, 0x90002000, 0x4639e01e, 0xf0006820, 0xeba8fa9d, 0x68610205, 0x46511948, 0xfbe6f000, 0x0005eba8, 0xeba81a36, 0x44820005, 0x25004447, 0xf7ff4620, 0x4683fe99, 0x0f00f1bb, 0x4658d001, 0x9800e7d1, 0x90001c40, 0x0100e9dd, 0xd3dc4288, 0x4639b196, 0xf0006820, 0x6861fa79, 0x46321948, 0xf0004651, 0x4620fbc3, 0xfe7ef7ff, 0xf1bb4683, 0xd0010f00, 0xe7b64658, 0xf0006820, 0x2000fa7e, 0xe92de7b1, 0xb0844dff, 0x460e4605, 0x08f8461f, 0x7c289003, 0x2801b160, 0x2802d005, 0xf04fd107, 0x24080a6b, 0xf04fe008, 0x24080a0b, 0xbf00e004, 0x0a03f04f, 0xbf002400, 0xf04fbf00, 0xe0180b00, 0xe9cd2003, 0x94026000, 0x68282308, 0x9a064651, 0xfb50f000, 0xf1b84680, 
0xd0030f00, 0xb0084640, 0x8df0e8bd, 0x30089806, 0x36089006, 0xf10b3f08, 0x98030b01, 0xd3e34583, 0x2003b17f, 0x6000e9cd, 0x463b9402, 0x46516828, 0xf0009a06, 0x4680fb33, 0x0f00f1b8, 0x4640d001, 0x2000e7e1, 0xe92de7df, 0xb0864dff, 0x460c4680, 0x08f0461e, 0xf8989005, 0xb1480010, 0xd0062801, 0xd1032802, 0x0b32f04f, 0xe0052500, 0xbf00bf00, 0x0b02f04f, 0xbf002500, 0x2000bf00, 0xe0379004, 0xf7ff4640, 0x0a21fd78, 0xebb11de0, 0xd00f2f10, 0x23082003, 0x0501e9cd, 0x46599400, 0x9a084640, 0xfe11f7ff, 0xb1a74607, 0xb00a4638, 0x8df0e8bd, 0xe9cd2003, 0x94000501, 0xf8d82308, 0x46590000, 0xf0009a08, 0x4682fb08, 0x0f00f1ba, 0x4650d001, 0x4640e7eb, 0xfddcf7ff, 0xb10f4607, 0xe7e44638, 0x30089808, 0x34089008, 0x98043e08, 0x90041c40, 0x0104e9dd, 0xd3c34288, 0x4640b376, 0xfd3bf7ff, 0x19a00a21, 0xebb11e40, 0xd00d2f10, 0x46332003, 0x0501e9cd, 0x46599400, 0x9a084640, 0xfdd3f7ff, 0xb1974607, 0xe7c04638, 0xe9cd2003, 0x94000501, 0xf8d84633, 0x46590000, 0xf0009a08, 0x4682facc, 0x0f00f1ba, 0x4650d001, 0x4640e7af, 0xfda0f7ff, 0xb10f4607, 0xe7a84638, 0xe7a62000, 0x4df0e92d, 0x4607b086, 0x4693460c, 0xf7ff4638, 0xf1bbfd04, 0xd0090f00, 0x5f80f5bb, 0xf5bbd01d, 0xd0124f00, 0x3f80f5bb, 0xe007d11f, 0x2005b11c, 0xe8bdb006, 0x25c78df0, 0xe0182600, 0x260325d8, 0xb108b2a0, 0xe7f32005, 0x2552e011, 0xf3c42603, 0xb108000e, 0xe7eb2005, 0x2520e009, 0xf3c42603, 0xb108000b, 0xe7e32005, 0x2001e001, 0xbf00e7e0, 0x42a068f8, 0x2006d801, 0x2000e7da, 0xc151a901, 0x90009004, 0x461a2300, 0x68384629, 0xf986f000, 0xf1b84680, 0xd0010f00, 0xe7c94640, 0xf7ff4638, 0x4682fd49, 0x0f00f1ba, 0x4640d001, 0x2000e7c0, 0x0000e7be, 0xb118b570, 0x2d2018d5, 0xb902d800, 0x2a20bd70, 0x6001d101, 0x2501e7fa, 0x1e6c4095, 0xfa046805, 0x43b5f603, 0x0604ea01, 0x4335409e, 0xbf006005, 0x4601e7ec, 0x68026808, 0x0fc06810, 0xe92d4770, 0x460545f8, 0x4614468a, 0x6828461e, 0xb10e6807, 0xe0011d38, 0x0008f107, 0xf8d84680, 0x90000000, 0x4628bf00, 0xffe5f7ff, 0xd0fa2800, 0xd9022c1f, 0xe8bd2001, 0x230085f8, 0x46512208, 0xf7ff4668, 0x2318ffbf, 0x46212205, 
0xf7ff4668, 0x9800ffb9, 0x0000f8c8, 0xe7ec2000, 0xb1214601, 0xd0042901, 0xd1062902, 0x2000e003, 0x20014770, 0x2002e7fc, 0xf04fe7fa, 0xe7f730ff, 0x68084601, 0x68106802, 0x0001f000, 0x46014770, 0x6810680a, 0xf0226802, 0x60020201, 0x46014770, 0x6810680a, 0xf0426802, 0x60020201, 0xb5704770, 0x460c4605, 0x68066828, 0x4628bf00, 0xff9df7ff, 0xd0fa2800, 0x0001f004, 0x2c02b918, 0x2c20d301, 0x2001d901, 0x2001bd70, 0x0154ebc0, 0x22042313, 0xf7ff4630, 0x2000ff71, 0xe92de7f4, 0xb0824dff, 0x460f4682, 0xf8da4693, 0x68040000, 0x90016860, 0x900068a0, 0x4650bf00, 0xff77f7ff, 0xd0fa2800, 0xf7ff4638, 0x4605ffa5, 0xb9181c68, 0xb0062001, 0x8df0e8bd, 0x2308b13f, 0x46292202, 0xf7ff1d20, 0x2000ff4b, 0x4658e7f3, 0xff92f7ff, 0x98054680, 0xff8ef7ff, 0xf1084606, 0xb1080001, 0xb9081c70, 0xe7e42001, 0x22022308, 0xa8014629, 0xff34f7ff, 0x2202230c, 0xa8014641, 0xff2ef7ff, 0x22022310, 0xa8014631, 0xff28f7ff, 0x2202230c, 0x46684641, 0xff22f7ff, 0x22022310, 0x46684631, 0xff1cf7ff, 0x60609801, 0x60a09800, 0xe7c02000, 0x4604b570, 0x4616460d, 0x46322301, 0x46204629, 0xff2bf7ff, 0xb570bd70, 0x460d4604, 0x23004616, 0x46294632, 0xf7ff4620, 0xbd70ff20, 0x4604b570, 0x6820460d, 0xbf006806, 0xf7ff4620, 0x2800ff10, 0xf5b5d0fa, 0xd3015f80, 0xbd702001, 0x220c2304, 0xf1064629, 0xf7ff0014, 0x2000fee9, 0xb570e7f5, 0x460c4605, 0x68066828, 0x4628bf00, 0xfef7f7ff, 0xd0fa2800, 0x2c10b10c, 0x2001d901, 0x1e61bd70, 0x22042300, 0x0014f106, 0xfed0f7ff, 0xe7f52000, 0x4604b570, 0x6820460d, 0x46206803, 0xff22f7ff, 0xb1164606, 0xf7ff4620, 0x625dff24, 0xf4406818, 0x60183080, 0x4620b116, 0xff23f7ff, 0xb530bd70, 0x68184603, 0x46186804, 0xff0cf7ff, 0xb1154605, 0xf7ff4618, 0x6820ff0e, 0x3080f420, 0xb1156020, 0xf7ff4618, 0xbd30ff0e, 0x680a4601, 0x4a8e6810, 0x22036002, 0x22026042, 0x4a8c6082, 0x22006142, 0xf8c06242, 0xf8c02090, 0xf8c02094, 0xf8c020a8, 0x477020ac, 0x4dffe92d, 0x4616b086, 0xf8dd461d, 0xe9dda054, 0x98068712, 0x68046800, 0x90052000, 0xb10db116, 0xe0002001, 0x46832000, 0x0f00f1b8, 0xb10fd002, 0xe0002001, 0x90042000, 
0x0f00f1ba, 0x2001d001, 0x2000e000, 0x20009003, 0x90029001, 0xd9032d08, 0xb00a2001, 0x8df0e8bd, 0xd9012f08, 0xe7f82001, 0x0f04f1ba, 0x2001d901, 0x9816e7f3, 0xd901281f, 0xe7ee2001, 0x0f00f1bb, 0x9804d003, 0x2001b108, 0x2318e7e7, 0xa8052208, 0xf7ff9907, 0xf1bbfe45, 0xd0090f00, 0xf4409805, 0x90050000, 0x23141e69, 0xa8052203, 0xfe38f7ff, 0xb3289804, 0xf4409805, 0x90054000, 0x230c1e79, 0xa8052203, 0xfe2cf7ff, 0x0003f008, 0xf007b968, 0xb9500003, 0x0000f8d8, 0x00a8f8c4, 0xd10f2f08, 0x0004f8d8, 0x00acf8c4, 0x463ae00a, 0xa8014641, 0xf898f000, 0xf8c49801, 0x980200a8, 0x00acf8c4, 0xb1689803, 0xf4409805, 0x90052000, 0xf8c49814, 0xf1aa0094, 0x23100101, 0xa8052202, 0xfe00f7ff, 0x22052307, 0x9916a805, 0xfdfaf7ff, 0xf8449805, 0x68200f90, 0x0001f040, 0x0990f844, 0xf8d4bf00, 0xf3c00090, 0x28000040, 0xf1bbd1f9, 0xd0190f00, 0x0003f006, 0xf005b958, 0xb9400003, 0x00a0f8d4, 0x2d086030, 0xf8d4d10e, 0x607000a4, 0xf8d4e00a, 0x900100a0, 0x00a4f8d4, 0x462a9002, 0x4630a901, 0xf850f000, 0xe7682000, 0xb085b530, 0x460d4604, 0x90012000, 0x90039002, 0x46039004, 0x46294602, 0x46209000, 0xff30f7ff, 0xbd30b005, 0x4df0e92d, 0x4606b086, 0x4614460f, 0xe9dd461d, 0xf8ddab0f, 0xb1048038, 0x2001b91d, 0xe8bdb006, 0x20008df0, 0xe8a1a901, 0x462b0d01, 0x46394622, 0x46309000, 0xff12f7ff, 0xe92de7f0, 0xb0864df0, 0x460f4606, 0x461d4614, 0xab0fe9dd, 0x8038f8dd, 0xb91db104, 0xb0062001, 0x8df0e8bd, 0x461a2300, 0x46304639, 0x0d30e88d, 0xfef8f7ff, 0x0000e7f3, 0x80780081, 0x00101002, 0x0301ea40, 0xd003079b, 0xc908e009, 0xc0081f12, 0xd2fa2a04, 0xf811e003, 0xf8003b01, 0x1e523b01, 0x4770d2f9, 0x52800000, 0x0003ffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00800000, 0x00000000, 0x00000000 ], # Function addresses 'pc_init': 0x20000021, 'pc_unInit': 0x20000071, 'pc_program_page': 0x200000db, 'pc_erase_sector': 0x200000b7, 'pc_eraseAll': 0x2000009d, 'static_base' : 0x20000000 + 0x00000020 + 0x00000db8, 'begin_stack' : 0x20001000, 'begin_data' : 0x20000000 + 0x1000, 'page_size' : 0x100, 
'analyzer_supported' : False, 'analyzer_address' : 0x00000000, 'page_buffers' : [0x20001000, 0x20001100], # Enable double buffering 'min_program_length' : 0x100, # Flash information 'flash_start': 0x0, 'flash_size': 0x800000, 'sector_sizes': ( (0x0, 0x10000), ) } FLASH_ALGO_EFLASH = { 'load_address' : 0x20000000, # Flash algorithm as a hex string 'instructions': [ 0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2, 0x4604b570, 0x4616460d, 0x44484866, 0xb9986800, 0x44484865, 0x44494965, 0x48656048, 0x60084448, 0x49644608, 0x60084449, 0x49636808, 0xf8dbf000, 0x495c2001, 0x60084449, 0xbd702000, 0xb9414601, 0x44484858, 0x28016800, 0x2000d103, 0x444a4a55, 0x20006010, 0xb5104770, 0x44494956, 0x68082202, 0xf0002100, 0xb108fa2b, 0xbd102001, 0xe7fc2000, 0x4604b510, 0x6020f1a4, 0x4449494b, 0x1e496849, 0x0401ea00, 0x4449494b, 0x68082200, 0xf0004621, 0xb108fa15, 0xbd102001, 0xe7fc2000, 0x43f8e92d, 0x460d4604, 0xf1a44616, 0x493f6020, 0x68494449, 0xea001e49, 0x20040401, 0xea4f9000, 0x27000895, 0x493ce00e, 0x466b4449, 0x68084632, 0xf0004621, 0xb110f94b, 0xe8bd2001, 0x1d3683f8, 0x1c7f1d24, 0xd3ee4547, 0xe7f62000, 0x41fce92d, 0x460f4604, 0xf1a44690, 0x492c6020, 0x68494449, 0xea001e49, 0x20040401, 0x25009001, 0x492ae01a, 0xab014449, 0x6808466a, 0xf0004621, 0x2600f8c1, 0xf81de00c, 0x19a90006, 0x1001f818, 0xd0044288, 0x6020f104, 0xe8bd4428, 0x1c7681fc, 0xd3f02e04, 0x1d2d1d24, 0xd3e242bd, 0xe7f42000, 0x41fce92d, 0x460e4604, 0xf1a44617, 0x49146020, 0x68494449, 0xea001e49, 0x20040401, 0xf04f9001, 0xe0160800, 0x44494911, 0x466aab01, 0x46216808, 0xf890f000, 0xe0072500, 0x0005f81d, 0xd00242b8, 0xe8bd2001, 0x1c6d81fc, 0xd3f52d04, 0xf1081d24, 0x45b00804, 0x2000d3e6, 0x0000e7f3, 0x00000004, 0x0000000c, 0x00000014, 0x00000008, 0x0000001c, 0x02710000, 0x6804b510, 0xb11a6823, 0x041ff001, 0xe002601c, 0x041ff001, 0xbd10605c, 0x4604b510, 0x68096821, 0x5080f501, 0xf9eaf000, 0xb570bd10, 0x460d4604, 0x211f2200, 0xf7ff4620, 0x6821ffe3, 0xf5016809, 0x46295080, 
0xf9cef000, 0xf7ff4620, 0x6861ffe5, 0x20016048, 0x70086861, 0xb570bd70, 0x25004604, 0x69a0bf00, 0x001cf000, 0xd0fa1e05, 0x0010f005, 0x4620b140, 0xf9b3f000, 0x69a0bf00, 0x000cf000, 0xd0fa1e05, 0xbd704628, 0x4604b570, 0xbf00460d, 0xf00069a0, 0x28000001, 0x61e5d1fa, 0x61602001, 0xf7ff4620, 0xbd70ffda, 0x4604b5f0, 0x462e460d, 0x0020f104, 0x2a1018c7, 0x2010d901, 0x4610e000, 0x18d04602, 0xd9002810, 0x21001ad2, 0x7838e004, 0x1c7f7030, 0x1c491c76, 0xd3f84291, 0xbdf04610, 0x4dfce92d, 0x460f4604, 0x461d4692, 0x68006820, 0x682e9001, 0x0b00f04f, 0x200046d8, 0x68609000, 0xb9107800, 0xe8bd2001, 0x68288dfc, 0x68614438, 0x42886849, 0x2005d901, 0x9801e7f5, 0xb1086980, 0xe7f02002, 0x68096821, 0x5080f501, 0xf963f000, 0xd0012880, 0xe7e62003, 0x4639e015, 0xf7ff9801,
-> Element: # todo: In the implementation below... # We use condition in context of "matching", i.e. as a predicate... # why then not accept Callable[[E], bool] also? # (as you remember, Condition is Callable[[E], None] throwing Error) # This will allow the following code be possible # results.element_by(lambda it: # Result(it).title.matching(have.text(text))) # instead of: # results.element_by(lambda it: have.text(text)( # Result(it).title)) # in addition to: # results.element_by_its(lambda it: # Result(it).title, have.text(text)) # Open Points: # - do we need element_by_its, if we allow Callable[[E], bool] ? # - if we add elements_by_its, do we need then to accept Callable[[E], bool] ? # - probably... Callable[[E], bool] will lead to worse error messages, # in such case we ignore thrown error's message # - hm... ut seems like we nevertheless ignore it... # we use element.matching(condition) below condition = condition if isinstance(condition, Condition) \ else Condition(str(condition), condition) def find() -> WebElement: cached = self.cached for element in cached: if element.matching(condition): return element() from selene.core import query outer_htmls = [query.outer_html(element) for element in cached] raise AssertionError( f'\n\tCannot find element by condition «{condition}» ' f'\n\tAmong {self}' f'\n\tActual webelements collection:' f'\n\t{outer_htmls}') # todo: isn't it better to print it all the time via hook, like for Element? 
# NOTE(review): this chunk begins mid-method — the line below is the tail of
# Collection.element_by, whose "def" lies above this excerpt.  Indentation in
# this whole region was reconstructed from a whitespace-collapsed source;
# statement nesting marked below as "reconstructed" should be confirmed
# against the upstream selene source.
        # Lazily locate the first matching element; `find` is defined above.
        return Element(Locator(f'{self}.element_by({condition})', find), self.config)

    def element_by_its(
            self,
            selector_or_callable: Union[str, tuple, Callable[[Element], Element]],
            condition: Condition[Element]) -> Element:
        """Return the first element of this collection whose inner element matches a condition.

        :param selector_or_callable:
            - selector may be a str with css/xpath selector or tuple with by.* locator
            - callable should be a function on element that returns element
        :param condition: a condition to match the found inner element against
        :return: element from collection that has inner/relative element matching condition

        GIVEN html elements somewhere in DOM::
            .result
                .result-title
                .result-url
                .result-snippet

        THEN::

            browser.all('.result')\
                .element_by_its('.result-title', have.text(text))\
                .element('.result-url').click()

        ... is a shortcut for::

            browser.all('.result')\
                .element_by(lambda it: have.text(text)(it.element('.result-title')))\
                .element('.result-url').click()

        OR with PageObject:

        THEN::

            Result(results.element_by_its(lambda it: Result(it).title, have.text(text)))\
                .url.click()

        Shortcut for::

            Result(results.element_by(lambda it: have.text(text)(Result(it).title)))\
                .url.click()

        WHERE::

            results = browser.all('.result')

            class Result:
                def __init__(self, element):
                    self.element = element
                    self.title = self.element.element('.result-title')
                    self.url = self.element.element('.result-url')
            # ...
        """
        # todo: main questions to answer before removing warning:
        # - isn't it enough to allow Callable[[Element], bool] as condition?
        #   browser.all('.result').element_by(
        #       lambda it: it.element('.result-title').matching(have.text('browser tests in Python')))
        #       .element('.result-url').click()
        # - how to improve error messages in case we pass lambda (not a fun with good name/str repr)?
        warnings.warn(
            'element_by_its is experimental; might be renamed or removed in future',
            FutureWarning)

        def find_in(parent: Element):
            # Dispatch: a callable builds the inner element itself; a selector
            # (str or by.* tuple) is resolved relative to the parent.
            if callable(selector_or_callable):
                return selector_or_callable(parent)
            else:
                return parent.element(selector_or_callable)

        # Delegate to element_by with a condition applied to the inner element.
        return self.element_by(lambda it: condition(find_in(it)))

    # todo: consider adding ss alias
    def all(self, css_or_xpath_or_by: Union[str, tuple]) -> Collection:
        """Collect ALL inner elements matching the selector from EVERY element of this collection."""
        warnings.warn('might be renamed or deprecated in future; '
                      'all is actually a shortcut for collected(lambda element: element.all(selector)...'
                      'but we also have all_first and...'
                      'it is yet unclear what name would be best for all_first as addition to all... '
                      'all_first might confuse with all(...).first... I mean: '
                      'all_first(selector) is actually '
                      'collected(lambda e: e.element(selector)) '
                      'but it is not the same as '
                      'all(selector).first '
                      'that is collected(lambda e: e.all(selector)).first ... o_O ',
                      FutureWarning)
        by = to_by(css_or_xpath_or_by)

        # todo: consider implement it through calling self.collected
        # because actually the impl is self.collected(lambda element: element.all(selector))

        return Collection(
            Locator(f'{self}.all({by})',
                    lambda: flatten([webelement.find_elements(*by) for webelement in self()])),
            self.config)

    # todo: consider adding s alias
    def all_first(self, css_or_xpath_or_by: Union[str, tuple]) -> Collection:
        """Collect the FIRST inner element matching the selector from EVERY element of this collection."""
        warnings.warn('might be renamed or deprecated in future; '
                      'it is yet unclear what name would be best... '
                      'all_first might confuse with all(...).first... I mean: '
                      'all_first(selector) is actually '
                      'collected(lambda e: e.element(selector)) '
                      'but it is not the same as '
                      'all(selector).first '
                      'that is collected(lambda e: e.all(selector)).first ... o_O ',
                      FutureWarning)
        by = to_by(css_or_xpath_or_by)

        # todo: consider implement it through calling self.collected
        # because actually the impl is self.collected(lambda element: element.element(selector))

        return Collection(
            Locator(f'{self}.all_first({by})',
                    lambda: [webelement.find_element(*by) for webelement in self()]),
            self.config)

    def collected(self, finder: Callable[[Element], Union[Element, Collection]]) -> Collection:
        """Build a new collection by applying *finder* to each cached element and flattening the results."""
        # todo: consider adding predefined queries to be able to write
        #       collected(query.element(selector))
        #       over
        #       collected(lambda element: element.element(selector))
        #       and
        #       collected(query.all(selector))
        #       over
        #       collected(lambda element: element.all(selector))
        #       consider also putting such element builders like to find.* module instead of query.* module
        #       because they are not supposed to be used in entity.get(*) context defined for other query.* fns
        return Collection(
            Locator(f'{self}.collected({finder})',
                    # todo: consider skipping None while flattening
                    lambda: flatten([finder(element)() for element in self.cached])),
            self.config)

    # --- Assertable --- #

    def should(self, condition: Union[CollectionCondition, ElementCondition],
               timeout: int = None) -> Collection:
        """Assert a condition on the collection (or each element, for an ElementCondition)."""
        if isinstance(condition, ElementCondition):
            for element in self:
                if timeout:
                    warnings.warn(
                        "using timeout argument is deprecated; "
                        "use `browser.all('.foo').with_(Config(timeout=6)).should(have.size(0))`"
                        "or just `...with_(timeout=6).should(...` style instead",
                        DeprecationWarning)
                    element.with_(Config(timeout=timeout)).should(condition)
                # NOTE(review): with the reconstructed indentation, passing `timeout`
                # makes each element be checked twice (once with the overridden
                # timeout, once with the default config) — confirm against upstream.
                element.should(condition)
        else:
            if timeout:
                warnings.warn(
                    "using timeout argument is deprecated; "
                    "use `browser.all('.foo').with_(Config(timeout=6)).should(have.size(0))` "
                    "or just `...with_(timeout=6).should(...` style instead",
                    DeprecationWarning)
                self.with_(Config(timeout=timeout)).should(condition)
            # NOTE(review): same apparent double-check applies here when timeout is set.
            super().should(condition)
        return self

    # --- Deprecated --- #
    # Thin aliases kept for backwards compatibility; each warns and delegates
    # to the supported API.

    def get_actual_webelements(self) -> List[WebElement]:
        warnings.warn(
            "considering to be deprecated; use collection as callable instead, like: browser.all('.foo')()",
            PendingDeprecationWarning)
        return self()

    def caching(self) -> Collection:
        warnings.warn("deprecated; use `cached` property instead: browser.all('#foo').cached",
                      DeprecationWarning)
        return self.cached

    def all_by(self, condition: Condition[Element]) -> Collection:
        warnings.warn(
            "deprecated; use `filtered_by` instead: browser.all('.foo').filtered_by(be.enabled)",
            DeprecationWarning)
        return self.filtered_by(condition)

    def filter_by(self, condition: Condition[Element]) -> Collection:
        warnings.warn(
            "deprecated; use `filtered_by` instead: browser.all('.foo').filtered_by(be.enabled)",
            DeprecationWarning)
        return self.filtered_by(condition)

    def find_by(self, condition: Condition[Element]) -> Element:
        warnings.warn(
            "deprecated; use `element_by` instead: browser.all('.foo').element_by(be.enabled)",
            DeprecationWarning)
        return self.element_by(condition)

    def size(self):
        warnings.warn(
            "deprecated; use `len` standard function instead: len(browser.all('.foo'))",
            DeprecationWarning)
        return len(self)

    def should_each(self, condition: ElementCondition, timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method instead: browser.all('.foo').should(have.css_class('bar'))",
            DeprecationWarning)
        return self.should(condition, timeout)

    def assure(self, condition: Union[CollectionCondition, ElementCondition],
               timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method instead: browser.all('.foo').should(have.size(0))",
            DeprecationWarning)
        return self.should(condition, timeout)

    def should_be(self, condition: Union[CollectionCondition, ElementCondition],
                  timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method with `be.*` style conditions instead: "
            "browser.all('.foo').should(be.*)",
            DeprecationWarning)
        return self.should(condition, timeout)

    def should_have(self, condition: Union[CollectionCondition, ElementCondition],
                    timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method with `have.*` style conditions instead: "
            "browser.all('.foo').should(have.size(0))",
            DeprecationWarning)
        return self.should(condition, timeout)

    def should_not(self, condition: Union[CollectionCondition, ElementCondition],
                   timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method with `be.not.*` or `have.no.*` style conditions instead: "
            "`browser.all('.foo').should(have.no.size(2))`, "
            "you also can build your own inverted conditions by: `not_zero = Condition.as_not(have.size(0'))`",
            DeprecationWarning)
        return self.should(condition, timeout)

    def assure_not(self, condition: Union[CollectionCondition, ElementCondition],
                   timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method with `be.not.*` or `have.no.*` style conditions instead: "
            "`browser.all('.foo').should(have.no.size(2))`, "
            "you also can build your own inverted conditions by: `not_zero = Condition.as_not(have.size(0'))`",
            DeprecationWarning)
        return self.should(condition, timeout)

    def should_not_be(self, condition: Union[CollectionCondition, ElementCondition],
                      timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method with `be.not.*` or `have.no.*` style conditions instead: "
            "`browser.all('.foo').should(have.no.size(2))`, "
            "you also can build your own inverted conditions by: `not_zero = Condition.as_not(have.size(0'))`",
            DeprecationWarning)
        return self.should(condition, timeout)

    def should_not_have(self, condition: Union[CollectionCondition, ElementCondition],
                        timeout=None) -> Collection:
        warnings.warn(
            "deprecated; use `should` method with `be.not.*` or `have.no.*` style conditions instead: "
            "`browser.element('#foo').should(have.no.size(2))`, "
            "you also can build your own inverted conditions by: `not_zero = Condition.as_not(have.size(0'))`",
            DeprecationWarning)
        return self.should(condition, timeout)


class SeleneCollection(Collection):
    # todo: consider deprecating this name
    # Backwards-compatible alias for the selene 1.x class name.
    pass


class Browser(WaitingEntity):
    # Entry point entity wrapping a WebDriver with selene's lazy/waiting API.

    def __init__(self, config: Config):
        # todo: what about adding **config_as_kwargs?
        super().__init__(config)

    # todo: consider implement it as context manager too...
    def with_(self, config: Config = None, **config_as_kwargs) -> Browser:
        # Return a new Browser sharing this one's config overridden by the given options.
        return Browser(self.config.with_(config, **config_as_kwargs))

    def __str__(self):
        return 'browser'

    # todo: consider making it callable ...

    @property
    def driver(self) -> WebDriver:
        # Raw selenium WebDriver from the config — escape hatch to low-level API.
        return self.config.driver

    # @property
    # def actions(self) -> ActionChains:
    #     """
    #     It's kind of just a shortcut for pretty low level actions from selenium webdriver
    #     Yet unsure about this property here:)
    #     comparing to usual high level Selene API...
    #     Maybe later it would be better to make own Actions with selene-style retries, etc.
    #     """
    #     return ActionChains(self.config.driver)

    # --- Element builders --- #

    def element(self, css_or_xpath_or_by: Union[str, tuple]) -> Element:
        # Lazy element: nothing is searched until the element is actually used.
        by = to_by(css_or_xpath_or_by)
        return Element(
            Locator(f'{self}.element({by})',
                    lambda: self.driver.find_element(*by)),
            self.config)

    def all(self, css_or_xpath_or_by: Union[str, tuple]) -> Collection:
        # Lazy collection: nothing is searched until the collection is actually used.
        by = to_by(css_or_xpath_or_by)
        return Collection(
            Locator(f'{self}.all({by})',
                    lambda: self.driver.find_elements(*by)),
            self.config)

    # --- High Level Commands--- #

    def open(self, relative_or_absolute_url: str) -> Browser:
        # Apply configured window size before navigation, if both dimensions are set.
        width = self.config.window_width
        height = self.config.window_height

        if width and height:
            self.driver.set_window_size(int(width), int(height))
        # NOTE(review): the source chunk is cut off mid-statement below.
        is_absolute
import json from unittest import TestCase from conda.common.compat import on_win from conda import text_type from conda._vendor.auxlib.ish import dals from conda.base.context import reset_context, context from conda.common.io import captured, env_var, replace_log_streams from conda.exceptions import CommandNotFoundError, FileNotFoundError, CondaHTTPError, CondaKeyError, \ CondaRevisionError, DirectoryNotFoundError, MD5MismatchError, PackageNotFoundError, TooFewArgumentsError, \ TooManyArgumentsError, conda_exception_handler, BasicClobberError, KnownPackageClobberError, \ UnknownPackageClobberError, SharedLinkPathClobberError, BinaryPrefixReplacementError, BinaryPrefixReplacementError def _raise_helper(exception): raise exception class ExceptionTests(TestCase): def test_TooManyArgumentsError(self): expected = 2 received = 5 offending_arguments = "groot" exc = TooManyArgumentsError(expected, received, offending_arguments) with env_var("CONDA_JSON", "yes", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc) json_obj = json.loads(c.stdout) assert not c.stderr assert json_obj['exception_type'] == "<class 'conda.exceptions.TooManyArgumentsError'>" assert json_obj['exception_name'] == 'TooManyArgumentsError' assert json_obj['message'] == text_type(exc) assert json_obj['error'] == repr(exc) assert json_obj['expected'] == 2 assert json_obj['received'] == 5 assert json_obj['offending_arguments'] == "groot" with env_var("CONDA_JSON", "no", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc) assert not c.stdout assert c.stderr.strip() == "TooManyArgumentsError: Got 5 arguments (g, r, o, o, t) but expected 2." 
def test_TooFewArgumentsError(self):
    """TooFewArgumentsError reports expected/received counts in both output modes."""
    exc = TooFewArgumentsError(5, 2)

    # JSON mode: the handler dumps a structured record to stdout.
    with env_var("CONDA_JSON", "yes", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        payload = json.loads(c.stdout)
        assert not c.stderr
        assert payload['exception_type'] == "<class 'conda.exceptions.TooFewArgumentsError'>"
        assert payload['exception_name'] == 'TooFewArgumentsError'
        assert payload['message'] == text_type(exc)
        assert payload['error'] == repr(exc)
        assert payload['expected'] == 5
        assert payload['received'] == 2

    # Plain mode: the human-readable message goes to stderr.
    with env_var("CONDA_JSON", "no", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        assert not c.stdout
        assert c.stderr.strip() == "TooFewArgumentsError: Got 2 arguments but expected 5."
This path already exists in the target prefix, and it won't be removed by an uninstall action in this transaction. The path appears to be coming from the package 'Liquid', which is already installed in the prefix. """).strip() def test_UnknownPackageClobberError(self): target_path = "siebel/center/for/c.s" colliding_dist_being_linked = "Groot" exc = UnknownPackageClobberError(target_path, colliding_dist_being_linked, context) with env_var("CONDA_PATH_CONFLICT", "prevent", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc) assert not c.stdout assert c.stderr.strip() == dals(""" ClobberError: The package 'Groot' cannot be installed due to a path collision for 'siebel/center/for/c.s'. This path already exists in the target prefix, and it won't be removed by an uninstall action in this transaction. The path is one that conda doesn't recognize. It may have been created by another package manager. """).strip() def test_SharedLinkPathClobberError(self): target_path = "some/where/in/shampoo/banana" incompatible_package_dists = "Groot" exc = SharedLinkPathClobberError(target_path, incompatible_package_dists, context) with env_var("CONDA_PATH_CONFLICT", "prevent", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc) assert not c.stdout assert c.stderr.strip() == dals(""" ClobberError: This transaction has incompatible packages due to a shared path. 
def test_DirectoryNotFoundError(self):
    """DirectoryNotFoundError carries the offending path in both output modes."""
    exc = DirectoryNotFoundError("Groot")

    # JSON mode: structured record on stdout, nothing on stderr.
    with env_var("CONDA_JSON", "yes", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        record = json.loads(c.stdout)
        assert not c.stderr
        assert record['exception_type'] == "<class 'conda.exceptions.DirectoryNotFoundError'>"
        assert record['exception_name'] == 'DirectoryNotFoundError'
        assert record['message'] == text_type(exc)
        assert record['error'] == repr(exc)
        assert record['path'] == "Groot"

    # Plain mode: message on stderr only.
    with env_var("CONDA_JSON", "no", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        assert not c.stdout
        assert c.stderr.strip() == "DirectoryNotFoundError: Groot"
def test_PackageNotFoundError(self):
    """PackageNotFoundError renders correctly in JSON and plain modes.

    Fix: this method was named `PackageNotFoundError` (no `test_` prefix), so
    pytest/unittest never collected it — the test was silently dead — and the
    name shadowed the imported exception class as a class attribute. Renamed so
    the test actually runs; the body is unchanged.
    """
    package = "Groot"
    exc = PackageNotFoundError(package)

    with env_var("CONDA_JSON", "yes", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        json_obj = json.loads(c.stdout)
        assert not c.stderr
        assert json_obj['exception_type'] == "<class 'conda.exceptions.PackageNotFoundError'>"
        assert json_obj['message'] == text_type(exc)
        assert json_obj['package_name'] == package
        assert json_obj['error'] == repr(exc)

    with env_var("CONDA_JSON", "no", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        assert not c.stdout
        assert c.stderr.strip() == "Package not found: Conda could not find Groot"
def test_CondaKeyError(self):
    """CondaKeyError includes both the key and the explanatory message."""
    exc = CondaKeyError("Groot", "Groot is not a key.")

    # JSON mode: structured record on stdout.
    with env_var("CONDA_JSON", "yes", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        record = json.loads(c.stdout)
        assert not c.stderr
        assert record['exception_type'] == "<class 'conda.exceptions.CondaKeyError'>"
        assert record['exception_name'] == 'CondaKeyError'
        assert record['message'] == text_type(exc)
        assert record['error'] == repr(exc)
        assert record['key'] == "Groot"

    # Plain mode: message on stderr only.
    with env_var("CONDA_JSON", "no", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        assert not c.stdout
        assert c.stderr.strip() == "CondaKeyError: 'Groot': Groot is not a key."
def test_CommandNotFoundError_simple(self):
    """An unknown subcommand produces a plain CommandNotFoundError message."""
    exc = CommandNotFoundError("instate")

    # JSON mode: structured record on stdout.
    with env_var("CONDA_JSON", "yes", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        record = json.loads(c.stdout)
        assert not c.stderr
        assert record['exception_type'] == "<class 'conda.exceptions.CommandNotFoundError'>"
        assert record['message'] == text_type(exc)
        assert record['error'] == repr(exc)

    # Plain mode: message on stderr only.
    with env_var("CONDA_JSON", "no", reset_context):
        with captured() as c, replace_log_streams():
            conda_exception_handler(_raise_helper, exc)
        assert not c.stdout
        assert c.stderr.strip() == "CommandNotFoundError: 'instate'"
conda_exception_handler(_raise_helper, exc) json_obj = json.loads(c.stdout) assert not c.stderr assert json_obj['exception_type'] == "<class 'conda.exceptions.CommandNotFoundError'>" assert json_obj['message'] == text_type(exc) assert json_obj['error'] == repr(exc) with env_var("CONDA_JSON", "no", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc) assert not c.stdout assert c.stderr.strip() == ("CommandNotFoundError: You need to install conda-build in order to\n" \ "use the 'conda build' command.") def test_CommandNotFoundError_activate(self): cmd = "activate" exc = CommandNotFoundError(cmd) with env_var("CONDA_JSON", "yes", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc) json_obj = json.loads(c.stdout) assert not c.stderr assert json_obj['exception_type'] == "<class 'conda.exceptions.CommandNotFoundError'>" assert json_obj['message'] == text_type(exc) assert json_obj['error'] == repr(exc) with env_var("CONDA_JSON", "no", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc) assert not c.stdout if on_win: message = "CommandNotFoundError: 'activate'" else: message = ("CommandNotFoundError: 'activate is not a conda command.\n" "Did you mean 'source activate'?") assert c.stderr.strip() == message def test_BinaryPrefixReplacementError(self): new_data_length = 1104 original_data_length = 1404 new_prefix = "some/where/on/goodwin.ave" path = "some/where/by/boneyard/creek" placeholder = "save/my/spot/in/374" exc = BinaryPrefixReplacementError(path, placeholder, new_prefix, original_data_length, new_data_length) with env_var("CONDA_JSON", "yes", reset_context): with captured() as c, replace_log_streams(): conda_exception_handler(_raise_helper, exc)
def matrix(self):
    """Return the dense 0/1 correspondence matrix, building it lazily.

    On first call, derives the matrix from the stored (i, j) pairs; the
    result is cached on the instance and returned on later calls.
    """
    if self._matrix is None:
        shape = self._pairs.max(axis=0) + 1
        dense = np.zeros(shape)
        for row, col in self._pairs:
            dense[row, col] = 1
        self._matrix = dense
    return self._matrix
def warp(self, A, XtoY=True):
    """Warp points in A by pairwise correspondence.

    Each row i of A is replaced by A[target(i)], where target(i) is the
    second coordinate of the first correspondence pair whose first
    coordinate reaches i. Set XtoY=False to warp in the other direction
    (the pair columns are flipped first).
    """
    P = self.pairs()
    if not XtoY:
        P = np.fliplr(P)
    # Fix: `np.int` was removed in NumPy 1.24; the builtin `int` maps to the
    # same default integer dtype.
    warp_inds = np.zeros(A.shape[0], dtype=int)
    j = 0
    for i in range(A.shape[0]):
        while P[j, 0] < i:
            j += 1
        warp_inds[i] = P[j, 1]
    return A[warp_inds]
debug=False): '''Dynamic Time Warping''' dist = metric.between(X,Y) if debug: path = _python_dtw_path(dist) else: path = _dtw_path(dist) return Correspondence(pairs=path) def _python_dtw_path(dist): '''Pure python, slow version of DTW''' nx,ny = dist.shape cost = np.zeros(dist.shape) trace = np.zeros(dist.shape,dtype=np.int) cost[0,:] = np.cumsum(dist[0,:]) cost[:,0] = np.cumsum(dist[:,0]) trace[0,:] = 1 trace[:,0] = 0 for i,j in product(range(1,nx),range(1,ny)): choices = dist[i,j] + np.array((cost[i-1,j], cost[i,j-1], cost[i-1,j-1])) trace[i,j] = choices.argmin() cost[i,j] = choices.min() path = [(nx-1,ny-1)] while not (i == 0 and j == 0): s = trace[i,j] if s == 0: i -= 1 elif s == 1: j -= 1 else: i -= 1 j -= 1 path.append((i,j)) return np.array(path)[::-1] # Shenanigans for running the fast C version of DTW, # but falling back to pure python if needed try: from scipy.weave import inline from scipy.weave.converters import blitz except ImportError: _dtw_path = _python_dtw_path else: def _dtw_path(dist): '''Fast DTW, with inlined C''' nx,ny = dist.shape path = np.zeros((nx+ny,2),dtype=np.int) code = ''' int i,j; double* cost = new double[ny]; cost[0] = dist(0,0); for (j=1; j<ny; ++j) cost[j] = dist(0,j) + cost[j-1]; char** trace = new char*[nx]; for (i=0; i<nx; ++i) { trace[i] = new char[ny]; trace[i][0] = 0; } for (j=0; j<ny; ++j) { trace[0][j] = 1; } double diag,c; for (i=1; i<nx; ++i){ diag = cost[0]; cost[0] += dist(i,0); for (j=1; j<ny; ++j){ // c <- min(cost[j],cost[j-1],diag), trace <- argmin if (diag < cost[j]){ if (diag < cost[j-1]){ c = diag; trace[i][j] = 2; } else { c = cost[j-1]; trace[i][j] = 1; } } else if (cost[j] < cost[j-1]){ c = cost[j]; trace[i][j] = 0; } else { c = cost[j-1]; trace[i][j] = 1; } diag = cost[j]; cost[j] = dist(i,j) + c; } } delete[] cost; i = nx-1; j = ny-1; int p = nx+ny-1; for (;p>=0; --p){ path(p,0) = i; path(p,1) = j; if (i==0 && j==0) break; switch (trace[i][j]){ case 0: --i; break; case 1: --j; break; default: --i; --j; } } 
def block_antidiag(*args):
    """Assemble the given block matrices into a block anti-diagonal matrix.

    Each block is flipped left-right, laid on the main diagonal, and the
    result is flipped back — placing the blocks along the anti-diagonal.
    """
    flipped = [np.fliplr(block) for block in args]
    return np.fliplr(sp.linalg.block_diag(*flipped))
def lapeig(W=None, L=None, num_vecs=None, return_vals=False):
    """Laplacian eigenmaps embedding from an adjacency or laplacian matrix.

    Args:
        W: adjacency matrix; used to build a normalized laplacian when L is None.
        L: precomputed symmetric laplacian; takes precedence over W.
        num_vecs: number of eigenvectors to keep (default: all non-trivial).
        return_vals: if True, also return the kept eigenvalues.

    Returns:
        The embedding (n, num_vecs), plus the eigenvalues if requested.

    Fix: the eigenvalue-skipping loop used `xrange`, a NameError on Python 3
    (the file otherwise supports Py3 via its `izip` shim); replaced with `range`.
    """
    tmp_L = (L is None)  # we can overwrite L if it's a tmp variable
    if L is None:
        L = laplacian(W, normed=True)
    vals, vecs = sp.linalg.eigh(L, overwrite_a=tmp_L)  # assumes L is symmetric!
    # eigh output is not guaranteed to be in sorted order
    idx = np.argsort(vals)
    vecs = vecs.real[:, idx]
    vals = vals.real[idx]
    # discard any components with (near-)zero eigenvalues
    for i in range(vals.shape[0]):
        if vals[i] >= 1e-8:
            break
    if num_vecs is None:
        # take all of them
        num_vecs = vals.shape[0] - i
    embedding = vecs[:, i:i+num_vecs]
    if return_vals:
        return embedding, vals[i:i+num_vecs]
    return embedding
overwrite_b=True) return vecs ''' Alignment techniques ''' def _manifold_setup(Wx,Wy,Wxy,mu): Wxy = mu * (Wx.sum() + Wy.sum()) / (2 * Wxy.sum()) * Wxy W = np.asarray(np.bmat(((Wx,Wxy),(Wxy.T,Wy)))) return laplacian(W) def _manifold_decompose(L,d1,d2,num_dims,eps,vec_func=None): vals,vecs = np.linalg.eig(L) idx = np.argsort(vals) for i in range(len(idx)): if vals[idx[i]] >= eps: break vecs = vecs.real[:,idx[i:]] if vec_func: vecs = vec_func(vecs) for i in range(vecs.shape[1]): vecs[:,i] /= np.linalg.norm(vecs[:,i]) map1 = vecs[:d1,:num_dims] map2 = vecs[d1:d1+d2,:num_dims] return map1,map2 def _linear_decompose(X,Y,L,num_dims,eps): Z = sp.linalg.block_diag(X.T,Y.T) u,s,_ = np.linalg.svd(np.dot(Z,Z.T)) Fplus = np.linalg.pinv(np.dot(u,np.diag(np.sqrt(s)))) T = reduce(np.dot,(Fplus,Z,L,Z.T,Fplus.T)) L = 0.5*(T+T.T) d1,d2 = X.shape[1],Y.shape[1] return _manifold_decompose(L,d1,d2,num_dims,eps,lambda v: np.dot(Fplus.T,v)) class LinearAlignment(object):
= False) ncutSizer.AddWidget(self.usencut) self.ncut = EcceSpinCtrl(self, hardRange = "[0..)", name = "ES.Theory.NWPW.EwaldNcut", default = 1, label = "") ncutSizer.AddWidget(self.ncut) nwpwRightSizer.AddWidget(ncutSizer) self.mapping = EcceSpinCtrl(self, hardRange = "[1..2]", name = "ES.Theory.NWPW.mapping", default = 1, label = "Parallel Mapping") nwpwLeftSizer.AddWidget(self.mapping) self.mulliken = EcceComboBox(self, choices = ["None","LCAO","Kawai"], name = "ES.Theory.NWPW.Mulliken", default = 0, label = "Mulliken:") nwpwRightSizer.AddWidget(self.mulliken) nwpwSizer.AddWidget(nwpwLeftSizer, flag = wx.ALL) nwpwSizer.AddWidget(nwpwRightSizer, flag = wx.ALL) self.panelSizer.Add(nwpwSizer) unitcellSizer = EcceBoxSizer(self, label = "Unit Cell", cols = 1) self.unitMenuSizer = EcceHBoxSizer() #Lattice support not in current NWChem 5.1.1 GDB 4/23/09 #cellChoice = ["None","SC","FCC","BCC", "LatticeVectors", "Lattice"] cellChoice = ["None","SC","FCC","BCC", "LatticeVectors"] self.cell = EcceComboBox(self, choices = cellChoice, name = "ES.Theory.NWPW.CellType", default = 0, label = "Cell Type:", export = 1) self.unitMenuSizer.AddWidget(self.cell) bcChoice = ["periodic","aperiodic"] self.bc = EcceComboBox(self, choices = bcChoice, name = "ES.Theory.NWPW.CellBoundary", default = 0, label = " Boundary Condition:") self.unitMenuSizer.AddWidget(self.bc) self.unitNonePanel = EcceSubPanel(self) unitNoneSizer = EcceVBoxSizer() noneSpacer = EcceSpacer(self.unitNonePanel, height = 81) unitNoneSizer.AddWidget(noneSpacer) self.unitNonePanel.SetSizer(unitNoneSizer) #lat input self.unitLPanel = EcceSubPanel(self) unitLSizer = EcceVBoxSizer() self.lat = EcceFloatInput(self.unitLPanel, default = 20.0, name = "ES.Theory.NWPW.CellL", hardRange = "[0..)", label = "L:") unitLSizer.AddWidget(self.lat) latSpacer = EcceSpacer(self.unitLPanel, height = 45) unitLSizer.AddWidget(latSpacer) self.unitLPanel.SetSizer(unitLSizer) #lattice vector input self.unitVectorPanel = EcceSubPanel(self) 
unitVectorSizer = EcceBoxSizer(self, cols = 3, style = 1) self.a1x = EcceFloatInput(self.unitVectorPanel, default = 20.0, name = "ES.Theory.NWPW.a1x", label = "a1:") unitVectorSizer.AddWidget(self.a1x) self.a1y = EcceFloatInput(self.unitVectorPanel, default = 0.0, name = "ES.Theory.NWPW.a1y", label = "") unitVectorSizer.AddWidget(self.a1y) self.a1z = EcceFloatInput(self.unitVectorPanel, default = 0.0, name = "ES.Theory.NWPW.a1z", label = "") unitVectorSizer.AddWidget(self.a1z) self.a2x = EcceFloatInput(self.unitVectorPanel, default = 0.0, name = "ES.Theory.NWPW.a2x", label = "a2:") unitVectorSizer.AddWidget(self.a2x) self.a2y = EcceFloatInput(self.unitVectorPanel, default = 20.0, name = "ES.Theory.NWPW.a2y", label = "") unitVectorSizer.AddWidget(self.a2y) self.a2z = EcceFloatInput(self.unitVectorPanel, default = 0.0, name = "ES.Theory.NWPW.a2z", label = "") unitVectorSizer.AddWidget(self.a2z) self.a3x = EcceFloatInput(self.unitVectorPanel, default = 0.0, name = "ES.Theory.NWPW.a3x", label = "a3:") unitVectorSizer.AddWidget(self.a3x) self.a3y = EcceFloatInput(self.unitVectorPanel, default = 0.0, name = "ES.Theory.NWPW.a3y", label = "") unitVectorSizer.AddWidget(self.a3y) self.a3z = EcceFloatInput(self.unitVectorPanel, default = 20.0, name = "ES.Theory.NWPW.a3z", label = "") unitVectorSizer.AddWidget(self.a3z) self.unitVectorPanel.SetSizer(unitVectorSizer) # lat_a, lat_b, ... 
input #Lattice support not in current NWChem 5.1.1 GDB 4/23/09 #self.unitLatticePanel = EcceSubPanel(self) #unitLatticeSizer = EcceBoxSizer(self, cols = 3, style = 1) #self.lat_a = EcceFloatInput(self.unitLatticePanel, # default = 20.0, # name = "ES.Theory.NWPW.lat_a", # hardRange = "[0..)", # label = "lat_a: ") #unitLatticeSizer.AddWidget(self.lat_a) #self.lat_b = EcceFloatInput(self.unitLatticePanel, # default = 20.0, # name = "ES.Theory.NWPW.lat_b", # hardRange = "[0..)", # label = "lat_b: ") #unitLatticeSizer.AddWidget(self.lat_b) #self.lat_c = EcceFloatInput(self.unitLatticePanel, # default = 20.0, # name = "ES.Theory.NWPW.lat_c", # hardRange = "[0..)", # label = "lat_c: ") #unitLatticeSizer.AddWidget(self.lat_c) #self.lat_alpha = EcceFloatInput(self.unitLatticePanel, # default = 20.0, # name = "ES.Theory.NWPW.lat_alpha", # hardRange = "[0..)", # label = "alpha:") #unitLatticeSizer.AddWidget(self.lat_alpha) #self.lat_beta = EcceFloatInput(self.unitLatticePanel, # default = 20.0, # name = "ES.Theory.NWPW.lat_beta", # hardRange = "[0..)", # label = "beta: ") #unitLatticeSizer.AddWidget(self.lat_beta) #self.lat_gamma = EcceFloatInput(self.unitLatticePanel, # default = 20.0, # name = "ES.Theory.NWPW.lat_gamma", # hardRange = "[0..)", # label = "gamma:") #unitLatticeSizer.AddWidget(self.lat_gamma) #latticeSpacer = EcceSpacer(self.unitLatticePanel, height = 9) #unitLatticeSizer.AddWidget(latticeSpacer) #self.unitLatticePanel.SetSizer(unitLatticeSizer) # ngrid input # Eric doesn't use this to generate an input file so I'm going to # comment it out for now until I hear back from him. 
GDB 4/22/09 #self.unitNgridPanel = EcceSubPanel(self) #unitNgridSizer = EcceHBoxSizer() #self.usengrid = EcceCheckBox(self.unitNgridPanel, # label = "ngrid:", # name = "ES.Theory.NWPW.UseNgrid", # default = False) #unitNgridSizer.AddWidget(self.usengrid) #self.ngrid1 = EcceSpinCtrl(self.unitNgridPanel, # hardRange = "[2..)", # name = "ES.Theory.NWPW.Ngrid1", # default = 32, # label = "") #unitNgridSizer.AddWidget(self.ngrid1) #self.ngrid2 = EcceSpinCtrl(self.unitNgridPanel, # hardRange = "[2..)", # name = "ES.Theory.NWPW.Ngrid2", # default = 32, # label = "") #unitNgridSizer.AddWidget(self.ngrid2) #self.ngrid3 = EcceSpinCtrl(self.unitNgridPanel, # hardRange = "[2..)", # name = "ES.Theory.NWPW.Ngrid3", # default = 32, # label = "") #unitNgridSizer.AddWidget(self.ngrid3) #self.unitNgridPanel.SetSizer(unitNgridSizer) unitcellSizer.AddWidget(self.unitMenuSizer, flag = wx.ALL) unitcellSizer.AddWidget(self.unitNonePanel, flag = wx.ALL) unitcellSizer.AddWidget(self.unitLPanel, flag = wx.ALL) unitcellSizer.AddWidget(self.unitVectorPanel, flag = wx.ALL) #unitcellSizer.AddWidget(self.unitLatticePanel, flag = wx.ALL) #unitcellSizer.AddWidget(self.unitNgridPanel, flag = wx.ALL) self.panelSizer.Add(unitcellSizer) # End Eric Bylaska's planewave module integration # THEORY OPTIONS DFT if EcceGlobals.Category == "DFT": dftSizer = EcceBoxSizer(self, label = "Theory Options - DFT", cols = 2) exchangeSizer = EcceLineLabelVBoxSizer(self, label = "Exchange-Correlation Functionals") xcFuncChoice = ["None", "HCTH (Gradient Corr.)", "HCTH 120 (Gradient Corr.)", "HCTH 147 (Gradient Corr.)", "HCTH 407 (Gradient Corr.)", "HCTHP 14 (Gradient Corr.)", "BLYP (hybrid)", # added by CAO "B1LYP (hybrid)", # added by CAO "B3LYP (hybrid)", "CAM-B3LYP (range)", # added by CAO "Adiabatic Conn. 
(hybrid)", "B3PW91 (hybrid)", # added by CAO "BP86 (hybrid)", # added by CAO "B3P86 (hybrid)", # added by CAO "X3LYP (hybrid)", # added by CAO "mPW1PW91 (hybrid)", # added by CAO "mPW1PBE (hybrid)", # added by CAO "mPW3PBE (hybrid)", # added by CAO "mPW1LYP (hybrid)", # added by CAO "TPSSh (hybrid)", # added by CAO "VSXC (hybrid)", # added by CAO "Gaussian B3 (hybrid)", # added by CAO "BOP (Gradient Corr.)",# added by CAO "PBEOP (Gradient Corr.)",# added by CAO "M05 (hybrid)",# added by CAO "M05-2X (hybrid)",# added by CAO "M06 (hybrid)",# added by CAO "M06-HF (hybrid)",# added by CAO "M06-2X (hybrid)",# added by CAO "M06-L (hybrid)",# added by CAO "Becke Half and Half (hybrid)", "Becke 1997 (hybrid)", "Becke 1997-1 (hybrid)", "Becke 1997-2 (hybrid)", "Becke 1997-GGA1 (Gradient Corr.)", "Becke 1998 (hybrid)", "PBE0 (hybrid)", "Mod. Perdew-Wang 1K (hybrid)", "BB1K (hybrid)", "Filatov-Thiel 1997 (Gradient Corr.)"] xcFuncDefault = 0 if EcceGlobals.ReactionStudyFlag != 0: xcFuncDefault =27 # CAO --changed from 15 to 37 to reflect added XCs elif os.environ.has_key("ECCE_NWCHEM_DFT_USE_B3LYP"): if os.environ["ECCE_NWCHEM_DFT_USE_B3LYP"] == "true": xcFuncDefault = 8 # CAO -- changed from 7 to 8 to reflect reordering self.xcFunc = EcceComboBox(self, choices = xcFuncChoice, name = "ES.Theory.DFT.XCFunctionals", default = xcFuncDefault, label = "Combined XC:") if xcFuncDefault != 0: self.xcFunc.export = 1 exchangeSizer.AddWidget(self.xcFunc) exFuncChoice = ["Slater (local)", "Becke88 (Gradient Corr.)", "Perdew 1991 (Gradient Corr.)", "PBE 1996 (Gradient Corr.)", "Gill 1996 (Gradient Corr.)", "Optimized Exchange (Gradient Corr.)", "Mod. 
Perdew-Wang 1991 (Gradient Corr.)", "Filatov-Thiel 1997 (Gradient Corr.)", # start -- added by CAO "M05 (Meta-GGA)", "M05-2X (Meta-GGA)", "M06 (Meta-GGA)", "M06-HF (Meta-GGA)", "M06-2X (Meta-GGA)", "M06-L (Meta-GGA)", "VSXC (Meta-GGA)", "rPBE (Hybr.)", "revPBE (Hybr.)", "PW6B95 (Hybr.)", "PWB6k (Hybr.)", "PKZB99 (Meta-GGA)", "TPSS03 (Meta-GGA)"] # end --added by CAO self.exFunc = EcceComboBox(self, choices = exFuncChoice, name = "ES.Theory.DFT.ExchangeFunctionals", default = 0, label = "Exchange:") exchangeSizer.AddWidget(self.exFunc) corrFuncChoice = ["VWN 1 (local)", "VWN 2 (local)", "VWN 3 (local)", "VWN 4 (local)", "VWN 5 (local)", "VWN 1/RPA (local)", "Perdew 1981 (local)", "Perdew-Wang 1991 (local)", "Perdew 1986 (Gradient Corr.)", "Lee-Yang-Parr (Gradient Corr.)", "Perdew 1991 (Gradient Corr.)", "PBE 1996 (Gradient Corr.)", "Filatov-Thiel 1997 (Gradient Corr.)" , #start -- added by CAO "M05 (Meta-GGA)", "M05-2X (Meta-GGA)", "M06 (Meta-GGA)", "M06-HF (Meta-GGA)", "M06-2X (Meta-GGA)", "M06-L (Meta-GGA)", "OP (Gradient Corr.)", "PKZB99 (Meta-GGA)", "TPSS03 (Meta-GGA)", "BC95 (Meta-GGA)", "PW6B95 (Meta-GGA)", "PWB6K (Meta-GGA)", "VSXC (Meta-GGA)"] #end -- added by CAO self.corrFunc = EcceComboBox(self, choices = corrFuncChoice, name = "ES.Theory.DFT.CorrelationFunctionals", default = 4, label = "Correlation:") exchangeSizer.AddWidget(self.corrFunc) self.coulomb = EcceExpInput(self, default = 1e-10, name = "ES.Theory.DFT.CoulombCutoff", hardRange = "[0..)", label = "Coulomb Screening Tolerance:") exchangeSizer.AddWidget(self.coulomb) gridSizer = EcceLineLabelVBoxSizer(self, label = "Grid Options") qualityChoice = ["Extra Coarse", "Coarse", "Medium", "Fine", "Extra Fine"] self.quality = EcceComboBox(self, choices = qualityChoice, name = "ES.Theory.DFT.GridDensity", default = 2, label = "Quality:") gridSizer.AddWidget(self.quality) angularChoice = ["Gauss-Legendre", "Lebedev"] self.angular = EcceComboBox(self, choices = angularChoice, name = 
"ES.Theory.DFT.GridAngular", default = 1, label = "Angular:") gridSizer.AddWidget(self.angular) partitionChoice = ["Becke", "Erf1", "SSF"] self.partition = EcceComboBox(self, choices = partitionChoice, name = "ES.Theory.DFT.GridPartition", default = 1, label = "Partition:") gridSizer.AddWidget(self.partition) radialChoice = ["Euler", "Mura", "Treutler"] self.radial = EcceComboBox(self, choices = radialChoice, name = "ES.Theory.DFT.GridRadial", default = 1, label = "Radial:") gridSizer.AddWidget(self.radial) dftSizer.AddWidget(exchangeSizer, flag=EcceGlobals.FlagDefault|wx.EXPAND) dftSizer.AddWidget(gridSizer, flag=EcceGlobals.FlagDefault|wx.EXPAND) self.panelSizer.Add(dftSizer) # FROZEN CORE OPTIONS if (EcceGlobals.Category == "MP" or EcceGlobals.Category == "CC"): fzSizer = EcceBoxSizer(self, label = "Frozen Core Options", cols = 2) self.fzTog = EcceCheckBox(self, label = " Use Frozen Core Orbitals", name = "ES.Theory.UseFrozenCores", default = True, export = 1) fzSizer.AddWidget(self.fzTog) self.fzSpin = EcceSpinCtrl(self, hardRange = "[0..%i]" % EcceGlobals.NumOccupiedOrbs, name = "ES.Theory.CorrelOrb.FrozenCoreValue", default = EcceGlobals.NumFrozenOrbs, label = "Freeze:", export = 1) fzSizer.AddWidget(self.fzSpin) self.panelSizer.Add(fzSizer) self.AddButtons() def CheckDependency(self): # Special logic for DirDyVTST task, as determined by overloading
<gh_stars>1-10
# Tests for SecurePermissionAPI: the security layer wrapped around the
# model-level PermissionAPI.  Fixtures below vary the cache backend and the
# acting user's role.
from fluiddb.cache.test.test_permission import CachingPermissionAPITestMixin
from fluiddb.data.permission import Operation, Policy, getTagPermissions
from fluiddb.data.system import createSystemData
from fluiddb.data.user import Role
from fluiddb.exceptions import FeatureError
from fluiddb.model.exceptions import UnknownPathError
from fluiddb.model.namespace import NamespaceAPI
from fluiddb.model.permission import PermissionAPI
from fluiddb.model.tag import TagAPI
from fluiddb.model.test.test_permission import PermissionAPITestMixin
from fluiddb.model.user import UserAPI, getUser
from fluiddb.security.exceptions import PermissionDeniedError
from fluiddb.security.permission import SecurePermissionAPI, checkPermissions
from fluiddb.testing.basic import FluidinfoTestCase
from fluiddb.testing.resources import (
    BrokenCacheResource, CacheResource, ConfigResource, DatabaseResource,
    LoggingResource)


class SecurePermissionAPITestMixin(object):
    """Tests shared by every fixture that provides ``self.permissions``.

    Concrete subclasses must set ``self.permissions`` to a
    L{SecurePermissionAPI} instance in their C{setUp}.
    """

    def testGetWithUnknownPaths(self):
        """
        L{SecurePermissionAPI.get} raises an L{UnknownPathError} if a path
        for unknown L{Namespace}s or L{Tag}s is specified.
        """
        values = [(u'unknown/namespace', Operation.CREATE_NAMESPACE)]
        error = self.assertRaises(UnknownPathError,
                                  self.permissions.get, values)
        self.assertEqual([u'unknown/namespace'], error.paths)
        error = self.assertRaises(UnknownPathError, self.permissions.get,
                                  [(u'unknown/tag', Operation.UPDATE_TAG)])
        self.assertEqual([u'unknown/tag'], error.paths)

    def testSetWithUnknownPaths(self):
        """
        L{PermissionAPI.set} raises an L{UnknownPathError} if a path for
        unknown L{Namespace}s or L{Tag}s is specified.
        """
        values = [(u'unknown/namespace', Operation.CREATE_NAMESPACE,
                   Policy.OPEN, [])]
        error = self.assertRaises(UnknownPathError,
                                  self.permissions.set, values)
        self.assertEqual([u'unknown/namespace'], error.paths)
        values = [(u'unknown/tag', Operation.UPDATE_TAG, Policy.OPEN, [])]
        error = self.assertRaises(UnknownPathError,
                                  self.permissions.set, values)
        self.assertEqual([u'unknown/tag'], error.paths)


class SecurePermissionAPITest(PermissionAPITestMixin,
                              CachingPermissionAPITestMixin,
                              SecurePermissionAPITestMixin,
                              FluidinfoTestCase):
    """Run the model, caching and secure permission suites with a working
    cache and a freshly created normal user."""

    resources = [('cache', CacheResource()),
                 ('config', ConfigResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecurePermissionAPITest, self).setUp()
        self.system = createSystemData()
        # A plain (non-superuser, non-anonymous) user owns the fixture.
        UserAPI().create([(u'username', u'password', u'User',
                           u'<EMAIL>')])
        self.user = getUser(u'username')
        self.permissions = SecurePermissionAPI(self.user)


class SecurePermissionAPIWithBrokenCacheTest(PermissionAPITestMixin,
                                             SecurePermissionAPITestMixin,
                                             FluidinfoTestCase):
    """Same suites, but against a deliberately broken cache backend, so the
    API must fall back to the database (failures are logged)."""

    resources = [('cache', BrokenCacheResource()),
                 ('config', ConfigResource()),
                 ('log', LoggingResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecurePermissionAPIWithBrokenCacheTest, self).setUp()
        self.system = createSystemData()
        UserAPI().create([(u'username', u'password', u'User',
                           u'<EMAIL>')])
        self.user = getUser(u'username')
        self.permissions = SecurePermissionAPI(self.user)


class SecurePermissionAPIWithAnonymousRoleTest(FluidinfoTestCase):
    """Permission checks as seen by the built-in anonymous user, who may
    never read or change permissions."""

    resources = [('cache', CacheResource()),
                 ('config', ConfigResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecurePermissionAPIWithAnonymousRoleTest, self).setUp()
        system = createSystemData()
        # The API under test acts as the system-provided 'anon' user...
        user = system.users[u'anon']
        self.permissions = SecurePermissionAPI(user)
        # ...while a separate normal user owns the namespace/tag targets.
        UserAPI().create([(u'username', u'password', u'User',
                           u'<EMAIL>')])
        user = getUser(u'username')
        TagAPI(user).create([(u'username/tag', u'description')])

    def testGetNamespacePermissionsIsAlwaysDenied(self):
        """
        L{SecurePermissionAPI.get} always denies
access to get namespace permissions for the anonymous user. """ for operation in Operation.NAMESPACE_OPERATIONS: error = self.assertRaises(PermissionDeniedError, self.permissions.get, [(u'username', operation)]) self.assertEqual([(u'username', Operation.CONTROL_NAMESPACE)], sorted(error.pathsAndOperations)) def testGetTagPermissionsIsAlwaysDenied(self): """ L{SecurePermissionAPI.get} always denies access to get tag permissions for the anonymous user. """ for operation in [Operation.UPDATE_TAG, Operation.DELETE_TAG, Operation.CONTROL_TAG]: error = self.assertRaises(PermissionDeniedError, self.permissions.get, [(u'username/tag', operation)]) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG)], sorted(error.pathsAndOperations)) def testGetTagValuePermissionsIsAlwaysDenied(self): """ L{SecurePermissionAPI.get} always denies access to get tag value permissions for the anonymous user. """ for operation in [Operation.READ_TAG_VALUE, Operation.WRITE_TAG_VALUE, Operation.DELETE_TAG_VALUE, Operation.CONTROL_TAG_VALUE]: error = self.assertRaises(PermissionDeniedError, self.permissions.get, [(u'username/tag', operation)]) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG_VALUE)], sorted(error.pathsAndOperations)) def testSetNamespacePermissionsIsAlwaysDenied(self): """ L{SecurePermissionAPI.set} always denies changes to namespace permissions for the anonymous user. """ for operation in Operation.NAMESPACE_OPERATIONS: values = [(u'username', operation, Policy.OPEN, [])] error = self.assertRaises(PermissionDeniedError, self.permissions.set, values) self.assertEqual([(u'username', Operation.CONTROL_NAMESPACE)], sorted(error.pathsAndOperations)) def testSetTagPermissionsIsAlwaysDenied(self): """ L{SecurePermissionAPI.set} always denies changes to tag permissions for the anonymous user. 
""" for operation in [Operation.UPDATE_TAG, Operation.DELETE_TAG, Operation.CONTROL_TAG]: values = [(u'username/tag', operation, Policy.OPEN, [])] error = self.assertRaises(PermissionDeniedError, self.permissions.set, values) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG)], sorted(error.pathsAndOperations)) def testSetTagValuePermissionsIsAlwaysDenied(self): """ L{SecurePermissionAPI.set} always denies changes to tag permissions for the anonymous user. """ for operation in [Operation.READ_TAG_VALUE, Operation.WRITE_TAG_VALUE, Operation.DELETE_TAG_VALUE, Operation.CONTROL_TAG_VALUE]: values = [(u'username/tag', operation, Policy.OPEN, [])] error = self.assertRaises(PermissionDeniedError, self.permissions.set, values) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG_VALUE)], sorted(error.pathsAndOperations)) class SecurePermissionAPIWithNormalUserTest(FluidinfoTestCase): resources = [('cache', CacheResource()), ('config', ConfigResource()), ('store', DatabaseResource())] def setUp(self): super(SecurePermissionAPIWithNormalUserTest, self).setUp() createSystemData() UserAPI().create([(u'username', u'password', u'User', u'<EMAIL>')]) user = getUser(u'username') TagAPI(user).create([(u'username/tag', u'description')]) self.permissions = SecurePermissionAPI(user) def testGetNamespacePermissionsIsAllowed(self): """ Getting namespace permissions is allowed if the user has C{Operation.CONTROL_NAMESPACE} permissions. """ self.permissions.set([(u'username', Operation.CONTROL_NAMESPACE, Policy.OPEN, [])]) result = self.permissions.get([(u'username', Operation.CREATE_NAMESPACE)]) self.assertEqual(1, len(result)) def testGetNamespacePermissionsIsDenied(self): """ L{SecurePermissionAPI.set} should raise a L{PermissionDeniedError} if the user doesn't have C{Operation.CONTROL_NAMESPACE} permissions on the given path. 
""" self.permissions.set([(u'username', Operation.CONTROL_NAMESPACE, Policy.CLOSED, [])]) values = [(u'username', Operation.DELETE_NAMESPACE)] error = self.assertRaises(PermissionDeniedError, self.permissions.get, values) self.assertEqual([(u'username', Operation.CONTROL_NAMESPACE)], sorted(error.pathsAndOperations)) def testGetTagPermissionsIsAllowed(self): """ Getting tag permissions is allowed if the user has C{Operation.CONTROL_TAG} permissions. """ self.permissions.set([(u'username/tag', Operation.CONTROL_TAG, Policy.OPEN, [])]) result = self.permissions.get([(u'username/tag', Operation.UPDATE_TAG)]) self.assertEqual(1, len(result)) def testGetTagPermissionsIsDenied(self): """ L{SecurePermissionAPI.set} should raise a L{PermissionDeniedError} if the user doesn't have C{Operation.CONTROL_TAG} permissions on the given path. """ self.permissions.set([(u'username/tag', Operation.CONTROL_TAG, Policy.CLOSED, [])]) values = [(u'username/tag', Operation.DELETE_TAG)] error = self.assertRaises(PermissionDeniedError, self.permissions.get, values) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG)], sorted(error.pathsAndOperations)) def testGetTagValuePermissionsIsAllowed(self): """ Getting tag value permissions is allowed if the user has C{Operation.CONTROL_TAG_VALUE} permissions. """ self.permissions.set([(u'username/tag', Operation.CONTROL_TAG_VALUE, Policy.OPEN, [])]) result = self.permissions.get([(u'username/tag', Operation.READ_TAG_VALUE)]) self.assertEqual(1, len(result)) def testGetTagValuePermissionsIsDenied(self): """ L{SecurePermissionAPI.get} should raise a L{PermissionDeniedError} if the user doesn't have C{Operation.CONTROL_TAG_VALUE} permissions on the given path. 
""" self.permissions.set([(u'username/tag', Operation.CONTROL_TAG_VALUE, Policy.CLOSED, [])]) values = [(u'username/tag', Operation.WRITE_TAG_VALUE)] error = self.assertRaises(PermissionDeniedError, self.permissions.get, values) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG_VALUE)], sorted(error.pathsAndOperations)) def testSetNamespacePermissionsIsAllowed(self): """ Updating namespace permissions is allowed if the user has C{Operation.CONTROL_NAMESPACE} permissions. """ self.permissions.set([(u'username', Operation.CONTROL_NAMESPACE, Policy.OPEN, [])]) values = [(u'username', Operation.CREATE_NAMESPACE, Policy.CLOSED, [])] self.permissions.set(values) pathAndOperations = [(u'username', Operation.CREATE_NAMESPACE)] expected = { (u'username', Operation.CREATE_NAMESPACE): (Policy.CLOSED, [])} self.assertEqual(expected, self.permissions.get(pathAndOperations)) def testSetNamespacePermissionsIsDenied(self): """ L{SecurePermissionAPI.set} should raise a L{PermissionDeniedError} if the user doesn't have C{Operation.CONTROL_NAMESPACE} permissions on the given path. """ self.permissions.set([(u'username', Operation.CONTROL_NAMESPACE, Policy.CLOSED, [])]) values = [(u'username', Operation.DELETE_NAMESPACE, Policy.OPEN, [])] error = self.assertRaises(PermissionDeniedError, self.permissions.set, values) self.assertEqual([(u'username', Operation.CONTROL_NAMESPACE)], sorted(error.pathsAndOperations)) def testSetTagPermissionsIsAllowed(self): """ Updating tag permissions is allowed if the user has C{Operation.CONTROL_TAG} permissions. 
""" self.permissions.set([(u'username/tag', Operation.CONTROL_TAG, Policy.OPEN, [])]) values = [(u'username/tag', Operation.UPDATE_TAG, Policy.CLOSED, [])] self.permissions.set(values) pathAndOperations = [(u'username/tag', Operation.UPDATE_TAG)] expected = { (u'username/tag', Operation.UPDATE_TAG): (Policy.CLOSED, [])} self.assertEqual(expected, self.permissions.get(pathAndOperations)) def testSetTagPermissionsIsDenied(self): """ L{SecurePermissionAPI.set} should raise a L{PermissionDeniedError} if the user doesn't have C{Operation.CONTROL_TAG} permissions on the given path. """ self.permissions.set([(u'username/tag', Operation.CONTROL_TAG, Policy.CLOSED, [])]) values = [(u'username/tag', Operation.DELETE_TAG, Policy.OPEN, [])] error = self.assertRaises(PermissionDeniedError, self.permissions.set, values) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG)], sorted(error.pathsAndOperations)) def testSetTagValuePermissionsIsAllowed(self): """ Updating tag value permissions is allowed if the user has C{Operation.CONTROL_TAG_VALUE} permissions. """ self.permissions.set([(u'username/tag', Operation.CONTROL_TAG_VALUE, Policy.OPEN, [])]) values = [(u'username/tag', Operation.READ_TAG_VALUE, Policy.CLOSED, [])] self.permissions.set(values) pathAndOperations = [(u'username/tag', Operation.READ_TAG_VALUE)] expected = { (u'username/tag', Operation.READ_TAG_VALUE): (Policy.CLOSED, [])} self.assertEqual(expected, self.permissions.get(pathAndOperations)) def testSetTagValuePermissionsIsDenied(self): """ L{SecurePermissionAPI.set} should raise a L{PermissionDeniedError} if the user doesn't have C{Operation.CONTROL_TAG_VALUE} permissions on the given path. 
""" self.permissions.set([(u'username/tag', Operation.CONTROL_TAG_VALUE, Policy.CLOSED, [])]) values = [(u'username/tag', Operation.WRITE_TAG_VALUE, Policy.CLOSED, [])] error = self.assertRaises(PermissionDeniedError, self.permissions.set, values) self.assertEqual([(u'username/tag', Operation.CONTROL_TAG_VALUE)], sorted(error.pathsAndOperations)) class SecurePermissionAPIWithSuperUserRoleTest(FluidinfoTestCase): resources = [('cache', CacheResource()), ('config', ConfigResource()), ('store', DatabaseResource())] def setUp(self): super(SecurePermissionAPIWithSuperUserRoleTest, self).setUp() system = createSystemData() user = system.users[u'fluiddb'] self.permissions = SecurePermissionAPI(user) TagAPI(user).create([(u'fluiddb/tag', u'description')]) def testGetNamespacePermissionsIsAlwaysAllowed(self): """ Getting namespace permissions is always allowed for the superuser. """ self.permissions.set([(u'fluiddb', Operation.CONTROL_NAMESPACE, Policy.CLOSED, [])]) result = self.permissions.get([(u'fluiddb', Operation.CREATE_NAMESPACE)]) self.assertEqual(1, len(result)) def testGetTagPermissionsIsAllowed(self): """ Getting tag permissions is always allowed for the superuser. """ self.permissions.set([(u'fluiddb/tag', Operation.CONTROL_TAG, Policy.CLOSED, [])]) result = self.permissions.get([(u'fluiddb/tag', Operation.UPDATE_TAG)]) self.assertEqual(1, len(result)) def testGetTagValuePermissionsIsAllowed(self): """ Getting tag-value permissions is always allowed for the superuser. """ self.permissions.set([(u'fluiddb/tag', Operation.CONTROL_TAG_VALUE, Policy.CLOSED, [])]) result = self.permissions.get([(u'fluiddb/tag', Operation.READ_TAG_VALUE)]) self.assertEqual(1, len(result)) def testSetNamespacePermissionsIsAlwaysAllowed(self): """ Updating namespace permissions is always allowed for the superuser. 
""" self.permissions.set([(u'fluiddb', Operation.CONTROL_NAMESPACE, Policy.CLOSED, [])]) values = [(u'fluiddb', Operation.CREATE_NAMESPACE, Policy.OPEN, [])] self.permissions.set(values) pathAndOperations = [(u'fluiddb', Operation.CREATE_NAMESPACE)] expected = { (u'fluiddb', Operation.CREATE_NAMESPACE): (Policy.OPEN, [])} self.assertEqual(expected, self.permissions.get(pathAndOperations)) def testSetTagPermissionsIsAllowed(self): """ Updating tag permissions is always allowed for the superuser. """ self.permissions.set([(u'fluiddb/tag', Operation.CONTROL_TAG, Policy.CLOSED, [])]) values = [(u'fluiddb/tag', Operation.UPDATE_TAG, Policy.OPEN, [])] self.permissions.set(values) pathAndOperations = [(u'fluiddb/tag', Operation.UPDATE_TAG)] expected = { (u'fluiddb/tag', Operation.UPDATE_TAG): (Policy.OPEN, [])} self.assertEqual(expected, self.permissions.get(pathAndOperations)) def testSetTagValuePermissionsIsAllowed(self): """ Updating tag-value permissions is always allowed for the superuser. """ self.permissions.set([(u'fluiddb/tag', Operation.CONTROL_TAG_VALUE, Policy.CLOSED, [])]) values = [(u'fluiddb/tag', Operation.READ_TAG_VALUE, Policy.OPEN, [])] self.permissions.set(values) pathAndOperations = [(u'fluiddb/tag', Operation.READ_TAG_VALUE)] expected = { (u'fluiddb/tag', Operation.READ_TAG_VALUE): (Policy.OPEN, [])} self.assertEqual(expected, self.permissions.get(pathAndOperations)) class CheckPermissionsTestMixin(object): def testCheckRaisesFeatureErrorIfValuesIsEmpty(self): """ L{checkPermissions} returns an empty list of values if a list of values is empty. """ self.assertEqual([], checkPermissions(self.user, [])) def testCheckRaisesFeatureErrorIfPathIsNone(self): """ L{checkPermissions} raises L{FeatureError} if one of the given paths is None. 
""" values = [(None, Operation.WRITE_TAG_VALUE)] self.assertRaises(FeatureError, checkPermissions, self.user, values) def testCheckRaisesFeatureErrorIfOperationIsInvalid(self): """ L{checkPermissions} raises L{FeatureError} if one of the given operations is invalid. """ values = [(u'username', None)] self.assertRaises(FeatureError, checkPermissions, self.user, values) class UserPermissionCheckerTest(CheckPermissionsTestMixin, FluidinfoTestCase): resources = [('config', ConfigResource()), ('cache', CacheResource()), ('store', DatabaseResource())] def setUp(self): super(UserPermissionCheckerTest, self).setUp() self.system = createSystemData() UserAPI().create([(u'username', u'password', u'User', u'<EMAIL>')]) self.user = getUser(u'username') self.permissions = PermissionAPI(self.user) def testCheckOpenPermission(self): """ L{checkPermissions} grants access when the policy is C{Policy.OPEN} and the L{User.id} is not in the exceptions list. """ TagAPI(self.user).create([(u'username/tag', u'description')]) self.permissions.set([(u'username/tag', Operation.UPDATE_TAG, Policy.OPEN, [])]) values = [(u'username/tag', Operation.UPDATE_TAG)] deniedOperations = checkPermissions(self.user, values) self.assertEqual([], deniedOperations) def testCheckOpenPermissionWithException(self): """ L{checkPermissions} denies access when the policy is C{Policy.OPEN} and the L{User.id} is in the exceptions list. """ TagAPI(self.user).create([(u'username/tag', u'description')]) self.permissions.set([(u'username/tag', Operation.UPDATE_TAG, Policy.OPEN, [u'username'])]) values = [(u'username/tag', Operation.UPDATE_TAG)] deniedOperations = checkPermissions(self.user, values) self.assertEqual([(u'username/tag', Operation.UPDATE_TAG)], list(deniedOperations)) def testCheckClosedPermission(self): """ L{checkPermissions} denies access when the policy is C{Policy.CLOSED} and the L{User.id} is not in the exceptions list. 
""" TagAPI(self.user).create([(u'username/tag', u'description')]) self.permissions.set([(u'username/tag', Operation.UPDATE_TAG, Policy.CLOSED, [])]) values = [(u'username/tag', Operation.UPDATE_TAG)] deniedOperations = checkPermissions(self.user, values) self.assertEqual([(u'username/tag', Operation.UPDATE_TAG)], deniedOperations) def testCheckClosedPermissionWithException(self): """
= 0 last_time = None for job in non_covered_jobs: if job.state == consts.JOB_STATE_EXECUTING_FINISHED: jobs_processing += 1 elif job.state == consts.JOB_STATE_ASSIGNED: jobs_executing += 1 elif job.state != consts.JOB_STATE_COMPLETED: jobs_waiting += 1 elif job.state == consts.JOB_STATE_COMPLETED: jobs_completed += 1 if last_time is None or (job.completed and job.completed > last_time): last_time = job.completed if job.completion_status not in [consts.JOB_CMPLT_ALL_OK, None]: jobs_error += 1 if jobs_total == jobs_completed and last_time: duration = last_time - self.created else: duration = utils.utcnow() - self.created data = dict(id=self.id, label=self.label, created=self.created.strftime("%Y-%m-%dT%H:%M:%SZ") if self.created else None, deleted=self.deleted.strftime("%Y-%m-%dT%H:%M:%SZ") if self.deleted else None, started=self.started.strftime("%Y-%m-%dT%H:%M:%SZ") if self.started else None, finished=self.finished.strftime("%Y-%m-%dT%H:%M:%SZ") if self.finished else None, duration=duration_to_txt(duration), state=consts.RUN_STATES_NAME[self.state], stage_name=self.stage.name, stage_id=self.stage_id, flow_id=self.flow_id, flow_kind='ci' if self.flow.kind == 0 else 'dev', args=self.args, jobs_total=jobs_total, jobs_waiting=jobs_waiting, jobs_executing=jobs_executing, jobs_processing=jobs_processing, jobs_error=jobs_error, tests_total=self.tests_total, tests_passed=self.tests_passed, tests_not_run=self.tests_not_run, issues_total=self.issues_total, issues_new=self.issues_new, new_cnt=self.new_cnt, no_change_cnt=self.no_change_cnt, regr_cnt=self.regr_cnt, fix_cnt=self.fix_cnt, repo_data=self.repo_data.data if self.repo_data else None, reason=self.reason['reason'], note=self.note) if with_project: data['project_id'] = self.flow.branch.project_id data['project_name'] = self.flow.branch.project.name if with_branch: data['branch_id'] = self.flow.branch_id data['branch_name'] = self.flow.branch.name if with_artifacts: data['artifacts_total'] = len(self.artifacts_files) 
infix = 'r/%d' % self.id data['report_entries'] = _get_report_entries(self.artifacts, infix) return data class Step(db.Model, DatesMixin): __tablename__ = "steps" id = Column(Integer, primary_key=True) index = Column(Integer, nullable=False) job_id = Column(Integer, ForeignKey('jobs.id'), nullable=False) job = relationship("Job", back_populates="steps") tool_id = Column(Integer, ForeignKey('tools.id'), nullable=False) tool = relationship("Tool", back_populates="steps") fields = Column(JSONB, nullable=False) result = Column(JSONB) status = Column(Integer) # services def get_json(self): data = dict(id=self.id, index=self.index, tool=self.tool.name, tool_id=self.tool_id, job_id=self.job_id, status=self.status, result=self.result) for f, v in self.fields.items(): data[f] = v return data class Job(db.Model, DatesMixin): __tablename__ = "jobs" id = Column(Integer, primary_key=True) name = Column(UnicodeText) assigned = Column(DateTime(timezone=True)) started = Column(DateTime(timezone=True)) finished = Column(DateTime(timezone=True)) # time when agent reported that job is finished processing_started = Column(DateTime(timezone=True)) # TODO: this is never used completed = Column(DateTime(timezone=True)) run_id = Column(Integer, ForeignKey('runs.id'), nullable=False) run = relationship("Run", back_populates="jobs") steps = relationship("Step", back_populates="job", order_by="Step.index") state = Column(Integer, default=consts.JOB_STATE_QUEUED) completion_status = Column(Integer) covered = Column(Boolean, default=False) notes = Column(UnicodeText) agent = relationship('Agent', uselist=False, back_populates="job", foreign_keys="Agent.job_id", post_update=True) agent_used_id = Column(Integer, ForeignKey('agents.id')) agent_used = relationship('Agent', foreign_keys=[agent_used_id], post_update=True) agents_group_id = Column(Integer, ForeignKey('agents_groups.id'), nullable=False) agents_group = relationship('AgentsGroup', back_populates="jobs") timeout = Column(Integer) 
system_id = Column(Integer, ForeignKey('systems.id', name='fk_systems_jobs'), nullable=False) # match name fk_systems_jobs with name in alembic migration system = relationship('System', back_populates="jobs") results = relationship('TestCaseResult', back_populates="job") issues = relationship('Issue', back_populates="job") def get_json(self): if self.started: if self.finished: duration = self.finished - self.started else: duration = utils.utcnow() - self.started duration = duration_to_txt(duration) else: duration = '' return dict(id=self.id, created=self.created.strftime("%Y-%m-%dT%H:%M:%SZ") if self.created else None, deleted=self.deleted.strftime("%Y-%m-%dT%H:%M:%SZ") if self.deleted else None, started=self.started.strftime("%Y-%m-%dT%H:%M:%SZ") if self.started else None, finished=self.finished.strftime("%Y-%m-%dT%H:%M:%SZ") if self.finished else None, completed=self.completed.strftime("%Y-%m-%dT%H:%M:%SZ") if self.completed else None, # processing_started=self.processing_started.strftime( # "%Y-%m-%dT%H:%M:%SZ") if self.processing_started else None, duration=duration, name=self.name, state=self.state, completion_status=self.completion_status, timeout=self.timeout, covered=self.covered, notes=self.notes, system_id=self.system_id, system=self.system.name, executor=self.system.executor, run_id=self.run_id, agents_group_id=self.agents_group_id, agents_group_name=self.agents_group.name, agent_id=self.agent_used_id if self.agent_used else 0, agent_name=self.agent_used.name if self.agent_used else '', steps=[s.get_json() for s in sorted(self.steps, key=lambda s: s.index)]) def __repr__(self): txt = 'Job %s, state:%s' % (self.id, consts.JOB_STATES_NAME[self.state]) txt += ', g:%s' % self.agents_group_id if self.agent_used_id: txt += ', ag:%s' % self.agent_used_id return "<%s>" % txt class TestCase(db.Model, DatesMixin): __tablename__ = "test_cases" __test__ = False # do not treat this class as a test by pytest id = Column(Integer, primary_key=True) name = 
Column(UnicodeText, unique=True) tool_id = Column(Integer, ForeignKey('tools.id'), nullable=False) tool = relationship('Tool', back_populates="test_cases") results = relationship("TestCaseResult", back_populates="test_case") class TestCaseResult(db.Model): __tablename__ = "test_case_results" __test__ = False # do not treat this class as a test by pytest id = Column(Integer, primary_key=True) test_case_id = Column(Integer, ForeignKey('test_cases.id'), nullable=False) test_case = relationship('TestCase', back_populates="results") job_id = Column(Integer, ForeignKey('jobs.id'), nullable=False) job = relationship('Job', back_populates="results") result = Column(Integer, default=0) values = Column(JSONB) cmd_line = Column(UnicodeText) instability = Column(Integer, default=0) age = Column(Integer, default=0) change = Column(Integer, default=consts.TC_RESULT_CHANGE_NO) relevancy = Column(Integer, default=0) def __repr__(self): txt = 'TCR %s, result:%s' % (self.id, consts.TC_RESULTS_NAME[self.result]) return "<%s>" % txt def get_json(self, with_extra=False): data = dict(id=self.id, test_case_id=self.test_case_id, test_case_name=self.test_case.name, result=self.result, values=self.values, cmd_line=self.cmd_line, instability=self.instability, age=self.age, change=self.change, relevancy=self.relevancy if self.relevancy is not None else 0, job_id=self.job_id, job_name=self.job.name, agents_group_name=self.job.agents_group.name, agents_group_id=self.job.agents_group_id, agent_name=self.job.agent_used.name if self.job.agent_used else '', agent_id=self.job.agent_used_id if self.job.agent_used else 0) if with_extra: data['project_id'] = self.job.run.flow.branch.project_id data['project_name'] = self.job.run.flow.branch.project.name data['branch_id'] = self.job.run.flow.branch_id data['branch_name'] = self.job.run.flow.branch.name data['flow_id'] = self.job.run.flow_id data['flow_kind'] = 'ci' if self.job.run.flow.kind == 0 else 'dev' data['run_id'] = self.job.run_id 
data['stage_id'] = self.job.run.stage_id data['stage_name'] = self.job.run.stage.name return data class Issue(db.Model): __tablename__ = "issues" id = Column(Integer, primary_key=True) issue_type = Column(Integer, default=consts.ISSUE_TYPE_ERROR) line = Column(Integer) column = Column(Integer) path = Column(UnicodeText) symbol = Column(UnicodeText) message = Column(UnicodeText) job_id = Column(Integer, ForeignKey('jobs.id'), nullable=False) job = relationship('Job', back_populates="issues") extra = Column(JSONB) age = Column(Integer, default=0) def get_json(self): data = dict(id=self.id, issue_type=self.issue_type, line=self.line, column=self.column, path=self.path, symbol=self.symbol, message=self.message, age=self.age, job_id=self.job_id, job_name=self.job.name, agents_group_name=self.job.agents_group.name, agents_group_id=self.job.agents_group_id, agent_name=self.job.agent_used.name, agent_id=self.job.agent_used_id) if self.extra: data.update(self.extra) return data class File(db.Model): __tablename__ = "files" id = Column(Integer, primary_key=True) path = Column(UnicodeText) artifacts = relationship('Artifact', back_populates="file") class Artifact(db.Model): __tablename__ = "artifacts" id = Column(Integer, primary_key=True) file_id = Column(Integer, ForeignKey('files.id'), nullable=False) file = relationship('File', back_populates="artifacts") flow_id = Column(Integer, ForeignKey('flows.id'), nullable=False, index=True) flow = relationship('Flow', back_populates="artifacts_files") run_id = Column(Integer, ForeignKey('runs.id'), nullable=False, index=True) run = relationship('Run', back_populates="artifacts_files") size = Column(Integer, default=0) section = Column(Integer, default=0) def get_json(self): return dict(id=self.id, path=self.file.path, size=self.size, flow_id=self.flow_id, run_id=self.run_id, stage=self.run.stage.name) class APSchedulerJob(db.Model): __tablename__ = 'apscheduler_jobs' id = Column(Unicode(191), autoincrement=False, nullable=False, 
primary_key=True)
    next_run_time = Column(DOUBLE_PRECISION(precision=53),
                           autoincrement=False, nullable=True, index=True,
                           unique=False)
    job_state = Column(BYTEA, autoincrement=False, nullable=False)


# RESOURCES

class System(db.Model):
    """An operating system / execution environment a job can run on."""
    __tablename__ = "systems"
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText)
    executor = Column(UnicodeText)  # e.g. how the system is executed (see Job.get_json)
    jobs = relationship('Job', back_populates="system")

    # NOTE(review): bare UniqueConstraint expression in the class body (not
    # assigned to __table_args__) relies on SQLAlchemy auto-attaching
    # constraints built from Column objects — confirm the constraint is
    # actually emitted; the conventional declarative form is
    # __table_args__ = (UniqueConstraint(...),).
    UniqueConstraint(name, executor, name='uq_system_name_executor')


class Tool(db.Model, DatesMixin):
    """A tool that job steps invoke; declares the fields steps may set."""
    __tablename__ = "tools"
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText)
    description = Column(UnicodeText)
    configuration = Column(UnicodeText)
    steps = relationship("Step", back_populates="tool")
    fields = Column(JSONB, nullable=False)
    # TODO should it have optional reference to project so that there are local and global tools?
    test_cases = relationship("TestCase", back_populates="tool")


class AgentAssignment(db.Model):
    """Many-to-many link between an Agent and an AgentsGroup."""
    __tablename__ = "agent_assignments"
    agent_id = Column(Integer, ForeignKey('agents.id'), primary_key=True)
    agent = relationship('Agent', back_populates="agents_groups")
    agents_group_id = Column(Integer, ForeignKey('agents_groups.id'),
                             primary_key=True)
    agents_group = relationship('AgentsGroup', back_populates="agents")


class AgentsGroup(db.Model, DatesMixin):
    """A named pool of agents, optionally scoped to a project, with
    cloud-deployment settings stored as JSON."""
    __tablename__ = "agents_groups"
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText)
    project_id = Column(Integer, ForeignKey('projects.id'), nullable=True)
    project = relationship('Project', back_populates="agents_groups")
    # agents = relationship("Agent", back_populates="agents_group")
    # static assignments
    agents = relationship('AgentAssignment', back_populates="agents_group")
    jobs = relationship("Job", back_populates="agents_group")
    deployment = Column(JSONB)   # per-cloud deployment config; defaults filled in get_json
    extra_attrs = Column(JSONB)

    def get_json(self):
        """Serialize to a dict, back-filling default deployment settings for
        each supported cloud so the UI always sees a complete structure."""
        deployment = self.deployment
        if not deployment:
            deployment = dict(method=0)
        if 'aws' not in deployment:
            deployment['aws'] = dict(region='', instances_limit=5,
default_image='', instance_type='', disk_size=0, destruction_after_jobs=1, destruction_after_time=30) else: if 'destruction_after_jobs' not in deployment['aws']: deployment['aws']['destruction_after_jobs'] = 1 if 'destruction_after_time' not in deployment['aws']: deployment['aws']['destruction_after_time'] = 30 if 'disk_size' not in deployment['aws']: deployment['aws']['disk_size'] = 0 if 'aws_ecs_fargate' not in deployment: deployment['aws_ecs_fargate'] = dict(region='', instances_limit=5, cluster='', subnets='', security_groups='') if 'azure_vm' not in deployment: deployment['azure_vm'] = dict(location='', instances_limit=5, default_image='', vm_size='', destruction_after_jobs=1, destruction_after_time=30) return dict(id=self.id, created=self.created.strftime("%Y-%m-%dT%H:%M:%SZ") if self.created else None, deleted=self.deleted.strftime("%Y-%m-%dT%H:%M:%SZ") if self.deleted else None, name=self.name, project_id=self.project_id, project_name=self.project.name if self.project else None, agents_count=len([a for a in self.agents if not a.agent.deleted]), deployment=deployment) class Agent(db.Model, DatesMixin): __tablename__ = "agents" id = Column(Integer, primary_key=True) name = Column(UnicodeText, nullable=False) address = Column(UnicodeText, index=True, nullable=False, unique=True) ip_address = Column(UnicodeText) state = Column(Integer, default=0) disabled = Column(Boolean, default=False) comment = Column(UnicodeText) status_line = Column(UnicodeText) job_id = Column(Integer, ForeignKey('jobs.id')) job = relationship('Job', back_populates="agent", foreign_keys=[job_id]) authorized = Column(Boolean, default=False) last_seen = Column(DateTime(timezone=True)) host_info = Column(JSONB) user_attrs = Column(JSONB) extra_attrs = Column(JSONB) agents_groups = relationship('AgentAssignment', back_populates="agent") def __repr__(self): return "<Agent %s, job:%s>" % (self.id, self.job_id) def get_json(self): return dict(id=self.id, 
created=self.created.strftime("%Y-%m-%dT%H:%M:%SZ") if self.created else None, deleted=self.deleted.strftime("%Y-%m-%dT%H:%M:%SZ") if self.deleted else None, last_seen=self.last_seen.strftime("%Y-%m-%dT%H:%M:%SZ") if self.last_seen else None, name=self.name, address=self.address, authorized=self.authorized, ip_address=self.ip_address, state=self.state, disabled=self.disabled, comment=self.comment, status_line=self.status_line, host_info=self.host_info, user_attrs=self.user_attrs, extra_attrs=self.extra_attrs, groups=[dict(id=a.agents_group.id, name=a.agents_group.name) for a in self.agents_groups], job=self.job.get_json() if self.job else None) class Setting(db.Model): __tablename__ = "settings" id = Column(Integer, primary_key=True) name = Column(UnicodeText) value = Column(UnicodeText) val_type = Column(UnicodeText) # integer, text, boolean, password group = Column(UnicodeText) def get_json(self): return { "id": self.id, "name": self.name, "value": self.get_value(), "type": self.val_type} def get_value(self, password_blank=True): if self.val_type == "integer": return int(self.value) if self.val_type == "boolean": return self.value == 'True' if self.val_type == "password" and password_blank: return '' if self.value is None: return '' return self.value def set_value(self, value): if self.val_type == "integer": self.value = str(value) elif self.val_type == "boolean": self.value = str(value) else: self.value = value def get_setting(group, name): s = Setting.query.filter_by(group=group, name=name).one_or_none() if s is None: raise Exception('cannot find setting
self.repeated_multi_var_condition():
            return self.enclose(
                "struct", self.repeated_declaration())
        else:
            return self.var_type()

    # Whether this element needs a check (memcmp) for a string value.
    def expected_string_condition(self):
        return self.type in ["BSTR", "TSTR"] and self.value is not None

    # Whether this element should have a typedef in the code.
    def type_def_condition(self):
        if self in my_types.values() and self.multi_member():
            return True
        return False

    # Whether this type needs a typedef for its repeated part.
    def repeated_type_def_condition(self):
        if self.repeated_multi_var_condition() and self.multi_var_condition():
            return True
        return False

    # Return the type definition of this element, and all its children + key +
    # cbor.
    def type_def(self):
        ret_val = []
        # Children first so their typedefs appear before this element's.
        if self.type in ["LIST", "MAP", "GROUP", "UNION"]:
            ret_val.extend(
                [elem for typedef in [child.type_def() for child in self.value]
                 for elem in typedef])
        if self.cbor_var_condition():
            ret_val.extend(self.cbor.type_def())
        if self.key_var_condition():
            ret_val.extend(self.key.type_def())
        if self.type == "OTHER":
            ret_val.extend(my_types[self.value].type_def())
        if self.repeated_type_def_condition():
            ret_val.extend(
                [(self.single_var_type(full=False), self.repeated_type_name())])
        if self.type_def_condition():
            ret_val.extend([(self.single_var_type(), self.type_name())])
        return ret_val

    # Return the function name and arguments to call to decode this element. Only used when this element DOESN'T define
    # its own decoder function (when it's a primitive type, for which functions already exist, or when the function is
    # defined elsewhere ("OTHER"))
    def single_func_prim(self):
        # vals: value + numeric min/max bounds; sizes: value + length bounds.
        vals = [self.val_access(), min_val_or_null(self.min_value), max_val_or_null(self.max_value)]
        sizes = [self.val_access(), min_val_or_null(self.min_size), max_val_or_null(self.max_size)]
        if self.type in ["LIST", "MAP"]:
            assert len(self.value) <= 1, \
                f"List must be empty or have a single element, has {len(self.value)} children."
        # Will fail runtime if we don't use lambda for single_func()
        # pylint: disable=unnecessary-lambda
        retval = {
            "INT": lambda: ["intx32_decode", *vals],
            "UINT": lambda: ["uintx32_decode", *vals],
            "NINT": lambda: ["intx32_decode", *vals],
            "FLOAT": lambda: ["float_decode", *vals],
            "BSTR": lambda: ["strx_decode" if not self.cbor_var_condition()
                             else "strx_start_decode", *sizes],
            "TSTR": lambda: ["strx_decode", *sizes],
            "BOOL": lambda: ["boolx_decode", self.val_access(),
                             min_val_or_null(1 if self.value else 0),
                             max_val_or_null(0 if not self.value else 1)],
            # 22 is the CBOR simple value for "null".
            "NIL": lambda: ["primx_decode", "NULL", min_val_or_null(22), max_val_or_null(22)],
            "MAP": lambda: self.value[0].single_func() if len(self.value) >= 1
                           else ["list_start_decode", self.count_var_access(), "0", "0"],
            "LIST": lambda: self.value[0].single_func() if len(self.value) >= 1
                            else ["list_start_decode", self.count_var_access(), "0", "0"],
            "ANY": lambda: ["any_decode", "NULL", "NULL", "NULL"],
            "OTHER": lambda: list_replace_if_not_null(
                my_types[self.value].single_func(), 1, self.val_access()),
        }[self.type]()
        return retval

    # Return the function name and arguments to call to decode this element. Only used when this element has its own
    # decode function
    def single_func_impl(self, full=True):
        return (self.decode_func_name() if full else self.repeated_decode_func_name(),
                self.var_access() if full else self.val_access(),
                "NULL",
                "NULL")

    # Whether this element needs its own decoder function.
    def single_func_impl_condition(self):
        return (False
                or self.key
                or self.cbor_var_condition()
                or self.expected_string_condition()
                or self.type_def_condition())

    # Whether this element needs its own decoder function.
    def repeated_single_func_impl_condition(self):
        return self.repeated_type_def_condition()

    # Return the function name and arguments to call to decode this element.
    def single_func(self):
        """Dispatch between the element's own decoder function and a primitive call."""
        if self.single_func_impl_condition():
            return self.single_func_impl()
        else:
            return self.single_func_prim()

    # Return the function name and arguments to call to decode the repeated
    # part of this element.
    def repeated_single_func(self):
        if self.repeated_single_func_impl_condition():
            return self.single_func_impl(full=False)
        else:
            return self.single_func_prim()

    # Return a number indicating how many other elements this element depends on. Used putting functions and typedefs
    # in the right order.
    def depends_on(self):
        ret_vals = [1]

        # dependsOnCall guards against infinite recursion on cyclic type
        # references; a cycle contributes nothing beyond the initial 1.
        if not self.dependsOnCall:
            self.dependsOnCall = True
            if self.cbor_var_condition():
                ret_vals.append(self.cbor.depends_on())
            if self.key:
                ret_vals.append(self.key.depends_on())
            if self.type == "OTHER":
                ret_vals.append(1 + my_types[self.value].depends_on())
            if self.type in ["LIST", "MAP", "GROUP", "UNION"]:
                ret_vals.extend(child.depends_on() for child in self.value)
            self.dependsOnCall = False

        return max(ret_vals)

    # Make a string from the list returned by single_func_prim()
    def decode_single_func_prim(self):
        return decode_statement(*self.single_func_prim())

    # Return the full code needed to decode a "BSTR" or "TSTR" element.
    def decode_str(self):
        assert self.type in ["BSTR", "TSTR"], "Expected string type."
        # Append an exact-content check when the CDDL pins the string value.
        return self.decode_single_func_prim() + (
            "&& !memcmp(\"{0}\", {1}.value, {1}.len)".format(
                self.value, self.val_access()) if self.expected_string_condition()
            else "")

    # Recursively sum the total minimum and maximum element count for this
    # element.
    def list_counts(self):
        """Return (min, max) CBOR element counts contributed by this element."""
        return {
            "INT": lambda: (self.minQ, self.maxQ),
            "UINT": lambda: (self.minQ, self.maxQ),
            "NINT": lambda: (self.minQ, self.maxQ),
            "FLOAT": lambda: (self.minQ, self.maxQ),
            "BSTR": lambda: (self.minQ, self.maxQ),
            "TSTR": lambda: (self.minQ, self.maxQ),
            "BOOL": lambda: (self.minQ, self.maxQ),
            "NIL": lambda: (self.minQ, self.maxQ),
            "ANY": lambda: (self.minQ, self.maxQ),
            # Lists are their own element
            "LIST": lambda: (self.minQ, self.maxQ),
            # Maps are their own element
            "MAP": lambda: (self.minQ, self.maxQ),
            # A group flattens into its parent: counts of all children add up.
            "GROUP": lambda: (self.minQ * sum((child.minQ for child in self.value)),
                              self.maxQ * sum((child.maxQ for child in self.value))),
            # A union contributes the smallest/largest single alternative.
            "UNION": lambda: (self.minQ * min((child.minQ for child in self.value)),
                              self.maxQ * max((child.maxQ for child in self.value))),
            "OTHER": lambda: (q1 * q2 for q1, q2 in
                              zip((self.minQ, self.maxQ),
                                  my_types[self.value].list_counts())),
        }[self.type]()

    # Return the full code needed to decode a "LIST" or "MAP" element with children.
    def decode_list(self):
        assert self.type in ["LIST", "MAP"], "Expected LIST or MAP type, was %s." % self.type
        min_counts, max_counts = zip(
            *(child.list_counts() for child in self.value)) if self.value else ((0,), (0,))
        # Emit: open the list (pushing the outer element count), decode each
        # child, then restore the outer element count.
        return "(%s)" % (self.newl_ind + "&& ").join(
            (decode_statement("list_start_decode", "*(p_temp_elem_count++)",
                              str(sum(min_counts)), str(sum(max_counts))),)
            + tuple(child.full_decode() for child in self.value)
            + ("((p_state->elem_count = *(--p_temp_elem_count)) || 1)",))

    # Return the full code needed to decode a "GROUP" element's children.
    def decode_group(self):
        assert self.type in ["GROUP"], "Expected GROUP type."
        return "(%s)" % (self.newl_ind + "&& ").join(
            (child.full_decode() for child in self.value))

    # Return the full code needed to decode a "UNION" element's children.
    def decode_union(self):
        assert self.type in ["UNION"], "Expected UNION type."
        child_values = ["(%s && ((%s = %s) || 1))" %
                        (child.full_decode(), self.choice_var_access(), child.var_name())
                        for child in self.value]

        # Reset state for all but the first child.
        for i in range(1, len(child_values)):
            child_values[i] = f"((p_state->p_payload = p_payload_bak) && " \
                              f"((p_state->elem_count = elem_count_bak) || 1) && " \
                              f"{child_values[i]})"

        # Save payload/element-count state first so each alternative starts fresh.
        return "((p_payload_bak = p_state->p_payload) && ((elem_count_bak = p_state->elem_count) || 1) && (%s))" % \
            (self.newl_ind + "|| ").join(child_values)

    # Return the full code needed to decode this element, including children,
    # key and cbor, excluding repetitions.
    def repeated_decode(self):
        decoder = {
            "INT": self.decode_single_func_prim,
            "UINT": self.decode_single_func_prim,
            "NINT": self.decode_single_func_prim,
            "FLOAT": self.decode_single_func_prim,
            "BSTR": self.decode_str,
            "TSTR": self.decode_str,
            "BOOL": self.decode_single_func_prim,
            "NIL": self.decode_single_func_prim,
            "ANY": self.decode_single_func_prim,
            "LIST": self.decode_list,
            "MAP": self.decode_list,
            "GROUP": self.decode_group,
            "UNION": self.decode_union,
            "OTHER": self.decode_single_func_prim,
        }[self.type]()
        if self.key or self.cbor:
            # Key (for map entries) is decoded before the value; a cbor-encoded
            # bstr additionally bumps elem_count before decoding its content.
            arguments = ([self.key.full_decode()] if self.key is not None else [])\
                + ([decoder])\
                + ([f"(p_state->elem_count += {self.cbor.maxQ})",
                    self.cbor.full_decode()] if self.cbor_var_condition() else [])
            decoder = "(%s)" % ((self.newl_ind + "&& ").join(arguments),)
        return decoder

    # Code for the size of the repeated part of this element.
    def result_len(self):
        if self.repeated_type_name() is None:
            return "0"
        else:
            return "sizeof(%s)" % self.repeated_type_name()

    # Return the full code needed to decode this element, including children,
    # key, cbor, and repetitions.
    def full_decode(self):
        """Return the decode expression including repetitions (multi_decode wrapper)."""
        if self.multi_decode_condition():
            func, *arguments = self.repeated_single_func()
            return (
                "multi_decode(%s, %s, &%s, (void*)%s, %s, %s)" %
                (self.minQ,
                 self.maxQ,
                 self.count_var_access() if self.count_var_condition()
                 else self.present_var_access(),
                 func,
                 decode_args(*arguments),
                 self.result_len()))
        else:
            return self.repeated_decode()

    # Return the body of the decoder function for this element.
    def decode(self):
        return self.repeated_decode()

    # Recursively return a list of the bodies of the decoder functions for
    # this element and its children + key + cbor.
    def decoders(self):
        if self.type in ["LIST", "MAP", "GROUP", "UNION"]:
            for child in self.value:
                for decoder in child.decoders():
                    yield decoder
        if self.cbor:
            for decoder in self.cbor.decoders():
                yield decoder
        if self.key:
            for decoder in self.key.decoders():
                yield decoder
        if self.type == "OTHER" and self.value not in entry_type_names:
            for decoder in my_types[self.value].decoders():
                yield decoder
        if self.repeated_single_func_impl_condition():
            yield (self.decode(), self.repeated_decode_func_name(), self.repeated_type_name())
        if ((self.type != "OTHER") or self.repeated_multi_var_condition()) and (
                self.single_func_impl_condition()):
            decode_body = self.decode()
            yield (decode_body, self.decode_func_name(), self.type_name())


# Consumes and parses a single CDDL type, returning a
# CodeGenerator instance.
def parse_single(instr, base_name=None):
    value = CodeGenerator(base_name=base_name)
    instr = value.get_value(instr).strip()
    return value, instr


# Parses entire instr and returns a list of CodeGenerator
# instances.
def parse(instr):
    """Parse all CDDL types in instr into a list of CodeGenerator instances."""
    instr = instr.strip()
    values = []
    while instr != '':
        (value, instr) = parse_single(instr)
        values.append(value)
    return values


# Returns a dict containing multiple typename=>string
def get_types(instr):
    # Matches "name = value" (or socket/choice forms "$name //= value") up to
    # the start of the next type assignment.
    type_regex = r"(\s*?\$?\$?([\w-]+)\s*(\/{0,2})=\s*(.*?)(?=(\Z|\s*\$?\$?[\w-]+\s*\/{0,2}=(?!\>))))"
    result = defaultdict(lambda: "")
    types = [(key, value, slashes)
             for (_1, key, slashes, value, _2) in findall(type_regex, instr, S | M)]
    for key, value, slashes in types:
        if slashes:
            # "//=" choice additions are accumulated onto the existing entry.
            result[key] += slashes
            result[key] += value
            result[key] = result[key].lstrip(slashes)  # strip from front
        else:
            result[key] = value
    return dict(result)


# Strip CDDL comments (';') from the string.
def strip_comments(instr):
    comment_regex = r"\;.*?\n"
    return sub(comment_regex, '', instr)


# Return a list of typedefs for all defined types, with duplicate typedefs
# removed.
# NOTE(review): this function is truncated at the end of this chunk; the
# remainder of its body lies outside the visible source.
def unique_types(types):
    type_names = {}
    out_types = []
    for mtype in types:
        for type_def
<gh_stars>10-100 """ Tools for reading and processing the sensitivity analysis data files. Some of the files are specific to our project (`input_parameters.csv' and `results.csv`), but the results of sensitivity analyses are formatted as any SALib analysis results will be from a sobol analysis. Our data files are stored outside this repository because they are too large, so users need to specify the path to their data. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import pandas as pd def _map_pretty_names(df, column_names, pretty_names): for name in column_names: df[name] = df[name].map(pretty_names).fillna(df[name]) return df def format_salib_output(salib_output, run_name, pretty_names=None): """ Function reads the output of SALib.analyze and returns a dictionary that savvy expects. Parameters ---------- path : dict salib analyze output run_name : str, the name of the simulation pretty_names: dict, optional a dictionary mapping old names to new names Returns : dict """ df_list = salib_output.to_df() # combine S1 and ST df_list[0] = pd.concat((df_list[0], df_list[1]), axis=1) df_list.pop(1) # Make the Parameter Column # for i, _ in enumerate(df_list): df_list[0]['Parameter'] = df_list[0].index if pretty_names: df_list[0] = _map_pretty_names(df_list[0], ['Parameter'], pretty_names) df_list[0].reset_index(inplace=True, drop=True) # split up the parameters from S2 df_list[-1][['Parameter_1', 'Parameter_2']] = df_list[-1].index.to_series().apply(pd.Series) df_list[-1].reset_index(inplace=True, drop=True) if pretty_names: df_list[-1] = _map_pretty_names(df_list[-1], ['Parameter_1', 'Parameter_2'], pretty_names) return {run_name: df_list} def read_file(path, numrows=None, drop=False, sep=','): """ Function reads a file of input parameters or model results and returns a pandas dataframe with its contents. The first line of the input should contain headers corresponding to the column names. 
Parameters ---------- path : str the complete filename, including absolute or relative path. numrows : int, optional number of rows of the file to read. If you don't specify this parameter all rows will be read. drop : list, optional list of strings indicating which (if any) of the named columns you do not want to include in the resulting dataframe. (ex. ['cats', 'dogs'], default is not to drop any rows). sep : str string indicating the column separator in the file (optional, default = ','). Returns -------- df : pandas dataframe A pandas dataframe with the contents of the file, limited to the number of rows specified and without the columns named in "drop". """ df = pd.read_csv(path, sep=sep, nrows=numrows) if not drop: df.drop(drop, axis=1, inplace=True) return df def get_params(path='./input_parameters.csv', numrows=None, drop=['End_time', 'Oxygen']): """ NOTE: This function is specific to our lignin modeling dataset and is not needed for the visualization features of savvy Returns a pandas dataframe with all the parameters analyzed in the sensitivity analysis, but not additional parameters like end time and oxygen content. If you would like all of the parameters (even those not analyzed for sensitivity) then pass drop=False. Parameters ---------- path : str, optional string containing the path to the parameters csv. numrows : int, optional the number of rows of the input_parameters file to read (default is to read all rows). drop : list, optional a list of strings for which parameters you do not want to include in the returned dataframe. If you want all params then pass drop=False. 
Returns ------- pandas dataframe """ return read_file(path, numrows=numrows, drop=drop) def get_results(path='./results.csv', numrows=None, drop=['light_aromatic_C-C', 'light_aromatic_methoxyl']): """ NOTE: This function is specific to our lignin modeling dataset and is not needed for the visualization features of savvy Returns a pandas dataframe with the results of running all of the simulations for the parameters sets in `input_parameters.csv`. This function drops two unused functional groups from the results file. Parameters ---------- path : str, optional the path to the results csv file. numrows : int, optional the number of rows of the input_parameters file to read (default is to read all rows). drop : list, optional a list of strings for which output measures to drop from the returned dataframe. If you want all outputs use drop=False. Returns ------- pandas dataframe """ return read_file(path, numrows=numrows, drop=drop) def get_sa_data(path='.'): """ This function reads and processes all the sensitivity analysis results in a specified folder and returns a dictionary with the corresponding dataframes for first/total order sensitivity indices and second order indices (if present). Sensitivity analysis results should be in the default SALib output format and must start with the word 'analysis'. NOTE: there are two lines of code at the beginning of this function (the filenames.remove lines) that are specific to our lignin modeling dataset. Future users can remove or modify these lines to use with other datasets. Parameters ----------- path : str, optional String containing the relative or absolute path of the directory where analysis_*.txt files are stored. There cannot be any files or folders within this directory that start with 'analysis' except those generated by the SALib sensitivity analysis. 
All `analysis*` files in this path should correspond to outputs from one sensitivity analysis project, and if second order sensitivity indices are included in any of the files they should be present in all the others. Returns -------- sens_dfs : dict Dictionary where keys are the names of the various output measures (one output measure per analysis file in the folder specified by path). Dictionary values are a list of pandas dataframes. sens_dfs['key'][0] is a dataframe with the first and total order indices of all the parameters with respect to the "key" output variable. sens_dfs['key'][1] is a dataframe with the second order indices for pairs of parameters (if second order indices are present in the analysis file). If there are no second order results in the analysis file then this value is a boolean, False. """ filenames = [filename for filename in os.listdir( path) if filename.startswith('analysis')] # These two functional groups are not present in the light oil fraction if 'analysis_light_aromatic-C-C.txt' in filenames: filenames.remove('analysis_light_aromatic-C-C.txt') if 'analysis_light_aromatic-methoxyl.txt' in filenames: filenames.remove('analysis_light_aromatic-methoxyl.txt') # Make a dictionary where keys are the different output measures # (one for each analysis file) and values are lists of dataframes # with the first/total analysis results, and the second order results. 
sens_dfs = {} for filename in filenames: name = filename[9:].replace('.txt', '') with open(path + filename) as result: contents = [] contents.append(result.readlines()) # find the line number in the file where 2nd order results appear for j, line in enumerate(contents[0]): # End this loop when you reach the line that separates # the first/total indices from the second order indices if line.startswith('\n'): break # If no second order indices in file else: j = False # If there are second order indices in the file if j: sens_dfs[name] = [pd.read_csv(path + filename, sep=' ', nrows=(j - 1)), pd.read_csv(path + filename, sep=' ', skiprows=j) ] else: sens_dfs[name] = [pd.read_csv(path + filename, sep=' '), False] # Deal with negative values. All negative values appear to be close # to zero already; they are the result of machine precision issues or # setting n too low when generating parameter sets. To properly # correct this issue you should re-run your model with n greater, # but sometimes that is too expensive so this is a hack to allow # display of them in a logical way. # . 
# adjust confidence interval to account for shifting sensitivity value sens_dfs[name][0].loc[sens_dfs[name][0]['S1'] < 0, 'S1_conf'] = ( sens_dfs[name][0]['S1_conf'] + sens_dfs[name][0]['S1'] - 0.0001) # set the new sensitivity value = 0.0001 sens_dfs[name][0].loc[sens_dfs[name][0]['S1'] < 0, 'S1'] = 0.0001 # do the same for total and second order indices sens_dfs[name][0].loc[sens_dfs[name][0]['ST'] < 0, 'ST_conf'] = ( sens_dfs[name][0]['ST_conf'] + sens_dfs[name][0]['ST'] - 0.0001) sens_dfs[name][0].loc[sens_dfs[name][0]['ST'] < 0, 'ST'] = 0.0001 if isinstance(sens_dfs[name][1], pd.DataFrame): sens_dfs[name][1].loc[sens_dfs[name][1]['S2'] < 0, 'S2_conf'] = ( sens_dfs[name][1]['S2_conf'] + sens_dfs[name][1]['S2'] - 0.0001) sens_dfs[name][1].loc[sens_dfs[name][1]['S2'] < 0, 'S2'] = 0.0001 # Change 'rxn' to 'k' for consistency with inputs file sens_dfs[name][0].Parameter = (sens_dfs[name][0].Parameter .str.replace('rxn', 'k', case=False)) return sens_dfs def find_unimportant_params(header='ST', path='.'): """ This function finds which parameters have sensitivities and confidence intervals equal to exactly 0.0, which means those parameters have no role in influencing the output variance for any of the calculated output measures. These parameters could
<gh_stars>0
# coding: utf8
"""
A sample Python 2 script that demonstrates several anticipated read-only uses of EDD's REST API.
The general process followed by this script is:

1) Query EDD and/or ICE for contextual data based on parameters used to narrow the bounds of the
   search.
2) Query EDD for a subset of studies of interest
3) Drill down into study internals, caching contextual data as needed to help further narrow
   and/or interpret search results. Clients would likely need to create additional caches -- this
   sample focuses just on querying / cacheing the most relevant and easily-cacheable EDD data.
4) If requested, write results to CSV file in a similar format to that produced by EDD's file
   export feature.

One notable omission in this example is querying for line/assay metadata that define culture
conditions. If the first version is helpful, further examples of those queries can be added here
later as the API improves.

For a simpler example of accessing EDD's REST API:
"""
import argparse
import arrow
import collections
import csv
import imp
import logging
from logging.config import dictConfig

from future.utils import viewitems, viewvalues
from os import path
from requests import HTTPError, codes
from six.moves.urllib.parse import urlparse

from jbei.rest.auth import EddSessionAuth, IceSessionAuth
from jbei.rest.clients.edd.api import EddApi
from jbei.rest.clients.ice.api import IceApi
from jbei.rest.clients.ice.api import Strain as IceStrain
from jbei.rest.clients.ice.utils import build_entry_ui_url
from jbei.utils import session_login, UserInputTimer
from . import settings

dictConfig(settings.LOGGING)

logger = logging.getLogger(__name__)

_PAGE_RECEIVED_MSG = ('Received page %(page)d with %(count)d %(class)s (total %(total)d '
                      'found)')

# Command-line argument names used throughout the script.
_EDD_URL_ARG = 'edd_url'
_ICE_URL_ARG = 'ice_url'
_USERNAME_ARG = 'username'
_PASSWORD_ARG = 'password'
_IGNORE_ICE_ERRORS_ARG = 'ignore_ice_errors'
_OUTPUT_FILE_ARG = 'output_file'
_OVERWRITE_ARG = 'overwrite'
_ALLOW_CO_CULTURE_ARG = 'allow_co_culture'
_TARGET_ICE_INSTANCE_ARG = 'target_ice_url'
_STUDY_SLUG_ARG = 'study_slug'
_STUDY_ID_ARG = 'study_id'
_ICE_PARTS_ARG = 'ice_parts'
_PROTOCOLS_ARG = 'protocols'
_MTYPES_ARG = 'mtypes'
_UNITS_ARG = 'units'
_MOD_SINCE_ARG = 'mod_since'

_ICE_PARTS_CONFIG = 'ICE_PART_IDS'


class LogIndentAdapter(logging.LoggerAdapter):
    """
    A simple adapter that allows us to set the indent level for log output to help improve
    readability.
    """
    def __init__(self, logger, extra):
        super(LogIndentAdapter, self).__init__(logger, extra)
        # current indent depth; each level prefixes messages with '...'
        self.indent_level = 0

    def process(self, msg, kwargs):
        return '{i}{m}'.format(i='...' * self.indent_level, m=msg), kwargs


# Rebind the module logger so all subsequent logging goes through the adapter.
logger = LogIndentAdapter(logger, {})


class SearchParameters:
    """
    Captures parameters read from settings file that are used to narrow the bounds of EDD
    searches for this sample program. While not every possible query can be implemented in a
    simple example program, this script should hit many of the most highly-anticipated filtering
    options. Whenever possible, EDD clients are highly encouraged to filter results for better
    performance / earlier detection of some common errors.
    """
    def __init__(self):
        self.study_slug = None  # URL portion that uniquely identifies the study of interest
        self.study_id = None  # UUID or integer pk used to specify a single study of interest

        # if no study is specified, used to search & process only studies updated after the
        # specified date. Note that at the time of writing, EDD's stored study modification date
        # is misleading and only applies to the study name/description/contact fields.
        self.studies_modified_since = None

        # optional filter parameters...if configured, we'll filter queries to only the ones that
        # contain one or more of these values
        self.ice_part_ids = []

        # name regular expression searches to filter results by protocols, measurement types, etc.
        # of interest. Note that for production use, it's better to identify UUID's and do direct
        # lookup rather than name-based searches, but for example purposes this are simplest &
        # most durable across EDD instances
        self.protocol_name_regexes = []
        self.measurement_type_name_regexes = []
        self.unit_name_regexes = []

    def filter_by_studies(self):
        return self.study_slug or self.study_id or self.studies_modified_since

    def filter_by_strains(self):
        return bool(self.ice_part_ids)

    def filter_by_measurement_types(self):
        return bool(self.measurement_type_name_regexes)

    def filter_by_protocols(self):
        return bool(self.protocol_name_regexes)

    def filter_by_units(self):
        return bool(self.unit_name_regexes)

    def has_filters(self):
        return (self.filter_by_studies() or self.filter_by_strains() or
                self.filter_by_measurement_types() or self.filter_by_protocols() or
                self.filter_by_units())

    def print_summary(self):
        """Log a human-readable summary of all configured filters."""
        logger.info('Search parameters:')
        logger.indent_level += 1
        if self.study_slug:
            logger.info('Study slug:\t%s' % self.study_slug)
        elif self.study_id:
            logger.info('Study id:\t%s' % self.study_id)
        elif self.studies_modified_since:
            logger.info('Studies mod after:\t%s' % self.studies_modified_since)

        if self.ice_part_ids:
            logger.info('ICE part ids: %s' % self.ice_part_ids)
        if self.protocol_name_regexes:
            logger.info('Protocols: %s' % self.protocol_name_regexes)
        if self.measurement_type_name_regexes:
            logger.info('Measurement types: %s' % self.measurement_type_name_regexes)
        if self.unit_name_regexes:
            logger.info('Units: %s' % self.unit_name_regexes)
        logger.indent_level -= 1


def extract_id_from_ui_url(ice_part_ui_url):
    """
    Extracts an ICE identifier for a part from a valid ICE user interface URL.

    Note that ICE's user interface accepts multiple different identifiers, so prior knowledge is
    needed to distinguish between the identifiers accepted.
    :param ice_part_ui_url:
    :return: the identifier
    """
    url_parts = urlparse(ice_part_ui_url)
    url_path = url_parts.path
    elts = url_path.split('/')

    # a trailing '/' in the URL yields an empty final element; fall back to
    # the preceding path segment in that case
    if elts[-1]:
        return elts[-1]
    elif len(elts) > 1:
        return elts[-2]

    return None


class ContextCache:
    """
    A cache of contextual query results from EDD/ICE that should be static on a short time scale.
    These queries should be re-executed with client each program run, but provided the run length
    isn't too long, the results can be safely assumed to be static during a single execution.  In
    this example, they're useful for things like interpreting and/or filtering out only the
    measurements and strains of interest for a particular client application.  Depending on use,
    it may or may not be appropriate for clients to cache all of this information, but this
    example should be a good starting point for future work.
    """
    def __init__(self):
        self.TARGETED_PROTEOMICS_PK = None
        self.protocols_by_pk = {}

        # strain lookup tables. Part ID is a locally-unique identifier used by ICE, and by JBEI/ABF
        # researchers, due to its brevity. EDD exposes minimal strain data in its API,
        # leaving strain tracking to ICE.
        self.ice_entries_by_url = {}  # initially empty if not filtering by strain

        # Measurement type lookup tables
        self.meas_types_by_pk = {}
        self.measurement_types_by_name = {}

        self.units_by_pk = {}

        ################################################################
        # ICE part access problems.
        ################################################################
        # These are most likely due to user error at some stage of the process, but tend to
        # manifest during part lookup in ICE.
        self.missing_part_ids = []  # IDs for parts that ICE informed us are not present

        # ID's for ICE parts where we encountered permission problems during lookup. This happens!!
        self.ice_permission_error_part_ids = []

        # User-provided part numbers for ICE parts where the requested part wasn't a strain as
        # required by EDD. TODO: recent changes have relaxed this restriction, e.g. for novel
        # enzymes.
        self.non_strain_ice_parts = []

        # measurement type pks encountered during Measurement inspection that need to be looked
        # up afterward
        self.deferred_lookup_measurement_type_pks = set()
        self.deferred_lookup_strain_ids = set()
        self.observed_edd_strain_ids = set()

    def add_measurement_type(self, measurement_type, indent_level=0):
        logger.indent_level += 1
        logger.debug('Caching MeasurementType "%s"' % measurement_type.type_name)
        logger.indent_level -= 1
        self.meas_types_by_pk[measurement_type.pk] = measurement_type
        self.measurement_types_by_name[measurement_type.type_name] = measurement_type

    def add_protocol(self, protocol):
        logger.indent_level += 1
        logger.debug('Caching Protocol "%s"' % protocol.name)
        logger.indent_level -= 1
        self.protocols_by_pk[protocol.pk] = protocol

    def has_measurement_types(self):
        return bool(self.meas_types_by_pk)

    def add_units(self, units):
        logger.indent_level += 1
        logger.debug('Caching MeasurementUnits "%s"' % units.unit_name)
        logger.indent_level -= 1
        self.units_by_pk[units.pk] = units

    def add_observed_strain(self, strain_url):
        """Record a strain URL seen in EDD; return True if its ICE entry is already cached."""
        id = extract_id_from_ui_url(strain_url)
        self.observed_edd_strain_ids.add(id)

        # if we're filtering by strains and we've already seen this one, we're done
        if strain_url in self.ice_entries_by_url:
            return True

        # otherwise, cache the id and look up the strain in ICE later
        self.deferred_lookup_strain_ids.add(id)
        return False


class ResultCache(object):
    """
    A simple cache of results read from EDD's REST API. As result objects are read from incoming
    JSON and fed into this cache, a graph of related STUDY objects is constructed, overwriting
    pk-based fields received from the JSON.  Note that related context data, whose lookup may be
    deferred, isn't resolved during the caching process (e.g. assay protocols, Measurements'
    MeasurementTypes, MeasurementUnits, etc).

    To facilitate testing, the cache also compares query results against the search parameters
    for consistency and raises an error if any REST query returns results that are inconsistent
    with previous observations.
    """
    def __init__(self, global_search_parameters, context_cache):
        self.studies_by_pk = {}
        self.lines_by_pk = {}
        self.assays_by_pk = {}
        self.measurements_by_pk = {}
        self.observed_assay_protocol_pks = set()
        self.values_observed = 0
        self.global_search_parameters = global_search_parameters
        self.context_cache = context_cache

    def process_studies(self, studies):
        self._cache_by_pk(studies, self.studies_by_pk)

    def process_lines(self, study_pk, lines):
        """Attach lines to their parent study and cache them by pk."""
        study = self.studies_by_pk[study_pk]
        for line in lines:
            line.study = study
            if not hasattr(study, 'lines'):
                study.lines = [line]
            else:
                study.lines.append(line)

        # cache by pk to allow lookup from assay & replicate lookup
        self._cache_by_pk(lines, self.lines_by_pk)

    # NOTE(review): this method is truncated at the end of this chunk; the
    # remainder of its body lies outside the visible source.
    def process_assays(self, assays):
        self._cache_by_pk(assays, self.assays_by_pk)

        for assay in assays:
            line = self.lines_by_pk[assay.line]
            assay.line = line

            self.observed_assay_protocol_pks.add(assay.protocol)
            if (assay.protocol not in self.context_cache.protocols_by_pk and
                    self.global_search_parameters.filter_by_protocols()):
                raise RuntimeError('Assay search returned an assay with protocol %d, which was '
                                   'not included in results from initial protocol search.'
                                   % assay.protocol)
            if
-1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],\ [-1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],\ [-1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]] targEt = [[-1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1],\ [-1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1,\ -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],\ [-1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]] targET = [[ 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1,\ -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1],\ [-1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, 
1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\ [-1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\ -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]] targeT = [[ 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,\ -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1,\ -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1,\ -1, -1, -1, 1, -1, -1, -1,
import numpy as np import logging import itertools import operator from gelcoverage.tools.bed_reader import BedReader from gelcoverage.tools.bigwig_reader import BigWigReader import gelcoverage.constants as constants def find_gaps(coverages, start_position, coverage_threshold): """ Find continuous genomic positions under a given threshold coverage_threshold. :param coverages: list of depth of coverage values :param start_position: starting position of the coverages sequence :param coverage_threshold: the coverage threshold to determine gaps :return: the gaps start and end genomic coordinates in JSON-friendly format. Chromosome is not set as this information will be embedded within an exon-transcript-gene where the chromosome is available. """ end = start_position + len(coverages) open_gap = False current_gap = {} gaps = [] # Iterates through every coverage position for idx, value in enumerate(coverages): if value < coverage_threshold and not open_gap: open_gap = True current_gap[constants.GAP_START] = start_position + idx elif value >= coverage_threshold and open_gap: open_gap = False current_gap[constants.GAP_END] = start_position + idx - 1 current_gap[constants.GAP_LENGTH] = current_gap[constants.GAP_END] - current_gap[constants.GAP_START] + 1 gaps.append(current_gap) current_gap = {} # Closes the last gap when it extends until the last position if open_gap: current_gap[constants.GAP_END] = end current_gap[constants.GAP_LENGTH] = current_gap[constants.GAP_END] - current_gap[constants.GAP_START] + 1 gaps.append(current_gap) return gaps def compute_exon_level_statistics(coverages, gc_content): """ Computes coverage and GC content statistics :param coverages: list of depth of coverage values :param gc_content: the GC content for this sequence precomputed :return: the coverage and GC content exon statistics in JSON-friendly format """ stats = { constants.BASES: len(coverages) if coverages else 0, constants.AVERAGE: round(float(np.mean(coverages)), 3) if coverages 
else 0.0, constants.MEDIAN: round(float(np.median(coverages)), 3) if coverages else 0.0, constants.PERCENTILE75: round(float(np.percentile(coverages, 75)), 3) if coverages else 0.0, constants.PERCENTILE25: round(float(np.percentile(coverages, 25)), 3) if coverages else 0.0, constants.SD: round(float(np.std(coverages)), 3) if coverages else 0.0, constants.BASES_LT15X: int(np.sum(1 for x in coverages if x < 15)) if coverages else 0, constants.BASES_GTE15X: int(np.sum(1 for x in coverages if x >= 15)) if coverages else 0, constants.BASES_GTE30X: int(np.sum(1 for x in coverages if x >= 30)) if coverages else 0, constants.BASES_GTE50X: int(np.sum(1 for x in coverages if x >= 50) if coverages else 0) } stats[constants.LT15X] = round(float(stats[constants.BASES_LT15X]) / stats[constants.BASES], 5) \ if stats[constants.BASES] > 0 else 0.0 stats[constants.GTE15X] = round(float(stats[constants.BASES_GTE15X]) / stats[constants.BASES], 5) \ if stats[constants.BASES] > 0 else 0.0 stats[constants.GTE30X] = round(float(stats[constants.BASES_GTE30X]) / stats[constants.BASES], 5) \ if stats[constants.BASES] > 0 else 0.0 stats[constants.GTE50X] = round(float(stats[constants.BASES_GTE50X]) / stats[constants.BASES], 5) \ if stats[constants.BASES] > 0 else 0.0 if gc_content is not None: # GC content is not provided for padded exons stats[constants.GC_CONTENT] = gc_content return stats def compute_transcript_level_statistics(exons): """ Computes coverage and GC content statistics at gene level by aggregating the statistics at exon level. Median and percentiles are estimated by weighting the per-exon metric by the number of bases. 
:param exons: list of exon coverage and GC content statistics :return: the coverage and GC content gene statistics in JSON-friendly format """ exons_stats = [x[constants.STATISTICS] for x in exons] total_bases = int(np.sum([x[constants.BASES] for x in exons_stats])) if exons_stats else 0 bases_lt_15x = int(np.sum([x[constants.BASES_LT15X] for x in exons_stats])) if exons_stats else 0 bases_gte_15x = int(np.sum([x[constants.BASES_GTE15X] for x in exons_stats])) if exons_stats else 0 bases_gte_30x = int(np.sum([x[constants.BASES_GTE30X] for x in exons_stats])) if exons_stats else 0 bases_gte_50x = int(np.sum([x[constants.BASES_GTE50X] for x in exons_stats])) if exons_stats else 0 stats = { constants.BASES: total_bases, constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in exons_stats])), 3) if exons_stats else 0.0, constants.MEDIAN: round(float(np.sum( [x[constants.MEDIAN] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else float(0.0), constants.PERCENTILE25: round(float(np.sum( [x[constants.PERCENTILE25] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else 0.0, constants.PERCENTILE75: round(float(np.sum( [x[constants.PERCENTILE75] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else 0.0, constants.SD: round(float(np.sum( [x[constants.SD] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else 0.0, constants.LT15X: round(float(bases_lt_15x) / total_bases, 5) if total_bases > 0 else 0.0, constants.GTE15X: round(float(bases_gte_15x) / total_bases, 5) if total_bases > 0 else 0.0, constants.GTE30X: round(float(bases_gte_30x) / total_bases, 5) if total_bases > 0 else 0.0, constants.GTE50X: round(float(bases_gte_50x) / total_bases, 5) if total_bases > 0 else 0.0, constants.BASES_LT15X: bases_lt_15x, constants.BASES_GTE15X: bases_gte_15x, constants.BASES_GTE30X: bases_gte_30x, constants.BASES_GTE50X: bases_gte_50x } try: 
stats[constants.GC_CONTENT] = round(float(np.sum( [x[constants.GC_CONTENT] * x[constants.BASES] for x in exons_stats]) / total_bases), 5) \ if exons_stats and total_bases > 0 else 0.0 except KeyError: # There is no GC content data to show (e.g.: the union transcript) pass return stats def compute_coding_region_statistics(genes): """ :param genes: :return: """ logging.info("Computing coding region statistics...") results = { constants.STATISTICS: None, constants.CHROMOSOMES: [] } # Avoids failing when no genes have been reported (might be related with wrong BAM and/or gene list) if len(genes) == 0: return results # Compute the stats aggregated for union transcript genes_stats = [x[constants.UNION_TRANSCRIPT][constants.STATISTICS] for x in genes] total_bases = int(np.sum([x[constants.BASES] for x in genes_stats])) if genes_stats else 0 bases_lt_15x = int(np.sum([x[constants.BASES_LT15X] for x in genes_stats])) if genes_stats else 0 bases_gte_15x = int(np.sum([x[constants.BASES_GTE15X] for x in genes_stats])) if genes_stats else 0 bases_gte_30x = int(np.sum([x[constants.BASES_GTE30X] for x in genes_stats])) if genes_stats else 0 bases_gte_50x = int(np.sum([x[constants.BASES_GTE50X] for x in genes_stats])) if genes_stats else 0 results[constants.STATISTICS] = { constants.BASES: total_bases, constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in genes_stats])), 3) if genes_stats else 0.0, constants.MEDIAN: round(float(np.sum( [x[constants.MEDIAN] * x[constants.BASES] for x in genes_stats]) / total_bases), 3) if genes_stats and total_bases > 0 else 0.0, constants.PERCENTILE75: round(float(np.sum( [x[constants.PERCENTILE75] * x[constants.BASES] for x in genes_stats]) / total_bases), 3) if genes_stats and total_bases > 0 else 0.0, constants.PERCENTILE25: round(float(np.sum( [x[constants.PERCENTILE25] * x[constants.BASES] for x in genes_stats]) / total_bases), 3) if genes_stats and total_bases > 0 else 0.0, constants.SD: round(float(np.sum( [x[constants.SD] * 
x[constants.BASES] for x in genes_stats]) / total_bases), 3) if genes_stats and total_bases > 0 else 0.0, constants.LT15X: round(float(bases_lt_15x) / total_bases, 5) if total_bases > 0 else 0.0, constants.GTE15X: round(float(bases_gte_15x) / total_bases, 5) if total_bases > 0 else 0.0, constants.GTE30X: round(float(bases_gte_30x) / total_bases, 5) if total_bases > 0 else 0.0, constants.GTE50X: round(float(bases_gte_50x) / total_bases, 5) if total_bases > 0 else 0.0 } # Compute the stats disaggregated by chromosome chr2stats = [(x[constants.CHROMOSOME], x[constants.UNION_TRANSCRIPT][constants.STATISTICS]) for x in genes] def groupby_chromosome(list_of_tuples): it = itertools.groupby(list_of_tuples, operator.itemgetter(0)) for _chromosome, subiter in it: yield _chromosome, [item[1] for item in subiter] # Aggregates stats for all chromosomes chromosome_stats = dict(groupby_chromosome(chr2stats)) autosomes_stats = [] for chromosome, chr_stats in chromosome_stats.iteritems(): chr_total_bases = int(np.sum([x[constants.BASES] for x in chr_stats])) if chr_stats else 0 chr_bases_lt_15x = int(np.sum([x[constants.BASES_LT15X] for x in chr_stats])) if chr_stats else 0 chr_bases_gte_15x = int(np.sum([x[constants.BASES_GTE15X] for x in chr_stats])) if chr_stats else 0 chr_bases_gte_30x = int(np.sum([x[constants.BASES_GTE30X] for x in chr_stats])) if chr_stats else 0 chr_bases_gte_50x = int(np.sum([x[constants.BASES_GTE50X] for x in chr_stats])) if chr_stats else 0 formatted_chr_stats = { constants.CHROMOSOME: chromosome, constants.BASES: chr_total_bases, constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in chr_stats])), 3) if chr_stats else 0.0, constants.MEDIAN: round(float(np.sum( [x[constants.MEDIAN] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3) if chr_stats and chr_total_bases > 0 else 0.0, constants.PERCENTILE75: round(float(np.sum( [x[constants.PERCENTILE75] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3) if chr_stats 
and chr_total_bases > 0 else 0.0, constants.PERCENTILE25: round(float(np.sum( [x[constants.PERCENTILE25] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3) if chr_stats and chr_total_bases > 0 else 0.0, constants.SD: round(float(np.sum( [x[constants.SD] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3) if chr_stats and chr_total_bases > 0 else 0.0, constants.LT15X: round(float(chr_bases_lt_15x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0, constants.GTE15X: round(float(chr_bases_gte_15x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0, constants.GTE30X: round(float(chr_bases_gte_30x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0, constants.GTE50X: round(float(chr_bases_gte_50x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0 } results[constants.CHROMOSOMES].append(formatted_chr_stats) logging.info("Coding region statistics for chromosome %s computed!" % chromosome) # Records stats for autosome if chromosome in constants.AUTOSOME_IDS: autosomes_stats.append(formatted_chr_stats) # Aggregates stats for autosomes autosomes_total_bases = int(np.sum([x[constants.BASES] for x in autosomes_stats])) if autosomes_stats else 0 autosomes_chr_stats = { constants.CHROMOSOME: constants.AUTOSOMES, constants.BASES: autosomes_total_bases, constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in autosomes_stats])), 3) if autosomes_stats else 0.0, constants.MEDIAN: round(float(np.sum( [x[constants.MEDIAN] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 3) if autosomes_stats and autosomes_total_bases > 0 else 0.0, constants.PERCENTILE75: round(float(np.sum( [x[constants.PERCENTILE75] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 3) if autosomes_stats and autosomes_total_bases > 0 else 0.0, constants.PERCENTILE25: round(float(np.sum( [x[constants.PERCENTILE25] * x[constants.BASES]
<reponame>leplatrem/addons-server # -*- coding: utf-8 -*- import base64 import json from datetime import datetime from time import sleep from xml.dom import minidom from django.conf import settings from django.core.cache import cache from olympia import amo from olympia.amo.tests import TestCase from olympia.amo.urlresolvers import reverse from olympia.blocklist.models import ( BlocklistApp, BlocklistCA, BlocklistDetail, BlocklistGfx, BlocklistItem, BlocklistIssuerCert, BlocklistPlugin, BlocklistPref) from olympia.blocklist.utils import JSON_DATE_FORMAT base_xml = """ <?xml version="1.0"?> <blocklist xmlns="http://www.mozilla.org/2006/addons-blocklist"> </blocklist> """ class XMLAssertsMixin(object): def assertOptional(self, obj, field, xml_field): """Make sure that if the field isn't filled in, it's not in the XML.""" # Save the initial value. initial = getattr(obj, field) try: # If not set, the field isn't in the XML. obj.update(**{field: ''}) assert self.dom(self.fx4_url).getElementsByTagName(xml_field) == [] # If set, it's in the XML. obj.update(**{field: 'foobar'}) element = self.dom(self.fx4_url).getElementsByTagName(xml_field)[0] assert element.firstChild.nodeValue == 'foobar' finally: obj.update(**{field: initial}) def assertAttribute(self, obj, field, tag, attr_name): # Save the initial value. initial = getattr(obj, field) try: # If set, it's in the XML. 
obj.update(**{field: 'foobar'}) element = self.dom(self.fx4_url).getElementsByTagName(tag)[0] assert element.getAttribute(attr_name) == 'foobar' finally: obj.update(**{field: initial}) def assertEscaped(self, obj, field): """Make sure that the field content is XML escaped.""" obj.update(**{field: 'http://example.com/?foo=<bar>&baz=crux'}) r = self.client.get(self.fx4_url) assert 'http://example.com/?foo=&lt;bar&gt;&amp;baz=crux' in r.content class BlocklistViewTest(TestCase): def setUp(self): super(BlocklistViewTest, self).setUp() self.fx4_url = reverse('blocklist', args=[3, amo.FIREFOX.guid, '4.0']) self.fx2_url = reverse('blocklist', args=[2, amo.FIREFOX.guid, '2.0']) self.tb4_url = reverse('blocklist', args=[3, amo.THUNDERBIRD.guid, '4.0']) self.mobile_url = reverse('blocklist', args=[2, amo.MOBILE.guid, '.9']) cache.clear() self.json_url = reverse('blocklist.json') self.details = BlocklistDetail.objects.create( name='blocked item', who='All Firefox and Fennec users', why='Security issue', bug='http://bug.url.com/', ) def create_blplugin(self, app_guid=None, app_min=None, app_max=None, *args, **kw): plugin = BlocklistPlugin.objects.create(*args, **kw) app = BlocklistApp.objects.create(blplugin=plugin, guid=app_guid, min=app_min, max=app_max) return plugin, app def normalize(self, s): return '\n'.join(x.strip() for x in s.split()) def eq_(self, x, y): assert self.normalize(x) == self.normalize(y) def dom(self, url): r = self.client.get(url) return minidom.parseString(r.content) class BlocklistItemTest(XMLAssertsMixin, BlocklistViewTest): def setUp(self): super(BlocklistItemTest, self).setUp() self.item = BlocklistItem.objects.create(guid='<EMAIL>', details=self.details) self.pref = BlocklistPref.objects.create(blitem=self.item, pref='foo.bar') self.app = BlocklistApp.objects.create(blitem=self.item, guid=amo.FIREFOX.guid) def stupid_unicode_test(self): junk = u'\xc2\x80\x15\xc2\x80\xc3' url = reverse('blocklist', args=[3, amo.FIREFOX.guid, junk]) # Just make sure 
it doesn't fail. assert self.client.get(url).status_code == 200 def test_content_type(self): response = self.client.get(self.fx4_url) assert response['Content-Type'] == 'text/xml' def test_empty_string_goes_null_on_save(self): b = BlocklistItem(guid='guid', min='', max='', os='') b.save() assert b.min is None assert b.max is None assert b.os is None def test_lastupdate(self): def eq(a, b): assert a == b.replace(microsecond=0) def find_lastupdate(): bl = self.dom(self.fx4_url).getElementsByTagName('blocklist')[0] t = int(bl.getAttribute('lastupdate')) / 1000 return datetime.fromtimestamp(t) eq(find_lastupdate(), self.item.created) self.item.save() eq(find_lastupdate(), self.item.modified) plugin, app = self.create_blplugin(app_guid=amo.FIREFOX.guid) eq(find_lastupdate(), plugin.created) plugin.save() eq(find_lastupdate(), plugin.modified) gfx = BlocklistGfx.objects.create(guid=amo.FIREFOX.guid) eq(find_lastupdate(), gfx.created) gfx.save() eq(find_lastupdate(), gfx.modified) assert (self.item.created != self.item.modified != plugin.created != plugin.modified != gfx.created != gfx.modified) def test_no_items(self): self.item.delete() dom = self.dom(self.fx4_url) children = dom.getElementsByTagName('blocklist')[0].childNodes # There are only text nodes. assert all(e.nodeType == 3 for e in children) def test_existing_user_cookie(self): self.client.cookies[settings.BLOCKLIST_COOKIE] = 'adfadf' self.client.get(self.fx4_url) assert self.client.cookies[settings.BLOCKLIST_COOKIE].value == 'adfadf' def test_url_params(self): assert self.client.get(self.fx4_url).status_code == 200 assert self.client.get(self.fx2_url).status_code == 200 # We ignore trailing url parameters. assert self.client.get(self.fx4_url + 'other/junk/').status_code == 200 def test_app_guid(self): # There's one item for Firefox. r = self.client.get(self.fx4_url) assert r.status_code == 200 assert len(r.context['items']) == 1 # There are no items for mobile. 
r = self.client.get(self.mobile_url) assert r.status_code == 200 assert len(r.context['items']) == 0 # Without the app constraint we see the item. self.app.delete() r = self.client.get(self.mobile_url) assert r.status_code == 200 assert len(r.context['items']) == 1 def test_item_guid(self): items = self.dom(self.fx4_url).getElementsByTagName('emItem') assert len(items) == 1 assert items[0].getAttribute('id') == '<EMAIL>' def test_block_id(self): item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] assert item.getAttribute('blockID') == 'i' + str(self.details.id) def test_block_id_consistency(self): # Clean the current blocklist so that we have only one self.item.delete() same_guid = '<EMAIL>' # Create a first detail first_created_details = BlocklistDetail.objects.create( name='blocked item', who='All Firefox and Fennec users', why='Security issue', bug='http://bug.url.com/4567', ) # Create a second detail secondly_created_details = BlocklistDetail.objects.create( name='blocked item', who='All Firefox and Fennec users', why='Security issue', bug='http://bug.url.com/1234', ) # Create a first item with the greatest blockID BlocklistItem.objects.create( guid=same_guid, details=first_created_details ) sleep(1) # Create a second item with the lowest blockID BlocklistItem.objects.create( guid=same_guid, details=secondly_created_details ) item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] assert item.getAttribute('id') == same_guid # Check that the blockID is the smallest assert item.getAttribute('blockID') == ( 'i%s' % str(secondly_created_details.id)) def test_item_os(self): item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] assert 'os' not in item.attributes.keys() self.item.update(os='win,mac') item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] assert item.getAttribute('os') == 'win,mac' def test_item_pref(self): self.item.update(severity=2) assert len(self.vr()) == 1 item = 
self.dom(self.fx4_url).getElementsByTagName('emItem')[0] prefs = item.getElementsByTagName('prefs') pref = prefs[0].getElementsByTagName('pref') assert pref[0].firstChild.nodeValue == self.pref.pref def test_item_severity(self): self.item.update(severity=2) assert len(self.vr()) == 1 item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] vrange = item.getElementsByTagName('versionRange') assert vrange[0].getAttribute('severity') == '2' def test_item_severity_zero(self): # Don't show severity if severity==0. self.item.update(severity=0, min='0.1') assert len(self.vr()) == 1 item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] vrange = item.getElementsByTagName('versionRange') assert vrange[0].getAttribute('minVersion') == '0.1' assert not vrange[0].hasAttribute('severity') def vr(self): item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0] return item.getElementsByTagName('versionRange') def test_item_version_range(self): self.item.update(min='0.1') assert len(self.vr()) == 1 assert self.vr()[0].attributes.keys() == ['minVersion'] assert self.vr()[0].getAttribute('minVersion') == '0.1' self.item.update(max='0.2') keys = self.vr()[0].attributes.keys() assert len(keys) == 2 assert 'minVersion' in keys assert 'maxVersion' in keys assert self.vr()[0].getAttribute('minVersion') == '0.1' assert self.vr()[0].getAttribute('maxVersion') == '0.2' def test_item_multiple_version_range(self): # There should be two <versionRange>s under one <emItem>. 
self.item.update(min='0.1', max='0.2') BlocklistItem.objects.create(guid=self.item.guid, severity=3) item = self.dom(self.fx4_url).getElementsByTagName('emItem') assert len(item) == 1 vr = item[0].getElementsByTagName('versionRange') assert len(vr) == 2 assert vr[0].getAttribute('minVersion') == '0.1' assert vr[0].getAttribute('maxVersion') == '0.2' assert vr[1].getAttribute('severity') == '3' def test_item_target_app(self): app = self.app self.app.delete() self.item.update(severity=2) version_range = self.vr()[0] assert version_range.getElementsByTagName('targetApplication') == [] app.save() version_range = self.vr()[0] target_app = version_range.getElementsByTagName('targetApplication') assert len(target_app) == 1 assert target_app[0].getAttribute('id') == amo.FIREFOX.guid app.update(min='0.1', max='*') version_range = self.vr()[0] target_app = version_range.getElementsByTagName('targetApplication') assert target_app[0].getAttribute('id') == amo.FIREFOX.guid tvr = target_app[0].getElementsByTagName('versionRange') assert tvr[0].getAttribute('minVersion') == '0.1' assert tvr[0].getAttribute('maxVersion') == '*' def test_item_multiple_apps(self): # Make sure all <targetApplication>s go under the same <versionRange>. self.app.update(min='0.1', max='0.2') BlocklistApp.objects.create(guid=amo.FIREFOX.guid, blitem=self.item, min='3.0', max='3.1') version_range = self.vr()[0] apps = version_range.getElementsByTagName('targetApplication') assert len(apps) == 2 assert apps[0].getAttribute('id') == amo.FIREFOX.guid vr = apps[0].getElementsByTagName('versionRange')[0] assert vr.getAttribute('minVersion') == '0.1' assert vr.getAttribute('maxVersion') == '0.2' assert apps[1].getAttribute('id') == amo.FIREFOX.guid vr = apps[1].getElementsByTagName('versionRange')[0] assert vr.getAttribute('minVersion') == '3.0' assert vr.getAttribute('maxVersion') == '3.1' def test_item_empty_version_range(self): # No version_range without an app, min, max, or severity. 
self.app.delete() self.item.update(min=None, max=None, severity=None) assert len(self.vr()) == 0 def test_item_empty_target_app(self): # No empty <targetApplication>. self.item.update(severity=1) self.app.delete() app = self.dom(self.fx4_url).getElementsByTagName('targetApplication') assert app == [] def test_item_target_empty_version_range(self): app = self.dom(self.fx4_url).getElementsByTagName('targetApplication') assert app[0].getElementsByTagName('versionRange') == [] def test_name(self): self.assertAttribute(self.item, field='name', tag='emItem', attr_name='name') def test_creator(self): self.assertAttribute(self.item, field='creator', tag='emItem', attr_name='creator') def test_homepage_url(self): self.assertAttribute(self.item, field='homepage_url', tag='emItem', attr_name='homepageURL') def test_update_url(self): self.assertAttribute(self.item, field='update_url', tag='emItem', attr_name='updateURL') def test_urls_escaped(self): self.assertEscaped(self.item, 'homepage_url') self.assertEscaped(self.item, 'update_url') def test_addons_json(self): self.item.update(os='WINNT 5.0', name='addons name', severity=0, min='0', max='*') self.app.update(min='2.0', max='3.0') app2 = BlocklistApp.objects.create( blitem=self.item, guid=amo.FIREFOX.guid, min='1.0', max='2.0') r = self.client.get(self.json_url) blocklist = json.loads(r.content) item = blocklist['addons'][0] assert item['guid'] == self.item.guid assert item['name'] == self.item.name assert item['os'] == self.item.os # VersionRange assert item['versionRange'] == [{ 'severity': 0, 'minVersion': '0', 'maxVersion': '*', 'targetApplication': [{ 'guid': self.app.guid, 'minVersion': '2.0', 'maxVersion': '3.0', }, { 'guid': app2.guid, 'minVersion': '1.0', 'maxVersion': '2.0', }] }] created = self.item.details.created assert item['details'] == { 'name': 'blocked item', 'who': 'All Firefox and Fennec users', 'why': 'Security issue', 'created': created.strftime(JSON_DATE_FORMAT), 'bug': 'http://bug.url.com/' } def 
test_addons_json_with_no_app(self): self.item.update(os='WINNT 5.0', severity=0, min='0', max='*') self.app.delete() r = self.client.get(self.json_url) blocklist = json.loads(r.content) item = blocklist['addons'][0] assert 'name' not in item assert item['os'] == self.item.os # VersionRange assert item['versionRange'] == [{ 'severity': 0, 'minVersion': '0', 'maxVersion': '*', 'targetApplication': [] }] created = self.item.details.created assert item['details'] == { 'name': 'blocked item', 'who': 'All Firefox and Fennec users', 'why': 'Security issue', 'created': created.strftime(JSON_DATE_FORMAT), 'bug': 'http://bug.url.com/' } def test_two_blitem_for_same_addon_json(self): self.item.update(os='WINNT 5.0', name='addons name', severity=0, min='0', max='*') BlocklistApp.objects.create(blitem=self.item, guid=amo.FIREFOX.guid, min='1.0', max='2.0') details = BlocklistDetail.objects.create( name='blocked item', who='All Thunderbird users', why='Security issue', bug='http://bug.url.com/', ) item2 = BlocklistItem.objects.create(guid=self.item.guid, os='WINNT 5.0', name='addons name', severity=0, min='0', max='*', details=details) BlocklistApp.objects.create(blitem=item2, guid=amo.THUNDERBIRD.guid, min='17.0', max='*') r = self.client.get(self.json_url) blocklist = json.loads(r.content) assert 'Firefox' in r.content assert 'Thunderbird' in r.content # Items are not grouped by guid assert len(blocklist['addons']) == 2 assert len(blocklist['addons'][0]['versionRange']) == 1 assert len(blocklist['addons'][1]['versionRange']) == 1 class BlocklistPluginTest(XMLAssertsMixin, BlocklistViewTest): def setUp(self): super(BlocklistPluginTest, self).setUp() self.plugin, self.app = self.create_blplugin(app_guid=amo.FIREFOX.guid, details=self.details) def test_no_plugins(self): dom = BlocklistViewTest.dom(self, self.mobile_url) children = dom.getElementsByTagName('blocklist')[0].childNodes # There are only text nodes. 
assert all(e.nodeType == 3 for e in children) def dom(self, url=None): url = url or self.fx4_url r = self.client.get(url) d = minidom.parseString(r.content) return d.getElementsByTagName('pluginItem')[0] def test_plugin_empty(self): self.app.delete() assert self.dom().attributes.keys() == ['blockID'] assert self.dom().getElementsByTagName('match') == [] assert self.dom().getElementsByTagName('versionRange') == [] def test_block_id(self):
# Copyright 2020, by the California Institute of Technology. ALL RIGHTS # RESERVED. United States Government Sponsorship acknowledged. Any # commercial use must be negotiated with the Office of Technology Transfer # at the California Institute of Technology. #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Name: # habex.m # # Purpose: # Representation of the Habex telescope and coronagraph. To be called using # the PROPER library procedure "proper.prop_run". # # Inputs: # lambda_m # The wavelength of propagation in meters (note that the wavelength is provided # to proper.prop_run in microns and is converted to meters in there). # gridsize # Size of the computational grid (gridsize by gridsize elements). Must be # a power of 2. # # Outputs: # wavefront # Variable in which the computed E-field at the final image plane is returned. # The field is sampled by "final_sampling_lam0" lambda_m/D over "nout" by "nout" # pixels. # sampling_m # The sampling at the final image plane in meters per pixel # # Optional keywords or switches: # optval # (Optional) Structure whose fields are values # that are passed to the prescription for use as the prescription desires. # # Revision history: # Written by <NAME> (Jet Propulsion Laboratory, California Inst. Technology), January 2020 # Translated to Python by <NAME> (JPL, CIT), February 2020. Added an option, # use_pr, to retrieve the E-field at the pupil before the focal plane mask. Also # added the vortex as the focal plane mask. 
##---------------------------------------------------------------------------------- import numpy as np #import matplotlib.pyplot as plt # For Debugging #from astropy.io import fits # For Debugging import proper # Use v3.2 or higher import falco # FALCO needed for propagation to/from vortex def habex(lambda_m, gridsize, PASSVALUE={'dummy':0}): nact = 64; #-- number of actuators across DM nact_across_pupil = 62; #-- number of actuators across pupil dm_xc = 31.5; #-- wavefront centered at corner of DM actuator (0,0 is center of 1st actuator) dm_yc = 31.5; dm_sampling = 0.4e-3; #-- DM actuator spacing (BMC) #-- default settings (override with optval) map_dir = '../maps/'; #-- directory containing optical surface error maps lambda0_um = 0.5; #-- default reference wavelength (center of bandpass) for star offsets & field stop size use_errors = 1; #-- 1 = use optical surface errors, 0 = none zindex = np.array([0,]) #-- vector of Zernike indices (Noll ordered) zval = np.array([0,]) #-- vector of Zernike coefficients (unobscured RMS wavefront in meters) xoffset = 0; #-- star X offset in lambda0/D units (must then provide lambda0_um) yoffset = 0; #-- star Y offset in lambda0/D units use_dm1 = 0; #-- use DM1 (if non-zero, must then provide pokes (meters) in "dm1" array) use_dm2 = 0; #-- use DM2 (if non-zero, must then provide pokes (meters) in "dm2" array) use_fpm = 1; #-- use focal plane mask (0 = no FPM) use_lyot_stop = 1; #-- use Lyot stop (0 = no stop) use_field_stop = 1; #-- use field stop (0 = no stop) field_stop_radius = 25.0; #-- field stop radius in lam0/D final_sampling_lam0 = 0.2; #-- sampling at final image plane in lam0/D nout = 300; #-- output field size (nout x nout pixels) normLyotDiam = 0.95; #-- Lyot stop outer diameter normalized to the beam diameter vortexCharge = 6; #-- charge of the vortex focal plane mask pupil_diam_pix = nact_across_pupil * 7 #-- define sampling of pupil based on having 7 pixels across each DM actuator pr_pupil_diam_pix = 
pupil_diam_pix; #-- define sampling of pupil used for flattening phase with the DMs use_pr = False #-- whether to return a fake phase retrieval of the pupil rather than the focal plane #-- override defaults using values passed using optval structure if 'PASSVALUE' in locals(): if 'lam0' in PASSVALUE: lamba0_um = PASSVALUE['lam0'] if 'lambda0_um' in PASSVALUE: lambda0_um = PASSVALUE['lambda0_um'] if 'use_errors' in PASSVALUE: use_errors = PASSVALUE['use_errors'] if 'zindex' in PASSVALUE: zindex = PASSVALUE['zindex'] if 'zval' in PASSVALUE: zval = PASSVALUE['zval'] if 'xoffset' in PASSVALUE: xoffset = PASSVALUE['xoffset'] if 'yoffset' in PASSVALUE: yoffset = PASSVALUE['yoffset'] if 'use_dm1' in PASSVALUE: use_dm1 = PASSVALUE['use_dm1'] if 'dm1' in PASSVALUE: dm1 = PASSVALUE['dm1'] if 'use_dm2' in PASSVALUE: use_dm2 = PASSVALUE['use_dm2'] if 'dm2' in PASSVALUE: dm2 = PASSVALUE['dm2'] if 'use_fpm' in PASSVALUE: use_fpm = PASSVALUE['use_fpm'] if 'use_lyot_stop' in PASSVALUE: use_lyot_stop = PASSVALUE['use_lyot_stop'] if 'use_field_stop' in PASSVALUE: use_field_stop = PASSVALUE['use_field_stop'] if 'field_stop_radius' in PASSVALUE: field_stop_radius = PASSVALUE['field_stop_radius'] if 'final_sampling_lam0' in PASSVALUE: final_sampling_lam0 = PASSVALUE['final_sampling_lam0'] if 'nout' in PASSVALUE: nout = PASSVALUE['nout'] if 'normLyotDiam' in PASSVALUE: normLyotDiam = PASSVALUE['normLyotDiam'] if 'vortexCharge' in PASSVALUE: vortexCharge = PASSVALUE['vortexCharge'] if 'map_dir' in PASSVALUE: map_dir = PASSVALUE['map_dir'] if 'pupil_diam_pix' in PASSVALUE: pupil_diam_pix = PASSVALUE['pupil_diam_pix'] if 'pr_pupil_diam_pix' in PASSVALUE: pr_pupil_diam_pix = PASSVALUE['pr_pupil_diam_pix'] if 'use_pr' in PASSVALUE: use_pr = PASSVALUE['use_pr'] # Convert 0 and 1 to False and True use_errors = bool(use_errors) use_dm1 = bool(use_dm1) use_dm2 = bool(use_dm2) use_fpm = bool(use_fpm) use_lyot_stop = bool(use_lyot_stop) use_field_stop = bool(use_field_stop) use_pr = bool(use_pr) 
    # Normalize zindex to a 1-D array of Noll Zernike indices.
    if(np.isscalar(zindex)):
        zindex = np.asarray((zindex,))
    else:
        # Check if iterable. If not, then make an array containing 0
        # NOTE(review): bare `except:` swallows every error here, including
        # KeyboardInterrupt — consider `except (TypeError, IndexError):`.
        try:
            temp = zindex[0]
        except:
            zindex = np.array([0])

    lambda0_m = lambda0_um * 1.0e-6;        # reference wavelength in meters
    pupil_ratio = pupil_diam_pix / float(gridsize)   # fraction of the grid spanned by the pupil

    #-- define optical prescription (distances, focal lengths)
    # All distances and focal lengths are in meters.
    diam = 4.00;                            # primary mirror diameter
    r_pri = 19.8;                           # primary radius of curvature
    h_pri = 2.5;
    z_pri = h_pri**2 / (2*r_pri)
    fl_pri = np.sqrt(h_pri**2 + (r_pri/2-z_pri)**2)   #-- effective focal length of primary as a pure parabola
    d_pri_sec = 9.172532289071727;          # primary -> secondary separation
    d_focus_sec = fl_pri - d_pri_sec;
    d_sec_focus = 7.979857207574376844;
    fl_sec = 1 / (1/d_sec_focus - 1/d_focus_sec)      # thin-lens relation for the secondary
    d_sec_m3 = 9.076690863872008;
    fl_m3 = d_sec_m3 - d_sec_focus;
    d_m3_fold = 0.654597300210990;
    d_fold_fsm = 0.577743120280288;
    d_fsm_dichroic = 0.1950;
    d_dichroic_m4 = 0.450;
    fl_m4 = 0.5075;
    d_m4_m5 = 0.762954002022743;
    fl_m5 = d_m4_m5 - fl_m4;
    d_m5_dm1 = 0.220615776458241;
    d_dm1_dm2 = 0.32;
    d_dm2_qwp = 0.32 + 0.157485214529470;
    fl_m6 = 1.029143136045496931;
    d_qwp_m6 = fl_m6 - (d_dm1_dm2 + d_dm2_qwp)
    d_m6_fpm = fl_m6;                       # M6 images the FPM at its focal distance
    d_fpm_m7 = 0.255580492381039;
    fl_m7 = d_fpm_m7;
    d_m7_lyotstop = fl_m7;
    d_lyotstop_m8 = 0.2536;
    fl_m8 = d_lyotstop_m8;
    d_m8_fieldstop = fl_m8;
    d_fieldstop_m9 = d_m8_fieldstop;
    fl_m9 = d_fieldstop_m9;
    d_m9_filter = 0.296399999724129;
    d_filter_m10 = 0.462615469378302;
    fl_m10 = 0.503971038519431261;
    d_m10_ccd = fl_m10;

    # Initialize the PROPER wavefront at the entrance pupil.
    wavefront = proper.prop_begin(diam, lambda_m, gridsize, pupil_diam_pix/gridsize)
    proper.prop_circular_aperture(wavefront, diam/2)
    if not zindex[0] == 0: proper.prop_zernikes(wavefront, zindex, zval)   #-- optionally add Zernikes

    if( (xoffset != 0) or (yoffset != 0) ):
        #-- star X,Y offset in lam0/D
        # Scale the offsets from the reference wavelength to the current one.
        xoffset_lam = xoffset * lambda0_m / lambda_m;
        yoffset_lam = yoffset * lambda0_m / lambda_m;
        # Pupil-plane coordinate, normalized to the pupil radius.
        u = (np.arange(gridsize)-gridsize/2.) / (pupil_diam_pix/2.)
        # IDL version: u = (dindgen(gridsize)-gridsize/2) / (pupil_diam_pix/2)
        # Apply a pupil-plane tilt to produce the focal-plane offset.
        xtilt = np.exp( 1j * np.pi * u * xoffset_lam )
        ytilt = np.exp( 1j * np.pi * u * yoffset_lam )
        proper.prop_multiply(wavefront, ytilt.reshape((gridsize, 1)) @ xtilt.reshape((1, gridsize)))
        # IDL version: proper.prop_multiply, wavefront, xtilt # ytilt

    # Primary mirror
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_PRIMARY_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_pri)
    proper.prop_define_entrance(wavefront)

    proper.prop_propagate(wavefront, d_pri_sec, 'secondary')
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_SECONDARY_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_sec)

    proper.prop_propagate(wavefront, d_sec_m3, 'M3')
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M3_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_m3)

    proper.prop_propagate(wavefront, d_m3_fold, 'fold')
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_FOLD1_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_fold_fsm, 'FSM')
    #-- pupil at fast steering mirror (interface with telescope)
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_FSM_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_fsm_dichroic, 'dichroic')
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_DICHROIC_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_dichroic_m4, 'M4')
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M4_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_m4)

    proper.prop_propagate(wavefront, d_m4_m5, 'M5')
    if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M5_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_m5)

    # DM1 (first deformable mirror); pokes come from the PASSVALUE 'dm1' array.
    proper.prop_propagate(wavefront, d_m5_dm1, 'DM1')
    if(use_dm1): proper.prop_dm(wavefront, dm1, dm_xc, dm_yc, dm_sampling)
    if(use_errors):
proper.prop_errormap(wavefront, map_dir+'habex_cycle1_DM1_phase_error.fits', WAVEFRONT=True) proper.prop_propagate(wavefront, d_dm1_dm2, 'DM2') if(use_dm2): proper.prop_dm(wavefront, dm2, dm_xc, dm_yc, dm_sampling) if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_DM2_phase_error.fits', WAVEFRONT=True) proper.prop_propagate(wavefront, d_dm2_qwp, 'QWP') #-- quarter-wave plate if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_QWP1_phase_error.fits', WAVEFRONT=True) proper.prop_propagate(wavefront, d_qwp_m6, 'M6') if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M6_phase_error.fits', WAVEFRONT=True) proper.prop_lens(wavefront, fl_m6) proper.prop_propagate(wavefront, d_m6_fpm) if not use_pr: # if use_fpm: # fpm = proper.prop_8th_order_mask(wavefront, 4.0, CIRCULAR=True) #--Band-limited mask if use_fpm: apRad = pupil_diam_pix/2. inVal = 0.3 #-- found empirically outVal = 5 #-- found empirically # 1) IFFT to previous pupil from FPM's plane # 2) Use propcustom_mft_Pup2Vortex2Pup() to go to Lyot plane # 3) IFFT to FPM's focal plane EpupPre = np.fft.ifftshift(np.fft.ifft2(wavefront.wfarr))*gridsize # wavefront.wf is already fftshifted EpupPost = falco.prop.mft_p2v2p(EpupPre, vortexCharge, apRad, inVal, outVal) wavefront.wfarr = np.fft.ifft2(np.fft.fftshift(EpupPost))*gridsize proper.prop_propagate(wavefront, d_fpm_m7, 'M7') if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M7_phase_error.fits', WAVEFRONT=True) proper.prop_lens(wavefront, fl_m7) proper.prop_propagate(wavefront, d_m7_lyotstop, 'Lyot stop') if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_QWP2_phase_error.fits', WAVEFRONT=True) if(use_lyot_stop): proper.prop_circular_aperture(wavefront, normLyotDiam, NORM=True) proper.prop_propagate(wavefront, d_lyotstop_m8, 'M8') if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M8_phase_error.fits', WAVEFRONT=True) proper.prop_lens(wavefront, fl_m8) 
proper.prop_propagate(wavefront, proper.prop_get_distancetofocus(wavefront), 'field stop') if(use_field_stop): r_stop = field_stop_radius * lambda0_m / lambda_m; proper.prop_circular_aperture(wavefront, r_stop/pupil_ratio*proper.prop_get_sampling(wavefront)) proper.prop_propagate(wavefront, d_fieldstop_m9, 'M9') if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M9_phase_error.fits', WAVEFRONT=True) proper.prop_lens(wavefront, fl_m9) proper.prop_propagate(wavefront, d_m9_filter, 'filter') if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_FILTER_phase_error.fits', WAVEFRONT=True) proper.prop_propagate(wavefront, d_filter_m10, 'M10') if(use_errors): proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M10_phase_error.fits', WAVEFRONT=True) proper.prop_lens(wavefront,
import datetime

import reversion
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Q
from django.contrib.gis.geos import Point
from django.contrib.gis.measure import Distance
from django.core.paginator import Paginator
from django.forms import modelform_factory
from django.http import Http404, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.views import generic
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from django_registration.backends.activation.views import (
    ActivationView,
    RegistrationView,
)

from core import mailer
from erp.models import Accessibilite, Activite, Commune, Erp, Vote
from erp.provider import search as provider_search
from erp import forms
from erp import schema
from erp import serializers
from erp import versioning
from subscription.models import ErpSubscription


def handler403(request, exception):
    "Project-wide 403 handler: render the permission-denied template."
    return render(
        request,
        "403.html",
        context={"exception": exception},
        status=403,
    )


def handler404(request, exception):
    "Project-wide 404 handler: render the not-found template."
    return render(
        request,
        "404.html",
        context={"exception": exception},
        status=404,
    )


def handler500(request):
    "Project-wide 500 handler: render the server-error template."
    return render(request, "500.html", context={}, status=500)


def make_geojson(erp_qs):
    "Take an Erp queryset and serialize it to geojson."
serializer = serializers.SpecialErpSerializer() return serializer.serialize( erp_qs, geometry_field="geom", use_natural_foreign_keys=True, fields=[ "pk", "nom", "activite__nom", "activite__vector_icon", "adresse", "absolute_url", "contrib_localisation_url", "has_accessibilite", ], ) def home(request): return render(request, "index.html") def challenge_ddt(request): start_date = datetime.datetime(2021, 2, 22, 9) stop_date = datetime.datetime(2021, 3, 31, 23, 59, 59) today = datetime.datetime.today() filters = Q( erp__published=True, erp__accessibilite__isnull=False, erp__geom__isnull=False, erp__user__email__contains="rhone.gouv.fr", erp__created_at__gte=start_date, erp__created_at__lt=stop_date, ) excludes = Q(erp__user__username="julien") top_contribs = ( get_user_model() .objects.annotate( erp_count_published=Count( "erp", filter=filters, excude=excludes, distinct=True, ) ) .filter(filters) .exclude(excludes) .filter(erp_count_published__gt=0) .order_by("-erp_count_published") ) return render( request, "challenge/podium.html", context={ "start_date": start_date, "stop_date": stop_date, "today": today, "top_contribs": top_contribs, }, ) def communes(request): communes_qs = Commune.objects.erp_stats()[:12] latest = ( Erp.objects.select_related("activite", "commune_ext") .published() .order_by("-created_at")[:17] ) return render( request, "communes.html", context={"communes": communes_qs, "latest": latest}, ) def search(request): q = request.GET.get("q") localize = request.GET.get("localize") paginator = pager = None pager_base_url = None page_number = 1 lat = None lon = None geojson_list = None if q and len(q) > 0: erp_qs = ( Erp.objects.select_related("accessibilite", "activite", "commune_ext") .published() .search(q) ) if localize == "1": try: (lat, lon) = ( float(request.GET.get("lat")), float(request.GET.get("lon")), ) erp_qs = erp_qs.nearest((lat, lon)).order_by("distance") except ValueError: pass paginator = Paginator(erp_qs, 10) page_number = 
request.GET.get("page", 1) pager = paginator.get_page(page_number) pager_base_url = ( f"?q={q or ''}&localize={localize or ''}&lat={lat or ''}&lon={lon or ''}" ) geojson_list = make_geojson(pager) return render( request, "search/results.html", context={ "paginator": paginator, "pager": pager, "pager_base_url": pager_base_url, "page_number": page_number, "localize": localize, "lat": request.GET.get("lat"), "lon": request.GET.get("lon"), "search": q, "geojson_list": geojson_list, "commune_json": None, "around": None, # XXX: (lat, lon) }, ) class CustomActivationCompleteView(TemplateView): def get_context_data(self, **kwargs): "Spread the next redirect value from qs param to template context key." context = super().get_context_data(**kwargs) context["next"] = self.request.GET.get("next", "") return context class CustomRegistrationView(RegistrationView): def get_email_context(self, activation_key): "Add the next redirect value to the email template context." context = super().get_email_context(activation_key) context["next"] = self.request.GET.get("next", "") return context class CustomActivationView(ActivationView): def get_success_url(self, user=None): "Add the next redirect path to the success redirect url" url = super().get_success_url(user) next = self.request.GET.get("next", "") if not next and self.extra_context and "next" in self.extra_context: next = self.extra_context.get("next", "") return f"{url}?next={next}" @cache_page(60 * 15) def autocomplete(request): suggestions = [] q = request.GET.get("q", "") commune_slug = request.GET.get("commune_slug") if len(q) < 3: return JsonResponse({"suggestions": suggestions}) qs = Erp.objects.published() if commune_slug: qs = qs.filter(commune_ext__slug=commune_slug) qs = qs.search(q)[:7] for erp in qs: suggestions.append( { "value": erp.nom + ", " + erp.adresse, "data": { "score": erp.rank, "activite": erp.activite and erp.activite.slug, "url": erp.get_absolute_url(), }, } ) suggestions = sorted(suggestions, key=lambda 
s: s["data"]["score"], reverse=True) return JsonResponse({"suggestions": suggestions}) class EditorialView(TemplateView): template_name = "editorial/base.html" def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) return context class BaseListView(generic.ListView): model = Erp queryset = Erp.objects.select_related( "activite", "accessibilite", "commune_ext", "statuscheck" ).published() _commune = None @property def around(self): raw = self.request.GET.get("around") if raw is None: return try: rlon, rlat = raw.split(",") return (float(rlon), float(rlat)) except (IndexError, ValueError, TypeError): return None @property def commune(self): if self._commune is None: self._commune = get_object_or_404( Commune.objects.select_related(), slug=self.kwargs["commune"] ) return self._commune @property def search_terms(self): q = self.request.GET.get("q", "").strip() if len(q) >= 2: return q def get_queryset(self): queryset = super().get_queryset() queryset = queryset.in_commune(self.commune) if self.search_terms is not None: queryset = queryset.search(self.search_terms) else: if "activite_slug" in self.kwargs: if self.kwargs["activite_slug"] == "non-categorises": queryset = queryset.filter(activite__isnull=True) else: queryset = queryset.filter( activite__slug=self.kwargs["activite_slug"] ) queryset = queryset.order_by("nom") if self.around is not None: queryset = queryset.nearest(self.around) # We can't hammer the pages with too many entries, hard-limiting here return queryset[:500] class App(BaseListView): "Static, template-based Web application views." 
template_name = "erps/commune.html" def get(self, request, *args, **kwargs): if self.search_terms is not None and self.request.GET.get("scope") == "country": return redirect(reverse("home") + "?q=" + self.search_terms) return super().get(request, *args, **kwargs) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["around"] = ( list(self.around) if self.around is not None else self.around ) context["commune"] = self.commune context["commune_json"] = self.commune.toTemplateJson() context["search_terms"] = self.search_terms context["activites"] = Activite.objects.in_commune( self.commune ).with_erp_counts() context["activite_slug"] = self.kwargs.get("activite_slug") if ( "activite_slug" in self.kwargs and self.kwargs["activite_slug"] != "non-categorises" ): context["current_activite"] = get_object_or_404( Activite, slug=self.kwargs["activite_slug"] ) if "erp_slug" in self.kwargs: context["user_is_subscribed"] = False erp = get_object_or_404( Erp.objects.select_related( "accessibilite", "activite", "commune_ext", "user", "statuscheck" ) .published() .with_votes(), slug=self.kwargs["erp_slug"], ) context["erp"] = erp if erp.has_accessibilite(): form = forms.ViewAccessibiliteForm(instance=erp.accessibilite) context["accessibilite_data"] = form.get_accessibilite_data() if self.request.user.is_authenticated: context["user_vote"] = Vote.objects.filter( user=self.request.user, erp=erp ).first() context["user_is_subscribed"] = erp.is_subscribed_by(self.request.user) context["object_list"] = ( Erp.objects.select_related("accessibilite", "commune_ext", "activite") .published() .nearest([erp.geom.coords[1], erp.geom.coords[0]]) .filter(distance__lt=Distance(km=20))[:16] ) context["geojson_list"] = make_geojson(context["object_list"]) return context @login_required def vote(request, erp_slug): if not request.user.is_active: raise Http404("Only active users can vote") erp = get_object_or_404( Erp, slug=erp_slug, published=True, 
accessibilite__isnull=False ) if request.method == "POST": action = request.POST.get("action") comment = request.POST.get("comment") if action == "DOWN" else None vote = erp.vote(request.user, action, comment=comment) if vote: mailer.mail_admins( f"Vote {'positif' if vote.value == 1 else 'négatif'} pour {erp.nom} ({erp.commune_ext.nom})", "mail/vote_notification.txt", { "erp": erp, "vote": vote, "SITE_NAME": settings.SITE_NAME, "SITE_ROOT_URL": settings.SITE_ROOT_URL, }, ) messages.add_message( request, messages.SUCCESS, "Votre vote a été enregistré." ) return redirect(erp.get_absolute_url()) @login_required def mon_compte(request): return render(request, "compte/index.html") @login_required def mon_identifiant(request): if request.method == "POST": form = forms.UsernameChangeForm(request.POST) if form.is_valid(): username = form.cleaned_data["username"] user = get_user_model().objects.get(id=request.user.id) old_username = user.username user.username = username user.save() LogEntry.objects.log_action( user_id=request.user.id, content_type_id=ContentType.objects.get_for_model(user).pk, object_id=user.id, object_repr=username, action_flag=CHANGE, change_message=f"Changement de nom d'utilisateur (avant: {old_username})", ) messages.add_message( request, messages.SUCCESS, f"Votre nom d'utilisateur a été changé en {user.username}.", ) return redirect("mon_identifiant") else: form = forms.UsernameChangeForm(initial={"username": request.user.username}) return render( request, "compte/mon_identifiant.html", context={"form": form}, ) @login_required def mes_erps(request): qs = Erp.objects.select_related("accessibilite", "activite", "commune_ext").filter( user_id=request.user.pk ) published_qs = qs.published() non_published_qs = qs.not_published() erp_total_count = qs.count() erp_published_count = published_qs.count() erp_non_published_count = non_published_qs.count() published = request.GET.get("published") if published == "1": qs = published_qs elif published == "0": qs = 
non_published_qs qs = qs.filter(user_id=request.user.pk).order_by("-updated_at") paginator = Paginator(qs, 10) page_number = request.GET.get("page", 1) pager = paginator.get_page(page_number) return render( request, "compte/mes_erps.html", context={ "erp_total_count": erp_total_count, "erp_published_count": erp_published_count, "erp_non_published_count": erp_non_published_count, "pager": pager, "pager_base_url": f"?published={published or ''}", "filter_published": published, }, ) def _mes_contributions_view(request, qs, recues=False): paginator = Paginator(qs, 10) page_number = request.GET.get("page", 1) pager = paginator.get_page(page_number) return render( request, "compte/mes_contributions.html", context={"pager": pager, "recues": recues}, ) @login_required def mes_contributions(request): qs = versioning.get_user_contributions(request.user) return _mes_contributions_view(request, qs) @login_required def mes_contributions_recues(request): qs = versioning.get_user_contributions_recues(request.user) return _mes_contributions_view(request, qs, recues=True) @login_required def mes_abonnements(request): qs = ( ErpSubscription.objects.select_related( "erp", "erp__activite", "erp__commune_ext", "erp__user" ) .filter(user=request.user) .order_by("-updated_at") ) paginator = Paginator(qs, 10) page_number = request.GET.get("page", 1) pager = paginator.get_page(page_number) return render( request, "compte/mes_abonnements.html", context={"pager": pager, "pager_base_url": "?1"}, ) @login_required @reversion.views.create_revision() def contrib_delete(request, erp_slug): erp = get_object_or_404(Erp, slug=erp_slug, user=request.user) if request.method == "POST": form = forms.PublicErpDeleteForm(request.POST) if form.is_valid(): erp.delete() messages.add_message( request, messages.SUCCESS, "L'établissement a été supprimé." 
) return redirect("mes_erps") else: form = forms.PublicErpDeleteForm() return render( request, template_name="contrib/delete.html", context={"erp": erp, "form": form}, ) @login_required def contrib_start(request): form = forms.ProviderGlobalSearchForm( initial={"code_insee": request.GET.get("code_insee")} ) return render( request, template_name="contrib/0-start.html", context={"step": 1, "form": form}, ) @login_required def contrib_global_search(request): results = error = None form = forms.ProviderGlobalSearchForm(request.GET if request.GET else None) if form.is_valid(): try: results = provider_search.global_search( form.cleaned_data["search"], form.cleaned_data["code_insee"], ) except RuntimeError as err: error = err return render( request, template_name="contrib/0a-search_results.html", context={ "step": 1, "results": results, "form": form, "form_type": "global", "error": error, }, ) @login_required @reversion.views.create_revision() def contrib_admin_infos(request): data = None data_error = None existing_matches = None if request.method == "POST": form = forms.PublicErpAdminInfosForm(request.POST) if form.is_valid(): existing_matches = Erp.objects.find_existing_matches( form.cleaned_data.get("nom"), form.cleaned_data.get("geom") ) if len(existing_matches) == 0 or request.POST.get("force") == "1": erp = form.save(commit=False) erp.published = False erp.user = request.user erp.save() messages.add_message( request, messages.SUCCESS, "Les données ont été enregistrées." 
) return redirect("contrib_localisation", erp_slug=erp.slug) else: encoded_data = request.GET.get("data") if encoded_data is not None: try: data = serializers.decode_provider_data(encoded_data) except RuntimeError as err: data_error = err form = forms.PublicErpAdminInfosForm(data) return render( request, template_name="contrib/1-admin-infos.html", context={ "step": 1, "form": form, "has_data": data is not None, "data_error": data_error, "existing_matches": existing_matches, }, ) @login_required @reversion.views.create_revision() def
3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,7]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([1,2,2]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([1,2,1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num 
= N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,5]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([2,1,1]) trans_den = N.array([3,3,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([2,1,-1]) trans_den = N.array([3,3,6]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(167, 'R -3 c :H', transformations) space_groups[167] = sg space_groups['R -3 c :H'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(168, 'P 6', transformations) space_groups[168] = sg space_groups['P 6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(169, 'P 61', transformations) space_groups[169] = sg space_groups['P 61'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,5]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,6]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(170, 'P 65', transformations) space_groups[170] = sg space_groups['P 65'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) 
transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(171, 'P 62', transformations) space_groups[171] = sg space_groups['P 62'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,2]) trans_den = N.array([1,1,3]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(172, 'P 64', transformations) space_groups[172] = sg space_groups['P 64'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(173, 'P 63', transformations) space_groups[173] = sg space_groups['P 63'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(174, 'P -6', transformations) space_groups[174] = sg space_groups['P -6'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) 
rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(175, 'P 6/m', transformations) space_groups[175] = sg space_groups['P 6/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) 
trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,0,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,0,0,0,1,0,0,0,-1]) rot.shape = (3, 3) trans_num = N.array([0,0,-1]) trans_den = N.array([1,1,2]) transformations.append((rot, trans_num, trans_den)) sg = SpaceGroup(176, 'P 63/m', transformations) space_groups[176] = sg space_groups['P 63/m'] = sg transformations = [] rot = N.array([1,0,0,0,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = 
N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,1,0,-1,1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([0,-1,0,1,-1,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([-1,1,0,-1,0,0,0,0,1]) rot.shape = (3, 3) trans_num = N.array([0,0,0]) trans_den = N.array([1,1,1]) transformations.append((rot, trans_num, trans_den)) rot = N.array([1,-1,0,0,-1,0,0,0,-1]) rot.shape = (3, 3) trans_num
# Copyright 2019 Rackspace US Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from tempest.lib import exceptions from octavia_tempest_plugin.common.decorators import skip_if_not_implemented from octavia_tempest_plugin.services.load_balancer.v2 import base_client LOG = logging.getLogger(__name__) Unset = base_client.Unset class FlavorClient(base_client.BaseLBaaSClient): root_tag = 'flavor' list_root_tag = 'flavors' @skip_if_not_implemented def create_flavor(self, name, flavor_profile_id, description=Unset, enabled=Unset, return_object_only=True): """Create a flavor. :param name: Human-readable name of the resource. :param flavor_profile_id: The ID of the associated flavor profile. :param description: A human-readable description for the resource. :param enabled: If the resource is available for use. The default is True. 
:raises AssertionError: if the expected_code isn't a valid http success response code :raises BadRequest: If a 400 response code is received :raises Conflict: If a 409 response code is received :raises Forbidden: If a 403 response code is received :raises Gone: If a 410 response code is received :raises InvalidContentType: If a 415 response code is received :raises InvalidHTTPResponseBody: The response body wasn't valid JSON :raises InvalidHttpSuccessCode: if the read code isn't an expected http success code :raises NotFound: If a 404 response code is received :raises NotImplemented: If a 501 response code is received :raises OverLimit: If a 413 response code is received and over_limit is not in the response body :raises RateLimitExceeded: If a 413 response code is received and over_limit is in the response body :raises ServerFault: If a 500 response code is received :raises Unauthorized: If a 401 response code is received :raises UnexpectedContentType: If the content-type of the response isn't an expect type :raises UnexpectedResponseCode: If a response code above 400 is received and it doesn't fall into any of the handled checks :raises UnprocessableEntity: If a 422 response code is received and couldn't be parsed :returns: A flavor object. """ kwargs = {arg: value for arg, value in locals().items() if arg != 'self' and value is not Unset} return self._create_object(**kwargs) @skip_if_not_implemented def show_flavor(self, flavor_id, query_params=None, return_object_only=True): """Get the flavor details. :param flavor_id: The flavor ID to query. :param query_params: The optional query parameters to append to the request. Ex. fields=id&fields=name :param return_object_only: If True, the response returns the object inside the root tag. False returns the full response from the API. 
:raises AssertionError: if the expected_code isn't a valid http success response code :raises BadRequest: If a 400 response code is received :raises Conflict: If a 409 response code is received :raises Forbidden: If a 403 response code is received :raises Gone: If a 410 response code is received :raises InvalidContentType: If a 415 response code is received :raises InvalidHTTPResponseBody: The response body wasn't valid JSON :raises InvalidHttpSuccessCode: if the read code isn't an expected http success code :raises NotFound: If a 404 response code is received :raises NotImplemented: If a 501 response code is received :raises OverLimit: If a 413 response code is received and over_limit is not in the response body :raises RateLimitExceeded: If a 413 response code is received and over_limit is in the response body :raises ServerFault: If a 500 response code is received :raises Unauthorized: If a 401 response code is received :raises UnexpectedContentType: If the content-type of the response isn't an expect type :raises UnexpectedResponseCode: If a response code above 400 is received and it doesn't fall into any of the handled checks :raises UnprocessableEntity: If a 422 response code is received and couldn't be parsed :returns: A flavor object. """ return self._show_object(obj_id=flavor_id, query_params=query_params, return_object_only=return_object_only) @skip_if_not_implemented def list_flavors(self, query_params=None, return_object_only=True): """Get a list of flavor objects. :param query_params: The optional query parameters to append to the request. Ex. fields=id&fields=name :param return_object_only: If True, the response returns the object inside the root tag. False returns the full response from the API. 
:raises AssertionError: if the expected_code isn't a valid http success response code :raises BadRequest: If a 400 response code is received :raises Conflict: If a 409 response code is received :raises Forbidden: If a 403 response code is received :raises Gone: If a 410 response code is received :raises InvalidContentType: If a 415 response code is received :raises InvalidHTTPResponseBody: The response body wasn't valid JSON :raises InvalidHttpSuccessCode: if the read code isn't an expected http success code :raises NotFound: If a 404 response code is received :raises NotImplemented: If a 501 response code is received :raises OverLimit: If a 413 response code is received and over_limit is not in the response body :raises RateLimitExceeded: If a 413 response code is received and over_limit is in the response body :raises ServerFault: If a 500 response code is received :raises Unauthorized: If a 401 response code is received :raises UnexpectedContentType: If the content-type of the response isn't an expect type :raises UnexpectedResponseCode: If a response code above 400 is received and it doesn't fall into any of the handled checks :raises UnprocessableEntity: If a 422 response code is received and couldn't be parsed :returns: A list of flavor objects. """ return self._list_objects(query_params=query_params, return_object_only=return_object_only) @skip_if_not_implemented def update_flavor(self, flavor_id, name=Unset, description=Unset, enabled=Unset, return_object_only=True): """Update a flavor. :param flavor_id: The flavor ID to update. :param name: Human-readable name of the resource. :param description: A human-readable description for the resource. :param enabled: If the resource is available for use. 
:raises AssertionError: if the expected_code isn't a valid http success response code :raises BadRequest: If a 400 response code is received :raises Conflict: If a 409 response code is received :raises Forbidden: If a 403 response code is received :raises Gone: If a 410 response code is received :raises InvalidContentType: If a 415 response code is received :raises InvalidHTTPResponseBody: The response body wasn't valid JSON :raises InvalidHttpSuccessCode: if the read code isn't an expected http success code :raises NotFound: If a 404 response code is received :raises NotImplemented: If a 501 response code is received :raises OverLimit: If a 413 response code is received and over_limit is not in the response body :raises RateLimitExceeded: If a 413 response code is received and over_limit is in the response body :raises ServerFault: If a 500 response code is received :raises Unauthorized: If a 401 response code is received :raises UnexpectedContentType: If the content-type of the response isn't an expect type :raises UnexpectedResponseCode: If a response code above 400 is received and it doesn't fall into any of the handled checks :raises UnprocessableEntity: If a 422 response code is received and couldn't be parsed :returns: A flavor object. """ kwargs = {arg: value for arg, value in locals().items() if arg != 'self' and value is not Unset} kwargs['obj_id'] = kwargs.pop('flavor_id') return self._update_object(**kwargs) @skip_if_not_implemented def delete_flavor(self, flavor_id, ignore_errors=False): """Delete a flavor. :param flavor_id: The flavor ID to delete. :param ignore_errors: True if errors should be ignored. 
:raises AssertionError: if the expected_code isn't a valid http success response code :raises BadRequest: If a 400 response code is received :raises Conflict: If a 409 response code is received :raises Forbidden: If a 403 response code is received :raises Gone: If a 410 response code is received :raises InvalidContentType: If a 415 response code is received :raises InvalidHTTPResponseBody: The response body wasn't valid JSON :raises InvalidHttpSuccessCode: if the read code isn't an expected http success code :raises NotFound: If a 404 response code is received :raises NotImplemented: If a 501 response code is received :raises OverLimit: If a 413 response code is received and over_limit is not in the response body :raises RateLimitExceeded: If a 413 response code is received and over_limit
to create backup")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.config.get_wallet_path())) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return i = 1 while True: filename = "wallet_%d" % i if filename in os.listdir(wallet_folder): i += 1 else: break full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_master_public_keys) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) 
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) 
add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all reserved keywords in macOS using this as work around tools_menu.addAction(_("Electrum-Ciphscoin preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) #help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://github.com/ciphscoin/electrum-ciphscoin")) help_menu.addSeparator() #help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://github.com/ciphscoin/electrum-spacecoin")).setShortcut(QKeySequence.HelpContents) #self._auto_crash_reports = QAction(_("&Automated Crash Reports"), self, checkable=True) #self._auto_crash_reports.setChecked(self.config.get("show_crash_reporter", default=False)) #self._auto_crash_reports.triggered.connect(self.auto_crash_reports) #help_menu.addAction(self._auto_crash_reports) help_menu.addAction(_("&Report Bug"), self.show_report_bug) 
help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def auto_crash_reports(self, state): self.config.set_key("show_crash_reporter", state) self.setup_exception_hook() def donate_to_server(self): d = self.network.get_donation_address() if d: host = self.network.get_parameters()[0] self.pay_to_URI('ciphscoin:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Electrum-Ciphscoin", _("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum-Ciphscoin focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), "<a href=\"https://github.com/ciphscoin/electrum-ciphscoin/issues\">https://github.com/Ciphscoin/electrum-ciphscoin/issues</a><br/><br/>", _("Before reporting a bug, upgrade to the most recent version of Electrum-Ciphscoin (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="Electrum-Ciphscoin - " + _("Reporting Bugs")) def notify_transactions(self): if not self.network or not self.network.is_connected(): return self.print_error("Notifying GUI") if len(self.tx_notifications) > 0: # Combine the transactions if there are at least three num_txns = len(self.tx_notifications) if num_txns >= 3: total_amount = 0 for tx in self.tx_notifications: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if v 
> 0: total_amount += v self.notify(_("{} new transactions received: Total amount received in the new transactions {}") .format(num_txns, self.format_amount_and_units(total_amount))) self.tx_notifications = [] else: for tx in self.tx_notifications: if tx: self.tx_notifications.remove(tx) is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if v > 0: self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v))) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electrum-Ciphscoin", message, QIcon(":icons/electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("Electrum-Ciphscoin", message, QSystemTrayIcon.Information, 20000) # custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user def getOpenFileName(self, title, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def getSaveFileName(self, title, filename, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) path = os.path.join( directory, filename ) fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def connect_slots(self, sender): sender.timer_signal.connect(self.timer_actions) def timer_actions(self): # Note this runs in the GUI thread if self.need_update.is_set(): self.need_update.clear() self.update_wallet() # resolve aliases # FIXME this is a blocking network call that has a timeout of 5 sec self.payto_e.resolve() # update fee if self.require_fee_update: self.do_update_fee() self.require_fee_update = False def format_amount(self, x, is_diff=False, whitespaces=False): 
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces) def format_amount_and_units(self, amount): text = self.format_amount(amount) + ' '+ self.base_unit() x = self.fx.format_amount_and_units(amount) if self.fx else None if text and x: text += ' (%s)'%x return text def format_fee_rate(self, fee_rate): return '%s sat/kB' % round(fee_rate) def get_decimal_point(self): return self.decimal_point def base_unit(self): assert self.decimal_point in [2, 5, 8] if self.decimal_point == 2: return 'uCIPHS' if self.decimal_point == 5: return 'mCIPHS' if self.decimal_point == 8: return 'CIPHS' raise Exception('Unknown base unit') def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None or not self.network.is_running(): text = _("Offline") icon = QIcon(":icons/status_disconnected.png") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height # Server height can be 0 after 
switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. if not self.wallet.up_to_date or server_height == 0: text = _("Synchronizing...") icon = QIcon(":icons/status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = QIcon(":icons/status_lagging.png") else: c, u, x = self.wallet.get_balance() text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, True).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = QIcon(":icons/status_connected.png") else: icon = QIcon(":icons/status_connected_proxy.png") else: text = _("Not connected") icon = QIcon(":icons/status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self): self.history_list.update() self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.update_completions() def create_history_tab(self): from .history_list import HistoryList self.history_list = l = HistoryList(self) l.searchable_list = l l.setObjectName("history_container") toolbar = l.create_toolbar(self.config) toolbar_shown = self.config.get('show_toolbar_history', False) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . 
def show_transaction(self, tx, tx_desc = None):
    '''tx_desc is set only for txs created in the Send tab'''
    # Delegates to the module-level show_transaction() helper (same name as
    # this method; inside the body the name resolves to the module global),
    # which presumably builds and opens the transaction dialog for `tx`.
    show_transaction(tx, self, tx_desc)
<filename>environments/maze.py<gh_stars>1-10 from typing import List, Tuple import copy import math from termcolor import colored from gym.envs.classic_control import rendering import numpy as np from environments import DiscreteEnv SCREEN_WIDTH = 600 SCREEN_HEIGHT = 600 VIEWER = rendering.Viewer(SCREEN_WIDTH, SCREEN_HEIGHT) LEFT = 0 DOWN = 1 RIGHT = 2 UP = 3 MAZE_XS = np.array([ ['A', 'S', 'S', 'S', 'G'], ]) MAZE_SMALL = np.array([ ['A', 'W', 'S', 'S'], ['S', 'S', 'S', 'W'], ['W', 'S', 'S', 'W'], ['yT', 'S', 'S', 'G'], ]) MAZE = np.array([ ['A', 'W', 'H', 'S', 'S', 'W', 'W', 'mT'], ['S', 'W', 'W', 'W', 'S', 'G', 'W', 'S'], ['S', 'W', 'S', 'W', 'S', 'W', 'W', 'S'], ['S', 'S', 'S', 'W', 'S', 'S', 'S', 'S'], ['W', 'W', 'S', 'S', 'S', 'W', 'W', 'S'], ['S', 'S', 'S', 'W', 'W', 'W', 'S', 'S'], ['W', 'W', 'W', 'W', 'S', 'S', 'S', 'W'], ['yT', 'S', 'S', 'S', 'S', 'W', 'S', 'cT'], ]) MAZE_LARGE = np.array([ ['A', 'W', 'H', 'S', 'S', 'W', 'W', 'mT', 'W', 'W', 'S', 'H'], ['S', 'W', 'W', 'W', 'S', 'S', 'W', 'S', 'S', 'S', 'S', 'W'], ['S', 'W', 'S', 'W', 'S', 'W', 'W', 'S', 'W', 'S', 'W', 'W'], ['S', 'S', 'S', 'W', 'S', 'S', 'S', 'S', 'W', 'S', 'S', 'S'], ['W', 'W', 'S', 'S', 'S', 'W', 'W', 'S', 'W', 'W', 'W', 'S'], ['H', 'S', 'S', 'W', 'W', 'W', 'S', 'S', 'S', 'W', 'G', 'S'], ['W', 'W', 'W', 'W', 'S', 'S', 'S', 'W', 'S', 'W', 'W', 'S'], ['S', 'S', 'S', 'S', 'S', 'W', 'S', 'W', 'S', 'S', 'S', 'S'], ['S', 'W', 'W', 'W', 'S', 'W', 'S', 'W', 'S', 'W', 'W', 'W'], ['S', 'yT', 'W', 'S', 'S', 'W', 'S', 'S', 'S', 'S', 'S', 'S'], ['S', 'W', 'W', 'S', 'W', 'W', 'S', 'W', 'S', 'W', 'W', 'S'], ['S', 'S', 'S', 'S', 'W', 'S', 'S', 'W', 'cT', 'W', 'H', 'S'], ]) MAZE_XL = np.array([ ['A', 'W', 'S', 'W', 'W', 'W', 'S', 'S', 'S', 'S', 'S', 'S', 'W', 'S', 'S', 'S', 'S', 'S', 'W', 'mT'], ['S', 'W', 'S', 'W', 'S', 'W', 'S', 'W', 'W', 'W', 'W', 'S', 'S', 'S', 'W', 'W', 'W', 'S', 'W', 'S'], ['S', 'W', 'S', 'S', 'S', 'S', 'S', 'S', 'W', 'S', 'W', 'W', 'W', 'W', 'W', 'S', 'S', 'S', 'S', 'S'], ['S', 
def __init__(self, size: str = 'm', treasure_reward: float = 2.0, goal_hole_reward: float = 1.0):
    """
    Maze environment is a deterministic and discrete environment. The goal is to reach the
    goal-field, while collecting treasures and bypass holes. The Agent can move in four
    directions while walls and end of map restrict his movement. The goal and holes end the
    episode.

    :param size: There are different sizes possible for the maze, stored in MAZES global variable
    :param treasure_reward: Reward, if the agent collects a treasure
    :param goal_hole_reward: Reward scale for goal and hole, negative for the hole
    """
    self.size = size
    # Pristine copy of the chosen grid; reset() restores from it.
    self.init_maze = self._choose_maze(self.size)
    self.maze = self.init_maze.copy()
    self.maze_size = np.prod(self.maze.shape)
    self.treasure_reward = treasure_reward
    self.goal_hole_reward = goal_hole_reward
    self.agent_position = self.get_agent_position()
    # One flag per treasure colour; together they form 2**3 = 8 state variants.
    self.treasures_collected = {'yellow': False, 'magenta': False, 'cyan': False}
    self.mcts_moves = dict()  # for visualization of mcts moves

    # either one of the three treasures are collected or not,
    # this binary information is included in the information of the state
    # (hence the factor 8 = 2**3 on the raw cell count).
    nS = np.prod(self.maze.shape) * 8
    nA = 4

    # get transition probabilities
    # Deterministic environment: one transition entry per (state, action).
    P = dict()
    for s in range(nS):
        P[s] = {}
        P[s][LEFT] = self._calculate_transition_prob(s, LEFT)
        P[s][DOWN] = self._calculate_transition_prob(s, DOWN)
        P[s][RIGHT] = self._calculate_transition_prob(s, RIGHT)
        P[s][UP] = self._calculate_transition_prob(s, UP)

    # Calculate initial state distribution
    # We always start in state (0, 0)
    isd = np.zeros(nS)
    isd[0] = 1.0

    super(MazeEnv, self).__init__(nS, nA, P, isd)
def _draw_state_on_viewer(self) -> None:
    """Draw the current maze grid onto the module-level VIEWER.

    Each cell gets a white background square; walls, agent, treasures,
    goal and holes overlay their own shape on top.

    NOTE(review): geoms are added to the shared VIEWER via add_geom on
    every call, so repeated renders keep accumulating geometry — confirm
    whether the viewer is ever cleared between frames.
    """
    world_width = len(self.maze[0])
    # One screen-pixel scale per maze column; square cells assumed.
    scale = SCREEN_WIDTH / world_width
    for row_idx, row in enumerate(self.maze):
        row_idx = len(self.maze) - row_idx - 1  # inverted from bottom to top
        for column_idx, field in enumerate(row):
            # Cell corners (left, right, top, bottom) and center (cw, ch).
            l, r, t, b = scale * column_idx, scale * (column_idx + 1), scale * row_idx, scale * (row_idx + 1)
            cw = (l + r) / 2
            ch = (t + b) / 2
            # White background tile for every cell.
            pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            pole.set_color(1, 1, 1)
            VIEWER.add_geom(pole)
            # NOTE(review): for plain 'S' cells no branch below reassigns
            # `pole`, so the same white tile is added again by the final
            # add_geom — a harmless duplicate, but worth confirming.
            if field == 'W':
                pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
                pole.set_color(.2, .2, .9)
            if field == 'A':
                pole = rendering.make_circle((scale / 2) * 0.8)
                pole.add_attr(rendering.Transform(translation=(cw, ch)))
                pole.set_color(.8, .2, .2)
            # NOTE(review): 'yT' (yellow?) is drawn with (0,.7,.7) and
            # 'mT' (magenta?) with (.7,.7,0) — colours look swapped
            # relative to the treasure names; verify intent.
            if field == 'yT':
                pole = rendering.FilledPolygon([(l, ch), (cw, t), (r, ch), (cw, b)])
                pole.set_color(0, .7, .7)
            if field == 'cT':
                pole = rendering.FilledPolygon([(l, ch), (cw, t), (r, ch), (cw, b)])
                pole.set_color(.7, 0, .7)
            if field == 'mT':
                pole = rendering.FilledPolygon([(l, ch), (cw, t), (r, ch), (cw, b)])
                pole.set_color(.7, .7, 0)
            if field == 'G':
                pole = rendering.make_circle((scale / 2) * 0.8)
                pole.add_attr(rendering.Transform(translation=(cw, ch)))
                pole.set_color(.2, .9, .2)
            if field == 'H':
                pole = rendering.make_circle((scale / 2) * 0.8)
                pole.add_attr(rendering.Transform(translation=(cw, ch)))
                pole.set_color(.1, .1, .1)
            VIEWER.add_geom(pole)
c_ch), (n_cw, n_ch), width=line_thickness) line.set_color(.8, .2, .2) VIEWER.add_geom(line) return VIEWER.render() def recursive_draw_action_tree(self, node, env) -> None: world_width = len(self.maze[0]) scale = SCREEN_WIDTH / world_width cp_r, cp_c = self._index_to_coordinate(env.get_agent_position()) # current point row and col cp_r, cp_c cp_r = len(self.maze) - cp_r - 1 # inverted from bottom to top env.step(node.action) np_r, np_c = self._index_to_coordinate(env.get_agent_position()) # new point row and col np_r, np_c
_status = 39 elif cmd.count("resize") : _fmsg += "Cannot resize object." _status = 40 elif cmd.count("migrate") : _fmsg += "Cannot migrate object." _status = 41 elif cmd.count("protect") : _fmsg += "Cannot protect object." _status = 41 elif cmd.count("login") : _fmsg += "Cannot login to object." _status = 42 elif cmd.count("display") : _fmsg += "Cannot display object." _status = 43 else : _status = 0 _obj_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], _obj_type, \ True, obj_attr_list["name"], False) _preserved_command = obj_attr_list["command"] _preserved_command_originated = obj_attr_list["command_originated"] obj_attr_list.update(_obj_attr_list) obj_attr_list["command"] = _preserved_command obj_attr_list["command_originated"] = _preserved_command_originated if cmd.count("capture") : obj_attr_list["mgt_101_capture_request_originated"] = obj_attr_list["command_originated"] if obj_attr_list["capture_supported"].lower() != "true" : _msg = "Capture operations are not supported on \"" + _cloud_parameters["description"] + "\" clouds." _status = 9000 raise self.ObjectOperationException(_msg, _status) elif cmd.count("migrate") or cmd.count("protect") : op = cmd.split("-")[1] obj_attr_list["mgt_501_" + op + "_request_originated"] = obj_attr_list["command_originated"] vmc_attr = self.osci.get_object(obj_attr_list["cloud_name"], "VMC", False, obj_attr_list["vmc"], False) host_attr = self.osci.get_object(obj_attr_list["cloud_name"], "HOST", False, obj_attr_list["host"], False) dest_name = obj_attr_list["destination"] if not dest_name[:5] == "host_" : dest_name = "host_" + dest_name if host_attr["name"] == dest_name : _msg = "Source and destination hosts are the same. Try again." 
_status = 9421 raise self.ObjectOperationException(_msg, _status) if not self.osci.object_exists(obj_attr_list["cloud_name"], "HOST", dest_name, True) : _msg = "Destination HOST object for migration does not exist: " + obj_attr_list["destination"] _status = 9001 raise self.ObjectOperationException(_msg, _status) dest_host_attr = self.osci.get_object(obj_attr_list["cloud_name"], "HOST", True, dest_name, False) dest_vmc_attr = self.osci.get_object(obj_attr_list["cloud_name"], "VMC", False, dest_host_attr["vmc"], False) obj_attr_list["destination_vmc"] = dest_vmc_attr["uuid"] obj_attr_list["destination_vmc_cloud_ip"] = dest_vmc_attr["cloud_ip"] obj_attr_list["destination_vmc_name"] = dest_vmc_attr["name"] obj_attr_list["destination_vmc_pool"] = dest_vmc_attr["pool"] if vmc_attr[op + "_supported"].lower() != "true" : _msg = op + " operations are not supported on the source: " + vmc_attr["name"] _status = 9002 raise self.ObjectOperationException(_msg, _status) if dest_vmc_attr[op + "_supported"].lower() != "true" : _msg = op + " operations are not supported on the destination: " + dest_name _status = 9002 raise self.ObjectOperationException(_msg, _status) obj_attr_list["destination_name"] = dest_name obj_attr_list["destination_uuid"] = dest_host_attr["uuid"] obj_attr_list["destination"] = dest_name.split("host_", 1)[1] if obj_attr_list["interface"] == "default" : if dest_host_attr[op + "_interface"] != "default" : obj_attr_list["interface"] = dest_host_attr[op + "_interface"] else : obj_attr_list["interface"] = obj_attr_list["destination"] obj_attr_list["destination_ip"] = dest_host_attr["cloud_ip"] choices = obj_attr_list[op + "_protocol_supported"].split(",") obj_attr_list["choices"] = ",".join(choices) if obj_attr_list["protocol"] == "default" : if (op + "_protocol") in obj_attr_list : obj_attr_list["protocol"] = obj_attr_list[op + "_protocol"] else : cbwarn("default " + op + "_protocol not specified for this cloud." 
\ " Will assume defaults.", True) obj_attr_list["protocol"] = choices[0] if obj_attr_list["protocol"] not in choices : raise self.ObjectOperationException(op + " protocol " + obj_attr_list["protocol"] + \ " not supported. Please choose one of: " + \ " ".join(choices), 9003) elif cmd.count("runstate") : obj_attr_list["mgt_201_runstate_request_originated"] = obj_attr_list["command_originated"] if obj_attr_list["runstate_supported"].lower() != "true" : _msg = "Runstate operations are not supported on \"" + _cloud_parameters["description"] + "\" clouds." _status = 9000 raise self.ObjectOperationException(_msg, _status) elif cmd.count("resize") : obj_attr_list["mgt_301_resize_request_originated"] = obj_attr_list["command_originated"] if obj_attr_list["resize_supported"].lower() != "true" : _msg = "Resize operations are not supported on \"" + _cloud_parameters["description"] + "\" clouds." _status = 9000 raise self.ObjectOperationException(_msg, _status) elif cmd.count("detach") : cbdebug("Overriding 901 with " + str(obj_attr_list["command_originated"])) obj_attr_list["mgt_901_deprovisioning_request_originated"] = obj_attr_list["command_originated"] elif cmd.count("login") or cmd.count("display") : pass else : False ######### "ACTIVE" OPERATION OBJECT INITIALIZATION - END ######### ######### "PASSIVE" OPERATION OBJECT INITIALIZATION - BEGIN ######### elif cmd == "cloud-list" : _fmsg = "" _status = 0 elif cmd == "global-list" : _fmsg = "" _status = 0 elif cmd == "global-show" : _fmsg = "" _status = 0 elif cmd == "global-alter" : _fmsg = "" _status = 0 elif cmd == "wait-until" : _status = 0 # This is not an error. Do not delete. 
elif cmd.count("api") : _status = 0 elif cmd.count("list") or \ cmd.count("show") or \ cmd.count("alter") or \ cmd.count("stats") or \ cmd.count("expid-manage") : _cloud_parameters = self.get_cloud_parameters(obj_attr_list["cloud_name"]) obj_attr_list["username"] = _cloud_parameters["username"] if cmd.count("show") or cmd.count("alter") : if '_' not in obj_attr_list["name"] and '-' not in obj_attr_list["name"] and _obj_type.upper() != "VMC": obj_attr_list["name"] = _obj_type.lower() + "_" + obj_attr_list["name"] if _obj_type.lower().count("counter") : _status = self.get_counters(obj_attr_list["cloud_name"], obj_attr_list) if cmd.count("alter") : _status = self.get_counters(obj_attr_list["cloud_name"], obj_attr_list) if cmd.count("stats") : obj_attr_list["command"] = obj_attr_list["command"].replace("stats-get", "stats") obj_attr_list["regression"] = _cloud_parameters["regression"] obj_attr_list["cloud_name"] = _cloud_parameters["name"] obj_attr_list["cloud_model"] = _cloud_parameters["model"] obj_attr_list["all"] = _cloud_parameters["all"] _status = 0 ######### "PASSIVE" OPERATION OBJECT INITIALIZATION - END ######### else : _fmsg = "Unknown operation for " + _obj_type + " object: " _fmsg += cmd _msg = _fmsg _status = 35 obj_attr_list["command"] = obj_attr_list["command"].replace("-", '').replace("cloud", "cld") if _operation + "_parallel" not in obj_attr_list and not cmd.count("api-check") and not cmd.count("list") : msci = self.get_msci(obj_attr_list["cloud_name"]) if msci : self.get_counters(obj_attr_list["cloud_name"], obj_attr_list) self.record_management_metrics(obj_attr_list["cloud_name"], _obj_type, obj_attr_list, "trace") except self.osci.ObjectStoreMgdConnException as obj : for line in traceback.format_exc().splitlines() : cbwarn(line, True) _status = obj.status _fmsg = str(obj.msg) except self.ObjectOperationException as obj : for line in traceback.format_exc().splitlines() : cbwarn(line, True) _status = obj.status _fmsg = str(obj.msg) except Exception 
@trace
def pre_populate_host_info(self, obj_attr_list) :
    '''
    Copy the connection coordinates of the shared support services
    (filestore, VPN, metric store, log store, object store and API),
    recorded as GLOBAL objects for this cloud, into a HOST object's
    attribute list, and derive the remote directory layout from the
    login name.

    :param obj_attr_list: HOST attribute dictionary, updated in place.
    :returns: True
    '''
    _filestor_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
                                               "GLOBAL", False, "filestore", \
                                               False)

    _vpn_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
                                          "GLOBAL", False, "vpn", \
                                          False)

    _metric_store_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
                                                   "GLOBAL", False, \
                                                   "metricstore", False)

    _log_store = self.osci.get_object(obj_attr_list["cloud_name"], \
                                      "GLOBAL", False, \
                                      "logstore", False)

    _api_attr_list = self.osci.get_object(obj_attr_list["cloud_name"], \
                                          "GLOBAL", False, \
                                          "api_defaults", False)

    obj_attr_list["filestore_host"] = _filestor_attr_list["hostname"]
    obj_attr_list["filestore_port"] = _filestor_attr_list["port"]
    obj_attr_list["filestore_username"] = _filestor_attr_list["username"]
    obj_attr_list["filestore_protocol"] = _filestor_attr_list["protocol"]

    # Both the "_ip" and "_host" keys carry the VPN server address
    # (preserved from the original behavior).
    obj_attr_list["vpn_server_ip"] = _vpn_attr_list["server_ip"]
    obj_attr_list["vpn_server_host"] = _vpn_attr_list["server_ip"]
    obj_attr_list["vpn_server_bootstrap"] = _vpn_attr_list["server_bootstrap"]
    obj_attr_list["vpn_redis_discovery"] = _vpn_attr_list["redis_discovery"]
    obj_attr_list["vpn_server_port"] = _vpn_attr_list["server_port"]
    obj_attr_list["vpn_server_protocol"] = "TCP"

    obj_attr_list["metricstore_host"] = _metric_store_attr_list["host"]
    # Port key is selected by the metric store kind (e.g. "<kind>_port").
    obj_attr_list["metricstore_port"] = _metric_store_attr_list[_metric_store_attr_list["kind"] + "_port"]
    obj_attr_list["metricstore_protocol"] = _metric_store_attr_list["protocol"]

    obj_attr_list["logstore_host"] = _log_store["hostname"]
    obj_attr_list["logstore_port"] = _log_store["port"]
    obj_attr_list["logstore_protocol"] = _log_store["protocol"]

    # FIX: "objectstore_dbid" and "objectstore_timeout" were each assigned
    # twice in the original; the redundant second assignments were removed.
    # NOTE(review): "timout" (no 'e') is presumably the actual attribute
    # name on the object store interface -- confirm before renaming.
    obj_attr_list["objectstore_host"] = self.osci.host
    obj_attr_list["objectstore_port"] = self.osci.port
    obj_attr_list["objectstore_dbid"] = self.osci.dbid
    obj_attr_list["objectstore_timeout"] = self.osci.timout
    obj_attr_list["objectstore_protocol"] = "TCP"

    obj_attr_list["api_host"] = _api_attr_list["hostname"]
    obj_attr_list["api_port"] = _api_attr_list["port"]

    if obj_attr_list["login"] != "root" :
        obj_attr_list["remote_dir_home"] = "/home/" + obj_attr_list["login"]
    else :
        obj_attr_list["remote_dir_home"] = "/root"

    obj_attr_list["remote_dir_path"] = obj_attr_list["remote_dir_home"] + '/' + obj_attr_list["remote_dir_name"]

    return True
self.osci.update_counter(obj_attr_list["cloud_name"], obj_type, "RESERVATIONS", \ "decrement") raise self.ObjectOperationException(_fmsg, 10) if obj_type == "VM" : obj_attr_list["last_known_state"] = "about to obtain a reservation" vmc = obj_attr_list["vmc"] _msg = "Increasing the \"number of VMs\" counter for the VMC: " + vmc cbdebug(_msg) _vmc_reservation = self.osci.update_object_attribute(obj_attr_list["cloud_name"], "VMC", \ vmc, False, "nr_vms", 1, True) _msg = "New value is " + str(_vmc_reservation) cbdebug(_msg) _vmc_attrs = self.osci.get_object(obj_attr_list["cloud_name"], "VMC", False, vmc, False) if _vmc_attrs["max_vm_reservations"].count('.'): _vmc_attrs["max_vm_reservations"] = _admission_control_limits["max_vm_reservations"].split('.')[0] if int(_vmc_reservation) > int(_vmc_attrs["max_vm_reservations"]) : _status = 102 _fmsg ="VMC-wide reservations for VM objects exhausted." raise self.ObjectOperationException(_fmsg, 10) # This key can be safely deleted. It should not be written # in the datastore as part of the "VM" object (it is already # part of the "VMC" object. if "vmc_max_vm_reservations" in obj_attr_list : del obj_attr_list["vmc_max_vm_reservations"] else : True elif transaction == "schedule" : ''' We don't lock here because pre_attach_vm needs to be serialized during the scheduling process and has already taken the lock.
def __init__(
    self, train_data=None, test_data=None, blueprint=None, model_path=None
):
    """Initialize a Builder with optional train/test splits.

    Parameters
    ----------
    train_data : tuple, optional
        (X_train, y_train) pair. NOTE(review): when omitted, the
        X_train/y_train attributes are never set at all -- confirm
        downstream code tolerates their absence.
    test_data : tuple, optional
        (X_test, y_test) pair; same caveat as train_data.
    blueprint : str, optional
        name of the architecture blueprint to build from.
    model_path : str, optional
        path to a saved model on disk (or None to use a bundled one).
    """
    if train_data is not None:
        self.X_train = train_data[0]
        self.y_train = train_data[1]
    if test_data is not None:
        self.X_test = test_data[0]
        self.y_test = test_data[1]
    self.blueprint = blueprint
    self.model_path = model_path
    # Default fitting hyperparameters; overridable via fit_params().
    self.batch_size = 32
    self.epochs = 60
    self.lr = 1e-4
    self.decay = [100000, 0.96]  # [decay_steps, decay_rate]
    self.early_stopping = None
    self.callbacks = None
    self.verbose = 2
    self.ensemble = False
    # Model handles: the compiled model plus optional sub-networks.
    self.model = None
    self.mlp = None
    self.cnn = None
    self.ann = None
    # Batch-generator bookkeeping, populated by subclasses.
    self.step_size = None
    self.steps_per_epoch = None
    self.batch_maker = None
    self.history = None
    self.name = None
    self.tx_file = None
def unzip_model_files(self, extract_to="models"):
    """Extracts a keras model object from a zip archive

    Parameters
    ----------
    extract_to : str, optional
        directory location to extract into, by default "models"

    Returns
    -------
    string
        path to where the model archive has been extracted
    """
    archive = self.model_path
    # Model directory name = archive filename up to the first dot.
    base_name = os.path.basename(archive).split(".")[0]
    os.makedirs(extract_to, exist_ok=True)
    zip_ref = ZipFile(archive, "r")
    try:
        zip_ref.extractall(extract_to)
    finally:
        zip_ref.close()
    self.model_path = os.path.join(extract_to, base_name)
    return self.model_path
def fit_params(
    self,
    batch_size=32,
    epochs=60,
    lr=1e-4,
    decay=None,
    early_stopping=None,
    verbose=2,
    ensemble=False,
):
    """Set custom model fitting parameters as Builder object attributes.

    Parameters
    ----------
    batch_size : int, optional
        size of each training batch, by default 32
    epochs : int, optional
        number of epochs, by default 60
    lr : float, optional
        initial learning rate, by default 1e-4
    decay : list, optional
        decay_steps, decay_rate, by default [100000, 0.96]
    early_stopping : str, optional
        use an early stopping callback, by default None
    verbose : int, optional
        set the verbosity level, by default 2
    ensemble : bool, optional
        ensemble type network, by default False

    Returns
    -------
    self
        spacekit.builder.architect.Builder class object with updated
        fitting parameters.
    """
    self.batch_size = batch_size
    self.epochs = epochs
    self.lr = lr
    # BUG FIX: the default used to be the mutable literal [100000, 0.96],
    # which Python creates once and shares across every call and instance;
    # a None sentinel yields a fresh list per call instead.
    self.decay = [100000, 0.96] if decay is None else decay
    self.early_stopping = early_stopping
    self.verbose = verbose
    self.ensemble = ensemble
    return self
""" if self.name is None: self.name = str(self.model.name_scope().rstrip("/")) datestamp = dt.datetime.now().isoformat().split("T")[0] model_name = f"{self.name}_{datestamp}" else: model_name = self.name model_path = os.path.join(output_path, "models", model_name) weights_path = f"{model_path}/weights/ckpt" self.model.save(model_path) if weights is True: self.model.save_weights(weights_path) for root, _, files in os.walk(model_path): indent = " " * root.count(os.sep) print("{}{}/".format(indent, os.path.basename(root))) for filename in files: print("{}{}".format(indent + " ", filename)) self.model_path = model_path def model_diagram( self, model=None, output_path=None, show_shapes=True, show_dtype=False, LR=False, expand_nested=True, show_layer_names=False, ): rank = "LR" if LR is True else "TB" if model is None: model = self.model if output_path is None: output_path = os.getcwd() try: plot_model( model, to_file=f"{output_path}/{model.name}.png", show_shapes=show_shapes, show_dtype=show_dtype, show_layer_names=show_layer_names, rankdir=rank, expand_nested=expand_nested, dpi=96, layer_range=None, ) # TODO error handling except Exception as e: print(e) # TODO # def timer(self, func, model_name): # def wrap(): # start = time.time() # stopwatch(f"TRAINING ***{model_name}***", t0=start) # func() # end = time.time() # stopwatch(f"TRAINING ***{model_name}***", t0=start, t1=end) # return func # return wrap # @timer def batch_fit(self): """ Fits cnn using a batch generator of equal positive and negative number of samples, rotating randomly. 
Returns ------- tf.keras.model.history Keras training history """ model_name = str(self.model.name_scope().rstrip("/").upper()) print("FITTING MODEL...") validation_data = ( (self.X_test, self.y_test) if self.X_test is not None else None ) if self.early_stopping is not None: self.callbacks = self.set_callbacks() start = time.time() stopwatch(f"TRAINING ***{model_name}***", t0=start) if self.steps_per_epoch is None or 0: self.steps_per_epoch = 1 self.history = self.model.fit( self.batch_maker(), validation_data=validation_data, verbose=self.verbose, epochs=self.epochs, steps_per_epoch=self.steps_per_epoch, callbacks=self.callbacks, ) end = time.time() stopwatch(f"TRAINING ***{model_name}***", t0=start, t1=end) self.model.summary() return self.history def fit(self, params=None): """Fit a model to the training data. Parameters ---------- params : dictionary, optional set custom fit params, by default None Returns ------- tf.keras.model.history Keras training history object """ if params is not None: self.fit_params(**params) model_name = str(self.model.name_scope().rstrip("/").upper()) print("FITTING MODEL...") validation_data = ( (self.X_test, self.y_test) if self.X_test is not None else None ) if self.early_stopping is not None: self.callbacks = self.set_callbacks() start = time.time() stopwatch(f"TRAINING ***{model_name}***", t0=start) self.history = self.model.fit( self.X_train, self.y_train, batch_size=self.batch_size, validation_data=validation_data, verbose=self.verbose, epochs=self.epochs, callbacks=self.callbacks, ) end = time.time() stopwatch(f"TRAINING ***{model_name}***", t0=start, t1=end) self.model.summary() return self.history class BuilderMLP(Builder): """Subclass for building and training MLP neural networks Parameters ---------- Builder : class spacekit.builder.architect.Builder class object """ def __init__(self, X_train, y_train, X_test, y_test, blueprint="mlp"): super().__init__(train_data=(X_train, y_train), test_data=(X_test, y_test)) 
self.blueprint = blueprint self.input_shape = X_train.shape[1] self.output_shape = 1 self.layers = [18, 32, 64, 32, 18] self.input_name = "mlp_inputs" self.output_name = "mlp_output" self.name = "sequential_mlp" self.activation = "leaky_relu" self.cost_function = "sigmoid" self.lr_sched = True self.optimizer = Adam self.loss = "binary_crossentropy" self.metrics = ["accuracy"] self.step_size = X_train.shape[0] self.steps_per_epoch = self.step_size // self.batch_size self.batch_maker = self.batch def build(self): """Build and compile an MLP network Returns ------- tf.keras.model compiled model object """ # visible layer inputs = Input(shape=(self.input_shape,), name=self.input_name) # hidden layers x = Dense( self.layers[0], activation=self.activation, name=f"1_dense{self.layers[0]}" )(inputs) for i, layer in enumerate(self.layers[1:]): i += 1 x = Dense(layer, activation=self.activation, name=f"{i+1}_dense{layer}")(x) # output layer if self.blueprint == "ensemble": self.mlp = Model(inputs, x, name="mlp_ensemble") return
VersionInfo.parse(mine) if theirs.major != mine.major: warnings.warn( f"Model was trained with {library} {theirs}, yours is {mine}. " f"Bad things will probably happen unless you update {library} to {theirs.major}.x." ) if theirs.minor > mine.minor: warnings.warn( f"Model was trained with {library} {theirs}, yours is {mine}. " f"This should be OK but you might want to update {library}." ) def on_load_checkpoint(self, checkpoint: Dict[str, Any]): self.check_version( "pyannote.audio", checkpoint["pyannote.audio"]["versions"]["pyannote.audio"], __version__, ) self.check_version( "torch", checkpoint["pyannote.audio"]["versions"]["torch"], torch.__version__, ) self.check_version( "pytorch-lightning", checkpoint["pytorch-lightning_version"], pl.__version__ ) self.specifications = checkpoint["pyannote.audio"]["specifications"] self.setup() self.introspection = checkpoint["pyannote.audio"]["introspection"] def forward(self, waveforms: torch.Tensor) -> torch.Tensor: msg = "Class {self.__class__.__name__} should define a `forward` method." raise NotImplementedError(msg) def helper_default_activation(self, specifications: Specifications) -> nn.Module: """Helper function for default_activation Parameters ---------- specifications: Specifications Task specification. Returns ------- activation : nn.Module Default activation function. 
""" if specifications.problem == Problem.BINARY_CLASSIFICATION: return nn.Sigmoid() elif specifications.problem == Problem.MONO_LABEL_CLASSIFICATION: return nn.LogSoftmax(dim=-1) elif specifications.problem == Problem.MULTI_LABEL_CLASSIFICATION: return nn.Sigmoid() else: msg = "TODO: implement default activation for other types of problems" raise NotImplementedError(msg) # convenience function to automate the choice of the final activation function def default_activation(self) -> nn.Module: """Guess default activation function according to task specification * sigmoid for binary classification * log-softmax for regular multi-class classification * sigmoid for multi-label classification Returns ------- activation : nn.Module Activation. """ return self.helper_default_activation(self.specifications) # training data logic is delegated to the task because the # model does not really need to know how it is being used. def train_dataloader(self) -> DataLoader: return self.task.train_dataloader() # training step logic is delegated to the task because the # model does not really need to know how it is being used. def training_step(self, batch, batch_idx): return self.task.training_step(batch, batch_idx) # validation data logic is delegated to the task because the # model does not really need to know how it is being used. def val_dataloader(self) -> DataLoader: return self.task.val_dataloader() # validation logic is delegated to the task because the # model does not really need to know how it is being used. 
def validation_step(self, batch, batch_idx): return self.task.validation_step(batch, batch_idx) def validation_epoch_end(self, outputs): return self.task.validation_epoch_end(outputs) def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=1e-3) def _helper_up_to( self, module_name: Text, requires_grad: bool = False ) -> List[Text]: """Helper function for freeze_up_to and unfreeze_up_to""" tokens = module_name.split(".") updated_modules = list() for name, module in ModelSummary(self, max_depth=-1).named_modules: name_tokens = name.split(".") matching_tokens = list( token for token, other_token in zip(name_tokens, tokens) if token == other_token ) # if module is A.a.1 & name is A.a, we do not want to freeze the whole A.a module # because it might contain other modules like A.a.2 and A.a.3 if matching_tokens and len(matching_tokens) == len(tokens) - 1: continue for parameter in module.parameters(recurse=True): parameter.requires_grad = requires_grad module.train(mode=requires_grad) updated_modules.append(name) #  stop once we reached the requested module if module_name == name: break if module_name not in updated_modules: raise ValueError(f"Could not find module {module_name}") return updated_modules def freeze_up_to(self, module_name: Text) -> List[Text]: """Freeze model up to specific module Parameters ---------- module_name : str Name of module (included) up to which the model will be frozen. Returns ------- frozen_modules : list of str List of names of frozen modules Raises ------ ValueError when requested module does not exist Note ---- The order of modules is the one reported by self.summary("full"). If your model does not follow a sequential structure, you might want to use freeze_by_name for more control. 
""" return self._helper_up_to(module_name, requires_grad=False) def unfreeze_up_to(self, module_name: Text) -> List[Text]: """Unfreeze model up to specific module Parameters ---------- module_name : str Name of module (included) up to which the model will be unfrozen. Returns ------- unfrozen_modules : list of str List of names of frozen modules Raises ------ ValueError when requested module does not exist Note ---- The order of modules is the one reported by self.summary("full"). If your model does not follow a sequential structure, you might want to use freeze_by_name for more control. """ return self._helper_up_to(module_name, requires_grad=True) def _helper_by_name( self, modules: Union[List[Text], Text], recurse: bool = True, requires_grad: bool = False, ) -> List[Text]: """Helper function for freeze_by_name and unfreeze_by_name""" updated_modules = list() # Force modules to be a list if isinstance(modules, str): modules = [modules] for name, module in ModelSummary(self, max_depth=-1).named_modules: if name not in modules: continue for parameter in module.parameters(recurse=True): parameter.requires_grad = requires_grad module.train(requires_grad) # keep track of updated modules updated_modules.append(name) missing = list(set(modules) - set(updated_modules)) if missing: raise ValueError(f"Could not find the following modules: {missing}.") return updated_modules def freeze_by_name( self, modules: Union[Text, List[Text]], recurse: bool = True, ) -> List[Text]: """Freeze modules Parameters ---------- modules : list of str, str Name(s) of modules to freeze recurse : bool, optional If True (default), freezes parameters of these modules and all submodules. Otherwise, only freezes parameters that are direct members of these modules. Returns ------- frozen_modules: list of str Names of frozen modules Raises ------ ValueError if at least one of `modules` does not exist. 
""" return self._helper_by_name( modules, recurse=recurse, requires_grad=False, ) def unfreeze_by_name( self, modules: Union[List[Text], Text], recurse: bool = True, ) -> List[Text]: """Unfreeze modules Parameters ---------- modules : list of str, str Name(s) of modules to unfreeze recurse : bool, optional If True (default), unfreezes parameters of these modules and all submodules. Otherwise, only unfreezes parameters that are direct members of these modules. Returns ------- unfrozen_modules: list of str Names of unfrozen modules Raises ------ ValueError if at least one of `modules` does not exist. """ return self._helper_by_name(modules, recurse=recurse, requires_grad=True) @classmethod def from_pretrained( cls, checkpoint: Union[Path, Text], map_location=None, hparams_file: Union[Path, Text] = None, strict: bool = True, use_auth_token: Union[Text, None] = None, cache_dir: Union[Path, Text] = CACHE_DIR, **kwargs, ) -> "Model": """Load pretrained model Parameters ---------- checkpoint : Path or str Path to checkpoint, or a remote URL, or a model identifier from the huggingface.co model hub. map_location: optional Same role as in torch.load(). Defaults to `lambda storage, loc: storage`. hparams_file : Path or str, optional Path to a .yaml file with hierarchical structure as in this example: drop_prob: 0.2 dataloader: batch_size: 32 You most likely won’t need this since Lightning will always save the hyperparameters to the checkpoint. However, if your checkpoint weights do not have the hyperparameters saved, use this method to pass in a .yaml file with the hparams you would like to use. These will be converted into a dict and passed into your Model for use. strict : bool, optional Whether to strictly enforce that the keys in checkpoint match the keys returned by this module’s state dict. Defaults to True. 
use_auth_token : str, optional When loading a private huggingface.co model, set `use_auth_token` to True or to a string containing your hugginface.co authentication token that can be obtained by running `huggingface-cli login` cache_dir: Path or str, optional Path to model cache directory. Defaults to content of PYANNOTE_CACHE environment variable, or "~/.cache/torch/pyannote" when unset. kwargs: optional Any extra keyword args needed to init the model. Can also be used to override saved hyperparameter values. Returns ------- model : Model Model See also -------- torch.load """ # pytorch-lightning expects str, not Path. checkpoint = str(checkpoint) if hparams_file is not None: hparams_file = str(hparams_file) # resolve the checkpoint to # something that pl will handle if os.path.isfile(checkpoint): path_for_pl = checkpoint elif urlparse(checkpoint).scheme in ("http", "https"): path_for_pl = checkpoint else: # Finally, let's try to find it on Hugging Face model hub # e.g. julien-c/voice-activity-detection is a valid model id # and julien-c/voice-activity-detection@main supports specifying a commit/branch/tag. if "@" in checkpoint: model_id = checkpoint.split("@")[0] revision = checkpoint.split("@")[1] else: model_id = checkpoint revision = None url = hf_hub_url( model_id, filename=HF_PYTORCH_WEIGHTS_NAME, revision=revision ) path_for_pl = cached_download( url=url, library_name="pyannote", library_version=__version__, cache_dir=cache_dir, use_auth_token=use_auth_token, ) # HACK Huggingface download counters rely on config.yaml # HACK Therefore we download config.yaml even though we # HACK do not use it. Fails silently in case model does not # HACK have a config.yaml file. try: config_url = hf_hub_url( model_id, filename=HF_LIGHTNING_CONFIG_NAME, revision=revision ) _
<reponame>maxmuffin/SmartSeat<filename>IA/server/server.py import datetime import hashlib import json import os import sqlite3 import time import uuid import apscheduler import joblib import numpy import socket import pandas as pd from pythonping import ping from Crypto.Cipher import AES from apscheduler.schedulers.background import BackgroundScheduler from flask import Flask, request, abort, jsonify, send_from_directory import paho.mqtt.client as mqtt from influxdb import InfluxDBClient hostname = socket.gethostname() IPAddr = socket.gethostbyname(hostname) port = 8000 UPLOAD_DIRECTORY = "./server/data/uploaded_files" DB_FILE = "./server/DB/SmartSeat.db" key1 = "SmartSeatApp2019" key2 = '<KEY>' if not os.path.exists(UPLOAD_DIRECTORY): os.makedirs(UPLOAD_DIRECTORY) api = Flask(__name__) sched = BackgroundScheduler(daemon=True) last_prediction = [-1, 0] chair_on = False with open("./server/last_prediction.txt", "w") as fp1: fp1.write(str(last_prediction[0]) + "," + str(last_prediction[1]) + "," + str(chair_on)) def save_prediction(pred_value, acc_value, username): clients = InfluxDBClient('localhost', 8086, 'root', 'root', 'SmartSeat') clients.create_database("SmartSeat") json_body = [ { "measurement": "Predictions", "fields": { "prediction": pred_value, "accuracy": acc_value, "username": username } } ] print("Saving to InfluxDB >> Prediction: " + str(pred_value) + ", Accuracy: " + str(acc_value)) clients.write_points(json_body) def save_sensor_data(seat1, seat2, seat3, seat4, back1, back2, back3, username): clients = InfluxDBClient('localhost', 8086, 'root', 'root', 'SmartSeat') clients.create_database("SmartSeat") json_body = [ { "measurement": "SensorsData", "fields": { "seat1": seat1, "seat2": seat2, "seat3": seat3, "seat4": seat4, "back1": back1, "back2": back2, "back3": back3, "username": username } } ] # print( # "Saving to InfluxDB >> Values: " + seat1 + "," + seat2 + "," + seat3 + # "," + seat4 + "," + back1 + "," + back2 + "," + back3 + ", " + username # ) 
clients.write_points(json_body) def get_values(username): try: client = InfluxDBClient('localhost', 8086, 'root', 'root', 'SmartSeat') query_all = "select * from Predictions where username='"+username+"' order by time asc tz('Europe/Rome');" result_all = client.query(query_all) points_all = list(result_all.get_points()) # DAY MEASUREMENT by HOUR:MINUTE:SECOND today = datetime.date.today() arr_day = {} for value in points_all: if value['time'].split("T")[0] == str(today): posture = str(value['prediction']) if posture in ['2', '3', '4', '5', '6', '7', '8']: # wrong posture = '1' elif posture == '1': # correct posture = '2' elif posture == '0': # nosit posture = '0' arr_day[(value['time'].split("T")[1]).split(".")[0]] = posture if arr_day.__len__() == 0: arr_day["00:00:00"] = "0" # ALL MEASUREMENT by POSTURE TYPE check_date = str(datetime.date(1970, 1, 1)) counter_correct = 0 counter_wrong = 0 counter_no_sit = 0 arr_counter = {} for value in points_all: posture = value['prediction'] if value['time'].split("T")[0] != check_date: check_date = value['time'].split("T")[0] arr_counter[check_date] = {'correct': 0, 'wrong': 0, 'no_sit': 0} counter_correct = 0 counter_wrong = 0 counter_no_sit = 0 if posture == 0: counter_no_sit = counter_no_sit + 1 arr_counter[check_date]['no_sit'] = counter_no_sit elif posture == 1: counter_correct = counter_correct + 1 arr_counter[check_date]['correct'] = counter_correct elif posture in [2, 3, 4, 5, 6, 7, 8]: counter_wrong = counter_wrong + 1 arr_counter[check_date]['wrong'] = counter_wrong else: if posture == 0: counter_no_sit = counter_no_sit + 1 arr_counter[check_date]['no_sit'] = counter_no_sit elif posture == 1: counter_correct = counter_correct + 1 arr_counter[check_date]['correct'] = counter_correct elif posture in [2, 3, 4, 5, 6, 7, 8]: counter_wrong = counter_wrong + 1 arr_counter[check_date]['wrong'] = counter_wrong arr_all = {'Correct': {}, 'Wrong': {}, 'NotSitted': {}} print(arr_counter) for date, val in arr_counter.items(): 
arr_all['Correct'][date] = val['correct'] arr_all['Wrong'][date] = val['wrong'] arr_all['NotSitted'][date] = val['no_sit'] # FINAL ARRAY for the response (JSON) arr_final = {'day_measurement': arr_day, 'all_measurement': arr_all} response = json.dumps(arr_final) except ConnectionError: with open("./server/data/json_graph.json", "r") as fp2: response = fp2.read() print(response) return response, 201 index_performance = 1 string_performance = "" def query_model(data, chair): # CSV data to pandas array global index_performance print("INDICE: " + str(index_performance)) global string_performance string_performance = string_performance + str(index_performance) + " - DataReceived: " + str(time.time()) chair_on = True filename = str(uuid.uuid4()) with open(os.path.join(UPLOAD_DIRECTORY, filename + ".csv"), "wt") as fp: fp.write(data) file_csv = "./server/data/uploaded_files/" + filename + ".csv" columns_name = ['seduta1', 'seduta2', 'seduta3', 'seduta4', 'schienale1', 'schienale2', 'schienale3'] csv_file_predict = pd.read_csv(file_csv, names=columns_name) print(csv_file_predict.head(10)) os.remove("./server/data/uploaded_files/" + filename + ".csv") rfc = joblib.load("./server/trained_model.skl") # Query RandomForest ML Model x_query = csv_file_predict try: # Query SQL for bind username-chair print(chair) query_check_exists = "SELECT USERNAME FROM BIND WHERE CHAIRKEY='" + chair + "'" conn = sqlite3.connect(DB_FILE) cursor = conn.cursor() cursor.execute(query_check_exists) username_exists = str(cursor.fetchone()).replace("('","").replace("',)","") print("User: ") print(username_exists) cursor.close() conn.close() if username_exists: rfc_predict = rfc.predict(x_query) string_performance = string_performance + " - DataProcessed: " + str(time.time()) print("Predict ", rfc_predict) # Counting and Saving prediction on InfluxDB unique, counts = numpy.unique(rfc_predict, return_counts=True) unique_counts = dict(zip(unique, counts)) print(unique_counts) max_acc = 0 max_val = 0 for 
val, acc in unique_counts.items(): if acc > max_acc: max_acc = acc max_val = val last_prediction = [max_val, max_acc] save_prediction(max_val, max_acc, username_exists) # Parsing data and saving values of every sensors for riga in data.split("\n"): data = riga.split(",") if len(data) > 1: save_sensor_data(data[0], data[1], data[2], data[3], data[4], data[5], data[6], username_exists) string_performance = string_performance + " - DataSaved: " + str(time.time()) + "\n" index_performance = index_performance + 1 # update last_prediction file with open("./server/last_prediction.txt", "w") as fp1: fp1.write(str(last_prediction[0]) + "," + str(last_prediction[1]) + "," + str(chair_on)) print("Postura " + str(last_prediction[0]) + " al " + str(last_prediction[1] * 10) + "%") else: print("No Binding found for CHAIR: " + chair) except ValueError as err: print(err) def bind_chair(username, chair): query_check_user = "SELECT USERNAME FROM BIND WHERE CHAIRKEY='"+chair+"'" conn = sqlite3.connect(DB_FILE) cursor = conn.cursor() cursor.execute(query_check_user) username_chair = cursor.fetchone() cursor.close() conn.close() username_chair = (str(username_chair)).replace("('","").replace("',)","") print(" "+username_chair+" ") if username_chair == username or username_chair == '': query_update = "UPDATE BIND SET USERNAME = '" + username + "' WHERE CHAIRKEY = '" + chair + "'" conn = sqlite3.connect(DB_FILE) cursor = conn.cursor() cursor.execute(query_update) cursor.close() conn.commit() conn.close() print("Chair binded -> " + username) return '{"bind":"True"}' else: print("Chair already binded -> " + username_chair) return '{"bind":"False"}' def unbind_chair(username,chair): query_check_user = "SELECT USERNAME FROM BIND WHERE CHAIRKEY='" + chair + "'" conn = sqlite3.connect(DB_FILE) cursor = conn.cursor() cursor.execute(query_check_user) username_chair = cursor.fetchone() cursor.close() conn.close() username_chair = (str(username_chair)).replace("('", "").replace("',)", "") if 
username_chair == username: query_update = "UPDATE BIND SET USERNAME = '' WHERE CHAIRKEY = '" + chair + "'" conn = sqlite3.connect(DB_FILE) cursor = conn.cursor() cursor.execute(query_update) cursor.close() conn.commit() conn.close() print("Chair unbinded -> " + username) return '{"unbind":"True"}' else: print("Chair not binded to " + username) return '{"unbind":"False"}' def check_online(ip_address): response = ping(target=ip_address, count=1, timeout=5) print("-- CHECK RASPONLINE -- " ) if str(response).startswith("Request timed out"): print(" "+ ip_address+" OFFLINE") with open("./server/last_prediction.txt", "w") as fp4: fp4.write(str(-1) + "," + str(0) + "," + str(False)) else: print(" "+ ip_address+" ONLINE") print("---------------------- \n") def on_message(client, userdata, message): print("-- MQTT Message -- ") print(message.topic) if str(message.topic).startswith('seat/'): decryptor = AES.new(key2, AES.MODE_CBC, key1) decryptedMessage = str((decryptor.decrypt(message.payload)).decode('utf-8')).replace("@", "") query_model(decryptedMessage, str(message.topic).replace('seat/chair', '')) print("------------------ \n") broker_address = "localhost" broker_port = 1883 topic = "seat/#" mqtt_username = "smartseat" mqtt_password = "<PASSWORD>" try: client = mqtt.Client("Server") client.username_pw_set(mqtt_username, password=mqtt_password) client.on_message = on_message client.connect(broker_address) client.loop_start() client.subscribe(topic) except Exception as e: print(e) @api.route("/rasp_online",methods=['GET','POST']) def rasp_online(): try: sched.remove_all_jobs(jobstore='default') sched.add_job(check_online, kwargs={'ip_address': request.remote_addr}, trigger='interval', seconds=10, jobstore='default') sched.start() except apscheduler.schedulers.SchedulerAlreadyRunningError as schedExc: print(schedExc) return "", 201 @api.route("/files") def list_files(): """Endpoint to list files on the server.""" files = [] for filename in os.listdir(UPLOAD_DIRECTORY): 
path = os.path.join(UPLOAD_DIRECTORY, filename) if os.path.isfile(path): files.append(filename) return jsonify(files) @api.route("/files/<path:path>") def get_file(path): """Download a file""" return send_from_directory(UPLOAD_DIRECTORY, path, as_attachment=True) @api.route("/files/<filename>", methods=["POST"]) def post_file(filename): """Upload a file""" if "/" in filename: # Return $== BAD REQUEST abort(400, "no subdirectories directories allowed") with open(os.path.join(UPLOAD_DIRECTORY, filename), "wb") as fp: fp.write(request.data) # Return 201 CREATED return "", 201 @api.route("/signup", methods=["POST"]) def signup(): # Parsing JSON data with Registration data file_json = request.data signup_data = json.loads(file_json) username = signup_data['username'] password = signup_data['password'] name = signup_data['name'] surname = signup_data['surname'] email = signup_data['mail'] weight = signup_data['weight'] height = signup_data['height'] sex = signup_data['sex'] # Adding Salt to password salted_password = "<PASSWORD>" + password + "<PASSWORD>" # Hashing MD5 password value password_hashed = hashlib.md5(salted_password.encode()) # print("user: " + username + # "\npassword: " + password + # "\nname: " + name + # "\nsurname:" + surname + # "\nemail: " + email + # "\nhashed: ") # print(password_hashed) # Check if not registered query_check_exists = "SELECT * FROM USERS WHERE USERNAME='" + username + "'" conn = sqlite3.connect(DB_FILE) cursor = conn.cursor() cursor.execute(query_check_exists) username_exists = cursor.fetchone() print("Sign-in Check: ") print(username_exists) cursor.close() conn.close() if username_exists: print("Signed False") return '{"signed":"false"}', 201 else: # Insert new user if not exists query_login = "INSERT INTO USERS(USERNAME,PASSWORD,NAME,SURNAME,MAIL,WEIGHT,HEIGHT,SEX) " \ "VALUES ('" + username + "','" + password + "','" + name + "','" + surname + "','" \ + email + "','" + str(weight) + "','" + str(height) + "','" +
from __future__ import absolute_import import pytest try: import rasterio except: rasterio = None rasterio_available = pytest.mark.skipif(rasterio is None, reason="requires rasterio") from os import path from itertools import product import datashader as ds import xarray as xr import numpy as np import dask.array as da from datashader.resampling import compute_chunksize BASE_PATH = path.split(__file__)[0] DATA_PATH = path.abspath(path.join(BASE_PATH, 'data')) TEST_RASTER_PATH = path.join(DATA_PATH, 'world.rgb.tif') @pytest.fixture def cvs(): with xr.open_rasterio(TEST_RASTER_PATH) as src: res = ds.utils.calc_res(src) left, bottom, right, top = ds.utils.calc_bbox(src.x.values, src.y.values, res) return ds.Canvas(plot_width=2, plot_height=2, x_range=(left, right), y_range=(bottom, top)) @rasterio_available def test_raster_aggregate_default(cvs): with xr.open_rasterio(TEST_RASTER_PATH) as src: agg = cvs.raster(src) assert agg is not None @rasterio_available def test_raster_aggregate_nearest(cvs): with xr.open_rasterio(TEST_RASTER_PATH) as src: agg = cvs.raster(src, upsample_method='nearest') assert agg is not None @pytest.mark.skip('use_overviews opt no longer supported; may be re-implemented in the future') @rasterio_available def test_raster_aggregate_with_overviews(cvs): with xr.open_rasterio(TEST_RASTER_PATH) as src: agg = cvs.raster(src, use_overviews=True) assert agg is not None @pytest.mark.skip('use_overviews opt no longer supported; may be re-implemented in the future') @rasterio_available def test_raster_aggregate_without_overviews(cvs): with xr.open_rasterio(TEST_RASTER_PATH) as src: agg = cvs.raster(src, use_overviews=False) assert agg is not None @rasterio_available def test_out_of_bounds_return_correct_size(cvs): with xr.open_rasterio(TEST_RASTER_PATH) as src: cvs = ds.Canvas(plot_width=2, plot_height=2, x_range=[1e10, 1e20], y_range=[1e10, 1e20]) try: cvs.raster(src) except ValueError: pass else: assert False @rasterio_available def 
test_partial_extent_returns_correct_size(): with xr.open_rasterio(TEST_RASTER_PATH) as src: res = ds.utils.calc_res(src) left, bottom, right, top = ds.utils.calc_bbox(src.x.values, src.y.values, res) half_width = (right - left) / 2 half_height = (top - bottom) / 2 cvs = ds.Canvas(plot_width=512, plot_height=256, x_range=[left-half_width, left+half_width], y_range=[bottom-half_height, bottom+half_height]) agg = cvs.raster(src) assert agg.shape == (3, 256, 512) assert agg is not None @rasterio_available def test_partial_extent_with_layer_returns_correct_size(cvs): with xr.open_rasterio(TEST_RASTER_PATH) as src: res = ds.utils.calc_res(src) left, bottom, right, top = ds.utils.calc_bbox(src.x.values, src.y.values, res) half_width = (right - left) / 2 half_height = (top - bottom) / 2 cvs = ds.Canvas(plot_width=512, plot_height=256, x_range=[left-half_width, left+half_width], y_range=[bottom-half_height, bottom+half_height]) agg = cvs.raster(src, layer=1) assert agg.shape == (256, 512) assert agg is not None @rasterio_available def test_calc_res(): """Assert that resolution is calculated correctly when using the xarray rasterio backend. """ with xr.open_rasterio(TEST_RASTER_PATH) as src: xr_res = ds.utils.calc_res(src) with rasterio.open(TEST_RASTER_PATH) as src: rio_res = src.res assert np.allclose(xr_res, rio_res) @rasterio_available def test_calc_bbox(): """Assert that bounding boxes are calculated correctly when using the xarray rasterio backend. """ with xr.open_rasterio(TEST_RASTER_PATH) as src: xr_res = ds.utils.calc_res(src) xr_bounds = ds.utils.calc_bbox(src.x.values, src.y.values, xr_res) with rasterio.open(TEST_RASTER_PATH) as src: rio_bounds = src.bounds assert np.allclose(xr_bounds, rio_bounds, atol=1.0) # allow for absolute diff of 1.0 def test_raster_both_ascending(): """ Assert raster with ascending x- and y-coordinates is aggregated correctly. 
""" xs = np.arange(10) ys = np.arange(5) arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, arr) assert np.allclose(agg.X.values, xs) assert np.allclose(agg.Y.values, ys) def test_raster_both_ascending_partial_range(): """ Assert raster with ascending x- and y-coordinates and a partial canvas range is aggregated correctly. """ xs = np.arange(10) ys = np.arange(5) arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(7, 3, x_range=(.5, 7.5), y_range=(.5, 3.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, xarr.sel(X=slice(1, 7), Y=slice(1, 3))) assert np.allclose(agg.X.values, xs[1:8]) assert np.allclose(agg.Y.values, ys[1:4]) def test_raster_both_descending(): """ Assert raster with ascending x- and y-coordinates is aggregated correctly. """ xs = np.arange(10)[::-1] ys = np.arange(5)[::-1] arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, arr) assert np.allclose(agg.X.values, xs) assert np.allclose(agg.Y.values, ys) def test_raster_both_descending_partial_range(): """ Assert raster with ascending x- and y-coordinates and a partial canvas range is aggregated correctly. """ xs = np.arange(10)[::-1] ys = np.arange(5)[::-1] arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(7, 3, x_range=(.5, 7.5), y_range=(.5, 3.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, xarr.sel(Y=slice(3,1), X=slice(7, 1)).data) assert np.allclose(agg.X.values, xs[2:9]) assert np.allclose(agg.Y.values, ys[1:4]) def test_raster_x_ascending_y_descending(): """ Assert raster with ascending x- and descending y-coordinates is aggregated correctly. 
""" xs = np.arange(10) ys = np.arange(5)[::-1] arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, arr) assert np.allclose(agg.X.values, xs) assert np.allclose(agg.Y.values, ys) def test_raster_x_ascending_y_descending_partial_range(): """ Assert raster with ascending x- and descending y-coordinates is aggregated correctly. """ xs = np.arange(10) ys = np.arange(5)[::-1] arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(7, 2, x_range=(0.5, 7.5), y_range=(1.5, 3.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, xarr.sel(X=slice(1, 7), Y=slice(3, 2)).data) assert np.allclose(agg.X.values, xs[1:8]) assert np.allclose(agg.Y.values, ys[1:3]) def test_raster_x_descending_y_ascending(): """ Assert raster with descending x- and ascending y-coordinates is aggregated correctly. """ xs = np.arange(10)[::-1] ys = np.arange(5) arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, arr) assert np.allclose(agg.X.values, xs) assert np.allclose(agg.Y.values, ys) def test_raster_x_descending_y_ascending_partial_range(): """ Assert raster with descending x- and ascending y-coordinates is aggregated correctly. """ xs = np.arange(10)[::-1] ys = np.arange(5) arr = xs*ys[np.newaxis].T xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X']) cvs = ds.Canvas(7, 2, x_range=(.5, 7.5), y_range=(1.5, 3.5)) agg = cvs.raster(xarr) assert np.allclose(agg.data, xarr.sel(X=slice(7, 1), Y=slice(2, 3)).data) assert np.allclose(agg.X.values, xs[2:9]) assert np.allclose(agg.Y.values, ys[2:4]) def test_raster_integer_nan_value(): """ Ensure custom nan_value is handled correctly for integer arrays. 
""" cvs = ds.Canvas(plot_height=2, plot_width=2, x_range=(0, 1), y_range=(0,1)) array = np.array([[9999, 1, 2, 3], [4, 9999, 6, 7], [8, 9, 9999, 11]]) coords = {'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)} xr_array = xr.DataArray(array, coords=coords, dims=['y', 'x']) agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999) expected = np.array([[4, 7], [9, 11]]) assert np.allclose(agg.data, expected) assert agg.data.dtype.kind == 'i' assert np.allclose(agg.x.values, np.array([0.25, 0.75])) assert np.allclose(agg.y.values, np.array([0.25, 0.75])) def test_raster_float_nan_value(): """ Ensure default nan_value is handled correctly for float arrays """ cvs = ds.Canvas(plot_height=2, plot_width=2, x_range=(0, 1), y_range=(0,1)) array = np.array([[np.NaN, 1., 2., 3.], [4., np.NaN, 6., 7.], [8., 9., np.NaN, 11.]]) coords = {'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)} xr_array = xr.DataArray(array, coords=coords, dims=['y', 'x']) agg = cvs.raster(xr_array, downsample_method='max') expected = np.array([[4, 7], [9, 11]]) assert np.allclose(agg.data, expected) assert agg.data.dtype.kind == 'f' assert np.allclose(agg.x.values, np.array([0.25, 0.75])) assert np.allclose(agg.y.values, np.array([0.25, 0.75])) def test_raster_integer_nan_value_padding(): """ Ensure that the padding values respect the supplied nan_value. 
""" cvs = ds.Canvas(plot_height=3, plot_width=3, x_range=(0, 2), y_range=(0, 2)) array = np.array([[9999, 1, 2, 3], [4, 9999, 6, 7], [8, 9, 9999, 11]]) xr_array = xr.DataArray(array, coords={'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}, dims=['y', 'x']) agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999) expected = np.array([[4, 7, 9999], [9, 11, 9999], [9999, 9999, 9999]]) assert np.allclose(agg.data, expected) assert agg.data.dtype.kind == 'i' assert np.allclose(agg.x.values, np.array([1/3., 1.0, 5/3.])) assert np.allclose(agg.y.values, np.array([1/3., 1.0, 5/3.])) def test_raster_float_nan_value_padding(): """ Ensure that the padding values respect the supplied nan_value. """ cvs = ds.Canvas(plot_height=3, plot_width=3, x_range=(0, 2), y_range=(0, 2)) array = np.array([[np.nan, 1., 2., 3.], [4., np.nan, 6., 7.], [8., 9., np.nan, 11.]]) xr_array = xr.DataArray(array, coords={'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}, dims=['y', 'x']) agg = cvs.raster(xr_array, downsample_method='max') expected = np.array([[4., 7., np.nan], [9., 11., np.nan], [np.nan, np.nan, np.nan]]) assert np.allclose(agg.data, expected, equal_nan=True) assert agg.data.dtype.kind == 'f' assert np.allclose(agg.x.values, np.array([1/3., 1.0, 5/3.])) assert np.allclose(agg.y.values, np.array([1/3., 1.0, 5/3.])) def test_raster_single_pixel_range(): """ Ensure that canvas range covering a single pixel are handled correctly. 
""" cvs = ds.Canvas(plot_height=3, plot_width=3, x_range=(0, 0.1), y_range=(0, 0.1)) array = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]) xr_array = xr.DataArray(array, dims=['y', 'x'], coords={'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}) agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999) expected = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) assert np.allclose(agg.data, expected) assert agg.data.dtype.kind == 'i' assert np.allclose(agg.x.values, np.array([1/60., 1/20., 1/12.])) assert np.allclose(agg.y.values, np.array([1/60., 1/20., 1/12.])) def test_raster_single_pixel_range_with_padding(): """ Ensure that canvas range covering a single pixel and small area beyond the defined data ranges is handled correctly. """ # The .301 value ensures that one pixel covers the edge of the input extent cvs = ds.Canvas(plot_height=4, plot_width=6, x_range=(-0.5, 0.25), y_range=(-.5, 0.301)) cvs2 = ds.Canvas(plot_height=4, plot_width=6, x_range=(-0.5, 0.25), y_range=(-.5, 0.3)) array = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype='f') xr_array = xr.DataArray(array, dims=['y', 'x'], coords={'x': np.linspace(0.125, .875, 4), 'y': np.linspace(0.125, 0.625, 3)}) agg = cvs.raster(xr_array, downsample_method='max', nan_value=np.NaN) agg2 = cvs2.raster(xr_array, downsample_method='max', nan_value=np.NaN) expected = np.array([ [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN, 0,
# -*- coding: utf-8 -*-

# Copyright (c) 2019 by University of Kassel, T<NAME>, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.

import numpy as np
import pandas as pd
import datetime as dt
from packaging import version

from pandapower import compare_arrays

try:
    import pplog as logging
except ImportError:
    import logging

logger = logging.getLogger(__name__)

__author__ = 'smeinecke'


def ensure_iterability(var, len_=None):
    """ This function ensures iterability of a variable (and optional length).

    Non-iterables (and strings, which are deliberately treated as scalars) are
    wrapped into a list, repeated ``len_`` times (default 1). For iterables,
    a mismatching ``len_`` raises ValueError.
    """
    if hasattr(var, "__iter__") and not isinstance(var, str):
        if isinstance(len_, int) and len(var) != len_:
            raise ValueError("Length of variable differs from %i." % len_)
    else:
        len_ = len_ or 1
        var = [var]*len_
    return var


def find_idx_by_name(df, column, name):
    # Return the single index of the row whose `column` equals `name`;
    # raises UserWarning if the name is missing or ambiguous.
    idx = df.index[df[column] == name]
    if len(idx) == 0:
        raise UserWarning("In column '%s', there is no element named %s" % (column, name))
    if len(idx) > 1:
        raise UserWarning("In column '%s', multiple elements are named %s" % (column, name))
    return idx[0]


def idx_in_2nd_array(arr1, arr2, match=True):
    """ This function returns an array of indices of arr1 matching arr2.
        arr1 may include duplicates. If an item of arr1 misses in arr2, 'match' decides whether
        the idx of the nearest value is returned (False) or an error is raised (True).
    """
    if match:
        missings = list(set(arr1) - set(arr2))
        if len(missings):
            raise ValueError("These values misses in arr2: " + str(missings))

    # searchsorted on the sorted copy, then map positions back to the
    # original (unsorted) arr2 ordering via the argsort lookup
    arr1_, uni_inverse = np.unique(arr1, return_inverse=True)
    sort_lookup = np.argsort(arr2)
    arr2_ = np.sort(arr2)
    idx = np.searchsorted(arr2_, arr1_)
    res = sort_lookup[idx][uni_inverse]
    return res


def column_indices(df, query_cols):
    """ returns a numpy array with the indices of the columns requested by 'query_cols'.
        Works properly for string column names.
    """
    cols = df.columns.values
    sidx = np.argsort(cols)
    return sidx[np.searchsorted(cols, query_cols, sorter=sidx)]


def merge_dataframes(dfs, keep="first", sort_index=True, sort_column=True, column_to_sort=None,
                     index_time_str=None, **kwargs):
    """
    This is a wrapper function of pandas.concat(dfs, axis=0) to merge DataFrames.

    INPUT:
        **dfs** (DataFrames) - a sequence or mapping of DataFrames

    OPTIONAL:
        **keep** (str, "first") - Flag to decide which data are kept in case of duplicated
        indices - first, last or all duplicated data.

        **sort_index** (bool, True) - If True, the indices of the returning DataFrame will be
        sorted. If False, the indices and columns will be in order of the original DataFrames.

        **sort_column** (bool, True) - If True, the columns of the returning DataFrame will be
        sorted. If False, the indices and columns will be in order of the original DataFrames.

        **column_to_sort** (-, None) - If given, 'column_to_sort' must be a column name
        occurring in both DataFrames. The returning DataFrame will be sorted by this column.
        The input indices get lost.

        **index_time_str** (str, None) - If given, the indices or the 'column_to_sort' if given
        will be sorted in datetime order.

        ****kwargs** - Keyword arguments for pandas.concat() except axis, such as sort, join,
        join_axes, ignore_index, keys. 'sort' can overwrite 'sort_index' and 'sort_column'.
    """
    if "axis" in kwargs:
        if kwargs["axis"] != 0:
            logger.warning("'axis' is always assumed as zero.")
        kwargs.pop("axis")
    if "sort" in kwargs:
        if not kwargs["sort"] == sort_index == sort_column:
            sort_index = kwargs["sort"]
            sort_column = kwargs["sort"]
            if not sort_index or not sort_column:
                logger.warning("'sort' overwrites 'sort_index' and 'sort_column'.")
        kwargs.pop("sort")

    # --- set index_column as index
    if column_to_sort is not None:
        if any([column_to_sort not in df.columns for df in dfs]):
            raise KeyError("column_to_sort '%s' must be a column of " % column_to_sort +
                           "both dataframes, df1 and df2")
        if not sort_index:
            logger.warning("Since 'column_to_sort' is given, the returning DataFrame will be" +
                           "sorted by this column as well as the columns, although 'sort' " +
                           "was given as False.")
            sort_index = True
        dfs = [df.set_index(column_to_sort) for df in dfs]

    # --- concat
    df = pd.concat(dfs, axis=0, **kwargs)

    # --- unsorted index and columns
    output_index = df.index.drop_duplicates()

    # --- drop rows with duplicated indices
    if keep == "first":
        df = df.groupby(df.index).first()
    elif keep == "last":
        df = df.groupby(df.index).last()
    elif keep != "all":
        raise ValueError("This value %s is unknown to 'keep'" % keep)

    # --- sorted index and reindex columns
    if sort_index:
        if index_time_str:
            # sort by parsed timestamps, then format back to strings
            dates = [dt.datetime.strptime(ts, index_time_str) for ts in df.index]
            dates.sort()
            output_index = [dt.datetime.strftime(ts, index_time_str) for ts in dates]
            if keep == "all":
                logger.warning("If 'index_time_str' is not None, keep cannot be 'all' but are " +
                               "assumed as 'first'.")
        else:
            output_index = sorted(df.index)

    # --- reindex as required (reindex_axis was removed after pandas 0.21)
    if keep != "all":
        if version.parse(pd.__version__) >= version.parse("0.21.0"):
            df = df.reindex(output_index)
        else:
            df = df.reindex_axis(output_index)

    if sort_column:
        if version.parse(pd.__version__) >= version.parse("0.21.0"):
            df = df.reindex(columns=sorted(df.columns))
        else:
            df = df.reindex_axis(sorted(df.columns), axis=1)

    # --- get back column_to_sort as column from index
    if column_to_sort is not None:
        df.reset_index(inplace=True)

    return df


def get_unique_duplicated_dict(df, subset=None, only_dupl_entries=False):
    """ Returns a dict which keys are the indices of unique row of the dataframe 'df'. The values
        of the dict are the indices which are duplicated to each key index.
        This is a wrapper function of _get_unique_duplicated_dict() to consider only_dupl_entries.
    """
    is_dupl = df.duplicated(subset=subset, keep=False)
    uniq_dupl_dict = _get_unique_duplicated_dict(df[is_dupl], subset)

    if not only_dupl_entries:
        # non-duplicated rows map to empty lists
        others = df.index[~is_dupl]
        uniq_empties = {o: [] for o in others}

        # python 3.5+
        # uniq_dupl_dict = {**uniq_dupl_dict, **uniq_empties}

        # python 3.4
        for k, v in uniq_empties.items():
            uniq_dupl_dict[k] = v

    return uniq_dupl_dict


def _get_unique_duplicated_dict(df, subset=None):
    """ Returns a dict which keys are the indices of unique row of the dataframe 'df'. The values
        of the dict are the indices which are duplicated to each key index.
    """
    subset = subset or df.columns
    dupl = df.index[df.duplicated(subset=subset)]
    uniq = df.index[~df.duplicated(subset=subset)]

    uniq_dupl_dict = {}

    # nan_str only needed since compare_arrays() using old numpy versions connected to python 3.4
    # don't detect reliably nans as equal
    nan_str = "nan"
    while nan_str in df.values:
        nan_str += "n"

    for uni in uniq:
        # compare this unique row (broadcast to len(dupl)) against all duplicates
        do_dupl_fit = compare_arrays(
            np.repeat(df.loc[uni, subset].fillna(nan_str).values.reshape(1, -1),
                      len(dupl), axis=0),
            df.loc[dupl, subset].fillna(nan_str).values).all(axis=1)
        uniq_dupl_dict[uni] = list(dupl[do_dupl_fit])
    return uniq_dupl_dict


def reindex_dict_dataframes(dataframes_dict):
    """ Set new continuous index starting at zero for every DataFrame in the dict.
    """
    for key in dataframes_dict.keys():
        if isinstance(dataframes_dict[key], pd.DataFrame) and key != "StudyCases":
            dataframes_dict[key].index = list(range(dataframes_dict[key].shape[0]))


def ensure_full_column_data_existence(dict_, tablename, column):
    """
    Ensures that the column of a dict's DataFrame is fully filled with information. If there are
    missing data, it will be filled up by name tablename+index
    """
    missing_data = dict_[tablename].index[dict_[tablename][column].isnull()]
    # fill missing data by tablename+index, e.g. "Bus 2"
    dict_[tablename][column].loc[missing_data] = [tablename + ' %s' % n for n in (
        missing_data.values + 1)]
    return dict_[tablename]


def avoid_duplicates_in_column(dict_, tablename, column):
    """ Avoids duplicates in given column (as type string) of a dict's DataFrame """
    query = dict_[tablename][column].duplicated(keep=False)
    for double in dict_[tablename][column].loc[query].unique():
        idx = dict_[tablename][column].index[dict_[tablename][column] == double]
        # disambiguate each duplicate with a counter suffix: "name (0)", "name (1)", ...
        dict_[tablename][column].loc[idx] = [double + " (%i)" % i for i in range(len(idx))]
    if sum(dict_[tablename][column].duplicated()):
        raise ValueError("The renaming by 'double + int' was not appropriate to remove all " +
                         "duplicates.")


def append_str_by_underline_count(str_series, append_only_duplicates=False, counting_start=1,
                                  reserved_strings=None):
    """
    Returns a Series of appended strings and a set of all strings which were appended or are set
    as reserved by input.

    INPUT:
        **str_series** (Series with string values) - strings to be appended by "_" + a number

    OPTIONAL:
        **append_only_duplicates** (bool, False) - If False, all strings will be appended. If
        True, only duplicated strings will be appended.

        **counting_start** (int, 1) - Integer to start appending with

        **reserved_strings** (iterable, None) - strings which are not allowed in str_series and
        must be appended.

    OUTPUT:
        **appended_strings** (Series with string values) - appended strings

        **reserved_strings** (set) - all reserved_strings from input and all strings which were
        appended
    """
    # --- initalizations
    # ensure only unique values in reserved_strings:
    reserved_strings = pd.Series(sorted(set(reserved_strings))) if reserved_strings is not None \
        else pd.Series()
    count = counting_start

    # --- do first append
    # concatenate reserved_strings and str_series (which should be appended by "_%i")
    # must be in this order (first reserved_strings) to append only the str_series (keep='first')
    if not append_only_duplicates:
        series = str_series + "_%i" % count
        series = pd.concat([reserved_strings, series], ignore_index=True)
        all_dupl = pd.Series([True]*len(series))
    else:
        series = pd.concat([reserved_strings, str_series], ignore_index=True)
        all_dupl = pd.Series([True]*len(reserved_strings)+[False]*len(str_series))
        # NOTE(review): the original indentation was lost; the statement
        # nesting below is a reconstruction — confirm against upstream.
        dupl = series.duplicated()
        all_dupl |= dupl
        series.loc[dupl] += "_%i" % count
    dupl = series.duplicated()
    all_dupl |= dupl

    # NOTE(review): chunk is truncated here — the rest of this function is
    # outside the visible source.
    # --- append as much as
        TypeError('ClassName must be a class name string')
        # NOTE(review): this chunk starts mid-class — the `raise` introducing
        # the TypeError above and the enclosing __init__ signature are
        # outside the visible source.
        self.name = name
        self.class_name = class_name

    def to_json(self):
        return {self.name: self.class_name}


class Operation(ToJson, ToCodeString):
    """Base class for Gaffer operations; handles view(s) and options JSON."""

    def __init__(self,
                 _class_name,
                 view=None,
                 options=None,
                 views=None):
        self._class_name = _class_name

        # a list passed as `view` is really a list of views
        if view is not None and isinstance(view, list):
            views = view
            view = None

        if view is not None and isinstance(view, dict):
            view = JsonConverter.from_json(view, View)
        self.view = view

        self.views = None
        if views is not None and isinstance(views, list):
            self.views = []
            for view in views:
                if not isinstance(view, View):
                    view = JsonConverter.from_json(view, View)
                self.views.append(view)

        self.options = options

    def to_json(self):
        operation = {'class': self._class_name}
        if self.options is not None:
            operation['options'] = self.options
        if self.view is not None:
            operation['view'] = self.view.to_json()
        if self.views is not None:
            operation['views'] = []
            for view in self.views:
                operation['views'].append(view.to_json())
        return operation


class Match(ToJson, ToCodeString):
    """Base class for join match implementations; serialises the class name."""

    def __init__(self, _class_name):
        self._class_name = _class_name

    def to_json(self):
        return {
            'class': self._class_name
        }


class ElementMatch(Match):
    CLASS = "uk.gov.gchq.gaffer.store.operation.handler.join.match.ElementMatch"

    def __init__(self, group_by_properties=None):
        super().__init__(_class_name=self.CLASS)
        self.group_by_properties = group_by_properties

    def to_json(self):
        match_json = super().to_json()
        if (self.group_by_properties is not None):
            match_json['groupByProperties'] = self.group_by_properties
        return match_json


class KeyFunctionMatch(Match):
    CLASS = "uk.gov.gchq.gaffer.store.operation.handler.join.match.KeyFunctionMatch"

    def __init__(self, first_key_function=None, second_key_function=None):
        super().__init__(_class_name=self.CLASS)

        # NOTE(review): if a key function is None, from_json(None, ...) is
        # still attempted — confirm JsonConverter tolerates None.
        if not isinstance(first_key_function, gaffer_functions.Function):
            self.first_key_function = JsonConverter.from_json(first_key_function,
                                                              class_obj=gaffer_functions.Function)
        else:
            self.first_key_function = first_key_function

        if not isinstance(second_key_function, gaffer_functions.Function):
            self.second_key_function = JsonConverter.from_json(second_key_function,
                                                               class_obj=gaffer_functions.Function)
        else:
            self.second_key_function = second_key_function

    def to_json(self):
        match_json = super().to_json()
        if self.first_key_function is not None:
            match_json['firstKeyFunction'] = self.first_key_function.to_json()
        if self.second_key_function is not None:
            match_json['secondKeyFunction'] = self.second_key_function.to_json()
        return match_json


class OperationChain(Operation):
    CLASS = "uk.gov.gchq.gaffer.operation.OperationChain"

    def __init__(self, operations, options=None):
        super().__init__(
            _class_name=self.CLASS,
            options=options)
        self._class_name = self.CLASS
        self.operations = operations

    def to_json(self):
        operation_chain_json = super().to_json()
        operations_json = []
        for operation in self.operations:
            # operations may be provided pre-serialised (plain dicts)
            if isinstance(operation, ToJson):
                operations_json.append(operation.to_json())
            else:
                operations_json.append(operation)
        operation_chain_json['operations'] = operations_json
        return operation_chain_json


class OperationChainDAO(OperationChain):
    CLASS = "uk.gov.gchq.gaffer.operation.OperationChainDAO"

    def __init__(self, operations, options=None):
        super().__init__(operations=operations, options=options)

    def to_json(self):
        operation_chain_json = super().to_json()
        # the DAO form omits the 'class' discriminator
        operation_chain_json.pop('class', None)
        return operation_chain_json


class GetTraits(Operation):
    CLASS = 'uk.gov.gchq.gaffer.store.operation.GetTraits'

    def __init__(self, current_traits, options=None):
        super().__init__(
            _class_name=self.CLASS,
            options=options)
        self.current_traits = current_traits

    def to_json(self):
        operation = super().to_json()
        operation['currentTraits'] = self.current_traits
        return operation


class AddElements(Operation):
    """
    This class defines a Gaffer Add Operation.
    """
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.add.AddElements'

    def __init__(self,
                 input=None,
                 skip_invalid_elements=None,
                 validate=None,
                 options=None):
        super().__init__(
            _class_name=self.CLASS,
            options=options)
        self.input = input
        self.skip_invalid_elements = skip_invalid_elements
        self.validate = validate

    def to_json(self):
        operation = super().to_json()
        if self.skip_invalid_elements is not None:
            operation['skipInvalidElements'] = self.skip_invalid_elements
        if self.validate is not None:
            operation['validate'] = self.validate
        if self.input is not None:
            elements_json = []
            for element in self.input:
                elements_json.append(element.to_json())
            operation['input'] = elements_json
        return operation


class GenerateElements(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.generate.GenerateElements'

    def __init__(self, element_generator, input=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            options=options)
        # accept either an ElementGenerator instance or its JSON dict form
        if not isinstance(element_generator, gaffer_functions.ElementGenerator):
            element_generator = gaffer_functions.ElementGenerator(
                element_generator['class'], element_generator)
        self.element_generator = element_generator
        self.input = input

    def to_json(self):
        operation = super().to_json()
        if self.input is not None:
            input_json = []
            for item in self.input:
                if isinstance(item, ToJson):
                    input_json.append(item.to_json())
                else:
                    input_json.append(item)
            operation['input'] = input_json
        operation['elementGenerator'] = self.element_generator.to_json()
        return operation


class GenerateObjects(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.generate.GenerateObjects'

    def __init__(self, element_generator, input=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            options=options)
        if not isinstance(element_generator, gaffer_functions.ElementGenerator):
            element_generator = gaffer_functions.ElementGenerator(
                element_generator['class'], element_generator)
        self.element_generator = element_generator
        self.input = input

    def to_json(self):
        operation = super().to_json()
        if self.input is not None:
            elements_json = []
            for element in self.input:
                if isinstance(element, ToJson):
                    elements_json.append(element.to_json())
                else:
                    elements_json.append(element)
            operation['input'] = elements_json
        operation['elementGenerator'] = self.element_generator.to_json()
        return operation


class Validate(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.Validate'

    def __init__(self, validate, skip_invalid_elements=True, options=None):
        super().__init__(
            _class_name=self.CLASS,
            options=options)
        self.validate = validate
        self.skip_invalid_elements = skip_invalid_elements

    def to_json(self):
        operation = super().to_json()
        operation['validate'] = self.validate
        operation['skipInvalidElements'] = self.skip_invalid_elements
        return operation


class ExportToGafferResultCache(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache'

    def __init__(self, key=None, op_auths=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        if not isinstance(key, str) and key is not None:
            raise TypeError('key must be a string')
        self.key = key
        self.op_auths = op_auths

    def to_json(self):
        operation = super().to_json()
        if self.key is not None:
            operation['key'] = self.key
        if self.op_auths is not None:
            operation['opAuths'] = self.op_auths
        return operation


class GetGafferResultCacheExport(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.resultcache.GetGafferResultCacheExport'

    def __init__(self, job_id=None, key=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.job_id = job_id
        self.key = key

    def to_json(self):
        operation = super().to_json()
        if self.job_id is not None:
            operation['jobId'] = self.job_id
        if self.key is not None:
            operation['key'] = self.key
        return operation


class ExportToSet(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet'

    def __init__(self, key=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        if not isinstance(key, str) and key is not None:
            raise TypeError('key must be a string')
        self.key = key

    def to_json(self):
        operation = super().to_json()
        if self.key is not None:
            operation['key'] = self.key
        return operation


class GetSetExport(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.set.GetSetExport'

    def __init__(self, job_id=None, key=None, start=None, end=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.job_id = job_id
        self.key = key
        self.start = start
        self.end = end

    def to_json(self):
        operation = super().to_json()
        if self.job_id is not None:
            operation['jobId'] = self.job_id
        if self.key is not None:
            operation['key'] = self.key
        if self.start is not None:
            operation['start'] = self.start
        if self.end is not None:
            operation['end'] = self.end
        return operation


class GetExports(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.GetExports'

    def __init__(self, get_exports=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.get_exports = []
        # NOTE(review): default get_exports=None makes this loop raise
        # TypeError when the argument is omitted — confirm intended.
        for export in get_exports:
            if not isinstance(export, Operation):
                export = JsonConverter.from_json(export)
            self.get_exports.append(export)

    def to_json(self):
        operation = super().to_json()
        if self.get_exports is not None:
            exports = []
            for export in self.get_exports:
                exports.append(export.to_json())
            operation['getExports'] = exports
        return operation


class GetJobDetails(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.job.GetJobDetails'

    def __init__(self, job_id=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.job_id = job_id

    def to_json(self):
        operation = super().to_json()
        if self.job_id is not None:
            operation['jobId'] = self.job_id
        return operation


class GetAllJobDetails(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.job.GetAllJobDetails'

    def __init__(self, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)

    def to_json(self):
        operation = super().to_json()
        return operation


class GetJobResults(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.job.GetJobResults'

    def __init__(self, job_id, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.job_id = job_id

    def to_json(self):
        operation = super().to_json()
        operation['jobId'] = self.job_id
        return operation


class CancelScheduledJob(Operation):
    CLASS = "uk.gov.gchq.gaffer.operation.impl.job.CancelScheduledJob"

    def __init__(self, job_id):
        super().__init__(_class_name=self.CLASS)
        self.job_id = job_id

    def to_json(self):
        operation_json = super().to_json()
        if self.job_id is not None:
            operation_json['jobId'] = self.job_id
        return operation_json


class SplitStoreFromFile(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.SplitStoreFromFile'

    def __init__(self, input_path, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.input_path = input_path

    def to_json(self):
        operation = super().to_json()
        operation['inputPath'] = self.input_path
        return operation


class SplitStoreFromIterable(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.SplitStoreFromIterable'

    def __init__(self, input=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.input = input

    def to_json(self):
        operation = super().to_json()
        if self.input is not None:
            operation['input'] = self.input
        return operation


class SampleElementsForSplitPoints(Operation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.SampleElementsForSplitPoints'

    def __init__(self, input=None, num_splits=None, proportion_to_sample=None, options=None):
        super().__init__(
            _class_name=self.CLASS,
            view=None,
            options=options)
        self.input = input
        self.num_splits = num_splits
        self.proportion_to_sample = proportion_to_sample

    def to_json(self):
        operation = super().to_json()
        if self.input is not None:
            elements_json = []
            for element in self.input:
                elements_json.append(element.to_json())
            operation['input'] = elements_json
        if self.num_splits is not None:
            operation['numSplits'] = self.num_splits
        if self.proportion_to_sample is not None:
            operation['proportionToSample'] = self.proportion_to_sample
        return operation


class GetOperation(Operation):
    """Base class for get-style operations taking seeds and view filters."""

    def __init__(self,
                 _class_name,
                 input=None,
                 view=None,
                 directed_type=None,
                 include_incoming_out_going=None,
                 # deprecated, use seed_matching instead
                 seed_matching_type=None,
                 seed_matching=None,
                 options=None):
        super().__init__(
            _class_name=_class_name,
            view=view,
            options=options)

        if not isinstance(_class_name, str):
            raise TypeError(
                'ClassName must be the operation class name as a string')

        self.input = input
        self.directed_type = directed_type
        self.include_incoming_out_going = include_incoming_out_going
        # seed_matching takes precedence over the deprecated seed_matching_type
        self.seed_matching = seed_matching_type
        if seed_matching is not None:
            self.seed_matching = seed_matching

    def to_json(self):
        operation = super().to_json()
        if self.input is not None:
            json_seeds = []
            if isinstance(self.input, list):
                for seed in self.input:
                    if isinstance(seed, ElementSeed):
                        json_seeds.append(seed.to_json())
                    else:
                        json_seeds.append(EntitySeed(seed).to_json())
            else:
                if isinstance(self.input, ElementSeed):
                    json_seeds.append(self.input.to_json())
                else:
                    json_seeds.append(EntitySeed(self.input).to_json())
            operation['input'] = json_seeds

        if self.seed_matching is not None:
            operation['seedMatching'] = self.seed_matching
        # NOTE(review): original indentation was lost; as reconstructed from
        # token order, 'directedType' is only emitted when
        # include_incoming_out_going is set — confirm this nesting upstream.
        if self.include_incoming_out_going is not None:
            if self.directed_type is not None:
                operation['directedType'] = self.directed_type
            operation[
                'includeIncomingOutGoing'] = self.include_incoming_out_going
        return operation


class GetElements(GetOperation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.get.GetElements'

    def __init__(self,
                 input=None,
                 view=None,
                 directed_type=None,
                 include_incoming_out_going=None,
                 seed_matching_type=None,
                 # deprecated, use seed_matching instead
                 seed_matching=None,
                 options=None):
        super().__init__(
            _class_name=self.CLASS,
            input=input,
            view=view,
            directed_type=directed_type,
            include_incoming_out_going=include_incoming_out_going,
            seed_matching_type=seed_matching_type,
            seed_matching=seed_matching,
            options=options)


class GetFromEndpoint(Operation):
    CLASS = "uk.gov.gchq.gaffer.operation.impl.get.GetFromEndpoint"

    def __init__(self, endpoint, options=None):
        super().__init__(_class_name=self.CLASS, options=options)
        self.endpoint = endpoint

    def to_json(self):
        operation_json = super().to_json()
        if self.endpoint is not None:
            operation_json['endpoint'] = self.endpoint
        return operation_json


class GetAdjacentIds(GetOperation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds'

    def __init__(self,
                 input=None,
                 view=None,
                 include_incoming_out_going=None,
                 options=None):
        super().__init__(
            _class_name=self.CLASS,
            input=input,
            view=view,
            directed_type=None,
            include_incoming_out_going=include_incoming_out_going,
            seed_matching_type=None,
            seed_matching=None,
            options=options)


class GetAllElements(GetOperation):
    CLASS = 'uk.gov.gchq.gaffer.operation.impl.get.GetAllElements'

    def __init__(self,
                 view=None,
                 directed_type=None,
                 options=None):
        super().__init__(
            _class_name=self.CLASS,
            input=None,
            view=view,
            directed_type=directed_type,
            include_incoming_out_going=None,
            options=options)


class NamedOperation(GetOperation):
    CLASS = 'uk.gov.gchq.gaffer.named.operation.NamedOperation'

    def __init__(self,
                 operation_name,
                 input=None,
                 view=None,
                 parameters=None,
                 options=None):
        super().__init__(
            _class_name=self.CLASS,
            input=input,
            view=view,
            directed_type=None,
            include_incoming_out_going=None,
            seed_matching_type=None,
            seed_matching=None,
            options=options)
        self.operation_name = operation_name
        self.parameters = parameters

    def to_json(self):
        operation = super().to_json()
        operation['operationName'] = self.operation_name
        if self.parameters is not None:
            operation['parameters'] = self.parameters
        return operation


class AddNamedOperation(Operation):
    CLASS = 'uk.gov.gchq.gaffer.named.operation.AddNamedOperation'

    def __init__(self,
                 operation_chain,
                 operation_name,
                 description=None,
                 read_access_roles=None,
                 write_access_roles=None,
                 overwrite_flag=None,
                 parameters=None,
                 options=None,
                 score=None):
        super().__init__(
            _class_name=self.CLASS,
            options=options)
        # NOTE(review): chunk is truncated here — the rest of this __init__
        # (and any further methods) is outside the visible source.
        if isinstance(operation_chain, OperationChain):
            if not isinstance(operation_chain, OperationChainDAO):
                operation_chain =
            # NOTE(review): this chunk starts mid-handler — the assignment
            # target (`value =`) and the preceding addressing-mode branches of
            # this compare instruction are outside the visible source.
            self._get_value_at_indirect_y()
            cycles = 5
        else:
            raise RuntimeError(f"Unknown op code: {op_code}")

        # compare is a subtraction whose result is discarded; only flags change
        result = (self._a - value)&0xFF

        self._carry = self._a >= value
        self._zero = self._a == value
        self._negative = result&0x80 > 0

        self._system.consume_cycles(cycles)

    def CPX(self, op_code):
        # Compare Memory and Index X
        # X - M                        N Z C I D V
        #                              + + + - - -
        # addressing    assembler    opc  bytes  cyles
        # --------------------------------------------
        # immediate     CPX #oper    E0   2      2
        # zeropage      CPX oper     E4   2      3
        # absolute      CPX oper     EC   3      4
        value = None
        cycles = None

        if (op_code == 0xE0):
            # immediate
            value = self._get_next_byte()
            cycles = 2
        elif (op_code == 0xE4):
            # zeropage
            value = self._get_value_at_zeropage()
            cycles = 3
        elif (op_code == 0xEC):
            # absolute
            value = self._get_value_at_absolute()
            cycles = 4
        else:
            raise RuntimeError(f"Unknown op code: {op_code}")

        result = (self._x - value)&0xFF

        self._carry = self._x >= value
        self._zero = self._x == value
        self._negative = result&0x80 > 0

        self._system.consume_cycles(cycles)

    def CPY(self, op_code):
        # Compare Memory and Index Y
        # Y - M                        N Z C I D V
        #                              + + + - - -
        # addressing    assembler    opc  bytes  cyles
        # --------------------------------------------
        # immediate     CPY #oper    C0   2      2
        # zeropage      CPY oper     C4   2      3
        # absolute      CPY oper     CC   3      4
        value = None
        cycles = None

        if (op_code == 0xC0):
            # immediate
            value = self._get_next_byte()
            cycles = 2
        elif (op_code == 0xC4):
            # zeropage
            value = self._get_value_at_zeropage()
            cycles = 3
        elif (op_code == 0xCC):
            # absolute
            value = self._get_value_at_absolute()
            cycles = 4
        else:
            raise RuntimeError(f"Unknown op code: {op_code}")

        result = (self._y - value)&0xFF

        self._carry = self._y >= value
        self._zero = self._y == value
        self._negative = result&0x80 > 0

        self._system.consume_cycles(cycles)

    def DEC(self, op_code):
        # Decrement Memory by One
        # M - 1 -> M                   N Z C I D V
        #                              + + - - - -
        # addressing    assembler    opc  bytes  cyles
        # --------------------------------------------
        # zeropage      DEC oper     C6   2      5
        # zeropage,X    DEC oper,X   D6   2      6
        # absolute      DEC oper     CE   3      3
        # absolute,X    DEC oper,X   DE   3      7
        address = None
        cycles = None

        if (op_code == 0xC6):
            # zeropage
            address = self._get_address_at_zeropage()
            cycles = 5
        elif (op_code == 0xD6):
            # zeropage,X
            address = self._get_address_at_zeropage_x()
            cycles = 6
        elif (op_code == 0xCE):
            # absolute
            address = self._get_address_at_absolute()
            # NOTE(review): DEC absolute takes 6 cycles on the MOS 6502;
            # 3 here looks wrong — confirm against the datasheet.
            cycles = 3
        elif (op_code == 0xDE):
            # absolute,X
            address = self._get_address_at_absolute_x()
            cycles = 7
        else:
            raise RuntimeError(f"Unknown op code: {op_code}")

        # read-modify-write: decrement with 8-bit wraparound
        value = (self._system.mmu.read_byte(address)-1)&0xFF

        self._negative = value&0x80 > 1
        self._zero = value == 0

        self._system.mmu.write_byte(address, value)
        self._system.consume_cycles(cycles)

    def DEX(self, op_code):
        # Decrement Index X by One
        # X - 1 -> X                   N Z C I D V
        #                              + + - - - -
        # addressing    assembler    opc  bytes  cyles
        # --------------------------------------------
        # implied       DEC          CA   1      2
        self._x = (self._x - 1)&0xFF

        self._negative = self._x&0x80 > 1
        self._zero = self._x == 0

        # NOTE(review): `cycles` is assigned but never used — unlike the
        # other handlers, no self._system.consume_cycles(cycles) call is
        # made, so DEX consumes no cycles. Likely a bug; confirm.
        cycles = 2

    def DEY(self, op_code):
        # Decrement Index Y by One
        # Y - 1 -> Y                   N Z C I D V
        #                              + + - - - -
        # addressing    assembler    opc  bytes  cyles
        # --------------------------------------------
        # implied       DEC          88   1      2
        self._y = (self._y - 1)&0xFF

        self._negative = self._y&0x80 > 1
        self._zero = self._y == 0

        # NOTE(review): same issue as DEX — `cycles` is dead and
        # consume_cycles is never called.
        cycles = 2

    def EOR(self, op_code):
        # Exclusive-OR Memory with Accumulator
        # A EOR M -> A                 N Z C I D V
        #                              + + - - - -
        # addressing    assembler    opc  bytes  cyles
        # --------------------------------------------
        # immediate     EOR #oper    49   2      2
        # zeropage      EOR oper     45   2      3
        # zeropage,X    EOR oper,X   55   2      4
        # absolute      EOR oper     4D   3      4
        # absolute,X    EOR oper,X   5D   3      4*
        # absolute,Y    EOR oper,Y   59   3      4*
        # (indirect,X)  EOR (oper,X) 41   2      6
        # (indirect),Y  EOR (oper),Y 51   2      5*
        value = None
        cycles = None

        if (op_code == 0x49):
            # immediate
            value = self._get_next_byte()
            cycles = 2
        elif (op_code == 0x45):
            # zeropage
            value = self._get_value_at_zeropage()
            cycles = 3
        elif (op_code == 0x55):
            # zeropage,X
            value = self._get_value_at_zeropage_x()
            cycles = 4
        # NOTE(review): chunk is truncated here — the remaining addressing
        # modes of EOR are outside the visible source.
        elif (op_code ==
0x4D): # absolute value = self._get_value_at_absolute() cycles = 4 elif (op_code == 0x5D): # absolute,X value = self._get_value_at_absolute_x() cycles = 4 elif (op_code == 0x59): # absolute,Y value = self._get_value_at_absolute_y() cycles = 4 elif (op_code == 0x41): # (indirect,X) value = self._get_value_at_indirect_x() cycles = 6 elif (op_code == 0x51): # (indirect),Y value = self._get_value_at_indirect_y() cycles = 5 else: raise RuntimeError(f"Unknown op code: {op_code}") self._a ^= value self._negative = (self._a>>7) == 1 self._zero = self._a == 0 self._system.consume_cycles(cycles) def INC(self, op_code): # Increment Memory by One # M + 1 -> M N Z C I D V # + + - - - - # addressing assembler opc bytes cyles # -------------------------------------------- # zeropage INC oper E6 2 5 # zeropage,X INC oper,X F6 2 6 # absolute INC oper EE 3 6 # absolute,X INC oper,X FE 3 7 address = None cycles = None if (op_code == 0xE6): # zeropage address = self._get_address_at_zeropage() cycles = 5 elif (op_code == 0xF6): # zeropage,X address = self._get_address_at_zeropage_x() cycles = 6 elif (op_code == 0xEE): # absolute address = self._get_address_at_absolute() cycles = 6 elif (op_code == 0xFE): # absolute,X address = self._get_address_at_absolute_x() cycles = 7 else: raise RuntimeError(f"Unknown op code: {op_code}") value = (self._system.mmu.read_byte(address)+1)&0xFF self._negative = (value>>7) == 1 self._zero = value == 0 self._system.mmu.write_byte(address, value) self._system.consume_cycles(cycles) def INX(self, op_code): # Increment Index X by One # X + 1 -> X N Z C I D V # + + - - - - # addressing assembler opc bytes cyles # -------------------------------------------- # implied INX E8 1 2 self._x = (self._x + 1)&0xFF self._negative = self._x&0x80 > 0 self._zero = self._x == 0 cycles = 2 def INY(self, op_code): # Increment Index Y by One # Y + 1 -> Y N Z C I D V # + + - - - - # addressing assembler opc bytes cyles # -------------------------------------------- # implied 
INY C8 1 2 self._y = (self._y + 1)&0xFF self._negative = self._y&0x80 > 0 self._zero = self._y == 0 cycles = 2 def JMP(self, op_code): # Jump to New Location # (PC+1) -> PCL N Z C I D V # (PC+2) -> PCH - - - - - - # addressing assembler opc bytes cyles # -------------------------------------------- # absolute JMP oper 4C 3 3 # indirect JMP (oper) 6C 3 5 address = None cycles = None if (op_code == 0x4C): # absolute pcl = self._system.mmu.read_byte(self._pc) pch = self._system.mmu.read_byte(self._pc+1) address = (pch<<8)+pcl cycles = 3 elif (op_code == 0x6C): # indirect address = self._get_address_at_indirect() cycles = 5 self._pc = address self._system.consume_cycles(cycles) def JSR(self, op_code): # Jump to New Location Saving Return Address # push (PC+2), N Z C I D V # (PC+1) -> PCL - - - - - - # (PC+2) -> PCH # addressing assembler opc bytes cyles # -------------------------------------------- # absolute JSR oper 20 3 6 next_address = self._pc+1 self.push(next_address>>8) # HI byte self.push(next_address&0xFF) # LO byte self._pc = self._get_address_at_absolute() cycles = 6 def LDA(self, op_code): # Load Accumulator with Memory # M -> A N Z C I D V # + + - - - - # addressing assembler opc bytes cyles # -------------------------------------------- # immediate LDA #oper A9 2 2 # zeropage LDA oper A5 2 3 # zeropage,X LDA oper,X B5 2 4 # absolute LDA oper AD 3 4 # absolute,X LDA oper,X BD 3 4* # absolute,Y LDA oper,Y B9 3 4* # (indirect,X) LDA (oper,X) A1 2 6 # (indirect),Y LDA (oper),Y B1 2 5* value = None cycles = None if (op_code == 0xA9): # immedidate value = self._get_next_byte() cycles = 2
# Auto-generated variable/indicator linking constraints, rewritten as loops.
# Each constraint bounds a continuous variable by its controlling binary:
#   c2709..c2851:  x_{k-1} - b3019 <= 0   for k in 2709..2851
#   c2852..c2890:  x_{k-1} - b3020 <= 0   for k in 2852..2890
# NOTE(review): the edges of this generated section were truncated in the
# source; the loop bounds cover exactly the constraints fully visible here
# (c2709 through c2890) — verify against the original model generator.
for _i in range(2709, 2852):
    setattr(m, 'c%d' % _i,
            Constraint(expr=getattr(m, 'x%d' % (_i - 1)) - m.b3019 <= 0))
for _i in range(2852, 2891):
    setattr(m, 'c%d' % _i,
            Constraint(expr=getattr(m, 'x%d' % (_i - 1)) - m.b3020 <= 0))
round_bound if rounds != rounds_bound: # stack: i, rounds o.extend( _compile_to_assembly( rounds_bound, withargs, existing_labels, break_dest, height + 2 ) ) # stack: i, rounds, rounds_bound # assert rounds <= rounds_bound # TODO this runtime assertion should never fail for # internally generated repeats. # maybe drop it or jump to 0xFE o.extend(["DUP2", "GT", "_sym_revert0", "JUMPI"]) # stack: i, rounds # if (0 == rounds) { goto end_dest; } o.extend(["DUP1", "ISZERO", exit_dest, "JUMPI"]) # stack: start, rounds if start.value != 0: o.extend(["DUP2", "ADD"]) # stack: i, exit_i o.extend(["SWAP1"]) if i_name.value in withargs: raise CompilerPanic(f"shadowed loop variable {i_name}") withargs[i_name.value] = height + 1 # stack: exit_i, i o.extend([entry_dest, "JUMPDEST"]) o.extend( _compile_to_assembly( body, withargs, existing_labels, (exit_dest, continue_dest, height + 2), height + 2 ) ) del withargs[i_name.value] # clean up any stack items left by body o.extend(["POP"] * body.valency) # stack: exit_i, i # increment i: o.extend([continue_dest, "JUMPDEST", "PUSH1", 1, "ADD"]) # stack: exit_i, i+1 (new_i) # if (exit_i != new_i) { goto entry_dest } o.extend(["DUP2", "DUP2", "XOR", entry_dest, "JUMPI"]) o.extend([exit_dest, "JUMPDEST", "POP", "POP"]) return o # Continue to the next iteration of the for loop elif code.value == "continue": if not break_dest: raise CompilerPanic("Invalid break") dest, continue_dest, break_height = break_dest return [continue_dest, "JUMP"] # Break from inside a for loop elif code.value == "break": if not break_dest: raise CompilerPanic("Invalid break") dest, continue_dest, break_height = break_dest n_local_vars = height - break_height # clean up any stack items declared in the loop body cleanup_local_vars = ["POP"] * n_local_vars return cleanup_local_vars + [dest, "JUMP"] # Break from inside one or more for loops prior to a return statement inside the loop elif code.value == "cleanup_repeat": if not break_dest: raise CompilerPanic("Invalid 
break") # clean up local vars and internal loop vars _, _, break_height = break_dest # except don't pop label params if "return_buffer" in withargs: break_height -= 1 if "return_pc" in withargs: break_height -= 1 return ["POP"] * break_height # With statements elif code.value == "with": o = [] o.extend(_compile_to_assembly(code.args[1], withargs, existing_labels, break_dest, height)) old = withargs.get(code.args[0].value, None) withargs[code.args[0].value] = height o.extend( _compile_to_assembly(code.args[2], withargs, existing_labels, break_dest, height + 1) ) if code.args[2].valency: o.extend(["SWAP1", "POP"]) else: o.extend(["POP"]) if old is not None: withargs[code.args[0].value] = old else: del withargs[code.args[0].value] return o # runtime statement (used to deploy runtime code) elif code.value == "deploy": memsize = code.args[0].value # used later to calculate _mem_deploy_start ir = code.args[1] padding = code.args[2].value assert isinstance(memsize, int), "non-int memsize" assert isinstance(padding, int), "non-int padding" begincode = mksymbol("runtime_begin") subcode = _compile_to_assembly(ir) o = [] # COPY the code to memory for deploy o.extend(["_sym_subcode_size", begincode, "_mem_deploy_start", "CODECOPY"]) # calculate the len of runtime code o.extend(["_OFST", "_sym_subcode_size", padding]) # stack: len o.extend(["_mem_deploy_start"]) # stack: len mem_ofst o.extend(["RETURN"]) # since the asm data structures are very primitive, to make sure # assembly_to_evm is able to calculate data offsets correctly, # we pass the memsize via magic opcodes to the subcode subcode = [f"_DEPLOY_MEM_OFST_{memsize}"] + subcode # append the runtime code after the ctor code o.extend([begincode, "BLANK"]) # `append(...)` call here is intentional. # each sublist is essentially its own program with its # own symbols. 
# in the later step when the "ir" block compiled to EVM, # symbols in subcode are resolved to position from start of # runtime-code (instead of position from start of bytecode). o.append(subcode) return o # Seq (used to piece together multiple statements) elif code.value == "seq": o = [] for arg in code.args: o.extend(_compile_to_assembly(arg, withargs, existing_labels, break_dest, height)) if arg.valency == 1 and arg != code.args[-1]: o.append("POP") return o # Seq without popping. # Assure (if false, invalid opcode) elif code.value == "assert_unreachable": o = _compile_to_assembly(code.args[0], withargs, existing_labels, break_dest, height) end_symbol = mksymbol("reachable") o.extend([end_symbol, "JUMPI", "INVALID", end_symbol, "JUMPDEST"]) return o # Assert (if false, exit) elif code.value == "assert": o = _compile_to_assembly(code.args[0], withargs, existing_labels, break_dest, height) o.extend(["ISZERO"]) o.extend(_assert_false()) return o # SHA3 a single value elif code.value == "sha3_32": o = _compile_to_assembly(code.args[0], withargs, existing_labels, break_dest, height) o.extend( [ "PUSH1", MemoryPositions.FREE_VAR_SPACE, "MSTORE", "PUSH1", 32, "PUSH1", MemoryPositions.FREE_VAR_SPACE, "SHA3", ] ) return o # SHA3 a 64 byte value elif code.value == "sha3_64": o = _compile_to_assembly(code.args[0], withargs, existing_labels, break_dest, height) o.extend(_compile_to_assembly(code.args[1], withargs, existing_labels, break_dest, height)) o.extend( [ "PUSH1", MemoryPositions.FREE_VAR_SPACE2, "MSTORE", "PUSH1", MemoryPositions.FREE_VAR_SPACE, "MSTORE", "PUSH1", 64, "PUSH1", MemoryPositions.FREE_VAR_SPACE, "SHA3", ] ) return o elif code.value == "select": # b ^ ((a ^ b) * cond) where cond is 1 or 0 # let t = a ^ b cond = code.args[0] a = code.args[1] b = code.args[2] o = [] o.extend(_compile_to_assembly(b, withargs, existing_labels, break_dest, height)) o.extend(_compile_to_assembly(a, withargs, existing_labels, break_dest, height + 1)) # stack: b a 
o.extend(["DUP2", "XOR"]) # stack: b t o.extend(_compile_to_assembly(cond, withargs, existing_labels, break_dest, height + 2)) # stack: b t cond o.extend(["MUL", "XOR"]) # stack: b ^ (t * cond) return o # <= operator elif code.value == "le": return _compile_to_assembly( IRnode.from_list(["iszero", ["gt", code.args[0], code.args[1]]]), withargs, existing_labels, break_dest, height, ) # >= operator elif code.value == "ge": return _compile_to_assembly( IRnode.from_list(["iszero", ["lt", code.args[0], code.args[1]]]), withargs, existing_labels, break_dest, height, ) # <= operator elif code.value == "sle": return _compile_to_assembly( IRnode.from_list(["iszero", ["sgt", code.args[0], code.args[1]]]), withargs, existing_labels, break_dest, height, ) # >= operator elif code.value == "sge": return _compile_to_assembly( IRnode.from_list(["iszero", ["slt", code.args[0], code.args[1]]]), withargs, existing_labels, break_dest, height, ) # != operator elif code.value == "ne": return _compile_to_assembly( IRnode.from_list(["iszero", ["eq", code.args[0], code.args[1]]]), withargs, existing_labels, break_dest, height, ) # e.g. 95 -> 96, 96 -> 96, 97 -> 128 elif code.value == "ceil32": # floor32(x) = x - x % 32 == x & 0b11..100000 == x & (~31) # ceil32(x) = floor32(x + 31) == (x + 31) & (~31) x = code.args[0] return _compile_to_assembly( IRnode.from_list(["and", ["add", x, 31], ["not", 31]]), withargs, existing_labels, break_dest, height, ) # jump to a symbol, and push variable # of arguments onto stack elif code.value == "goto": o = [] for i, c in enumerate(reversed(code.args[1:])): o.extend(_compile_to_assembly(c, withargs, existing_labels, break_dest, height + i)) o.extend(["_sym_" + str(code.args[0]), "JUMP"]) return o # push a literal symbol elif isinstance(code.value, str) and is_symbol(code.value): return [code.value] # set a symbol as a location. 
def note_line_num(line_number_map, item, pos):
    """Record the source position attached to assembly item at pc ``pos``.

    For ``Instruction`` items, stores (lineno, col_offset, end_lineno,
    end_col_offset) — or ``None`` when no line number is attached — into
    ``line_number_map["pc_pos_map"]``. Always forwards to
    :func:`note_breakpoint` and returns its result.
    """
    if isinstance(item, Instruction):
        if item.lineno is None:
            offsets = None
        else:
            offsets = (item.lineno, item.col_offset,
                       item.end_lineno, item.end_col_offset)
        line_number_map["pc_pos_map"][pos] = offsets
    return note_breakpoint(line_number_map, item, pos)


def note_breakpoint(line_number_map, item, pos):
    """Record a breakpoint for a DEBUG marker at pc ``pos``."""
    if item != "DEBUG":
        return
    if item.pc_debugger:
        # PC debugger: breakpoint keyed by program counter.
        line_number_map["pc_breakpoints"].add(pos)
    else:
        # Line-number breakpoint (line after the marker, 1-based).
        line_number_map["breakpoints"].add(item.lineno + 1)
from __future__ import absolute_import, division, print_function import os from dynd import nd import datashape import sys from functools import partial from datashape import dshape, Record, to_numpy_dtype, Option from datashape.predicates import isscalar import toolz from toolz import concat, partition_all, first, merge from cytoolz import pluck import copy from datetime import datetime from numbers import Number from collections import Iterable, Iterator import numpy as np import pandas as pd import tables as tb from ..compute.chunks import ChunkIterator, chunks from ..data.meta import Concat from ..dispatch import dispatch from .. import expr from ..expr import Expr, Projection, Field, Symbol from ..compute.core import compute from ..resource import resource from ..compatibility import _strtypes, map from ..utils import keywords from ..data.utils import sort_dtype_items from ..pytables import PyTables from ..compute.spark import RDD __all__ = ['into', 'discover'] @dispatch(object, object) def into(a, b, **kwargs): """ Push data in ``b`` into a container of type ``a`` Examples -------- >>> into([], (1, 2, 3)) [1, 2, 3] >>> into(np.ndarray, [['Alice', 100], ['Bob', 200]], names=['name', 'amt']) rec.array([('Alice', 100), ('Bob', 200)], dtype=[('name', 'S5'), ('amt', '<i8')]) >>> into(pd.DataFrame, _) name amt 0 Alice 100 1 Bob 200 """ raise NotImplementedError( "Blaze does not know a rule for the following conversion" "\n%s <- %s" % (type(a).__name__, type(b).__name__)) # Optional imports try: from bokeh.objects import ColumnDataSource except ImportError: ColumnDataSource = type(None) try: import bcolz from bcolz import ctable, carray except ImportError: ctable = type(None) carray = type(None) try: from pymongo.collection import Collection except ImportError: Collection = type(None) try: from ..data import DataDescriptor, CSV, JSON, JSON_Streaming, Excel, SQL except ImportError: DataDescriptor = type(None) CSV = type(None) JSON = type(None) JSON_STREAMING = 
@dispatch(type, object)
def into(a, b, **kwargs):
    """ Resolve into when given a type as a first argument

    Usually we give into an example of the thing that we want

    >>> into([], (1, 2, 3)) # give me a list like []
    [1, 2, 3]

    However sometimes it's inconvenient to construct a dummy example.
    In those cases we just specify the desired type

    >>> into(list, (1, 2, 3))
    [1, 2, 3]
    """
    f = into.dispatch(a, type(b))
    try:
        a = a()
    except Exception:  # was bare ``except:`` — don't swallow SystemExit etc.
        pass
    return f(a, b, **kwargs)


@dispatch((list, tuple, set), (list, tuple, set, Iterator,
                               type(dict().items()),
                               pd.Series, np.record, np.void))
def into(a, b, **kwargs):
    # Rebuild ``b`` as the same container type as ``a``.
    return type(a)(b)


@dispatch(set, list)
def into(a, b, **kwargs):
    try:
        return set(b)
    except TypeError:
        # Rows are unhashable (e.g. lists) — convert each to a tuple first.
        return set(map(tuple, b))


@dispatch(dict, (list, tuple, set))
def into(a, b, **kwargs):
    return dict(b)


@dispatch((list, tuple, set), dict)
def into(a, b, **kwargs):
    # Emit (key, value) pairs sorted by key, as the target container type.
    return type(a)(map(type(a), sorted(b.items(), key=lambda x: x[0])))


@dispatch(nd.array, (Iterable, Number) + _strtypes)
def into(a, b, **kwargs):
    return nd.array(b, **kwargs)


@dispatch(nd.array, nd.array)
def into(a, b, **kwargs):
    return b


@dispatch(np.ndarray, np.ndarray)
def into(a, b, **kwargs):
    return b


@dispatch(list, nd.array)
def into(a, b, **kwargs):
    return nd.as_py(b, tuple=True)


@dispatch(tuple, nd.array)
def into(a, b, **kwargs):
    return tuple(nd.as_py(b, tuple=True))


@dispatch(np.ndarray, nd.array)
def into(a, b, **kwargs):
    return nd.as_numpy(b, allow_copy=True)


def dtype_from_tuple(t):
    """Build a structured numpy dtype ('f0', 'f1', ...) from a sample tuple."""
    # Local renamed from ``dshape`` to avoid shadowing the imported function.
    ds = discover(t)
    names = ['f%d' % i for i in range(len(t))]
    types = [x.measure.to_numpy_dtype() for x in ds.measure.dshapes]
    return np.dtype(list(zip(names, types)))
@dispatch(np.ndarray, (Iterable, Iterator))
def into(a, b, **kwargs):
    b = iter(b)
    first = next(b)
    b = toolz.concat([[first], b])  # put the peeked element back
    if isinstance(first, datetime):
        b = map(np.datetime64, b)
    if isinstance(first, (list, tuple)):
        return np.rec.fromrecords(
            [tuple(x) for x in b],
            dtype=kwargs.pop('dtype', dtype_from_tuple(first)),
            **kwargs)
    elif hasattr(first, 'values'):
        # detecting sqlalchemy.engine.result.RowProxy types and similar
        return np.asarray([tuple(x.values()) for x in b], **kwargs)
    else:
        return np.asarray(list(b), **kwargs)


def degrade_numpy_dtype_to_python(dt):
    """Degrade sub-microsecond datetime units to ``M8[us]``, recursively.

    >>> degrade_numpy_dtype_to_python(np.dtype('M8[ns]'))
    dtype('<M8[us]')

    >>> dt = np.dtype([('a', 'S7'), ('b', 'M8[D]'), ('c', 'M8[ns]')])
    >>> degrade_numpy_dtype_to_python(dt)
    dtype([('a', 'S7'), ('b', '<M8[D]'), ('c', '<M8[us]')])
    """
    replacements = {'M8[ns]': np.dtype('M8[us]'),
                    'M8[as]': np.dtype('M8[us]')}
    dt = replacements.get(dt.str.lstrip('<>'), dt)
    # Robustness: use dtype.names instead of inspecting str(dt)[0] == '['
    # to detect structured dtypes.
    if dt.names is not None:
        return np.dtype([(name, degrade_numpy_dtype_to_python(dt[name]))
                         for name in dt.names])
    return dt


@dispatch(list, np.ndarray)
def into(a, b, **kwargs):
    if 'M8' in str(b.dtype) or 'datetime' in str(b.dtype):
        b = b.astype(degrade_numpy_dtype_to_python(b.dtype))
    return numpy_ensure_strings(b).tolist()


@dispatch(set, object)
def into(a, b, **kwargs):
    return set(into(list, b, **kwargs))


@dispatch(pd.DataFrame, np.ndarray)
def into(df, x, **kwargs):
    if len(df.columns) > 0:
        columns = list(df.columns)
    else:
        columns = list(x.dtype.names)
    return pd.DataFrame(numpy_ensure_strings(x), columns=columns)


@dispatch((pd.DataFrame, list, tuple, Iterator, nd.array), tb.Table)
def into(a, t, **kwargs):
    # Route through an ndarray intermediate.
    x = into(np.ndarray, t)
    return into(a, x, **kwargs)


@dispatch(np.ndarray, tb.Table)
def into(_, t, **kwargs):
    res = t[:]
    dt_fields = [k for k, v in t.coltypes.items() if v == 'time64']
    if not dt_fields:
        return res
    for f in dt_fields:
        # pytables is in seconds since epoch
        res[f] *= 1e6
    fields = []
    for name, dtype in sort_dtype_items(t.coldtypes.items(), t.colnames):
        typ = getattr(t.cols, name).type
        fields.append((name, {'time64': 'datetime64[us]',
                              'time32': 'datetime64[D]',
                              'string': dtype.str}.get(typ, typ)))
    return res.astype(np.dtype(fields))


def numpy_fixlen_strings(x):
    """ Returns new array with strings as fixed length

    >>> from numpy import rec
    >>> x = rec.array([(1, 'Alice', 100), (2, 'Bob', 200)],
    ...               dtype=[('id', 'i8'), ('name', 'O'), ('amount', 'i8')])

    >>> numpy_fixlen_strings(x) # doctest: +SKIP
    rec.array([(1, 'Alice', 100), (2, 'Bob', 200)],
          dtype=[('id', '<i8'), ('name', 'S5'), ('amount', '<i8')])
    """
    if "'O'" in str(x.dtype):
        dt = [(n, "S%d" % max(map(len, x[n]))
               if x.dtype[n] == 'O' else x.dtype[n])
              for n in x.dtype.names]
        x = x.astype(dt)
    return x


def typehint(x, typedict):
    """Replace the dtypes in `x` keyed by `typedict` with the dtypes in
    `typedict`.
    """
    dtype = x.dtype
    lhs = dict(zip(dtype.fields.keys(), map(first, dtype.fields.values())))
    dtype_list = list(merge(lhs, typedict).items())
    return x.astype(np.dtype(sort_dtype_items(dtype_list, dtype.names)))


@dispatch(tb.Table, np.ndarray)
def into(t, x, **kwargs):
    dt_types = dict((k, 'datetime64[us]')
                    for k, (v, _) in x.dtype.fields.items()
                    if issubclass(v.type, np.datetime64))
    x = numpy_ensure_bytes(numpy_fixlen_strings(x))
    x = typehint(typehint(x, dt_types), dict.fromkeys(dt_types, 'f8'))
    for name in dt_types:
        # pytables stores seconds since epoch
        x[name] /= 1e6
    t.append(x)
    return t


@dispatch(tb.Table, ChunkIterator)
def into(t, c, **kwargs):
    for chunk in c:
        into(t, chunk, **kwargs)
    return t


@dispatch(tb.node.MetaNode, tb.Table)
def into(table, data, filename=None, datapath=None, **kwargs):
    dshape = datashape.dshape(kwargs.setdefault('dshape', discover(data)))
    t = PyTables(filename, datapath=datapath, dshape=dshape)
    return into(t, data)


@dispatch(ctable, tb.Table)
def into(bc, data, **kwargs):
    cs = chunks(data)
    bc = into(bc, next(cs))
    for chunk in cs:
        bc.append(chunk)
    return bc


@dispatch(tb.node.MetaNode, np.ndarray)
def into(_, x, filename=None, datapath=None, **kwargs):
    # tb.node.MetaNode == type(tb.Table)
    x = numpy_ensure_bytes(numpy_fixlen_strings(x))
    t = PyTables(filename, datapath=datapath, dshape=discover(x))
    return into(t, x, **kwargs)


@dispatch(tb.node.MetaNode, (ctable, list))
def into(_, data, filename=None, datapath=None, **kwargs):
    t = PyTables(filename, datapath=datapath,
                 dshape=kwargs.get('dshape', discover(data)))
    for chunk in map(partial(into, np.ndarray), chunks(data)):
        into(t, chunk)
    return t
dshape=kwargs.get('dshape', discover(data))) for chunk in map(partial(into, np.ndarray), chunks(data)): into(t, chunk) return t @dispatch(tb.Table, (pd.DataFrame, CSV, SQL, nd.array, Collection)) def into(a, b, **kwargs): return into(a, into(np.ndarray, b), **kwargs) @dispatch(tb.Table, _strtypes) def into(a, b, **kwargs): kw = dict(kwargs) if 'output_path' in kw: del kw['output_path'] r = resource(b, **kw) return into(a, r, **kwargs) @dispatch(list, pd.DataFrame) def into(_, df, **kwargs): return into([], into(np.ndarray(0), df)) @dispatch(pd.DataFrame, nd.array) def into(a, b, **kwargs): ds = dshape(nd.dshape_of(b)) if list(a.columns): names = list(a.columns) elif isinstance(ds[-1], Record): names = ds[-1].names else: names = None if names: return pd.DataFrame(nd.as_py(b), columns=names) else: return pd.DataFrame(nd.as_py(b)) @dispatch(pd.DataFrame, (list, tuple, Iterator, type(dict().items()))) def into(df, seq, **kwargs): if list(df.columns): return pd.DataFrame(list(seq), columns=df.columns, **kwargs) else: return pd.DataFrame(list(seq), **kwargs) @dispatch(pd.DataFrame, pd.DataFrame) def into(_, df, **kwargs): return df.copy() @dispatch(pd.Series, pd.Series) def into(_, ser, **kwargs): return ser @dispatch(pd.Series, Iterator) def into(a, b, **kwargs): return into(a, list(b), **kwargs) @dispatch(pd.Series, (list, tuple)) def into(a, b, **kwargs): return pd.Series(b, **kwargs) @dispatch(pd.Series, Expr) def into(ser, col, **kwargs): ser = into(ser, compute(col)) ser.name = col._name return ser @dispatch(pd.Series, pd.DataFrame) def into(a, b, **kwargs): if len(b.columns) != 1: raise TypeError('Cannot transform a multiple column expression to a' ' Series') s = b.squeeze() if a.name is not None: s.name = a.name return s @dispatch(pd.Series, Projection) def into(ser, col, **kwargs): return into(pd.Series, into(pd.DataFrame, col)) @dispatch(pd.Series, np.ndarray) def into(s, x, **kwargs): return pd.Series(numpy_ensure_strings(x), name=s.name) 
@dispatch(pd.DataFrame, pd.Series)
def into(_, df, **kwargs):
    return pd.DataFrame(df)


@dispatch(list, pd.Series)
def into(_, ser, **kwargs):
    return ser.tolist()


@dispatch(nd.array, pd.DataFrame)
def into(a, df, **kwargs):
    # Build a dynd array column-by-column from the DataFrame
    schema = discover(df)
    arr = nd.empty(str(schema))
    for i in range(len(df.columns)):
        arr[:, i] = np.asarray(df[df.columns[i]])
    return arr


@dispatch(np.ndarray, pd.DataFrame)
def into(a, df, **kwargs):
    return df.to_records(index=False)


@dispatch(nd.array)
def discover(arr):
    return dshape(nd.dshape_of(arr))


@dispatch(pd.DataFrame)
def discover(df):
    # object-dtype columns are reported as datashape strings
    obj = datashape.coretypes.object_
    names = list(df.columns)
    dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))
    dtypes = [datashape.string if dt == obj else dt for dt in dtypes]
    schema = Record(list(zip(names, dtypes)))
    return len(df) * schema


@dispatch(pd.Series)
def discover(s):
    return discover(s.to_frame())


@dispatch(np.ndarray, carray)
def into(a, b, **kwargs):
    return b[:]


@dispatch(pd.Series, carray)
def into(a, b, **kwargs):
    return into(a, into(np.ndarray, b))


@dispatch(ColumnDataSource, (pd.DataFrame, np.ndarray, ctable))
def into(cds, t, **kwargs):
    # Bokeh ColumnDataSource: one list of values per column, keyed by name
    columns = discover(t).subshape[0][0].names
    return ColumnDataSource(data=dict((col, into([], t[col]))
                                      for col in columns))


@dispatch(ColumnDataSource, Expr)
def into(cds, t, **kwargs):
    columns = t.fields
    return ColumnDataSource(data=dict((col, into([], t[col]))
                                      for col in columns))


@dispatch(ColumnDataSource, tb.Table)
def into(cds, t, **kwargs):
    return into(cds, into(pd.DataFrame, t))


@dispatch(ColumnDataSource, nd.array)
def into(cds, t, **kwargs):
    # dynd arrays expose columns as attributes, hence getattr
    columns = discover(t).subshape[0][0].names
    return ColumnDataSource(data=dict((col, into([], getattr(t, col)))
                                      for col in columns))


@dispatch(ColumnDataSource, Collection)
def into(cds, other, **kwargs):
    return into(cds, into(pd.DataFrame, other))


@dispatch(ctable, Expr)
def into(a, b, **kwargs):
    c = compute(b)
    # When compute() yields a plain sequence, supply dtypes/names from
    # the expression's schema so the ctable can be typed correctly.
    if isinstance(c, (list, tuple, Iterator)):
        kwargs['types'] = [datashape.to_numpy_dtype(t)
                           for t in b.schema[0].types]
        kwargs['names'] = b.fields
    return into(a, c, **kwargs)


@dispatch(pd.DataFrame, ColumnDataSource)
def into(df, cds, **kwargs):
    return cds.to_df()


def fix_len_string_filter(ser):
    """ Convert object strings to fixed length, pass through others """
    if ser.dtype == np.dtype('O'):
        return np.asarray(list(ser))
    else:
        return np.asarray(ser)


@dispatch(ctable, nd.array)
def into(a, b, **kwargs):
    # Extract each named field of the dynd array as a numpy column
    names = dshape(nd.dshape_of(b))[1].names
    columns = [getattr(b, name) for name in names]
    columns = [np.asarray(nd.as_py(c))
retval = False if dop is None: if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp": dop = DictOp else: dop = _get_collectd_config(webobj, host) switch_libvirt_plugin(flag=False, dop=dop, webobj=webobj, host=host) def switch_sensors_plugin(flag=True, dop=None, webobj=None, host=None): global DictOp retval = False if dop is None: if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp": dop = DictOp else: dop = _get_collectd_config(webobj, host) configName = "sensors" dop.cdp_set("collectdplugin",[configName,"LoadPlugin","sensors"],"sensors",multiple_file=True,is_opt_multi=True) _keys = [configName,"Plugin","sensors"] # 既存の設定を削除 keys = _keys + ["Sensor"] if dop.cdp_isset("collectdplugin",keys,multiple_file=True) is True: for _k in list(dop.cdp_get("collectdplugin",keys,multiple_file=True).keys()): keys = _keys + ["Sensor",_k] dop.cdp_delete("collectdplugin",keys,multiple_file=True) orders_key = _keys + ["Sensor","@ORDERS"] dop.cdp_unset("collectplugin",orders_key,multiple_file=True) orders = [] #from karesansui.lib.utils import get_sensor_chip_name #chip_name = get_sensor_chip_name() chip_name = "it8712-isa-0290" # temperature for _temp in ["temp","temp1","temp2","temp3","temp4","temp5","temp6","temp7"]: sensor_id = "%s/temperature-%s" % (chip_name,_temp) value = "\"%s\"" % sensor_id keys = _keys + ["Sensor",value] dop.cdp_set("collectdplugin",keys,value,multiple_file=True,is_opt_multi=True) orders.append([value]) # fanspeed for _fan in ["fan1","fan2","fan3","fan4","fan5","fan6","fan7"]: sensor_id = "%s/fanspeed-%s" % (chip_name,_fan) value = "\"%s\"" % sensor_id keys = _keys + ["Sensor",value] dop.cdp_set("collectdplugin",keys,value,multiple_file=True,is_opt_multi=True) orders.append([value]) # voltage for _in in ["in0","in1","in2","in3","in4","in5","in6","in7","in8","in9","in10"]: sensor_id = "%s/voltage-%s" % (chip_name,_in) value = "\"%s\"" % sensor_id keys = _keys + ["Sensor",value] 
dop.cdp_set("collectdplugin",keys,value,multiple_file=True,is_opt_multi=True) orders.append([value]) dop.cdp_set("collectplugin",orders_key,orders,multiple_file=True,is_opt_multi=True) # 上記で指定されたものを対象外にする場合はtrue keys = _keys + ["IgnoreSelected"] value = "false" dop.cdp_set("collectdplugin",keys,value,multiple_file=True) if flag is True: dop.cdp_uncomment("collectdplugin",[configName,"LoadPlugin","sensors"],recursive=True,multiple_file=True) dop.cdp_uncomment("collectdplugin",_keys,recursive=True,multiple_file=True) else: dop.cdp_comment("collectdplugin",[configName,"LoadPlugin","sensors"],recursive=True,multiple_file=True) dop.cdp_comment("collectdplugin",_keys,recursive=True,multiple_file=True) def enable_sensors_plugin(dop=None, webobj=None, host=None): global DictOp retval = False if dop is None: if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp": dop = DictOp else: dop = _get_collectd_config(webobj, host) switch_sensors_plugin(flag=True, dop=dop, webobj=webobj, host=host) def disable_sensors_plugin(dop=None, webobj=None, host=None): global DictOp retval = False if dop is None: if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp": dop = DictOp else: dop = _get_collectd_config(webobj, host) switch_sensors_plugin(flag=False, dop=dop, webobj=webobj, host=host) def switch_uptime_plugin(flag=True, dop=None, webobj=None, host=None): global DictOp retval = False if dop is None: if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp": dop = DictOp else: dop = _get_collectd_config(webobj, host) configName = "uptime" dop.cdp_set("collectdplugin",[configName,"LoadPlugin","uptime"],"uptime",multiple_file=True,is_opt_multi=True) if flag is True: dop.cdp_uncomment("collectdplugin",[configName,"LoadPlugin","uptime"],recursive=True,multiple_file=True) else: dop.cdp_comment("collectdplugin",[configName,"LoadPlugin","uptime"],recursive=True,multiple_file=True) def 
enable_uptime_plugin(dop=None, webobj=None, host=None):
    # Convenience wrapper: turn the uptime plugin on.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    switch_uptime_plugin(flag=True, dop=dop, webobj=webobj, host=host)

def disable_uptime_plugin(dop=None, webobj=None, host=None):
    # Convenience wrapper: turn the uptime plugin off.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    switch_uptime_plugin(flag=False, dop=dop, webobj=webobj, host=host)

def switch_users_plugin(flag=True, dop=None, webobj=None, host=None):
    # Enable/disable the collectd 'users' plugin by (un)commenting its config.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    configName = "users"

    dop.cdp_set("collectdplugin",[configName,"LoadPlugin","users"],"users",multiple_file=True,is_opt_multi=True)

    if flag is True:
        dop.cdp_uncomment("collectdplugin",[configName,"LoadPlugin","users"],recursive=True,multiple_file=True)
    else:
        dop.cdp_comment("collectdplugin",[configName,"LoadPlugin","users"],recursive=True,multiple_file=True)

def enable_users_plugin(dop=None, webobj=None, host=None):
    # Convenience wrapper: turn the users plugin on.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    switch_users_plugin(flag=True, dop=dop, webobj=webobj, host=host)

def disable_users_plugin(dop=None, webobj=None, host=None):
    # Convenience wrapper: turn the users plugin off.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    switch_users_plugin(flag=False, dop=dop, webobj=webobj, host=host)

def init_filter(dop=None, webobj=None, host=None):
    # Ensure the filter-chain match/target plugins are loaded.
    global DictOp
    retval = False
    if dop is None:
        if
isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    configName = "filter"

    load_plugins = ["match_regex","match_value","target_notification"]

    # Preserve any existing ordering information for this config module
    if dop.cdp_isset("collectdplugin",[configName,"@ORDERS"],multiple_file=True) is True:
        orders = dop.get("collectdplugin",[configName,"@ORDERS"])
    else:
        orders = []

    for plugin_name in load_plugins:
        # Only add LoadPlugin lines that are not already present
        if dop.cdp_isset("collectdplugin",[configName,"LoadPlugin",plugin_name],multiple_file=True) is False:
            dop.cdp_set("collectdplugin",[configName,"LoadPlugin",plugin_name],plugin_name,multiple_file=True,is_opt_multi=True)
            orders.append(["LoadPlugin",plugin_name])

    dop.set("collectdplugin",[configName,"@ORDERS"],orders)

def set_chain_rule(type,chain,rule,params,dop=None, webobj=None, host=None):
    # Write one rule of a collectd filter chain (PreCacheChain/PostCacheChain).
    # Each key in `params` is optional and written only when present
    # (hence the try/except-pass pattern throughout).
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    init_filter(dop, webobj, host)

    configName = "filter"

    _keys = [configName,type,"\"%s\"" % chain]
    if dop.cdp_isset("collectdplugin", _keys, multiple_file=True) is False:
        dop.cdp_set("collectdplugin", _keys, chain, multiple_file=True, is_opt_multi=True)
        dop.cdp_set_pre_comment("collectdplugin", _keys, [''], multiple_file=True)

    _keys = [configName,"Chain",chain]

    try:
        plugin = "\"^%s$\"" % params["Plugin"]
        keys = _keys + ["Rule",rule,"Match","regex","Plugin"]
        dop.cdp_set("collectdplugin", keys, plugin, multiple_file=True)
    except:
        pass
    try:
        type_instance = "\"^%s$\"" % params["TypeInstance"]
        keys = _keys + ["Rule",rule,"Match","regex","TypeInstance"]
        dop.cdp_set("collectdplugin", keys, type_instance, multiple_file=True)
    except:
        pass
    try:
        min = params["Min"]
        keys = _keys + ["Rule",rule,"Match","value","Min"]
        dop.cdp_set("collectdplugin", keys, min, multiple_file=True)
    except:
        pass
    try:
        max = params["Max"]
        keys = _keys + ["Rule",rule,"Match","value","Max"]
        dop.cdp_set("collectdplugin", keys, max, multiple_file=True)
    except:
        pass
    try:
        invert = params["Invert"]
        keys = _keys + ["Rule",rule,"Match","value","Invert"]
        dop.cdp_set("collectdplugin", keys, invert, multiple_file=True)
    except:
        pass
    try:
        satisfy = "\"%s\"" % params["Satisfy"]
        keys = _keys + ["Rule",rule,"Match","value","Satisfy"]
        dop.cdp_set("collectdplugin", keys, satisfy, multiple_file=True)
    except:
        pass
    try:
        if params['Target'] == "notification":
            # Notification targets carry a Message and a Severity
            try:
                message = "\"%s\"" % params["Message"]
                keys = _keys + ["Rule",rule,"Target","notification","Message"]
                dop.cdp_set("collectdplugin", keys, message,multiple_file=True)
            except:
                pass
            try:
                severity = "\"%s\"" % params["Severity"]
                keys = _keys + ["Rule",rule,"Target","notification","Severity"]
                dop.cdp_set("collectdplugin", keys, severity,multiple_file=True)
            except:
                pass
        else:
            # Other targets are written as a bare, commented-out directive
            try:
                keys = _keys + ["Rule",rule,"Target",params['Target'],"Pass"]
                dop.cdp_set("collectdplugin", keys, "" ,multiple_file=True)
                dop.cdp_comment("collectdplugin", keys, multiple_file=True)
            except:
                pass
    except:
        pass

    #keys = _keys + ["Target"]
    #dop.cdp_set("collectdplugin", keys, "\"write\"", multiple_file=True)

def set_pre_cache_chain_rule(chain,rule,params,dop=None,webobj=None,host=None):
    # Wrapper for set_chain_rule on the PreCacheChain.
    global DictOp
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    set_chain_rule("PreCacheChain",chain,rule,params,dop,webobj,host)

def set_post_cache_chain_rule(chain,rule,params,dop=None,webobj=None,host=None):
    # Wrapper for set_chain_rule on the PostCacheChain.
    global DictOp
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    set_chain_rule("PostCacheChain",chain,rule,params,dop,webobj,host)

def create_threshold_config_name(plugin,selector):
    # Build the per-threshold config file name:
    # threshold_<plugin>:<plugin_instance>:<type>:<type_instance>:<ds>[:<host>]
    # Missing selector components leave an empty slot between the colons.
    configName = "threshold"

    data = plugin_selector_to_dict(selector)

    config_name = "%s_%s" % (configName,plugin,)
    try:
        config_name += ":%s" %
 (data['plugin_instance'],)
    except:
        config_name += ":"
    try:
        config_name += ":%s" % (data['type'],)
    except:
        config_name += ":"
    try:
        config_name += ":%s" % (data['type_instance'],)
    except:
        config_name += ":"
    try:
        config_name += ":%s" % (data['ds'],)
    except:
        config_name += ":"
    try:
        config_name += ":%s" % (data['host'],)
    except:
        pass
        #config_name += ":"

    return config_name

def set_threshold(plugin,selector,params,dop=None, webobj=None, host=None):
    # Write a collectd <Threshold> block for the given plugin/selector.
    # `selector` is parsed by plugin_selector_to_dict(); every component
    # (plugin_instance, type, type_instance, ds, host) is optional.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    enable_python_plugin(dop=dop, webobj=webobj, host=host)

    config_name = create_threshold_config_name(plugin,selector)

    data = plugin_selector_to_dict(selector)
    try:
        plugin_instance = data['plugin_instance']
    except:
        plugin_instance = None
    try:
        type = data['type']
    except:
        type = None
    try:
        type_instance = data['type_instance']
    except:
        type_instance = None
    try:
        ds = data['ds']
    except:
        ds = None
    try:
        host = data['host']
        _keys = [config_name,"Threshold","","Host",host,"Plugin",plugin]
    except:
        host = None
        _keys = [config_name,"Threshold","","Plugin",plugin]

    if plugin_instance is not None:
        keys = _keys + ["Instance"]
        # Numeric instances are quoted via %d, anything else passes through
        try:
            int(plugin_instance)
            _plugin_instance = "\"%d\"" % int(plugin_instance)
        except:
            _plugin_instance = plugin_instance
            pass
        dop.cdp_set("collectdplugin", keys, _plugin_instance, multiple_file=True)

    if type is not None:
        _keys = _keys + ["Type",type]

    if type_instance is not None:
        keys = _keys + ["Instance"]
        dop.cdp_set("collectdplugin", keys, "\"%s\"" % type_instance, multiple_file=True)

    if ds is not None:
        keys = _keys + ["DataSource"]
        dop.cdp_set("collectdplugin", keys, "\"%s\"" % ds, multiple_file=True)

    try:
        params['Message']
    except:
        # No explicit Message: synthesize one from the selector parts and
        # the threshold parameters (built dynamically via exec).
        msg_dict = {}
        for _param in ["plugin","plugin_instance","type","type_instance","ds","host"]:
            try:
                exec("if %s is not None: msg_dict['%s'] = str(%s)" % (_param,_param,_param,))
                exec("if %s is None:
 msg_dict['%s'] = '%%{%s}'" % (_param,_param,_param,))
            except:
                pass

        for _param in ["WarningMax","WarningMin","FailureMax","FailureMin","Percentage","Persist","Hits","Hysteresis"]:
            try:
                # CamelCase -> snake_case for the message dict keys
                _name = re.sub("([a-z])([A-Z])","\\1_\\2",_param).lower()
                exec("msg_dict['%s'] = params['%s']" % (_name,_param,))
            except:
                pass

        params['Message'] = "\"%s\"" % str(msg_dict)

    for _param in ["WarningMax","WarningMin","FailureMax","FailureMin","Percentage","Persist","Hits","Hysteresis","Message"]:
        try:
            param_val = params[_param]
            keys = _keys + [_param]
            dop.cdp_set("collectdplugin", keys, param_val, multiple_file=True)
        except:
            pass

def disable_threshold(plugin,selector,dop=None, webobj=None, host=None):
    # Comment out the whole <Threshold> block for this plugin/selector.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    config_name = create_threshold_config_name(plugin,selector)
    keys = [config_name,"Threshold",""]
    dop.cdp_comment("collectdplugin", keys, multiple_file=True)

def enable_threshold(plugin,selector,dop=None, webobj=None, host=None):
    # Uncomment the whole <Threshold> block for this plugin/selector.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    config_name = create_threshold_config_name(plugin,selector)
    keys = [config_name,"Threshold",""]
    dop.cdp_uncomment("collectdplugin", keys, recursive=True, multiple_file=True)

def delete_threshold(plugin,selector,dop=None, webobj=None, host=None):
    # Remove the threshold config module entirely.
    global DictOp
    retval = False
    if dop is None:
        if isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    config_name = create_threshold_config_name(plugin,selector)
    keys = [config_name]
    dop.delete("collectdplugin", keys)

def initialize_collectd_settings(dop=None, webobj=None, host=None, force=False, reverse=False):
    # Populate the collectd configuration with project defaults;
    # force=True overwrites values that are already set.
    global DictOp
    retval = False
    if dop is None:
        if
isinstance(DictOp, types.InstanceType) and DictOp.__class__.__name__ == "DictOp":
            dop = DictOp
        else:
            dop = _get_collectd_config(webobj, host)

    # general settings
    if not "collectd" in dop.ModuleNames:
        # Lazily parse the main collectd.conf into the DictOp
        from karesansui.lib.parser.collectd import collectdParser
        conf_arr = collectdParser().read_conf()
        dop.addconf("collectd",conf_arr)

    default_params = {
        "Hostname"   :"\"localhost\"",
        "FQDNLookup" :"true",
        "BaseDir"    :"\"%s\"" % COLLECTD_DATA_DIR,
        "PIDFile"    :"\"%s\"" % COLLECTD_PID_FILE,
        "PluginDir"  :"\"%s\"" % COLLECTD_PLUGIN_DIR,
        "TypesDB"    :"\"%s/types.db\"" % COLLECTD_SHARE_DIR,
        "Include"    :"\"%s/*.conf\"" % PARSER_COLLECTD_PLUGIN_DIR,
        "Interval"   :"3",
        "ReadThreads":"5",
    }
    for _k,_v in default_params.items():
        if dop.cdp_isset("collectd",[_k]) is False or force is True:
            # The Include directive may appear multiple times, so it is
            # written with is_opt_multi=True.
            if _k == "Include":
                # Delete existing Include lines first
                if dop.cdp_isset("collectd",[_k]) is True:
                    for _k2 in list(dop.cdp_get("collectd",[_k]).keys()):
                        dop.cdp_delete("collectd",[_k,_k2])
                dop.cdp_set("collectd",[_k,_v] ,_v,is_opt_multi=True)
            else:
                dop.cdp_set("collectd",[_k] ,_v)

    # each plugin settings
#!/usr/bin/env python """ Subband Autocorrelation Classification (SAcC) Pitch Tracker feature Based on Matlab code by <NAME> and <NAME> Python port based on SRI Feature template. 2013-08-25 <NAME> <EMAIL> """ import os import numpy as np import scipy.signal import scipy.io import scipy.cluster.vq # For SRI's wavreading code import scipy.io.wavfile as wav import mlp import sbpca ################## from sbpca_viterbi.m def viterbi(posteriors, hmm_vp = 0.9): """ % path = sbpca_viterbi(posteriors, hmm_vp) % Find the best (viterbi) path through a set of pitch class % posteriors, for the SAcC pitch tracker. % <posteriors> is <nbins> x <nframes> % <hmm_vp> is % 2013-08-23 <NAME> <EMAIL> sbpca refactor cleanup """ # Equalizing variance in log-posterior domain per BSL implementation sposts = np.exp(standardize(np.log(posteriors))) # Set up data for decode nbins, nframes = np.shape(sposts) npch = nbins - 1 # number of actual pitches (i.e., all except unvoiced) # Parameters uvtrp = 0.9 # prob of going from unvoiced to voiced (9x larger # than BSL's code, to compensate for normalization of txmat) vutrp = 0.01 # prob of going from voiced to unvoiced transfloor = np.exp(-10.0) # smallest transition probability wdyn = 3.0 # laplacian half-width for transition probs #hmm_vp = 0.9 # scaling of unvoiced state # Transition matrix - row = from, column = to # A matrix of how far apart two bins are ijdiff = np.abs(np.tile(range(npch), (npch, 1)).transpose() - range(npch)) # pitch-to-pitch transitions are laplacian # summed in log-domain, per BSL... 
pptxmat = np.log(transfloor + np.exp(np.exp(-np.abs(ijdiff)/wdyn))) # normalize rows of pitch-to-pitch transitions to be true probabilities pptxmat /= pptxmat.sum(axis=1)[:, np.newaxis] # transmat wraps unvoiced state around pitch-to-pitch transmat = np.vstack( (np.r_[(1-uvtrp), uvtrp/npch*np.ones(npch)], np.hstack((vutrp*np.ones( (npch, 1) ), (1-vutrp)*pptxmat)))) # penalize unvoiced posterior & renormalize sposts[0,] = hmm_vp * sposts[0,] # renormalize columns sposts /= sposts.sum(axis=0) priors = np.ones(nbins)/nbins return viterbi_path(sposts, priors, transmat) #%%%%%%%%%%%%%%%%%%%%%%% def standardize(array): """ N = standardize(array) % Make each column of an array have a zero mean and unit sd % was "normalise" by <EMAIL> (not to confuse with kpm's normalise) """ stddev = array.std(axis=0) # normalize each column return (array - array.mean(axis=0))/(stddev+(stddev==0)) ################## from viterbi_path.m def viterbi_path(posteriors, priors, transmat): """ % path = viterbi_path(posteriors, priors, transmat) % Find best path through spectrogram-like posteriors (one % column per time frame). Transmat is row from, column to. % Linear probabilities (not log). % Return sequence of state indices. 
    % 2013-08-23 <NAME> <EMAIL> sbpca refactor cleanup
    """
    (nbins, nframes) = np.shape(posteriors)

    # Array to hold traceback
    prev = np.zeros( (nbins, nframes) , int)

    # <pstate> holds normalized probability-to-date of landing in this
    # state along best path
    pstate = priors*posteriors[:, 0]
    # normalize probs of best path to each state, to avoid underflow
    pstate = pstate/np.sum(pstate)

    use_log = True
    #print "use_log=", use_log

    # now calculate forward
    if use_log:
        # log domain
        logtransmat = np.log(transmat.transpose())
        pstate = np.log(pstate)
        for i in range(1, nframes):
            probs = (logtransmat
                     + np.tile(np.log(posteriors[:, i]),
                               (nbins, 1)).transpose()
                     + np.tile(pstate, (nbins, 1)))
            pstate = np.max(probs, axis=1)
            prev[:, i] = np.argmax(probs, axis=1)
            # Renormalize to keep probabilities in a sensible range
            pstate = pstate - np.mean(pstate)
    else:
        # linear likelihood domain
        for i in range(1, nframes):
            # Find most likely combination of previous prob-to-path,
            # and transition
            probs = transmat.transpose() * np.outer(posteriors[:, i], pstate)
            pstate = np.max(probs, axis=1)
            prev[:, i] = np.argmax(probs, axis=1)
            # Renormalize to keep probabilities in a sensible range
            pstate = pstate/sum(pstate)

    # traceback best precedent matrix to get best path
    path = np.zeros(nframes, int)
    # best final state
    path[nframes-1] = np.argmax(pstate)
    # .. and all its predecessors
    for pth in range(nframes, 1, -1):
        path[pth-2] = prev[path[pth-1], pth-1]
    return path

#####################################
def dithering(data, noiselevel=1e-3):
    """
    % y = dithering(x, noiselevel)
    %    Add low-level noise to x to avoid digital zeros
    %    noiselevel is scaling factor below SD of signal at which
    %    noise is added (default 1e-3).
""" # Ensure consistent random sequence (in dither() np.random.seed(0) # Generate the dither sequence xlen = len(data) dither = np.random.rand(xlen) + np.random.rand(xlen) - 1 # add it on 120 dB below the signal spow = np.std(data) #print "dithering off" #return x #print "dithering at 1e-3" #return data + 1e-6 * spow * dither return data + noiselevel * spow * dither # For SRI's wavreading code import scipy.io.wavfile as wav from scikits.audiolab import Sndfile # For command line import os import sys def readsph(filename): """ read in audio data from a sphere file. Return d, sr """ f = Sndfile(filename, 'r') data = f.read_frames(f.nframes, dtype=np.float32) sr = f.samplerate return data, sr def readwav(filename): """ read in audio data from a wav file. Return d, sr """ # Read in wav file sr, wavd = wav.read(filename) # normalize short ints to floats of -1 / 1 data = np.asfarray(wavd) / 32768.0 return data, sr def audioread(filename, targetsr=None): """ Read a soundfile of either WAV or SPH, based on filename returns d, sr """ fileName, fileExtension = os.path.splitext(filename) if fileExtension == ".wav": data, sr = readwav(filename) elif fileExtension == ".sph": data, sr = readsph(filename) else: raise NameError( ("Cannot determine type of infile " + filename) ) # Maybe fix sample rate #if srate == 16000 and self.sbpca.srate == 8000: if targetsr != None and sr != targetsr: # Right now, only downsample by integer numbers decimfact = int(np.round(sr/targetsr)) data = scipy.signal.decimate(np.r_[data[1:], 0], decimfact, ftype='fir') # slight trim to ss.decimate to make its phase align # to matlab's resample # for case of resampling 16 kHz down to 8 kHz delay = 7 data = np.r_[data[delay:], np.zeros(delay)] sr = sr/decimfact return data, sr # Main class class SAcC(object): """ Compute Subband Autocorrelation Classification (SAcC) pitch track """ def __init__(self, config): """ Initialize default values """ #self.config = config # initialize the sbpca subsystem 
self.sbpca = sbpca.SbPca(config) # initialize the mlp subsytem self.net = mlp.MLP(config['wgt_file'], config['norms_file']) # parameters specific to SAcC part self.ptchtab = np.r_[0, np.loadtxt(config['pcf_file'])] self.hmm_vp = config['hmm_vp'] self.n_s = 10.0 self.start_utt = 0 self.write_rownum = False self.write_time = False self.write_sbac = False self.write_sbpca = False self.write_posteriors = False self.write_pitch = True self.write_pvx = True self.dither_level = 1e-3 if 'n_s' in config: self.n_s = config['n_s'] if 'start_utt' in config: self.start_utt = config['start_utt'] if 'write_rownum' in config: self.write_rownum = config['write_rownum'] if 'write_time' in config: self.write_time = config['write_time'] if 'write_sbac' in config: self.write_sbac = config['write_sbac'] if 'write_sbpca' in config: self.write_sbpca = config['write_sbpca'] if 'write_posteriors' in config: self.write_posteriors = config['write_posteriors'] if 'write_pitch' in config: self.write_pitch = config['write_pitch'] if 'write_pvx' in config: self.write_pvx = config['write_pvx'] # added 2014-04-10 if 'dither_level' in config: self.dither_level = config['dither_level'] def __call__(self, filename): """ This is called for each file """ # remove dependency on libsndfile. 
 Only accept wav, use Sox
        data, srate = audioread(filename, targetsr=self.sbpca.srate)
        assert srate == self.sbpca.srate
        # Actually run it
        ftrs = self.sacc(data, srate)
        # Return the features
        return ftrs

    def sacc(self, data, srate):
        """ Run the SAcC pitch tracker on the specified waveform/sampling rate
            using the configuration specified on construction
            Return two vectors, pitch (in Hz) and P(voicing) (posterior)
        """
        # Pad out d with zeros so get right number of winsamps frames
        # (and add unique dithering noise over whole signal)
        xdat = dithering(np.r_[data, np.zeros(self.sbpca.maxlags)],
                         self.dither_level)

        # Pre-allocate whole activations matrix
        nframes = self.sbpca.nframes(len(data))
        # acts = np.zeros( (len(self.net.obias), nframes) )
        acts = np.zeros( (len(self.net.obias), 0) )

        # (nChs, nDim, nLag) = np.shape(self.sbpca.mapping)
        # if self.output_pcas:
        #     ftrs = np.zeros( (nChs, nDim, nframes) )
        # elif self.output_autocos:
        #     ftrs = np.zeros( (nChs, nLag, nframes) )
        # else:
        #     ftrs = np.zeros( (2, nframes) )

        framesamps = self.sbpca.framesamps
        # How many frames to process each time in loop
        #blockframes = 100
        blockframes = max(1, int(np.ceil(self.n_s * (srate/framesamps))))
        blocksamps = blockframes * framesamps
        nblocks = int(np.ceil(float(nframes) / float(blockframes)))
        # How many frames do we try to prepad?
        prepadframes = 10

        isfirst = 1
        donefr = 0
        for block in range(nblocks):
            # Figure next block of samples, including pre- and post-padding
            actualprepadframes = min(prepadframes, block*blockframes)
            blockbasesamp = block*blocksamps
            blocklastsamp = min(len(xdat), blockbasesamp + blocksamps
                                +self.sbpca.padsamps)
            xpts = xdat[(blockbasesamp - actualprepadframes*framesamps)
                        :blocklastsamp]
            # Run the sbpca part
            acs = self.sbpca.calc_autocos(xpts, srate, isfirst)
            (nsb, nlg, nfr)
where a message came from so we can GC it properly',
        'typedef struct msg_udata { // confuse over-simplified pretty-printer',
        '    ::google::protobuf::MessageLite * msg;',
        '    bool lua_owns;',
        '    lua_protobuf_gc_callback gc_callback;',
        '    void * callback_data;',
        '} msg_udata;',
        '',])

    return lines

def proto_function_open_name(filename):
    # Library-open function prefix derived from the .proto file name
    return 'lua_protobuf_%s_' % filename.replace('.proto', '')

def package_function_prefix(package):
    # C identifier prefix for a protobuf package ('.' -> '_')
    return 'lua_protobuf_%s_' % package.replace('.', '_')

def message_function_prefix(package, message):
    # Prefix shared by every generated function of one message type
    return '%s%s_' % (package_function_prefix(package), message)

def message_open_function_name(package, message):
    '''Returns function name that registers the Lua library for a message type'''

    return '%sopen' % message_function_prefix(package, message)

def cpp_class(package, message = None):
    '''Returns the fully qualified class name for a message type'''

    if not message:
        return package.replace('.', '::')

    return '::%s::%s' % ( package.replace('.', '::'), message )

def field_function_name(package, message, prefix, field):
    '''Obtain the function name of a field accessor/mutator function'''

    return '%s%s_%s' % ( message_function_prefix(package, message), prefix, field )

def field_function_start(package, message, prefix, field):
    '''Obtain the start of function for a field accessor function'''

    return [
        'int %s(lua_State *L)' % field_function_name(package, message, prefix, field.lower()),
        '{',
    ]

def lua_libname(package, message):
    '''Returns the Lua library name for a specific message'''

    return 'protobuf.%s.%s' % (package, message)

def metatable(package, message):
    '''Returns Lua metatable for protocol buffer message type'''

    return 'protobuf_.%s.%s' % (package, message)

def obtain_message_from_udata(package, message=None, index=1, varname='m'):
    '''Statement that obtains a message from userdata'''

    c = cpp_class(package, message)
    return [
        'msg_udata * %sud = (msg_udata *)%s;' % ( varname, check_udata(package, message, index) ),
        '%s *%s = (%s *)%sud->msg;' % ( c,
 varname, c, varname ),
    ]

def check_udata(package, message, index=1):
    '''Validates a udata is instance of protocol buffer message

    By default, it validates udata at top of the stack
    '''

    return 'luaL_checkudata(L, %d, "%s")' % ( index, metatable(package, message) )

def has_body(package, message, field):
    '''Returns the function body for a has_<field> function'''

    lines = []
    lines.extend(obtain_message_from_udata(package, message))
    lines.append('lua_pushboolean(L, m->has_%s());' % field.lower())
    lines.append('return 1;')
    return lines

def clear_body(package, message, field):
    '''Returns the function body for a clear_<field> function'''

    lines = []
    lines.extend(obtain_message_from_udata(package, message))
    lines.append('m->clear_%s();' % field.lower())
    lines.append('return 0;')
    return lines

def size_body(package, message, field):
    '''Returns the function body for a size_<field> function'''

    lines = []
    lines.extend(obtain_message_from_udata(package, message))
    lines.append('int size = m->%s_size();' % field.lower())
    lines.append('lua_pushinteger(L, size);')
    lines.append('return 1;')
    return lines

def add_body(package, message, field, type_name):
    '''Returns the function body for the add_<field> function for repeated embedded messages'''

    lines = []
    lines.extend(obtain_message_from_udata(package, message))
    lines.extend([
        '%s *msg_new = m->add_%s();' % ( cpp_class(type_name), field.lower() ),
        # since the message is allocated out of the containing message, Lua
        # does not need to do GC
        'lua_protobuf%s_pushreference(L, msg_new, NULL, NULL);' % type_name.replace('.', '_'),
        'return 1;',
    ])
    return lines

def field_get(package, message, field_descriptor):
    '''Returns function definition for a get_<field> function'''

    name = field_descriptor.name
    type = field_descriptor.type
    type_name = field_descriptor.type_name
    label = field_descriptor.label
    repeated = label == FieldDescriptor.LABEL_REPEATED

    lines = []
    lines.extend(field_function_start(package, message, 'get', name))
lines.extend(obtain_message_from_udata(package, message)) # the logic is significantly different depending on if the field is # singular or repeated. # for repeated, we have an argument which points to the numeric index to # retrieve. in true Lua convention, we index starting from 1, which is # different from protocol buffers, which indexes from 0 if repeated: lines.extend([ 'if (lua_gettop(L) != 2) {', 'return luaL_error(L, "missing required numeric argument");', '}', 'lua_Integer index = luaL_checkinteger(L, 2);', 'if (index < 1 || index > m->%s_size()) {' % name.lower(), # TODO is returning nil the more Lua way? 'return luaL_error(L, "index must be between 1 and current size: %%d", m->%s_size());' % name.lower(), '}', ]) # TODO float and double types are not equivalent. don't treat them as such # TODO figure out how to support 64 bit integers properly if repeated: if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]: lines.extend([ 'string s = m->%s(index - 1);' % name.lower(), 'lua_pushlstring(L, s.c_str(), s.size());', ]) elif type == FieldDescriptor.TYPE_BOOL: lines.append('lua_pushboolean(L, m->%s(index-1));' % name.lower()) elif type in [FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_FIXED32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32]: lines.append('lua_pushinteger(L, m->%s(index-1));' % name.lower()) elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64, FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]: lines.append('lua_pushinteger(L, m->%s(index-1));' % name.lower()) elif type == FieldDescriptor.TYPE_FLOAT or type == FieldDescriptor.TYPE_DOUBLE: lines.append('lua_pushnumber(L, m->%s(index-1));' % name.lower()) elif type == FieldDescriptor.TYPE_ENUM: lines.append('lua_pushnumber(L, m->%s(index-1));' % name.lower()) elif type == FieldDescriptor.TYPE_MESSAGE: lines.extend([ '%s * got_msg = m->mutable_%s(index-1);' % ( 
type_name.replace('.', '::'), name.lower() ), 'lua_protobuf%s_pushreference(L, got_msg, NULL, NULL);' % type_name.replace('.', '_'), ]) else: lines.append('return luaL_error(L, "lua-protobuf does not support this field type");') else: # for scalar fields, we push nil if the value is not defined # this is the Lua way if type == FieldDescriptor.TYPE_STRING or type == FieldDescriptor.TYPE_BYTES: lines.append('string s = m->%s();' % name.lower()) lines.append('if (m->has_%s()) lua_pushlstring(L, s.c_str(), s.size()); else lua_pushnil(L);' % name.lower()) elif type == FieldDescriptor.TYPE_BOOL: lines.append('if (m->has_%s()) lua_pushboolean(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() )) elif type in [FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_FIXED32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32]: lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() )) elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64, FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]: lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() )) elif type == FieldDescriptor.TYPE_FLOAT or type == FieldDescriptor.TYPE_DOUBLE: lines.append('if (m->has_%s()) lua_pushnumber(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() )) elif type == FieldDescriptor.TYPE_ENUM: lines.append('if (m->has_%s()) lua_pushinteger(L, m->%s()); else lua_pushnil(L);' % ( name.lower(), name.lower() )) elif type == FieldDescriptor.TYPE_MESSAGE: lines.extend([ 'if (!m->has_%s()) {' % name.lower(), 'lua_pushnil(L);', '}', # we push the message as userdata # since the message is allocated out of the parent message, we # don't need to do garbage collection '%s * got_msg = m->mutable_%s();' % ( type_name.replace('.', '::'), name.lower() ), 'lua_protobuf%s_pushreference(L, 
got_msg, NULL, NULL);' % type_name.replace('.', '_'), ]) else: # not supported yet :( lines.append('return luaL_error(L, "lua-protobuf does not support this field type");') lines.append('return 1;') lines.append('}\n') return lines def field_set_assignment(field, args): return [ 'if (index == current_size + 1) {', 'm->add_%s(%s);' % ( field.lower(), args ), '}', 'else {', 'm->set_%s(index-1, %s);' % ( field.lower(), args ), '}', ] def field_set(package, message, field_descriptor): '''Returns function definition for a set_<field> function''' name = field_descriptor.name type = field_descriptor.type type_name = field_descriptor.type_name label = field_descriptor.label repeated = label == FieldDescriptor.LABEL_REPEATED lines = [] lines.extend(field_function_start(package, message, 'set', name.lower())) lines.extend(obtain_message_from_udata(package, message, 1)) # we do things differently depending on if this is a singular or repeated field # for singular fields, the new value is the first argument # for repeated fields, the index is arg1 and the value is arg2 if repeated: lines.extend([ 'if (lua_gettop(L) != 3) {', ' return luaL_error(L, "required 2 arguments not passed to function");', '}', 'lua_Integer index = luaL_checkinteger(L, 2);', 'int current_size = m->%s_size();' % name.lower(), 'if (index < 1 || index > current_size + 1) {', 'return luaL_error(L, "index must be between 1 and %d", current_size + 1);', '}', # we don't support the automagic nil clears value... 
yet 'if (lua_isnil(L, 3)) {', 'return luaL_error(L, "cannot assign nil to repeated fields (yet)");', '}', ]) # TODO proper 64 bit handling # now move on to the assignment if repeated: if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]: lines.extend([ 'size_t length = 0;', 'const char *s = luaL_checklstring(L, 3, &length);', ]) lines.extend(field_set_assignment(name, 's, length')) elif type == FieldDescriptor.TYPE_BOOL: lines.append('bool b = !!lua_toboolean(L, 3);') lines.extend(field_set_assignment(name, 'b')) elif type in [ FieldDescriptor.TYPE_DOUBLE, FieldDescriptor.TYPE_FLOAT ]: lines.append('double d = lua_tonumber(L, 3);') lines.extend(field_set_assignment(name, 'd')) elif type in [ FieldDescriptor.TYPE_INT32, FieldDescriptor.TYPE_FIXED32, FieldDescriptor.TYPE_UINT32, FieldDescriptor.TYPE_SFIXED32, FieldDescriptor.TYPE_SINT32 ]: lines.append('lua_Integer i = lua_tointeger(L, 3);') lines.extend(field_set_assignment(name, 'i')) elif type in [ FieldDescriptor.TYPE_INT64, FieldDescriptor.TYPE_UINT64, FieldDescriptor.TYPE_FIXED64, FieldDescriptor.TYPE_SFIXED64, FieldDescriptor.TYPE_SINT64]: lines.append('lua_Integer i = lua_tointeger(L, 3);') lines.extend(field_set_assignment(name, 'i')) elif type == FieldDescriptor.TYPE_ENUM: lines.append('lua_Integer i = lua_tointeger(L, 3);') lines.extend(field_set_assignment(name, '(%s)i' % type_name.replace('.', '::'))) elif type == FieldDescriptor.TYPE_MESSAGE: lines.append('return luaL_error(L, "to manipulate embedded messages, fetch the embedded message and modify it");') else: lines.append('return luaL_error(L, "field type not yet supported");') lines.append('return 0;') else: # if they call set() with nil, we interpret as a clear # this is the Lua way, after all lines.extend([ 'if (lua_isnil(L, 2)) {', 'm->clear_%s();' % name.lower(), 'return 0;', '}', '', ]) if type in [ FieldDescriptor.TYPE_STRING, FieldDescriptor.TYPE_BYTES ]: lines.extend([ 'if (!lua_isstring(L, 2)) return luaL_error(L, "passed 
value is not a string");', 'size_t len;', 'const char *s = lua_tolstring(L, 2, &len);', 'if (!s) {', 'luaL_error(L, "could not obtain string on stack. weird");', '}', 'm->set_%s(s, len);'
#!/usr/bin/env python # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: <NAME> <<EMAIL>> # ''' Non-relativistic NMR shielding tensor ''' import time from functools import reduce import numpy from pyscf import lib from pyscf.lib import logger from pyscf.scf import _vhf from pyscf.scf import cphf from pyscf.soscf.newton_ah import _gen_rhf_response from pyscf.data import nist # flatten([[XX, XY, XZ], # [YX, YY, YZ], # [ZX, ZY, ZZ]]) TENSOR_IDX = numpy.arange(9) def dia(mol, dm0, gauge_orig=None, shielding_nuc=None): '''Note the side effects of set_common_origin''' if shielding_nuc is None: shielding_nuc = range(mol.natm) if gauge_orig is not None: mol.set_common_origin(gauge_orig) msc_dia = [] for n, atm_id in enumerate(shielding_nuc): mol.set_rinv_origin(mol.atom_coord(atm_id)) # a11part = (B dot) -1/2 frac{\vec{r}_N}{r_N^3} r (dot mu) if gauge_orig is None: h11 = mol.intor('int1e_giao_a11part', 9) else: h11 = mol.intor('int1e_cg_a11part', 9) trh11 = -(h11[0] + h11[4] + h11[8]) h11[0] += trh11 h11[4] += trh11 h11[8] += trh11 if gauge_orig is None: h11 += mol.intor('int1e_a01gp', 9) a11 = numpy.einsum('xij,ij->x', h11, dm0) msc_dia.append(a11) # XX, XY, XZ, YX, YY, YZ, ZX, ZY, ZZ = 1..9 # => [[XX, XY, XZ], [YX, YY, YZ], [ZX, ZY, ZZ]] return numpy.array(msc_dia).reshape(-1, 3, 3) # Note mo10 is the imaginary part of MO^1 def para(mol, mo10, mo_coeff, mo_occ, shielding_nuc=None): if shielding_nuc is None: 
shielding_nuc = range(mol.natm) para_vir = numpy.empty((len(shielding_nuc),3,3)) para_occ = numpy.empty((len(shielding_nuc),3,3)) occidx = mo_occ > 0 viridx = mo_occ == 0 orbo = mo_coeff[:,occidx] orbv = mo_coeff[:,viridx] # *2 for doubly occupied orbitals dm10_oo = numpy.asarray([reduce(numpy.dot, (orbo, x[occidx]*2, orbo.T.conj())) for x in mo10]) dm10_vo = numpy.asarray([reduce(numpy.dot, (orbv, x[viridx]*2, orbo.T.conj())) for x in mo10]) for n, atm_id in enumerate(shielding_nuc): mol.set_rinv_origin(mol.atom_coord(atm_id)) # H^{01} = 1/2(A01 dot p + p dot A01) => (a01p + c.c.)/2 ~ <a01p> # Im[A01 dot p] = Im[vec{r}/r^3 x vec{p}] = Im[-i p (1/r) x p] = -p (1/r) x p h01i = mol.intor_asymmetric('int1e_prinvxp', 3) # = -Im[H^{01}] # <H^{01},MO^1> = - Tr(Im[H^{01}],Im[MO^1]) = Tr(-Im[H^{01}],Im[MO^1]) para_occ[n] = numpy.einsum('xji,yij->xy', dm10_oo, h01i) * 2 # *2 for + c.c. para_vir[n] = numpy.einsum('xji,yij->xy', dm10_vo, h01i) * 2 # *2 for + c.c. msc_para = para_occ + para_vir return msc_para, para_vir, para_occ def make_h10(mol, dm0, gauge_orig=None, verbose=logger.WARN): '''Imaginary part of H10 operator Note the side effects of set_common_origin ''' if isinstance(verbose, logger.Logger): log = verbose else: log = logger.Logger(mol.stdout, verbose) if gauge_orig is None: # A10_i dot p + p dot A10_i consistents with <p^2 g> # A10_j dot p + p dot A10_j consistents with <g p^2> # 1/2(A10_j dot p + p dot A10_j) => Im[1/4 (rjxp - pxrj)] = -1/2 <irjxp> log.debug('First-order GIAO Fock matrix') h1 = -.5 * mol.intor('int1e_giao_irjxp', 3) + make_h10giao(mol, dm0) else: mol.set_common_origin(gauge_orig) h1 = -.5 * mol.intor('int1e_cg_irxp', 3) return h1 def get_jk(mol, dm0): # J = Im[(i i|\mu g\nu) + (i gi|\mu \nu)] = -i (i i|\mu g\nu) # K = Im[(\mu gi|i \nu) + (\mu i|i g\nu)] # = [-i (\mu g i|i \nu)] - h.c. (-h.c. 
for anti-symm because of the factor -i) intor = mol._add_suffix('int2e_ig1') vj, vk = _vhf.direct_mapdm(intor, # (g i,j|k,l) 'a4ij', ('lk->s1ij', 'jk->s1il'), -dm0, 3, # xyz, 3 components mol._atm, mol._bas, mol._env) vk = vk - numpy.swapaxes(vk, -1, -2) return vj, vk def make_h10giao(mol, dm0): vj, vk = get_jk(mol, dm0) h1 = vj - .5 * vk # Im[<g\mu|H|g\nu>] = -i * (gnuc + gkin) h1 -= mol.intor_asymmetric('int1e_ignuc', 3) if mol.has_ecp(): h1 -= mol.intor_asymmetric('ECPscalar_ignuc', 3) h1 -= mol.intor('int1e_igkin', 3) return h1 def make_s10(mol, gauge_orig=None): if gauge_orig is None: # Im[<g\mu |g\nu>] s1 = -mol.intor_asymmetric('int1e_igovlp', 3) else: nao = mol.nao_nr() s1 = numpy.zeros((3,nao,nao)) return s1 def solve_mo1(mo_energy, mo_occ, h1, s1): '''uncoupled first order equation''' e_a = mo_energy[mo_occ==0] e_i = mo_energy[mo_occ>0] e_ai = 1 / (e_a.reshape(-1,1) - e_i) hs = h1 - s1 * e_i mo10 = numpy.empty_like(hs) mo10[:,mo_occ==0,:] = -hs[:,mo_occ==0,:] * e_ai mo10[:,mo_occ>0,:] = -s1[:,mo_occ>0,:] * .5 e_ji = e_i.reshape(-1,1) - e_i mo_e10 = hs[:,mo_occ>0,:] + mo10[:,mo_occ>0,:] * e_ji return mo10, mo_e10 class NMR(lib.StreamObject): def __init__(self, scf_method): self.mol = scf_method.mol self.verbose = scf_method.mol.verbose self.stdout = scf_method.mol.stdout self.chkfile = scf_method.chkfile self._scf = scf_method self.shielding_nuc = range(self.mol.natm) # gauge_orig=None will call GIAO. 
Specify coordinate for common gauge self.gauge_orig = None self.cphf = True self.max_cycle_cphf = 20 self.conv_tol = 1e-9 self.mo10 = None self.mo_e10 = None self._keys = set(self.__dict__.keys()) def dump_flags(self): log = logger.Logger(self.stdout, self.verbose) log.info('\n') log.info('******** %s for %s ********', self.__class__, self._scf.__class__) if self.gauge_orig is None: log.info('gauge = GIAO') else: log.info('Common gauge = %s', str(self.gauge_orig)) log.info('shielding for atoms %s', str(self.shielding_nuc)) if self.cphf: log.info('Solving MO10 eq with CPHF.') log.info('CPHF conv_tol = %g', self.conv_tol) log.info('CPHF max_cycle_cphf = %d', self.max_cycle_cphf) if not self._scf.converged: log.warn('Ground state SCF is not converged') return self def kernel(self, mo1=None): return self.shielding(mo1) def shielding(self, mo1=None): cput0 = (time.clock(), time.time()) self.check_sanity() self.dump_flags() unit_ppm = nist.ALPHA**2 * 1e6 msc_dia = self.dia() * unit_ppm msc_para, para_vir, para_occ = self.para(mo10=mo1) msc_para *= unit_ppm para_vir *= unit_ppm para_occ *= unit_ppm e11 = msc_para + msc_dia logger.timer(self, 'NMR shielding', *cput0) if self.verbose > logger.QUIET: for i, atm_id in enumerate(self.shielding_nuc): _write(self.stdout, e11[i], '\ntotal shielding of atom %d %s' \ % (atm_id, self.mol.atom_symbol(atm_id))) _write(self.stdout, msc_dia[i], 'dia-magnetism') _write(self.stdout, msc_para[i], 'para-magnetism') if self.verbose >= logger.INFO: _write(self.stdout, para_occ[i], 'occ part of para-magnetism') _write(self.stdout, para_vir[i], 'vir part of para-magnetism') return e11 def dia(self, mol=None, dm0=None, gauge_orig=None, shielding_nuc=None): if mol is None: mol = self.mol if gauge_orig is None: gauge_orig = self.gauge_orig if shielding_nuc is None: shielding_nuc = self.shielding_nuc if dm0 is None: dm0 = self._scf.make_rdm1() return dia(mol, dm0, gauge_orig, shielding_nuc) def para(self, mol=None, mo10=None, mo_coeff=None, 
mo_occ=None, shielding_nuc=None): if mol is None: mol = self.mol if mo_coeff is None: mo_coeff = self._scf.mo_coeff if mo_occ is None: mo_occ = self._scf.mo_occ if shielding_nuc is None: shielding_nuc = self.shielding_nuc if mo10 is None: self.mo10, self.mo_e10 = self.solve_mo1() mo10 = self.mo10 return para(mol, mo10, mo_coeff, mo_occ, shielding_nuc) def make_h10(self, mol=None, dm0=None, gauge_orig=None): if mol is None: mol = self.mol if dm0 is None: dm0 = self._scf.make_rdm1() if gauge_orig is None: gauge_orig = self.gauge_orig log = logger.Logger(self.stdout, self.verbose) h1 = make_h10(mol, dm0, gauge_orig, log) if self.chkfile: lib.chkfile.dump(self.chkfile, 'nmr/h1', h1) return h1 def make_s10(self, mol=None, gauge_orig=None): if mol is None: mol = self.mol if gauge_orig is None: gauge_orig = self.gauge_orig return make_s10(mol, gauge_orig) def solve_mo1(self, mo_energy=None, mo_occ=None, h1=None, s1=None, with_cphf=None): cput1 = (time.clock(), time.time()) log = logger.Logger(self.stdout, self.verbose) if mo_energy is None: mo_energy = self._scf.mo_energy if mo_occ is None: mo_occ = self._scf.mo_occ if with_cphf is None: with_cphf = self.cphf mol = self.mol mo_coeff = self._scf.mo_coeff orbo = mo_coeff[:,mo_occ>0] if h1 is None: dm0 = self._scf.make_rdm1(mo_coeff, mo_occ) h1 = numpy.asarray([reduce(numpy.dot, (mo_coeff.T.conj(), x, orbo)) for x in self.make_h10(mol, dm0)]) if s1 is None: s1 = numpy.asarray([reduce(numpy.dot, (mo_coeff.T.conj(), x, orbo)) for x in self.make_s10(mol)]) cput1 = log.timer('first order Fock matrix', *cput1) if with_cphf: vind = self.gen_vind(self._scf, mo_coeff, mo_occ) mo10, mo_e10 = cphf.solve(vind, mo_energy, mo_occ, h1, s1, self.max_cycle_cphf, self.conv_tol, verbose=log) else: mo10, mo_e10 = solve_mo1(mo_energy, mo_occ, h1, s1) logger.timer(self, 'solving mo1 eqn', *cput1) return mo10, mo_e10 def gen_vind(self, mf, mo_coeff, mo_occ): '''Induced potential''' vresp = _gen_rhf_response(mf, hermi=2) occidx = mo_occ > 0 orbo = 
mo_coeff[:,occidx] nocc = orbo.shape[1] nao, nmo = mo_coeff.shape def vind(mo1): #direct_scf_bak, mf.direct_scf = mf.direct_scf, False dm1 = [reduce(numpy.dot, (mo_coeff, x*2, orbo.T.conj())) for x in mo1.reshape(3,nmo,nocc)] dm1 = numpy.asarray([d1-d1.conj().T for d1 in dm1]) v1mo = numpy.asarray([reduce(numpy.dot, (mo_coeff.T.conj(), x, orbo)) for x in vresp(dm1)]) #mf.direct_scf = direct_scf_bak return v1mo.ravel() return vind def _write(stdout, msc3x3, title): stdout.write('%s\n' % title) stdout.write('B_x %s\n' % str(msc3x3[0])) stdout.write('B_y %s\n' % str(msc3x3[1])) stdout.write('B_z %s\n' % str(msc3x3[2])) stdout.flush() if __name__ == '__main__': from pyscf import gto from pyscf import scf mol = gto.Mole() mol.verbose = 0 mol.output = None mol.atom.extend([ [1 , (0. , 0. , .917)], ['F' , (0. , 0. , 0.)], ]) mol.nucmod = {'F': 2}
import asyncio import dataclasses import datetime from typing import Optional from anyio import to_thread from sqlalchemy.ext.asyncio import AsyncSession from Backend.core.errors import CustomException from Backend.crud import crud_activities, destiny_manifest, discord_users from Backend.crud.destiny.collectibles import collectibles from Backend.crud.destiny.items import destiny_items from Backend.crud.destiny.records import records from Backend.database.models import ( Collectibles, DestinyPresentationNodeDefinition, DestinyRecordDefinition, DiscordUsers, Records, ) from Backend.misc.cache import cache from Backend.misc.helperFunctions import get_datetime_from_bungie_entry from Backend.networking.bungieApi import BungieApi from Backend.networking.bungieRoutes import clan_user_route, profile_route, stat_route from Shared.enums.destiny import DestinyInventoryBucketEnum, DestinyPresentationNodeWeaponSlotEnum from Shared.functions.formatting import make_progress_bar_text from Shared.functions.helperFunctions import get_now_with_tz from Shared.networkingSchemas import ValueModel from Shared.networkingSchemas.destiny import ( BoolModelObjective, BoolModelRecord, DestinyCatalystModel, DestinyCatalystsModel, DestinyCharacterModel, DestinyCharactersModel, DestinyRecordModel, DestinySealModel, DestinySealsModel, DestinyTriumphScoreModel, SeasonalChallengesModel, SeasonalChallengesRecordModel, SeasonalChallengesTopicsModel, ) from Shared.networkingSchemas.destiny.clan import DestinyClanModel @dataclasses.dataclass class DestinyProfile: """User specific API calls""" db: AsyncSession user: DiscordUsers race_map = {2803282938: "Awoken", 898834093: "Exo", 3887404748: "Human"} gender_map = { 2204441813: "Female", 3111576190: "Male", } class_map = {671679327: "Hunter", 2271682572: "Warlock", 3655393761: "Titan"} _triumphs: dict = dataclasses.field(init=False, default_factory=dict) def __post_init__(self): # some shortcuts self.discord_id = self.user.discord_id self.destiny_id = 
self.user.destiny_id self.system = self.user.system # the network class self.api = BungieApi(db=self.db, user=self.user) async def get_clan(self) -> DestinyClanModel: """Return the user's clan""" response = await self.api.get(route=clan_user_route.format(destiny_id=self.destiny_id, system=self.system)) results = response.content["results"] if not results: raise CustomException("UserNoClan") return DestinyClanModel(id=results[0]["group"]["groupId"], name=results[0]["group"]["name"]) async def get_seal_completion(self) -> DestinySealsModel: """Gets all seals and the users completion status""" # get the seals seals = await destiny_items.get_seals(db=self.db) # loop through the seals and format the data result = DestinySealsModel() for seal, triumphs in seals.items(): # get user completion user_guilded_completed = [] user_guilded_completed_int = 0 user_completed = [] user_completed_int = 0 for triumph in triumphs: user_data = await self.has_triumph(triumph.reference_id) model = DestinyRecordModel( name=triumph.name, description=triumph.description, completed=user_data.bool, ) # handle guilded triumphs differently if triumph.for_title_gilding: user_guilded_completed.append(model) if user_data.bool: user_guilded_completed_int += 1 else: user_completed.append(model) if user_data.bool: user_completed_int += 1 # normal triumph data completion_percentage = user_completed_int / len(user_completed) data = DestinySealModel( name=seal.name, description=seal.description, completed=True if completion_percentage == 1 else False, completion_percentage=completion_percentage, completion_status=make_progress_bar_text(completion_percentage), records=user_completed, ) # add it to the correct type if data.completed: result.completed.append(data) else: result.not_completed.append(data) # guilded triumph data if user_guilded_completed: completion_percentage = user_guilded_completed_int / len(user_guilded_completed) data = DestinySealModel( name=seal.name, description=seal.description, 
completed=True if completion_percentage == 1 else False, completion_percentage=completion_percentage, completion_status=make_progress_bar_text(completion_percentage), records=user_guilded_completed, ) # add it to the correct type if data.completed: result.guilded.append(data) else: result.not_guilded.append(data) return result async def get_catalyst_completion(self) -> DestinyCatalystsModel: """Gets all catalysts and the users completion status""" catalysts = await destiny_items.get_catalysts(db=self.db) triumphs = await self.get_triumphs() # check their completion result = DestinyCatalystsModel() for catalyst in catalysts: # get the completion rate if await self.has_triumph(catalyst.reference_id): completion_percentage = 1 else: user_data = triumphs[str(catalyst.reference_id)] if user_data["objectives"] and user_data["objectives"][0]["completionValue"]: i = 0 percentages = [] for part in user_data["objectives"]: i += 1 percentages.append( part["progress"] / part["completionValue"] if part["completionValue"] != 0 else 0 ) completion_percentage = sum(percentages) / len(percentages) else: completion_percentage = 0 model = DestinyCatalystModel( name=catalyst.name, complete=completion_percentage == 1, completion_percentage=completion_percentage, completion_status=make_progress_bar_text(completion_percentage), ) # get the slot and sort them if DestinyPresentationNodeWeaponSlotEnum.KINETIC.value in catalyst.parent_node_hashes: result.kinetic.append(model) elif DestinyPresentationNodeWeaponSlotEnum.ENERGY.value in catalyst.parent_node_hashes: result.energy.append(model) elif DestinyPresentationNodeWeaponSlotEnum.POWER.value in catalyst.parent_node_hashes: result.power.append(model) # add to the total if model.complete: result.completed += 1 return result async def get_used_vault_space(self) -> int: """Gets the current used vault space of the user""" buckets = await self.__get_inventory_bucket(DestinyInventoryBucketEnum.VAULT) return 
len(buckets[DestinyInventoryBucketEnum.VAULT]) async def get_bright_dust(self) -> int: """Gets the current bright dust of the user""" return await self.__get_currency_amount(bucket=DestinyInventoryBucketEnum.BRIGHT_DUST) async def get_legendary_shards(self) -> int: """Gets the current legendary shards of the user""" return await self.__get_currency_amount(bucket=DestinyInventoryBucketEnum.SHARDS) async def get_consumable_amount(self, consumable_id: int) -> int: """Returns the amount of a consumable this user has""" buckets = await self.__get_inventory_bucket( DestinyInventoryBucketEnum.VAULT, DestinyInventoryBucketEnum.CONSUMABLES ) # get the value value = 0 for bucket in buckets.values(): if consumable_id in bucket: value += bucket[consumable_id]["quantity"] return value async def get_max_power(self) -> float: """Returns the max power of the user""" char_data = await self.__get_all_inventory_bucket(include_item_level=True) # look at each character max_power = await to_thread.run_sync(get_max_power_subprocess, char_data) return max_power async def get_last_online(self) -> datetime.datetime: """Returns the last online time""" result = await self.__get_profile() return get_datetime_from_bungie_entry(result["profile"]["data"]["dateLastPlayed"]) async def get_triumph_score(self) -> DestinyTriumphScoreModel: """Returns the triumph score""" triumphs_data = await self.get_triumphs() return DestinyTriumphScoreModel( active_score=triumphs_data["active_score"], legacy_score=triumphs_data["legacy_score"], lifetime_score=triumphs_data["lifetime_score"], ) async def has_triumph(self, triumph_hash: str | int) -> BoolModelRecord: """Returns if the triumph is gotten""" triumph_hash = int(triumph_hash) # check cache async with asyncio.Lock(): if self.destiny_id not in cache.triumphs: cache.triumphs.update({self.destiny_id: {}}) if triumph_hash not in cache.triumphs[self.destiny_id]: # check if the last update is older than 10 minutes if self._triumphs and ( 
self.user.triumphs_last_updated + datetime.timedelta(minutes=10) > get_now_with_tz() ): sought_triumph = self._triumphs[str(triumph_hash)] else: # get from db and return that if it says user got the triumph result = await records.has_record(db=self.db, destiny_id=self.destiny_id, triumph_hash=triumph_hash) if result: # only caching already got triumphs cache.triumphs[self.destiny_id].update({triumph_hash: True}) return BoolModelRecord(bool=True) # alright, the user doesn't have the triumph, at least not in the db. So let's update the db entries triumphs_data = await self.get_triumphs() to_insert = [] sought_triumph = {} # loop through all triumphs and add them / update them in the db for triumph_id, triumph_info in triumphs_data.items(): try: triumph_id = int(triumph_id) except ValueError: # this is the "active_score", ... fields continue if triumph_id in cache.triumphs[self.destiny_id]: continue # does the entry exist in the db? # we don't need to re calc the state if its already marked as earned in the db result = await records.get_record( db=self.db, destiny_id=self.destiny_id, triumph_hash=triumph_id ) if result and result.completed: cache.triumphs[self.destiny_id].update({triumph_id: True}) continue # calculate if the triumph is gotten and save the triumph we are looking for status = True if "objectives" not in triumph_info: # make sure it's RewardUnavailable aka legacy assert triumph_info["state"] & 2 # https://bungie-net.github.io/multi/schema_Destiny-DestinyRecordState.html#schema_Destiny-DestinyRecordState status &= triumph_info["state"] & 1 else: for part in triumph_info["objectives"]: status &= part["complete"] # is this the triumph we are looking for? if triumph_id == triumph_hash: sought_triumph = triumph_info # don't really need to insert not-gained triumphs if status: cache.triumphs[self.destiny_id].update({triumph_id: True}) # do we need to update or insert? 
if not result: # insert to_insert.append( Records(destiny_id=self.destiny_id, record_id=triumph_id, completed=True) ) else: # update await records.update_record(db=self.db, obj=result, completed=True) # mass insert the missing entries if to_insert: await records.insert_records(db=self.db, objs=to_insert) # save the update time await discord_users.update(db=self.db, to_update=self.user, triumphs_last_updated=get_now_with_tz()) # now check again if its completed if triumph_hash in cache.triumphs[self.destiny_id]: return BoolModelRecord(bool=True) # if not, return the data with the objectives info result = BoolModelRecord(bool=False) if "objectives" in sought_triumph: for part in sought_triumph["objectives"]: result.objectives.append(BoolModelObjective(objective_id=part["objectiveHash"], bool=part["complete"])) return result async def has_collectible(self, collectible_hash: str | int) -> bool: """Returns if the collectible is gotten""" collectible_hash = int(collectible_hash) # check cache async with asyncio.Lock(): if self.destiny_id not in cache.collectibles: cache.collectibles.update({self.destiny_id: {}}) if collectible_hash not in cache.collectibles[self.destiny_id]: # check if the last update is older than 10 minutes if self.user.collectibles_last_updated + datetime.timedelta(minutes=10) > get_now_with_tz(): return False # get from db and return that if it says user got the collectible result = await collectibles.has_collectible( db=self.db, destiny_id=self.destiny_id, collectible_hash=collectible_hash ) if result: # only caching already got collectibles cache.collectibles[self.destiny_id].update({collectible_hash: True}) return True # as with the triumphs, we need to update our local collectible data now collectibles_data = await self.get_collectibles() to_insert = [] # loop through the collectibles for collectible_id, collectible_info in collectibles_data.items(): collectible_id = int(collectible_id) if collectible_id in cache.collectibles[self.destiny_id]: 
continue # does the entry exist in the db? # we don't need to re calc the state if its already marked as owned in the db result = await collectibles.get_collectible( db=self.db, destiny_id=self.destiny_id, collectible_hash=collectible_id ) if result and result.owned: cache.collectibles[self.destiny_id].update({collectible_id: True}) continue # bit 1 not being set means the collectible is gotten # see https://bungie-net.github.io/multi/schema_Destiny-DestinyCollectibleState.html#schema_Destiny-DestinyCollectibleState status = collectible_info["state"] & 1 == 0 # don't really need to insert not-owned collectibles if status: cache.collectibles[self.destiny_id].update({collectible_id: True}) # do we need to update or insert? if not result: # insert to_insert.append( Collectibles(destiny_id=self.destiny_id, collectible_id=collectible_id, owned=True) ) else: # update await collectibles.update_collectible(db=self.db, obj=result, owned=True) # mass insert the missing entries if to_insert: await collectibles.insert_collectibles(db=self.db, objs=to_insert) # save the update time await discord_users.update(db=self.db, to_update=self.user, collectibles_last_updated=get_now_with_tz()) # now check again if its owned if collectible_hash in cache.collectibles[self.destiny_id]: return True else: return False async def get_metric_value(self, metric_hash: str | int) -> int: """Returns the value of the given metric hash""" metric_hash = str(metric_hash) metrics = await self.get_metrics() try: return metrics[metric_hash]["objectiveProgress"]["progress"] except KeyError: raise CustomException("BungieDestinyItemNotExist") async def
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 14 15:48:42 2019

@author: jpass
"""
# Purpose: read a BGS "Data & Info Asset Register" Excel workbook and emit one
# ISO 19139 (gmd namespace) XML metadata record per asset row.  The bulk of the
# module is XML fragment templates (start/end string constants) that the row
# loop at the bottom concatenates into each output document.
import pandas as pd
import uuid
import os
import logging
from time import strftime, localtime

#logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.INFO)

# Default metadata date stamp (today, ISO format) used when a row has no
# "Date of record entered" value.
md_datedef = strftime("%Y-%m-%d", localtime())
logging.debug(str(md_datedef))

'''
row['Asset Identifier (Asset ID)'] is blank in exemplar spreadsheet template and is
used synonymously with metadata identifer. However, it could be used to give an
idenifier to the asset, which is separate to the metadata identifier (UUID).
In the XML there is a location for an id for the dataset, which doesn't allow a
UUID, so in the code we will generate a dataset id based on some prefix and row
number...
'''
# --- Run configuration ------------------------------------------------------
idprefix = 'IAR_KEN_'          # prefix for generated dataset ids
assetIDisPopulated = False     # True when the spreadsheet supplies Asset IDs
sampleNumber = 1461            # number of rows (head) to process
standardName = 'National Geodata Centre for Kenya Schema'
standardVersion = '1.0'
md_poc_org = 'Ministry of Petroleum and Mining (National Geodata Centre for Kenya)'
md_poc_email = '<EMAIL>'
md_poc_indiv = 'enquiries'
showIndividual = True          # include <gmd:individualName> in metadata contact
useListedIndiv = False         # False: use md_poc_indiv, True: use per-row staff name

# Output directory for the generated XML files (created if absent).
write_to = "X:\\md\\kenxml"
#write_to = "X:\\md\\xmlout"
if not os.path.exists(write_to):
    os.makedirs(write_to)
os.chdir(write_to)

# Input workbook path.
#diart2 = 'X:\md\BGS_Data&Info_AssetRegister_Template_v2.xlsx'
#diart2 = 'X:\md\BGS_Data&Info_AssetRegister_Template_v2_Kenya.xlsx'
diart2 = 'X:\md\BGS_Data&Info_AssetRegister_Template_v2_Kenya_20191114.xlsx'

# assets is the dataframe
#assets = pd.read_excel(diart2, sheet_name='Data&Info Asset Register', header=2, dtype=str)
assets = pd.read_excel(diart2, sheet_name='Data&Info Asset Register', header=2)
# ref https://stackoverflow.com/questions/45148292/python-pandas-read-excel-dtype-str-replace-nan-by-blank-when-reading-or-whe
# Replace NaN values in spreadsheet with empty string, otherwise get 'nan' literal in output
assets = assets.fillna('')
headings = assets.columns
logging.debug("Column headings" + str(headings))
data_types = assets.dtypes
logging.debug("data types" + str(data_types))
headOutput = assets.head()
logging.debug("head()" + str(headOutput))

# First two rows are examples and we should remove them from the dataframe
# (or possibly the spreadsheet)
#assets.drop(assets.index[[0,1]])
# Make a copy of the data frame dropping first two rows
actual_assets = assets.drop(assets.index[[0,1]]).copy()

# --- XML fragment templates -------------------------------------------------
# Document wrapper: XML declaration plus gmd:MD_Metadata root with namespaces.
gmd_start = '<?xml version="1.0" encoding="UTF-8"?>\n<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd" xmlns:gco="http://www.isotc211.org/2005/gco" xmlns:gml="http://www.opengis.net/gml/3.2" xmlns:gmx="http://www.isotc211.org/2005/gmx" xmlns:gsr="http://www.isotc211.org/2005/gsr" xmlns:gss="http://www.isotc211.org/2005/gss" xmlns:gts="http://www.isotc211.org/2005/gts" xmlns:srv="http://www.isotc211.org/2005/srv" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:geonet="http://www.fao.org/geonetwork" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\nxsi:schemaLocation="http://www.isotc211.org/2005/gmd http://inspire.ec.europa.eu/draft-schemas/inspire-md-schemas/apiso-inspire/apiso-inspire.xsd">\n'
gmd_end = '</gmd:MD_Metadata>'
elemAtt_close = '">'
empty_elemAtt_close = '"/>'

# We'll need to insert a UUID as the metadata record unique identifier
file_id_start = '<gmd:fileIdentifier>\n<gco:CharacterString>'
file_id_end = '</gco:CharacterString>\n</gmd:fileIdentifier>\n'

# Language of the metadata should always be English (for us)
md_lang = '<gmd:language>\n<gmd:LanguageCode codeList="ML_gmxCodelists.xml#LanguageCode" codeListValue="eng">English</gmd:LanguageCode>\n</gmd:language>\n'

# Below assumes everything is a dataset (not service, series, nongeographic...)
md_hlevelds = '<gmd:hierarchyLevel>\n<gmd:MD_ScopeCode codeList="gmxCodelists.xml#MD_ScopeCode" codeListValue="dataset">dataset</gmd:MD_ScopeCode>\n</gmd:hierarchyLevel>\n'
md_hlevelsr = '<gmd:hierarchyLevel>\n<gmd:MD_ScopeCode codeList="gmxCodelists.xml#MD_ScopeCode" codeListValue="series">series</gmd:MD_ScopeCode>\n</gmd:hierarchyLevel>\n'
md_hlevelng = '<gmd:hierarchyLevel>\n<gmd:MD_ScopeCode codeList="gmxCodelists.xml#MD_ScopeCode" codeListValue="nonGeographicDataset">information applies to non-geographic data</gmd:MD_ScopeCode>\n</gmd:hierarchyLevel>\n'

# Just using dummy name and version below
md_standard = '<gmd:metadataStandardName>\n<gco:CharacterString>' + standardName + '</gco:CharacterString>\n</gmd:metadataStandardName>\n'
md_standard_version = '<gmd:metadataStandardVersion>\n<gco:CharacterString>' + standardVersion + '</gco:CharacterString>\n</gmd:metadataStandardVersion>\n'

# We'll need to insert the actual contact details for the metadata
md_contact_start = '<gmd:contact>\n<gmd:CI_ResponsibleParty>\n'
md_pocInd_start = '<gmd:individualName>\n<gco:CharacterString>'
md_pocInd_end = '</gco:CharacterString>\n</gmd:individualName>\n'
md_pocOrg_start = '<gmd:organisationName>\n<gco:CharacterString>'
md_pocOrg_end = '</gco:CharacterString>\n</gmd:organisationName>\n'
md_conInfo_start = '<gmd:contactInfo>\n<gmd:CI_Contact>\n<gmd:address>\n<gmd:CI_Address>\n<gmd:electronicMailAddress>\n<gco:CharacterString>'
md_conInfo_end = '</gco:CharacterString>\n</gmd:electronicMailAddress>\n</gmd:CI_Address>\n</gmd:address>\n</gmd:CI_Contact>\n</gmd:contactInfo>\n'
md_contact_end = '<gmd:role>\n<gmd:CI_RoleCode codeList="gmxCodelists.xml#CI_RoleCode" codeListValue="pointOfContact">pointOfContact</gmd:CI_RoleCode>\n</gmd:role>\n</gmd:CI_ResponsibleParty>\n</gmd:contact>\n'

# Date when metadata was created / last published, it could be Now(), or it
# could reflect a date of asset registration (TBD)
# If the former, then assset registration date should probably also be
# recorded somewhere (report)
md_date_start = '<gmd:dateStamp>\n<gco:Date>'
md_date_end = '</gco:Date>\n</gmd:dateStamp>\n'

# Simple variant of wrapper for Coordinate reference system used by the dataset/asset
refsys_start = '<gmd:referenceSystemInfo>\n<gmd:MD_ReferenceSystem>\n<gmd:referenceSystemIdentifier>\n<gmd:RS_Identifier>\n<gmd:code>\n<gco:CharacterString>'
refsys_end = '</gco:CharacterString>\n</gmd:code>\n</gmd:RS_Identifier>\n</gmd:referenceSystemIdentifier>\n</gmd:MD_ReferenceSystem>\n</gmd:referenceSystemInfo>\n'

title_start = '<gmd:title>\n<gco:CharacterString>'
title_end = '</gco:CharacterString>\n</gmd:title>\n'
abstract_start = '<gmd:abstract>\n<gco:CharacterString>'
abstract_end = '</gco:CharacterString>\n</gmd:abstract>\n'

# There are many variants for keywords, listing individually or grouped, +/- relating to thesauri
ds_keyword_part1_start = '<gmd:descriptiveKeywords>\n<gmd:MD_Keywords>\n'
ds_keyword_part1_end = '</gmd:MD_Keywords>\n</gmd:descriptiveKeywords>\n'
ds_keyword_part2_start = '<gmd:keyword>\n<gco:CharacterString>'
ds_keyword_part2_end = '</gco:CharacterString>\n</gmd:keyword>\n'
no_mineral_kw = '<gmd:keyword>\n<gco:CharacterString>No Mineral commodity/Keywords supplied</gco:CharacterString>\n</gmd:keyword>\n'

ds_maintenance_start = '<gmd:resourceMaintenance>\n<gmd:MD_MaintenanceInformation>\n'
ds_maintenance_end = '</gmd:MD_MaintenanceInformation>\n</gmd:resourceMaintenance>\n'
ds_updateFreq_start = '<gmd:maintenanceAndUpdateFrequency>\n<gmd:MD_MaintenanceFrequencyCode codeList="gmxCodelists.xml#MD_MaintenanceFrequencyCode" codeListValue="'
ds_updateFreq_end = '</gmd:MD_MaintenanceFrequencyCode>\n</gmd:maintenanceAndUpdateFrequency>\n'

ds_identInfo_start = '<gmd:identificationInfo>\n'
ds_ident_start = '<gmd:MD_DataIdentification id="'
ds_identInfo_end = '</gmd:MD_DataIdentification>\n</gmd:identificationInfo>\n'

missing_date_start = '<gmd:date>\n<gmd:CI_Date>\n<gmd:date gco:nilReason="missing" />\n'
publication_type2 = '<gmd:dateType>\n<gmd:CI_DateTypeCode codeList="gmxCodelists.xml#CI_DateTypeCode" codeListValue="publication">Date of publication</gmd:CI_DateTypeCode>\n</gmd:dateType>'
missing_date_end = '</gmd:CI_Date>\n</gmd:date>\n'

ds_citation_start = '<gmd:citation>\n<gmd:CI_Citation>\n'
ds_citation_end = '</gmd:CI_Citation>\n</gmd:citation>\n'
ds_citation_date_start = '<gmd:date>\n<gmd:CI_Date>\n<gmd:date>\n<gco:Date>'
ds_citation_date_mid = '</gco:Date>\n</gmd:date>\n<gmd:dateType>\n<gmd:CI_DateTypeCode codeList="gmxCodelists.xml#CI_DateTypeCode" codeListValue="'
ds_citation_date_end = '</gmd:CI_DateTypeCode>\n</gmd:dateType>\n</gmd:CI_Date>\n</gmd:date>\n'
publication_type = 'publication' + elemAtt_close + 'Date of publication'

# Edition
ds_citation_edit_start = '<gmd:edition>\n<gco:CharacterString>'
ds_citation_edit_end = '</gco:CharacterString>\n</gmd:edition>\n'
ds_citation_edit_notdef = '<gmd:edition gco:nilReason="unknown"/>\n'

ds_citation_presform_start = '<gmd:presentationForm>\n<gmd:CI_PresentationFormCode codeList="gmxCodelists.xml#CI_PresentationFormCode" codeListValue="'
ds_citation_presform_end = '</gmd:CI_PresentationFormCode>\n</gmd:presentationForm>\n'

# Our scale
spatialRes_start = '<gmd:spatialResolution>\n<gmd:MD_Resolution>\n<gmd:equivalentScale>\n<gmd:MD_RepresentativeFraction>\n<gmd:denominator>\n<gco:Integer>'
spatialRes_end = '</gco:Integer>\n</gmd:denominator>\n</gmd:MD_RepresentativeFraction>\n</gmd:equivalentScale>\n</gmd:MD_Resolution>\n</gmd:spatialResolution>\n'
spatialRes_notdefined = '<gmd:spatialResolution>\n<gmd:MD_Resolution>\n<gmd:equivalentScale gco:nilReason="unknown" />\n</gmd:MD_Resolution>\n</gmd:spatialResolution>\n'

# Dataset language: code + human-readable name, combined via elemAtt_close.
ds_lang_start = '<gmd:language>\n<gmd:LanguageCode codeList="http://www.loc.gov/standards/iso639-2/php/code_list.php" codeListValue="'
ds_lang_end = '</gmd:LanguageCode>\n</gmd:language>\n'
eng_lang = 'eng' + elemAtt_close + 'English'
fre_lang = 'fre' + elemAtt_close + 'French'
kin_lang = 'kin' + elemAtt_close + 'Kinyarwanda'
swa_lang = 'swa' + elemAtt_close + 'Swahili'
vie_lang = 'vie' + elemAtt_close + 'Vietnamese'

giTopicCat_start = '<gmd:topicCategory>\n<gmd:MD_TopicCategoryCode>'
giTopicCat_end = '</gmd:MD_TopicCategoryCode>\n</gmd:topicCategory>\n'

ds_distribs_open = '<gmd:distributionInfo>\n<gmd:MD_Distribution>\n'
ds_distribs_close = '</gmd:MD_Distribution>\n</gmd:distributionInfo>\n'
md_format_start = '<gmd:distributionFormat>\n<gmd:MD_Format>\n<gmd:name>\n<gco:CharacterString>'
md_format_end = '</gco:CharacterString>\n</gmd:name>\n<gmd:version gco:nilReason="unknown"/>\n</gmd:MD_Format>\n</gmd:distributionFormat>\n'
transOpt_start = '<gmd:transferOptions>\n<gmd:MD_DigitalTransferOptions>\n'
transOpt_end = '</gmd:MD_DigitalTransferOptions>\n</gmd:transferOptions>\n'
onlineLink = '<gmd:onLine>\n<gmd:CI_OnlineResource>\n<gmd:linkage>\n<gmd:URL>'
oLLnkName = '</gmd:URL>\n</gmd:linkage>\n<gmd:name>\n<gco:CharacterString>'
oLLnkDesc = '</gco:CharacterString>\n</gmd:name>\n<gmd:description>\n<gco:CharacterString>'
oLLnkFunc = '</gco:CharacterString>\n</gmd:description>\n<gmd:function>\n<gmd:CI_OnLineFunctionCode codeList="gmxCodelists.xml#CI_OnLineFunctionCode" codeListValue="download"/>\n</gmd:function>\n</gmd:CI_OnlineResource>\n</gmd:onLine>\n'

ds_dqinfo_start = '<gmd:dataQualityInfo>\n<gmd:DQ_DataQuality>\n<gmd:scope gco:nilReason="unknown"/>\n'
ds_dqinfo_end = '</gmd:DQ_DataQuality>\n</gmd:dataQualityInfo>\n'
lineage_start = "<gmd:lineage>\n<gmd:LI_Lineage>\n<gmd:statement>\n<gco:CharacterString>"
lineage_end = "</gco:CharacterString>\n</gmd:statement>\n</gmd:LI_Lineage>\n</gmd:lineage>\n"

# Status
ds_prog_start = '<gmd:status>\n<gmd:MD_ProgressCode codeList="gmxCodelists.xml#MD_ProgressCode" codeListValue="'
ds_prog_end = '</gmd:status>\n'
ds_prog_notdefined = '<gmd:status gco:nilReason="missing" />\n'

# Series Name/Parent Title
'''
Not sure where to map this to, there is a series section at the end of the
document, but it really needs a fullish metadata description.
Using instead parent identifier at top of document...
'''
# Note ds_series XML is invalid as <gmd:composedOf> is element only, but in the code we have text
ds_series_notdefined = '<gmd:series gco:nilReason="inapplicable"/>\n'
ds_series_start = '<gmd:series>\n<gmd:DS_ProductionSeries>\n<gmd:composedOf>'
ds_series_end = '</gmd:composedOf>\n<gmd:seriesMetadata gco:nilReason="unknown" />\n</gmd:DS_ProductionSeries>\n</gmd:series>\n'
ds_parent_notdef = '<gmd:parentIdentifier gco:nilReason="inapplicable"/>\n'
ds_parent_start = '<gmd:parentIdentifier>\n<gco:CharacterString>'
ds_parent_end = '</gco:CharacterString>\n</gmd:parentIdentifier>\n'
ds_collective_start = '<gmd:collectiveTitle>\n<gco:CharacterString>'
ds_collective_end = '</gco:CharacterString>\n</gmd:collectiveTitle>\n'
ds_collective_nodef = '<gmd:collectiveTitle gco:nilReason="inapplicable" />\n'

# Geographic/descriptive extent.
ds_extent_notdef = '<gmd:extent gco:nilReason="unknown" />\n'
ds_extent_start = '<gmd:extent>\n<gmd:EX_Extent>\n<gmd:description>\n<gco:CharacterString>'
ds_descriptEnd = '</gco:CharacterString>\n</gmd:description>\n'
ds_extent_end = '</gmd:EX_Extent>\n</gmd:extent>\n'
ds_geogElem_start = '<gmd:geographicElement>\n<gmd:EX_GeographicBoundingBox>\n'
ds_geogElem_end = '</gmd:EX_GeographicBoundingBox>\n</gmd:geographicElement>\n'
westStart = '<gmd:westBoundLongitude>\n<gco:Decimal>'
westEndEastStart = '</gco:Decimal>\n</gmd:westBoundLongitude>\n<gmd:eastBoundLongitude>\n<gco:Decimal>'
eastEndSouthStart = '</gco:Decimal>\n</gmd:eastBoundLongitude>\n<gmd:southBoundLatitude>\n<gco:Decimal>'
southEndNorthStart = '</gco:Decimal>\n</gmd:southBoundLatitude>\n<gmd:northBoundLatitude>\n<gco:Decimal>'
northEnd = '</gco:Decimal>\n</gmd:northBoundLatitude>\n'

'''
Temporal extent may be not populated, one time (instant), or a range (period)
'''
ds_tempo_notdef = '<gmd:extent>\n<gmd:EX_Extent>\n<gmd:temporalElement gco:nilReason="unknown" />\n</gmd:EX_Extent>\n</gmd:extent>\n'
ds_tempo_start = '<gmd:extent>\n<gmd:EX_Extent>\n<gmd:temporalElement>\n<gmd:EX_TemporalExtent>\n<gmd:extent>\n'
ds_tempo_end = '</gmd:extent>\n</gmd:EX_TemporalExtent>\n</gmd:temporalElement>\n</gmd:EX_Extent>\n</gmd:extent>\n'
ds_tempo_TI_start = '<gml:TimeInstant gml:id="ti_1">\n<gml:timePosition>'
ds_tempo_TI_end = '</gml:timePosition>\n</gml:TimeInstant>\n'
ds_tempo_TP_start = '<gml:TimePeriod gml:id="tp_1">\n<gml:beginPosition>'
ds_tempo_TP_mid = '</gml:beginPosition>\n<gml:endPosition>'
ds_tempo_TP_end = '</gml:endPosition>\n</gml:TimePeriod>\n'

# Resource constraints (use limitations / access / licence).
ds_usecon_start = '<gmd:resourceConstraints xlink:title="Limitations/Use_constraints">\n<gmd:MD_Constraints>\n<gmd:useLimitation>\n<gco:CharacterString>'
ds_usecon_end = '</gco:CharacterString>\n</gmd:useLimitation>\n</gmd:MD_Constraints>\n</gmd:resourceConstraints>\n'
ds_accesscon_start = '<gmd:resourceConstraints xlink:title="Limitations/Access constraints">\n<gmd:MD_LegalConstraints>\n<gmd:accessConstraints>\n<gmd:MD_RestrictionCode codeList="gmxCodelists.xml#MD_RestrictionCode" codeListValue="otherRestrictions"/>\n</gmd:accessConstraints>\n<gmd:otherConstraints>\n<gco:CharacterString>'
ds_accesscon_end = '</gco:CharacterString>\n</gmd:otherConstraints>\n</gmd:MD_LegalConstraints>\n</gmd:resourceConstraints>\n'
ds_lic_start = '<gmd:resourceConstraints xlink:title="Conditions/Licence/Restriction Code">\n<gmd:MD_LegalConstraints>\n<gmd:useConstraints>\n<gmd:MD_RestrictionCode codeList="gmxCodelists.xml#MD_RestrictionCode" codeListValue="'
ds_lic_end = '</gmd:useConstraints>\n<gmd:otherConstraints>\n<gmx:Anchor xlink:href="#">Conditions apply</gmx:Anchor>\n</gmd:otherConstraints>\n</gmd:MD_LegalConstraints>\n</gmd:resourceConstraints>\n'
ds_lic_notdef = '<gmd:resourceConstraints xlink:title="Conditions/Licence/Restriction Code">\n<gmd:MD_LegalConstraints>\n<gmd:useConstraints gco:nilReason="missing" />\n<gmd:otherConstraints>\n<gmx:Anchor xlink:href="#">No specifed conditions apply</gmx:Anchor>\n</gmd:otherConstraints>\n</gmd:MD_LegalConstraints>\n</gmd:resourceConstraints>\n'

'''
Template has 'Translation Needs' as ISO 19115, but not sure where this would go
mapping to otherCitationDetails below.
'''
ds_cit_otherdet_start = '<gmd:otherCitationDetails>\n<gco:CharacterString>'
ds_cit_otherdet_end = '</gco:CharacterString>\n</gmd:otherCitationDetails>\n'
ds_cit_otherdet_notdef = '<gmd:otherCitationDetails gco:nilReason="inapplicable"/>\n'

'''
Contacts
'''
aut_poc = '<gmd:pointOfContact xlink:title="Author">\n<gmd:CI_ResponsibleParty>\n'
pub_poc = '<gmd:pointOfContact xlink:title="Publisher">\n<gmd:CI_ResponsibleParty>\n'
cud_poc = '<gmd:pointOfContact xlink:title="Custodian">\n<gmd:CI_ResponsibleParty>\n'
own_poc = '<gmd:pointOfContact xlink:title="Owner">\n<gmd:CI_ResponsibleParty>\n'
poc_poc = '<gmd:pointOfContact xlink:title="PointOfContact">\n<gmd:CI_ResponsibleParty>\n'
indNam_start = '<gmd:individualName>\n<gco:CharacterString>'
indNam_end = '</gco:CharacterString>\n</gmd:individualName>\n'
orgNam_start = '<gmd:organisationName>\n<gco:CharacterString>'
orgNam_end = '</gco:CharacterString>\n</gmd:organisationName>\n'
conAdd_start = '<gmd:contactInfo>\n<gmd:CI_Contact>\n<gmd:address>\n<gmd:CI_Address>\n'
conAdd_end = '</gmd:CI_Address>\n</gmd:address>\n</gmd:CI_Contact>\n</gmd:contactInfo>\n'
adminArea_start = '<gmd:administrativeArea>\n<gco:CharacterString>'
adminArea_end = '</gco:CharacterString>\n</gmd:administrativeArea>\n'
role_start = '<gmd:role>\n<gmd:CI_RoleCode codeList="gmxCodelists.xml#CI_RoleCode" codeListValue="'
aut_role = 'author">Author'
pub_role = 'publisher">Publisher'
cud_role = 'custodian">Custodian'
own_role = 'owner">Owner'
poc_role = 'pointOfContact">Point of Contact'
pocEnd = '</gmd:CI_RoleCode>\n</gmd:role>\n</gmd:CI_ResponsibleParty>\n</gmd:pointOfContact>\n'

# Supplemental information: spreadsheet columns with no ISO mapping are packed
# into a JSON-ish CDATA blob; ar_* are the key prefixes, qc the separator.
suppInf_start = '<gmd:supplementalInformation>\n<gco:CharacterString>\n<![CDATA[ {'
suppInf_end = '} ]]>\n</gco:CharacterString>\n</gmd:supplementalInformation>\n'
ar_tn = '"Translation Needs":"'
ar_nop = '"Number of pages (Hardcopy)":"'
ar_alh = '"Archive Location (Hardcopy)":"'
ar_lin = '"Location in Archive (Hardcopy)":"'
ar_rai = '"Risk and Impact":"'
ar_vat = '"Vital asset to the organisation?":"'
ar_cav = '"Current Asset Volume":"'
ar_ds = '"Digitalizing status":"'
ar_sd = '"Scanned Date":"'
ar_nsc = '"Name of Staff Scanning":"'
ar_dal = '"Digital Asset location":"'
ar_rp = '"Retention period":"'
ar_sw = '"Shared with":"'
ar_com = '"Comments":"'
qc = '",\n'

# Browse graphic (thumbnail) defaults.
graphic_start = '<gmd:graphicOverview>\n<gmd:MD_BrowseGraphic>\n'
graphic_end = '</gmd:MD_BrowseGraphic>\n</gmd:graphicOverview>\n'
fileName_start = '<gmd:fileName>\n<gco:CharacterString>'
fileName_end = '</gco:CharacterString>\n</gmd:fileName>\n'
fileDesc_start = '<gmd:fileDescription>\n<gco:CharacterString>'
fileDesc_end = '</gco:CharacterString>\n</gmd:fileDescription>\n'
fileType_start = '<gmd:fileType>\n<gco:CharacterString>'
fileType_end = '</gco:CharacterString>\n</gmd:fileType>'
fileName_default = 'http://www.sciencekids.co.nz/images/pictures/flags680/Kenya.jpg'
fileDesc_default = 'National flag of Kenya'
fileType_default = 'image/jpeg'

# --- Per-row record generation ----------------------------------------------
# One XML file per asset row, named by a fresh UUID.
for index, row in actual_assets.head(n=sampleNumber).iterrows():
    ''' Start file here '''
    file_id = str(uuid.uuid4())
    # NOTE(review): file is opened in append mode and a close() is not visible
    # in this view — confirm the handle is closed after writing.
    fileout = open(file_id + ".xml","a")
    '''# 1 <gmd:fileIdentifier> '''
    file_id_x = file_id_start + file_id + file_id_end
    '''#2 <gmd:language> '''
    '''#3 <gmd:parentIdentifier> '''
    if not row["Series Name/Parent Title"]:
        ds_series_x = ds_parent_notdef
        ds_coll_x = ds_collective_nodef
    else:
        ds_series_x = ds_parent_start + str(row["Series Name/Parent Title"]) + ds_parent_end
        ds_coll_x = ds_collective_start + str(row["Series Name/Parent Title"]) + ds_collective_end
    '''#4 <gmd:hierarchyLevel>'''
    logging.debug(str(row["Resource Type"]))
    if row["Resource Type"] == 'dataset':
        md_hlevel = md_hlevelds
    elif row["Resource Type"] == 'series':
        md_hlevel = md_hlevelsr
    else:
        md_hlevel = md_hlevelng
    '''#5 <gmd:hierarchyLevelName> '''
    '''#6 <gmd:contact> (contact for metadata) '''
    if not useListedIndiv:
        md_pocIndx = md_pocInd_start + md_poc_indiv + md_pocInd_end
    else:
        md_pocIndx = md_pocInd_start + str(row["Name of staff member entering metadata records"]) + md_pocInd_end
    md_pocOrgx = md_pocOrg_start + md_poc_org + md_pocOrg_end
    md_pocEmailx = md_conInfo_start + md_poc_email + md_conInfo_end
    if showIndividual:
        md_contact_x = md_contact_start + md_pocIndx + md_pocOrgx + md_pocEmailx + md_contact_end
    else:
        md_contact_x = md_contact_start + md_pocOrgx + md_pocEmailx + md_contact_end
    '''#7 <gmd:dateStamp> (metadata date)'''
    # Date is converted to format like '2019-07-25 00:00:00'
    # which is not correct format for gco:Date or gco:DateTime
    # need to convert to either `2019-07-25` or '2019-07-25T00:00:00'
    if not row["Date of record entered"]:
        md_date_x = md_date_start + md_datedef + md_date_end
    else:
        if (str(row["Date of record entered"]).find('-') == 0):
            # date is invalid like --05-16
            # reformat to 2016-05
            # NOTE(review): the slices below each take a SINGLE character
            # ([4:5] and [2:3]); for input '--05-16' this yields '20--0',
            # not '2016-05' as the comment intends — likely should be
            # [5:7] and [2:4]. Confirm against real spreadsheet values.
            reformattedDate = "20" + str(row["Date of record entered"])[4:5] + "-" + str(row["Date of record entered"])[2:3]
            md_date_x = md_date_start + reformattedDate + md_date_end
        else:
            md_date_x = md_date_start + str(row["Date of record entered"])[0:10] + md_date_end
    ''' #8 <gmd:metadataStandardName> '''
    ''' #9 <gmd:metadataStandardVersion> '''
    ''' #10 <gmd:dataSetURI> '''
    ''' #11 <gmd:spatialRepresentationInfo> '''
    ''' #12 <gmd:referenceSystemInfo> '''
    '''
    We could have more than one refsys, but we can't really use a reliable
    splitting mechanism even if more than one becuase this is free text
    '''
    refsys_x = refsys_start + str(row["Spatial Reference System"]) + refsys_end
    ''' #13 <gmd:identificationInfo> starts '''
    if assetIDisPopulated:
        ds_ident_x = ds_ident_start + idprefix + str(row['Asset Identifier (Asset ID)']).strip() + elemAtt_close
    else:
        ds_ident_x = ds_ident_start + idprefix + str(index) + elemAtt_close
    ''' #14 <gmd:citation>
<reponame>samcom12/anuga_core<filename>anuga/culvert_flows/tests/test_culvert_class.py
#!/usr/bin/env python
# Unit tests for anuga culvert flow classes: sets up a small rectangular
# domain with an embankment, attaches a Culvert_flow forcing term, evolves
# the shallow-water solver and checks rating-curve handling and volume
# conservation.

from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import unittest
import os.path
import sys

from anuga.utilities.system_tools import get_pathname_from_package
from anuga.geometry.polygon_function import Polygon_function
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.abstract_2d_finite_volumes.quantity import Quantity
import anuga

from anuga.culvert_flows.culvert_class import Culvert_flow, \
     Culvert_flow_rating, Culvert_flow_energy
from anuga.culvert_flows.culvert_routines import boyd_generalised_culvert_model

from math import pi, pow, sqrt

import numpy as num


# Helper functions
def run_culvert_flow_problem(depth):
    """Run flow with culvert given depth

    Builds a 40m x 5m sloping domain with an embankment, a Boyd culvert
    through it and reflective walls, evolves for 10s and asserts total
    volume is conserved. Removes the generated .sww file on success.
    """
    length = 40.
    width = 5.

    dx = dy = 1           # Resolution: Length of subdivisions on both axes

    points, vertices, boundary = rectangular_cross(int(old_div(length,dx)),
                                                   int(old_div(width,dy)),
                                                   len1=length, len2=width)
    domain = anuga.Domain(points, vertices, boundary)
    domain.set_name('Test_culvert_shallow') # Output name
    domain.set_default_order(2)

    #----------------------------------------------------------------------
    # Setup initial conditions
    #----------------------------------------------------------------------

    def topography(x, y):
        """Set up a weir

        A culvert will connect either side
        """
        # General Slope of Topography
        z=old_div(-x,1000)

        N = len(x)
        for i in range(N):
            # Sloping Embankment Across Channel
            if 5.0 < x[i] < 10.1:
                # Cut Out Segment for Culvert face
                if 1.0+(x[i]-5.0)/5.0 < y[i] < 4.0 - (x[i]-5.0)/5.0:
                    z[i]=z[i]
                else:
                    z[i] += 0.5*(x[i] -5.0)    # Sloping Segment  U/S Face
            if 10.0 < x[i] < 12.1:
                z[i] += 2.5                    # Flat Crest of Embankment
            if 12.0 < x[i] < 14.5:
                # Cut Out Segment for Culvert face
                if 2.0-(x[i]-12.0)/2.5 < y[i] < 3.0 + (x[i]-12.0)/2.5:
                    z[i]=z[i]
                else:
                    z[i] += 2.5-1.0*(x[i] -12.0)       # Sloping D/S Face

        return z

    domain.set_quantity('elevation', topography)
    domain.set_quantity('friction', 0.01)         # Constant friction
    domain.set_quantity('stage', expression='elevation + %f' % depth) # Shallow initial condition

    # Boyd culvert
    culvert = Culvert_flow(domain,
                           label='Culvert No. 1',
                           description='This culvert is a test unit 1.2m Wide by 0.75m High',
                           end_point0=[9.0, 2.5],
                           end_point1=[13.0, 2.5],
                           width=1.20, height=0.75,
                           culvert_routine=boyd_generalised_culvert_model,
                           number_of_barrels=1,
                           update_interval=2,
                           verbose=False)

    domain.forcing_terms.append(culvert)

    #-----------------------------------------------------------------------
    # Setup boundary conditions
    #-----------------------------------------------------------------------

    # Inflow based on Flow Depth and Approaching Momentum
    Br = anuga.Reflective_boundary(domain)              # Solid reflective wall
    domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})

    #-----------------------------------------------------------------------
    # Evolve system through time
    #-----------------------------------------------------------------------
    #print 'depth', depth
    ref_volume = domain.get_quantity('stage').get_integral()
    for t in domain.evolve(yieldstep = 0.1, finaltime = 10):
        new_volume = domain.get_quantity('stage').get_integral()
        msg = ('Total volume has changed: Is %.8f m^3 should have been %.8f m^3'
               % (new_volume, ref_volume))
        assert num.allclose(new_volume, ref_volume), msg

    os.remove('Test_culvert_shallow.sww')


class Test_Culvert(unittest.TestCase):
    # Test suite for Culvert_flow forcing terms.
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_that_culvert_runs_rating(self):
        """test_that_culvert_runs_rating

        This test exercises the culvert and checks values outside
        rating curve are dealt with
        """

        path = get_pathname_from_package('anuga.culvert_flows')
        path = os.path.join(path, 'tests', 'data')

        length = 40.
        width = 5.

        dx = dy = 1           # Resolution: Length of subdivisions on both axes

        points, vertices, boundary = rectangular_cross(int(old_div(length,dx)),
                                                       int(old_div(width,dy)),
                                                       len1=length, len2=width)
        domain = anuga.Domain(points, vertices, boundary)
        domain.set_name('Test_culvert')                 # Output name
        domain.set_default_order(2)

        #----------------------------------------------------------------------
        # Setup initial conditions
        #----------------------------------------------------------------------

        def topography(x, y):
            """Set up a weir

            A culvert will connect either side
            """
            # General Slope of Topography
            z=old_div(-x,1000)

            N = len(x)
            for i in range(N):
                # Sloping Embankment Across Channel
                if 5.0 < x[i] < 10.1:
                    # Cut Out Segment for Culvert face
                    if 1.0+(x[i]-5.0)/5.0 < y[i] < 4.0 - (x[i]-5.0)/5.0:
                        z[i]=z[i]
                    else:
                        z[i] += 0.5*(x[i] -5.0)    # Sloping Segment  U/S Face
                if 10.0 < x[i] < 12.1:
                    z[i] += 2.5                    # Flat Crest of Embankment
                if 12.0 < x[i] < 14.5:
                    # Cut Out Segment for Culvert face
                    if 2.0-(x[i]-12.0)/2.5 < y[i] < 3.0 + (x[i]-12.0)/2.5:
                        z[i]=z[i]
                    else:
                        z[i] += 2.5-1.0*(x[i] -12.0)       # Sloping D/S Face

            return z

        domain.set_quantity('elevation', topography)
        domain.set_quantity('friction', 0.01)         # Constant friction
        domain.set_quantity('stage', expression='elevation')   # Dry initial condition

        filename=os.path.join(path, 'example_rating_curve.csv')
        culvert = Culvert_flow(domain,
                               culvert_description_filename=filename,
                               end_point0=[9.0, 2.5],
                               end_point1=[13.0, 2.5],
                               width=1.00,
                               use_velocity_head=True,
                               verbose=False)

        domain.forcing_terms.append(culvert)

        #-----------------------------------------------------------------------
        # Setup boundary conditions
        #-----------------------------------------------------------------------

        # Inflow based on Flow Depth and Approaching Momentum
        Bi = anuga.Dirichlet_boundary([0.0, 0.0, 0.0])
        Br = anuga.Reflective_boundary(domain)              # Solid reflective wall
        Bo = anuga.Dirichlet_boundary([-5, 0, 0])           # Outflow

        # Upstream and downstream conditions that will exceed the rating curve
        # I.e produce delta_h outside the range [0, 10] specified in the the
        # file example_rating_curve.csv
        Btus = anuga.Time_boundary(domain, \
                    lambda t: [100*num.sin(old_div(2*pi*(t-4),10)), 0.0, 0.0])
        Btds = anuga.Time_boundary(domain, \
                    lambda t: [-5*(num.cos(old_div(2*pi*(t-4),20))), 0.0, 0.0])
        domain.set_boundary({'left': Btus, 'right': Btds,
                             'top': Br, 'bottom': Br})

        #-----------------------------------------------------------------------
        # Evolve system through time
        #-----------------------------------------------------------------------

        min_delta_w = sys.maxsize
        max_delta_w = -min_delta_w
        for t in domain.evolve(yieldstep = 1, finaltime = 25):
            delta_w = culvert.inlet.stage - culvert.outlet.stage

            if delta_w > max_delta_w: max_delta_w = delta_w
            if delta_w < min_delta_w: min_delta_w = delta_w

            pass

        # Check that extreme values in rating curve have been exceeded
        # so that we know that condition has been exercised
        assert min_delta_w < 0
        assert max_delta_w > 10

        os.remove('Test_culvert.sww')

    def test_that_culvert_dry_bed_rating_does_not_produce_flow(self):
        """test_that_culvert_in_dry_bed_does_not_produce_flow(self):

        Test that culvert on a sloping dry bed doesn't produce flows
        although there will be a 'pressure' head due to delta_w > 0

        This one is using the rating curve variant
        """

        path = get_pathname_from_package('anuga.culvert_flows')
        path = os.path.join(path, 'tests', 'data')

        length = 40.
        width = 5.

        dx = dy = 1           # Resolution: Length of subdivisions on both axes

        points, vertices, boundary = rectangular_cross(int(old_div(length,dx)),
                                                       int(old_div(width,dy)),
                                                       len1=length, len2=width)
        domain = anuga.Domain(points, vertices, boundary)
        domain.set_name('Test_culvert_dry')             # Output name
        domain.set_default_order(2)

        #----------------------------------------------------------------------
        # Setup initial conditions
        #----------------------------------------------------------------------

        def topography(x, y):
            """Set up a weir

            A culvert will connect either side
            """
            # General Slope of Topography
            z=old_div(-x,1000)

            N = len(x)
            for i in range(N):
                # Sloping Embankment Across Channel
                if 5.0 < x[i] < 10.1:
                    # Cut Out Segment for Culvert face
                    if 1.0+(x[i]-5.0)/5.0 < y[i] < 4.0 - (x[i]-5.0)/5.0:
                        z[i]=z[i]
                    else:
                        z[i] += 0.5*(x[i] -5.0)    # Sloping Segment  U/S Face
                if 10.0 < x[i] < 12.1:
                    z[i] += 2.5                    # Flat Crest of Embankment
                if 12.0 < x[i] < 14.5:
                    # Cut Out Segment for Culvert face
                    if 2.0-(x[i]-12.0)/2.5 < y[i] < 3.0 + (x[i]-12.0)/2.5:
                        z[i]=z[i]
                    else:
                        z[i] += 2.5-1.0*(x[i] -12.0)       # Sloping D/S Face

            return z

        domain.set_quantity('elevation', topography)
        domain.set_quantity('friction', 0.01)         # Constant friction
        domain.set_quantity('stage', expression='elevation')   # Dry initial condition

        filename = os.path.join(path, 'example_rating_curve.csv')
        culvert = Culvert_flow(domain,
                               culvert_description_filename=filename,
                               end_point0=[9.0, 2.5],
                               end_point1=[13.0, 2.5],
                               height=0.75,
                               verbose=False)

        domain.forcing_terms.append(culvert)

        #-----------------------------------------------------------------------
        # Setup boundary conditions
        #-----------------------------------------------------------------------

        # Inflow based on Flow Depth and Approaching Momentum
        Br = anuga.Reflective_boundary(domain)              # Solid reflective wall
        domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})

        #-----------------------------------------------------------------------
        # Evolve system through time
        #-----------------------------------------------------------------------

        ref_volume = domain.get_quantity('stage').get_integral()
        for t in domain.evolve(yieldstep = 1, finaltime = 25):
            new_volume = domain.get_quantity('stage').get_integral()

            msg = 'Total volume has changed'
            assert num.allclose(new_volume, ref_volume, rtol=1.0e-10), msg
            pass

        os.remove('Test_culvert_dry.sww')

    def test_that_culvert_flows_conserves_volume(self):
        """test_that_culvert_flows_conserves_volume

        Test that culvert on a sloping dry bed limits flows when very little
        water is present at inlet.

        Uses helper function: run_culvert_flow_problem(depth):
        """

        # Try this for a range of depths
        for depth in [0.1, 1.0]: #[0.1, 0.2, 0.5, 1.0]:
            run_culvert_flow_problem(depth)

    def OBSOLETE_XXXtest_that_culvert_rating_limits_flow_in_shallow_inlet_condition(self):
        """test_that_culvert_rating_limits_flow_in_shallow_inlet_condition

        Test that culvert on a sloping dry bed limits flows when very little
        water is present at inlet

        This one is using the rating curve variant
        """

        path = get_pathname_from_package('anuga.culvert_flows')

        length = 40.
        width = 5.

        dx = dy = 1           # Resolution: Length of subdivisions on both axes

        points, vertices, boundary = rectangular_cross(int(old_div(length,dx)),
                                                       int(old_div(width,dy)),
                                                       len1=length, len2=width)
        domain = anuga.Domain(points, vertices, boundary)
        domain.set_name('Test_culvert_shallow') # Output name
        domain.set_default_order(2)

        #----------------------------------------------------------------------
        # Setup initial conditions
        #----------------------------------------------------------------------

        def topography(x, y):
            """Set up a weir

            A culvert will connect either side
            """
            # General Slope of Topography
            z=old_div(-x,1000)

            N = len(x)
            for i in range(N):
                # Sloping Embankment Across Channel
                if 5.0 < x[i] < 10.1:
                    # Cut Out Segment for Culvert face
                    if 1.0+(x[i]-5.0)/5.0 < y[i] < 4.0 - (x[i]-5.0)/5.0:
                        z[i]=z[i]
                    else:
                        z[i] += 0.5*(x[i] -5.0)    # Sloping Segment  U/S Face
                if 10.0 < x[i] < 12.1:
                    z[i] += 2.5                    # Flat Crest of Embankment
                if 12.0 < x[i]
on with the creation
        try:
            # Trigger iRODS collection creation for the sample data.
            self._create_colls(investigation)
            success_msg = (
                'Collection structure for sample data '
                '{}d in iRODS'.format(action)
            )
            if settings.SHEETS_ENABLE_CACHE:
                success_msg += ', initiated iRODS cache update'
            messages.success(self.request, success_msg)
        except taskflow.FlowSubmitException as ex:
            messages.error(self.request, str(ex))
        return redirect(redirect_url)

    def get(self, request, *args, **kwargs):
        return super().render_to_response(self.get_context_data())


class SheetVersionListView(
    LoginRequiredMixin,
    LoggedInPermissionMixin,
    ProjectPermissionMixin,
    InvestigationContextMixin,
    ListView,
):
    """Sample Sheet version list view"""

    model = ISATab
    permission_required = 'samplesheets.view_versions'
    template_name = 'samplesheets/sheet_versions.html'
    paginate_by = settings.SHEETS_VERSION_PAGINATION

    def get_queryset(self):
        # All saved ISA-Tab versions for the project, newest first.
        return ISATab.objects.filter(
            project__sodar_uuid=self.kwargs['project']
        ).order_by('-date_created')

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # Most recent version matching the active investigation, if any.
        context['current_version'] = None
        if context['investigation']:
            context['current_version'] = (
                ISATab.objects.filter(
                    project=self.get_project(),
                    investigation_uuid=context['investigation'].sodar_uuid,
                )
                .order_by('-date_created')
                .first()
            )
        return context


class SheetVersionCompareView(
    LoginRequiredMixin,
    LoggedInPermissionMixin,
    InvestigationContextMixin,
    ProjectPermissionMixin,
    SheetImportMixin,
    TemplateView,
):
    """Sample Sheet version compare view"""

    permission_required = 'samplesheets.view_versions'
    template_name = 'samplesheets/version_compare.html'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # Source/target version UUIDs come from query parameters.
        source_uuid = self.request.GET.get('source')
        target_uuid = self.request.GET.get('target')
        source = ISATab.objects.filter(sodar_uuid=source_uuid).first()
        target = ISATab.objects.filter(sodar_uuid=target_uuid).first()
        context['source'] = source_uuid
        context['target'] = target_uuid
        # Use creation timestamps as display titles; 'N/A' if UUID not found.
        context['source_title'] = source.date_created if source else 'N/A'
        context['target_title'] = target.date_created if target else 'N/A'
        return context


class SheetVersionCompareFileView(
    LoginRequiredMixin,
    LoggedInPermissionMixin,
    InvestigationContextMixin,
    ProjectPermissionMixin,
    SheetImportMixin,
    TemplateView,
):
    """Sample Sheet version compare file view"""

    permission_required = 'samplesheets.view_versions'
    template_name = 'samplesheets/version_compare_file.html'

    def get_context_data(self, *args, **kwargs):
        # Pass-through of query parameters identifying the compared file.
        context = super().get_context_data(*args, **kwargs)
        context['source'] = self.request.GET.get('source')
        context['target'] = self.request.GET.get('target')
        context['filename'] = self.request.GET.get('filename')
        context['category'] = self.request.GET.get('category')
        return context


class SheetVersionRestoreView(
    LoginRequiredMixin,
    LoggedInPermissionMixin,
    InvestigationContextMixin,
    ProjectPermissionMixin,
    SheetImportMixin,
    TemplateView,
):
    """Sample Sheet version restoring view"""

    template_name = 'samplesheets/version_confirm_restore.html'
    permission_required = 'samplesheets.manage_sheet'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        investigation = context['investigation']
        if not investigation:
            return context
        context['sheet_version'] = ISATab.objects.filter(
            sodar_uuid=self.kwargs['isatab']
        ).first()
        return context

    def post(self, request, **kwargs):
        # Restore a saved ISA-Tab version: re-import it replacing the active
        # investigation, then bump the version to the top of the list.
        timeline = get_backend_api('timeline_backend')
        tl_event = None
        project = self.get_project()
        sheet_io = SampleSheetIO(allow_critical=settings.SHEETS_ALLOW_CRITICAL)
        new_inv = None
        redirect_url = reverse(
            'samplesheets:versions', kwargs={'project': project.sodar_uuid}
        )
        old_inv = Investigation.objects.filter(
            project=project, active=True
        ).first()
        if not old_inv:
            # This shouldn't happen, but just in case
            messages.error(
                request, 'Existing sheet not found, unable to restore'
            )
            return redirect(redirect_url)
        isa_version = ISATab.objects.filter(
            sodar_uuid=self.kwargs.get('isatab')
        ).first()
        if not isa_version:
            messages.error(
                request, 'ISA-Tab version not found, unable to restore'
            )
            return redirect(redirect_url)

        if timeline:
            tl_event = timeline.add_event(
                project=project,
                app_name=APP_NAME,
                user=self.request.user,
                event_name='sheet_restore',
                description='restore sheets from version {isatab}',
            )
            tl_event.add_object(
                obj=isa_version,
                label='isatab',
                name=isa_version.get_full_name(),
            )
        try:
            new_inv = sheet_io.import_isa(
                isa_data=isa_version.data,
                project=project,
                archive_name=isa_version.archive_name,
                user=request.user,
                replace=True if old_inv else False,
                replace_uuid=old_inv.sodar_uuid if old_inv else None,
                save_isa=False,  # Already exists as isa_version
            )
        except Exception as ex:
            self.handle_import_exception(ex, tl_event)

        if new_inv:
            new_inv = self.handle_replace(
                investigation=new_inv, old_inv=old_inv, tl_event=tl_event
            )
        if new_inv:
            new_inv = self.finalize_import(
                investigation=new_inv,
                action='restore',
                tl_event=tl_event,
                isa_version=isa_version,
            )
            # Edit isa_version to bump it in the list
            if 'RESTORE' not in isa_version.tags:
                isa_version.tags.append('RESTORE')
            isa_version.date_created = Now()
            isa_version.save()
        return redirect(
            reverse(
                'samplesheets:project_sheets',
                kwargs={'project': project.sodar_uuid},
            )
        )


class SheetVersionUpdateView(
    LoginRequiredMixin,
    LoggedInPermissionMixin,
    ProjectPermissionMixin,
    InvestigationContextMixin,
    UpdateView,
):
    """Sample sheet version update view"""

    permission_required = 'samplesheets.manage_sheet'
    model = ISATab
    form_class = SheetVersionEditForm
    template_name = 'samplesheets/version_update.html'
    slug_url_kwarg = 'isatab'
    slug_field = 'sodar_uuid'

    def form_valid(self, form):
        # Save the edited description and return to the version list.
        obj = form.save()
        messages.success(
            self.request,
            'Description updated for sheet version "{}".'.format(
                obj.get_full_name()
            ),
        )
        return redirect(
            reverse(
                'samplesheets:versions',
                kwargs={'project': self.get_project().sodar_uuid},
            )
        )


class SheetVersionDeleteView(
    LoginRequiredMixin,
    LoggedInPermissionMixin,
    ProjectPermissionMixin,
    InvestigationContextMixin,
    DeleteView,
):
    """Sample sheet version deletion view"""

    permission_required = 'samplesheets.manage_sheet'
    template_name = 'samplesheets/version_confirm_delete.html'
    model = ISATab
    slug_url_kwarg = 'isatab'
    slug_field = 'sodar_uuid'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        investigation = context['investigation']
        if not investigation:
            return context
        context['sheet_version'] = ISATab.objects.filter(
            sodar_uuid=self.kwargs['isatab']
        ).first()
        return context

    def get_success_url(self):
        # Record the deletion in the timeline and notify the user; called by
        # DeleteView after the object has been deleted.
        timeline = get_backend_api('timeline_backend')
        project = self.get_project()
        if timeline:
            tl_event = timeline.add_event(
                project=project,
                app_name=APP_NAME,
                user=self.request.user,
                event_name='version_delete',
                description='delete sample sheet version {isatab}',
                status_type='OK',
            )
            tl_event.add_object(
                obj=self.object,
                label='isatab',
                name=self.object.get_full_name(),
            )
        messages.success(
            self.request,
            'Deleted sample sheet version: {}'.format(
                self.object.get_full_name()
            ),
        )
        return reverse(
            'samplesheets:versions', kwargs={'project': project.sodar_uuid}
        )


class SheetVersionDeleteBatchView(
    LoginRequiredMixin,
    LoggedInPermissionMixin,
    ProjectPermissionMixin,
    InvestigationContextMixin,
    TemplateView,
):
    """Sample sheet version batch deletion view"""

    permission_required = 'samplesheets.manage_sheet'
    template_name = 'samplesheets/version_confirm_delete_batch.html'
    slug_url_kwarg = 'project'
    slug_field = 'sodar_uuid'

    def get_context_data(self, request, *args, **kwargs):
        # Versions selected via checkboxes in the POSTed form.
        context = super().get_context_data(*args, **kwargs)
        context['sheet_versions'] = ISATab.objects.filter(
            sodar_uuid__in=request.POST.getlist('version_check')
        )
        return context

    def post(self, request, **kwargs):
        context = self.get_context_data(request, **kwargs)
        # Render confirm template
        if request.POST.get('confirm'):
            return super().render_to_response(context)
        # Else go on with deletion
        project =
context['project'] version_count = context['sheet_versions'].count() timeline = get_backend_api('timeline_backend') if timeline: for sv in context['sheet_versions']: tl_event = timeline.add_event( project=project, app_name=APP_NAME, user=self.request.user, event_name='version_delete', description='delete sample sheet version {isatab}', status_type='OK', ) tl_event.add_object( obj=sv, label='isatab', name=sv.get_full_name(), ) context['sheet_versions'].delete() messages.success( request, 'Deleted {} sample sheet version{}.'.format( version_count, 's' if version_count != 1 else '', ), ) return redirect( reverse( 'samplesheets:versions', kwargs={'project': project.sodar_uuid} ) ) class IrodsAccessTicketListView( LoginRequiredMixin, LoggedInPermissionMixin, ProjectPermissionMixin, InvestigationContextMixin, ListView, ): """Sample Sheet version list view""" model = IrodsAccessTicket permission_required = 'samplesheets.edit_sheet' template_name = 'samplesheets/irods_access_tickets.html' paginate_by = settings.SHEETS_IRODS_TICKET_PAGINATION def get_queryset(self): return self.model.objects.filter( project__sodar_uuid=self.kwargs['project'] ) def get_context_data(self, *args, **kwargs): context = super().get_context_data(*args, **kwargs) irods_backend = get_backend_api('omics_irods') assays = Assay.objects.filter( study__investigation__project__sodar_uuid=self.kwargs['project'] ) context['track_hubs_available'] = bool( [ track_hub for assay in assays for track_hub in irods_backend.get_child_colls_by_path( irods_backend.get_path(assay) + '/' + TRACK_HUBS_COLL ) ] ) return context class IrodsAccessTicketCreateView( LoginRequiredMixin, LoggedInPermissionMixin, InvestigationContextMixin, ProjectPermissionMixin, SheetImportMixin, FormView, ): """Sample Sheet version restoring view""" permission_required = 'samplesheets.edit_sheet' template_name = 'samplesheets/irodsaccessticket_form.html' form_class = IrodsAccessTicketForm def get_initial(self): return {'project': 
self.get_project()} def form_valid(self, form): # Create iRODS ticket irods_backend = get_backend_api('omics_irods') ticket = irods_backend.issue_ticket( 'read', form.cleaned_data['path'], ticket_str=build_secret(16), expiry_date=form.cleaned_data.get('date_expires'), ) # Create database object obj = form.save(commit=False) obj.project = self.get_project() obj.assay = form.cleaned_data['assay'] obj.study = obj.assay.study obj.user = self.request.user obj.ticket = ticket.ticket obj.save() messages.success( self.request, 'iRODS access ticket "{}" created.'.format(obj.get_display_name()), ) return redirect( reverse( 'samplesheets:irods_tickets', kwargs={'project': self.kwargs['project']}, ) ) class IrodsAccessTicketUpdateView( LoginRequiredMixin, LoggedInPermissionMixin, ProjectPermissionMixin, InvestigationContextMixin, UpdateView, ): """Sample sheet version deletion view""" permission_required = 'samplesheets.edit_sheet' model = IrodsAccessTicket form_class = IrodsAccessTicketForm template_name = 'samplesheets/irodsaccessticket_form.html' slug_url_kwarg = 'irodsaccessticket' slug_field = 'sodar_uuid' def get_initial(self): return {'project': self.get_project()} def form_valid(self, form): obj = form.save() messages.success( self.request, 'iRODS access ticket "{}" updated.'.format(obj.get_display_name()), ) return redirect( reverse( 'samplesheets:irods_tickets', kwargs={'project': self.get_project().sodar_uuid}, ) ) class IrodsAccessTicketDeleteView( LoginRequiredMixin, LoggedInPermissionMixin, ProjectPermissionMixin, InvestigationContextMixin, DeleteView, ): """iRODS access ticket deletion view""" permission_required = 'samplesheets.delete_sheet' template_name = 'samplesheets/irodsaccessticket_confirm_delete.html' model = IrodsAccessTicket slug_url_kwarg = 'irodsaccessticket' slug_field = 'sodar_uuid' def get_success_url(self): return reverse( 'samplesheets:irods_tickets', kwargs={'project': self.object.project.sodar_uuid}, ) def delete(self, request, *args, 
**kwargs): obj = self.get_object() irods_backend = get_backend_api('omics_irods') try: irods_backend.delete_ticket(obj.ticket) messages.success( request, 'iRODS access ticket "{}" deleted.'.format( obj.get_display_name() ), ) except Exception as e: messages.error(request, '%s. Maybe it didn\'t exist.' % e) return super().delete(request, *args, **kwargs) class IrodsRequestCreateView( LoginRequiredMixin, LoggedInPermissionMixin, ProjectPermissionMixin, InvestigationContextMixin, IrodsRequestModifyMixin, FormView, ): """View for creating an iRODS data request""" permission_required = 'samplesheets.edit_sheet' template_name = 'samplesheets/irods_request_form.html' form_class = IrodsRequestForm def form_valid(self, form): project = self.get_project() # Create database object obj = form.save(commit=False) obj.user = self.request.user obj.project = project obj.save() # Create timeline event self.add_tl_create(obj) # Add app alerts to owners/delegates self.add_alerts_create(project) messages.success( self.request, 'iRODS data request "{}" created.'.format(obj.get_display_name()), ) return redirect( reverse( 'samplesheets:irods_requests', kwargs={'project': self.kwargs['project']}, ) ) class IrodsRequestUpdateView( LoginRequiredMixin, LoggedInPermissionMixin, ProjectPermissionMixin, InvestigationContextMixin, UpdateView, ): """View for updating an iRODS data request""" permission_required = 'samplesheets.edit_sheet' template_name = 'samplesheets/irods_request_form.html' model = IrodsDataRequest form_class = IrodsRequestForm slug_url_kwarg = 'irodsdatarequest' slug_field = 'sodar_uuid' def form_valid(self, form): timeline = get_backend_api('timeline_backend') # Create database object obj = form.save(commit=False) obj.user = self.request.user obj.project = self.get_project() obj.save() if timeline: tl_event = timeline.add_event( project=self.get_project(), app_name=APP_NAME, user=self.request.user, event_name='irods_request_update', description='update iRODS data request 
{irods_request}', status_type='OK', ) tl_event.add_object( obj=obj, label='irods_request', name=obj.get_display_name() ) messages.success( self.request, 'iRODS data request "{}" updated.'.format(obj.get_display_name()), ) return redirect( reverse( 'samplesheets:irods_requests', kwargs={'project': self.get_project().sodar_uuid}, ) ) class IrodsRequestDeleteView( LoginRequiredMixin, LoggedInPermissionMixin, ProjectPermissionMixin, InvestigationContextMixin, IrodsRequestModifyMixin, DeleteView, ): """View for deleting an iRODS data request""" permission_required = 'samplesheets.delete_sheet' template_name = 'samplesheets/irods_request_confirm_delete.html' model = IrodsDataRequest slug_url_kwarg = 'irodsdatarequest' slug_field = 'sodar_uuid' def get_success_url(self): # Add timeline event self.add_tl_delete(self.object) # Handle project alerts self.handle_alerts_deactivate(self.object) messages.success(self.request, 'iRODS data request deleted.') return reverse( 'samplesheets:irods_requests', kwargs={'project': self.object.project.sodar_uuid}, ) class IrodsRequestAcceptView( LoginRequiredMixin, LoggedInPermissionMixin, ProjectPermissionMixin, InvestigationContextMixin, IrodsRequestModifyMixin, FormView, ): """View for accepting an iRODS data request""" permission_required = 'samplesheets.manage_sheet' template_name = 'samplesheets/irods_request_accept_form.html' form_class = IrodsRequestAcceptForm def get_context_data(self, *args, **kwargs): context_data = super().get_context_data(*args, **kwargs) obj = IrodsDataRequest.objects.filter( sodar_uuid=self.kwargs['irodsdatarequest'] ).first() irods_backend = get_backend_api('omics_irods') context_data['irods_request'] = obj context_data['affected_objects'] = [] context_data['affected_collections'] = [] context_data['is_collection'] = obj.is_collection() if context_data['is_collection']: coll = irods_backend.get_coll_by_path( context_data['irods_request'].path ) context_data[ 'affected_objects' ] += 
irods_backend.get_objs_recursively(coll) context_data[ 'affected_collections' ] += irods_backend.get_colls_recursively(coll) return context_data def form_valid(self, request, *args, **kwargs): timeline = get_backend_api('timeline_backend') taskflow = get_backend_api('taskflow') app_alerts = get_backend_api('appalerts_backend') project = self.get_project() tl_event = None try: obj = IrodsDataRequest.objects.get( sodar_uuid=self.kwargs['irodsdatarequest'] ) except IrodsDataRequest.DoesNotExist: messages.error( self.request, 'iRODS data request {} doesn\' exist.'.format(
# This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Core module of the pulse drawer. This module provides the `DrawerCanvas` which is a collection of `Chart` object. The `Chart` object is a collection of drawings. A user can assign multiple channels to a single chart instance. For example, we can define a chart for specific qubit and assign all related channels to the chart. This chart-channel mapping is defined by the function specified by ``layout.chart_channel_map`` of the stylesheet. Because this chart instance is decoupled from the coordinate system of the plotter, we can arbitrarily place charts on the plotter canvas, i.e. if we want to create 3D plot, each chart may be placed on the X-Z plane and charts are arranged along the Y-axis. Thus this data model maximizes the flexibility to generate an output image. The chart instance is not just a container of drawings, as it also performs data processing like binding abstract coordinates and truncating long pulses for an axis break. Each chart object has `.parent` which points to the `DrawerCanvas` instance so that each child chart can refer to the global figure settings such as time range and axis break. Initialization ~~~~~~~~~~~~~~ The `DataCanvas` and `Chart` are not exposed to users as they are implicitly initialized in the interface function. It is noteworthy that the data canvas is agnostic to plotters. This means once the canvas instance is initialized we can reuse this data among multiple plotters. 
The canvas is initialized with a stylesheet and quantum backend information :py:class:~`qiskit.visualization.pulse_v2.device_info.DrawerBackendInfo`. Chart instances are automatically generated when pulse program is loaded. ```python canvas = DrawerCanvas(stylesheet=stylesheet, device=device) canvas.load_program(sched) canvas.update() ``` Once all properties are set, `.update` method is called to apply changes to drawings. If the `DrawDataContainer` is initialized without backend information, the output shows the time in units of the system cycle time `dt` and the frequencies are initialized to zero. Update ~~~~~~ To update the image, a user can set new values to canvas and then call the `.update` method. ```python canvas.set_time_range(2000, 3000, seconds=False) canvas.update() ``` All stored drawings are updated accordingly. The plotter API can access to drawings with `.collections` property of chart instance. This returns an iterator of drawing with the unique data key. If a plotter provides object handler for plotted shapes, the plotter API can manage the lookup table of the handler and the drawing by using this data key. """ from copy import deepcopy from enum import Enum from functools import partial from itertools import chain from typing import Union, List, Tuple, Iterator, Optional import numpy as np from qiskit import pulse from qiskit.pulse.transforms import target_qobj_transform from qiskit.visualization.exceptions import VisualizationError from qiskit.visualization.pulse_v2 import events, types, drawings, device_info from qiskit.visualization.pulse_v2.stylesheet import QiskitPulseStyle class DrawerCanvas: """Collection of `Chart` and configuration data. Pulse channels are associated with some `Chart` instance and drawing data object are stored in the `Chart` instance. Device, stylesheet, and some user generators are stored in the `DrawingCanvas` and `Chart` instances are also attached to the `DrawerCanvas` as children. 
Global configurations are accessed by those children to modify the appearance of the `Chart` output. """ def __init__(self, stylesheet: QiskitPulseStyle, device: device_info.DrawerBackendInfo): """Create new data container with backend system information. Args: stylesheet: Stylesheet to decide appearance of output image. device: Backend information to run the program. """ # stylesheet self.formatter = stylesheet.formatter self.generator = stylesheet.generator self.layout = stylesheet.layout # device info self.device = device # chart self.global_charts = Chart(parent=self, name='global') self.charts = [] # visible controls self.disable_chans = set() self.disable_types = set() # data scaling self.chan_scales = dict() # global time self._time_range = (0, 0) self._time_breaks = [] # title self.fig_title = '' @property def time_range(self) -> Tuple[int, int]: """Return current time range to draw. Calculate net duration and add side margin to edge location. Returns: Time window considering side margin. """ t0, t1 = self._time_range total_time_elimination = 0 for t0b, t1b in self.time_breaks: if t1b > t0 and t0b < t1: total_time_elimination += t1b - t0b net_duration = t1 - t0 - total_time_elimination new_t0 = t0 - net_duration * self.formatter['margin.left_percent'] new_t1 = t1 + net_duration * self.formatter['margin.right_percent'] return new_t0, new_t1 @time_range.setter def time_range(self, new_range: Tuple[int, int]): """Update time range to draw.""" self._time_range = new_range @property def time_breaks(self) -> List[Tuple[int, int]]: """Return time breaks with time range. If an edge of time range is in the axis break period, the axis break period is recalculated. Raises: VisualizationError: When axis break is greater than time window. Returns: List of axis break periods considering the time window edges. 
""" t0, t1 = self._time_range axis_breaks = [] for t0b, t1b in self._time_breaks: if t0b >= t1 or t1b <= t0: # skip because break period is outside of time window continue if t0b < t0 and t1b > t1: raise VisualizationError('Axis break is greater than time window. ' 'Nothing will be drawn.') if t0b < t0 < t1b: if t1b - t0 > self.formatter['axis_break.length']: new_t0 = t0 + 0.5 * self.formatter['axis_break.max_length'] axis_breaks.append((new_t0, t1b)) continue if t0b < t1 < t1b: if t1 - t0b > self.formatter['axis_break.length']: new_t1 = t1 - 0.5 * self.formatter['axis_break.max_length'] axis_breaks.append((t0b, new_t1)) continue axis_breaks.append((t0b, t1b)) return axis_breaks @time_breaks.setter def time_breaks(self, new_breaks: List[Tuple[int, int]]): """Set new time breaks.""" self._time_breaks = sorted(new_breaks, key=lambda x: x[0]) def load_program(self, program: Union[pulse.Waveform, pulse.ParametricPulse, pulse.Schedule]): """Load a program to draw. Args: program: `Waveform`, `ParametricPulse`, or `Schedule` to draw. Raises: VisualizationError: When input program is invalid data format. """ if isinstance(program, (pulse.Schedule, pulse.ScheduleBlock)): self._schedule_loader(program) elif isinstance(program, (pulse.Waveform, pulse.ParametricPulse)): self._waveform_loader(program) else: raise VisualizationError('Data type %s is not supported.' % type(program)) # update time range self.set_time_range(0, program.duration, seconds=False) # set title self.fig_title = self.layout['figure_title'](program=program, device=self.device) def _waveform_loader(self, program: Union[pulse.Waveform, pulse.ParametricPulse]): """Load Waveform instance. This function is sub-routine of py:method:`load_program`. Args: program: `Waveform` to draw. 
""" chart = Chart(parent=self) # add waveform data fake_inst = pulse.Play(program, types.WaveformChannel()) inst_data = types.PulseInstruction(t0=0, dt=self.device.dt, frame=types.PhaseFreqTuple(phase=0, freq=0), inst=fake_inst, is_opaque=program.is_parameterized()) for gen in self.generator['waveform']: obj_generator = partial(gen, formatter=self.formatter, device=self.device) for data in obj_generator(inst_data): chart.add_data(data) self.charts.append(chart) def _schedule_loader(self, program: Union[pulse.Schedule, pulse.ScheduleBlock]): """Load Schedule instance. This function is sub-routine of py:method:`load_program`. Args: program: `Schedule` to draw. """ program = target_qobj_transform(program, remove_directives=False) # initialize scale values self.chan_scales = {} for chan in program.channels: if isinstance(chan, pulse.channels.DriveChannel): self.chan_scales[chan] = self.formatter['channel_scaling.drive'] elif isinstance(chan, pulse.channels.MeasureChannel): self.chan_scales[chan] = self.formatter['channel_scaling.measure'] elif isinstance(chan, pulse.channels.ControlChannel): self.chan_scales[chan] = self.formatter['channel_scaling.control'] elif isinstance(chan, pulse.channels.AcquireChannel): self.chan_scales[chan] = self.formatter['channel_scaling.acquire'] else: self.chan_scales[chan] = 1.0 # create charts mapper = self.layout['chart_channel_map'] for name, chans in mapper(channels=program.channels, formatter=self.formatter, device=self.device): chart = Chart(parent=self, name=name) # add standard pulse instructions for chan in chans: chart.load_program(program=program, chan=chan) # add barriers barrier_sched = program.filter(instruction_types=[pulse.instructions.RelativeBarrier], channels=chans) for t0, _ in barrier_sched.instructions: inst_data = types.BarrierInstruction(t0, self.device.dt, chans) for gen in self.generator['barrier']: obj_generator = partial(gen, formatter=self.formatter, device=self.device) for data in obj_generator(inst_data): 
chart.add_data(data) # add chart axis chart_axis = types.ChartAxis(name=chart.name, channels=chart.channels) for gen in self.generator['chart']: obj_generator = partial(gen, formatter=self.formatter, device=self.device) for data in obj_generator(chart_axis): chart.add_data(data) self.charts.append(chart) # add snapshot data to global snapshot_sched = program.filter(instruction_types=[pulse.instructions.Snapshot]) for t0, inst in snapshot_sched.instructions: inst_data = types.SnapshotInstruction(t0, self.device.dt, inst.label, inst.channels) for gen in self.generator['snapshot']: obj_generator = partial(gen, formatter=self.formatter, device=self.device) for data in obj_generator(inst_data): self.global_charts.add_data(data) # calculate axis break self.time_breaks = self._calculate_axis_break(program) def _calculate_axis_break(self, program: pulse.Schedule) -> List[Tuple[int, int]]: """A helper function to calculate axis break of long pulse sequence. Args: program: A schedule to calculate axis break. Returns: List of axis break periods. """ axis_breaks = [] edges = set() for t0, t1 in chain.from_iterable(program.timeslots.values()): if t1 - t0 > 0: edges.add(t0) edges.add(t1) edges = sorted(edges) for t0, t1 in zip(edges[:-1], edges[1:]): if t1 - t0 > self.formatter['axis_break.length']: t_l = t0 + 0.5 * self.formatter['axis_break.max_length'] t_r = t1 - 0.5 * self.formatter['axis_break.max_length'] axis_breaks.append((t_l, t_r)) return axis_breaks def set_time_range(self, t_start: Union[int, float], t_end: Union[int, float], seconds: bool = True): """Set
<filename>universe/vncdriver/server_messages.py try: # In Py2, use the more efficient cStringIO implementation if it's # available from cStringIO import StringIO as BytesIO except ImportError: # Fall back to using normal BytesIO, six handles python 2 vs 3 compat from six import BytesIO import logging import numpy as np from universe import pyprofile import struct logger = logging.getLogger(__name__) class FramebufferUpdate(object): def __init__(self, rectangles): self.rectangles = rectangles class Rectangle(object): def __init__(self, x, y, width, height, encoding): self.x = x self.y = y self.width = width self.height = height self.encoding = encoding class PseudoCursorEncoding(object): def __init__(self, image, mask): self.image = image self.mask = mask @classmethod def parse_rectangle(cls, client, x, y, width, height, data): split = width * height * client.framebuffer.bypp image = np.frombuffer(data[:split], np.uint8).reshape((height, width, 4))[:, :, [0, 1, 2]] # Turn raw bytes into uint8 array mask = np.frombuffer(data[split:], np.uint8) # Turn uint8 array into bit array, and go over the scanlines mask = np.unpackbits(mask).reshape((height, -1 if mask.size else 0))[:, :width] encoding = cls(image, mask) return Rectangle(x, y, width, height, encoding) class RAWEncoding(object): def __init__(self, data): self.data = data @classmethod def parse_rectangle(cls, client, x, y, width, height, data): assert client.framebuffer.bpp == 32 data = np.frombuffer(data, np.uint8).reshape((height, width, 4))[:, :, [0, 1, 2]] encoding = cls(data) return Rectangle(x, y, width, height, encoding) class ZlibEncoding(object): def __init__(self, data): self.data = data @classmethod def parse_rectangle(cls, client, x, y, width, height, data): decompressed = client.zlib_decompressor.decompress(data) logger.debug('[zlib] Decompressed from %s bytes -> %s bytes', len(data), len(decompressed)) pyprofile.incr('vncdriver.recv_rectangle.zlib_encoding.decompressed_bytes', len(decompressed), 
unit=pyprofile.BYTES) data = np.frombuffer(decompressed, np.uint8).reshape((height, width, 4))[:, :, [0, 1, 2]] encoding = cls(data) return Rectangle(x, y, width, height, encoding) class ZRLEEncoding(object): def __init__(self, data): self.data = data @classmethod def parse_rectangle(cls, client, x, y, width, height, data): decompressed = client.zlib_decompressor.decompress(data) logger.debug('[zrle] Decompressed from %s bytes -> %s bytes', len(data), len(decompressed)) pyprofile.incr('vncdriver.recv_rectangle.zrle_encoding.decompressed_bytes', len(decompressed), unit=pyprofile.BYTES) if client.framebuffer.bpp > 24: bytes_per_pixel = 3 else: bytes_per_pixel = client.framebuffer.bypp buf = BytesIO(decompressed) data = cls._read(x, y, width, height, buf, bytes_per_pixel) encoding = cls(data) return Rectangle(x, y, width, height, encoding) @classmethod def _read(cls, x, y, width, height, buf, bytes_per_pixel): data = np.zeros([height, width, 3], dtype=np.uint8) + 255 for tile_y in range(0, height, 64): tile_height = min(64, height-tile_y) for tile_x in range(0, width, 64): tile_width = min(64, width-tile_x) tile = data[tile_y:tile_y+tile_height, tile_x:tile_x+tile_width] cls._read_tile(tile, buf, tile_width, tile_height, bytes_per_pixel) return data @classmethod def _read_tile(cls, tile, buf, tile_width, tile_height, bytes_per_pixel): assert bytes_per_pixel == 3 # Each tile begins with a subencoding type byte. The top bit of this # byte is set if the tile has been run-length encoded, clear otherwise. # The bottom 7 bits indicate the size of the palette used: zero means # no palette, 1 means that the tile is of a single color, and 2 to 127 # indicate a palette of that size. The special subencoding values 129 # and 127 indicate that the palette is to be reused from the last tile # that had a palette, with and without RLE, respectively. 
(subencoding,) = struct.unpack('!B', buf.read(1)) run_length_encoded = bool(subencoding & 128) palette_size = subencoding & 127 bytes = palette_size * bytes_per_pixel palette_data = buf.read(bytes) assert len(palette_data) == bytes, "Palette data came up short: {} bytes rather than {}".format(len(palette_data), bytes) logger.debug('Handling zrle tile: run_length_encoded=%s palette_size=%s', run_length_encoded, palette_size) if palette_size == 0 and not run_length_encoded: # 0: Raw pixel data. width*height pixel values follow (where width and # height are the width and height of the tile): # # +-----------------------------+--------------+-------------+ # | No. of bytes | Type [Value] | Description | # +-----------------------------+--------------+-------------+ # | width*height*BytesPerCPixel | CPIXEL array | pixels | # +-----------------------------+--------------+-------------+ data = buf.read(bytes_per_pixel * tile_width * tile_height) data = np.frombuffer(data, dtype=np.uint8).reshape((tile_height, tile_width, 3)) tile[:, :, :] = data return elif palette_size == 1 and not run_length_encoded: # 1: A solid tile consisting of a single color. The pixel value # follows: # # +----------------+--------------+-------------+ # | No. of bytes | Type [Value] | Description | # +----------------+--------------+-------------+ # | bytesPerCPixel | CPIXEL | pixelValue | # +----------------+--------------+-------------+ color = np.frombuffer(palette_data, dtype=np.uint8).reshape((3, )) tile[:, :, :] = color return elif not run_length_encoded: # 2 to 16: Packed palette types. The paletteSize is the value of the # subencoding, which is followed by the palette, consisting of # paletteSize pixel values. The packed pixels follow, with each # pixel represented as a bit field yielding a zero-based index into # the palette. For paletteSize 2, a 1-bit field is used; for # paletteSize 3 or 4, a 2-bit field is used; and for paletteSize # from 5 to 16, a 4-bit field is used. 
The bit fields are packed # into bytes, with the most significant bits representing the # leftmost pixel (i.e., big endian). For tiles not a multiple of 8, # 4, or 2 pixels wide (as appropriate), padding bits are used to # align each row to an exact number of bytes. # +----------------------------+--------------+--------------+ # | No. of bytes | Type [Value] | Description | # +----------------------------+--------------+--------------+ # | paletteSize*bytesPerCPixel | CPIXEL array | palette | # | m | U8 array | packedPixels | # +----------------------------+--------------+--------------+ # where m is the number of bytes representing the packed pixels. # For paletteSize of 2, this is div(width+7,8)*height; for # paletteSize of 3 or 4, this is div(width+3,4)*height; or for # paletteSize of 5 to 16, this is div(width+1,2)*height. palette = np.frombuffer(palette_data, dtype=np.uint8).reshape((-1, 3)) if palette_size > 16: # No palette reuse in zrle assert palette_size < 127 bits_per_packed_pixel = 8 elif palette_size > 4: bits_per_packed_pixel = 4 elif palette_size > 2: bits_per_packed_pixel = 2 else: bits_per_packed_pixel = 1 for j in range(tile_height): # Discard any leftover bits for each new line b = 0 nbits = 0 for i in range(tile_width): if nbits == 0: (b,) = struct.unpack('!B', buf.read(1)) nbits = 8 nbits -= bits_per_packed_pixel idx = (b >> nbits) & ((1 << bits_per_packed_pixel) - 1) & 127 tile[j, i, :] = palette[idx] return elif run_length_encoded and palette_size == 0: # 128: Plain RLE. The data consists of a number of runs, repeated # until the tile is done. Runs may continue from the end of one row # to the beginning of the next. Each run is represented by a single # pixel value followed by the length of the run. The length is # represented as one or more bytes. The length is calculated as one # more than the sum of all the bytes representing the length. Any # byte value other than 255 indicates the final byte. 
So for # example, length 1 is represented as [0], 255 as [254], 256 as # [255,0], 257 as [255,1], 510 as [255,254], 511 as [255,255,0], and # so on. # # +-------------------------+--------------+-----------------------+ # | No. of bytes | Type [Value] | Description | # +-------------------------+--------------+-----------------------+ # | bytesPerCPixel | CPIXEL | pixelValue | # | div(runLength - 1, 255) | U8 array | 255 | # | 1 | U8 | (runLength-1) mod 255 | # +-------------------------+--------------+-----------------------+ i = 0 pixels = tile_width * tile_height data = np.zeros((pixels, 3)) while i < pixels: pix = buf.read(bytes_per_pixel) pix = np.frombuffer(pix, dtype=np.uint8).reshape((3, )) count = 1 b = None while b == 255 or b is None: (b,) = struct.unpack('!B', buf.read(1)) count += b data[i:i+count, :] = pix i += count assert i == pixels elif run_length_encoded and palette_size > 1: # 130 to 255: Palette RLE. Followed by the palette, consisting of # paletteSize = (subencoding - 128) pixel values: # # +----------------------------+--------------+-------------+ # | No. of bytes | Type [Value] | Description | # +----------------------------+--------------+-------------+ # | paletteSize*bytesPerCPixel | CPIXEL array | palette | # +----------------------------+--------------+-------------+ # # Following the palette is, as with plain RLE, a number of runs, # repeated until the tile is done. A run of length one is # represented simply by a palette index: # # +--------------+--------------+--------------+ # | No. of bytes | Type [Value]
if CONTROLLED_VOCABULARY is None: self.CONTROLLED_VOCABULARY = [] else: self.CONTROLLED_VOCABULARY = CONTROLLED_VOCABULARY if LEXICON_REF is None: self.LEXICON_REF = [] else: self.LEXICON_REF = LEXICON_REF if EXTERNAL_REF is None: self.EXTERNAL_REF = [] else: self.EXTERNAL_REF = EXTERNAL_REF def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, ANNOTATION_DOCUMENT) if subclass is not None: return subclass(*args_, **kwargs_) if ANNOTATION_DOCUMENT.subclass: return ANNOTATION_DOCUMENT.subclass(*args_, **kwargs_) else: return ANNOTATION_DOCUMENT(*args_, **kwargs_) factory = staticmethod(factory) def get_LICENSE(self): return self.LICENSE def set_LICENSE(self, LICENSE): self.LICENSE = LICENSE def add_LICENSE(self, value): self.LICENSE.append(value) def insert_LICENSE_at(self, index, value): self.LICENSE.insert(index, value) def replace_LICENSE_at(self, index, value): self.LICENSE[index] = value def get_HEADER(self): return self.HEADER def set_HEADER(self, HEADER): self.HEADER = HEADER def get_TIME_ORDER(self): return self.TIME_ORDER def set_TIME_ORDER(self, TIME_ORDER): self.TIME_ORDER = TIME_ORDER def get_TIER(self): return self.TIER def set_TIER(self, TIER): self.TIER = TIER def add_TIER(self, value): self.TIER.append(value) def insert_TIER_at(self, index, value): self.TIER.insert(index, value) def replace_TIER_at(self, index, value): self.TIER[index] = value def get_LINGUISTIC_TYPE(self): return self.LINGUISTIC_TYPE def set_LINGUISTIC_TYPE(self, LINGUISTIC_TYPE): self.LINGUISTIC_TYPE = LINGUISTIC_TYPE def add_LINGUISTIC_TYPE(self, value): self.LINGUISTIC_TYPE.append(value) def insert_LINGUISTIC_TYPE_at(self, index, value): self.LINGUISTIC_TYPE.insert(index, value) def replace_LINGUISTIC_TYPE_at(self, index, value): self.LINGUISTIC_TYPE[index] = value def get_LOCALE(self): return self.LOCALE def set_LOCALE(self, LOCALE): self.LOCALE = LOCALE def add_LOCALE(self, value): 
self.LOCALE.append(value) def insert_LOCALE_at(self, index, value): self.LOCALE.insert(index, value) def replace_LOCALE_at(self, index, value): self.LOCALE[index] = value def get_LANGUAGE(self): return self.LANGUAGE def set_LANGUAGE(self, LANGUAGE): self.LANGUAGE = LANGUAGE def add_LANGUAGE(self, value): self.LANGUAGE.append(value) def insert_LANGUAGE_at(self, index, value): self.LANGUAGE.insert(index, value) def replace_LANGUAGE_at(self, index, value): self.LANGUAGE[index] = value def get_CONSTRAINT(self): return self.CONSTRAINT def set_CONSTRAINT(self, CONSTRAINT): self.CONSTRAINT = CONSTRAINT def add_CONSTRAINT(self, value): self.CONSTRAINT.append(value) def insert_CONSTRAINT_at(self, index, value): self.CONSTRAINT.insert(index, value) def replace_CONSTRAINT_at(self, index, value): self.CONSTRAINT[index] = value def get_CONTROLLED_VOCABULARY(self): return self.CONTROLLED_VOCABULARY def set_CONTROLLED_VOCABULARY(self, CONTROLLED_VOCABULARY): self.CONTROLLED_VOCABULARY = CONTROLLED_VOCABULARY def add_CONTROLLED_VOCABULARY(self, value): self.CONTROLLED_VOCABULARY.append(value) def insert_CONTROLLED_VOCABULARY_at(self, index, value): self.CONTROLLED_VOCABULARY.insert(index, value) def replace_CONTROLLED_VOCABULARY_at(self, index, value): self.CONTROLLED_VOCABULARY[index] = value def get_LEXICON_REF(self): return self.LEXICON_REF def set_LEXICON_REF(self, LEXICON_REF): self.LEXICON_REF = LEXICON_REF def add_LEXICON_REF(self, value): self.LEXICON_REF.append(value) def insert_LEXICON_REF_at(self, index, value): self.LEXICON_REF.insert(index, value) def replace_LEXICON_REF_at(self, index, value): self.LEXICON_REF[index] = value def get_EXTERNAL_REF(self): return self.EXTERNAL_REF def set_EXTERNAL_REF(self, EXTERNAL_REF): self.EXTERNAL_REF = EXTERNAL_REF def add_EXTERNAL_REF(self, value): self.EXTERNAL_REF.append(value) def insert_EXTERNAL_REF_at(self, index, value): self.EXTERNAL_REF.insert(index, value) def replace_EXTERNAL_REF_at(self, index, value): 
self.EXTERNAL_REF[index] = value def get_DATE(self): return self.DATE def set_DATE(self, DATE): self.DATE = DATE def get_AUTHOR(self): return self.AUTHOR def set_AUTHOR(self, AUTHOR): self.AUTHOR = AUTHOR def get_VERSION(self): return self.VERSION def set_VERSION(self, VERSION): self.VERSION = VERSION def get_FORMAT(self): return self.FORMAT def set_FORMAT(self, FORMAT): self.FORMAT = FORMAT def hasContent_(self): if ( self.LICENSE or self.HEADER is not None or self.TIME_ORDER is not None or self.TIER or self.LINGUISTIC_TYPE or self.LOCALE or self.LANGUAGE or self.CONSTRAINT or self.CONTROLLED_VOCABULARY or self.LEXICON_REF or self.EXTERNAL_REF ): return True else: return False def export(self, outfile, level, namespace_='', name_='ANNOTATION_DOCUMENT', namespacedef_='', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('ANNOTATION_DOCUMENT') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='ANNOTATION_DOCUMENT') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='ANNOTATION_DOCUMENT', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ANNOTATION_DOCUMENT'): if self.DATE is not None and 'DATE' not in already_processed: already_processed.add('DATE') outfile.write(' DATE="%s"' % self.gds_format_datetime(self.DATE, input_name='DATE')) if self.AUTHOR is not None and 'AUTHOR' not in already_processed: already_processed.add('AUTHOR') 
outfile.write(' AUTHOR=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.AUTHOR), input_name='AUTHOR')), )) if self.VERSION is not None and 'VERSION' not in already_processed: already_processed.add('VERSION') outfile.write(' VERSION=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.VERSION), input_name='VERSION')), )) if self.FORMAT != "2.8" and 'FORMAT' not in already_processed: already_processed.add('FORMAT') outfile.write(' FORMAT=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.FORMAT), input_name='FORMAT')), )) def exportChildren(self, outfile, level, namespace_='', name_='ANNOTATION_DOCUMENT', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for LICENSE_ in self.LICENSE: LICENSE_.export(outfile, level, namespace_, name_='LICENSE', pretty_print=pretty_print) if self.HEADER is not None: self.HEADER.export(outfile, level, namespace_, name_='HEADER', pretty_print=pretty_print) if self.TIME_ORDER is not None: self.TIME_ORDER.export(outfile, level, namespace_, name_='TIME_ORDER', pretty_print=pretty_print) for TIER_ in self.TIER: TIER_.export(outfile, level, namespace_, name_='TIER', pretty_print=pretty_print) for LINGUISTIC_TYPE_ in self.LINGUISTIC_TYPE: LINGUISTIC_TYPE_.export(outfile, level, namespace_, name_='LINGUISTIC_TYPE', pretty_print=pretty_print) for LOCALE_ in self.LOCALE: LOCALE_.export(outfile, level, namespace_, name_='LOCALE', pretty_print=pretty_print) for LANGUAGE_ in self.LANGUAGE: LANGUAGE_.export(outfile, level, namespace_, name_='LANGUAGE', pretty_print=pretty_print) for CONSTRAINT_ in self.CONSTRAINT: CONSTRAINT_.export(outfile, level, namespace_, name_='CONSTRAINT', pretty_print=pretty_print) for CONTROLLED_VOCABULARY_ in self.CONTROLLED_VOCABULARY: CONTROLLED_VOCABULARY_.export(outfile, level, namespace_, name_='CONTROLLED_VOCABULARY', pretty_print=pretty_print) for LEXICON_REF_ in self.LEXICON_REF: LEXICON_REF_.export(outfile, level, namespace_, 
name_='LEXICON_REF', pretty_print=pretty_print) for EXTERNAL_REF_ in self.EXTERNAL_REF: EXTERNAL_REF_.export(outfile, level, namespace_, name_='EXTERNAL_REF', pretty_print=pretty_print) def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('DATE', node) if value is not None and 'DATE' not in already_processed: already_processed.add('DATE') try: self.DATE = self.gds_parse_datetime(value) except ValueError as exp: raise ValueError('Bad date-time attribute (DATE): %s' % exp) value = find_attr_value_('AUTHOR', node) if value is not None and 'AUTHOR' not in already_processed: already_processed.add('AUTHOR') self.AUTHOR = value value = find_attr_value_('VERSION', node) if value is not None and 'VERSION' not in already_processed: already_processed.add('VERSION') self.VERSION = value value = find_attr_value_('FORMAT', node) if value is not None and 'FORMAT' not in already_processed: already_processed.add('FORMAT') self.FORMAT = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'LICENSE': obj_ = licenseType.factory() obj_.build(child_) self.LICENSE.append(obj_) obj_.original_tagname_ = 'LICENSE' elif nodeName_ == 'HEADER': obj_ = headType.factory() obj_.build(child_) self.HEADER = obj_ obj_.original_tagname_ = 'HEADER' elif nodeName_ == 'TIME_ORDER': obj_ = timeType.factory() obj_.build(child_) self.TIME_ORDER = obj_ obj_.original_tagname_ = 'TIME_ORDER' elif nodeName_ == 'TIER': obj_ = tierType.factory() obj_.build(child_) self.TIER.append(obj_) obj_.original_tagname_ = 'TIER' elif nodeName_ == 'LINGUISTIC_TYPE': obj_ = lingType.factory() obj_.build(child_) self.LINGUISTIC_TYPE.append(obj_) obj_.original_tagname_ = 'LINGUISTIC_TYPE' elif nodeName_ == 'LOCALE': obj_ = 
localeType.factory() obj_.build(child_) self.LOCALE.append(obj_) obj_.original_tagname_ = 'LOCALE' elif nodeName_ == 'LANGUAGE': obj_ = langType.factory() obj_.build(child_) self.LANGUAGE.append(obj_) obj_.original_tagname_ = 'LANGUAGE' elif nodeName_ == 'CONSTRAINT': obj_ = constraintType.factory() obj_.build(child_) self.CONSTRAINT.append(obj_) obj_.original_tagname_ = 'CONSTRAINT' elif nodeName_ == 'CONTROLLED_VOCABULARY': obj_ = convocType.factory() obj_.build(child_) self.CONTROLLED_VOCABULARY.append(obj_) obj_.original_tagname_ = 'CONTROLLED_VOCABULARY' elif nodeName_ == 'LEXICON_REF': obj_ = lexRefType.factory() obj_.build(child_) self.LEXICON_REF.append(obj_) obj_.original_tagname_ = 'LEXICON_REF' elif nodeName_ == 'EXTERNAL_REF': obj_ = extRefType.factory() obj_.build(child_) self.EXTERNAL_REF.append(obj_) obj_.original_tagname_ = 'EXTERNAL_REF' # end class ANNOTATION_DOCUMENT class headType(GeneratedsSuper): """This attribute is deprecated. Use MEDIA_DESCRIPTOR elements instead.""" subclass = None superclass = None def __init__(self, MEDIA_FILE=None, TIME_UNITS='milliseconds', MEDIA_DESCRIPTOR=None, LINKED_FILE_DESCRIPTOR=None, PROPERTY=None): self.original_tagname_ = None self.MEDIA_FILE = _cast(None, MEDIA_FILE) self.TIME_UNITS = _cast(None, TIME_UNITS) if MEDIA_DESCRIPTOR is None: self.MEDIA_DESCRIPTOR = [] else: self.MEDIA_DESCRIPTOR = MEDIA_DESCRIPTOR if LINKED_FILE_DESCRIPTOR is None: self.LINKED_FILE_DESCRIPTOR = [] else: self.LINKED_FILE_DESCRIPTOR = LINKED_FILE_DESCRIPTOR if PROPERTY is None: self.PROPERTY = [] else: self.PROPERTY = PROPERTY def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, headType) if subclass is not None: return subclass(*args_, **kwargs_) if headType.subclass: return headType.subclass(*args_, **kwargs_) else: return headType(*args_, **kwargs_) factory = staticmethod(factory) def get_MEDIA_DESCRIPTOR(self): return self.MEDIA_DESCRIPTOR def 
set_MEDIA_DESCRIPTOR(self, MEDIA_DESCRIPTOR): self.MEDIA_DESCRIPTOR = MEDIA_DESCRIPTOR def add_MEDIA_DESCRIPTOR(self, value): self.MEDIA_DESCRIPTOR.append(value) def insert_MEDIA_DESCRIPTOR_at(self, index, value): self.MEDIA_DESCRIPTOR.insert(index, value) def replace_MEDIA_DESCRIPTOR_at(self, index, value): self.MEDIA_DESCRIPTOR[index] = value def get_LINKED_FILE_DESCRIPTOR(self): return self.LINKED_FILE_DESCRIPTOR def set_LINKED_FILE_DESCRIPTOR(self, LINKED_FILE_DESCRIPTOR): self.LINKED_FILE_DESCRIPTOR = LINKED_FILE_DESCRIPTOR def add_LINKED_FILE_DESCRIPTOR(self, value): self.LINKED_FILE_DESCRIPTOR.append(value) def insert_LINKED_FILE_DESCRIPTOR_at(self, index, value): self.LINKED_FILE_DESCRIPTOR.insert(index, value) def replace_LINKED_FILE_DESCRIPTOR_at(self, index, value): self.LINKED_FILE_DESCRIPTOR[index] = value def get_PROPERTY(self): return self.PROPERTY def set_PROPERTY(self, PROPERTY): self.PROPERTY = PROPERTY def add_PROPERTY(self, value): self.PROPERTY.append(value) def insert_PROPERTY_at(self, index, value): self.PROPERTY.insert(index, value) def replace_PROPERTY_at(self, index, value): self.PROPERTY[index] = value def get_MEDIA_FILE(self): return self.MEDIA_FILE def set_MEDIA_FILE(self, MEDIA_FILE): self.MEDIA_FILE = MEDIA_FILE def get_TIME_UNITS(self): return self.TIME_UNITS def set_TIME_UNITS(self, TIME_UNITS): self.TIME_UNITS = TIME_UNITS def hasContent_(self): if ( self.MEDIA_DESCRIPTOR or self.LINKED_FILE_DESCRIPTOR or self.PROPERTY ): return True else: return False def export(self, outfile, level, namespace_='', name_='headType', namespacedef_='', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('headType') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) 
already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='headType') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespace_='', name_='headType', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='headType'): if self.MEDIA_FILE is not None and 'MEDIA_FILE' not in already_processed: already_processed.add('MEDIA_FILE') outfile.write(' MEDIA_FILE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.MEDIA_FILE), input_name='MEDIA_FILE')), )) if self.TIME_UNITS != "milliseconds" and 'TIME_UNITS' not in already_processed: already_processed.add('TIME_UNITS') outfile.write(' TIME_UNITS=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.TIME_UNITS), input_name='TIME_UNITS')), ))
from portality import models, lcc from portality.datasets import licenses, main_license_options from flask_login import current_user from portality.util import flash_with_url, listpop from copy import deepcopy from portality.formcontext.choices import Choices def interpret_list(current_values, allowed_values, substitutions): current_values = deepcopy(current_values) interpreted_fields = {} foreign_values = {} for cv in current_values: if cv not in allowed_values: foreign_values[current_values.index(cv)] = cv ps = foreign_values.keys() ps.sort() # FIXME: if the data is broken, just return it as is if len(ps) > len(substitutions): return current_values i = 0 for k in ps: interpreted_fields[substitutions[i].get("field")] = current_values[k] current_values[k] = substitutions[i].get("default") i += 1 return current_values, interpreted_fields def interpret_special(val): # if you modify this, make sure to modify reverse_interpret_special as well if isinstance(val, basestring): if val.lower() == Choices.TRUE.lower(): return True elif val.lower() == Choices.FALSE.lower(): return False elif val.lower() == Choices.NONE.lower(): return None elif val == Choices.digital_archiving_policy_val("none"): return None if isinstance(val, list): if len(val) == 1: actual_val = interpret_special(val[0]) if not actual_val: return [] return val return val return val def reverse_interpret_special(val, field=''): # if you modify this, make sure to modify interpret_special as well if val is None: return Choices.NONE elif val is True: return Choices.TRUE elif val is False: return Choices.FALSE # no need to handle digital archiving policy or other list # fields here - empty lists handled below if isinstance(val, list): if len(val) == 1: reverse_actual_val = reverse_interpret_special(val[0], field=field) return [reverse_actual_val] elif len(val) == 0: # mostly it'll just be a None val if field == 'digital_archiving_policy': return [Choices.digital_archiving_policy_val("none")] return 
[Choices.NONE] return val return val def interpret_other(value, other_field_data, other_value=Choices.OTHER, store_other_label=False): ''' Interpret a value list coming from (e.g.) checkboxes when one of them says "Other" and allows free-text input. The value can also be a string. In that case, if it matched other_value, other_field_data is returned instead of the original value. This is for radio buttons with an "Other" option - you only get 1 value from the form, but if it's "Other", you still need to replace it with the relevant free text field data. :param value: String or list of values from the form. checkboxes_field.data basically. :param other_field_data: data from the Other inline extra text input field. Usually checkboxes_field_other.data or similar. :param other_value: Which checkbox has an extra field? Put its value in here. It's "Other" by default. More technically: the value which triggers considering and adding the data in other_field to value. ''' # if you modify this, make sure to modify reverse_interpret_other too if isinstance(value, basestring): if value == other_value: return other_field_data elif isinstance(value, list): value = value[:] # if "Other" (or some custom value) is in the there, remove it and take the data from the extra text field if other_value in value and other_field_data: # preserve the order, important for reversing this process when displaying the edit form where = value.index(other_value) if store_other_label: # Needed when multiple items in the list could be freely specified, # i.e. unrestricted by the choices for that field. # Digital archiving policies is such a field, with both an # "Other" choice requiring free text input and a "A national library" # choice requiring free text input, presumably with the name # of the library. 
value[where] = [other_value, other_field_data] else: value[where] = other_field_data # don't know what else to do, just return it as-is return value def reverse_interpret_other(interpreted_value, possible_original_values, other_value=Choices.OTHER, replace_label=Choices.OTHER): ''' Returns tuple: (main field value or list of values, other field value) ''' # if you modify this, make sure to modify interpret_other too other_field_val = '' if isinstance(interpreted_value, basestring): # A special case first: where the value is the empty string. # In that case, the main field was never submitted (e.g. if it was # a choice of "Yes", "No" and "Other", none of those were submitted # as an answer - maybe it was an optional field). if not interpreted_value: return None, None # if the stored (a.k.a. interpreted) value is not one of the # possible values, then the "Other" option must have been # selected during initial submission # if so, all we've got to do is swap them # so the main field gets a value of "Other" or similar # and the secondary (a.k.a. 
other) field gets the currently # stored value - resulting in a form that looks exactly like the # one initially submitted if interpreted_value not in possible_original_values: return other_value, interpreted_value elif isinstance(interpreted_value, list): # 2 copies of the list needed interpreted_value = interpreted_value[:] # don't modify the original list passed in for iv in interpreted_value[:]: # don't modify the list while iterating over it # same deal here, if the original list was ['LOCKSS', 'Other'] # and the secondary field was 'some other policy' # then it would have been interpreted by interpret_other # into ['LOCKSS', 'some other policy'] # so now we need to turn that back into # (['LOCKSS', 'Other'], 'some other policy') if iv not in possible_original_values: where = interpreted_value.index(iv) if isinstance(iv, list): # This is a field with two or more choices which require # further specification via free text entry. # If we only recorded the free text values, we wouldn't # be able to tell which one relates to which choice. # E.g. ["some other archiving policy", "Library of Chile"] # does not tell us that "some other archiving policy" # is related to the "Other" field, and "Library of Chile" # is related to the "A national library field. # # [["Other", "some other archiving policy"], ["A national library", "Library of Chile"]] # does tell us that, on the other hand. # It is this case that we are dealing with here. 
label = iv[0] val = iv[1] if label == replace_label: other_field_val = val interpreted_value[where] = label else: continue else: other_field_val = iv interpreted_value[where] = other_value break return interpreted_value, other_field_val class JournalGenericXWalk(object): @classmethod def is_new_editor_group(cls, form, old): old_eg = old.editor_group new_eg = form.editor_group.data return old_eg != new_eg and new_eg is not None and new_eg != "" @classmethod def is_new_editor(cls, form, old): old_ed = old.editor new_ed = form.editor.data return old_ed != new_ed and new_ed is not None and new_ed != "" class SuggestionFormXWalk(JournalGenericXWalk): _formFields2objectFields = { "instructions_authors_url" : "bibjson.link.url where bibjson.link.type=author_instructions", "oa_statement_url" : "bibjson.link.url where bibjson.link.type=oa_statement", "aims_scope_url" : "bibjson.link.url where bibjson.link.type=aims_scope", "submission_charges_url" : "bibjson.submission_charges_url", "editorial_board_url" : "bibjson.link.url where bibjson.link.type=editorial_board", } @classmethod def formField2objectField(cls, field): return cls._formFields2objectFields.get(field, field) @classmethod def form2obj(cls, form): suggestion = models.Suggestion() bibjson = suggestion.bibjson() if form.title.data: bibjson.title = form.title.data bibjson.add_url(form.url.data, urltype='homepage') if form.alternative_title.data: bibjson.alternative_title = form.alternative_title.data if form.pissn.data: bibjson.add_identifier(bibjson.P_ISSN, form.pissn.data) if form.eissn.data: bibjson.add_identifier(bibjson.E_ISSN, form.eissn.data) if form.publisher.data: bibjson.publisher = form.publisher.data if form.society_institution.data: bibjson.institution = form.society_institution.data if form.platform.data: bibjson.provider = form.platform.data if form.contact_name.data or form.contact_email.data: suggestion.add_contact(form.contact_name.data, form.contact_email.data) if form.country.data: 
bibjson.country = form.country.data if interpret_special(form.processing_charges.data): bibjson.set_apc(form.processing_charges_currency.data, form.processing_charges_amount.data) if form.processing_charges_url.data: bibjson.apc_url = form.processing_charges_url.data if interpret_special(form.submission_charges.data): bibjson.set_submission_charges(form.submission_charges_currency.data, form.submission_charges_amount.data) if form.submission_charges_url.data: bibjson.submission_charges_url = form.submission_charges_url.data suggestion.set_articles_last_year(form.articles_last_year.data, form.articles_last_year_url.data) if interpret_special(form.waiver_policy.data): bibjson.add_url(form.waiver_policy_url.data, 'waiver_policy') # checkboxes if interpret_special(form.digital_archiving_policy.data) or form.digital_archiving_policy_url.data: archiving_policies = interpret_special(form.digital_archiving_policy.data) archiving_policies = interpret_other(archiving_policies, form.digital_archiving_policy_other.data, store_other_label=True) archiving_policies = interpret_other(archiving_policies, form.digital_archiving_policy_library.data, Choices.digital_archiving_policy_val("library"), store_other_label=True) bibjson.set_archiving_policy(archiving_policies, form.digital_archiving_policy_url.data) if form.crawl_permission.data and form.crawl_permission.data != 'None': bibjson.allows_fulltext_indexing = interpret_special(form.crawl_permission.data) # just binary # checkboxes article_ids = interpret_special(form.article_identifiers.data) article_ids = interpret_other(article_ids, form.article_identifiers_other.data) if article_ids: bibjson.persistent_identifier_scheme = article_ids if form.metadata_provision.data and form.metadata_provision.data != 'None': suggestion.article_metadata = interpret_special(form.metadata_provision.data) # just binary if (form.download_statistics.data and form.download_statistics.data != 'None') or form.download_statistics_url.data: 
bibjson.set_article_statistics(form.download_statistics_url.data, interpret_special(form.download_statistics.data)) if form.first_fulltext_oa_year.data: bibjson.set_oa_start(year=form.first_fulltext_oa_year.data) # checkboxes fulltext_format = interpret_other(form.fulltext_format.data, form.fulltext_format_other.data) if fulltext_format: bibjson.format = fulltext_format if form.keywords.data: bibjson.set_keywords(form.keywords.data) # tag list field if form.languages.data: bibjson.set_language(form.languages.data) # select multiple field - gives a list back bibjson.add_url(form.editorial_board_url.data, urltype='editorial_board') if form.review_process.data or form.review_process_url.data: bibjson.set_editorial_review(form.review_process.data, form.review_process_url.data) bibjson.add_url(form.aims_scope_url.data, urltype='aims_scope') bibjson.add_url(form.instructions_authors_url.data, urltype='author_instructions') if (form.plagiarism_screening.data and form.plagiarism_screening.data != 'None') or form.plagiarism_screening_url.data: bibjson.set_plagiarism_detection( form.plagiarism_screening_url.data, has_detection=interpret_special(form.plagiarism_screening.data) ) if form.publication_time.data: bibjson.publication_time = form.publication_time.data bibjson.add_url(form.oa_statement_url.data, urltype='oa_statement') license_type = interpret_other(form.license.data, form.license_other.data) license_title = license_type if license_type in licenses: by = licenses[license_type]['BY'] nc = licenses[license_type]['NC'] nd = licenses[license_type]['ND'] sa = licenses[license_type]['SA'] license_title = licenses[license_type]['title'] elif form.license_checkbox.data: by = True if 'BY' in form.license_checkbox.data else False nc = True
p[3] prnt.values = [p[5]] prnt.nl = True p[0] = prnt def p_print_stmt6(p): ''' print_stmt : PRINT RIGHTSHIFT test COMMA print_list ''' prnt = ast.Print() all_values = p[5] good_values = [item for item in all_values if item is not None] if all_values[-1] is None: nl = False else: nl = True prnt.dest = p[3] prnt.values = good_values prnt.nl = nl p[0] = prnt def p_print_list1(p): ''' print_list : test COMMA ''' p[0] = [p[1], None] def p_print_list2(p): ''' print_list : test print_list_list ''' p[0] = [p[1]] + p[2] def p_print_list3(p): ''' print_list : test print_list_list COMMA ''' p[0] = [p[1]] + p[2] + [None] def p_print_list_list1(p): ''' print_list_list : COMMA test ''' p[0] = [p[2]] def p_print_list_list2(p): ''' print_list_list : print_list_list COMMA test ''' p[0] = p[1] + [p[3]] def p_del_stmt(p): ''' del_stmt : DEL exprlist ''' exprlist = p[2] set_context(exprlist, Del, p) del_stmt = ast.Delete() del_stmt.targets = [exprlist] p[0] = del_stmt def p_pass_stmt(p): ''' pass_stmt : PASS ''' pass_stmt = ast.Pass() pass_stmt.lineno = p.lineno(1) p[0] = pass_stmt def p_flow_stmt(p): ''' flow_stmt : break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt ''' p[0] = p[1] def p_break_stmt(p): ''' break_stmt : BREAK ''' break_stmt = ast.Break() break_stmt.lineno = p.lineno(1) p[0] = break_stmt def p_continue_stmt(p): ''' continue_stmt : CONTINUE ''' continue_stmt = ast.Continue() continue_stmt.lineno = p.lineno(1) p[0] = continue_stmt def p_return_stmt1(p): ''' return_stmt : RETURN ''' ret = ast.Return() ret.value = None p[0] = ret def p_return_stmt2(p): ''' return_stmt : RETURN testlist ''' value = ast_for_testlist(p[2]) ret = ast.Return() ret.value = value p[0] = ret def p_raise_stmt1(p): ''' raise_stmt : RAISE ''' raise_stmt = ast.Raise() raise_stmt.type = None raise_stmt.inst = None raise_stmt.tback = None p[0] = raise_stmt def p_raise_stmt2(p): ''' raise_stmt : RAISE test ''' raise_stmt = ast.Raise() raise_stmt.type = p[2] raise_stmt.inst = None 
    # NOTE(review): this region was whitespace-mangled; line breaks and indentation
    # below are reconstructed.  These are PLY (yacc) grammar actions: the docstring
    # of each p_* function IS the grammar rule and must not be edited.
    # Tail of p_raise_stmt2 (its `def` line is above this view).
    raise_stmt.tback = None
    p[0] = raise_stmt


# --- raise statement variants (Python 2 `raise type, inst, tback` forms) ---

def p_raise_stmt3(p):
    ''' raise_stmt : RAISE test COMMA test '''
    # raise <type>, <instance>
    raise_stmt = ast.Raise()
    raise_stmt.type = p[2]
    raise_stmt.inst = p[4]
    raise_stmt.tback = None
    p[0] = raise_stmt


def p_raise_stmt4(p):
    ''' raise_stmt : RAISE test COMMA test COMMA test '''
    # raise <type>, <instance>, <traceback>
    raise_stmt = ast.Raise()
    raise_stmt.type = p[2]
    raise_stmt.inst = p[4]
    raise_stmt.tback = p[6]
    p[0] = raise_stmt


# --- yield ---

def p_yield_stmt(p):
    ''' yield_stmt : yield_expr '''
    # a bare yield used as a statement is wrapped in an Expr node
    p[0] = ast.Expr(value=p[1])


def p_yield_expr1(p):
    ''' yield_expr : YIELD '''
    p[0] = ast.Yield(value=None, lineno=p.lineno(1))


def p_yield_expr2(p):
    ''' yield_expr : YIELD testlist '''
    value = ast_for_testlist(p[2])
    p[0] = ast.Yield(value=value, lineno=p.lineno(1))


# --- global declarations ---

def p_global_stmt1(p):
    ''' global_stmt : GLOBAL NAME '''
    global_stmt = ast.Global()
    global_stmt.names = [p[2]]
    global_stmt.lineno = p.lineno(1)
    p[0] = global_stmt


def p_global_stmt2(p):
    ''' global_stmt : GLOBAL NAME globals_list '''
    # `global a, b, c` — first name plus the comma-separated tail
    global_stmt = ast.Global()
    global_stmt.names = [p[2]] + p[3]
    global_stmt.lineno = p.lineno(1)
    p[0] = global_stmt


def p_globals_list1(p):
    ''' globals_list : COMMA NAME globals_list '''
    p[0] = [p[2]] + p[3]


def p_globals_list2(p):
    ''' globals_list : COMMA NAME '''
    p[0] = [p[2]]


# --- exec statement (Python 2 only) ---

def p_exec_stmt1(p):
    ''' exec_stmt : EXEC expr '''
    exec_stmt = ast.Exec()
    exec_stmt.body = p[2]
    exec_stmt.globals = None
    exec_stmt.locals = None
    p[0] = exec_stmt


def p_exec_stmt2(p):
    ''' exec_stmt : EXEC expr IN test '''
    # exec <code> in <globals>
    exec_stmt = ast.Exec()
    exec_stmt.body = p[2]
    exec_stmt.globals = p[4]
    exec_stmt.locals = None
    p[0] = exec_stmt


def p_exec_stmt3(p):
    ''' exec_stmt : EXEC expr IN test COMMA test '''
    # exec <code> in <globals>, <locals>
    exec_stmt = ast.Exec()
    exec_stmt.body = p[2]
    exec_stmt.globals = p[4]
    exec_stmt.locals = p[6]
    p[0] = exec_stmt


# --- assert ---

def p_assert_stmt1(p):
    ''' assert_stmt : ASSERT test '''
    assert_stmt = ast.Assert()
    assert_stmt.test = p[2]
    assert_stmt.msg = None
    p[0] = assert_stmt


def p_assert_stmt2(p):
    ''' assert_stmt : ASSERT test COMMA test '''
    # assert <test>, <message>
    assert_stmt = ast.Assert()
    assert_stmt.test = p[2]
    assert_stmt.msg = p[4]
    p[0] = assert_stmt


# --- expression statements, assignment, augmented assignment ---

def p_expr_stmt1(p):
    ''' expr_stmt : testlist '''
    expr = ast.Expr()
    expr.value = ast_for_testlist(p[1])
    p[0] = expr


def p_expr_stmt2(p):
    ''' expr_stmt : testlist augassign testlist
                  | testlist augassign yield_expr '''
    # augmented assignment, e.g. `a += b`; target must be a plain
    # name/attribute/subscript (checked against aug_assign_allowed)
    op, lineno = p[2]
    lhs = ast_for_testlist(p[1])
    rhs = ast_for_testlist(p[3])
    set_context(lhs, Store, p)
    if type(lhs) not in aug_assign_allowed:
        msg = 'illegal expression for augmented assignment'
        syntax_error(msg, FakeToken(p.lexer.lexer, lineno))
    aug = ast.AugAssign()
    aug.target = lhs
    aug.value = rhs
    aug.op = op
    p[0] = aug


def p_expr_stmt3(p):
    ''' expr_stmt : testlist equal_list '''
    # chained assignment `a = b = value`: the last item is the value, the
    # rest are targets.
    # NOTE(review): `map(...)` followed by `.pop()` only works when map
    # returns a list, i.e. Python 2 — consistent with ast.Exec above.
    all_items = [p[1]] + p[2]
    targets = map(ast_for_testlist, all_items)
    value = targets.pop()
    for item in targets:
        if type(item) == ast.Yield:
            msg = "assignment to yield expression not possible"
            syntax_error(msg, FakeToken(p.lexer.lexer, item.lineno))
        set_context(item, Store, p)
    assg = ast.Assign()
    assg.targets = targets
    assg.value = value
    p[0] = assg


def p_augassign(p):
    ''' augassign : AMPEREQUAL
                  | CIRCUMFLEXEQUAL
                  | DOUBLESLASHEQUAL
                  | DOUBLESTAREQUAL
                  | LEFTSHIFTEQUAL
                  | MINUSEQUAL
                  | PERCENTEQUAL
                  | PLUSEQUAL
                  | RIGHTSHIFTEQUAL
                  | SLASHEQUAL
                  | STAREQUAL
                  | VBAREQUAL '''
    # map the token to its ast operator node via augassign_table
    lineno = p.lineno(1)
    op = augassign_table[p[1]]
    p[0] = (op, lineno)


def p_equal_list1(p):
    ''' equal_list : EQUAL testlist
                   | EQUAL yield_expr '''
    p[0] = [p[2]]


def p_equal_list2(p):
    ''' equal_list : EQUAL testlist equal_list
                   | EQUAL yield_expr equal_list '''
    p[0] = [p[2]] + p[3]


# --- testlist: a single test is passed through; comma forms become lists ---

def p_testlist1(p):
    ''' testlist : test '''
    p[0] = p[1]


def p_testlist2(p):
    ''' testlist : test COMMA '''
    # trailing comma makes it a one-element tuple-ish list
    p[0] = [p[1]]


def p_testlist3(p):
    ''' testlist : test testlist_list '''
    p[0] = [p[1]] + p[2]


def p_testlist4(p):
    ''' testlist : test testlist_list COMMA '''
    p[0] = [p[1]] + p[2]


def p_testlist_list1(p):
    ''' testlist_list : COMMA test '''
    p[0] = [p[2]]


def p_testlist_list2(p):
    ''' testlist_list : testlist_list COMMA test '''
    p[0] = p[1] + [p[3]]


# --- compound statements ---

def p_compound_stmt(p):
    ''' compound_stmt : if_stmt
                      | while_stmt
                      | for_stmt
                      | try_stmt
                      | with_stmt
                      | funcdef
                      | classdef
                      | decorated '''
    p[0] = p[1]


def p_if_stmt1(p):
    ''' if_stmt : IF test COLON suite '''
    if_stmt = ast.If()
    if_stmt.test = p[2]
    if_stmt.body = p[4]
    if_stmt.lineno = p.lineno(1)
    # NOTE(review): fix_missing_locations is called before orelse is set here,
    # unlike the other variants — preserved as-is; confirm intent upstream.
    ast.fix_missing_locations(if_stmt)
    if_stmt.orelse = []
    p[0] = if_stmt


def p_if_stmt2(p):
    ''' if_stmt : IF test COLON suite elif_stmts '''
    if_stmt = ast.If()
    if_stmt.test = p[2]
    if_stmt.body = p[4]
    if_stmt.lineno = p.lineno(1)
    if_stmt.orelse = [p[5]]
    ast.fix_missing_locations(if_stmt)
    p[0] = if_stmt


def p_if_stmt3(p):
    ''' if_stmt : IF test COLON suite else_stmt '''
    if_stmt = ast.If()
    if_stmt.test = p[2]
    if_stmt.body = p[4]
    if_stmt.lineno = p.lineno(1)
    if_stmt.orelse = p[5]
    ast.fix_missing_locations(if_stmt)
    p[0] = if_stmt


def p_if_stmt4(p):
    ''' if_stmt : IF test COLON suite elif_stmts else_stmt '''
    # walk to the deepest elif in the chain and hang the else suite off it
    if_stmt = ast.If()
    if_stmt.test = p[2]
    if_stmt.body = p[4]
    if_stmt.lineno = p.lineno(1)
    elif_stmt = p[5]
    if_stmt.orelse = [elif_stmt]
    else_stmt = p[6]
    while elif_stmt.orelse:
        elif_stmt = elif_stmt.orelse[0]
    elif_stmt.orelse = else_stmt
    ast.fix_missing_locations(if_stmt)
    p[0] = if_stmt


def p_elif_stmts1(p):
    ''' elif_stmts : elif_stmt elif_stmts '''
    # chain successive elifs through orelse
    elif_stmt = p[1]
    elif_stmt.orelse = [p[2]]
    p[0] = elif_stmt


def p_elif_stmts2(p):
    ''' elif_stmts : elif_stmt '''
    p[0] = p[1]


def p_elif_stmt(p):
    ''' elif_stmt : ELIF test COLON suite '''
    # an elif is just a nested If node
    if_stmt = ast.If()
    if_stmt.test = p[2]
    if_stmt.body = p[4]
    if_stmt.lineno = p.lineno(1)
    if_stmt.orelse = []
    ast.fix_missing_locations(if_stmt)
    p[0] = if_stmt


def p_else_stmt(p):
    ''' else_stmt : ELSE COLON suite '''
    p[0] = p[3]


# --- while / for ---

def p_while_stmt1(p):
    ''' while_stmt : WHILE test COLON suite '''
    while_stmt = ast.While()
    while_stmt.test = p[2]
    while_stmt.body = p[4]
    while_stmt.orelse = []
    while_stmt.lineno = p.lineno(1)
    ast.fix_missing_locations(while_stmt)
    p[0] = while_stmt


def p_while_stmt2(p):
    ''' while_stmt : WHILE test COLON suite ELSE COLON suite '''
    while_stmt = ast.While()
    while_stmt.test = p[2]
    while_stmt.body = p[4]
    while_stmt.orelse = p[7]
    while_stmt.lineno = p.lineno(1)
    ast.fix_missing_locations(while_stmt)
    p[0] = while_stmt


def p_for_stmt1(p):
    ''' for_stmt : FOR exprlist IN testlist COLON suite '''
    for_stmt = ast.For()
    target = p[2]
    # the loop variable is a store target
    set_context(target, Store, p)
    for_stmt.target = target
    for_stmt.iter = ast_for_testlist(p[4])
    for_stmt.body = p[6]
    for_stmt.orelse = []
    for_stmt.lineno = p.lineno(1)
    ast.fix_missing_locations(for_stmt)
    p[0] = for_stmt


def p_for_stmt2(p):
    ''' for_stmt : FOR exprlist IN testlist COLON suite ELSE COLON suite '''
    for_stmt = ast.For()
    target = p[2]
    set_context(target, Store, p)
    for_stmt.target = target
    for_stmt.iter = ast_for_testlist(p[4])
    for_stmt.body = p[6]
    for_stmt.orelse = p[9]
    for_stmt.lineno = p.lineno(1)
    ast.fix_missing_locations(for_stmt)
    p[0] = for_stmt


# --- try (Python 2 split TryExcept / TryFinally nodes) ---

def p_try_stmt1(p):
    ''' try_stmt : TRY COLON suite FINALLY COLON suite '''
    try_finally = ast.TryFinally()
    try_finally.body = p[3]
    try_finally.finalbody = p[6]
    try_finally.lineno = p.lineno(1)
    ast.fix_missing_locations(try_finally)
    p[0] = try_finally


def p_try_stmt2(p):
    ''' try_stmt : TRY COLON suite except_clauses '''
    try_stmt = ast.TryExcept()
    try_stmt.body = p[3]
    try_stmt.handlers = p[4]
    try_stmt.orelse = []
    try_stmt.lineno = p.lineno(1)
    ast.fix_missing_locations(try_stmt)
    p[0] = try_stmt


def p_try_stmt3(p):
    ''' try_stmt : TRY COLON suite except_clauses ELSE COLON suite '''
    try_stmt = ast.TryExcept()
    try_stmt.body = p[3]
    try_stmt.handlers = p[4]
    try_stmt.orelse = p[7]
    try_stmt.lineno = p.lineno(1)
    ast.fix_missing_locations(try_stmt)
    p[0] = try_stmt


def p_try_stmt4(p):
    ''' try_stmt : TRY COLON suite except_clauses FINALLY COLON suite '''
    # try/except/finally: the TryExcept node is nested inside a TryFinally
    lineno = p.lineno(1)
    try_finally = ast.TryFinally()
    try_stmt = ast.TryExcept()
    try_stmt.body = p[3]
    try_stmt.handlers = p[4]
    try_stmt.orelse = []
    try_stmt.lineno = lineno
    ast.fix_missing_locations(try_stmt)
    try_finally.body = [try_stmt]
load(self, stream):
        # type: (Union[Path, StreamTextType]) -> Any
        # NOTE(review): whitespace-mangled region; line breaks/indentation below
        # are reconstructed.  The `def ` of this method sits just above this view.
        """
        at this point you either have the non-pure Parser (which has its own reader and
        scanner) or you have the pure Parser.
        If the pure Parser is set, then set the Reader and Scanner, if not already set.
        If either the Scanner or Reader are set, you cannot use the non-pure Parser,
            so reset it to the pure parser and set the Reader resp. Scanner if necessary
        """
        if not hasattr(stream, 'read') and hasattr(stream, 'open'):
            # pathlib.Path() instance
            with stream.open('rb') as fp:
                return self.load(fp)
        constructor, parser = self.get_constructor_parser(stream)
        try:
            return constructor.get_single_data()
        finally:
            # always dispose the parser and reset reader/scanner state so the
            # YAML instance can be reused; the resets are best-effort
            parser.dispose()
            try:
                self._reader.reset_reader()
            except AttributeError:
                pass
            try:
                self._scanner.reset_scanner()
            except AttributeError:
                pass

    def load_all(self, stream):  # *, skip=None):
        # type: (Union[Path, StreamTextType]) -> Any
        # Generator over all documents in the stream.
        if not hasattr(stream, 'read') and hasattr(stream, 'open'):
            # pathlib.Path() instance
            with stream.open('r') as fp:
                for d in self.load_all(fp):
                    yield d
                return
        # if skip is None:
        #     skip = []
        # elif isinstance(skip, int):
        #     skip = [skip]
        constructor, parser = self.get_constructor_parser(stream)
        try:
            while constructor.check_data():
                yield constructor.get_data()
        finally:
            parser.dispose()
            try:
                self._reader.reset_reader()
            except AttributeError:
                pass
            try:
                self._scanner.reset_scanner()
            except AttributeError:
                pass

    def get_constructor_parser(self, stream):
        # type: (StreamTextType) -> Any
        """
        the old cyaml needs special setup, and therefore the stream
        """
        if self.Parser is not CParser:
            # pure-Python parser: make sure a Reader and Scanner exist
            if self.Reader is None:
                self.Reader = ruamel.yaml.reader.Reader
            if self.Scanner is None:
                self.Scanner = ruamel.yaml.scanner.Scanner
            self.reader.stream = stream
        else:
            if self.Reader is not None:
                # a custom Reader forces the pure parser
                if self.Scanner is None:
                    self.Scanner = ruamel.yaml.scanner.Scanner
                self.Parser = ruamel.yaml.parser.Parser
                self.reader.stream = stream
            elif self.Scanner is not None:
                # a custom Scanner likewise forces the pure parser
                if self.Reader is None:
                    self.Reader = ruamel.yaml.reader.Reader
                self.Parser = ruamel.yaml.parser.Parser
                self.reader.stream = stream
            else:
                # combined C level reader>scanner>parser
                # does some calls to the resolver, e.g. BaseResolver.descend_resolver
                # if you just initialise the CParser, to much of resolver.py
                # is actually used
                rslvr = self.Resolver
                # if rslvr is ruamel.yaml.resolver.VersionedResolver:
                #     rslvr = ruamel.yaml.resolver.Resolver

                # dynamically assembled loader combining the C parser, the
                # configured Constructor and the resolver; `selfx` is the loader
                # instance, `self` the enclosing YAML object
                class XLoader(self.Parser, self.Constructor, rslvr):  # type: ignore
                    def __init__(selfx, stream, version=self.version, preserve_quotes=None):
                        # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None  # NOQA
                        CParser.__init__(selfx, stream)
                        selfx._parser = selfx._composer = selfx
                        self.Constructor.__init__(selfx, loader=selfx)
                        selfx.allow_duplicate_keys = self.allow_duplicate_keys
                        rslvr.__init__(selfx, version=version, loadumper=selfx)

                self._stream = stream
                loader = XLoader(stream)
                return loader, loader
        return self.constructor, self.parser

    def emit(self, events, stream):
        # type: (Any, Any) -> None
        """
        Emit YAML parsing events into a stream.
        If stream is None, return the produced string instead.
        """
        _, _, emitter = self.get_serializer_representer_emitter(stream, None)
        try:
            for event in events:
                emitter.emit(event)
        finally:
            try:
                emitter.dispose()
            except AttributeError:
                raise

    def serialize(self, node, stream):
        # type: (Any, Optional[StreamType]) -> Any
        """
        Serialize a representation tree into a YAML stream.
        If stream is None, return the produced string instead.
        """
        self.serialize_all([node], stream)

    def serialize_all(self, nodes, stream):
        # type: (Any, Optional[StreamType]) -> Any
        """
        Serialize a sequence of representation trees into a YAML stream.
        If stream is None, return the produced string instead.
        """
        serializer, _, emitter = self.get_serializer_representer_emitter(stream, None)
        try:
            serializer.open()
            for node in nodes:
                serializer.serialize(node)
            serializer.close()
        finally:
            try:
                emitter.dispose()
            except AttributeError:
                raise

    def dump(self, data, stream=None, *, transform=None):
        # type: (Any, Union[Path, StreamType], Any, Any) -> Any
        # Inside a `with YAML(...)` context the output stream was fixed at entry;
        # outside, a stream argument is mandatory.
        if self._context_manager:
            if not self._output:
                raise TypeError('Missing output stream while dumping from context manager')
            if transform is not None:
                raise TypeError(
                    '{}.dump() in the context manager cannot have transform keyword '
                    ''.format(self.__class__.__name__)
                )
            self._context_manager.dump(data)
        else:  # old style
            if stream is None:
                raise TypeError('Need a stream argument when not dumping from context manager')
            return self.dump_all([data], stream, transform=transform)

    def dump_all(self, documents, stream, *, transform=None):
        # type: (Any, Union[Path, StreamType], Any) -> Any
        # Dump several documents by driving a temporary context manager.
        if self._context_manager:
            raise NotImplementedError
        self._output = stream
        self._context_manager = YAMLContextManager(self, transform=transform)
        for data in documents:
            self._context_manager.dump(data)
        self._context_manager.teardown_output()
        self._output = None
        self._context_manager = None

    def Xdump_all(self, documents, stream, *, transform=None):
        # type: (Any, Any, Any) -> Any
        """
        Serialize a sequence of Python objects into a YAML stream.
        """
        if not hasattr(stream, 'write') and hasattr(stream, 'open'):
            # pathlib.Path() instance
            with stream.open('w') as fp:
                return self.dump_all(documents, fp, transform=transform)
        # The stream should have the methods `write` and possibly `flush`.
        if self.top_level_colon_align is True:
            # align colons to the widest top-level key of the first document
            tlca = max([len(str(x)) for x in documents[0]])  # type: Any
        else:
            tlca = self.top_level_colon_align
        if transform is not None:
            # buffer the output so `transform` can post-process the full text
            fstream = stream
            if self.encoding is None:
                stream = StringIO()
            else:
                stream = BytesIO()
        serializer, representer, emitter = self.get_serializer_representer_emitter(
            stream, tlca
        )
        try:
            self.serializer.open()
            for data in documents:
                try:
                    self.representer.represent(data)
                except AttributeError:
                    # nprint(dir(dumper._representer))
                    raise
            self.serializer.close()
        finally:
            try:
                self.emitter.dispose()
            except AttributeError:
                raise
                # self.dumper.dispose()  # cyaml
            # drop the cached serializer/emitter so the next dump rebuilds them
            delattr(self, '_serializer')
            delattr(self, '_emitter')
        if transform:
            val = stream.getvalue()
            if self.encoding:
                val = val.decode(self.encoding)
            if fstream is None:
                transform(val)
            else:
                fstream.write(transform(val))
        return None

    def get_serializer_representer_emitter(self, stream, tlca):
        # type: (StreamType, Any) -> Any
        # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
        if self.Emitter is not CEmitter:
            if self.Serializer is None:
                self.Serializer = ruamel.yaml.serializer.Serializer
            self.emitter.stream = stream
            self.emitter.top_level_colon_align = tlca
            if self.scalar_after_indicator is not None:
                self.emitter.scalar_after_indicator = self.scalar_after_indicator
            return self.serializer, self.representer, self.emitter
        if self.Serializer is not None:
            # cannot set serializer with CEmitter
            self.Emitter = ruamel.yaml.emitter.Emitter
            self.emitter.stream = stream
            self.emitter.top_level_colon_align = tlca
            if self.scalar_after_indicator is not None:
                self.emitter.scalar_after_indicator = self.scalar_after_indicator
            return self.serializer, self.representer, self.emitter
        # C routines
        rslvr = (
            ruamel.yaml.resolver.BaseResolver
            if 'base' in self.typ
            else ruamel.yaml.resolver.Resolver
        )

        # dynamically assembled dumper combining the C emitter, the configured
        # Representer and the resolver; `selfx` is the dumper instance
        class XDumper(CEmitter, self.Representer, rslvr):  # type: ignore
            def __init__(
                selfx,
                stream,
                default_style=None,
                default_flow_style=None,
                canonical=None,
                indent=None,
                width=None,
                allow_unicode=None,
                line_break=None,
                encoding=None,
                explicit_start=None,
                explicit_end=None,
                version=None,
                tags=None,
                block_seq_indent=None,
                top_level_colon_align=None,
                prefix_colon=None,
            ):
                # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None  # NOQA
                CEmitter.__init__(
                    selfx,
                    stream,
                    canonical=canonical,
                    indent=indent,
                    width=width,
                    encoding=encoding,
                    allow_unicode=allow_unicode,
                    line_break=line_break,
                    explicit_start=explicit_start,
                    explicit_end=explicit_end,
                    version=version,
                    tags=tags,
                )
                selfx._emitter = selfx._serializer = selfx._representer = selfx
                self.Representer.__init__(
                    selfx, default_style=default_style, default_flow_style=default_flow_style
                )
                rslvr.__init__(selfx)

        self._stream = stream
        dumper = XDumper(
            stream,
            default_style=self.default_style,
            default_flow_style=self.default_flow_style,
            canonical=self.canonical,
            indent=self.old_indent,
            width=self.width,
            allow_unicode=self.allow_unicode,
            line_break=self.line_break,
            explicit_start=self.explicit_start,
            explicit_end=self.explicit_end,
            version=self.version,
            tags=self.tags,
        )
        self._emitter = self._serializer = dumper
        return dumper, dumper, dumper

    # basic types
    def map(self, **kw):
        # type: (Any) -> Any
        # round-trip mode uses the comment-preserving container
        if 'rt' in self.typ:
            return CommentedMap(**kw)
        else:
            return dict(**kw)

    def seq(self, *args):
        # type: (Any) -> Any
        if 'rt' in self.typ:
            return CommentedSeq(*args)
        else:
            return list(*args)

    # helpers
    def official_plug_ins(self):
        # type: () -> Any
        """search for list of subdirs that are plug-ins, if __file__ is not available, e.g.
        single file installers that are not properly emulating a file-system (issue 324)
        no plug-ins will be found. If any are packaged, you know which file that are
        and you can explicitly provide it during instantiation:
            yaml = ruamel.yaml.YAML(plug_ins=['ruamel/yaml/jinja2/__plug_in__'])
        """
        try:
            bd = os.path.dirname(__file__)
        except NameError:
            return []
        gpbd = os.path.dirname(os.path.dirname(bd))
        res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
        return res

    def register_class(self, cls):
        # type:(Any) -> Any
        """
        register a class for dumping loading
        - if it has attribute yaml_tag use that to register, else use class name
        - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes
          as mapping
        """
        tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
        try:
            self.representer.add_representer(cls, cls.to_yaml)
        except AttributeError:

            def t_y(representer, data):
                # type: (Any, Any) -> Any
                return representer.represent_yaml_object(
                    tag, data, cls, flow_style=representer.default_flow_style
                )

            self.representer.add_representer(cls, t_y)
        try:
            self.constructor.add_constructor(tag, cls.from_yaml)
        except AttributeError:

            def f_y(constructor, node):
                # type: (Any, Any) -> Any
                return constructor.construct_yaml_object(node, cls)

            self.constructor.add_constructor(tag, f_y)
        return cls

    # ### context manager
    def __enter__(self):
        # type: () -> Any
        self._context_manager = YAMLContextManager(self)
        return self

    def __exit__(self, typ, value, traceback):
        # type: (Any, Any, Any) -> None
        if typ:
            nprint('typ', typ)
        self._context_manager.teardown_output()
        # self._context_manager.teardown_input()
        self._context_manager = None

    # ### backwards compatibility
    def _indent(self, mapping=None, sequence=None, offset=None):
        # type: (Any, Any, Any) -> None
        if mapping is not None:
            self.map_indent = mapping
        if sequence is not None:
            self.sequence_indent = sequence
        if offset is not None:
            self.sequence_dash_offset = offset

    @property
    def indent(self):
        # type: () -> Any
        return self._indent

    @indent.setter
    def indent(self, val):
        # type: (Any) -> None
        self.old_indent = val

    @property
    def block_seq_indent(self):
        # type: () -> Any
        return self.sequence_dash_offset

    @block_seq_indent.setter
    def block_seq_indent(self, val):
        # type: (Any) -> None
        self.sequence_dash_offset = val

    def
            # NOTE(review): whitespace-mangled region; line breaks/indentation
            # reconstructed.  This is the tail of a multivariate-Gaussian prior
            # setup method whose `def` line is above this view.
            # widen infinite bounds to mean +/- 100 sigma so pymc3.Bound accepts them
            maxsigma = np.max(mvg.sigmas, axis=0)
            for i in range(len(mvpars)):
                if np.isinf(lower[i]):
                    lower[i] = minmu[i] - 100. * maxsigma[i]
                if np.isinf(upper[i]):
                    upper[i] = maxmu[i] + 100. * maxsigma[i]
            # create a bounded MultivariateNormal distribution
            BoundedMvN = pymc3.Bound(pymc3.MvNormal, lower=lower, upper=upper)
            comp_dists = []  # list of any component modes
            for i in range(mvg.nmodes):
                comp_dists.append(BoundedMvN(
                    'comp{}'.format(i), mu=mvg.mus[i], cov=mvg.covs[i],
                    shape=len(mvpars)).distribution)
            # create a Mixture model
            setname = 'mixture{}'.format(self.multivariate_normal_num_sets)
            mix = pymc3.Mixture(
                setname, w=mvg.weights, comp_dists=comp_dists, shape=len(mvpars),
                testval=testvals)
            # record which mixture component belongs to which parameter
            for i, p in enumerate(mvpars):
                self.multivariate_normal_sets[p] = {}
                self.multivariate_normal_sets[p]['prior'] = mix[i]
                self.multivariate_normal_sets[p]['set'] = setname
                self.multivariate_normal_sets[p]['index'] = i
            self.multivariate_normal_num_sets += 1
            # return required parameter
            return self.multivariate_normal_sets[key]['prior']
        else:
            raise ValueError("Prior for '{}' is not a MultivariateGaussian".format(key))

    def run_sampler(self):
        # Configure step methods, priors and likelihood, run pymc3.sample(),
        # and repackage the trace into self.result.
        # set the step method
        pymc3, STEP_METHODS, floatX = self._import_external_sampler()
        step_methods = {m.__name__.lower(): m.__name__ for m in STEP_METHODS}
        if 'step' in self._kwargs:
            self.step_method = self._kwargs.pop('step')
            # 'step' could be a dictionary of methods for different parameters,
            # so check for this
            if self.step_method is None:
                pass
            elif isinstance(self.step_method, (dict, OrderedDict)):
                for key in self.step_method:
                    if key not in self._search_parameter_keys:
                        raise ValueError(
                            "Setting a step method for an unknown parameter "
                            "'{}'".format(key))
                    else:
                        # check if using a compound step (a list of step
                        # methods for a particular parameter)
                        if isinstance(self.step_method[key], list):
                            sms = self.step_method[key]
                        else:
                            sms = [self.step_method[key]]
                        for sm in sms:
                            if sm.lower() not in step_methods:
                                raise ValueError(
                                    "Using invalid step method "
                                    "'{}'".format(self.step_method[key]))
            else:
                # check if using a compound step (a list of step
                # methods for a particular parameter)
                if isinstance(self.step_method, list):
                    sms = self.step_method
                else:
                    sms = [self.step_method]
                for i in range(len(sms)):
                    if sms[i].lower() not in step_methods:
                        raise ValueError("Using invalid step method '{}'".format(sms[i]))
        else:
            self.step_method = None
        # initialise the PyMC3 model
        self.pymc3_model = pymc3.Model()
        # set the prior
        self.set_prior()
        # if a custom log_likelihood function requires a `sampler` argument
        # then use that log_likelihood function, with the assumption that it
        # takes in a Pymc3 Sampler, with a pymc3_model attribute, and defines
        # the likelihood within that context manager
        likeargs = infer_args_from_method(self.likelihood.log_likelihood)
        if 'sampler' in likeargs:
            self.likelihood.log_likelihood(sampler=self)
        else:
            # set the likelihood function from predefined functions
            self.set_likelihood()
        # get the step method keyword arguments
        step_kwargs = self.kwargs.pop("step_kwargs")
        if step_kwargs is not None:
            # remove all individual default step kwargs if passed together using
            # step_kwargs keywords
            for key in self.default_step_kwargs:
                self.kwargs.pop(key)
        else:
            # remove any None default step keywords and place others in step_kwargs
            step_kwargs = {}
            for key in self.default_step_kwargs:
                if self.kwargs[key] is None:
                    self.kwargs.pop(key)
                else:
                    step_kwargs[key] = self.kwargs.pop(key)
        nuts_kwargs = self.kwargs.pop("nuts_kwargs")
        if nuts_kwargs is not None:
            # remove all individual default nuts kwargs if passed together using
            # nuts_kwargs keywords
            for key in self.default_nuts_kwargs:
                self.kwargs.pop(key)
        else:
            # remove any None default nuts keywords and place others in nut_kwargs
            nuts_kwargs = {}
            for key in self.default_nuts_kwargs:
                if self.kwargs[key] is None:
                    self.kwargs.pop(key)
                else:
                    nuts_kwargs[key] = self.kwargs.pop(key)
        methodslist = []
        # set the step method
        if isinstance(self.step_method, (dict, OrderedDict)):
            # create list of step methods (any not given will default to NUTS)
            self.kwargs['step'] = []
            with self.pymc3_model:
                for key in self.step_method:
                    # check for a compound step list
                    if isinstance(self.step_method[key], list):
                        for sms in self.step_method[key]:
                            curmethod = sms.lower()
                            methodslist.append(curmethod)
                            nuts_kwargs = self._create_nuts_kwargs(
                                curmethod, key, nuts_kwargs, pymc3, step_kwargs,
                                step_methods)
                    else:
                        curmethod = self.step_method[key].lower()
                        methodslist.append(curmethod)
                        nuts_kwargs = self._create_nuts_kwargs(
                            curmethod, key, nuts_kwargs, pymc3, step_kwargs,
                            step_methods)
        else:
            with self.pymc3_model:
                # check for a compound step list
                if isinstance(self.step_method, list):
                    compound = []
                    for sms in self.step_method:
                        curmethod = sms.lower()
                        methodslist.append(curmethod)
                        args, nuts_kwargs = self._create_args_and_nuts_kwargs(
                            curmethod, nuts_kwargs, step_kwargs)
                        compound.append(pymc3.__dict__[step_methods[curmethod]](**args))
                        self.kwargs['step'] = compound
                else:
                    self.kwargs['step'] = None
                    if self.step_method is not None:
                        curmethod = self.step_method.lower()
                        methodslist.append(curmethod)
                        args, nuts_kwargs = self._create_args_and_nuts_kwargs(
                            curmethod, nuts_kwargs, step_kwargs)
                        self.kwargs['step'] = pymc3.__dict__[step_methods[curmethod]](**args)
                    else:
                        # re-add step_kwargs if no step methods are set
                        if len(step_kwargs) > 0 and StrictVersion(pymc3.__version__) < StrictVersion("3.7"):
                            self.kwargs['step_kwargs'] = step_kwargs
        # check whether only NUTS step method has been assigned
        if np.all([sm.lower() == 'nuts' for sm in methodslist]):
            # in this case we can let PyMC3 autoinitialise NUTS, so remove the step methods and re-add nuts_kwargs
            self.kwargs['step'] = None
            if len(nuts_kwargs) > 0 and StrictVersion(pymc3.__version__) < StrictVersion("3.7"):
                self.kwargs['nuts_kwargs'] = nuts_kwargs
            elif len(nuts_kwargs) > 0:
                # add NUTS kwargs to standard kwargs
                self.kwargs.update(nuts_kwargs)
        with self.pymc3_model:
            # perform the sampling
            trace = pymc3.sample(**self.kwargs)
        # flatten the trace into result.samples, skipping DeltaFunction priors
        nparams = len([key for key in self.priors.keys()
                       if not isinstance(self.priors[key], DeltaFunction)])
        nsamples = len(trace) * self.chains
        self.result.samples = np.zeros((nsamples, nparams))
        count = 0
        for key in self.priors.keys():
            if not isinstance(self.priors[key], DeltaFunction):
                # ignore DeltaFunction variables
                if not isinstance(self.priors[key], MultivariateGaussian):
                    self.result.samples[:, count] = trace[key]
                else:
                    # get multivariate Gaussian samples
                    priorset = self.multivariate_normal_sets[key]['set']
                    index = self.multivariate_normal_sets[key]['index']
                    self.result.samples[:, count] = trace[priorset][:, index]
                count += 1
        self.result.sampler_output = np.nan
        self.calculate_autocorrelation(self.result.samples)
        self.result.log_evidence = np.nan
        self.result.log_evidence_err = np.nan
        self.calc_likelihood_count()
        return self.result

    def _create_args_and_nuts_kwargs(self, curmethod, nuts_kwargs, step_kwargs):
        # Pick the per-method args dict; NUTS pulls its own kwargs out.
        if curmethod == 'nuts':
            args, nuts_kwargs = self._get_nuts_args(nuts_kwargs, step_kwargs)
        else:
            args = step_kwargs.get(curmethod, {})
        return args, nuts_kwargs

    def _create_nuts_kwargs(self, curmethod, key, nuts_kwargs, pymc3, step_kwargs, step_methods):
        # Instantiate the step method for one parameter and append it to
        # self.kwargs['step']; returns possibly-updated nuts_kwargs.
        if curmethod == 'nuts':
            args, nuts_kwargs = self._get_nuts_args(nuts_kwargs, step_kwargs)
        else:
            if step_kwargs is not None:
                args = step_kwargs.get(curmethod, {})
            else:
                args = {}
        self.kwargs['step'].append(
            pymc3.__dict__[step_methods[curmethod]](vars=[self.pymc3_priors[key]], **args))
        return nuts_kwargs

    @staticmethod
    def _get_nuts_args(nuts_kwargs, step_kwargs):
        # Resolve NUTS arguments from either nuts_kwargs or step_kwargs['nuts'].
        if nuts_kwargs is not None:
            args = nuts_kwargs
        elif step_kwargs is not None:
            args = step_kwargs.pop('nuts', {})
            # add values into nuts_kwargs
            nuts_kwargs = args
        else:
            args = {}
        return args, nuts_kwargs

    def _pymc3_version(self):
        pymc3, _, _ = self._import_external_sampler()
        return pymc3.__version__

    def set_prior(self):
        """
        Set the PyMC3 prior distributions.
        """
        self.setup_prior_mapping()
        self.pymc3_priors = OrderedDict()
        pymc3, STEP_METHODS, floatX = self._import_external_sampler()
        # initialise a dictionary of multivariate Gaussian parameters
        self.multivariate_normal_sets = {}
        self.multivariate_normal_num_sets = 0
        # set the parameter prior distributions (in the model context manager)
        with self.pymc3_model:
            for key in self.priors:
                # if the prior contains ln_prob method that takes a 'sampler' argument
                # then try using that
                lnprobargs = infer_args_from_method(self.priors[key].ln_prob)
                if 'sampler' in lnprobargs:
                    try:
                        self.pymc3_priors[key] = self.priors[key].ln_prob(sampler=self)
                    except RuntimeError:
                        # NOTE(review): a tuple is passed to RuntimeError here
                        # (comma instead of string concatenation) — looks like an
                        # upstream typo; preserved byte-for-byte.
                        raise RuntimeError(("Problem setting PyMC3 prior for ",
                                            "'{}'".format(key)))
                else:
                    # use Prior distribution name
                    distname = self.priors[key].__class__.__name__
                    if distname in self.prior_map:
                        # check if we have a predefined PyMC3 distribution
                        if 'pymc3' in self.prior_map[distname] and 'argmap' in self.prior_map[distname]:
                            # check the required arguments for the PyMC3 distribution
                            pymc3distname = self.prior_map[distname]['pymc3']
                            if pymc3distname not in pymc3.__dict__:
                                raise ValueError(
                                    "Prior '{}' is not a known PyMC3 distribution.".format(pymc3distname))
                            reqargs = infer_args_from_method(
                                pymc3.__dict__[pymc3distname].__init__)
                            # set keyword arguments
                            priorkwargs = {}
                            for (targ, parg) in self.prior_map[distname]['argmap'].items():
                                if hasattr(self.priors[key], targ):
                                    if parg in reqargs:
                                        # apply an optional per-argument transform
                                        if 'argtransform' in self.prior_map[distname]:
                                            if targ in self.prior_map[distname]['argtransform']:
                                                tfunc = self.prior_map[distname]['argtransform'][targ]
                                            else:
                                                def tfunc(x):
                                                    return x
                                        else:
                                            def tfunc(x):
                                                return x
                                        priorkwargs[parg] = tfunc(getattr(self.priors[key], targ))
                                    else:
                                        raise ValueError("Unknown argument {}".format(parg))
                                else:
                                    if parg in reqargs:
                                        priorkwargs[parg] = None
                            self.pymc3_priors[key] = pymc3.__dict__[pymc3distname](key, **priorkwargs)
                        elif 'internal' in self.prior_map[distname]:
                            self.pymc3_priors[key] = self.prior_map[distname]['internal'](key)
                        else:
                            raise ValueError(
                                "Prior '{}' is not a known distribution.".format(distname))
                    else:
                        raise ValueError(
                            "Prior '{}' is not a known distribution.".format(distname))

    def set_likelihood(self):
        """
        Convert any bilby likelihoods to PyMC3 distributions.
        """
        # create theano Op for the log likelihood if not using a predefined model
        pymc3, STEP_METHODS, floatX = self._import_external_sampler()
        theano, tt, as_op = self._import_theano()

        class LogLike(tt.Op):
            # theano Op wrapping the bilby log-likelihood (vector in, scalar out)

            itypes = [tt.dvector]
            otypes = [tt.dscalar]

            def __init__(self, parameters, loglike, priors):
                self.parameters = parameters
                self.likelihood = loglike
                self.priors = priors
                # set the fixed parameters
                for key in self.priors.keys():
                    if isinstance(self.priors[key], float):
                        self.likelihood.parameters[key] = self.priors[key]
                self.logpgrad = LogLikeGrad(self.parameters, self.likelihood, self.priors)

            def perform(self, node, inputs, outputs):
                theta, = inputs
                for i, key in enumerate(self.parameters):
                    self.likelihood.parameters[key] = theta[i]
                outputs[0][0] = np.array(self.likelihood.log_likelihood())

            def grad(self, inputs, g):
                # chain rule via the companion gradient Op
                theta, = inputs
                return [g[0] * self.logpgrad(theta)]

        # create theano Op for calculating the gradient of the log likelihood
        class LogLikeGrad(tt.Op):

            itypes = [tt.dvector]
            otypes = [tt.dvector]

            def __init__(self, parameters, loglike, priors):
                self.parameters = parameters
                self.Nparams = len(parameters)
                self.likelihood = loglike
                self.priors = priors
                # set the fixed parameters
                for key in self.priors.keys():
                    if isinstance(self.priors[key], float):
                        self.likelihood.parameters[key] = self.priors[key]

            def perform(self, node, inputs, outputs):
                theta, = inputs

                # define version of likelihood function to pass to derivative function
                def lnlike(values):
                    for i, key in enumerate(self.parameters):
                        self.likelihood.parameters[key] = values[i]
                    return self.likelihood.log_likelihood()

                # calculate gradients
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): this region was whitespace-mangled; line breaks and
# indentation below are reconstructed.

import os
import numpy as np
import logging

from ase import Atoms as aseAtoms

from nomad.units import ureg
from nomad.parsing.file_parser import Quantity, TextParser
from nomad.datamodel.metainfo.simulation.run import Run, Program
from nomad.datamodel.metainfo.simulation.method import Method
from nomad.datamodel.metainfo.simulation.system import System, Atoms
from nomad.datamodel.metainfo.simulation.calculation import Calculation
from nomad.datamodel.metainfo.workflow import Workflow, Elastic, StrainDiagrams

from .metainfo.elastic import x_elastic_section_fitting_parameters


class InfoParser(TextParser):
    # Parses the elastic-code INFO file (calculation settings summary).

    def __init__(self):
        super().__init__(None)

    def init_quantities(self):
        self._quantities = [
            Quantity(
                'order', r'\s*Order of elastic constants\s*=\s*([0-9]+)',
                repeats=False, dtype=int),
            Quantity(
                'calculation_method', r'\s*Method of calculation\s*=\s*([-a-zA-Z]+)\s*',
                repeats=False),
            Quantity(
                'code_name', r'\s*DFT code name\s*=\s*([-a-zA-Z]+)', repeats=False),
            Quantity(
                'space_group_number', r'\s*Space-group number\s*=\s*([0-9]+)',
                repeats=False),
            Quantity(
                'equilibrium_volume', r'\s*Volume of equilibrium unit cell\s*=\s*([0-9.]+)\s*',
                unit='angstrom ** 3'),
            Quantity(
                'max_strain', r'\s*Maximum Lagrangian strain\s*=\s*([0-9.]+)',
                repeats=False),
            Quantity(
                'n_strains', r'\s*Number of distorted structures\s*=\s*([0-9]+)',
                repeats=False)]


class StructureParser(TextParser):
    # Parses sgroup.out: cell parameters and atom positions.

    def __init__(self):
        super().__init__(None)

    def init_quantities(self):
        def get_sym_pos(val):
            # val is a flat whitespace list of [x, y, z, symbol] quadruples
            val = val.strip().replace('\n', '').split()
            sym = []
            pos = []
            for i in range(0, len(val), 4):
                sym.append(val[i + 3].strip())
                pos.append([float(val[j]) for j in range(i, i + 3)])
            sym_pos = dict(symbols=sym, positions=pos)
            return sym_pos

        self._quantities = [
            Quantity(
                'cellpar',
                r'a\s*b\s*c\n([\d\.\s]+)\n\s*alpha\s*beta\s*gamma\n([\d\.\s]+)\n+',
                repeats=False),
            Quantity(
                'sym_pos', r'Atom positions:\n\n([\s\d\.A-Za-z]+)\n\n',
                str_operation=get_sym_pos, repeats=False, convert=False)]


class DistortedParametersParser(TextParser):
    # Parses Distorted_Parameters: Lagrangian strain definitions.

    def __init__(self):
        super().__init__(None)

    def init_quantities(self):
        self._quantities = [Quantity(
            'deformation', r'Lagrangian strain\s*=\s*\(([eta\s\d\.,]+)\)',
            str_operation=lambda x: x.replace(',', '').split(), repeats=True,
            dtype=str)]


class FitParser(TextParser):
    # Parses polynomial-fit output files (eta/value pairs per fit order).

    def __init__(self):
        super().__init__(None)

    def init_quantities(self):
        def split_eta_val(val):
            # returns (order-label, etas, values) — values interleave eta,val
            order, val = val.strip().split(' order fit.')
            val = [float(v) for v in val.strip().split()]
            return order, val[0::2], val[1::2]

        self._quantities = [Quantity(
            'fit', r'(\w+ order fit\.\n[\d.\s\neE\-\+]+)\n', repeats=True,
            convert=False, str_operation=split_eta_val)]


class ElasticConstant2Parser(TextParser):
    # Parses second-order elastic constant output (6x6 Voigt matrices, moduli).

    def __init__(self):
        super().__init__(None)

    def init_quantities(self):
        self._quantities = [
            Quantity(
                'voigt', r'Symmetry[\s\S]+\n\s*\n([C\d\s\n\(\)\-\+\/\*]+)\n',
                shape=(6, 6), dtype=str, repeats=False),
            Quantity(
                'elastic_constant',
                r'Elastic constant[\s\S]+in GPa\s*:\s*\n\n([\-\d\.\s\n]+)\n',
                shape=(6, 6), dtype=float, unit='GPa', repeats=False),
            Quantity(
                'compliance',
                r'Elastic compliance[\s\S]+in 1/GPa\s*:\s*\n\n([\-\d\.\s\n]+)\n',
                shape=(6, 6), dtype=float, unit='1/GPa', repeats=False)]

        def str_to_modulus(val_in):
            # "<name> <value> [<unit>]" — attach GPa only when a unit is present
            val_in = val_in.strip().split()
            key = val_in[0]
            unit = val_in[-1] if len(val_in) == 3 else None
            val = float(val_in[1])
            val = val * ureg.GPa if unit is not None else val
            return key, val

        self._quantities.append(Quantity(
            'modulus', r',\s*(\w+)\s*=\s*([\-\+\w\. ]+?)\n',
            str_operation=str_to_modulus, repeats=True))
        self._quantities.append(Quantity(
            'eigenvalues',
            r'Eigenvalues of elastic constant \(stiffness\) matrix:\s*\n+([\-\d\.\n\s]+)\n',
            unit='GPa', repeats=False))


class ElasticConstant3Parser(TextParser):
    # Parses third-order elastic constant output (blocked 6x6 sub-matrices).

    def __init__(self):
        super().__init__(None)

    def init_quantities(self):
        def arrange_matrix(val):
            # rearrange the printed 12x18 layout into six 6x6 blocks
            val = val.strip().split('\n')
            matrix = [v.strip().split() for v in val if v.strip()]
            matrix = np.array(matrix).reshape((12, 18))
            arranged = []
            for i in range(2):
                for j in range(3):
                    arranged.append(
                        matrix[i * 6: (i + 1) * 6, j * 6: (j + 1) * 6].tolist())
            return arranged

        self._quantities = [
            Quantity(
                'elastic_constant',
                r'\%\s*\n([\s0-6A-L]*)[\n\s\%1-6\-ij]*([\s0-6A-L]*)\n',
                str_operation=arrange_matrix, dtype=str, repeats=False,
                convert=False),
            Quantity(
                'cijk', r'(C\d\d\d)\s*=\s*([\-\d\.]+)\s*GPa', repeats=True,
                convert=False)]


class ElasticParser:
    # Top-level parser for an elastic-constants workflow directory; combines
    # the per-file TextParsers above.  (Cut off at get_stress_fit below.)

    def __init__(self):
        self._mainfile = None
        self.logger = None
        self._deform_dirs = None           # cached list of Dst* subdirectories
        self._deform_dir_prefix = 'Dst'
        self._dirs = []
        self.info = InfoParser()
        self.structure = StructureParser()
        self.distorted_parameters = DistortedParametersParser()
        self.fit = FitParser()
        self.elastic_constant_2 = ElasticConstant2Parser()
        self.elastic_constant_3 = ElasticConstant3Parser()

    @property
    def deformation_dirs(self):
        # lazily resolve the distorted-structure directories under maindir
        if self._deform_dirs is None:
            self._deform_dirs = [
                os.path.join(self.maindir, d) for d in self._dirs
                if d.startswith(self._deform_dir_prefix)]
        return self._deform_dirs

    def get_elastic_files(self, filename, extension, dirname=None):
        # Find a file matching `filename` with `extension`; prefer an exact
        # prefix match when several candidates exist.
        dirs = self._dirs if dirname is None else os.listdir(dirname)
        dirname = self.maindir if dirname is None else dirname
        filenames = [d for d in dirs if filename in d and d.endswith(extension)]
        if len(filenames) > 1:
            filenames = [d for d in filenames if d.startswith(filename)]
        if len(filenames) == 0:
            return
        return os.path.join(dirname, filenames[0])

    def get_references_to_calculations(self):
        # Collect paths of the underlying DFT output files, one per distorted
        # structure, based on the DFT code named in the INFO file.
        def output_file(dirname):
            code = self.info.get('code_name', '').lower()
            if code == 'exciting':
                return os.path.join(dirname, 'INFO.OUT')
            elif code == 'wien':
                return os.path.join(dirname, '%s_Converged.scf' % os.path.basename(dirname))
            elif code == 'quantum':
                return os.path.join(dirname, '%s.out' % os.path.basename(dirname))
            else:
                return None

        references = []
        for deform_dir in self.deformation_dirs:
            sub_dirs = os.listdir(deform_dir)
            for sub_dir in sub_dirs:
                calc_dir = os.path.join(deform_dir, sub_dir)
                out_file = output_file(calc_dir)
                if out_file is not None and os.path.isfile(out_file):
                    references.append(out_file)
        return references

    def get_structure_info(self):
        # Build (symbols, positions, cell) from sgroup.out via ASE; returns
        # None when the file or any required quantity is missing.
        path = os.path.join(self.maindir, 'sgroup.out')
        if not os.path.isfile(path):
            return
        self.structure.mainfile = path
        cellpar = self.structure.get('cellpar', None)
        sym_pos = self.structure.get('sym_pos', {})
        sym = sym_pos.get('symbols', None)
        pos = sym_pos.get('positions', None)
        if cellpar is None or sym is None or pos is None:
            return
        structure = aseAtoms(cell=cellpar, scaled_positions=pos, symbols=sym, pbc=True)
        positions = structure.get_positions()
        positions = positions * ureg.angstrom
        cell = structure.get_cell()
        cell = cell * ureg.angstrom
        return sym, positions, cell

    def get_strain_energy(self):
        # Read per-deformation *Energy.dat files -> (strains, energies in J).
        strains, energies = [], []
        for deform_dir in self.deformation_dirs:
            filenames = [d for d in os.listdir(deform_dir) if d.endswith('Energy.dat')]
            if not filenames:
                continue
            path = os.path.join(deform_dir, filenames[-1])
            data = np.loadtxt(path).T
            strains.append(list(data[0]))
            # the peculiarity of the x_elastic_strain_diagram_values metainfo that it does
            # not have the energy unit
            energies.append((data[1] * ureg.hartree).to('J').magnitude)
        # a ragged result (unequal strain counts) is discarded wholesale
        if len(np.shape(energies)) != 2:
            strains, energies = [], []
        return strains, energies

    def get_strain_stress(self):
        # Read Lagrangian/Physical *stress.dat files per deformation directory.
        strains = {'Lagrangian-stress': [], 'Physical-stress': []}
        stresses = {'Lagrangian-stress': [], 'Physical-stress': []}
        for deform_dir in self.deformation_dirs:
            filenames = [d for d in os.listdir(deform_dir) if d.endswith('stress.dat')]
            for filename in filenames:
                path = os.path.join(deform_dir, filename)
                if not os.path.isfile(path):
                    continue
                with open(path) as f:
                    lines = f.readlines()
                strain, stress = [], []
                for line in lines:
                    val = line.strip().split()
                    # skip header lines: first column must be a decimal number
                    if not val[0].strip().replace('.', '').isdecimal():
                        continue
                    strain.append(float(val[0]))
                    stress.append([float(v) for v in val[1:7]])
                stype = filename.rstrip('.dat').split('_')[-1]
                strains[stype].append(strain)
                stresses[stype].append(stress)
        return strains, stresses

    def get_deformation_types(self):
        path = os.path.join(self.maindir, 'Distorted_Parameters')
        self.distorted_parameters.mainfile = path
        return self.distorted_parameters.get('deformation')

    def _get_fit(self, path_dir, file_ext):
        # Collect fit results (eta and value arrays, keyed by fit order) from
        # all files with `file_ext` under `path_dir`; None if nothing found.
        path_dir = os.path.join(self.maindir, path_dir)
        if not os.path.isdir(path_dir):
            return
        paths = [p for p in os.listdir(path_dir) if p.endswith(file_ext)]
        paths.sort()
        if not paths:
            return
        eta, val = {}, {}
        for path in paths:
            self.fit.mainfile = os.path.join(path_dir, path)
            fit_results = self.fit.get('fit', [])
            for result in fit_results:
                eta.setdefault(result[0], [])
                val.setdefault(result[0], [])
                eta[result[0]].append(result[1])
                val[result[0]].append(result[2])
        return eta, val

    def get_energy_fit(self):
        energy_fit = dict()
        for file_ext in ['d2E.dat', 'd3E.dat', 'ddE.dat']:
            result = self._get_fit('Energy-vs-Strain', file_ext)
            if result is None:
                continue
            result = list(result)
            result[1] = {
                key: (val * ureg.GPa).to('Pa').magnitude for key, val in result[1].items()}
            # NOTE(review): each extension overwrites the same 'd2e' entry, so
            # only the last available file wins — confirm against upstream intent.
            energy_fit['d2e'] = result
        result = self._get_fit('Energy-vs-Strain', 'CVe.dat')
        if result is not None:
            result = list(result)
            result[1] = {
                key: (val * ureg.hartree).to('J').magnitude for key, val in result[1].items()}
            energy_fit['cross-validation'] = result
        return energy_fit

    def get_stress_fit(self):
        stress_fit = dict()
        # NOTE(review): [[]] * 6 repeats the SAME list object six times — any
        # append to one slot would show up in all six; verify downstream usage.
        stress_fit['dtn'] = [[]] * 6
stress_fit['cross-validation'] = [[]] * 6 for strain_index in range(1, 7): result = self._get_fit('Stress-vs-Strain', '%d_dS.dat' % strain_index) if result is not None: result[1] = {key: val * ureg.GPa for key, val in result[1].items()} stress_fit['dtn'][strain_index - 1] = result result = self._get_fit('Stress-vs-Strain', '%d_CVe.dat' % strain_index) if result is not None: result[1] = {key: val * ureg.hartree for key, val in result[1].items()} stress_fit['cross-validation'][strain_index - 1] = result return stress_fit def get_input(self): paths = os.listdir(self.maindir) path = None order = self.info.get('order', 2) for p in paths: if 'ElaStic_' in p and p.endswith('.in') and str(order) in p: path = p break if path is None: return calc_method = self.info.get('calculation_method') eta_ec = [] fit_ec = [] def _is_number(var): try: float(var) return True except Exception: return False path = os.path.join(self.maindir, path) with open(path) as f: while True: line = f.readline() if not line: break if calc_method.lower() == 'energy': _, eta, fit = line.strip().split() eta_ec.append(float(eta)) fit_ec.append(int(fit)) elif calc_method.lower() == 'stress': val = line.strip().split() if not _is_number(val[0]): eta_ec.append([float(val[i + 1]) for i in range(6)]) else: fit_ec.append([int(val[i]) for i in range(6)]) else: pass return eta_ec, fit_ec def get_elastic_constants_order2(self): path = self.get_elastic_files('ElaStic_2nd', 'out') self.elastic_constant_2.mainfile = path matrices = dict() for key in ['voigt', 'elastic_constant', 'compliance']: val = self.elastic_constant_2.get(key, None) if val is not None: matrices[key] = val moduli = dict() for modulus in self.elastic_constant_2.get('modulus', []): moduli[modulus[0]] = modulus[1] eigenvalues = self.elastic_constant_2.get('eigenvalues') return matrices, moduli, eigenvalues def get_elastic_constants_order3(self): path = self.get_elastic_files('ElaStic_3rd.out', 'out') self.elastic_constant_3.mainfile = path 
elastic_constant_str = self.elastic_constant_3.get('elastic_constant') if elastic_constant_str is None: return cijk = dict() for element in self.elastic_constant_3.get('cijk', []): cijk[element[0]] = float(element[1]) # formulas for the coefficients coeff_A = cijk.get('C111', 0) + cijk.get('C112', 0) - cijk.get('C222', 0) coeff_B = -(cijk.get('C115', 0) + 3 * cijk.get('C125', 0)) / 2 coeff_C = (cijk.get('C114', 0) + 3 * cijk.get('C124', 0)) / 2 coeff_D = -(2 * cijk.get('C111',
################################################################################ # _ ____ ___ # # / \ / ___|_ _| # # / _ \| | | | # # / ___ \ |___ | | # # ____ _/_/ _\_\____|___| _ # # / ___|__ _| |__ | | ___ | _ \| | __ _ _ __ # # | | / _` | '_ \| |/ _ \ | |_) | |/ _` | '_ \ # # | |__| (_| | |_) | | __/ | __/| | (_| | | | | # # \____\__,_|_.__/|_|\___| |_| |_|\__,_|_| |_| # # # ################################################################################ # # # Copyright (c) 2015 Cisco Systems # # All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain # # a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # # License for the specific language governing permissions and limitations # # under the License. 
# # # ################################################################################ import sys import re import acitoolkit as ACI eTree = None Verbose_import_ = False ( XMLParser_import_none, XMLParser_import_lxml, XMLParser_import_elementtree ) = range(3) XMLParser_import_library = None try: # lxml from lxml import etree as eTree XMLParser_import_library = XMLParser_import_lxml if Verbose_import_: print("running with lxml.etree") except ImportError: try: # cElementTree from Python 2.5+ import xml.etree.cElementTree as eTree XMLParser_import_library = XMLParser_import_elementtree if Verbose_import_: print("running with cElementTree on Python 2.5+") except ImportError: try: # ElementTree from Python 2.5+ import xml.etree.ElementTree as eTree XMLParser_import_library = XMLParser_import_elementtree if Verbose_import_: print("running with ElementTree on Python 2.5+") except ImportError: try: # normal cElementTree install import cElementTree as eTree XMLParser_import_library = XMLParser_import_elementtree if Verbose_import_: print("running with cElementTree") except ImportError: try: # normal ElementTree install # noinspection PyUnresolvedReferences import elementtree.ElementTree as eTree XMLParser_import_library = XMLParser_import_elementtree if Verbose_import_: print("running with ElementTree") except ImportError: raise ImportError( "Failed to import ElementTree from any known place") def parsexml_(*args, **kwargs): """ parsexml_ :param args: :param kwargs: :return: doc """ if XMLParser_import_library == XMLParser_import_lxml and 'parser' not in kwargs: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. kwargs['parser'] = eTree.ETCompatXMLParser() doc = eTree.parse(*args, **kwargs) return doc # # Globals # Tag_pattern_ = re.compile(r'({.*})?(.*)') # # Support/utility functions. 
# def indent(level): """ Indent the text to a specified level :param level: The number of 4 space increments :return: String containing the desired number of spaces for indentation """ return level * ' ' def quote_attrib(in_str): s1 = (isinstance(in_str, basestring) and in_str or '%s' % in_str) s1 = s1.replace('&', '&amp;') s1 = s1.replace('<', '&lt;') s1 = s1.replace('>', '&gt;') if '"' in s1: if "'" in s1: s1 = '"%s"' % s1.replace('"', "&quot;") else: s1 = "'%s'" % s1 else: s1 = '"%s"' % s1 return s1 def suffix_to_int(string): return int(re.search(r'(\d+)$', string).group(1)) class CABLEPLAN: def __init__(self, version=None): self.version = version self.switches = [] self.links = [] self.schemaLocation = 'nxos-cable-plan-schema.xsd' self.nsmap = None self.namespace = 'http://www.cisco.com/cableplan/Schema2' self.prefix = 'xsi' self.prefix_url = 'http://www.w3.org/2001/XMLSchema-instance' self.networkLocation = None self.idFormat = 'hostname' @classmethod def get(cls, source): """This will get input a cable plan from 'source'. If source is a string, it will get the cable plan from XML in a file whose name is source. If it is a Session, it will read the corresponding APIC to get the cable plan. :param source: filename of type string or Session of type Session :returns: CABLEPLAN """ if isinstance(source, str): return cls._parse(source) elif isinstance(source, ACI.Session): return cls._parse_apic(source) else: raise TypeError('source must of type str or type ACI.Session. 
Instead was '+type(source)) @classmethod def _parse(cls, in_file_name): doc = parsexml_(in_file_name) # This can be enhanced to parse a string rather than just a file with the # following line: # doc = parsexml_(StringIO(inString)) root_node = doc.getroot() cable_plan = cls() cable_plan._build_xml(root_node) return cable_plan @classmethod def _parse_apic(cls, session): pod = ACI.Pod.get(session)[0] pod.populate_children(deep=True) cable_plan = cls() cable_plan._build_apic(pod) return cable_plan def get_switch(self, switch_name=None): if switch_name: for switch in self.switches: if switch.get_name() == switch_name: return switch return None else: return self.switches[:] def get_spines(self): """Will return list of switches that are spines :returns: list of CpSwitch """ switch_list = [] for switch in self.switches: if switch.is_spine(): switch_list.append(switch) return switch_list def add_switch(self, new_switch): """This will new_switch to the CABLEPLAN. If the switch already exists, it will merge the new_switch with the existing one. It will also set the parent of the switch to be the CABLEPLAN. It will return the final switch, i.e. new_switch if no merge occurred or the newly merged switch if a merge did occur. :param new_switch: switch to be added of type CpSwitch :returns: CpSwitch """ if not isinstance(new_switch, CpSwitch): raise TypeError('add_switch expects object of type CpSwitch') new_switch.set_parent(self) for switch in self.switches: if switch == new_switch: switch.merge(new_switch) del new_switch return switch self.switches.append(new_switch) return new_switch def delete_switch(self, old_switch): if old_switch in self.switches: self.switches.remove(old_switch) def exists_switch(self, switch): return switch in self.switches def add_link(self, new_link): """Will add a link to the CABLEPLAN. Duplicates will not be allow, but overlapping will be. 
:param new_link: Link to be added of type CpLink :returns: None """ if new_link not in self.links: self.links.append(new_link) def delete_link(self, link): if link in self.links: self.links.remove(link) def exists_link(self, link): return link in self.links def get_links(self, switch1=None, switch2=None): """Returns a list of links. If switch is unspecified, it will return all links. If switch is specified, it will return all of the links that are connected to switch. If both switch1 and swithc2 are specified, it will return all links that are connected between the two switches. :param switch1: optional first switch of type CpSwitch :param switch2: optional second switch of type CpSwitch :returns: list of links of type CpLink """ if switch1: link_list = [] for link in self.links: if link.is_connected(switch1, switch2): link_list.append(link) return link_list else: return self.links[:] def difference_switch(self, cp): """Will return a list of switches that are in self, but not in cp. :param cp: cable plan :returns: list of CpSwitch """ result = [] myswitches = self.get_switch() cpswitches = cp.get_switch() for switch in myswitches: if switch not in cpswitches: result.append(switch) return result # Link comparison operations and support functions def reset_accounting(self): """clears the refernce count on each link :rtype : None """ for link in self.links: link.reset_accounting() def sorted_links(self, switch1, switch2): """returns a sorted list of links between switch1 and switch2. They are sorted by specificity from most specific to least specific. The specificity is determined by which list of ports is the minimum between source and destination and which is the minimum across links. 
:rtype : list :param switch1: :param switch2: """ result = [] links = self.get_links(switch1, switch2) num_links = len(links) for i in range(num_links): best_order = 100000 best_link = None for link in links: if (link.order() < best_order) and (link not in result): best_order = link.order() best_link = link if best_order < 100000: result.append(best_link) return result def _switch_link_diff(self, cp, switch1, switch2): """ returns a list links that go between switch1 and switch2 that are in self, but not in cp :param cp: cable plan of type CP_CABLEPLAN :param switch1: first switch of type CpSwitch :param switch2: second switch of type CpSwitch :returns: list of CpLink """ my_links = self.sorted_links(switch1, switch2) other_links = cp.sorted_links(switch1, switch2) for my_link in my_links: if my_link.remaining_need() > 0: # still need to retire some link capacity for otherLink in other_links: # loop through all of the otherLinks to find matches if otherLink.remaining_avail() > 0: # there is still some capacity in otherLink CpLink.match_links(my_link, otherLink) # match-up links if my_link.remaining_need() == 0: # done with myLink, go get next one break def difference_link(self, cp): """returns a list of links that are in self, but not in cp. :param cp: cable plan of type CABLEPLAN :returns: list of CpLink """ result = [] self.reset_accounting() cp.reset_accounting() for switch1 in self.get_switch(): for switch2 in self.get_switch(): self._switch_link_diff(cp, switch1, switch2) for myLink in self.get_links(): if myLink.remaining_need() > 0:
(avg) RAD += dRAD #minSd = opt0 ( RD_, 0.1 ) #if minSd != None : # if show : # print " SD0: %.1f" % minSd sdev = toRAD slope = 0 if RD_[0][1] <= RD_[-1][1] : sdev = 10.0 else : #for i in range ( len(RD_) ) : # RD_[i][1] = RD_[i][1] - RD_[-1][1] # if log : # Y[i] = Y[i] - Y[-1] #import time #start = time.time() sdev, A, B = optSGD ( RD_, 9000, 0.2 ) sdev, A, B = optSGD ( RD_, 9000, 0.02, sdev, A, B ) sdev, A, B = optSGD ( RD_, 9000, 0.002, sdev, A, B ) #end = time.time() #if log : print " sgd - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start)) sdev = sdev if log : print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B) #start = time.time() #sdev, A, B = optGN ( RD_, 0.0001 ) #print " gn - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start)) #end = time.time() if 1 : if 0 and sdev != None : if log : print " gn1 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B) else : sdev, A, B = optSGD ( RD_, 10000, 0.01 ) if log : print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B) sdev2, A2, B2 = optGN ( RD_, 0.0001, sdev, A, B ) if sdev2 != None : sdev, A, B = sdev2, A2, B2 if log : print " gn2 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B) #else : # return 10.0 if log : r = numpy.polyfit ( X, Y, 1, rcond=None, full=False, w=None, cov=False) print " sdev: %.4f, A %.4f, B %.4f // slope: %.4f y %.4f" % (sdev, A, B, r[0], r[1]) #A, B = 0.26+0.08, -0.08 lastX = 0 for i in range ( len(RD_) ) : x, y = RD_[i] gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B lv = x * r[0] + r[1] print "%.1f\t%f\t%f\t%f" % (x, y, gv, gvRef) lastX = x if 1 : x = lastX + dRAD #while x < min(4 * sdev,50.0) : while x < min(10.0,50.0) : gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B lv = x * r[0] + r[1] print "%.1f\t\t%f\t%f" % (x, gv, gvRef) x += dRAD #return abs(sdev), abs(slope) return abs(sdev) def TimeLeftStr ( atI, totI, totSec ) : leftTime = "" 
leftSec = 0.0 iPerSec = float(atI) / totSec if iPerSec > 0 : leftSec = float ( totI - atI ) / iPerSec leftHour = numpy.floor ( leftSec / 60.0 / 60.0 ) leftSec = leftSec - leftHour * 60.0 * 60.0 leftMin = numpy.floor ( leftSec / 60.0 ) leftSec = leftSec - leftMin * 60.0 leftTime = "%.0f:%.0f:%.0f" % (leftHour, leftMin, leftSec) return leftTime return "" def optGN ( V, err, S=None, A=None, B=None ) : y0 = V[0][1] yN = V[-1][1] if S == None : S = 0.5 A = y0+yN B = yN an = numpy.array ( [A,B,S] ) #print " _ -- A %.3f B %.3f s %.3f" % (A, B, S) reg = 1.0 badMatCount = 0 for i in range ( 1000 ) : J = numpy.zeros ( [len(V),3] ) e = numpy.zeros ( [len(V),1] ) err0 = 0 j = 0 for x,y in V : expv = numpy.exp ( -0.5 * numpy.power(x/S,2) ) v = A * expv + B yd = v - y err0 += yd * yd #print "%.2f,%.2f/%.2f(%.2f)" % (x, y, v, yd), dA = expv dB = 1 dS = A*x*x*numpy.power(S,-3) * expv J[j,:] = [dA, dB, dS] e[j,0] = yd j += 1 Jt = numpy.transpose(J) try : J_ = numpy.dot ( numpy.linalg.inv ( numpy.dot(Jt,J) ), Jt ) except : #print " - bad matrix?" #print numpy.dot(Jt,J) badMatCount += 1 if badMatCount > 3 : return None, None, None from numpy import random as R an = numpy.array ( [R.random()*(y0+yN),R.random()*yN,R.random()*10.0] ) A,B,S = an[0], an[1], an[2] #print " ? 
-- A %.3f B %.3f s %.3f" % (A, B, S) reg = 1.0 continue ad = numpy.dot ( J_, e ) ann = an - ( ad[:,0] * reg ) A,B,S = ann[0], ann[1], ann[2] err1 = err3 ( V, S, A, B ) #if err1 > err0 : # reg = reg * 0.1 # if reg < err : # break #else : an = ann #print " %d -- A %.3f B %.3f s %.3f - err %.3f, reg %.5f" % (i, A, B, S, err1, reg) if abs(err0 - err1) < err : #print " - done" break i += 1 return S,A,B def optSGD ( V, N, err, S=None, A=None, B=None ) : if S == None : y0 = V[0][1] yN = V[-1][1] S = 0.5 A = y0+yN B = yN from numpy import random lastE = err3 ( V, S, A, B ) #while True : for i in range(N) : S_ = S + random.normal ( 0, err ) # mean, sigma A_ = A + random.normal ( 0, err ) # mean, sigma B_ = B + random.normal ( 0, err ) # mean, sigma e = err3 ( V, S_, A_, B_ ) #print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd) if e < lastE : S, A, B = S_, A_, B_ lastE = e return S,A,B def err3 ( XYz, sd, A, B ) : y0 = XYz[0][1] err = 0 #for x,y in XYz[1:] : for x,y in XYz : yd = y - A * numpy.exp ( -0.5 * numpy.power(x/sd,2) ) - B err += yd * yd #err /= float(len(XYz)) return err def err ( XYz, sd ) : y0 = XYz[0][1] err = 0 for x,y in XYz[1:] : yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) ) err += yd * yd #err /= float(len(XYz)) return err def opt0 ( RD_, dStep ) : sd = 0.1 y0 = RD_[0][1] minSd, minErr, N = None, 1e99, float ( len(RD_)-1 ) while sd < 10.0 : err = 0 for x,y in RD_[1:] : yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) ) err += yd * yd err /= N #print err if err < minErr : minErr = err minSd = sd sd += dStep def opt ( V, maxErr ) : dd = 1.0 sdAt = 0.1 lastE = err ( V, sdAt ) #while True : for i in range(10000) : sdAt += dd e = err ( V, sdAt ) #print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd) if e >= lastE : dd *= -0.75 if abs(dd) < maxErr : return sdAt lastE = e return sdAt def CalcQForAts ( dmap, mol, ats, sigma=0.6 ) : minD, maxD = MinMaxD (dmap) from _multiscale import get_atom_coordinates from CGLutil.AdaptiveTree import 
AdaptiveTree allAts = [at for at in mol.atoms if not at.element.name == "H"] points = get_atom_coordinates ( allAts, transformed = False ) allAtTree = AdaptiveTree ( points.tolist(), allAts, 1.0) for at in ats : at.Q = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, minD=minD, maxD=maxD ) def Calc ( chimeraPath, numProc, res=3.0, bfactorF=-1, sigma=0.6 ) : print "Calc Q scores" print " - chimera path: ", chimeraPath print " - num processors: ", numProc print " - resolution: ", res
return self.send_response(Success=result, Message=msg) if member_id == 'all': res = self._rest_inbound_socket.conference_api(room, "say '%s'" % text, async=False) else: res = self._rest_inbound_socket.conference_api(room, "saymember %s '%s'" % (member_id, text), async=False) if not res: msg = "Conference Speak Failed" result = False return self.send_response(Success=result, Message=msg) elif res.startswith('Conference %s not found' % str(room)) or res.startswith('Non-Existant'): msg = "Conference Speak %s" % str(res) result = False return self.send_response(Success=result, Message=msg) msg = "Conference Speak Executed" result = True return self.send_response(Success=result, Message=msg) @auth_protect def conference_list_members(self): """ConferenceListMembers List all or some members in a conference POST Parameters --------------- ConferenceName: conference room name MemberFilter: a list of MemberID separated by comma. If set only get the members matching the MemberIDs in list. (default empty) CallUUIDFilter: a list of CallUUID separated by comma. If set only get the channels matching the CallUUIDs in list. (default empty) MutedFilter: 'true' or 'false', only get muted members or not (default 'false') DeafFilter: 'true' or 'false', only get deaf members or not (default 'false') All Filter parameters can be mixed. 
""" self._rest_inbound_socket.log.debug("RESTAPI ConferenceListMembers with %s" \ % str(request.form.items())) msg = "" result = False room = get_post_param(request, 'ConferenceName') members = get_post_param(request, 'MemberFilter') calluuids = get_post_param(request, 'CallUUIDFilter') onlymuted = get_post_param(request, 'MutedFilter') == 'true' onlydeaf = get_post_param(request, 'DeafFilter') == 'true' if not room: msg = "ConferenceName Parameter must be present" return self.send_response(Success=result, Message=msg) if not members: members = None res = self._rest_inbound_socket.conference_api(room, "xml_list", async=False) if not res: msg = "Conference ListMembers Failed" result = False return self.send_response(Success=result, Message=msg) elif res.startswith('Conference %s not found' % str(room)): msg = "Conference ListMembers %s" % str(res) result = False return self.send_response(Success=result, Message=msg) try: member_list = self._parse_conference_xml_list(res, member_filter=members, uuid_filter=calluuids, mute_filter=onlymuted, deaf_filter=onlydeaf) msg = "Conference ListMembers Executed" result = True return self.send_response(Success=result, Message=msg, List=member_list) except Exception, e: msg = "Conference ListMembers Failed to parse result" result = False self._rest_inbound_socket.log.error("Conference ListMembers Failed -- %s" % str(e)) return self.send_response(Success=result, Message=msg) @auth_protect def conference_list(self): """ConferenceList List all conferences with members POST Parameters --------------- MemberFilter: a list of MemberID separated by comma. If set only get the members matching the MemberIDs in list. (default empty) CallUUIDFilter: a list of CallUUID separated by comma. If set only get the channels matching the CallUUIDs in list. 
(default empty) MutedFilter: 'true' or 'false', only get muted members or not (default 'false') DeafFilter: 'true' or 'false', only get deaf members or not (default 'false') All Filter parameters can be mixed. """ self._rest_inbound_socket.log.debug("RESTAPI ConferenceList with %s" \ % str(request.form.items())) msg = "" result = False members = get_post_param(request, 'MemberFilter') calluuids = get_post_param(request, 'CallUUIDFilter') onlymuted = get_post_param(request, 'MutedFilter') == 'true' onlydeaf = get_post_param(request, 'DeafFilter') == 'true' res = self._rest_inbound_socket.conference_api(room='', command="xml_list", async=False) if res: try: confs = self._parse_conference_xml_list(res, member_filter=members, uuid_filter=calluuids, mute_filter=onlymuted, deaf_filter=onlydeaf) msg = "Conference List Executed" result = True return self.send_response(Success=result, Message=msg, List=confs) except Exception, e: msg = "Conference List Failed to parse result" result = False self._rest_inbound_socket.log.error("Conference List Failed -- %s" % str(e)) return self.send_response(Success=result, Message=msg) msg = "Conference List Failed" return self.send_response(Success=result, Message=msg) @auth_protect def group_call(self): """Make Outbound Group Calls in one request Allow initiating group outbound calls via the REST API. To make a group outbound call, make an HTTP POST request to the resource URI. POST Parameters ---------------- Required Parameters - You must POST the following parameters: Delimiter: Any special character (with the exception of '/' and ',') which will be used as a delimiter for the string of parameters below. E.g. 
'<' From: The phone number to use as the caller id for the call without the leading + To: The numbers to call without the leading + Gateways: Comma separated string of gateways to dial the call out GatewayCodecs: Comma separated string of codecs for gateways GatewayTimeouts: Comma separated string of timeouts for gateways GatewayRetries: Comma separated string of retries for gateways AnswerUrl: The URL that should be requested for XML when the call connects. Similar to the URL for your inbound calls TimeLimit: Define the max time of the calls Optional Parameters - You may POST the following parameters: [CallerName]: the caller name to use for call [HangupUrl]: URL that Plivo will notify to, with POST params when calls ends [RingUrl]: URL that Plivo will notify to, with POST params when calls starts ringing [HangupOnRing]: If Set to 0 we will hangup as soon as the number ring, if set to value X we will wait X seconds when start ringing and then hang up [ExtraDialString]: Additional Originate dialstring to be executed while making the outbound call [RejectCauses]: List of reject causes for each number (comma ',' separated). If attempt to call one number failed with a reject cause matching in this parameter, there isn't more call attempts for this number. [SendDigits]: A string of keys to dial after connecting to the number. Valid digits in the string include: any digit (0-9), '#' and '*'. Very useful, if you want to connect to a company phone number, and wanted to dial extension 1234 and then the pound key, use SendDigits=1234#. Remember to URL-encode this string, since the '#' character has special meaning in a URL. To wait before sending DTMF to the extension, you can add leading 'w' or 'W' characters. Each 'w' character waits 0.5 seconds instead of sending a digit. Each 'W' character waits 1.0 seconds instead of sending a digit. You can also add the tone duration in ms by appending @[duration] after string. Eg. 
1w2w3@1000 [SendOnPreanswer]: SendDigits on early media instead of answer. [ConfirmSound]: Sound to play to called party before bridging call. [ConfirmKey]: A one key digits the called party must press to accept the call. """ self._rest_inbound_socket.log.debug("RESTAPI GroupCall with %s" \ % str(request.form.items())) msg = "" result = False request_uuid = str(uuid.uuid1()) default_reject_causes = "NO_ANSWER ORIGINATOR_CANCEL ALLOTTED_TIMEOUT NO_USER_RESPONSE CALL_REJECTED" caller_id = get_post_param(request, 'From') to_str = get_post_param(request, 'To') gw_str = get_post_param(request, 'Gateways') answer_url = get_post_param(request, 'AnswerUrl') delimiter = get_post_param(request, 'Delimiter') if delimiter in (',', '/'): msg = "This Delimiter is not allowed" return self.send_response(Success=result, Message=msg) elif not caller_id or not to_str or not gw_str or not answer_url or not delimiter: msg = "Mandatory Parameters Missing" return self.send_response(Success=result, Message=msg) elif not is_valid_url(answer_url): msg = "AnswerUrl is not Valid" return self.send_response(Success=result, Message=msg) hangup_url = get_post_param(request, 'HangupUrl') ring_url = get_post_param(request, 'RingUrl') if hangup_url and not is_valid_url(hangup_url): msg = "HangupUrl is not Valid" return self.send_response(Success=result, Message=msg) elif ring_url and not is_valid_url(ring_url): msg = "RingUrl is not Valid" return self.send_response(Success=result, Message=msg) extra_dial_string = get_post_param(request, 'ExtraDialString') gw_codecs_str = get_post_param(request, 'GatewayCodecs') gw_timeouts_str = get_post_param(request, 'GatewayTimeouts') gw_retries_str = get_post_param(request, 'GatewayRetries') send_digits_str = get_post_param(request, 'SendDigits') send_preanswer_str = get_post_param(request, 'SendOnPreanswer') time_limit_str = get_post_param(request, 'TimeLimit') hangup_on_ring_str = get_post_param(request, 'HangupOnRing') confirm_sound = get_post_param(request, 
'ConfirmSound') confirm_key = get_post_param(request, 'ConfirmKey') reject_causes = get_post_param(request, 'RejectCauses') caller_name_str = get_post_param(request, 'CallerName') accountsid = get_post_param(request, 'AccountSID') or '' if reject_causes: reject_causes = " ".join([ r.strip() for r in reject_causes.split(',') ]) to_str_list = to_str.split(delimiter) gw_str_list = gw_str.split(delimiter) gw_codecs_str_list = gw_codecs_str.split(delimiter) gw_timeouts_str_list = gw_timeouts_str.split(delimiter) gw_retries_str_list = gw_retries_str.split(delimiter) send_digits_list = send_digits_str.split(delimiter) send_preanswer_list = send_preanswer_str.split(delimiter) time_limit_list = time_limit_str.split(delimiter) hangup_on_ring_list = hangup_on_ring_str.split(delimiter) caller_name_list = caller_name_str.split(delimiter) if len(to_str_list) < 2: msg = "GroupCall should be used for at least 2 numbers" return self.send_response(Success=result, Message=msg) elif len(to_str_list) != len(gw_str_list): msg = "'To' parameter length does not match 'Gateways' Length" return self.send_response(Success=result, Message=msg) # set group group_list = [] group_options = [] # set confirm confirm_options = "" if confirm_sound: confirm_sounds = self._prepare_play_string(confirm_sound) if confirm_sounds: play_str = '!'.join(confirm_sounds) play_str = "file_string://silence_stream://1!%s" % play_str # Use confirm key if present else just play music if confirm_key: confirm_music_str = "group_confirm_file=%s" % play_str confirm_key_str = "group_confirm_key=%s" % confirm_key else: confirm_music_str = "group_confirm_file=playback %s" % play_str confirm_key_str = "group_confirm_key=exec" # Cancel the leg timeout after the call is answered confirm_cancel = "group_confirm_cancel_timeout=1" confirm_options = "%s,%s,%s,playback_delimiter=!" 
% (confirm_music_str, confirm_key_str, confirm_cancel) group_options.append(confirm_options) # build calls for to in to_str_list: try: gw = gw_str_list.pop(0) except IndexError: break try: gw_codecs = gw_codecs_str_list.pop(0) except IndexError: gw_codecs =
'boundaries': [-0.5, 0.5, 1.5], 'coordinates': 'elevation azimuth range', 'scale_factor': 1, 'add_offset': 0, '_FillValue': 0, '_Write_as_dtype': 'uint8'}, HRV: { 'units': '%', 'standard_name': 'HRV', 'long_name': 'SEVIRI High Resolution Visible Reflectance'}, VIS006: { 'units': '%', 'standard_name': 'VIS006', 'long_name': 'SEVIRI Visible 0.6 um Reflectance'}, VIS008: { 'units': '%', 'standard_name': 'VIS008', 'long_name': 'SEVIRI Visible 0.8 um Reflectance'}, IR_016: { 'units': '%', 'standard_name': 'IR_016', 'long_name': 'SEVIRI Near-Infrared 1.6 um Reflectance'}, IR_039: { 'units': 'K', 'standard_name': 'IR_039', 'long_name': 'SEVIRI Infrared 3.9 um'}, WV_062: { 'units': 'K', 'standard_name': 'WV_062', 'long_name': 'SEVIRI Water Vapour 6.2 um'}, WV_073: { 'units': 'K', 'standard_name': 'WV_073', 'long_name': 'SEVIRI Water Vapour 7.3 um'}, IR_087: { 'units': 'K', 'standard_name': 'IR_087', 'long_name': 'SEVIRI Infrared 8.7 um'}, IR_097: { 'units': 'K', 'standard_name': 'IR_097', 'long_name': 'SEVIRI Infrared 9.7 um'}, IR_108: { 'units': 'K', 'standard_name': 'IR_108', 'long_name': 'SEVIRI Infrared 10.8 um'}, IR_120: { 'units': 'K', 'standard_name': 'IR_120', 'long_name': 'SEVIRI Infrared 12 um'}, IR_134: { 'units': 'K', 'standard_name': 'IR_134', 'long_name': 'SEVIRI Infrared 13.4 um'}, CTH: { 'units': 'm', 'standard_name': 'CTH', 'long_name': 'Cloud Top Height'}, HRV_norm: { 'units': '%', 'standard_name': 'HRV_norm', 'long_name': 'Normalized SEVIRI High Resolution Visible Reflectance'}, VIS006_norm: { 'units': '%', 'standard_name': 'VIS006_norm', 'long_name': 'Normalized SEVIRI Visible 0.6 um Reflectance'}, VIS008_norm: { 'units': '%', 'standard_name': 'VIS008_norm', 'long_name': 'Normalized SEVIRI Visible 0.8 um Reflectance'}, IR_016_norm: { 'units': '%', 'standard_name': 'IR_016_norm', 'long_name': 'Normalized SEVIRI Near-Infrared 1.6 um Reflectance'}, # Grid metadata 'grid_time': { 'units': 'seconds', 'standard_name': 'time', 'long_name': 'Time of grid', 
'calendar': 'gregorian'}, 'origin_longitude': { 'long_name': 'Longitude at grid origin', 'units': 'degrees_east', 'standard_name': 'longitude', 'valid_min': -180., 'valid_max': 180.}, 'origin_latitude': { 'long_name': 'Latitude at grid origin', 'units': 'degrees_north', 'standard_name': 'latitude', 'valid_min': -90., 'valid_max': 90.}, 'origin_altitude': { 'long_name': 'Altitude at grid origin', 'units': 'm', 'standard_name': 'altitude'}, 'x': { 'standard_name': 'projection_x_coordinate', 'long_name': 'X distance on the projection plane from the origin', 'axis': 'X', 'units': 'm'}, 'y': { 'standard_name': 'projection_y_coordinate', 'long_name': 'Y distance on the projection plane from the origin', 'axis': 'Y', 'units': 'm'}, 'z': { 'standard_name': 'projection_z_coordinate', 'long_name': 'Z distance on the projection plane from the origin', 'axis': 'Z', 'units': 'm', 'positive': 'up'}, 'point_x': { 'long_name': 'Cartesian x distance of each grid point from the origin', 'units': 'meters'}, 'point_y': { 'long_name': 'Cartesian y distance of each grid point from the origin', 'units': 'meters'}, 'point_z': { 'long_name': 'Cartesian z distance of each grid point from the origin', 'positive': 'up', 'units': 'meters'}, 'point_longitude': { 'long_name': 'Longitude of each grid point', 'units': 'degrees_north'}, 'point_latitude': { 'long_name': 'Latitude of each grid point', 'units': 'degrees_east'}, 'point_altitude': { 'long_name': 'Altitude of each grid point', 'units': 'meters'}, 'radar_latitude': { 'long_name': 'Latitude of radars used to make the grid.', 'units': 'degrees_north', }, 'radar_longitude': { 'long_name': 'Longitude of radars used to make the grid.', 'units': 'degrees_east', }, 'radar_altitude': { 'long_name': 'Altitude of radars used to make the grid.', 'units': 'm', }, 'radar_time': { 'calendar': 'gregorian', 'long_name': 'Time in seconds of the volume start for each radar'}, 'radar_name': { 'long_name': 'Name of radar used to make the grid', }, } 
##############################################################################
# File specific metadata
#
# These dictionaries define metadata that is to be used only when reading in
# a given type of file.  This metadata is used in place of the
# DEFAULT_METADATA when it is available.  The main use of these variables
# is to define field specific data; it is safe to leave some/all of these
# empty if the default metadata is acceptable.
##############################################################################

# Metadata for Sigmet/IRIS files
sigmet_metadata = {}

# Metadata for NEXRAD Level II files (Archive and CDM files)
nexrad_metadata = {

    reflectivity: {
        'units': 'dBZ',
        'standard_name': 'equivalent_reflectivity_factor',
        'long_name': 'Reflectivity',
        'valid_max': 94.5,
        'valid_min': -32.0,
        'coordinates': 'elevation azimuth range'},

    velocity: {
        'units': 'meters_per_second',
        'standard_name': 'radial_velocity_of_scatterers_away_from_instrument',
        'long_name': 'Mean doppler Velocity',
        'valid_max': 95.0,
        'valid_min': -95.0,
        'coordinates': 'elevation azimuth range'},

    spectrum_width: {
        'units': 'meters_per_second',
        'standard_name': 'doppler_spectrum_width',
        'long_name': 'Spectrum Width',
        'valid_max': 63.0,
        'valid_min': -63.5,
        'coordinates': 'elevation azimuth range'},

    differential_reflectivity: {
        'units': 'dB',
        'standard_name': 'log_differential_reflectivity_hv',
        'long_name': 'log_differential_reflectivity_hv',
        'valid_max': 7.9375,
        'valid_min': -7.8750,
        'coordinates': 'elevation azimuth range'},

    differential_phase: {
        'units': 'degrees',
        'standard_name': 'differential_phase_hv',
        'long_name': 'differential_phase_hv',
        'valid_max': 360.0,
        'valid_min': 0.0,
        'coordinates': 'elevation azimuth range'},

    cross_correlation_ratio: {
        'units': 'ratio',
        'standard_name': 'cross_correlation_ratio_hv',
        'long_name': 'Cross correlation_ratio (RHOHV)',
        'valid_max': 1.0,
        'valid_min': 0.0,
        'coordinates': 'elevation azimuth range'},
}

# Metadata for NEXRAD Level 3 Products
nexrad_level3_metadata = {

    radar_estimated_rain_rate: {
        'units': 'inches',
        'standard_name': 'radar_estimated_rain_rate',
        'long_name': 'Radar estimated rain rate',
        'coordinates': 'elevation azimuth range'},

    radar_echo_classification: {
        'units': 'legend',
        'standard_name': 'radar_echo_classification',
        'long_name': 'Radar echo classification',
        # Legend mapping raw product values to hydrometeor classes.
        'options': ('0: Below Threshold (ND), '
                    '10: Biological (BI), '
                    '20: Anomalous Propagation/Ground Clutter (GC), '
                    '30: Ice Crystals (IC), '
                    '40: Dry Snow (DS), '
                    '50: Wet Snow (WS), '
                    '60: Light and/or Moderate Rain (RA), '
                    '70: Heavy Rain (HR), '
                    '80: Big Drops (rain) (BD), '
                    '90: Graupel (GR), '
                    '100: Hail, possibly with rain (HA), '
                    '140: Unknown Classification (UK), '
                    '150: Range Folded (RH)'),
        'coordinates': 'elevation azimuth range'},
}

# Metadata for CF/Radial files
cfradial_metadata = {}

# Metadata for MDV files
mdv_metadata = {}

# Metadata for RSL files
rsl_metadata = {}

# Metadata for CSU-CHILL, CHL files
chl_metadata = {}

# Map of file type -> file-specific metadata dictionary.  All keys are
# required; empty dictionaries fall back to the default metadata.
FILE_SPECIFIC_METADATA = {
    'sigmet': sigmet_metadata,
    'nexrad_archive': nexrad_metadata,
    'nexrad_cdm': nexrad_metadata,
    'nexrad_level3': nexrad_level3_metadata,
    'cfradial': cfradial_metadata,
    'mdv': mdv_metadata,
    'rsl': rsl_metadata,
    'chl': chl_metadata,
}

##############################################################################
# Field name mapping
#
# These dictionaries map file field names or data types to a radar field
# name.  They are used to populate the radar.fields dictionary during a read
# in Py-ART.  A value of None will not include that field in the radar
# object.  These can be over-ridden on a per-read basis using the
# field_mapping parameter, or by setting the file_field_names parameter
# to True.
############################################################################## # Sigmet/IRIS file field mapping # Note that multiple sigmet fields map to the same radar field, if # more than one of these fields are present the radar field will be # overwritten with the last sigmet field. sigmet_field_mapping = { # Sigmet data type :field name # (Data_type) Description 'XHDR': None, # (0) Extended Header 'DBT': total_power, # (1) Total Power 'DBZ': reflectivity, # (2) Reflectivity 'VEL': velocity, # (3) Velocity 'WIDTH': spectrum_width, # (4) Width 'ZDR': differential_reflectivity, # (5) Diff. reflectivity 'DBZC': corrected_reflectivity, # (7) Corrected reflectivity 'DBT2': total_power, # (8) Total Power 'DBZ2': reflectivity, # (9) Reflectivity 'VEL2': velocity, # (10) Velocity 'WIDTH2': spectrum_width, # (11) Width 'ZDR2': differential_reflectivity, # (12) Diff. reflectivity 'RAINRATE2': radar_estimated_rain_rate, # (13) Rainfall rate 'KDP': specific_differential_phase, # (14) KDP (diff. phase) 'KDP2': specific_differential_phase, # (15) KDP (diff. phase) 'PHIDP': differential_phase, # (16) PhiDP (diff. phase) 'VELC': corrected_velocity, # (17) Corrected velocity 'SQI': normalized_coherent_power, # (18) SQI 'RHOHV': cross_correlation_ratio, # (19) RhoHV 'RHOHV2': cross_correlation_ratio, # (20) RhoHV 'DBZC2': corrected_reflectivity, # (21) Corrected Reflec. 'VELC2': corrected_velocity, # (21) Corrected Velocity 'SQI2': normalized_coherent_power, # (23) SQI 'PHIDP2': differential_phase, # (24) PhiDP (diff. 
phase) 'LDRH': linear_depolarization_ratio_h, # (25) LDR xmt H, rcv V 'LDRH2': linear_depolarization_ratio_h, # (26) LDR xmt H, rcv V 'LDRV': linear_depolarization_ratio_v, # (27) LDR xmt V, rcv H 'LDRV2': linear_depolarization_ratio_v, # (28) LDR xmt V, rcv H 'HEIGHT': None, # (32) Height (1/10 km) 'VIL2': None, # (33) Linear Liquid 'RAW': None, # (34) Raw Data 'SHEAR': None, # (35) Wind Shear 'DIVERGE2': None, # (36) Divergence 'FLIQUID2': None, # (37) Floated liquid 'USER': None, # (38) User type 'OTHER': None, # (39) Unspecified 'DEFORM2': None, # (40) Deformation 'VVEL2': None, # (41) Vertical velocity 'HVEL2': None, # (42) Horizontal velocity 'HDIR2': None, # (43) Horiz. wind direction 'AXDIL2': None, # (44) Axis of dilation 'TIME2': None, # (45) Time in seconds 'RHOH': None, # (46) Rho, xmt H, rcv V 'RHOH2': None, # (47) Rho, xmt H, rcv V 'RHOV': None, # (48) Rho, xmt V, rcv H 'RHOV2': None, # (49) Rho, xmt V, rcv H 'PHIH': None, # (50) Phi, xmt H, rcv V 'PHIH2': None, # (51) Phi, xmt H, rcv V 'PHIV': None, # (52) Phi, xmt V, rcv H 'PHIV2': None, # (53) Phi, xmt V, rcv H 'USER2': None, # (54) User type 'HCLASS': radar_echo_classification, # (55) Hydrometeor class 'HCLASS2': radar_echo_classification, # (56) Hydrometeor class 'ZDRC': corrected_differential_reflectivity, # (57) Corrected diff. refl. 'ZDRC2': corrected_differential_reflectivity, # (58) Corrected diff. refl. 'UNKNOWN_59': None, #
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__doc__ = """
===============================================
|module_summary| Embroidermodder_wxPythonGUI.py
===============================================

Embroidermodder v2.0 wxPython GUI
"""

#-Imports.---------------------------------------------------------------------
#--Python Imports.
import os
import sys
import webbrowser

#--wxPython Imports.
# NOTE: a specific wx build can be pinned before the first wx import with
# wxversion.select(...), e.g. wxversion.select('3.0.1-msw-phoenix').
import wx
# Use pyshell/crust for interactive debugging.
import wx.py as py
# Use pure python agw aui because it gets bugfixes/is updated.
import wx.lib.agw.aui as aui

#--Local Imports.
from KeyboardShortcuts import *

#-Globals-----------------------------------------------------------------------
# Application Directories.
try:
    gFileDir = os.path.dirname(os.path.abspath(__file__))
except NameError:
    # __file__ is undefined when frozen or run interactively; fall back to
    # the script path on the command line.
    gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))

if os.path.basename(os.path.dirname(gFileDir)) == 'python':
    # Running from the experimental/python/gui dir; the app assets live
    # three levels up in embroidermodder2.
    gAppDir = os.path.join(gFileDir, '..', '..', '..', 'embroidermodder2')
elif os.path.basename(gFileDir) == 'embroidermodder2':
    # Running from the embroidermodder2 dir itself.
    gAppDir = gFileDir
else:
    # Unknown layout: fall back to the script dir so gAppDir is always
    # defined instead of raising NameError on the path joins below.
    gAppDir = gFileDir

gImgDir = os.path.join(gAppDir, 'images')
gIconDir = os.path.join(gAppDir, 'icons', 'default')
gSpiralsImgPath = os.path.join(gImgDir, 'texture-spirals.png')
gLogoSpiralsImgPath = os.path.join(gImgDir, 'logo-spirals.png')

# Is wxPython Project Phoenix(PY2/PY3)?
PHOENIX = 'phoenix' in wx.version()

# Define a translation function.
_ = wx.GetTranslation

# Python Version Info Strings.
wxPythonVersion = wx.version()
major, minor, micro, release = sys.version_info[:4]
pythonVersion = u'%d.%d.%d-%s' % (major, minor, micro, release)
pyVersionInfos = u'%s %s\n%s %s' % (_(u'Python'), pythonVersion,
                                    _(u'wxPython'), wxPythonVersion)

DIR_WX = dir(wx)
# Older wx versions haven't incorporated these frame IDs yet; backfill
# both if either one is missing.
if 'ID_ICONIZE_FRAME' not in DIR_WX or 'ID_MAXIMIZE_FRAME' not in DIR_WX:
    wx.ID_ICONIZE_FRAME = wx.NewId()
    wx.ID_MAXIMIZE_FRAME = wx.NewId()

# Application-specific command IDs.
# Help/About.
ID_DETAILS = wx.NewId()
ID_CHANGELOG = wx.NewId()
ID_TIPOFTHEDAY = wx.NewId()
# Window management.
ID_WINDOW_LAYOUT_CASCADE = wx.NewId()
ID_WINDOW_LAYOUT_TILE = wx.NewId()
ID_WINDOW_NEXT = wx.NewId()
ID_WINDOW_PREVIOUS = wx.NewId()
ID_ONLINE_WEBSITE = wx.NewId()
ID_PYVERSION_INFOS = wx.NewId()
ID_FULLSCREEN = wx.NewId()
# Tools.
ID_TOOLS_DISTANCE = wx.NewId()
ID_TOOLS_LOCATEPOINT = wx.NewId()
# Draw commands.
ID_DRAW_CIRCLE = wx.NewId()
ID_DRAW_DOLPHIN = wx.NewId()
ID_DRAW_ELLIPSE = wx.NewId()
ID_DRAW_HEART = wx.NewId()
ID_DRAW_LINE = wx.NewId()
ID_DRAW_PATH = wx.NewId()
ID_DRAW_POINT = wx.NewId()
ID_DRAW_POLYGON = wx.NewId()
ID_DRAW_POLYLINE = wx.NewId()
ID_DRAW_RECTANGLE = wx.NewId()
ID_DRAW_SINGLELINETEXT = wx.NewId()
ID_DRAW_SNOWFLAKE = wx.NewId()
ID_DRAW_STAR = wx.NewId()
# Sandbox.
ID_SANDBOX_RGB = wx.NewId()
ID_SANDBOX_SANDBOX = wx.NewId()
# Modify commands.
ID_MODIFY_DELETE = wx.NewId()
ID_MODIFY_MOVE = wx.NewId()
ID_MODIFY_ROTATE = wx.NewId()
ID_MODIFY_SCALE = wx.NewId()
# Dimension commands.
ID_DIMENSION_QUICKLEADER = wx.NewId()
# View: zoom.
ID_VIEW_ZOOM_REALTIME = wx.NewId()
ID_VIEW_ZOOM_PREVIOUS = wx.NewId()
ID_VIEW_ZOOM_WINDOW = wx.NewId()
ID_VIEW_ZOOM_DYNAMIC = wx.NewId()
ID_VIEW_ZOOM_SCALE = wx.NewId()
ID_VIEW_ZOOM_CENTER = wx.NewId()
ID_VIEW_ZOOM_IN = wx.NewId()
ID_VIEW_ZOOM_OUT = wx.NewId()
ID_VIEW_ZOOM_SELECTED = wx.NewId()
ID_VIEW_ZOOM_ALL = wx.NewId()
ID_VIEW_ZOOM_EXTENTS = wx.NewId()
# View: pan.
ID_VIEW_PAN_REALTIME = wx.NewId()
ID_VIEW_PAN_POINT = wx.NewId()
ID_VIEW_PAN_LEFT = wx.NewId()
ID_VIEW_PAN_RIGHT = wx.NewId()
ID_VIEW_PAN_UP = wx.NewId()
ID_VIEW_PAN_DOWN = wx.NewId() ID_VIEW_DAY = wx.NewId() ID_VIEW_NIGHT = wx.NewId() # Wildcards for Open/Save Dialogs. WILDCARD_SUPPORTED = str('All Supported Files (*.100;*.10o;*.art;*.bmc;*.bro;*.cnd;*.col;*.csd;*.csv;*.dem;*.dsb;*.dst;*.dsz;*.dxf;*.edr;*.emd;*.exp;*.exy;*.eys;*.fxy;*.gnc;*.gt;*.hus;*.inb;*.inf;*.jef;*.ksm;*.max;*.mit;*.new;*.ofm;*.pcd;*.pcm;*.pcs;*.pec;*.pel;*.pem;*.pes;*.phb;*.phc;*.plt;*.rgb;*.sew;*.sst;*.stx;*.svg;*.t09;*.tap;*.thr;*.u00;*.u01;*.vip;*.vp3;*.xxx;*.zsk)' '|*.100;*.10o;*.art;*.bmc;*.bro;*.cnd;*.col;*.csd;*.csv;*.dem;*.dsb;*.dst;*.dsz;*.dxf;*.edr;*.emd;*.exp;*.exy;*.eys;*.fxy;*.gnc;*.gt;*.hus;*.inb;*.inf;*.jef;*.ksm;*.max;*.mit;*.new;*.ofm;*.pcd;*.pcm;*.pcs;*.pec;*.pel;*.pem;*.pes;*.phb;*.phc;*.plt;*.rgb;*.sew;*.sst;*.stx;*.svg;*.t09;*.tap;*.thr;*.u00;*.u01;*.vip;*.vp3;*.xxx;*.zsk') WILDCARD_ALL = str('All Files (*.*)|*.*|' '100 (*.100)|*.100|' '10o (*.10o)|*.10o|' 'ART (*.art)|*.art|' 'BMC (*.bmc)|*.bmc|' 'BRO (*.bro)|*.bro|' 'CND (*.cnd)|*.cnd|' 'COL (*.col)|*.col|' 'CSD (*.csd)|*.csd|' 'CSV (*.csv)|*.csv|' 'DAT (*.dat)|*.dat|' 'DEM (*.dem)|*.dem|' 'DSB (*.dsb)|*.dsb|' 'DST (*.dst)|*.dst|' 'DSZ (*.dsz)|*.dsz|' 'DXF (*.dxf)|*.dxf|' 'EDR (*.edr)|*.edr|' 'EMD (*.emd)|*.emd|' 'EXP (*.exp)|*.exp|' 'EXY (*.exy)|*.exy|' 'EYS (*.eys)|*.eys|' 'FXY (*.fxy)|*.fxy|' 'GNC (*.gnc)|*.gnc|' 'GT (*.gt)|*.gt|' 'HUS (*.hus)|*.hus|' 'INB (*.inb)|*.inb|' 'INF (*.inf)|*.inf|' 'JEF (*.jef)|*.jef|' 'KSM (*.ksm)|*.ksm|' 'MAX (*.max)|*.max|' 'MIT (*.mit)|*.mit|' 'NEW (*.new)|*.new|' 'OFM (*.ofm)|*.ofm|' 'PCD (*.pcd)|*.pcd|' 'PCM (*.pcm)|*.pcm|' 'PCQ (*.pcq)|*.pcq|' 'PCS (*.pcs)|*.pcs|' 'PEC (*.pec)|*.pec|' 'PEL (*.pel)|*.pel|' 'PEM (*.pem)|*.pem|' 'PES (*.pes)|*.pes|' 'PHB (*.phb)|*.phb|' 'PHC (*.phc)|*.phc|' 'PLT (*.plt)|*.plt|' 'RGB (*.rgb)|*.rgb|' 'SEW (*.sew)|*.sew|' 'SHV (*.shv)|*.shv|' 'SST (*.sst)|*.sst|' 'STX (*.stx)|*.stx|' 'SVG (*.svg)|*.svg|' 'T09 (*.t09)|*.t09|' 'TAP (*.tap)|*.tap|' 'THR (*.thr)|*.thr|' 'TXT (*.txt)|*.txt|' 'U00 
(*.u00)|*.u00|' 'U01 (*.u01)|*.u01|' 'VIP (*.vip)|*.vip|' 'VP3 (*.vp3)|*.vp3|' 'XXX (*.xxx)|*.xxx|' 'ZSK (*.zsk)|*.zsk') # Define Colors. EMBROIDERBLUE1 = wx.Colour(12, 106, 176) # '#0C6AB0' EMBROIDERBLUE2 = wx.Colour(85, 196, 230) # '#55C4E6' # Embedded Python Shell. PYSHELL = 'crust' class PyShellPanel(wx.Panel): """ An Interactive Embedded Python Shell Class. """ def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.BORDER_SUNKEN, name='panel'): """Default class constructor.""" wx.Panel.__init__(self, parent, id, pos, size, style, name) self.CreatePyShell() vbSizer = wx.BoxSizer(wx.VERTICAL) try: vbSizer.Add(self.pythoncrust, 1, wx.EXPAND | wx.ALL, 5) except Exception: vbSizer.Add(self.pythonshell, 1, wx.EXPAND | wx.ALL, 5) self.SetSizer(vbSizer) def CreatePyShell(self): """Create and return the Python Shell.""" if PYSHELL == 'crust': # Use pycrust self.pythoncrust = py.crust.Crust(self, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=4194304, intro='Welcome To PyCrust %s - The Flakiest Python Shell' % (py.version.VERSION), locals=None, InterpClass=None, startupScript=None, execStartupScript=True) self.pythonshell = self.pythoncrust.shell else: # Use pyshell self.pythonshell = py.shell.Shell(self, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=4194304, intro='Welcome To PyCrust %s - The Flakiest Python Shell' % (py.version.VERSION), locals=None, InterpClass=None, startupScript=None, execStartupScript=True) global gPyShell gPyShell = gMainWin.gPyShell = self.pythonshell self.pythonshell.SetHelpText(_(u'PyShell is an embedded interactive Python shell.' 
+ '\n' + _(u'Useful for live debugging also.'))) try: return self.pythoncrust except Exception as exc: return self.pythonshell class EmbroidermodderPanel(wx.Panel): """""" def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.BORDER_SUNKEN, name='panel'): """Default class constructor.""" wx.Panel.__init__(self, parent, id, pos, size, style, name) self.Bind(wx.EVT_SIZE, self.OnSize) self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground) self.Bind(wx.EVT_PAINT, self.OnPaint) self.backgroundBitmap = wx.Bitmap(gSpiralsImgPath, wx.BITMAP_TYPE_PNG) self.backgroundLogo = wx.Bitmap(gLogoSpiralsImgPath, wx.BITMAP_TYPE_PNG) self.backgroundLogoW, self.backgroundLogoH = self.backgroundLogo.GetSize() self.displaySizeBackgroundBmp = self.MakeDisplaySizeBackgroundBitmap(self.backgroundBitmap) try: self.SetBackgroundStyle(wx.BG_STYLE_PAINT) except AttributeError: # wx28 self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) ## self.SetDoubleBuffered(True) self.SetHelpText(_(u'Welcome to Embroidermodder!') + '\n\n' + _(u'This is the startup window.') + '\n' + _(u'You can choose to start a new design file,') + '\n' + _(u'Open an existing design, or browse the ') + '\n' + _(u'Embroidermodder Help files to get a start.') + '\n\n' + _(u'Now Stitchify!') ) self.Bind(wx.EVT_LEFT_DCLICK, parent.OnOpen) def MakeDisplaySizeBackgroundBitmap(self, bitmap=None): """ Create a Display-Size Bitmap for tiling on :class:`EmbroidermodderPanel`. :param `bitmap`: a `wx.Bitmap`. 
""" width, height = wx.GetDisplaySize() if PHOENIX: bmp = wx.Bitmap(width, height) else: # Classic wxPython bmp = wx.EmptyBitmap(width, height) dc = wx.MemoryDC(bmp) bgBmp = bitmap or self.backgroundBitmap bmpW = bgBmp.GetWidth() bmpH = bgBmp.GetHeight() localDrawBitmap = dc.DrawBitmap [[localDrawBitmap(bgBmp, x, y, True) for y in range(0, height, bmpH)] for x in range(0, width, bmpW)] return dc.GetAsBitmap(wx.Rect(0, 0, width, height)) def OnSize(self, event): """ Handles the ``wx.EVT_SIZE`` event for :class:`EmbroidermodderPanel`. :param `event`: A `wx.SizeEvent`_ to be processed. """ event.Skip() self.Refresh() def OnEraseBackground(self, event): """ Handles the ``wx.EVT_ERASE_BACKGROUND`` event for :class:`EmbroidermodderPanel`. :param `event`: A `wx.EraseEvent`_ to be processed. """ event.Skip() # essentially pass; Reduce Flicker because we are using a BufferedPaintDC def OnPaint(self, event): """ Handles the ``wx.EVT_PAINT`` event for :class:`EmbroidermodderPanel`. :param `event`: A `wx.PaintEvent`_ to be processed. """ event.Skip() cSizeX, cSizeY = self.GetClientSize() pdc = wx.BufferedPaintDC(self) pdc.Clear() pdc.DrawBitmap(self.displaySizeBackgroundBmp, 0, 0) if self.backgroundLogoW > cSizeX: # Proportional Scaling an Image. # # newWidth/oldWidth = newHeight/oldHeight # # Plug in the values that you know and solve # for the new dimension that you don't know. 
# Like this: # # newWidth/oldWidth = newHeight/oldHeight # oldHeight * newWidth/oldWidth = newHeight # newHeight = 300 # # 400/800 = newHeight/600 # 600 * 400/800 = newHeight # newHeight = 300 newHeight = self.backgroundLogoH * cSizeX//self.backgroundLogoW scaledBmp = self.backgroundLogo.ConvertToImage().Scale(cSizeX, newHeight, wx.IMAGE_QUALITY_HIGH).ConvertToBitmap() pdc.DrawBitmap(scaledBmp, 0, cSizeY // 2 - scaledBmp.GetHeight() // 2) else: pdc.DrawBitmap(self.backgroundLogo, cSizeX // 2 - self.backgroundLogoW // 2, cSizeY // 2 - self.backgroundLogoH // 2) class MainAuiManager(aui.AuiManager): """Advanced User Interface Manager for Embroidermodder.""" def __init__(self, managed_window=None, agwFlags= aui.AUI_MGR_ALLOW_FLOATING # | aui.AUI_MGR_ALLOW_ACTIVE_PANE # | aui.AUI_MGR_TRANSPARENT_DRAG | aui.AUI_MGR_TRANSPARENT_HINT | aui.AUI_MGR_VENETIAN_BLINDS_HINT # | aui.AUI_MGR_RECTANGLE_HINT | aui.AUI_MGR_HINT_FADE # | aui.AUI_MGR_NO_VENETIAN_BLINDS_FADE # | aui.AUI_MGR_LIVE_RESIZE | aui.AUI_MGR_ANIMATE_FRAMES | aui.AUI_MGR_PREVIEW_MINIMIZED_PANES # | aui.AUI_MGR_AERO_DOCKING_GUIDES | aui.AUI_MGR_WHIDBEY_DOCKING_GUIDES | aui.AUI_MGR_SMOOTH_DOCKING | aui.AUI_MGR_USE_NATIVE_MINIFRAMES # | aui.AUI_MGR_AUTONB_NO_CAPTION | 0): """Default class constructor.""" aui.AuiManager.__init__(self, managed_window, agwFlags) # ... 
Tell AuiManager to manage this frame self.SetManagedWindow(managed_window) self.SetAutoNotebookStyle( agwStyle= aui.AUI_NB_DRAW_DND_TAB # | aui.AUI_NB_TOP # | aui.AUI_NB_LEFT # | aui.AUI_NB_RIGHT | aui.AUI_NB_BOTTOM | aui.AUI_NB_TAB_SPLIT | aui.AUI_NB_TAB_MOVE # | aui.AUI_NB_TAB_EXTERNAL_MOVE # | aui.AUI_NB_TAB_FIXED_WIDTH | aui.AUI_NB_SCROLL_BUTTONS | aui.AUI_NB_WINDOWLIST_BUTTON # | aui.AUI_NB_CLOSE_BUTTON | aui.AUI_NB_CLOSE_ON_ACTIVE_TAB # | aui.AUI_NB_CLOSE_ON_ALL_TABS | aui.AUI_NB_MIDDLE_CLICK_CLOSE | aui.AUI_NB_SUB_NOTEBOOK # | aui.AUI_NB_HIDE_ON_SINGLE_TAB | aui.AUI_NB_SMART_TABS # | aui.AUI_NB_USE_IMAGES_DROPDOWN # | aui.AUI_NB_CLOSE_ON_TAB_LEFT | aui.AUI_NB_TAB_FLOAT | 0) arts = [aui.AuiDefaultTabArt, aui.AuiSimpleTabArt, aui.VC71TabArt, aui.FF2TabArt, aui.VC8TabArt, aui.ChromeTabArt] self.SetAutoNotebookTabArt(arts[5]()) # Embroidermodder theme is blue, so we will do ChromeTabs self.SetAnimationStep(40.0) # 30.0 is default class EmbroidermodderMainWindow(wx.Frame): """Main Frame Window for Embroidermodder application.""" def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name='frame'): """Default class constructor.""" wx.Frame.__init__(self, parent, id, title, pos, size, style, name) global gMainWin gMainWin = self self.gImgDir = gImgDir #--- AuiManager self._mgr = MainAuiManager(self) ## self.SetDoubleBuffered(True) self.SetBackgroundColour(EMBROIDERBLUE1) self.gStatusBar = CustomStatusBar(self) self.SetStatusBar(self.gStatusBar) # Initial SetStatusText self.gStatusBar.SetStatusText('Welcome to Embroidermodder v2.0 build/rev #-----', 0) self.gMenuBar = self.CreateMenuBar() self.SetMenuBar(self.gMenuBar) self.BindEvents() self.embroidermodderPanel = EmbroidermodderPanel(self) self.pyshellPanel = PyShellPanel(self) self.pyshellPanel.Hide() vbSizer = wx.BoxSizer(wx.VERTICAL) vbSizer.Add(self.embroidermodderPanel, 1, wx.EXPAND | wx.ALL, 0) vbSizer.Add(self.pyshellPanel, 1, 
wx.EXPAND | wx.ALL, 0) self.SetSizer(vbSizer) def BindEvents(self): """Bind Events.""" self.Bind(wx.EVT_CLOSE, self.OnDestroy) self.Bind(wx.EVT_MENU, self.OnDestroy, id=wx.ID_EXIT) self.Bind(wx.EVT_MENU, self.OnOpen, id=wx.ID_OPEN) self.Bind(wx.EVT_MENU, self.OnSaveAs, id=wx.ID_SAVEAS) self.Bind(wx.EVT_MENU, self.DoIconize, id=wx.ID_ICONIZE_FRAME) self.Bind(wx.EVT_MENU, self.DoMaximize, id=wx.ID_MAXIMIZE_FRAME) self.Bind(wx.EVT_MENU, self.OnToggleFullScreenMode, id=ID_FULLSCREEN) self.Bind(wx.EVT_MENU, self.OnHelp, id=wx.ID_HELP) self.Bind(wx.EVT_MENU, self.OnAbout, id=wx.ID_ABOUT) def CreateMenuBar(self): """Create the MenuBar for the :class:`EmbroidermodderMainWindow`.""" self.gMenuBar = wx.MenuBar() self.gMenu_File = self.CreateMenu_File() self.gMenu_Edit = self.CreateMenu_Edit() self.gMenu_View = self.CreateMenu_View() self.gMenu_Tools = self.CreateMenu_Tools() self.gMenu_Draw = self.CreateMenu_Draw() self.gMenu_Sandbox = self.CreateMenu_Sandbox() self.gMenu_Modify = self.CreateMenu_Modify() self.gMenu_Dimension = self.CreateMenu_Dimension() self.gMenu_Settings = self.CreateMenu_Settings() self.gMenu_Window = self.CreateMenu_Window() self.gMenu_Help = self.CreateMenu_Help() self.gMenuBar.Append(self.gMenu_File, '&File') self.gMenuBar.Append(self.gMenu_Edit, '&Edit') self.gMenuBar.Append(self.gMenu_View, '&View') self.gMenuBar.Append(self.gMenu_Tools, '&Tools') self.gMenuBar.Append(self.gMenu_Draw, '&Draw') self.gMenuBar.Append(self.gMenu_Sandbox, '&Sandbox') self.gMenuBar.Append(self.gMenu_Modify, '&Modify') self.gMenuBar.Append(self.gMenu_Dimension, '&Dimension') self.gMenuBar.Append(self.gMenu_Settings, '&Settings') self.gMenuBar.Append(self.gMenu_Window, '&Window') self.gMenuBar.Append(self.gMenu_Help, '&Help') return self.gMenuBar def CreateMenu_File(self): """
de pesquisa', 'Survey Question added': 'Pergunta de pesquisa incluída', 'Survey Question deleted': 'Pergunta de pesquisa excluída', 'Survey Question updated': 'Pergunta de pesquisa atualizada', 'Survey Section': 'Seção da Pesquisa de Opinião', 'Survey Section Details': 'Detalhes de Seção de Pesquisa', 'Survey Section Display Name': 'Seção de pesquisa do nome de exibição', 'Survey Section added': 'Seção de Pesquisa incluída', 'Survey Section deleted': 'Seção de Pesquisa excluída', 'Survey Section updated': 'Seção de pesquisa atualizada', 'Survey Series': 'Série de Pesquisa', 'Survey Series Details': 'Série de Pesquisa Detalhes', 'Survey Series Name': 'Nome de Série de Pesquisa', 'Survey Series added': 'Série de Pesquisa incluída', 'Survey Series deleted': 'Série de Pesquisa excluída', 'Survey Series updated': 'Série de Pesquisa atualizada', 'Survey Template': 'Modelo de Pesquisa de Opinião', 'Survey Template Details': 'Definir detalhes do formulário', 'Survey Template added': 'Modelo de Pesquisa incluído', 'Survey Template deleted': 'Modelo de Pesquisa excluído', 'Survey Template updated': 'Definição de formulário actualizada', 'Survey Templates': 'Definir formulários', 'Symbology': 'Simbologia', 'Sync Conflicts': 'Conflitos de Sincronização', 'Sync History': 'Histórico de Sincronização', 'Sync Now': 'Sincronizar Agora', 'Sync Partners': 'Sincronizar parceiros', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'PARCEIROS DE Sincronização são instâncias ou PARES (SahanaEden, SahanaAgasti, Ushahidi, etc. ) que você deseja a informação de sincronização com. 
Clique no link sobre o direito de ir a página em que você pode incluir parceiros de sincronização, procurar por parceiros de sincronização e Modificá-las.', 'Sync Pools': 'Conjuntos de Sincronização', 'Sync Schedule': 'Planejamento de Sincronização', 'Sync Settings': 'Configurações de Sincronização', 'Sync process already started on': 'Processo de Sincronização já iniciado em', 'Sync process already started on ': 'Sync process already started on ', 'Synchronisation': 'Sincronização', 'Synchronization': 'Sincronização', 'Synchronization Conflicts': 'Conflitos de Sincronização', 'Synchronization Details': 'Detalhes de Sincronização', 'Synchronization History': 'Histórico de Sincronização', 'Synchronization Peers': 'Parceiros de Sincronização', 'Synchronization Settings': 'Configurações de sincronização', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Sincronização permite compartilhar dados que você tenha com outros e Atualizar seu próprio banco de dados com informações recentes de outros parceiros. 
Esta página fornece informações sobre como utilizar os recursos de sincronização de Sahana Éden', 'Synchronization not configured.': 'Sincronização não Configurada.', 'Synchronization settings updated': 'Configurações de sincronização atualizadas', 'Syncronisation History': 'Histórico De Sincronização', "System's Twitter account updated": 'DO SISTEMA Chilreiam conta ATUALIZADO', 'Tags': 'Tags', 'Take shelter in place or per <instruction>': 'Abrigue-se no local ou por', 'Task': 'Task', 'Task Details': 'Detalhes da Tarefa', 'Task List': 'Lista de tarefas', 'Task Status': 'Status da tarefa', 'Task added': 'Task Inclusa', 'Task deleted': 'Tarefa excluída', 'Task removed': 'Task removed', 'Task updated': 'Tarefa atualizada', 'Tasks': 'Tarefas', 'Team': 'Equipe', 'Team Description': 'Descrição da Equipe', 'Team Details': 'Detalhes da Equipe', 'Team ID': 'ID da Equipe', 'Team Id': 'Id da Equipe', 'Team Leader': 'Líder de Equipe', 'Team Member added': 'Membro da equipe incluído', 'Team Members': 'Membros da equipe', 'Team Name': 'Nome da equipe', 'Team Type': 'Tipo de equipe', 'Team added': 'Equipe incluída', 'Team deleted': 'Equipe excluída', 'Team updated': 'Equipa actualizada', 'Teams': 'Equipes', 'Technical testing only, all recipients disregard': 'Apenas teste técnico, todos os recipientes ignorem', 'Telecommunications': 'Telecomunicações', 'Telephone': 'Telefone', 'Telephone Details': 'Telephone Details', 'Telephony': 'Telefonia', 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.', 'Temp folder %s not writable - unable to apply theme!': 'PASTA Temp%s não gravável-impossível aplicar tema!', 'Template Name': 'Template Name', 'Template file %s not readable - unable to apply theme!': 'Modelo% arquivo não é Legível-impossível aplicar tema!', 'Templates': 'modelos', 'Term for the fifth-level within-country administrative division (e.g. 
a voting or postcode subdivision). This level is not often used.': 'Termo para o 5º nível de divisão administrativa nacional (por exemplo, uma subdivisão de código postal ou de zona de votação). Este nível não é frequentemente utilizado.', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Termo para o 4º nível de divisão administrativa nacional(por exemplo, vila, bairro ou distrito).', 'Term for the primary within-country administrative division (e.g. State or Province).': 'Prazo para a principal divisão administrativa dentro do país (i.e. Estado ou Distrito).', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Prazo para a Secundária divisão administrativa dentro do país (por exemplo, Bairro ou Município).', 'Term for the secondary within-country administrative division (e.g. District).': 'Prazo para a Secundária divisão administrativa dentro do país (i.e. Bairro).', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'Prazo para o 3ᵉʳ nível de divisão administrativa dentro do país (por exemplo, Cidade ou Municipio).', 'Term for the top-level administrative division (i.e. 
Country).': 'Prazo para a divisão administrativa de nível superior (por exemplo País).', 'Term for the top-level administrative division (typically Country).': 'Prazo para a divisão administrativa de nível superior (geralmente País).', 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.': 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.', 'Terms of Service:': 'Terms of Service:', 'Territorial Authority': 'Autoridade territoriais', 'Terrorism': 'Terrorismo', 'Tertiary Server (Optional)': 'Servidor terciário (opcional)', 'Text': 'texto', 'Text Color for Text blocks': 'Cor de texto para os blocos de texto', 'Text before each Text Field (One per line)': 'Texto antes de cada campo de texto (um por linha)', 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Obrigado para validar seu e-mail. Sua conta de usuário ainda está pendente para aprovação pelo administrador do Sistema (%s). você receberá uma notificação por e-mail quando sua conta esteja ativada.', 'Thanks for your assistance': 'Obrigado por sua ajuda', 'The': 'O', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'O "query" é uma condição como "db.table1.field1==\'value\'". Algo como "db.table1.field1 == db.table2.field2" resulta em uma junção SQL.', 'The Area which this Site is located within.': 'A área que este Site está localizado', 'The Assessments module allows field workers to send in assessments.': 'O Modulo Avaliações permite aos trabalhadores de campo que enviem avaliações.', 'The Author of this Document (optional)': 'O autor deste documento (opcional)', 'The Building Asssesments module allows building safety to be assessed, e.g. 
after an Earthquake.': 'O módulo avaliações De Construção permite a segurança edifício a ser avaliada, por exemplo, depois de um terremoto.', 'The Camp this Request is from': 'O Alojamento neste pedido é de', 'The Camp this person is checking into.': 'O Alojamento que esta pessoa está se registrando.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'O local atual do Usuário/Grupo, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "O doador(s) para este projeto. Vários valores podem ser selecionados ao manter pressionado a chave 'control'", 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'O endereço de e-mail para onde os pedidos de aprovação são enviados (normalmente seria um correio de Grupo ao invés de um individual). Se o campo estiver em branco, os pedidos são aprovados automaticamente se o domínio corresponder.', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'O Sistema de Comunicação de Incidentes permite o Público em Geral reportar incidentes & ter esses rastreados.', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'A Localização da Pessoa vem do, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). 
Enter a few characters to search from available locations.':
def findSequences(self, files):
    """
    Method to find image sequences among recorded files.

    File names are scanned for the pattern ``seq<N>-<frame>.png``. A
    sequence is tracked only if its frame numbering starts at 0 and
    continues without gaps; a gap discards the sequence. Sequences with
    fewer than 2 frames are dropped from the result.

    Args:
        files: A list of string file names.

    Returns:
        A dict mapping sequence names to their numbers of frames.
    """
    # Check there are any files
    if len(files) == 0:
        return {}

    # Go through the files
    sequences = {}
    # Track the last accepted frame number per sequence. The previous
    # implementation shared one `last_frame` across all sequences, which
    # miscounted interleaved sequences, could raise KeyError when popping
    # an untracked sequence, and could read `last_frame` before assignment.
    last_frames = {}
    for f in files:
        # Check the name fits the pattern
        match = re.search(r"(seq\d+)-(\d+)(?=\.png)", f)
        if match is None:
            continue
        name = match.group(1)
        frame = int(match.group(2))
        if name not in sequences:
            # Only start tracking a sequence at frame 0
            if frame == 0:
                sequences[name] = 1
                last_frames[name] = 0
        elif frame == last_frames[name] + 1:
            # Successive frame extends the sequence
            sequences[name] += 1
            last_frames[name] = frame
        else:
            # A frame is missing - discard the sequence
            sequences.pop(name)
            last_frames.pop(name, None)

    # Leave sequences longer than 1 frame
    return {key: val for key, val in sequences.items() if val > 1}
""" # Add names to columns NAME, N_FRAMES = range(2) # Create a tree view and set it up self.trv_sequences = QTreeView() self.trv_sequences.setRootIsDecorated(False) self.trv_sequences.setAlternatingRowColors(True) self.trv_sequences.setToolTip("Select a sequence to export.") self.trv_sequences.setSizeAdjustPolicy( self.trv_sequences.AdjustToContents) self.trv_sequences.setSizePolicy( self.trv_sequences.sizePolicy().Ignored, self.trv_sequences.sizePolicy().Minimum) self.trv_sequences.header().setResizeMode( self.trv_sequences.header().Fixed) self.trv_sequences.header().setDefaultSectionSize(120) self.trv_sequences.setSelectionMode(self.trv_sequences.SingleSelection) # Prepare a table model = QStandardItemModel(0, 2, self.trv_sequences) # Prepare a header hdr_name = QStandardItem("Sequence Name") model.setHorizontalHeaderItem(NAME, hdr_name) hdr_frames = QStandardItem("# of frames") hdr_frames.setTextAlignment(Qt.AlignmentFlag.AlignRight) model.setHorizontalHeaderItem(N_FRAMES, hdr_frames) # Add data to the table for name, frames in sequences.items(): itm_name = QStandardItem(name) itm_name.setSelectable(True) itm_name.setEditable(False) itm_frames = QStandardItem(str(frames)) itm_frames.setSelectable(True) itm_frames.setEditable(False) itm_frames.setTextAlignment(Qt.AlignmentFlag.AlignRight) model.appendRow((itm_name, itm_frames)) # Add the table to the tree view self.trv_sequences.setModel(model) # Add the tree view to the panel under the EXPORT button self.form.lyt_main.insertWidget(5, self.trv_sequences) # Make column with the numbers of frames smaller self.trv_sequences.setColumnWidth(1, 80) # Select the first item self.trv_sequences.setCurrentIndex(model.index(0, 0)) # Add horizontal layout under the tree view self.lyt_export = QHBoxLayout() self.form.lyt_main.insertLayout(6, self.lyt_export) # Add buttons for confirmation of a selected sequence and # export abortion self.btn_confirm = QPushButton("Confirm") self.btn_confirm.setStyleSheet( """ QPushButton { 
background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #0B0, stop: 1.0 #0D0); font-weight: bold; } QPushButton:hover {border-color: #0D0;} QPushButton:focus { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #0C0, stop: 1.0 #0F0); border-color: #0E0; color: #FFF; } QPushButton:pressed { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #0F0, stop: 1.0 #0C0); }""") self.btn_confirm.clicked.connect(self.exportConfirmed) self.btn_abort = QPushButton("Abort") self.btn_abort.setStyleSheet( """ QPushButton { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #B00, stop: 1.0 #D00); font-weight: bold; } QPushButton:hover {border-color: #D00;} QPushButton:focus { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #C00, stop: 1.0 #F00); border-color: #E00; color: #FFF; } QPushButton:pressed { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #F00, stop: 1.0 #C00); }""") self.btn_abort.clicked.connect(self.exportAborted) self.lyt_export.addWidget(self.btn_confirm) self.lyt_export.addWidget(self.btn_abort) # Create a function to disable deselection def mySelectionChanged(selected, deselected): if selected.isEmpty() and not deselected.isEmpty(): self.trv_sequences.selectionModel().select( deselected.first().indexes()[0], self.trv_sequences.selectionModel().Select | self.trv_sequences.selectionModel().Rows) # Connect the function as a slot for signal emitted when selection is # changed self.trv_sequences.selectionModel().selectionChanged.connect( mySelectionChanged) def exportConfirmed(self): """ Feedback method called when confirm button was clicked. Buttons are disabled, framerate is loaded from the first image chunks, selected sequence name is used to create an `image name` template and a `video name` which can be used in a FFMPEG command. Such a command is executed to convert the video, if FFMPEG is installed. Otherwise warnings are shown. 
def exportConfirmed(self):
    """
    Feedback method called when the Confirm button is clicked.

    Disables the buttons, loads the framerate from the first image of the
    selected sequence (falling back to 1/StepTime with a warning), builds
    an FFMPEG command from the sequence name and runs it to produce an
    ``.mp4``. Warnings are shown if FFMPEG is missing or the conversion
    fails. Finally the export subform is closed.
    """
    # Disable export and confirm buttons
    self.btn_confirm.setEnabled(False)
    self.btn_abort.setEnabled(False)

    # Prepare arguments for ffmpeg conversion
    selected_seq = \
        self.trv_sequences.selectionModel().selectedRows()[0].data()

    # Load framerate from the first image of the sequence
    image_name = selected_seq + "-" + (NAME_NUMBER_FORMAT % 0) + ".png"
    image_path = path.join(self.control_proxy.ExportPath, image_name)
    fps = self.readFramerateChunk(image_path)
    if fps == -1.0:
        # Chunk missing/unreadable: fall back to the current Step Time
        fps = 1 / self.control_proxy.StepTime
        QMessageBox.warning(
            None, 'Loading framerate failed',
            "Framerate was not loaded, this recorded image\n"
            + "sequence will be exported using current\n"
            + "Step Time: FPS = 1/(Step Time) = " + str(fps) + ".")

    # Quote paths so the command survives spaces in ExportPath
    image_name = '"' + path.normpath(
        path.join(self.control_proxy.ExportPath,
                  selected_seq + "-" + NAME_NUMBER_FORMAT + ".png")) + '"'
    video_name = '"' + path.normpath(
        path.join(self.control_proxy.ExportPath,
                  selected_seq + ".mp4")) + '"'

    # Prepare an ffmpeg command
    export_command = 'ffmpeg -r ' + str(fps) + ' -i ' + image_name \
        + ' -c:v libx264 -pix_fmt yuv420p ' + video_name

    # Try to run the command
    try:
        return_val = subprocess.call(export_command)
    except OSError as e:
        # Bug fix: the original compared against `os.errno.ENOENT`, but
        # `os.errno` was removed in Python 3.6+; use the errno module.
        import errno
        if e.errno == errno.ENOENT:
            QMessageBox.warning(None, 'FFMPEG not available',
                                "FFMPEG is necessary to export video.\n"
                                + "Please install it")
        else:
            QMessageBox.warning(None, 'Something failed', str(e))
    else:
        # Bug fix: the result check now lives in the `else` clause so that
        # `return_val` is never read after an OSError (previously an
        # UnboundLocalError on the FFMPEG-missing path).
        if return_val == 0:
            QMessageBox.information(None, 'Export successful!',
                                    "FFMPEG successfully converted image "
                                    + "sequence into a video.")
        else:
            QMessageBox.warning(None, 'FFMPEG unsuccessfull',
                                "FFMPEG failed to convert sequence into "
                                + "a video")

    # Close the export subform
    self.closeExportSubform()
def closeExportSubform(self):
    """
    Close the part of the dialog panel used for video exporting.

    The QTreeView with sequence names and frame counts is closed, the
    `'Confirm'` and `'Abort'` buttons are removed, and the remaining
    buttons are reset to the same state as after pressing pause.
    """
    # Tear down each export widget and detach it from its layout,
    # in the same order as before: tree view, abort, confirm.
    for widget, layout in ((self.trv_sequences, self.form.lyt_main),
                           (self.btn_abort, self.lyt_export),
                           (self.btn_confirm, self.lyt_export)):
        widget.close()
        layout.removeWidget(widget)

    # Drop the now-empty horizontal layout from the panel
    self.form.lyt_main.removeItem(self.lyt_export)

    # Restore the default button state
    self.last_clicked = "pause"
    self.setInvalidButtons()
"pyPNG"]) != 0: # FreeCAD.Console.PrintError("pyPNG installation failed.\n") # FreeCAD.Console.PrintLog("Installation successful.\n") # else: # FreeCAD.Console.PrintLog( # "Unable to import and install pyPNG.\n") def writeFramerateChunk(self, framerate, image_path): """ Method to write a framerate into a PNG image as one of its chunks. This method tries to import pyPNG first. Then it tries to install it and import again. If either import is successful, all chunks currently in the PNG image at an `image_path` are extracted. The framerate chunk is added as the second chunk, right behind IHDR. Finally the image is rewritten with new list of chunks. Args: framerate: A float specifying the framerate to be written into the image. image_path: A str containing a path to an image about to be augmented. """ # import or install pyPNG try: import png except ModuleNotFoundError: self.installPyPNGNotice() return False except Exception as e: FreeCAD.Console.PrintError( "Unexpected error occurred while importing pyPNG - " + str(e)) #
con.state_machine.go_to(self.end_state, con.spawn, context=self.context) self.result = self.result.match_output if self.result.rfind(self.connection.hostname): self.result = self.result[ :self.result.rfind(self.connection.hostname)] else: dialog = self.service_dialog(service_dialog=reply) con.spawn.sendline(command) try: self.result = dialog.process(con.spawn, context=self.context, timeout=timeout) except Exception as err: raise SubCommandFailure("Failed to execute command on shell", err) con.state_machine.go_to(self.end_state, con.spawn, context=self.context) self.result = self.result.match_output if self.result.rfind(self.connection.hostname): self.result = self.result[ :self.result.rfind(self.connection.hostname)] def post_service(self, *args, **kwargs): if self.command_list_is_empty: pass else: state_machine = self.connection.state_machine state_machine.go_to(self.end_state, self.connection.spawn, context=self.connection.context) class HAShellExec(BaseService): """ Service to execute commands on shell. Arguments: command: List of command to execute on shell dialog: Dialog which include list of Statements for additional dialogs prompted by command executed, in-case it is not in the current list. timeout : Timeout value in sec for executing command on shell. Returns: console output: on Success Raises: SubCommandFailure: on failure. Example :: .. 
code-block:: python rtr.shellexec(['uname -a']) cmd = ['uname -a', 'ls -l'] dev.shellexec(cmd) """ def __init__(self, connection, context, **kwargs): super().__init__(connection, context, **kwargs) self.start_state = 'shell' self.end_state = 'enable' self.timeout = connection.settings.EXEC_TIMEOUT self.result = None # add the keyword arguments to the object self.__dict__.update(kwargs) def call_service(self, command=[], reply=Dialog([]), timeout=None, target=None, *args, **kwargs): con = self.connection con.log.debug("+++ shellexec +++") timeout = timeout or self.timeout self.command_list_is_empty = False spawn = self.get_spawn(target) handle = self.get_handle(target) state_machine = self.get_sm(target) try: state_machine.go_to(self.start_state, spawn, context=self.context) except Exception as err: raise SubCommandFailure("Failed to Bring device to Shell State", err) # if commands is a list if isinstance(command, collections.abc.Sequence): # No command passed, just move to config mode if len(command) == 0: self.result = " " self.command_list_is_empty = True elif len(command) == 1: dialog = self.service_dialog(service_dialog=reply, handle=handle) spawn.sendline(command[0]) try: self.result = dialog.process(spawn, timeout=timeout) except Exception as err: raise SubCommandFailure( "Failed to Bring device to shell State", err) self.result = self.result.match_output if self.result.rfind(self.connection.hostname): self.result = self.result[ :self.result.rfind(self.connection.hostname)] else: dialog = self.service_dialog(service_dialog=reply, handle=handle) # Commands are list of more than one command for cmd in command: spawn.sendline(cmd) try: self.result = dialog.process(spawn, timeout=timeout) except Exception as err: raise SubCommandFailure("Configuration failed", err) state_machine.go_to(self.end_state, spawn, context=self.context) self.result = self.result.match_output if self.result.rfind(self.connection.hostname): self.result = self.result[ 
class ListVdc(BaseService):
    """Service that lists the VDCs configured on the device.

    Switches back to the default VDC if necessary, collects the VDC
    names from the ``show vdc`` output, then restores the original VDC.

    The result is a list of VDC name strings.
    """

    def __init__(self, connection, context, **kwargs):
        super().__init__(connection, context, **kwargs)
        self.start_state = 'enable'
        self.end_state = 'enable'

    def call_service(self, timeout=10, command="show vdc"):
        """Collect VDC names into self.result.

        Args:
            timeout: timeout in seconds for the show command.
            command: the command used to list the VDCs.
        """
        initial_vdc = self.connection.current_vdc
        # in case not on default vdc then switchback
        if initial_vdc:
            self.connection.switchback()
        # Bug fix: honor the caller-supplied `command` argument instead of
        # the previously hard-coded "show vdc" literal, which silently
        # ignored the parameter.
        buffer = self.connection.execute(command, timeout=timeout)
        # VDC names are the second column of lines that start with an id.
        self.result = re.findall(r'^\d+\s+(\S+)', buffer, re.MULTILINE)
        if initial_vdc:
            self.connection.switchto(initial_vdc)
code-block:: python con.switchto("vdc2") Raises: SubCommandFailure Error """ def __init__(self, connection, context, **kwargs): super().__init__(connection, context, **kwargs) self.start_state = 'enable' self.end_state = 'enable' def call_service(self, vdc_name, timeout=20, command="switchto vdc", dialog=Dialog([]), vdc_cred=None, vdc_passwd=None): con = self.connection if pyats_credentials_available and vdc_passwd: warnings.warn(message = "Argument 'vdc_passwd' " "is now deprecated and replaced by 'vdc_cred'.", category = DeprecationWarning) credentials = con.context.credentials if credentials: credential = vdc_cred or con.context.default_cred_name try: vdc_passwd = to_plaintext(credentials[credential]['password']) except KeyError: raise UniconAuthenticationError("No password found " "for credential {}.".format(credential)) else: vdc_passwd = vdc_passwd or con.context.tacacs_password command = command + " " + vdc_name # if we are already on the same vdc, just bypass the call if con.current_vdc == vdc_name: con.log.info("device already on %s" % vdc_name) return vdc_name # vdc name must be valid one. vdc_list = con.list_vdc() if vdc_name not in vdc_list: raise SubCommandFailure("invalid vdc name: %s" % vdc_name) # in case we are on a VDC already, we need to switchback first if con.current_vdc is not None and \ con.current_vdc != vdc_name: con.switchback() new_hostname = con.hostname + '-' + vdc_name if con.is_ha: con.active.state_machine.hostname = new_hostname con.standby.state_machine.hostname = new_hostname else: con.state_machine.hostname = new_hostname # prepare the dialog to be used. command_dialog = Dialog([ [patterns.secure_password, send_response, {'response': "yes"}, True, True], [patterns.admin_password, send_response, {'response': vdc_passwd}, True, True], [patterns.setup_dialog, send_response, {'response': "no"}, True, True], ]) # append the dialog which user has provided. 
class SwitchbackVdc(BaseService):
    """Service that switches from the current VDC back to the default VDC.

    No-op (with an informational log) when already on the default VDC.
    """

    def __init__(self, connection, context, **kwargs):
        super().__init__(connection, context, **kwargs)
        self.start_state = 'enable'
        self.end_state = 'enable'

    def call_service(self, timeout=10, command="switchback", dialog=Dialog()):
        """Issue the switchback command and restore the default hostname."""
        con = self.connection
        # Only meaningful when currently inside a VDC.
        if not con.current_vdc:
            con.log.info("already on default vdc")
            return
        # Restore the default (non-VDC-suffixed) hostname on the state
        # machine(s) before running the command.
        default_hostname = con.hostname
        if con.is_ha:
            con.active.state_machine.hostname = default_hostname
            con.standby.state_machine.hostname = default_hostname
        else:
            con.state_machine.hostname = default_hostname
        con.execute(command, timeout=timeout, reply=dialog)
        con.current_vdc = None
class DeleteVdc(BaseService):
    """Service that deletes a VDC by name.

    The device must not currently be on the VDC being deleted; if the
    device is on some other VDC it is temporarily switched back to the
    default VDC and restored afterwards.

    Raises:
        SubCommandFailure: if the VDC is the current one or does not exist.
    """

    def __init__(self, connection, context, **kwargs):
        super().__init__(connection, context, **kwargs)
        self.start_state = "enable"
        self.end_state = "enable"

    def call_service(self, vdc_name, command="no vdc", dialog=Dialog(),
                     timeout=90):
        """Delete the named VDC.

        Args:
            vdc_name: name of the VDC to delete.
            command: base configuration command (vdc_name is appended).
            dialog: additional user-provided Dialog statements.
            timeout: timeout in seconds for the configure call.
        """
        # Stringify the command in case it is passed in as an object.
        command = str(command) + " " + vdc_name
        initial_vdc = self.connection.current_vdc
        # Can't delete the VDC the device is currently on.
        if vdc_name == initial_vdc:
            raise SubCommandFailure(
                "can't delete vdc %s because device is already on that vdc" %
                vdc_name)
        # The device must be in the default VDC to delete a VDC.
        if initial_vdc:
            self.connection.switchback()
        # The VDC must exist before it can be deleted.
        vdc_list = self.connection.list_vdc()
        if vdc_name not in vdc_list:
            raise SubCommandFailure("vdc %s doesn't exist" % vdc_name)
        # Form the dialog: auto-answer the deletion confirmation prompt.
        command_dialog = Dialog([
            [patterns.delete_vdc_confirm, send_response, {'response': "yes"},
             True, True]
        ])
        # Add the user dialog if provided.
        command_dialog += dialog
        self.connection.configure(
            command, reply=command_dialog, timeout=timeout)
        self.result = vdc_name
        # If the device was on some VDC when this command was issued,
        # switch back to that initial VDC.
        if initial_vdc:
            self.connection.switchto(initial_vdc)
def GetChildrenByPriority(self, allow_external=True):
    """Yield active filestore children, lowest PRIORITY value first.

    Args:
        allow_external: If False, children marked EXTERNAL are skipped.

    Yields:
        Child filestore objects whose ACTIVE attribute is set.
    """
    children = sorted(self.OpenChildren(), key=lambda c: c.PRIORITY)
    for child in children:
        if child.EXTERNAL and not allow_external:
            continue
        if child.Get(child.Schema.ACTIVE):
            yield child
def AddFile(self, fd, external=True):
    """Create a new file in the file store.

    The actual addition is delegated to the contained sub-stores: each
    sub-store's AddFile() may hand back a writable file-like object, and
    the source data is then streamed into all of them in CHUNK_SIZE
    pieces. Sub-stores that support copying the blob image directly
    return nothing and are not written to here.

    Args:
        fd: An AFF4 object open for read/write.
        external: If true, attempt to add files to stores defined as
            EXTERNAL.
    """
    # Collect writable handles from every participating sub-store.
    handles = [h for h in
               (store.AddFile(fd)
                for store in
                self.GetChildrenByPriority(allow_external=external))
               if h]

    fd.Seek(0)

    # Stream the source content into every returned handle, chunk by
    # chunk, until the source is exhausted.
    while handles:
        chunk = fd.Read(self.CHUNK_SIZE)
        if not chunk:
            break
        for handle in handles:
            handle.Write(chunk)

    for handle in handles:
        handle.Close()
@classmethod
def FromSerializedString(cls, value, age=None):
    """Deserialize a FileStoreHash and parse the hash URN components.

    Args:
        value: The serialized URN string.
        age: Optional age to attach to the deserialized value.

    Returns:
        A FileStoreHash with fingerprint_type/hash_type/hash_value parsed
        from the URN.
    """
    # Bug fix: the caller-supplied `age` was previously discarded and
    # `age=None` was always forwarded to the base implementation.
    result = super(FileStoreHash, cls).FromSerializedString(value, age=age)
    result._ParseUrn()  # pylint: disable=protected-access
    return result
% str(self)) self.fingerprint_type, self.hash_type, self.hash_value = relative_path class HashFileStore(FileStore): """FileStore that stores files referenced by hash.""" PATH = rdfvalue.RDFURN("aff4:/files/hash") PRIORITY = 2 EXTERNAL = False HASH_TYPES = { "generic": ["md5", "sha1", "sha256", "SignedData"], "pecoff": ["md5", "sha1"] } def AddURN(self, sha256hash, file_urn): pass # Writing these indexes are causing production problems, and # they aren't currently used by anything. # # TODO(user): Implement a way to store this data without # melting bigtable or remove it entirely. # # index_urn = self.PATH.Add("generic/sha256").Add(sha256hash) # self._AddToIndex(index_urn, file_urn) def _AddToIndex(self, index_urn, file_urn): with data_store.DB.GetMutationPool() as mutation_pool: mutation_pool.FileHashIndexAddItem(index_urn, file_urn) @classmethod def Query(cls, index_urn, target_prefix="", limit=100, token=None): """Search the index for matches starting with target_prefix. Args: index_urn: The index to use. Should be a urn that points to the sha256 namespace. target_prefix: The prefix to match against the index. limit: Either a tuple of (start, limit) or a maximum number of results to return. token: A DB token. Returns: URNs of files which have the same data as this file - as read from the index. 
""" return data_store.DB.FileHashIndexQuery( index_urn, target_prefix, limit=limit) @classmethod def GetReferencesMD5(cls, md5_hash, target_prefix="", limit=100, token=None): urn = aff4.ROOT_URN.Add("files/hash/generic/md5").Add(str(md5_hash)) fd = aff4.FACTORY.Open(urn, token=token) return cls.Query(fd.urn, target_prefix="", limit=100, token=token) @classmethod def GetReferencesSHA1(cls, sha1_hash, target_prefix="", limit=100, token=None): urn = aff4.ROOT_URN.Add("files/hash/generic/sha1").Add(str(sha1_hash)) fd = aff4.FACTORY.Open(urn, token=token) return cls.Query(fd.urn, target_prefix="", limit=100, token=token) @classmethod def GetReferencesSHA256(cls, sha256_hash, target_prefix="", limit=100, token=None): urn = aff4.ROOT_URN.Add("files/hash/generic/sha256").Add(str(sha256_hash)) fd = aff4.FACTORY.Open(urn, token=token) return cls.Query(fd.urn, target_prefix="", limit=100, token=token) def CheckHashes(self, hashes): """Check hashes against the filestore. Blobs use the hash in the schema: aff4:/files/hash/generic/sha256/[sha256hash] Args: hashes: A list of Hash objects to check. Yields: Tuples of (RDFURN, hash object) that exist in the store. """ hash_map = {} for hsh in hashes: if hsh.HasField("sha256"): # The canonical name of the file is where we store the file hash. 
hash_map[aff4.ROOT_URN.Add("files/hash/generic/sha256").Add( str(hsh.sha256))] = hsh for metadata in aff4.FACTORY.Stat(list(hash_map)): yield metadata["urn"], hash_map[metadata["urn"]] def _GetHashers(self, hash_types): return [ getattr(hashlib, hash_type) for hash_type in hash_types if hasattr(hashlib, hash_type) ] def _HashFile(self, fd): """Look for the required hashes in the file.""" hashes = fd.Get(fd.Schema.HASH) if hashes: found_all = True for fingerprint_type, hash_types in self.HASH_TYPES.iteritems(): for hash_type in hash_types: if fingerprint_type == "pecoff": hash_type = "pecoff_%s" % hash_type if not hashes.HasField(hash_type): found_all = False break if not found_all: break if found_all: return hashes fingerprinter = fingerprint.Fingerprinter(fd) if "generic" in self.HASH_TYPES: hashers = self._GetHashers(self.HASH_TYPES["generic"]) fingerprinter.EvalGeneric(hashers=hashers) if "pecoff" in self.HASH_TYPES: hashers = self._GetHashers(self.HASH_TYPES["pecoff"]) if hashers: fingerprinter.EvalPecoff(hashers=hashers) if not hashes: hashes = fd.Schema.HASH() for result in fingerprinter.HashIt(): fingerprint_type = result["name"] for hash_type in self.HASH_TYPES[fingerprint_type]: if hash_type not in result: continue if hash_type == "SignedData": # There can be several certs in the same file. for signed_data in result[hash_type]: hashes.signed_data.Append( revision=signed_data[0], cert_type=signed_data[1], certificate=signed_data[2]) continue # Set the hashes in the original object if fingerprint_type == "generic": hashes.Set(hash_type, result[hash_type]) elif fingerprint_type == "pecoff": hashes.Set("pecoff_%s" % hash_type, result[hash_type]) else: logging.error("Unknown fingerprint_type %s.", fingerprint_type) try: fd.Set(hashes) except IOError: pass return hashes def AddFile(self, fd): """Adds a file to the hash file store. 
We take a file in the client space: aff4:/C.123123123/fs/os/usr/local/blah Hash it, update the hash in the original file if its different to the one calculated on the client, and copy the original AFF4 object to aff4:/files/hash/generic/sha256/123123123 (canonical reference) We then create symlinks for all other hash types: aff4:/files/hash/generic/sha1/345345345 aff4:/files/hash/generic/md5/456456456 aff4:/files/hash/pecoff/md5/aaaaaaaa (only for PEs) aff4:/files/hash/pecoff/sha1/bbbbbbbb (only for PEs) When present in PE files, the signing data (revision, cert_type, certificate) is added to the original object. This can't be done simply in the FileStore.Write() method with fixed hash buffer sizes because the authenticode hashes need to track hashing of different-sized regions based on the signature information. Args: fd: File open for reading. Raises: IOError: If there was an error writing the file. """ hashes = self._HashFile(fd) # The empty file is very common, we don't keep the back references for it # in the DB since it just takes up too much space. empty_hash = ("e3b0c44298fc1c149afbf4c8996fb924" "27ae41e4649b934ca495991b7852b855") if hashes.sha256 == empty_hash: return # Update the hashes field now that we have calculated them all. fd.Set(fd.Schema.HASH, hashes) fd.Flush() # sha256 is the canonical location. canonical_urn = self.PATH.Add("generic/sha256").Add(str(hashes.sha256)) if not
image.\n\n(string) --\n(string) --\n\n\n\n :type clientToken: string :param clientToken: [REQUIRED]\nThe idempotency token used to make this request idempotent.\nThis field is autopopulated if not provided.\n :rtype: dict ReturnsResponse Syntax { 'requestId': 'string', 'clientToken': 'string', 'imageBuildVersionArn': 'string' } Response Structure (dict) -- requestId (string) -- The request ID that uniquely identifies this request. clientToken (string) -- The idempotency token used to make this request idempotent. imageBuildVersionArn (string) -- The Amazon Resource Name (ARN) of the image that was created by this request. Exceptions imagebuilder.Client.exceptions.ServiceException imagebuilder.Client.exceptions.ClientException imagebuilder.Client.exceptions.ServiceUnavailableException imagebuilder.Client.exceptions.InvalidRequestException imagebuilder.Client.exceptions.IdempotentParameterMismatchException imagebuilder.Client.exceptions.ForbiddenException imagebuilder.Client.exceptions.CallRateLimitExceededException imagebuilder.Client.exceptions.ResourceInUseException :return: { 'requestId': 'string', 'clientToken': 'string', 'imageBuildVersionArn': 'string' } :returns: imagebuilder.Client.exceptions.ServiceException imagebuilder.Client.exceptions.ClientException imagebuilder.Client.exceptions.ServiceUnavailableException imagebuilder.Client.exceptions.InvalidRequestException imagebuilder.Client.exceptions.IdempotentParameterMismatchException imagebuilder.Client.exceptions.ForbiddenException imagebuilder.Client.exceptions.CallRateLimitExceededException imagebuilder.Client.exceptions.ResourceInUseException """ pass def create_image_pipeline(name=None, description=None, imageRecipeArn=None, infrastructureConfigurationArn=None, distributionConfigurationArn=None, imageTestsConfiguration=None, enhancedImageMetadataEnabled=None, schedule=None, status=None, tags=None, clientToken=None): """ Creates a new image pipeline. 
Image pipelines enable you to automate the creation and distribution of images. See also: AWS API Documentation Exceptions :example: response = client.create_image_pipeline( name='string', description='string', imageRecipeArn='string', infrastructureConfigurationArn='string', distributionConfigurationArn='string', imageTestsConfiguration={ 'imageTestsEnabled': True|False, 'timeoutMinutes': 123 }, enhancedImageMetadataEnabled=True|False, schedule={ 'scheduleExpression': 'string', 'pipelineExecutionStartCondition': 'EXPRESSION_MATCH_ONLY'|'EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE' }, status='DISABLED'|'ENABLED', tags={ 'string': 'string' }, clientToken='string' ) :type name: string :param name: [REQUIRED]\nThe name of the image pipeline.\n :type description: string :param description: The description of the image pipeline. :type imageRecipeArn: string :param imageRecipeArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the image recipe that will be used to configure images created by this image pipeline.\n :type infrastructureConfigurationArn: string :param infrastructureConfigurationArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the infrastructure configuration that will be used to build images created by this image pipeline.\n :type distributionConfigurationArn: string :param distributionConfigurationArn: The Amazon Resource Name (ARN) of the distribution configuration that will be used to configure and distribute images created by this image pipeline. 
:type imageTestsConfiguration: dict :param imageTestsConfiguration: The image test configuration of the image pipeline.\n\nimageTestsEnabled (boolean) --Defines if tests should be executed when building this image.\n\ntimeoutMinutes (integer) --The maximum time in minutes that tests are permitted to run.\n\n\n :type enhancedImageMetadataEnabled: boolean :param enhancedImageMetadataEnabled: Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default. :type schedule: dict :param schedule: The schedule of the image pipeline.\n\nscheduleExpression (string) --The expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition .\n\npipelineExecutionStartCondition (string) --The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE , EC2 Image Builder will build a new image only when there are known changes pending. When it is set to EXPRESSION_MATCH_ONLY , it will build a new image every time the CRON expression matches the current time.\n\n\n :type status: string :param status: The status of the image pipeline. :type tags: dict :param tags: The tags of the image pipeline.\n\n(string) --\n(string) --\n\n\n\n :type clientToken: string :param clientToken: [REQUIRED]\nThe idempotency token used to make this request idempotent.\nThis field is autopopulated if not provided.\n :rtype: dict ReturnsResponse Syntax { 'requestId': 'string', 'clientToken': 'string', 'imagePipelineArn': 'string' } Response Structure (dict) -- requestId (string) -- The request ID that uniquely identifies this request. clientToken (string) -- The idempotency token used to make this request idempotent. 
imagePipelineArn (string) -- The Amazon Resource Name (ARN) of the image pipeline that was created by this request. Exceptions imagebuilder.Client.exceptions.ServiceException imagebuilder.Client.exceptions.ClientException imagebuilder.Client.exceptions.ServiceUnavailableException imagebuilder.Client.exceptions.InvalidRequestException imagebuilder.Client.exceptions.IdempotentParameterMismatchException imagebuilder.Client.exceptions.ForbiddenException imagebuilder.Client.exceptions.CallRateLimitExceededException imagebuilder.Client.exceptions.ResourceInUseException imagebuilder.Client.exceptions.ResourceAlreadyExistsException :return: { 'requestId': 'string', 'clientToken': 'string', 'imagePipelineArn': 'string' } :returns: imagebuilder.Client.exceptions.ServiceException imagebuilder.Client.exceptions.ClientException imagebuilder.Client.exceptions.ServiceUnavailableException imagebuilder.Client.exceptions.InvalidRequestException imagebuilder.Client.exceptions.IdempotentParameterMismatchException imagebuilder.Client.exceptions.ForbiddenException imagebuilder.Client.exceptions.CallRateLimitExceededException imagebuilder.Client.exceptions.ResourceInUseException imagebuilder.Client.exceptions.ResourceAlreadyExistsException """ pass def create_image_recipe(name=None, description=None, semanticVersion=None, components=None, parentImage=None, blockDeviceMappings=None, tags=None, clientToken=None): """ Creates a new image recipe. Image recipes define how images are configured, tested, and assessed. 
See also: AWS API Documentation Exceptions :example: response = client.create_image_recipe( name='string', description='string', semanticVersion='string', components=[ { 'componentArn': 'string' }, ], parentImage='string', blockDeviceMappings=[ { 'deviceName': 'string', 'ebs': { 'encrypted': True|False, 'deleteOnTermination': True|False, 'iops': 123, 'kmsKeyId': 'string', 'snapshotId': 'string', 'volumeSize': 123, 'volumeType': 'standard'|'io1'|'gp2'|'sc1'|'st1' }, 'virtualName': 'string', 'noDevice': 'string' }, ], tags={ 'string': 'string' }, clientToken='<PASSWORD>' ) :type name: string :param name: [REQUIRED]\nThe name of the image recipe.\n :type description: string :param description: The description of the image recipe. :type semanticVersion: string :param semanticVersion: [REQUIRED]\nThe semantic version of the image recipe.\n :type components: list :param components: [REQUIRED]\nThe components of the image recipe.\n\n(dict) --Configuration details of the component.\n\ncomponentArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the component.\n\n\n\n\n :type parentImage: string :param parentImage: [REQUIRED]\nThe parent image of the image recipe. The value of the string can be the ARN of the parent image or an AMI ID. The format for the ARN follows this example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/2019.x.x . The ARN ends with /20xx.x.x , which communicates to EC2 Image Builder that you want to use the latest AMI created in 20xx (year). You can provide the specific version that you want to use, or you can use a wildcard in all of the fields. 
If you enter an AMI ID for the string value, you must have access to the AMI, and the AMI must be in the same Region in which you are using Image Builder.\n :type blockDeviceMappings: list :param blockDeviceMappings: The block device mappings of the image recipe.\n\n(dict) --Defines block device mappings for the instance used to configure your image.\n\ndeviceName (string) --The device to which these mappings apply.\n\nebs (dict) --Use to manage Amazon EBS-specific configuration for this mapping.\n\nencrypted (boolean) --Use to configure device encryption.\n\ndeleteOnTermination (boolean) --Use to configure delete on termination of the associated device.\n\niops (integer) --Use to configure device IOPS.\n\nkmsKeyId (string) --Use to configure the KMS key to use when encrypting the device.\n\nsnapshotId (string) --The snapshot that defines the device contents.\n\nvolumeSize (integer) --Use to override the device\'s volume size.\n\nvolumeType (string) --Use to override the device\'s volume type.\n\n\n\nvirtualName (string) --Use to manage instance ephemeral devices.\n\nnoDevice (string) --Use to remove a mapping from the parent image.\n\n\n\n\n :type tags: dict :param tags: The tags of the image recipe.\n\n(string) --\n(string) --\n\n\n\n :type clientToken: string :param clientToken: [REQUIRED]\nThe idempotency token used to make this request idempotent.\nThis field is autopopulated if not provided.\n :rtype: dict ReturnsResponse Syntax { 'requestId': 'string', 'clientToken': 'string', 'imageRecipeArn': 'string' } Response Structure (dict) -- requestId (string) -- The request ID that uniquely identifies this request. clientToken (string) -- The idempotency token used to make this request idempotent. imageRecipeArn (string) -- The Amazon Resource Name (ARN) of the image recipe that was created by this request. 
Exceptions imagebuilder.Client.exceptions.ServiceException imagebuilder.Client.exceptions.ClientException imagebuilder.Client.exceptions.ServiceUnavailableException imagebuilder.Client.exceptions.InvalidRequestException imagebuilder.Client.exceptions.IdempotentParameterMismatchException imagebuilder.Client.exceptions.ForbiddenException imagebuilder.Client.exceptions.CallRateLimitExceededException imagebuilder.Client.exceptions.InvalidVersionNumberException imagebuilder.Client.exceptions.ResourceInUseException imagebuilder.Client.exceptions.ResourceAlreadyExistsException :return: { 'requestId': 'string', 'clientToken': 'string', 'imageRecipeArn': 'string' } :returns: imagebuilder.Client.exceptions.ServiceException imagebuilder.Client.exceptions.ClientException imagebuilder.Client.exceptions.ServiceUnavailableException imagebuilder.Client.exceptions.InvalidRequestException imagebuilder.Client.exceptions.IdempotentParameterMismatchException imagebuilder.Client.exceptions.ForbiddenException imagebuilder.Client.exceptions.CallRateLimitExceededException imagebuilder.Client.exceptions.InvalidVersionNumberException imagebuilder.Client.exceptions.ResourceInUseException imagebuilder.Client.exceptions.ResourceAlreadyExistsException """ pass def create_infrastructure_configuration(name=None, description=None, instanceTypes=None, instanceProfileName=None, securityGroupIds=None, subnetId=None, logging=None, keyPair=None, terminateInstanceOnFailure=None, snsTopicArn=None, tags=None, clientToken=None): """ Creates a new infrastructure configuration. An infrastructure configuration defines the environment in which your image will be built and tested. 
See also: AWS API Documentation Exceptions :example: response = client.create_infrastructure_configuration( name='string', description='string', instanceTypes=[ 'string', ], instanceProfileName='string', securityGroupIds=[ 'string', ], subnetId='string', logging={ 's3Logs': { 's3BucketName': 'string', 's3KeyPrefix': 'string' } }, keyPair='string', terminateInstanceOnFailure=True|False, snsTopicArn='string', tags={ 'string': 'string' }, clientToken='string' ) :type name: string :param name: [REQUIRED]\nThe name of the infrastructure configuration.\n :type description: string :param description: The description of the infrastructure configuration. :type instanceTypes: list :param instanceTypes: The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.\n\n(string) --\n\n :type instanceProfileName: string :param instanceProfileName: [REQUIRED]\nThe instance profile to associate with the instance used to customize your EC2 AMI.\n :type securityGroupIds: list :param securityGroupIds: The security group IDs to associate with the instance used to customize your EC2 AMI.\n\n(string) --\n\n :type subnetId: string :param subnetId: The subnet ID in which to place the instance used to customize your EC2 AMI. :type logging: dict :param logging: The logging configuration of the infrastructure configuration.\n\ns3Logs (dict) --The Amazon S3 logging configuration.\n\ns3BucketName (string) --The Amazon S3 bucket in which to store the logs.\n\ns3KeyPrefix (string) --The Amazon S3 path in which to store the logs.\n\n\n\n\n :type keyPair: string :param keyPair: The key pair of the infrastructure configuration. This can be used to log on to and debug the instance used to create
get_example_scope( "POST", "/", [], ), mock_receive([]), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_json_parameter_missing_property( app, mock_send, mock_receive ): @app.router.post("/") async def home(item: FromJSON[Item]): ... # Note: the following example missing one of the properties # required by the constructor app.normalize_handlers() await app( get_example_scope( "POST", "/", [[b"content-type", b"application/json"], [b"content-length", b"25"]], ), mock_receive([b'{"a":"Hello","b":"World"}']), mock_send, ) assert app.response.status == 400 assert ( b"Bad Request: invalid parameter in request payload, caused by type Item " + b"or one of its subproperties." in app.response.content.body ) @pytest.mark.asyncio async def test_handler_json_response_implicit(app, mock_send, mock_receive): @app.router.get("/") async def get_item() -> Item2: return Item2("Hello", "World", "!") # Note: the following example missing one of the properties # required by the constructor app.normalize_handlers() await app( get_example_scope( "GET", "/", [], ), mock_receive(), mock_send, ) assert app.response.status == 200 data = await app.response.json() assert data == Item2("Hello", "World", "!").__dict__ @pytest.mark.asyncio async def test_handler_json_response_implicit_no_annotation( app, mock_send, mock_receive ): @app.router.get("/") async def get_item(): return Item2("Hello", "World", "!") # Note: the following example missing one of the properties # required by the constructor app.normalize_handlers() await app( get_example_scope( "GET", "/", [], ), mock_receive(), mock_send, ) assert app.response.status == 200 data = await app.response.json() assert data == Item2("Hello", "World", "!").__dict__ @pytest.mark.asyncio async def test_handler_text_response_implicit(app, mock_send, mock_receive): @app.router.get("/") async def get_lorem(): return "Lorem ipsum" # Note: the following example missing one of the properties # required by 
the constructor app.normalize_handlers() await app( get_example_scope( "GET", "/", [], ), mock_receive(), mock_send, ) assert app.response.status == 200 data = await app.response.text() assert data == "Lorem ipsum" @pytest.mark.asyncio async def test_handler_from_json_parameter_missing_property_complex_type( app, mock_send, mock_receive ): @inject() @app.router.post("/") async def home(item: FromJSON[Foo]): ... # Note: the following example missing one of the properties # required by the constructor app.normalize_handlers() await app( get_example_scope( "POST", "/", [[b"content-type", b"application/json"], [b"content-length", b"34"]], ), mock_receive([b'{"item":{"a":"Hello","b":"World"}}']), mock_send, ) assert app.response.status == 400 assert ( b"Bad Request: invalid parameter in request payload, caused by type Foo " + b"or one of its subproperties." in app.response.content.body ) @pytest.mark.asyncio async def test_handler_from_json_parameter_missing_property_array( app, mock_send, mock_receive ): @app.router.post("/") async def home(item: FromJSON[List[Item]]): ... 
# Note: the following example missing one of the properties # required by the constructor app.normalize_handlers() await app( get_example_scope( "POST", "/", [[b"content-type", b"application/json"], [b"content-length", b"25"]], ), mock_receive([b'[{"a":"Hello","b":"World"}]']), mock_send, ) assert app.response.status == 400 assert ( b"Bad Request: invalid parameter in request payload, caused by type Item" in app.response.content.body ) @pytest.mark.asyncio async def test_handler_from_json_parameter_handles_request_without_body( app, mock_send, mock_receive ): @app.router.post("/") async def home(item: FromJSON[Item]): return Response(200) app.normalize_handlers() await app( get_example_scope( "POST", "/", [], ), mock_receive([]), mock_send, ) assert app.response.status == 400 assert app.response.content.body == b"Bad Request: Expected request content" @pytest.mark.asyncio async def test_handler_from_json_list_of_objects(app, mock_send, mock_receive): @app.router.post("/") async def home(item: FromJSON[List[Item]]): assert item is not None value = item.value item_one = value[0] item_two = value[1] assert item_one.a == "Hello" assert item_one.b == "World" assert item_one.c == 10 assert item_two.a == "Lorem" assert item_two.b == "ipsum" assert item_two.c == 55 app.normalize_handlers() await app( get_example_scope( "POST", "/", [[b"content-type", b"application/json"], [b"content-length", b"32"]], ), mock_receive( [ b'[{"a":"Hello","b":"World","c":10},' + b'{"a":"Lorem","b":"ipsum","c":55}]' ] ), mock_send, ) assert app.response.status == 204 @pytest.mark.parametrize( "expected_type,request_body,expected_result", [ [ List, b'["one","two","three"]', ["one", "two", "three"], ], [ List[bytes], b'["bG9yZW0gaXBzdW0=","aGVsbG8gd29ybGQ=","VGhyZWU="]', ["lorem ipsum", "hello world", "Three"], ], [ List[str], b'["one","two","three"]', ["one", "two", "three"], ], [ List[int], b"[20, 10, 0, 200, 12, 64]", [20, 10, 0, 200, 12, 64], ], [ List[float], b"[20.4, 10.23, 0.12, 200.00, 
12.12, 64.01]", [20.4, 10.23, 0.12, 200.00, 12.12, 64.01], ], [ List[bool], b"[true, false, true, true, 1, 0]", [True, False, True, True, True, False], ], [ List[datetime], b'["2020-10-24", "2020-10-24T18:46:19.313346", "2019-05-30"]', [ datetime(2020, 10, 24), datetime(2020, 10, 24, 18, 46, 19, 313346), datetime(2019, 5, 30), ], ], [ List[date], b'["2020-10-24", "2020-10-24", "2019-05-30"]', [date(2020, 10, 24), date(2020, 10, 24), date(2019, 5, 30)], ], [ List[UUID], b'["d1e7745f-2a20-4181-8249-b7fef73592dd",' + b'"0bf95cca-3299-4cc0-93d1-ec8e041f5d3e",' + b'"d2d52dde-b174-47e0-8a8e-a07d6a559a3a"]', [ UUID("d1e7745f-2a20-4181-8249-b7fef73592dd"), UUID("0bf95cca-3299-4cc0-93d1-ec8e041f5d3e"), UUID("d2d52dde-b174-47e0-8a8e-a07d6a559a3a"), ], ], ], ) @pytest.mark.asyncio async def test_handler_from_json_list_of_primitives( expected_type, request_body, expected_result, app, mock_send, mock_receive ): @inject() @app.router.post("/") async def home(item: FromJSON[expected_type]): assert item is not None value = item.value assert value == expected_result app.normalize_handlers() await app( get_example_scope( "POST", "/", [ [b"content-type", b"application/json"], [b"content-length", str(len(request_body)).encode()], ], ), mock_receive([request_body]), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_json_dataclass(app, mock_send, mock_receive): @dataclass class Foo: foo: str ufo: bool @inject() @app.router.post("/") async def home(item: FromJSON[Foo]): assert item is not None value = item.value assert value.foo == "Hello" assert value.ufo is True app.normalize_handlers() await app( get_example_scope( "POST", "/", [[b"content-type", b"application/json"], [b"content-length", b"32"]], ), mock_receive([b'{"foo":"Hello","ufo":true}']), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_json_parameter_default(app, mock_send, mock_receive): @app.router.post("/") async def home(item: 
FromJSON[Item] = FromJSON(Item("One", "Two", 3))): assert item is not None value = item.value assert value.a == "One" assert value.b == "Two" assert value.c == 3 app.normalize_handlers() await app( get_example_scope( "POST", "/", [], ), mock_receive(), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_json_parameter_default_override( app, mock_send, mock_receive ): @app.router.post("/") async def home(item: FromJSON[Item] = FromJSON(Item("One", "Two", 3))): assert item is not None value = item.value assert value.a == "Hello" assert value.b == "World" assert value.c == 10 app.normalize_handlers() await app( get_example_scope( "POST", "/", [[b"content-type", b"application/json"], [b"content-length", b"32"]], ), mock_receive([b'{"a":"Hello","b":"World","c":10}']), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_json_parameter_implicit(app, mock_send, mock_receive): @app.router.post("/") async def home(item: Item): assert item is not None assert item.a == "Hello" assert item.b == "World" assert item.c == 10 app.normalize_handlers() await app( get_example_scope( "POST", "/", [[b"content-type", b"application/json"], [b"content-length", b"32"]], ), mock_receive([b'{"a":"Hello","b":"World","c":10}']), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_json_parameter_implicit_default( app, mock_send, mock_receive ): @app.router.post("/") async def home(item: Item = Item(1, 2, 3)): assert item is not None assert item.a == 1 assert item.b == 2 assert item.c == 3 app.normalize_handlers() await app( get_example_scope( "POST", "/", [], ), mock_receive(), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_wrong_method_json_parameter_gets_null_if_optional( app, mock_send, mock_receive ): @app.router.get("/") # <--- NB: wrong http method for posting payloads async def home(item: 
FromJSON[Optional[Item]]): assert item.value is None app.normalize_handlers() await app( get_example_scope( "GET", "/", [[b"content-type", b"application/json"], [b"content-length", b"32"]], ), mock_receive([b'{"a":"Hello","b":"World","c":10}']), mock_send, ) assert app.response.status == 204 @pytest.mark.asyncio async def test_handler_from_wrong_method_json_parameter_gets_bad_request( app, mock_send, mock_receive ): @app.router.get("/") # <--- NB: wrong http method for posting payloads async def home(request, item: FromJSON[Item]): assert item.value is None app.normalize_handlers() await app( get_example_scope( "GET", "/", [[b"content-type", b"application/json"], [b"content-length", b"32"]], ), mock_receive([b'{"a":"Hello","b":"World","c":10}']), mock_send, ) # 400 because the annotation FromJSON[Item] makes the item REQUIRED; assert app.response.status == 400 content = await app.response.text() assert content == "Bad Request: Expected request content" @pytest.mark.asyncio @pytest.mark.parametrize( "parameter_type,parameter,expected_value", [ [str, "Hello", "Hello"], [int, "1349", 1349], [float, "13.2", 13.2], [bool, "True", True], [bool, "1", True], [Optional[bool], "1", True], [Optional[bool], "", None], [bool, "False", False], [Optional[bool], "False", False], [date, "2020-5-30", date(2020, 5, 30)], [date, "2020-1-1", date(2020, 1, 1)], [Optional[date], "", None], [ datetime, "2020-10-24T18:46:19.313346", datetime(2020, 10, 24, 18, 46, 19, 313346), ], [bool, "0", False], [ UUID, "54b2587a-0afc-40ec-a03d-13223d4bb04d", UUID("54b2587a-0afc-40ec-a03d-13223d4bb04d"), ], ], ) async def test_valid_query_parameter_parse( parameter_type, parameter, expected_value, app, mock_send, mock_receive ): @inject() @app.router.get("/") async def home(foo: FromQuery[parameter_type]): assert foo.value == expected_value return status_code(200) app.normalize_handlers() await app( get_example_scope("GET", "/", [], query=f"foo={parameter}".encode()), mock_receive(), mock_send, ) assert 
app.response.status == 200 @pytest.mark.asyncio @pytest.mark.parametrize( "parameter_type,parameter,expected_value", [ [str, "Hello", "Hello"], [int, "1349", 1349], [float, "13.2", 13.2], [bool, "True", True], [bool, "1", True], [Optional[bool], "1", True], [Optional[bool], "", None], [bool, "False", False], [Optional[bool], "False", False], [date, "2020-5-30", date(2020, 5, 30)], [date, "2020-1-1", date(2020, 1, 1)], [Optional[date], "", None], [ datetime, "2020-10-24T18:46:19.313346", datetime(2020, 10, 24, 18, 46, 19, 313346), ], [bool, "0", False], [ UUID, "54b2587a-0afc-40ec-a03d-13223d4bb04d", UUID("54b2587a-0afc-40ec-a03d-13223d4bb04d"), ], ], ) async def test_valid_cookie_parameter_parse( parameter_type, parameter, expected_value, app, mock_send, mock_receive ): @inject() @app.router.get("/") async def home(foo: FromCookie[parameter_type]): assert foo.value == expected_value return status_code(200) app.normalize_handlers() await app( get_example_scope("GET", "/", [(b"cookie", f"foo={parameter}".encode())]), mock_receive(), mock_send, ) assert app.response.status == 200 @pytest.mark.asyncio @pytest.mark.parametrize( "parameter_type,parameters,expected_value", [ [List, ["Hello", "World"], ["Hello", "World"]], [List[str], ["Hello", "World"], ["Hello", "World"]], [List[int], ["1349"], [1349]], [List[int], ["1", "2", "3"], [1, 2, 3]], [List[float], ["1.12", "2.30", "3.55"], [1.12, 2.30, 3.55]], [List[bool], ["1", "0", "0", "1"], [True, False, False, True]], [ List[date], ["2020-5-30", "2019-5-30", "2018-1-1"], [date(2020, 5, 30), date(2019, 5, 30), date(2018, 1, 1)], ], [ List[datetime], ["2020-10-24T18:46:19.313346", "2019-10-24T18:46:19.313346"], [ datetime(2020, 10, 24, 18, 46, 19, 313346), datetime(2019, 10, 24, 18, 46, 19, 313346), ], ], ], ) async def test_valid_query_parameter_list_parse( parameter_type, parameters, expected_value, app, mock_send, mock_receive ): @inject() @app.router.get("/") async def home(foo: FromQuery[parameter_type]): assert 
foo.value == expected_value return status_code(200) app.normalize_handlers() query = "&".join(f"foo={parameter}" for parameter in parameters) await app( get_example_scope("GET",
Folder "no-media-directories-error": "No media directories have been set. For shared playlist and file switching features to work properly please select File->Set Media Directories and specify where Syncplay should look to find media files.", "cannot-find-directory-error": "Could not find media directory '{}'. To update your list of media directories please select File->Set Media Directories from the menu bar and specify where Syncplay should look to find media files.", "failed-to-load-server-list-error": "Failed to load public server list. Please visit https://www.syncplay.pl/ in your browser.", # Client arguments "argument-description": 'Solution to synchronize playback of multiple media player instances over the network.', "argument-epilog": 'If no options supplied _config values will be used', "nogui-argument": 'show no GUI', "host-argument": 'server\'s address', "name-argument": 'desired username', "debug-argument": 'debug mode', "force-gui-prompt-argument": 'make configuration prompt appear', "no-store-argument": 'don\'t store values in .syncplay', "room-argument": 'default room', "password-argument": '<PASSWORD>', "player-path-argument": 'path to your player executable', "file-argument": 'file to play', "args-argument": 'player options, if you need to pass options starting with - prepend them with single \'--\' argument', "clear-gui-data-argument": 'resets path and window state GUI data stored as QSettings', "language-argument": 'language for Syncplay messages (de/en/ru/it/es/pt_BR)', "version-argument": 'prints your version', "version-message": "You're using Syncplay version {} ({})", "load-playlist-from-file-argument": "loads playlist from text file (one entry per line)", # Client labels "config-window-title": "Syncplay configuration", "connection-group-title": "Connection settings", "host-label": "Server address: ", "name-label": "Username (optional):", "password-label": "Server password (if any):", "room-label": "Default room: ", "media-setting-title": 
"Media player settings", "executable-path-label": "Path to media player:", "media-path-label": "Path to video (optional):", "player-arguments-label": "Player arguments (if any):", "browse-label": "Browse", "update-server-list-label": "Update list", "more-title": "Show more settings", "never-rewind-value": "Never", "seconds-suffix": " secs", "privacy-sendraw-option": "Send raw", "privacy-sendhashed-option": "Send hashed", "privacy-dontsend-option": "Don't send", "filename-privacy-label": "Filename information:", "filesize-privacy-label": "File size information:", "checkforupdatesautomatically-label": "Check for Syncplay updates automatically", "slowondesync-label": "Slow down on minor desync (not supported on MPC-HC/BE)", "rewindondesync-label": "Rewind on major desync (recommended)", "fastforwardondesync-label": "Fast-forward if lagging behind (recommended)", "dontslowdownwithme-label": "Never slow down or rewind others (experimental)", "pausing-title": "Pausing", "pauseonleave-label": "Pause when user leaves (e.g. if they are disconnected)", "readiness-title": "Initial readiness state", "readyatstart-label": "Set me as 'ready to watch' by default", "forceguiprompt-label": "Don't always show the Syncplay configuration window", # (Inverted) "showosd-label": "Enable OSD Messages", "showosdwarnings-label": "Include warnings (e.g. 
when files are different, users not ready)", "showsameroomosd-label": "Include events in your room", "shownoncontrollerosd-label": "Include events from non-operators in managed rooms", "showdifferentroomosd-label": "Include events in other rooms", "showslowdownosd-label": "Include slowing down / reverting notifications", "language-label": "Language:", "automatic-language": "Default ({})", # Default language "showdurationnotification-label": "Warn about media duration mismatches", "basics-label": "Basics", "readiness-label": "Play/Pause", "misc-label": "Misc", "core-behaviour-title": "Core room behaviour", "syncplay-internals-title": "Syncplay internals", "syncplay-mediasearchdirectories-title": "Directories to search for media", "syncplay-mediasearchdirectories-label": "Directories to search for media (one path per line)", "sync-label": "Sync", "sync-otherslagging-title": "If others are lagging behind...", "sync-youlaggging-title": "If you are lagging behind...", "messages-label": "Messages", "messages-osd-title": "On-screen Display settings", "messages-other-title": "Other display settings", "chat-label": "Chat", "privacy-label": "Privacy", # Currently unused, but will be brought back if more space is needed in Misc tab "privacy-title": "Privacy settings", "unpause-title": "If you press play, set as ready and:", "unpause-ifalreadyready-option": "Unpause if already set as ready", "unpause-ifothersready-option": "Unpause if already ready or others in room are ready (default)", "unpause-ifminusersready-option": "Unpause if already ready or if all others ready and min users ready", "unpause-always": "Always unpause", "syncplay-trusteddomains-title": "Trusted domains (for streaming services and hosted content)", "chat-title": "Chat message input", "chatinputenabled-label": "Enable chat input via mpv", "chatdirectinput-label": "Allow instant chat input (bypass having to press enter key to chat)", "chatinputfont-label": "Chat input font", "chatfont-label": "Set font", 
"chatcolour-label": "Set colour", "chatinputposition-label": "Position of message input area in mpv", "chat-top-option": "Top", "chat-middle-option": "Middle", "chat-bottom-option": "Bottom", "chatoutputheader-label": "Chat message output", "chatoutputfont-label": "Chat output font", "chatoutputenabled-label": "Enable chat output in media player (mpv only for now)", "chatoutputposition-label": "Output mode", "chat-chatroom-option": "Chatroom style", "chat-scrolling-option": "Scrolling style", "mpv-key-tab-hint": "[TAB] to toggle access to alphabet row key shortcuts.", "mpv-key-hint": "[ENTER] to send message. [ESC] to escape chat mode.", "alphakey-mode-warning-first-line": "You can temporarily use old mpv bindings with a-z keys.", "alphakey-mode-warning-second-line": "Press [TAB] to return to Syncplay chat mode.", "help-label": "Help", "reset-label": "Restore defaults", "run-label": "Run Syncplay", "storeandrun-label": "Store configuration and run Syncplay", "contact-label": "Feel free to e-mail <a href=\"mailto:<EMAIL>\"><nobr><EMAIL></nobr></a>, chat via the <a href=\"https://webchat.freenode.net/?channels=#syncplay\"><nobr>#Syncplay IRC channel</nobr></a> on irc.freenode.net, <a href=\"https://github.com/Uriziel/syncplay/issues\"><nobr>raise an issue</nobr></a> via GitHub, <a href=\"https://www.facebook.com/SyncplaySoftware\"><nobr>like us on Facebook</nobr></a>, <a href=\"https://twitter.com/Syncplay/\"><nobr>follow us on Twitter</nobr></a>, or visit <a href=\"https://syncplay.pl/\"><nobr>https://syncplay.pl/</nobr></a>. 
Do not use Syncplay to send sensitive information.", "joinroom-label": "Join room", "joinroom-menu-label": "Join room {}", "seektime-menu-label": "Seek to time", "undoseek-menu-label": "Undo seek", "play-menu-label": "Play", "pause-menu-label": "Pause", "playbackbuttons-menu-label": "Show playback buttons", "autoplay-menu-label": "Show auto-play button", "autoplay-guipushbuttonlabel": "Play when all ready", "autoplay-minimum-label": "Min users:", "sendmessage-label": "Send", "ready-guipushbuttonlabel": "I'm ready to watch!", "roomuser-heading-label": "Room / User", "size-heading-label": "Size", "duration-heading-label": "Length", "filename-heading-label": "Filename", "notifications-heading-label": "Notifications", "userlist-heading-label": "List of who is playing what", "browseformedia-label": "Browse for media files", "file-menu-label": "&File", # & precedes shortcut key "openmedia-menu-label": "&Open media file", "openstreamurl-menu-label": "Open &media stream URL", "setmediadirectories-menu-label": "Set media &directories", "loadplaylistfromfile-menu-label": "&Load playlist from file", "saveplaylisttofile-menu-label": "&Save playlist to file", "exit-menu-label": "E&xit", "advanced-menu-label": "&Advanced", "window-menu-label": "&Window", "setoffset-menu-label": "Set &offset", "createcontrolledroom-menu-label": "&Create managed room", "identifyascontroller-menu-label": "&Identify as room operator", "settrusteddomains-menu-label": "Set &trusted domains", "addtrusteddomain-menu-label": "Add {} as trusted domain", # Domain "edit-menu-label": "&Edit", "cut-menu-label": "Cu&t", "copy-menu-label": "&Copy", "paste-menu-label": "&Paste", "selectall-menu-label": "&Select All", "playback-menu-label": "&Playback", "help-menu-label": "&Help", "userguide-menu-label": "Open user &guide", "update-menu-label": "Check for &update", "startTLS-initiated": "Attempting secure connection", "startTLS-secure-connection-ok": "Secure connection established ({})", 
"startTLS-server-certificate-invalid": 'Secure connection failed. The server uses an invalid security certificate. This communication could be intercepted by a third party. For further details and troubleshooting see <a href="https://syncplay.pl/trouble">here</a>.', "startTLS-not-supported-client": "This client does not support TLS", "startTLS-not-supported-server": "This server does not support TLS", # TLS certificate dialog "tls-information-title": "Certificate Details", "tls-dialog-status-label": "<strong>Syncplay is using an encrypted connection to {}.</strong>", "tls-dialog-desc-label": "Encryption with a digital certificate keeps information private as it is sent to or from the<br/>server {}.", "tls-dialog-connection-label": "Information encrypted using Transport Layer Security (TLS), version {} with the cipher<br/>suite: {}.", "tls-dialog-certificate-label": "Certificate issued by {} valid until {}.", # About dialog "about-menu-label": "&About Syncplay", "about-dialog-title": "About Syncplay", "about-dialog-release": "Version {} release {}", "about-dialog-license-text": "Licensed under the Apache&nbsp;License,&nbsp;Version 2.0", "about-dialog-license-button": "License", "about-dialog-dependencies": "Dependencies", "setoffset-msgbox-label": "Set offset", "offsetinfo-msgbox-label": "Offset (see https://syncplay.pl/guide/ for usage instructions):", "promptforstreamurl-msgbox-label": "Open media stream URL", "promptforstreamurlinfo-msgbox-label": "Stream URL", "addfolder-label": "Add folder", "adduris-msgbox-label": "Add URLs to playlist (one per line)", "editplaylist-msgbox-label": "Set playlist (one per line)", "trusteddomains-msgbox-label": "Domains it is okay to automatically switch to (one per line)", "createcontrolledroom-msgbox-label": "Create managed room", "controlledroominfo-msgbox-label": "Enter name of managed room\r\n(see https://syncplay.pl/guide/ for usage instructions):", "identifyascontroller-msgbox-label": "Identify as room operator", 
"identifyinfo-msgbox-label": "Enter operator password for this room\r\n(see https://syncplay.pl/guide/ for usage instructions):", "public-server-msgbox-label": "Select the public server for this viewing session", "megabyte-suffix": " MB", # Tooltips "host-tooltip": "Hostname or IP to connect to, optionally including port (e.g. syncplay.pl:8999). Only synchronised with people on same server/port.", "name-tooltip": "Nickname you will be known by. No registration, so can easily change later. Random name generated if none specified.", "password-tooltip": "Passwords are only needed for connecting to private servers.", "room-tooltip": "Room to join upon connection can be almost anything, but you will only be synchronised with people in the same room.", "executable-path-tooltip": "Location of your chosen supported media player (mpv, VLC, MPC-HC/BE or mplayer2).", "media-path-tooltip": "Location of video or stream to be opened. Necessary for mplayer2.", "player-arguments-tooltip": "Additional command line arguments / switches to pass on to this media player.", "mediasearcdirectories-arguments-tooltip": "Directories where Syncplay will search for media files, e.g. when you are using the click to switch feature. Syncplay will look recursively through sub-folders.", "more-tooltip": "Display less frequently used settings.", "filename-privacy-tooltip": "Privacy mode for sending currently playing filename to server.", "filesize-privacy-tooltip": "Privacy mode for sending size of currently playing file to server.", "privacy-sendraw-tooltip": "Send this information without obfuscation. This is the default option with most functionality.", "privacy-sendhashed-tooltip": "Send a hashed version of the information, making it less visible to other clients.", "privacy-dontsend-tooltip": "Do not send this information to the server. This provides for maximum privacy.",
#!/usr/bin/env python # -*- coding: utf-8 -*- """priors.py -- This module contains various objects to be used as priors. When called these return the ln-prior-probability, and they can also be used to construct prior transforms (for nested sampling) and can be sampled from. """ import numpy as np import scipy.stats __all__ = ["Prior", "TopHat", "Normal", "ClippedNormal", "LogNormal", "LogUniform", "Beta", "StudentT", "SkewNormal"] class Prior(object): """Encapsulate the priors in an object. Each prior should have a distribution name and optional parameters specifying scale and location (e.g. min/max or mean/sigma). These can be aliased at instantiation using the ``parnames`` keyword. When called, the argument should be a variable and the object should return the ln-prior-probability of that value. .. code-block:: python ln_prior_prob = Prior()(value) Should be able to sample from the prior, and to get the gradient of the prior at any variable value. Methods should also be avilable to give a useful plotting range and, if there are bounds, to return them. :param parnames: A list of names of the parameters, used to alias the intrinsic parameter names. This way different instances of the same Prior can have different parameter names, in case they are being fit for.... """ def __init__(self, parnames=[], name='', **kwargs): """Constructor. :param parnames: A list of names of the parameters, used to alias the intrinsic parameter names. This way different instances of the same Prior can have different parameter names, in case they are being fit for.... 
""" if len(parnames) == 0: parnames = self.prior_params assert len(parnames) == len(self.prior_params) self.alias = dict(zip(self.prior_params, parnames)) self.params = {} self.name = name self.update(**kwargs) def __repr__(self): argstring = ['{}={}'.format(k, v) for k, v in list(self.params.items())] return '{}({})'.format(self.__class__, ",".join(argstring)) def update(self, **kwargs): """Update `params` values using alias. """ for k in self.prior_params: try: self.params[k] = kwargs[self.alias[k]] except(KeyError): pass # FIXME: Should add a check for unexpected kwargs. def __len__(self): """The length is set by the maximum size of any of the prior_params. Note that the prior params must therefore be scalar of same length as the maximum size of any of the parameters. This is not checked. """ return max([np.size(self.params.get(k, 1)) for k in self.prior_params]) def __call__(self, x, **kwargs): """Compute the value of the probability desnity function at x and return the ln of that. :param x: Value of the parameter, scalar or iterable of same length as the Prior object. :param kwargs: optional All extra keyword arguments are sued to update the `prior_params`. :returns lnp: The natural log of the prior probability at x, scalar or ndarray of same length as the prior object. """ if len(kwargs) > 0: self.update(**kwargs) pdf = self.distribution.pdf try: p = pdf(x, *self.args, loc=self.loc, scale=self.scale) except(ValueError): # Deal with `x` vectors of shape (nsamples, len(prior)) # for pdfs that don't broadcast nicely. p = [pdf(_x, *self.args, loc=self.loc, scale=self.scale) for _x in x] p = np.array(p) with np.errstate(invalid='ignore'): lnp = np.log(p) return lnp def sample(self, nsample=None, **kwargs): """Draw a sample from the prior distribution. 
:param nsample: (optional) Unused """ if len(kwargs) > 0: self.update(**kwargs) return self.distribution.rvs(*self.args, size=len(self), loc=self.loc, scale=self.scale) def unit_transform(self, x, **kwargs): """Go from a value of the CDF (between 0 and 1) to the corresponding parameter value. :param x: A scalar or vector of same length as the Prior with values between zero and one corresponding to the value of the CDF. :returns theta: The parameter value corresponding to the value of the CDF given by `x`. """ if len(kwargs) > 0: self.update(**kwargs) return self.distribution.ppf(x, *self.args, loc=self.loc, scale=self.scale) def inverse_unit_transform(self, x, **kwargs): """Go from the parameter value to the unit coordinate using the cdf. """ if len(kwargs) > 0: self.update(**kwargs) return self.distribution.cdf(x, *self.args, loc=self.loc, scale=self.scale) def gradient(self, theta): raise(NotImplementedError) @property def loc(self): """This should be overridden. """ return 0 @property def scale(self): """This should be overridden. """ return 1 @property def args(self): return [] @property def range(self): raise(NotImplementedError) @property def bounds(self): raise(NotImplementedError) def serialize(self): raise(NotImplementedError) class TopHat(Prior): """A simple uniform prior, described by two parameters :param mini: Minimum of the distribution :param maxi: Maximum of the distribution """ prior_params = ['mini', 'maxi'] distribution = scipy.stats.uniform @property def scale(self): return self.params['maxi'] - self.params['mini'] @property def loc(self): return self.params['mini'] @property def range(self): return (self.params['mini'], self.params['maxi']) def bounds(self, **kwargs): if len(kwargs) > 0: self.update(**kwargs) return self.range class Normal(Prior): """A simple gaussian prior. 
:param mean: Mean of the distribution :param sigma: Standard deviation of the distribution """ prior_params = ['mean', 'sigma'] distribution = scipy.stats.norm @property def scale(self): return self.params['sigma'] @property def loc(self): return self.params['mean'] @property def range(self): nsig = 4 return (self.params['mean'] - nsig * self.params['sigma'], self.params['mean'] + self.params['sigma']) def bounds(self, **kwargs): #if len(kwargs) > 0: # self.update(**kwargs) return (-np.inf, np.inf) class ClippedNormal(Prior): """A Gaussian prior clipped to some range. :param mean: Mean of the normal distribution :param sigma: Standard deviation of the normal distribution :param mini: Minimum of the distribution :param maxi: Maximum of the distribution """ prior_params = ['mean', 'sigma', 'mini', 'maxi'] distribution = scipy.stats.truncnorm @property def scale(self): return self.params['sigma'] @property def loc(self): return self.params['mean'] @property def range(self): return (self.params['mini'], self.params['maxi']) @property def args(self): a = (self.params['mini'] - self.params['mean']) / self.params['sigma'] b = (self.params['maxi'] - self.params['mean']) / self.params['sigma'] return [a, b] def bounds(self, **kwargs): if len(kwargs) > 0: self.update(**kwargs) return self.range class LogUniform(Prior): """Like log-normal, but the distribution of natural log of the variable is distributed uniformly instead of normally. :param mini: Minimum of the distribution :param maxi: Maximum of the distribution """ prior_params = ['mini', 'maxi'] distribution = scipy.stats.reciprocal @property def args(self): a = self.params['mini'] b = self.params['maxi'] return [a, b] @property def range(self): return (self.params['mini'], self.params['maxi']) def bounds(self, **kwargs): if len(kwargs) > 0: self.update(**kwargs) return self.range class Beta(Prior): """A Beta distribution. 
:param mini: Minimum of the distribution :param maxi: Maximum of the distribution :param alpha: :param beta: """ prior_params = ['mini', 'maxi', 'alpha', 'beta'] distribution = scipy.stats.beta @property def scale(self): return self.params.get('maxi', 1) - self.params.get('mini', 0) @property def loc(self): return self.params.get('mini', 0) @property def args(self): a = self.params['alpha'] b = self.params['beta'] return [a, b] @property def range(self): return (self.params.get('mini',0), self.params.get('maxi',1)) def bounds(self, **kwargs): if len(kwargs) > 0: self.update(**kwargs) return self.range class LogNormal(Prior): """A log-normal prior, where the natural log of the variable is distributed normally. Useful for parameters that cannot be less than zero. Note that ``LogNormal(np.exp(mode) / f) == LogNormal(np.exp(mode) * f)`` and ``f = np.exp(sigma)`` corresponds to "one sigma" from the peak. :param mode: Natural log of the variable value at which the probability density is highest. :param sigma: Standard deviation of the distribution of the natural log of the variable. """ prior_params = ['mode', 'sigma'] distribution = scipy.stats.lognorm @property def args(self): return [self.params["sigma"]] @property def scale(self): return np.exp(self.params["mode"] + self.params["sigma"]**2) @property def loc(self): return 0 @property def range(self): nsig = 4 return (np.exp(self.params['mode'] + (nsig * self.params['sigma'])), np.exp(self.params['mode'] - (nsig * self.params['sigma']))) def bounds(self, **kwargs): return (0, np.inf) class LogNormalLinpar(Prior): """A log-normal prior, where the natural log of the variable is distributed normally. Useful for parameters that cannot be less than zero. LogNormal(mode=x, sigma=y) is equivalent to LogNormalLinpar(mode=np.exp(x), sigma_factor=np.exp(y)) :param mode: The (linear) value of the variable where the probability density is highest. Must be > 0. 
:param sigma_factor: The (linear) factor describing the dispersion of the log of the variable. Must be > 0 """ prior_params = ['mode', 'sigma_factor'] distribution = scipy.stats.lognorm @property def args(self): return [np.log(self.params["sigma_factor"])] @property def scale(self): k = self.params["sigma_factor"]**np.log(self.params["sigma_factor"]) return self.params["mode"] * k @property def loc(self): return 0 @property def range(self): nsig = 4 return (self.params['mode'] * (nsig * self.params['sigma_factor']), self.params['mode'] / (nsig * self.params['sigma_factor'])) def bounds(self, **kwargs): return (0, np.inf) class SkewNormal(Prior): """A normal distribution including a skew parameter :param location: Center (*not* mean, mode, or median) of the distribution. The center will approach the mean as skew approaches zero. :param sigma: Standard deviation of the distribution :param skew: Skewness of the distribution """ prior_params = ['location', 'sigma', 'skew'] distribution = scipy.stats.skewnorm @property def args(self): return [self.params['skew']] @property def scale(self): return
<gh_stars>0 #!/usr/bin/env python """ ----------------------------------------------------------------------------- Copyright (c) 2009-2019, Shotgun Software Inc. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the Shotgun Software Inc nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" # Python 2/3 compatibility from .lib import six from .lib import sgsix from .lib.six import BytesIO # used for attachment upload from .lib.six.moves import map import base64 from .lib.six.moves import http_cookiejar # used for attachment upload import datetime import logging import uuid # used for attachment upload import os import re import copy import stat # used for attachment upload import sys import time import json from .lib.six.moves import urllib import shutil # used for attachment download from .lib.six.moves import http_client # Used for secure file upload. from .lib.httplib2 import Http, ProxyInfo, socks, ssl_error_classes from .lib.sgtimezone import SgTimezone # Import Error and ResponseError (even though they're unused in this file) since they need # to be exposed as part of the API. from .lib.six.moves.xmlrpc_client import Error, ProtocolError, ResponseError # noqa LOG = logging.getLogger("shotgun_api3") """ Logging instance for shotgun_api3 Provides a logging instance where log messages are sent during execution. This instance has no handler associated with it. .. seealso:: :ref:`logging` """ LOG.setLevel(logging.WARN) def _is_mimetypes_broken(): """ Checks if this version of Python ships with a broken version of mimetypes :returns: True if the version of mimetypes is broken, False otherwise. """ # mimetypes is broken on Windows only and for Python 2.7.0 to 2.7.9 inclusively. # We're bundling the version from 2.7.10. 
# See bugs : # http://bugs.python.org/issue9291 <- Fixed in 2.7.7 # http://bugs.python.org/issue21652 <- Fixed in 2.7.8 # http://bugs.python.org/issue22028 <- Fixed in 2.7.10 return (sys.platform == "win32" and sys.version_info[0] == 2 and sys.version_info[1] == 7 and sys.version_info[2] >= 0 and sys.version_info[2] <= 9) if _is_mimetypes_broken(): from .lib import mimetypes as mimetypes else: import mimetypes # mimetypes imported in version specific imports mimetypes.add_type("video/webm", ".webm") # webm and mp4 seem to be missing mimetypes.add_type("video/mp4", ".mp4") # from some OS/distros SG_TIMEZONE = SgTimezone() NO_SSL_VALIDATION = False """ Turns off hostname matching validation for SSL certificates Sometimes there are cases where certificate validation should be disabled. For example, if you have a self-signed internal certificate that isn't included in our certificate bundle, you may not require the added security provided by enforcing this. """ try: import ssl except ImportError as e: if "SHOTGUN_FORCE_CERTIFICATE_VALIDATION" in os.environ: raise ImportError("%s. SHOTGUN_FORCE_CERTIFICATE_VALIDATION environment variable prevents " "disabling SSL certificate validation." % e) LOG.debug("ssl not found, disabling certificate validation") NO_SSL_VALIDATION = True # ---------------------------------------------------------------------------- # Version __version__ = "3.2.4" # ---------------------------------------------------------------------------- # Errors class ShotgunError(Exception): """ Base for all Shotgun API Errors. """ pass class ShotgunFileDownloadError(ShotgunError): """ Exception for file download-related errors. """ pass class ShotgunThumbnailNotReady(ShotgunError): """ Exception for when trying to use a 'pending thumbnail' (aka transient thumbnail) in an operation """ pass class Fault(ShotgunError): """ Exception when server-side exception detected. 
""" pass class AuthenticationFault(Fault): """ Exception when the server side reports an error related to authentication. """ pass class MissingTwoFactorAuthenticationFault(Fault): """ Exception when the server side reports an error related to missing two-factor authentication credentials. """ pass class UserCredentialsNotAllowedForSSOAuthenticationFault(Fault): """ Exception when the server is configured to use SSO. It is not possible to use a username/password pair to authenticate on such server. """ pass class UserCredentialsNotAllowedForOxygenAuthenticationFault(Fault): """ Exception when the server is configured to use Oxygen. It is not possible to use a username/password pair to authenticate on such server. """ pass # ---------------------------------------------------------------------------- # API class ServerCapabilities(object): """ Container for the servers capabilities, such as version enabled features. .. warning:: This class is part of the internal API and its interfaces may change at any time in the future. Therefore, usage of this class is discouraged. """ def __init__(self, host, meta): """ ServerCapabilities.__init__ :param str host: Host name for the server excluding protocol. :param dict meta: dict of meta data for the server returned from the info() api method. :ivar str host: :ivar dict server_info: :ivar tuple version: Simple version of the Shotgun server. ``(major, minor, rev)`` :ivar bool is_dev: ``True`` if server is running a development version of the Shotgun codebase. """ # Server host name self.host = host self.server_info = meta # Version from server is major.minor.rev or major.minor.rev."Dev" # Store version as tuple and check dev flag try: self.version = meta.get("version", None) except AttributeError: self.version = None if not self.version: raise ShotgunError("The Shotgun Server didn't respond with a version number. 
" "This may be because you are running an older version of " "Shotgun against a more recent version of the Shotgun API. " "For more information, please contact Shotgun Support.") if len(self.version) > 3 and self.version[3] == "Dev": self.is_dev = True else: self.is_dev = False self.version = tuple(self.version[:3]) self._ensure_json_supported() def _ensure_support(self, feature, raise_hell=True): """ Checks the server version supports a given feature, raises an exception if it does not. :param dict feature: dict where **version** key contains a 3 integer tuple indicating the supported server version and **label** key contains a human-readable label str:: { 'version': (5, 4, 4), 'label': 'project parameter } :param bool raise_hell: Whether to raise an exception if the feature is not supported. Defaults to ``True`` :raises: :class:`ShotgunError` if the current server version does not support ``feature`` """ if not self.version or self.version < feature["version"]: if raise_hell: raise ShotgunError( "%s requires server version %s or higher, " "server is %s" % (feature["label"], _version_str(feature["version"]), _version_str(self.version)) ) return False else: return True def _ensure_json_supported(self): """ Ensures server has support for JSON API endpoint added in v2.4.0. """ self._ensure_support({ "version": (2, 4, 0), "label": "JSON API" }) def ensure_include_archived_projects(self): """ Ensures server has support for archived Projects feature added in v5.3.14. """ self._ensure_support({ "version": (5, 3, 14), "label": "include_archived_projects parameter" }) def ensure_per_project_customization(self): """ Ensures server has support for per-project customization feature added in v5.4.4. """ return self._ensure_support({ "version": (5, 4, 4), "label": "project parameter" }, True) def ensure_support_for_additional_filter_presets(self): """ Ensures server has support for additional filter presets feature added in v7.0.0. 
""" return self._ensure_support({ "version": (7, 0, 0), "label": "additional_filter_presets parameter" }, True) def ensure_user_following_support(self): """ Ensures server has support for listing items a user is following, added in v7.0.12. """ return self._ensure_support({ "version": (7, 0, 12), "label": "user_following parameter" }, True) def ensure_paging_info_without_counts_support(self): """ Ensures server has support for optimized pagination, added in v7.4.0. """ return self._ensure_support({ "version": (7, 4, 0), "label": "optimized pagination" }, False) def ensure_return_image_urls_support(self): """ Ensures server has support for returning thumbnail URLs without additional round-trips, added in v3.3.0. """ return self._ensure_support({ "version": (3, 3, 0), "label": "return thumbnail URLs" }, False) def __str__(self): return "ServerCapabilities: host %s, version %s, is_dev %s"\ % (self.host, self.version, self.is_dev) class ClientCapabilities(object): """ Container for the client capabilities. .. warning:: This class is part of the internal API and its interfaces may change at any time in the future. Therefore, usage of this class is discouraged. :ivar str platform: The current client platform. Valid values are ``mac``, ``linux``, ``windows``, or ``None`` (if the current platform couldn't be determined). :ivar str local_path_field: The SG field used for local file
dataspace. Parameters: ctx: If not ``None``, TileDB context wrapper for a TileDB storage manager. """ used_dims_no_domain = tuple( dim_name for dim_name, dim in self._dims.items() if dim.domain is None and self._dim_to_arrays[dim_name] ) if used_dims_no_domain: raise ValueError( f"Cannot create a TileDB group schema for this group. Dimensions " f"{used_dims_no_domain} do not a have domain. You can set the domains " f"for these dimensions using the `set_dim_properties` method." ) array_schemas = {} for array_name, array_creator in self._array_creators.items(): try: array_schemas[array_name] = array_creator.to_schema(ctx) except tiledb.libtiledb.TileDBError as err: raise RuntimeError( f"Failed to create an ArraySchema for array '{array_name}'." ) from err group_schema = GroupSchema(array_schemas) return group_schema class ArrayCreator: """Creator for a TileDB array using shared dimension definitions. Parameters: dims: An ordered list of the shared dimensions for the domain of this array. cell_order: The order in which TileDB stores the cells on disk inside a tile. Valid values are: ``row-major`` (default) or ``C`` for row major; ``col-major`` or ``F`` for column major; or ``Hilbert`` for a Hilbert curve. tile_order: The order in which TileDB stores the tiles on disk. Valid values are: ``row-major`` or ``C`` (default) for row major; or ``col-major`` or ``F`` for column major. capacity: The number of cells in a data tile of a sparse fragment. tiles: An optional ordered list of tile sizes for the dimensions of the array. The length must match the number of dimensions in the array. coords_filters: Filters for all dimensions that are not specified explicitly by ``dim_filters``. dim_filters: A dict from dimension name to a ``FilterList`` for dimensions in the array. Overrides the values set in ``coords_filters``. offsets_filters: Filters for the offsets for variable length attributes or dimensions. 
allows_duplicates: Specifies if multiple values can be stored at the same coordinate. Only allowed for sparse arrays. sparse: Specifies if the array is a sparse TileDB array (true) or dense TileDB array (false). Attributes: cell_order: The order in which TileDB stores the cells on disk inside a tile. Valid values are: ``row-major`` (default) or ``C`` for row major; ``col-major`` or ``F`` for column major; or ``Hilbert`` for a Hilbert curve. tile_order: The order in which TileDB stores the tiles on disk. Valid values are: ``row-major`` or ``C`` (default) for row major; or ``col-major`` or ``F`` for column major. capacity: The number of cells in a data tile of a sparse fragment. coords_filters: Filters for all dimensions that are not specified explicitly by ``dim_filters``. offsets_filters: Filters for the offsets for variable length attributes or dimensions. allows_duplicates: Specifies if multiple values can be stored at the same coordinate. Only allowed for sparse arrays. """ def __init__( self, dims: Sequence[SharedDim], cell_order: str = "row-major", tile_order: str = "row-major", capacity: int = 0, tiles: Optional[Sequence[int]] = None, coords_filters: Optional[tiledb.FilterList] = None, dim_filters: Optional[Dict[str, tiledb.FilterList]] = None, offsets_filters: Optional[tiledb.FilterList] = None, allows_duplicates: bool = False, sparse: bool = False, ): """Constructor for a ArrayCreator object.""" self._dim_creators = tuple(DimCreator(dim) for dim in dims) if not self._dim_creators: raise ValueError( "Cannot create array. Array must have at lease one dimension." 
) self._attr_creators: Dict[str, AttrCreator] = OrderedDict() self.cell_order = cell_order self.tile_order = tile_order self.capacity = capacity if tiles is not None: self.tiles = tiles self.coords_filters = coords_filters if dim_filters is not None: self.dim_filters = dim_filters self.offsets_filters = offsets_filters self.allows_duplicates = allows_duplicates self.sparse = sparse self.__post_init__() def __post_init__(self): pass def __repr__(self) -> str: output = StringIO() output.write(" ArrayCreator(\n") output.write(" domain=Domain(*[\n") for dim_creator in self._dim_creators: output.write(f" {repr(dim_creator)},\n") output.write(" ]),\n") output.write(" attrs=[\n") for attr_creator in self._attr_creators.values(): output.write(f" {repr(attr_creator)},\n") output.write(" ],\n") output.write( f" cell_order='{self.cell_order}',\n" f" tile_order='{self.tile_order}',\n" ) output.write(f" capacity={self.capacity},\n") output.write(f" sparse={self.sparse},\n") if self.sparse: output.write(f" allows_duplicates={self.allows_duplicates},\n") if self.coords_filters is not None: output.write(" coords_filters=FilterList([") for index, coord_filter in enumerate(self.coords_filters): output.write(f"{repr(coord_filter)}") if index < len(self.coords_filters): output.write(", ") output.write("])\n") output.write(" )") return output.getvalue() def add_attr(self, attr_creator: AttrCreator): """Adds a new attribute to an array in the CF dataspace. Each attribute name must be unique. It also cannot conflict with the name of a dimension in the array. Parameters: attr_name: Name of the new attribute that will be added. dtype: Numpy dtype of the new attribute. fill: Fill value for unset cells. var: Specifies if the attribute is variable length (automatic for byte/strings). nullable: Specifies if the attribute is nullable using validity tiles. filters: Specifies compression filters for the attribute. 
""" attr_name = attr_creator.name if attr_name in self._attr_creators: raise ValueError( f"Cannot create new attribute with name '{attr_name}'. An attribute " f"with that name already exists in this array." ) if attr_name in self.dim_names: raise ValueError( f"Cannot create new attribute with name '{attr_name}'. A dimension with" f" that name already exists in this array." ) self._attr_creators[attr_name] = attr_creator @property def attr_names(self): """A view of the names of attributes in the array.""" return self._attr_creators.keys() def create( self, uri: str, key: Optional[str] = None, ctx: Optional[tiledb.Ctx] = None, ): """Creates a TileDB array at the provided URI. Parameters: uri: Uniform resource identifier for the array to be created. key: If not ``None``, encryption key to decrypt arrays. ctx: If not ``None``, TileDB context wrapper for a TileDB storage manager. """ tiledb.Array.create(uri, self.to_schema(ctx), key, ctx) @property def dim_filters(self) -> Mapping[str, Optional[tiledb.FilterList]]: """A dict from dimension name to a ``FilterList`` for dimensions in the array. Overrides the values set in ``coords_filters``. """ return { dim_creator.name: dim_creator.filters for dim_creator in self._dim_creators } @dim_filters.setter def dim_filters( self, dim_filters: Mapping[str, Optional[tiledb.FilterList]], ): dim_map = {dim_creator.name: dim_creator for dim_creator in self._dim_creators} for dim_name, filters in dim_filters.items(): dim_map[dim_name].filters = filters @property def dim_names(self) -> Tuple[str, ...]: """A static snapshot of the names of dimensions of the array.""" return tuple(dim_creator.name for dim_creator in self._dim_creators) def get_attr_property(self, attr_name: str, property_name: str) -> Any: """Returns a requested property for an attribute in the array. Valid properties are: * ``name``: The name of the attribute. * ``dtype``: Numpy dtype of the attribute. * ``fill``: Fill value for unset cells. 
* ``var``: Specifies if the attribute is variable length (automatic for bytes/strings). * ``nullable``: Specifies if the attribute is nullable using validity tiles. * ``filters``: Specifies compression filters for the attributes. Parameters: attr_name: Name of the attribute to get the property from. property_name: Name of requested property. """ attr_creator = self._attr_creators[attr_name] return getattr(attr_creator, property_name) @property def ndim(self) -> int: """Number of dimensions in the array.""" return len(self._dim_creators) def rename_attr(self, original_name: str, new_name: str): """Renames an attribute in the array. Parameters: original_name: Current name of the attribute to be renamed. new_name: New name the attribute will be renamed to. """ if new_name in self.dim_names: raise ValueError( f"Cannot rename attr '{original_name}' to '{new_name}'. A dimension " f"with that name already exists in this array." ) attr = self._attr_creators.pop(original_name) attr.name = new_name self._attr_creators[new_name] = attr def remove_attr(self, attr_name: str): """Removes the specified attribute from the array. Parameters: attr_name: Name of the attribute that will be removed. 
""" del self._attr_creators[attr_name] def html_summary(self) -> str: """Returns a string HTML summary of the :class:`ArrayCreator`.""" cell_style = 'style="text-align: left;"' output = StringIO() output.write("<ul>\n") output.write("<li>\n") output.write("Domain\n") output.write("<table>\n") for dim_creator in self._dim_creators: output.write( f"<tr><td {cell_style}>{dim_creator.html_summary()}</td></tr>\n" ) output.write("</table>\n") output.write("</li>\n") output.write("<li>\n") output.write("Attributes\n") output.write("<table>\n") for attr_creator in self._attr_creators.values(): output.write( f"<tr><td {cell_style}>{attr_creator.html_summary()}</td></tr>\n" ) output.write("</table>\n") output.write("</li>\n") output.write("<li>\n") output.write("Array Properties\n") output.write( f"<table>\n" f"<tr><td {cell_style}>cell_order={self.cell_order}</td></tr>\n" f"<tr><td {cell_style}>tile_order={self.tile_order}</td></tr>\n" f"<tr><td {cell_style}>capacity={self.capacity}</td></tr>\n" f"<tr><td {cell_style}>sparse={self.sparse}</td></tr>\n" ) if self.sparse: output.write( f"<tr><td {cell_style}>allows_duplicates" f"={self.allows_duplicates}</td></tr>\n" ) output.write( f"<tr><td {cell_style}>coords_filters={self.coords_filters}</td></tr>\n" ) output.write("</table>\n") output.write("</li>\n") output.write("</ul>\n") return output.getvalue() def set_attr_properties(self, attr_name: str, **properties): """Sets properties for an attribute in the array. Valid properties are: * ``name``: The name of the attribute. * ``dtype``: Numpy dtype of the attribute. * ``fill``: Fill value for unset cells. * ``var``: Specifies if the attribute is variable length (automatic for bytes/strings). * ``nullable``: Specifies if the attribute is nullable using validity tiles. * ``filters``: Specifies compression filters for the attributes. sparst: Specifies if the array is a sparse TileDB array (true) or dense TileDB array (false). 
Parameters: attr_name: Name of the attribute to set properties for. properties: Keyword arguments for attribute properties. """
<filename>helper.py # global imports import numpy as np import itertools import scipy import scipy.special import scipy.stats import collections from numba import jit def get_states(N): """return all possible states as arrays for N binary units""" return np.array([np.array(x) for x in itertools.product([0, 1], repeat=N)]) def get_states_as_strings(N): """returns all possible states as strings for N binary units""" return np.array([state_array_to_string(s) for s in get_states(N)]) def state_array_to_string(s): return ''.join(np.array(s, dtype=str)) def state_array_to_int(s): """translates a state s into an integer by interpreting the state as a binary represenation""" return int(state_array_to_string(s), 2) def state_string_from_int(i, N): """translates an integer i into a state string by using the binary representation of the integer""" return bin(i)[2:].zfill(N) def state_array_from_int(i, N): """translates an integer i into a state by using the binary representation of the integer""" return np.array([int(si) for si in state_string_from_int(i, N)]) def random_initial_condition(N): return np.random.randint(0, 2, N) def adjust_time_slices(a_time, steps_warmup): return a_time[steps_warmup:] def adjust_recorded_states(a_s, steps_warmup): return a_s[steps_warmup:] def outdegree_distribution(M, K, N, m): """probability to find a source with m outputs for choosing for M neurons K sources from a pool of N neurons, without choosing a source twice for a single target """ return scipy.stats.binom.pmf(m, M, 1. * K / N, 0) def shared_input_distribution(K, N, s): """distribution of choosing s shared inputs for choosing K sources of a pool of N sources """ return scipy.stats.binom.pmf(s, K, 1. * K / N, 0) def create_BM_weight_matrix(N, distribution, mean_weight=None, **kwargs): """creates a random weight matrix for a Boltzmann machine (diagonal=0, and symmetric weights), with weights drawn from distribution. parameters for the distribution need to be passed as kwargs. 
""" W = distribution(size=(N, N), **kwargs) # we can not just use 0.5 * (W + W.T), without altering distribution of weights for i in xrange(N): for j in xrange(i): W[j, i] = W[i, j] W -= np.diag(W.diagonal()) if mean_weight is not None: W += mean_weight - 1. / (N * (N - 1)) * np.sum(W) W -= np.diag(W.diagonal()) return W def create_BM_biases(N, distribution, **kwargs): """create a random bias vector for a Boltzmann machine, with biases drawn from distribution. parameters for the distribution need to be passed as kwargs. """ return distribution(size=N, **kwargs) def create_multi_BM_weight_matrix(N, M, distribution, **kwargs): Ntot = M * N W = np.zeros((Ntot, Ntot)) for i in range(M): W[i * N:(i + 1) * N, i * N:(i + 1) * N] = create_BM_weight_matrix(N, distribution, **kwargs) return W def create_multi_BM_biases(N, M, distribution, **kwargs): return create_BM_biases(N * M, distribution, **kwargs) def create_BM_biases_threshold_condition(N, mean_weight, mean_activity): """create biases for a Boltzmann machine, by requiring that the average input from other neurons in the BM sums with the bias to zero. this way we can achieve an average activity in the BM of mean_activity. for details see, e.g., <NAME> al. (2014), PloS CB, eq. (5) """ # for a Boltzmann machine with N units, we have (N - 1) inputs return np.ones(N) * -1. * mean_weight * (N - 1) * mean_activity def create_BRN_weight_matrix(N, w, g, epsilon, gamma): """create a random realization of a weight matrix for an E/I network of N neurons with fixed weights. """ return create_BRN_weight_matrix_fixed_indegree(N, w, g, int(epsilon * N), gamma) def create_BRN_weight_matrix_fixed_indegree(N, w, g, K, gamma): """create a random realization of a weight matrix for an E/I network of N neurons with fixed weights. 
""" W = np.zeros((N, N)) NE = int(gamma * N) NI = int(N - NE) KE = int(gamma * K) KI = int(K - KE) for i in range(N): if NE > 0: indE = np.arange(0, NE) indE = indE[indE != i] indE = np.random.permutation(indE)[:KE] W[i, indE] = w if NI > 0: indI = np.arange(NE, N) indI = indI[indI != i] indI = np.random.permutation(indI)[:KI] W[i, indI] = -g * w return W def create_BRN_biases_threshold_condition(N, w, g, epsilon, gamma, mean_activity): """(see create_BM_biases_threshold_condition)""" return np.ones(N) * -1. * get_mu_input(epsilon, N, gamma, g, w, mean_activity) - w / 2. def create_stoch_biases_from_target_activity(N, mean_activity): """create biases for sigmoidal units from a target activity by using the inverse of the sigmoid""" return np.ones(N) * sigmainv(mean_activity) def create_noise_weight_matrix(Nbm, Nnoise, gamma, g, w, epsilon): return create_noise_weight_matrix_fixed_indegree(Nbm, Nnoise, gamma, g, w, int(epsilon * Nnoise)) def create_noise_weight_matrix_fixed_indegree(Nbm, Nnoise, gamma, g, w, Knoise): """create a random realization of a weight matrix for Nnoise sources projecting to Nbm targets with E/I connections of fixed weight and with fixed total in degree Knoise. 
""" W = np.zeros((Nbm, Nnoise)) NEnoise = int(gamma * Nnoise) KEnoise = int(gamma * Knoise) KInoise = int(Knoise - KEnoise) for l in W: indE = np.random.permutation(np.arange(0, NEnoise))[:KEnoise] l[indE] = w indI = np.random.permutation(np.arange(NEnoise, Nnoise))[:KInoise] l[indI] = -g * w return W def create_noise_weight_matrix_2dshuffle(Nbm, Nnoise, gamma, g, w, epsilon): return create_noise_weight_matrix_2dshuffle_fixed_indegree(Nbm, Nnoise, gamma, g, w, int(epsilon * Nnoise)) def create_noise_weight_matrix_2dshuffle_fixed_indegree(Nbm, Nnoise, gamma, g, w, Knoise): """create a random realizations of a weight matrix for Nnoise sources projecting to Nbm targets with identity of presynaptic neurons shuffled across E/I populations """ W = np.zeros((Nbm, Nnoise)) KE = int(gamma * Knoise) KI = int(Knoise - KE) for l in range(Nbm): ind = np.random.permutation(np.arange(0, Nnoise))[:KE + KI] W[l, ind[:KE]] = w W[l, ind[KE:]] = -g * w return W def _generate_template(Nbm, K, Kshared, w, Ktot, N, random=False): assert(Nbm > 0 and K > 0) template = np.zeros((Nbm, K)) l = 0 i = 0 Kshared_counts = np.zeros(Nbm) while l < Nbm: if l == 0: template[l, i] = w i += 1 if i == K: i = l l += 1 if random: Kshared = scipy.random.binomial(Ktot, 1. * Ktot / N) else: if Kshared_counts[l] < Kshared: template[l, i] = w Kshared_counts[l] += 1 i += 1 if Kshared_counts[l] == Kshared: l += 1 if random: Kshared = scipy.random.binomial(Ktot, 1. 
* Ktot / N) return Kshared_counts, template def create_noise_weight_matrix_fixed_pairwise(Nbm, Nnoise, gamma, g, w, epsilon, random_shared=False): return create_noise_weight_matrix_fixed_pairwise_fixed_indegree(Nbm, Nnoise, gamma, g, w, int(epsilon * Nnoise), random_shared=random_shared) def create_noise_weight_matrix_fixed_pairwise_fixed_indegree(Nbm, Nnoise, gamma, g, w, Knoise, random_shared=False): """create a random realizations of a weight matrix for Nnoise sources projecting to Nbm targets with identity of presynaptic neurons shuffled across E/I populations. the number of shared sources (KEshared/KIshared) is fixed and equal for each pair of targets. """ NE = int(gamma * Nnoise) NI = int(Nnoise - NE) KE = int(gamma * Knoise) KI = int(Knoise - KE) KEshared = 0 KIshared = 0 if NE > 0: KEshared = int(1. * KE ** 2 / NE) if NI > 0: KIshared = int(1. * KI ** 2 / NI) # check whether it is possible to realize desired connectivity; # this translate to (Nbm - 1 ) * epsilon <= 1 assert(KEshared * (Nbm - 1) <= KE), '[error] impossible parameter choices' assert(KIshared * (Nbm - 1) <= KI), '[error] impossible parameter choices' W = np.zeros((Nbm, NE + NI)) for k in xrange(2): N = [NE, NI][k] K = [KE, KI][k] Kshared = [KEshared, KIshared][k] wt = [w, -g * w][k] if K > 0: offset_i = k * NE Kshared_offset = np.zeros(Nbm) for l in xrange(Nbm): Kshared_counts, template = _generate_template( Nbm - l, K - Kshared_offset[l], Kshared, wt, K, N, random_shared) W[l:Nbm, offset_i:offset_i + K - Kshared_offset[l]] = template offset_i += K - Kshared_offset[l] Kshared_offset[l:] += Kshared_counts return W def create_indep_noise_weight_matrix(Nbm, Knoise, gamma, g, w): """create a weight matrix for Nbm * Knoise sources projecting to Nbm targets with a fixed indegree of Knoise. no shared inputs are allowed, hence each target receives uncorrelated input of the sources are uncorrelated. 
""" Nnoise = Nbm * Knoise W = np.zeros((Nbm, Nnoise)) KE = int(gamma * Knoise) for l in range(Nbm): indE = np.arange(l * Knoise, l * Knoise + KE) W[l, indE] = w indI = np.arange(l * Knoise + KE, (l + 1) * Knoise) W[l, indI] = -g * w return W def get_energy(W, b, s, beta=1.): """returns the energy of a state in a boltzmann machine""" return -1. * beta * (0.5 * np.dot(s.T, np.dot(W, s)) + np.dot(b, s)) def get_theo_joints(W, b, beta): """calculate the theoretical state distribution for a Boltzmann machine """ N = len(b) joints = [] states = get_states(N) for s in states: joints.append(np.exp(-1. *
pei_holidays) def test_family_day(self): ab_holidays = holidays.CA(prov="AB") bc_holidays = holidays.CA(prov="BC") mb_holidays = holidays.CA(prov="MB") sk_holidays = holidays.CA(prov="SK") for dt in [date(1990, 2, 19), date(1999, 2, 15), date(2000, 2, 21), date(2006, 2, 20)]: self.assertNotIn(dt, self.holidays) self.assertIn(dt, ab_holidays) self.assertNotIn(dt, bc_holidays) self.assertNotIn(dt, mb_holidays) self.assertNotIn(dt, sk_holidays) dt = date(2007, 2, 19) self.assertNotIn(dt, self.holidays) self.assertIn(dt, ab_holidays) self.assertNotIn(dt, bc_holidays) self.assertNotIn(dt, mb_holidays) self.assertIn(dt, sk_holidays) for dt in [date(2008, 2, 18), date(2012, 2, 20), date(2014, 2, 17), date(2018, 2, 19)]: self.assertIn(dt, self.holidays) self.assertIn(dt, ab_holidays) self.assertNotIn(dt, bc_holidays) self.assertIn(dt, mb_holidays) self.assertIn(dt, sk_holidays) for dt in [date(2019, 2, 18), date(2020, 2, 17)]: self.assertIn(dt, self.holidays) self.assertIn(dt, ab_holidays) self.assertIn(dt, bc_holidays) self.assertIn(dt, mb_holidays) self.assertIn(dt, sk_holidays) for dt in [date(2013, 2, 11), date(2016, 2, 8)]: self.assertNotIn(dt, self.holidays) self.assertNotIn(dt, ab_holidays) self.assertIn(dt, bc_holidays) self.assertNotIn(dt, mb_holidays) self.assertNotIn(dt, sk_holidays) self.assertEqual(mb_holidays[date(2014, 2, 17)], "Louis Riel Day") def test_st_patricks_day(self): nl_holidays = holidays.CA(prov="NL", observed=False) for dt in [date(1900, 3, 19), date(1999, 3, 15), date(2000, 3, 20), date(2012, 3, 19), date(2013, 3, 18), date(2014, 3, 17), date(2015, 3, 16), date(2016, 3, 14), date(2020, 3, 16)]: self.assertNotIn(dt, self.holidays) self.assertIn(dt, nl_holidays) self.assertNotIn(dt + relativedelta(days=-1), nl_holidays) self.assertNotIn(dt + relativedelta(days=+1), nl_holidays) def test_good_friday(self): qc_holidays = holidays.CA(prov="QC") for dt in [date(1900, 4, 13), date(1901, 4, 5), date(1902, 3, 28), date(1999, 4, 2), date(2000, 4, 21), 
date(2010, 4, 2), date(2018, 3, 30), date(2019, 4, 19), date(2020, 4, 10)]: self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) self.assertNotIn(dt, qc_holidays) def test_easter_monday(self): qc_holidays = holidays.CA(prov="QC") for dt in [date(1900, 4, 16), date(1901, 4, 8), date(1902, 3, 31), date(1999, 4, 5), date(2000, 4, 24), date(2010, 4, 5), date(2018, 4, 2), date(2019, 4, 22), date(2020, 4, 13)]: self.assertNotIn(dt, self.holidays) self.assertIn(dt, qc_holidays) self.assertNotIn(dt + relativedelta(days=-1), qc_holidays) self.assertNotIn(dt + relativedelta(days=+1), qc_holidays) def test_st_georges_day(self): nl_holidays = holidays.CA(prov="NL") for dt in [date(1990, 4, 23), date(1999, 4, 26), date(2000, 4, 24), date(2010, 4, 19), date(2016, 4, 25), date(2020, 4, 20)]: self.assertNotIn(dt, self.holidays) self.assertIn(dt, nl_holidays) self.assertNotIn(dt + relativedelta(days=-1), nl_holidays) self.assertNotIn(dt + relativedelta(days=+1), nl_holidays) def test_victoria_day(self): for dt in [date(1953, 5, 18), date(1999, 5, 24), date(2000, 5, 22), date(2010, 5, 24), date(2015, 5, 18), date(2020, 5, 18)]: self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_national_aboriginal_day(self): nt_holidays = holidays.CA(prov="NT") self.assertNotIn(date(1995, 6, 21), nt_holidays) for year in range(1996, 2100): dt = date(year, 6, 21) self.assertNotIn(dt, self.holidays) self.assertIn(dt, nt_holidays) self.assertNotIn(dt + relativedelta(days=-1), nt_holidays) self.assertNotIn(dt + relativedelta(days=+1), nt_holidays) def test_st_jean_baptiste_day(self): qc_holidays = holidays.CA(prov="QC", observed=False) self.assertNotIn(date(1924, 6, 24), qc_holidays) for year in range(1925, 2100): dt = date(year, 6, 24) self.assertNotIn(dt, self.holidays) self.assertIn(dt, 
qc_holidays) self.assertNotIn(dt + relativedelta(days=-1), qc_holidays) self.assertNotIn(dt + relativedelta(days=+1), qc_holidays) self.assertNotIn(date(2001, 6, 25), qc_holidays) qc_holidays.observed = True self.assertIn(date(2001, 6, 25), qc_holidays) def test_discovery_day(self): nl_holidays = holidays.CA(prov="NL") yu_holidays = holidays.CA(prov="YU") for dt in [date(1997, 6, 23), date(1999, 6, 21), date(2000, 6, 26), date(2010, 6, 21), date(2016, 6, 27), date(2020, 6, 22)]: self.assertNotIn(dt, self.holidays) self.assertIn(dt, nl_holidays) self.assertNotIn(dt, yu_holidays) for dt in [date(1912, 8, 19), date(1999, 8, 16), date(2000, 8, 21), date(2006, 8, 21), date(2016, 8, 15), date(2020, 8, 17)]: self.assertNotIn(dt, self.holidays) self.assertNotIn(dt, nl_holidays) self.assertIn(dt, yu_holidays) def test_canada_day(self): for year in range(1900, 2100): dt = date(year, 7, 1) self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) self.assertNotIn(date(2006, 7, 3), self.holidays) self.assertNotIn(date(2007, 7, 2), self.holidays) self.holidays.observed = True self.assertIn(date(2006, 7, 3), self.holidays) self.assertIn(date(2007, 7, 2), self.holidays) def test_nunavut_day(self): nu_holidays = holidays.CA(prov="NU", observed=False) self.assertNotIn(date(1999, 7, 9), nu_holidays) self.assertNotIn(date(2000, 7, 9), nu_holidays) self.assertIn(date(2000, 4, 1), nu_holidays) for year in range(2001, 2100): dt = date(year, 7, 9) self.assertNotIn(dt, self.holidays) self.assertIn(dt, nu_holidays) self.assertNotIn(dt + relativedelta(days=-1), nu_holidays) self.assertNotIn(dt + relativedelta(days=+1), nu_holidays) self.assertNotIn(date(2017, 7, 10), nu_holidays) nu_holidays.observed = True self.assertIn(date(2017, 7, 10), nu_holidays) def test_civic_holiday(self): bc_holidays = holidays.CA(prov="BC") for dt in [date(1900, 8, 6), date(1955, 8, 1), date(1973, 8, 6)]: 
self.assertIn(dt, self.holidays) self.assertNotIn(dt, bc_holidays) for dt in [date(1974, 8, 5), date(1999, 8, 2), date(2000, 8, 7), date(2010, 8, 2), date(2015, 8, 3), date(2020, 8, 3)]: self.assertIn(dt, self.holidays) self.assertIn(dt, bc_holidays) def test_labour_day(self): self.assertNotIn(date(1893, 9, 4), self.holidays) for dt in [date(1894, 9, 3), date(1900, 9, 3), date(1999, 9, 6), date(2000, 9, 4), date(2014, 9, 1), date(2015, 9, 7)]: self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_thanksgiving(self): ns_holidays = holidays.CA(prov="NB") for dt in [date(1931, 10, 12), date(1990, 10, 8), date(1999, 10, 11), date(2000, 10, 9), date(2013, 10, 14), date(2020, 10, 12)]: self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) self.assertNotIn(dt, ns_holidays) def test_remembrance_day(self): ab_holidays = holidays.CA(prov="AB", observed=False) nl_holidays = holidays.CA(prov="NL", observed=False) self.assertNotIn(date(1930, 11, 11), ab_holidays) self.assertNotIn(date(1930, 11, 11), nl_holidays) for year in range(1931, 2100): dt = date(year, 11, 11) self.assertNotIn(dt, self.holidays) self.assertIn(dt, ab_holidays) self.assertIn(dt, nl_holidays) self.assertNotIn(dt + relativedelta(days=-1), nl_holidays) self.assertNotIn(dt + relativedelta(days=+1), nl_holidays) self.assertNotIn(date(2007, 11, 12), ab_holidays) self.assertNotIn(date(2007, 11, 12), nl_holidays) ab_holidays.observed = True nl_holidays.observed = True self.assertNotIn(date(2007, 11, 12), ab_holidays) self.assertIn(date(2007, 11, 12), nl_holidays) def test_christmas_day(self): for year in range(1900, 2100): dt = date(year, 12, 25) self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(date(2010, 12, 24), self.holidays) 
self.assertNotEqual(self.holidays[date(2011, 12, 26)], "Christmas Day (Observed)") self.holidays.observed = True self.assertIn(date(2010, 12, 24), self.holidays) self.assertEqual(self.holidays[date(2011, 12, 26)], "Christmas Day (Observed)") def test_boxing_day(self): for year in range(1900, 2100): dt = date(year, 12, 26) self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) self.assertNotIn(date(2009, 12, 28), self.holidays) self.assertNotIn(date(2010, 12, 27), self.holidays) self.holidays.observed = True self.assertIn(date(2009, 12, 28), self.holidays) self.assertIn(date(2010, 12, 27), self.holidays) class TestCO(unittest.TestCase): def setUp(self): self.holidays = holidays.CO(observed=True) def test_2016(self): # http://www.officeholidays.com/countries/colombia/ self.assertIn(date(2016, 1, 1), self.holidays) self.assertIn(date(2016, 1, 11), self.holidays) self.assertIn(date(2016, 3, 21), self.holidays) self.assertIn(date(2016, 3, 24), self.holidays) self.assertIn(date(2016, 3, 25), self.holidays) self.assertIn(date(2016, 5, 1), self.holidays) self.assertIn(date(2016, 5, 9), self.holidays) self.assertIn(date(2016, 5, 30), self.holidays) self.assertIn(date(2016, 6, 6), self.holidays) self.assertIn(date(2016, 7, 4), self.holidays) self.assertIn(date(2016, 7, 20), self.holidays) self.assertIn(date(2016, 8, 7), self.holidays) self.assertIn(date(2016, 8, 15), self.holidays) self.assertIn(date(2016, 10, 17), self.holidays) self.assertIn(date(2016, 11, 7), self.holidays) self.assertIn(date(2016, 11, 14), self.holidays) self.assertIn(date(2016, 12, 8), self.holidays) self.assertIn(date(2016, 12, 25), self.holidays) def test_others(self): # holidays falling on weekend self.assertNotIn(date(2017, 1, 1), self.holidays) self.assertNotIn(date(2014, 7, 20), self.holidays) self.assertNotIn(date(2018, 8, 12), self.holidays) self.assertIn(date(2014, 1, 6), self.holidays) self.assertIn(date(2012, 3, 19), self.holidays) 
self.assertIn(date(2015, 6, 29), self.holidays) self.assertIn(date(2010, 8, 16), self.holidays) self.assertIn(date(2015, 10, 12), self.holidays) self.assertIn(date(2010, 11, 1), self.holidays) self.assertIn(date(2013, 11, 11), self.holidays) self.holidays.observed = False self.assertIn(date(2016, 5, 5), self.holidays) self.assertIn(date(2016, 5, 26), self.holidays) class TestMX(unittest.TestCase): def setUp(self): self.holidays = holidays.MX(observed=False) def test_new_years(self): self.assertNotIn(date(2010, 12, 31), self.holidays) self.assertNotIn(date(2017, 1, 2), self.holidays) self.holidays.observed = True self.assertIn(date(2010, 12, 31), self.holidays) self.assertIn(date(2017, 1, 2), self.holidays) self.holidays.observed = False for year in range(1900, 2100): dt = date(year, 1, 1) self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_constitution_day(self): for dt in [date(2005, 2, 5), date(2006, 2, 5), date(2007, 2, 5), date(2008, 2, 4), date(2009, 2, 2), date(2010, 2, 1), date(2015, 2, 2), date(2016, 2, 1), date(2020, 2, 3)]: self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_benito_juarez(self): for dt in [date(2005, 3, 21), date(2006, 3, 21), date(2007, 3, 19), date(2008, 3, 17), date(2009, 3, 16), date(2010, 3, 15), date(2015, 3, 16), date(2016, 3, 21), date(2020, 3, 16)]: self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_labor_day(self): self.assertNotIn(date(2010, 4, 30), self.holidays) self.assertNotIn(date(2011, 5, 2), self.holidays) self.holidays.observed = True self.assertIn(date(2010, 4, 30), self.holidays) self.assertIn(date(2011, 5, 2), self.holidays) self.holidays.observed = False self.assertNotIn(date(1922, 5, 
1), self.holidays) for year in range(1923, 2100): dt = date(year, 5, 1) self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_independence_day(self): self.assertNotIn(date(2006, 9, 15), self.holidays) self.assertNotIn(date(2007, 9, 17), self.holidays) self.holidays.observed = True self.assertIn(date(2006, 9, 15), self.holidays) self.assertIn(date(2007, 9, 17), self.holidays) self.holidays.observed = False for year in range(1900, 2100): dt = date(year, 9, 16) self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_revolution_day(self): for dt in [date(2005, 11, 20), date(2006, 11, 20), date(2007, 11, 19), date(2008, 11, 17), date(2009, 11, 16), date(2010, 11, 15), date(2015, 11, 16), date(2016, 11, 21), date(2020, 11, 16)]: self.assertIn(dt, self.holidays) self.assertNotIn(dt + relativedelta(days=-1), self.holidays) self.assertNotIn(dt + relativedelta(days=+1), self.holidays) def test_change_of_government(self): self.assertNotIn(date(2012, 11, 30), self.holidays) self.assertNotIn(date(2024, 12, 2), self.holidays) self.holidays.observed = True self.assertIn(date(2012, 11, 30), self.holidays) self.assertIn(date(2024, 12, 2), self.holidays) self.holidays.observed = False for year in range(1970, 2100): dt = date(year, 12, 1) if (2018 - year) % 6 == 0: self.assertIn(dt, self.holidays)
self.assertRaises( cinder_exceptions.ImageUnacceptable, self._driver.copy_image_to_volume, context, volume, image_service, image_id) validate_disk_format.assert_called_once_with(image_meta['disk_format']) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_validate_disk_format') @mock.patch.object(VMDK_DRIVER, '_get_adapter_type', return_value=volumeops.VirtualDiskAdapterType.BUS_LOGIC) @mock.patch('cinder.volume.drivers.vmware.volumeops.' 'VirtualDiskAdapterType.validate') @mock.patch('cinder.volume.drivers.vmware.vmdk.ImageDiskType.' 'validate') @mock.patch.object(VMDK_DRIVER, '_create_volume_from_non_stream_optimized_image') @mock.patch.object(VMDK_DRIVER, '_fetch_stream_optimized_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def _test_copy_image_to_volume(self, extend_backing, vops, fetch_stream_optimized_image, create_volume_from_non_stream_opt_image, validate_image_disk_type, validate_image_adapter_type, get_adapter_type, validate_disk_format, get_disk_type, vmware_disk_type='streamOptimized', backing_disk_size=VOL_SIZE, call_extend_backing=False, container_format='bare'): image_service = mock.Mock() image_meta = self._create_image_meta(vmware_disktype=vmware_disk_type, container_format=container_format) image_service.show.return_value = image_meta backing = mock.sentinel.backing vops.get_backing.return_value = backing vops.get_disk_size.return_value = backing_disk_size * units.Gi disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type context = mock.sentinel.context volume = self._create_volume_dict() image_id = mock.sentinel.image_id self._driver.copy_image_to_volume( context, volume, image_service, image_id) validate_disk_format.assert_called_once_with(image_meta['disk_format']) validate_image_disk_type.assert_called_once_with( image_meta['properties']['vmware_disktype']) 
validate_image_adapter_type.assert_called_once_with( image_meta['properties']['vmware_adaptertype']) if vmware_disk_type == 'streamOptimized': fetch_stream_optimized_image.assert_called_once_with( context, volume, image_service, image_id, image_meta['size'], image_meta['properties']['vmware_adaptertype']) else: create_volume_from_non_stream_opt_image.assert_called_once_with( context, volume, image_service, image_id, image_meta['size'], image_meta['properties']['vmware_adaptertype'], image_meta['properties']['vmware_disktype']) vops.get_disk_size.assert_called_once_with(backing) if call_extend_backing: extend_backing.assert_called_once_with(backing, volume['size'], disk_type) else: self.assertFalse(extend_backing.called) @ddt.data('sparse', 'preallocated', 'streamOptimized') def test_copy_image_to_volume(self, vmware_disk_type): self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type) @ddt.data('sparse', 'preallocated', 'streamOptimized') def test_copy_image_to_volume_with_extend_backing(self, vmware_disk_type): self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type, backing_disk_size=1, call_extend_backing=True) def test_copy_image_to_volume_with_ova_container(self): self._test_copy_image_to_volume(container_format='ova') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'_get_disk_type') @mock.patch.object(VMDK_DRIVER, '_check_disk_conversion') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch.object(VMDK_DRIVER, '_create_backing') @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image') @mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_preallocated_image') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') def _test_create_volume_from_non_stream_optimized_image( self, delete_tmp_backing, select_ds_for_volume, get_storage_profile_id, create_disk_from_preallocated_image, create_disk_from_sparse_image, vops, get_ds_name_folder_path, create_backing, generate_uuid, check_disk_conversion, get_disk_type, image_disk_type='sparse', disk_conversion=False): disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type check_disk_conversion.return_value = disk_conversion volume = self._create_volume_dict() if disk_conversion: disk_name = "6b77b25a-9136-470e-899e-3c930e570d8e" generate_uuid.return_value = disk_name else: disk_name = volume['name'] backing = mock.sentinel.backing create_backing.return_value = backing ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_ds_name_folder_path.return_value = (ds_name, folder_path) host = mock.sentinel.host dc_ref = mock.sentinel.dc_ref vops.get_host.return_value = host vops.get_dc.return_value = dc_ref vmdk_path = mock.Mock(spec=volumeops.FlatExtentVirtualDiskPath) create_disk_from_sparse_image.return_value = vmdk_path create_disk_from_preallocated_image.return_value = vmdk_path profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id if disk_conversion: rp = mock.sentinel.rp folder = mock.sentinel.folder datastore = mock.sentinel.datastore summary = 
mock.Mock(datastore=datastore) select_ds_for_volume.return_value = (host, rp, folder, summary) clone = mock.sentinel.clone vops.clone_backing.return_value = clone context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = units.Gi adapter_type = mock.sentinel.adapter_type self._driver._create_volume_from_non_stream_optimized_image( context, volume, image_service, image_id, image_size_in_bytes, adapter_type, image_disk_type) check_disk_conversion.assert_called_once_with(image_disk_type, mock.sentinel.disk_type) if disk_conversion: create_backing.assert_called_once_with( volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True, vmdk.CREATE_PARAM_BACKING_NAME: disk_name, vmdk.CREATE_PARAM_TEMP_BACKING: True}) else: create_backing.assert_called_once_with( volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True}) if image_disk_type == 'sparse': create_disk_from_sparse_image.assert_called_once_with( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name) else: create_disk_from_preallocated_image.assert_called_once_with( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name, adapter_type) get_storage_profile_id.assert_called_once_with(volume) vops.attach_disk_to_backing.assert_called_once_with( backing, image_size_in_bytes / units.Ki, disk_type, adapter_type, profile_id, vmdk_path.get_descriptor_ds_file_path()) if disk_conversion: select_ds_for_volume.assert_called_once_with(volume) extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], volumeops.BACKING_UUID_KEY: volume['id']} vops.clone_backing.assert_called_once_with( volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=disk_type, host=host, resource_pool=rp, extra_config=extra_config, folder=folder) delete_tmp_backing.assert_called_once_with(backing) vops.update_backing_disk_uuid(clone, volume['id']) else: 
            # --- fragment: final statement of the previous (truncated)
            # --- _test_create_volume_from_non_stream_optimized_image method.
            vops.update_backing_disk_uuid(backing, volume['id'])

    @ddt.data('sparse', 'preallocated')
    def test_create_volume_from_non_stream_optimized_image(self,
                                                           image_disk_type):
        # Parameterized wrapper: no disk-type conversion.
        self._test_create_volume_from_non_stream_optimized_image(
            image_disk_type=image_disk_type)

    @ddt.data('sparse', 'preallocated')
    def test_create_volume_from_non_stream_opt_image_with_disk_conversion(
            self, image_disk_type):
        # Parameterized wrapper: exercise the disk-type conversion path.
        self._test_create_volume_from_non_stream_optimized_image(
            image_disk_type=image_disk_type, disk_conversion=True)

    def _test_get_vsphere_url(self, direct_url, exp_vsphere_url=None):
        """Drive _get_vsphere_url with a stubbed image location and check that
        only vsphere:// direct URLs are returned (None otherwise)."""
        image_service = mock.Mock()
        # get_location returns (direct_url, locations); only the first matters.
        image_service.get_location.return_value = (direct_url, [])
        context = mock.sentinel.context
        image_id = mock.sentinel.image_id
        ret = self._driver._get_vsphere_url(context, image_service, image_id)
        self.assertEqual(exp_vsphere_url, ret)
        image_service.get_location.assert_called_once_with(context, image_id)

    def test_get_vsphere_url(self):
        # A vsphere:// URL is passed through unchanged.
        url = "vsphere://foo/folder/glance/img_uuid?dcPath=dc1&dsName=ds1"
        self._test_get_vsphere_url(url, exp_vsphere_url=url)

    def test_get_vsphere_url_(self):
        # A non-vsphere scheme yields the default expectation (None).
        url = "http://foo/folder/glance/img_uuid?dcPath=dc1&dsName=ds1"
        self._test_get_vsphere_url(url)

    # --- truncated at the chunk boundary: decorators and setup prologue of
    # --- _test_create_virtual_disk_from_preallocated_image (cut mid-statement).
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch('oslo_utils.uuidutils.generate_uuid')
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url')
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def _test_create_virtual_disk_from_preallocated_image(
            self, vops, copy_image, get_vsphere_url, flat_extent_path,
            generate_uuid, get_temp_image_folder, copy_temp_virtual_disk,
            vsphere_url=None):
        dc_ref = vmware_fake.ManagedObjectReference(value=mock.sentinel.dc_ref)
        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
        uuid =
mock.sentinel.uuid generate_uuid.return_value = uuid path = mock.Mock() dest_path = mock.Mock() flat_extent_path.side_effect = [path, dest_path] get_vsphere_url.return_value = vsphere_url context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dest_dc_ref = \ vmware_fake.ManagedObjectReference(value=mock.sentinel.dest_dc_ref) dest_ds_name = mock.sentinel.dest_ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = mock.sentinel.adapter_type ret = self._driver._create_virtual_disk_from_preallocated_image( context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) exp_flat_extent_path_calls = [ mock.call(ds_name, folder_path, uuid), mock.call(dest_ds_name, dest_folder_path, dest_disk_name)] self.assertEqual(exp_flat_extent_path_calls, flat_extent_path.call_args_list) create_descriptor = vops.create_flat_extent_virtual_disk_descriptor create_descriptor.assert_called_once_with( dc_ref, path, image_size_in_bytes / units.Ki, adapter_type, vmdk.EAGER_ZEROED_THICK_VMDK_TYPE) get_vsphere_url.assert_called_once_with( context, image_service, image_id) if vsphere_url: vops.copy_datastore_file.assert_called_once_with( vsphere_url, dc_ref, path.get_flat_extent_ds_file_path()) else: copy_image.assert_called_once_with( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, path.get_flat_extent_file_path()) copy_temp_virtual_disk.assert_called_once_with(dc_ref, path, dest_dc_ref, dest_path) self.assertEqual(dest_path, ret) def test_create_virtual_disk_from_preallocated_image(self): self._test_create_virtual_disk_from_preallocated_image() def test_create_virtual_disk_from_preallocated_image_on_vsphere(self): self._test_create_virtual_disk_from_preallocated_image( vsphere_url=mock.sentinel.vsphere_url) @mock.patch.object(VMDK_DRIVER, 
    # --- fragment: completes a @mock.patch.object decorator begun in the
    # --- previous chunk.
    '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder')
    @mock.patch(
        'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
    @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url', return_value=None)
    @mock.patch.object(VMDK_DRIVER, '_copy_image')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy(
            self, vops, copy_image, get_vsphere_url, flat_extent_path,
            get_temp_image_folder, copy_temp_virtual_disk):
        """When the temp folder already sits on the destination datacenter and
        datastore, no temp-disk copy should be performed."""
        dc_ref = vmware_fake.ManagedObjectReference(value=mock.sentinel.dc_ref)
        ds_name = mock.sentinel.ds_name
        folder_path = mock.sentinel.folder_path
        get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path)
        path = mock.Mock()
        flat_extent_path.return_value = path

        context = mock.sentinel.context
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 2 * units.Gi
        # Destination deliberately matches the temp-image location (same
        # datacenter moref value and same datastore name).
        dc_ref_value = mock.sentinel.dc_ref
        dest_dc_ref = vmware_fake.ManagedObjectReference(value=dc_ref_value)
        dest_ds_name = ds_name
        dest_folder_path = mock.sentinel.dest_folder_path
        dest_disk_name = mock.sentinel.dest_disk_name
        adapter_type = mock.sentinel.adapter_type
        ret = self._driver._create_virtual_disk_from_preallocated_image(
            context, image_service, image_id, image_size_in_bytes,
            dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name,
            adapter_type)

        # Only one path is built: the final destination flat-extent path.
        flat_extent_path.assert_called_once_with(
            dest_ds_name, dest_folder_path, dest_disk_name)
        create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
        create_descriptor.assert_called_once_with(
            dc_ref, path, image_size_in_bytes / units.Ki, adapter_type,
            vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
        copy_image.assert_called_once_with(
            context, dc_ref, image_service, image_id, image_size_in_bytes,
            ds_name, path.get_flat_extent_file_path())
        # Key assertion: the image landed in place, so no disk copy happened.
        self.assertFalse(copy_temp_virtual_disk.called)
        self.assertEqual(path, ret)

    # --- truncated at the chunk boundary: decorators of the next test
    # --- (copy-error scenario) continue in the following chunk.
    @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
    @mock.patch.object(VMDK_DRIVER,
'_get_temp_image_folder') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url', return_value=None) @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_virtual_disk_from_preallocated_image_with_copy_error( self, vops, copy_image, get_vsphere_url, flat_extent_path, generate_uuid, get_temp_image_folder, copy_temp_virtual_disk): dc_ref = vmware_fake.ManagedObjectReference(value=mock.sentinel.dc_ref) ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) uuid = mock.sentinel.uuid generate_uuid.return_value = uuid path = mock.Mock() dest_path = mock.Mock() flat_extent_path.side_effect = [path, dest_path] copy_image.side_effect = exceptions.VimException("error") context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dest_dc_ref = \ vmware_fake.ManagedObjectReference(value=mock.sentinel.dest_dc_ref) dest_ds_name = mock.sentinel.dest_ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = mock.sentinel.adapter_type self.assertRaises( exceptions.VimException, self._driver._create_virtual_disk_from_preallocated_image, context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) vops.delete_file.assert_called_once_with( path.get_descriptor_ds_file_path(), dc_ref) self.assertFalse(copy_temp_virtual_disk.called) @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.' 
'MonolithicSparseVirtualDiskPath') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url') @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def _test_create_virtual_disk_from_sparse_image( self, vops, copy_image, get_vsphere_url, copy_temp_virtual_disk, flat_extent_path, sparse_path, generate_uuid, vsphere_url=None): uuid = mock.sentinel.uuid generate_uuid.return_value = uuid src_path = mock.Mock() sparse_path.return_value = src_path dest_path = mock.Mock() flat_extent_path.return_value = dest_path get_vsphere_url.return_value = vsphere_url context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dc_ref = mock.sentinel.dc_ref ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path disk_name = mock.sentinel.disk_name ret = self._driver._create_virtual_disk_from_sparse_image( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name) sparse_path.assert_called_once_with(ds_name, folder_path, uuid) get_vsphere_url.assert_called_once_with( context, image_service, image_id) if vsphere_url: vops.copy_datastore_file.assert_called_once_with( vsphere_url, dc_ref, src_path.get_descriptor_ds_file_path()) else: copy_image.assert_called_once_with( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, src_path.get_descriptor_file_path()) flat_extent_path.assert_called_once_with( ds_name, folder_path, disk_name) copy_temp_virtual_disk.assert_called_once_with( dc_ref, src_path, dc_ref, dest_path) self.assertEqual(dest_path, ret) def test_create_virtual_disk_from_sparse_image(self): self._test_create_virtual_disk_from_sparse_image() def test_create_virtual_disk_from_sparse_image_on_vsphere(self): self._test_create_virtual_disk_from_sparse_image( 
            # --- fragment: closing argument of the previous (truncated)
            # --- test_create_virtual_disk_from_sparse_image_on_vsphere call.
            vsphere_url=mock.sentinel.vsphere_url)

    @mock.patch.object(VMDK_DRIVER, '_select_datastore')
    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_get_temp_image_folder(self, vops, select_datastore):
        """_get_temp_image_folder must select a datastore sized for the image,
        create the temp-images folder there, and return (dc, ds, folder)."""
        host = mock.sentinel.host
        resource_pool = mock.sentinel.rp
        summary = mock.Mock()
        ds_name = mock.sentinel.ds_name
        summary.name = ds_name
        select_datastore.return_value = (host, resource_pool, summary)

        dc = mock.sentinel.dc
        vops.get_dc.return_value = dc

        image_size = 2 * units.Gi
        ret = self._driver._get_temp_image_folder(image_size)

        self.assertEqual((dc, ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH),
                         ret)
        # Datastore selection must require enough space and restrict the
        # datastore type to VMFS/NFS/NFS41.
        exp_req = {
            hub.DatastoreSelector.SIZE_BYTES: image_size,
            hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE:
                {hub.DatastoreType.VMFS,
                 hub.DatastoreType.NFS,
                 hub.DatastoreType.NFS41}}
        select_datastore.assert_called_once_with(exp_req)
        vops.create_datastore_folder.assert_called_once_with(
            ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH, dc)

    # --- truncated at the chunk boundary: decorators of the next test
    # --- continue in the following chunk (mid string-literal).
    @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_extra_config') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch.object(image_transfer, 'download_stream_optimized_image') def _test_copy_image_to_volume_stream_optimized(self, download_image, session, vops, get_extra_config, get_disk_type, get_profile_id, select_ds_for_volume, download_error=False): host = mock.sentinel.host rp = mock.sentinel.rp folder = mock.sentinel.folder # NOTE(mriedem): The summary.name gets logged so it has to be a string summary = mock.Mock(name=six.text_type(mock.sentinel.ds_name)) select_ds_for_volume.return_value = (host, rp, folder, summary) profile_id = mock.sentinel.profile_id get_profile_id.return_value = profile_id disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type extra_config = mock.sentinel.extra_config get_extra_config.return_value = extra_config vm_create_spec = mock.sentinel.vm_create_spec vops.get_create_spec.return_value = vm_create_spec import_spec = mock.Mock() session.vim.client.factory.create.return_value = import_spec backing = mock.sentinel.backing if download_error: download_image.side_effect = exceptions.VimException vops.get_backing.return_value = backing else: download_image.return_value = backing context = mock.sentinel.context volume = self._create_volume_dict(size=3) image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size = 2 * units.Gi adapter_type = mock.sentinel.adapter_type if download_error: self.assertRaises( exceptions.VimException, self._driver._fetch_stream_optimized_image, context, volume, image_service, image_id, image_size, adapter_type) else: self._driver._fetch_stream_optimized_image( context, volume, image_service, image_id, image_size, adapter_type) select_ds_for_volume.assert_called_once_with(volume) vops.get_create_spec.assert_called_once_with( volume['name'], 0, disk_type, summary.name, profile_id=profile_id, 
adapter_type=adapter_type, extra_config=extra_config) self.assertEqual(vm_create_spec, import_spec.configSpec) download_image.assert_called_with( context, self._config.vmware_image_transfer_timeout_secs, image_service, image_id, session=session, host=self._config.vmware_host_ip, port=self._config.vmware_host_port, resource_pool=rp, vm_folder=folder, vm_import_spec=import_spec, image_size=image_size, http_method='POST') if download_error: self.assertFalse(vops.update_backing_disk_uuid.called) vops.delete_backing.assert_called_once_with(backing) else: vops.update_backing_disk_uuid.assert_called_once_with( backing, volume['id']) def test_copy_image_to_volume_stream_optimized(self): self._test_copy_image_to_volume_stream_optimized() def test_copy_image_to_volume_stream_optimized_with_download_error(self): self._test_copy_image_to_volume_stream_optimized(download_error=True) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) def test_copy_volume_to_image_when_attached(self, in_use): volume = self._create_volume_dict( status="uploading", attachment=[mock.sentinel.attachment_1]) self.assertRaises( cinder_exceptions.InvalidVolume, self._driver.copy_volume_to_image, mock.sentinel.context, volume, mock.sentinel.image_service, mock.sentinel.image_meta) in_use.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, '_validate_disk_format') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_create_backing') @mock.patch('oslo_vmware.image_transfer.upload_image') @mock.patch.object(VMDK_DRIVER, 'session') def _test_copy_volume_to_image( self, session, upload_image, create_backing, vops, validate_disk_format, backing_exists=True): backing = mock.sentinel.backing if backing_exists: vops.get_backing.return_value = backing else: vops.get_backing.return_value = None create_backing.return_value = backing vmdk_file_path = mock.sentinel.vmdk_file_path vops.get_vmdk_path.return_value = vmdk_file_path context = mock.sentinel.context volume = 
test_utils.create_volume( self._context, volume_type_id = fake_constants.VOLUME_TYPE_ID, updated_at = self.updated_at) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type(self._context.elevated(), id=fake_constants.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) image_service = mock.sentinel.image_service image_meta = self._create_image_meta() self._driver.copy_volume_to_image( context, volume, image_service, image_meta) validate_disk_format.assert_called_once_with(image_meta['disk_format']) vops.get_backing.assert_called_once_with(volume['name'], volume['id']) if not backing_exists: create_backing.assert_called_once_with(volume) vops.get_vmdk_path.assert_called_once_with(backing) upload_image.assert_called_once_with( context, self._config.vmware_image_transfer_timeout_secs, image_service, image_meta['id'], volume['project_id'], session=session, host=self._config.vmware_host_ip, port=self._config.vmware_host_port, store_id='fake-store', base_image_ref=None, vm=backing, vmdk_file_path=vmdk_file_path, vmdk_size=volume['size'] * units.Gi, image_name=image_meta['name'], image_version=1) def test_copy_volume_to_image(self): self._test_copy_volume_to_image() def test_copy_volume_to_image_with_no_backing(self): self._test_copy_volume_to_image(backing_exists=False) def test_in_use(self): volume = self._create_volume_dict( attachment=[mock.sentinel.attachment_1]) self.assertTrue(self._driver._in_use(volume)) def
            # --- fragment: tail of the previous (truncated) *_with_http_info
            # --- method, cut mid-assignment (``body_params``).
            = params['data']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml',
                                  'text/xml', 'application/javascript',
                                  'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json',
                                        'application/x-www-form-urlencoded',
                                        'application/xml', 'text/xml'])

        # Authentication setting
        auth_settings = ['access_token']

        # Delegate the actual HTTP exchange to the generated API client.
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ProductMaterial',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)

    def product_sizes_id_materials_rel_fk_delete(self, id, fk, **kwargs):
        """
        Remove the materials relation to an item by id.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.product_sizes_id_materials_rel_fk_delete(id, fk, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: ProductSize id (required)
        :param str fk: Foreign key for materials (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin sync/async dispatcher around the *_with_http_info variant;
        # only the data (not the full HTTP triple) is returned.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.product_sizes_id_materials_rel_fk_delete_with_http_info(id, fk, **kwargs)
        else:
            (data) = self.product_sizes_id_materials_rel_fk_delete_with_http_info(id, fk, **kwargs)
            return data

    # --- truncated at the chunk boundary: *_with_http_info implementation
    # --- continues in the following chunk (cut inside the docstring).
    def product_sizes_id_materials_rel_fk_delete_with_http_info(self, id, fk, **kwargs):
        """
        Remove the materials relation to an item by id.
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.product_sizes_id_materials_rel_fk_delete_with_http_info(id, fk, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str id: ProductSize id (required) :param str fk: Foreign key for materials (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['id', 'fk'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method product_sizes_id_materials_rel_fk_delete" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params) or (params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `product_sizes_id_materials_rel_fk_delete`") # verify the required parameter 'fk' is set if ('fk' not in params) or (params['fk'] is None): raise ValueError("Missing the required parameter `fk` when calling `product_sizes_id_materials_rel_fk_delete`") collection_formats = {} resource_path = '/ProductSizes/{id}/materials/rel/{fk}'.replace('{format}', 'json') path_params = {} if 'id' in params: path_params['id'] = params['id'] if 'fk' in params: path_params['fk'] = params['fk'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] 
= self.api_client.\ select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml']) # Authentication setting auth_settings = ['access_token'] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), collection_formats=collection_formats) def product_sizes_id_materials_rel_fk_head(self, id, fk, **kwargs): """ Check the existence of materials relation to an item by id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.product_sizes_id_materials_rel_fk_head(id, fk, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str id: ProductSize id (required) :param str fk: Foreign key for materials (required) :return: bool If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.product_sizes_id_materials_rel_fk_head_with_http_info(id, fk, **kwargs) else: (data) = self.product_sizes_id_materials_rel_fk_head_with_http_info(id, fk, **kwargs) return data def product_sizes_id_materials_rel_fk_head_with_http_info(self, id, fk, **kwargs): """ Check the existence of materials relation to an item by id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.product_sizes_id_materials_rel_fk_head_with_http_info(id, fk, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str id: ProductSize id (required) :param str fk: Foreign key for materials (required) :return: bool If the method is called asynchronously, returns the request thread. """ all_params = ['id', 'fk'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method product_sizes_id_materials_rel_fk_head" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params) or (params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `product_sizes_id_materials_rel_fk_head`") # verify the required parameter 'fk' is set if ('fk' not in params) or (params['fk'] is None): raise ValueError("Missing the required parameter `fk` when calling `product_sizes_id_materials_rel_fk_head`") collection_formats = {} resource_path = '/ProductSizes/{id}/materials/rel/{fk}'.replace('{format}', 'json') path_params = {} if 'id' in params: path_params['id'] = params['id'] if 'fk' in params: path_params['fk'] = params['fk'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml']) # Authentication setting auth_settings = 
['access_token'] return self.api_client.call_api(resource_path, 'HEAD', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='bool', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), collection_formats=collection_formats) def product_sizes_id_materials_rel_fk_put(self, id, fk, **kwargs): """ Add a related item by id for materials. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.product_sizes_id_materials_rel_fk_put(id, fk, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str id: ProductSize id (required) :param str fk: Foreign key for materials (required) :param ProductSizeMaterial data: :return: ProductSizeMaterial If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.product_sizes_id_materials_rel_fk_put_with_http_info(id, fk, **kwargs) else: (data) = self.product_sizes_id_materials_rel_fk_put_with_http_info(id, fk, **kwargs) return data def product_sizes_id_materials_rel_fk_put_with_http_info(self, id, fk, **kwargs): """ Add a related item by id for materials. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.product_sizes_id_materials_rel_fk_put_with_http_info(id, fk, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param str id: ProductSize id (required) :param str fk: Foreign key for materials (required) :param ProductSizeMaterial data: :return: ProductSizeMaterial If the method is called asynchronously, returns the request thread. """ all_params = ['id', 'fk', 'data'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method product_sizes_id_materials_rel_fk_put" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params) or (params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `product_sizes_id_materials_rel_fk_put`") # verify the required parameter 'fk' is set if ('fk' not in params) or (params['fk'] is None): raise ValueError("Missing the required parameter `fk` when calling `product_sizes_id_materials_rel_fk_put`") collection_formats = {} resource_path = '/ProductSizes/{id}/materials/rel/{fk}'.replace('{format}', 'json') path_params = {} if 'id' in params: path_params['id'] = params['id'] if 'fk' in params: path_params['fk'] = params['fk'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'data' in params: body_params = params['data'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml']) # Authentication setting auth_settings = ['access_token'] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, 
post_params=form_params,
                                        files=local_var_files,
                                        response_type='ProductSizeMaterial',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)

    # Generated client method (swagger-codegen); sync/async dispatch follows
    # the same callback convention as the other methods in this class.
    def product_sizes_id_patch(self, id, **kwargs):
        """
        Patch attributes for a model instance and persist it into the data source.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.product_sizes_id_patch(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: ProductSize id (required)
        :param ProductSize
h1 = np.histogram(dmat, bins=range(0, int(np.amin(im.shape)/2), spacing)) dmat = dmat[:, hits] h2 = np.histogram(dmat, bins=h1[1]) tpcf = namedtuple('two_point_correlation_function', ('distance', 'probability')) return tpcf(h2[1][:-1], h2[0]/h1[0]) def _radial_profile(autocorr, r_max, nbins=100): r""" Helper functions to calculate the radial profile of the autocorrelation Masks the image in radial segments from the center and averages the values The distance values are normalized and 100 bins are used as default. Parameters ---------- autocorr : ND-array The image of autocorrelation produced by FFT r_max : int or float The maximum radius in pixels to sum the image over Returns ------- result : named_tuple A named tupling containing an array of ``bins`` of radial position and an array of ``counts`` in each bin. """ if len(autocorr.shape) == 2: adj = np.reshape(autocorr.shape, [2, 1, 1]) inds = np.indices(autocorr.shape) - adj/2 dt = np.sqrt(inds[0]**2 + inds[1]**2) elif len(autocorr.shape) == 3: adj = np.reshape(autocorr.shape, [3, 1, 1, 1]) inds = np.indices(autocorr.shape) - adj/2 dt = np.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2) else: raise Exception('Image dimensions must be 2 or 3') bin_size = np.int(np.ceil(r_max/nbins)) bins = np.arange(bin_size, r_max, step=bin_size) radial_sum = np.zeros_like(bins) for i, r in enumerate(bins): # Generate Radial Mask from dt using bins mask = (dt <= r) * (dt > (r-bin_size)) radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask) # Return normalized bin and radially summed autoc norm_autoc_radial = radial_sum/np.max(autocorr) tpcf = namedtuple('two_point_correlation_function', ('distance', 'probability')) return tpcf(bins, norm_autoc_radial) def two_point_correlation_fft(im): r""" Calculates the two-point correlation function using fourier transforms Parameters ---------- im : ND-array The image of the void space on which the 2-point correlation is desired Returns ------- result : named_tuple A tuple containing the x and y data 
for plotting the two-point correlation function, using the *args feature of matplotlib's plot function. The x array is the distances between points and the y array is corresponding probabilities that points of a given distance both lie in the void space. Notes ----- The fourier transform approach utilizes the fact that the autocorrelation function is the inverse FT of the power spectrum density. For background read the Scipy fftpack docs and for a good explanation see: http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf """ # Calculate half lengths of the image hls = (np.ceil(np.shape(im))/2).astype(int) # Fourier Transform and shift image F = sp_ft.ifftshift(sp_ft.fftn(sp_ft.fftshift(im))) # Compute Power Spectrum P = np.absolute(F**2) # Auto-correlation is inverse of Power Spectrum autoc = np.absolute(sp_ft.ifftshift(sp_ft.ifftn(sp_ft.fftshift(P)))) tpcf = _radial_profile(autoc, r_max=np.min(hls)) return tpcf def pore_size_distribution(im, bins=10, log=True, voxel_size=1): r""" Calculate a pore-size distribution based on the image produced by the ``porosimetry`` or ``local_thickness`` functions. Parameters ---------- im : ND-array The array of containing the sizes of the largest sphere that overlaps each voxel. Obtained from either ``porosimetry`` or ``local_thickness``. bins : scalar or array_like Either an array of bin sizes to use, or the number of bins that should be automatically generated that span the data range. log : boolean If ``True`` (default) the size data is converted to log (base-10) values before processing. This can help to plot wide size distributions or to better visualize the in the small size region. Note that you can anti-log the radii values in the retunred ``tuple``, but the binning is performed on the logged radii values. voxel_size : scalar The size of a voxel side in preferred units. The default is 1, so the user can apply the scaling to the returned results after the fact. 
Returns ------- result : named_tuple A named-tuple containing several values: *R* or *logR* - radius, equivalent to ``bin_centers`` *pdf* - probability density function *cdf* - cumulative density function *satn* - phase saturation in differential form. For the cumulative saturation, just use *cfd* which is already normalized to 1. *bin_centers* - the center point of each bin *bin_edges* - locations of bin divisions, including 1 more value than the number of bins *bin_widths* - useful for passing to the ``width`` argument of ``matplotlib.pyplot.bar`` Notes ----- (1) To ensure the returned values represent actual sizes you can manually scale the input image by the voxel size first (``im *= voxel_size``) plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k') """ im = im.flatten() vals = im[im > 0]*voxel_size if log: vals = np.log10(vals) h = _parse_histogram(np.histogram(vals, bins=bins, density=True)) psd = namedtuple('pore_size_distribution', (log*'log' + 'R', 'pdf', 'cdf', 'satn', 'bin_centers', 'bin_edges', 'bin_widths')) return psd(h.bin_centers, h.pdf, h.cdf, h.relfreq, h.bin_centers, h.bin_edges, h.bin_widths) def _parse_histogram(h, voxel_size=1): delta_x = h[1] P = h[0] temp = P*(delta_x[1:] - delta_x[:-1]) C = np.cumsum(temp[-1::-1])[-1::-1] S = P*(delta_x[1:] - delta_x[:-1]) bin_edges = delta_x * voxel_size bin_widths = (delta_x[1:] - delta_x[:-1]) * voxel_size bin_centers = ((delta_x[1:] + delta_x[:-1])/2) * voxel_size psd = namedtuple('histogram', ('pdf', 'cdf', 'relfreq', 'bin_centers', 'bin_edges', 'bin_widths')) return psd(P, C, S, bin_centers, bin_edges, bin_widths) def chord_counts(im): r""" Finds the length of each chord in the supplied image and returns a list of their individual sizes Parameters ---------- im : ND-array An image containing chords drawn in the void space. Returns ------- result : 1D-array A 1D array with one element for each chord, containing its length. 
    Notes
    -----
    The returned array can be passed to ``plt.hist`` to plot the histogram,
    or to ``np.histogram`` to get the histogram data directly. Another useful
    function is ``np.bincount`` which gives the number of chords of each
    length in a format suitable for ``plt.plot``.
    """
    labels, N = spim.label(im > 0)
    # NOTE(review): the ``coordinates`` argument was deprecated and later
    # removed from scikit-image's ``regionprops`` — confirm the pinned
    # scikit-image version still accepts it.
    props = regionprops(labels, coordinates='xy')
    # Chord length == voxel count of each labelled region.
    chord_lens = np.array([i.filled_area for i in props])
    return chord_lens


def linear_density(im, bins=25, voxel_size=1, log=False):
    r"""
    Determines the probability that a point lies within a certain distance of
    the opposite phase *along a specified direction*

    This relates directly the radial density function defined by Torquato [1],
    but instead of reporting the probability of lying within a stated distance
    to the nearest solid in any direction, it considers only linear distances
    along orthogonal directions.The benefit of this is that anisotropy can be
    detected in materials by performing the analysis in multiple orthogonal
    directions.

    Parameters
    ----------
    im : ND-array
        An image with each voxel containing the distance to the nearest solid
        along a linear path, as produced by ``distance_transform_lin``.
    bins : int or array_like
        The number of bins or a list of specific bins to use
    voxel_size : scalar
        The side length of a voxel.  This is used to scale the chord lengths
        into real units.  Note this is applied *after* the binning, so
        ``bins``, if supplied, should be in terms of voxels, not length units.

    Returns
    -------
    result : named_tuple

    References
    ----------
    [1] Torquato, S. Random Heterogeneous Materials: Microstructure and
    Macroscopic Properties.
    Springer, New York (2002)
    """
    # NOTE(review): the ``log`` parameter is accepted but never used in this
    # body — confirm whether log-scaling of ``x`` was intended here.
    x = im[im > 0]
    h = list(np.histogram(x, bins=bins, density=True))
    h = _parse_histogram(h=h, voxel_size=voxel_size)
    cld = namedtuple('linear_density_function', ('L', 'pdf', 'cdf', 'relfreq',
                                                 'bin_centers', 'bin_edges',
                                                 'bin_widths'))
    return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq, h.bin_centers,
               h.bin_edges, h.bin_widths)


def chord_length_distribution(im, bins=None, log=False, voxel_size=1,
                              normalization='count'):
    r"""
    Determines the distribution of chord lengths in an image containing
    chords.

    Parameters
    ----------
    im : ND-image
        An image with chords drawn in the pore space, as produced by
        ``apply_chords`` or ``apply_chords_3d``.  ``im`` can be either
        boolean, in which case each chord will be identified using
        ``scipy.ndimage.label``, or numerical values in which case it is
        assumed that chords have already been identified and labeled.  In
        both cases, the size of each chord will be computed as the number of
        voxels belonging to each labelled region.
    bins : scalar or array_like
        If a scalar is given it is interpreted as the number of bins to use,
        and if an array is given they are used as the bins directly.
    log : Boolean
        If true, the logarithm of the chord lengths will be used, which can
        make the data more clear.
    normalization : string
        Indicates how to normalize the bin heights.  Options are:

        *'count' or 'number'* - (default) This simply counts the number of
        chords in each bin in the normal sense of a histogram.  This is the
        rigorous definition according to Torquato [1].

        *'length'* - This multiplies the number of chords in each bin by the
        chord
<reponame>laidbackware/ansible-for-nsxt<filename>library/nsxt_services.py #!/usr/bin/env python # # Copyright 2018 VMware, Inc. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_services short_description: 'Create a Service' description: "Creates a new Service. Required parameters are display_name, ports and state. Display_name is required to make module idempotent" version_added: '2.7' author: '<NAME>' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str description: description: Description of this resource. required: False type: str display_name: description: 'Display name' required: true type: str nsservice_element: description: "Custom services should conform to ALGTypeNSService, ICMPTypeNSService, IGMPTypeNSService, IPProtocolNSService or L4PortSetNSService schemas." 
required: True type: 'dict' alg: choices: ['ORACLE_TNS', 'FTP', 'SUN_RPC_TCP', 'SUN_RPC_UDP', 'MS_RPC_TCP', 'MS_RPC_UDP', 'NBNS_BROADCAST', 'NBDG_BROADCAST', 'TFTP'] description: 'The Application Layer Gateway (ALG) protocol. Consult the documentation for edge rules as not all protocols are supported on edge firewalls.' required: False type: str destination_ports: description: List of ports as integers. Max 15 required: False type: list icmp_code: description: ICMP message code required: False type: int icmp_type: description: ICMP message type required: False type: int l4_protocol: protocol: choices: ['ICMPv4', 'ICMPv6'] description: ICMP protocol type required: False type: list protocol_number: description: The IP protocol number required: False type: int resource_type: choices: ['ALGTypeNSService', 'IPProtocolNSService', 'L4PortSetNSService', 'ICMPTypeNSService', 'IGMPTypeNSService'] description: Type of service. required: False type: list source_ports: description: List of ports as integers. Max 15 required: False type: list state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true tags: description: 'Opaque identifiers meaningful to the API user. Max 30 items' required: false type: list scope: description: 'Tag scope. Tag searches may optionally be restricted by scope. Max len 128 charactors.' required: true type: str tag: description: ' Tag value. Identifier meaningful to user. Max len 128 charactors.' 
required: true type: str ''' EXAMPLES = ''' - name: Create ip set nsxt_services: hostname: "10.192.167.137" username: "admin" password: "<PASSWORD>" validate_certs: False description: "HTTPS Alt port example" display_name: HTTPS-ALT nsservice_element: destination_ports: - '8443' l4_protocol: TCP resource_type: L4PortSetNSService state: "present" tags: - scope: exmaple tag: https_alt ''' RETURN = '''# ''' import json, time, copy from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_service_params(args=None): '''Strip args from pararms that don't get passed within the JSON''' args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_all_request(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint): '''Handle the API service respondign with a cursor and make subsequent request.''' try: output_list = [] cursor = '' while True: (rc, resp) = request(manager_url + endpoint + cursor, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if resp['results']: output_list += resp['results'] if resp.__contains__('cursor'): cursor = '?cursor=' + resp['cursor'] else: break return output_list except Exception as err: module.fail_json(msg='Error accessing endpoint %s. 
\nError [%s]' % (endpoint, to_native(err))) def get_service_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): '''Retrn service dict from display name if display name is unique''' services = get_all_request(module, manager_url, mgr_username, mgr_password, validate_certs, '/ns-services') return_service = None for service in services: if service.__contains__('display_name') and service['display_name'] == display_name: if not return_service: # Handle there being 2 sections created with the same display name return_service = service else: module.fail_json(msg='Section with display name %s exists twice.' % (display_name)) return return_service def flatten_list_to_string(nsservice_element): '''Flatten sorted list of ports to a string to a concatenated string allow comparison''' for list_type in ['sourse_ports', 'destination_ports']: if nsservice_element.__contains__(list_type): ports_string = '' for port in sorted(nsservice_element[list_type]): ports_string += str(port) + ',' nsservice_element[list_type] = ports_string def convert_tag_dict_to_string(tag_list): '''Convert list of tag dicts to a list of strings''' existing_tag_strings = [] for tag in tag_list: tag_string = '' for key in ['tag', 'scope']: if tag.__contains__(key) and tag[key] != '': tag_string += key + tag[key] existing_tag_strings.append(tag_string) return existing_tag_strings def compare_tags(module, existing_tags, new_tags): '''Compare tags as lists of strings to check for differences''' if len(existing_tags) != len(new_tags): return True # Convert tag dictionaries to strings to allow list compare and account of empty values in either element. 
    existing_tag_strings = convert_tag_dict_to_string(existing_tags)
    new_tag_string = convert_tag_dict_to_string(new_tags)
    if existing_tag_strings != new_tag_string:
        return True
    return False


def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, service_params):
    '''Check if any element of a section has changed'''
    existing_service = get_service_from_display_name(module, manager_url, mgr_username, mgr_password,
                                                     validate_certs, service_params['display_name'])
    # No existing service means nothing to update (creation is handled by the caller).
    if existing_service is None:
        return False
    # service_params must be deep copied otherwise pop removes globally.
    copy_service_params = copy.deepcopy(service_params)
    # Compare the nested element and the tag list separately from the flat params.
    copy_nsservice_element = copy_service_params.pop('nsservice_element', [])
    existing_nsservice_element = existing_service.pop('nsservice_element', None)
    copy_tags = copy_service_params.pop('tags', [])
    existing_tags = existing_service.pop('tags', [])
    # Flatten list of ports for ALGTypeNSService and L4PortSetNSService so that can be easily compared
    flatten_list_to_string(copy_nsservice_element)
    flatten_list_to_string(existing_nsservice_element)
    # Check to ensure that all keys and values in the base of the params match the existing configuration.
    if not all(k in existing_service and copy_service_params[k] == existing_service[k]
               for k in copy_service_params):
        return True
    # Check to ensure that all keys and values nsservice_element match the existing configuration.
if not all(k in existing_nsservice_element and copy_nsservice_element[k] == existing_nsservice_element[k] for k in copy_nsservice_element): return True # Compare list of tags as strings if compare_tags(module, existing_tags, copy_tags): return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), description=dict(required=False, type='str'), nsservice_element=dict(required=True, type='dict', alg=dict(required=False, type='str', choices=['ORACLE_TNS', 'FTP', 'SUN_RPC_TCP', 'SUN_RPC_UDP', 'MS_RPC_TCP', 'MS_RPC_UDP', 'NBNS_BROADCAST', 'NBDG_BROADCAST', 'TFTP']), destination_ports=dict(required=False, type='list'), icmp_code=dict(required=False, type='int'), icmp_type=dict(required=False, type='int'), l4_protocol=dict(required=False, type='str'), protocol=dict(required=False, type='str', choices=['ICMPv4', 'ICMPv6']), protocol_number=dict(required=False, type='int'), resource_type=dict(required=True, type='str', choices=['ALGTypeNSService', 'IPProtocolNSService', 'L4PortSetNSService', 'ICMPTypeNSService', 'IGMPTypeNSService']), source_ports=dict(required=False, type='list')), resource_type=dict(required=False, type='str', default='NSService'), state=dict(required=True, choices=['present', 'absent']), tags=dict(required=False, type='list', tag=dict(required=False, type='str'), scope=dict(required=False, type='str'))) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) service_params = get_service_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) service_dict = get_service_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) service_id, 
revision = None, None if service_dict: service_id = service_dict['id'] revision = service_dict['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, service_params) if not updated: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(service_params)), id='12345') request_data = json.dumps(service_params) try: if service_id: module.exit_json(changed=False, id=service_id, message="Service with display_name %s already exist." % module.params['display_name']) (rc, resp) = request(manager_url+ '/ns-services', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add service. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Service with display name %s created." % module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(service_params)), id=service_id) service_params['_revision']=revision # update current revision request_data = json.dumps(service_params) id = service_id try: (rc, resp) = request(manager_url+ '/ns-services/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update service with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Service with id %s updated." 
% id) elif state == 'absent': # delete the array id = service_id if id is None: module.exit_json(changed=False, msg='No service exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(service_params)), id=id) try: (rc, resp) = request(manager_url + "/ns-services/%s" % id, method='DELETE', url_username=mgr_username, url_password=<PASSWORD>, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete service with id %s. Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True,
'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')}, '861582981':{'en': 'YanAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')}, '861582982':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')}, '861582983':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')}, '861582984':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')}, '861582985':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')}, '861582986':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')}, '861582987':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')}, '861582988':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')}, '861582989':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')}, '861582990':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')}, '861582991':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')}, '861582992':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')}, '861582993':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')}, '861582994':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')}, '861582995':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')}, '861582996':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')}, '861582997':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')}, '861582998':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')}, '861582999':{'en': 'YanAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')}, '86158300':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '86158301':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '86158302':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '86158303':{'en': 'Zhangjiakou, Hebei', 'zh': 
u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '861583035':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')}, '861583040':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583041':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583042':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '861583043':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583044':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583045':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583046':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861583047':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583048':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583049':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '86158305':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583059':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '86158306':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583060':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861583061':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861583062':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '86158307':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583070':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583071':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583072':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583079':{'en': 'Baoding, Hebei', 'zh': 
u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '86158308':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583080':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583081':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583082':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '86158309':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583096':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583097':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583098':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583099':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '86158310':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '86158311':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '86158312':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '86158313':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '86158314':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583144':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583147':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '86158315':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583150':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583159':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '86158316':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '86158317':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '86158318':{'en': 'Handan, Hebei', 'zh': 
u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583180':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583187':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583188':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583189':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '86158319':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583190':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583191':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '86158320':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '86158321':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '86158322':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '86158323':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '861583240':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583241':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583242':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583243':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583244':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583245':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583246':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583247':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583248':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583249':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '86158325':{'en': 'Tangshan, Hebei', 'zh': 
u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '86158326':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861583269':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '86158327':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '86158328':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583280':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583288':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583289':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '86158329':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '86158330':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583310':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583311':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583312':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583313':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '861583314':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583315':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583316':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')}, '861583317':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583318':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583319':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583320':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583321':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583322':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, 
'861583323':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')}, '861583324':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583325':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583326':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')}, '861583327':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583328':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583329':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583330':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583331':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '861583332':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583333':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583334':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583335':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')}, '861583336':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583337':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583338':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583339':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583340':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583341':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583342':{'en': 'Ba<NAME>', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583343':{'en': 'Ba<NAME>', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583344':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583345':{'en': 'Tang<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, 
'861583346':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583347':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583348':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583349':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '86158335':{'en': 'Tang<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '86158336':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583370':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583371':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583372':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583373':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')}, '861583374':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583375':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583376':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583377':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583378':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583379':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')}, '861583380':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')}, '861583381':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')}, '861583382':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583383':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583384':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')}, '861583385':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583386':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, 
'861583387':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583388':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '861583389':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')}, '86158339':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')}, '86158340':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '861583408':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861583409':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '86158341':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '861583420':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861583421':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861583422':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583423':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583424':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583425':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861583426':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861583427':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861583428':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861583429':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '86158343':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583430':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861583431':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861583432':{'en': 'Shuozhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '861583433':{'en': 'Jincheng, Shanxi', 'zh': 
u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '86158344':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')}, '86158345':{'en': 'Tonghua, Jilin', 'zh': u('\u5409\u6797\u7701\u901a\u5316\u5e02')}, '86158346':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u767d\u57ce\u5e02')}, '86158347':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5ef6\u8fb9\u671d\u9c9c\u65cf\u81ea\u6cbb\u5dde')}, '86158348':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u677e\u539f\u5e02')}, '86158349':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u5ef6\u8fb9\u671d\u9c9c\u65cf\u81ea\u6cbb\u5dde')}, '86158350':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861583502':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583503':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')}, '861583504':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')}, '861583505':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')}, '861583510':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '861583511':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '861583512':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '861583513':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')}, '861583514':{'en': 'Changzhi, Shanxi', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')}, '861583515':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')}, '861583516':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583517':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583518':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583519':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '861583520':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583521':{'en': 
'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583522':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583523':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861583524':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583525':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861583526':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861583527':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861583528':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '861583529':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '86158353':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')}, '861583536':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583537':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583538':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583539':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '86158354':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')}, '861583549':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u6714\u5dde\u5e02')}, '86158355':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')}, '861583560':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861583561':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861583562':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861583563':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583564':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583565':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')}, '861583566':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, '861583567':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')}, 
'861583568':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '861583569':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')}, '86158357':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')}, '86158358':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')}, '86158359':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')}, '86158360':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')}, '86158361':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')}, '86158362':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')}, '86158363':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')}, '86158364':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')}, '86158365':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')}, '86158366':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')}, '86158367':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')}, '86158368':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')}, '86158369':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')}, '86158370':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')}, '86158371':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')}, '86158372':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')}, '86158373':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u65b0\u4e61\u5e02')}, '86158374':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')}, '86158375':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')}, '86158376':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')}, '86158377':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')}, '86158378':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5f00\u5c01\u5e02')}, '86158379':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')}, 
'86158380':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')}, '86158381':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')}, '86158382':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')}, '86158383':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')}, '86158384':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')}, '86158385':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')}, '86158386':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')}, '86158387':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')}, '86158388':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')}, '86158389':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')}, '86158390':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')}, '86158391':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')}, '86158392':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')}, '86158393':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')}, '86158394':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')}, '86158395':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')}, '86158396':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')}, '86158397':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4fe1\u9633\u5e02')}, '86158398':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4e09\u95e8\u5ce1\u5e02')}, '86158399':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')}, '8615840':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')}, '86158406':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')}, '86158407':{'en': '<NAME>', 'zh':
= False break confirm = validate_entry() if edit is False: break else: self.header_names.append(col_name) col_n += 1 print('') print('Column Headers:', self.header_names) enter_continue() confirm = 'n' while confirm == 'n': self.data_row_idx = input("Enter the row index that data begin " "on: ") try: self.data_row_idx = int(self.data_row_idx) if self.data_row_idx < 0: raise ValueError confirm = validate_entry() except ValueError: print('..invalid entry, enter an integer >= 0') print('') def setTimeHeaders(self, print_banner=True, print_statement=None): """Specify the column(s) containing date/timestamp information. Args: print_banner (bool, optional): If ``'True'``, a banner indicating the title of the section, user input options, and notes will be printed to the console. Defaults to True. Returns: None. """ if print_banner: self.printSelectionBanner('Specify Timestamp columns', options=[self.end_str, self.del_str]) # Create a list of time-like columns, update the col_headers list with # the DateTime type corresponding to the specified header name # Enter in the time like columns [LOOP] if print_statement is not None: print(print_statement) end = False i = 1 while end is False: val = input("Enter Timestamp column name #{0}: ".format(str(i))) if val == 'X': end = True elif val == 'D': try: self.timestamp_col_headers.pop(i-2) print('..removing timestamp column #{0} from ' 'list'.format(str(i-1))) i -= 2 print('..updated timestamp column headers list: ') print(' ', self.timestamp_col_headers) except IndexError: print('Empty list, no entries to delete') continue elif val in self.all_col_headers: self.timestamp_col_headers.append(val) self.add_param_attrib(val, attrib_key='header_class', attrib_val='datetime') self.add_param_attrib(val, attrib_key='sdfs_param', attrib_val='DateTime') self.add_param_attrib(val, attrib_key='drop', attrib_val=False) else: print('..Invalid entry. 
Choose from the following list:') print(' ', self.all_col_headers) continue i += 1 print('\nTimestamp column list:', self.timestamp_col_headers) enter_continue() def setParamHeaders(self, print_banner=True, col_list=None): """Select the SDFS parameters corresponding to column names discovered by ``ParseDataSets()``. A parameter renaming dictionary is created for reassigning the names of header labels. Args: print_banner (bool, optional): If ``'True'``, a banner indicating the title of the section, user input options, and notes will be printed to the console. Defaults to True. Returns: None. """ param_types = {'S': 'The header corresponds to an SDFS Parameter', 'C': 'The header corresponds to an existing custom Parameter', 'N': 'Create a new custom Parameter for the header', '': '(enter key) Skip the current header and drop from SDFS datasets'} pretty_params = pprint.pformat(param_types) if print_banner: #txt = 'Choose from the following list of SDFS parameter names' self.printSelectionBanner('Specify Parameter columns', options=[self.skip_str], #notes=[txt, self.params] ) # drop time-like columns and ask user for SDFS parameter associated # with remaining cols # param_col_list = [param for param in self.all_col_headers # if param not in self.timestamp_col_headers)] if col_list is None: param_col_list = list(set(param for param in self.all_col_headers if param not in self.timestamp_col_headers)) else: param_col_list = list(set(param for param in col_list if param not in self.timestamp_col_headers)) n_params = len(param_col_list) renaming_dict = {} for i, rec_header in enumerate(param_col_list, 1): valid = False while valid is False: print(f'\n[{i}/{n_params}]') print('-----') header_type = input('Enter the character indicating the type of' f' parameter \n{pretty_params}\n\nParameter type for header' f' name "{rec_header}": ') if header_type == 'S': print('SDFS Parameters:') print(self.sdfs_params) set_header = input(f'From the list above, select the SDFS ' 
f'parameter associated with {rec_header}: ') if set_header in self.sdfs_params: valid = True self.sdfs_header_names.append(set_header) self.sdfs_header_names = list(set(self.sdfs_header_names)) if self.data_type == 'reference': self.setParamMetaCols(rec_header, set_header) unit_transform = self.checkParamUnits(rec_header, set_header) self.add_param_attrib(rec_header, attrib_key='unit_transform', attrib_val=unit_transform) drop = False else: print('..Invalid entry') if header_type == 'C': if self.custom_params != []: set_header = input('Enter custom parameter associated with ' f'{rec_header}: ') print(self.custom_params) if set_header in self.sdfs_params: valid = True drop = False else: print('..Invalid entry') else: print('No custom Parameters previously configured') if header_type == 'N': set_header = input('Enter new custom parameter associated with ' f'{rec_header}: ') response = validate_entry(statement=f'Do you wish to save {set_header} to the catalog of sensortoolkit.Parameter attributes?') if response == 'y': save_param = True else: save_param = False print('') Parameter(set_header, save_custom_param=save_param) valid = True drop = False if header_type == '': valid = True print('..{0} will be dropped'.format(rec_header)) drop = True set_header = '' if header_type not in param_types.keys(): print('..Invalid parameter header type') renaming_dict[rec_header] = set_header self.add_param_attrib(rec_header, attrib_key='header_class', attrib_val='parameter') self.add_param_attrib(rec_header, attrib_key='sdfs_param', attrib_val=set_header) self.add_param_attrib(rec_header, attrib_key='drop', attrib_val=drop) #TODO: Print dictionary with renaming scheme, ask to confirm # add something like following code block, print('') print('Configured renaming scheme:') self.pp.pprint(renaming_dict) enter_continue() def checkParamUnits(self, param, sdfs_param): """Prompt user to indicate whether units for passed parameter are the same as the preset units specified for the 
corresponding SDFS parameter. Args: param (str): The name of the parameter as logged in recorded datasets. sdfs_param (str): The name of the SDFS parameter corresponding to the recorded parameter. Returns: val (int, float, or Nonetype): A scalar quantity for converting the concentrations from the unit basis in which data were recorded to the unit basis for the SDFS parameter. """ val = None sdfs_param_units = Parameter(sdfs_param).units print('') print(f' Are the units of measure [{sdfs_param_units}] for column header "{param}"?') confirm = validate_entry(indent_statement=2) print('') if confirm == 'n': if param == 'Temp' or 'DP': if sdfs_param_units == '°F': old_temp_basis = '°C' old_unit_name = 'Celsius' new_unit_name = 'Fahrenheit' elif sdfs_param_units == '°C': old_temp_basis = '°F' old_unit_name = 'Fahrenheit' new_unit_name = 'Celsius' print(f' Are the units of measure [{old_temp_basis}] for column header "{param}"?') temp_confirm = validate_entry(indent_statement=2) if temp_confirm == 'y': print('') old_temp_basis = old_temp_basis.replace("°", "").lower() new_temp_basis = sdfs_param_units.replace("°", "").lower() print(f' "{param}" will be converted from {old_unit_name} to {new_unit_name}') val = f'{old_temp_basis}_{new_temp_basis}' else: print('') print(' Temperature must be in either degree Fahrenheit or Celsius') else: val = input(' Enter the scalar quanitity for converting the ' 'recorded measurements to the following unit basis: ' f'{sdfs_param_units}') return val def setDateTimeFormat(self): """Configure the date/time formatting for date/time column(s) specified in ``setTimeHeaders()``. Returns: None. """ cite = ('..format code list: https://docs.python.org/3/library/' 'datetime.html#strftime-and-strptime-format-codes') epoch = ('..If a timestamp column is formatted as the number of ' 'seconds since the Unix epoch (1 Jan. 
1970), enter "epoch"') self.printSelectionBanner('Configure Timestamp Column Formatting', options=[epoch, self.skip_str], notes=[cite]) self.time_format_dict = {} for col in self.timestamp_col_headers: # Pass over previously configured timestamp columns (when using # loadPreviousSetup()) for col_idx in self.col_headers.keys(): if col in self.col_headers[col_idx]: col_attribs = self.col_headers[col_idx][col] if 'dt_format' in col_attribs: continue invalid = True while invalid is True: val = input('Enter date/time formatting for "' + col + '": ') if val == '': self.add_param_attrib(col, attrib_key='drop', attrib_val=True) invalid = False continue else: confirm = validate_entry() if confirm == 'y': invalid = False self.time_format_dict[col] = val self.add_param_attrib(col, attrib_key='dt_format', attrib_val=val) print('') print('Configured formatting scheme:') self.pp.pprint(self.time_format_dict) enter_continue() def setTimeZone(self): """Select the time zone associated with the date/time column(s). Timezones should be valid timezone names recognized by the ``pytz`` library. Returns: None. 
""" self.printSelectionBanner('Specify DateTime Index Time Zone', options=[self.skip_str], notes=['For a list of all time zones, type' ' "pytz.all_timezones"']) for col in self.timestamp_col_headers: # Pass over previously configured timestamp columns (when using # loadPreviousSetup()) for col_idx in self.col_headers.keys(): if col in self.col_headers[col_idx]: col_attribs = self.col_headers[col_idx][col] if ('dt_timezone' in col_attribs): continue invalid = True while invalid is True: val = input('Enter time zone for "' + col + '": ') if val == '': # timezone is unspecified print('..time zone not specified, continuing with tz-naive' ' DateTime index') tzone = None self.time_format_dict[col + '_tz'] = tzone invalid = False continue else: try: tzone = pytz.timezone(val) except UnknownTimeZoneError: print('..invalid time zone') continue confirm = validate_entry() if confirm == 'y': invalid = False self.time_format_dict[col + '_tz'] = tzone.zone self.add_param_attrib(col, attrib_key='dt_timezone', attrib_val=tzone.zone) print('') print('Configured time zone formatting:') self.pp.pprint(self.time_format_dict) enter_continue() def exportSetup(self): """Save the setup configuration to a ``setup.json`` file. Returns: None. 
""" self.printSelectionBanner('Setup Configuration') self.config_dict = self.__dict__.copy() drop_attribs = ['end_str', 'del_str', 'skip_str', 'header_names', 'timestamp_col_headers', 'time_format_dict', 'all_col_headers'] for attrib in drop_attribs: try: del self.config_dict[attrib] except KeyError: pass if self.data_type == 'sensor': filename = self.name + '_setup.json' sensor_path = os.path.normpath( os.path.join(self.data_rel_path, '..')) outpath = os.path.normpath(os.path.join(self.path, sensor_path)) if self.data_type == 'reference': filename = 'reference_setup.json' outpath = os.path.normpath(os.path.join(self.path, self.data_rel_path)) if not os.path.isdir(outpath): os.makedirs(outpath) self.outpath = os.path.join(outpath, filename) print('') print('..writing setup configuration to the following path:') print(self.outpath) print('') with open(self.outpath, 'w') as outfile: self.config_dict = json.dumps(self.config_dict, indent=4) outfile.write(self.config_dict) class SensorSetup(_Setup): """Interactive class for handling the sensor data ingestion process. Users specify various attributes about sensor
serializers.UnifiedJobListSerializer parent_model = models.Schedule relationship = 'unifiedjob_set' name = _('Schedule Jobs List') class AuthView(APIView): '''List enabled single-sign-on endpoints''' authentication_classes = [] permission_classes = (AllowAny,) swagger_topic = 'System Configuration' def get(self, request): from rest_framework.reverse import reverse data = OrderedDict() err_backend, err_message = request.session.get('social_auth_error', (None, None)) auth_backends = list(load_backends(settings.AUTHENTICATION_BACKENDS, force_load=True).items()) # Return auth backends in consistent order: Google, GitHub, SAML. auth_backends.sort(key=lambda x: 'g' if x[0] == 'google-oauth2' else x[0]) for name, backend in auth_backends: login_url = reverse('social:begin', args=(name,)) complete_url = request.build_absolute_uri(reverse('social:complete', args=(name,))) backend_data = {'login_url': login_url, 'complete_url': complete_url} if name == 'saml': backend_data['metadata_url'] = reverse('sso:saml_metadata') for idp in sorted(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.keys()): saml_backend_data = dict(backend_data.items()) saml_backend_data['login_url'] = '%s?idp=%s' % (login_url, idp) full_backend_name = '%s:%s' % (name, idp) if (err_backend == full_backend_name or err_backend == name) and err_message: saml_backend_data['error'] = err_message data[full_backend_name] = saml_backend_data else: if err_backend == name and err_message: backend_data['error'] = err_message data[name] = backend_data return Response(data) class TeamList(ListCreateAPIView): model = models.Team serializer_class = serializers.TeamSerializer class TeamDetail(RetrieveUpdateDestroyAPIView): model = models.Team serializer_class = serializers.TeamSerializer class TeamUsersList(BaseUsersList): model = models.User serializer_class = serializers.UserSerializer parent_model = models.Team relationship = 'member_role.members' ordering = ('username',) class TeamRolesList(SubListAttachDetachAPIView): model = 
models.Role serializer_class = serializers.RoleSerializerWithParentAccess metadata_class = RoleMetadata parent_model = models.Team relationship = 'member_role.children' search_fields = ('role_field', 'content_type__model') def get_queryset(self): team = get_object_or_404(models.Team, pk=self.kwargs['pk']) if not self.request.user.can_access(models.Team, 'read', team): raise PermissionDenied() return models.Role.filter_visible_roles(self.request.user, team.member_role.children.all().exclude(pk=team.read_role.pk)) def post(self, request, *args, **kwargs): sub_id = request.data.get('id', None) if not sub_id: return super(TeamRolesList, self).post(request) role = get_object_or_400(models.Role, pk=sub_id) org_content_type = ContentType.objects.get_for_model(models.Organization) if role.content_type == org_content_type and role.role_field in ['member_role', 'admin_role']: data = dict(msg=_("You cannot assign an Organization participation role as a child role for a Team.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) if role.is_singleton(): data = dict(msg=_("You cannot grant system-level permissions to a team.")) return Response(data, status=status.HTTP_400_BAD_REQUEST) team = get_object_or_404(models.Team, pk=self.kwargs['pk']) credential_content_type = ContentType.objects.get_for_model(models.Credential) if role.content_type == credential_content_type: if not role.content_object.organization or role.content_object.organization.id != team.organization.id: data = dict(msg=_("You cannot grant credential access to a team when the Organization field isn't set, or belongs to a different organization")) return Response(data, status=status.HTTP_400_BAD_REQUEST) return super(TeamRolesList, self).post(request, *args, **kwargs) class TeamObjectRolesList(SubListAPIView): model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.Team search_fields = ('role_field', 'content_type__model') def get_queryset(self): po = 
# --- Django REST Framework API views (AWX-style) -------------------------
# NOTE(review): this chunk begins mid-way through a get_queryset() of a
# *ObjectRolesList view whose class header is outside this view of the file;
# the `po = ` binding for the line below was lost in the paste — confirm
# against the original file.
self.get_parent_object()
content_type = ContentType.objects.get_for_model(self.parent_model)
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)


class TeamProjectsList(SubListAPIView):
    # Projects a team can access, resolved through the RBAC role ancestry.
    model = models.Project
    serializer_class = serializers.ProjectSerializer
    parent_model = models.Team

    def get_queryset(self):
        team = self.get_parent_object()
        self.check_parent_access(team)
        model_ct = ContentType.objects.get_for_model(self.model)
        parent_ct = ContentType.objects.get_for_model(self.parent_model)
        # Roles on Projects that have this team among their ancestors.
        proj_roles = models.Role.objects.filter(Q(ancestors__content_type=parent_ct) & Q(ancestors__object_id=team.pk), content_type=model_ct)
        # Intersect with what the requesting user may read.
        return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in proj_roles])


class TeamActivityStreamList(SubListAPIView):
    # Activity-stream entries for a team, its projects and its credentials.
    model = models.ActivityStream
    serializer_class = serializers.ActivityStreamSerializer
    parent_model = models.Team
    relationship = 'activitystream_set'
    search_fields = ('changes',)

    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        return qs.filter(
            Q(team=parent) | Q(project__in=models.Project.accessible_objects(parent, 'read_role')) | Q(credential__in=models.Credential.accessible_objects(parent, 'read_role'))
        )


class TeamAccessList(ResourceAccessList):
    model = models.User  # needs to be User for AccessList's parent resolution
    parent_model = models.Team


class ExecutionEnvironmentList(ListCreateAPIView):
    always_allow_superuser = False
    model = models.ExecutionEnvironment
    serializer_class = serializers.ExecutionEnvironmentSerializer
    swagger_topic = "Execution Environments"


class ExecutionEnvironmentDetail(RetrieveUpdateDestroyAPIView):
    always_allow_superuser = False
    model = models.ExecutionEnvironment
    serializer_class = serializers.ExecutionEnvironmentSerializer
    swagger_topic = "Execution Environments"


class ExecutionEnvironmentJobTemplateList(SubListAPIView):
    # Unified job templates that reference an execution environment.
    model = models.UnifiedJobTemplate
    serializer_class = serializers.UnifiedJobTemplateSerializer
    parent_model = models.ExecutionEnvironment
    relationship = 'unifiedjobtemplates'


class ExecutionEnvironmentCopy(CopyAPIView):
    model = models.ExecutionEnvironment
    copy_return_serializer_class = serializers.ExecutionEnvironmentSerializer


class ExecutionEnvironmentActivityStreamList(SubListAPIView):
    model = models.ActivityStream
    serializer_class = serializers.ActivityStreamSerializer
    parent_model = models.ExecutionEnvironment
    relationship = 'activitystream_set'
    search_fields = ('changes',)

    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        return qs.filter(execution_environment=parent)


class ProjectList(ListCreateAPIView):
    model = models.Project
    serializer_class = serializers.ProjectSerializer


class ProjectDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
    model = models.Project
    serializer_class = serializers.ProjectSerializer


class ProjectPlaybooks(RetrieveAPIView):
    model = models.Project
    serializer_class = serializers.ProjectPlaybooksSerializer


class ProjectInventories(RetrieveAPIView):
    model = models.Project
    serializer_class = serializers.ProjectInventoriesSerializer


class ProjectTeamsList(ListAPIView):
    # Teams that hold a role descending onto this project.
    model = models.Team
    serializer_class = serializers.TeamSerializer

    def get_queryset(self):
        p = get_object_or_404(models.Project, pk=self.kwargs['pk'])
        if not self.request.user.can_access(models.Project, 'read', p):
            raise PermissionDenied()
        project_ct = ContentType.objects.get_for_model(models.Project)
        team_ct = ContentType.objects.get_for_model(self.model)
        all_roles = models.Role.objects.filter(Q(descendents__content_type=project_ct) & Q(descendents__object_id=p.pk), content_type=team_ct)
        return self.model.accessible_objects(self.request.user, 'read_role').filter(pk__in=[t.content_object.pk for t in all_roles])


class ProjectSchedulesList(SubListCreateAPIView):
    name = _("Project Schedules")
    model = models.Schedule
    serializer_class = serializers.ScheduleSerializer
    parent_model = models.Project
    relationship = 'schedules'
    parent_key = 'unified_job_template'


class ProjectScmInventorySources(SubListAPIView):
    name = _("Project SCM Inventory Sources")
    model = models.InventorySource
    serializer_class = serializers.InventorySourceSerializer
    parent_model = models.Project
    relationship = 'scm_inventory_sources'
    parent_key = 'source_project'


class ProjectActivityStreamList(SubListAPIView):
    model = models.ActivityStream
    serializer_class = serializers.ActivityStreamSerializer
    parent_model = models.Project
    relationship = 'activitystream_set'
    search_fields = ('changes',)

    def get_queryset(self):
        parent = self.get_parent_object()
        self.check_parent_access(parent)
        qs = self.request.user.get_queryset(self.model)
        if parent is None:
            return qs
        elif parent.credential is None:
            # No credential attached: only events for the project itself.
            return qs.filter(project=parent)
        return qs.filter(Q(project=parent) | Q(credential=parent.credential))


class ProjectNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    # Base list; subclasses pick the started/error/success relationship.
    model = models.NotificationTemplate
    serializer_class = serializers.NotificationTemplateSerializer
    parent_model = models.Project


class ProjectNotificationTemplatesStartedList(ProjectNotificationTemplatesAnyList):
    relationship = 'notification_templates_started'


class ProjectNotificationTemplatesErrorList(ProjectNotificationTemplatesAnyList):
    relationship = 'notification_templates_error'


class ProjectNotificationTemplatesSuccessList(ProjectNotificationTemplatesAnyList):
    relationship = 'notification_templates_success'


class ProjectUpdatesList(SubListAPIView):
    model = models.ProjectUpdate
    serializer_class = serializers.ProjectUpdateListSerializer
    parent_model = models.Project
    relationship = 'project_updates'


class ProjectUpdateView(RetrieveAPIView):
    # POST launches an SCM update for the project; GET reports launchability.
    model = models.Project
    serializer_class = serializers.ProjectUpdateViewSerializer
    permission_classes = (ProjectUpdatePermission,)

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.can_update:
            project_update = obj.update()
            if not project_update:
                return Response({}, status=status.HTTP_400_BAD_REQUEST)
            else:
                data = OrderedDict()
                data['project_update'] = project_update.id
                data.update(serializers.ProjectUpdateSerializer(project_update, context=self.get_serializer_context()).to_representation(project_update))
                headers = {'Location': project_update.get_absolute_url(request=request)}
                # 202: the update runs asynchronously.
                return Response(data, headers=headers, status=status.HTTP_202_ACCEPTED)
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)


class ProjectUpdateList(ListAPIView):
    model = models.ProjectUpdate
    serializer_class = serializers.ProjectUpdateListSerializer


class ProjectUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView):
    model = models.ProjectUpdate
    serializer_class = serializers.ProjectUpdateDetailSerializer


class ProjectUpdateEventsList(SubListAPIView):
    model = models.ProjectUpdateEvent
    serializer_class = serializers.ProjectUpdateEventSerializer
    parent_model = models.ProjectUpdate
    relationship = 'project_update_events'
    name = _('Project Update Events List')
    search_fields = ('stdout',)

    def finalize_response(self, request, response, *args, **kwargs):
        # Advertise the UI event cap on every response.
        response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
        return super(ProjectUpdateEventsList, self).finalize_response(request, response, *args, **kwargs)


class SystemJobEventsList(SubListAPIView):
    model = models.SystemJobEvent
    serializer_class = serializers.SystemJobEventSerializer
    parent_model = models.SystemJob
    relationship = 'system_job_events'
    name = _('System Job Events List')
    search_fields = ('stdout',)

    def finalize_response(self, request, response, *args, **kwargs):
        response['X-UI-Max-Events'] = settings.MAX_UI_JOB_EVENTS
        return super(SystemJobEventsList, self).finalize_response(request, response, *args, **kwargs)


class ProjectUpdateCancel(RetrieveAPIView):
    model = models.ProjectUpdate
    obj_permission_type = 'cancel'
    serializer_class = serializers.ProjectUpdateCancelSerializer

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.can_cancel:
            obj.cancel()
            return Response(status=status.HTTP_202_ACCEPTED)
        else:
            return self.http_method_not_allowed(request, *args, **kwargs)


class ProjectUpdateNotificationsList(SubListAPIView):
    model = models.Notification
    serializer_class = serializers.NotificationSerializer
    parent_model = models.ProjectUpdate
    relationship = 'notifications'
    search_fields = ('subject', 'notification_type', 'body')


class ProjectUpdateScmInventoryUpdates(SubListAPIView):
    name = _("Project Update SCM Inventory Updates")
    model = models.InventoryUpdate
    serializer_class = serializers.InventoryUpdateListSerializer
    parent_model = models.ProjectUpdate
    relationship = 'scm_inventory_updates'
    parent_key = 'source_project_update'


class ProjectAccessList(ResourceAccessList):
    model = models.User  # needs to be User for AccessList's parent resolution
    parent_model = models.Project


class ProjectObjectRolesList(SubListAPIView):
    model = models.Role
    serializer_class = serializers.RoleSerializer
    parent_model = models.Project
    search_fields = ('role_field', 'content_type__model')

    def get_queryset(self):
        po = self.get_parent_object()
        content_type = ContentType.objects.get_for_model(self.parent_model)
        return models.Role.objects.filter(content_type=content_type, object_id=po.pk)


class ProjectCopy(CopyAPIView):
    model = models.Project
    copy_return_serializer_class = serializers.ProjectSerializer


class UserList(ListCreateAPIView):
    model = models.User
    serializer_class = serializers.UserSerializer
    permission_classes = (UserPermission,)
    ordering = ('username',)


class UserMeList(ListAPIView):
    # Single-element list containing only the requesting user.
    model = models.User
    serializer_class = serializers.UserSerializer
    name = _('Me')
    ordering = ('username',)

    def get_queryset(self):
        return self.model.objects.filter(pk=self.request.user.pk)


class OAuth2ApplicationList(ListCreateAPIView):
    name = _("OAuth 2 Applications")
    model = models.OAuth2Application
    serializer_class = serializers.OAuth2ApplicationSerializer
    swagger_topic = 'Authentication'


class OAuth2ApplicationDetail(RetrieveUpdateDestroyAPIView):
    name = _("OAuth 2 Application Detail")
    model = models.OAuth2Application
    serializer_class = serializers.OAuth2ApplicationSerializer
    swagger_topic = 'Authentication'

    def update_raw_data(self, data):
        # Never echo the client secret back into the editable raw form.
        data.pop('client_secret', None)
        return super(OAuth2ApplicationDetail, self).update_raw_data(data)


class ApplicationOAuth2TokenList(SubListCreateAPIView):
    name = _("OAuth 2 Application Tokens")
    model = models.OAuth2AccessToken
    serializer_class = serializers.OAuth2TokenSerializer
    parent_model = models.OAuth2Application
    relationship = 'oauth2accesstoken_set'
    parent_key = 'application'
    swagger_topic = 'Authentication'


class OAuth2ApplicationActivityStreamList(SubListAPIView):
    model = models.ActivityStream
    serializer_class = serializers.ActivityStreamSerializer
    parent_model = models.OAuth2Application
    relationship = 'activitystream_set'
    swagger_topic = 'Authentication'
    search_fields = ('changes',)


class OAuth2TokenList(ListCreateAPIView):
    name = _("OAuth2 Tokens")
    model = models.OAuth2AccessToken
    serializer_class = serializers.OAuth2TokenSerializer
    swagger_topic = 'Authentication'


class OAuth2UserTokenList(SubListCreateAPIView):
    name = _("OAuth2 User Tokens")
    model = models.OAuth2AccessToken
    serializer_class = serializers.OAuth2TokenSerializer
    parent_model = models.User
    relationship = 'main_oauth2accesstoken'
    parent_key = 'user'
    swagger_topic = 'Authentication'


class UserAuthorizedTokenList(SubListCreateAPIView):
    name = _("OAuth2 User Authorized Access Tokens")
    model = models.OAuth2AccessToken
    serializer_class = serializers.UserAuthorizedTokenSerializer
    parent_model = models.User
    relationship = 'oauth2accesstoken_set'
    parent_key = 'user'
    swagger_topic = 'Authentication'

    def get_queryset(self):
        # Only tokens tied to an application (i.e. authorized, not personal).
        return get_access_token_model().objects.filter(application__isnull=False, user=self.request.user)


class OrganizationApplicationList(SubListCreateAPIView):
    name = _("Organization OAuth2 Applications")
    model = models.OAuth2Application
    serializer_class = serializers.OAuth2ApplicationSerializer
    parent_model = models.Organization
    relationship = 'applications'
    parent_key = 'organization'
    swagger_topic = 'Authentication'


class UserPersonalTokenList(SubListCreateAPIView):
    name = _("OAuth2 Personal Access Tokens")
    model = models.OAuth2AccessToken
    serializer_class = serializers.UserPersonalTokenSerializer
    parent_model = models.User
    relationship = 'main_oauth2accesstoken'
    parent_key = 'user'
    swagger_topic = 'Authentication'

    def get_queryset(self):
        # Personal tokens have no associated application.
        return get_access_token_model().objects.filter(application__isnull=True, user=self.request.user)


class OAuth2TokenDetail(RetrieveUpdateDestroyAPIView):
    name = _("OAuth Token Detail")
    model = models.OAuth2AccessToken
    serializer_class = serializers.OAuth2TokenDetailSerializer
    swagger_topic = 'Authentication'


class OAuth2TokenActivityStreamList(SubListAPIView):
    model = models.ActivityStream
    serializer_class = serializers.ActivityStreamSerializer
    parent_model = models.OAuth2AccessToken
    relationship = 'activitystream_set'
    swagger_topic = 'Authentication'
    search_fields = ('changes',)


class UserTeamsList(SubListAPIView):
    model = models.Team
    serializer_class = serializers.TeamSerializer
    parent_model = models.User

    def get_queryset(self):
        u = get_object_or_404(models.User, pk=self.kwargs['pk'])
        if not self.request.user.can_access(models.User, 'read', u):
            raise PermissionDenied()
        # Teams where the user is a member or an admin.
        return models.Team.accessible_objects(self.request.user, 'read_role').filter(Q(member_role__members=u) | Q(admin_role__members=u)).distinct()


class UserRolesList(SubListAttachDetachAPIView):
    model = models.Role
    serializer_class = serializers.RoleSerializerWithParentAccess
    metadata_class = RoleMetadata
    parent_model = models.User
    relationship = 'roles'
    permission_classes = (IsAuthenticated,)
    search_fields = ('role_field', 'content_type__model')

    def get_queryset(self):
        u = get_object_or_404(models.User, pk=self.kwargs['pk'])
        if not self.request.user.can_access(models.User, 'read', u):
            raise PermissionDenied()
        # NOTE(review): method is truncated here in this chunk; the remainder
        # (the actual queryset construction) continues outside this view.
# --- LSTM hidden-cell analytic timing model ------------------------------
# NOTE(review): these are methods of a performance-model class whose header
# is outside this chunk; `self` carries model parameters (miniB, D, G,
# precision, O, IBK1/IBK2, kp_hidden_dim1/2, debug, ...) — confirm upstream.
# The chunk begins mid-way through another method; indentation of this
# fragment is reconstructed (the paste lost the original line breaks).
if X1 == 0:
    # GEMM_gmem = GEMM_l2mem
    # GEMM_l2mem = 0
    return GEMM_flop, num_accesses

#Column-Row MM
def getCf_kp1(self):
    """Forward-path time of the hidden LSTM cell under kp1 (column-row) kernel parallelism."""
    #Multiply
    assert(self.kp_hidden_type == 1)
    assert(self.kp_hidden_dim1 > 1)
    assert(self.kp_hidden_dim1 % 4 == 0 or self.kp_hidden_dim1 == 2) #4 bc it is LSTM cell
    assert((2 * self.D) % self.kp_hidden_dim1 == 0)
    GEMM_time, reduction_time = self.getDistGEMM_f_kp1(self.miniB, 2 * self.D, self.G * self.D, self.kp_hidden_dim1, "Cf_kp1")
    #Pointwise ops: all the linear/non-linear ops after MM
    point_flop = self.miniB * (self.G * self.D / self.kp_hidden_dim1) * 5
    #4 refers to the number of pointwise ops (mul + add +tanh + mul + tanh) on
    #the critical path
    point_mem = (self.precision * self.miniB * (self.G * self.D / self.kp_hidden_dim1) * (3 * 3 + 2 * 2))
    # 3(3 memory access per operation with two input and one output)
    # 3(mul + add + mul) on critical path
    # 2(2 memory access per operation with one input and one output)
    # 1(tanh) on critical path
    data_size = 4 * self.miniB * (self.G * self.D / self.kp_hidden_dim1) * self.precision
    # 4 refers to the number of pointwise ops (mul + add + mul + tanh) on the
    # critical path whose inputs are located across different GPUs
    #NOTE:Assuming all communications can happpen in parallel
    mem_transfer = self.roofline(0, 2 * data_size, name="Cf_kp1: memory accesses before and after data transfer over network")
    #2: one read from the source and one write to the destination memory
    data_transfer = data_size / self.IBK1
    point_comm = mem_transfer + data_transfer
    point_time = self.roofline(point_flop, point_mem, name='pointwise_cf_kp1') + 5 * self.O + point_comm
    return GEMM_time + reduction_time + point_time

# NOTE(review): the following def is a corrupted/truncated duplicate of
# getCb_kp1 — it returns GEMM_time/reduction_time/point_time without ever
# defining them, and is immediately shadowed by the complete definition
# below.  It appears to be a paste artifact; confirm and delete upstream.
def getCb_kp1(self):
    #TODO:Add local accumulation of weights at every time step
    #Pointwise
    point_flop = ((self.miniB) * (self.G * self.D / self.kp_hidden_dim1) * 5 + (2 * self.D * self.G * self.D / self.kp_hidden_dim1))  # local accumulation of wts
    #4 refers to the number of pointwise ops (mul + add +tanh + mul) on
    #the critical path
    point_mem = (self.precision * self.miniB * (self.G * self.D / self.kp_hidden_dim1) * (3 * 3 + 2 * 2) + (2 * self.precision * self.D * self.G * self.D / self.kp_hidden_dim1) * 3)  # local accumulation of wts
    # 3(3 memory access per operation with two input and one output)
    # 3(mul + add + mul) on critical path
    return GEMM_time + reduction_time + point_time

def getCb_kp1(self):
    """Backward-path time of the hidden LSTM cell under kp1 kernel parallelism."""
    #TODO:Add local accumulation of weights at every time step
    #Pointwise
    point_flop = ((self.miniB) * (self.G * self.D / self.kp_hidden_dim1) * 5 + (2 * self.D * self.G * self.D / self.kp_hidden_dim1))  # local accumulation of wts
    #4 refers to the number of pointwise ops (mul + add +tanh + mul) on
    #the critical path
    point_mem = (self.precision * self.miniB * (self.G * self.D / self.kp_hidden_dim1) * (3 * 3 + 2 * 2) + (2 * self.precision * self.D * self.G * self.D / self.kp_hidden_dim1) * 3)  # local accumulation of wts
    # 3(3 memory access per operation with two input and one output)
    # 3(mul + add + mul) on critical path
    # 2(2 memory access per operation with one input and one output)
    # 1(tanh) on critical path
    data_size = 4 * self.miniB * (self.G * self.D / self.kp_hidden_dim1) * self.precision
    mem_transfer = self.roofline(0, 2 * data_size, name='Cb_kp1: memory accesses before and after data transfer over network')
    data_transfer = data_size / self.IBK1
    point_comm = mem_transfer + data_transfer
    #3 refers to the number of pointwise ops (mul + tanh + mul) on
    # critical path whose inputs are located across different GPUs
    #NOTE:Assuming all communications can happpen in parallel
    point_time = self.roofline(point_flop, point_mem, name='pointwise_Cb_kp1') + 5 * self.O + point_comm
    #GEMM_wrt_act and wt is calculated under getDistGEMM_b_kp1
    GEMM_time, reduction_time = self.getDistGEMM_b_kp1(self.miniB, 2 * self.D, self.G * self.D, self.kp_hidden_dim1, "Cb_kp1")
    if self.debug:
        print("(gr) Hidden point_flop: {:,}, point_mem: {:,}\n".format(int(point_flop/1e9), int(point_mem/1e9)))
    return GEMM_time + reduction_time + point_time

#Row-Column MM
def getCf_kp2(self):
    """Forward-path time of the hidden LSTM cell under kp2 (row-column) kernel parallelism."""
    #Multiply
    assert(self.kp_hidden_type == 2)
    assert(self.kp_hidden_dim1 > 1 or self.kp_hidden_dim2 > 1)
    assert(self.kp_hidden_dim2 % self.G == 0 or self.kp_hidden_dim2 == 2 or self.kp_hidden_dim2 == 1)
    assert(self.miniB % self.kp_hidden_dim1 == 0)
    assert(self.G * self.D % self.kp_hidden_dim2 == 0)
    GEMM_time, reduction_time = self.getDistGEMM_f_kp2(self.miniB, 2 * self.D, self.G * self.D, self.kp_hidden_dim1,self.kp_hidden_dim2, "Cf_kp2")
    #Pointwise ops
    point_flop = (self.miniB/self.kp_hidden_dim1) * (self.G * self.D / self.kp_hidden_dim2) * 5
    #4 refers to the number of pointwise ops (mul + add +tanh + mul) on
    #the critical path
    point_mem = int(self.precision * (self.miniB / self.kp_hidden_dim1) * (self.G * self.D / self.kp_hidden_dim2) * (3 * 3 + 2 * 2))
    # 3(3 memory access per operation with two input and one output)
    # 3(mul + add + mul) on critical path
    # 2(2 memory access per operation with one input and one output)
    # 1(tanh) on critical path
    data_size = ((self.miniB / self.kp_hidden_dim1) * (self.G * self.D / self.kp_hidden_dim2) * 4 * self.precision)
    #4 refers to the number of pointwise ops (mul + add + tanh + mul) whose inputs
    #across different GPU
    point_comm = 0
    # NOTE(review): this guard tests kp_softmax_dim2 inside a *hidden*-layer
    # method; kp_hidden_dim2 looks intended — confirm against the original.
    if (self.kp_softmax_dim2 > 1):
        mem_transfer = self.roofline(0, 2 * data_size, name='Cf_kp2: memory accesses before and after data transfer over network')
        data_transfer = data_size / self.IBK2
        point_comm = mem_transfer + data_transfer
    point_time = self.roofline(point_flop, point_mem, name='pointwise_Cf_kp2') + 5 * self.O + point_comm
    return GEMM_time + reduction_time + point_time

def getCb_kp2(self):
    """Backward-path time of the hidden LSTM cell under kp2 kernel parallelism."""
    #Pointwise ops
    point_flop = ((self.miniB / self.kp_hidden_dim1) * (self.G * self.D / self.kp_hidden_dim2) * 5 + (2 * self.D * self.G * self.D / self.kp_hidden_dim2))  # local accumulation of wts
    #4 refers to the number of pointwise ops (mul + add +tanh + mul) on
    #the critical path
    # kp_hidden_dim2 is for the reduction sum operation after doing outer product
    # for (B,4D)x(4D,2D).This is outerproduct due to the data distribution.
    point_mem = int((self.precision * (self.miniB / self.kp_hidden_dim1) * (self.G * self.D / self.kp_hidden_dim2) * (3 * 3 + 2 * 2)) + (2 * self.precision * self.D * self.G * self.D / self.kp_hidden_dim2) * 3)  # local accumulation of wts
    # 3(3 memory access per operation with two input and one output)
    # 3(mul + add + mul) on critical path
    # 2(2 memory access per operation with one input and one output)
    # 1(tanh) on critical path
    data_size = int(self.miniB * (self.G * self.D / self.kp_hidden_dim2) * 4 * self.precision)
    #3 refers to the number of pointwise ops (mul + add +tanh + mul) on
    #3 refers to the number of hops to gather i,f, o and c in each GPU
    #in order to perform (B,4D)x(4D,2D)
    point_comm = 0
    # NOTE(review): same kp_softmax_dim2 vs kp_hidden_dim2 question as above.
    if (self.kp_softmax_dim2 > 1):
        mem_transfer = self.roofline(0, 2 * data_size, name='Cb_kp2:memory accesses before and after data transfer over network')
        data_transfer = data_size / self.IBK2
        point_comm = mem_transfer + data_transfer
    point_time = self.roofline(point_flop, point_mem, name='pointwise_Cb_kp2') + 5 * self.O + point_comm
    GEMM_time, reduction_time = self.getDistGEMM_b_kp2(self.miniB, 2 * self.D, self.G * self.D, self.kp_hidden_dim1,self.kp_hidden_dim2, "Cb_kp2")
    if self.debug:
        print("(gr) Hidden point_flop: {:,}, point_mem: {:,}\n".format(int(point_flop/1e9), int(point_mem/1e9)))
    return GEMM_time + reduction_time + point_time

def getCf(self, m, n, k):
    """Get LSTM Cell Time on Forward Path"""
    #Add Biad adds
    GEMM_time = self.getGEMMTime(m, k, n, "Cf")
    point_flop = m * n * 5
    #1: add bias
    #5: add nonlinearities, there is one more than the number of gates (self.G)
    #1: pointwise muliply and add
    point_mem = (self.precision * m * n * (3 * 3 + 2 * 2))
    #3: 3 memory accesses for operands with two inputs and one output
    #2: 1 for bias add + 1 for pointwise mul
    #2: 2 memory accesses for operands with one input and one output
    #1: 5/4 non-linearities per gate
    point_time = self.roofline(point_flop, point_mem, name='pointwise_Cf') + 5 * self.O
    if self.debug:
        # NOTE(review): method truncated here by the chunk boundary — the
        # assignment below has no right-hand side in this view.
        gigaByte =
# --- ICE (Individualized Cluster Ensemble) pipeline -----------------------
# NOTE(review): this chunk begins mid-way through f_err_mat(); the function
# header, pred_prob_mat allocation and model_whole fitting are outside this
# view.  Helpers f_use_each_clus_forWhole, f_te, f_tr_te, f_quantileNorm,
# f_tfs_2_instClus, f_fuzzy_rwr_clusters, f_clus_to_tfs, f_err_mat and
# f_eu_dist2 are defined elsewhere in the file.
pred_prob_mat[:, n_clusters] = y_pred_whole
print (' Done evaluation using whole instances')
print (' Start to evaluate each cluster ')
# --------------------------- SELF -------------------------
# predict the whole instances using each cluster data, while self
# prediction using 10-fold CV.
[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
        y_pred_whole, model, fo_inner)
print (' Done evaluation using each cluster')
models.append(model_whole)
pred_prob_mat[:, 0:n_clusters] = y_pred_multi
# make a tmp array a stores y
tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
err_mat = abs(pred_prob_mat - tmp )
print (' Done calculating error table and fitting ICE models')
return [err_mat, models]

"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
#                       '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
                          random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""

def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
    '''
    Convert the err table to decision table.
    '''
    # One boolean column per cluster (the last err_mat column — the
    # whole-data model — is the reference and gets no column here).
    dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)
    # dec_ixs: for each instance, which clusters should be used.
    dec_ixs = []
    inst_clus = f_tfs_2_instClus(tfs)
    for i in range(0, len(err_mat)):
        # Matlab code:
        #dec_row = dec_mat(cur_nb_ix, :);
        #dec_row(:, end ) = dec_row(:, end ) - adv_whole;
        #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
        row = np.copy( err_mat[i, :] )
        #print row
        # Handicap the whole-data model, and favour the instance's own clusters.
        row[-1] = row[-1] - adv_whole
        inst_i_clus = inst_clus[i]
        if len(inst_i_clus) > 0:
            row[inst_i_clus] = row[inst_i_clus] - adv_self
        #print row
        # Clusters that beat the (handicapped) whole-data error.
        ix_good_clus = list( np.where( row < row[-1] ) [0] )
        #print ix_good_clus
        if len(ix_good_clus) > 0:
            dec_mat[i, ix_good_clus] = True
            dec_ixs.append(ix_good_clus)
        else:
            dec_ixs.append([])
    return [dec_mat, dec_ixs]

#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)

def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
    '''
    Use the training data to predict the testing data.
    Use whole training data to predict
    Use each cluster of training data to predict the testing data.
    '''
    y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
    # the first col is the prediction using the whole data
    model_whole = models[-1]
    y_pred_all[:, 0] = f_te(X_te, model_whole)
    #y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
    #print 'whole model good '
    # start from the second col, the result is by each cluster
    for i in range(0, len(clus)):
        #Xi = X_tr[clus[i].flatten(), :]
        #yi = y_tr[clus[i].flatten() ]
        model_i = models[i]
        #model_a_clust = copy.deepcopy(model)
        try:
            y_pred_te = f_te(X_te, model_i)
        except :
            # Degenerate single-class cluster markers (0/1) stand in for a
            # model; anything else unpredictable yields NaN predictions.
            if model_i == 0:
                y_pred_te = np.zeros(len(X_te))
            elif model_i == 1:
                y_pred_te = np.ones(len(X_te))
            else:
                y_pred_te = np.zeros(len(X_te))
                y_pred_te.fill(np.nan)
        #except NotFittedError as e:
        #    print(repr(e))
        #    y_pred_te = np.zeros(len(X_te))
        #    y_pred_te.fill(np.nan)
        #print 'model '+str(i)+' good '
        #y_pred_te = f_tr_te(Xi, yi, X_te, model)
        if doNorm == True:
            # Quantile-normalize each cluster's scores against the whole-data scores.
            templete = y_pred_all[:, 0]
            target = y_pred_te
            y_pred = f_quantileNorm(templete, target)
        else:
            y_pred = y_pred_te
        y_pred_all[:, i+1] = y_pred
    return y_pred_all

# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)

def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    Fit the ICE ensemble: fuzzy-cluster the training data, fit one model per
    cluster (plus the whole-data model), and derive the per-instance decision
    table.  Returns [clus, models, dec_ixs].
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))
    # train models and calculate the error-dicision tables
    y_tr = y_tr.astype(float)
    #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
    #                          random_state=None, n_estimators = 100 )
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calucating decision table')
    return [clus, models, dec_ixs]

#def_deal_miss_v_1(d):
'''
deal with missing values by replacing them by mean.
'''

def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    This version use the err mat to re-clustering
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))
    # train models and calculate the error-dicision tables
    y_tr = y_tr.astype(float)
    #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
    #                          random_state=None, n_estimators = 100 )
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
    # ******************** re-clustering ********************
    # Re-cluster in error space and refit, a fixed number of times.
    n_iter = 2
    for i in range(0, n_iter):
        clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
        tfs = f_clus_to_tfs(clus, len(X_tr))
        [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
    # *******************************************************
    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calucating decision table')
    return [clus, models, dec_ixs]

def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
    '''
    clus and inst_clus contains the same information that clus is the
    instances ids for each cluster, while inst_clus stores that for each
    instance, which cluster(s) it belongs to.
    dec_ixs stores the good cluster(s) for each instance, which may include
    even a remote cluster. each instance in dec_ixs does not contain the
    whole set of instances.
    '''
    # the first col is the prediction using the whole data
    # start from the second col, the result is by each cluster
    y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
    y_pred_ICE = np.zeros( len(X_te) )
    neighbour_mat = f_eu_dist2(X_tr, X_te)
    # ---------- for each testing instance ----------
    #n_partials = np.zeros( len(X_te) )
    #n_wholes = np.zeros( len(X_te) )
    for j in range(0, len(X_te) ):  # for each testing instance
        # find the top 10 neighbors for each test instance
        neighbour_col = neighbour_mat[:, j].flatten()
        ix = np.argsort(neighbour_col )
        # NOTE(review): descending sort assumes f_eu_dist2 returns a
        # *similarity*; if it returns raw Euclidean distances this picks the
        # farthest training points — confirm against f_eu_dist2.
        ix = ix[::-1]
        ix_top_neighbors = ix[0:N]
        #print 'testing inst ' + str(j)
        #print '  ix of top neighbors:'
        #print ix_top_neighbors
        # ---------- find all neighbors' picks ----------
        clus_ids_to_use = []
        nei_labels = []
        for cur_nb in range(0, N):  # for each neighbour
            # find each neighbour's pick
            cur_nb_ix = ix_top_neighbors[cur_nb]
            clus_id_to_use = list( dec_ixs[cur_nb_ix] )
            clus_ids_to_use = clus_ids_to_use + clus_id_to_use
            # also find neighbor's label. maybe will be used later as KNN pred
            # instead of using whole to pred.
            nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
        #print '  clus_ids_to_use:'
        #print clus_ids_to_use
        # cluster id + 1 to make the ix fit the col id in y_pred_all
        a = clus_ids_to_use
        a = list( np.array(a) + 1 )
        clus_ids_to_use = a
        # number of partial models used
        n_partial = len(clus_ids_to_use)
        # number of whole models used, based on parameters alpha, beta and N.
        n_whole = int( round( alpha*n_partial + beta*N ) )
        # Column 0 (whole-data model) is repeated n_whole times so that the
        # nanmean below weights whole vs partial predictions accordingly.
        clus_ids_to_use = clus_ids_to_use + [0] * n_whole
        #print '  clus_ids_to_use:'
        #print clus_ids_to_use
        #print nei_labels
        y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])
    print ('Done predicting testing instances.')
    return y_pred_ICE

# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
# --- benchmark driver: 10-fold CV of ICE vs whole-data SVM per dataset ----
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []
# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')
#for j in range(1, 50):
for j in range(1, 49):
    # NOTE(review): the matching except/finally for this try lies beyond the
    # end of this chunk.
    try:
        X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer
        y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
        #X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
        #y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
        #X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
        #y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
        #imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
        #plt.show()
        #sim = np.corrcoef(X)
        #np.fill_diagonal(sim, 0)
        #n_clus = 100
        #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
        #                          random_state=None, n_estimators = 100 )
        model = svm.SVC(kernel='linear', probability = True)
        skf = StratifiedKFold(n_splits=k_fold)
        skf.get_n_splits(X, y)
        y_preds_ICE = np.zeros( y.size )
        y_preds_whole = np.zeros( y.size )
        fold_i = 1
        for train_index, test_index in skf.split(X, y):
            # print("TRAIN:", train_index, "TEST:", test_index)
            X_tr, X_te = X[train_index], X[test_index]
            y_tr, y_te = y[train_index], y[test_index]
            [clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
            #[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)
            y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
            y_preds_ICE[test_index] = y_pred_ICE
            y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
            y_preds_whole[test_index] = y_pred_whole
            print( j)
            print( 'fold ' + str(fold_i) + ' finished')
            fold_i = fold_i + 1
        auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
        auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
        print (auc_ICE, auc_whole)
        aucs_ICE.append(auc_ICE)
        aucs_whole.append(auc_whole)
        # NOTE(review): statement truncated here by the chunk boundary.
        f.write(str(j) + '\t' + str(auc_ICE) + ' \t '
''' PRÁCTICA 3 Clasificación <NAME> ''' ############################# ####### BIBLIOTECAS ####### ############################# # Biblioteca lectura de datos # ========================== import pandas as pd # matemáticas # ========================== import numpy as np # Modelos lineales a usar # ========================== from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.linear_model import LogisticRegression # Preprocesado # ========================== from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import scale # visualización de datos # ========================== from sklearn.manifold import TSNE import matplotlib.pyplot as plt import seaborn as sns # utilizado para pintar la matriz de correlación # Validación cruzada # ========================== from sklearn.model_selection import cross_val_predict from sklearn.model_selection import cross_val_score #from sklearn.model_selection import LeaveOneOut # metricas # ========================== from sklearn.metrics import mean_absolute_error from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import plot_confusion_matrix # Otros # ========================== from operator import itemgetter #ordenar lista import time np.random.seed(1) ########## CONSTANTES ######### NOMBRE_FICHERO_CLASIFICACION = './datos/Sensorless_drive_diagnosis.txt' SEPARADOR_CLASIFICACION = ' ' NUMERO_CPUS_PARALELO = 4 #################################################### ################### funciones auxiliares def LeerDatos (nombre_fichero, separador): ''' Input: - file_name: nombre del fichero path relativo a dónde se ejecute o absoluto La estructura de los datos debe ser: - Cada fila un vector de características con su etiqueta en la última columna. 
Outputs: x,y x: matriz de filas de vector de características y: vector fila de la etiquetas ''' datos = pd.read_csv(nombre_fichero, sep = separador, header = None) valores = datos.values # Los datos son todas las filas de todas las columnas salvo la última x = valores [:: -1] y = valores [:, -1] # el vector de características es la últma columana return x,y def VisualizarClasificacion2D(x,y, titulo=None): """Representa conjunto de puntos 2D clasificados. Argumentos posicionales: - x: Coordenadas 2D de los puntos - y: Etiquetas""" _, ax = plt.subplots() # Establece límites xmin, xmax = np.min(x[:, 0]), np.max(x[:, 0]) ax.set_xlim(xmin - 1, xmax + 1) ax.set_ylim(np.min(x[:, 1]) - 1, np.max(x[:, 1]) + 1) # Pinta puntos ax.scatter(x[:, 0], x[:, 1], c=y, cmap="tab10", alpha=0.8) # Pinta etiquetas etiquetas = np.unique(y) for etiqueta in etiquetas: centroid = np.mean(x[y == etiqueta], axis=0) ax.annotate(int(etiqueta), centroid, size=14, weight="bold", color="white", backgroundcolor="black") # Muestra título if titulo is not None: plt.title(titulo) plt.show() def Separador(mensaje = None): ''' Hace parada del código y muestra un menaje en tal caso ''' #print('\n-------- fin apartado, enter para continuar -------\n') input('\n-------- fin apartado, enter para continuar -------\n') if mensaje: print('\n' + mensaje) ########################################################### #### Herramientas básicas def ExploracionInicial(x): media = x.mean(axis = 0) varianza = x.var(axis = 0) print('Exploración inicial datos: \n') print('\nMedia de cada variable') print(media) print('\nVarianza ') print(varianza) print('-'*20) print('Resumen de las tablas') print('-'*20) print('\nMedia') print(f'Valor mínimo de las medias {min(media)}') print(f'Valor máximo de las medias {max(media)}') print('\nVarianza ') print(f'Valor mínimo de las varianzas {min(varianza)}') print(f'Valor máximo de las varianzas {max(varianza)}') print('-'*20) ########################################################### 
########################################################### ########################################################### print(f'Procedemos a leer los datos del fichero {NOMBRE_FICHERO_CLASIFICACION}') x,y = LeerDatos( NOMBRE_FICHERO_CLASIFICACION, SEPARADOR_CLASIFICACION) ExploracionInicial(x) ''' # COMENTO PORQUE TARDA MUCHO LA EJECUCIÓN print('PCA con escalado de datos') pca_pipe = make_pipeline(StandardScaler(), PCA()) pca_pipe.fit(x) # Se extrae el modelo entrenado del pipeline modelo_pca = pca_pipe.named_steps['pca'] '' print('Vamos a representar los datos usando el algoritmo TSNE, este tarda un par de minutos') x_tsne = TSNE(n_components=2).fit_transform(modelo_pca.components._modelo_pca.components_T) tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300) tsne_results = tsne.fit_transform(x) print('t-SNE done! Time elapsed: ') VisualizarClasificacion2D(tsne_results, y) Separador('fin de la visualización') ''' ### Comprobación si los datos están balanceados def NumeroDeEtiquetas(y): ''' INPUT: y: etiquetas OUTPUT conteo: diccionario que asocia cada etiqueta con el número de veces que aparece ''' conteo = dict() etiquetas_unicas = np.unique(y) for i in etiquetas_unicas: conteo [i] = np.sum( y == i) return conteo def ImprimeDiccionario(diccionario, titulos): print( ' | '.join(titulos) + ' \t ') print ('--- | ' * (len(titulos)-1) + '--- ') for k,v in diccionario.items(): print(k , ' | ', v , ' ') Separador() print('Comprobación de balanceo') ImprimeDiccionario( NumeroDeEtiquetas(y), ['Etiqueta', 'Número apariciones']) Separador('Separamos test y entrenamiento') ###### separación test y entrenamiento ##### ratio_test_size = 0.2 x_train, x_test, y_train, y_test = train_test_split( x, y, test_size= ratio_test_size, shuffle = True, random_state=1) print('Veamos si ha sido homogéneo la selección etiquetas en test') ImprimeDiccionario( NumeroDeEtiquetas(y_train), ['Etiqueta', 'Número apariciones']) Separador('Normalización') print('Datos sin normalizar ') 
ExploracionInicial(x_train)

print ('Los datos van a ser normalizados.')

## Data normalisation: the scaler is fitted on the training set only and
## then reused on the test set, so no test information leaks into the scaling.
scaler = StandardScaler()
x_train = scaler.fit_transform( x_train )
x_test = scaler.transform( x_test )

# No need to re-check the normalisation:
# the mean must be zero and the standard deviation 1; anything else is rounding error.
# ExploracionInicial(x_train)

Separador('Análisis de la correlación entre los datos')

#------- correlation ----
def PlotMatrizCorrelacion(matriz_correlacion):
    '''
    Display the correlation matrix on screen,
    using the seaborn library.
    '''
    plt.figure(figsize=(12,8))
    plt.title('Matriz de correlación')
    sns.heatmap(matriz_correlacion)
    plt.show()


def Pearson( x, umbral, traza = False):
    '''INPUT
    x: feature matrix (one row per sample)
    umbral: minimum absolute value of the coefficient to be taken into account
    traza: if True, print the Pearson coefficients and the index pairs
    that exceed the threshold as a table

    OUTPUT
    indice_explicativo: indices of the columns that remain linearly
    independent (at that threshold)
    relaciones: list of tuples (correlation, index 1, index 2)
    '''
    r = np.corrcoef(x.T)
    longitud_r = len(r[0])

    # Subtract the identity matrix to remove the trivial self-correlations on
    # the diagonal; we only care about non-trivial high correlations.
    sin_diagonal = r - np.identity(len(r[0]))

    relaciones = []  # will hold (coefficient, i, j) tuples

    # Indices of columns that cannot be explained by another column:
    # if |corr(i, j)| > umbral (with i < j) then column j is dropped from the
    # explanatory set.  Dropped columns are marked with 0 below.
    indice_explicativo = np.arange( len(x[0]))

    # collect every pair (i, j), i < j, whose coefficient exceeds the threshold
    for i in range(longitud_r):
        for j in range(i+1, longitud_r):
            if abs(sin_diagonal[i,j]) > umbral:
                relaciones.append((sin_diagonal[i,j], i,j))
                #print(sin_diagonal[i,j], i,j)
                indice_explicativo [j] = 0 # column j is no longer explanatory;
                # marking with 0 is safe because index 0 always explains itself
                # (pairs are visited with i < j, in increasing order)

    indice_explicativo = np.unique(indice_explicativo) # keep a single zero
    relaciones.sort(reverse=True, key =itemgetter(0)) # print relations in order

    if(traza):
        print(f'\nCoeficiente pearson para umbral {umbral}')
        print('Coeficiente | Índice 1 | Índice 2 ')
        print( '--- | --- | --- ')
        for i,j,k in relaciones:
            print(i,' | ' , j, ' | ', k , ' ')

    return indice_explicativo, relaciones


Separador('Matriz de correlación asociada a los datos de entrenamiento')
print('Visualización de las correlación entre características')
PlotMatrizCorrelacion(np.corrcoef(x_train.T))

Separador('Índice de las características a mantener')

### Run the analysis for several thresholds
umbrales = [0.9999, 0.999, 0.95, 0.9]
indice_explicativo = dict()
relaciones = dict()

for umbral in umbrales:
    Separador(f'Características a manterner con umbrar {umbral}')
    indice_explicativo[umbral], relaciones[umbral] = Pearson(
        x_train,
        umbral,
        traza = True,
    )

Separador('Veamos cuántos somos capaces de reducir la dimensión con esta técnica')

numero_caracteristicas = len(x_train[0])
print(f'\nEl número inicial de características es de { numero_caracteristicas}\n' )
print('Las reducciones de dimensión total son: \n')
print('| umbral | tamaño tras reducción | reducción total | ')
print('|:------:|:---------------------:|:---------------:| ')
for umbral, ie in indice_explicativo.items():
    len_ie = len(ie)
    print(f'| {umbral} | {len_ie} | {numero_caracteristicas - len_ie} | ')

### Cross-validation
def MostrarMatrizConfusion(clasificador, x, y, titulo, normalizar):
    '''
    normalizar: 'true' or 'false'; must be one of the values accepted by the
    normalize argument of plot_confusion_matrix.
    '''
    mostrar_plot = plot_confusion_matrix(clasificador, x , y, normalize = normalizar)
    mostrar_plot.ax_.set_title(titulo)
    plt.show()


def Evaluacion( clasificador, x, y, x_test, y_test, k_folds, nombre_modelo,
               hacer_test = False ):
    '''
    Helper that automates one experiment:
    1. Fit the model.
    2. Run cross-validation.
    3. Time both the fit and the cross-validation.
    4. Measure the accuracy.

    INPUT:
    - clasificador: model used to build the classifier
    - x: training data
    - y: training labels
    - x_test, y_test: held-out test data
    - k_folds: number of folds for the cross-validation

    OUTPUT: clasificador
    '''
    ###### constants to tune
    numero_trabajos_paralelos_en_validacion_cruzada = NUMERO_CPUS_PARALELO
    ##########################

    print('\n','-'*20)
    print (f' Evaluando {nombre_modelo}')
    # print('-'*20)
    # print(f'\n------ Ajustando modelo------\n')

    tiempo_inicio_ajuste = time.time()

    # fit the model
    clasificador.fit(x,y)
    tiempo_fin_ajuste = time.time()
    tiempo_ajuste = tiempo_fin_ajuste - tiempo_inicio_ajuste
    print(f'Tiempo empleado para el ajuste: {tiempo_ajuste}s')

    # cross-validation
    tiempo_inicio_validacion_cruzada = time.time()

    # accuracy
    resultado_validacion_cruzada = cross_val_score(
        clasificador,
        x, y,
        scoring = 'accuracy',
        cv = k_folds,
        n_jobs = numero_trabajos_paralelos_en_validacion_cruzada
    )
    tiempo_fin_validacion_cruzada = time.time()
    #
<filename>test_pcn.py
# NOTE(review): the line above looks like a dataset/extraction artifact, not
# valid Python -- it should probably be removed from the real file.

# Unit tests for PCNetwork (a predictive-coding network from the project
# module ``pcn``).  The tests exercise the fast dynamics (forward /
# forward_constrained), the slow (weight) dynamics via ``slow_parameters``,
# reproducibility under seeding, batching, and agreement with the reference
# implementation in ``pcn_ref``.
import pytest
from pcn import PCNetwork
import torch


@pytest.fixture
def net():
    # small 3-layer network (3 -> 4 -> 2) shared by most tests
    net = PCNetwork([3, 4, 2])
    return net


def test_number_of_layers(net):
    assert len(net.W) == 2
    assert len(net.b) == 2
    assert len(net.x) == 3


def test_weight_sizes(net):
    # W[k] maps layer k activations to layer k+1: shape (out, in)
    assert net.W[0].shape == (4, 3)
    assert net.W[1].shape == (2, 4)


def test_x_sizes(net):
    net.forward(torch.FloatTensor([0.3, -0.2, 0.5]))
    assert [len(_) for _ in net.x] == [3, 4, 2]


def test_all_xs_not_none_after_forward_constrained(net):
    net.forward_constrained(
        torch.FloatTensor([-0.1, 0.2, 0.4]), torch.FloatTensor([0.3, -0.4])
    )
    for x in net.x:
        assert x is not None


def test_all_xs_change_during_forward_constrained(net):
    # set some starting values for x
    net.forward(torch.FloatTensor([-0.2, 0.3, 0.1]))
    old_x = [_.clone() for _ in net.x]

    net.forward_constrained(
        torch.FloatTensor([-0.1, 0.2, 0.4]), torch.FloatTensor([0.3, -0.4])
    )
    for old, new in zip(old_x, net.x):
        assert not torch.all(torch.isclose(old, new))


def test_all_xs_not_none_after_forward(net):
    net.forward(torch.FloatTensor([0.3, -0.4, 0.2]))
    for x in net.x:
        assert x is not None


def test_all_xs_change_during_forward(net):
    # set some starting values for x
    net.forward(torch.FloatTensor([-0.2, 0.3, 0.1]))
    old_x = [_.clone() for _ in net.x]

    net.forward(torch.FloatTensor([0.3, -0.4, 0.2]))
    for old, new in zip(old_x, net.x):
        assert not torch.any(torch.isclose(old, new))


def test_forward_result_is_stationary_point_of_forward_constrained(net):
    # if both ends are clamped to a forward pass's own activations,
    # the constrained dynamics should not move any layer
    x0 = torch.FloatTensor([0.5, -0.7, 0.2])
    net.forward(x0)
    old_x = [_.clone().detach() for _ in net.x]

    net.forward_constrained(old_x[0], old_x[-1])

    for old, new in zip(old_x, net.x):
        assert torch.allclose(old, new)


def test_weights_and_biases_change_when_optimizing_slow_parameters(net):
    x0 = torch.FloatTensor([-0.3, -0.2, 0.6])
    y0 = torch.FloatTensor([0.9, 0.3])

    old_Ws = [_.clone().detach() for _ in net.W]
    old_bs = [_.clone().detach() for _ in net.b]

    optimizer = torch.optim.Adam(net.slow_parameters(), lr=1.0)
    net.forward_constrained(x0, y0)

    optimizer.zero_grad()
    loss = net.loss()
    loss.backward()
    optimizer.step()

    for old_W, old_b, new_W, new_b in zip(old_Ws, old_bs, net.W, net.b):
        assert not torch.any(torch.isclose(old_W, new_W))
        assert not torch.any(torch.isclose(old_b, new_b))


def test_loss_is_nonzero_after_forward_constrained(net):
    x0 = torch.FloatTensor([-0.3, -0.2, 0.6])
    y0 = torch.FloatTensor([0.9, 0.3])
    net.forward_constrained(x0, y0)

    assert net.loss().abs().item() > 1e-6


def test_forward_does_not_change_weights_and_biases(net):
    old_Ws = [_.clone().detach() for _ in net.W]
    old_bs = [_.clone().detach() for _ in net.b]

    net.forward(torch.FloatTensor([0.3, -0.4, 0.2]))

    for old_W, old_b, new_W, new_b in zip(old_Ws, old_bs, net.W, net.b):
        assert torch.allclose(old_W, new_W)
        assert torch.allclose(old_b, new_b)


def test_forward_constrained_does_not_change_weights_and_biases(net):
    old_Ws = [_.clone().detach() for _ in net.W]
    old_bs = [_.clone().detach() for _ in net.b]

    net.forward_constrained(
        torch.FloatTensor([0.3, -0.4, 0.2]), torch.FloatTensor([-0.2, 0.2])
    )

    for old_W, old_b, new_W, new_b in zip(old_Ws, old_bs, net.W, net.b):
        assert torch.allclose(old_W, new_W)
        assert torch.allclose(old_b, new_b)


def test_loss_does_not_change_weights_and_biases(net):
    # ensure the x variables have valid values assigned to them
    net.forward(torch.FloatTensor([0.1, 0.2, 0.3]))

    old_Ws = [_.clone().detach() for _ in net.W]
    old_bs = [_.clone().detach() for _ in net.b]

    net.loss()

    for old_W, old_b, new_W, new_b in zip(old_Ws, old_bs, net.W, net.b):
        assert torch.allclose(old_W, new_W)
        assert torch.allclose(old_b, new_b)


def test_no_nan_or_inf_after_a_few_learning_steps(net):
    torch.manual_seed(0)
    optimizer = torch.optim.Adam(net.slow_parameters())
    for i in range(4):
        x = torch.Tensor(3).uniform_()
        y = torch.Tensor(2).uniform_()
        net.forward_constrained(x, y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    for W, b in zip(net.W, net.b):
        assert torch.all(torch.isfinite(W))
        assert torch.all(torch.isfinite(b))

    for x in net.x:
        assert torch.all(torch.isfinite(x))


def test_forward_output_depends_on_input(net):
    y1 = net.forward(torch.FloatTensor([0.1, 0.3, -0.2]))
    y2 = net.forward(torch.FloatTensor([-0.5, 0.1, 0.2]))

    assert not torch.allclose(y1, y2)


def test_forward_sets_first_element_of_x_to_input_sample(net):
    x0 = torch.FloatTensor([0.5, 0.2, 0.1])
    net.forward(x0)

    assert torch.allclose(net.x[0], x0)


def test_forward_constrained_sets_first_element_of_x_to_input_sample(net):
    x0 = torch.FloatTensor([0.5, 0.2, 0.1])
    y0 = torch.FloatTensor([0.5, -0.2])
    net.forward_constrained(x0, y0)

    assert torch.allclose(net.x[0], x0)


def test_forward_constrained_sets_last_element_of_x_to_output_sample(net):
    x0 = torch.FloatTensor([0.5, 0.2, 0.1])
    y0 = torch.FloatTensor([0.5, -0.2])
    net.forward_constrained(x0, y0)

    assert torch.allclose(net.x[-1], y0)


def test_initialize_values_same_when_torch_seed_is_same():
    seed = 321
    dims = [2, 6, 5, 3]

    torch.manual_seed(seed)
    net = PCNetwork(dims)

    old_Ws = [_.clone().detach() for _ in net.W]
    old_bs = [_.clone().detach() for _ in net.b]

    torch.manual_seed(seed)
    net = PCNetwork(dims)

    new_Ws = [_.clone().detach() for _ in net.W]
    new_bs = [_.clone().detach() for _ in net.b]

    for old_W, old_b, new_W, new_b in zip(old_Ws, old_bs, new_Ws, new_bs):
        assert torch.allclose(old_W, new_W)
        assert torch.allclose(old_b, new_b)


def test_initialize_weights_change_for_subsequent_calls_if_seed_not_reset():
    seed = 321
    dims = [2, 6, 5, 3]

    torch.manual_seed(seed)
    net = PCNetwork(dims)
    var1 = [_.clone().detach() for _ in net.W]

    net = PCNetwork(dims)
    var2 = [_.clone().detach() for _ in net.W]

    for old, new in zip(var1, var2):
        assert not torch.any(torch.isclose(old, new))


def test_weights_reproducible_for_same_seed_after_learning():
    seed = 321
    dims = [2, 6, 5, 3]
    x = torch.FloatTensor([[0.2, -0.3], [0.5, 0.7], [-0.3, 0.2]])
    y = torch.FloatTensor([[-0.5, 0.2, 0.7], [1.5, 0.6, -0.3], [-0.2, 0.5, 0.6]])

    # do some learning
    torch.manual_seed(seed)
    net = PCNetwork(dims)
    optimizer = torch.optim.Adam(net.slow_parameters())
    for crt_x, crt_y in zip(x, y):
        net.forward_constrained(crt_x, crt_y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    var1 = [_.clone().detach() for _ in net.W]

    # reset and do the learning again
    torch.manual_seed(seed)
    net = PCNetwork(dims)
    optimizer = torch.optim.Adam(net.slow_parameters())
    for crt_x, crt_y in zip(x, y):
        net.forward_constrained(crt_x, crt_y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    var2 = [_.clone().detach() for _ in net.W]

    for old, new in zip(var1, var2):
        assert torch.allclose(old, new)


def test_learning_effects_are_different_for_subsequent_runs():
    seed = 321
    dims = [2, 6, 5, 3]
    x = torch.FloatTensor([[0.2, -0.3], [0.5, 0.7], [-0.3, 0.2]])
    y = torch.FloatTensor([[-0.5, 0.2, 0.7], [1.5, 0.6, -0.3], [-0.2, 0.5, 0.6]])

    # do some learning
    torch.manual_seed(seed)
    net = PCNetwork(dims)
    optimizer = torch.optim.Adam(net.slow_parameters())
    for crt_x, crt_y in zip(x, y):
        net.forward_constrained(crt_x, crt_y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    var1 = [_.clone().detach() for _ in net.W]

    # reset and do the learning again -- without resetting random seed this time!
    net = PCNetwork(dims)
    optimizer = torch.optim.Adam(net.slow_parameters())
    for crt_x, crt_y in zip(x, y):
        net.forward_constrained(crt_x, crt_y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    var2 = [_.clone().detach() for _ in net.W]

    for old, new in zip(var1, var2):
        assert not torch.allclose(old, new)


@pytest.fixture
def trained_current_and_ref() -> tuple:
    # Train a PCNetwork and the reference implementation (PCNetworkRef) on the
    # same data from identical initial weights, and return both for comparison.
    from pcn_ref import PCNetworkRef

    seed = 100
    dims = [2, 6, 5, 3]
    variances = [0.5, 1.5, 2.7]
    lr = 0.2
    x = torch.FloatTensor([[0.2, -0.3], [0.5, 0.7], [-0.3, 0.2]])
    y = torch.FloatTensor([[-0.5, 0.2, 0.7], [1.5, 0.6, -0.3], [-0.2, 0.5, 0.6]])

    # do some learning
    torch.manual_seed(seed)
    net = PCNetwork(dims, variances=variances)
    weights0 = [_.clone().detach() for _ in net.W]
    biases0 = [_.clone().detach() for _ in net.b]

    # the Whittington&Bogacz implementation inexplicably multiplies the learning rates
    # by the output-layer variance
    optimizer = torch.optim.SGD(net.slow_parameters(), lr=lr * variances[-1])
    for crt_x, crt_y in zip(x, y):
        net.forward_constrained(crt_x, crt_y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    net_ref = PCNetworkRef(dims, variances=variances, lr=lr)

    # do some learning
    torch.manual_seed(seed)
    net_ref.reset()

    # ensure matching weights and biases
    for i, (crt_W, crt_b) in enumerate(zip(weights0, biases0)):
        net_ref.W[i][:] = crt_W
        net_ref.b[i][:] = crt_b

    for crt_x, crt_y in zip(x, y):
        net_ref.learn(crt_x, crt_y)

    return net, net_ref


def test_compare_forward_result_after_learning_to_ref_impl(trained_current_and_ref):
    net, net_ref = trained_current_and_ref

    test_x = torch.FloatTensor([0.5, 0.2])
    out = net.forward(test_x)
    out_ref = net_ref.forward(test_x)

    assert torch.allclose(out, out_ref)


def test_compare_weights_after_learning_to_ref_impl(trained_current_and_ref):
    net, net_ref = trained_current_and_ref

    for new_W, new_b, W, b in zip(net.W, net.b, net_ref.W, net_ref.b):
        assert torch.allclose(new_W, W)
        assert torch.allclose(new_b, b)


def test_forward_constrained_with_nontrivial_variances_vs_ref_impl():
    from pcn_ref import PCNetworkRef

    seed = 100
    dims = [2, 6, 5, 3]
    variances = [0.5, 1.5, 2.7]
    x = torch.FloatTensor([0.2, -0.3])
    y = torch.FloatTensor([-0.5, 0.2, 0.7])

    # do some learning
    torch.manual_seed(seed)
    net = PCNetwork(dims, variances=variances)
    weights0 = [_.clone().detach() for _ in net.W]
    biases0 = [_.clone().detach() for _ in net.b]

    net.forward_constrained(x, y)

    # now the reference implementation
    net_ref = PCNetworkRef(dims, variances=variances)

    torch.manual_seed(seed)
    net_ref.reset()

    # ensure matching weights and biases
    for i, (crt_W, crt_b) in enumerate(zip(weights0, biases0)):
        net_ref.W[i][:] = crt_W
        net_ref.b[i][:] = crt_b

    net_ref.forward(x)
    net_ref.infer(y)

    for crt_x, crt_x_ref in zip(net.x, net_ref.x):
        assert torch.allclose(crt_x, crt_x_ref)


def test_training_with_batches_of_size_one():
    seed = 100
    dims = [2, 6, 5, 3]
    variances = [0.5, 1.5, 2.7]
    lr = 0.2
    x = torch.FloatTensor([[0.2, -0.3], [0.5, 0.7], [-0.3, 0.2]])
    y = torch.FloatTensor([[-0.5, 0.2, 0.7], [1.5, 0.6, -0.3], [-0.2, 0.5, 0.6]])

    # do some learning
    torch.manual_seed(seed)
    net = PCNetwork(dims, variances=variances)
    optimizer = torch.optim.SGD(net.slow_parameters(), lr=lr)
    for crt_x, crt_y in zip(x, y):
        net.forward_constrained(crt_x, crt_y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    test_x = torch.FloatTensor([0.5, 0.2])
    out = net.forward(test_x)

    # do the same learning with batches of size 1
    torch.manual_seed(seed)
    net = PCNetwork(dims, variances=variances)
    optimizer = torch.optim.SGD(net.slow_parameters(), lr=lr)
    for crt_x, crt_y in zip(x, y):
        crt_x_batch = crt_x[None, :]
        crt_y_batch = crt_y[None, :]
        net.forward_constrained(crt_x_batch, crt_y_batch)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    test_x = torch.FloatTensor([0.5, 0.2])
    out_batch = net.forward(test_x)

    assert torch.allclose(out, out_batch)


def test_training_with_batches_of_nontrivial_size():
    seed = 200
    dims = [2, 6, 5]
    variances = [0.5, 1.5]
    lr = 1e-4
    it_inference = 10

    n_samples = 50
    torch.manual_seed(seed)
    x = torch.normal(0, 1, size=(n_samples, dims[0]))
    y = torch.normal(0, 1, size=(n_samples, dims[-1]))

    # do some learning
    torch.manual_seed(seed)
    kwargs = {"variances": variances, "it_inference": it_inference}
    net = PCNetwork(dims, **kwargs)
    optimizer = torch.optim.SGD(net.slow_parameters(), lr=lr)
    for crt_x, crt_y in zip(x, y):
        net.forward_constrained(crt_x, crt_y)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    test_x = torch.FloatTensor([0.5, -0.2])
    out = net.forward(test_x)

    # do the same learning with batches of size 1
    torch.manual_seed(seed)
    net = PCNetwork(dims, **kwargs)
    optimizer = torch.optim.SGD(net.slow_parameters(), lr=lr)
    for crt_x1, crt_x2, crt_y1, crt_y2 in zip(x[::2], x[1::2], y[::2], y[1::2]):
        crt_x_batch = torch.vstack((crt_x1, crt_x2))
        crt_y_batch = torch.vstack((crt_y1, crt_y2))
        net.forward_constrained(crt_x_batch, crt_y_batch)

        optimizer.zero_grad()
        net.loss().backward()
        optimizer.step()

    test_x = torch.FloatTensor([0.5, -0.2])
    out_batch = net.forward(test_x)

    assert torch.allclose(out, out_batch)


def test_forward_constrained_returns_sequence_of_correct_length(net):
    losses = net.forward_constrained(
        torch.FloatTensor([-0.1, 0.2, 0.4]), torch.FloatTensor([0.3, -0.4])
#!/usr/bin/env python3 # # Copyright 2017 rootkiwi # # AN2Linux-server is licensed under GNU General Public License 3, with the additional # special exception to link portions of this program with the OpenSSL library. # # See LICENSE for more details. import logging import sys try: import ssl except ImportError as e: print('Dependency missing: openssl') print(e) sys.exit(1) try: import gi from gi.repository import GLib except ImportError as e: print('Dependency missing: python-gobject') print(e) sys.exit(1) try: gi.require_version('Notify', '0.7') from gi.repository import Notify except (ImportError, ValueError) as e: print('Dependency missing: libnotify') print(e) sys.exit(1) try: gi.require_version('GdkPixbuf', '2.0') from gi.repository import GdkPixbuf except (ImportError, ValueError) as e: print('Dependency missing: GdkPixbuf') print(e) sys.exit(1) import threading import datetime import os import configparser import struct import socketserver import socket import signal import time import subprocess import hashlib import termios import base64 import select import re from collections import deque class Notification: # this is a deque of the latest notifications hash to be able to skip duplicates latest_notifications = None # this is a list of notification titles that ignore the latest_notifications list titles_that_ignore_latest = None # list of keywords that trigger notifcation to be ignored keywords_to_ignore = None #regexes of title and contents of notifications to be ignored regexes_to_ignore_in_title = None regexes_to_ignore_in_content = None def __init__(self, title, message, icon_bytes=None): self.title = title self.message = message self.icon_bytes = icon_bytes self.notif_hash = hashlib.sha256(title.encode() + message.encode() + icon_bytes if icon_bytes is not None else b'').digest() def show(self): if (self.notif_hash not in Notification.latest_notifications or self.title in Notification.titles_that_ignore_latest) \ and not any(kw in self.title for kw 
in Notification.keywords_to_ignore if kw != '') \
                and not any(regex.match(self.title) for regex in Notification.regexes_to_ignore_in_title) \
                and not any(regex.match(self.message) for regex in Notification.regexes_to_ignore_in_content):
            Notification.latest_notifications.append(self.notif_hash)
            self.notif = Notify.Notification.new(self.title, self.message, '')
            self.notif.set_timeout(notification_timeout_milliseconds)
            self.notif.set_hint('desktop-entry', GLib.Variant('s', 'an2linux'))
            if self.icon_bytes is not None:
                # decode the raw icon bytes into a pixbuf for libnotify
                pixbuf_loader = GdkPixbuf.PixbufLoader.new()
                pixbuf_loader.write(self.icon_bytes)
                pixbuf_loader.close()
                self.notif.set_image_from_pixbuf(pixbuf_loader.get_pixbuf())
            try:
                self.notif.show()
            except Exception as e:
                logging.error('(Notification) Error showing notification:' \
                              ' {}'.format(e));
                logging.error('Please make sure you have a notification' \
                              ' server installed on your system')


class TCPHandler(socketserver.BaseRequestHandler):
    # Handles one TCP connection: either a pairing request (interactive,
    # only one at a time) or an authenticated notification connection.

    # class-level flags shared by the pairing thread and the response thread
    active_pairing_connection = False
    cancel_pairing = False

    def handle(self):
        # Dispatch on the first byte of the connection.
        try:
            conn_type = self.request.recv(1)
            if conn_type == PAIR_REQUEST and not TCPHandler.active_pairing_connection:
                TCPHandler.active_pairing_connection = True
                TCPHandler.cancel_pairing = False
                try:
                    self.handle_pair_request()
                except Exception as e:
                    logging.error('(TCP) Error pair_request: {}'.format(e))
                TCPHandler.active_pairing_connection = False
            elif conn_type == NOTIF_CONN:
                try:
                    self.handle_notification_connection()
                except Exception as e:
                    logging.error('(TCP) Error notif_conn: {}'.format(e))
        except Exception as e:
            logging.error('(TCP) Error handle: {}'.format(e))

    def handle_pair_request(self):
        # Pairing uses a dedicated TLS context: the client is not yet trusted,
        # so no client-certificate verification happens here; instead both
        # sides confirm a fingerprint of (client_cert + server_cert) manually.
        pair_tls_ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
        pair_tls_ctx.load_cert_chain(CERTIFICATE_PATH, RSA_PRIVATE_KEY_PATH)
        pair_tls_ctx.set_ciphers('ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA')
        pair_tls_ctx.set_ecdh_curve('prime256v1')
        pair_tls_ctx.options |= ssl.OP_SINGLE_ECDH_USE
        try:
            tls_socket = pair_tls_ctx.wrap_socket(self.request, server_side=True)
        except ssl.SSLError as ssle:
            logging.error('(TCP) Failed TLS handshake pair_request: {}'.format(ssle))
            return

        ip = self.client_address[0]
        # remove first ::ffff: if ipv4 mapped ipv6 address
        if len(ip) > 7 and ip[:7] == '::ffff:':
            ip = ip[7:]
        logging.info('(TCP) Pair request from: {}\n'.format(ip))

        client_cert_size = struct.unpack('>I', recvall(tls_socket, 4))[0]
        client_cert = recvall(tls_socket, client_cert_size)

        # fingerprint shown to the user for out-of-band verification
        sha256 = hashlib.sha256(client_cert + SERVER_CERT_DER).hexdigest().upper()
        sha256_format = [sha256[x:x + 2] for x in range(0, len(sha256), 2)]
        print('It is very important that you verify that the following hash matches what is viewed on your phone\n'
              'It is a sha256 hash like so: sha256(client_cert + server_cert)\n\n'
              'If the hash don\'t match there could be a man-in-the-middle attack\n'
              'Or something else is not right, you should abort if they don\'t match!\n')
        print(' '.join(sha256_format[:8]))
        print(' '.join(sha256_format[8:16]))
        print(' '.join(sha256_format[16:24]))
        print(' '.join(sha256_format[24:]))

        self.server_allow_pair = False
        self.client_allow_pair = False
        try:
            # discard any input typed before the prompt appeared
            termios.tcflush(sys.stdin, termios.TCIFLUSH)
        except Exception:
            pass
        self.user_input_prompt = 'Enter "yes" to accept pairing or "no" to deny: '
        print('\n{}'.format(self.user_input_prompt), end='')
        # listen for the client's accept/deny in parallel with local input
        threading.Thread(target=self.pair_response_thread, args=(tls_socket,)).start()
        while not TCPHandler.cancel_pairing:
            ready = select.select([sys.stdin], [], [], 1)[0]
            if ready:
                user_input = sys.stdin.readline().strip()
                if user_input.casefold() == 'yes'.casefold():
                    tls_socket.sendall(ACCEPT_PAIRING)
                    self.server_allow_pair = True
                    if not self.client_allow_pair:
                        print('Waiting for client response')
                    # wait until the client has also accepted, then store the cert
                    while not TCPHandler.cancel_pairing:
                        if self.client_allow_pair:
                            add_to_authorized_certs(client_cert)
                            break
                        else:
                            time.sleep(1)
                    break
                elif user_input.casefold() == 'no'.casefold():
                    tls_socket.sendall(DENY_PAIRING)
                    print('Pairing canceled')
                    TCPHandler.cancel_pairing = True
                else:
                    print(self.user_input_prompt, end='', flush=True)

    def pair_response_thread(self, tls_socket):
        # Background thread: waits for the client's ACCEPT/DENY byte while the
        # main thread is blocked reading the local user's answer.
        while not TCPHandler.cancel_pairing:
            ready = select.select([tls_socket], [], [], 1)[0]
            if ready:
                client_response = tls_socket.recv(1)
                if client_response == ACCEPT_PAIRING:
                    self.client_allow_pair = True
                    if self.server_allow_pair:
                        print('Client accepted pairing')
                        break
                    else:
                        print('\r{} (Client accepted pairing): '.format(self.user_input_prompt[:-2]), end='')
                        # to notice if socket closed
                        while TCPHandler.active_pairing_connection:
                            ready = select.select([tls_socket], [], [], 1)[0]
                            if ready and tls_socket.recv(1) == b'':
                                if not self.server_allow_pair and not TCPHandler.cancel_pairing:
                                    print('\nSocket closed')
                                break
                        TCPHandler.cancel_pairing = True
                elif client_response == DENY_PAIRING:
                    if self.server_allow_pair:
                        print('Client denied pairing')
                    else:
                        print('\nClient denied pairing')
                    TCPHandler.cancel_pairing = True
                else:
                    if not TCPHandler.cancel_pairing:
                        print('\nSocket closed or recieved something strange')
                    TCPHandler.cancel_pairing = True

    def handle_notification_connection(self):
        # Notification connections require a client certificate that was
        # stored during pairing (verified via notif_tls_ctx).
        try:
            notif_tls_ctx.load_verify_locations(cadata=parse_authorized_certs())
            tls_socket = notif_tls_ctx.wrap_socket(self.request, server_side=True)
        except Exception as e:
            logging.error('(TCP) Failed TLS handshake notif_conn: {}'.format(e))
            return

        # one recv should not take longer than 10 sec
        tls_socket.settimeout(10)

        notification_flags = struct.unpack('>B', tls_socket.recv(1))[0]
        include_title = chkflags(notification_flags, FLAG_INCLUDE_TITLE)
        include_message = chkflags(notification_flags, FLAG_INCLUDE_MESSAGE)
        include_icon = chkflags(notification_flags, FLAG_INCLUDE_ICON)

        title = ''
        message = ''
        if include_title or include_message:
            # title and message are sent together, separated by '|||'
            title_and_or_message_size = struct.unpack('>I', recvall(tls_socket, 4))[0]
            title_and_or_message = recvall(tls_socket, title_and_or_message_size).decode()
            if include_title:
                title = title_and_or_message.split('|||')[0]
            if include_message:
                message = title_and_or_message.split('|||')[1]

        if include_icon:
            icon_size = struct.unpack('>I', recvall(tls_socket, 4))[0]
            icon_bytes = recvall(tls_socket, icon_size)
            try:
                Notification(title, message, icon_bytes).show()
            except Exception:
                # fall back to showing the notification without its icon
                Notification(title, message).show()
        else:
            Notification(title, message).show()


class ThreadingDualStackServer(socketserver.ThreadingTCPServer):
    # IPv6 server socket with IPV6_V6ONLY disabled, so it accepts both
    # IPv4 (as mapped addresses) and IPv6 connections.
    address_family = socket.AF_INET6

    def server_bind(self):
        self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
        super().server_bind()


class ThreadingBluetoothServer:
    # RFCOMM server that advertises an SPP service and spawns one
    # BluetoothHandler thread per accepted client.

    def __init__(self):
        self.bluetooth_server_sock = BluetoothSocket(RFCOMM)
        self.bluetooth_server_sock.bind(("", PORT_ANY))
        self.bluetooth_server_sock.listen(1)
        self.port = self.bluetooth_server_sock.getsockname()[1]
        # hardcoded uuid generated from https://www.uuidgenerator.net/
        self.uuid = "a97fbf21-2ef3-4daf-adfb-2a53ffa87b8e"
        advertise_service(self.bluetooth_server_sock, "AN2Linux_bluetooth_server", service_id=self.uuid,
                          service_classes=[self.uuid, SERIAL_PORT_CLASS], profiles=[SERIAL_PORT_PROFILE])
        self.shutdown_request = False

    def serve_forever(self):
        # Accept loop with a 1 s select timeout so shutdown() is noticed promptly.
        while not self.shutdown_request:
            ready = select.select([self.bluetooth_server_sock], [], [], 1)[0]
            if ready:
                client_sock, client_info = self.bluetooth_server_sock.accept()
                threading.Thread(target=BluetoothHandler, args=(client_sock, client_info[0])).start()
        self.bluetooth_server_sock.close()

    def shutdown(self):
        self.shutdown_request = True


class BluetoothHandler:
    # Bluetooth counterpart of TCPHandler.  Because the transport is an RFCOMM
    # socket, TLS is run through ssl.MemoryBIO objects and the encrypted
    # records are framed manually (4-byte big-endian length prefix).

    active_pairing_connection = False
    cancel_pairing = False

    def __init__(self, socket, address):
        self.socket = socket
        self.address = address
        self.tls_bio = None
        self.incoming = None
        self.outgoing = None
        self.handle()

    def handle(self):
        # Dispatch on the first byte, mirroring TCPHandler.handle.
        try:
            conn_type = self.socket.recv(1)
            if conn_type == PAIR_REQUEST and not BluetoothHandler.active_pairing_connection:
                BluetoothHandler.active_pairing_connection = True
                BluetoothHandler.cancel_pairing = False
                try:
                    self.handle_pair_request()
                except Exception as e:
                    logging.error('(Bluetooth) Error pair_request: {}'.format(e))
                BluetoothHandler.active_pairing_connection = False
            elif conn_type == NOTIF_CONN:
                try:
                    self.handle_notification_connection()
                except Exception as e:
                    logging.error('(Bluetooth) Error notif_conn: {}'.format(e))
        except Exception as e:
            logging.error('(Bluetooth) Error handle: {}'.format(e))
        finally:
            self.socket.close()

    def do_handshake(self):
        # Manual TLS handshake over the length-prefixed RFCOMM framing.
        # incoming <- ClientHello
        client_hello_size = struct.unpack('>I', recvall(self.socket, 4))[0]
        client_hello = recvall(self.socket, client_hello_size)
        self.incoming.write(client_hello)

        # ServerHello..ServerHelloDone -> outgoing
        try:
            self.tls_bio.do_handshake()
        except ssl.SSLWantReadError:
            # expected: the engine produced its flight and needs more input
            server_hello = self.outgoing.read()
            server_hello_size = struct.pack('>I', len(server_hello))
            self.socket.sendall(server_hello_size)
            self.socket.sendall(server_hello)

        # incoming <- [client]Certificate*..ClientKeyExchange..Finished
        client_keyexchange_size = struct.unpack('>I', recvall(self.socket, 4))[0]
        client_keyexchange = recvall(self.socket, client_keyexchange_size)
        self.incoming.write(client_keyexchange)

        # ChangeCipherSpec..Finished -> outgoing
        self.tls_bio.do_handshake()
        server_change_cipher_spec = self.outgoing.read()
        server_change_cipher_spec_size = struct.pack('>I', len(server_change_cipher_spec))
        self.socket.sendall(server_change_cipher_spec_size)
        self.socket.sendall(server_change_cipher_spec)

    def tls_read_full_record(self):
        # Drain the SSL object until no decrypted bytes remain buffered.
        pending = 1
        record = bytearray()
        while pending > 0:
            record.extend(self.tls_bio.read(4096))
            pending = self.tls_bio.pending()
        return record

    def tls_encrypt(self, app_data):
        # plaintext in -> TLS record bytes out
        self.tls_bio.write(app_data)
        return self.outgoing.read()

    def tls_decrypt(self, net_data):
        # TLS record bytes in -> plaintext out
        self.incoming.write(net_data)
        return self.tls_read_full_record()

    def handle_pair_request(self):
        # Older Android (KitKat) clients only support TLSv1 + DHE; newer ones
        # get TLSv1.2 with ECDHE.
        if bluetooth_support_kitkat:
            pair_tls_ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1)
            pair_tls_ctx.set_ciphers('DHE-RSA-AES256-SHA')
            pair_tls_ctx.load_dh_params(DHPARAM_PATH)
        else:
            pair_tls_ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
            pair_tls_ctx.set_ciphers('ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA')
            pair_tls_ctx.set_ecdh_curve('prime256v1')
        pair_tls_ctx.load_cert_chain(CERTIFICATE_PATH, RSA_PRIVATE_KEY_PATH)
        self.incoming = ssl.MemoryBIO()
        self.outgoing = ssl.MemoryBIO()
        self.tls_bio = pair_tls_ctx.wrap_bio(incoming=self.incoming, outgoing=self.outgoing, server_side=True)
        try:
            self.do_handshake()
        except ssl.SSLError as ssle:
            logging.error('(Bluetooth) Failed TLS handshake pair_request: {}'.format(ssle))
            return

        logging.info('(Bluetooth) Pair request from: {}\n'.format(self.address))

        '''I don't know how else to do this when using SSLEngine/SSL_BIO,
        I don't see any security issue with sending the length of the encrypted data in cleartext,
        using something like wireshark it's possible to see the length anyway'''
        client_cert_size = struct.unpack('>I', recvall(self.socket, 4))[0]
        client_cert_encrypted = recvall(self.socket, client_cert_size)
        client_cert = self.tls_decrypt(client_cert_encrypted)

        # fingerprint shown to the user for out-of-band verification
        sha256 = hashlib.sha256(client_cert + SERVER_CERT_DER).hexdigest().upper()
        sha256_format = [sha256[x:x + 2] for x in range(0, len(sha256), 2)]
        print('It is very important that you verify that the following hash matches what is viewed on your phone\n'
              'It is a sha256 hash like so: sha256(client_cert + server_cert)\n\n'
              'If the hash don\'t match there could be a man-in-the-middle attack\n'
              'Or something else is not right, you should abort if they don\'t match!\n')
        print(' '.join(sha256_format[:8]))
        print(' '.join(sha256_format[8:16]))
        print(' '.join(sha256_format[16:24]))
        print(' '.join(sha256_format[24:]))

        self.server_allow_pair = False
        self.client_allow_pair = False
        try:
            # discard any input typed before the prompt appeared
            termios.tcflush(sys.stdin, termios.TCIFLUSH)
        except Exception:
            pass
        self.user_input_prompt = 'Enter "yes" to accept pairing or "no" to deny: '
        print('\n{}'.format(self.user_input_prompt), end='')
        # listen for the client's accept/deny in parallel with local input
        threading.Thread(target=self.pair_response_thread).start()
        while not BluetoothHandler.cancel_pairing:
            ready = select.select([sys.stdin], [], [], 1)[0]
            if ready:
                user_input = sys.stdin.readline().strip()
                if user_input.casefold() == 'yes'.casefold():
                    encrypted = self.tls_encrypt(ACCEPT_PAIRING)
                    encrypted_size = struct.pack('>I', len(encrypted))
                    self.socket.sendall(encrypted_size)
                    self.socket.sendall(encrypted)
                    self.server_allow_pair = True
                    if not self.client_allow_pair:
                        print('Waiting for client response')
                    # wait until the client has also accepted, then store the cert
                    while not BluetoothHandler.cancel_pairing:
                        if self.client_allow_pair:
                            add_to_authorized_certs(client_cert)
                            break
                        else:
                            time.sleep(1)
                    break
                elif user_input.casefold() == 'no'.casefold():
                    encrypted = self.tls_encrypt(DENY_PAIRING)
                    encrypted_size = struct.pack('>I', len(encrypted))
                    self.socket.sendall(encrypted_size)
                    self.socket.sendall(encrypted)
                    print('Pairing canceled')
                    BluetoothHandler.cancel_pairing = True
                else:
                    print(self.user_input_prompt, end='', flush=True)

    def pair_response_thread(self):
        while
("nickts", Integer()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchServer: "NO_SUCH_SERVER", UserAlreadyConnected: "UUID_ALREADY_CONNECTED" } requiresAnswer = False class RemoveUser(Command): arguments = [ ("user", String()), ("reason", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class SetIdent(Command): arguments = [ ("user", String()), ("ident", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class SetHost(Command): arguments = [ ("user", String()), ("host", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class SetName(Command): arguments = [ ("user", String()), ("gecos", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class RequestJoinChannel(Command): arguments = [ ("channel", String()), ("user", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class JoinChannel(Command): arguments = [ ("channel", String()), ("user", String()), ("chants", Integer()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class LeaveChannel(Command): arguments = [ ("channel", String()), ("user", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER", NoSuchChannel: "NO_SUCH_CHANNEL" } requiresAnswer = False class RequestSetMode(Command): arguments = [ ("user", String()), ("source", String()), ("modestring", String()), ("params", ListOf(String())) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class SetMode(Command): arguments = [ ("target", String()), ("targetts", Integer()), ("source", String()), 
("modestring", String()), ("params", ListOf(String())) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchTarget: "NO_SUCH_TARGET" } requiresAnswer = False class SetTopic(Command): arguments = [ ("channel", String()), ("chants", Integer()), ("topic", String()), ("topicsetter", String()), ("topicts", Integer()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchChannel: "NO_SUCH_CHANNEL" } requiresAnswer = False class RequestNick(Command): arguments = [ ("user", String()), ("newnick", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class ChangeNick(Command): arguments = [ ("user", String()), ("newnick", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } requiresAnswer = False class SetMetadata(Command): arguments = [ ("target", String()), ("targetts", Integer()), ("namespace", String()), ("key", String()), ("value", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchTarget: "NO_SUCH_TARGET" } requiresAnswer = False class SendAnnouncement(Command): arguments = [ ("user", String()), ("type", String()), ("args", ListOf(String())), ("prefix", String()), ("to", String()) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchUser: "NO_SUCH_USER" } class ChannelMessage(Command): arguments = [ ("channel", String()), ("type", String()), ("args", ListOf(String())), ("prefix", String()), ("to", String()), ("skip", ListOf(String())) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE", NoSuchChannel: "NO_SUCH_CHANNEL" } requiresAnswer = False class ModuleMessage(Command): arguments = [ ("destserver", String()), ("type", String()), ("args", ListOf(String())) ] errors = { HandshakeNotYetComplete: "HANDSHAKE_NOT_COMPLETE" } class PingServer(Command): arguments = [ ("data", String()) ] response = [ ("data", String()) ] class ServerProtocol(AMP): def 
__init__(self, ircd): self.ircd = ircd self.sentDataBurst = None # Just to make sure it can only be sent once self.name = None self.description = None self.remoteServers = set() self.localOrigin = True self.nearHop = self.ircd.name self.nearRemoteLink = self.ircd.name self.hopCount = 1 self.ignoreUsers = set() self.disconnected = Deferred() self.lastping = now() self.lastpong = now() self.pinger = LoopingCall(self.ping) def connectionMade(self): self.pinger.start(60, now=False) def serverHandshake(self, name, password, description, version, commonmodules): if self.name is not None: raise HandshakeAlreadyComplete ("The server handshake has already been completed between these servers.") if version not in compatible_versions: raise IncompatibleVersions ("Protocol version {} is not compatible with this version".format(version)) commonModDiff = set(commonmodules) ^ self.ircd.common_modules if commonModDiff: raise ModuleMismatch ("Common modules are not matched between servers: {}".format(", ".join(commonModDiff))) if name not in self.ircd.servconfig["serverlinks"]: raise ServerNoLink ("There is no link data in the configuration file for the server trying to link.") if name in self.ircd.servers or self.ircd.name == name: raise ServerAlreadyConnected ("The connecting server is already connected to this network.") linkData = self.ircd.servconfig["serverlinks"][name] ip = self.transport.getPeer().host mapped = IPV4_MAPPED_ADDR.match(ip) if mapped: ip = mapped.group(1) if "ip" not in linkData or ip != linkData["ip"]: raise ServerMismatchedIP ("The IP address for this server does not match the one in the configuration.") if "incoming_password" not in linkData or password != linkData["incoming_password"]: raise ServerPasswordIncorrect ("The password provided by the server does not match the one in the configuration.") if self.sentDataBurst is None: self.callRemote(IntroduceServer, name=self.ircd.name, password=linkData["outgoing_password"], 
description=self.ircd.servconfig["server_description"], version=version, commonmodules=self.ircd.common_modules) self.sentDataBurst = False self.name = name self.description = description self.ircd.servers[self.name] = self self.sendBurstData() for action in self.ircd.actions["netmerge"]: action(self.name) for server in self.ircd.servers.itervalues(): if server.nearHop == self.ircd.name and server != self: server.callRemote(AddNewServer, name=name, description=description, hopcount=1, nearhop=self.ircd.name) return {} IntroduceServer.responder(serverHandshake) def ping(self): if self.lastping > self.lastpong: self.transport.loseConnection() return self.lastping = now() d = self.callRemote(PingServer, data="{} {}".format(self.name, epoch(self.lastping))) d.addCallback(self.handlePong) def handlePong(self, data): self.lastpong = now() def handlePing(self, data): return { "data": data } PingServer.responder(handlePing) def sendBurstData(self): if self.sentDataBurst is not False: return self.sentDataBurst = True serverOrder = [] while len(serverOrder) < len(self.ircd.servers): for server in self.ircd.servers.itervalues(): if server in serverOrder: continue if server.nearHop == self.ircd.name or server.nearHop in serverOrder: serverOrder.append(server) serverOrder.remove(self) for server in serverOrder: self.callRemote(AddNewServer, name=server.name, description=server.description, hopcount=server.hopCount, nearhop=server.nearHop) for u in self.ircd.users.itervalues(): self.callRemote(RegisterUser, uuid=u.uuid, nick=u.nickname, ident=u.username, host=u.hostname, realhost=u.realhost, gecos=u.realname, ip=u.ip, password=<PASSWORD> if u.password else "", server=u.server, secure=u.socket.secure, signon=epoch(u.signon), nickts=epoch(u.nicktime)) modes = [] params = [] for mode, param in u.mode.iteritems(): if self.ircd.user_mode_type[mode] == 0: for item in param: modes.append(mode) params.append(item) elif param is None: modes.append(mode) else: modes.append(mode) 
params.append(param) self.callRemote(SetMode, target=u.uuid, targetts=epoch(u.signon), source=u.prefix(), modestring="+{}".format("".join(modes)), params=params) for namespace, data in u.metadata.iteritems(): for key, value in data.iteritems(): self.callRemote(SetMetadata, target=u.uuid, targetts=epoch(u.signon), namespace=namespace, key=key, value=value) for chan in self.ircd.channels.itervalues(): modes = [] params = [] for u, status in chan.users.iteritems(): self.callRemote(JoinChannel, channel=chan.name, user=u.uuid, chants=epoch(chan.created)) for mode in status: modes.append(mode) params.append(u.nickname) for mode, param in chan.mode.iteritems(): if self.ircd.channel_mode_type[mode] == 0: for item in param: modes.append(mode) params.append(item) elif param is None: modes.append(mode) else: modes.append(mode) params.append(param) self.callRemote(SetMode, target=chan.name, targetts=epoch(chan.created), source=self.ircd.name, modestring="+{}".format("".join(modes)), params=params) if chan.topic: self.callRemote(SetTopic, channel=chan.name, chants=epoch(chan.created), topic=chan.topic, topicsetter=chan.topicSetter, topicts=epoch(chan.topicTime)) for namespace, data in chan.metadata.iteritems(): for key, value in data.iteritems(): self.callRemote(SetMetadata, target=chan.name, targetts=epoch(chan.created), namespace=namespace, key=key, value=value) def newServer(self, name, description, hopcount, nearhop): if not self.name: raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.") # check for server-related desyncs if name in self.ircd.servers or name == self.ircd.name: raise ServerAlreadyConnected ("The server trying to connect to the network is already connected to the network.") if nearhop not in self.ircd.servers: raise NoSuchServer ("The nearhop for the new server is not on the network.") # Set up the new server(s) newServer = RemoteServer(self.ircd, name, description, nearhop, hopcount + 1) nearHop = 
self.ircd.servers[nearhop] nearHop.remoteServers.add(name) for server in self.ircd.servers.itervalues(): if nearhop in server.remoteServers: server.remoteServers.add(name) self.ircd.servers[name] = newServer for server in self.ircd.servers.itervalues(): if server.nearHop == self.ircd.name and server != self: # The server is connected to this server but is NOT this server link # so that it goes to each server once and does not get sent back where it came from server.callRemote(AddNewServer, name=name, description=description, hopcount=hopcount+1, nearhop=nearhop) for action in self.ircd.actions["netmerge"]: action(name) return {} AddNewServer.responder(newServer) def splitServer(self, name): if not self.name: raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.") if name not in self.ircd.servers: raise ServerNotConnected ("The server splitting from the network was not connected to the network.") servinfo = self.ircd.servers[name] leavingServers = servinfo.remoteServers leavingServers.add(name) for servname in leavingServers: del self.ircd.servers[servname] for server in self.ircd.servers.itervalues(): for servname in leavingServers: # Remove splitting servers from all remoteServers sets server.remoteServers.discard(servname) if self.ircd.name == server.nearHop and server != self: # propagate to the rest of the servers server.callRemote(DisconnectServer, name=name) for action in self.ircd.actions["netsplit"]: for server in leavingServers: action(server) return {} DisconnectServer.responder(splitServer) def connectionLost(self, reason): if self.name: userList = self.ircd.users.values() for user in userList: if user.server == self.name or user.server in self.remoteServers: user.disconnect("{} {}".format(self.ircd.name, self.name), self.name) for servname in self.remoteServers: del self.ircd.servers[servname] del self.ircd.servers[self.name] for server in self.ircd.servers.itervalues(): server.remoteServers.discard(self.name) for 
servname in self.remoteServers: server.remoteServers.discard(servname) for server in self.ircd.servers.itervalues(): if self.ircd.name == server.nearHop: server.callRemote(DisconnectServer, name=self.name) for action in self.ircd.actions["netsplit"]: action(self.name) for server in self.remoteServers: action(server) self.pinger.stop() self.disconnected.callback(None) AMP.connectionLost(self, reason) def basicConnectUser(self, uuid, ip, server, secure, signon): if not self.name: raise HandshakeNotYetComplete ("The initial handshake has not occurred over this link.") if server not in self.ircd.servers: raise NoSuchServer ("The server {} is not connected to the network.".format(server)) if uuid in self.ircd.userid: raise UserAlreadyConnected ("The uuid {} already exists on the network.".format(uuid)) newUser = RemoteUser(self.ircd, uuid, None, None, None, None, None, ip, None, server, secure, datetime.utcfromtimestamp(signon), now()) newUser.registered =
# Import libraries import streamlit as st import pandas as pd import yfinance as yf import talib import plotly.graph_objects as go import plotly.express as px import requests import seaborn as sns import os from streamlit_lottie import st_lottie from sqlalchemy import create_engine # Import my custom scripts from patterns import candlestick_patterns from OptimizePortfolio import optimize_portfolio, calculate_portfolio, getCompanyName from chart import areaChart, candlestickChart, gaugeChart, pieChart, fundamentalChart from scan import scanStocks from db import config import user db_string = f"cockroachdb://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=require" db = create_engine(db_string) def calculateSMA(df, window): df[f'{window}sma'] = df['Close'].rolling(window=window).mean() def calculateEMA(df, window): df[f'{window}ema'] = df['Close'].ewm(span=window).mean() def load_lottieurl(url: str): r = requests.get(url) if r.status_code != 200: return None return r.json() def main(): st.title('Stock Tracker') functionality = st.sidebar.selectbox('What would you like to do?', ('Track Individual Stocks', 'Compare Company Fundamentals', 'Optimize my Portfolio', 'Find Candlestick Patterns', 'Scan for Candlestick Patterns')) if (functionality == 'Track Individual Stocks'): st.header('Track Individual Stocks') ticker = st.sidebar.text_input('Enter ticker symbol', value='AMD') companyName = getCompanyName(ticker) df = user.get_db_price(ticker, db) st.subheader(f'Real-time information for {companyName}') type = st.sidebar.selectbox('Choose Chart Type', ('Line Chart', 'Candlestick Chart')) if (type == 'Line Chart'): plot = areaChart(df, ticker) st.plotly_chart(plot) else: plot = candlestickChart(df, ticker) st.plotly_chart(plot) with st.beta_expander("What is a candlestick chart?"): st.write("""A daily candlestick shows the market's open, high, low, and close price for the day. 
When the body of the candlestick is green, it means the close was higher than the open (ie. the price increased). If the body is red, it means the close was lower than the open (ie. the price decreased).""") st.image("https://upload.wikimedia.org/wikipedia/commons/e/ea/Candlestick_chart_scheme_03-en.svg", use_column_width="auto") st.write("Probe-meteo.com, CC BY-SA 3.0 <https://creativecommons.org/licenses/by-sa/3.0>, via Wikimedia Commons") gauge = gaugeChart(df, ticker) st.plotly_chart(gauge) st.subheader(f"Fundamental Analysis of {companyName}") with st.beta_expander("What is Fundamental Analysis?"): st.write("""Fundamental analysis (FA) is a method of **measuring a security's intrinsic value** by examining related economic and financial factors. These factors include macroeconomic factors such as the state of the economy and industry conditions to microeconomic factors like the effectiveness of the company's management. The **end goal** is to arrive at a number that an investor can compare with a security's current price **in order to see whether the security is undervalued or overvalued.**""") info = user.get_db_fundamentals(ticker, db) st.write(f"**_Business Summary_**: {info['longBusinessSummary'].values[0]}") st.write(f"**_Sector_**: {info['sector'].values[0]}") st.write(f"**_Shares Outstanding_**: {info['sharesOutstanding'].values[0]}") with st.beta_expander("Shares Outstanding"): st.write("""Shares outstanding refer to a company's stock currently held by all its shareholders, including share blocks held by institutional investors and restricted shares owned by the company’s officers and insiders.""") st.write(f"**_Market Capitalization_**: {info['marketCap'].values[0]}") with st.beta_expander("Market Capitalization"): st.write("""Market Capitalization is the total dollar value of all of a company's outstanding shares. 
It is a measure of corporate size.""") st.text('Market Capital = Current Market Price * Number Of Shares Outstanding') st.write(f"**_Price-to-Earnings (P/E) Ratio_**: {info['forwardPE'].values[0]}") with st.beta_expander("P/E Ratio"): st.write("""The **price-to-earnings (P/E) ratio** is a metric that helps investors determine the market value of a stock compared to the company's earnings. The P/E ratio shows what the market is willing to pay today for a stock based on its past or future earnings. The P/E ratio is important because it provides a measuring stick for comparing whether a stock is overvalued or undervalued.""") st.write("""A **high** P/E ratio could mean that a stock's price is expensive relative to earnings and **possibly overvalued**. Conversely, a **low** P/E ratio might indicate that the **current stock price is cheap relative to earnings**.""") st.text('P/E = Average Common Stock Price / Net Income Per Share') st.write("""The **Forward P/E** uses forecasted earnings to calculate P/E for the next fiscal year. If the earnings are expected to grow in the future, the forward P/E will be lower than the current P/E.""") st.text('Forward P/E = Current Market Price / Forecasted Earnings Per Share') st.write(f"**_Dividend Yield_**: {info['dividendYield'].values[0]}") with st.beta_expander("Dividend Yield"): st.write("""The dividend yield, expressed as a percentage, is a financial ratio (dividend/price) that shows how much a company pays out in dividends each year relative to its stock price.""") st.text('Dividend Yield = Annual Dividend Per Share / Price Per Share') st.write("""New companies that are relatively small, but still growing quickly, may pay a lower average dividend than mature companies in the same sectors. 
In general, mature companies that aren't growing very quickly pay the highest dividend yields.""") st.write(f"**_Beta_**: {info['beta'].values[0]}") with st.beta_expander("Beta"): st.write("""Beta is a measure of the volatility—or systematic risk—of a security or portfolio compared to the market as a whole. It effectively describes the activity of a security's returns as it responds to swings in the market.""") st.write("If a stock has a beta of **1.0**, it indicates that its price activity is strongly correlated with the market.") st.write("""A beta value that is **less than 1.0** means that the security is theoretically less volatile than the market. Including this stock in a portfolio makes it less risky than the same portfolio without the stock.""") st.write("""A beta that is greater than 1.0 indicates that the security's price is theoretically more volatile than the market. For example, if a stock's beta is 1.2, it is assumed to be 20% more volatile than the market. Technology stocks and small cap stocks tend to have higher betas than the market benchmark.""") st.write("""A negative beta shows that the asset inversely follows the market, meaning it decreases in value if the market goes up and increases if the market goes down.""") st.subheader("Calculate Moving Averages") windowSMA = st.slider("Select Simple Moving Average Period", 5, 200) #st.write(f"{windowSMA} Simple Moving Average selected") try: calculateSMA(df, windowSMA) except Exception as e: st.write(f"Failed to calculate {windowSMA}SMA.") windowEMA = st.slider("Select Exponential Moving Average Period", 5, 200) #st.write(f"{windowEMA} Exponential Moving Average selected") try: calculateEMA(df, windowEMA) except Exception as e: st.write(f"Failed to calculate {windowEMA}EMA.") plot = candlestickChart(df, ticker, sma=windowSMA, ema=windowEMA) st.plotly_chart(plot) if st.checkbox("Get Real-time News Articles"): st.subheader(f'Latest {companyName} News') df = user.get_db_news(ticker, db) df = 
df[['news_date', 'headline', 'sentiment', 'url']] cm = sns.diverging_palette(20, 145, as_cmap=True) st.dataframe(df.style.background_gradient(cmap=cm)) mean_scores = df.groupby(['news_date']).mean() mean_scores = mean_scores.xs('sentiment', axis="columns").transpose() st.subheader('Sentiment Over Time') st.line_chart(mean_scores) elif (functionality == 'Compare Company Fundamentals'): st.header('Compare Company Fundamentals') lottie_url = load_lottieurl("https://assets10.lottiefiles.com/private_files/lf30_F3v2Nj.json") st_lottie(lottie_url, height=300) choice = st.sidebar.selectbox("Which Companies To Compare?", ('Analyze All Companies','Custom')) if (choice == 'Analyze All Companies'): df = user.get_all_fundamentals(db) df = df.head(100) st.write(df) metric = st.selectbox('Select Metric to Visualize', ('Stock Price', 'Market Cap', 'Beta', 'Forward P/E', 'Dividend Yield', 'Average Volume')) number = st.slider('Number of Companies', 5, 20) order = st.selectbox('Ascending or Descending Order?', ('Ascending', 'Descending')) if metric == 'Stock Price': xaxis = 'previousClose' elif metric == 'Market Cap': xaxis = 'marketCap' elif metric == 'Beta': xaxis = 'beta' elif metric == 'Forward P/E': xaxis = 'forwardPE' elif metric == 'Dividend Yield': xaxis = 'dividendYield' elif metric == 'Average Volume': xaxis = 'averageVolume' plot = fundamentalChart(df, metric, xaxis, number, order) st.plotly_chart(plot) else: number = st.sidebar.slider('Select Number of Companies to Compare', 2, 10) tickers = [] for i in range(1, number+1): ticker = st.sidebar.text_input(f"Enter ticker symbol {i}:") tickers.append(ticker) infos = [] fundamentals = ['sector', 'previousClose', 'beta', 'marketCap', 'averageVolume', 'forwardPE', 'dividendYield', 'sharesOutstanding'] if len(tickers) == number: for ticker in tickers: print(f"Downloading data for {ticker}") infos.append(yf.Ticker(ticker).info) df = pd.DataFrame(infos) df = df.set_index('symbol') df = df[df.columns[df.columns.isin(fundamentals)]] 
st.write(df) elif (functionality == 'Optimize my Portfolio'): st.header('Optimize my Portfolio') lottie_url = load_lottieurl("https://assets3.lottiefiles.com/packages/lf20_TWo1Pn.json") st_lottie(lottie_url, height=300) index = st.sidebar.selectbox('Select Which Companies to Evaluate', ('Dow Jones Industrial Average (DJIA)', 'S&P500', 'S&P100', 'NASDAQ-100')) portfolio_val = int(st.sidebar.text_input("Enter Amount to Invest", value=10000)) strategy = st.sidebar.selectbox("Select Allocation Strategy", ('Optimize Return & Risk', 'Minimize Risk', 'Custom Risk', 'Custom Return')) if (index == 'S&P500'): st.subheader('S&P 500') st.write('''The S&P 500, or simply the S&P, is a stock market index that measures the stock performance of 500 large companies listed on stock exchanges in the United States. It is one of the most commonly followed equity indices. The S&P 500 index is a capitalization-weighted index and the 10 largest companies in the index account for 27.5% of the market capitalization of the index. The 10 largest companies in the index, in order of weighting, are Apple Inc., Microsoft, Amazon.com, Facebook, Tesla, Inc., Alphabet Inc. (class A & C), Berkshire Hathaway, Johnson & Johnson, and JPMorgan Chase & Co., respectively.''') portfolio = pd.read_csv("S&P500.csv", index_col="Date") elif (index == 'S&P100'): st.subheader('S&P 100') st.write('''The S&P 100 Index is a stock market index of United States stocks maintained by Standard & Poor's. It is a subset of the S&P 500 and includes 101 (because one of its component companies has 2 classes of stock) leading U.S. stocks. Constituents of the S&P 100 are selected for sector balance and represent about 67% of the market capitalization of the S&P 500 and almost 54% of the market capitalization of the U.S. equity markets as of December 2020. 
The stocks in the S&P 100 tend to be the largest and most established companies in the S&P 500.''') portfolio = pd.read_csv("SP100index.csv", index_col="Date") with st.beta_expander("The S&P 100 consists of:"): tickers = portfolio.columns for ticker in tickers: st.write(f"* {getCompanyName(ticker)}") elif (index == 'NASDAQ-100'): st.subheader('NASDAQ-100') st.write('''The NASDAQ-100 is a stock market index made up of 102 equity securities issued by 100 of the largest non-financial companies listed on the Nasdaq stock market.''') portfolio = pd.read_csv("NASDAQ.csv",
multiple dichotomy sets template = " %%(countedValue)s %%(lblLen)s %s " % tail mrespDef += template % rest mrespDefs.append(mrespDef) mrespDefs = "\n".join(mrespDefs) return mrespDefs def _getMultRespDefsEx(self, mrDef): """Get 'extended' multiple response defintions. This is a helper function for the multRespDefs getter function.""" regex = ("\$(?P<setName>\w+)=(?P<setType>E) (?P<flag1>1)" + "(?P<flag2>1)? (?P<valueLen>[0-9]+) (?P<countedValue>\w+) " + "(?P<lblLen>[0-9]+) (?P<lblVarNames>[\w ]+)") matches = re.findall(regex, mrDef, re.I | re.U) setName, setType, flag1, flag2 = matches[0][:4] valueLen, countedValue, lblLen, lblVarNames = matches[0][4:] length = int(lblLen) label, varNames = lblVarNames[:length], lblVarNames[length:].split() return {setName: {"setType": setType, "firstVarIsLabel": bool(flag2), "label": label, "countedValue": countedValue, "varNames": varNames}} def _setMultRespDefsEx(self, multRespDefs): """Set 'extended' multiple response defintions. This is a helper function for the multRespDefs setter function.""" mrDefs = [] for setName, rest in multRespDefs.iteritems(): if rest["setType"] != "E": continue rest["setName"] = setName v = int(rest["firstVarIsLabel"]) rest["firstVarIsLabel"] = v if v == 1 else "" rest["valueLen"] = len(rest["countedValue"]) rest["lblLen"] = len(rest["label"]) rest["varNames"] = " ".join(rest["varNames"]) mrDef = "$%(setName)s=%(setType)s 1%(firstVarIsLabel)s " mrDef += "%(valueLen)s %(countedValue)s %(lblLen)s %(label)s " mrDef += "%(varNames)s" mrDefs.append((mrDef % rest).replace(" ", " ")) return "\n".join(mrDefs) @property @decode def multRespDefs(self): """Get/Set MRSETS (multiple response) sets. 
Returns/takes a dictionary of the form: --multiple category sets: {setName: {"setType": "C", "label": lbl, "varNames": [<list_of_varNames>]}} --multiple dichotomy sets: {setName: {"setType": "D", "label": lbl, "varNames": [<list_of_varNames>], "countedValue": countedValue}} --extended multiple dichotomy sets: {setName: {"setType": "E", "label": lbl, "varNames": [<list_of_varNames>], "countedValue": countedValue, 'firstVarIsLabel': <bool>}} For example: categorical = {"setType": "C", "label": "labelC", "varNames": ["salary", "educ"]} dichotomous1 = {"setType": "D", "label": "labelD", "varNames": ["salary", "educ"], "countedValue": "Yes"} dichotomous2 = {"setType": "D", "label": "", "varNames": ["salary", "educ", "jobcat"], "countedValue": "No"} extended1 = {"setType": "E", "label": "", "varNames": ["mevar1", "mevar2", "mevar3"], "countedValue": "1", "firstVarIsLabel": True} extended2 = {"setType": "E", "label": "Enhanced set with user specified label", "varNames": ["mevar4", "mevar5", "mevar6"], "countedValue": "Yes", "firstVarIsLabel": False} multRespDefs = {"testSetC": categorical, "testSetD1": dichotomous1, "testSetD2": dichotomous2, "testSetEx1": extended1, "testSetEx2": extended2} """ ## Normal Multiple response definitions func = self.spssio.spssGetMultRespDefs mrDefs = c_char_p() retcode = func(c_int(self.fh), pointer(mrDefs)) if retcode > 0: msg = "Problem getting multiple response definitions" raise SPSSIOError(msg, retcode) multRespDefs = {} if mrDefs.value: for mrDef in mrDefs.value.split("\n"): for setName, rest in self._getMultRespDef(mrDef).iteritems(): multRespDefs[setName] = rest self.freeMemory("spssFreeMultRespDefs", mrDefs) ## Extended Multiple response definitions mrDefsEx = c_char_p() func = self.spssio.spssGetMultRespDefsEx retcode = func(c_int(self.fh), pointer(mrDefsEx)) if retcode > 0: msg = "Problem getting extended multiple response definitions" raise SPSSIOError(msg, retcode) multRespDefsEx = {} if mrDefsEx.value: for mrDefEx in 
mrDefsEx.value.split("\n"): for setName, rest in self._getMultRespDef(mrDefEx).iteritems(): multRespDefsEx[setName] = rest self.freeMemory("spssFreeMultRespDefs", mrDefsEx) multRespDefs.update(multRespDefsEx) return multRespDefs @multRespDefs.setter def multRespDefs(self, multRespDefs): if not multRespDefs: return normal = self._setMultRespDefs(multRespDefs) extended = self._setMultRespDefsEx(multRespDefs) if normal and extended: combinedDefs = normal + " \n" + extended elif normal and not extended: combinedDefs = normal elif extended and not normal: combinedDefs = extended func = self.spssio.spssSetMultRespDefs retcode = func(c_int(self.fh), c_char_p(combinedDefs)) if retcode > 0: msg = "Problem setting multiple response definitions" raise SPSSIOError(msg, retcode) @property @decode def caseWeightVar(self): """Get/Set WEIGHT variable. Takes a valid varName, and returns weight variable, if any, as a string.""" varNameBuff = create_string_buffer(65) func = self.spssio.spssGetCaseWeightVar retcode = func(c_int(self.fh), byref(varNameBuff)) if retcode > 0: msg = "Problem getting case weight variable name" raise SPSSIOError(msg, retcode) return varNameBuff.value @caseWeightVar.setter def caseWeightVar(self, varName): if not varName: return func = self.spssio.spssSetCaseWeightVar retcode = func(c_int(self.fh), c_char_p(varName)) if retcode > 0: msg = "Problem setting case weight variable name %r" % varName raise SPSSIOError(msg, retcode) @property @decode def dateVariables(self): # seems to be okay """Get/Set DATE information. This function reports the Forecasting (Trends) date variable information, if any, in IBM SPSS Statistics data files. 
Entirely untested and not implemented in reader/writer""" # step 1: get array size nElements = c_int() func = self.spssio.spssGetDateVariables MAX_ARRAY_SIZE = 100 dateInfoArr = (POINTER(c_long * MAX_ARRAY_SIZE))() retcode = func(c_int(self.fh), byref(nElements), byref(dateInfoArr)) # step 2: get date info with array of proper size dateInfoArr = (POINTER(c_long * nElements.value))() retcode = func(c_int(self.fh), byref(nElements), byref(dateInfoArr)) if retcode > 0: raise SPSSIOError("Error getting TRENDS information", retcode) # get array contents nElem = nElements.value if not nElem: return {} dateInfo = [dateInfoArr[0][i] for i in xrange(nElem)] fixedDateInfo = dateInfo[:6] otherDateInfo = [dateInfo[i: i + 3] for i in xrange(6, nElem, 3)] dateInfo = {"fixedDateInfo": fixedDateInfo, "otherDateInfo": otherDateInfo} # clean up self.freeMemory("spssFreeDateVariables", dateInfoArr) return dateInfo @dateVariables.setter def dateVariables(self, dateInfo): # entirely untested! dateInfo = dateInfo["fixedDateInfo"] + dateInfo["otherDateInfo"] dateInfo = reduce(list.__add__, dateInfo) # flatten list isAllFloats = all([isinstance(d, float) for d in dateInfo]) isSixPlusTriplets = (len(dateInfo) - 6) % 3 == 0 if not isAllFloats and isSixPlusTriplets: msg = ("TRENDS date info must consist of 6 fixed elements" + "+ <nCases> three-element groups of other date info " + "(all floats)") raise TypeError(msg) func = self.spssio.spssSetDateVariables dateInfoArr = (nElements * c_long)(*dateInfo) retcode = func(c_int(self.fh), c_int(nElements), dateInfoArr) if retcode > 0: raise SPSSIOError("Error setting TRENDS information", retcode) @property @decode def textInfo(self): """Get/Set text information. Takes a savFileName and returns a string of the form: "File %r built using SavReaderWriter.py version %s (%s)". 
This is akin to, but *not* equivalent to the SPSS syntax command DISPLAY DOCUMENTS""" textInfo = create_string_buffer(256) retcode = self.spssio.spssGetTextInfo(c_int(self.fh), byref(textInfo)) if retcode > 0: raise SPSSIOError("Error getting textInfo", retcode) return textInfo.value @textInfo.setter def textInfo(self, savFileName): info = (os.path.basename(savFileName), __version__, time.asctime()) textInfo = "File '%s' built using SavReaderWriter.py version %s (%s)" textInfo = textInfo % info if self.ioUtf8 and isinstance(savFileName, unicode): textInfo = textInfo.encode("utf-8") func = self.spssio.spssSetTextInfo retcode = func(c_int(self.fh), c_char_p(textInfo[:256])) if retcode > 0: raise SPSSIOError("Error setting textInfo", retcode) @property @decode def fileLabel(self): """Get/Set FILE LABEL (id string) Takes a file label (basestring), and returns file label, if any, as a string.""" idStr = create_string_buffer(65) retcode = self.spssio.spssGetIdString(c_int(self.fh), byref(idStr)) if retcode > 0: raise SPSSIOError("Error getting file label (id string)", retcode) return idStr.value @fileLabel.setter def fileLabel(self, idStr): if idStr is None: idStr = "File created by user %r at %s"[:64] % \ (getpass.getuser(), time.asctime()) if self.ioUtf8 and isinstance(idStr, unicode): idStr = idStr.encode("utf-8") retcode = self.spssio.spssSetIdString(c_int(self.fh), c_char_p(idStr)) if retcode > 0: raise SPSSIOError("Error setting file label (id string)", retcode) class SavHeaderReader(Header): """ This class contains methods that read the data dictionary of an SPSS data file. This yields the same information as the Spss command 'DISPLAY DICTIONARY' NB: do not confuse an Spss dictionary with a Python dictionary! Typical use: with SavHeaderReader(savFileName) as spssDict: wholeDict = spssDict.dataDictionary() print unicode(spssDict) """ def __init__(self, savFileName, ioUtf8=False, ioLocale=None): """ Constructor. 
Initializes all vars that can be recycled """ super(SavHeaderReader, self).__init__(savFileName, "rb", None, ioUtf8, ioLocale) self.fh = self.openSavFile() self.varNames, self.varTypes = self.varNamesTypes self.numVars = self.numberofVariables self.nCases = self.numberofCases def __str__(self): """ This function returns a report of the SPSS data dictionary (i.e., the header), in the encoding of the spss file""" return unicode(self).encode(self.fileEncoding) def __unicode__(self): """ This function returns a report of the SPSS data dictionary (i.e., the header).""" report = "" if self.textInfo: report += self.textInfo + os.linesep report += self.reportSpssDataDictionary(self.dataDictionary()) return report def __enter__(self): """ This function returns the DictionaryReader object itself so its methods become available for use with context managers ('with' statements).""" return self def __exit__(self, type, value, tb): """ This function closes the spss data file and does some cleaning.""" if type is not None: pass # Exception occurred self.close() def close(self): """This function closes the spss data file and does some cleaning.""" if not segfaults: self.closeSavFile(self.fh, mode="rb") def dataDictionary(self): """ This function returns all the dictionary items. It returns a Python dictionary based on the Spss dictionary of the given Spss file. This is equivalent to the Spss command 'DISPLAY DICTIONARY'.""" items = ["varNames", "varTypes", "valueLabels", "varLabels", "formats", "missingValues", "measureLevels", "columnWidths", "alignments", "varSets", "varRoles", "varAttributes", "fileAttributes", "fileLabel", "multRespDefs", "caseWeightVar", "multRespDefs"] if self.ioUtf8: items = map(unicode, items) dataDictionary = dict([(item, getattr(self, item)) for item in items]) return dataDictionary def reportSpssDataDictionary(self, dataDict): """ This function reports information from the Spss dictionary of the active Spss dataset. 
The parameter 'dataDict' is the return value of dataDictionary()""" report = [] #import pprint
<gh_stars>1000+ # Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base definition for for a batch simulation client. A batch simulation is a MapReduce-style computation consisting of the following steps: 1. A local client process creates configuration dictionaries. 2. The client brings up worker instances on Compute Engine (GCE). 3. Worker instances map each configuration dictionary to an output. 4. Once all workers are complete, the client reduces all worker outputs to a final, user-determined form. In order to use the batch simulation infrastructure, a user must provide: - A subclass of BatchSimClient with a main() script. - A subclass of BatchSimWorker with a main() script. See the documentation for BatchSimClient (below) and BatchSimWorker (in worker.py) for details. """ # TODO: Support running individual parts of the client. 
import functools import io import json import logging import os import re import shutil import subprocess import sys import tarfile import time import gflags from googleapiclient import errors as gapi_errors import makani from makani.lib.python import os_util from makani.lib.python import shell_interfaces from makani.lib.python import wing_flag from makani.lib.python.batch_sim import batch_sim_util from makani.lib.python.batch_sim import gcloud_constants from makani.lib.python.batch_sim import gcloud_util wing_flag.AppeaseLintWhenImportingFlagOnly() gflags.DEFINE_boolean('upload_worker_package', True, 'Upload worker package.') gflags.DEFINE_integer('num_workers', None, 'Number of worker instances.') gflags.DEFINE_integer('max_jobs_per_worker', None, 'Set number of workers to assign at most this many ' 'configs per worker.') gflags.RegisterValidator('max_jobs_per_worker', lambda m: m is None or m > 0) gflags.DEFINE_bool('delete_old_workers', False, 'Delete workers left from a previous run. Do not run ' 'a new batch sim.') gflags.DEFINE_string('sim_name', None, 'Name of the batch simulation. This must not match the ' 'name of any currently-running batch simulation, as it ' 'identifies file paths on Cloud Storage and worker ' 'instances on Compute Engine.') gflags.DEFINE_string('local_output_dir', None, 'Directory in which to store local output. 
If ' 'unspecified, a temp directory will be used.') gflags.DEFINE_string('local_h5_logs_dir', None, 'Directory in which to locally archive h5 logs when ' 'running with a local worker and the keep_h5_logs option.') gflags.DEFINE_enum('worker_machine_type', 'n1-standard-2', gcloud_constants.MACHINE_TYPES, 'Machine type for the Compute Engine workers.') gflags.DEFINE_enum('worker_zone', 'us-central1-f', gcloud_constants.ZONES, 'Zone in which to run Compute Engine workers.') gflags.DEFINE_string('worker_image', '', 'Name of disk image for Compute Engine workers.') gflags.DEFINE_string('worker_image_project', 'google.com:makani', 'Project that owns the image specified by --worker_image.') gflags.DEFINE_string('max_sim_time', 24 * 60 * 60, # 1 day 'Maximum sim time [seconds] that may be specified in a ' 'config. This is primarily a guard against using the ' 'default value of sys.float_info.max by forgetting to ' 'override it.') gflags.DEFINE_boolean('delete_workers_on_error', True, 'Indicates whether workers should be deleted in the ' 'event of an error. If set to False, workers will still ' 'auto-delete themselves as configured in ' 'worker_startup.sh.') gflags.DEFINE_boolean('use_local_worker', False, 'Run locally with a single worker. (--num_workers will ' 'be ignored.)') gflags.DEFINE_boolean('keep_h5_logs', False, 'Indicates whether the raw h5 log files should be ' 'archived.') gflags.DEFINE_boolean('keep_sparse_h5_logs', False, 'Indicates whether the sparse h5 log files should be ' 'archived.') gflags.DEFINE_boolean('reduce_only', False, 'Skip the worker creation and computation step and only ' 'reduce the data that has already been calculated. 
A ' 'local output directory must be specified.') gflags.DEFINE_boolean('quiet', False, 'Limit console output to warnings and errors.') gflags.DEFINE_boolean('show_events', True, 'Show scoring function events in the result page of ' 'each run.') FLAGS = gflags.FLAGS DEFAULT_BUCKET = 'makani' class BatchSimClientError(Exception): pass # TODO: Make a more generic "batch job client" class. This class has a # few elements specific to batch sims -- the check on sim_time, and logic for # HDF5 logs. class BatchSimClient(object): """Client for a batch simulation. A proper subclass must provide the following: - Define the _GenerateConfigs() method to generate configuration dictionaries. - Define the _ReduceWorkerOutput() method, which reduces workers' outputs and reports them as desired. - Use the constructor arg `worker_main_script` to specify the filepath (relative to makani.HOME) for the worker's main script. - Use the constructor arg `extra_apt_dependencies` to specify any prerequisite packages that are not accounted for in _BASE_APT_DEPENDENCIES below. There are two additional constructor args: - `sim_name` is used to identify the simulation in both Cloud Storage and Compute Engine. This must not match the name of any other currently running batch simulation. - `num_workers` indicates the number of instances that should be used. By default, these will be set using --sim_name and --num_workers, respectively. The main function need only initialize flags (a helper InitMain() is provided below), instantiate the client, and call its Run() method. E.g. def main(argv): client_module.InitMain(argv) # From this module. client = MyClientSubclass() client.Run() """ # The base path for this batch sim. # # The names of the worker script and worker package are inferred from this. _BASE_PATH = None # Path to the bash script that workers will run on startup. _WORKER_STARTUP_SCRIPT = os.path.join( makani.HOME, 'lib/python/batch_sim/worker_startup.sh') # Baseline dependencies. 
# Before adding dependencies here, consider if building a new worker image is # more appropriate. These packages will be installed on each worker startup. _BASE_APT_DEPENDENCIES = [] # Names of any flags that should be forwarded from the client. _FORWARDED_FLAGS = [] def __init__(self, extra_apt_dependencies=None, num_workers=None, sim_name=None): """Initialize the client. Args: extra_apt_dependencies: List of any packages that should be installied via apt-get, in addition to those specified by _BASE_APT_DEPENDENCIES. num_workers: Number of worker instances to run. Defaults to --num_workers. sim_name: Name of the batch sim. This is used to identify packages on GCS and workers on GCE. Defaults to --sim_name. Raises: BatchSimClientError: Running with keep_h5_logs and use_local_worker but local_h5_logs_dir is unset. """ if FLAGS.use_local_worker: try: subprocess.check_call([ 'bash', '-c', 'source lib/scripts/mbash.sh && mbash::check_multicast_route', 'lo']) except subprocess.CalledProcessError: print ('Multicast has not been configured for interface "lo".\n' 'If trying to run a desktop simulation, please read ' 'lib/scripts/install/README_local_multicast.') sys.exit(1) else: num_workers = 1 self._num_workers = (num_workers if num_workers is not None else FLAGS.num_workers) if not FLAGS.delete_old_workers and FLAGS.max_jobs_per_worker is None: assert self._num_workers > 0, 'Number of workers must be positive.' self._zone = FLAGS.worker_zone self._sim_name = (sim_name if sim_name is not None else FLAGS.sim_name) assert self._sim_name, 'Sim name must be nonempty.' self._worker_name_prefix = ('batch-sim-%s-worker-' % self._sim_name.replace('_', '-')) self._apt_dependencies = self._BASE_APT_DEPENDENCIES[:] if extra_apt_dependencies is not None: self._apt_dependencies += extra_apt_dependencies # GCS paths are relative to gs://makani/. 
self._gcs_base_dir = 'batch_sim/%s' % self._sim_name self._gcs_error_dir = self._gcs_base_dir + '/error' self._gcs_output_dir = self._gcs_base_dir + '/output' self._gcs_package_dir = self._gcs_base_dir + '/packages' self._gcs_h5_log_dir = self._gcs_base_dir + '/h5_logs' self._packager = shell_interfaces.Packager() self._gstorage = gcloud_util.CloudStorageApi(DEFAULT_BUCKET) self._gcompute = gcloud_util.ComputeEngineApi() self._local_output_dir = FLAGS.local_output_dir # Records whether an error occurred in the remote workers, so # cleanup can be controlled accordingly. self._worker_error = False if (FLAGS.keep_h5_logs or FLAGS.keep_sparse_h5_logs) and FLAGS.use_local_worker: if not FLAGS.local_h5_logs_dir: raise BatchSimClientError('local_h5_logs_dir is unset.') if not os.path.exists(FLAGS.local_h5_logs_dir): os.makedirs(FLAGS.local_h5_logs_dir) def _GetWorkerArgs(self, config_range): """Returns arguments for a worker's shell command. Args: config_range: Pair of integers indicating the inclusive range of configs to be processed. Returns: List of arguments for worker command. """ range_string = '%d,%d' % config_range forwarded_flags = [FLAGS.FlagDict()[f].Serialize() for f in self._FORWARDED_FLAGS] args = ['./worker.par', '--config_dir=gce_config', '--config_range=' + range_string, '--%sscoring_events' % ('' if FLAGS.show_events else 'no') ] + forwarded_flags if not FLAGS.use_local_worker: args.append('--gcloud_authentication_method=service_account') return args def _GenerateConfigs(self): """Generator that yields config dictionaries. Config dictionaries get fairly large, so if this method keeps them all in memory (e.g. by returning a list rather than yielding them as they are created), the client may OOM if producing thousands fo them. """ raise NotImplementedError def _ReduceWorkerOutput(self, output_paths): """Reduces output from workers. This method produces the final output of the batch simulation. 
Args: output_paths: Iterable of output file paths, one for each simulation completed by a worker. These are in the same order as their corresponding configuration files. Their contents are determined by the workers' _ProcessSimOutput() method. """ raise NotImplementedError def _MakeConfigPackage(self, package_path): """Produces the config package. Args: package_path: Path for the output file. Returns: Number of configs generated. Raises: BatchSimClientError: The final simulation time is unset or is too large, or running
import mybayes as bayes import numpy as np from mybayes.influence import ProbTable, Normal from mybayes.settings import NumberOfSample from copy import deepcopy class TempCache(object): data = {} id_top = 0 def add(self, item): self.id_top+=1 self.data[self.id_top] = item return self.id_top def get(self, id): return self.data[id] def remove(self, id): if id and id in self.data: del self.data[id] export_plot_node = None class Model(object): nodes = [] arcs = [] def new_model(self): bayes.remove_all_network() self.nodes = [] self.arcs = [] global export_plot_node export_plot_node = TempCache() def replace_node(self, node, new_node): self.remove_node(node) self.add_node(new_node) def add_node(self, node): if node not in self.nodes: self.nodes.append(node) def add_arc(self, arc): if arc not in self.arcs: self.arcs.append(arc) def remove_node(self, node): if isinstance(node, ActivityNodeModel): self.nodes.remove(node) else: id = node node = self.get_node(id) if node: self.nodes.remove(node) def remove_arc(self, arc): if isinstance(arc, ArcModel): self.arcs.remove(arc) else: id = arc arc = self.get_arc(id) if arc: self.arcs.remove(arc) def get_arc(self, id): return next((a for a in self.arcs if a.arc_id == id), None) def get_node(self, id): return next((n for n in self.nodes if n.node_id == id), None) def is_node(self, id): nd = self.get_node(id) return True if nd else False def get_arcs_attach_node(self, node_id): arcs = [a for a in self.arcs if ( a.start_id == node_id or a.end_id == node_id)] if arcs and len(arcs): return arcs def build_network(self): # print('Build network') # bayes.remove_network('test') # bayes.new_network('test') # create nodes for node in self.nodes: self.create_action(node) # populate link for arc in self.arcs: self.populate_arc(arc) # start and end succes_set = set([arc.end_id for arc in self.arcs]) pre_set = set([arc.start_id for arc in self.arcs]) full_set = set([node.node_id for node in self.nodes]) end_set = full_set - pre_set start_set = 
full_set - succes_set if end_set and start_set: for end_node_id in end_set: node = self.get_node(end_node_id) ef = node.get_bayes_node('ef') lf = node.get_bayes_node('lf') lf.add_successors(ef) lf.set_add_value(0) # lf.set_weight([1, ]) for start_node_id in start_set: node = self.get_node(start_node_id) es = node.get_bayes_node('es') es.add_successors(bayes.nfact.Constant(value=0)) es.set_add_value(0) # es.set_weight([1, ]) return True else: return False def run(self): bayes.update() def build_and_run(self): print('Build and run') bayes.remove_network('test') bayes.new_network('test') self.reset() success = self.build_network() if success: # populate duration first for act in self.nodes: self.build_duration(act) print('update') self.run() else: print('graph khong hop le') def populate_arc(self, arc): start = self.get_node(arc.start_id) end = self.get_node(arc.end_id) if start and end: end_es = end.get_bayes_node('es') start_ef = start.get_bayes_node('ef') end_es.add_successors(start_ef) # , bayes.nfact.Constant(value=1)) # end_es.set_weight([1, 1]) start_lf = start.get_bayes_node('lf') end_ls = end.get_bayes_node('ls') start_lf.add_successors(end_ls) # , bayes.nfact.Constant(value=1)) # start_lf.set_weight([1, -1]) def create_action(self, node): es = bayes.nfact.MaxAddValue(add_value=1) # 5 # duration = bayes.nfact.Gaussian(loc=loc, scale=scale) # 7 duration = bayes.nfact.TempNode() ef = bayes.nfact.Equation(es, duration) # 8 lf = bayes.nfact.MaxAddValue(add_value=-1) # 9 ls = bayes.nfact.Equation(lf, duration) # 10 ls.set_weight([1, -1]) node.bayes_nodes = (es, ef, ls, lf, duration) def build_duration(self, activity): duration = activity.duration_model delay_node = self.build_knowned_risk(duration, duration.get_element_by_name('knowned_risk')) duration_node = self.build_trade_off(duration, duration.get_element_by_name('trade_off')) adjust_node = self.build_unknown_factor(duration, duration.get_element_by_name('unknown_factor')) duration_bayes = 
activity.get_bayes_node('duration') n = NumberOfSample delays = delay_node.get_samples() durations = duration_node.get_samples() adjusts = adjust_node.get_samples() samples = [(1+delays[i])*durations[i]*adjusts[i] for i in range(n)] duration_bayes.set_samples(samples) def build_knowned_risk(self, duration, known_risk): control = known_risk.get_node('control') risk_event = known_risk.get_node('risk_event') impact = known_risk.get_node('impact') response = known_risk.get_node('response') # normalize table control_data = control.get_pre_calc_data() risk_event_data = risk_event.get_pre_calc_data() impact_data = impact.get_pre_calc_data() response_data = response.get_pre_calc_data() # tinh risk_event risk_event_values = bayes.influence.calc_two_cpd_network(control_data, risk_event_data, control.choice_index if control.choice_index!=control.MANUAL else -1) # build model to run # risk_event_node = bayes.nfact.TableNode(values=risk_event_values) # risk_event_samples = risk_event_node.get_samples() # # impact_node = bayes.nfact.TableNode(values=impact.data) # impact_samples = impact_node.get_samples() # # response_node = bayes.nfact.TableNode(values=response.data) # response_samples = response_node.get_samples() # TODO doi cho nay thanh input # calc delay from samples step = 1.0/(len(impact_data)+1) impact_real_values = [step*(i+1) for i in range(len(impact_data))] # gia tri cua impact tuong ung voi cac rank step = 1.0/(len(risk_event_values)-1) risk_event_real_values = [step*i for i in range(len(risk_event_values))] # tu 0...1 step = 1.0 / (len(response_data)) response_real_values = [step * (i+1) for i in range(len(response_data))[::-1]] # tu 1..>0 n = NumberOfSample response_samples = ProbTable(response_data, range(len(response_data))).generate(n) if impact.choice_index < 0: impact_risk_values=[] impact_risk_prob =[] impact_prob = impact_data risk_prob=risk_event_values for i in range(len(impact_prob)): for j in range(len(risk_prob)): 
impact_risk_prob.append(impact_prob[i]*risk_prob[j]) impact_risk_values.append(impact_real_values[i]*risk_event_real_values[j]) impact_risk_samples = ProbTable(impact_risk_prob, impact_risk_values).generate(n) else: impact_real = impact_real_values[impact.choice_index] values = [impact_real*risk_event_real_values[i] for i in range(len(risk_event_values))] impact_risk_samples = ProbTable(risk_event_values, values).generate(n) delay = [None]*n for i in range(n): pre_delay = bayes.influence.generate_tnormal(impact_risk_samples[i],0.1,0,1) delay[i]= pre_delay*response_real_values[response_samples[i]] # tao node de ve histogram delay_node = bayes.nfact.TempNode(samples=delay) id = export_plot_node.add(('Delay', delay_node)) known_risk.export_plot.append(id) known_risk.output_node = id return delay_node def build_trade_off(self, duration, trade_off): n = NumberOfSample resources = trade_off.get_node('resources') initial_estimate = trade_off.get_node('initial_estimate') if resources.choice_index is not None: resources_samples = [resources.choice_index]*n else: resources_probs= resources.get_pre_calc_data() resources_samples = ProbTable(resources_probs, range(len(resources_probs))).generate(n) if initial_estimate.choice_value is not None: ie_samples = [initial_estimate.choice_value] * n else: ie_samples = Normal(initial_estimate.get_param('loc'), initial_estimate.get_param('scale')).generate(n) samples =[0] * n for i in range(n): index = int(resources_samples[i]) triangle = trade_off.triangle_param_rank[index] ie = ie_samples[i] samples[i] = np.random.triangular(triangle[0]*ie, triangle[1]*ie, triangle[2]*ie,1)[0] # tao node de ve histogram duration_node = bayes.nfact.TempNode(samples=samples) id = export_plot_node.add(('Duration', duration_node)) trade_off.export_plot.append(id) trade_off.output_node = id return duration_node def build_unknown_factor(self, duration, unknown_factor): from scipy.stats import truncnorm adjust = unknown_factor.get_node('adjustment_factor') if 
not adjust.choice_value is None: samples = [adjust.choice_value] * NumberOfSample else: samples = truncnorm.rvs(0,1, loc=adjust.get_param('loc'), scale=adjust.get_param('scale'), size = NumberOfSample) # tao node de ve histogram adjust_node = bayes.nfact.TempNode(samples=samples) id = export_plot_node.add(('AdjustFactor', adjust_node)) unknown_factor.export_plot.append(id) unknown_factor.output_node = id return adjust_node def dump_data(self): return { 'Model':{ 'Activities':[a.dump_data() for a in self.nodes], 'Arcs':[arc.dump_data() for arc in self.arcs] } } def read_data(self, json_data): activities = json_data['Model']['Activities'] arcs = json_data['Model']['Arcs'] self.nodes = [] self.arcs = [] for a in activities: self.nodes.append(ActivityNodeModel('').read_data(a)) for arc in arcs: self.arcs.append(ArcModel().read_data(arc)) def reset(self): for node in self.nodes: node.reset() class ActivityNodeModel(object): name = '' node_id = None text_id = None ui_position = () # (es, ef, ls, lf, duration) bayes_nodes = () duration_model = None # type: DurationNodeModel def set_name(self, name): self.name = name self.duration_model.activity_rename(name) def copy(self): a = ActivityNodeModel(self.name) a.node_id = self.node_id a.text_id = self.text_id a.ui_position = self.ui_position a.duration_model = deepcopy(self.duration_model) a.bayes_nodes = self.bayes_nodes return a def __init__(self, name): self.name = name self.duration_model = DurationNodeModel(name) def get_bayes_node(self, name): m = ('es', 'ef', 'ls', 'lf', 'duration') for i, v in enumerate(m): if v == name: break return self.bayes_nodes[i] def replace_duration(self, new_duration): self.duration_model = new_duration def get_export_nodes(self): export = [] for i,k in enumerate(self.duration_model.element_names_label): ids = self.duration_model.get_element(i).export_plot for id in ids: tnode = export_plot_node.get(id) name = '%s-%s' %(k,tnode[0]) export.append((name, tnode[1])) ms = ('es', 'ef', 'ls', 'lf', 
'duration') if self.bayes_nodes: for m in ms: node = self.get_bayes_node(m) export.append((m,node)) return export def dump_data(self): return { 'name':self.name, 'id':self.node_id, 'ui_pos':self.ui_position, 'duration':self.duration_model.dump_data(), } def read_data(self, json_dict): self.name = json_dict['name'] self.node_id = int(json_dict['id']) self.ui_position = json_dict['ui_pos'] self.duration_model.read_data(json_dict['duration']) return self def reset(self): self.duration_model.reset() self.bayes_nodes = () class ArcModel(object): start_id = None end_id = None arc_id = None start_pos = None end_pos = None def dump_data(self): return [self.start_id, self.end_id] #, self.start_pos, self.end_pos] def read_data(self, ls): self.start_id = ls[0] self.end_id = ls[1] # self.start_pos = ls[2] # self.end_pos = ls[3] return self class DurationNodeModel(object): element_names_label=('Knowned Risks', 'Trade Off', 'Unknown Factor') element_names = ('knowned_risk', 'trade_off', 'unknown_factor') def __init__(self, activity_name): self.activity_name = activity_name self.elements = [None]*len(self.element_names) # create knowned risk knowned_risk = KnownedRiskModel(activity_name) self.elements[0] = knowned_risk # create trade off trade_off = TradeOffModel(activity_name) self.elements[1] = trade_off # create unknown factor unknown_factor = UnknownFactorModel(activity_name) self.elements[2] = unknown_factor def activity_rename(self, name): if self.elements: for e in self.elements: e.activity_rename(name) def get_element_label_index(self, name): return next(i for i in range(len(self.element_names_label)) if name == self.element_names_label[i]) def get_element(self, index): return self.elements[index] def get_element_by_name(self, name): id = next(i for i in range(len(self.element_names)) if name == self.element_names[i]) return self.get_element(id) def dump_data(self): return [e.dump_data() for e in self.elements] def read_data(self, ls): for i in range(len(ls)): 
self.elements[i].read_data(ls[i]) def reset(self): for e in self.elements: e.reset() class DurationElement(object): def __init__(self, activity_name): self.nodes_name_label = [] self.nodes_name=[] self.nodes = [] self.activity_name = activity_name self.export_plot = [] self.output_node = None # node dau ra cua element def get_node_index_by_name(self, name): return next((i for i in range(len(self.nodes_name)) if self.nodes_name[i] == name)) def set_node(self, name, node): index = self.get_node_index_by_name(name) self.nodes[index] = node def get_node(self, name): index = self.get_node_index_by_name(name) return self.nodes[index] def get_node_by_id(self, id): return self.nodes[id] def dump_data(self): return [node.dump_data() for node in self.nodes] def read_data(self, ls): for i in range(len(ls)): self.nodes[i].read_data(ls[i]) def reset(self): for e in self.export_plot: export_plot_node.remove(e) export_plot_node.remove(self.output_node) self.export_plot = [] self.output_node = None def activity_rename(self, name): if self.nodes: for node in self.nodes: s
<gh_stars>0 import dask.dataframe as dd import pandas as pd """ All queries take in Dask Dataframes, and return results as an uncomputed dask dataframe. """ def query1(tables): pd.set_option('float_format', '{:.2f}'.format) ####################################################### # FROM lineitem ####################################################### lineitem = tables['LINEITEM'] ####################################################### # WHERE l_shipdate <= date '1998-12-01' - interval '[DELTA]' day (3) ####################################################### # Note that the official TPC-H spec requires DELTA to be randomized # between 60-120. # Snowflake just sets it to 90: # https://docs.snowflake.com/en/user-guide/sample-data-tpch.html lineitem = lineitem[(lineitem['l_shipdate'] < '1998-10-01')] ####################################################### # GROUP BY # l_returnflag, # l_linestatus ####################################################### # SELECT # l_returnflag, # l_linestatus, # sum(l_quantity) as sum_qty, # sum(l_extendedprice) as sum_base_price, # sum(l_extendedprice * (1-l_discount)) as sum_disc_price, # sum(l_extendedprice * (1-l_discount) * (1+l_tax)) as sum_charge, # avg(l_quantity) as avg_qty, # avg(l_extendedprice) as avg_price, # avg(l_discount) as avg_disc, # count(*) as count_order ####################################################### # Pre-computing columns for multi-column aggregation. # (I could not find a pandas API for multi-column aggregation, # so this is the cleanest alternative I could think of.) lineitem['disc_price'] = ( lineitem['l_extendedprice'] * (1 - lineitem['l_discount'])) lineitem['charge'] = ( lineitem['disc_price'] * (1 + lineitem['l_tax'])) # Do the groupby (this also sorts on the groups) lineitem = lineitem.groupby(['l_returnflag', 'l_linestatus']) # NamedAgg does not seem to be supported in uncomputed dask dataframes, # so we will do aggregates and then rename the columns. 
lineitem = lineitem.agg({ 'l_quantity': ['sum', 'mean'], 'l_extendedprice': ['sum', 'mean'], 'disc_price': 'sum', 'charge': 'sum', 'l_discount': 'mean', 'l_orderkey': 'count', }) # Renaming columns. lineitem.columns = [ 'sum_qty', 'avg_qty', 'sum_base_price', 'avg_price', 'sum_disc_price', 'sum_charge', 'avg_disc', 'count_order', ] # Reordering columns. lineitem = lineitem[[ 'sum_qty', 'sum_base_price', 'sum_disc_price', 'sum_charge', 'avg_qty', 'avg_price', 'avg_disc', 'count_order', ]] ##################### # ORDER BY # l_returnflag, # l_linestatus; ##################### # This is already done during the aggregation. return lineitem def query2(tables): pd.set_option('float_format', '{:.2f}'.format) region = tables['REGION'] nation = tables['NATION'] supplier = tables['SUPPLIER'] partsupp = tables['PARTSUPP'] part = tables['PART'] europe = region[region['r_name'] == 'EUROPE'].\ merge(nation, how='inner', on = None, left_on = 'r_regionkey', right_on = 'n_regionkey', left_index=False, right_index=False).\ merge(supplier, how='inner', on = None, left_on = 'n_nationkey', right_on = 's_nationkey', left_index=False, right_index=False).\ merge(partsupp, how='inner', on = None, left_on = 's_suppkey', right_on = 'ps_suppkey', left_index=False, right_index=False) brass = part[(part['p_size'] == 15) & (part['p_type'].str.endswith('BRASS'))].\ merge(europe, how='inner', on=None, left_on = 'p_partkey', right_on = 'ps_partkey', left_index=False, right_index=False) minCost = brass.groupby('ps_partkey').\ agg({'ps_supplycost': 'min'}).\ reset_index(drop=False) minCost['min'] = minCost['ps_supplycost'] minBrass = brass.merge(minCost, how='inner', on=None, left_on = 'ps_partkey', right_on = 'ps_partkey', left_index=False, right_index=False)#\ # [["s_acctbal", "s_name", "n_name", "p_partkey", "p_mfgr", "s_address", "s_phone", "s_comment"]] result = minBrass[minBrass['ps_supplycost_y'] == minBrass['min']] return result def query3(tables): pd.set_option('float_format', 
'{:.2f}'.format) ############################## # FROM # customer, # orders, # lineitem ############################## customer = tables['CUSTOMER'] orders = tables['ORDERS'] lineitem = tables['LINEITEM'] ####################################### # WHERE c_mktsegment = 'BUILDING' # AND o_orderdate < '1995-03-15' # AND l_shipdate > '1995-03-15' # ... ###################################### # Do the selections. customer = customer[(customer['c_mktsegment'] == 'BUILDING')] orders = orders[(orders['o_orderdate'] < '1995-03-15')] lineitem = lineitem[(lineitem['l_shipdate'] > '1995-03-15')] ###################################### # ... # AND c_custkey = o_custkey # AND l_orderkey = o_orderkey ###################################### # Do the join. o_c = orders.join( customer.set_index('c_custkey'), on='o_custkey', lsuffix='_o', rsuffix='_c') l_o_c = lineitem.join( o_c.set_index('o_orderkey'), on='l_orderkey') ############################# # GROUP BY # l_orderkey, # o_orderdate, # o_shippriority ############################# # SELECT # l_orderkey, # sum(l_extendedprice * (1 - l_discount)) as revenue, # o_orderdate, # o_shippriority ############################# # Precomputing columns for aggregation. l_o_c['revenue_summands'] = ( l_o_c['l_extendedprice'] * (1 - l_o_c['l_discount'])) # GROUP BY l_o_c = l_o_c.groupby([ 'l_orderkey', 'o_orderdate', 'o_shippriority', ]) # Aggregate l_o_c = l_o_c.agg({ 'revenue_summands': 'sum', }) # Rename output column l_o_c.columns = [ 'revenue', ] ###################### # ORDER BY # revenue desc, # o_orderdate; # <-- NOT IMPLEMENTED # LIMIT 10 ###################### # no sort_values in dask dataframes, so we do this: # l_o_c = l_o_c.reset_index() l_o_c = l_o_c.nlargest(n=10, columns=['revenue']) # Unfortunately dask dataframes cannot sort by string (!) # so we will not be sorting by the order date. 
return l_o_c def query4(tables): orders = tables['ORDERS'] lineitem = tables['LINEITEM'] forders = orders[(orders['o_orderdate'] < "1993-10-01") & (orders['o_orderdate'] >= "1993-07-01")] flineitems = lineitem[lineitem['l_commitdate'] < 'l_receiptdate']#['l_orderkey']#.unique() result = forders.merge(flineitems, how='inner', on=None, left_on='o_orderkey', right_on='l_orderkey', left_index=False, right_index=False, suffixes=("_left", None)).\ groupby('o_orderpriority').agg({'o_orderpriority': 'count'}) return result def query5(tables): orders = tables['ORDERS'] region = tables['REGION'] nation = tables['NATION'] supplier = tables['SUPPLIER'] lineitem = tables['LINEITEM'] customer = tables['CUSTOMER'] forders = orders[(orders['o_orderdate'] < "1995-01-01") & (orders['o_orderdate'] >= "1994-01-01")] fregion = region[region['r_name'] == 'ASIA'].\ merge(nation, how='inner', on = None, left_on = 'r_regionkey', right_on = 'n_regionkey', left_index=False, right_index=False).\ merge(supplier, how='inner', on = None, left_on='n_nationkey', right_on='s_nationkey', left_index=False, right_index=False).\ merge(lineitem, how='inner', on = None, left_on='s_suppkey', right_on='l_suppkey', left_index=False, right_index=False)\ [['n_name', 'l_extendedprice', 'l_discount', 'l_orderkey', 's_nationkey']].\ merge(forders, how='inner', on = None, left_on='l_orderkey', right_on='o_orderkey', left_index=False, right_index=False).\ merge(customer, how='inner', on = None, left_on='o_custkey', right_on='c_custkey', left_index=False, right_index=False).\ merge(customer, how='inner', on = None, left_on='s_nationkey', right_on='c_nationkey', left_index=False, right_index=False) fregion['value'] = fregion['l_extendedprice']*(1 - fregion['l_discount']) revenue = fregion.groupby(['n_name']).agg({'value': 'sum'}).reset_index(drop=False) return revenue def query6(tables): pd.set_option('float_format', '{:.2f}'.format) ####################################################### # FROM lineitem 
####################################################### lineitem = tables['LINEITEM'] ####################################################### # WHERE l_shipdate >= date '[DATE]' # AND l_shipdate < date '[DATE]' + interval '1' year # AND l_discount between [DISCOUNT] - 0.01 and [DISCOUNT] + 0.01 # AND l_quantity < [QUANTITY] ####################################################### # For now, use validation parameters for randomized variables: # DATE = 1994-01-01 # DISCOUNT = 0.06 # QUANTITY = 24 lineitem = lineitem[ (lineitem['l_shipdate'] >= '1994-01-01') & (lineitem['l_shipdate'] < '1995-01-01') & (lineitem['l_discount'] >= 0.05) & (lineitem['l_discount'] <= 0.07) & (lineitem['l_quantity'] < 24) ] ####################################################### # SELECT sum(l_extendedprice * l_discount) as revenue ####################################################### lineitem['price_x_discount'] = ( lineitem['l_extendedprice'] * lineitem['l_discount'] ) # Note that this returns a scalar value # instead of a single-cell DF. # Dask dataframes do not support ungrouped aggregations # (or making a vacuous group). lineitem = lineitem[['price_x_discount']] lineitem = lineitem.sum() # Wrapping result in a dataframe. 
lineitem = lineitem.to_frame() lineitem.columns = ['revenue'] lineitem = lineitem[['revenue']] lineitem = lineitem.reset_index(drop=True) return lineitem def query7(tables): pd.set_option('float_format', '{:.2f}'.format) getYear = lambda x : x[0:4] decrease = lambda x, y : x*(1-y) ####################################################### # FROM SUPPLIER, LINEITEM, ORDERS, CUSTOMER, NATION N1, NATION N2 ####################################################### lineitem = tables['LINEITEM'] nation = tables['NATION'] supplier = tables['SUPPLIER'] customer = tables['CUSTOMER'] order = tables['ORDERS'] fnation = nation[ (nation['n_name'] == "FRANCE") | (nation['n_name'] == "GERMANY")] fline = lineitem[ (lineitem['l_shipdate'] >= "1995-01-01") & (lineitem['l_shipdate'] <= "1996-12-31")] supNation = fnation.merge(supplier, how='inner', on = None, left_on="n_nationkey", right_on='s_nationkey', left_index=False, right_index=False).merge(fline, how='inner', on = None, left_on='s_suppkey', right_on='l_suppkey', left_index=False, right_index=False)[["n_name", "l_orderkey", "l_extendedprice", "l_discount", "l_shipdate"]] supNation = supNation.rename(columns={"n_name": "supp_nation"}) cusNation = fnation.merge(customer, how='inner', on = None, left_on="n_nationkey", right_on='c_nationkey', left_index=False, right_index=False).merge(order, how='inner', on = None, left_on="c_custkey", right_on='o_custkey', left_index=False, right_index=False)[["n_name", "o_orderkey"]] cusNation = cusNation.rename(columns={"n_name": "cust_nation"}) cusSupNation = cusNation.merge(supNation, how='inner', on=None, left_on="o_orderkey", right_on='l_orderkey', left_index=False, right_index=False) cusSupNation['volume'] = cusSupNation['l_extendedprice']*(1 - cusSupNation['l_discount']) cusSupNation['l_year'] = cusSupNation['l_shipdate'].str[:4] cusSupNation1 = cusSupNation[\ ((cusSupNation['supp_nation'] == 'FRANCE') & (cusSupNation['cust_nation'] == 'GERMANY')) \ | ((cusSupNation['supp_nation'] == 
'GERMANY') & (cusSupNation['cust_nation'] == 'FRANCE'))]\ [['supp_nation', 'cust_nation', 'l_year', 'volume']] # .groupBy($"supp_nation", $"cust_nation", $"l_year") revenue = cusSupNation1.groupby(['supp_nation', 'cust_nation', 'l_year']).agg({'volume': 'sum'}).reset_index(drop=False) return revenue def query8(tables): #val fregion = region.filter($"r_name" === "AMERICA") lineitem = tables['LINEITEM'] nation = tables['NATION'] supplier = tables['SUPPLIER'] customer = tables['CUSTOMER'] order = tables['ORDERS'] part = tables['PART'] region = tables['REGION'] fregion = region[region['r_name'] == "AMERICA"] forders = order[(order['o_orderdate'] <= "1996-12-31") & (order['o_orderdate'] >= "1995-01-01")] fpart = part[part['p_type'] == 'ECONOMY ANODIZED STEEL'] nat = nation.merge(supplier, how='inner', on = None, left_on="n_nationkey", right_on='s_nationkey', left_index=False, right_index=False, suffixes=("_left", None)) lineitem['volume'] = (1 - lineitem['l_discount'])*lineitem['l_extendedprice'] line = lineitem[['l_partkey', 'l_suppkey', 'l_orderkey', 'volume']].\ merge(fpart, how='inner', on = None, left_on="l_partkey", right_on='p_partkey', left_index=False, right_index=False, suffixes=("_left", None)).\ merge(nat, how='inner', on = None, left_on="l_suppkey", right_on='s_suppkey', left_index=False, right_index=False, suffixes=("_left", None)) natLine = nation.merge(fregion, how='inner', on = None, left_on="n_regionkey", right_on='r_regionkey', left_index=False, right_index=False, suffixes=("_left", None)).\ merge(customer, how='inner', on = None, left_on="n_nationkey", right_on='c_nationkey', left_index=False, right_index=False, suffixes=("_left", None)).\ merge(forders, how='inner', on = None, left_on="c_custkey", right_on='o_custkey', left_index=False, right_index=False, suffixes=("_left", None)).\ merge(line, how='inner', on = None, left_on="o_orderkey", right_on='l_orderkey', left_index=False, right_index=False, suffixes=("_left", None)) natLine['o_year'] = 
natLine['o_orderdate'].str[0:4] natLine['case_volume'] = natLine['volume'] natLine[natLine['n_name'] != 'BRAZIL']['case_volume'] = 0 result = natLine.groupby('o_year').agg({'case_volume': 'sum', 'volume' : 'sum'}) return result def query9(tables): lineitem = tables['LINEITEM'] nation = tables['NATION'] supplier = tables['SUPPLIER'] #customer = tables['CUSTOMER'] order = tables['ORDERS'] part = tables['PART'] partsupp = tables['PARTSUPP'] #region = tables['REGION'] linePart = part[part['p_name'].str.contains('green')].\ merge(lineitem, how='inner', on = None, left_on ='p_partkey', right_on='l_partkey', left_index=False, right_index=False,
<reponame>JoeLanglands/MICE-MagneticFieldMapping<gh_stars>0 1import os import pickle import sys import time import numpy as np import matplotlib.pyplot as plt import utils from fbutils import applyfb as appFB from fbutils import fbfit as fitFB from fieldmanip.readData import readFile from fieldmanip import fieldManipulation as fm from fieldmanip import polarMeasurement as rphiz from plotting import plots3d as p3d from makefields import mkfieldclass as mkfield from geofit import geofit from geofit import coilfit """ This core module as the name suggests contains a few functions that could be considered as core features of this package. Everything that you definitely would want to do is defined as a function here. """ def performFBfit(residField, magnet, coil, zmax=None, rmax=0.15, n=3, l=20, m=10,\ verbose=True, saveAs=None): if zmax==None: if coil in ['CC', 'ECE']: zmax = 1.8 else: zmax = 1.0 if type(residField) == type('string'): fb_cls = fitFB.FBfitClass(readFile(os.path.join(utils.resid_field_path, residField)), \ coil, magnet, zmax, rmax, n, l, m, verbose, saveAs) else: fb_cls = fitFB.FBfitClass(residField, coil, magnet, zmax, rmax, n, l, m, \ verbose, saveAs) fb_cls.run() def showFBfield(_residField, magnet, coil, fitDict=None, nCores=1): if type(_residField) == type('string'): residField = readFile(os.path.join(utils.resid_field_path, _residField)) else: residField = _residField if fitDict == None: _fitDict = appFB.getDefaultFitDict(coil, magnet) else: with (os.path.join(utils.fb_pickle_path, fitDict), 'rb') as _pickle: _fitDict = pickle.load(_pickle) fb_field = appFB.applyFB_field(residField, _fitDict, coil, magnet, FBonly=True, nCores=nCores) p3d.wireFrame(residField, fb_field) def buildG4BLfield(magDict, gridDict, saveAs=None, FBonly=False, coil=True): """Builds a magnetic field of SSU/SSD and prints it out to a .table file in g4blgrid format. Args: magDict (dict): Dictionary containing magnet, coil currents and custom fitDict paths. 
If fitDict paths are not specified it pulls the default ones. gridDict (dict): Dictionary containing information about the grid in which to calculate the field over. saveAs (str): Name that the user wishes to call the outputted field (no need to supply full path). If None (default value), the magnet name + todays date is used. FBonly (bool): When True: calculate only FB terms. When False: calculate geofit+FB terms, i.e the full model field is output. coil (bool): When true, the full field is calculated from the coil fit model. If false, the geometrical fit model is used instead. Returns: Doesn't return anything. The outputted field is saved at data/MAUS/saveAs.table. Todo: *The scaleList part could change? May need support so that it can be adjusted by the user """ print 'Calculating field map for magnet:', magDict['magnet'] print 'With currents:' print '\n\t M1 -> %.2f A\n\t M2 -> %.2f A\n\t ECE -> %.2f A\n'%(magDict['M1']['I'], \ magDict['M2']['I'], \ magDict['CC']['I']) if FBonly == False and coil == True: coilfit_calc = get_coilfit_class(magDict) print 'This could take a while...' 
if saveAs == None: _date = time.localtime() saveAs = '%s_%s%02d%02d.table'%(magDict['magnet'], _date.tm_year, \ _date.tm_mon, _date.tm_mday) xNsteps = int((gridDict['x']['end'] + gridDict['x']['step'])/gridDict['x']['step']) xARR = np.linspace(gridDict['x']['start'], gridDict['x']['end'], xNsteps) yNsteps = int((gridDict['y']['end'] + gridDict['y']['step'])/gridDict['y']['step']) yARR = np.linspace(gridDict['y']['start'], gridDict['y']['end'], yNsteps) zNsteps = int((gridDict['z']['end'] + gridDict['z']['step'])/gridDict['z']['step']) zARR = np.linspace(gridDict['z']['start'], gridDict['z']['end'], zNsteps) scaleList = [' 1 X [1e3]\n', ' 2 Y [1e3]\n', ' 3 Z [1e3]\n', \ ' 4 BX [1e-3]\n', ' 5 BY [1e-3]\n', ' 6 BZ [1e-3]\n', ' 0\n'] print 'Writing out %d field points'%(xNsteps*yNsteps*zNsteps) count = 1 start_time = time.time() with open(os.path.join(utils.maus_field_path, saveAs), 'w') as _output: _output.write('\t%d\t%d\t%d\t1\n'%(xNsteps, yNsteps, zNsteps)) for i in scaleList: _output.write(i) for _x in xARR: for _y in yARR: for _z in zARR: if FBonly == True: Bx, By, Bz = appFB.applyFB_grid(magDict, _x, _y, _z, 0, 0, 0) elif FBonly == False: _Bx, _By, _Bz = coilfit_calc.calc_full_field_at_point_xyz(_x, _y, _z) Bx, By, Bz = appFB.applyFB_grid(magDict, _x, _y, _z, _Bx, _By, _Bz) _output.write('{:.3f}\t{:.3f}\t{:.3f}\t{:.8f}\t{:.8f}\t{:.8f}\n'.format( \ _x, _y,_z, Bx, By, Bz)) utils.progressBar(count, xNsteps*yNsteps*zNsteps, start_time, time.time()) count += 1 print 'Finished! 
File can be found at %s'%os.path.join(utils.maus_field_path, saveAs) def perform_coil_fit(magnet, coil, FBfit=False, makeresid=True, save_as=None, verbose=True): if magnet.upper() not in ['SSU', 'SSD']: print 'Magnet unrecognised - please use SSU or SSD' return if coil.upper() not in ['M1', 'M2', 'CC', 'ECE']: print 'Coil unrecognised - please use M1, M2, CC or ECE' print '\tN.B You can not fit to the end coils individually, only to E1-CC-E2' return if coil.upper() == 'CC': coil = 'ECE' if save_as == None: save_str = os.path.join(utils.geofit_field_path, magnet.upper() + '_' + coil.upper() \ + '_coilfit_default.pickle') else: save_str = os.path.join(utils.geofit_field_path, save_as) if coil.upper() in ['M1', 'M2']: print 'Performing coil fit on', magnet.upper(), coil.upper() if utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data'] == None: print 'No data to fit to for this magnet!' return _centre = utils.centres_dict[magnet.upper()]['mapper'][coil.upper()] _field = readFile(utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data']) if magnet.upper() == 'SSD': _field = fm.flip_SSD_data(_field) coilFitClass = coilfit.CoilFitClass(utils.coil_datacards[magnet.upper()][coil.upper()], \ _field, _centre) fitDict = coilFitClass.run() print 'Finished with parameters: ' for key, value in fitDict.iteritems(): print key, value print 'Saved fit parameters at: ', save_str with open(save_str, 'wb') as save_pickle: pickle.dump(fitDict, save_pickle, protocol=pickle.HIGHEST_PROTOCOL) elif coil.upper() in ['CC', 'ECE']: print 'Performing coil fit on', magnet.upper(), 'ECE' cc_param = utils.coil_datacards[magnet.upper()]['CC'] e1_param = utils.coil_datacards[magnet.upper()]['E1'] e2_param = utils.coil_datacards[magnet.upper()]['E2'] cc_centre = utils.centres_dict[magnet.upper()]['mapper']['CC'] e1_centre = utils.centres_dict[magnet.upper()]['mapper']['E1'] e2_centre = utils.centres_dict[magnet.upper()]['mapper']['E2'] _field = 
readFile(utils.coil_datacards[magnet.upper()]['CC']['30A_data']) if magnet.upper() == 'SSD': _field = fm.flip_SSD_data(_field) coilFitClass = coilfit.CoilFitClass_ECE(cc_param, e1_param, e2_param, _field, cc_centre, \ e1_centre, e2_centre) fitDict = coilFitClass.run() print 'Finished with parameters: ' for key, value in fitDict.iteritems(): print key for _k, _v in value.iteritems(): print _k, _v print 'Saved fit parameters at: ', save_str with open(save_str, 'wb') as save_pickle: pickle.dump(fitDict, save_pickle, protocol=pickle.HIGHEST_PROTOCOL) if FBfit == True: residField = make_resid_field(magnet.upper(), coil.upper()) performFBfit(residField, magnet.upper(), coil.upper()) return fitDict def perform_geofit(magnet, coil, makeresid=True, save_as=None): if magnet.upper() not in ['SSU', 'SSD']: print 'Magnet unrecognised - please use SSU or SSD' return if coil.upper() not in ['M1', 'M2', 'CC', 'ECE']: print 'Coil unrecognised - please use M1, M2, CC or ECE' print '\tN.B You can not fit to the end coils individually, only to E1-CC-E2' return if coil.upper() == 'CC': coil = 'ECE' if save_as == None: save_str = os.path.join(utils.geofit_field_path, magnet.upper() + '_' + coil.upper() \ + '_geofit_default.pickle') else: save_str = os.path.join(utils.geofit_field_path, save_as) if coil.upper() in ['M1', 'M2']: print 'Performing geometrical fit on', magnet.upper(), coil.upper() if utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data'] == None: print 'No data to fit to for this magnet!' 
return _centre = utils.centres_dict[magnet.upper()]['mapper'][coil.upper()] _field = readFile(utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data']) geoFitClass = geofit.GeoFit(utils.coil_datacards[magnet.upper()][coil.upper()], \ _field, _centre) fitDict = geoFitClass.run() print 'Finished with parameters: ' for key, value in fitDict.iteritems(): print key, value print 'Saved fit parameters at: ', save_str with open(save_str, 'wb') as save_pickle: pickle.dump(fitDict, save_pickle, protocol=pickle.HIGHEST_PROTOCOL) return fitDict elif coil.upper() in ['CC', 'ECE']: pass def get_coilfit_class(magDict): coilFitDicts = [] currentList = [] _magnet = magDict['magnet'] for key, item in magDict.iteritems(): if key == 'CC': if item['I'] < 0.001 and item['I'] > -0.001: continue pickle_str = '%s_ECE_coilfit_default.pickle'%_magnet with open(os.path.join(utils.geofit_field_path, pickle_str)) as _handle: ece_dict = pickle.load(_handle) for _key, _dict in ece_dict.iteritems(): coilFitDicts.append(_dict) currentList.append(item['I']) elif key in ['M1', 'M2']: if item['I'] < 0.001 and item['I'] > -0.001: continue pickle_str = '%s_%s_coilfit_default.pickle'%(_magnet, key) with open(os.path.join(utils.geofit_field_path, pickle_str)) as _handle: c_dict = pickle.load(_handle) coilFitDicts.append(c_dict) currentList.append(item['I']) coilfit_class = mkfield.CalcFullField(coilFitDicts, currentList) return coilfit_class def make_resid_field(magnet, coil, coilfit=True, fitDict=None, saveAs=None, _current=30.0): #I f*ing hate the mess that I have made this function... 
NEEDS CLEANING dataFieldStr = utils.coil_datacards[magnet.upper()][coil.upper()]['30A_data'] if coil.upper() == 'CC': coil = 'ECE' if fitDict == None: if coilfit == True: fitDictStr = '%s_%s_coilfit_default.pickle'%(magnet.upper(), coil.upper()) else: fitDictStr = '%s_%s_geofit_default.pickle'%(magnet.upper(), coil.upper()) elif type(fitDict) == type('string!'): fitDictStr = fitDict elif type(fitDict) == type({}): fitDictStr = 'N/A' pass #Handle passing the actual fitDict here... with open(os.path.join(utils.geofit_field_path, fitDictStr), 'rb') as _file: fitDict = pickle.load(_file) fitDictList, currentList = [], [] if coil == 'ECE': for key, value in fitDict.iteritems(): fitDictList.append(value) currentList.append(_current) else: fitDictList.append(fitDict) currentList.append(_current) if coilfit == True: print 'Making residual field with coilfit using', fitDictStr, 'with data field', dataFieldStr calcFieldClass = mkfield.CalcFullField(fitDictList, currentList) dataField = readFile(dataFieldStr) if magnet == 'SSD': dataField = fm.flip_SSD_data(dataField) residualField = [] for f in dataField: Br, Bphi, Bz = calcFieldClass.calc_full_field_at_point(f.r, f.phi, f.z) residualField.append(rphiz.Measurement(f.r, f.phi, f.z, f.Br - Br, f.Bphi - Bphi, \ f.Bz - Bz, f.sensorNumber)) if coilfit == False: pass #need to implement calcgeofit class if saveAs == None: #obvs need to change this so it can handle geofit instead saveAs = '%s_%s_coilfit_resid.dat'%(magnet.upper(), coil.upper()) saveAsFull = os.path.join(utils.resid_field_path,
# Copyright (c) 2021 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslo_policy import policy as base_policy from oslo_utils import uuidutils from neutron import policy from neutron.tests.unit.conf.policies import test_base as base class QosPolicyAPITestCase(base.PolicyBaseTestCase): def setUp(self): super(QosPolicyAPITestCase, self).setUp() self.target = {'project_id': self.project_id} self.alt_target = {'project_id': self.alt_project_id} class SystemAdminQosPolicyTests(QosPolicyAPITestCase): def setUp(self): super(SystemAdminQosPolicyTests, self).setUp() self.context = self.system_admin_ctx def test_get_policy(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy', self.alt_target) def test_create_policy(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy', self.alt_target) def test_update_policy(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_policy', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_policy', self.alt_target) def test_delete_policy(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_policy', self.target) 
self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_policy', self.alt_target) class SystemMemberQosPolicyTests(SystemAdminQosPolicyTests): def setUp(self): super(SystemMemberQosPolicyTests, self).setUp() self.context = self.system_member_ctx class SystemReaderQosPolicyTests(SystemMemberQosPolicyTests): def setUp(self): super(SystemReaderQosPolicyTests, self).setUp() self.context = self.system_reader_ctx class ProjectAdminQosPolicyTests(QosPolicyAPITestCase): def setUp(self): super(ProjectAdminQosPolicyTests, self).setUp() self.context = self.project_admin_ctx def test_get_policy(self): self.assertTrue( policy.enforce(self.context, 'get_policy', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_policy', self.alt_target) def test_create_policy(self): self.assertTrue( policy.enforce(self.context, 'create_policy', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy', self.alt_target) def test_update_policy(self): self.assertTrue( policy.enforce(self.context, 'update_policy', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy', self.alt_target) def test_delete_policy(self): self.assertTrue( policy.enforce(self.context, 'delete_policy', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy', self.alt_target) class ProjectMemberQosPolicyTests(ProjectAdminQosPolicyTests): def setUp(self): super(ProjectMemberQosPolicyTests, self).setUp() self.context = self.project_member_ctx def test_create_policy(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy', self.alt_target) def test_update_policy(self): self.assertRaises( base_policy.PolicyNotAuthorized, 
policy.enforce, self.context, 'update_policy', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy', self.alt_target) def test_delete_policy(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy', self.alt_target) class ProjectReaderQosPolicyTests(ProjectMemberQosPolicyTests): def setUp(self): super(ProjectReaderQosPolicyTests, self).setUp() self.context = self.project_reader_ctx class QosRuleTypeAPITestCase(base.PolicyBaseTestCase): def setUp(self): super(QosRuleTypeAPITestCase, self).setUp() self.target = {} class SystemAdminQosRuleTypeTests(QosRuleTypeAPITestCase): def setUp(self): super(SystemAdminQosRuleTypeTests, self).setUp() self.context = self.system_admin_ctx def test_get_rule_type(self): self.assertTrue( policy.enforce(self.context, 'get_rule_type', self.target)) class SystemMemberQosRuleTypeTests(SystemAdminQosRuleTypeTests): def setUp(self): super(SystemMemberQosRuleTypeTests, self).setUp() self.context = self.system_member_ctx class SystemReaderQosRuleTypeTests(SystemMemberQosRuleTypeTests): def setUp(self): super(SystemReaderQosRuleTypeTests, self).setUp() self.context = self.system_reader_ctx class ProjectAdminQosRuleTypeTests(QosRuleTypeAPITestCase): def setUp(self): super(ProjectAdminQosRuleTypeTests, self).setUp() self.context = self.project_admin_ctx def test_get_rule_type(self): self.assertTrue( policy.enforce(self.context, 'get_rule_type', self.target)) class ProjectMemberQosRuleTypeTests(ProjectAdminQosRuleTypeTests): def setUp(self): super(ProjectMemberQosRuleTypeTests, self).setUp() self.context = self.project_member_ctx def test_get_rule_type(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_rule_type', self.target) class 
ProjectReaderQosRuleTypeTests(ProjectMemberQosRuleTypeTests): def setUp(self): super(ProjectReaderQosRuleTypeTests, self).setUp() self.context = self.project_reader_ctx class QosRulesAPITestCase(base.PolicyBaseTestCase): def setUp(self): super(QosRulesAPITestCase, self).setUp() self.qos_policy = { 'id': uuidutils.generate_uuid(), 'project_id': self.project_id} self.target = { 'project_id': self.project_id, 'policy_id': self.qos_policy['id'], 'ext_parent_policy_id': self.qos_policy['id']} self.alt_target = { 'project_id': self.alt_project_id, 'policy_id': self.qos_policy['id'], 'ext_parent_policy_id': self.qos_policy['id']} self.plugin_mock = mock.Mock() self.plugin_mock.get_qos_policy.return_value = self.qos_policy mock.patch( 'neutron_lib.plugins.directory.get_plugin', return_value=self.plugin_mock).start() class SystemAdminQosBandwidthLimitRuleTests(QosRulesAPITestCase): def setUp(self): super(SystemAdminQosBandwidthLimitRuleTests, self).setUp() self.context = self.system_admin_ctx def test_get_policy_bandwidth_limit_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_alias_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_alias_bandwidth_limit_rule', self.alt_target) def test_create_policy_bandwidth_limit_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy_bandwidth_limit_rule', self.alt_target) def test_update_policy_bandwidth_limit_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, 
self.context, 'update_policy_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_alias_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_alias_bandwidth_limit_rule', self.alt_target) def test_delete_policy_bandwidth_limit_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_policy_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_alias_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_alias_bandwidth_limit_rule', self.alt_target) class SystemMemberQosBandwidthLimitRuleTests( SystemAdminQosBandwidthLimitRuleTests): def setUp(self): super(SystemMemberQosBandwidthLimitRuleTests, self).setUp() self.context = self.system_member_ctx class SystemReaderQosBandwidthLimitRuleTests( SystemMemberQosBandwidthLimitRuleTests): def setUp(self): super(SystemReaderQosBandwidthLimitRuleTests, self).setUp() self.context = self.system_reader_ctx class ProjectAdminQosBandwidthLimitRuleTests(QosRulesAPITestCase): def setUp(self): super(ProjectAdminQosBandwidthLimitRuleTests, self).setUp() self.context = self.project_admin_ctx def test_get_policy_bandwidth_limit_rule(self): self.assertTrue( policy.enforce(self.context, 'get_policy_bandwidth_limit_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertTrue( policy.enforce(self.context, 
'get_alias_bandwidth_limit_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_alias_bandwidth_limit_rule', self.alt_target) def test_create_policy_bandwidth_limit_rule(self): self.assertTrue( policy.enforce(self.context, 'create_policy_bandwidth_limit_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy_bandwidth_limit_rule', self.alt_target) def test_update_policy_bandwidth_limit_rule(self): self.assertTrue( policy.enforce(self.context, 'update_policy_bandwidth_limit_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertTrue( policy.enforce(self.context, 'update_alias_bandwidth_limit_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_alias_bandwidth_limit_rule', self.alt_target) def test_delete_policy_bandwidth_limit_rule(self): self.assertTrue( policy.enforce(self.context, 'delete_policy_bandwidth_limit_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertTrue( policy.enforce(self.context, 'delete_alias_bandwidth_limit_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_alias_bandwidth_limit_rule', self.alt_target) class ProjectMemberQosBandwidthLimitRuleTests( ProjectAdminQosBandwidthLimitRuleTests): def setUp(self): super(ProjectMemberQosBandwidthLimitRuleTests, self).setUp() self.context = self.project_member_ctx def test_create_policy_bandwidth_limit_rule(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, 
policy.enforce, self.context, 'create_policy_bandwidth_limit_rule', self.alt_target) def test_update_policy_bandwidth_limit_rule(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_alias_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_alias_bandwidth_limit_rule', self.alt_target) def test_delete_policy_bandwidth_limit_rule(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy_bandwidth_limit_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_alias_bandwidth_limit_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_alias_bandwidth_limit_rule', self.alt_target) class ProjectReaderQosBandwidthLimitRuleTests( ProjectMemberQosBandwidthLimitRuleTests): def setUp(self): super(ProjectReaderQosBandwidthLimitRuleTests, self).setUp() self.context = self.project_reader_ctx class SystemAdminQosDSCPMarkingRuleTests(QosRulesAPITestCase): def setUp(self): super(SystemAdminQosDSCPMarkingRuleTests, self).setUp() self.context = self.system_admin_ctx def test_get_policy_dscp_marking_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy_dscp_marking_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy_dscp_marking_rule', self.alt_target) # And the same 
for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_alias_dscp_marking_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_alias_dscp_marking_rule', self.alt_target) def test_create_policy_dscp_marking_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy_dscp_marking_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy_dscp_marking_rule', self.alt_target) def test_update_policy_dscp_marking_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_policy_dscp_marking_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_policy_dscp_marking_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_alias_dscp_marking_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_alias_dscp_marking_rule', self.alt_target) def test_delete_policy_dscp_marking_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_policy_dscp_marking_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_policy_dscp_marking_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_alias_dscp_marking_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_alias_dscp_marking_rule', self.alt_target) class SystemMemberQosDSCPMarkingRuleTests(SystemAdminQosDSCPMarkingRuleTests): def setUp(self): super(SystemMemberQosDSCPMarkingRuleTests, self).setUp() self.context = self.system_member_ctx class SystemReaderQosDSCPMarkingRuleTests(SystemMemberQosDSCPMarkingRuleTests): def setUp(self): 
super(SystemReaderQosDSCPMarkingRuleTests, self).setUp() self.context = self.system_reader_ctx class ProjectAdminQosDSCPMarkingRuleTests(QosRulesAPITestCase): def setUp(self): super(ProjectAdminQosDSCPMarkingRuleTests, self).setUp() self.context = self.project_admin_ctx def test_get_policy_dscp_marking_rule(self): self.assertTrue( policy.enforce(self.context, 'get_policy_dscp_marking_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_policy_dscp_marking_rule', self.alt_target) # And the same for aliases self.assertTrue( policy.enforce(self.context, 'get_alias_dscp_marking_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_alias_dscp_marking_rule', self.alt_target) def test_create_policy_dscp_marking_rule(self): self.assertTrue( policy.enforce(self.context, 'create_policy_dscp_marking_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy_dscp_marking_rule', self.alt_target) def test_update_policy_dscp_marking_rule(self): self.assertTrue( policy.enforce(self.context, 'update_policy_dscp_marking_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy_dscp_marking_rule', self.alt_target) # And the same for aliases self.assertTrue( policy.enforce(self.context, 'update_alias_dscp_marking_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_alias_dscp_marking_rule', self.alt_target) def test_delete_policy_dscp_marking_rule(self): self.assertTrue( policy.enforce(self.context, 'delete_policy_dscp_marking_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy_dscp_marking_rule', self.alt_target) # And the same for aliases self.assertTrue( policy.enforce(self.context, 'update_alias_dscp_marking_rule', self.target)) 
self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_alias_dscp_marking_rule', self.alt_target) class ProjectMemberQosDSCPMarkingRuleTests( ProjectAdminQosDSCPMarkingRuleTests): def setUp(self): super(ProjectMemberQosDSCPMarkingRuleTests, self).setUp() self.context = self.project_member_ctx def test_create_policy_dscp_marking_rule(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy_dscp_marking_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'create_policy_dscp_marking_rule', self.alt_target) def test_update_policy_dscp_marking_rule(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy_dscp_marking_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_policy_dscp_marking_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_alias_dscp_marking_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'update_alias_dscp_marking_rule', self.alt_target) def test_delete_policy_dscp_marking_rule(self): self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy_dscp_marking_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_policy_dscp_marking_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_alias_dscp_marking_rule', self.target) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'delete_alias_dscp_marking_rule', self.alt_target) class ProjectReaderQosDSCPMarkingRuleTests( ProjectMemberQosDSCPMarkingRuleTests): def setUp(self): super(ProjectReaderQosDSCPMarkingRuleTests, 
self).setUp() self.context = self.project_reader_ctx class SystemAdminQosMinimumBandwidthRuleTests(QosRulesAPITestCase): def setUp(self): super(SystemAdminQosMinimumBandwidthRuleTests, self).setUp() self.context = self.system_admin_ctx def test_get_policy_minimum_bandwidth_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy_minimum_bandwidth_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_policy_minimum_bandwidth_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_alias_minimum_bandwidth_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'get_alias_minimum_bandwidth_rule', self.alt_target) def test_create_policy_minimum_bandwidth_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy_minimum_bandwidth_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'create_policy_minimum_bandwidth_rule', self.alt_target) def test_update_policy_minimum_bandwidth_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_policy_minimum_bandwidth_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_policy_minimum_bandwidth_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_alias_minimum_bandwidth_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'update_alias_minimum_bandwidth_rule', self.alt_target) def test_delete_policy_minimum_bandwidth_rule(self): self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_policy_minimum_bandwidth_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 
'delete_policy_minimum_bandwidth_rule', self.alt_target) # And the same for aliases self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_alias_minimum_bandwidth_rule', self.target) self.assertRaises( base_policy.InvalidScope, policy.enforce, self.context, 'delete_alias_minimum_bandwidth_rule', self.alt_target) class SystemMemberQosMinimumBandwidthRuleTests( SystemAdminQosMinimumBandwidthRuleTests): def setUp(self): super(SystemMemberQosMinimumBandwidthRuleTests, self).setUp() self.context = self.system_member_ctx class SystemReaderQosMinimumBandwidthRuleTests( SystemMemberQosMinimumBandwidthRuleTests): def setUp(self): super(SystemReaderQosMinimumBandwidthRuleTests, self).setUp() self.context = self.system_reader_ctx class ProjectAdminQosMinimumBandwidthRuleTests(QosRulesAPITestCase): def setUp(self): super(ProjectAdminQosMinimumBandwidthRuleTests, self).setUp() self.context = self.project_admin_ctx def test_get_policy_minimum_bandwidth_rule(self): self.assertTrue( policy.enforce( self.context, 'get_policy_minimum_bandwidth_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_policy_minimum_bandwidth_rule', self.alt_target) # And the same for aliases self.assertTrue( policy.enforce( self.context, 'get_alias_minimum_bandwidth_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce, self.context, 'get_alias_minimum_bandwidth_rule', self.alt_target) def test_create_policy_minimum_bandwidth_rule(self): self.assertTrue( policy.enforce( self.context, 'create_policy_minimum_bandwidth_rule', self.target)) self.assertRaises( base_policy.PolicyNotAuthorized, policy.enforce,
plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius_in, width, **kwargs): radius_out = radius_in + width self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)), 0, 0, radius_in, width) self._default_size = _round_up_to_odd_integer(2 * radius_out) super(Ring2DKernel, self).__init__(**kwargs) self._truncation = 0 class Trapezoid1DKernel(Kernel1D): """ 1D trapezoid kernel. Parameters ---------- width : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. slope : number Slope of the filter kernel's tails mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, MexicanHat1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Trapezoid1DKernel trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2) plt.plot(trapezoid_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('amplitude') plt.xlim(-1, 28) plt.show() """ _is_bool = False def __init__(self, width, slope=1., **kwargs): self._model = models.Trapezoid1D(1, 0, width, slope) self._default_size = _round_up_to_odd_integer(width + 2. / slope) super(Trapezoid1DKernel, self).__init__(**kwargs) self._truncation = 0 self.normalize() class TrapezoidDisk2DKernel(Kernel2D): """ 2D trapezoid kernel. Parameters ---------- radius : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. 
slope : number Slope of the filter kernel's tails mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import TrapezoidDisk2DKernel trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2) plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, slope=1., **kwargs): self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope) self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope) super(TrapezoidDisk2DKernel, self).__init__(**kwargs) self._truncation = 0 self.normalize() class MexicanHat1DKernel(Kernel1D): """ 1D Mexican hat filter kernel. The Mexican Hat, or inverted Gaussian-Laplace filter, is a bandpass filter. It smoothes the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernels center of 1. / (sqrt(2 * pi) * width ** 3). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. 
Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * width. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import MexicanHat1DKernel mexicanhat_1D_kernel = MexicanHat1DKernel(10) plt.plot(mexicanhat_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _is_bool = True def __init__(self, width, **kwargs): amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3) self._model = models.MexicanHat1D(amplitude, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super(MexicanHat1DKernel, self).__init__(**kwargs) self._truncation = np.abs(self._array.sum() / self._array.size) class MexicanHat2DKernel(Kernel2D): """ 2D Mexican hat filter kernel. The Mexican Hat, or inverted Gaussian-Laplace filter, is a bandpass filter. It smoothes the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernels center of 1. / (pi * width ** 4). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. 
Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * width. y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * width. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import MexicanHat2DKernel mexicanhat_2D_kernel = MexicanHat2DKernel(10) plt.imshow(mexicanhat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, width, **kwargs): amplitude = 1.0 / (np.pi * width ** 4) self._model = models.MexicanHat2D(amplitude, 0, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super(MexicanHat2DKernel, self).__init__(**kwargs) self._truncation = np.abs(self._array.sum() / self._array.size) class AiryDisk2DKernel(Kernel2D): """ 2D Airy disk kernel. This kernel models the diffraction pattern of a circular aperture. This kernel is normalized to a peak value of 1. Parameters ---------- radius : float The radius of the Airy disk kernel (radius of the first zero). x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * radius. 
y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * radius. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import AiryDisk2DKernel airydisk_2D_kernel = AiryDisk2DKernel(10) plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, **kwargs): self._model = models.AiryDisk2D(1, 0, 0, radius) self._default_size = _round_up_to_odd_integer(8 * radius) super(AiryDisk2DKernel, self).__init__(**kwargs) self.normalize() self._truncation = None class Moffat2DKernel(Kernel2D): """ 2D Moffat kernel. This kernel is a typical model for a seeing limited PSF. Parameters ---------- gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. x_size : odd int, optional Size in x direction of the kernel array. Default = 8 * radius. y_size : odd int, optional Size in y direction of the kernel array. Default = 8 * radius. mode : str, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp'
= None): """ bias score for a segmentation result. Bias is the percentage difference in size of a given result Args: include_classes: (list) which classes to include in the output, default is [1:] excluding background modifier: (Callable) an optional function to apply to output before calculations """ MetricBase.__init__(self, out_type="segs", modifier=modifier) self.include_classes = include_classes # which classes to include in the DiceScore def process_single(self, output: torch.Tensor, target: torch.Tensor = None) -> float: """ Processes a single example Args: output: BxCxHxW tensor where C is the number of classes target: Bx1xHxW tensor where every entry is an int in the range [0, C-1] Returns: (float) the mean dice score """ if output.shape[1] == 1: output = convert_binary_output_to_classes(output) intersection, output_sum, target_sum = get_intersection_and_sums(output, target) bias = (output_sum - target_sum).type(torch.float) / (0.5 * output_sum + 0.5 * target_sum).type(torch.float) if self.include_classes is None: return torch.mean(bias[1:]) # same as iou, return mean of values for non-background classes else: return torch.mean(bias[self.include_classes]) # return mean of values for designated classes class CurvatureIndividual(MetricBase): """ Calculate the curvature of the output segmentation """ def __init__(self, segment_name, side): MetricBase.__init__(self, out_type="curve") assert segment_name in ['basal', 'mid', 'apical'] assert side in [1, 2] self.curve_segment = '_'.join([segment_name, 'curvature', str(side), "mean_endo"]) def process_single(self, out_cc: dict, target_cc: dict): res = (out_cc["curvature"][self.curve_segment] - target_cc["curvature"][self.curve_segment]) / \ np.mean([abs(target_cc["curvature"][self.curve_segment]), abs(out_cc["curvature"][self.curve_segment])]) return res class Convexity(MetricBase): """ Calculate the convexity of the segmentation """ def __init__(self, label_val, modifier=None): MetricBase.__init__(self, 
out_type="segs", modifier=modifier, requires_target=False, calculate_during_train=False) self.label_val = label_val def process_single(self, output: torch.Tensor, target: torch.Tensor = None) -> float: out_labels = convert_to_classes(output).detach().numpy() selected_label_mask = (out_labels.squeeze() == self.label_val).astype(np.uint8) contours, _ = cv2.findContours(selected_label_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) try: largest = get_largest_contour(contours)[0] except ValueError as e: logging.warning(f"finding metric Convexity failed because {e}, skipping...") return None area = cv2.contourArea(largest) hull = cv2.convexHull(largest) hull_area = cv2.contourArea(hull) return area / hull_area class Simplicity(MetricBase): """ Calculate the simplicty of the segmentation """ def __init__(self, label_val, modifier=None): MetricBase.__init__(self, out_type="segs", modifier=modifier, requires_target=False, calculate_during_train=False) self.label_val = label_val def process_single(self, output: torch.Tensor, target: torch.Tensor = None) -> float: out_labels = convert_to_classes(output).detach().numpy() selected_label_mask = (out_labels.squeeze() == self.label_val).astype(np.uint8) contours, _ = cv2.findContours(selected_label_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) try: largest = get_largest_contour(contours)[0] except ValueError as e: logging.warning(f"finding metric Simplicity failed because {e}, skipping...") return None area = cv2.contourArea(largest) perimeter = cv2.arcLength(largest, True) return np.sqrt(4 * np.pi * area) / perimeter class CosineSim(MetricBase): """ Cosine similarity but calculates curvature internally rather than using the output. Reason for doing this is to allow the use of the modifier syntax in metric base to calculate curvature on modified versions of the output. 
""" def __init__(self, label_val: int, modifier: Callable = None, nsegs: int = None): MetricBase.__init__(self, out_type="segs", modifier=modifier) self.label_val = label_val if nsegs is None: self.suffix = '' else: self.suffix = f'_{nsegs}' def process_single(self, output: torch.Tensor, target: torch.Tensor) -> float: out_labels = convert_to_classes(output).detach().numpy() target = target.detach().numpy() selected_label_mask = 255 * (out_labels.squeeze() == self.label_val).astype(np.uint8) target_mask = 255 * (target.squeeze() == self.label_val).astype(np.uint8) try: out_cc = Mask2Contour(selected_label_mask).get_contour_and_markers(show=False) target_cc = Mask2Contour(target_mask).get_contour_and_markers(show=False) except Exception as e: # in some cases curvature will fail. In those cases do it again, but show what happened logging.warning(f"Mask2Contour failed because {e}. Skipping...") return None res = np.array( [1 - cosine_dist(u, v) for u, v in zip(out_cc["shape" + self.suffix], target_cc["shape" + self.suffix])]) return res.mean() class SurfaceDist(MetricBase): """ Calculate the distance between two surfaces """ def __init__(self, label_val, modifier=None): MetricBase.__init__(self, out_type="segs", modifier=modifier, requires_target=True, calculate_during_train=False) self.label_val = label_val def process_single(self, output: torch.Tensor, target: torch.Tensor = None) -> float: out_labels = convert_to_classes(output).detach().numpy() target_labels = target.detach().numpy() output_mask = out_labels.squeeze() == self.label_val target_mask = target_labels.squeeze() == self.label_val surface_distances = compute_surface_distances(target_mask, output_mask, (1., 1.)) avg_dist = compute_average_surface_distance(surface_distances) return avg_dist[0] class Curvature(MetricBase): def __init__(self): MetricBase.__init__(self, out_type="curve") @staticmethod def mse(inp, tar): return np.array([(i - t) ** 2 for (i, t) in zip(inp.values(), tar.values())]).mean() def 
process_single(self, out_cc: dict, target_cc: dict): res = self.mse(out_cc["curvature"], target_cc["curvature"]) return res class Hausdorff(MetricBase): def __init__(self): MetricBase.__init__(self, out_type="curve") def process_single(self, out_cc: dict, target_cc: dict): u, v = out_cc["contour"], target_cc["contour"] res = max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0]) return res class Metrics: """Class to hold all of the metrics that will be calculated during training. Will also calculate auxillary outputs (curvature/bbox) if they are needed by any of the metrics """ def __init__(self, metrics: dict, phase, confidence=None): """ Args: metrics: a dict where each key is the metric name and the value is a subclass of MetricBase phase: the current training phase confidence: A confidence metric which can be used to determine if an output should be included in the metrics. This should be a callable which takes as input the segmentation and returns True/False. By default all images will be included. """ self.metrics = metrics self.has_curve = any([m.type == "curve" for m in self.metrics.values()]) self.has_bbox = any([m.type == "bbox" for m in self.metrics.values()]) logging.info(f"Initialized metrics with curve = {self.has_curve}, bbox = {self.has_bbox}") self.failed_confidence = 0 self.total = 0 self.res = dict() self.is_best = dict() self.confidence = confidence self.phase = phase def check_best(self, epoch): """ checks whether each result is the best so far""" self._calc_res() for k, metric_class in self.metrics.items(): if metric_class.check_best(self.res[k]): self.is_best['_'.join([self.phase, "best", k])] = epoch # either epoch num or just True return self.is_best @staticmethod def _detach_and_to_cpu(t): if type(t) == torch.Tensor: if t.get_device() >= 0: t = t.data.cpu() if t.requires_grad: t = t.detach() return t @staticmethod def convert_to_mask(inp): return inp > 0. 
def _get_curvature(self, outputs: torch.Tensor, targets: torch.Tensor): output_curves, target_curves = list(), list() for output, target in zip(outputs, targets): output = 255 * self.convert_to_mask(output).numpy().squeeze().astype(np.uint8) target = 255 * target.numpy().squeeze().astype(np.uint8) try: out_cc = Mask2Contour(output).get_contour_and_markers(show=False) target_cc = Mask2Contour(target).get_contour_and_markers(show=False) output_curves.append(out_cc) target_curves.append(target_cc) except: # in some cases curvature will fail. In those cases do it again, but show what happened output_curves.append(None) target_curves.append(None) # to ensure length is always the same try: Mask2Contour(output).get_contour_and_markers(show=True) except: # we expect to to fail pass return output_curves, target_curves def __call__(self, outputs: dict, targets: dict = None): # convert to cpu and detach for metric calculations outputs = {ok: self._detach_and_to_cpu(outputs[ok]) for ok in outputs} if targets is not None: targets = {tk: self._detach_and_to_cpu(targets[tk]) for tk in targets} # add curvature if necessary: if self.has_curve: if targets is None: raise NotImplementedError("Need to handle targets being None for curvature") outputs["curve"], targets["curve"] = self._get_curvature(outputs["segs"], targets["segs"]) if self.confidence is not None: passes = [self.confidence(o) for o in outputs["segs"]] self.failed_confidence += len(passes) - sum(passes) else: passes = [True] * outputs["segs"].shape[0] self.total += outputs["segs"].shape[0] batch_res = dict() with torch.no_grad(): # disable backprop when evaluating metrics. Useful for adversarial metrics. 
for k, metric_class in self.metrics.items(): batch_res[k] = metric_class(outputs, targets, passes) return batch_res def _calc_res(self, method="mean"): """ Calculate the results across an epoch """ for k, metric_class in self.metrics.items(): self.res[k] = metric_class.reduce(method) def epoch_reset(self, phase): for k, metric_class in self.metrics.items(): metric_class.epoch_reset(phase) self.failed_confidence = 0 self.total = 0 self.phase = phase def __repr__(self): self._calc_res() string = f'metrics ({self.phase}): ' for k, metric_class in self.metrics.items(): mn = metric_class.mean() absmn = metric_class.absmean() md = metric_class.median() absmed = metric_class.absmedian() std = metric_class.std() ciL, ciH = metric_class.ci_95() if self.res[k] is None: res = "n/a" else: res = f"med [absmed]: {md:.2g} [{absmed:.2g}], mn [absmn] (std): {mn:.2g} [{absmn:2g}] ({std:.2g}), 95% ci: [{ciL:.2g}, {ciH:.2g}]" string += f"\n\t{k} = {res}" # string = string[:-2] # strip last , return string def to_dict(self, prefix=None, method="mean", include_best=False, clean=True): """ output current metrics as dictionary Args: prefix: prefix to append to each metric method: how to reduce the metrics across the current epoch results include_best: include whether current results are the best clean: clean the dictionary of Nones and nans before returning. If true, replaces those values with -.1 Returns: """ if prefix is None: prefix = self.phase self._calc_res(method=method) if len(prefix) > 0: # if empty prefix is passed in then don't add _ metric_dict = {prefix + "_" + k: v for k, v in self.res.items()} else: metric_dict = {k: v for k, v in self.res.items()} if clean: metric_dict = clean_dict(metric_dict) if include_best: metric_dict.update(self.is_best) return metric_dict def print_best(self): for k, metric_class in self.metrics.items(): print(f"== {k} ==") for phase, val in metric_class.best.items(): if val is not None: print(f"{phase}:\t{val:.4f}") def add_to_summary_v2(self,
<filename>test/helpers/special_chars_and_punct.py # This file is used by various classes in test.unit_test.test_scrubber.py # This file is NOT to be used for character processing in the Lexos app proper! EE_HTML = {'&ae;': 'æ', '&d;': 'ð', '&t;': 'þ', '&e;': 'ę', '&AE;': 'Æ', '&D;': 'Ð', '&T;': 'Þ', '&#541;': 'ȝ', '&#540;': 'Ȝ', '&E;': 'Ę', '&amp;': '&', '&lt;': '<', '&gt;': '>', '&#383;': 'ſ'} EE_HTML_KEYS = "&ae;&d;&t;&e;&AE;&D;&T;&#541;&#540;&E;&amp;&lt;&gt;&#383;" EE_HTML_VALS = "æðþęÆÐÞȝȜĘ&<>ſ" DOE_SGML = {'&ae;': 'æ', '&d;': 'ð', '&t;': 'þ', '&e;': 'ę', '&AE;': 'Æ', '&D;': 'Ð', '&T;': 'Þ', '&E;': 'Ę', '&oe;': 'œ', '&amp;': '⁊', '&egrave;': 'è', '&eacute;': 'é', '&auml;': 'ä', '&ouml;': 'ö', '&uuml;': 'ü', '&amacron;': 'ā', '&cmacron;': 'c̄', '&emacron;': 'ē', '&imacron;': 'ī', '&nmacron;': 'n̄', '&omacron;': 'ō', '&pmacron;': 'p̄', '&qmacron;': 'q̄', '&rmacron;': 'r̄', '&lt;': '<', '&gt;': '>', '&lbar;': 'ł', '&tbar;': 'ꝥ', '&bbar;': 'ƀ'} DOE_SGML_KEYS = "&ae;&d;&t;&e;&AE;&D;&T;&E;&oe;&amp;&egrave;&eacute;&auml;" \ "&ouml;&uuml;&amacron;&cmacron;&emacron;&imacron;&nmacron;" \ "&omacron;&pmacron;&qmacron;&rmacron;&lt;&gt;&lbar;&tbar;" \ "&bbar;" DOE_SGML_VALS = "æðþęÆÐÞĘœ⁊èéäöüāc̄ēīn̄ōp̄q̄r̄<>łꝥƀ" MUFI3 = {'&aenl;': '\ueee0', '&ascap;': 'ᴀ', '&ordf;': 'ª', '&aogon;': 'ą', '&Aogon;': 'Ą', '&acurl;': '\ue433', '&Acurl;': '\ue033', '&adotbl;': 'ạ', '&Adotbl;': 'Ạ', '&adot;': 'ȧ', '&Adot;': 'Ȧ', '&auml;': 'ä', '&Auml;': 'Ä', '&adiaguml;': '\ue8d5', '&adotbluml;': '\ue41d', '&aacute;': 'á', '&Aacute;': 'Á', '&aenlacute;': '\ueaf0', '&aogonacute;': '\ue404', '&Aogonacute;': '\ue004', '&adblac;': '\ue425', '&Adblac;': '\ue025', '&adotacute;': '\uebf5', '&Adotacute;': '\uebf4', '&agrave;': 'à', '&Agrave;': 'À', '&acirc;': 'â', '&Acirc;': 'Â', '&aumlcirc;': '\ue41a', '&aringcirc;': '\ue41f', '&atilde;': 'ã', '&Atilde;': 'Ã', '&aring;': 'å', '&Aring;': 'Å', '&ahook;': 'ả', '&Ahook;': 'Ả', '&abreve;': 'ă', '&Abreve;': 'Ă', '&amacr;': 'ā', '&Amacr;': 'Ā', '&amacrbreve;': 
'\ue410', '&Amacrbreve;': '\ue010', '&abreveacute;': 'ắ', '&Abreveacute;': 'Ắ', '&amacracute;': '\ue40a', '&Amacracute;': '\ue00a', '&aalig;': 'ꜳ', '&aacloselig;': '\uefa0', '&AAlig;': 'Ꜳ', '&aaligenl;': '\uefdf', '&aaligdotbl;': '\ueff3', '&AAligdotbl;': '\ueff2', '&aaligdot;': '\uefef', '&AAligdot;': '\uefee', '&aaliguml;': '\uefff', '&AAliguml;': '\ueffe', '&aaligacute;': '\uefe1', '&AAligacute;': '\uefe0', '&aaligdblac;': '\uefeb', '&AAligdblac;': '\uefea', '&aelig;': 'æ', '&AElig;': 'Æ', '&aeligenl;': '\ueaf1', '&aeligscap;': 'ᴁ', '&aeligred;': '\uf204', '&aeligcurl;': '\uebeb', '&AEligcurl;': '\uebea', '&aeligogon;': '\ue440', '&AEligogon;': '\ue040', '&aeligdotbl;': '\ue436', '&AEligdotbl;': '\ue036', '&aeligdot;': '\ue443', '&AEligdot;': '\ue043', '&aeliguml;': '\ue442', '&AEliguml;': '\ue042', '&aeligacute;': 'ǽ', '&AEligacute;': 'Ǽ', '&aeligogonacute;': '\ue8d3', '&aeligdblac;': '\ue441', '&AEligdblac;': '\ue041', '&aeligring;': '\ue8d1', '&aeligbreve;': '\ue43f', '&AEligbreve;': '\ue03f', '&aeligmacr;': 'ǣ', '&AEligmacr;': 'Ǣ', '&aeligmacrbreve;': '\ue43d', '&AEligmacrbreve;': '\ue03d', '&aeligmacracute;': '\ue43a', '&AEligmacracute;': '\ue03a', '&aflig;': '\uefa3', '&afinslig;': '\uefa4', '&aglig;': '\uefa5', '&allig;': '\uefa6', '&anlig;': '\uefa7', '&anscaplig;': '\uefa8', '&aolig;': 'ꜵ', '&AOlig;': 'Ꜵ', '&aoligenl;': '\uefde', '&aenlosmalllig;': '\ueaf2', '&aoligred;': '\uf206', '&AOligred;': '\uf205', '&aoligdotbl;': '\ueff5', '&AOligdotbl;': '\ueff4', '&aoligacute;': '\uefe3', '&AOligacute;': '\uefe2', '&aoligdblac;': '\uebc1', '&AOligdblac;': '\uebc0', '&aplig;': '\uefa9', '&arlig;': '\uefaa', '&arscaplig;': '\uefab', '&aulig;': 'ꜷ', '&AUlig;': 'Ꜷ', '&auligdotbl;': '\ueff7', '&AUligdotbl;': '\ueff6', '&auligacute;': '\uefe5', '&AUligacute;': '\uefe4', '&avlig;': 'ꜹ', '&AVlig;': 'Ꜹ', '&avligslash;': 'ꜻ', '&AVligslash;': 'Ꜻ', '&avligslashacute;': '\uebb1', '&AVligslashacute;': '\uebb0', '&avligogon;': '\uebf1', '&AVligogon;': '\uebf0', 
'&avligdotbl;': '\ueff9', '&AVligdotbl;': '\ueff8', '&avligacute;': '\uefe7', '&AVligacute;': '\uefe6', '&avligdblac;': '\uebc3', '&AVligdblac;': '\uebc2', '&aylig;': 'ꜽ', '&AYlig;': 'Ꜽ', '&ayligdotbl;': '\ueffb', '&AYligdotbl;': '\ueffa', '&ayligdot;': '\ueff1', '&AYligdot;': '\ueff0', '&athornlig;': '\uefac', '&aesup;': '\ue42c', '&Aesup;': '\ue02c', '&iesup;': '\ue54a', '&aosup;': '\ue42d', '&ausup;': '\ue8e1', '&avsup;': '\ue42e', '&aunc;': '\uf214', '&aopen;': '\uf202', '&ains;': '\uf200', '&Ains;': '\uf201', '&aneckless;': '\uf215', '&anecklesselig;': '\uefa1', '&AnecklessElig;': '\uefae', '&anecklessvlig;': '\uefa2', '&aclose;': '\uf203', '&Asqu;': '\uf13a', '&benl;': '\ueee1', '&bscap;': 'ʙ', '&bscapdot;': '\uebd0', '&bscapdotbl;': '\uef25', '&bdotbl;': 'ḅ', '&Bdotbl;': 'Ḅ', '&bdot;': 'ḃ', '&Bdot;': 'Ḃ', '&bacute;': '\ue444', '&Bacute;': '\ue044', '&bstrok;': 'ƀ', '&bovlmed;': '\ue44d', '&bblig;': '\ueec2', '&bglig;': '\ueec3', '&cenl;': '\ueee2', '&cscap;': 'ᴄ', '&ccedil;': 'ç', '&Ccedil;': 'Ç', '&cogon;': '\ue476', '&Cogon;': '\ue076', '&cdotbl;': '\ue466', '&Cdotbl;': '\ue066', '&cdot;': 'ċ', '&Cdot;': 'Ċ', '&cacute;': 'ć', '&Cacute;': 'Ć', '&Covlhigh;': '\uf7b5', '&cklig;': '\ueec4', '&ctlig;': '\ueec5', '&Csqu;': '\uf106', '&ccurl;': '\uf198', '&CONbase;': 'Ↄ', '&conbase;': 'ↄ', '&denl;': '\ueee3', '&dscap;': 'ᴅ', '&dstrok;': 'đ', '&Dstrok;': 'Đ', '&dovlmed;': '\ue491', '&dtailstrok;': 'ꝱ', '&dtail;': 'ɖ', '&dscapdot;': '\uebd2', '&ddotbl;': 'ḍ', '&Ddotbl;': 'Ḍ', '&dscapdotbl;': '\uef26', '&ddot;': 'ḋ', '&Ddot;': 'Ḋ', '&dacute;': '\ue477', '&Dacute;': '\ue077', '&eth;': 'ð', '&ETH;': 'Ð', '&ethenl;': '\ueee5', '&ethscap;': 'ᴆ', '&ethdotbl;': '\ue48f', '&ETHdotbl;': '\ue08f', '&Dovlhigh;': '\uf7b6', '&drotdrotlig;': '\ueec6', '&Drot;': 'Ꝺ', '&drot;': 'ꝺ', '&drotdot;': '\uebd1', '&drotacute;': '\uebb2', '&drotenl;': '\ueee4', '&dscript;': 'ẟ', '&dcurl;': '\uf193', '&eenl;': '\ueee6', '&escap;': 'ᴇ', '&eogon;': 'ę', '&Eogon;': 'Ę', '&ecurl;': '\ue4e9', 
'&Ecurl;': '\ue0e9', '&eogoncurl;': '\uebf3', '&Eogoncurl;': '\uebf2', '&edotbl;': 'ẹ', '&Edotbl;': 'Ẹ', '&eogondot;': '\ue4eb', '&Eogondot;': '\ue0eb', '&eogondotbl;': '\ue4e8', '&Eogondotbl;': '\ue0e8', '&eogonenl;': '\ueaf3', '&edot;': 'ė', '&Edot;': 'Ė', '&euml;': 'ë', '&Euml;': 'Ë', '&eumlmacr;': '\ue4cd', '&eacute;': 'é', '&Eacute;': 'É', '&eogonacute;': '\ue499', '&Eogonacute;': '\ue099', '&edotblacute;': '\ue498', '&edblac;': '\ue4d1', '&Edblac;': '\ue0d1', '&edotacute;': '\ue4c8', '&Edotacute;': '\ue0c8', '&eogondotacute;': '\ue4ec', '&Eogondotacute;': '\ue0ec', '&eogondblac;': '\ue4ea', '&Eogondblac;': '\ue0ea', '&egrave;': 'è', '&Egrave;': 'È', '&ecirc;': 'ê', '&Ecirc;': 'Ê', '&eogoncirc;': '\ue49f', '&ering;': '\ue4cf', '&ebreve;': 'ĕ', '&Ebreve;': 'Ĕ', '&emacr;': 'ē', '&Emacr;': 'Ē', '&eogonmacr;': '\ue4bc', '&Eogonmacr;': '\ue0bc', '&emacrbreve;': '\ue4b7', '&Emacrbreve;': '\ue0b7', '&emacracute;': 'ḗ', '&Emacracute;': 'Ḗ', '&eylig;': '\ueec7', '&eacombcirc;': '\uebbd', '&eucombcirc;': '\uebbe', '&easup;': '\ue4e1', '&Easup;': '\ue0e1', '&eesup;': '\ue8e2', '&eisup;': '\ue4e2', '&eosup;': '\ue8e3', '&evsup;': '\ue4e3', '&schwa;': 'ə', '&Eunc;': '\uf10a', '&Euncclose;': '\uf217', '&eunc;': '\uf218', '&eext;': '\uf219', '&etall;': '\uf21a', '&fenl;': '\ueee7', '&fscap;': 'ꜰ', '&fdotbl;': '\ue4ee', '&Fdotbl;': '\ue0ee', '&fdot;': 'ḟ', '&Fdot;': 'Ḟ', '&fscapdot;': '\uebd7', '&facute;': '\ue4f0', '&Facute;': '\ue0f0', '&faumllig;': '\ueec8', '&fflig;': 'ff', '&filig;': 'fi', '&fjlig;': '\ueec9', '&foumllig;': '\uf1bc', '&fllig;': 'fl', '&frlig;': '\ueeca', '&ftlig;': '\ueecb', '&fuumllig;': '\ueecc', '&fylig;': '\ueecd', '&ffilig;': 'ffi', '&ffllig;': 'ffl', '&fftlig;': '\ueece', '&ffylig;': '\ueecf', '&ftylig;': '\ueed0', '&fturn;': 'ⅎ', '&Fturn;': 'Ⅎ', '&Frev;': 'ꟻ', '&fins;': 'ꝼ', '&Fins;': 'Ꝼ', '&finsenl;': '\ueeff', '&finsdot;': '\uebd4', '&Finsdot;': '\uebd3', '&finsdothook;': '\uf21c', '&finssemiclose;': '\uf21b', '&finssemiclosedot;': '\uebd5', 
'&finsclose;': '\uf207', '&finsclosedot;': '\uebd6', '&finsdotbl;': '\ue7e5', '&Finsdotbl;': '\ue3e5', '&finsacute;': '\uebb4', '&Finsacute;': '\uebb3', '&fcurl;': '\uf194', '&genl;': '\ueee8', '&gscap;': 'ɢ', '&gstrok;': 'ǥ', '&Gstrok;': 'Ǥ', '&gdotbl;': '\ue501', '&Gdotbl;': '\ue101', '&gscapdotbl;': '\uef27', '&gdot;': 'ġ', '&Gdot;': 'Ġ', '&gscapdot;': '\uef20', '&Gacute;': 'Ǵ', '&gacute;': 'ǵ', '&gglig;': '\ueed1', '&gdlig;': '\ueed2', '&gdrotlig;': '\ueed3', '&gethlig;': '\ueed4', '&golig;': '\ueede', '&gplig;': '\uead2', '&grlig;': '\uead0', '&gins;': 'ᵹ', '&Gins;': 'Ᵹ', '&ginsturn;': 'ꝿ', '&Ginsturn;': 'Ꝿ', '&Gsqu;': '\uf10e', '&gdivloop;': '\uf21d', '&glglowloop;': '\uf21e', '&gsmlowloop;': '\uf21f', '&gopen;': 'ɡ', '&gcurl;': '\uf196', '&henl;': '\ueee9', '&hscap;': 'ʜ', '&hhook;': 'ɦ', '&hstrok;': 'ħ', '&hovlmed;': '\ue517', '&hdotbl;': 'ḥ', '&Hdotbl;': 'Ḥ', '&Hdot;': 'ḣ', '&hdot;': 'Ḣ', '&hscapdot;': '\uebda', '&hacute;': '\ue516', '&Hacute;': '\ue116', '&hwair;': 'ƕ', '&HWAIR;': 'Ƕ', '&hslonglig;': '\uebad', '&hslongligbar;': '\ue7c7', '&hrarmlig;': '\ue8c3', '&Hrarmlig;': '\ue8c2', '&hhalf;': 'ⱶ', '&Hhalf;': 'Ⱶ', '&Hunc;': '\uf110', '&hrdes;': '\uf23a', '&ienl;': '\ueeea', '&iscap;': 'ɪ', '&inodot;': 'ı', '&inodotenl;': '\ueefd', '&Idot;': 'İ', '&istrok;': 'ɨ', '&iogon;': 'į', '&Iogon;': 'Į', '&icurl;': '\ue52a', '&Icurl;': '\ue12a', '&idotbl;': 'ị', '&Idotbl;': 'Ị', '&ibrevinvbl;': '\ue548', '&iuml;': 'ï', '&Iuml;': 'Ï', '&iacute;': 'í', '&Iacute;': 'Í', '&idblac;': '\ue543', '&Idblac;': '\ue143', '&idotacute;': '\uebf7', '&Idotacute;': '\uebf6', '&igrave;': 'ì', '&Igrave;': 'Ì', '&icirc;': 'î', '&Icirc;': 'Î', '&ihook;': 'ỉ', '&Ihook;': 'Ỉ', '&ibreve;': 'ĭ', '&Ibreve;': 'Ĭ', '&imacr;': 'ī', '&Imacr;': 'Ī', '&iovlmed;': '\ue550', '&Iovlhigh;': '\ue150', '&imacrbreve;': '\ue537', '&Imacrbreve;': '\ue137', '&imacracute;': '\ue535', '&Imacracute;': '\ue135', '&ijlig;': 'ij', '&IJlig;': 'IJ', '&iasup;': '\ue8e4', '&iosup;': '\ue8e5', '&iusup;': '\ue8e6', 
'&ivsup;': '\ue54b', '&ilong;': '\uf220', '&Ilong;': 'ꟾ', '&jenl;': '\ueeeb', '&jscap;': 'ᴊ', '&jnodot;': 'ȷ', '&jnodotenl;': '\ueefe', '&Jdot;': '\ue15c', '&jnodotstrok;': 'ɟ', '&jbar;': 'ɉ', '&Jbar;': 'Ɉ', '&jcurl;': '\ue563', '&Jcurl;': '\ue163', '&juml;': '\uebe3', '&Juml;': '\uebe2', '&jdotbl;': '\ue551', '&Jdotbl;': '\ue151', '&jacute;': '\ue553', '&Jacute;': '\ue153', '&jdblac;': '\ue562', '&Jdblac;': '\ue162', '&jmacrmed;': '\ue554', '&jovlmed;': '\ue552', '&Jmacrhigh;': '\ue154', '&Jovlhigh;': '\ue152', '&jesup;': '\ue8e7', '&kenl;': '\ueeec', '&kscap;': 'ᴋ', '&khook;': 'ƙ', '&kbar;': 'ꝁ', '&Kbar;': 'Ꝁ', '&kovlmed;': '\ue7c3', '&kstrleg;': 'ꝃ', '&Kstrleg;': 'Ꝃ', '&kstrascleg;': 'ꝅ', '&Kstrascleg;': 'Ꝅ', '&kdot;': '\ue568', '&Kdot;': '\ue168', '&kscapdot;': '\uebdb', '&kdotbl;': 'ḳ', '&Kdotbl;': 'Ḳ', '&kacute;': 'ḱ', '&Kacute;': 'Ḱ', '&kslonglig;': '\uebae', '&kslongligbar;': '\ue7c8', '&krarmlig;': '\ue8c5', '&kunc;': '\uf208', '&ksemiclose;': '\uf221', '&kclose;': '\uf209', '&kcurl;': '\uf195', '&lenl;': '\ueeed', '&lscap;': 'ʟ', '&lbar;': 'ƚ', '&lstrok;': 'ł', '&Lstrok;': 'Ł', '&lhighstrok;': 'ꝉ', '&Lhighstrok;': 'Ꝉ', '&lovlmed;': '\ue5b1', '&ltailstrok;': 'ꝲ', '&ldotbl;': 'ḷ', '&Ldotbl;': 'Ḷ', '&lscapdotbl;': '\uef28', '&ldot;': '\ue59e', '&Ldot;': '\ue19e', '&lscapdot;': '\uebdc', '&lacute;': 'ĺ', '&Lacute;': 'Ĺ', '&lringbl;': '\ue5a4', '&lmacrhigh;': '\ue596', '&lovlhigh;': '\ue58c', '&Lovlhigh;': '\uf7b4', '&lbrk;': 'ꝇ', '&Lbrk;': 'Ꝇ', '&llwelsh;': 'ỻ', '&LLwelsh;': 'Ỻ', '&lllig;': '\uf4f9', '&ldes;': '\uf222', '&lturn;': 'ꞁ', '&Lturn;': 'Ꞁ', '&menl;': '\ueeee', '&mscap;': 'ᴍ', '&mtailstrok;': 'ꝳ', '&mdotbl;': 'ṃ', '&Mdotbl;': 'Ṃ', '&mscapdotbl;': '\uef29', '&mdot;': 'ṁ', '&Mdot;': 'Ṁ', '&mscapdot;': '\uebdd', '&macute;': 'ḿ', '&Macute;': 'Ḿ', '&mringbl;': '\ue5c5', '&mmacrmed;': '\ue5b8', '&Mmacrhigh;': '\ue1b8', '&movlmed;': '\ue5d2', '&Movlhigh;': '\ue1d2', '&mesup;': '\ue8e8', '&Minv;': 'ꟽ', '&mrdes;': '\uf223', '&munc;': '\uf225', '&Munc;': 
'\uf11a', '&muncdes;': '\uf226', '&Muncdes;': '\uf224', '&muncacute;': '\uebb6', '&Muncacute;': '\uebb5', '&M5leg;': 'ꟿ', '&nenl;': '\ueeef', '&nscap;': 'ɴ', '&nscapldes;': '\uf22b', '&nlrleg;': 'ƞ', '&nlfhook;': 'ɲ', '&nbar;': '\ue7b2', '&ntailstrok;': 'ꝴ', '&ndot;': 'ṅ', '&Ndot;': 'Ṅ', '&nscapdot;': '\uef21', '&nacute;': 'ń', '&Nacute;': 'Ń', '&ndotbl;': 'ṇ', '&Ndotbl;': 'Ṇ', '&nscapdotbl;': '\uef2a', '&ncirc;': '\ue5d7', '&ntilde;': 'ñ', '&Ntilde;': 'Ñ', '&nringbl;': '\ue5ee', '&nmacrmed;': '\ue5dc', '&Nmacrhigh;': '\ue1dc', '&eng;': 'ŋ', '&ENG;': 'Ŋ', '&nscapslonglig;': '\ueed5', '&nrdes;': '\uf228', '&Nrdes;': '\uf229', '&nscaprdes;': '\uf22a', '&nflour;': '\uf19a', '&oenl;': '\ueef0', '&oscap;': 'ᴏ', '&ordm;': 'º', '&oogon;': 'ǫ', '&Oogon;': 'Ǫ', '&ocurl;': '\ue7d3', '&Ocurl;': '\ue3d3', '&oogoncurl;': '\ue64f', '&Oogoncurl;': '\ue24f', '&ocurlacute;': '\uebb8', '&Ocurlacute;': '\uebb7', '&oslash;': 'ø', '&Oslash;': 'Ø', '&oslashcurl;': '\ue7d4', '&Oslashcurl;': '\ue3d4', '&oslashogon;': '\ue655', '&Oslashogon;': '\ue255', '&odotbl;': 'ọ', '&Odotbl;': 'Ọ', '&oslashdotbl;': '\uebe1', '&Oslashdotbl;': '\uebe0', '&odot;': 'ȯ', '&Odot;': 'Ȯ', '&oogondot;': '\uebdf', '&Oogondot;': '\uebde', '&oogonmacr;': 'ǭ', '&Oogonmacr;': 'Ǭ', '&oslashdot;': '\uebce',
21: return 6 else: return 7 else: return 5 else: if f5 <= 31: if f3 <= 9: return 1 else: if f6 <= 22: if f5 <= 12: return 4 else: return 5 else: return 6 else: if f6 <= 14: return 5 else: return 4 else: return 1 else: if f6 <= 3: if f5 <= 4: if f8 <= 9: return 6 else: if f8 <= 11: return 7 else: return 6 else: return 1 else: if f5 <= 2: if f2 <= 17: return 4 else: return 1 else: if f5 <= 11: if f5 <= 9: return 1 else: return 5 else: if f2 <= 17: if f2 <= 16: return 1 else: return 4 else: return 1 else: if f2 <= 15: if f2 <= 3: return 1 else: if f4 <= 31: if f4 <= 24: if f2 <= 8: return 1 else: return 2 else: return 1 else: return 2 else: if f4 <= 32: if f2 <= 18: return 1 else: if f2 <= 19: return 0 else: return 1 else: return 1 else: if f3 <= 19: if f2 <= 16: if f2 <= 3: return 1 else: if f10 <= 0: return 1 else: if f7 <= 0: return 10 else: return 1 else: if f2 <= 17: if f4 <= 30: if f4 <= 3: return 3 else: if f5 <= 30: if f8 <= 0: if f5 <= 0: return 8 else: return 3 else: return 3 else: if f7 <= 13: if f6 <= 0: return 9 else: if f5 <= 34: return 3 else: return 5 else: if f5 <= 34: return 3 else: return 5 else: return 4 else: if f1 <= 24: return 1 else: return 0 else: if f2 <= 15: if f3 <= 30: if f2 <= 13: if f2 <= 1: return 0 else: if f2 <= 3: return 1 else: if f2 <= 5: return 2 else: if f1 <= 24: return 1 else: return 0 else: if f3 <= 27: if f3 <= 23: return 2 else: return 1 else: return 2 else: if f4 <= 21: if f2 <= 3: return 1 else: if f5 <= 16: if f2 <= 5: return 2 else: return 1 else: if f4 <= 10: return 1 else: if f3 <= 31: return 2 else: return 1 else: return 1 else: if f8 <= 0: return 1 else: if f4 <= 0: return 9 else: if f1 <= 24: return 1 else: return 0 else: if f1 <= 30: if f1 <= 26: if f2 <= 1: if f3 <= 15: if f3 <= 14: if f3 <= 5: if f3 <= 3: if f10 <= 25: return 2 else: return 11 else: if f4 <= 22: if f9 <= 0: return 3 else: return 2 else: return 3 else: if f4 <= 18: return 2 else: if f4 <= 20: if f5 <= 25: return 2 else: return 4 else: return 2 
else: if f4 <= 13: if f4 <= 10: if f4 <= 1: return 2 else: if f4 <= 6: return 3 else: if f4 <= 8: return 2 else: return 3 else: return 2 else: if f4 <= 30: if f4 <= 27: if f4 <= 23: if f5 <= 10: if f5 <= 1: return 3 else: if f4 <= 19: if f4 <= 17: return 3 else: return 2 else: return 3 else: if f4 <= 22: if f4 <= 18: return 3 else: if f4 <= 19: return 2 else: return 3 else: return 3 else: return 2 else: return 3 else: if f5 <= 21: if f4 <= 31: return 3 else: return 2 else: return 2 else: if f4 <= 6: if f4 <= 1: if f5 <= 27: return 2 else: if f3 <= 22: return 2 else: return 3 else: if f3 <= 19: if f3 <= 17: return 2 else: return 3 else: return 2 else: if f4 <= 19: if f4 <= 18: if f3 <= 25: return 2 else: if f3 <= 26: return 3 else: return 2 else: if f5 <= 30: return 2 else: return 5 else: if f4 <= 32: if f3 <= 25: if f3 <= 21: if f4 <= 23: if f3 <= 16: return 3 else: return 2 else: return 2 else: return 2 else: if f3 <= 26: return 3 else: return 2 else: if f3 <= 22: return 2 else: return 3 else: if f3 <= 14: if f2 <= 11: if f7 <= 0: return 2 else: if f4 <= 10: return 2 else: if f4 <= 11: return 2 else: if f4 <= 17: return 4 else: return 2 else: return 0 else: if f2 <= 12: if f8 <= 0: if f5 <= 0: return 0 else: if f2 <= 9: return 0 else: return 2 else: if f5 <= 34: if f2 <= 9: return 0 else: return 2 else: return 5 else: return 0 else: if f2 <= 7: if f5 <= 22: if f2 <= 6: return 0 else: if f3 <= 26: if f3 <= 17: if f4 <= 2: return 3 else: if f4 <= 8: if f4 <= 6: return 2 else: return 4 else: if f4 <= 27: return 3 else: return 4 else: if f3 <= 19: return 2 else: if f1 <= 28: return 0 else: if f4 <= 9: return 4 else: return 2 else: return 3 else: if f7 <= 3: if f8 <= 23: if f9 <= 0: if f2 <= 6: return 0 else: return 2 else: if f6 <= 4: if f8 <= 5: return 7 else: return 5 else: if f6 <= 11: return 6 else: return 5 else: if f6 <= 6: if f9 <= 9: return 7 else: if f9 <= 11: return 8 else: return 7 else: return 6 else: if f3 <= 17: if f6 <= 1: return 5 else: if f6 <= 32: if 
f3 <= 15: if f2 <= 4: return 0 else: return 3 else: if f6 <= 11: if f7 <= 21: if f7 <= 10: if f7 <= 9: return 6 else: return 7 else: return 6 else: if f7 <= 23: return 7 else: return 6 else: if f7 <= 22: if f6 <= 12: return 5 else: return 7 else: return 7 else: if f7 <= 14: return 6 else: return 5 else: if f3 <= 18: if f4 <= 12: return 3 else: return 2 else: return 3 else: if f1 <= 28: return 0 else: if f1 <= 29: if f3 <= 21: if f2 <= 11: if f3 <= 8: return 2 else: if f3 <= 10: if f3 <= 9: if f5 <= 2: return 3 else: return 2 else: return 3 else: return 2 else: if f2 <= 13: if f2 <= 12: return 0 else: return 2 else: return 0 else: if f3 <= 23: if f4 <= 21: return 3 else: if f4 <= 23: return 4 else: return 3 else: if f3 <= 24: if f4 <= 27: if f4 <= 9: return 2 else: if f4 <= 12: if f10 <= 0: return 2 else: return 3 else: return 2 else: return 2 else: if f3 <= 33: if f2 <= 11: return 2 else: return 0 else: return 3 else: return 0 else: if f2 <= 18: if f2 <= 8: if f1 <= 32: if f2 <= 6: if f2 <= 3: return 1 else: if f4 <= 2: if f7 <= 22: if f5 <= 32: return 1
<filename>rubik/solve.py import sys import time from rubik import cube from rubik.maths import Point DEBUG = False class Solver: def __init__(self, c): self.cube = c self.colors = c.colors() self.moves = [] self.left_piece = self.cube.find_piece(self.cube.left_color()) self.right_piece = self.cube.find_piece(self.cube.right_color()) self.up_piece = self.cube.find_piece(self.cube.up_color()) self.down_piece = self.cube.find_piece(self.cube.down_color()) def solve(self): if DEBUG: print(self.cube) self.cross() if DEBUG: print('Cross:\n', self.cube) self.cross_corners() if DEBUG: print('Corners:\n', self.cube) self.second_layer() if DEBUG: print('Second layer:\n', self.cube) self.back_face_edges() if DEBUG: print('Last layer edges\n', self.cube) self.last_layer_corners_position() if DEBUG: print('Last layer corners -- position\n', self.cube) self.last_layer_corners_orientation() if DEBUG: print('Last layer corners -- orientation\n', self.cube) self.last_layer_edges() if DEBUG: print('Solved\n', self.cube) def move(self, move_str): self.moves.extend(move_str.split()) self.cube.sequence(move_str) def cross(self): if DEBUG: print("cross") # place the UP-LEFT piece fl_piece = self.cube.find_piece(self.cube.front_color(), self.cube.left_color()) fr_piece = self.cube.find_piece(self.cube.front_color(), self.cube.right_color()) fu_piece = self.cube.find_piece(self.cube.front_color(), self.cube.up_color()) fd_piece = self.cube.find_piece(self.cube.front_color(), self.cube.down_color()) self._cross_left_or_right(fl_piece, self.left_piece, self.cube.left_color(), "L L", "E L Ei Li") self._cross_left_or_right(fr_piece, self.right_piece, self.cube.right_color(), "R R", "Ei R E Ri") self.move("Z") self._cross_left_or_right(fd_piece, self.down_piece, self.cube.left_color(), "L L", "E L Ei Li") self._cross_left_or_right(fu_piece, self.up_piece, self.cube.right_color(), "R R", "Ei R E Ri") self.move("Zi") def _cross_left_or_right(self, edge_piece, face_piece, face_color, move_1, 
move_2): # don't do anything if piece is in correct place if (edge_piece.pos == (face_piece.pos.x, face_piece.pos.y, 1) and edge_piece.colors[2] == self.cube.front_color()): return # ensure piece is at z = -1 undo_move = None if edge_piece.pos.z == 0: pos = Point(edge_piece.pos) pos.x = 0 # pick the UP or DOWN face cw, cc = cube.get_rot_from_face(pos) if edge_piece.pos in (cube.LEFT + cube.UP, cube.RIGHT + cube.DOWN): self.move(cw) undo_move = cc else: self.move(cc) undo_move = cw elif edge_piece.pos.z == 1: pos = Point(edge_piece.pos) pos.z = 0 cw, cc = cube.get_rot_from_face(pos) self.move("{0} {0}".format(cc)) # don't set the undo move if the piece starts out in the right position # (with wrong orientation) or we'll screw up the remainder of the algorithm if edge_piece.pos.x != face_piece.pos.x: undo_move = "{0} {0}".format(cw) assert edge_piece.pos.z == -1 # piece is at z = -1, rotate to correct face (LEFT or RIGHT) count = 0 while (edge_piece.pos.x, edge_piece.pos.y) != (face_piece.pos.x, face_piece.pos.y): self.move("B") count += 1 if count == 10: raise Exception("Stuck in loop - unsolvable cube?\n" + str(self.cube)) # if we moved a correctly-placed piece, restore it if undo_move: self.move(undo_move) # the piece is on the correct face on plane z = -1, but has two orientations if edge_piece.colors[0] == face_color: self.move(move_1) else: self.move(move_2) def cross_corners(self): if DEBUG: print("cross_corners") fld_piece = self.cube.find_piece(self.cube.front_color(), self.cube.left_color(), self.cube.down_color()) flu_piece = self.cube.find_piece(self.cube.front_color(), self.cube.left_color(), self.cube.up_color()) frd_piece = self.cube.find_piece(self.cube.front_color(), self.cube.right_color(), self.cube.down_color()) fru_piece = self.cube.find_piece(self.cube.front_color(), self.cube.right_color(), self.cube.up_color()) self.place_frd_corner(frd_piece, self.right_piece, self.down_piece, self.cube.front_color()) self.move("Z") 
self.place_frd_corner(fru_piece, self.up_piece, self.right_piece, self.cube.front_color()) self.move("Z") self.place_frd_corner(flu_piece, self.left_piece, self.up_piece, self.cube.front_color()) self.move("Z") self.place_frd_corner(fld_piece, self.down_piece, self.left_piece, self.cube.front_color()) self.move("Z") def place_frd_corner(self, corner_piece, right_piece, down_piece, front_color): # rotate to z = -1 if corner_piece.pos.z == 1: pos = Point(corner_piece.pos) pos.x = pos.z = 0 cw, cc = cube.get_rot_from_face(pos) # be careful not to screw up other pieces on the front face count = 0 undo_move = cc while corner_piece.pos.z != -1: self.move(cw) count += 1 if count > 1: # go the other direction because I don't know which is which. # we need to do only one flip (net) or we'll move other # correctly-placed corners out of place. for _ in range(count): self.move(cc) count = 0 while corner_piece.pos.z != -1: self.move(cc) count += 1 undo_move = cw self.move("B") for _ in range(count): self.move(undo_move) # rotate piece to be directly below its destination while (corner_piece.pos.x, corner_piece.pos.y) != (right_piece.pos.x, down_piece.pos.y): self.move("B") # there are three possible orientations for a corner if corner_piece.colors[0] == front_color: self.move("B D Bi Di") elif corner_piece.colors[1] == front_color: self.move("Bi Ri B R") else: self.move("Ri B B R Bi Bi D Bi Di") def second_layer(self): rd_piece = self.cube.find_piece(self.cube.right_color(), self.cube.down_color()) ru_piece = self.cube.find_piece(self.cube.right_color(), self.cube.up_color()) ld_piece = self.cube.find_piece(self.cube.left_color(), self.cube.down_color()) lu_piece = self.cube.find_piece(self.cube.left_color(), self.cube.up_color()) self.place_middle_layer_ld_edge(ld_piece, self.cube.left_color(), self.cube.down_color()) self.move("Z") self.place_middle_layer_ld_edge(rd_piece, self.cube.left_color(), self.cube.down_color()) self.move("Z") self.place_middle_layer_ld_edge(ru_piece, 
self.cube.left_color(), self.cube.down_color()) self.move("Z") self.place_middle_layer_ld_edge(lu_piece, self.cube.left_color(), self.cube.down_color()) self.move("Z") def place_middle_layer_ld_edge(self, ld_piece, left_color, down_color): # move to z == -1 if ld_piece.pos.z == 0: count = 0 while (ld_piece.pos.x, ld_piece.pos.y) != (-1, -1): self.move("Z") count += 1 self.move("B L Bi Li Bi Di B D") for _ in range(count): self.move("Zi") assert ld_piece.pos.z == -1 if ld_piece.colors[2] == left_color: # left_color is on the back face, move piece to to down face while ld_piece.pos.y != -1: self.move("B") self.move("B L Bi Li Bi Di B D") elif ld_piece.colors[2] == down_color: # down_color is on the back face, move to left face while ld_piece.pos.x != -1: self.move("B") self.move("Bi Di B D B L Bi Li") else: raise Exception("BUG!!") def back_face_edges(self): # rotate BACK to FRONT self.move("X X") # States: 1 2 3 4 # -B- -B- --- --- # BBB BB- BBB -B- # -B- --- --- --- def state1(): return (self.cube[0, 1, 1].colors[2] == self.cube.front_color() and self.cube[-1, 0, 1].colors[2] == self.cube.front_color() and self.cube[0, -1, 1].colors[2] == self.cube.front_color() and self.cube[1, 0, 1].colors[2] == self.cube.front_color()) def state2(): return (self.cube[0, 1, 1].colors[2] == self.cube.front_color() and self.cube[-1, 0, 1].colors[2] == self.cube.front_color()) def state3(): return (self.cube[-1, 0, 1].colors[2] == self.cube.front_color() and self.cube[1, 0, 1].colors[2] == self.cube.front_color()) def state4(): return (self.cube[0, 1, 1].colors[2] != self.cube.front_color() and self.cube[-1, 0, 1].colors[2] != self.cube.front_color() and self.cube[0, -1, 1].colors[2] != self.cube.front_color() and self.cube[1, 0, 1].colors[2] != self.cube.front_color()) count = 0 while not state1(): if state4() or state2(): self.move("D F R Fi Ri Di") elif state3(): self.move("D R F Ri Fi Di") else: self.move("F") count += 1 if count == 10: raise Exception("Stuck in loop - 
unsolvable cube\n" + str(self.cube)) self.move("Xi Xi") def last_layer_corners_position(self): self.move("X X") # UP face: # 4-3 # --- # 2-1 move_1 = "Li Fi L D F Di Li F L F F " # swaps 1 and 2 move_2 = "F Li Fi L D F Di Li F L F " # swaps 1 and 3 c1 = self.cube.find_piece(self.cube.front_color(), self.cube.right_color(), self.cube.down_color()) c2 = self.cube.find_piece(self.cube.front_color(), self.cube.left_color(), self.cube.down_color()) c3 = self.cube.find_piece(self.cube.front_color(), self.cube.right_color(), self.cube.up_color()) c4 = self.cube.find_piece(self.cube.front_color(), self.cube.left_color(), self.cube.up_color()) # place corner 4 if c4.pos == Point(1, -1, 1): self.move(move_1 + "Zi " + move_1 + " Z") elif c4.pos == Point(1, 1, 1): self.move("Z " + move_2 + " Zi") elif c4.pos == Point(-1, -1, 1): self.move("Zi " + move_1 + " Z") assert c4.pos == Point(-1, 1, 1) # place corner 2 if c2.pos == Point(1, 1, 1): self.move(move_2 + move_1) elif c2.pos == Point(1, -1, 1): self.move(move_1) assert c2.pos == Point(-1, -1, 1) # place corner 3 and corner 1 if c3.pos == Point(1, -1, 1): self.move(move_2) assert c3.pos == Point(1, 1, 1) assert c1.pos == Point(1, -1, 1) self.move("Xi Xi") def last_layer_corners_orientation(self): self.move("X X") # States: 1 2 3 4 5 6 7 8 # B B B B B # BB- -B-B BBB -BB -BB B-B- B-B-B BBB # BBB BBB BBB BBB BBB BBB BBB BBB # -B-B BB- -B- -BB BB-B B-B- B-B-B BBB # B B B B B B def state1(): return (self.cube[ 1, 1, 1].colors[1] == self.cube.front_color() and self.cube[-1, -1, 1].colors[1] == self.cube.front_color() and self.cube[ 1, -1, 1].colors[0] == self.cube.front_color()) def state2(): return (self.cube[-1, 1, 1].colors[1] == self.cube.front_color() and self.cube[ 1, 1, 1].colors[0] == self.cube.front_color() and self.cube[ 1, -1, 1].colors[1] == self.cube.front_color()) def state3(): return (self.cube[-1, -1, 1].colors[1] == self.cube.front_color() and self.cube[ 1, -1, 1].colors[1] == self.cube.front_color() and 
self.cube[-1, 1, 1].colors[2] == self.cube.front_color() and self.cube[ 1, 1, 1].colors[2] == self.cube.front_color()) def state4(): return (self.cube[-1, 1, 1].colors[1] == self.cube.front_color() and self.cube[-1, -1, 1].colors[1] == self.cube.front_color() and self.cube[ 1, 1, 1].colors[2] == self.cube.front_color() and self.cube[ 1, -1, 1].colors[2] == self.cube.front_color()) def state5(): return (self.cube[-1, 1, 1].colors[1] == self.cube.front_color() and self.cube[ 1, -1, 1].colors[0] == self.cube.front_color()) def state6(): return (self.cube[ 1, 1, 1].colors[1] == self.cube.front_color() and self.cube[ 1, -1, 1].colors[1] == self.cube.front_color() and self.cube[-1, -1, 1].colors[0] == self.cube.front_color() and self.cube[-1, 1, 1].colors[0] == self.cube.front_color()) def state7(): return (self.cube[ 1, 1, 1].colors[0] == self.cube.front_color() and self.cube[ 1, -1, 1].colors[0] == self.cube.front_color() and self.cube[-1, -1, 1].colors[0] == self.cube.front_color() and self.cube[-1, 1, 1].colors[0] == self.cube.front_color()) def state8(): return (self.cube[ 1, 1, 1].colors[2] ==
# NOTE(review): the statements below are the tail of a test whose `def` line lies
# above this chunk; judging by the assertions it exercises a document upload whose
# personalisation is missing the required "filename" property.
    service = create_service(service_permissions=[EMAIL_TYPE, UPLOAD_DOCUMENT])
    content = "See attached file."
    template = create_template(service=service, template_type="email", content=content)
    data = {
        "email_address": service.users[0].email_address,
        "template_id": template.id,
        "personalisation": {"document": {"file": file_data, "sending_method": sending_method}},
    }
    auth_header = create_authorization_header(service_id=service.id)

    response = client.post(
        path="v2/notifications/email",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), auth_header],
    )

    assert response.status_code == 400
    resp_json = json.loads(response.get_data(as_text=True))
    assert "ValidationError" in resp_json["errors"][0]["error"]
    assert "filename is a required property" in resp_json["errors"][0]["message"]


# file_data is a base64-encoded payload ("Text content here").
@pytest.mark.parametrize(
    "file_data",
    [
        ("VGV4dCBjb250ZW50IGhlcmU="),
    ],
)
def test_post_notification_with_document_upload_missing_sending_method(
    client,
    notify_db_session,
    file_data,
):
    """A document personalisation without "sending_method" is rejected with a 400."""
    service = create_service(service_permissions=[EMAIL_TYPE, UPLOAD_DOCUMENT])
    content = "See attached file."
    template = create_template(service=service, template_type="email", content=content)
    data = {
        "email_address": service.users[0].email_address,
        "template_id": template.id,
        # Deliberately omit "sending_method" to trigger the schema error.
        "personalisation": {"document": {"file": file_data}},
    }
    auth_header = create_authorization_header(service_id=service.id)

    response = client.post(
        path="v2/notifications/email",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), auth_header],
    )

    assert response.status_code == 400
    resp_json = json.loads(response.get_data(as_text=True))
    assert "ValidationError" in resp_json["errors"][0]["error"]
    assert "sending_method is a required property" in resp_json["errors"][0]["message"]


# NOTE: "attch" is a deliberate typo — it is the invalid value under test; do not "fix" it.
@pytest.mark.parametrize(
    "file_data, sending_method, filename",
    [
        ("VGV4dCBjb250ZW50IGhlcmU=", "attch", "1.txt"),
    ],
)
def test_post_notification_with_document_upload_bad_sending_method(
    client, notify_db_session, file_data, sending_method, filename
):
    """A sending_method outside the allowed enum [attach, link] is rejected with a 400."""
    service = create_service(service_permissions=[EMAIL_TYPE, UPLOAD_DOCUMENT])
    content = "See attached file."
    template = create_template(service=service, template_type="email", content=content)
    data = {
        "email_address": service.users[0].email_address,
        "template_id": template.id,
        "personalisation": {
            "document": {
                "file": file_data,
                "filename": filename,
                "sending_method": sending_method,
            }
        },
    }
    auth_header = create_authorization_header(service_id=service.id)

    response = client.post(
        path="v2/notifications/email",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), auth_header],
    )

    assert response.status_code == 400
    resp_json = json.loads(response.get_data(as_text=True))
    assert f"personalisation {sending_method} is not one of [attach, link]" in resp_json["errors"][0]["message"]


# "abc" has invalid base64 padding; the emoji is non-ASCII, so b64decode raises earlier.
@pytest.mark.parametrize(
    "file_data, message",
    [
        ("abc", "Incorrect padding"),
        ("🤡", "string argument should contain only ASCII characters"),
    ],
)
def test_post_notification_with_document_upload_not_base64_file(
    client,
    notify_db_session,
    file_data,
    message,
):
    """A document "file" that is not valid base64 is rejected with a 400 and a decode error."""
    service = create_service(service_permissions=[EMAIL_TYPE, UPLOAD_DOCUMENT])
    content = "See attached file."
    template = create_template(service=service, template_type="email", content=content)
    data = {
        "email_address": service.users[0].email_address,
        "template_id": template.id,
        "personalisation": {
            "document": {
                "file": file_data,
                "sending_method": "attach",
                "filename": "1.txt",
            }
        },
    }
    auth_header = create_authorization_header(service_id=service.id)

    response = client.post(
        path="v2/notifications/email",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), auth_header],
    )

    assert response.status_code == 400
    resp_json = json.loads(response.get_data(as_text=True))
    assert f"{message} : Error decoding base64 field" in resp_json["errors"][0]["message"]


def test_post_notification_with_document_upload_simulated(client, notify_db_session, mocker):
    """Happy path: a "link" document upload substitutes the download URL into the body.

    The celery delivery task and the document-download client are mocked, so no real
    upload or email send happens.
    """
    service = create_service(service_permissions=[EMAIL_TYPE, UPLOAD_DOCUMENT])
    template = create_template(service=service, template_type="email", content="Document: ((document))")

    mocker.patch("app.celery.provider_tasks.deliver_email.apply_async")
    document_download_mock = mocker.patch("app.v2.notifications.post_notifications.document_download_client")
    document_download_mock.get_upload_url.return_value = "https://document-url"

    data = {
        # NOTE(review): "<EMAIL>" looks like a redaction placeholder for a literal
        # address — verify against upstream history before relying on it.
        "email_address": "<EMAIL>",
        "template_id": template.id,
        "personalisation": {"document": {"file": "abababab", "sending_method": "link"}},
    }
    auth_header = create_authorization_header(service_id=service.id)

    response = client.post(
        path="v2/notifications/email",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), auth_header],
    )

    assert response.status_code == 201
    resp_json = json.loads(response.get_data(as_text=True))
    assert validate(resp_json, post_email_response) == resp_json
    assert resp_json["content"]["body"] == "Document: Document: https://document-url/test-document".removeprefix("Document: ")


def test_post_notification_without_document_upload_permission(client, notify_db_session, mocker):
    """A service without UPLOAD_DOCUMENT permission gets a 400 for a document personalisation."""
    service = create_service(service_permissions=[EMAIL_TYPE])
    template = create_template(service=service,
                               template_type="email", content="Document: ((document))")
    mocker.patch("app.celery.provider_tasks.deliver_email.apply_async")
    document_download_mock = mocker.patch("app.v2.notifications.post_notifications.document_download_client")
    document_download_mock.upload_document.return_value = document_download_response()

    data = {
        "email_address": service.users[0].email_address,
        "template_id": template.id,
        "personalisation": {"document": {"file": "abababab"}},
    }
    auth_header = create_authorization_header(service_id=service.id)

    response = client.post(
        path="v2/notifications/email",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), auth_header],
    )

    assert response.status_code == 400


def test_post_notification_returns_400_when_get_json_throws_exception(client, sample_email_template):
    """Malformed JSON in the request body ("[") yields a 400, not a 500."""
    auth_header = create_authorization_header(service_id=sample_email_template.service_id)
    response = client.post(
        path="v2/notifications/email",
        data="[",
        headers=[("Content-Type", "application/json"), auth_header],
    )
    assert response.status_code == 400


@pytest.mark.parametrize("args", [{}, {"rows": [1, 2], "csv": "foo"}], ids=["no args", "both args"])
def test_post_bulk_with_invalid_data_arguments(
    client,
    sample_email_template,
    args,
):
    """/v2/notifications/bulk requires exactly one of "rows" or "csv"."""
    data = {"name": "job_name", "template_id": str(sample_email_template.id)} | args

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=sample_email_template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [
        {
            "error": "BadRequestError",
            "message": "You should specify either rows or csv",
        }
    ]


def test_post_bulk_with_invalid_reply_to_id(client, sample_email_template):
    """A non-UUID reply_to_id fails schema validation with a 400."""
    data = {
        "name": "job_name",
        "template_id": str(sample_email_template.id),
        "rows": [["email address"], ["<EMAIL>"]],
        "reply_to_id": "foo",
    }

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=sample_email_template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [
        {
            "error": "ValidationError",
            "message": "reply_to_id is not a valid UUID",
        }
    ]


def test_post_bulk_with_non_existing_reply_to_id_for_email(client, sample_email_template, fake_uuid):
    """A well-formed but unknown reply_to_id for an email template yields a 400."""
    data = {
        "name": "job_name",
        "template_id": str(sample_email_template.id),
        "rows": [["email address"], ["<EMAIL>"]],
        "reply_to_id": fake_uuid,
    }

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=sample_email_template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [
        {
            "error": "BadRequestError",
            "message": f"email_reply_to_id {fake_uuid} does not exist in database for service id {sample_email_template.service_id}",
        }
    ]


def test_post_bulk_with_non_existing_reply_to_id_for_sms(client, sms_code_template, fake_uuid):
    """A well-formed but unknown reply_to_id for an SMS template yields a 400 (sms_sender_id)."""
    data = {
        "name": "job_name",
        "template_id": str(sms_code_template.id),
        # NOTE(review): the row value looks like a redaction placeholder in a phone
        # number column; the row content is irrelevant to the assertion under test.
        "rows": [["phone number", "verify_code"], ["<EMAIL>", "123"]],
        "reply_to_id": fake_uuid,
    }

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=sms_code_template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [
        {
            "error": "BadRequestError",
            "message": f"sms_sender_id {fake_uuid} does not exist in database for service id {sms_code_template.service_id}",
        }
    ]


def test_post_bulk_flags_if_name_is_missing(client, sample_email_template):
    """The bulk payload requires a job "name"."""
    data = {"template_id": str(sample_email_template.id), "csv": "foo"}

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=sample_email_template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [{"error": "ValidationError", "message": "name is a required property"}]


# Clock frozen at 2016-01-01 10:05:00, so "10:04" is in the past and
# 2016-01-05 10:06 is just over the 96-hour scheduling window.
@pytest.mark.parametrize(
    "scheduled_for, expected_message",
    [
        (42, "scheduled_for 42 is not of type string, null"),
        (
            "foo",
            "scheduled_for datetime format is invalid. It must be a valid "
            "ISO8601 date time format, "
            "https://en.wikipedia.org/wiki/ISO_8601",
        ),
        ("2016-01-01T10:04:00", "scheduled_for datetime cannot be in the past"),
        ("2016-01-05T10:06:00", "scheduled_for datetime can only be up to 96 hours in the future"),
    ],
)
@freeze_time("2016-01-01 10:05:00")
def test_post_bulk_with_invalid_scheduled_for(client, sample_email_template, scheduled_for, expected_message):
    """Each invalid scheduled_for value is rejected with the matching validation message."""
    data = {"name": "job_name", "template_id": str(sample_email_template.id), "scheduled_for": scheduled_for, "rows": [1, 2]}

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=sample_email_template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [{"error": "ValidationError", "message": expected_message}]


def test_post_bulk_with_non_existing_template(client, fake_uuid, sample_email_template):
    """An unknown template_id yields a 400 "Template not found"."""
    data = {"name": "job_name", "template_id": fake_uuid, "rows": [1, 2]}

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=sample_email_template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [{"error": "BadRequestError", "message": "Template not found"}]


def test_post_bulk_with_archived_template(client, fake_uuid, notify_db, notify_db_session):
    """An archived (deleted) template yields a 400 naming the template id."""
    template = sample_template(notify_db, notify_db_session, archived=True)
    data = {"name": "job_name", "template_id": template.id, "rows": [1, 2]}

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [{"error": "BadRequestError", "message": f"Template {template.id} has been deleted"}]


@pytest.mark.parametrize(
    "permission_type, notification_type, expected_error",
    [
        ("email", "sms", "text messages"),
        ("sms", "email", "emails"),
    ],
)
def test_post_bulk_returns_400_if_not_allowed_to_send_notification_type(
    notify_db_session,
    client,
    permission_type,
    notification_type,
    expected_error,
):
    """A service lacking the channel permission cannot bulk-send on that channel."""
    service = create_service(service_permissions=[permission_type])
    sample_template_without_permission = create_template(service=service, template_type=notification_type)
    data = {"name": "job_name", "template_id": sample_template_without_permission.id, "rows": [1, 2]}
    auth_header = create_authorization_header(service_id=sample_template_without_permission.service.id)

    response = client.post(
        path="/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), auth_header],
    )

    assert response.status_code == 400
    assert response.headers["Content-type"] == "application/json"

    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["status_code"] == 400
    assert error_json["errors"] == [
        {
            "error": "BadRequestError",
            "message": f"Service is not allowed to send {expected_error}",
        }
    ]


# NOTE(review): the second and third sms rows below are identical — one is probably
# redundant (or a variant such as a different row_header was intended). Header
# matching appears case-insensitive given the ["NAME"] case.
@pytest.mark.parametrize("data_type", ["rows", "csv"])
@pytest.mark.parametrize(
    "template_type, content, row_header, expected_error",
    [
        ("email", "Hello!", ["foo"], "email address"),
        ("email", "Hello ((name))!", ["foo"], "email address, name"),
        ("sms", "Hello ((name))!", ["foo"], "name, phone number"),
        ("sms", "Hello ((name))!", ["foo"], "name, phone number"),
        ("sms", "Hello ((name))!", ["name"], "phone number"),
        ("sms", "Hello ((name))!", ["NAME"], "phone number"),
    ],
)
def test_post_bulk_flags_missing_column_headers(
    client, notify_db, notify_db_session, data_type, template_type, content, row_header, expected_error
):
    """Missing recipient/placeholder column headers are reported, via both rows and csv input."""
    template = sample_template(notify_db, notify_db_session, content=content, template_type=template_type)
    data = {"name": "job_name", "template_id": template.id}
    rows = [row_header, ["bar"]]
    if data_type == "csv":
        data["csv"] = rows_to_csv(rows)
    else:
        data["rows"] = rows

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [{"error": "BadRequestError", "message": f"Missing column headers: {expected_error}"}]


@pytest.mark.parametrize(
    "template_type, content, row_header, expected_error",
    [
        (
            "email",
            "Hello!",
            ["email address", "email address"],
            "email address",
        ),
        (
            "email",
            "Hello ((name))!",
            ["email address", "email_address", "name"],
            "email address, email_address",
        ),
        ("sms", "Hello!", ["phone number", "phone number"], "phone number"),
        (
            "sms",
            "Hello!",
            ["phone number", "phone_number"],
            "phone number, phone_number",
        ),
        (
            "sms",
            "Hello ((name))!",
            ["phone number", "phone_number", "name"],
            "phone number, phone_number",
        ),
    ],
)
def test_post_bulk_flags_duplicate_recipient_column_headers(
    client,
    notify_db,
    notify_db_session,
    template_type,
    content,
    row_header,
    expected_error,
):
    """Duplicate recipient headers (including underscore/space aliases) are reported."""
    template = sample_template(notify_db, notify_db_session, content=content, template_type=template_type)
    data = {"name": "job_name", "template_id": template.id, "rows": [row_header, ["bar"]]}

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[("Content-Type", "application/json"), create_authorization_header(service_id=template.service_id)],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [{"error": "BadRequestError", "message": f"Duplicate column headers: {expected_error}"}]


def test_post_bulk_flags_too_many_rows(client, sample_email_template, notify_api):
    """With CSV_MAX_ROWS forced to 1, a 2-row csv is rejected."""
    data = {
        "name": "job_name",
        "template_id": sample_email_template.id,
        "csv": rows_to_csv([["email address"], ["<EMAIL>"], ["<EMAIL>"]]),
    }

    with set_config(notify_api, "CSV_MAX_ROWS", 1):
        response = client.post(
            "/v2/notifications/bulk",
            data=json.dumps(data),
            headers=[
                ("Content-Type", "application/json"),
                create_authorization_header(service_id=sample_email_template.service_id),
            ],
        )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [
        {
            "error": "BadRequestError",
            "message": "Too many rows. Maximum number of rows allowed is 1",
        }
    ]


def test_post_bulk_flags_recipient_not_in_safelist_with_team_api_key(client, sample_email_template):
    """A team API key may only send to safelisted recipients."""
    data = {
        "name": "job_name",
        "template_id": sample_email_template.id,
        "csv": rows_to_csv([["email address"], ["<EMAIL>"], ["<EMAIL>"]]),
    }

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[
            ("Content-Type", "application/json"),
            create_authorization_header(service_id=sample_email_template.service_id, key_type="team"),
        ],
    )

    assert response.status_code == 400
    error_json = json.loads(response.get_data(as_text=True))
    assert error_json["errors"] == [
        {
            "error": "BadRequestError",
            "message": "You cannot send to these recipients because you used a team and safelist API key.",
        }
    ]


def test_post_bulk_flags_recipient_not_in_safelist_with_restricted_service(client, notify_db, notify_db_session):
    """Same safelist restriction for a restricted (trial-mode) service.

    NOTE(review): this test continues past the end of this chunk.
    """
    service = create_service(restricted=True)
    template = sample_template(notify_db, notify_db_session, service=service,
                               template_type="email")
    data = {
        "name": "job_name",
        "template_id": template.id,
        "csv": rows_to_csv([["email address"], ["<EMAIL>"], ["<EMAIL>"]]),
    }

    response = client.post(
        "/v2/notifications/bulk",
        data=json.dumps(data),
        headers=[
            ("Content-Type", "application/json"),
            create_authorization_header(service_id=template.service_id, key_type="team"),
#!/usr/bin/env python
# Training script for a 1-D CNN that predicts an RNA base-pair contact map
# (seq_len x seq_len) from two one-hot-encoded sequence windows, built on TF 1.x.
# NOTE(review): this chunk ends mid-function; main() continues past the last line.
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import numpy as np
from math import sqrt
from numpy import zeros
import re
import math
from .batch_object import batch_object
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pdb
import random
import time
from numpy import genfromtxt
#from scipy.spatial import distance
#from PIL import Image
#import pickle
from pathlib import Path
import time  # NOTE(review): duplicate import of `time` (also above)
from random import randint
import gc
import csv
from . import datadir
#os.environ['CUDA_VISIBLE_DEVICES'] = ''

# Fixed seed so shuffling and train/test split are reproducible.
np.random.seed(2504)


def main():
    """Parse CLI settings, build the training set from '.ct' files, and train the model."""
    import apetype as at

    class CLI_Settings(at.ConfigBase):
        seq_len: int  # Sequence length
        tr_dir: str  # Directory with training data '.ct' files

    settings = CLI_Settings()

    # Import tensorflow, after processing CLI_Settings as it prints warnings
    import tensorflow as tf
    from tensorflow.python.saved_model import builder as saved_model_builder

    def conv1d(input, pname, name, kshape, stride=1):
        # 1-D convolution + bias + leaky ReLU; variables are named with the
        # `pname` prefix so they can be restored by name from a checkpoint.
        # NOTE(review): `input` shadows the builtin of the same name.
        with tf.name_scope(name):
            W = tf.get_variable(name=pname + 'w_' + name, shape=kshape)
            b = tf.get_variable(name=pname + 'bias_' + name, shape=[kshape[2]])
            out = tf.nn.conv1d(input, W, stride=stride, padding='SAME')
            out = tf.nn.bias_add(out, b)
            out = tf.nn.leaky_relu(out)
            #out = tf.nn.relu(out)
            return out

    def fullyConnected(input, name, output_size):
        # Flatten the input and apply a dense layer followed by leaky ReLU.
        with tf.name_scope(name):
            input_size = input.shape[1:]
            input_size = int(np.prod(input_size))
            W = tf.get_variable(name='w_' + name, shape=[input_size, output_size])
            b = tf.get_variable(name='bias_' + name, shape=[output_size])
            input = tf.reshape(input, [-1, input_size])
            out = tf.add(tf.matmul(input, W), b)
            out = tf.nn.leaky_relu(out)
            #out = tf.maximum(out, 0.01 * out, name = "forsoft")
            return out

    def model(x, y, keep_ratio1, keep_ratio2):
        # Six stacked conv1d layers (64 -> 2048 channels) then a dense head that
        # emits a flattened seq_len*seq_len contact map; MSE loss against y.
        dpi = tf.nn.dropout(x, keep_ratio1)
        filter_size = 5
        lc1 = conv1d(dpi, 'fre_', 'lc1', [filter_size, 2 * max_features, 64])
        lc2 = conv1d(lc1, 'fre_', 'lc2', [filter_size, 64, 128])
        lc3 = conv1d(lc2, 'fre_', 'lc3', [filter_size, 128, 256])
        lc4 = conv1d(lc3, 'fre_', 'lc4', [filter_size, 256, 512])
        lc5 = conv1d(lc4, 'fre_', 'lc5', [filter_size, 512, 1024])
        lc6 = conv1d(lc5, 'fre_', 'lc6', [filter_size, 1024, 2048])
        ml = tf.contrib.layers.flatten(lc6)
        dp = tf.nn.dropout(ml, keep_ratio2)
        out = fullyConnected(dp, "output_p", seq_len * seq_len)
        loss = tf.reduce_mean(tf.squared_difference(y, out))
        #vars = tf.trainable_variables()
        #l2_loss = tf.add_n([ tf.nn.l2_loss(v) for v in vars if 'bias' not in v.name ])
        #loss = tf.add(loss, 0.000001*l2_loss)
        return out, loss

    def clean_seq(s):
        # Uppercase, strip whitespace, and replace any non-letter with 'N'.
        ns = s.upper()
        pattern = re.compile(r'\s+')
        ns = re.sub(pattern, '', ns)
        ns = re.sub(r'[^a-zA-Z]{1}', 'N', ns)
        return ns

    def encode(ns):
        # One-hot encode A/U/G/C as comma-separated 4-vectors; any remaining
        # letter becomes the all-zero vector. Returns a string without the
        # trailing comma, ready for np.fromstring.
        ns = ns.replace("A", "1,0,0,0,")
        ns = ns.replace("U", "0,1,0,0,")
        ns = ns.replace("G", "0,0,1,0,")
        ns = ns.replace("C", "0,0,0,1,")
        if re.search('[a-zA-Z]', ns):
            #print(s)
            #print('Non-standard symbol in sequence - changed to A.')
            ns = re.sub("[a-zA-Z]", "0,0,0,0,", ns)
        return ns[:-1]

    def brun(sess, x, y, a, keep_prob1, keep_prob2):
        # Run inference over `a` in fixed-size batches with dropout disabled.
        preds = []
        batch_size = 128
        number_of_full_batch = int(math.ceil(float(len(a)) / batch_size))
        for i in range(number_of_full_batch):
            preds += list(sess.run(y, feed_dict={x: np.asarray(a[i * batch_size:(i + 1) * batch_size]), keep_prob1: 1.0, keep_prob2: 1.0}))
        return preds

    max_features = 4  # one-hot alphabet size (A/U/G/C)
    seq_len = settings.seq_len  # int(sys.argv[1])
    tr_dir = settings.tr_dir  # sys.argv[2]

    pos_seq = []
    raw_seq = []
    # Seed the dedup set with the all-N window pair so it is never emitted.
    used_seq = set(['N' * seq_len + '_' + 'N' * seq_len])
    dup = 0
    seq = ""

    # Class lists control the sampling stride per input file (see `steps` below).
    small_classes = []
    with open(os.path.join(datadir, 'small_classes.csv')) as file:
        for line in file:
            small_classes.append(line.strip())

    t_classes = []
    with open(os.path.join(datadir, 't_classes.csv')) as file:
        for line in file:
            t_classes.append(line.strip())

    directory = os.fsencode(tr_dir)

    x_data = []
    y_data = []
    y_raw = []
    filenames = []  # NOTE(review): appears unused in the visible code
    steps = []
    ctp = 0  # count of parsed files
    rms = 0  # count of skipped (non-contiguous) files
    # Parse each '.ct' file: column 1 is the base, column 4 the 1-based pairing
    # partner (0 = unpaired) — presumably standard connectivity-table layout;
    # TODO confirm the exact column semantics against the data source.
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        skip_file = False
        if filename.endswith(".ct"):
            seq = ""
            prev_nuc = 0
            pairs = []
            first = True
            with open(tr_dir + "/" + filename) as fp:
                for cnt, line in enumerate(fp):
                    if (line.startswith("#")):
                        continue
                    if (first):
                        # Skip the header line.
                        first = False
                        continue
                    values = line.split()
                    seq = seq + values[1]
                    if (int(values[0]) != prev_nuc + 1):
                        # Non-contiguous numbering: drop the whole file.
                        rms = rms + 1
                        skip_file = True
                        break
                    prev_nuc = int(values[0])
                    if (int(values[4]) > 0):
                        pairs.append([int(values[2]), int(values[4]) - 1])
            if (skip_file):
                continue
            seq = clean_seq(seq)
            #if(len(seq) <= 140):
            #    continue
            # Pad with seq_len-1 'N's on both sides so every window is full-width.
            a = seq_len - 1
            nseq = 'N' * (a) + seq + 'N' * (a)
            # NOTE(review): np.fromstring is deprecated; np.frombuffer or a list
            # parse is the modern equivalent — left unchanged here.
            pos_seq.append(np.fromstring(encode(nseq), dtype=int, sep=",").reshape(-1, 4))
            raw_seq.append(nseq)
            square_size = len(nseq)
            # Symmetric contact map over the padded sequence.
            y_cm = zeros((square_size, square_size))
            for j in range(len(pairs)):
                y_cm[a + pairs[j][0]][a + pairs[j][1]] = 1.0
                y_cm[a + pairs[j][1]][a + pairs[j][0]] = 1.0
            y_raw.append(y_cm)
            ctp = ctp + 1
            # Strip the prefix before the first '_' to get the class name.
            filename = filename[filename.index("_") + 1:]
            if (filename in small_classes):
                steps.append(1)
            elif (filename in t_classes):
                steps.append(2)
            else:
                if (len(seq) < 140):
                    steps.append(4)
                else:
                    steps.append(40)
            if (ctp % 1000 == 0):
                print(str(ctp) + " - ", end='', flush=True)
            #if(len(pos_seq) > 10):
            #    break
        else:
            continue

    min_mat_score = 1  # minimum number of contacts a window pair must contain
    max_step = 1
    skip_step = 1  # NOTE(review): unused in the visible code
    print(" ", flush=True)
    print("Skipped files: " + str(rms))
    print("Min mat score: " + str(min_mat_score))
    dup = 0
    # Slide a randomly-strided window pair (rows x cols) over each contact map,
    # keeping only pairs with enough contacts and deduplicating by sequence text.
    for i in range(len(pos_seq)):
        max_step = steps[i]
        num = len(pos_seq[i]) - seq_len + 1
        r_i = 0
        while r_i < num:
            c_i = 0
            skip = True
            while c_i < num:
                y_cm = y_raw[i][r_i: r_i + seq_len, c_i: c_i + seq_len]
                if (y_cm.sum() < min_mat_score):
                    c_i = c_i + randint(1, max_step)
                    continue
                else:
                    skip = False
                #cseq1 = np.array(list(raw_seq[i][r_i: r_i + seq_len]))
                #cseq2 = np.array(list(raw_seq[i][c_i: c_i + seq_len]))
                #sum_r = np.sum(y_cm, axis = 0)
                #sum_c = np.sum(y_cm, axis = 1)
                #r_ind = np.where(sum_r == 0)[0]
                #c_ind = np.where(sum_c == 0)[0]
                #cseq1[r_ind] = "N"
                #cseq2[c_ind] = "N"
                cseq = raw_seq[i][r_i: r_i + seq_len] + "_" + raw_seq[i][c_i: c_i + seq_len]
                if (cseq in used_seq):
                    c_i = c_i + randint(1, max_step)
                    dup = dup + 1
                    continue
                used_seq.add(cseq)
                zos1 = np.squeeze(pos_seq[i][r_i: r_i + seq_len])
                zos2 = np.squeeze(pos_seq[i][c_i: c_i + seq_len])
                # Input is the two windows concatenated feature-wise: (seq_len, 8).
                x_data.append(np.concatenate((zos1, zos2), axis=1))
                y_data.append(y_cm.flatten())
                c_i = c_i + randint(1, max_step)
            # NOTE(review): both branches are identical — `skip` has no effect here.
            if (skip):
                r_i = r_i + randint(1, max_step)
            else:
                r_i = r_i + randint(1, max_step)
        # Free per-file data as we go to bound memory.
        pos_seq[i] = None
        raw_seq[i] = None
        y_raw[i] = None
        if (i % 100 == 0):
            print("(" + str(i) + " : " + str(len(used_seq)) + " : " + str(dup) + ") - ", end='', flush=True)
            # NOTE(review): collapsed formatting is ambiguous here; gc.collect()
            # placed inside the progress branch — confirm against original history.
            gc.collect()

    used_seq = None
    print("", flush=True)
    gc.collect()
    print("----------------------------------------------------------------", flush=True)
    print("Done generating", flush=True)
    print("Duplicates: " + str(dup))
    print("Final size: " + str(len(y_data)), flush=True)
    print("----------------------------------------------------------------", flush=True)

    x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.01, random_state=2504)

    # Drop the full dataset now that it has been split.
    x_data = None
    y_data = None
    pos_seq = None
    raw_seq = None
    y_raw = None
    gc.collect()

    batch_size = 1024
    nb_epoch = 10001

    # initialize inputs
    x = tf.placeholder(tf.float32, shape=[None, seq_len, 2 * max_features], name="input_rna")
    y = tf.placeholder(tf.float32, shape=[None, seq_len * seq_len])
    keep_prob1 = tf.placeholder(tf.float32, name="kr_rna1")
    keep_prob2 = tf.placeholder(tf.float32, name="kr_rna2")

    # build the model
    out, loss = model(x, y, keep_prob1, keep_prob2)
    out = tf.identity(out, name="output_rna")

    # initialize optimizer
    train_step = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

    # run the training loop
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        # Resume from an existing checkpoint (fails if it does not exist).
        saver.restore(sess, "model_rna_m/variables/variables")
        total = int(len(x_train) / batch_size) + 1
        for epoch in range(nb_epoch):
            # The presence of the file "trainms" acts as a run/stop switch.
            my_file = Path("trainms")
            if not my_file.is_file():
                break
            # Shuffle x and y with the same RNG state so pairs stay aligned.
            rng_state = np.random.get_state()
            np.random.shuffle(x_train)
            np.random.set_state(rng_state)
            np.random.shuffle(y_train)
            rng_state = np.random.get_state()
            np.random.shuffle(x_test)
            np.random.set_state(rng_state)
            np.random.shuffle(y_test)
            x_train_obj = batch_object(x_train, batch_size)
            y_train_obj = batch_object(y_train, batch_size)
            for i in range(total):
                x_train_batch = x_train_obj.next_batch()
                y_train_batch = y_train_obj.next_batch()
                #train_batch = generate_random_batch([x_train], y_train, batch_size)
                #feed = {x : x_train_batch, y_: y_train_batch, keep_prob : 1.0}
                #print(np.shape(np.squeeze(np.split(x_train_batch, 2, axis=1)[0])))
                feed = {x: x_train_batch, y: y_train_batch, keep_prob1: 1.0, keep_prob2: 1.0}
                train_step.run(feed_dict=feed)
            # NOTE(review): `epoch % 1 == 0` is always true — evaluation runs every epoch.
            if epoch % 1 == 0:
                # Training-set evaluation: at = actual positives, ac = correctly
                # predicted positives, ae = false positives; sn = recall.
                ts = 1001
                pred = brun(sess, x, out, x_train[:ts], keep_prob1, keep_prob2)
                orig = np.reshape(np.asarray(y_train[:ts]), (ts, seq_len * seq_len))
                pred = np.asarray(pred)
                pred = np.around(pred, 0).astype(int)
                pred = np.reshape(pred, (ts, seq_len * seq_len))
                ae = 0.0
                at = 0.0
                ac = 0.0
                for u in range(ts):
                    o1 = orig[u]
                    p1 = pred[u]
                    for q in range(len(o1)):
                        if (o1[q] == 1):
                            at = at + 1
                            if (p1[q] == 1):
                                ac = ac + 1
                        elif (p1[q] == 1):
                            ae = ae + 1
                sn = 0
                if (at > 0):
                    sn = ac / at
                print("Epoch: %d. Train --- %g - %g" % (epoch, sn, ae), end='', flush=True)
                # Test-set evaluation with the same metrics.
                ts = 1001
                pred = brun(sess, x, out, x_test[:ts], keep_prob1, keep_prob2)
                orig = np.reshape(np.asarray(y_test[:ts]), (ts, seq_len * seq_len))
                pred = np.asarray(pred)
                pred = np.reshape(pred, (ts, seq_len * seq_len))
                # for u in range(ts):
                #     pix = np.reshape(orig[u], (seq_len, seq_len))
                #     img1 = Image.fromarray(np.uint8(pix * 255) , 'L')
                #
                #     pix = np.reshape(pred[u], (seq_len, seq_len)).clip(min=0, max=1)
                #     img2 = Image.fromarray(np.uint8(pix * 255) , 'L')
                #     img3 = Image.new('L', (2*seq_len, seq_len))
                #     img3.paste(img1, (0,0))
                #     img3.paste(img2, (seq_len,0))
                #     img3.save("pics/img" + str(u) + ".bmp","BMP")
                #
                pred = np.around(pred, 0).astype(int)
                ae = 0.0
                at = 0.0
                ac = 0.0
                for u in